Mirror of https://github.com/yt-dlp/yt-dlp, synced 2025-07-09 23:33:50 -05:00
[cleanup] Upgrade syntax
Using https://github.com/asottile/pyupgrade

1. `__future__` imports and `coding: utf-8` were removed
2. Files were rewritten with `pyupgrade --py36-plus --keep-percent-format`
3. f-strings were cherry-picked from `pyupgrade --py36-plus`

Extractors are left untouched (except removing header) to avoid unnecessary merge conflicts
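As a rough before/after sketch of what each step produces (the names below are illustrative, not taken from this diff):

```python
# Step 2: `pyupgrade --py36-plus --keep-percent-format` modernizes syntax
# while leaving %-formatting alone, e.g. dropping the redundant `object` base:
class Before(object):
    pass

class After:
    pass

# Step 3: f-string rewrites were then cherry-picked from a plain
# `pyupgrade --py36-plus` run; the two spellings are equivalent:
video_id, note = 'abc123', 'Downloading webpage'
assert '%s: %s' % (video_id, note) == f'{video_id}: {note}'
```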
@@ -1,6 +1,3 @@
-# coding: utf-8
-from __future__ import unicode_literals
-
 import base64
 import collections
 import xml.etree.ElementTree
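Both removed header lines are no-ops on Python 3: source files default to UTF-8 (PEP 3120), and string literals are always unicode `str`, which is all `unicode_literals` ever guaranteed. A quick check:

```python
import sys

# UTF-8 is the default text encoding in Python 3, so the coding cookie is redundant
assert sys.getdefaultencoding() == 'utf-8'
# ...and a bare literal is already str (unicode), no __future__ import needed
assert isinstance('naïve', str)
```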
@@ -92,7 +89,7 @@ from ..utils import (
 )
 
 
-class InfoExtractor(object):
+class InfoExtractor:
     """Information Extractor class.
 
     Information extractors are the classes that, given a URL, extract
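This rewrite is safe because every Python 3 class is a new-style class; the explicit `object` base adds nothing. A minimal demonstration with hypothetical class names:

```python
class WithObject(object):
    pass

class WithoutObject:
    pass

# Identical method resolution order either way
assert WithObject.__mro__[1:] == WithoutObject.__mro__[1:] == (object,)
```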
@@ -628,7 +625,7 @@ class InfoExtractor(object):
         if country:
             self._x_forwarded_for_ip = GeoUtils.random_ipv4(country)
             self._downloader.write_debug(
-                'Using fake IP %s (%s) as X-Forwarded-For' % (self._x_forwarded_for_ip, country.upper()))
+                f'Using fake IP {self._x_forwarded_for_ip} ({country.upper()}) as X-Forwarded-For')
 
     def extract(self, url):
         """Extracts URL information and returns it in list of dicts."""
@@ -741,9 +738,9 @@ class InfoExtractor(object):
             self.report_download_webpage(video_id)
         elif note is not False:
             if video_id is None:
-                self.to_screen('%s' % (note,))
+                self.to_screen(str(note))
             else:
-                self.to_screen('%s: %s' % (video_id, note))
+                self.to_screen(f'{video_id}: {note}')
 
         # Some sites check X-Forwarded-For HTTP header in order to figure out
         # the origin of the client behind proxy. This allows bypassing geo
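The odd-looking `(note,)` tuple in the old code was load-bearing: `%` treats a bare tuple on the right-hand side as multiple format arguments, so wrapping was needed whenever `note` could itself be a tuple. `str(note)` expresses the intent directly. A sketch (the tuple value is hypothetical):

```python
note = ('resuming', 'from cache')   # a tuple-valued note

# '%s' % note would raise TypeError: not all arguments converted
assert '%s' % (note,) == str(note) == "('resuming', 'from cache')"
```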
@@ -779,7 +776,7 @@ class InfoExtractor(object):
             if errnote is None:
                 errnote = 'Unable to download webpage'
 
-            errmsg = '%s: %s' % (errnote, error_to_compat_str(err))
+            errmsg = f'{errnote}: {error_to_compat_str(err)}'
             if fatal:
                 raise ExtractorError(errmsg, cause=err)
             else:
@@ -860,7 +857,7 @@ class InfoExtractor(object):
             dump = base64.b64encode(webpage_bytes).decode('ascii')
             self._downloader.to_screen(dump)
         if self.get_param('write_pages', False):
-            basen = '%s_%s' % (video_id, urlh.geturl())
+            basen = f'{video_id}_{urlh.geturl()}'
             trim_length = self.get_param('trim_file_name') or 240
             if len(basen) > trim_length:
                 h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
@@ -1098,10 +1095,10 @@ class InfoExtractor(object):
 
     def to_screen(self, msg, *args, **kwargs):
         """Print msg to screen, prefixing it with '[ie_name]'"""
-        self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg), *args, **kwargs)
+        self._downloader.to_screen(f'[{self.IE_NAME}] {msg}', *args, **kwargs)
 
     def write_debug(self, msg, *args, **kwargs):
-        self._downloader.write_debug('[%s] %s' % (self.IE_NAME, msg), *args, **kwargs)
+        self._downloader.write_debug(f'[{self.IE_NAME}] {msg}', *args, **kwargs)
 
     def get_param(self, name, default=None, *args, **kwargs):
         if self._downloader:
@@ -1138,7 +1135,7 @@ class InfoExtractor(object):
             method = 'any' if self.supports_login() else 'cookies'
         if method is not None:
             assert method in self._LOGIN_HINTS, 'Invalid login method'
-            msg = '%s. %s' % (msg, self._LOGIN_HINTS[method])
+            msg = f'{msg}. {self._LOGIN_HINTS[method]}'
         raise ExtractorError(msg, expected=True)
 
     def raise_geo_restricted(
@@ -1257,7 +1254,7 @@ class InfoExtractor(object):
                 else:
                     raise netrc.NetrcParseError(
                         'No authenticators for %s' % netrc_machine)
-            except (IOError, netrc.NetrcParseError) as err:
+            except (OSError, netrc.NetrcParseError) as err:
                 self.report_warning(
                     'parsing .netrc: %s' % error_to_compat_str(err))
 
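Since Python 3.3 (PEP 3151), `IOError` is a plain alias of `OSError`, so catching `OSError` here is behaviorally identical and just uses the canonical name:

```python
assert IOError is OSError

try:
    open('/nonexistent/.netrc')
except OSError as err:              # also catches what old code spelled as IOError
    print(type(err).__name__)       # FileNotFoundError, a subclass of OSError
```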
@@ -3333,7 +3330,7 @@ class InfoExtractor(object):
             http_f = f.copy()
             del http_f['manifest_url']
             http_url = re.sub(
-                REPL_REGEX, protocol + r'://%s/\g<1>%s\3' % (http_host, qualities[i]), f['url'])
+                REPL_REGEX, protocol + fr'://{http_host}/\g<1>{qualities[i]}\3', f['url'])
             http_f.update({
                 'format_id': http_f['format_id'].replace('hls-', protocol + '-'),
                 'url': http_url,
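The `fr''` prefix does two jobs here: the raw part keeps the `\g<1>` and `\3` group references intact for `re.sub`, while the f-string part interpolates `http_host` and `qualities[i]`. A self-contained sketch of the same pattern (the regex and values are made up, not yt-dlp's `REPL_REGEX`):

```python
import re

pattern = r'https?://[^/]+/(\w+)\.(\w+)'
http_host, quality = 'cdn.example.com', '-720p'

# {…} is interpolated; backslash sequences stay literal for re.sub
replacement = fr'https://{http_host}/\g<1>{quality}.\2'
print(re.sub(pattern, replacement, 'http://origin.example.com/video.mp4'))
# -> https://cdn.example.com/video-720p.mp4
```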
@@ -3354,7 +3351,7 @@ class InfoExtractor(object):
         formats = []
 
         def manifest_url(manifest):
-            m_url = '%s/%s' % (http_base_url, manifest)
+            m_url = f'{http_base_url}/{manifest}'
             if query:
                 m_url += '?%s' % query
             return m_url
@@ -3391,7 +3388,7 @@ class InfoExtractor(object):
         for protocol in ('rtmp', 'rtsp'):
             if protocol not in skip_protocols:
                 formats.append({
-                    'url': '%s:%s' % (protocol, url_base),
+                    'url': f'{protocol}:{url_base}',
                     'format_id': protocol,
                     'protocol': protocol,
                 })
@@ -3557,7 +3554,7 @@ class InfoExtractor(object):
     def _int(self, v, name, fatal=False, **kwargs):
         res = int_or_none(v, **kwargs)
         if res is None:
-            msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
+            msg = f'Failed to extract {name}: Could not parse value {v!r}'
             if fatal:
                 raise ExtractorError(msg)
             else:
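The `!r` conversion in an f-string calls `repr()` on the value, exactly like `%r` in %-formatting, so an unparseable value still shows up quoted with escapes visible:

```python
name, v = 'duration', '12:34\n'
old = 'Failed to extract %s: Could not parse value %r' % (name, v)
new = f'Failed to extract {name}: Could not parse value {v!r}'
assert old == new   # both end with: Could not parse value '12:34\n'
```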
@@ -3567,7 +3564,7 @@ class InfoExtractor(object):
     def _float(self, v, name, fatal=False, **kwargs):
         res = float_or_none(v, **kwargs)
         if res is None:
-            msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
+            msg = f'Failed to extract {name}: Could not parse value {v!r}'
             if fatal:
                 raise ExtractorError(msg)
             else:
@@ -3685,7 +3682,7 @@ class InfoExtractor(object):
     def _merge_subtitle_items(subtitle_list1, subtitle_list2):
         """ Merge subtitle items for one language. Items with duplicated URLs/data
         will be dropped. """
-        list1_data = set((item.get('url'), item.get('data')) for item in subtitle_list1)
+        list1_data = {(item.get('url'), item.get('data')) for item in subtitle_list1}
         ret = list(subtitle_list1)
         ret.extend(item for item in subtitle_list2 if (item.get('url'), item.get('data')) not in list1_data)
         return ret
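`set(generator)` and a set comprehension build the same set; the comprehension simply avoids the generator indirection and the name lookup of `set`. With a couple of hypothetical subtitle items:

```python
subtitle_list1 = [
    {'url': 'https://example.com/en.vtt'},
    {'url': 'https://example.com/en.vtt'},   # duplicate URL
    {'data': 'WEBVTT...'},
]

old = set((item.get('url'), item.get('data')) for item in subtitle_list1)
new = {(item.get('url'), item.get('data')) for item in subtitle_list1}
assert old == new and len(new) == 2          # the duplicate (url, data) pair collapses
```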
@@ -3798,7 +3795,7 @@ class SearchInfoExtractor(InfoExtractor):
         else:
             n = int(prefix)
             if n <= 0:
-                raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
+                raise ExtractorError(f'invalid download number {n} for query "{query}"')
             elif n > self._MAX_RESULTS:
                 self.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
                 n = self._MAX_RESULTS