Mirror of https://github.com/ytdl-org/youtube-dl.git (synced 2025-08-01 10:04:15 -05:00)

Compare commits: 2013.03.29 ... 2013.04.11 (32 commits)
Commits (SHA1):

b625bc2c31
f4381ab88a
744435f2a4
855703e55e
927c8c4924
0ba994e9e3
af9ad45cd4
e0fee250c3
72ca05016d
844d1f9fa1
213c31ae16
04f3d551a0
e8600d69fd
b03d65c237
8743974189
dc36bc9434
df2dedeefb
adb029ed81
43ff1a347d
c2b293ba30
37cd9f522f
f33154cd39
bafeed9f5d
ef767f9fd5
bc97f6d60c
90a99c1b5e
f375d4b7de
fa41fbd318
6a205c8876
0fb3756409
fbbdf475b1
c238be3e3a
README.md: 208 lines changed
@@ -14,119 +14,125 @@ your Unix box, on Windows or on Mac OS X. It is released to the public domain,
which means you can modify it, redistribute it or use it however you like.

# OPTIONS
-    -h, --help print this help text and exit
-    --version print program version and exit
-    -U, --update update this program to latest version
-    -i, --ignore-errors continue on download errors
-    -r, --rate-limit LIMIT maximum download rate (e.g. 50k or 44.6m)
-    -R, --retries RETRIES number of retries (default is 10)
-    --buffer-size SIZE size of download buffer (e.g. 1024 or 16k) (default
-        is 1024)
-    --no-resize-buffer do not automatically adjust the buffer size. By
-        default, the buffer size is automatically resized
-        from an initial value of SIZE.
-    --dump-user-agent display the current browser identification
-    --user-agent UA specify a custom user agent
-    --list-extractors List all supported extractors and the URLs they
-        would handle
+    -h, --help print this help text and exit
+    --version print program version and exit
+    -U, --update update this program to latest version
+    -i, --ignore-errors continue on download errors
+    -r, --rate-limit LIMIT maximum download rate (e.g. 50k or 44.6m)
+    -R, --retries RETRIES number of retries (default is 10)
+    --buffer-size SIZE size of download buffer (e.g. 1024 or 16k)
+        (default is 1024)
+    --no-resize-buffer do not automatically adjust the buffer size. By
+        default, the buffer size is automatically resized
+        from an initial value of SIZE.
+    --dump-user-agent display the current browser identification
+    --user-agent UA specify a custom user agent
+    --list-extractors List all supported extractors and the URLs they
+        would handle

## Video Selection:
-    --playlist-start NUMBER playlist video to start at (default is 1)
-    --playlist-end NUMBER playlist video to end at (default is last)
-    --match-title REGEX download only matching titles (regex or caseless
-        sub-string)
-    --reject-title REGEX skip download for matching titles (regex or
-        caseless sub-string)
-    --max-downloads NUMBER Abort after downloading NUMBER files
-    --min-filesize SIZE Do not download any videos smaller than SIZE (e.g.
-        50k or 44.6m)
-    --max-filesize SIZE Do not download any videos larger than SIZE (e.g.
-        50k or 44.6m)
+    --playlist-start NUMBER playlist video to start at (default is 1)
+    --playlist-end NUMBER playlist video to end at (default is last)
+    --match-title REGEX download only matching titles (regex or caseless
+        sub-string)
+    --reject-title REGEX skip download for matching titles (regex or
+        caseless sub-string)
+    --max-downloads NUMBER Abort after downloading NUMBER files
+    --min-filesize SIZE Do not download any videos smaller than SIZE
+        (e.g. 50k or 44.6m)
+    --max-filesize SIZE Do not download any videos larger than SIZE (e.g.
+        50k or 44.6m)

## Filesystem Options:
-    -t, --title use title in file name
-    --id use video ID in file name
-    -l, --literal [deprecated] alias of --title
-    -A, --auto-number number downloaded files starting from 00000
-    -o, --output TEMPLATE output filename template. Use %(title)s to get the
-        title, %(uploader)s for the uploader name,
-        %(uploader_id)s for the uploader nickname if
-        different, %(autonumber)s to get an automatically
-        incremented number, %(ext)s for the filename
-        extension, %(upload_date)s for the upload date
-        (YYYYMMDD), %(extractor)s for the provider
-        (youtube, metacafe, etc), %(id)s for the video id
-        and %% for a literal percent. Use - to output to
-        stdout. Can also be used to download to a different
-        directory, for example with -o '/my/downloads/%(upl
-        oader)s/%(title)s-%(id)s.%(ext)s' .
-    --restrict-filenames Restrict filenames to only ASCII characters, and
-        avoid "&" and spaces in filenames
-    -a, --batch-file FILE file containing URLs to download ('-' for stdin)
-    -w, --no-overwrites do not overwrite files
-    -c, --continue resume partially downloaded files
-    --no-continue do not resume partially downloaded files (restart
-        from beginning)
-    --cookies FILE file to read cookies from and dump cookie jar in
-    --no-part do not use .part files
-    --no-mtime do not use the Last-modified header to set the file
-        modification time
-    --write-description write video description to a .description file
-    --write-info-json write video metadata to a .info.json file
+    -t, --title use title in file name
+    --id use video ID in file name
+    -l, --literal [deprecated] alias of --title
+    -A, --auto-number number downloaded files starting from 00000
+    -o, --output TEMPLATE output filename template. Use %(title)s to get
+        the title, %(uploader)s for the uploader name,
+        %(uploader_id)s for the uploader nickname if
+        different, %(autonumber)s to get an automatically
+        incremented number, %(ext)s for the filename
+        extension, %(upload_date)s for the upload date
+        (YYYYMMDD), %(extractor)s for the provider
+        (youtube, metacafe, etc), %(id)s for the video id
+        and %% for a literal percent. Use - to output to
+        stdout. Can also be used to download to a
+        different directory, for example with -o '/my/dow
+        nloads/%(uploader)s/%(title)s-%(id)s.%(ext)s' .
+    --autonumber-size NUMBER Specifies the number of digits in %(autonumber)s
+        when it is present in output filename template or
+        --autonumber option is given
+    --restrict-filenames Restrict filenames to only ASCII characters, and
+        avoid "&" and spaces in filenames
+    -a, --batch-file FILE file containing URLs to download ('-' for stdin)
+    -w, --no-overwrites do not overwrite files
+    -c, --continue resume partially downloaded files
+    --no-continue do not resume partially downloaded files (restart
+        from beginning)
+    --cookies FILE file to read cookies from and dump cookie jar in
+    --no-part do not use .part files
+    --no-mtime do not use the Last-modified header to set the
+        file modification time
+    --write-description write video description to a .description file
+    --write-info-json write video metadata to a .info.json file

## Verbosity / Simulation Options:
-    -q, --quiet activates quiet mode
-    -s, --simulate do not download the video and do not write anything
-        to disk
-    --skip-download do not download the video
-    -g, --get-url simulate, quiet but print URL
-    -e, --get-title simulate, quiet but print title
-    --get-thumbnail simulate, quiet but print thumbnail URL
-    --get-description simulate, quiet but print video description
-    --get-filename simulate, quiet but print output filename
-    --get-format simulate, quiet but print output format
-    --newline output progress bar as new lines
-    --no-progress do not print progress bar
-    --console-title display progress in console titlebar
-    -v, --verbose print various debugging information
+    -q, --quiet activates quiet mode
+    -s, --simulate do not download the video and do not write
+        anything to disk
+    --skip-download do not download the video
+    -g, --get-url simulate, quiet but print URL
+    -e, --get-title simulate, quiet but print title
+    --get-thumbnail simulate, quiet but print thumbnail URL
+    --get-description simulate, quiet but print video description
+    --get-filename simulate, quiet but print output filename
+    --get-format simulate, quiet but print output format
+    --newline output progress bar as new lines
+    --no-progress do not print progress bar
+    --console-title display progress in console titlebar
+    -v, --verbose print various debugging information
+    --dump-intermediate-pages print downloaded pages to debug problems(very
+        verbose)

## Video Format Options:
-    -f, --format FORMAT video format code
-    --all-formats download all available video formats
-    --prefer-free-formats prefer free video formats unless a specific one is
-        requested
-    --max-quality FORMAT highest quality format to download
-    -F, --list-formats list all available formats (currently youtube only)
-    --write-sub write subtitle file (currently youtube only)
-    --only-sub downloads only the subtitles (no video)
-    --all-subs downloads all the available subtitles of the video
-        (currently youtube only)
-    --list-subs lists all available subtitles for the video
-        (currently youtube only)
-    --sub-format LANG subtitle format [srt/sbv] (default=srt) (currently
-        youtube only)
-    --sub-lang LANG language of the subtitles to download (optional)
-        use IETF language tags like 'en'
+    -f, --format FORMAT video format code
+    --all-formats download all available video formats
+    --prefer-free-formats prefer free video formats unless a specific one
+        is requested
+    --max-quality FORMAT highest quality format to download
+    -F, --list-formats list all available formats (currently youtube
+        only)
+    --write-sub write subtitle file (currently youtube only)
+    --only-sub downloads only the subtitles (no video)
+    --all-subs downloads all the available subtitles of the
+        video (currently youtube only)
+    --list-subs lists all available subtitles for the video
+        (currently youtube only)
+    --sub-format LANG subtitle format [srt/sbv] (default=srt)
+        (currently youtube only)
+    --sub-lang LANG language of the subtitles to download (optional)
+        use IETF language tags like 'en'

## Authentication Options:
-    -u, --username USERNAME account username
-    -p, --password PASSWORD account password
-    -n, --netrc use .netrc authentication data
+    -u, --username USERNAME account username
+    -p, --password PASSWORD account password
+    -n, --netrc use .netrc authentication data

## Post-processing Options:
-    -x, --extract-audio convert video files to audio-only files (requires
-        ffmpeg or avconv and ffprobe or avprobe)
-    --audio-format FORMAT "best", "aac", "vorbis", "mp3", "m4a", "opus", or
-        "wav"; best by default
-    --audio-quality QUALITY ffmpeg/avconv audio quality specification, insert a
-        value between 0 (better) and 9 (worse) for VBR or a
-        specific bitrate like 128K (default 5)
-    --recode-video FORMAT Encode the video to another format if necessary
-        (currently supported: mp4|flv|ogg|webm)
-    -k, --keep-video keeps the video file on disk after the post-
-        processing; the video is erased by default
-    --no-post-overwrites do not overwrite post-processed files; the post-
-        processed files are overwritten by default
+    -x, --extract-audio convert video files to audio-only files (requires
+        ffmpeg or avconv and ffprobe or avprobe)
+    --audio-format FORMAT "best", "aac", "vorbis", "mp3", "m4a", "opus", or
+        "wav"; best by default
+    --audio-quality QUALITY ffmpeg/avconv audio quality specification, insert
+        a value between 0 (better) and 9 (worse) for VBR
+        or a specific bitrate like 128K (default 5)
+    --recode-video FORMAT Encode the video to another format if necessary
+        (currently supported: mp4|flv|ogg|webm)
+    -k, --keep-video keeps the video file on disk after the post-
+        processing; the video is erased by default
+    --no-post-overwrites do not overwrite post-processed files; the post-
+        processed files are overwritten by default

# CONFIGURATION
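The -o TEMPLATE documented above is ordinary Python %-style dictionary formatting over the video's fields; a minimal illustration (the field values below are invented):

    # The -o TEMPLATE syntax is Python %-style dict formatting; field values
    # here are made up for illustration.
    template = u'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s'
    fields = {
        'uploader': u'tagesschau',
        'title': u'Tagesschau in 100 Sekunden',
        'id': u'14077640',
        'ext': u'mp4',
    }
    print(template % fields)
    # -> /my/downloads/tagesschau/Tagesschau in 100 Sekunden-14077640.mp4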
@@ -8,8 +8,8 @@ import json

atom_template=textwrap.dedent("""\
    <?xml version='1.0' encoding='utf-8'?>
-   <atom:feed xmlns:atom="http://www.w3.org/2005/Atom">
-   <atom:subtitle>Updates feed.</atom:subtitle>
+   <atom:feed xmlns:atom="http://www.w3.org/2005/Atom">
+   <atom:title>youtube-dl releases</atom:title>
    <atom:id>youtube-dl-updates-feed</atom:id>
    <atom:updated>@TIMESTAMP@</atom:updated>
    @ENTRIES@
@@ -19,7 +19,7 @@ entry_template=textwrap.dedent("""
    <atom:entry>
    <atom:id>youtube-dl-@VERSION@</atom:id>
    <atom:title>New version @VERSION@</atom:title>
-   <atom:link href="http://rg3.github.com/youtube-dl" />
+   <atom:link href="http://rg3.github.io/youtube-dl" />
    <atom:content type="xhtml">
    <div xmlns="http://www.w3.org/1999/xhtml">
    Downloads available at <a href="http://youtube-dl.org/downloads/@VERSION@/">http://youtube-dl.org/downloads/@VERSION@/</a>
@@ -51,7 +51,7 @@ for v in versions:
entries_str = textwrap.indent(''.join(entries), '\t')
atom_template = atom_template.replace('@ENTRIES@', entries_str)

-with open('update/atom.atom','w',encoding='utf-8') as atom_file:
+with open('update/releases.atom','w',encoding='utf-8') as atom_file:
    atom_file.write(atom_template)
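For reference, a standalone sketch of how the feed is assembled from these templates. Only the indent-and-replace mechanics are taken from the script above; the template and entry strings here are simplified stand-ins:

    import textwrap

    # Simplified stand-ins for atom_template and the rendered entries.
    atom_template = "<atom:feed>\n@ENTRIES@\n</atom:feed>"
    entries = ["<atom:entry><atom:id>youtube-dl-2013.04.11</atom:id></atom:entry>\n"]

    # Indent every entry line and splice the block in place of @ENTRIES@,
    # exactly as the script does before writing update/releases.atom.
    entries_str = textwrap.indent(''.join(entries), '\t')
    atom_template = atom_template.replace('@ENTRIES@', entries_str)
    print(atom_template)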
@@ -40,7 +40,7 @@ raw_input()

filename = sys.argv[0]

-UPDATE_URL = "http://rg3.github.com/youtube-dl/update/"
+UPDATE_URL = "http://rg3.github.io/youtube-dl/update/"
VERSION_URL = UPDATE_URL + 'LATEST_VERSION'
JSON_URL = UPDATE_URL + 'versions.json'
UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)
@@ -58,6 +58,7 @@ with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:


class TestDownload(unittest.TestCase):
+   maxDiff = None
    def setUp(self):
        self.parameters = parameters
        self.defs = defs
@@ -81,9 +82,8 @@ def generator(test_case):
        params.update(test_case.get('params', {}))

        fd = FileDownloader(params)
-       fd.add_info_extractor(ie())
-       for ien in test_case.get('add_ie', []):
-           fd.add_info_extractor(getattr(youtube_dl.InfoExtractors, ien + 'IE')())
+       for ie in youtube_dl.InfoExtractors.gen_extractors():
+           fd.add_info_extractor(ie)
        finished_hook_called = set()
        def _hook(status):
            if status['status'] == 'finished':
@@ -103,7 +103,7 @@ def generator(test_case):
                if retry == RETRIES: raise

                # Check if the exception is not a network related one
-               if not err.exc_info[0] in (ZeroDivisionError, compat_urllib_error.URLError, socket.timeout):
+               if not err.exc_info[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError):
                    raise

                print('Retrying: {0} failed tries\n\n##########\n\n'.format(retry))
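A minimal driver in the style of the test above: register every extractor via gen_extractors() instead of picking IE classes by name. The import paths are an assumption about the 2013-era package layout; the calls themselves mirror the test code:

    import youtube_dl.InfoExtractors
    from youtube_dl.FileDownloader import FileDownloader

    # Register all extractors, as the updated test does, then download a URL.
    params = {'outtmpl': u'%(id)s.%(ext)s'}
    fd = FileDownloader(params)
    for ie in youtube_dl.InfoExtractors.gen_extractors():
        fd.add_info_extractor(ie)
    # fd.download([u'http://www.youtube.com/watch?v=BaW_jenozKc'])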
@@ -76,8 +76,7 @@
        "name": "StanfordOpenClassroom",
        "md5": "544a9468546059d4e80d76265b0443b8",
        "url": "http://openclassroom.stanford.edu/MainFolder/VideoPage.php?course=PracticalUnix&video=intro-environment&speed=100",
-       "file": "PracticalUnix_intro-environment.mp4",
-       "skip": "Currently offline"
+       "file": "PracticalUnix_intro-environment.mp4"
    },
    {
        "name": "XNXX",
@@ -328,5 +327,16 @@
        "info_dict": {
            "title": "Video: KO Of The Week: MMA Fighter Gets Knocked Out By Swift Head Kick! "
        }
-   }
+   },
+   {
+       "name": "ARD",
+       "url": "http://www.ardmediathek.de/das-erste/tagesschau-in-100-sek?documentId=14077640",
+       "file": "14077640.mp4",
+       "md5": "6ca8824255460c787376353f9e20bbd8",
+       "info_dict": {
+           "title": "11.04.2013 09:23 Uhr - Tagesschau in 100 Sekunden"
+       },
+       "skip": "Requires rtmpdump"
+   }

]
youtube-dl (binary file not shown)
@@ -388,7 +388,11 @@ class FileDownloader(object):
        template_dict = dict(info_dict)

        template_dict['epoch'] = int(time.time())
-       template_dict['autonumber'] = u'%05d' % self._num_downloads
+       autonumber_size = self.params.get('autonumber_size')
+       if autonumber_size is None:
+           autonumber_size = 5
+       autonumber_templ = u'%0' + str(autonumber_size) + u'd'
+       template_dict['autonumber'] = autonumber_templ % self._num_downloads

        sanitize = lambda k,v: sanitize_filename(
            u'NA' if v is None else compat_str(v),
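The effect of the new autonumber_size parameter, extracted into a standalone sketch (the params dict and the counter are stand-ins for self.params and self._num_downloads):

    # Standalone sketch of the padding logic added above.
    params = {'autonumber_size': 3}          # stand-in for self.params
    num_downloads = 7                        # stand-in for self._num_downloads

    autonumber_size = params.get('autonumber_size')
    if autonumber_size is None:
        autonumber_size = 5                  # previous fixed width
    autonumber_templ = u'%0' + str(autonumber_size) + u'd'
    print(autonumber_templ % num_downloads)  # '007' rather than '00007'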
@@ -485,14 +489,17 @@ class FileDownloader(object):
            subtitle = info_dict['subtitles'][0]
            (sub_error, sub_lang, sub) = subtitle
            sub_format = self.params.get('subtitlesformat')
-           try:
-               sub_filename = filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format
-               self.report_writesubtitles(sub_filename)
-               with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
-                   subfile.write(sub)
-           except (OSError, IOError):
-               self.report_error(u'Cannot write subtitles file ' + descfn)
-               return
+           if sub_error:
+               self.report_warning("Some error while getting the subtitles")
+           else:
+               try:
+                   sub_filename = filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format
+                   self.report_writesubtitles(sub_filename)
+                   with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
+                       subfile.write(sub)
+               except (OSError, IOError):
+                   self.report_error(u'Cannot write subtitles file ' + descfn)
+                   return
            if self.params.get('onlysubtitles', False):
                return

@@ -501,14 +508,17 @@ class FileDownloader(object):
            sub_format = self.params.get('subtitlesformat')
            for subtitle in subtitles:
                (sub_error, sub_lang, sub) = subtitle
-               try:
-                   sub_filename = filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format
-                   self.report_writesubtitles(sub_filename)
-                   with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
-                       subfile.write(sub)
-               except (OSError, IOError):
-                   self.trouble(u'ERROR: Cannot write subtitles file ' + descfn)
-                   return
+               if sub_error:
+                   self.report_warning("Some error while getting the subtitles")
+               else:
+                   try:
+                       sub_filename = filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format
+                       self.report_writesubtitles(sub_filename)
+                       with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
+                           subfile.write(sub)
+                   except (OSError, IOError):
+                       self.trouble(u'ERROR: Cannot write subtitles file ' + descfn)
+                       return
                if self.params.get('onlysubtitles', False):
                    return
@@ -623,7 +633,7 @@ class FileDownloader(object):
        except (IOError, OSError):
            self.report_warning(u'Unable to remove downloaded video file')

-   def _download_with_rtmpdump(self, filename, url, player_url, page_url):
+   def _download_with_rtmpdump(self, filename, url, player_url, page_url, play_path):
        self.report_destination(filename)
        tmpfilename = self.temp_name(filename)

@@ -642,6 +652,8 @@ class FileDownloader(object):
            basic_args += ['-W', player_url]
        if page_url is not None:
            basic_args += ['--pageUrl', page_url]
+       if play_path is not None:
+           basic_args += ['-y', play_path]
        args = basic_args + [[], ['-e', '-k', '1']][self.params.get('continuedl', False)]
        if self.params.get('verbose', False):
            try:
@@ -696,7 +708,8 @@ class FileDownloader(object):
        if url.startswith('rtmp'):
            return self._download_with_rtmpdump(filename, url,
                                                info_dict.get('player_url', None),
-                                               info_dict.get('page_url', None))
+                                               info_dict.get('page_url', None),
+                                               info_dict.get('play_path', None))

        tmpfilename = self.temp_name(filename)
        stream = None
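How the new play_path argument reaches the rtmpdump command line, as a simplified sketch (URL and playpath values are made up; the real code also handles player/page URLs, resuming, and verbosity):

    # Simplified construction of the rtmpdump argument list.
    url = 'rtmp://example.invalid/ondemand'          # hypothetical RTMP URL
    tmpfilename = 'video.mp4.part'
    play_path = 'mp4:path/to/stream.mp4'             # e.g. an mp4: playpath as used by ARD

    basic_args = ['rtmpdump', '-q', '-r', url, '-o', tmpfilename]
    if play_path is not None:
        basic_args += ['-y', play_path]              # ask rtmpdump for this playpath
    print(' '.join(basic_args))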
@@ -115,7 +115,8 @@ class InfoExtractor(object):
        """ Returns the response handle """
        if note is None:
            note = u'Downloading video webpage'
-       self._downloader.to_screen(u'[%s] %s: %s' % (self.IE_NAME, video_id, note))
+       if note is not False:
+           self._downloader.to_screen(u'[%s] %s: %s' % (self.IE_NAME, video_id, note))
        try:
            return compat_urllib_request.urlopen(url_or_request)
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
@@ -133,6 +134,14 @@ class InfoExtractor(object):
        else:
            encoding = 'utf-8'
        webpage_bytes = urlh.read()
+       if self._downloader.params.get('dump_intermediate_pages', False):
+           try:
+               url = url_or_request.get_full_url()
+           except AttributeError:
+               url = url_or_request
+           self._downloader.to_screen(u'Dumping request to ' + url)
+           dump = base64.b64encode(webpage_bytes).decode('ascii')
+           self._downloader.to_screen(dump)
        return webpage_bytes.decode(encoding, 'replace')
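The --dump-intermediate-pages behaviour added above, reduced to its core (page bytes and URL are made up): the fetched bytes are base64-encoded so the dump is safe to print regardless of the page's encoding.

    import base64

    webpage_bytes = b'<html>hypothetical intermediate page</html>'
    url = 'http://example.invalid/videopage'

    print(u'Dumping request to ' + url)
    dump = base64.b64encode(webpage_bytes).decode('ascii')
    print(dump)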
@@ -253,11 +262,11 @@ class YoutubeIE(InfoExtractor):
        try:
            sub_list = compat_urllib_request.urlopen(request).read().decode('utf-8')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-           return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None)
+           return (u'unable to download video subtitles: %s' % compat_str(err), None)
        sub_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', sub_list)
        sub_lang_list = dict((l[1], l[0]) for l in sub_lang_list)
        if not sub_lang_list:
-           return (u'WARNING: video doesn\'t have subtitles', None)
+           return (u'video doesn\'t have subtitles', None)
        return sub_lang_list

    def _list_available_subtitles(self, video_id):
@@ -265,6 +274,10 @@ class YoutubeIE(InfoExtractor):
        self.report_video_subtitles_available(video_id, sub_lang_list)

    def _request_subtitle(self, sub_lang, sub_name, video_id, format):
+       """
+       Return tuple:
+       (error_message, sub_lang, sub)
+       """
        self.report_video_subtitles_request(video_id, sub_lang, format)
        params = compat_urllib_parse.urlencode({
            'lang': sub_lang,
@@ -276,14 +289,20 @@ class YoutubeIE(InfoExtractor):
        try:
            sub = compat_urllib_request.urlopen(url).read().decode('utf-8')
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-           return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None)
+           return (u'unable to download video subtitles: %s' % compat_str(err), None, None)
        if not sub:
-           return (u'WARNING: Did not fetch video subtitles', None)
+           return (u'Did not fetch video subtitles', None, None)
        return (None, sub_lang, sub)

    def _extract_subtitle(self, video_id):
+       """
+       Return a list with a tuple:
+       [(error_message, sub_lang, sub)]
+       """
        sub_lang_list = self._get_available_subtitles(video_id)
        sub_format = self._downloader.params.get('subtitlesformat')
+       if isinstance(sub_lang_list,tuple): #There was some error, it didn't get the available subtitles
+           return [(sub_lang_list[0], None, None)]
        if self._downloader.params.get('subtitleslang', False):
            sub_lang = self._downloader.params.get('subtitleslang')
        elif 'en' in sub_lang_list:
@@ -291,7 +310,7 @@ class YoutubeIE(InfoExtractor):
        else:
            sub_lang = list(sub_lang_list.keys())[0]
        if not sub_lang in sub_lang_list:
-           return (u'WARNING: no closed captions found in the specified language "%s"' % sub_lang, None)
+           return [(u'no closed captions found in the specified language "%s"' % sub_lang, None, None)]

        subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format)
        return [subtitle]
@@ -299,6 +318,8 @@ class YoutubeIE(InfoExtractor):
    def _extract_all_subtitles(self, video_id):
        sub_lang_list = self._get_available_subtitles(video_id)
        sub_format = self._downloader.params.get('subtitlesformat')
+       if isinstance(sub_lang_list,tuple): #There was some error, it didn't get the available subtitles
+           return [(sub_lang_list[0], None, None)]
        subtitles = []
        for sub_lang in sub_lang_list:
            subtitle = self._request_subtitle(sub_lang, sub_lang_list[sub_lang].encode('utf-8'), video_id, sub_format)
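The subtitle helpers now return (error_message, sub_lang, sub) triples, with error_message left as None on success, so the caller decides how to report problems instead of aborting. A small sketch of consuming such results (the payloads are invented):

    # Hypothetical results in the new (error_message, sub_lang, sub) convention.
    video_subtitles = [
        (None, 'en', u'1\n00:00:00,000 --> 00:00:02,000\nHello world\n'),
        (u"video doesn't have subtitles", None, None),
    ]
    for sub_error, sub_lang, sub in video_subtitles:
        if sub_error:
            print('WARNING: %s' % sub_error)          # report, do not abort
        else:
            print('got %s subtitles (%d chars)' % (sub_lang, len(sub)))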
@@ -451,18 +472,14 @@ class YoutubeIE(InfoExtractor):
        # Get video info
        self.report_video_info_webpage_download(video_id)
        for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']:
-           video_info_url = ('http://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
+           video_info_url = ('https://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
                    % (video_id, el_type))
-           request = compat_urllib_request.Request(video_info_url)
-           try:
-               video_info_webpage_bytes = compat_urllib_request.urlopen(request).read()
-               video_info_webpage = video_info_webpage_bytes.decode('utf-8', 'ignore')
-               video_info = compat_parse_qs(video_info_webpage)
-               if 'token' in video_info:
-                   break
-           except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-               self._downloader.report_error(u'unable to download video info webpage: %s' % compat_str(err))
-               return
+           video_info_webpage = self._download_webpage(video_info_url, video_id,
+                   note=False,
+                   errnote='unable to download video info webpage')
+           video_info = compat_parse_qs(video_info_webpage)
+           if 'token' in video_info:
+               break
        if 'token' not in video_info:
            if 'reason' in video_info:
                self._downloader.report_error(u'YouTube said: %s' % video_info['reason'][0])
@@ -532,14 +549,14 @@ class YoutubeIE(InfoExtractor):
        if video_subtitles:
            (sub_error, sub_lang, sub) = video_subtitles[0]
            if sub_error:
-               self._downloader.trouble(sub_error)
+               self._downloader.report_error(sub_error)

        if self._downloader.params.get('allsubtitles', False):
            video_subtitles = self._extract_all_subtitles(video_id)
            for video_subtitle in video_subtitles:
                (sub_error, sub_lang, sub) = video_subtitle
                if sub_error:
-                   self._downloader.trouble(sub_error)
+                   self._downloader.report_error(sub_error)

        if self._downloader.params.get('listsubtitles', False):
            sub_lang_list = self._list_available_subtitles(video_id)
@@ -1118,7 +1135,7 @@ class VimeoIE(InfoExtractor):
        # Extract video description
        video_description = get_element_by_attribute("itemprop", "description", webpage)
        if video_description: video_description = clean_html(video_description)
-       else: video_description = ''
+       else: video_description = u''

        # Extract upload date
        video_upload_date = None
@@ -1710,9 +1727,7 @@ class YoutubePlaylistIE(InfoExtractor):
                     (?:
                        (?:course|view_play_list|my_playlists|artist|playlist|watch)
                        \? (?:.*?&)*? (?:p|a|list)=
-                    |  user/.*?/user/
                     |  p/
-                    |  user/.*?#[pg]/c/
                     )
                     ((?:PL|EC|UU)?[0-9A-Za-z-_]{10,})
                     .*
@@ -3796,7 +3811,7 @@ class WorldStarHipHopIE(InfoExtractor):
        _title = r"""<title>(.*)</title>"""

        mobj = re.search(_title, webpage_src)

        if mobj is not None:
            title = mobj.group(1)
        else:
@@ -3814,7 +3829,7 @@ class WorldStarHipHopIE(InfoExtractor):
        if mobj is not None:
            title = mobj.group(1)
            thumbnail = None

        results = [{
            'id': video_id,
            'url' : video_url,
@@ -4121,7 +4136,7 @@ class KeekIE(InfoExtractor):
        video_url = u'http://cdn.keek.com/keek/video/%s' % video_id
        thumbnail = u'http://cdn.keek.com/keek/thumbnail/%s/w100/h75' % video_id
        webpage = self._download_webpage(url, video_id)
-       m = re.search(r'<meta property="og:title" content="(?P<title>.+)"', webpage)
+       m = re.search(r'<meta property="og:title" content="(?P<title>.*?)"', webpage)
        title = unescapeHTML(m.group('title'))
        m = re.search(r'<div class="user-name-and-bio">[\S\s]+?<h2>(?P<uploader>.+?)</h2>', webpage)
        uploader = clean_html(m.group('uploader'))
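Why the og:title pattern switched from a greedy (?P<title>.+) to a non-greedy (?P<title>.*?): with other attributes on the same tag, the greedy version swallows everything up to the last quote. A quick demonstration on a made-up meta tag:

    import re

    snippet = '<meta property="og:title" content="My keek" name="title">'
    greedy = re.search(r'<meta property="og:title" content="(?P<title>.+)"', snippet)
    lazy = re.search(r'<meta property="og:title" content="(?P<title>.*?)"', snippet)
    print(greedy.group('title'))   # My keek" name="title  (overshoots)
    print(lazy.group('title'))     # My keek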
@@ -4346,6 +4361,46 @@ class LiveLeakIE(InfoExtractor):

        return [info]

+class ARDIE(InfoExtractor):
+    _VALID_URL = r'^(?:https?://)?(?:(?:www\.)?ardmediathek\.de|mediathek\.daserste\.de)/(?:.*/)(?P<video_id>[^/\?]+)(?:\?.*)?'
+    _TITLE = r'<h1(?: class="boxTopHeadline")?>(?P<title>.*)</h1>'
+    _MEDIA_STREAM = r'mediaCollection\.addMediaStream\((?P<media_type>\d+), (?P<quality>\d+), "(?P<rtmp_url>[^"]*)", "(?P<video_url>[^"]*)", "[^"]*"\)'
+
+    def _real_extract(self, url):
+        # determine video id from url
+        m = re.match(self._VALID_URL, url)
+
+        numid = re.search(r'documentId=([0-9]+)', url)
+        if numid:
+            video_id = numid.group(1)
+        else:
+            video_id = m.group('video_id')
+
+        # determine title and media streams from webpage
+        html = self._download_webpage(url, video_id)
+        title = re.search(self._TITLE, html).group('title')
+        streams = [m.groupdict() for m in re.finditer(self._MEDIA_STREAM, html)]
+        if not streams:
+            assert '"fsk"' in html
+            self._downloader.report_error(u'this video is only available after 8:00 pm')
+            return
+
+        # choose default media type and highest quality for now
+        stream = max([s for s in streams if int(s["media_type"]) == 0],
+                     key=lambda s: int(s["quality"]))
+
+        # there's two possibilities: RTMP stream or HTTP download
+        info = {'id': video_id, 'title': title, 'ext': 'mp4'}
+        if stream['rtmp_url']:
+            self._downloader.to_screen(u'[%s] RTMP download detected' % self.IE_NAME)
+            assert stream['video_url'].startswith('mp4:')
+            info["url"] = stream["rtmp_url"]
+            info["play_path"] = stream['video_url']
+        else:
+            assert stream["video_url"].endswith('.mp4')
+            info["url"] = stream["video_url"]
+        return [info]
+

def gen_extractors():
    """ Return a list of an instance of every supported extractor.
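The new extractor's _MEDIA_STREAM pattern applied to a made-up mediaCollection.addMediaStream snippet, picking the highest-quality default-type stream the same way _real_extract does above:

    import re

    _MEDIA_STREAM = r'mediaCollection\.addMediaStream\((?P<media_type>\d+), (?P<quality>\d+), "(?P<rtmp_url>[^"]*)", "(?P<video_url>[^"]*)", "[^"]*"\)'
    # Hypothetical page fragment with two stream registrations.
    html = ('mediaCollection.addMediaStream(0, 1, "rtmp://example.invalid/ondemand", "mp4:video_lo.mp4", "")\n'
            'mediaCollection.addMediaStream(0, 2, "rtmp://example.invalid/ondemand", "mp4:video_hi.mp4", "")')

    streams = [m.groupdict() for m in re.finditer(_MEDIA_STREAM, html)]
    stream = max([s for s in streams if int(s["media_type"]) == 0],
                 key=lambda s: int(s["quality"]))
    print(stream['rtmp_url'], stream['video_url'])   # rtmp://... mp4:video_hi.mp4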
@@ -4399,5 +4454,6 @@ def gen_extractors():
        MySpassIE(),
        SpiegelIE(),
        LiveLeakIE(),
+       ARDIE(),
        GenericIE()
        ]
@@ -24,6 +24,7 @@ __authors__ = (
    'Jaime Marquínez Ferrándiz',
    'Jeff Crouse',
    'Osama Khalid',
+   'Michael Walter',
    )

__license__ = 'Public Domain'
@@ -223,6 +224,9 @@ def parseOpts():
            help='display progress in console titlebar', default=False)
    verbosity.add_option('-v', '--verbose',
            action='store_true', dest='verbose', help='print various debugging information', default=False)
+   verbosity.add_option('--dump-intermediate-pages',
+           action='store_true', dest='dump_intermediate_pages', default=False,
+           help='print downloaded pages to debug problems(very verbose)')

    filesystem.add_option('-t', '--title',
            action='store_true', dest='usetitle', help='use title in file name', default=False)
@@ -235,6 +239,9 @@ def parseOpts():
            help='number downloaded files starting from 00000', default=False)
    filesystem.add_option('-o', '--output',
            dest='outtmpl', metavar='TEMPLATE', help='output filename template. Use %(title)s to get the title, %(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, %(autonumber)s to get an automatically incremented number, %(ext)s for the filename extension, %(upload_date)s for the upload date (YYYYMMDD), %(extractor)s for the provider (youtube, metacafe, etc), %(id)s for the video id and %% for a literal percent. Use - to output to stdout. Can also be used to download to a different directory, for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .')
+   filesystem.add_option('--autonumber-size',
+           dest='autonumber_size', metavar='NUMBER',
+           help='Specifies the number of digits in %(autonumber)s when it is present in output filename template or --autonumber option is given')
    filesystem.add_option('--restrict-filenames',
            action='store_true', dest='restrictfilenames',
            help='Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames', default=False)
@@ -451,6 +458,7 @@ def _real_main():
        'format_limit': opts.format_limit,
        'listformats': opts.listformats,
        'outtmpl': outtmpl,
+       'autonumber_size': opts.autonumber_size,
        'restrictfilenames': opts.restrictfilenames,
        'ignoreerrors': opts.ignoreerrors,
        'ratelimit': opts.ratelimit,
@@ -480,6 +488,7 @@ def _real_main():
        'max_downloads': opts.max_downloads,
        'prefer_free_formats': opts.prefer_free_formats,
        'verbose': opts.verbose,
+       'dump_intermediate_pages': opts.dump_intermediate_pages,
        'test': opts.test,
        'keepvideo': opts.keepvideo,
        'min_filesize': opts.min_filesize,
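The two new flags are plain optparse options; a self-contained sketch of how they parse, with the option strings and dests copied from the definitions above and a sample argv invented here:

    import optparse

    parser = optparse.OptionParser()
    parser.add_option('--dump-intermediate-pages',
            action='store_true', dest='dump_intermediate_pages', default=False,
            help='print downloaded pages to debug problems(very verbose)')
    parser.add_option('--autonumber-size',
            dest='autonumber_size', metavar='NUMBER',
            help='Specifies the number of digits in %(autonumber)s when it is present in output filename template or --autonumber option is given')

    opts, args = parser.parse_args(['--autonumber-size', '3', '--dump-intermediate-pages'])
    print(opts.autonumber_size, opts.dump_intermediate_pages)   # '3' True (the size arrives as a string)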
@@ -9,7 +9,8 @@ import sys
if __package__ is None and not hasattr(sys, "frozen"):
    # direct call of __main__.py
    import os.path
-   sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+   path = os.path.realpath(os.path.abspath(__file__))
+   sys.path.append(os.path.dirname(os.path.dirname(path)))

import youtube_dl
@@ -37,7 +37,7 @@ def rsa_verify(message, signature, key):
def update_self(to_screen, verbose, filename):
    """Update the program file with the latest version from the repository"""

-   UPDATE_URL = "http://rg3.github.com/youtube-dl/update/"
+   UPDATE_URL = "http://rg3.github.io/youtube-dl/update/"
    VERSION_URL = UPDATE_URL + 'LATEST_VERSION'
    JSON_URL = UPDATE_URL + 'versions.json'
    UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)
@@ -1,2 +1,2 @@

-__version__ = '2013.03.29'
+__version__ = '2013.04.11'