Compare commits


192 Commits

Author SHA1 Message Date
8cb94542f4 release 2013.04.22 2013-04-22 20:01:56 +02:00
c681a03918 Fix --list-formats (Closes #799) 2013-04-22 19:51:56 +02:00
74e3452b9e Add playlist and playlist_index to the help string for the output option
Also split the help string in different lines to make editing easier.
2013-04-22 10:06:07 +02:00
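
For illustration, a minimal sketch of how these two keys expand in an output template (the template and values below are made up; as the FileDownloader diff further down shows, playlist_index is zero-padded to five digits):

    # hypothetical template using the new keys; values are placeholders
    outtmpl = u'%(playlist)s/%(playlist_index)s-%(title)s-%(id)s.%(ext)s'
    info = {
        'playlist': u'ytdl test PL',
        'playlist_index': u'%05d' % 3,
        'title': u'Some video',
        'id': u'bV9L5Ht9LgY',
        'ext': u'mp4',
    }
    print(outtmpl % info)  # ytdl test PL/00003-Some video-bV9L5Ht9LgY.mp4
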
9e1cf0c200 SteamIE returns a playlist
With the game name as title.
2013-04-21 22:05:21 +02:00
e11eb11906 Allow downloading videos with an age check from Steam
Also move method report_age_confirmation to the base IE class.
2013-04-21 21:56:13 +02:00
c04bca6f60 release 2013.04.21 2013-04-21 12:52:45 +02:00
41a6eb949a Clean duplicate method report_extraction in InfoExtractors
A lot of IEs had implemented the method in the same way.
2013-04-20 21:12:29 +02:00
f17ce13a92 Write the method to_screen in InfoExtractor (related #608)
Except for the ones in the youtube subtypes (user, channel, ...), all calls to _downloader.to_screen have been changed.
Calls not prefixed with the IE name haven't been touched.
2013-04-20 20:55:40 +02:00
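
A minimal sketch of what such a helper looks like (assumed shape, not the exact code from InfoExtractors.py):

    def to_screen(self, msg):
        """Print msg to screen, prefixing it with the IE name."""
        self._downloader.to_screen(u'[%s] %s' % (self.IE_NAME, msg))
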
8c416ad29a Remove calls to _downloader.download in Youtube searches
Instead, return the urls of the videos.
2013-04-20 19:22:45 +02:00
c72938240e Get the title of Youtube playlists 2013-04-20 18:57:05 +02:00
e905b6f80e TEDIE can now return a playlist 2013-04-20 13:31:21 +02:00
6de8f1afb7 Allow specifying which IE should be used for extracting info for a result of type url 2013-04-20 12:58:35 +02:00
9341212642 Create a function in InfoExtractors that returns the InfoExtractor class with the given name 2013-04-20 12:42:57 +02:00
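
Rough usage, as seen in the test and FileDownloader diffs further down this page:

    from youtube_dl.InfoExtractors import get_info_extractor

    ie_class = get_info_extractor('Youtube')  # returns the YoutubeIE class, not an instance
    ie = ie_class()                           # instantiate it like any other IE
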
f7a9721e16 Fix some metacafe videos, closes #562 2013-04-20 12:06:58 +02:00
089e843b0f Use _download_webpage in MetacafeIE 2013-04-20 11:40:05 +02:00
c8056d866a Add myself to travis notifications 2013-04-20 11:17:03 +02:00
49da66e459 The test video for subtitles has added a new language 2013-04-20 10:39:02 +02:00
fb6c319904 Add tests for YoutubeChannelIE
- tests for identifying channel urls
- test retrieval of paginated channel
- test retrieval of autogenerated channel
2013-04-19 18:11:05 -04:00
5a8d13199c Fix YoutubeChannelIE
- urls with query parameters now match
- fixes regex for identifying videos
- fixes pagination
2013-04-19 18:05:35 -04:00
dce9027045 Merge branch 'extract_info_rewrite' 2013-04-19 21:57:08 +02:00
feba604e92 Fix playlists with size 50i ∀ i∉ℕ (Closes #782) 2013-04-18 07:28:43 +02:00
d22f65413a release 2013.04.18 2013-04-18 06:29:32 +02:00
0599ef8c08 Limit titles to 200 characters (Closes #789) 2013-04-18 06:27:11 +02:00
bfdf469295 Fix FunnyOrDie extraction for a special video (#789) 2013-04-18 06:21:46 +02:00
32c96387c1 Fix facebook IE 2013-04-18 04:41:48 +02:00
c8c5443bb5 Revert "disable YT ratelimit; this should enable to max out the connection bandwidth"
Although cool, that seems to break a lot of youtube videos.

This reverts commit a60b854d90.
2013-04-17 23:22:25 +02:00
a60b854d90 disable YT ratelimit; this should enable to max out the connection bandwidth 2013-04-17 19:48:35 +02:00
d281274bf2 Add a playlist_index key to the info_dict, can be used in the output template 2013-04-16 15:13:29 +02:00
b625bc2c31 release 2013.04.11 2013-04-11 18:42:57 +02:00
f4381ab88a Fix keek title extraction 2013-04-11 18:39:13 +02:00
744435f2a4 Show whole diff in error cases 2013-04-11 18:38:43 +02:00
855703e55e Option to dump intermediate pages 2013-04-11 18:31:35 +02:00
927c8c4924 Use download_webpage in youtube IE 2013-04-11 18:18:15 +02:00
0ba994e9e3 Skip ARD test as it requires rtmpdump 2013-04-11 17:20:17 +02:00
af9ad45cd4 Re-enable Stanford OC test 2013-04-11 17:20:05 +02:00
e0fee250c3 Fix default for variable-size autonumbering 2013-04-11 17:07:55 +02:00
72ca05016d Merge remote-tracking branch 'sagittarian/vimeo-no-desc' 2013-04-11 10:56:01 +02:00
844d1f9fa1 Removed overly verbose options and arguments (Should be obvious from the previous lines) 2013-04-11 10:54:37 +02:00
213c31ae16 Added option --autonumber-size:
Specifies the number of digits in %(autonumber)s when it is present in output filename template or --autonumber option is given
2013-04-11 10:53:57 +02:00
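
The formatting this option controls is visible in the FileDownloader diff near the end of this page; in short:

    # autonumber_size comes from --autonumber-size and defaults to 5
    autonumber_size = 3
    autonumber_templ = u'%0' + str(autonumber_size) + u'd'
    print(autonumber_templ % 7)  # '007' instead of the default '00007'
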
04f3d551a0 Merge remote-tracking branch 'sagittarian/resolve-symlinks' 2013-04-11 10:51:13 +02:00
e8600d69fd Credit @catch22 for ARD IE 2013-04-11 10:48:37 +02:00
b03d65c237 Minor improvements for ARD IE 2013-04-11 10:47:21 +02:00
8743974189 Resolve the symlink if __main__.py is invoked as a symlink. 2013-04-11 08:02:17 +03:00
dc36bc9434 Fix bug when the vimeo description is empty on Python 2.x. 2013-04-11 07:27:04 +03:00
bce878a7c1 Implement the playlist/start options in FileDownloader
This makes them available to all the InfoExtractors
2013-04-10 14:32:03 +02:00
532d797824 In MetacafeIE return a url if YoutubeIE should do the job 2013-04-10 00:06:03 +02:00
146c12a2da Change the order for extracting/downloading
Now it gets a video's info and downloads it right away, then it moves on to the next video found.
2013-04-10 00:05:04 +02:00
d39919c03e Add progress counter for playlists
Closes #276
2013-04-09 13:45:52 +02:00
df2dedeefb added ARD InfoExtractor (german state television) 2013-04-07 15:23:48 +02:00
adb029ed81 added --playpath/-y support to RTMP downloads (via 'play_path' entry in 'info_dict') 2013-04-07 15:17:36 +02:00
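
A hedged sketch of the plumbing: the IE puts a 'play_path' entry into info_dict and the RTMP code forwards it to rtmpdump (the URL and stream name below are placeholders):

    info_dict = {'url': u'rtmp://example.invalid/app', 'play_path': u'mp4:some/stream', 'ext': u'flv'}
    args = ['rtmpdump', '-r', info_dict['url'], '-o', 'video.flv']
    if info_dict.get('play_path'):
        args += ['--playpath', info_dict['play_path']]  # same as rtmpdump's -y
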
43ff1a347d Change rg3.github.com to rg3.github.io almost everywhere 2013-04-06 10:46:17 +02:00
14294236bf Merge branch 'master' into extract_info_rewrite 2013-04-05 12:39:51 +02:00
c2b293ba30 release 2013.04.03 2013-04-03 19:43:53 +02:00
37cd9f522f Restore youtube-dl (update) binary (#770) 2013-04-01 23:43:20 +02:00
f33154cd39 Merge pull request #764 from jaimeMF/subtitles_not_found
Fix crash when subtitles are not found
2013-03-31 19:02:18 -07:00
bafeed9f5d Don't crash in FileDownloader if subtitles couldn't be found and errors are ignored 2013-03-31 12:21:35 +02:00
ef767f9fd5 Fix crash when subtitles are not found and the option --all-subs is given 2013-03-31 12:19:13 +02:00
bc97f6d60c Use report_error in subtitles error handling 2013-03-31 12:10:12 +02:00
90a99c1b5e retry on UnavailableVideoError 2013-03-31 03:29:34 +02:00
f375d4b7de import all IEs when testing to resemble more closely the real env 2013-03-31 03:12:28 +02:00
fa41fbd318 don't catch YT user URLs in YoutubePlaylistIE (fix #754, fix #763) 2013-03-31 03:02:49 +02:00
6a205c8876 More fixes on subtitles errors handling 2013-03-30 14:17:12 +01:00
0fb3756409 Fix crash when subtitles are not found 2013-03-30 14:11:33 +01:00
fbbdf475b1 Different feed file name 2013-03-29 21:44:11 +01:00
c238be3e3a Correct feed title 2013-03-29 21:41:20 +01:00
1bf2801e6a release 2013.03.29 2013-03-29 21:22:57 +01:00
c9c8402093 Merge pull request #758 from jaimeMF/atom-feed
Add an Atom feed generator in devscripts
2013-03-29 12:50:20 -07:00
6060788083 Write a new feed each time, reading from versions.json 2013-03-29 19:42:33 +01:00
e3700fc9e4 Merge pull request #736 from rg3/retry
Exception stacking and test retry
2013-03-29 09:01:27 -07:00
b693216d8d Merge pull request #752 from dodo/master
SoundcloudSetIE
2013-03-29 08:40:22 -07:00
46b9d8295d Merge pull request #730 by @JohnyMoSwag
Support for Worldstarhiphop.com
2013-03-29 16:14:49 +01:00
7decf8951c fix FunnyOrDieIE, MyVideoIE, TEDIE 2013-03-29 15:59:13 +01:00
1f46c15262 fix SpiegelIE 2013-03-29 15:31:38 +01:00
0cd358676c Rebased, fixed and extended LiveLeak.com support
close #757 - close #761
2013-03-29 15:13:24 +01:00
43113d92cc Update InfoExtractors.py 2013-03-29 14:23:09 +01:00
7eab8dc750 Pass the playlist info_dict to process_info
the playlist value can be used in the output template
2013-03-29 12:32:42 +01:00
44e939514e Added test for WorldStarHipHop 2013-03-28 20:05:28 -07:00
95506f1235 Merge remote-tracking branch 'jaimeMF/color_error_messages' 2013-03-29 00:25:48 +01:00
a91556fd74 Add a note on MaxDownloadsReached (#732, thanks to CBGoodBuddy) 2013-03-29 00:20:13 +01:00
1447f728b5 Merge branch 'master' of github.com:rg3/youtube-dl 2013-03-29 00:06:48 +01:00
d2c690828a Add title and id to playlist results
Not all IEs give both. They are not used yet.
2013-03-28 13:39:00 +01:00
cfa90f4adc Merge branch 'master' into extract_info_rewrite 2013-03-28 13:20:33 +01:00
898280a056 use sys.stdout.buffer only on Python3 2013-03-28 13:13:03 +01:00
59b4a2f0e4 Merge pull request #762 from jynnantonix/master
Use sys.stdout.buffer when writing to standard out
2013-03-28 05:11:51 -07:00
1ee9778405 Use sys.stdout.buffer instead of sys.stdout
sys.stdout defaults to text mode, we need to use the underlying buffer
instead when writing binary data.

Signed-off-by: Chirantan Ekbote <chirantan.ekbote@gmail.com>
2013-03-27 15:57:11 -04:00
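
A self-contained illustration of the point made above:

    import sys

    data = b'\xff\xd8 raw bytes'
    if hasattr(sys.stdout, 'buffer'):
        sys.stdout.buffer.write(data)  # Python 3: stdout is a text wrapper over a binary buffer
    else:
        sys.stdout.write(data)         # Python 2: stdout accepts bytes directly
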
db74c11d2b Add an Atom feed generator in devscripts 2013-03-26 18:13:52 +01:00
5011cded16 SoundcloudSetIE
info extractor for soundcloud sets
2013-03-24 02:24:07 +01:00
f10b2a9c14 fix KeekIE 2013-03-20 12:13:52 +01:00
5cb3c0b319 Merge pull request #699 by @iemejia
Removed unnecessary function to convert subtitles, improved use of the youtube api
2013-03-20 11:35:55 +01:00
b9fc428494 add '--write-srt' and '--srt-lang' aliases for backwards compatibility 2013-03-20 11:29:07 +01:00
c0ba104674 Fixed typo in error message when no subtitles were available. 2013-03-20 08:41:54 +01:00
2a4093eaf3 Added new option '--list-subs' to show the available subtitle languages 2013-03-20 08:41:54 +01:00
9e62bc4439 Added new option '--sub-format' to choose the format of the subtitles to download (default=srt) 2013-03-20 08:41:54 +01:00
553d097442 Refactor subtitle options from srt to the more generic 'sub'.
In order to be more consistent with different subtitle formats.
From:
* --write-srt to --write-sub
* --only-srt to --only-sub
* --all-srt to --all-subs
* --srt-lang to --sub-lang'

Refactored also all the mentions of srt for sub in all the source code.
2013-03-20 08:41:53 +01:00
ae608b8076 Added new option '--all-srt' to download all the subtitles of a video.
Only works in youtube for the moment.
2013-03-20 08:41:53 +01:00
c397187061 Spiegel: Support hash at end of URL 2013-03-16 23:52:17 +01:00
e32b06e977 Spiegel IE 2013-03-12 01:08:54 +01:00
8c42c506cd Add configuration to -v output 2013-03-12 00:10:05 +01:00
8cc83b8dbe Bubble up all the stack of exceptions and retry download tests on timeout errors 2013-03-09 10:05:43 +01:00
51af426d89 forgot to fix this. 2013-03-08 22:52:17 -08:00
08ec0af7c6 catch fatal error 2013-03-08 22:48:05 -08:00
3b221c5406 removed str used for other project. 2013-03-08 22:39:45 -08:00
3d3423574d Fix Unicode handling GenericIE (Fixes #734) 2013-03-08 20:47:06 +01:00
e5edd51de4 Clear up error messages (#734) 2013-03-08 20:12:05 +01:00
64c78d50cc working - worldstarhiphop IE
Support for WorldStarHipHop
2013-03-07 16:27:21 -08:00
b3bcca0844 clean up 2013-03-07 15:39:17 -08:00
61e40c88a9 fixed typo 2013-03-06 21:14:46 -08:00
40634747f7 Support for WorldStarHipHop.com 2013-03-06 21:09:55 -08:00
c2e21f2f0d Merge pull request #728 from timdoug/fix-escapist-extension
Escapist videos are actually .mp4, not .flv
2013-03-06 10:26:18 -08:00
47dcd621c0 Escapist videos are actually .mp4, not .flv 2013-03-06 12:46:45 -05:00
a0d6fe7b92 When a redirect is found return the new url using the new style 2013-03-05 22:33:32 +01:00
c9fa1cbab6 More trouble calls changed in InfoExtractors.py
The calls with the message starting with 'WARNING' have been changed to report_warning instead of report_error
2013-03-05 21:13:17 +01:00
8a38a194fb Add auxiliary methods to InfoExtractor to set the '_type' key and use them for some playlist IEs 2013-03-05 20:55:48 +01:00
6ac7f082c4 extract_info now expects ie.extract to return a list in the format proposed in issue 608.
Each element should have a '_type' key specifying whether it's a video, a url or a playlist.
`extract_info` will process each element to get the full info
2013-03-05 20:14:32 +01:00
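
A sketch of the shape being described (field values are made up; the playlist shape matches what the new tests below assert):

    results = [
        {'_type': 'video', 'id': u'BaW_jenozKc', 'title': u'...', 'url': u'...', 'ext': u'mp4'},
        {'_type': 'url', 'url': u'http://www.youtube.com/watch?v=BaW_jenozKc'},
        {'_type': 'playlist', 'title': u'ytdl test PL',
         'entries': [{'_type': 'url', 'url': u'http://www.youtube.com/watch?v=bV9L5Ht9LgY'}]},
    ]
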
f6e6da9525 Use extract_info in BlipTV User and Youtube Channel 2013-03-05 12:26:18 +01:00
597cc8a455 Use extract_info in YoutubePlaylist and YoutubeSearch 2013-03-05 11:58:01 +01:00
3370abd509 Merge branch 'master' into extract_info_rewrite 2013-03-04 22:25:46 +01:00
631f73978c Add a method for extracting info from a list of urls 2013-03-04 22:16:42 +01:00
e5f30ade10 Use report_error in InfoExtractors.py
Some calls haven't been changed
2013-03-04 15:56:14 +01:00
6622d22c79 Use report_error in FileDownloader.py 2013-03-04 11:47:58 +01:00
4e1582f372 Use red color when printing error messages 2013-03-04 11:27:25 +01:00
967897fd22 Fix Python 3 errors with rmtp downloads 2013-03-03 22:38:38 +01:00
f918ec7ea2 Clarify rate limit documentation (Closes #723) 2013-03-03 22:35:26 +01:00
a2ae43a55f Remove changed playlist test (#661) 2013-03-03 22:19:19 +01:00
7ae153ee9c Remove tweetreel - it has shut down 2013-03-03 22:15:06 +01:00
f7b567ff84 Use proper urlparse functions and simplify a bit 2013-03-03 22:09:44 +01:00
f2e237adc8 Merge remote-tracking branch 'jcarlosgarciasegovia/master' 2013-03-03 22:04:06 +01:00
2e5457be1d Use report_warning in InfoExtractors 2013-03-02 11:24:07 +01:00
7f9d41a55e Allow downloading http://blip.tv/play/ embedded URLs 2013-03-01 10:22:16 +00:00
8207626bbe Use color when printing warning messages 2013-02-28 22:07:29 +01:00
df8db1aa21 Create extract_info method 2013-02-26 23:33:58 +01:00
691db5ba02 Don't be too clever (Fixes Python 3) 2013-02-26 22:03:43 +01:00
acb8752f80 fix tests in Python3, and make them parallelizable 2013-02-26 22:03:33 +01:00
679790eee1 Do not user upper-case for non-constants 2013-02-26 20:03:19 +01:00
6bf48bd866 Merge remote-tracking branch 'origin/API_YT_playlists' 2013-02-26 19:58:04 +01:00
790d4fcbe1 Merge pull request #715 from joksnet/no_video_results
[YT Search] No results if items is not in response
2013-02-26 10:43:35 -08:00
89de9eb125 Modified Youtube video/playlist matching; fixes #668; fixes #585 2013-02-26 19:06:41 +01:00
6324fd1d74 Switch YTPlaylistIE to API (relevant: #586); fixes #651; fixes #673; fixes #661 2013-02-26 19:06:28 +01:00
9e07cf2955 [YT Search] No results if items is not in response
When a query returns 0 items, the key 'items' is not present in the
api_response dictionary, raising a KeyError.

Instead, look for the key and call trouble if it's not present.
2013-02-26 18:06:43 +01:00
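
A minimal sketch of that guard (the surrounding names and the message text are illustrative, not the exact code):

    # api_response is the decoded JSON returned by the search API
    if 'items' not in api_response:
        self._downloader.trouble(u'[youtube] No video results')
        return
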
f03b88b3fb Merge remote-tracking branch 'joksnet/not_keep_video_message' 2013-02-25 00:35:12 +01:00
97d0365f49 release 2013.02.25 2013-02-25 00:28:19 +01:00
12887875a2 Fix typo 2013-02-25 00:22:55 +01:00
450e709972 Formalize URL creation (prepare for some cleanup in blip.tv:users) 2013-02-24 23:23:50 +01:00
9befce2b8c Merge remote-tracking branch 'joksnet/ytsearch_decode_request' 2013-02-24 23:14:34 +01:00
cb99797798 Test TED thumbnail 2013-02-24 01:01:20 +01:00
f82b28146a Merge remote-tracking branch 'jaimeMF/TED' 2013-02-24 00:59:22 +01:00
4dc72b830c Merge remote-tracking branch 'jaimeMF/Steam' 2013-02-24 00:59:03 +01:00
ea05129ebd release 2013.02.22 2013-02-24 00:47:08 +01:00
35d217133f The message about deleting the video is not an error.
When using youtube-dl from another Python script with the quiet option
on and a post-processor to extract the audio, the message about deleting
the video shows up in the calling script's logs (as it goes to stderr).

There is no way to keep this quiet as long as it's treated as an error, even if,
for me, it's not.
2013-02-23 22:52:52 +01:00
d1b7a24354 Decode the data requested from the API as UTF-8. 2013-02-23 22:47:22 +01:00
c85538dba1 TED: get thumbnails 2013-02-23 17:27:49 +01:00
60bd48b175 Steam: get thumbnails 2013-02-23 16:48:15 +01:00
4be0aa3539 release 2012.02.22 2013-02-22 16:41:36 +01:00
f636c34481 Stop early in nosetests (in release script) 2013-02-22 16:40:19 +01:00
3bf79c752e Print *all* release notes 2013-02-22 00:36:23 +01:00
cdb130b09a Added new option '--only-srt' to download only the subtitles of a video
Improved option '--srt-lang'
 - it shows the argument in case of missing subtitles
 - added language suffix for non-english languages (e.g. video.it.srt)
2013-02-21 22:12:36 +01:00
2e5d60b7db Removed conversion from youtube closed caption format to srt since youtube api supports the 'srt' format 2013-02-21 20:51:35 +01:00
8271226a55 Fix --match-title and --reject-title decoding (Closes #690) 2013-02-21 17:09:39 +01:00
1013186a17 Also check for JSLoader of JWSPlayer (thanks to @maximeg, Closes #685) 2013-02-21 16:56:48 +01:00
7c038b3c32 Import HTTPErrorProcessor from the correct module (Closes #696) 2013-02-21 16:49:05 +01:00
c8cd8e5f55 release 2013.02.19 2013-02-19 00:06:04 +01:00
471cf47796 include bash completion and manpage in PyPi dist 2013-02-18 23:56:13 +01:00
d8f64574a4 release 2013.02.18 2013-02-18 23:37:20 +01:00
e711babbd1 Fix YP IE 2013-02-18 23:30:33 +01:00
a72b0f2b6f Use proper echo commands 2013-02-18 23:22:01 +01:00
434eb6f26b Include man and bash completion in PyPi release 2013-02-18 23:19:57 +01:00
197080b10b Merge remote-tracking branch 'jaimeMF/TED' 2013-02-18 23:12:56 +01:00
7796e8c2cb facebook: also download lq videos 2013-02-18 23:12:48 +01:00
6d4363368a Fix MyVideo IE 2013-02-18 22:32:56 +01:00
414638cd50 TED: Add support for playlists 2013-02-18 21:42:06 +01:00
2a9983b78f Fix 8tracks 2013-02-18 19:11:32 +01:00
b17c974a88 Mark DailyMotion as broken for now (#680) 2013-02-18 18:53:40 +01:00
5717d91ab7 Correct --newline and give it a more meaningful title 2013-02-18 18:52:06 +01:00
79eb0287ab Merge remote-tracking branch 'glisignoli/master' 2013-02-18 18:47:35 +01:00
58994225bc Add tests to MySpass 2013-02-18 18:45:09 +01:00
59d4c2fe1b fix some titles in TED 2013-02-17 17:25:02 +01:00
3a468f2d8b Basic support for TED 2013-02-17 17:13:06 +01:00
1ad5d872b9 added new InfoExtractor for myspass.de 2013-02-16 13:46:13 +01:00
355fc8e944 Update README.md 2013-02-15 15:57:40 +13:00
380a29dbf7 Update youtube_dl/__init__.py 2013-02-15 15:55:11 +13:00
1528d6642d Forgot to remove \r 2013-02-13 16:43:08 +13:00
7311fef854 Modified youtube-dl to write new lines with the --newline switch. This
enables easier process monitoring when being called with external
scripts.
2013-02-13 14:02:31 +13:00
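
A hedged example of the use case mentioned above, a wrapper script reading progress line by line (the URL is just the test video used elsewhere on this page):

    import subprocess

    proc = subprocess.Popen(
        ['youtube-dl', '--newline', 'http://www.youtube.com/watch?v=BaW_jenozKc'],
        stdout=subprocess.PIPE)
    for line in iter(proc.stdout.readline, b''):
        print(line.rstrip().decode('utf-8', 'replace'))  # each progress update is now a full line
    proc.wait()
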
906417c7c5 Fix delayed title display in --console-title
With Python 3, the titlebar wouldn't get updated for a long time (due to
stderr buffering), and when it did, the title would be shown as b'...'
representation.
2013-02-09 22:58:12 +02:00
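
The fix routes the titlebar update through to_screen (see the FileDownloader diff below); the escape sequence itself works like this:

    import sys

    message = u'youtube-dl - 42.0% of 10.00MiB'
    sys.stdout.write(u'\033]0;%s\007' % message)  # ESC ] 0 ; <title> BEL sets the terminal titlebar
    sys.stdout.flush()
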
6aabe82035 Credit Osama Khalid for Keek support 2013-02-08 11:01:09 +01:00
f0877a445e Add tests for keek 2013-02-08 11:00:28 +01:00
da06e2daf8 Add KeekIE() 2013-02-08 10:25:55 +03:00
d3f5f9f6b9 Fix login (Closes #658) 2013-02-06 21:22:53 +01:00
bfc6ea7935 Ignore PyPi metadata 2013-02-05 13:42:52 +01:00
8edc2cf8ca Support direct vimeo links (Closes #666) 2013-02-05 13:42:08 +01:00
fb778e66df Fix encoding in youtube subtitle download (Closes #669) 2013-02-05 13:30:02 +01:00
3a9918d37f Escapist continues to be flaky on travis 2013-02-02 14:53:34 +01:00
ccb0cae134 Fix automatic release (oops) 2013-02-02 14:52:38 +01:00
22 changed files with 1790 additions and 870 deletions

.gitignore

@ -17,3 +17,4 @@ youtube-dl.tar.gz
.coverage
cover/
updates_key.pem
*.egg-info

.travis.yml

@ -8,6 +8,7 @@ notifications:
email:
- filippo.valsorda@gmail.com
- phihag@phihag.de
- jaime.marquinez.ferrandiz+travis@gmail.com
# irc:
# channels:
# - "irc.freenode.org#youtube-dl"

MANIFEST.in

@ -1,3 +1,5 @@
include README.md
include test/*.py
include test/*.json
include test/*.json
include youtube-dl.bash-completion
include youtube-dl.1

Makefile

@ -1,7 +1,10 @@
all: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion
clean:
rm -rf youtube-dl youtube-dl.exe youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ youtube-dl.tar.gz
rm -rf youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ youtube-dl.tar.gz
cleanall: clean
rm -f youtube-dl youtube-dl.exe
PREFIX=/usr/local
BINDIR=$(PREFIX)/bin
@ -23,7 +26,9 @@ test:
tar: youtube-dl.tar.gz
.PHONY: all clean install test tar
.PHONY: all clean install test tar bash-completion pypi-files
pypi-files: youtube-dl.bash-completion README.txt youtube-dl.1
youtube-dl: youtube_dl/*.py
zip --quiet youtube-dl youtube_dl/*.py
@ -45,6 +50,8 @@ youtube-dl.1: README.md
youtube-dl.bash-completion: youtube_dl/*.py devscripts/bash-completion.in
python devscripts/bash-completion.py
bash-completion: youtube-dl.bash-completion
youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion
@tar -czf youtube-dl.tar.gz --transform "s|^|youtube-dl/|" --owner 0 --group 0 \
--exclude '*.DS_Store' \

README.md

@ -14,112 +14,127 @@ your Unix box, on Windows or on Mac OS X. It is released to the public domain,
which means you can modify it, redistribute it or use it however you like.
# OPTIONS
-h, --help print this help text and exit
--version print program version and exit
-U, --update update this program to latest version
-i, --ignore-errors continue on download errors
-r, --rate-limit LIMIT download rate limit (e.g. 50k or 44.6m)
-R, --retries RETRIES number of retries (default is 10)
--buffer-size SIZE size of download buffer (e.g. 1024 or 16k) (default
is 1024)
--no-resize-buffer do not automatically adjust the buffer size. By
default, the buffer size is automatically resized
from an initial value of SIZE.
--dump-user-agent display the current browser identification
--user-agent UA specify a custom user agent
--list-extractors List all supported extractors and the URLs they
would handle
-h, --help print this help text and exit
--version print program version and exit
-U, --update update this program to latest version
-i, --ignore-errors continue on download errors
-r, --rate-limit LIMIT maximum download rate (e.g. 50k or 44.6m)
-R, --retries RETRIES number of retries (default is 10)
--buffer-size SIZE size of download buffer (e.g. 1024 or 16k)
(default is 1024)
--no-resize-buffer do not automatically adjust the buffer size. By
default, the buffer size is automatically resized
from an initial value of SIZE.
--dump-user-agent display the current browser identification
--user-agent UA specify a custom user agent
--list-extractors List all supported extractors and the URLs they
would handle
## Video Selection:
--playlist-start NUMBER playlist video to start at (default is 1)
--playlist-end NUMBER playlist video to end at (default is last)
--match-title REGEX download only matching titles (regex or caseless
sub-string)
--reject-title REGEX skip download for matching titles (regex or
caseless sub-string)
--max-downloads NUMBER Abort after downloading NUMBER files
--min-filesize SIZE Do not download any videos smaller than SIZE (e.g.
50k or 44.6m)
--max-filesize SIZE Do not download any videos larger than SIZE (e.g.
50k or 44.6m)
--playlist-start NUMBER playlist video to start at (default is 1)
--playlist-end NUMBER playlist video to end at (default is last)
--match-title REGEX download only matching titles (regex or caseless
sub-string)
--reject-title REGEX skip download for matching titles (regex or
caseless sub-string)
--max-downloads NUMBER Abort after downloading NUMBER files
--min-filesize SIZE Do not download any videos smaller than SIZE
(e.g. 50k or 44.6m)
--max-filesize SIZE Do not download any videos larger than SIZE (e.g.
50k or 44.6m)
## Filesystem Options:
-t, --title use title in file name
--id use video ID in file name
-l, --literal [deprecated] alias of --title
-A, --auto-number number downloaded files starting from 00000
-o, --output TEMPLATE output filename template. Use %(title)s to get the
title, %(uploader)s for the uploader name,
%(uploader_id)s for the uploader nickname if
different, %(autonumber)s to get an automatically
incremented number, %(ext)s for the filename
extension, %(upload_date)s for the upload date
(YYYYMMDD), %(extractor)s for the provider
(youtube, metacafe, etc), %(id)s for the video id
and %% for a literal percent. Use - to output to
stdout. Can also be used to download to a different
directory, for example with -o '/my/downloads/%(upl
oader)s/%(title)s-%(id)s.%(ext)s' .
--restrict-filenames Restrict filenames to only ASCII characters, and
avoid "&" and spaces in filenames
-a, --batch-file FILE file containing URLs to download ('-' for stdin)
-w, --no-overwrites do not overwrite files
-c, --continue resume partially downloaded files
--no-continue do not resume partially downloaded files (restart
from beginning)
--cookies FILE file to read cookies from and dump cookie jar in
--no-part do not use .part files
--no-mtime do not use the Last-modified header to set the file
modification time
--write-description write video description to a .description file
--write-info-json write video metadata to a .info.json file
-t, --title use title in file name
--id use video ID in file name
-l, --literal [deprecated] alias of --title
-A, --auto-number number downloaded files starting from 00000
-o, --output TEMPLATE output filename template. Use %(title)s to get
the title, %(uploader)s for the uploader name,
%(uploader_id)s for the uploader nickname if
different, %(autonumber)s to get an automatically
incremented number, %(ext)s for the filename
extension, %(upload_date)s for the upload date
(YYYYMMDD), %(extractor)s for the provider
(youtube, metacafe, etc), %(id)s for the video id
, %(playlist)s for the playlist the video is in,
%(playlist_index)s for the position in the
playlist and %% for a literal percent. Use - to
output to stdout. Can also be used to download to
a different directory, for example with -o '/my/d
ownloads/%(uploader)s/%(title)s-%(id)s.%(ext)s' .
--autonumber-size NUMBER Specifies the number of digits in %(autonumber)s
when it is present in output filename template or
--autonumber option is given
--restrict-filenames Restrict filenames to only ASCII characters, and
avoid "&" and spaces in filenames
-a, --batch-file FILE file containing URLs to download ('-' for stdin)
-w, --no-overwrites do not overwrite files
-c, --continue resume partially downloaded files
--no-continue do not resume partially downloaded files (restart
from beginning)
--cookies FILE file to read cookies from and dump cookie jar in
--no-part do not use .part files
--no-mtime do not use the Last-modified header to set the
file modification time
--write-description write video description to a .description file
--write-info-json write video metadata to a .info.json file
## Verbosity / Simulation Options:
-q, --quiet activates quiet mode
-s, --simulate do not download the video and do not write anything
to disk
--skip-download do not download the video
-g, --get-url simulate, quiet but print URL
-e, --get-title simulate, quiet but print title
--get-thumbnail simulate, quiet but print thumbnail URL
--get-description simulate, quiet but print video description
--get-filename simulate, quiet but print output filename
--get-format simulate, quiet but print output format
--no-progress do not print progress bar
--console-title display progress in console titlebar
-v, --verbose print various debugging information
-q, --quiet activates quiet mode
-s, --simulate do not download the video and do not write
anything to disk
--skip-download do not download the video
-g, --get-url simulate, quiet but print URL
-e, --get-title simulate, quiet but print title
--get-thumbnail simulate, quiet but print thumbnail URL
--get-description simulate, quiet but print video description
--get-filename simulate, quiet but print output filename
--get-format simulate, quiet but print output format
--newline output progress bar as new lines
--no-progress do not print progress bar
--console-title display progress in console titlebar
-v, --verbose print various debugging information
--dump-intermediate-pages print downloaded pages to debug problems(very
verbose)
## Video Format Options:
-f, --format FORMAT video format code
--all-formats download all available video formats
--prefer-free-formats prefer free video formats unless a specific one is
requested
--max-quality FORMAT highest quality format to download
-F, --list-formats list all available formats (currently youtube only)
--write-srt write video closed captions to a .srt file
(currently youtube only)
--srt-lang LANG language of the closed captions to download
(optional) use IETF language tags like 'en'
-f, --format FORMAT video format code
--all-formats download all available video formats
--prefer-free-formats prefer free video formats unless a specific one
is requested
--max-quality FORMAT highest quality format to download
-F, --list-formats list all available formats (currently youtube
only)
--write-sub write subtitle file (currently youtube only)
--only-sub downloads only the subtitles (no video)
--all-subs downloads all the available subtitles of the
video (currently youtube only)
--list-subs lists all available subtitles for the video
(currently youtube only)
--sub-format LANG subtitle format [srt/sbv] (default=srt)
(currently youtube only)
--sub-lang LANG language of the subtitles to download (optional)
use IETF language tags like 'en'
## Authentication Options:
-u, --username USERNAME account username
-p, --password PASSWORD account password
-n, --netrc use .netrc authentication data
-u, --username USERNAME account username
-p, --password PASSWORD account password
-n, --netrc use .netrc authentication data
## Post-processing Options:
-x, --extract-audio convert video files to audio-only files (requires
ffmpeg or avconv and ffprobe or avprobe)
--audio-format FORMAT "best", "aac", "vorbis", "mp3", "m4a", "opus", or
"wav"; best by default
--audio-quality QUALITY ffmpeg/avconv audio quality specification, insert a
value between 0 (better) and 9 (worse) for VBR or a
specific bitrate like 128K (default 5)
--recode-video FORMAT Encode the video to another format if necessary
(currently supported: mp4|flv|ogg|webm)
-k, --keep-video keeps the video file on disk after the post-
processing; the video is erased by default
--no-post-overwrites do not overwrite post-processed files; the post-
processed files are overwritten by default
-x, --extract-audio convert video files to audio-only files (requires
ffmpeg or avconv and ffprobe or avprobe)
--audio-format FORMAT "best", "aac", "vorbis", "mp3", "m4a", "opus", or
"wav"; best by default
--audio-quality QUALITY ffmpeg/avconv audio quality specification, insert
a value between 0 (better) and 9 (worse) for VBR
or a specific bitrate like 128K (default 5)
--recode-video FORMAT Encode the video to another format if necessary
(currently supported: mp4|flv|ogg|webm)
-k, --keep-video keeps the video file on disk after the post-
processing; the video is erased by default
--no-post-overwrites do not overwrite post-processed files; the post-
processed files are overwritten by default
# CONFIGURATION
@ -137,6 +152,8 @@ The `-o` option allows users to indicate a template for the output file names. T
- `ext`: The sequence will be replaced by the appropriate extension (like flv or mp4).
- `epoch`: The sequence will be replaced by the Unix epoch when creating the file.
- `autonumber`: The sequence will be replaced by a five-digit number that will be increased with each download, starting at zero.
- `playlist`: The name or the id of the playlist that contains the video.
- `playlist_index`: The index of the video in the playlist, a five-digit number.
The current default template is `%(id)s.%(ext)s`, but that will be switchted to `%(title)s-%(id)s.%(ext)s` (which can be requested with `-t` at the moment).

devscripts/gh-pages/update-feed.py

@ -0,0 +1,57 @@
#!/usr/bin/env python3
import datetime
import textwrap
import json

atom_template=textwrap.dedent("""\
    <?xml version='1.0' encoding='utf-8'?>
    <atom:feed xmlns:atom="http://www.w3.org/2005/Atom">
        <atom:title>youtube-dl releases</atom:title>
        <atom:id>youtube-dl-updates-feed</atom:id>
        <atom:updated>@TIMESTAMP@</atom:updated>
        @ENTRIES@
    </atom:feed>""")

entry_template=textwrap.dedent("""
    <atom:entry>
        <atom:id>youtube-dl-@VERSION@</atom:id>
        <atom:title>New version @VERSION@</atom:title>
        <atom:link href="http://rg3.github.io/youtube-dl" />
        <atom:content type="xhtml">
            <div xmlns="http://www.w3.org/1999/xhtml">
                Downloads available at <a href="http://youtube-dl.org/downloads/@VERSION@/">http://youtube-dl.org/downloads/@VERSION@/</a>
            </div>
        </atom:content>
        <atom:author>
            <atom:name>The youtube-dl maintainers</atom:name>
        </atom:author>
        <atom:updated>@TIMESTAMP@</atom:updated>
    </atom:entry>
    """)

now = datetime.datetime.now()
now_iso = now.isoformat()

atom_template = atom_template.replace('@TIMESTAMP@',now_iso)

entries=[]

versions_info = json.load(open('update/versions.json'))
versions = list(versions_info['versions'].keys())
versions.sort()

for v in versions:
    entry = entry_template.replace('@TIMESTAMP@',v.replace('.','-'))
    entry = entry.replace('@VERSION@',v)
    entries.append(entry)

entries_str = textwrap.indent(''.join(entries), '\t')
atom_template = atom_template.replace('@ENTRIES@', entries_str)

with open('update/releases.atom','w',encoding='utf-8') as atom_file:
    atom_file.write(atom_template)

devscripts/release.sh

@ -20,19 +20,19 @@ if [ ! -z "`git tag | grep "$version"`" ]; then echo 'ERROR: version already pre
if [ ! -z "`git status --porcelain | grep -v CHANGELOG`" ]; then echo 'ERROR: the working directory is not clean; commit or stash changes'; exit 1; fi
if [ ! -f "updates_key.pem" ]; then echo 'ERROR: updates_key.pem missing'; exit 1; fi
echo "\n### First of all, testing..."
make clean
nosetests --with-coverage --cover-package=youtube_dl --cover-html test || exit 1
/bin/echo -e "\n### First of all, testing..."
make cleanall
nosetests --with-coverage --cover-package=youtube_dl --cover-html test --stop || exit 1
echo "\n### Changing version in version.py..."
/bin/echo -e "\n### Changing version in version.py..."
sed -i "s/__version__ = '.*'/__version__ = '$version'/" youtube_dl/version.py
echo "\n### Committing CHANGELOG README.md and youtube_dl/version.py..."
/bin/echo -e "\n### Committing CHANGELOG README.md and youtube_dl/version.py..."
make README.md
git add CHANGELOG README.md youtube_dl/version.py
git commit -m "release $version"
echo "\n### Now tagging, signing and pushing..."
/bin/echo -e "\n### Now tagging, signing and pushing..."
git tag -s -m "Release $version" "$version"
git show "$version"
read -p "Is it good, can I push? (y/n) " -n 1
@ -42,7 +42,7 @@ MASTER=$(git rev-parse --abbrev-ref HEAD)
git push origin $MASTER:master
git push origin "$version"
echo "\n### OK, now it is time to build the binaries..."
/bin/echo -e "\n### OK, now it is time to build the binaries..."
REV=$(git rev-parse HEAD)
make youtube-dl youtube-dl.tar.gz
wget "http://jeromelaheurte.net:8142/download/rg3/youtube-dl/youtube-dl.exe?rev=$REV" -O youtube-dl.exe || \
@ -57,11 +57,11 @@ RELEASE_FILES="youtube-dl youtube-dl.exe youtube-dl-$version.tar.gz"
(cd build/$version/ && sha512sum $RELEASE_FILES > SHA2-512SUMS)
git checkout HEAD -- youtube-dl youtube-dl.exe
echo "\n### Signing and uploading the new binaries to youtube-dl.org..."
/bin/echo -e "\n### Signing and uploading the new binaries to youtube-dl.org..."
for f in $RELEASE_FILES; do gpg --detach-sig "build/$version/$f"; done
scp -r "build/$version" ytdl@youtube-dl.org:html/downloads/
echo "\n### Now switching to gh-pages..."
/bin/echo -e "\n### Now switching to gh-pages..."
git clone --branch gh-pages --single-branch . build/gh-pages
ROOT=$(pwd)
(
@ -69,6 +69,7 @@ ROOT=$(pwd)
ORIGIN_URL=$(git config --get remote.origin.url)
cd build/gh-pages
"$ROOT/devscripts/gh-pages/add-version.py" $version
"$ROOT/devscripts/gh-pages/update-feed.py"
"$ROOT/devscripts/gh-pages/sign-versions.py" < "$ROOT/updates_key.pem"
"$ROOT/devscripts/gh-pages/generate-download.py"
"$ROOT/devscripts/gh-pages/update-copyright.py"
@ -83,7 +84,9 @@ ROOT=$(pwd)
)
rm -rf build
make pypi-files
echo "Uploading to PyPi ..."
pip sdist upload
python setup.py sdist upload
make clean
echo "\n### DONE!"
/bin/echo -e "\n### DONE!"


@ -40,7 +40,7 @@ raw_input()
filename = sys.argv[0]
UPDATE_URL = "http://rg3.github.com/youtube-dl/update/"
UPDATE_URL = "http://rg3.github.io/youtube-dl/update/"
VERSION_URL = UPDATE_URL + 'LATEST_VERSION'
JSON_URL = UPDATE_URL + 'versions.json'
UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)

test/parameters.json

@ -29,6 +29,7 @@
"simulate": false,
"skip_download": false,
"subtitleslang": null,
"subtitlesformat": "srt",
"test": true,
"updatetime": true,
"usenetrc": false,
@ -36,5 +37,8 @@
"verbose": true,
"writedescription": false,
"writeinfojson": true,
"writesubtitles": false
}
"writesubtitles": false,
"onlysubtitles": false,
"allsubtitles": false,
"listssubtitles": false
}

test/test_all_urls.py

@ -7,16 +7,27 @@ import unittest
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from youtube_dl.InfoExtractors import YoutubeIE, YoutubePlaylistIE
from youtube_dl.InfoExtractors import YoutubeIE, YoutubePlaylistIE, YoutubeChannelIE
class TestAllURLsMatching(unittest.TestCase):
def test_youtube_playlist_matching(self):
self.assertTrue(YoutubePlaylistIE().suitable(u'ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8'))
self.assertTrue(YoutubePlaylistIE().suitable(u'PL63F0C78739B09958'))
self.assertFalse(YoutubePlaylistIE().suitable(u'PLtS2H6bU1M'))
self.assertTrue(YoutubePlaylistIE.suitable(u'ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8'))
self.assertTrue(YoutubePlaylistIE.suitable(u'UUBABnxM4Ar9ten8Mdjj1j0Q')) #585
self.assertTrue(YoutubePlaylistIE.suitable(u'PL63F0C78739B09958'))
self.assertTrue(YoutubePlaylistIE.suitable(u'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q'))
self.assertTrue(YoutubePlaylistIE.suitable(u'https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8'))
self.assertTrue(YoutubePlaylistIE.suitable(u'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC'))
self.assertTrue(YoutubePlaylistIE.suitable(u'https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')) #668
self.assertFalse(YoutubePlaylistIE.suitable(u'PLtS2H6bU1M'))
def test_youtube_matching(self):
self.assertTrue(YoutubeIE().suitable(u'PLtS2H6bU1M'))
self.assertTrue(YoutubeIE.suitable(u'PLtS2H6bU1M'))
self.assertFalse(YoutubeIE.suitable(u'https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')) #668
def test_youtube_channel_matching(self):
self.assertTrue(YoutubeChannelIE.suitable('https://www.youtube.com/channel/HCtnHdj3df7iM'))
self.assertTrue(YoutubeChannelIE.suitable('https://www.youtube.com/channel/HCtnHdj3df7iM?feature=gb_ch_rec'))
self.assertTrue(YoutubeChannelIE.suitable('https://www.youtube.com/channel/HCtnHdj3df7iM/videos'))
def test_youtube_extract(self):
self.assertEqual(YoutubeIE()._extract_id('http://www.youtube.com/watch?&v=BaW_jenozKc'), 'BaW_jenozKc')

test/test_download.py

@ -20,6 +20,8 @@ from youtube_dl.utils import *
DEF_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tests.json')
PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json")
RETRIES = 3
# General configuration (from __init__, not very elegant...)
jar = compat_cookiejar.CookieJar()
cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
@ -56,6 +58,7 @@ with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
class TestDownload(unittest.TestCase):
maxDiff = None
def setUp(self):
self.parameters = parameters
self.defs = defs
@ -64,7 +67,7 @@ class TestDownload(unittest.TestCase):
def generator(test_case):
def test_template(self):
ie = getattr(youtube_dl.InfoExtractors, test_case['name'] + 'IE')
ie = youtube_dl.InfoExtractors.get_info_extractor(test_case['name'])#getattr(youtube_dl.InfoExtractors, test_case['name'] + 'IE')
if not ie._WORKING:
print('Skipping: IE marked as not _WORKING')
return
@ -79,9 +82,8 @@ def generator(test_case):
params.update(test_case.get('params', {}))
fd = FileDownloader(params)
fd.add_info_extractor(ie())
for ien in test_case.get('add_ie', []):
fd.add_info_extractor(getattr(youtube_dl.InfoExtractors, ien + 'IE')())
for ie in youtube_dl.InfoExtractors.gen_extractors():
fd.add_info_extractor(ie)
finished_hook_called = set()
def _hook(status):
if status['status'] == 'finished':
@ -94,7 +96,19 @@ def generator(test_case):
_try_rm(tc['file'] + '.part')
_try_rm(tc['file'] + '.info.json')
try:
fd.download([test_case['url']])
for retry in range(1, RETRIES + 1):
try:
fd.download([test_case['url']])
except (DownloadError, ExtractorError) as err:
if retry == RETRIES: raise
# Check if the exception is not a network related one
if not err.exc_info[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError):
raise
print('Retrying: {0} failed tries\n\n##########\n\n'.format(retry))
else:
break
for tc in test_cases:
if not test_case.get('params', {}).get('skip_download', False):

test/test_youtube_lists.py

@ -8,8 +8,9 @@ import json
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from youtube_dl.InfoExtractors import YoutubeUserIE,YoutubePlaylistIE
from youtube_dl.InfoExtractors import YoutubeUserIE, YoutubePlaylistIE, YoutubeIE, YoutubeChannelIE
from youtube_dl.utils import *
from youtube_dl.FileDownloader import FileDownloader
PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json")
with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
@ -22,7 +23,7 @@ proxy_handler = compat_urllib_request.ProxyHandler()
opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
compat_urllib_request.install_opener(opener)
class FakeDownloader(object):
class FakeDownloader(FileDownloader):
def __init__(self):
self.result = []
self.params = parameters
@ -30,44 +31,72 @@ class FakeDownloader(object):
print(s)
def trouble(self, s):
raise Exception(s)
def download(self, x):
self.result.append(x)
def extract_info(self, url):
self.result.append(url)
return url
class TestYoutubeLists(unittest.TestCase):
def assertIsPlaylist(self,info):
"""Make sure the info has '_type' set to 'playlist'"""
self.assertEqual(info['_type'], 'playlist')
def test_youtube_playlist(self):
DL = FakeDownloader()
IE = YoutubePlaylistIE(DL)
IE.extract('https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')
self.assertEqual(DL.result, [
['http://www.youtube.com/watch?v=bV9L5Ht9LgY'],
['http://www.youtube.com/watch?v=FXxLjLQi3Fg'],
['http://www.youtube.com/watch?v=tU3Bgo5qJZE']
])
dl = FakeDownloader()
ie = YoutubePlaylistIE(dl)
result = ie.extract('https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')[0]
self.assertIsPlaylist(result)
self.assertEqual(result['title'], 'ytdl test PL')
ytie_results = [YoutubeIE()._extract_id(url['url']) for url in result['entries']]
self.assertEqual(ytie_results, [ 'bV9L5Ht9LgY', 'FXxLjLQi3Fg', 'tU3Bgo5qJZE'])
def test_issue_673(self):
dl = FakeDownloader()
ie = YoutubePlaylistIE(dl)
result = ie.extract('PLBB231211A4F62143')[0]
self.assertEqual(result['title'], 'Team Fortress 2')
self.assertTrue(len(result['entries']) > 40)
def test_youtube_playlist_long(self):
DL = FakeDownloader()
IE = YoutubePlaylistIE(DL)
IE.extract('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
self.assertTrue(len(DL.result) >= 799)
dl = FakeDownloader()
ie = YoutubePlaylistIE(dl)
result = ie.extract('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')[0]
self.assertIsPlaylist(result)
self.assertTrue(len(result['entries']) >= 799)
def test_youtube_playlist_with_deleted(self):
#651
dl = FakeDownloader()
ie = YoutubePlaylistIE(dl)
result = ie.extract('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')[0]
ytie_results = [YoutubeIE()._extract_id(url['url']) for url in result['entries']]
self.assertFalse('pElCt5oNDuI' in ytie_results)
self.assertFalse('KdPEApIVdWM' in ytie_results)
def test_youtube_course(self):
DL = FakeDownloader()
IE = YoutubePlaylistIE(DL)
dl = FakeDownloader()
ie = YoutubePlaylistIE(dl)
# TODO find a > 100 (paginating?) videos course
IE.extract('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
self.assertEqual(DL.result[0], ['http://www.youtube.com/watch?v=j9WZyLZCBzs'])
self.assertEqual(len(DL.result), 25)
self.assertEqual(DL.result[-1], ['http://www.youtube.com/watch?v=rYefUsYuEp0'])
result = ie.extract('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')[0]
entries = result['entries']
self.assertEqual(YoutubeIE()._extract_id(entries[0]['url']), 'j9WZyLZCBzs')
self.assertEqual(len(entries), 25)
self.assertEqual(YoutubeIE()._extract_id(entries[-1]['url']), 'rYefUsYuEp0')
def test_youtube_channel(self):
# I give up, please find a channel that does paginate and test this like test_youtube_playlist_long
pass # TODO
dl = FakeDownloader()
ie = YoutubeChannelIE(dl)
#test paginated channel
result = ie.extract('https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w')[0]
self.assertTrue(len(result['entries']) > 90)
#test autogenerated channel
result = ie.extract('https://www.youtube.com/channel/HCtnHdj3df7iM/videos')[0]
self.assertTrue(len(result['entries']) > 20)
def test_youtube_user(self):
DL = FakeDownloader()
IE = YoutubeUserIE(DL)
IE.extract('https://www.youtube.com/user/TheLinuxFoundation')
self.assertTrue(len(DL.result) >= 320)
dl = FakeDownloader()
ie = YoutubeUserIE(dl)
result = ie.extract('https://www.youtube.com/user/TheLinuxFoundation')[0]
self.assertTrue(len(result['entries']) >= 320)
if __name__ == '__main__':
unittest.main()

test/test_youtube_subtitles.py

@ -38,20 +38,63 @@ class FakeDownloader(object):
md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()
class TestYoutubeSubtitles(unittest.TestCase):
def setUp(self):
DL = FakeDownloader()
DL.params['allsubtitles'] = False
DL.params['writesubtitles'] = False
DL.params['subtitlesformat'] = 'srt'
DL.params['listsubtitles'] = False
def test_youtube_no_subtitles(self):
DL = FakeDownloader()
DL.params['writesubtitles'] = False
IE = YoutubeIE(DL)
info_dict = IE.extract('QRS8MkLhQmM')
subtitles = info_dict[0]['subtitles']
self.assertEqual(subtitles, None)
def test_youtube_subtitles(self):
DL = FakeDownloader()
DL.params['writesubtitles'] = True
IE = YoutubeIE(DL)
info_dict = IE.extract('QRS8MkLhQmM')
self.assertEqual(md5(info_dict[0]['subtitles']), 'c3228550d59116f3c29fba370b55d033')
sub = info_dict[0]['subtitles'][0]
self.assertEqual(md5(sub[2]), '4cd9278a35ba2305f47354ee13472260')
def test_youtube_subtitles_it(self):
DL = FakeDownloader()
DL.params['writesubtitles'] = True
DL.params['subtitleslang'] = 'it'
IE = YoutubeIE(DL)
info_dict = IE.extract('QRS8MkLhQmM')
self.assertEqual(md5(info_dict[0]['subtitles']), '132a88a0daf8e1520f393eb58f1f646a')
sub = info_dict[0]['subtitles'][0]
self.assertEqual(md5(sub[2]), '164a51f16f260476a05b50fe4c2f161d')
def test_youtube_onlysubtitles(self):
DL = FakeDownloader()
DL.params['writesubtitles'] = True
DL.params['onlysubtitles'] = True
IE = YoutubeIE(DL)
info_dict = IE.extract('QRS8MkLhQmM')
sub = info_dict[0]['subtitles'][0]
self.assertEqual(md5(sub[2]), '4cd9278a35ba2305f47354ee13472260')
def test_youtube_allsubtitles(self):
DL = FakeDownloader()
DL.params['allsubtitles'] = True
IE = YoutubeIE(DL)
info_dict = IE.extract('QRS8MkLhQmM')
subtitles = info_dict[0]['subtitles']
self.assertEqual(len(subtitles), 13)
def test_youtube_subtitles_format(self):
DL = FakeDownloader()
DL.params['writesubtitles'] = True
DL.params['subtitlesformat'] = 'sbv'
IE = YoutubeIE(DL)
info_dict = IE.extract('QRS8MkLhQmM')
sub = info_dict[0]['subtitles'][0]
self.assertEqual(md5(sub[2]), '13aeaa0c245a8bed9a451cb643e3ad8b')
def test_youtube_list_subtitles(self):
DL = FakeDownloader()
DL.params['listsubtitles'] = True
IE = YoutubeIE(DL)
info_dict = IE.extract('QRS8MkLhQmM')
self.assertEqual(info_dict, None)
if __name__ == '__main__':
unittest.main()

test/tests.json

@ -76,8 +76,7 @@
"name": "StanfordOpenClassroom",
"md5": "544a9468546059d4e80d76265b0443b8",
"url": "http://openclassroom.stanford.edu/MainFolder/VideoPage.php?course=PracticalUnix&video=intro-environment&speed=100",
"file": "PracticalUnix_intro-environment.mp4",
"skip": "Currently offline"
"file": "PracticalUnix_intro-environment.mp4"
},
{
"name": "XNXX",
@ -114,7 +113,8 @@
"name": "Escapist",
"url": "http://www.escapistmagazine.com/videos/view/the-escapist-presents/6618-Breaking-Down-Baldurs-Gate",
"file": "6618-Breaking-Down-Baldurs-Gate.flv",
"md5": "c6793dbda81388f4264c1ba18684a74d"
"md5": "c6793dbda81388f4264c1ba18684a74d",
"skip": "Fails with timeout on Travis"
},
{
"name": "GooglePlus",
@ -127,18 +127,6 @@
"file": "0732f586d7.mp4",
"md5": "f647e9e90064b53b6e046e75d0241fbd"
},
{
"name": "TweetReel",
"url": "http://tweetreel.com/?77smq",
"file": "77smq.mov",
"md5": "56b4d9ca9de467920f3f99a6d91255d6",
"info_dict": {
"uploader": "itszero",
"uploader_id": "itszero",
"upload_date": "20091225",
"description": "Installing Gentoo Linux on Powerbook G4, it turns out the sleep indicator becomes HDD activity indicator :D"
}
},
{
"name": "Steam",
"url": "http://store.steampowered.com/video/105600/",
@ -275,5 +263,80 @@
}
}
]
},
{
"name": "Keek",
"url": "http://www.keek.com/ytdl/keeks/NODfbab",
"file": "NODfbab.mp4",
"md5": "9b0636f8c0f7614afa4ea5e4c6e57e83",
"info_dict": {
"title": "test chars: \"'/\\ä<>This is a test video for youtube-dl.For more information, contact phihag@phihag.de ."
}
},
{
"name": "TED",
"url": "http://www.ted.com/talks/dan_dennett_on_our_consciousness.html",
"file": "102.mp4",
"md5": "7bc087e71d16f18f9b8ab9fa62a8a031",
"info_dict": {
"title": "Dan Dennett: The illusion of consciousness",
"thumbnail": "http://images.ted.com/images/ted/488_389x292.jpg"
}
},
{
"name": "MySpass",
"url": "http://www.myspass.de/myspass/shows/tvshows/absolute-mehrheit/Absolute-Mehrheit-vom-17022013-Die-Highlights-Teil-2--/11741/",
"file": "11741.mp4",
"md5": "0b49f4844a068f8b33f4b7c88405862b",
"info_dict": {
"title": "Absolute Mehrheit vom 17.02.2013 - Die Highlights, Teil 2"
}
},
{
"name": "Generic",
"url": "http://www.hodiho.fr/2013/02/regis-plante-sa-jeep.html",
"file": "13601338388002.mp4",
"md5": "85b90ccc9d73b4acd9138d3af4c27f89"
},
{
"name": "Spiegel",
"url": "http://www.spiegel.de/video/vulkan-tungurahua-in-ecuador-ist-wieder-aktiv-video-1259285.html",
"file": "1259285.mp4",
"md5": "2c2754212136f35fb4b19767d242f66e",
"info_dict": {
"title": "Vulkanausbruch in Ecuador: Der \"Feuerschlund\" ist wieder aktiv"
}
},
{
"name": "LiveLeak",
"md5": "0813c2430bea7a46bf13acf3406992f4",
"url": "http://www.liveleak.com/view?i=757_1364311680",
"file": "757_1364311680.mp4",
"info_dict": {
"title": "Most unlucky car accident",
"description": "extremely bad day for this guy..!",
"uploader": "ljfriel2"
}
},
{
"name": "WorldStarHipHop",
"url": "http://www.worldstarhiphop.com/videos/video.php?v=wshh6a7q1ny0G34ZwuIO",
"file": "wshh6a7q1ny0G34ZwuIO.mp4",
"md5": "9d04de741161603bf7071bbf4e883186",
"info_dict": {
"title": "Video: KO Of The Week: MMA Fighter Gets Knocked Out By Swift Head Kick! "
}
},
{
"name": "ARD",
"url": "http://www.ardmediathek.de/das-erste/tagesschau-in-100-sek?documentId=14077640",
"file": "14077640.mp4",
"md5": "6ca8824255460c787376353f9e20bbd8",
"info_dict": {
"title": "11.04.2013 09:23 Uhr - Tagesschau in 100 Sekunden"
},
"skip": "Requires rtmpdump"
}
]


@ -38,7 +38,7 @@ def rsa_verify(message, signature, key):
sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
sys.stderr.write(u'This will only happen once. Simply press enter to go on. Sorry for the trouble!\n')
sys.stderr.write(u'From now on, get the binaries from http://rg3.github.com/youtube-dl/download.html, not from the git repository.\n\n')
sys.stderr.write(u'From now on, get the binaries from http://rg3.github.io/youtube-dl/download.html, not from the git repository.\n\n')
try:
raw_input()
@ -47,7 +47,7 @@ except NameError: # Python 3
filename = sys.argv[0]
UPDATE_URL = "http://rg3.github.com/youtube-dl/update/"
UPDATE_URL = "http://rg3.github.io/youtube-dl/update/"
VERSION_URL = UPDATE_URL + 'LATEST_VERSION'
JSON_URL = UPDATE_URL + 'versions.json'
UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)

youtube_dl/FileDownloader.py

@ -17,6 +17,7 @@ if os.name == 'nt':
import ctypes
from .utils import *
from .InfoExtractors import get_info_extractor
class FileDownloader(object):
@ -78,7 +79,11 @@ class FileDownloader(object):
updatetime: Use the Last-modified header to set output file timestamps.
writedescription: Write the video description to a .description file
writeinfojson: Write the video description to a .info.json file
writesubtitles: Write the video subtitles to a .srt file
writesubtitles: Write the video subtitles to a file
onlysubtitles: Downloads only the subtitles of the video
allsubtitles: Downloads all the subtitles of the video
listsubtitles: Lists all available subtitles for the video
subtitlesformat: Subtitle format [sbv/srt] (default=srt)
subtitleslang: Language of the subtitles to download
test: Download only first bytes to test the downloader.
keepvideo: Keep the video file after post-processing
@ -104,7 +109,7 @@ class FileDownloader(object):
self.params = params
if '%(stitle)s' in self.params['outtmpl']:
self.to_stderr(u'WARNING: %(stitle)s is deprecated. Use the %(title)s and the --restrict-filenames flag(which also secures %(uploader)s et al) instead.')
self.report_warning(u'%(stitle)s is deprecated. Use the %(title)s and the --restrict-filenames flag(which also secures %(uploader)s et al) instead.')
@staticmethod
def format_bytes(bytes):
@ -208,7 +213,7 @@ class FileDownloader(object):
# already of type unicode()
ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
elif 'TERM' in os.environ:
sys.stderr.write('\033]0;%s\007' % message.encode(preferredencoding()))
self.to_screen('\033]0;%s\007' % message, skip_eol=True)
def fixed_template(self):
"""Checks if the output template is fixed."""
@ -227,13 +232,47 @@ class FileDownloader(object):
self.to_stderr(message)
if self.params.get('verbose'):
if tb is None:
tb_data = traceback.format_list(traceback.extract_stack())
tb = u''.join(tb_data)
if sys.exc_info()[0]: # if .trouble has been called from an except block
tb = u''
if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
tb += u''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
tb += compat_str(traceback.format_exc())
else:
tb_data = traceback.format_list(traceback.extract_stack())
tb = u''.join(tb_data)
self.to_stderr(tb)
if not self.params.get('ignoreerrors', False):
raise DownloadError(message)
if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
exc_info = sys.exc_info()[1].exc_info
else:
exc_info = sys.exc_info()
raise DownloadError(message, exc_info)
self._download_retcode = 1
def report_warning(self, message):
'''
Print the message to stderr, it will be prefixed with 'WARNING:'
If stderr is a tty file the 'WARNING:' will be colored
'''
if sys.stderr.isatty():
_msg_header=u'\033[0;33mWARNING:\033[0m'
else:
_msg_header=u'WARNING:'
warning_message=u'%s %s' % (_msg_header,message)
self.to_stderr(warning_message)
def report_error(self, message, tb=None):
'''
Do the same as trouble, but prefixes the message with 'ERROR:', colored
in red if stderr is a tty file.
'''
if sys.stderr.isatty():
_msg_header = u'\033[0;31mERROR:\033[0m'
else:
_msg_header = u'ERROR:'
error_message = u'%s %s' % (_msg_header, message)
self.trouble(error_message, tb)
def slow_down(self, start_time, byte_counter):
"""Sleep if the download speed is over the rate limit."""
rate_limit = self.params.get('ratelimit', None)
@ -265,7 +304,7 @@ class FileDownloader(object):
return
os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
except (IOError, OSError) as err:
self.trouble(u'ERROR: unable to rename file')
self.report_error(u'unable to rename file')
def try_utime(self, filename, last_modified_hdr):
"""Try to set the last-modified time of the given file."""
@ -289,9 +328,9 @@ class FileDownloader(object):
""" Report that the description file is being written """
self.to_screen(u'[info] Writing video description to: ' + descfn)
def report_writesubtitles(self, srtfn):
def report_writesubtitles(self, sub_filename):
""" Report that the subtitles file is being written """
self.to_screen(u'[info] Writing video subtitles to: ' + srtfn)
self.to_screen(u'[info] Writing video subtitles to: ' + sub_filename)
def report_writeinfojson(self, infofn):
""" Report that the metadata file has been written """
@ -305,7 +344,11 @@ class FileDownloader(object):
"""Report download progress."""
if self.params.get('noprogress', False):
return
self.to_screen(u'\r[download] %s of %s at %s ETA %s' %
if self.params.get('progress_with_newline', False):
self.to_screen(u'[download] %s of %s at %s ETA %s' %
(percent_str, data_len_str, speed_str, eta_str))
else:
self.to_screen(u'\r[download] %s of %s at %s ETA %s' %
(percent_str, data_len_str, speed_str, eta_str), skip_eol=True)
self.to_cons_title(u'youtube-dl - %s of %s at %s ETA %s' %
(percent_str.strip(), data_len_str.strip(), speed_str.strip(), eta_str.strip()))
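A small sketch of the difference the new --newline flag makes; the progress strings are invented and the loop only mimics the two output styles:

import sys, time

use_newline = False  # flip to True to mimic --newline
for percent in (u'10.0%', u'55.0%', u'100.0%'):
    line = u'[download] %s of 10.00M at 1.00M/s ETA 00:05' % percent
    if use_newline:
        sys.stdout.write(line + u'\n')          # one line per update
    else:
        sys.stdout.write(u'\r' + line)          # rewrite the same line in place
        sys.stdout.flush()
    time.sleep(0.2)
sys.stdout.write(u'\n')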
@@ -346,7 +389,13 @@ class FileDownloader(object):
template_dict = dict(info_dict)
template_dict['epoch'] = int(time.time())
template_dict['autonumber'] = u'%05d' % self._num_downloads
autonumber_size = self.params.get('autonumber_size')
if autonumber_size is None:
autonumber_size = 5
autonumber_templ = u'%0' + str(autonumber_size) + u'd'
template_dict['autonumber'] = autonumber_templ % self._num_downloads
if template_dict['playlist_index'] is not None:
template_dict['playlist_index'] = u'%05d' % template_dict['playlist_index']
sanitize = lambda k,v: sanitize_filename(
u'NA' if v is None else compat_str(v),
@@ -356,8 +405,11 @@ class FileDownloader(object):
filename = self.params['outtmpl'] % template_dict
return filename
except (ValueError, KeyError) as err:
self.trouble(u'ERROR: invalid system charset or erroneous output template')
except KeyError as err:
self.trouble(u'ERROR: Erroneous output template')
return None
except ValueError as err:
self.trouble(u'ERROR: Insufficient system charset ' + repr(preferredencoding()))
return None
def _match_entry(self, info_dict):
@@ -366,19 +418,133 @@ class FileDownloader(object):
title = info_dict['title']
matchtitle = self.params.get('matchtitle', False)
if matchtitle:
matchtitle = matchtitle.decode('utf8')
if not re.search(matchtitle, title, re.IGNORECASE):
return u'[download] "' + title + '" title did not match pattern "' + matchtitle + '"'
rejecttitle = self.params.get('rejecttitle', False)
if rejecttitle:
rejecttitle = rejecttitle.decode('utf8')
if re.search(rejecttitle, title, re.IGNORECASE):
return u'"' + title + '" title matched reject pattern "' + rejecttitle + '"'
return None
def extract_info(self, url, download = True, ie_name = None):
'''
Returns a list with a dictionary for each video we find.
If 'download', also downloads the videos.
'''
suitable_found = False
#We copy the original list
ies = list(self._ies)
if ie_name is not None:
#We put in the first place the given info extractor
first_ie = get_info_extractor(ie_name)()
first_ie.set_downloader(self)
ies.insert(0, first_ie)
for ie in ies:
# Go to next InfoExtractor if not suitable
if not ie.suitable(url):
continue
# Warn if the _WORKING attribute is False
if not ie.working():
self.to_stderr(u'WARNING: the program functionality for this site has been marked as broken, '
u'and will probably not work. If you want to go on, use the -i option.')
# Suitable InfoExtractor found
suitable_found = True
# Extract information from URL and process it
try:
ie_results = ie.extract(url)
if ie_results is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
break
results = []
for ie_result in ie_results:
if not 'extractor' in ie_result:
#The extractor has already been set somewhere else
ie_result['extractor'] = ie.IE_NAME
results.append(self.process_ie_result(ie_result, download))
return results
except ExtractorError as de: # An error we somewhat expected
self.trouble(u'ERROR: ' + compat_str(de), de.format_traceback())
break
except Exception as e:
if self.params.get('ignoreerrors', False):
self.trouble(u'ERROR: ' + compat_str(e), tb=compat_str(traceback.format_exc()))
break
else:
raise
if not suitable_found:
self.trouble(u'ERROR: no suitable InfoExtractor: %s' % url)
def process_ie_result(self, ie_result, download = True):
"""
Take the result of the ie and return a list of videos.
For url elements it will search the suitable ie and get the videos
For playlist elements it will process each of the elements of the 'entries' key
It will also download the videos if 'download'.
"""
result_type = ie_result.get('_type', 'video') #If not given we suppose it's a video, support the default old system
if result_type == 'video':
if 'playlist' not in ie_result:
#It isn't part of a playlist
ie_result['playlist'] = None
ie_result['playlist_index'] = None
if download:
#Do the download:
self.process_info(ie_result)
return ie_result
elif result_type == 'url':
#We get the video pointed by the url
result = self.extract_info(ie_result['url'], download, ie_name = ie_result['ie_key'])[0]
return result
elif result_type == 'playlist':
#We process each entry in the playlist
playlist = ie_result.get('title', None) or ie_result.get('id', None)
self.to_screen(u'[download] Downloading playlist: %s' % playlist)
playlist_results = []
n_all_entries = len(ie_result['entries'])
playliststart = self.params.get('playliststart', 1) - 1
playlistend = self.params.get('playlistend', -1)
if playlistend == -1:
entries = ie_result['entries'][playliststart:]
else:
entries = ie_result['entries'][playliststart:playlistend]
n_entries = len(entries)
self.to_screen(u"[%s] playlist '%s': Collected %d video ids (downloading %d of them)" %
(ie_result['extractor'], playlist, n_all_entries, n_entries))
for i,entry in enumerate(entries,1):
self.to_screen(u'[download] Downloading video #%s of %s' %(i, n_entries))
entry_result = self.process_ie_result(entry, False)
entry_result['playlist'] = playlist
entry_result['playlist_index'] = i + playliststart
#We must do the download here to correctly set the 'playlist' key
if download:
self.process_info(entry_result)
playlist_results.append(entry_result)
result = ie_result.copy()
result['entries'] = playlist_results
return result
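To make the new control flow easier to follow, here is a hedged sketch of the three result shapes process_ie_result() distinguishes; all field values are invented and only the keys used above matter:

plain_video = {                       # '_type' absent: treated as 'video' (the old behaviour)
    u'id': u'abc123',
    u'title': u'A plain video',
    u'url': u'http://example.com/video.mp4',
    u'ext': u'mp4',
}
url_reference = {                     # delegated to another IE via extract_info()
    u'_type': u'url',
    u'url': u'http://www.example.com/watch?v=abc123',
    u'ie_key': u'SomeIE',             # hypothetical name passed to get_info_extractor()
}
playlist = {                          # each entry is processed recursively
    u'_type': u'playlist',
    u'title': u'An example playlist',
    u'entries': [plain_video, url_reference],
}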
def process_info(self, info_dict):
"""Process a single dictionary returned by an InfoExtractor."""
#We increment the download count here to match the previous behaviour.
self.increment_downloads()
info_dict['fulltitle'] = info_dict['title']
if len(info_dict['title']) > 200:
info_dict['title'] = info_dict['title'][:197] + u'...'
# Keep for backwards compatibility
info_dict['stitle'] = info_dict['title']
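The cap above keeps generated filenames manageable; in isolation, with an invented title:

title = u'x' * 250                    # invented, overly long title
fulltitle = title
if len(title) > 200:
    title = title[:197] + u'...'
print(len(title))                     # -> 200; the untruncated text stays in 'fulltitle'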
@@ -423,7 +589,7 @@ class FileDownloader(object):
if dn != '' and not os.path.exists(dn): # dn is already encoded
os.makedirs(dn)
except (OSError, IOError) as err:
self.trouble(u'ERROR: unable to create directory ' + compat_str(err))
self.report_error(u'unable to create directory ' + compat_str(err))
return
if self.params.get('writedescription', False):
@@ -433,20 +599,47 @@ class FileDownloader(object):
with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
descfile.write(info_dict['description'])
except (OSError, IOError):
self.trouble(u'ERROR: Cannot write description file ' + descfn)
self.report_error(u'Cannot write description file ' + descfn)
return
if self.params.get('writesubtitles', False) and 'subtitles' in info_dict and info_dict['subtitles']:
# subtitles download errors are already managed as troubles in relevant IE
# that way it will silently go on when used with unsupporting IE
try:
srtfn = filename.rsplit('.', 1)[0] + u'.srt'
self.report_writesubtitles(srtfn)
with io.open(encodeFilename(srtfn), 'w', encoding='utf-8') as srtfile:
srtfile.write(info_dict['subtitles'])
except (OSError, IOError):
self.trouble(u'ERROR: Cannot write subtitles file ' + descfn)
return
subtitle = info_dict['subtitles'][0]
(sub_error, sub_lang, sub) = subtitle
sub_format = self.params.get('subtitlesformat')
if sub_error:
self.report_warning("Some error while getting the subtitles")
else:
try:
sub_filename = filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format
self.report_writesubtitles(sub_filename)
with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
subfile.write(sub)
except (OSError, IOError):
self.report_error(u'Cannot write subtitles file ' + descfn)
return
if self.params.get('onlysubtitles', False):
return
if self.params.get('allsubtitles', False) and 'subtitles' in info_dict and info_dict['subtitles']:
subtitles = info_dict['subtitles']
sub_format = self.params.get('subtitlesformat')
for subtitle in subtitles:
(sub_error, sub_lang, sub) = subtitle
if sub_error:
self.report_warning("Some error while getting the subtitles")
else:
try:
sub_filename = filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format
self.report_writesubtitles(sub_filename)
with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
subfile.write(sub)
except (OSError, IOError):
self.trouble(u'ERROR: Cannot write subtitles file ' + descfn)
return
if self.params.get('onlysubtitles', False):
return
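# Hedged sketch of the subtitle handling above, outside the class: a subtitle
# entry is an (error, language, data) tuple and the output name is derived from
# the video filename.  All concrete values below are invented.
filename = u'Some video-abc123.mp4'
sub_format = u'srt'
subtitle = (None, u'en', u'1\n00:00:00,000 --> 00:00:01,000\nHello\n')
(sub_error, sub_lang, sub) = subtitle
if not sub_error:
    sub_filename = filename.rsplit(u'.', 1)[0] + u'.' + sub_lang + u'.' + sub_format
    print(sub_filename)               # -> Some video-abc123.en.srt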
if self.params.get('writeinfojson', False):
infofn = filename + u'.info.json'
@@ -455,7 +648,7 @@ class FileDownloader(object):
json_info_dict = dict((k, v) for k,v in info_dict.items() if not k in ['urlhandle'])
write_json_file(json_info_dict, encodeFilename(infofn))
except (OSError, IOError):
self.trouble(u'ERROR: Cannot write metadata to JSON file ' + infofn)
self.report_error(u'Cannot write metadata to JSON file ' + infofn)
return
if not self.params.get('skip_download', False):
@@ -467,17 +660,17 @@ class FileDownloader(object):
except (OSError, IOError) as err:
raise UnavailableVideoError()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self.trouble(u'ERROR: unable to download video data: %s' % str(err))
self.report_error(u'unable to download video data: %s' % str(err))
return
except (ContentTooShortError, ) as err:
self.trouble(u'ERROR: content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
self.report_error(u'content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
return
if success:
try:
self.post_process(filename, info_dict)
except (PostProcessingError) as err:
self.trouble(u'ERROR: postprocessing: %s' % str(err))
self.report_error(u'postprocessing: %s' % str(err))
return
def download(self, url_list):
@@ -486,49 +679,14 @@ class FileDownloader(object):
raise SameFileError(self.params['outtmpl'])
for url in url_list:
suitable_found = False
for ie in self._ies:
# Go to next InfoExtractor if not suitable
if not ie.suitable(url):
continue
# Warn if the _WORKING attribute is False
if not ie.working():
self.to_stderr(u'WARNING: the program functionality for this site has been marked as broken, '
u'and will probably not work. If you want to go on, use the -i option.')
# Suitable InfoExtractor found
suitable_found = True
# Extract information from URL and process it
try:
videos = ie.extract(url)
except ExtractorError as de: # An error we somewhat expected
self.trouble(u'ERROR: ' + compat_str(de), de.format_traceback())
break
except Exception as e:
if self.params.get('ignoreerrors', False):
self.trouble(u'ERROR: ' + compat_str(e), tb=compat_str(traceback.format_exc()))
break
else:
raise
if len(videos or []) > 1 and self.fixed_template():
raise SameFileError(self.params['outtmpl'])
for video in videos or []:
video['extractor'] = ie.IE_NAME
try:
self.increment_downloads()
self.process_info(video)
except UnavailableVideoError:
self.trouble(u'\nERROR: unable to download video')
# Suitable InfoExtractor had been found; go to next URL
break
if not suitable_found:
self.trouble(u'ERROR: no suitable InfoExtractor: %s' % url)
try:
#It also downloads the videos
videos = self.extract_info(url)
except UnavailableVideoError:
self.trouble(u'\nERROR: unable to download video')
except MaxDownloadsReached:
self.to_screen(u'[info] Maximum number of downloaded files reached.')
raise
return self._download_retcode
@@ -550,20 +708,20 @@ class FileDownloader(object):
self.to_stderr(u'ERROR: ' + e.msg)
if keep_video is False and not self.params.get('keepvideo', False):
try:
self.to_stderr(u'Deleting original file %s (pass -k to keep)' % filename)
self.to_screen(u'Deleting original file %s (pass -k to keep)' % filename)
os.remove(encodeFilename(filename))
except (IOError, OSError):
self.to_stderr(u'WARNING: Unable to remove downloaded video file')
self.report_warning(u'Unable to remove downloaded video file')
def _download_with_rtmpdump(self, filename, url, player_url, page_url):
def _download_with_rtmpdump(self, filename, url, player_url, page_url, play_path):
self.report_destination(filename)
tmpfilename = self.temp_name(filename)
# Check for rtmpdump first
try:
subprocess.call(['rtmpdump', '-h'], stdout=(file(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
subprocess.call(['rtmpdump', '-h'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
except (OSError, IOError):
self.trouble(u'ERROR: RTMP download detected but "rtmpdump" could not be run')
self.report_error(u'RTMP download detected but "rtmpdump" could not be run')
return False
# Download using rtmpdump. rtmpdump returns exit code 2 when
@@ -574,6 +732,8 @@ class FileDownloader(object):
basic_args += ['-W', player_url]
if page_url is not None:
basic_args += ['--pageUrl', page_url]
if play_path is not None:
basic_args += ['-y', play_path]
args = basic_args + [[], ['-e', '-k', '1']][self.params.get('continuedl', False)]
if self.params.get('verbose', False):
try:
@@ -608,7 +768,8 @@ class FileDownloader(object):
})
return True
else:
self.trouble(u'\nERROR: rtmpdump exited with code %d' % retval)
self.to_stderr(u"\n")
self.report_error(u'rtmpdump exited with code %d' % retval)
return False
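To illustrate the newly threaded-through play path, a rough sketch of the rtmpdump argument list; the base arguments are abbreviated and hypothetical here, and all URLs and paths are placeholders:

url = u'rtmp://example.com/app/stream'
tmpfilename = u'video.f4v.part'
player_url = u'http://example.com/player.swf'
page_url = u'http://example.com/watch/123'
play_path = u'mp4:path/inside/the/application'

basic_args = ['rtmpdump', '-r', url, '-o', tmpfilename]    # abbreviated; the real code passes more flags
if player_url is not None:
    basic_args += ['-W', player_url]
if page_url is not None:
    basic_args += ['--pageUrl', page_url]
if play_path is not None:
    basic_args += ['-y', play_path]                        # the new play path option
print(basic_args)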
def _do_download(self, filename, info_dict):
@@ -627,7 +788,8 @@ class FileDownloader(object):
if url.startswith('rtmp'):
return self._download_with_rtmpdump(filename, url,
info_dict.get('player_url', None),
info_dict.get('page_url', None))
info_dict.get('page_url', None),
info_dict.get('play_path', None))
tmpfilename = self.temp_name(filename)
stream = None
@@ -708,7 +870,7 @@ class FileDownloader(object):
self.report_retry(count, retries)
if count > retries:
self.trouble(u'ERROR: giving up after %s retries' % retries)
self.report_error(u'giving up after %s retries' % retries)
return False
data_len = data.info().get('Content-length', None)
@@ -744,12 +906,13 @@ class FileDownloader(object):
filename = self.undo_temp_name(tmpfilename)
self.report_destination(filename)
except (OSError, IOError) as err:
self.trouble(u'ERROR: unable to open for writing: %s' % str(err))
self.report_error(u'unable to open for writing: %s' % str(err))
return False
try:
stream.write(data_block)
except (IOError, OSError) as err:
self.trouble(u'\nERROR: unable to write data: %s' % str(err))
self.to_stderr(u"\n")
self.report_error(u'unable to write data: %s' % str(err))
return False
if not self.params.get('noresizebuffer', False):
block_size = self.best_block_size(after - before, len(data_block))
@@ -775,7 +938,8 @@ class FileDownloader(object):
self.slow_down(start, byte_counter - resume_len)
if stream is None:
self.trouble(u'\nERROR: Did not get any data blocks')
self.to_stderr(u"\n")
self.report_error(u'Did not get any data blocks')
return False
stream.close()
self.report_finish()

File diff suppressed because it is too large

View File

@@ -23,6 +23,8 @@ __authors__ = (
'Dave Vasilevsky',
'Jaime Marquínez Ferrándiz',
'Jeff Crouse',
'Osama Khalid',
'Michael Walter',
)
__license__ = 'Public Domain'
@@ -125,7 +127,7 @@ def parseOpts():
general.add_option('-i', '--ignore-errors',
action='store_true', dest='ignoreerrors', help='continue on download errors', default=False)
general.add_option('-r', '--rate-limit',
dest='ratelimit', metavar='LIMIT', help='download rate limit (e.g. 50k or 44.6m)')
dest='ratelimit', metavar='LIMIT', help='maximum download rate (e.g. 50k or 44.6m)')
general.add_option('-R', '--retries',
dest='retries', metavar='RETRIES', help='number of retries (default is %default)', default=10)
general.add_option('--buffer-size',
@@ -172,12 +174,24 @@ def parseOpts():
action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')
video_format.add_option('-F', '--list-formats',
action='store_true', dest='listformats', help='list all available formats (currently youtube only)')
video_format.add_option('--write-srt',
video_format.add_option('--write-sub', '--write-srt',
action='store_true', dest='writesubtitles',
help='write video closed captions to a .srt file (currently youtube only)', default=False)
video_format.add_option('--srt-lang',
help='write subtitle file (currently youtube only)', default=False)
video_format.add_option('--only-sub',
action='store_true', dest='onlysubtitles',
help='downloads only the subtitles (no video)', default=False)
video_format.add_option('--all-subs',
action='store_true', dest='allsubtitles',
help='downloads all the available subtitles of the video (currently youtube only)', default=False)
video_format.add_option('--list-subs',
action='store_true', dest='listsubtitles',
help='lists all available subtitles for the video (currently youtube only)', default=False)
video_format.add_option('--sub-format',
action='store', dest='subtitlesformat', metavar='LANG',
help='subtitle format [srt/sbv] (default=srt) (currently youtube only)', default='srt')
video_format.add_option('--sub-lang', '--srt-lang',
action='store', dest='subtitleslang', metavar='LANG',
help='language of the closed captions to download (optional) use IETF language tags like \'en\'')
help='language of the subtitles to download (optional) use IETF language tags like \'en\'')
verbosity.add_option('-q', '--quiet',
action='store_true', dest='quiet', help='activates quiet mode', default=False)
@@ -201,6 +215,8 @@ def parseOpts():
verbosity.add_option('--get-format',
action='store_true', dest='getformat',
help='simulate, quiet but print output format', default=False)
verbosity.add_option('--newline',
action='store_true', dest='progress_with_newline', help='output progress bar as new lines', default=False)
verbosity.add_option('--no-progress',
action='store_true', dest='noprogress', help='do not print progress bar', default=False)
verbosity.add_option('--console-title',
@@ -208,7 +224,9 @@ def parseOpts():
help='display progress in console titlebar', default=False)
verbosity.add_option('-v', '--verbose',
action='store_true', dest='verbose', help='print various debugging information', default=False)
verbosity.add_option('--dump-intermediate-pages',
action='store_true', dest='dump_intermediate_pages', default=False,
help='print downloaded pages to debug problems (very verbose)')
filesystem.add_option('-t', '--title',
action='store_true', dest='usetitle', help='use title in file name', default=False)
@@ -220,7 +238,19 @@ def parseOpts():
action='store_true', dest='autonumber',
help='number downloaded files starting from 00000', default=False)
filesystem.add_option('-o', '--output',
dest='outtmpl', metavar='TEMPLATE', help='output filename template. Use %(title)s to get the title, %(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, %(autonumber)s to get an automatically incremented number, %(ext)s for the filename extension, %(upload_date)s for the upload date (YYYYMMDD), %(extractor)s for the provider (youtube, metacafe, etc), %(id)s for the video id and %% for a literal percent. Use - to output to stdout. Can also be used to download to a different directory, for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .')
dest='outtmpl', metavar='TEMPLATE',
help=('output filename template. Use %(title)s to get the title, '
'%(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, '
'%(autonumber)s to get an automatically incremented number, '
'%(ext)s for the filename extension, %(upload_date)s for the upload date (YYYYMMDD), '
'%(extractor)s for the provider (youtube, metacafe, etc), '
'%(id)s for the video id , %(playlist)s for the playlist the video is in, '
'%(playlist_index)s for the position in the playlist and %% for a literal percent. '
'Use - to output to stdout. Can also be used to download to a different directory, '
'for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .'))
filesystem.add_option('--autonumber-size',
dest='autonumber_size', metavar='NUMBER',
help='Specifies the number of digits in %(autonumber)s when it is present in output filename template or --autonumber option is given')
filesystem.add_option('--restrict-filenames',
action='store_true', dest='restrictfilenames',
help='Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames', default=False)
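A short, hedged illustration of how the new template fields expand; the template string and values are made up, and the zero-padding mirrors what FileDownloader applies to playlist_index and autonumber:

template = u'%(playlist)s/%(playlist_index)s - %(title)s.%(ext)s'
fields = {
    u'playlist': u'Example playlist',
    u'playlist_index': u'%05d' % 7,   # padded the same way as in FileDownloader
    u'title': u'Some video',
    u'ext': u'mp4',
}
print(template % fields)              # -> Example playlist/00007 - Some video.mp4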
@@ -272,12 +302,20 @@ def parseOpts():
xdg_config_home = os.environ.get('XDG_CONFIG_HOME')
if xdg_config_home:
userConf = os.path.join(xdg_config_home, 'youtube-dl.conf')
userConfFile = os.path.join(xdg_config_home, 'youtube-dl.conf')
else:
userConf = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl.conf')
argv = _readOptions('/etc/youtube-dl.conf') + _readOptions(userConf) + sys.argv[1:]
userConfFile = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl.conf')
systemConf = _readOptions('/etc/youtube-dl.conf')
userConf = _readOptions(userConfFile)
commandLineConf = sys.argv[1:]
argv = systemConf + userConf + commandLineConf
opts, args = parser.parse_args(argv)
if opts.verbose:
print(u'[debug] System config: ' + repr(systemConf))
print(u'[debug] User config: ' + repr(userConf))
print(u'[debug] Command-line args: ' + repr(commandLineConf))
return parser, opts, args
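A hedged sketch of the option-source ordering set up above; the contents of the two config files are invented, and because optparse processes arguments left to right, a simple 'store' option given more than once keeps its last value, so later sources win:

systemConf = ['--restrict-filenames']                  # pretend /etc/youtube-dl.conf
userConf = ['-r', '100k']                              # pretend ~/.config/youtube-dl.conf
commandLineConf = ['-r', '500k', 'http://example.com/watch/123']
argv = systemConf + userConf + commandLineConf
print(argv)   # the -r from the real command line overrides the user config value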
def _real_main():
@@ -410,6 +448,7 @@ def _real_main():
or (opts.useid and u'%(id)s.%(ext)s')
or (opts.autonumber and u'%(autonumber)s-%(id)s.%(ext)s')
or u'%(id)s.%(ext)s')
# File downloader
fd = FileDownloader({
'usenetrc': opts.usenetrc,
@@ -428,6 +467,7 @@ def _real_main():
'format_limit': opts.format_limit,
'listformats': opts.listformats,
'outtmpl': outtmpl,
'autonumber_size': opts.autonumber_size,
'restrictfilenames': opts.restrictfilenames,
'ignoreerrors': opts.ignoreerrors,
'ratelimit': opts.ratelimit,
@@ -437,6 +477,7 @@ def _real_main():
'noresizebuffer': opts.noresizebuffer,
'continuedl': opts.continue_dl,
'noprogress': opts.noprogress,
'progress_with_newline': opts.progress_with_newline,
'playliststart': opts.playliststart,
'playlistend': opts.playlistend,
'logtostderr': opts.outtmpl == '-',
@@ -446,12 +487,17 @@ def _real_main():
'writedescription': opts.writedescription,
'writeinfojson': opts.writeinfojson,
'writesubtitles': opts.writesubtitles,
'onlysubtitles': opts.onlysubtitles,
'allsubtitles': opts.allsubtitles,
'listsubtitles': opts.listsubtitles,
'subtitlesformat': opts.subtitlesformat,
'subtitleslang': opts.subtitleslang,
'matchtitle': opts.matchtitle,
'rejecttitle': opts.rejecttitle,
'matchtitle': decodeOption(opts.matchtitle),
'rejecttitle': decodeOption(opts.rejecttitle),
'max_downloads': opts.max_downloads,
'prefer_free_formats': opts.prefer_free_formats,
'verbose': opts.verbose,
'dump_intermediate_pages': opts.dump_intermediate_pages,
'test': opts.test,
'keepvideo': opts.keepvideo,
'min_filesize': opts.min_filesize,

View File

@@ -9,7 +9,8 @@ import sys
if __package__ is None and not hasattr(sys, "frozen"):
# direct call of __main__.py
import os.path
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
path = os.path.realpath(os.path.abspath(__file__))
sys.path.append(os.path.dirname(os.path.dirname(path)))
import youtube_dl

View File

@@ -37,7 +37,7 @@ def rsa_verify(message, signature, key):
def update_self(to_screen, verbose, filename):
"""Update the program file with the latest version from the repository"""
UPDATE_URL = "http://rg3.github.com/youtube-dl/update/"
UPDATE_URL = "http://rg3.github.io/youtube-dl/update/"
VERSION_URL = UPDATE_URL + 'LATEST_VERSION'
JSON_URL = UPDATE_URL + 'versions.json'
UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)
@@ -77,10 +77,8 @@ def update_self(to_screen, verbose, filename):
to_screen(u'Updating to version ' + versions_info['latest'] + '...')
version = versions_info['versions'][versions_info['latest']]
if version.get('notes'):
to_screen(u'PLEASE NOTE:')
for note in version['notes']:
to_screen(note)
print_notes(versions_info['versions'])
if not os.access(filename, os.W_OK):
to_screen(u'ERROR: no write permissions on %s' % filename)
@@ -158,3 +156,13 @@ del "%s"
return
to_screen(u'Updated youtube-dl. Restart youtube-dl to use the new version.')
def print_notes(versions, fromVersion=__version__):
notes = []
for v,vdata in sorted(versions.items()):
if v > fromVersion:
notes.extend(vdata.get('notes', []))
if notes:
to_screen(u'PLEASE NOTE:')
for note in notes:
to_screen(note)
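The selection logic in print_notes(), sketched on its own with invented data; the lexicographic comparison is adequate because version strings follow the zero-padded YYYY.MM.DD scheme:

versions = {
    u'2013.02.02': {u'notes': [u'an older note']},
    u'2013.04.18': {},
    u'2013.04.22': {u'notes': [u'a newer note']},
}
fromVersion = u'2013.02.02'
notes = []
for v, vdata in sorted(versions.items()):
    if v > fromVersion:
        notes.extend(vdata.get(u'notes', []))
print(notes)   # -> [u'a newer note']; notes for the running version itself are skipped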

View File

@@ -311,7 +311,7 @@ def clean_html(html):
html = re.sub('<.*?>', '', html)
# Replace html entities
html = unescapeHTML(html)
return html
return html.strip()
def sanitize_open(filename, open_mode):
@@ -329,7 +329,7 @@ def sanitize_open(filename, open_mode):
if sys.platform == 'win32':
import msvcrt
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
return (sys.stdout, filename)
return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
stream = open(encodeFilename(filename), open_mode)
return (stream, filename)
except (IOError, OSError) as err:
@@ -420,6 +420,14 @@ def encodeFilename(s):
encoding = 'utf-8'
return s.encode(encoding, 'ignore')
def decodeOption(optval):
if optval is None:
return optval
if isinstance(optval, bytes):
optval = optval.decode(preferredencoding())
assert isinstance(optval, compat_str)
return optval
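A standalone sketch of what decodeOption() does; locale.getpreferredencoding() stands in here for youtube-dl's preferredencoding() helper:

import locale

def decode_option(optval):
    # bytes coming from sys.argv are decoded to text, text passes through unchanged
    if optval is None:
        return optval
    if isinstance(optval, bytes):
        optval = optval.decode(locale.getpreferredencoding())
    return optval

print(decode_option(b'abc'))           # -> u'abc'
print(decode_option(u'already text'))  # unchanged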
class ExtractorError(Exception):
"""Error during info extraction."""
@@ -427,6 +435,7 @@ class ExtractorError(Exception):
""" tb, if given, is the original traceback (so that it can be printed out). """
super(ExtractorError, self).__init__(msg)
self.traceback = tb
self.exc_info = sys.exc_info() # preserve original exception
def format_traceback(self):
if self.traceback is None:
@@ -441,7 +450,10 @@ class DownloadError(Exception):
configured to continue on errors. They will contain the appropriate
error message.
"""
pass
def __init__(self, msg, exc_info=None):
""" exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
super(DownloadError, self).__init__(msg)
self.exc_info = exc_info
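A minimal sketch of the exc_info hand-off introduced above, with the class bodies reduced to the relevant attributes; the error messages are invented:

import sys

class ExtractorError(Exception):
    def __init__(self, msg):
        super(ExtractorError, self).__init__(msg)
        self.exc_info = sys.exc_info()    # remembers the exception being handled, if any

class DownloadError(Exception):
    def __init__(self, msg, exc_info=None):
        super(DownloadError, self).__init__(msg)
        self.exc_info = exc_info

try:
    raise ValueError(u'underlying problem')
except ValueError:
    ee = ExtractorError(u'Unable to extract video data')
de = DownloadError(u'ERROR: %s' % ee, ee.exc_info)
print(de.exc_info[0])                     # the original ValueError is still reachable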
class SameFileError(Exception):

View File

@@ -1,2 +1,2 @@
__version__ = '2013.02.02'
__version__ = '2013.04.22'