Compare commits


118 Commits

Author SHA1 Message Date
bc2103f3bf release 2013.12.23.3 2013-12-23 04:39:55 +01:00
f82b18efc1 Merge remote-tracking branch 'rzhxeo/youtube' 2013-12-23 04:37:40 +01:00
504c668d3b release 2013.12.23.2 2013-12-23 04:31:45 +01:00
466617f539 [bliptv] Simplify (From #2000) 2013-12-23 04:31:38 +01:00
196938835a Remove debugging code
Introduced by accident in 5d681e960d
2013-12-23 04:30:57 +01:00
a94e129a65 release 2013.12.23.1 2013-12-23 04:20:25 +01:00
5d681e960d Use bidiv instead of fribidi if available (Fixes #1912) 2013-12-23 04:19:50 +01:00
c7b487d96b release 2013.12.23 2013-12-23 03:45:02 +01:00
7dbf5ae587 [smotri] Add support for moderated (?) videos (Fixes #2030) 2013-12-23 03:44:47 +01:00
8d0bdeba18 [smotri] Make optional attributes optional 2013-12-23 03:38:29 +01:00
1b969041d7 [blinkx] Support mobile URLs (Closes #2022) 2013-12-22 07:43:54 +01:00
e302f9ce32 [youtube:user] Speed up --match-title 2013-12-22 03:57:42 +01:00
5a94982abe Remove unused import 2013-12-22 03:52:12 +01:00
7115ca84aa [vimeo/generic] Add support for embedded SWF vimeo videos 2013-12-22 03:34:13 +01:00
04ff34ab89 Show all matching URLs 2013-12-22 03:25:55 +01:00
bbafbe20c2 [vimeo] Better formatting for regexp 2013-12-22 03:21:28 +01:00
c4d55a33fc [brightcove] Test checksum changed 2013-12-20 17:28:50 +01:00
147e4aece0 [vbox7] New video checksum 2013-12-20 17:27:43 +01:00
bd1488ae64 [mdr] Remove test
For context, see http://de.wikipedia.org/wiki/Depublizieren
2013-12-20 17:24:48 +01:00
79fed2a4df [crunchyroll] Fix test (#1721) 2013-12-20 17:20:39 +01:00
304cbe981e Merge remote-tracking branch 'rzhxeo/crunchyroll' 2013-12-20 17:13:26 +01:00
3fefbf50e3 Merge pull request #2005 from dstftw/ivi.ru
Add support for ivi.ru
2013-12-20 08:12:38 -08:00
f65c1d2be0 release 2013.12.20 2013-12-20 17:08:16 +01:00
aa94a6d315 [aparat] Add support (Fixes #2012) 2013-12-20 17:05:39 +01:00
768df74538 [blinkx] Add support for youtube videos 2013-12-19 21:02:25 +01:00
1f9da9049b [generic] Support YouTube swf embed (Fixes #2010) 2013-12-19 20:44:30 +01:00
c0d0b01f0e [generic] Detect ooyala videos (fixes #2013) 2013-12-19 20:32:12 +01:00
7c86a5b864 Merge pull request #2011 from dstftw/master
[imdb] Add support for mobile site URLs
2013-12-19 11:28:34 -08:00
dst 97e302a419 [imdb] Add support for mobile site URLs 2013-12-20 00:21:04 +07:00
71507a11c8 [soundcloud] Support mobile URLs (Fixes #2009) 2013-12-19 16:39:01 +01:00
dst a51e37af62 [ivi] Simplify 2013-12-19 10:53:38 +07:00
1fb8f09273 Merge pull request #2006 from dstftw/master
[smotri] Fix duration field name
2013-12-18 15:40:40 -08:00
dst 6c6db72ed4 [ivi] Skip tests for travis build 2013-12-19 06:19:41 +07:00
dst 0cc83dc54b [smotri] Fix duration field name 2013-12-19 05:56:48 +07:00
dst 5ce54a8205 [ivi] Neat import 2013-12-19 05:53:34 +07:00
dst 8c21b7c647 [ivi] Add playlist tests 2013-12-19 05:39:22 +07:00
dst 77aa6b329d [ivi] Add support for ivi.ru 2013-12-19 05:28:16 +07:00
62d68c43ed Make prefer_free_formats sorting more robust 2013-12-18 21:25:13 +01:00
bfaae0a768 Filter and sort videos before calling list_formats 2013-12-18 21:24:39 +01:00
e56f22ae20 [YoutubeIE] Sort formats by resolution 2013-12-18 21:22:37 +01:00
dbd1988ed9 [YoutubeIE] Add width and height to format dict 2013-12-18 21:21:25 +01:00
4ea3be0a5c [YoutubeIE] Externalize format selection 2013-12-18 03:30:55 +01:00
3e78514568 [generic] Support application/ogg for direct links
Also remove some debugging code.
2013-12-17 16:26:34 +01:00
e029b8bd43 [utils] Remove duplicated line
This line was added by accident in 42393ce234
2013-12-17 16:12:20 +01:00
f5567e401c Merge pull request #1997 from rg3/simplify-url_basename
Simplify url_basename
2013-12-17 07:08:48 -08:00
9b8aaeed85 Simplify url_basename
Use urlparse from the standard library.
2013-12-17 14:56:29 +01:00
6086d121cb release 2013.12.17.2 2013-12-17 12:35:57 +01:00
7de6e075b4 [radiofrance] remove unused imports 2013-12-17 12:35:16 +01:00
946135aa2a [academicearth] remove unused imports 2013-12-17 12:34:30 +01:00
42393ce234 Add support for direct links to a video (#1973) 2013-12-17 12:33:55 +01:00
d6c7a367e8 [utils] Fix url_basename 2013-12-17 12:32:58 +01:00
cecaaf3f58 [generic] Do not use compatibility result fallback 2013-12-17 12:04:33 +01:00
f09828b4e1 release 2013.12.17.1 2013-12-17 04:13:41 +01:00
29eb517403 Add webpage_url_basename info_dict field (Fixes #1938) 2013-12-17 04:13:36 +01:00
44c471c3b8 release 2013.12.17 2013-12-17 02:51:22 +01:00
46374a56b2 [youtube] Do not warn for videos with allow_rating=0
This fixes #1982
Test video: http://www.youtube.com/watch?v=gi2uH3YxohU
2013-12-17 02:49:56 +01:00
ec98946ef9 [academicearth] Support playlists (Closes #1976) 2013-12-17 02:41:34 +01:00
fa77b742ac [radiofrance] Fill in test details 2013-12-16 23:07:57 +01:00
8b4e274610 [rtlnow] Fix URL calculation (Closes #1989) 2013-12-16 22:28:52 +01:00
d6756d3758 [playlist-test] require a string 2013-12-16 22:25:02 +01:00
11b68f6e1b release 2013.12.16.7 2013-12-16 22:18:58 +01:00
88bb52ee18 Merge branch 'master' of github.com:rg3/youtube-dl 2013-12-16 22:18:37 +01:00
d90df974c3 [academicearth] Add support for courses (#1976) 2013-12-16 22:18:27 +01:00
5c541b2cb7 [mtv] Add support for urls from the mobile site (fixes #1959) 2013-12-16 22:05:28 +01:00
87a28127d2 _search_regex's "isatty" call fails with Py2exe's
_search_regex calls the sys.stderr.isatty() function for unix systems.

Py2exe uses a custom Stderr() stream which doesn't have an `isatty()`
function, leading to it's crash.

Fixes easily with checking that it's a unix system first.
2013-12-16 21:50:26 +01:00
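A hedged sketch of the defensive pattern this commit describes (illustrative names, not the actual youtube-dl code):

    import sys

    def _allow_colorized_output(stream=sys.stderr):
        # Py2exe replaces sys.stderr with a custom Stderr() object that has
        # no isatty() method, so only consider it on non-Windows systems and
        # probe via getattr instead of calling it unconditionally.
        if sys.platform == 'win32':
            return False
        isatty = getattr(stream, 'isatty', None)
        return bool(isatty and isatty())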
ebce53b3d8 [vevo] Add support for videoplayer. URLs (#1957) 2013-12-16 21:48:38 +01:00
83c632dc43 release 2013.12.16.6 2013-12-16 21:46:16 +01:00
ff07a05575 Merge branch 'master' of github.com:rg3/youtube-dl 2013-12-16 21:46:11 +01:00
f25571ffbf Add support for embedded vevo player (Fixes #1957) 2013-12-16 21:45:21 +01:00
f7a6892572 [arte:ddc] Remove test
The video seems to expire after 7 days, as with arte+7 videos.
2013-12-16 21:42:41 +01:00
8fe56478f8 release 2013.12.16.5 2013-12-16 21:34:47 +01:00
0e2a436dce [radiofrance] Add support (Fixes #1942) 2013-12-16 21:34:41 +01:00
24050dd11c release 2013.12.16.4 2013-12-16 21:10:18 +01:00
8c8e3eec79 [facebook] Recognize #! URLs (Fixes #1988) 2013-12-16 21:10:06 +01:00
7ebc9dee69 Merge pull request #1987 from rzhxeo/blip
[GenericIE] Add support for embedded blip.tv
2013-12-16 11:28:34 -08:00
ee3e63e477 [GenericIE] Add support for embedded blip.tv 2013-12-16 20:08:23 +01:00
e9c424c144 Merge pull request #1984 from alimirjamali/patch-1
Incorrect variable is used to check whether thumbnail exists
2013-12-16 09:04:36 -08:00
0a9ce268ba Incorrect variable is used to check whether thumbnail exists
Dear @phihag

I believe in line 848, the correct variable to check is 'thumb_filename' rather than 'infofn'

Kindly advise

With kind regards,
Ali
2013-12-16 20:14:28 +03:30
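Schematically, the bug and the fix (the real context is the writethumbnail branch in the YoutubeDL.py diff below; variable names are taken from there):

    # Before: tests the wrong file, so an existing .info.json would
    # wrongly suppress the thumbnail download
    if os.path.exists(encodeFilename(infofn)): ...

    # After: tests the file that is actually about to be written
    if os.path.exists(encodeFilename(thumb_filename)): ...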
4b2da48ea7 release 2013.12.16.3 2013-12-16 14:44:29 +01:00
e64eaaa97d Fix execution under Python 3 2013-12-16 14:44:17 +01:00
780603027f [videopremium] Skip test 2013-12-16 14:42:07 +01:00
00902cd601 release 2013.12.16.2 2013-12-16 14:13:51 +01:00
d67b0b1596 Reorder info_dict documentation 2013-12-16 14:13:40 +01:00
d7dda16888 [blinkx] Add extractor (Fixes #1972) 2013-12-16 13:56:30 +01:00
a19fd00cc4 Simplify --playlist-start / --playlist-end interface 2013-12-16 13:16:20 +01:00
d66152a898 [ndtv] Remove unused imports 2013-12-16 08:16:38 +01:00
8c5f0c9fbc [mdr] Clean up 2013-12-16 08:16:11 +01:00
6888a874a1 release 2013.12.16.1 2013-12-16 05:45:15 +01:00
09dacfa57f [mdr] Simplify 2013-12-16 05:44:34 +01:00
b2ae513586 Merge remote-tracking branch 'mc2avr/master' 2013-12-16 05:14:03 +01:00
e4a0489f6e Merge remote-tracking branch 'dstftw/channel9'
Conflicts:
	youtube_dl/extractor/__init__.py
2013-12-16 05:14:00 +01:00
b83be81d27 Credit @mjorlitzky for pornhd (#1961) 2013-12-16 05:11:19 +01:00
6f5dcd4eee [pornhd] Simplify 2013-12-16 05:10:42 +01:00
1bb2fc98e0 Merge remote-tracking branch 'mjorlitzky/master' 2013-12-16 05:07:58 +01:00
e3946f989e Set process title to youtube-dl
This allows killing all youtube-dl processes with `killall youtube-dl`, and shows up more nicely in some programs.
2013-12-16 05:04:55 +01:00
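The helper this adds (setproctitle() in youtube_dl/utils.py, called from _real_main() in the diff below) boils down to a prctl(PR_SET_NAME) call on Linux. Roughly, as a simplified sketch rather than the verbatim utils.py code:

    import ctypes

    def setproctitle(title):
        # Rename the process as seen by `killall` and `ps -o comm` on Linux;
        # silently do nothing where glibc/prctl is unavailable.
        try:
            libc = ctypes.cdll.LoadLibrary('libc.so.6')
        except OSError:
            return
        title_bytes = title.encode('utf-8')
        buf = ctypes.create_string_buffer(len(title_bytes) + 1)
        buf.value = title_bytes
        try:
            libc.prctl(15, ctypes.byref(buf), 0, 0, 0)  # 15 == PR_SET_NAME
        except AttributeError:
            return  # strange libc, just skip this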
8863d0de91 release 2013.12.16 2013-12-16 04:45:32 +01:00
7b6fefc9d4 Apply --no-overwrites for --write-* files as well (Fixes #1980) 2013-12-16 04:39:13 +01:00
525ef9227f Add --get-duration (Fixes #859) 2013-12-16 04:15:10 +01:00
c0ba0f4859 Document duration field 2013-12-16 04:09:43 +01:00
b466b7029d [youtube] Make duration an integer or None 2013-12-16 04:09:05 +01:00
fa3ae234e0 [cbs] Add extractor (Fixes #1977) 2013-12-16 03:53:43 +01:00
48462108f3 [theplatform] Fix geographic restriction check 2013-12-16 03:43:45 +01:00
f8b56e95b8 [theplatform] Detect geoblocked content 2013-12-16 03:34:46 +01:00
5fe18bdbde Add --min-views / --max-views (Fixes #1979) 2013-12-16 03:09:49 +01:00
dca02c80bc Fix extension detection when 'extractaudio' is given, and improve the error message (#1969)
Using 'foo.mp4' shouldn't raise an error.
If 'foo' is given, suggest using 'foo.%(ext)s' for the template
2013-12-15 11:42:38 +01:00
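The improved check (visible in the youtube_dl/__init__.py diff below) keys on whether the template has any extension at all, instead of searching for the literal '%(ext)s' substring:

    import os.path

    for tmpl in ('foo', 'foo.mp4', '%(title)s-%(id)s.%(ext)s'):
        has_ext = bool(os.path.splitext(tmpl)[1])
        # 'foo'     -> False: error, suggesting 'foo.%(ext)s' instead
        # 'foo.mp4' -> True:  accepted, no error raised
        print(tmpl, has_ext)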
9ee859b683 [dailymotion] Add support for urls from the mobile site (fixes #1953)
It uses the 'touch' subdomain and adds a '#' before 'video'
2013-12-14 14:20:12 +01:00
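A hedged illustration of that URL shape (the video id here is made up, and the extractor's real regex may differ):

    import re

    # desktop: http://www.dailymotion.com/video/<id>
    # mobile:  http://touch.dailymotion.com/#video/<id>
    mobile_url = 'http://touch.dailymotion.com/#video/x0abc12'
    m = re.match(r'https?://touch\.dailymotion\.com/#video/(?P<id>\w+)', mobile_url)
    assert m and m.group('id') == 'x0abc12'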
8e05c870b4 Add support for pornhd.com. 2013-12-13 22:24:32 -05:00
5d574e143f [ign] Update one of test video's title 2013-12-13 17:04:40 +01:00
2a203a6cda Merge pull request #1956 from dstftw/master
Fix typo in month name
2013-12-13 07:41:34 -08:00
dst dadb8184e4 Fix typo in month name 2013-12-13 22:27:37 +07:00
7a563df90a [daum] Recognize mobile urls (#1952) 2013-12-12 13:05:38 +01:00
24b173fa5c [naver] Recognize mobile urls (fixes #1951) 2013-12-12 13:04:02 +01:00
dst 9b17ba0fa5 [channel9] Fix test description md5 2013-12-12 16:10:17 +07:00
dst 211f555d4c [channel9] Missing import in __init__ 2013-12-12 15:55:31 +07:00
dst 4d2ebb6bd7 [channel9] Cleanup 2013-12-12 15:19:23 +07:00
dst df53747436 [channel9] Initial implementation (#1885) 2013-12-12 15:13:45 +07:00
df1d7da2af add MDRIE 2013-12-10 18:40:50 +01:00
c8434e8316 Add support for crunchyroll.com 2013-11-09 11:25:12 +01:00
44 changed files with 1738 additions and 451 deletions

README.md

@@ -39,7 +39,8 @@ which means you can modify it, redistribute it or use it however you like.
                                /youtube-dl .
     --no-cache-dir             Disable filesystem caching
     --bidi-workaround          Work around terminals that lack bidirectional
-                               text support. Requires fribidi executable in PATH
+                               text support. Requires bidiv or fribidi
+                               executable in PATH
 
 ## Video Selection:
     --playlist-start NUMBER    playlist video to start at (default is 1)
@@ -56,6 +57,10 @@ which means you can modify it, redistribute it or use it however you like.
     --date DATE                download only videos uploaded in this date
     --datebefore DATE          download only videos uploaded before this date
     --dateafter DATE           download only videos uploaded after this date
+    --min-views COUNT          Do not download any videos with less than COUNT
+                               views
+    --max-views COUNT          Do not download any videos with more than COUNT
+                               views
     --no-playlist              download only the currently playing video
     --age-limit YEARS          download only videos suitable for the given age
     --download-archive FILE    Download only videos not listed in the archive
@@ -127,6 +132,7 @@ which means you can modify it, redistribute it or use it however you like.
     --get-id                   simulate, quiet but print id
     --get-thumbnail            simulate, quiet but print thumbnail URL
     --get-description          simulate, quiet but print video description
+    --get-duration             simulate, quiet but print video length
     --get-filename             simulate, quiet but print output filename
     --get-format               simulate, quiet but print output format
     -j, --dump-json            simulate, quiet but print JSON information

test/test_all_urls.py

@@ -10,6 +10,7 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 from test.helper import get_testcases
 from youtube_dl.extractor import (
+    FacebookIE,
     gen_extractors,
     JustinTVIE,
     YoutubeIE,
@@ -87,12 +88,15 @@ class TestAllURLsMatching(unittest.TestCase):
         assertExtractId('http://www.youtube.com/watch?v=BaW_jenozKcsharePLED17F32AD9753930', 'BaW_jenozKc')
         assertExtractId('BaW_jenozKc', 'BaW_jenozKc')
 
+    def test_facebook_matching(self):
+        self.assertTrue(FacebookIE.suitable(u'https://www.facebook.com/Shiniknoh#!/photo.php?v=10153317450565268'))
+
     def test_no_duplicates(self):
         ies = gen_extractors()
         for tc in get_testcases():
             url = tc['url']
             for ie in ies:
-                if type(ie).__name__ in ['GenericIE', tc['name'] + 'IE']:
+                if type(ie).__name__ in ('GenericIE', tc['name'] + 'IE'):
                     self.assertTrue(ie.suitable(url), '%s should match URL %r' % (type(ie).__name__, url))
                 else:
                     self.assertFalse(ie.suitable(url), '%s should not match URL %r' % (type(ie).__name__, url))

test/test_playlists.py

@@ -12,6 +12,7 @@ from test.helper import FakeYDL
 from youtube_dl.extractor import (
+    AcademicEarthCourseIE,
     DailymotionPlaylistIE,
     DailymotionUserIE,
     VimeoChannelIE,
@@ -26,7 +27,8 @@ from youtube_dl.extractor import (
     BambuserChannelIE,
     BandcampAlbumIE,
     SmotriCommunityIE,
-    SmotriUserIE
+    SmotriUserIE,
+    IviCompilationIE
 )
@@ -158,5 +160,34 @@ class TestPlaylists(unittest.TestCase):
         self.assertEqual(result['title'], u'Inspector')
         self.assertTrue(len(result['entries']) >= 9)
 
+    def test_AcademicEarthCourse(self):
+        dl = FakeYDL()
+        ie = AcademicEarthCourseIE(dl)
+        result = ie.extract(u'http://academicearth.org/courses/building-dynamic-websites/')
+        self.assertIsPlaylist(result)
+        self.assertEqual(result['id'], u'building-dynamic-websites')
+        self.assertEqual(result['title'], u'Building Dynamic Websites')
+        self.assertEqual(result['description'], u"Today's websites are increasingly dynamic. Pages are no longer static HTML files but instead generated by scripts and database calls. User interfaces are more seamless, with technologies like Ajax replacing traditional page reloads. This course teaches students how to build dynamic websites with Ajax and with Linux, Apache, MySQL, and PHP (LAMP), one of today's most popular frameworks. Students learn how to set up domain names with DNS, how to structure pages with XHTML and CSS, how to program in JavaScript and PHP, how to configure Apache and MySQL, how to design and query databases with SQL, how to use Ajax with both XML and JSON, and how to build mashups. The course explores issues of security, scalability, and cross-browser support and also discusses enterprise-level deployments of websites, including third-party hosting, virtualization, colocation in data centers, firewalling, and load-balancing.")
+        self.assertEqual(len(result['entries']), 10)
+
+    def test_ivi_compilation(self):
+        dl = FakeYDL()
+        ie = IviCompilationIE(dl)
+        result = ie.extract('http://www.ivi.ru/watch/dezhurnyi_angel')
+        self.assertIsPlaylist(result)
+        self.assertEqual(result['id'], u'dezhurnyi_angel')
+        self.assertEqual(result['title'], u'Дежурный ангел (2010 - 2012)')
+        self.assertTrue(len(result['entries']) >= 36)
+
+    def test_ivi_compilation_season(self):
+        dl = FakeYDL()
+        ie = IviCompilationIE(dl)
+        result = ie.extract('http://www.ivi.ru/watch/dezhurnyi_angel/season2')
+        self.assertIsPlaylist(result)
+        self.assertEqual(result['id'], u'dezhurnyi_angel/season2')
+        self.assertEqual(result['title'], u'Дежурный ангел (2010 - 2012) 2 сезон')
+        self.assertTrue(len(result['entries']) >= 20)
+
 if __name__ == '__main__':
     unittest.main()

test/test_utils.py

@@ -13,20 +13,21 @@ import xml.etree.ElementTree
 #from youtube_dl.utils import htmlentity_transform
 from youtube_dl.utils import (
-    timeconvert,
-    sanitize_filename,
-    unescapeHTML,
-    orderedSet,
     DateRange,
-    unified_strdate,
+    encodeFilename,
     find_xpath_attr,
     get_meta_content,
-    xpath_with_ns,
-    smuggle_url,
-    unsmuggle_url,
+    orderedSet,
+    sanitize_filename,
     shell_quote,
-    encodeFilename,
+    smuggle_url,
     str_to_int,
+    timeconvert,
+    unescapeHTML,
+    unified_strdate,
+    unsmuggle_url,
+    url_basename,
+    xpath_with_ns,
 )
 
 if sys.version_info < (3, 0):
@@ -181,6 +182,15 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(str_to_int('123,456'), 123456)
         self.assertEqual(str_to_int('123.456'), 123456)
 
+    def test_url_basename(self):
+        self.assertEqual(url_basename(u'http://foo.de/'), u'')
+        self.assertEqual(url_basename(u'http://foo.de/bar/baz'), u'baz')
+        self.assertEqual(url_basename(u'http://foo.de/bar/baz?x=y'), u'baz')
+        self.assertEqual(url_basename(u'http://foo.de/bar/baz#x=y'), u'baz')
+        self.assertEqual(url_basename(u'http://foo.de/bar/baz/'), u'baz')
+        self.assertEqual(
+            url_basename(u'http://media.w3.org/2010/05/sintel/trailer.mp4'),
+            u'trailer.mp4')
+
 if __name__ == '__main__':
     unittest.main()

youtube_dl/YoutubeDL.py

@@ -34,6 +34,7 @@ from .utils import (
     encodeFilename,
     ExtractorError,
     format_bytes,
+    formatSeconds,
     get_term_width,
     locked_file,
     make_HTTPS_handler,
@@ -46,6 +47,7 @@ from .utils import (
     subtitles_filename,
     takewhile_inclusive,
     UnavailableVideoError,
+    url_basename,
     write_json_file,
     write_string,
     YoutubeDLHandler,
@@ -94,6 +96,7 @@ class YoutubeDL(object):
     forcethumbnail:    Force printing thumbnail URL.
     forcedescription:  Force printing description.
     forcefilename:     Force printing final filename.
+    forceduration:     Force printing duration.
     forcejson:         Force printing info_dict as JSON.
     simulate:          Do not download the video files.
     format:            Video format code.
@@ -127,7 +130,16 @@ class YoutubeDL(object):
     noplaylist:        Download single video instead of a playlist if in doubt.
     age_limit:         An integer representing the user's age in years.
                        Unsuitable videos for the given age are skipped.
-    download_archive:  File name of a file where all downloads are recorded.
+    min_views:         An integer representing the minimum view count the video
+                       must have in order to not be skipped.
+                       Videos without view count information are always
+                       downloaded. None for no limit.
+    max_views:         An integer representing the maximum view count.
+                       Videos that are more popular than that are not
+                       downloaded.
+                       Videos without view count information are always
+                       downloaded. None for no limit.
+    download_archive:  File name of a file where all downloads are recorded.
                        Videos already present in the file are not downloaded
                        again.
     cookiefile:        File name where cookies should be read from and dumped to.
@@ -171,12 +183,18 @@ class YoutubeDL(object):
                 width_args = []
             else:
                 width_args = ['-w', str(width)]
-            self._fribidi = subprocess.Popen(
-                ['fribidi', '-c', 'UTF-8'] + width_args,
+            sp_kwargs = dict(
                 stdin=subprocess.PIPE,
                 stdout=slave,
                 stderr=self._err_file)
-            self._fribidi_channel = os.fdopen(master, 'rb')
+            try:
+                self._output_process = subprocess.Popen(
+                    ['bidiv'] + width_args, **sp_kwargs
+                )
+            except OSError:
+                self._output_process = subprocess.Popen(
+                    ['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
+            self._output_channel = os.fdopen(master, 'rb')
         except OSError as ose:
             if ose.errno == 2:
                 self.report_warning(u'Could not find fribidi executable, ignoring --bidi-workaround . Make sure that fribidi is an executable file in one of the directories in your $PATH.')
@@ -231,14 +249,15 @@ class YoutubeDL(object):
             pp.set_downloader(self)
 
     def _bidi_workaround(self, message):
-        if not hasattr(self, '_fribidi_channel'):
+        if not hasattr(self, '_output_channel'):
             return message
 
+        assert hasattr(self, '_output_process')
         assert type(message) == type(u'')
         line_count = message.count(u'\n') + 1
-        self._fribidi.stdin.write((message + u'\n').encode('utf-8'))
-        self._fribidi.stdin.flush()
-        res = u''.join(self._fribidi_channel.readline().decode('utf-8')
+        self._output_process.stdin.write((message + u'\n').encode('utf-8'))
+        self._output_process.stdin.flush()
+        res = u''.join(self._output_channel.readline().decode('utf-8')
                        for _ in range(line_count))
         return res[:-len(u'\n')]
@@ -355,22 +374,6 @@ class YoutubeDL(object):
         error_message = u'%s %s' % (_msg_header, message)
         self.trouble(error_message, tb)
 
-    def report_writedescription(self, descfn):
-        """ Report that the description file is being written """
-        self.to_screen(u'[info] Writing video description to: ' + descfn)
-
-    def report_writesubtitles(self, sub_filename):
-        """ Report that the subtitles file is being written """
-        self.to_screen(u'[info] Writing video subtitles to: ' + sub_filename)
-
-    def report_writeinfojson(self, infofn):
-        """ Report that the metadata file has been written """
-        self.to_screen(u'[info] Video description metadata as JSON to: ' + infofn)
-
-    def report_writeannotations(self, annofn):
-        """ Report that the annotations file has been written. """
-        self.to_screen(u'[info] Writing video annotations to: ' + annofn)
-
     def report_file_already_downloaded(self, file_name):
         """Report file has already been fully downloaded."""
         try:
@@ -415,13 +418,14 @@ class YoutubeDL(object):
     def _match_entry(self, info_dict):
         """ Returns None iff the file should be downloaded """
 
+        video_title = info_dict.get('title', info_dict.get('id', u'video'))
         if 'title' in info_dict:
             # This can happen when we're just evaluating the playlist
             title = info_dict['title']
             matchtitle = self.params.get('matchtitle', False)
             if matchtitle:
                 if not re.search(matchtitle, title, re.IGNORECASE):
-                    return u'[download] "' + title + '" title did not match pattern "' + matchtitle + '"'
+                    return u'"' + title + '" title did not match pattern "' + matchtitle + '"'
             rejecttitle = self.params.get('rejecttitle', False)
             if rejecttitle:
                 if re.search(rejecttitle, title, re.IGNORECASE):
@@ -430,14 +434,21 @@ class YoutubeDL(object):
         if date is not None:
             dateRange = self.params.get('daterange', DateRange())
             if date not in dateRange:
-                return u'[download] %s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
+                return u'%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
+        view_count = info_dict.get('view_count', None)
+        if view_count is not None:
+            min_views = self.params.get('min_views')
+            if min_views is not None and view_count < min_views:
+                return u'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
+            max_views = self.params.get('max_views')
+            if max_views is not None and view_count > max_views:
+                return u'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
         age_limit = self.params.get('age_limit')
         if age_limit is not None:
             if age_limit < info_dict.get('age_limit', 0):
                 return u'Skipping "' + title + '" because it is age restricted'
         if self.in_download_archive(info_dict):
-            return (u'%s has already been recorded in archive'
-                    % info_dict.get('title', info_dict.get('id', u'video')))
+            return u'%s has already been recorded in archive' % video_title
         return None
 
     @staticmethod
@@ -481,6 +492,7 @@ class YoutubeDL(object):
                 {
                     'extractor': ie.IE_NAME,
                     'webpage_url': url,
+                    'webpage_url_basename': url_basename(url),
                     'extractor_key': ie.ie_key(),
                 })
             if process:
@@ -554,16 +566,16 @@ class YoutubeDL(object):
             n_all_entries = len(ie_result['entries'])
             playliststart = self.params.get('playliststart', 1) - 1
-            playlistend = self.params.get('playlistend', -1)
+            playlistend = self.params.get('playlistend', None)
+            # For backwards compatibility, interpret -1 as whole list
             if playlistend == -1:
-                entries = ie_result['entries'][playliststart:]
-            else:
-                entries = ie_result['entries'][playliststart:playlistend]
+                playlistend = None
 
+            entries = ie_result['entries'][playliststart:playlistend]
             n_entries = len(entries)
 
-            self.to_screen(u"[%s] playlist '%s': Collected %d video ids (downloading %d of them)" %
+            self.to_screen(
+                u"[%s] playlist '%s': Collected %d video ids (downloading %d of them)" %
                 (ie_result['extractor'], playlist, n_all_entries, n_entries))
 
             for i, entry in enumerate(entries, 1):
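The -1 to None switch above works because Python slices treat None as "no bound"; a quick illustration of the new slicing path:

    entries = ['v1', 'v2', 'v3', 'v4', 'v5']
    playliststart = 1          # --playlist-start 2 (the option is 1-based)
    playlistend = None         # --playlist-end not given
    assert entries[playliststart:playlistend] == ['v2', 'v3', 'v4', 'v5']
    assert entries[playliststart:3] == ['v2', 'v3']   # --playlist-end 3
    # -1 is still accepted from old configurations and mapped to None above.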
@@ -573,6 +585,7 @@ class YoutubeDL(object):
                     'playlist_index': i + playliststart,
                     'extractor': ie_result['extractor'],
                     'webpage_url': ie_result['webpage_url'],
+                    'webpage_url_basename': url_basename(ie_result['webpage_url']),
                     'extractor_key': ie_result['extractor_key'],
                 }
@@ -593,6 +606,7 @@ class YoutubeDL(object):
                 {
                     'extractor': ie_result['extractor'],
                     'webpage_url': ie_result['webpage_url'],
+                    'webpage_url_basename': url_basename(ie_result['webpage_url']),
                     'extractor_key': ie_result['extractor_key'],
                 })
             return r
@@ -629,7 +643,7 @@ class YoutubeDL(object):
             info_dict['playlist_index'] = None
 
         # This extractors handle format selection themselves
-        if info_dict['extractor'] in [u'youtube', u'Youku']:
+        if info_dict['extractor'] in [u'Youku']:
             if download:
                 self.process_info(info_dict)
             return info_dict
@@ -655,10 +669,6 @@ class YoutubeDL(object):
             if 'ext' not in format:
                 format['ext'] = determine_ext(format['url'])
 
-        if self.params.get('listformats', None):
-            self.list_formats(info_dict)
-            return
-
         format_limit = self.params.get('format_limit', None)
         if format_limit:
             formats = list(takewhile_inclusive(
@@ -671,9 +681,16 @@ class YoutubeDL(object):
                 except ValueError:
                     ext_ord = -1
                 # We only compare the extension if they have the same height and width
-                return (f.get('height'), f.get('width'), ext_ord)
+                return (f.get('height') if f.get('height') is not None else -1,
+                        f.get('width') if f.get('width') is not None else -1,
+                        ext_ord)
             formats = sorted(formats, key=_free_formats_key)
 
+        info_dict['formats'] = formats
+
+        if self.params.get('listformats', None):
+            self.list_formats(info_dict)
+            return
+
         req_format = self.params.get('format', 'best')
         if req_format is None:
             req_format = 'best'
@@ -748,6 +765,8 @@ class YoutubeDL(object):
             self.to_stdout(info_dict['description'])
         if self.params.get('forcefilename', False) and filename is not None:
             self.to_stdout(filename)
+        if self.params.get('forceduration', False) and info_dict.get('duration') is not None:
+            self.to_stdout(formatSeconds(info_dict['duration']))
         if self.params.get('forceformat', False):
             self.to_stdout(info_dict['format'])
         if self.params.get('forcejson', False):
@@ -770,28 +789,34 @@ class YoutubeDL(object):
             return
 
         if self.params.get('writedescription', False):
-            try:
-                descfn = filename + u'.description'
-                self.report_writedescription(descfn)
-                with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
-                    descfile.write(info_dict['description'])
-            except (KeyError, TypeError):
-                self.report_warning(u'There\'s no description to write.')
-            except (OSError, IOError):
-                self.report_error(u'Cannot write description file ' + descfn)
-                return
+            descfn = filename + u'.description'
+            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(descfn)):
+                self.to_screen(u'[info] Video description is already present')
+            else:
+                try:
+                    self.to_screen(u'[info] Writing video description to: ' + descfn)
+                    with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
+                        descfile.write(info_dict['description'])
+                except (KeyError, TypeError):
+                    self.report_warning(u'There\'s no description to write.')
+                except (OSError, IOError):
+                    self.report_error(u'Cannot write description file ' + descfn)
+                    return
 
         if self.params.get('writeannotations', False):
-            try:
-                annofn = filename + u'.annotations.xml'
-                self.report_writeannotations(annofn)
-                with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
-                    annofile.write(info_dict['annotations'])
-            except (KeyError, TypeError):
-                self.report_warning(u'There are no annotations to write.')
-            except (OSError, IOError):
-                self.report_error(u'Cannot write annotations file: ' + annofn)
-                return
+            annofn = filename + u'.annotations.xml'
+            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(annofn)):
+                self.to_screen(u'[info] Video annotations are already present')
+            else:
+                try:
+                    self.to_screen(u'[info] Writing video annotations to: ' + annofn)
+                    with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
+                        annofile.write(info_dict['annotations'])
+                except (KeyError, TypeError):
+                    self.report_warning(u'There are no annotations to write.')
+                except (OSError, IOError):
+                    self.report_error(u'Cannot write annotations file: ' + annofn)
+                    return
 
         subtitles_are_requested = any([self.params.get('writesubtitles', False),
                                        self.params.get('writeautomaticsub')])
@@ -807,38 +832,48 @@ class YoutubeDL(object):
                     continue
                 try:
                     sub_filename = subtitles_filename(filename, sub_lang, sub_format)
-                    self.report_writesubtitles(sub_filename)
-                    with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
-                        subfile.write(sub)
+                    if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(sub_filename)):
+                        self.to_screen(u'[info] Video subtitle %s.%s is already_present' % (sub_lang, sub_format))
+                    else:
+                        self.to_screen(u'[info] Writing video subtitles to: ' + sub_filename)
+                        with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
+                            subfile.write(sub)
                 except (OSError, IOError):
                     self.report_error(u'Cannot write subtitles file ' + descfn)
                     return
 
         if self.params.get('writeinfojson', False):
             infofn = os.path.splitext(filename)[0] + u'.info.json'
-            self.report_writeinfojson(infofn)
-            try:
-                json_info_dict = dict((k, v) for k, v in info_dict.items() if not k in ['urlhandle'])
-                write_json_file(json_info_dict, encodeFilename(infofn))
-            except (OSError, IOError):
-                self.report_error(u'Cannot write metadata to JSON file ' + infofn)
-                return
+            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(infofn)):
+                self.to_screen(u'[info] Video description metadata is already present')
+            else:
+                self.to_screen(u'[info] Writing video description metadata as JSON to: ' + infofn)
+                try:
+                    json_info_dict = dict((k, v) for k, v in info_dict.items() if not k in ['urlhandle'])
+                    write_json_file(json_info_dict, encodeFilename(infofn))
+                except (OSError, IOError):
+                    self.report_error(u'Cannot write metadata to JSON file ' + infofn)
+                    return
 
         if self.params.get('writethumbnail', False):
             if info_dict.get('thumbnail') is not None:
                 thumb_format = determine_ext(info_dict['thumbnail'], u'jpg')
                 thumb_filename = os.path.splitext(filename)[0] + u'.' + thumb_format
-                self.to_screen(u'[%s] %s: Downloading thumbnail ...' %
-                               (info_dict['extractor'], info_dict['id']))
-                try:
-                    uf = compat_urllib_request.urlopen(info_dict['thumbnail'])
-                    with open(thumb_filename, 'wb') as thumbf:
-                        shutil.copyfileobj(uf, thumbf)
-                    self.to_screen(u'[%s] %s: Writing thumbnail to: %s' %
-                                   (info_dict['extractor'], info_dict['id'], thumb_filename))
-                except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                    self.report_warning(u'Unable to download thumbnail "%s": %s' %
-                                        (info_dict['thumbnail'], compat_str(err)))
+                if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)):
+                    self.to_screen(u'[%s] %s: Thumbnail is already present' %
+                                   (info_dict['extractor'], info_dict['id']))
+                else:
+                    self.to_screen(u'[%s] %s: Downloading thumbnail ...' %
                                   (info_dict['extractor'], info_dict['id']))
+                    try:
+                        uf = compat_urllib_request.urlopen(info_dict['thumbnail'])
+                        with open(thumb_filename, 'wb') as thumbf:
+                            shutil.copyfileobj(uf, thumbf)
+                        self.to_screen(u'[%s] %s: Writing thumbnail to: %s' %
                                       (info_dict['extractor'], info_dict['id'], thumb_filename))
+                    except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+                        self.report_warning(u'Unable to download thumbnail "%s": %s' %
                                            (info_dict['thumbnail'], compat_str(err)))
 
         if not self.params.get('skip_download', False):
             if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(filename)):
youtube_dl/__init__.py

@@ -37,6 +37,7 @@ __authors__ = (
     'Anton Larionov',
     'Takuya Tsuchida',
     'Sergey M.',
+    'Michael Orlitzky',
 )
 
 __license__ = 'Public Domain'
@@ -55,13 +56,13 @@ from .utils import (
     compat_print,
     DateRange,
     decodeOption,
-    determine_ext,
     get_term_width,
     DownloadError,
     get_cachedir,
     MaxDownloadsReached,
     preferredencoding,
     SameFileError,
+    setproctitle,
     std_headers,
     write_string,
 )
@@ -193,13 +194,17 @@ def parseOpts(overrideArguments=None):
         type=float, default=None, help=optparse.SUPPRESS_HELP)
     general.add_option(
         '--bidi-workaround', dest='bidi_workaround', action='store_true',
-        help=u'Work around terminals that lack bidirectional text support. Requires fribidi executable in PATH')
+        help=u'Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH')
 
-    selection.add_option('--playlist-start',
-        dest='playliststart', metavar='NUMBER', help='playlist video to start at (default is %default)', default=1)
-    selection.add_option('--playlist-end',
-        dest='playlistend', metavar='NUMBER', help='playlist video to end at (default is last)', default=-1)
+    selection.add_option(
+        '--playlist-start',
+        dest='playliststart', metavar='NUMBER', default=1, type=int,
+        help='playlist video to start at (default is %default)')
+    selection.add_option(
+        '--playlist-end',
+        dest='playlistend', metavar='NUMBER', default=None, type=int,
+        help='playlist video to end at (default is last)')
     selection.add_option('--match-title', dest='matchtitle', metavar='REGEX',help='download only matching titles (regex or caseless sub-string)')
     selection.add_option('--reject-title', dest='rejecttitle', metavar='REGEX',help='skip download for matching titles (regex or caseless sub-string)')
     selection.add_option('--max-downloads', metavar='NUMBER',
@@ -210,6 +215,14 @@ def parseOpts(overrideArguments=None):
     selection.add_option('--date', metavar='DATE', dest='date', help='download only videos uploaded in this date', default=None)
     selection.add_option('--datebefore', metavar='DATE', dest='datebefore', help='download only videos uploaded before this date', default=None)
     selection.add_option('--dateafter', metavar='DATE', dest='dateafter', help='download only videos uploaded after this date', default=None)
+    selection.add_option(
+        '--min-views', metavar='COUNT', dest='min_views',
+        default=None, type=int,
+        help="Do not download any videos with less than COUNT views",)
+    selection.add_option(
+        '--max-views', metavar='COUNT', dest='max_views',
+        default=None, type=int,
+        help="Do not download any videos with more than COUNT views",)
     selection.add_option('--no-playlist', action='store_true', dest='noplaylist', help='download only the currently playing video', default=False)
     selection.add_option('--age-limit', metavar='YEARS', dest='age_limit',
                          help='download only videos suitable for the given age',
@@ -290,6 +303,9 @@ def parseOpts(overrideArguments=None):
     verbosity.add_option('--get-description',
                          action='store_true', dest='getdescription',
                          help='simulate, quiet but print video description', default=False)
+    verbosity.add_option('--get-duration',
+                         action='store_true', dest='getduration',
+                         help='simulate, quiet but print video length', default=False)
     verbosity.add_option('--get-filename',
                          action='store_true', dest='getfilename',
                          help='simulate, quiet but print output filename', default=False)
@@ -460,12 +476,15 @@ def parseOpts(overrideArguments=None):
     return parser, opts, args
 
+
 def _real_main(argv=None):
     # Compatibility fixes for Windows
     if sys.platform == 'win32':
         # https://github.com/rg3/youtube-dl/issues/820
         codecs.register(lambda name: codecs.lookup('utf-8') if name == 'cp65001' else None)
 
+    setproctitle(u'youtube-dl')
+
     parser, opts, args = parseOpts(argv)
 
     # Set user agent
@@ -505,7 +524,6 @@ def _real_main(argv=None):
         for ie in sorted(extractors, key=lambda ie: ie.IE_NAME.lower()):
             compat_print(ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie._WORKING else ''))
             matchedUrls = [url for url in all_urls if ie.suitable(url)]
-            all_urls = [url for url in all_urls if url not in matchedUrls]
            for mu in matchedUrls:
                 compat_print(u'  ' + mu)
         sys.exit(0)
@@ -560,18 +578,10 @@ def _real_main(argv=None):
         if numeric_buffersize is None:
             parser.error(u'invalid buffer size specified')
         opts.buffersize = numeric_buffersize
-    try:
-        opts.playliststart = int(opts.playliststart)
-        if opts.playliststart <= 0:
-            raise ValueError(u'Playlist start must be positive')
-    except (TypeError, ValueError):
-        parser.error(u'invalid playlist start number specified')
-    try:
-        opts.playlistend = int(opts.playlistend)
-        if opts.playlistend != -1 and (opts.playlistend <= 0 or opts.playlistend < opts.playliststart):
-            raise ValueError(u'Playlist end must be greater than playlist start')
-    except (TypeError, ValueError):
-        parser.error(u'invalid playlist end number specified')
+    if opts.playliststart <= 0:
+        raise ValueError(u'Playlist start must be positive')
+    if opts.playlistend not in (-1, None) and opts.playlistend < opts.playliststart:
+        raise ValueError(u'Playlist end must be greater than playlist start')
     if opts.extractaudio:
         if opts.audioformat not in ['best', 'aac', 'mp3', 'm4a', 'opus', 'vorbis', 'wav']:
             parser.error(u'invalid audio format specified')
@@ -604,27 +614,30 @@ def _real_main(argv=None):
                or (opts.useid and u'%(id)s.%(ext)s')
                or (opts.autonumber and u'%(autonumber)s-%(id)s.%(ext)s')
                or u'%(title)s-%(id)s.%(ext)s')
-    if '%(ext)s' not in outtmpl and opts.extractaudio:
+    if not os.path.splitext(outtmpl)[1] and opts.extractaudio:
         parser.error(u'Cannot download a video and extract audio into the same'
-                     u' file! Use "%%(ext)s" instead of %r' %
-                     determine_ext(outtmpl, u''))
+                     u' file! Use "{0}.%(ext)s" instead of "{0}" as the output'
+                     u' template'.format(outtmpl))
+
+    any_printing = opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.getduration or opts.dumpjson
 
     ydl_opts = {
         'usenetrc': opts.usenetrc,
         'username': opts.username,
         'password': opts.password,
         'videopassword': opts.videopassword,
-        'quiet': (opts.quiet or opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.dumpjson),
+        'quiet': (opts.quiet or any_printing),
         'forceurl': opts.geturl,
         'forcetitle': opts.gettitle,
         'forceid': opts.getid,
         'forcethumbnail': opts.getthumbnail,
         'forcedescription': opts.getdescription,
+        'forceduration': opts.getduration,
         'forcefilename': opts.getfilename,
         'forceformat': opts.getformat,
         'forcejson': opts.dumpjson,
         'simulate': opts.simulate,
-        'skip_download': (opts.skip_download or opts.simulate or opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.dumpjson),
+        'skip_download': (opts.skip_download or opts.simulate or any_printing),
         'format': opts.format,
         'format_limit': opts.format_limit,
         'listformats': opts.listformats,
@@ -668,6 +681,8 @@ def _real_main(argv=None):
         'keepvideo': opts.keepvideo,
         'min_filesize': opts.min_filesize,
         'max_filesize': opts.max_filesize,
+        'min_views': opts.min_views,
+        'max_views': opts.max_views,
         'daterange': date,
         'cachedir': opts.cachedir,
         'youtube_print_sig_code': opts.youtube_print_sig_code,

youtube_dl/aes.py

@@ -1,4 +1,4 @@
-__all__ = ['aes_encrypt', 'key_expansion', 'aes_ctr_decrypt', 'aes_decrypt_text']
+__all__ = ['aes_encrypt', 'key_expansion', 'aes_ctr_decrypt', 'aes_cbc_decrypt', 'aes_decrypt_text']
 
 import base64
 from math import ceil
@@ -32,6 +32,31 @@ def aes_ctr_decrypt(data, key, counter):
 
     return decrypted_data
 
+def aes_cbc_decrypt(data, key, iv):
+    """
+    Decrypt with aes in CBC mode
+
+    @param {int[]} data        cipher
+    @param {int[]} key         16/24/32-Byte cipher key
+    @param {int[]} iv          16-Byte IV
+    @returns {int[]}           decrypted data
+    """
+    expanded_key = key_expansion(key)
+    block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))
+
+    decrypted_data=[]
+    previous_cipher_block = iv
+    for i in range(block_count):
+        block = data[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES]
+        block += [0]*(BLOCK_SIZE_BYTES - len(block))
+
+        decrypted_block = aes_decrypt(block, expanded_key)
+        decrypted_data += xor(decrypted_block, previous_cipher_block)
+        previous_cipher_block = block
+    decrypted_data = decrypted_data[:len(data)]
+
+    return decrypted_data
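Since the aes module passes data around as lists of ints (0-255) rather than byte strings, callers convert at the edges. A hypothetical wrapper, not part of this diff:

    from youtube_dl.aes import aes_cbc_decrypt

    def cbc_decrypt_bytes(ciphertext, key, iv):
        # bytes -> list of ints, decrypt, and back; any PKCS#7 padding in
        # the plaintext is left for the caller to strip.
        plain = aes_cbc_decrypt(list(bytearray(ciphertext)),
                                list(bytearray(key)),
                                list(bytearray(iv)))
        return bytes(bytearray(plain))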
def key_expansion(data): def key_expansion(data):
""" """
Generate key schedule Generate key schedule
@ -75,7 +100,7 @@ def aes_encrypt(data, expanded_key):
@returns {int[]} 16-Byte cipher @returns {int[]} 16-Byte cipher
""" """
rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1 rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1
data = xor(data, expanded_key[:BLOCK_SIZE_BYTES]) data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])
for i in range(1, rounds+1): for i in range(1, rounds+1):
data = sub_bytes(data) data = sub_bytes(data)
@ -83,6 +108,26 @@ def aes_encrypt(data, expanded_key):
if i != rounds: if i != rounds:
data = mix_columns(data) data = mix_columns(data)
data = xor(data, expanded_key[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES]) data = xor(data, expanded_key[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES])
return data
def aes_decrypt(data, expanded_key):
"""
Decrypt one block with aes
@param {int[]} data 16-Byte cipher
@param {int[]} expanded_key 176/208/240-Byte expanded key
@returns {int[]} 16-Byte state
"""
rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1
for i in range(rounds, 0, -1):
data = xor(data, expanded_key[i*BLOCK_SIZE_BYTES : (i+1)*BLOCK_SIZE_BYTES])
if i != rounds:
data = mix_columns_inv(data)
data = shift_rows_inv(data)
data = sub_bytes_inv(data)
data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])
return data return data
@ -139,14 +184,69 @@ SBOX = (0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B,
0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E, 0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E,
0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF, 0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF,
0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16) 0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16)
MIX_COLUMN_MATRIX = ((2,3,1,1), SBOX_INV = (0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb,
(1,2,3,1), 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb,
(1,1,2,3), 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e,
(3,1,1,2)) 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25,
0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92,
0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84,
0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06,
0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b,
0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73,
0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e,
0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b,
0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4,
0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f,
0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d)
MIX_COLUMN_MATRIX = ((0x2,0x3,0x1,0x1),
(0x1,0x2,0x3,0x1),
(0x1,0x1,0x2,0x3),
(0x3,0x1,0x1,0x2))
MIX_COLUMN_MATRIX_INV = ((0xE,0xB,0xD,0x9),
(0x9,0xE,0xB,0xD),
(0xD,0x9,0xE,0xB),
(0xB,0xD,0x9,0xE))
RIJNDAEL_EXP_TABLE = (0x01, 0x03, 0x05, 0x0F, 0x11, 0x33, 0x55, 0xFF, 0x1A, 0x2E, 0x72, 0x96, 0xA1, 0xF8, 0x13, 0x35,
0x5F, 0xE1, 0x38, 0x48, 0xD8, 0x73, 0x95, 0xA4, 0xF7, 0x02, 0x06, 0x0A, 0x1E, 0x22, 0x66, 0xAA,
0xE5, 0x34, 0x5C, 0xE4, 0x37, 0x59, 0xEB, 0x26, 0x6A, 0xBE, 0xD9, 0x70, 0x90, 0xAB, 0xE6, 0x31,
0x53, 0xF5, 0x04, 0x0C, 0x14, 0x3C, 0x44, 0xCC, 0x4F, 0xD1, 0x68, 0xB8, 0xD3, 0x6E, 0xB2, 0xCD,
0x4C, 0xD4, 0x67, 0xA9, 0xE0, 0x3B, 0x4D, 0xD7, 0x62, 0xA6, 0xF1, 0x08, 0x18, 0x28, 0x78, 0x88,
0x83, 0x9E, 0xB9, 0xD0, 0x6B, 0xBD, 0xDC, 0x7F, 0x81, 0x98, 0xB3, 0xCE, 0x49, 0xDB, 0x76, 0x9A,
0xB5, 0xC4, 0x57, 0xF9, 0x10, 0x30, 0x50, 0xF0, 0x0B, 0x1D, 0x27, 0x69, 0xBB, 0xD6, 0x61, 0xA3,
0xFE, 0x19, 0x2B, 0x7D, 0x87, 0x92, 0xAD, 0xEC, 0x2F, 0x71, 0x93, 0xAE, 0xE9, 0x20, 0x60, 0xA0,
0xFB, 0x16, 0x3A, 0x4E, 0xD2, 0x6D, 0xB7, 0xC2, 0x5D, 0xE7, 0x32, 0x56, 0xFA, 0x15, 0x3F, 0x41,
0xC3, 0x5E, 0xE2, 0x3D, 0x47, 0xC9, 0x40, 0xC0, 0x5B, 0xED, 0x2C, 0x74, 0x9C, 0xBF, 0xDA, 0x75,
0x9F, 0xBA, 0xD5, 0x64, 0xAC, 0xEF, 0x2A, 0x7E, 0x82, 0x9D, 0xBC, 0xDF, 0x7A, 0x8E, 0x89, 0x80,
0x9B, 0xB6, 0xC1, 0x58, 0xE8, 0x23, 0x65, 0xAF, 0xEA, 0x25, 0x6F, 0xB1, 0xC8, 0x43, 0xC5, 0x54,
0xFC, 0x1F, 0x21, 0x63, 0xA5, 0xF4, 0x07, 0x09, 0x1B, 0x2D, 0x77, 0x99, 0xB0, 0xCB, 0x46, 0xCA,
0x45, 0xCF, 0x4A, 0xDE, 0x79, 0x8B, 0x86, 0x91, 0xA8, 0xE3, 0x3E, 0x42, 0xC6, 0x51, 0xF3, 0x0E,
0x12, 0x36, 0x5A, 0xEE, 0x29, 0x7B, 0x8D, 0x8C, 0x8F, 0x8A, 0x85, 0x94, 0xA7, 0xF2, 0x0D, 0x17,
0x39, 0x4B, 0xDD, 0x7C, 0x84, 0x97, 0xA2, 0xFD, 0x1C, 0x24, 0x6C, 0xB4, 0xC7, 0x52, 0xF6, 0x01)
RIJNDAEL_LOG_TABLE = (0x00, 0x00, 0x19, 0x01, 0x32, 0x02, 0x1a, 0xc6, 0x4b, 0xc7, 0x1b, 0x68, 0x33, 0xee, 0xdf, 0x03,
0x64, 0x04, 0xe0, 0x0e, 0x34, 0x8d, 0x81, 0xef, 0x4c, 0x71, 0x08, 0xc8, 0xf8, 0x69, 0x1c, 0xc1,
0x7d, 0xc2, 0x1d, 0xb5, 0xf9, 0xb9, 0x27, 0x6a, 0x4d, 0xe4, 0xa6, 0x72, 0x9a, 0xc9, 0x09, 0x78,
0x65, 0x2f, 0x8a, 0x05, 0x21, 0x0f, 0xe1, 0x24, 0x12, 0xf0, 0x82, 0x45, 0x35, 0x93, 0xda, 0x8e,
0x96, 0x8f, 0xdb, 0xbd, 0x36, 0xd0, 0xce, 0x94, 0x13, 0x5c, 0xd2, 0xf1, 0x40, 0x46, 0x83, 0x38,
0x66, 0xdd, 0xfd, 0x30, 0xbf, 0x06, 0x8b, 0x62, 0xb3, 0x25, 0xe2, 0x98, 0x22, 0x88, 0x91, 0x10,
0x7e, 0x6e, 0x48, 0xc3, 0xa3, 0xb6, 0x1e, 0x42, 0x3a, 0x6b, 0x28, 0x54, 0xfa, 0x85, 0x3d, 0xba,
0x2b, 0x79, 0x0a, 0x15, 0x9b, 0x9f, 0x5e, 0xca, 0x4e, 0xd4, 0xac, 0xe5, 0xf3, 0x73, 0xa7, 0x57,
0xaf, 0x58, 0xa8, 0x50, 0xf4, 0xea, 0xd6, 0x74, 0x4f, 0xae, 0xe9, 0xd5, 0xe7, 0xe6, 0xad, 0xe8,
0x2c, 0xd7, 0x75, 0x7a, 0xeb, 0x16, 0x0b, 0xf5, 0x59, 0xcb, 0x5f, 0xb0, 0x9c, 0xa9, 0x51, 0xa0,
0x7f, 0x0c, 0xf6, 0x6f, 0x17, 0xc4, 0x49, 0xec, 0xd8, 0x43, 0x1f, 0x2d, 0xa4, 0x76, 0x7b, 0xb7,
0xcc, 0xbb, 0x3e, 0x5a, 0xfb, 0x60, 0xb1, 0x86, 0x3b, 0x52, 0xa1, 0x6c, 0xaa, 0x55, 0x29, 0x9d,
0x97, 0xb2, 0x87, 0x90, 0x61, 0xbe, 0xdc, 0xfc, 0xbc, 0x95, 0xcf, 0xcd, 0x37, 0x3f, 0x5b, 0xd1,
0x53, 0x39, 0x84, 0x3c, 0x41, 0xa2, 0x6d, 0x47, 0x14, 0x2a, 0x9e, 0x5d, 0x56, 0xf2, 0xd3, 0xab,
0x44, 0x11, 0x92, 0xd9, 0x23, 0x20, 0x2e, 0x89, 0xb4, 0x7c, 0xb8, 0x26, 0x77, 0x99, 0xe3, 0xa5,
0x67, 0x4a, 0xed, 0xde, 0xc5, 0x31, 0xfe, 0x18, 0x0d, 0x63, 0x8c, 0x80, 0xc0, 0xf7, 0x70, 0x07)
 def sub_bytes(data):
     return [SBOX[x] for x in data]
+def sub_bytes_inv(data):
+    return [SBOX_INV[x] for x in data]
 def rotate(data):
     return data[1:] + [data[0]]
@@ -160,30 +260,31 @@ def key_schedule_core(data, rcon_iteration):
 def xor(data1, data2):
     return [x^y for x, y in zip(data1, data2)]
-def mix_column(data):
+def rijndael_mul(a, b):
+    if(a==0 or b==0):
+        return 0
+    return RIJNDAEL_EXP_TABLE[(RIJNDAEL_LOG_TABLE[a] + RIJNDAEL_LOG_TABLE[b]) % 0xFF]
+def mix_column(data, matrix):
     data_mixed = []
     for row in range(4):
         mixed = 0
         for column in range(4):
-            addend = data[column]
-            if MIX_COLUMN_MATRIX[row][column] in (2,3):
-                addend <<= 1
-                if addend > 0xff:
-                    addend &= 0xff
-                    addend ^= 0x1b
-            if MIX_COLUMN_MATRIX[row][column] == 3:
-                addend ^= data[column]
-            mixed ^= addend & 0xff
+            # xor is (+) and (-)
+            mixed ^= rijndael_mul(data[column], matrix[row][column])
         data_mixed.append(mixed)
     return data_mixed
-def mix_columns(data):
+def mix_columns(data, matrix=MIX_COLUMN_MATRIX):
     data_mixed = []
     for i in range(4):
         column = data[i*4 : (i+1)*4]
-        data_mixed += mix_column(column)
+        data_mixed += mix_column(column, matrix)
     return data_mixed
+def mix_columns_inv(data):
+    return mix_columns(data, MIX_COLUMN_MATRIX_INV)
 def shift_rows(data):
     data_shifted = []
     for column in range(4):
@@ -191,6 +292,13 @@ def shift_rows(data):
             data_shifted.append( data[((column + row) & 0b11) * 4 + row] )
     return data_shifted
+def shift_rows_inv(data):
+    data_shifted = []
+    for column in range(4):
+        for row in range(4):
+            data_shifted.append( data[((column - row) & 0b11) * 4 + row] )
+    return data_shifted
 def inc(data):
     data = data[:] # copy
     for i in range(len(data)-1,-1,-1):
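For reference, rijndael_mul above is multiplication in GF(2^8) done through the RIJNDAEL_LOG_TABLE/RIJNDAEL_EXP_TABLE lookups (take logs, add mod 255, exponentiate). A minimal sketch, assuming the tables in this file are the standard AES generator-0x03 tables, cross-checks it against the shift-and-reduce method the old mix_column body used inline:

# Sketch only: GF(2^8) multiply by shift-and-reduce, for cross-checking
# the table-based rijndael_mul.
def gf_mul_slow(a, b):
    result = 0
    for _ in range(8):
        if b & 1:
            result ^= a          # "addition" in GF(2^8) is XOR
        carry = a & 0x80
        a = (a << 1) & 0xff
        if carry:
            a ^= 0x1b            # reduce by the AES polynomial x^8+x^4+x^3+x+1
        b >>= 1
    return result

# FIPS-197's worked example: {57} * {13} = {fe}
assert gf_mul_slow(0x57, 0x13) == 0xfe  # rijndael_mul(0x57, 0x13) should agree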


@@ -1,6 +1,8 @@
-from .appletrailers import AppleTrailersIE
+from .academicearth import AcademicEarthCourseIE
 from .addanime import AddAnimeIE
 from .anitube import AnitubeIE
+from .aparat import AparatIE
+from .appletrailers import AppleTrailersIE
 from .archiveorg import ArchiveOrgIE
 from .ard import ARDIE
 from .arte import (
@@ -13,6 +15,7 @@ from .arte import (
 from .auengine import AUEngineIE
 from .bambuser import BambuserIE, BambuserChannelIE
 from .bandcamp import BandcampIE, BandcampAlbumIE
+from .blinkx import BlinkxIE
 from .bliptv import BlipTVIE, BlipTVUserIE
 from .bloomberg import BloombergIE
 from .breakcom import BreakIE
@@ -20,6 +23,8 @@ from .brightcove import BrightcoveIE
 from .c56 import C56IE
 from .canalplus import CanalplusIE
 from .canalc2 import Canalc2IE
+from .cbs import CBSIE
+from .channel9 import Channel9IE
 from .cinemassacre import CinemassacreIE
 from .clipfish import ClipfishIE
 from .clipsyndicate import ClipsyndicateIE
@@ -28,6 +33,7 @@ from .collegehumor import CollegeHumorIE
 from .comedycentral import ComedyCentralIE, ComedyCentralShowsIE
 from .condenast import CondeNastIE
 from .criterion import CriterionIE
+from .crunchyroll import CrunchyrollIE
 from .cspan import CSpanIE
 from .d8 import D8IE
 from .dailymotion import (
@@ -78,6 +84,10 @@ from .ina import InaIE
 from .infoq import InfoQIE
 from .instagram import InstagramIE
 from .internetvideoarchive import InternetVideoArchiveIE
+from .ivi import (
+    IviIE,
+    IviCompilationIE
+)
 from .jeuxvideo import JeuxVideoIE
 from .jukebox import JukeboxIE
 from .justintv import JustinTVIE
@@ -87,6 +97,7 @@ from .kickstarter import KickStarterIE
 from .keek import KeekIE
 from .liveleak import LiveLeakIE
 from .livestream import LivestreamIE, LivestreamOriginalIE
+from .mdr import MDRIE
 from .metacafe import MetacafeIE
 from .metacritic import MetacriticIE
 from .mit import TechTVMITIE, MITIE
@@ -111,9 +122,11 @@ from .orf import ORFIE
 from .pbs import PBSIE
 from .photobucket import PhotobucketIE
 from .podomatic import PodomaticIE
+from .pornhd import PornHdIE
 from .pornhub import PornHubIE
 from .pornotube import PornotubeIE
 from .pyvideo import PyvideoIE
+from .radiofrance import RadioFranceIE
 from .rbmaradio import RBMARadioIE
 from .redtube import RedTubeIE
 from .ringtv import RingTVIE


@@ -0,0 +1,31 @@
import re

from .common import InfoExtractor


class AcademicEarthCourseIE(InfoExtractor):
    _VALID_URL = r'^https?://(?:www\.)?academicearth\.org/(?:courses|playlists)/(?P<id>[^?#/]+)'
    IE_NAME = u'AcademicEarth:Course'

    def _real_extract(self, url):
        m = re.match(self._VALID_URL, url)
        playlist_id = m.group('id')

        webpage = self._download_webpage(url, playlist_id)
        title = self._html_search_regex(
            r'<h1 class="playlist-name">(.*?)</h1>', webpage, u'title')
        description = self._html_search_regex(
            r'<p class="excerpt">(.*?)</p>',
            webpage, u'description', fatal=False)
        urls = re.findall(
            r'<h3 class="lecture-title"><a target="_blank" href="([^"]+)">',
            webpage)
        entries = [self.url_result(u) for u in urls]

        return {
            '_type': 'playlist',
            'id': playlist_id,
            'title': title,
            'description': description,
            'entries': entries,
        }


@@ -0,0 +1,56 @@
#coding: utf-8

import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    HEADRequest,
)


class AparatIE(InfoExtractor):
    _VALID_URL = r'^https?://(?:www\.)?aparat\.com/(?:v/|video/video/embed/videohash/)(?P<id>[a-zA-Z0-9]+)'

    _TEST = {
        u'url': u'http://www.aparat.com/v/wP8On',
        u'file': u'wP8On.mp4',
        u'md5': u'6714e0af7e0d875c5a39c4dc4ab46ad1',
        u'info_dict': {
            u"title": u"تیم گلکسی 11 - زومیت",
        },
        #u'skip': u'Extremely unreliable',
    }

    def _real_extract(self, url):
        m = re.match(self._VALID_URL, url)
        video_id = m.group('id')

        # Note: There is an easier-to-parse configuration at
        # http://www.aparat.com/video/video/config/videohash/%video_id
        # but the URL in there does not work
        embed_url = (u'http://www.aparat.com/video/video/embed/videohash/' +
                     video_id + u'/vt/frame')
        webpage = self._download_webpage(embed_url, video_id)

        video_urls = re.findall(r'fileList\[[0-9]+\]\s*=\s*"([^"]+)"', webpage)
        for i, video_url in enumerate(video_urls):
            req = HEADRequest(video_url)
            res = self._request_webpage(
                req, video_id, note=u'Testing video URL %d' % i, errnote=False)
            if res:
                break
        else:
            raise ExtractorError(u'No working video URLs found')

        title = self._search_regex(r'\s+title:\s*"([^"]+)"', webpage, u'title')
        thumbnail = self._search_regex(
            r'\s+image:\s*"([^"]+)"', webpage, u'thumbnail', fatal=False)

        return {
            'id': video_id,
            'title': title,
            'url': video_url,
            'ext': 'mp4',
            'thumbnail': thumbnail,
        }
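The extraction loop above leans on Python's for/else: the else branch runs only when no candidate URL responded, and errnote=False (added to _request_webpage later in this changeset) makes a failed HEAD probe return False instead of raising. A minimal sketch of the same probe-until-one-works pattern, with hypothetical names standing in for the youtube-dl calls:

# Hypothetical names; probe() stands in for _request_webpage(..., errnote=False).
def first_working_url(urls, probe):
    for url in urls:
        if probe(url):   # truthy response: keep this url
            break
    else:                # loop finished without break: every probe failed
        raise RuntimeError('No working video URLs found')
    return url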


@@ -266,20 +266,6 @@ class ArteTVDDCIE(ArteTVPlus7IE):
     IE_NAME = u'arte.tv:ddc'
     _VALID_URL = r'http?://ddc\.arte\.tv/(?P<lang>emission|folge)/(?P<id>.+)'

-    _TEST = {
-        u'url': u'http://ddc.arte.tv/folge/neues-aus-mauretanien',
-        u'file': u'049881-009_PLUS7-D.flv',
-        u'info_dict': {
-            u'title': u'Mit offenen Karten',
-            u'description': u'md5:57929b0eaeddeb8a0c983f58e9ebd3b6',
-            u'upload_date': u'20131207',
-        },
-        u'params': {
-            # rtmp download
-            u'skip_download': True,
-        },
-    }

     def _real_extract(self, url):
         video_id, lang = self._extract_url_info(url)
         if lang == 'folge':


@@ -0,0 +1,90 @@
import datetime
import json
import re

from .common import InfoExtractor
from ..utils import (
    remove_start,
)


class BlinkxIE(InfoExtractor):
    _VALID_URL = r'^(?:https?://(?:www\.)blinkx\.com/#?ce/|blinkx:)(?P<id>[^?]+)'
    _IE_NAME = u'blinkx'

    _TEST = {
        u'url': u'http://www.blinkx.com/ce/8aQUy7GVFYgFzpKhT0oqsilwOGFRVXk3R1ZGWWdGenBLaFQwb3FzaWx3OGFRVXk3R1ZGWWdGenB',
        u'file': u'8aQUy7GV.mp4',
        u'md5': u'2e9a07364af40163a908edbf10bb2492',
        u'info_dict': {
            u"title": u"Police Car Rolls Away",
            u"uploader": u"stupidvideos.com",
            u"upload_date": u"20131215",
            u"description": u"A police car gently rolls away from a fight. Maybe it felt weird being around a confrontation and just had to get out of there!",
            u"duration": 14.886,
            u"thumbnails": [{
                "width": 100,
                "height": 76,
                "url": "http://cdn.blinkx.com/stream/b/41/StupidVideos/20131215/1873969261/1873969261_tn_0.jpg",
            }],
        },
    }

    def _real_extract(self, url):
        m = re.match(self._VALID_URL, url)
        video_id = m.group('id')
        display_id = video_id[:8]

        api_url = (u'https://apib4.blinkx.com/api.php?action=play_video&' +
                   u'video=%s' % video_id)
        data_json = self._download_webpage(api_url, display_id)
        data = json.loads(data_json)['api']['results'][0]
        dt = datetime.datetime.fromtimestamp(data['pubdate_epoch'])
        upload_date = dt.strftime('%Y%m%d')

        duration = None
        thumbnails = []
        formats = []
        for m in data['media']:
            if m['type'] == 'jpg':
                thumbnails.append({
                    'url': m['link'],
                    'width': int(m['w']),
                    'height': int(m['h']),
                })
            elif m['type'] == 'original':
                duration = m['d']
            elif m['type'] == 'youtube':
                yt_id = m['link']
                self.to_screen(u'Youtube video detected: %s' % yt_id)
                return self.url_result(yt_id, 'Youtube', video_id=yt_id)
            elif m['type'] in ('flv', 'mp4'):
                vcodec = remove_start(m['vcodec'], 'ff')
                acodec = remove_start(m['acodec'], 'ff')
                format_id = (u'%s-%sk-%s' %
                             (vcodec,
                              (int(m['vbr']) + int(m['abr'])) // 1000,
                              m['w']))
                formats.append({
                    'format_id': format_id,
                    'url': m['link'],
                    'vcodec': vcodec,
                    'acodec': acodec,
                    'abr': int(m['abr']) // 1000,
                    'vbr': int(m['vbr']) // 1000,
                    'width': int(m['w']),
                    'height': int(m['h']),
                })
        formats.sort(key=lambda f: (f['width'], f['vbr'], f['abr']))

        return {
            'id': display_id,
            'fullid': video_id,
            'title': data['title'],
            'formats': formats,
            'uploader': data['channel_name'],
            'upload_date': upload_date,
            'description': data.get('description'),
            'thumbnails': thumbnails,
            'duration': duration,
        }
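The ascending sort on (width, vbr, abr) leaves the list in the worst-first, best-last order that the formats contract in common.py (later in this diff) requires. A small illustration with invented entries:

# Invented sample entries only, showing the worst-to-best ordering:
formats = [
    {'format_id': 'h264-620k-640', 'width': 640, 'vbr': 588, 'abr': 32},
    {'format_id': 'h264-328k-480', 'width': 480, 'vbr': 296, 'abr': 32},
]
formats.sort(key=lambda f: (f['width'], f['vbr'], f['abr']))
assert [f['format_id'] for f in formats] == ['h264-328k-480', 'h264-620k-640']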


@@ -70,13 +70,14 @@ class BlipTVIE(InfoExtractor):
         info = None
         urlh = self._request_webpage(request, None, False,
                                      u'unable to download video info webpage')
+
         if urlh.headers.get('Content-Type', '').startswith('video/'):  # Direct download
             basename = url.split('/')[-1]
             title,ext = os.path.splitext(basename)
             title = title.decode('UTF-8')
             ext = ext.replace('.', '')
             self.report_direct_download(title)
-            info = {
+            return {
                 'id': title,
                 'url': url,
                 'uploader': None,
@@ -85,49 +86,47 @@ class BlipTVIE(InfoExtractor):
                 'ext': ext,
                 'urlhandle': urlh
             }
-        if info is None:  # Regular URL
-            try:
-                json_code_bytes = urlh.read()
-                json_code = json_code_bytes.decode('utf-8')
-            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                raise ExtractorError(u'Unable to read video info webpage: %s' % compat_str(err))
-            try:
-                json_data = json.loads(json_code)
-                if 'Post' in json_data:
-                    data = json_data['Post']
-                else:
-                    data = json_data
-                upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d')
-                if 'additionalMedia' in data:
-                    formats = sorted(data['additionalMedia'], key=lambda f: int(f['media_height']))
-                    best_format = formats[-1]
-                    video_url = best_format['url']
-                else:
-                    video_url = data['media']['url']
-                umobj = re.match(self._URL_EXT, video_url)
-                if umobj is None:
-                    raise ValueError('Can not determine filename extension')
-                ext = umobj.group(1)
-                info = {
-                    'id': compat_str(data['item_id']),
-                    'url': video_url,
-                    'uploader': data['display_name'],
-                    'upload_date': upload_date,
-                    'title': data['title'],
-                    'ext': ext,
-                    'format': data['media']['mimeType'],
-                    'thumbnail': data['thumbnailUrl'],
-                    'description': data['description'],
-                    'player_url': data['embedUrl'],
-                    'user_agent': 'iTunes/10.6.1',
-                }
-            except (ValueError,KeyError) as err:
-                raise ExtractorError(u'Unable to parse video information: %s' % repr(err))
-        return [info]
+        try:
+            json_code_bytes = urlh.read()
+            json_code = json_code_bytes.decode('utf-8')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            raise ExtractorError(u'Unable to read video info webpage: %s' % compat_str(err))
+
+        try:
+            json_data = json.loads(json_code)
+            if 'Post' in json_data:
+                data = json_data['Post']
+            else:
+                data = json_data
+
+            upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d')
+            if 'additionalMedia' in data:
+                formats = sorted(data['additionalMedia'], key=lambda f: int(f['media_height']))
+                best_format = formats[-1]
+                video_url = best_format['url']
+            else:
+                video_url = data['media']['url']
+            umobj = re.match(self._URL_EXT, video_url)
+            if umobj is None:
+                raise ValueError('Can not determine filename extension')
+            ext = umobj.group(1)
+
+            return {
+                'id': compat_str(data['item_id']),
+                'url': video_url,
+                'uploader': data['display_name'],
+                'upload_date': upload_date,
+                'title': data['title'],
+                'ext': ext,
+                'format': data['media']['mimeType'],
+                'thumbnail': data['thumbnailUrl'],
+                'description': data['description'],
+                'player_url': data['embedUrl'],
+                'user_agent': 'iTunes/10.6.1',
+            }
+        except (ValueError, KeyError) as err:
+            raise ExtractorError(u'Unable to parse video information: %s' % repr(err))


 class BlipTVUserIE(InfoExtractor):


@@ -26,7 +26,7 @@ class BrightcoveIE(InfoExtractor):
         # From http://www.8tv.cat/8aldia/videos/xavier-sala-i-martin-aquesta-tarda-a-8-al-dia/
         u'url': u'http://c.brightcove.com/services/viewer/htmlFederated?playerID=1654948606001&flashID=myExperience&%40videoPlayer=2371591881001',
         u'file': u'2371591881001.mp4',
-        u'md5': u'8eccab865181d29ec2958f32a6a754f5',
+        u'md5': u'5423e113865d26e40624dce2e4b45d95',
         u'note': u'Test Brightcove downloads and detection in GenericIE',
         u'info_dict': {
             u'title': u'Xavier Sala i Martín: “Un banc que no presta és un banc zombi que no serveix per a res”',


@@ -0,0 +1,30 @@
import re

from .common import InfoExtractor


class CBSIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?cbs\.com/shows/[^/]+/video/(?P<id>[^/]+)/.*'

    _TEST = {
        u'url': u'http://www.cbs.com/shows/garth-brooks/video/_u7W953k6la293J7EPTd9oHkSPs6Xn6_/connect-chat-feat-garth-brooks/',
        u'file': u'4JUVEwq3wUT7.flv',
        u'info_dict': {
            u'title': u'Connect Chat feat. Garth Brooks',
            u'description': u'Connect with country music singer Garth Brooks, as he chats with fans on Wednesday November 27, 2013. Be sure to tune in to Garth Brooks: Live from Las Vegas, Friday November 29, at 9/8c on CBS!',
            u'duration': 1495,
        },
        u'params': {
            # rtmp download
            u'skip_download': True,
        },
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        webpage = self._download_webpage(url, video_id)
        real_id = self._search_regex(
            r"video\.settings\.pid\s*=\s*'([^']+)';",
            webpage, u'real video ID')
        return self.url_result(u'theplatform:%s' % real_id)


@@ -0,0 +1,267 @@
# encoding: utf-8

import re

from .common import InfoExtractor
from ..utils import ExtractorError


class Channel9IE(InfoExtractor):
    '''
    Common extractor for channel9.msdn.com.

    Whether a given URL points to a video or a playlist is determined from
    the meta Search.PageType field in the page HTML rather than from the URL
    itself, since the URL alone does not always make this clear.
    '''
    IE_DESC = u'Channel 9'
    IE_NAME = u'channel9'
    _VALID_URL = r'^https?://(?:www\.)?channel9\.msdn\.com/(?P<contentpath>.+)/?'

    _TESTS = [
        {
            u'url': u'http://channel9.msdn.com/Events/TechEd/Australia/2013/KOS002',
            u'file': u'Events_TechEd_Australia_2013_KOS002.mp4',
            u'md5': u'bbd75296ba47916b754e73c3a4bbdf10',
            u'info_dict': {
                u'title': u'Developer Kick-Off Session: Stuff We Love',
                u'description': u'md5:c08d72240b7c87fcecafe2692f80e35f',
                u'duration': 4576,
                u'thumbnail': u'http://media.ch9.ms/ch9/9d51/03902f2d-fc97-4d3c-b195-0bfe15a19d51/KOS002_220.jpg',
                u'session_code': u'KOS002',
                u'session_day': u'Day 1',
                u'session_room': u'Arena 1A',
                u'session_speakers': [u'Ed Blankenship', u'Andrew Coates', u'Brady Gaster', u'Patrick Klug', u'Mads Kristensen'],
            },
        },
        {
            u'url': u'http://channel9.msdn.com/posts/Self-service-BI-with-Power-BI-nuclear-testing',
            u'file': u'posts_Self-service-BI-with-Power-BI-nuclear-testing.mp4',
            u'md5': u'b43ee4529d111bc37ba7ee4f34813e68',
            u'info_dict': {
                u'title': u'Self-service BI with Power BI - nuclear testing',
                u'description': u'md5:d1e6ecaafa7fb52a2cacdf9599829f5b',
                u'duration': 1540,
                u'thumbnail': u'http://media.ch9.ms/ch9/87e1/0300391f-a455-4c72-bec3-4422f19287e1/selfservicenuk_512.jpg',
                u'authors': [u'Mike Wilmot'],
            },
        }
    ]

    _RSS_URL = 'http://channel9.msdn.com/%s/RSS'

    # Sorted by quality
    _known_formats = ['MP3', 'MP4', 'Mid Quality WMV', 'Mid Quality MP4', 'High Quality WMV', 'High Quality MP4']

    def _restore_bytes(self, formatted_size):
        if not formatted_size:
            return 0
        m = re.match(r'^(?P<size>\d+(?:\.\d+)?)\s+(?P<units>[a-zA-Z]+)', formatted_size)
        if not m:
            return 0
        units = m.group('units')
        try:
            exponent = [u'B', u'KB', u'MB', u'GB', u'TB', u'PB', u'EB', u'ZB', u'YB'].index(units.upper())
        except ValueError:
            return 0
        size = float(m.group('size'))
        return int(size * (1024 ** exponent))
    def _formats_from_html(self, html):
        FORMAT_REGEX = r'''
            (?x)
            <a\s+href="(?P<url>[^"]+)">(?P<quality>[^<]+)</a>\s*
            <span\s+class="usage">\((?P<note>[^\)]+)\)</span>\s*
            (?:<div\s+class="popup\s+rounded">\s*
            <h3>File\s+size</h3>\s*(?P<filesize>.*?)\s*
            </div>)?  # File size part may be missing
        '''
        # Extract known formats
        formats = [{'url': x.group('url'),
                    'format_id': x.group('quality'),
                    'format_note': x.group('note'),
                    'format': '%s (%s)' % (x.group('quality'), x.group('note')),
                    'filesize': self._restore_bytes(x.group('filesize')),  # File size is approximate
                    } for x in list(re.finditer(FORMAT_REGEX, html)) if x.group('quality') in self._known_formats]
        # Sort according to known formats list
        formats.sort(key=lambda fmt: self._known_formats.index(fmt['format_id']))
        return formats

    def _extract_title(self, html):
        title = self._html_search_meta(u'title', html, u'title')
        if title is None:
            title = self._og_search_title(html)
        TITLE_SUFFIX = u' (Channel 9)'
        if title is not None and title.endswith(TITLE_SUFFIX):
            title = title[:-len(TITLE_SUFFIX)]
        return title

    def _extract_description(self, html):
        DESCRIPTION_REGEX = r'''(?sx)
            <div\s+class="entry-content">\s*
            <div\s+id="entry-body">\s*
            (?P<description>.+?)\s*
            </div>\s*
            </div>
        '''
        m = re.search(DESCRIPTION_REGEX, html)
        if m is not None:
            return m.group('description')
        return self._html_search_meta(u'description', html, u'description')

    def _extract_duration(self, html):
        m = re.search(r'data-video_duration="(?P<hours>\d{2}):(?P<minutes>\d{2}):(?P<seconds>\d{2})"', html)
        return ((int(m.group('hours')) * 60 * 60) + (int(m.group('minutes')) * 60) + int(m.group('seconds'))) if m else None

    def _extract_slides(self, html):
        m = re.search(r'<a href="(?P<slidesurl>[^"]+)" class="slides">Slides</a>', html)
        return m.group('slidesurl') if m is not None else None

    def _extract_zip(self, html):
        m = re.search(r'<a href="(?P<zipurl>[^"]+)" class="zip">Zip</a>', html)
        return m.group('zipurl') if m is not None else None

    def _extract_avg_rating(self, html):
        m = re.search(r'<p class="avg-rating">Avg Rating: <span>(?P<avgrating>[^<]+)</span></p>', html)
        return float(m.group('avgrating')) if m is not None else 0

    def _extract_rating_count(self, html):
        m = re.search(r'<div class="rating-count">\((?P<ratingcount>[^<]+)\)</div>', html)
        return int(self._fix_count(m.group('ratingcount'))) if m is not None else 0

    def _extract_view_count(self, html):
        m = re.search(r'<li class="views">\s*<span class="count">(?P<viewcount>[^<]+)</span> Views\s*</li>', html)
        return int(self._fix_count(m.group('viewcount'))) if m is not None else 0

    def _extract_comment_count(self, html):
        m = re.search(r'<li class="comments">\s*<a href="#comments">\s*<span class="count">(?P<commentcount>[^<]+)</span> Comments\s*</a>\s*</li>', html)
        return int(self._fix_count(m.group('commentcount'))) if m is not None else 0

    def _fix_count(self, count):
        return int(str(count).replace(',', '')) if count is not None else None

    def _extract_authors(self, html):
        m = re.search(r'(?s)<li class="author">(.*?)</li>', html)
        if m is None:
            return None
        return re.findall(r'<a href="/Niners/[^"]+">([^<]+)</a>', m.group(1))

    def _extract_session_code(self, html):
        m = re.search(r'<li class="code">\s*(?P<code>.+?)\s*</li>', html)
        return m.group('code') if m is not None else None

    def _extract_session_day(self, html):
        m = re.search(r'<li class="day">\s*<a href="/Events/[^"]+">(?P<day>[^<]+)</a>\s*</li>', html)
        return m.group('day') if m is not None else None

    def _extract_session_room(self, html):
        m = re.search(r'<li class="room">\s*(?P<room>.+?)\s*</li>', html)
        return m.group('room') if m is not None else None

    def _extract_session_speakers(self, html):
        return re.findall(r'<a href="/Events/Speakers/[^"]+">([^<]+)</a>', html)

    def _extract_content(self, html, content_path):
        # Look for downloadable content
        formats = self._formats_from_html(html)
        slides = self._extract_slides(html)
        zip_ = self._extract_zip(html)

        # Nothing to download
        if len(formats) == 0 and slides is None and zip_ is None:
            self._downloader.report_warning(u'None of recording, slides or zip are available for %s' % content_path)
            return

        # Extract meta
        title = self._extract_title(html)
        description = self._extract_description(html)
        thumbnail = self._og_search_thumbnail(html)
        duration = self._extract_duration(html)
        avg_rating = self._extract_avg_rating(html)
        rating_count = self._extract_rating_count(html)
        view_count = self._extract_view_count(html)
        comment_count = self._extract_comment_count(html)

        common = {'_type': 'video',
                  'id': content_path,
                  'description': description,
                  'thumbnail': thumbnail,
                  'duration': duration,
                  'avg_rating': avg_rating,
                  'rating_count': rating_count,
                  'view_count': view_count,
                  'comment_count': comment_count,
                  }

        result = []

        if slides is not None:
            d = common.copy()
            d.update({'title': title + '-Slides', 'url': slides})
            result.append(d)

        if zip_ is not None:
            d = common.copy()
            d.update({'title': title + '-Zip', 'url': zip_})
            result.append(d)

        if len(formats) > 0:
            d = common.copy()
            d.update({'title': title, 'formats': formats})
            result.append(d)

        return result

    def _extract_entry_item(self, html, content_path):
        contents = self._extract_content(html, content_path)
        if contents is None:
            return contents

        authors = self._extract_authors(html)
        for content in contents:
            content['authors'] = authors

        return contents

    def _extract_session(self, html, content_path):
        contents = self._extract_content(html, content_path)
        if contents is None:
            return contents

        session_meta = {'session_code': self._extract_session_code(html),
                        'session_day': self._extract_session_day(html),
                        'session_room': self._extract_session_room(html),
                        'session_speakers': self._extract_session_speakers(html),
                        }

        for content in contents:
            content.update(session_meta)

        return contents

    def _extract_list(self, content_path):
        rss = self._download_xml(self._RSS_URL % content_path, content_path, u'Downloading RSS')
        entries = [self.url_result(session_url.text, 'Channel9')
                   for session_url in rss.findall('./channel/item/link')]
        title_text = rss.find('./channel/title').text
        return self.playlist_result(entries, content_path, title_text)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        content_path = mobj.group('contentpath')

        webpage = self._download_webpage(url, content_path, u'Downloading web page')

        page_type_m = re.search(r'<meta name="Search.PageType" content="(?P<pagetype>[^"]+)"/>', webpage)
        if page_type_m is None:
            raise ExtractorError(u'Search.PageType not found, don\'t know how to process this page', expected=True)

        page_type = page_type_m.group('pagetype')

        if page_type == 'List':  # List page, may contain list of 'item'-like objects
            return self._extract_list(content_path)
        elif page_type == 'Entry.Item':  # Any 'item'-like page, may contain downloadable content
            return self._extract_entry_item(webpage, content_path)
        elif page_type == 'Session':  # Event session page, may contain downloadable content
            return self._extract_session(webpage, content_path)
        else:
            raise ExtractorError(u'Unexpected Search.PageType %s' % page_type, expected=True)
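The _restore_bytes helper defined above inverts Channel 9's human-formatted sizes ("4.5 MB") into byte counts by using the index of the unit in the size-suffix list as a 1024 exponent. A standalone sketch of the same rule, with a couple of spot checks:

# Sketch of the parsing rule used by _restore_bytes, outside the class.
import re

def restore_bytes(formatted_size):
    m = re.match(r'^(?P<size>\d+(?:\.\d+)?)\s+(?P<units>[a-zA-Z]+)', formatted_size or '')
    if not m:
        return 0
    units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']
    try:
        exponent = units.index(m.group('units').upper())
    except ValueError:
        return 0
    return int(float(m.group('size')) * (1024 ** exponent))

assert restore_bytes('4.5 MB') == int(4.5 * 1024 ** 2)  # 4718592
assert restore_bytes('890 KB') == 890 * 1024
assert restore_bytes('n/a') == 0  # unparseable sizes degrade to 0, not an error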


@@ -18,6 +18,7 @@ from ..utils import (
     sanitize_filename,
     unescapeHTML,
 )
+_NO_DEFAULT = object()

 class InfoExtractor(object):
@@ -34,15 +35,39 @@ class InfoExtractor(object):
     The dictionaries must include the following fields:

     id:             Video identifier.
-    url:            Final video URL.
     title:          Video title, unescaped.
-    ext:            Video filename extension.

-    Instead of url and ext, formats can also specified.
+    Additionally, it must contain either a formats entry or url and ext:
+
+    formats:        A list of dictionaries for each format available, it must
+                    be ordered from worst to best quality. Potential fields:
+                    * url        Mandatory. The URL of the video file
+                    * ext        Will be calculated from url if missing
+                    * format     A human-readable description of the format
+                                 ("mp4 container with h264/opus").
+                                 Calculated from the format_id, width, height
+                                 and format_note fields if missing.
+                    * format_id  A short description of the format
+                                 ("mp4_h264_opus" or "19")
+                    * format_note Additional info about the format
+                                 ("3D" or "DASH video")
+                    * width      Width of the video, if known
+                    * height     Height of the video, if known
+                    * abr        Average audio bitrate in KBit/s
+                    * acodec     Name of the audio codec in use
+                    * vbr        Average video bitrate in KBit/s
+                    * vcodec     Name of the video codec in use
+                    * filesize   The number of bytes, if known in advance
+                    * player_url SWF Player URL (used for rtmpdump).
+    url:            Final video URL.
+    ext:            Video filename extension.
+    format:         The video format, defaults to ext (used for --get-format)
+    player_url:     SWF Player URL (used for rtmpdump).
+    urlhandle:      [internal] The urlHandle to be used to download the file,
+                    like returned by urllib.request.urlopen

     The following fields are optional:

-    format:         The video format, defaults to ext (used for --get-format)
     thumbnails:     A list of dictionaries (with the entries "resolution" and
                     "url") for the varying thumbnails
     thumbnail:      Full URL to a video thumbnail image.
@@ -51,35 +76,14 @@ class InfoExtractor(object):
     upload_date:    Video upload date (YYYYMMDD).
     uploader_id:    Nickname or id of the video uploader.
     location:       Physical location of the video.
-    player_url:     SWF Player URL (used for rtmpdump).
     subtitles:      The subtitle file contents as a dictionary in the format
                     {language: subtitles}.
+    duration:       Length of the video in seconds, as an integer.
     view_count:     How many users have watched the video on the platform.
     like_count:     Number of positive ratings of the video
     dislike_count:  Number of negative ratings of the video
     comment_count:  Number of comments on the video
-    urlhandle:      [internal] The urlHandle to be used to download the file,
-                    like returned by urllib.request.urlopen
     age_limit:      Age restriction for the video, as an integer (years)
-    formats:        A list of dictionaries for each format available, it must
-                    be ordered from worst to best quality. Potential fields:
-                    * url        Mandatory. The URL of the video file
-                    * ext        Will be calculated from url if missing
-                    * format     A human-readable description of the format
-                                 ("mp4 container with h264/opus").
-                                 Calculated from the format_id, width, height.
-                                 and format_note fields if missing.
-                    * format_id  A short description of the format
-                                 ("mp4_h264_opus" or "19")
-                    * format_note Additional info about the format
-                                 ("3D" or "DASH video")
-                    * width      Width of the video, if known
-                    * height     Height of the video, if known
-                    * abr        Average audio bitrate in KBit/s
-                    * acodec     Name of the audio codec in use
-                    * vbr        Average video bitrate in KBit/s
-                    * vcodec     Name of the video codec in use
-                    * filesize   The number of bytes, if known in advance
     webpage_url:    The url to the video webpage, if given to youtube-dl it
                     should allow to get the same result again. (It will be set
                     by YoutubeDL if it's missing)
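To make the revised contract concrete, here is a hedged sketch of a minimal info dict under the new layout; every value is invented for illustration:

# Hypothetical example of the documented shape (not from the codebase):
info = {
    'id': '12345',                 # mandatory
    'title': 'An example video',   # mandatory
    # either url+ext, or a formats list ordered worst to best:
    'formats': [
        {'url': 'http://example.com/v_360.mp4', 'format_id': '360',
         'width': 640, 'height': 360},
        {'url': 'http://example.com/v_720.mp4', 'format_id': '720',
         'width': 1280, 'height': 720},
    ],
    # optional fields
    'upload_date': '20131223',     # YYYYMMDD
    'duration': 123,               # seconds, as an integer
}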
@@ -166,6 +170,8 @@ class InfoExtractor(object):
         try:
             return self._downloader.urlopen(url_or_request)
         except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            if errnote is False:
+                return False
             if errnote is None:
                 errnote = u'Unable to download webpage'
             errmsg = u'%s: %s' % (errnote, compat_str(err))
@@ -259,7 +265,8 @@ class InfoExtractor(object):
         self.to_screen(u'Logging in')

     #Methods for following #608
-    def url_result(self, url, ie=None, video_id=None):
+    @staticmethod
+    def url_result(url, ie=None, video_id=None):
         """Returns a url that points to a page that should be processed"""
         #TODO: ie should be the class used for getting the info
         video_info = {'_type': 'url',
@@ -268,7 +275,8 @@ class InfoExtractor(object):
         if video_id is not None:
             video_info['id'] = video_id
         return video_info
-    def playlist_result(self, entries, playlist_id=None, playlist_title=None):
+    @staticmethod
+    def playlist_result(entries, playlist_id=None, playlist_title=None):
         """Returns a playlist"""
         video_info = {'_type': 'playlist',
                       'entries': entries}
@@ -278,7 +286,7 @@ class InfoExtractor(object):
         video_info['title'] = playlist_title
         return video_info

-    def _search_regex(self, pattern, string, name, default=None, fatal=True, flags=0):
+    def _search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0):
         """
         Perform a regex search on the given string, using a single or a list of
         patterns returning the first matching group.
@@ -292,7 +300,7 @@ class InfoExtractor(object):
             mobj = re.search(p, string, flags)
             if mobj: break

-        if sys.stderr.isatty() and os.name != 'nt':
+        if os.name != 'nt' and sys.stderr.isatty():
             _name = u'\033[0;34m%s\033[0m' % name
         else:
             _name = name
@@ -300,7 +308,7 @@ class InfoExtractor(object):
         if mobj:
             # return the first matching group
             return next(g for g in mobj.groups() if g is not None)
-        elif default is not None:
+        elif default is not _NO_DEFAULT:
             return default
         elif fatal:
             raise RegexNotFoundError(u'Unable to extract %s' % _name)
@@ -309,7 +317,7 @@ class InfoExtractor(object):
             u'please report this issue on http://yt-dl.org/bug' % _name)
             return None

-    def _html_search_regex(self, pattern, string, name, default=None, fatal=True, flags=0):
+    def _html_search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0):
         """
         Like _search_regex, but strips HTML tags and unescapes entities.
         """


@@ -0,0 +1,171 @@
# encoding: utf-8
import re, base64, zlib
from hashlib import sha1
from math import pow, sqrt, floor

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    compat_urllib_parse,
    compat_urllib_request,
    bytes_to_intlist,
    intlist_to_bytes,
    unified_strdate,
    clean_html,
)
from ..aes import (
    aes_cbc_decrypt,
    inc,
)


class CrunchyrollIE(InfoExtractor):
    _VALID_URL = r'(?:https?://)?(?:www\.)?(?P<url>crunchyroll\.com/[^/]*/[^/?&]*?(?P<video_id>[0-9]+))(?:[/?&]|$)'
    _TESTS = [{
        u'url': u'http://www.crunchyroll.com/wanna-be-the-strongest-in-the-world/episode-1-an-idol-wrestler-is-born-645513',
        u'file': u'645513.flv',
        #u'md5': u'b1639fd6ddfaa43788c85f6d1dddd412',
        u'info_dict': {
            u'title': u'Wanna be the Strongest in the World Episode 1 An Idol-Wrestler is Born!',
            u'description': u'md5:2d17137920c64f2f49981a7797d275ef',
            u'thumbnail': u'http://img1.ak.crunchyroll.com/i/spire1-tmb/20c6b5e10f1a47b10516877d3c039cae1380951166_full.jpg',
            u'uploader': u'Yomiuri Telecasting Corporation (YTV)',
            u'upload_date': u'20131013',
        },
        u'params': {
            # rtmp
            u'skip_download': True,
        },
    }]

    _FORMAT_IDS = {
        u'360': (u'60', u'106'),
        u'480': (u'61', u'106'),
        u'720': (u'62', u'106'),
        u'1080': (u'80', u'108'),
    }

    def _decrypt_subtitles(self, data, iv, id):
        data = bytes_to_intlist(data)
        iv = bytes_to_intlist(iv)
        id = int(id)

        def obfuscate_key_aux(count, modulo, start):
            output = list(start)
            for _ in range(count):
                output.append(output[-1] + output[-2])
            # cut off start values
            output = output[2:]
            output = list(map(lambda x: x % modulo + 33, output))
            return output

        def obfuscate_key(key):
            num1 = int(floor(pow(2, 25) * sqrt(6.9)))
            num2 = (num1 ^ key) << 5
            num3 = key ^ num1
            num4 = num3 ^ (num3 >> 3) ^ num2
            prefix = intlist_to_bytes(obfuscate_key_aux(20, 97, (1, 2)))
            shaHash = bytes_to_intlist(sha1(prefix + str(num4).encode(u'ascii')).digest())
            # Extend 160 Bit hash to 256 Bit
            return shaHash + [0] * 12

        key = obfuscate_key(id)

        class Counter:
            __value = iv
            def next_value(self):
                temp = self.__value
                self.__value = inc(self.__value)
                return temp

        decrypted_data = intlist_to_bytes(aes_cbc_decrypt(data, key, iv))
        return zlib.decompress(decrypted_data)

    def _convert_subtitles_to_srt(self, subtitles):
        i = 1
        output = u''
        for start, end, text in re.findall(r'<event [^>]*?start="([^"]+)" [^>]*?end="([^"]+)" [^>]*?text="([^"]+)"[^>]*?>', subtitles):
            start = start.replace(u'.', u',')
            end = end.replace(u'.', u',')
            text = clean_html(text)
            text = text.replace(u'\\N', u'\n')
            if not text:
                continue
            output += u'%d\n%s --> %s\n%s\n\n' % (i, start, end, text)
            i += 1
        return output

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)

        webpage_url = u'http://www.' + mobj.group('url')
        video_id = mobj.group(u'video_id')
        webpage = self._download_webpage(webpage_url, video_id)
        note_m = self._html_search_regex(r'<div class="showmedia-trailer-notice">(.+?)</div>', webpage, u'trailer-notice', default=u'')
        if note_m:
            raise ExtractorError(note_m)

        video_title = self._html_search_regex(r'<h1[^>]*>(.+?)</h1>', webpage, u'video_title', flags=re.DOTALL)
        video_title = re.sub(r' {2,}', u' ', video_title)
        video_description = self._html_search_regex(r'"description":"([^"]+)', webpage, u'video_description', default=u'')
        if not video_description:
            video_description = None
        video_upload_date = self._html_search_regex(r'<div>Availability for free users:(.+?)</div>', webpage, u'video_upload_date', fatal=False, flags=re.DOTALL)
        if video_upload_date:
            video_upload_date = unified_strdate(video_upload_date)
        video_uploader = self._html_search_regex(r'<div>\s*Publisher:(.+?)</div>', webpage, u'video_uploader', fatal=False, flags=re.DOTALL)

        playerdata_url = compat_urllib_parse.unquote(self._html_search_regex(r'"config_url":"([^"]+)', webpage, u'playerdata_url'))
        playerdata_req = compat_urllib_request.Request(playerdata_url)
        playerdata_req.data = compat_urllib_parse.urlencode({u'current_page': webpage_url})
        playerdata_req.add_header(u'Content-Type', u'application/x-www-form-urlencoded')
        playerdata = self._download_webpage(playerdata_req, video_id, note=u'Downloading media info')

        stream_id = self._search_regex(r'<media_id>([^<]+)', playerdata, u'stream_id')
        video_thumbnail = self._search_regex(r'<episode_image_url>([^<]+)', playerdata, u'thumbnail', fatal=False)

        formats = []
        for fmt in re.findall(r'\?p([0-9]{3,4})=1', webpage):
            stream_quality, stream_format = self._FORMAT_IDS[fmt]
            video_format = fmt + u'p'
            streamdata_req = compat_urllib_request.Request(u'http://www.crunchyroll.com/xml/')
            # urlencode doesn't work!
            streamdata_req.data = u'req=RpcApiVideoEncode%5FGetStreamInfo&video%5Fencode%5Fquality=' + stream_quality + u'&media%5Fid=' + stream_id + u'&video%5Fformat=' + stream_format
            streamdata_req.add_header(u'Content-Type', u'application/x-www-form-urlencoded')
            streamdata_req.add_header(u'Content-Length', str(len(streamdata_req.data)))
            streamdata = self._download_webpage(streamdata_req, video_id, note=u'Downloading media info for ' + video_format)
            video_url = self._search_regex(r'<host>([^<]+)', streamdata, u'video_url')
            video_play_path = self._search_regex(r'<file>([^<]+)', streamdata, u'video_play_path')
            formats.append({
                u'url': video_url,
                u'play_path': video_play_path,
                u'ext': 'flv',
                u'format': video_format,
                u'format_id': video_format,
            })

        subtitles = {}
        for sub_id, sub_name in re.findall(r'\?ssid=([0-9]+)" title="([^"]+)', webpage):
            sub_page = self._download_webpage(u'http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id=' + sub_id,
                                              video_id, note=u'Downloading subtitles for ' + sub_name)
            id = self._search_regex(r'id=\'([0-9]+)', sub_page, u'subtitle_id', fatal=False)
            iv = self._search_regex(r'<iv>([^<]+)', sub_page, u'subtitle_iv', fatal=False)
            data = self._search_regex(r'<data>([^<]+)', sub_page, u'subtitle_data', fatal=False)
            if not id or not iv or not data:
                continue
            id = int(id)
            iv = base64.b64decode(iv)
            data = base64.b64decode(data)

            subtitle = self._decrypt_subtitles(data, iv, id).decode(u'utf-8')
            lang_code = self._search_regex(r'lang_code=\'([^\']+)', subtitle, u'subtitle_lang_code', fatal=False)
            if not lang_code:
                continue
            subtitles[lang_code] = self._convert_subtitles_to_srt(subtitle)

        return {
            u'id': video_id,
            u'title': video_title,
            u'description': video_description,
            u'thumbnail': video_thumbnail,
            u'uploader': video_uploader,
            u'upload_date': video_upload_date,
            u'subtitles': subtitles,
            u'formats': formats,
        }
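The subtitle key derivation in _decrypt_subtitles is fully deterministic, so the Fibonacci-style prefix it feeds into SHA-1 can be sanity-checked in isolation. A small sketch mirroring obfuscate_key_aux, not calling any youtube-dl code:

# Standalone check: seed (1, 2), extend 20 times, drop the seeds,
# then map each value into a small byte range via x % 97 + 33.
def obfuscate_key_aux(count, modulo, start):
    output = list(start)
    for _ in range(count):
        output.append(output[-1] + output[-2])
    return [x % modulo + 33 for x in output[2:]]

prefix = obfuscate_key_aux(20, 97, (1, 2))
assert len(prefix) == 20
assert all(33 <= x <= 129 for x in prefix)  # small ints, valid bytes for the SHA-1 prefix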


@@ -28,7 +28,7 @@ class DailymotionBaseInfoExtractor(InfoExtractor):

 class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
     """Information Extractor for Dailymotion"""

-    _VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/(?:embed/)?video/([^/]+)'
+    _VALID_URL = r'(?i)(?:https?://)?(?:(www|touch)\.)?dailymotion\.[a-z]{2,3}/(?:(embed|#)/)?video/(?P<id>[^/?_]+)'
     IE_NAME = u'dailymotion'

     _FORMATS = [
@@ -81,7 +81,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
         # Extract id and simplified title from URL
         mobj = re.match(self._VALID_URL, url)

-        video_id = mobj.group(1).split('_')[0].split('?')[0]
+        video_id = mobj.group('id')

         url = 'http://www.dailymotion.com/video/%s' % video_id


@@ -9,7 +9,7 @@ from ..utils import (

 class DaumIE(InfoExtractor):
-    _VALID_URL = r'https?://tvpot\.daum\.net/.*?clipid=(?P<id>\d+)'
+    _VALID_URL = r'https?://(?:m\.)?tvpot\.daum\.net/.*?clipid=(?P<id>\d+)'
     IE_NAME = u'daum.net'

     _TEST = {


@@ -17,7 +17,7 @@ from ..utils import (

 class FacebookIE(InfoExtractor):
     """Information Extractor for Facebook"""

-    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?facebook\.com/(?:video/video|photo)\.php\?(?:.*?)v=(?P<ID>\d+)(?:.*)'
+    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?facebook\.com/(?:[^#?]*#!/)?(?:video/video|photo)\.php\?(?:.*?)v=(?P<ID>\d+)(?:.*)'
     _LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1'
     _CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1'
     _NETRC_MACHINE = 'facebook'
@@ -27,7 +27,7 @@ class FacebookIE(InfoExtractor):
         u'file': u'120708114770723.mp4',
         u'md5': u'48975a41ccc4b7a581abd68651c1a5a8',
         u'info_dict': {
             u"duration": 279,
             u"title": u"PEOPLE ARE AWESOME 2013"
         }
     }


@@ -11,10 +11,14 @@ from ..utils import (
     compat_urlparse,
     ExtractorError,
+    HEADRequest,
     smuggle_url,
     unescapeHTML,
+    unified_strdate,
+    url_basename,
 )
 from .brightcove import BrightcoveIE
+from .ooyala import OoyalaIE


 class GenericIE(InfoExtractor):
@@ -71,6 +75,27 @@ class GenericIE(InfoExtractor):
                 u'skip_download': True,
             },
         },
+        # Direct link to a video
+        {
+            u'url': u'http://media.w3.org/2010/05/sintel/trailer.mp4',
+            u'file': u'trailer.mp4',
+            u'md5': u'67d406c2bcb6af27fa886f31aa934bbe',
+            u'info_dict': {
+                u'id': u'trailer',
+                u'title': u'trailer',
+                u'upload_date': u'20100513',
+            }
+        },
+        # ooyala video
+        {
+            u'url': u'http://www.rollingstone.com/music/videos/norwegian-dj-cashmere-cat-goes-spartan-on-with-me-premiere-20131219',
+            u'md5': u'5644c6ca5d5782c1d0d350dad9bd840c',
+            u'info_dict': {
+                u'id': u'BwY2RxaTrTkslxOfcan0UCf0YqyvWysJ',
+                u'ext': u'mp4',
+                u'title': u'2cc213299525360.mov',  # that's what we get
+            },
+        },
     ]

     def report_download_webpage(self, video_id):
@@ -83,23 +108,20 @@ class GenericIE(InfoExtractor):
         """Report information extraction."""
         self._downloader.to_screen(u'[redirect] Following redirect to %s' % new_url)

-    def _test_redirect(self, url):
+    def _send_head(self, url):
         """Check if it is a redirect, like url shorteners, in case return the new url."""
-        class HeadRequest(compat_urllib_request.Request):
-            def get_method(self):
-                return "HEAD"

         class HEADRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
             """
             Subclass the HTTPRedirectHandler to make it use our
-            HeadRequest also on the redirected URL
+            HEADRequest also on the redirected URL
             """
             def redirect_request(self, req, fp, code, msg, headers, newurl):
                 if code in (301, 302, 303, 307):
                     newurl = newurl.replace(' ', '%20')
                     newheaders = dict((k,v) for k,v in req.headers.items()
                                       if k.lower() not in ("content-length", "content-type"))
-                    return HeadRequest(newurl,
+                    return HEADRequest(newurl,
                                        headers=newheaders,
                                        origin_req_host=req.get_origin_req_host(),
                                        unverifiable=True)
@@ -128,32 +150,49 @@ class GenericIE(InfoExtractor):
                           compat_urllib_request.HTTPErrorProcessor, compat_urllib_request.HTTPSHandler]:
             opener.add_handler(handler())

-        response = opener.open(HeadRequest(url))
+        response = opener.open(HEADRequest(url))
         if response is None:
             raise ExtractorError(u'Invalid URL protocol')
-        new_url = response.geturl()
-
-        if url == new_url:
-            return False
-
-        self.report_following_redirect(new_url)
-        return new_url
+        return response

     def _real_extract(self, url):
         parsed_url = compat_urlparse.urlparse(url)
         if not parsed_url.scheme:
             self._downloader.report_warning('The url doesn\'t specify the protocol, trying with http')
             return self.url_result('http://' + url)

+        video_id = os.path.splitext(url.split('/')[-1])[0]
+
         try:
-            new_url = self._test_redirect(url)
-            if new_url:
-                return [self.url_result(new_url)]
+            response = self._send_head(url)
+
+            # Check for redirect
+            new_url = response.geturl()
+            if url != new_url:
+                self.report_following_redirect(new_url)
+                return self.url_result(new_url)
+
+            # Check for direct link to a video
+            content_type = response.headers.get('Content-Type', '')
+            m = re.match(r'^(?P<type>audio|video|application(?=/ogg$))/(?P<format_id>.+)$', content_type)
+            if m:
+                upload_date = response.headers.get('Last-Modified')
+                if upload_date:
+                    upload_date = unified_strdate(upload_date)
+                return {
+                    'id': video_id,
+                    'title': os.path.splitext(url_basename(url))[0],
+                    'formats': [{
+                        'format_id': m.group('format_id'),
+                        'url': url,
+                        'vcodec': u'none' if m.group('type') == 'audio' else None
+                    }],
+                    'upload_date': upload_date,
+                }
+
         except compat_urllib_error.HTTPError:
             # This may be a stupid server that doesn't like HEAD, our UA, or so
             pass
-        video_id = url.split('/')[-1]
         try:
             webpage = self._download_webpage(url, video_id)
         except ValueError:
@@ -183,7 +222,7 @@ class GenericIE(InfoExtractor):
             self.to_screen(u'Brightcove video detected.')
             return self.url_result(bc_url, 'Brightcove')

-        # Look for embedded Vimeo player
+        # Look for embedded (iframe) Vimeo player
         mobj = re.search(
             r'<iframe[^>]+?src="(https?://player.vimeo.com/video/.+?)"', webpage)
         if mobj:
@@ -191,9 +230,18 @@ class GenericIE(InfoExtractor):
             surl = smuggle_url(player_url, {'Referer': url})
             return self.url_result(surl, 'Vimeo')

+        # Look for embedded (swf embed) Vimeo player
+        mobj = re.search(
+            r'<embed[^>]+?src="(https?://(?:www\.)?vimeo.com/moogaloop.swf.+?)"', webpage)
+        if mobj:
+            return self.url_result(mobj.group(1), 'Vimeo')
+
         # Look for embedded YouTube player
-        matches = re.findall(
-            r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?youtube\.com/embed/.+?)\1', webpage)
+        matches = re.findall(r'''(?x)
+            (?:<iframe[^>]+?src=|embedSWF\(\s*)
+            (["\'])(?P<url>(?:https?:)?//(?:www\.)?youtube\.com/
+            (?:embed|v)/.+?)
+            \1''', webpage)
         if matches:
             urlrs = [self.url_result(unescapeHTML(tuppl[1]), 'Youtube')
                      for tuppl in matches]
@@ -222,6 +270,18 @@ class GenericIE(InfoExtractor):
             'id': video_id,
         }

+        # Look for embedded blip.tv player
+        mobj = re.search(r'<meta\s[^>]*https?://api.blip.tv/\w+/redirect/\w+/(\d+)', webpage)
+        if mobj:
+            return self.url_result('http://blip.tv/seo/-' + mobj.group(1), 'BlipTV')
+        mobj = re.search(r'<(?:iframe|embed|object)\s[^>]*https?://(?:\w+\.)?blip.tv/(?:play/|api\.swf#)([a-zA-Z0-9]+)', webpage)
+        if mobj:
+            player_url = 'http://blip.tv/play/%s.x?p=1' % mobj.group(1)
+            player_page = self._download_webpage(player_url, mobj.group(1))
+            blip_video_id = self._search_regex(r'data-episode-id="(\d+)', player_page, u'blip_video_id', fatal=False)
+            if blip_video_id:
+                return self.url_result('http://blip.tv/seo/-' + blip_video_id, 'BlipTV')
+
         # Look for Bandcamp pages with custom domain
         mobj = re.search(r'<meta property="og:url"[^>]*?content="(.*?bandcamp\.com.*?)"', webpage)
         if mobj is not None:
@@ -229,6 +289,22 @@ class GenericIE(InfoExtractor):
             # Don't set the extractor because it can be a track url or an album
             return self.url_result(burl)

+        # Look for embedded Vevo player
+        mobj = re.search(
+            r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:cache\.)?vevo\.com/.+?)\1', webpage)
+        if mobj is not None:
+            return self.url_result(mobj.group('url'))
+
+        # Look for Ooyala videos
+        mobj = re.search(r'player.ooyala.com/[^"?]+\?[^"]*?(?:embedCode|ec)=([^"&]+)', webpage)
+        if mobj is not None:
+            return OoyalaIE._build_url_result(mobj.group(1))
+
+        # Look for Aparat videos
+        mobj = re.search(r'<iframe src="(http://www.aparat.com/video/[^"]+)"', webpage)
+        if mobj is not None:
+            return self.url_result(mobj.group(1), 'Aparat')
+
         # Start with something easy: JW Player in SWFObject
         mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
         if mobj is None:
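In the direct-link branch added above, any audio/* or video/* Content-Type counts as playable media, while the lookahead admits application/ogg but no other application/* type. A quick demo of that regex, using the pattern verbatim:

import re

# Same pattern as in the direct-link check above.
pattern = r'^(?P<type>audio|video|application(?=/ogg$))/(?P<format_id>.+)$'

assert re.match(pattern, 'video/mp4').group('format_id') == 'mp4'
assert re.match(pattern, 'audio/mpeg').group('type') == 'audio'
assert re.match(pattern, 'application/ogg') is not None  # allowed by the lookahead
assert re.match(pattern, 'application/json') is None     # other application/* rejected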


@@ -44,7 +44,7 @@ class IGNIE(InfoExtractor):
         {
             u'file': u'638672ee848ae4ff108df2a296418ee2.mp4',
             u'info_dict': {
-                u'title': u'GTA 5\'s Twisted Beauty in Super Slow Motion',
+                u'title': u'26 Twisted Moments from GTA 5 in Slow Motion',
                 u'description': u'The twisted beauty of GTA 5 in stunning slow motion.',
             },
         },


@@ -11,7 +11,7 @@ from ..utils import (

 class ImdbIE(InfoExtractor):
     IE_NAME = u'imdb'
     IE_DESC = u'Internet Movie Database trailers'
-    _VALID_URL = r'http://www\.imdb\.com/video/imdb/vi(?P<id>\d+)'
+    _VALID_URL = r'http://(?:www|m)\.imdb\.com/video/imdb/vi(?P<id>\d+)'

     _TEST = {
         u'url': u'http://www.imdb.com/video/imdb/vi2524815897',
@@ -27,7 +27,7 @@ class ImdbIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('id')
-        webpage = self._download_webpage(url, video_id)
+        webpage = self._download_webpage('http://www.imdb.com/video/imdb/vi%s' % video_id, video_id)
         descr = get_element_by_attribute('itemprop', 'description', webpage)
         available_formats = re.findall(
             r'case \'(?P<f_id>.*?)\' :$\s+url = \'(?P<path>.*?)\'', webpage,

youtube_dl/extractor/ivi.py (new file, 154 lines)

@ -0,0 +1,154 @@
# encoding: utf-8
import re
import json
from .common import InfoExtractor
from ..utils import (
compat_urllib_request,
    ExtractorError,
)


class IviIE(InfoExtractor):
    IE_DESC = u'ivi.ru'
    IE_NAME = u'ivi'
    _VALID_URL = r'^https?://(?:www\.)?ivi\.ru/watch(?:/(?P<compilationid>[^/]+))?/(?P<videoid>\d+)'

    _TESTS = [
        # Single movie
        {
            u'url': u'http://www.ivi.ru/watch/53141',
            u'file': u'53141.mp4',
            u'md5': u'6ff5be2254e796ed346251d117196cf4',
            u'info_dict': {
                u'title': u'Иван Васильевич меняет профессию',
                u'description': u'md5:14d8eda24e9d93d29b5857012c6d6346',
                u'duration': 5498,
                u'thumbnail': u'http://thumbs.ivi.ru/f20.vcp.digitalaccess.ru/contents/d/1/c3c885163a082c29bceeb7b5a267a6.jpg',
            },
            u'skip': u'Only works from Russia',
        },
        # Episode of a series
        {
            u'url': u'http://www.ivi.ru/watch/dezhurnyi_angel/74791',
            u'file': u'74791.mp4',
            u'md5': u'3e6cc9a848c1d2ebcc6476444967baa9',
            u'info_dict': {
                u'title': u'Дежурный ангел - 1 серия',
                u'duration': 2490,
                u'thumbnail': u'http://thumbs.ivi.ru/f7.vcp.digitalaccess.ru/contents/8/e/bc2f6c2b6e5d291152fdd32c059141.jpg',
            },
            u'skip': u'Only works from Russia',
        },
    ]

    # Sorted by quality
    _known_formats = ['MP4-low-mobile', 'MP4-mobile', 'FLV-lo', 'MP4-lo', 'FLV-hi', 'MP4-hi', 'MP4-SHQ']

    # Sorted by size
    _known_thumbnails = ['Thumb-120x90', 'Thumb-160', 'Thumb-640x480']

    def _extract_description(self, html):
        m = re.search(r'<meta name="description" content="(?P<description>[^"]+)"/>', html)
        return m.group('description') if m is not None else None

    def _extract_comment_count(self, html):
        m = re.search(
            u'(?s)<a href="#" id="view-comments" class="action-button dim gradient">\s*Комментарии:\s*(?P<commentcount>\d+)\s*</a>',
            html)
        return int(m.group('commentcount')) if m is not None else 0

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('videoid')

        api_url = 'http://api.digitalaccess.ru/api/json/'

        data = {u'method': u'da.content.get',
                u'params': [video_id, {u'site': u's183',
                                       u'referrer': u'http://www.ivi.ru/watch/%s' % video_id,
                                       u'contentid': video_id
                                       }
                            ]
                }

        request = compat_urllib_request.Request(api_url, json.dumps(data))

        video_json_page = self._download_webpage(request, video_id, u'Downloading video JSON')
        video_json = json.loads(video_json_page)

        if u'error' in video_json:
            error = video_json[u'error']
            if error[u'origin'] == u'NoRedisValidData':
                raise ExtractorError(u'Video %s does not exist' % video_id, expected=True)
            raise ExtractorError(u'Unable to download video %s: %s' % (video_id, error[u'message']), expected=True)

        result = video_json[u'result']

        formats = [{'url': x[u'url'],
                    'format_id': x[u'content_format']
                    } for x in result[u'files'] if x[u'content_format'] in self._known_formats]
        formats.sort(key=lambda fmt: self._known_formats.index(fmt['format_id']))

        if len(formats) == 0:
            self._downloader.report_warning(u'No media links available for %s' % video_id)
            return

        duration = result[u'duration']
        compilation = result[u'compilation']
        title = result[u'title']

        title = '%s - %s' % (compilation, title) if compilation is not None else title

        previews = result[u'preview']
        previews.sort(key=lambda fmt: self._known_thumbnails.index(fmt['content_format']))
        thumbnail = previews[-1][u'url'] if len(previews) > 0 else None

        video_page = self._download_webpage(url, video_id, u'Downloading video page')
        description = self._extract_description(video_page)
        comment_count = self._extract_comment_count(video_page)

        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'description': description,
            'duration': duration,
            'comment_count': comment_count,
            'formats': formats,
        }


class IviCompilationIE(InfoExtractor):
    IE_DESC = u'ivi.ru compilations'
    IE_NAME = u'ivi:compilation'
    _VALID_URL = r'^https?://(?:www\.)?ivi\.ru/watch/(?!\d+)(?P<compilationid>[a-z\d_-]+)(?:/season(?P<seasonid>\d+))?$'

    def _extract_entries(self, html, compilation_id):
        return [self.url_result('http://www.ivi.ru/watch/%s/%s' % (compilation_id, serie), 'Ivi')
                for serie in re.findall(r'<strong><a href="/watch/%s/(\d+)">(?:[^<]+)</a></strong>' % compilation_id, html)]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        compilation_id = mobj.group('compilationid')
        season_id = mobj.group('seasonid')

        if season_id is not None:  # Season link
            season_page = self._download_webpage(url, compilation_id, u'Downloading season %s web page' % season_id)
            playlist_id = '%s/season%s' % (compilation_id, season_id)
            playlist_title = self._html_search_meta(u'title', season_page, u'title')
            entries = self._extract_entries(season_page, compilation_id)
        else:  # Compilation link
            compilation_page = self._download_webpage(url, compilation_id, u'Downloading compilation web page')
            playlist_id = compilation_id
            playlist_title = self._html_search_meta(u'title', compilation_page, u'title')
            seasons = re.findall(r'<a href="/watch/%s/season(\d+)">[^<]+</a>' % compilation_id, compilation_page)
            if len(seasons) == 0:  # No seasons in this compilation
                entries = self._extract_entries(compilation_page, compilation_id)
            else:
                entries = []
                for season_id in seasons:
                    season_page = self._download_webpage('http://www.ivi.ru/watch/%s/season%s' % (compilation_id, season_id),
                                                         compilation_id, u'Downloading season %s web page' % season_id)
                    entries.extend(self._extract_entries(season_page, compilation_id))

        return self.playlist_result(entries, playlist_id, playlist_title)
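
The extractor above talks to ivi's content API with a single JSON-RPC-style POST. A minimal standalone sketch of that exchange (hypothetical helper name; assumes the api.digitalaccess.ru endpoint and the s183 site id behave as the extractor expects):

import json
import urllib2

def fetch_ivi_metadata(video_id):
    # Same payload shape as IviIE._real_extract builds (illustrative only)
    payload = {
        'method': 'da.content.get',
        'params': [video_id, {
            'site': 's183',
            'referrer': 'http://www.ivi.ru/watch/%s' % video_id,
            'contentid': video_id,
        }],
    }
    request = urllib2.Request('http://api.digitalaccess.ru/api/json/', json.dumps(payload))
    response = json.loads(urllib2.urlopen(request).read())
    if 'error' in response:
        raise IOError(response['error']['message'])
    # 'result' carries title, duration, files (the format list) and preview thumbnails
    return response['result']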

youtube_dl/extractor/mdr.py

@@ -0,0 +1,63 @@
import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
)


class MDRIE(InfoExtractor):
    _VALID_URL = r'^(?P<domain>(?:https?://)?(?:www\.)?mdr\.de)/mediathek/(?:.*)/(?P<type>video|audio)(?P<video_id>[^/_]+)_.*'

    # No tests, MDR regularly deletes its videos
    def _real_extract(self, url):
        m = re.match(self._VALID_URL, url)
        video_id = m.group('video_id')
        domain = m.group('domain')

        # determine title and media streams from webpage
        html = self._download_webpage(url, video_id)
        title = self._html_search_regex(r'<h2>(.*?)</h2>', html, u'title')
        xmlurl = self._search_regex(
            r'(/mediathek/(?:.+)/(?:video|audio)[0-9]+-avCustom.xml)', html, u'XML URL')

        doc = self._download_xml(domain + xmlurl, video_id)
        formats = []
        for a in doc.findall('./assets/asset'):
            url_el = a.find('.//progressiveDownloadUrl')
            if url_el is None:
                continue
            abr = int(a.find('bitrateAudio').text) // 1000
            media_type = a.find('mediaType').text
            format = {
                'abr': abr,
                'filesize': int(a.find('fileSize').text),
                'url': url_el.text,
            }

            vbr_el = a.find('bitrateVideo')
            if vbr_el is None:
                format.update({
                    'vcodec': 'none',
                    'format_id': u'%s-%d' % (media_type, abr),
                })
            else:
                vbr = int(vbr_el.text) // 1000
                format.update({
                    'vbr': vbr,
                    'width': int(a.find('frameWidth').text),
                    'height': int(a.find('frameHeight').text),
                    'format_id': u'%s-%d' % (media_type, vbr),
                })
            formats.append(format)
        formats.sort(key=lambda f: (f.get('vbr'), f['abr']))
        if not formats:
            raise ExtractorError(u'Could not find any valid formats')

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
        }
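
One subtlety in the sort above: under Python 2, which this code targets, None compares as smaller than any integer, so audio-only assets (which carry no vbr) end up before all video assets. A quick illustration with made-up entries:

# Python 2 semantics: None < any int, so the audio-only entry sorts first
formats = [{'vbr': 500, 'abr': 128}, {'abr': 64}, {'vbr': 1000, 'abr': 192}]
formats.sort(key=lambda f: (f.get('vbr'), f['abr']))
print [f.get('vbr') for f in formats]  # [None, 500, 1000]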

youtube_dl/extractor/mtv.py

@@ -93,7 +93,9 @@ class MTVServicesInfoExtractor(InfoExtractor):

 class MTVIE(MTVServicesInfoExtractor):
-    _VALID_URL = r'^https?://(?:www\.)?mtv\.com/videos/.+?/(?P<videoid>[0-9]+)/[^/]+$'
+    _VALID_URL = r'''(?x)^https?://
+        (?:(?:www\.)?mtv\.com/videos/.+?/(?P<videoid>[0-9]+)/[^/]+$|
+           m\.mtv\.com/videos/video\.rbml\?.*?id=(?P<mgid>[^&]+))'''

     _FEED_URL = 'http://www.mtv.com/player/embed/AS3/rss/'

@@ -127,16 +129,17 @@ class MTVIE(MTVServicesInfoExtractor):
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('videoid')
+        uri = mobj.group('mgid')
+        if uri is None:
+            webpage = self._download_webpage(url, video_id)

-        webpage = self._download_webpage(url, video_id)
-
-        # Some videos come from Vevo.com
-        m_vevo = re.search(r'isVevoVideo = true;.*?vevoVideoId = "(.*?)";',
-                           webpage, re.DOTALL)
-        if m_vevo:
-            vevo_id = m_vevo.group(1);
-            self.to_screen(u'Vevo video detected: %s' % vevo_id)
-            return self.url_result('vevo:%s' % vevo_id, ie='Vevo')
+            # Some videos come from Vevo.com
+            m_vevo = re.search(r'isVevoVideo = true;.*?vevoVideoId = "(.*?)";',
+                               webpage, re.DOTALL)
+            if m_vevo:
+                vevo_id = m_vevo.group(1);
+                self.to_screen(u'Vevo video detected: %s' % vevo_id)
+                return self.url_result('vevo:%s' % vevo_id, ie='Vevo')

-        uri = self._html_search_regex(r'/uri/(.*?)\?', webpage, u'uri')
+            uri = self._html_search_regex(r'/uri/(.*?)\?', webpage, u'uri')
         return self._get_videos_info(uri)

youtube_dl/extractor/naver.py

@@ -9,7 +9,7 @@ from ..utils import (

 class NaverIE(InfoExtractor):
-    _VALID_URL = r'https?://tvcast\.naver\.com/v/(?P<id>\d+)'
+    _VALID_URL = r'https?://(?:m\.)?tvcast\.naver\.com/v/(?P<id>\d+)'

     _TEST = {
         u'url': u'http://tvcast.naver.com/v/81652',


@@ -1,6 +1,4 @@
-import json
 import re
-import time

 from .common import InfoExtractor
 from ..utils import month_by_name

youtube_dl/extractor/ooyala.py

@@ -22,6 +22,11 @@ class OoyalaIE(InfoExtractor):
     def _url_for_embed_code(embed_code):
         return 'http://player.ooyala.com/player.js?embedCode=%s' % embed_code

+    @classmethod
+    def _build_url_result(cls, embed_code):
+        return cls.url_result(cls._url_for_embed_code(embed_code),
+                              ie=cls.ie_key())
+
     def _extract_result(self, info, more_info):
         return {'id': info['embedCode'],
                 'ext': 'mp4',

youtube_dl/extractor/pornhd.py

@@ -0,0 +1,38 @@
import re

from .common import InfoExtractor
from ..utils import compat_urllib_parse


class PornHdIE(InfoExtractor):
    _VALID_URL = r'(?:http://)?(?:www\.)?pornhd\.com/videos/(?P<video_id>[0-9]+)/(?P<video_title>.+)'
    _TEST = {
        u'url': u'http://www.pornhd.com/videos/1962/sierra-day-gets-his-cum-all-over-herself-hd-porn-video',
        u'file': u'1962.flv',
        u'md5': u'35272469887dca97abd30abecc6cdf75',
        u'info_dict': {
            u"title": u"sierra-day-gets-his-cum-all-over-herself-hd-porn-video",
            u"age_limit": 18,
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)

        video_id = mobj.group('video_id')
        video_title = mobj.group('video_title')

        webpage = self._download_webpage(url, video_id)

        video_url = self._html_search_regex(
            r'&hd=(http.+?)&', webpage, u'video URL')
        video_url = compat_urllib_parse.unquote(video_url)
        age_limit = 18

        return {
            'id': video_id,
            'url': video_url,
            'ext': 'flv',
            'title': video_title,
            'age_limit': age_limit,
        }

youtube_dl/extractor/radiofrance.py

@@ -0,0 +1,55 @@
# coding: utf-8
import re

from .common import InfoExtractor


class RadioFranceIE(InfoExtractor):
    _VALID_URL = r'^https?://maison\.radiofrance\.fr/radiovisions/(?P<id>[^?#]+)'
    IE_NAME = u'radiofrance'

    _TEST = {
        u'url': u'http://maison.radiofrance.fr/radiovisions/one-one',
        u'file': u'one-one.ogg',
        u'md5': u'bdbb28ace95ed0e04faab32ba3160daf',
        u'info_dict': {
            u"title": u"One to one",
            u"description": u"Plutôt que d'imaginer la radio de demain comme technologie ou comme création de contenu, je veux montrer que quelles que soient ses évolutions, j'ai l'intime conviction que la radio continuera d'être un grand média de proximité pour les auditeurs.",
            u"uploader": u"Thomas Hercouët",
        },
    }

    def _real_extract(self, url):
        m = re.match(self._VALID_URL, url)
        video_id = m.group('id')

        webpage = self._download_webpage(url, video_id)
        title = self._html_search_regex(r'<h1>(.*?)</h1>', webpage, u'title')
        description = self._html_search_regex(
            r'<div class="bloc_page_wrapper"><div class="text">(.*?)</div>',
            webpage, u'description', fatal=False)
        uploader = self._html_search_regex(
            r'<div class="credit">&nbsp;&nbsp;&copy;&nbsp;(.*?)</div>',
            webpage, u'uploader', fatal=False)

        formats_str = self._html_search_regex(
            r'class="jp-jplayer[^"]*" data-source="([^"]+)">',
            webpage, u'audio URLs')
        formats = [
            {
                'format_id': fm[0],
                'url': fm[1],
                'vcodec': 'none',
            }
            for fm in
            re.findall(r"([a-z0-9]+)\s*:\s*'([^']+)'", formats_str)
        ]
        # No sorting, we don't know any more about these formats

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'description': description,
            'uploader': uploader,
        }

youtube_dl/extractor/rtlnow.py

@@ -10,7 +10,7 @@ from ..utils import (

 class RTLnowIE(InfoExtractor):
     """Information Extractor for RTL NOW, RTL2 NOW, RTL NITRO, SUPER RTL NOW, VOX NOW and n-tv NOW"""
-    _VALID_URL = r'(?:http://)?(?P<url>(?P<base_url>rtl-now\.rtl\.de|rtl2now\.rtl2\.de|(?:www\.)?voxnow\.de|(?:www\.)?rtlnitronow\.de|(?:www\.)?superrtlnow\.de|(?:www\.)?n-tvnow\.de)/+[a-zA-Z0-9-]+/[a-zA-Z0-9-]+\.php\?(?:container_id|film_id)=(?P<video_id>[0-9]+)&player=1(?:&season=[0-9]+)?(?:&.*)?)'
+    _VALID_URL = r'(?:http://)?(?P<url>(?P<domain>rtl-now\.rtl\.de|rtl2now\.rtl2\.de|(?:www\.)?voxnow\.de|(?:www\.)?rtlnitronow\.de|(?:www\.)?superrtlnow\.de|(?:www\.)?n-tvnow\.de)/+[a-zA-Z0-9-]+/[a-zA-Z0-9-]+\.php\?(?:container_id|film_id)=(?P<video_id>[0-9]+)&player=1(?:&season=[0-9]+)?(?:&.*)?)'
     _TESTS = [{
         u'url': u'http://rtl-now.rtl.de/ahornallee/folge-1.php?film_id=90419&player=1&season=1',
         u'file': u'90419.flv',
@@ -82,7 +82,7 @@ class RTLnowIE(InfoExtractor):
         mobj = re.match(self._VALID_URL, url)

         webpage_url = u'http://' + mobj.group('url')
-        video_page_url = u'http://' + mobj.group('base_url')
+        video_page_url = u'http://' + mobj.group('domain') + u'/'
         video_id = mobj.group(u'video_id')

         webpage = self._download_webpage(webpage_url, video_id)

youtube_dl/extractor/smotri.py

@@ -1,5 +1,6 @@
 # encoding: utf-8

+import os.path
 import re
 import json
 import hashlib
@@ -10,6 +11,7 @@ from ..utils import (
     compat_urllib_parse,
     compat_urllib_request,
     ExtractorError,
+    url_basename,
 )

@@ -132,7 +134,16 @@ class SmotriIE(InfoExtractor):
         # We will extract some from the video web page instead
         video_page_url = 'http://' + mobj.group('url')
         video_page = self._download_webpage(video_page_url, video_id, u'Downloading video page')

+        # Warning if video is unavailable
+        warning = self._html_search_regex(
+            r'<div class="videoUnModer">(.*?)</div>', video_page,
+            u'warning message', default=None)
+        if warning is not None:
+            self._downloader.report_warning(
+                u'Video %s may not be available; smotri said: %s ' %
+                (video_id, warning))
+
         # Adult content
         if re.search(u'EroConfirmText">', video_page) is not None:
             self.report_age_confirmation()
@@ -148,38 +159,44 @@ class SmotriIE(InfoExtractor):
         # Extract the rest of meta data
         video_title = self._search_meta(u'name', video_page, u'title')
         if not video_title:
-            video_title = video_url.rsplit('/', 1)[-1]
+            video_title = os.path.splitext(url_basename(video_url))[0]

         video_description = self._search_meta(u'description', video_page)
         END_TEXT = u' на сайте Smotri.com'
-        if video_description.endswith(END_TEXT):
+        if video_description and video_description.endswith(END_TEXT):
             video_description = video_description[:-len(END_TEXT)]
         START_TEXT = u'Смотреть онлайн ролик '
-        if video_description.startswith(START_TEXT):
+        if video_description and video_description.startswith(START_TEXT):
             video_description = video_description[len(START_TEXT):]
         video_thumbnail = self._search_meta(u'thumbnail', video_page)

         upload_date_str = self._search_meta(u'uploadDate', video_page, u'upload date')
-        upload_date_m = re.search(r'(?P<year>\d{4})\.(?P<month>\d{2})\.(?P<day>\d{2})T', upload_date_str)
-        video_upload_date = (
-            (
-                upload_date_m.group('year') +
-                upload_date_m.group('month') +
-                upload_date_m.group('day')
-            )
-            if upload_date_m else None
-        )
+        if upload_date_str:
+            upload_date_m = re.search(r'(?P<year>\d{4})\.(?P<month>\d{2})\.(?P<day>\d{2})T', upload_date_str)
+            video_upload_date = (
+                (
+                    upload_date_m.group('year') +
+                    upload_date_m.group('month') +
+                    upload_date_m.group('day')
+                )
+                if upload_date_m else None
+            )
+        else:
+            video_upload_date = None

         duration_str = self._search_meta(u'duration', video_page)
-        duration_m = re.search(r'T(?P<hours>[0-9]{2})H(?P<minutes>[0-9]{2})M(?P<seconds>[0-9]{2})S', duration_str)
-        video_duration = (
-            (
-                (int(duration_m.group('hours')) * 60 * 60) +
-                (int(duration_m.group('minutes')) * 60) +
-                int(duration_m.group('seconds'))
-            )
-            if duration_m else None
-        )
+        if duration_str:
+            duration_m = re.search(r'T(?P<hours>[0-9]{2})H(?P<minutes>[0-9]{2})M(?P<seconds>[0-9]{2})S', duration_str)
+            video_duration = (
+                (
+                    (int(duration_m.group('hours')) * 60 * 60) +
+                    (int(duration_m.group('minutes')) * 60) +
+                    int(duration_m.group('seconds'))
+                )
+                if duration_m else None
+            )
+        else:
+            video_duration = None

         video_uploader = self._html_search_regex(
             u'<div class="DescrUser"><div>Автор.*?onmouseover="popup_user_info[^"]+">(.*?)</a>',
@@ -202,7 +219,7 @@ class SmotriIE(InfoExtractor):
             'uploader': video_uploader,
             'upload_date': video_upload_date,
             'uploader_id': video_uploader_id,
-            'video_duration': video_duration,
+            'duration': video_duration,
             'view_count': video_view_count,
             'age_limit': 18 if adult_content else 0,
             'video_page_url': video_page_url

youtube_dl/extractor/soundcloud.py

@@ -24,7 +24,7 @@ class SoundcloudIE(InfoExtractor):
     """

     _VALID_URL = r'''^(?:https?://)?
-                    (?:(?:(?:www\.)?soundcloud\.com/
+                    (?:(?:(?:www\.|m\.)?soundcloud\.com/
                         (?P<uploader>[\w\d-]+)/
                         (?!sets/)(?P<title>[\w\d-]+)/?
                         (?P<token>[^?]+?)?(?:[?].*)?$)

youtube_dl/extractor/theplatform.py

@@ -3,6 +3,7 @@ import json

 from .common import InfoExtractor
 from ..utils import (
+    ExtractorError,
     xpath_with_ns,
 )

@@ -32,6 +33,17 @@ class ThePlatformIE(InfoExtractor):
         smil_url = ('http://link.theplatform.com/s/dJ5BDC/{0}/meta.smil?'
                     'format=smil&mbr=true'.format(video_id))
         meta = self._download_xml(smil_url, video_id)
+
+        try:
+            error_msg = next(
+                n.attrib['abstract']
+                for n in meta.findall(_x('.//smil:ref'))
+                if n.attrib.get('title') == u'Geographic Restriction')
+        except StopIteration:
+            pass
+        else:
+            raise ExtractorError(error_msg, expected=True)
+
         info_url = 'http://link.theplatform.com/s/dJ5BDC/{0}?format=preview'.format(video_id)
         info_json = self._download_webpage(info_url, video_id)
         info = json.loads(info_json)
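
The try/except/else around next() above is easy to misread: next() raises StopIteration when no smil:ref node carries the 'Geographic Restriction' title, so the else branch (the raise) runs only when a restriction notice was actually found. A standalone illustration of the idiom, with made-up data:

refs = [
    {'title': 'Geographic Restriction', 'abstract': 'Not available in your region'},
    {'title': 'something-else', 'abstract': ''},
]
try:
    error_msg = next(r['abstract'] for r in refs
                     if r.get('title') == 'Geographic Restriction')
except StopIteration:
    pass  # no restriction notice found; carry on
else:
    print 'geo-blocked: %s' % error_msg  # runs only when next() succeeded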

youtube_dl/extractor/vbox7.py

@@ -15,7 +15,7 @@ class Vbox7IE(InfoExtractor):
     _TEST = {
         u'url': u'http://vbox7.com/play:249bb972c2',
         u'file': u'249bb972c2.flv',
-        u'md5': u'9c70d6d956f888bdc08c124acc120cfe',
+        u'md5': u'99f65c0c9ef9b682b97313e052734c3f',
         u'info_dict': {
             u"title": u"\u0421\u043c\u044f\u0445! \u0427\u0443\u0434\u043e - \u0447\u0438\u0441\u0442 \u0437\u0430 \u0441\u0435\u043a\u0443\u043d\u0434\u0438 - \u0421\u043a\u0440\u0438\u0442\u0430 \u043a\u0430\u043c\u0435\u0440\u0430"
         }

youtube_dl/extractor/vevo.py

@@ -15,7 +15,12 @@ class VevoIE(InfoExtractor):
     Accepts urls from vevo.com or in the format 'vevo:{id}'
     (currently used by MTVIE)
     """
-    _VALID_URL = r'((http://www\.vevo\.com/watch/(?:[^/]+/[^/]+/)?)|(vevo:))(?P<id>.*?)(\?|$)'
+    _VALID_URL = r'''(?x)
+        (?:https?://www\.vevo\.com/watch/(?:[^/]+/[^/]+/)?|
+           https?://cache\.vevo\.com/m/html/embed\.html\?video=|
+           https?://videoplayer\.vevo\.com/embed/embedded\?videoId=|
+           vevo:)
+        (?P<id>[^&?#]+)'''
     _TESTS = [{
         u'url': u'http://www.vevo.com/watch/hurts/somebody-to-die-for/GB1101300280',
         u'file': u'GB1101300280.mp4',

youtube_dl/extractor/videopremium.py

@@ -15,6 +15,7 @@ class VideoPremiumIE(InfoExtractor):
         u'params': {
             u'skip_download': True,
         },
+        u'skip': u'Test file has been deleted.',
     }

     def _real_extract(self, url):

youtube_dl/extractor/vimeo.py

@@ -16,11 +16,20 @@ from ..utils import (
     unsmuggle_url,
 )

 class VimeoIE(InfoExtractor):
     """Information extractor for vimeo.com."""

     # _VALID_URL matches Vimeo URLs
-    _VALID_URL = r'(?P<proto>https?://)?(?:(?:www|(?P<player>player))\.)?vimeo(?P<pro>pro)?\.com/(?:.*?/)?(?P<direct_link>play_redirect_hls\?clip_id=)?(?:videos?/)?(?P<id>[0-9]+)/?(?:[?].*)?(?:#.*)?$'
+    _VALID_URL = r'''(?x)
+        (?P<proto>https?://)?
+        (?:(?:www|(?P<player>player))\.)?
+        vimeo(?P<pro>pro)?\.com/
+        (?:.*?/)?
+        (?:(?:play_redirect_hls|moogaloop\.swf)\?clip_id=)?
+        (?:videos?/)?
+        (?P<id>[0-9]+)
+        /?(?:[?&].*)?(?:[#].*)?$'''
     _NETRC_MACHINE = 'vimeo'
     IE_NAME = u'vimeo'
     _TESTS = [
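
Several patterns in this compare (MTVIE, VevoIE and VimeoIE above) move to (?x) verbose regexes; the inline flag makes re ignore unescaped whitespace, so a long URL pattern can be split across lines without changing what it matches. A quick sanity check with a simplified pattern, not the full one above:

import re

# (?x) lets the pattern span lines; both forms match the same URL
compact = r'https?://(?:www\.)?vimeo\.com/(?P<id>[0-9]+)'
verbose = r'''(?x)
    https?://(?:www\.)?
    vimeo\.com/
    (?P<id>[0-9]+)'''
url = 'http://vimeo.com/56015672'
assert re.match(compact, url).group('id') == re.match(verbose, url).group('id') == '56015672'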

youtube_dl/extractor/xtube.py

@@ -32,7 +32,7 @@ class XTubeIE(InfoExtractor):
         video_title = self._html_search_regex(r'<div class="p_5px[^>]*>([^<]+)', webpage, u'title')
         video_uploader = self._html_search_regex(r'so_s\.addVariable\("owner_u", "([^"]+)', webpage, u'uploader', fatal=False)
-        video_description = self._html_search_regex(r'<p class="video_description">([^<]+)', webpage, u'description', default=None)
+        video_description = self._html_search_regex(r'<p class="video_description">([^<]+)', webpage, u'description', fatal=False)
         video_url= self._html_search_regex(r'var videoMp4 = "([^"]+)', webpage, u'video_url').replace('\\/', '/')
         path = compat_urllib_parse_urlparse(video_url).path
         extension = os.path.splitext(path)[1][1:]

youtube_dl/extractor/youtube.py

@@ -162,23 +162,6 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
         # Dash audio
         '141', '172', '140', '171', '139',
     ]
-    _available_formats_prefer_free = ['38', '46', '37', '45', '22', '44', '35', '43', '34', '18', '6', '5', '36', '17', '13',
-                                      # Apple HTTP Live Streaming
-                                      '96', '95', '94', '93', '92', '132', '151',
-                                      # 3D
-                                      '85', '102', '84', '101', '83', '100', '82',
-                                      # Dash video
-                                      '138', '248', '137', '247', '136', '246', '245',
-                                      '244', '135', '243', '134', '242', '133', '160',
-                                      # Dash audio
-                                      '172', '141', '171', '140', '139',
-                                      ]
-    _video_formats_map = {
-        'flv': ['35', '34', '6', '5'],
-        '3gp': ['36', '17', '13'],
-        'mp4': ['38', '37', '22', '18'],
-        'webm': ['46', '45', '44', '43'],
-    }
     _video_extensions = {
         '13': '3gp',
         '17': '3gp',
@@ -236,54 +219,54 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
         '248': 'webm',
     }
     _video_dimensions = {
-        '5': '400x240',
-        '6': '???',
-        '13': '???',
-        '17': '176x144',
-        '18': '640x360',
-        '22': '1280x720',
-        '34': '640x360',
-        '35': '854x480',
-        '36': '320x240',
-        '37': '1920x1080',
-        '38': '4096x3072',
-        '43': '640x360',
-        '44': '854x480',
-        '45': '1280x720',
-        '46': '1920x1080',
-        '82': '360p',
-        '83': '480p',
-        '84': '720p',
-        '85': '1080p',
-        '92': '240p',
-        '93': '360p',
-        '94': '480p',
-        '95': '720p',
-        '96': '1080p',
-        '100': '360p',
-        '101': '480p',
-        '102': '720p',
-        '132': '240p',
-        '151': '72p',
-        '133': '240p',
-        '134': '360p',
-        '135': '480p',
-        '136': '720p',
-        '137': '1080p',
-        '138': '>1080p',
-        '139': '48k',
-        '140': '128k',
-        '141': '256k',
-        '160': '192p',
-        '171': '128k',
-        '172': '256k',
-        '242': '240p',
-        '243': '360p',
-        '244': '480p',
-        '245': '480p',
-        '246': '480p',
-        '247': '720p',
-        '248': '1080p',
+        '5': {'width': 400, 'height': 240},
+        '6': {},
+        '13': {},
+        '17': {'width': 176, 'height': 144},
+        '18': {'width': 640, 'height': 360},
+        '22': {'width': 1280, 'height': 720},
+        '34': {'width': 640, 'height': 360},
+        '35': {'width': 854, 'height': 480},
+        '36': {'width': 320, 'height': 240},
+        '37': {'width': 1920, 'height': 1080},
+        '38': {'width': 4096, 'height': 3072},
+        '43': {'width': 640, 'height': 360},
+        '44': {'width': 854, 'height': 480},
+        '45': {'width': 1280, 'height': 720},
+        '46': {'width': 1920, 'height': 1080},
+        '82': {'height': 360, 'display': '360p'},
+        '83': {'height': 480, 'display': '480p'},
+        '84': {'height': 720, 'display': '720p'},
+        '85': {'height': 1080, 'display': '1080p'},
+        '92': {'height': 240, 'display': '240p'},
+        '93': {'height': 360, 'display': '360p'},
+        '94': {'height': 480, 'display': '480p'},
+        '95': {'height': 720, 'display': '720p'},
+        '96': {'height': 1080, 'display': '1080p'},
+        '100': {'height': 360, 'display': '360p'},
+        '101': {'height': 480, 'display': '480p'},
+        '102': {'height': 720, 'display': '720p'},
+        '132': {'height': 240, 'display': '240p'},
+        '151': {'height': 72, 'display': '72p'},
+        '133': {'height': 240, 'display': '240p'},
+        '134': {'height': 360, 'display': '360p'},
+        '135': {'height': 480, 'display': '480p'},
+        '136': {'height': 720, 'display': '720p'},
+        '137': {'height': 1080, 'display': '1080p'},
+        '138': {'height': 1081, 'display': '>1080p'},
+        '139': {'display': '48k'},
+        '140': {'display': '128k'},
+        '141': {'display': '256k'},
+        '160': {'height': 192, 'display': '192p'},
+        '171': {'display': '128k'},
+        '172': {'display': '256k'},
+        '242': {'height': 240, 'display': '240p'},
+        '243': {'height': 360, 'display': '360p'},
+        '244': {'height': 480, 'display': '480p'},
+        '245': {'height': 480, 'display': '480p'},
+        '246': {'height': 480, 'display': '480p'},
+        '247': {'height': 720, 'display': '720p'},
+        '248': {'height': 1080, 'display': '1080p'},
     }
     _special_itags = {
         '82': '3D',
@@ -1153,13 +1136,6 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
             self._downloader.report_warning(err_msg)
             return {}

-    def _print_formats(self, formats):
-        print('Available formats:')
-        for x in formats:
-            print('%s\t:\t%s\t[%s]%s' %(x, self._video_extensions.get(x, 'flv'),
-                                        self._video_dimensions.get(x, '???'),
-                                        ' ('+self._special_itags[x]+')' if x in self._special_itags else ''))
-
     def _extract_id(self, url):
         mobj = re.match(self._VALID_URL, url, re.VERBOSE)
         if mobj is None:
@@ -1172,48 +1148,11 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
         Transform a dictionary in the format {itag:url} to a list of (itag, url)
         with the requested formats.
         """
-        req_format = self._downloader.params.get('format', None)
-        format_limit = self._downloader.params.get('format_limit', None)
-        available_formats = self._available_formats_prefer_free if self._downloader.params.get('prefer_free_formats', False) else self._available_formats
-        if format_limit is not None and format_limit in available_formats:
-            format_list = available_formats[available_formats.index(format_limit):]
-        else:
-            format_list = available_formats
-        existing_formats = [x for x in format_list if x in url_map]
+        existing_formats = [x for x in self._available_formats if x in url_map]
         if len(existing_formats) == 0:
             raise ExtractorError(u'no known formats available for video')
-        if self._downloader.params.get('listformats', None):
-            self._print_formats(existing_formats)
-            return
-        if req_format is None or req_format == 'best':
-            video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality
-        elif req_format == 'worst':
-            video_url_list = [(existing_formats[-1], url_map[existing_formats[-1]])] # worst quality
-        elif req_format in ('-1', 'all'):
-            video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats
-        else:
-            # Specific formats. We pick the first in a slash-delimeted sequence.
-            # Format can be specified as itag or 'mp4' or 'flv' etc. We pick the highest quality
-            # available in the specified format. For example,
-            # if '1/2/3/4' is requested and '2' and '4' are available, we pick '2'.
-            # if '1/mp4/3/4' is requested and '1' and '5' (is a mp4) are available, we pick '1'.
-            # if '1/mp4/3/4' is requested and '4' and '5' (is a mp4) are available, we pick '5'.
-            req_formats = req_format.split('/')
-            video_url_list = None
-            for rf in req_formats:
-                if rf in url_map:
-                    video_url_list = [(rf, url_map[rf])]
-                    break
-                if rf in self._video_formats_map:
-                    for srf in self._video_formats_map[rf]:
-                        if srf in url_map:
-                            video_url_list = [(srf, url_map[srf])]
-                            break
-                    else:
-                        continue
-                    break
-            if video_url_list is None:
-                raise ExtractorError(u'requested format not available')
+        video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats
+        video_url_list.reverse() # order worst to best
         return video_url_list

     def _extract_from_m3u8(self, manifest_url, video_id):
@@ -1361,7 +1300,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
             video_description = u''

         def _extract_count(klass):
-            count = self._search_regex(r'class="%s">([\d,]+)</span>' % re.escape(klass), video_webpage, klass, fatal=False)
+            count = self._search_regex(
+                r'class="%s">([\d,]+)</span>' % re.escape(klass),
+                video_webpage, klass, default=None)
             if count is not None:
                 return int(count.replace(',', ''))
             return None
@@ -1377,9 +1318,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
         if 'length_seconds' not in video_info:
             self._downloader.report_warning(u'unable to extract video duration')
-            video_duration = ''
+            video_duration = None
         else:
-            video_duration = compat_urllib_parse.unquote_plus(video_info['length_seconds'][0])
+            video_duration = int(compat_urllib_parse.unquote_plus(video_info['length_seconds'][0]))

         # annotations
         video_annotations = None
@@ -1460,50 +1401,60 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
                     url += '&ratebypass=yes'
                 url_map[url_data['itag'][0]] = url
             video_url_list = self._get_video_url_list(url_map)
-            if not video_url_list:
-                return
         elif video_info.get('hlsvp'):
             manifest_url = video_info['hlsvp'][0]
             url_map = self._extract_from_m3u8(manifest_url, video_id)
             video_url_list = self._get_video_url_list(url_map)
-            if not video_url_list:
-                return
         else:
             raise ExtractorError(u'no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')

-        results = []
+        formats = []
         for itag, video_real_url in video_url_list:
             # Extension
             video_extension = self._video_extensions.get(itag, 'flv')
+            resolution = self._video_dimensions.get(itag, {}).get('display')
+            width = self._video_dimensions.get(itag, {}).get('width')
+            height = self._video_dimensions.get(itag, {}).get('height')
+            note = self._special_itags.get(itag)

             video_format = '{0} - {1}{2}'.format(itag if itag else video_extension,
-                self._video_dimensions.get(itag, '???'),
+                '%dx%d' % (width, height) if width is not None and height is not None else (resolution if resolution is not None else '???'),
                 ' ('+self._special_itags[itag]+')' if itag in self._special_itags else '')

-            results.append({
-                'id': video_id,
-                'url': video_real_url,
-                'uploader': video_uploader,
-                'uploader_id': video_uploader_id,
-                'upload_date': upload_date,
-                'title': video_title,
-                'ext': video_extension,
-                'format': video_format,
-                'format_id': itag,
-                'thumbnail': video_thumbnail,
-                'description': video_description,
-                'player_url': player_url,
-                'subtitles': video_subtitles,
-                'duration': video_duration,
-                'age_limit': 18 if age_gate else 0,
-                'annotations': video_annotations,
-                'webpage_url': 'https://www.youtube.com/watch?v=%s' % video_id,
-                'view_count': view_count,
-                'like_count': like_count,
-                'dislike_count': dislike_count,
+            formats.append({
+                'url': video_real_url,
+                'ext': video_extension,
+                'format': video_format,
+                'format_id': itag,
+                'player_url': player_url,
+                '_resolution': resolution,
+                'width': width,
+                'height': height,
+                'format_note': note,
             })
-        return results
+
+        def _formats_key(f):
+            return (f.get('height') if f.get('height') is not None else -1,
+                    f.get('width') if f.get('width') is not None else -1)
+        formats = sorted(formats, key=_formats_key)
+
+        return {
+            'id': video_id,
+            'uploader': video_uploader,
+            'uploader_id': video_uploader_id,
+            'upload_date': upload_date,
+            'title': video_title,
+            'thumbnail': video_thumbnail,
+            'description': video_description,
+            'subtitles': video_subtitles,
+            'duration': video_duration,
+            'age_limit': 18 if age_gate else 0,
+            'annotations': video_annotations,
+            'webpage_url': 'https://www.youtube.com/watch?v=%s' % video_id,
+            'view_count': view_count,
+            'like_count': like_count,
+            'dislike_count': dislike_count,
+            'formats': formats,
+        }

 class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
     IE_DESC = u'YouTube.com playlists'
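
The effect of _formats_key above: formats are ordered worst to best by (height, width), with dimensionless entries (audio-only itags) sorting first, since missing values map to -1. A minimal reproduction with made-up entries:

formats = [
    {'format_id': '22', 'width': 1280, 'height': 720},
    {'format_id': '140'},                      # audio only: no dimensions
    {'format_id': '18', 'width': 640, 'height': 360},
]

def _formats_key(f):
    # Missing dimensions sort as -1, i.e. before any real resolution
    return (f.get('height') if f.get('height') is not None else -1,
            f.get('width') if f.get('width') is not None else -1)

print [f['format_id'] for f in sorted(formats, key=_formats_key)]
# -> ['140', '18', '22']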
@@ -1715,7 +1666,7 @@ class YoutubeUserIE(InfoExtractor):
         # page by page until there are no video ids - it means we got
         # all of them.

-        video_ids = []
+        url_results = []

         for pagenum in itertools.count(0):
             start_index = pagenum * self._GDATA_PAGE_SIZE + 1
@ -1733,10 +1684,17 @@ class YoutubeUserIE(InfoExtractor):
break break
# Extract video identifiers # Extract video identifiers
ids_in_page = [] entries = response['feed']['entry']
for entry in response['feed']['entry']: for entry in entries:
ids_in_page.append(entry['id']['$t'].split('/')[-1]) title = entry['title']['$t']
video_ids.extend(ids_in_page) video_id = entry['id']['$t'].split('/')[-1]
url_results.append({
'_type': 'url',
'url': video_id,
'ie_key': 'Youtube',
'id': 'video_id',
'title': title,
})
# A little optimization - if current page is not # A little optimization - if current page is not
# "full", ie. does not contain PAGE_SIZE video ids then # "full", ie. does not contain PAGE_SIZE video ids then
@@ -1744,12 +1702,9 @@ class YoutubeUserIE(InfoExtractor):
             # are no more ids on further pages - no need to query
             # again.
-            if len(ids_in_page) < self._GDATA_PAGE_SIZE:
+            if len(entries) < self._GDATA_PAGE_SIZE:
                 break

-        url_results = [
-            self.url_result(video_id, 'Youtube', video_id=video_id)
-            for video_id in video_ids]
         return self.playlist_result(url_results, playlist_title=username)

youtube_dl/utils.py

@@ -1,6 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-

+import ctypes
 import datetime
 import email.utils
 import errno
@@ -766,6 +767,10 @@ def unified_strdate(date_str):
             upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
         except:
             pass
+    if upload_date is None:
+        timetuple = email.utils.parsedate_tz(date_str)
+        if timetuple:
+            upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
     return upload_date

 def determine_ext(url, default_ext=u'unknown_video'):
@@ -1051,7 +1056,7 @@ def month_by_name(name):
     """ Return the number of a month by (locale-independently) English name """

     ENGLISH_NAMES = [
-        u'Januar', u'February', u'March', u'April', u'May', u'June',
+        u'January', u'February', u'March', u'April', u'May', u'June',
         u'July', u'August', u'September', u'October', u'November', u'December']
     try:
         return ENGLISH_NAMES.index(name) + 1
@@ -1062,3 +1067,34 @@ def month_by_name(name):
 def fix_xml_all_ampersand(xml_str):
     """Replace all the '&' by '&amp;' in XML"""
     return xml_str.replace(u'&', u'&amp;')
+
+
+def setproctitle(title):
+    assert isinstance(title, type(u''))
+    try:
+        libc = ctypes.cdll.LoadLibrary("libc.so.6")
+    except OSError:
+        return
+    title = title
+    buf = ctypes.create_string_buffer(len(title) + 1)
+    buf.value = title.encode('utf-8')
+    try:
+        libc.prctl(15, ctypes.byref(buf), 0, 0, 0)
+    except AttributeError:
+        return  # Strange libc, just skip this
+
+
+def remove_start(s, start):
+    if s.startswith(start):
+        return s[len(start):]
+    return s
+
+
+def url_basename(url):
+    path = compat_urlparse.urlparse(url).path
+    return path.strip(u'/').split(u'/')[-1]
+
+
+class HEADRequest(compat_urllib_request.Request):
+    def get_method(self):
+        return "HEAD"

youtube_dl/version.py

@@ -1,2 +1,2 @@
-__version__ = '2013.12.11.2'
+__version__ = '2013.12.23.3'