From 5aba6ea4fe6ad227d64a7e8b487d7cd7c3ad1f11 Mon Sep 17 00:00:00 2001
From: Ricardo Garcia
Date: Sat, 29 Jan 2011 11:55:20 +0100
Subject: [PATCH 1/9] =?UTF-8?q?Add=20YoutubeUserIE=20(code=20courtesy=20of?=
 =?UTF-8?q?=20Pawe=C5=82=20Paprota)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 youtube-dl | 79 ++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 56 insertions(+), 23 deletions(-)

diff --git a/youtube-dl b/youtube-dl
index 8dd03daf3..c8c2ea88e 100755
--- a/youtube-dl
+++ b/youtube-dl
@@ -5,6 +5,7 @@
 # Author: Benjamin Johnson
 # Author: Vasyl' Vavrychuk
 # Author: Witold Baryluk
+# Author: Paweł Paprota
 # License: Public domain code
 import cookielib
 import ctypes
@@ -2159,9 +2160,11 @@ class YoutubePlaylistIE(InfoExtractor):
 class YoutubeUserIE(InfoExtractor):
 	"""Information Extractor for YouTube users."""
 
-	_VALID_URL = r'(?:http://)?(?:\w+\.)?youtube.com/user/(.*)'
+	_VALID_URL = r'(?:(?:(?:http://)?(?:\w+\.)?youtube.com/user/)|ytuser:)([A-Za-z0-9_-]+)'
 	_TEMPLATE_URL = 'http://gdata.youtube.com/feeds/api/users/%s'
-	_VIDEO_INDICATOR = r'http://gdata.youtube.com/feeds/api/videos/(.*)' # XXX Fix this.
+	_GDATA_PAGE_SIZE = 50
+	_GDATA_URL = 'http://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d'
+	_VIDEO_INDICATOR = r'/watch\?v=(.+?)&'
 	_youtube_ie = None
 
 	def __init__(self, youtube_ie, downloader=None):
@@ -2172,9 +2175,10 @@ class YoutubeUserIE(InfoExtractor):
 	def suitable(url):
 		return (re.match(YoutubeUserIE._VALID_URL, url) is not None)
 
-	def report_download_page(self, username):
+	def report_download_page(self, username, start_index):
 		"""Report attempt to download user page."""
-		self._downloader.to_screen(u'[youtube] user %s: Downloading page ' % (username))
+		self._downloader.to_screen(u'[youtube] user %s: Downloading video ids from %d to %d' %
+				(username, start_index, start_index + self._GDATA_PAGE_SIZE))
 
 	def _real_initialize(self):
 		self._youtube_ie.initialize()
@@ -2186,34 +2190,63 @@ class YoutubeUserIE(InfoExtractor):
 			self._downloader.trouble(u'ERROR: invalid url: %s' % url)
 			return
 
-		# Download user page
 		username = mobj.group(1)
+
+		# Download video ids using YouTube Data API. Result size per
+		# query is limited (currently to 50 videos) so we need to query
+		# page by page until there are no video ids - it means we got
+		# all of them.
+
 		video_ids = []
-		pagenum = 1
+		pagenum = 0
 
-		self.report_download_page(username)
-		request = urllib2.Request(self._TEMPLATE_URL % (username))
-		try:
-			page = urllib2.urlopen(request).read()
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
-			return
+		while True:
+			start_index = pagenum * self._GDATA_PAGE_SIZE + 1
+			self.report_download_page(username, start_index)
 
-		# Extract video identifiers
-		ids_in_page = []
+			request = urllib2.Request(self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index))
 
-		for mobj in re.finditer(self._VIDEO_INDICATOR, page):
-			if mobj.group(1) not in ids_in_page:
-				ids_in_page.append(mobj.group(1))
-		video_ids.extend(ids_in_page)
+			try:
+				page = urllib2.urlopen(request).read()
+			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+				self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
+				return
+			# Extract video identifiers
+			ids_in_page = []
+
+			for mobj in re.finditer(self._VIDEO_INDICATOR, page):
+				if mobj.group(1) not in ids_in_page:
+					ids_in_page.append(mobj.group(1))
+
+			video_ids.extend(ids_in_page)
+
+			# A little optimization - if current page is not
+			# "full", ie. does not contain PAGE_SIZE video ids then
+			# we can assume that this page is the last one - there
+			# are no more ids on further pages - no need to query
+			# again.
+
+			if len(ids_in_page) < self._GDATA_PAGE_SIZE:
+				break
+
+			pagenum += 1
+
+		all_ids_count = len(video_ids)
 		playliststart = self._downloader.params.get('playliststart', 1) - 1
 		playlistend = self._downloader.params.get('playlistend', -1)
-		video_ids = video_ids[playliststart:playlistend]
-		for id in video_ids:
-			self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
-		return
+		if playlistend == -1:
+			video_ids = video_ids[playliststart:]
+		else:
+			video_ids = video_ids[playliststart:playlistend]
+
+		self._downloader.to_screen("[youtube] user %s: Collected %d video ids (downloading %d of them)" %
+				(username, all_ids_count, len(video_ids)))
+
+		for video_id in video_ids:
+			self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % video_id)
+
 
 class DepositFilesIE(InfoExtractor):
 	"""Information extractor for depositfiles.com"""

From 9e0dd8692ea16d62564c6b05c6fdc3f2e0b2f02d Mon Sep 17 00:00:00 2001
From: Ricardo Garcia
Date: Sun, 30 Jan 2011 12:58:01 +0100
Subject: [PATCH 2/9] Bump version number

---
 LATEST_VERSION | 2 +-
 youtube-dl     | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/LATEST_VERSION b/LATEST_VERSION
index a1c4173c8..4ab209346 100644
--- a/LATEST_VERSION
+++ b/LATEST_VERSION
@@ -1 +1 @@
-2010.12.09
+2011.01.30
diff --git a/youtube-dl b/youtube-dl
index c8c2ea88e..e980f41f8 100755
--- a/youtube-dl
+++ b/youtube-dl
@@ -2405,7 +2405,7 @@ if __name__ == '__main__':
 		# Parse command line
 		parser = optparse.OptionParser(
 			usage='Usage: %prog [options] url...',
-			version='2010.12.09',
+			version='2011.01.30',
 			conflict_handler='resolve',
 		)

From 5776c3295b085b9177b24152e33049a8c4b0c90b Mon Sep 17 00:00:00 2001
From: Ricardo Garcia
Date: Sun, 30 Jan 2011 12:59:18 +0100
Subject: [PATCH 3/9] Update User-Agent string

---
 youtube-dl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube-dl b/youtube-dl
index e980f41f8..4d1e942ff 100755
--- a/youtube-dl
+++ b/youtube-dl
@@ -37,7 +37,7 @@ except ImportError:
 	from cgi import parse_qs
 
 std_headers = {
-	'User-Agent': 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.12) Gecko/20101028 Firefox/3.6.12',
+	'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:2.0b10) Gecko/20100101 Firefox/4.0b10'
 	'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
 	'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
 	'Accept-Encoding': 'gzip, deflate',

From 16c73c2e513829197c4af5ee62bde88b2b2272e4 Mon Sep 17 00:00:00 2001
From: Ricardo Garcia
Date: Sun, 30 Jan 2011 13:03:15 +0100
Subject: [PATCH 4/9] Fix SyntaxError problem (oops)

---
 youtube-dl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/youtube-dl b/youtube-dl
index 4d1e942ff..a4c8f2494 100755
--- a/youtube-dl
+++ b/youtube-dl
@@ -37,7 +37,7 @@ except ImportError:
 	from cgi import parse_qs
 
 std_headers = {
-	'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:2.0b10) Gecko/20100101 Firefox/4.0b10'
+	'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:2.0b10) Gecko/20100101 Firefox/4.0b10',
 	'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
 	'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
 	'Accept-Encoding': 'gzip, deflate',

From f74e22ae280ac1680251350c4672abfeb2a047fe Mon Sep 17 00:00:00 2001
From: Gergely Imreh
Date: Mon, 31 Jan 2011 18:54:47 +0800
Subject: [PATCH 5/9] Enable artist playlists in YoutubePlaylistIE

Artist playlist pages have a different format from user playlists, so
extra format checking is needed to construct the correct URL. From an
artist playlist this method downloads every video listed below the
"Videos by [Artist Name]" header; usually there is also one more video
on the side, titled "Youtube Mix for [Artist Name]", whose link format
currently cannot be distinguished from the other videos in the list.
---
 youtube-dl | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/youtube-dl b/youtube-dl
index a4c8f2494..dd875a38e 100755
--- a/youtube-dl
+++ b/youtube-dl
@@ -2096,8 +2096,8 @@ class YahooSearchIE(InfoExtractor):
 class YoutubePlaylistIE(InfoExtractor):
 	"""Information Extractor for YouTube playlists."""
 
-	_VALID_URL = r'(?:http://)?(?:\w+\.)?youtube.com/(?:(?:view_play_list|my_playlists)\?.*?p=|user/.*?/user/|p/)([^&]+).*'
-	_TEMPLATE_URL = 'http://www.youtube.com/view_play_list?p=%s&page=%s&gl=US&hl=en'
+	_VALID_URL = r'(?:http://)?(?:\w+\.)?youtube.com/(?:(?:view_play_list|my_playlists|artist)\?.*?(p|a)=|user/.*?/user/|p/)([^&]+).*'
+	_TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en'
 	_VIDEO_INDICATOR = r'/watch\?v=(.+?)&'
 	_MORE_PAGES_INDICATOR = r'(?m)>\s*Next\s*</a>'
 	_youtube_ie = None
@@ -2125,13 +2125,19 @@ class YoutubePlaylistIE(InfoExtractor):
 			self._downloader.trouble(u'ERROR: invalid url: %s' % url)
 			return
 
 		# Download playlist pages
-		playlist_id = mobj.group(1)
+		# prefix is 'p' as default for playlists but there are other types that need extra care
+		playlist_prefix = mobj.group(1)
+		if playlist_prefix == 'a':
+			playlist_access = 'artist'
+		else:
+			playlist_access = 'view_play_list'
+		playlist_id = mobj.group(2)
 		video_ids = []
 		pagenum = 1
 
 		while True:
 			self.report_download_page(playlist_id, pagenum)
-			request = urllib2.Request(self._TEMPLATE_URL % (playlist_id, pagenum))
+			request = urllib2.Request(self._TEMPLATE_URL % (playlist_access, playlist_prefix, playlist_id, pagenum))
 			try:
 				page = urllib2.urlopen(request).read()
 			except (urllib2.URLError, httplib.HTTPException, socket.error), err:

From d119b54df6a02d3985284c36586f6ff7e4cac969 Mon Sep 17 00:00:00 2001
From: Ricardo Garcia
Date: Sat, 12 Feb 2011 20:19:20 +0100
Subject: [PATCH 6/9] Support more common YouTube playlist URLs

---
 youtube-dl | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/youtube-dl b/youtube-dl
index dd875a38e..0f9724637 100755
--- a/youtube-dl
+++ b/youtube-dl
@@ -2096,7 +2096,7 @@ class YahooSearchIE(InfoExtractor):
 class YoutubePlaylistIE(InfoExtractor):
 	"""Information Extractor for YouTube playlists."""
 
-	_VALID_URL = r'(?:http://)?(?:\w+\.)?youtube.com/(?:(?:view_play_list|my_playlists|artist)\?.*?(p|a)=|user/.*?/user/|p/)([^&]+).*'
+	_VALID_URL = r'(?:http://)?(?:\w+\.)?youtube.com/(?:(?:view_play_list|my_playlists|artist)\?.*?(p|a)=|user/.*?/user/|p/|user/.*?#[pg]/c/)([0-9A-Za-z]+)(?:/.*?/([0-9A-Za-z_-]+))?.*'
 	_TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en'
 	_VIDEO_INDICATOR = r'/watch\?v=(.+?)&'
 	_MORE_PAGES_INDICATOR = r'(?m)>\s*Next\s*</a>'
@@ -2124,6 +2124,11 @@ class YoutubePlaylistIE(InfoExtractor):
 			self._downloader.trouble(u'ERROR: invalid url: %s' % url)
 			return
 
+		# Single video case
+		if mobj.group(3) is not None:
+			self._youtube_ie.extract(mobj.group(3))
+			return
+
 		# Download playlist pages
 		# prefix is 'p' as default for playlists but there are other types that need extra care
 		playlist_prefix = mobj.group(1)

From 7cc3c6fd62d82bac36c583a8d1dc6c2f6da8c178 Mon Sep 17 00:00:00 2001
From: Gergely Imreh
Date: Sun, 13 Feb 2011 19:02:56 +0800
Subject: [PATCH 7/9] Fix possible missing parameter in playlist url extraction

The "playlist_prefix" parameter was missing when parsing playlist URLs
that match the recently added format, e.g.:
http://www.youtube.com/user/stanforduniversity#g/c/9D558D49CA734A02

For these URLs (basically, for every playlist type so far except the
artist list) playlist_prefix has to be equal to "p" for correct
extraction.
---
 youtube-dl | 1 +
 1 file changed, 1 insertion(+)

diff --git a/youtube-dl b/youtube-dl
index 0f9724637..26af2e5bc 100755
--- a/youtube-dl
+++ b/youtube-dl
@@ -2135,6 +2135,7 @@ class YoutubePlaylistIE(InfoExtractor):
 		if playlist_prefix == 'a':
 			playlist_access = 'artist'
 		else:
+			playlist_prefix = 'p'
 			playlist_access = 'view_play_list'
 		playlist_id = mobj.group(2)
 		video_ids = []

From 9f5f9602131c440919c29aa81b586fe5b83891a8 Mon Sep 17 00:00:00 2001
From: Gergely Imreh
Date: Sun, 13 Feb 2011 21:26:58 +0800
Subject: [PATCH 8/9] Facebook info extractor

This IE should be full-featured. Public videos can be downloaded
without login, e.g.:
https://www.facebook.com/video/video.php?v=696729990595

Private videos need login and are subject to a login rate limit of
a couple of tries per minute.
---
 youtube-dl | 225 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 225 insertions(+)

diff --git a/youtube-dl b/youtube-dl
index 26af2e5bc..a1061a4e4 100755
--- a/youtube-dl
+++ b/youtube-dl
@@ -2339,6 +2339,229 @@ class DepositFilesIE(InfoExtractor):
 		except UnavailableVideoError, err:
 			self._downloader.trouble(u'ERROR: unable to download file')
 
+class FacebookIE(InfoExtractor):
+	"""Information Extractor for Facebook"""
+
+	_VALID_URL = r'^(?:https?://)?(?:\w+\.)?facebook.com/video/video.php\?(?:.*?)v=(?P<ID>\d+)(?:.*)'
+	_LOGIN_URL = 'https://login.facebook.com/login.php?m&next=http%3A%2F%2Fm.facebook.com%2Fhome.php&'
+	_NETRC_MACHINE = 'facebook'
+	_available_formats = ['highqual', 'lowqual']
+	_video_extensions = {
+		'highqual': 'mp4',
+		'lowqual': 'mp4',
+	}
+
+	def __init__(self, downloader=None):
+		InfoExtractor.__init__(self, downloader)
+
+	@staticmethod
+	def suitable(url):
+		return (re.match(FacebookIE._VALID_URL, url) is not None)
+
+	def _reporter(self, message):
+		"""Add header and report message."""
+		self._downloader.to_screen(u'[facebook] %s' % message)
+
+	def report_login(self):
+		"""Report attempt to log in."""
+		self._reporter(u'Logging in')
+
+	def report_video_webpage_download(self, video_id):
+		"""Report attempt to download video webpage."""
+		self._reporter(u'%s: Downloading video webpage' % video_id)
+
+	def report_information_extraction(self, video_id):
+		"""Report attempt to extract video information."""
+		self._reporter(u'%s: Extracting video information' % video_id)
+
+	def _parse_page(self, video_webpage):
+		"""Extract video information from page"""
+		# General data
+		data = {'title': r'class="video_title datawrap">(.*?)</',
+			'description': r'<div class="datawrap">(.*?)</div>',
+			'owner': r'\("video_owner_name", "(.*?)"\)',
+			'upload_date': r'data-date="(.*?)"',
+			'thumbnail': r'\("thumb_url", "(?P<THUMB>.*?)"\)',
+			}
+		video_info = {}
+		for piece in data.keys():
+			mobj = re.search(data[piece], video_webpage)
+			if mobj is not None:
+				video_info[piece] = urllib.unquote_plus(mobj.group(1).decode("unicode_escape"))
+
+		# Video urls
+		video_urls = {}
+		for fmt in self._available_formats:
+			mobj = re.search(r'\("%s_src\", "(.+?)"\)' % fmt, video_webpage)
+			if mobj is not None:
+				# URL is in a Javascript segment inside an escaped Unicode format within
+				# the generally utf-8 page
+				video_urls[fmt] = urllib.unquote_plus(mobj.group(1).decode("unicode_escape"))
+		video_info['video_urls'] = video_urls
+
+		return video_info
+
+	def _real_initialize(self):
+		if self._downloader is None:
+			return
+
+		useremail = None
+		password = None
+		downloader_params = self._downloader.params
+
+		# Attempt to use provided username and password or .netrc data
+		if downloader_params.get('username', None) is not None:
+			useremail = downloader_params['username']
+			password = downloader_params['password']
+		elif downloader_params.get('usenetrc', False):
+			try:
+				info = netrc.netrc().authenticators(self._NETRC_MACHINE)
+				if info is not None:
+					useremail = info[0]
+					password = info[2]
+				else:
+					raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
+			except (IOError, netrc.NetrcParseError), err:
+				self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % str(err))
+				return
+
+		if useremail is None:
+			return
+
+		# Log in
+		login_form = {
+			'email': useremail,
+			'pass': password,
+			'login': 'Log+In'
+			}
+		request = urllib2.Request(self._LOGIN_URL, urllib.urlencode(login_form))
+		try:
+			self.report_login()
+			login_results = urllib2.urlopen(request).read()
+			if re.search(r'<form(.*)name="login"(.*?)</form>', login_results) is not None:
+				self._downloader.to_stderr(u'WARNING: unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
+				return
+		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+			self._downloader.to_stderr(u'WARNING: unable to log in: %s' % str(err))
+			return
+
+	def _real_extract(self, url):
+		mobj = re.match(self._VALID_URL, url)
+		if mobj is None:
+			self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
+			return
+		video_id = mobj.group('ID')
+
+		# Get video webpage
+		self.report_video_webpage_download(video_id)
+		request = urllib2.Request('https://www.facebook.com/video/video.php?v=%s' % video_id)
+		try:
+			page = urllib2.urlopen(request)
+			video_webpage = page.read()
+		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+			self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err))
+			return
+
+		# Start extracting information
+		self.report_information_extraction(video_id)
+
+		# Extract information
+		video_info = self._parse_page(video_webpage)
+
+		# uploader
+		if 'owner' not in video_info:
+			self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
+			return
+		video_uploader = video_info['owner']
+
+		# title
+		if 'title' not in video_info:
+			self._downloader.trouble(u'ERROR: unable to extract video title')
+			return
+		video_title = video_info['title']
+		video_title = video_title.decode('utf-8')
+		video_title = sanitize_title(video_title)
+
+		# simplified title
+		simple_title = re.sub(ur'(?u)([^%s]+)' % simple_title_chars, ur'_', video_title)
+		simple_title = simple_title.strip(ur'_')
+
+		# thumbnail image
+		if 'thumbnail' not in video_info:
+			self._downloader.trouble(u'WARNING: unable to extract video thumbnail')
+			video_thumbnail = ''
+		else:
+			video_thumbnail = video_info['thumbnail']
+
+		# upload date
+		upload_date = u'NA'
+		if 'upload_date' in video_info:
+			upload_time = video_info['upload_date']
+			timetuple = email.utils.parsedate_tz(upload_time)
+			if timetuple is not None:
+				try:
+					upload_date = time.strftime('%Y%m%d', timetuple[0:9])
+				except:
+					pass
+
+		# description
+		video_description = 'No description available.'
+		if (self._downloader.params.get('forcedescription', False) and
+				'description' in video_info):
+			video_description = video_info['description']
+
+		url_map = video_info['video_urls']
+		if len(url_map.keys()) > 0:
+			# Decide which formats to download
+			req_format = self._downloader.params.get('format', None)
+			format_limit = self._downloader.params.get('format_limit', None)
+
+			if format_limit is not None and format_limit in self._available_formats:
+				format_list = self._available_formats[self._available_formats.index(format_limit):]
+			else:
+				format_list = self._available_formats
+			existing_formats = [x for x in format_list if x in url_map]
+			if len(existing_formats) == 0:
+				self._downloader.trouble(u'ERROR: no known formats available for video')
+				return
+			if req_format is None:
+				video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality
+			elif req_format == '-1':
+				video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats
+			else:
+				# Specific format
+				if req_format not in url_map:
+					self._downloader.trouble(u'ERROR: requested format not available')
+					return
+				video_url_list = [(req_format, url_map[req_format])] # Specific format
+
+		for format_param, video_real_url in video_url_list:
+
+			# At this point we have a new video
+			self._downloader.increment_downloads()
+
+			# Extension
+			video_extension = self._video_extensions.get(format_param, 'mp4')
+
+			# Find the video URL in fmt_url_map or conn parameters
+			try:
+				# Process video information
+				self._downloader.process_info({
+					'id': video_id.decode('utf-8'),
+					'url': video_real_url.decode('utf-8'),
+					'uploader': video_uploader.decode('utf-8'),
+					'upload_date': upload_date,
+					'title': video_title,
+					'stitle': simple_title,
+					'ext': video_extension.decode('utf-8'),
+					'format': (format_param is None and u'NA' or format_param.decode('utf-8')),
+					'thumbnail': video_thumbnail.decode('utf-8'),
+					'description': video_description.decode('utf-8'),
+					'player_url': None,
+				})
+			except UnavailableVideoError, err:
+				self._downloader.trouble(u'\nERROR: unable to download video')
+
 class PostProcessor(object):
 	"""Post Processor class.
@@ -2594,6 +2817,7 @@ if __name__ == '__main__':
 		yahoo_ie = YahooIE()
 		yahoo_search_ie = YahooSearchIE(yahoo_ie)
 		deposit_files_ie = DepositFilesIE()
+		facebook_ie = FacebookIE()
 		generic_ie = GenericIE()
 
 		# File downloader
@@ -2645,6 +2869,7 @@ if __name__ == '__main__':
 		fd.add_info_extractor(yahoo_ie)
 		fd.add_info_extractor(yahoo_search_ie)
 		fd.add_info_extractor(deposit_files_ie)
+		fd.add_info_extractor(facebook_ie)
 
 		# This must come last since it's the
 		# fallback if none of the others work

From ef9f8451c8c33308a277b4933d6b3fa728c1adc0 Mon Sep 17 00:00:00 2001
From: Ricardo Garcia
Date: Sun, 20 Feb 2011 18:01:57 +0100
Subject: [PATCH 9/9] Add Gergely Imreh to the author list

---
 youtube-dl | 1 +
 1 file changed, 1 insertion(+)

diff --git a/youtube-dl b/youtube-dl
index a1061a4e4..0dd872e50 100755
--- a/youtube-dl
+++ b/youtube-dl
@@ -6,6 +6,7 @@
 # Author: Vasyl' Vavrychuk
 # Author: Witold Baryluk
 # Author: Paweł Paprota
+# Author: Gergely Imreh
 # License: Public domain code
 import cookielib
 import ctypes
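
As a quick cross-check of the URL handling introduced in this series, the minimal standalone Python sketch below (not part of any patch) runs the new and updated _VALID_URL patterns from patches 1, 6 and 8 against sample URLs. The YouTube user name and artist id are placeholders; the other two URLs are taken from the commit messages above, and the Facebook pattern is quoted with the <ID> group name that the extractor reads back via mobj.group('ID').

    import re

    # _VALID_URL patterns as added/updated by patches 1, 6 and 8 above
    YOUTUBE_USER = r'(?:(?:(?:http://)?(?:\w+\.)?youtube.com/user/)|ytuser:)([A-Za-z0-9_-]+)'
    YOUTUBE_PLAYLIST = (r'(?:http://)?(?:\w+\.)?youtube.com/(?:(?:view_play_list|my_playlists|artist)'
                        r'\?.*?(p|a)=|user/.*?/user/|p/|user/.*?#[pg]/c/)([0-9A-Za-z]+)'
                        r'(?:/.*?/([0-9A-Za-z_-]+))?.*')
    FACEBOOK_VIDEO = r'^(?:https?://)?(?:\w+\.)?facebook.com/video/video.php\?(?:.*?)v=(?P<ID>\d+)(?:.*)'

    samples = [
        (YOUTUBE_USER, 'ytuser:EXAMPLEUSER'),                                  # placeholder user name
        (YOUTUBE_PLAYLIST, 'http://www.youtube.com/artist?a=EXAMPLEARTISTID'), # placeholder artist id
        (YOUTUBE_PLAYLIST, 'http://www.youtube.com/user/stanforduniversity#g/c/9D558D49CA734A02'),
        (FACEBOOK_VIDEO, 'https://www.facebook.com/video/video.php?v=696729990595'),
    ]

    for pattern, url in samples:
        # Each extractor's suitable() does essentially this re.match() check
        print('%s -> matched: %s' % (url, re.match(pattern, url) is not None))

All four sample URLs match their respective patterns, which is how FileDownloader ends up dispatching them to YoutubeUserIE, YoutubePlaylistIE and FacebookIE once the extractors are registered as shown in patch 8.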