[tf1] improve extraction (closes #27980, closes #28040)

pull/28270/head
Remita Amine 3 years ago
parent 8cb4b71909
commit 295860ff00

@ -1,92 +1,87 @@
# coding: utf-8
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
int_or_none,
parse_iso8601,
try_get,
)
class TF1IE(InfoExtractor):
    """Extractor for tf1.fr video pages.

    Resolves the page slug to a WAT stream id via TF1's public GraphQL
    endpoint, then hands the actual media extraction to the Wat extractor
    through a ``url_transparent`` result, supplying the page-level metadata
    (title, description, thumbnails, tags, series, season/episode) itself.
    """
    _VALID_URL = r'https?://(?:www\.)?tf1\.fr/[^/]+/(?P<program_slug>[^/]+)/videos/(?P<id>[^/?&#]+)\.html'
    _TESTS = [{
        'url': 'https://www.tf1.fr/tmc/quotidien-avec-yann-barthes/videos/quotidien-premiere-partie-11-juin-2019.html',
        'info_dict': {
            'id': '13641379',
            'ext': 'mp4',
            'title': 'md5:f392bc52245dc5ad43771650c96fb620',
            'description': 'md5:a02cdb217141fb2d469d6216339b052f',
            'upload_date': '20190611',
            'timestamp': 1560273989,
            'duration': 1738,
            'series': 'Quotidien avec Yann Barthès',
            'tags': ['intégrale', 'quotidien', 'Replay'],
        },
        'params': {
            # Sometimes wat serves the whole file with the --test option
            'skip_download': True,
            'format': 'bestvideo',
        },
    }, {
        'url': 'http://www.tf1.fr/tf1/koh-lanta/videos/replay-koh-lanta-22-mai-2015.html',
        'only_matching': True,
    }, {
        'url': 'http://www.tf1.fr/hd1/documentaire/videos/mylene-farmer-d-une-icone.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        program_slug, slug = re.match(self._VALID_URL, url).groups()
        # Persisted GraphQL query: (programSlug, slug) -> video metadata,
        # including the WAT stream id used for format extraction.
        video = self._download_json(
            'https://www.tf1.fr/graphql/web', slug, query={
                'id': '9b80783950b85247541dd1d851f9cc7fa36574af015621f853ab111a679ce26f',
                'variables': json.dumps({
                    'programSlug': program_slug,
                    'slug': slug,
                })
            })['data']['videoBySlug']
        wat_id = video['streamId']

        # Collect non-empty tag labels only.
        tags = []
        for tag in (video.get('tags') or []):
            label = tag.get('label')
            if not label:
                continue
            tags.append(label)

        decoration = video.get('decoration') or {}

        thumbnails = []
        for source in (try_get(decoration, lambda x: x['image']['sources'], list) or []):
            source_url = source.get('url')
            if not source_url:
                continue
            thumbnails.append({
                'url': source_url,
                'width': int_or_none(source.get('width')),
            })

        return {
            # url_transparent: WatIE supplies the formats; the metadata
            # below takes precedence over whatever WatIE returns.
            '_type': 'url_transparent',
            'id': wat_id,
            'url': 'wat:' + wat_id,
            'title': video.get('title'),
            'thumbnails': thumbnails,
            'description': decoration.get('description'),
            'timestamp': parse_iso8601(video.get('date')),
            'duration': int_or_none(try_get(video, lambda x: x['publicPlayingInfos']['duration'])),
            'tags': tags,
            'series': decoration.get('programLabel'),
            'season_number': int_or_none(video.get('season')),
            'episode_number': int_or_none(video.get('episode')),
        }

@ -4,9 +4,10 @@ from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
unified_strdate,
HEADRequest,
ExtractorError,
int_or_none,
try_get,
unified_strdate,
)
@ -29,6 +30,7 @@ class WatIE(InfoExtractor):
'skip_download': True,
},
'expected_warnings': ['HTTP Error 404'],
'skip': 'This content is no longer available',
},
{
'url': 'http://www.wat.tv/video/gregory-lemarchal-voix-ange-6z1v7_6ygkj_.html',
@ -40,8 +42,10 @@ class WatIE(InfoExtractor):
'upload_date': '20140816',
},
'expected_warnings': ["Ce contenu n'est pas disponible pour l'instant."],
'skip': 'This content is no longer available',
},
]
_GEO_BYPASS = False
def _real_extract(self, url):
    """Extract playable formats for a WAT media id.

    Fetches metadata from TF1's mediainfocombo endpoint, raises on
    geo-restriction or other delivery errors, builds DASH/HLS formats
    from the 'delivery' manifest, and falls back to the legacy
    www.wat.tv webhtml manifest listing when that yields nothing.
    """
    video_id = self._match_id(url)

    # 'contentv4' is used in the website, but it also returns the related
    # videos, we don't need them
    video_data = self._download_json(
        'https://mediainfo.tf1.fr/mediainfocombo/' + video_id,
        video_id, query={'context': 'MYTF1'})
    video_info = video_data['media']

    error_desc = video_info.get('error_desc')
    if error_desc:
        # Geo-blocks get the dedicated error (carries the allowed
        # country list); anything else is a plain extraction error.
        if video_info.get('error_code') == 'GEOBLOCKED':
            self.raise_geo_restricted(error_desc, video_info.get('geoList'))
        raise ExtractorError(error_desc)

    title = video_info['title']

    formats = []

    def extract_formats(manifest_urls):
        # manifest_urls maps manifest type ('dash'/'mpd'/'hls') -> URL;
        # unknown types and empty URLs are ignored.
        for f, f_url in manifest_urls.items():
            if not f_url:
                continue
            if f in ('dash', 'mpd'):
                formats.extend(self._extract_mpd_formats(
                    f_url.replace('://das-q1.tf1.fr/', '://das-q1-ssl.tf1.fr/'),
                    video_id, mpd_id='dash', fatal=False))
            elif f == 'hls':
                formats.extend(self._extract_m3u8_formats(
                    f_url, video_id, 'mp4',
                    'm3u8_native', m3u8_id='hls', fatal=False))

    delivery = video_data.get('delivery') or {}
    extract_formats({delivery.get('format'): delivery.get('url')})
    if not formats:
        # Fall back to the legacy wat.tv manifest listing; best-effort,
        # so a 404 here must not abort the extraction.
        manifest_urls = self._download_json(
            'http://www.wat.tv/get/webhtml/' + video_id, video_id, fatal=False)
        if manifest_urls:
            extract_formats(manifest_urls)

    self._sort_formats(formats)

    return {
        'id': video_id,
        'title': title,
        'thumbnail': video_info.get('preview'),
        'upload_date': unified_strdate(try_get(
            video_data, lambda x: x['mediametrie']['chapters'][0]['estatS4'])),
        'duration': int_or_none(video_info.get('duration')),
        'formats': formats,
    }

Loading…
Cancel
Save