|
|
|
from __future__ import unicode_literals
|
|
|
|
|
import re
|
|
|
|
|
|
|
|
|
|
from .common import InfoExtractor
|
|
|
|
|
from ..utils import ExtractorError
|
|
|
|
|
from ..utils import urljoin
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class NhkBaseIE(InfoExtractor):
    """Shared logic for NHK World-Japan on-demand extractors.

    Provides the versioned JSON API call and the per-episode metadata
    parser used by both the single-episode and the program (playlist)
    extractors below.
    """

    # %-slots: ('v'|'r', 'clip'|'esd', 'episode'|'program', m_id, lang, '/all'|'')
    _API_URL_TEMPLATE = 'https://api.nhk.or.jp/nhkworld/%sod%slist/v7a/%s/%s/%s/all%s.json'
    _BASE_URL_REGEX = r'https?://www3\.nhk\.or\.jp/nhkworld/(?P<lang>[a-z]{2})/ondemand'
    _TYPE_REGEX = r'/(?P<type>video|audio)/'

    def _call_api(self, m_id, lang, is_video, is_episode, is_clip):
        """Fetch episode metadata from the NHK World API.

        Returns the (possibly empty) list of episode dicts; the API may
        return null for 'episodes', hence the trailing ``or []``.
        """
        return self._download_json(
            self._API_URL_TEMPLATE % (
                'v' if is_video else 'r',
                'clip' if is_clip else 'esd',
                'episode' if is_episode else 'program',
                m_id, lang, '/all' if is_video else ''),
            m_id, query={'apikey': 'EJfK8jdS57GqlupFgAfAAwr573q01y6k'})['data']['episodes'] or []

    def _extract_episode_info(self, url, episode=None):
        """Build an info dict for a single episode.

        If *episode* is None the metadata is fetched from the API;
        otherwise the provided episode dict (e.g. from a program
        listing) is used and, for audio, extraction is deferred to
        NhkVodIE via a url_transparent result.
        """
        fetch_episode = episode is None
        lang, m_type, episode_id = re.match(NhkVodIE._VALID_URL, url).groups()
        if episode_id.isdigit():
            # 7-digit IDs are '<program><no>'; canonicalize as 'pppp-nnn'
            episode_id = episode_id[:4] + '-' + episode_id[4:]

        is_video = m_type == 'video'
        if fetch_episode:
            # Program IDs starting with 9999 denote clips, not regular episodes
            episode = self._call_api(
                episode_id, lang, is_video, True, episode_id[:4] == '9999')[0]
        title = episode.get('sub_title_clean') or episode['sub_title']

        def get_clean_field(key):
            # Prefer the sanitized '<key>_clean' variant when present
            return episode.get(key + '_clean') or episode.get(key)

        series = get_clean_field('title')

        thumbnails = []
        # NOTE(review): loop interior reconstructed from the upstream
        # extractor; the original hunk elided it — confirm against upstream.
        for s, w, h in [('', 640, 360), ('_l', 1280, 720)]:
            img_path = episode.get('image' + s)
            if not img_path:
                continue
            thumbnails.append({
                'id': '%dp' % h,
                'height': h,
                'width': w,
                'url': 'https://www3.nhk.or.jp' + img_path,
            })

        info = {
            'id': episode_id + '-' + lang,
            'title': '%s - %s' % (series, title) if series and title else title,
            'description': get_clean_field('description'),
            'thumbnails': thumbnails,
            'series': series,
            'episode': title,
        }
        if is_video:
            # Video playback is hosted on Piksel; hand off via url_transparent
            vod_id = episode['vod_id']
            info.update({
                '_type': 'url_transparent',
                'ie_key': 'Piksel',
                'id': vod_id,
                'url': 'https://player.piksel.com/v/refid/nhkworld/prefid/' + vod_id,
            })
        else:
            if fetch_episode:
                audio_path = episode['audio']['audio']
                info['formats'] = self._extract_m3u8_formats(
                    'https://nhkworld-vh.akamaihd.net/i%s/master.m3u8' % audio_path,
                    episode_id, 'm4a', entry_protocol='m3u8_native',
                    m3u8_id='hls', fatal=False)
                for f in info['formats']:
                    f['language'] = lang
            else:
                # Listing pages lack audio paths: defer the actual
                # extraction to NhkVodIE for the episode URL
                info.update({
                    '_type': 'url_transparent',
                    'ie_key': NhkVodIE.ie_key(),
                    'url': url,
                })
        return info
class NhkVodIE(NhkBaseIE):
    """Extractor for a single NHK World-Japan on-demand episode or clip."""

    _VALID_URL = r'%s%s(?P<id>\d{7}|[^/]+?-\d{8}-[0-9a-z]+)' % (NhkBaseIE._BASE_URL_REGEX, NhkBaseIE._TYPE_REGEX)
    # Content available only for a limited period of time. Visit
    # https://www3.nhk.or.jp/nhkworld/en/ondemand/ for working samples.
    _TESTS = [{
        # video clip
        'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999011/',
        'md5': '256a1be14f48d960a7e61e2532d95ec3',
        # NOTE(review): info_dict fields reconstructed from the upstream
        # extractor; the original hunk elided them — confirm against upstream.
        'info_dict': {
            'id': 'a95j5iza',
            'ext': 'mp4',
            'title': "Dining with the Chef - Chef Saito's Family recipe: MENCHI-KATSU",
            'description': 'md5:5aee4a9f9d81c26281862382103b0ea5',
            'timestamp': 1565965194,
            'upload_date': '20190816',
        },
    }]

    def _real_extract(self, url):
        # All parsing/fetching lives in the shared base-class helper
        return self._extract_episode_info(url)
class NhkVodProgramIE(NhkBaseIE):
    """Playlist extractor for NHK World-Japan program pages.

    Lists all episodes (or, with ``?type=clip``, all clips) of a program
    and yields one entry per episode via the shared base-class helper.
    """

    _VALID_URL = r'%s/program%s(?P<id>[0-9a-z]+)(?:.+?\btype=(?P<episode_type>clip|(?:radio|tv)Episode))?' % (NhkBaseIE._BASE_URL_REGEX, NhkBaseIE._TYPE_REGEX)
    # Content available only for a limited period of time. Visit
    # https://www3.nhk.or.jp/nhkworld/en/ondemand/ for working samples.
    _TESTS = [{
        # video program episodes
        'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/program/video/japanrailway',
        'info_dict': {
            'id': 'japanrailway',
            'title': 'Japan Railway Journal',
        },
        'playlist_mincount': 1,
    }, {
        # video program clips
        'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/program/video/japanrailway/?type=clip',
        'info_dict': {
            'id': 'japanrailway',
            'title': 'Japan Railway Journal',
        },
        'playlist_mincount': 5,
    }, {
        'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/program/video/10yearshayaomiyazaki/',
        'only_matching': True,
    }, {
        # audio program
        'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/program/audio/listener/',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        lang, m_type, program_id, episode_type = re.match(self._VALID_URL, url).groups()

        episodes = self._call_api(
            program_id, lang, m_type == 'video', False, episode_type == 'clip')

        entries = []
        for episode in episodes:
            episode_path = episode.get('url')
            if not episode_path:
                # Listing entries without a URL cannot be extracted
                continue
            entries.append(self._extract_episode_info(
                urljoin(url, episode_path), episode))

        # Derive the playlist title from the first entry's series name
        program_title = None
        if entries:
            program_title = entries[0].get('series')

        return self.playlist_result(entries, program_id, program_title)