Update to 76fe4ba3b22a689ba7b5c6ea9064796ce1f4af55

[redditr] Extract duration
76fe4ba3b2
pull/280/head
pukkandan 5 years ago
parent c09b3b1318
commit c228e573cb

@ -9,6 +9,7 @@ import re
import time import time
from .common import InfoExtractor from .common import InfoExtractor
# from .anvato_token_generator import NFLTokenGenerator
from ..aes import aes_encrypt from ..aes import aes_encrypt
from ..compat import compat_str from ..compat import compat_str
from ..utils import ( from ..utils import (
@ -116,7 +117,76 @@ class AnvatoIE(InfoExtractor):
'anvato_scripps_app_ios_prod_409c41960c60b308db43c3cc1da79cab9f1c3d93': 'WPxj5GraLTkYCyj3M7RozLqIycjrXOEcDGFMIJPn', 'anvato_scripps_app_ios_prod_409c41960c60b308db43c3cc1da79cab9f1c3d93': 'WPxj5GraLTkYCyj3M7RozLqIycjrXOEcDGFMIJPn',
'EZqvRyKBJLrgpClDPDF8I7Xpdp40Vx73': '4OxGd2dEakylntVKjKF0UK9PDPYB6A9W', 'EZqvRyKBJLrgpClDPDF8I7Xpdp40Vx73': '4OxGd2dEakylntVKjKF0UK9PDPYB6A9W',
'M2v78QkpleXm9hPp9jUXI63x5vA6BogR': 'ka6K32k7ZALmpINkjJUGUo0OE42Md1BQ', 'M2v78QkpleXm9hPp9jUXI63x5vA6BogR': 'ka6K32k7ZALmpINkjJUGUo0OE42Md1BQ',
'nbcu_nbcd_desktop_web_prod_93d8ead38ce2024f8f544b78306fbd15895ae5e6_secure': 'NNemUkySjxLyPTKvZRiGntBIjEyK8uqicjMakIaQ' 'nbcu_nbcd_desktop_web_prod_93d8ead38ce2024f8f544b78306fbd15895ae5e6_secure': 'NNemUkySjxLyPTKvZRiGntBIjEyK8uqicjMakIaQ',
'X8POa4zPPaKVZHqmWjuEzfP31b1QM9VN': 'Dn5vOY9ooDw7VSl9qztjZI5o0g08mA0z',
'M2v78QkBMpNJlSPp9diX5F2PBmBy6Bog': 'ka6K32kyo7nDZfNkjQCGWf1lpApXMd1B',
'bvJ0dQpav07l0hG5JgfVLF2dv1vARwpP': 'BzoQW24GrJZoJfmNodiJKSPeB9B8NOxj',
'lxQMLg2XZKuEZaWgsqubBxV9INZ6bryY': 'Vm2Mx6noKds9jB71h6urazwlTG3m9x8l',
'04EnjvXeoSmkbJ9ckPs7oY0mcxv7PlyN': 'aXERQP9LMfQVlEDsgGs6eEA1SWznAQ8P',
'mQbO2ge6BFRWVPYCYpU06YvNt80XLvAX': 'E2BV1NGmasN5v7eujECVPJgwflnLPm2A',
'g43oeBzJrCml7o6fa5fRL1ErCdeD8z4K': 'RX34mZ6zVH4Nr6whbxIGLv9WSbxEKo8V',
'VQrDJoP7mtdBzkxhXbSPwGB1coeElk4x': 'j2VejQx0VFKQepAF7dI0mJLKtOVJE18z',
'WxA5NzLRjCrmq0NUgaU5pdMDuZO7RJ4w': 'lyY5ADLKaIOLEgAsGQCveEMAcqnx3rY9',
'M4lpMXB71ie0PjMCjdFzVXq0SeRVqz49': 'n2zVkOqaLIv3GbLfBjcwW51LcveWOZ2e',
'dyDZGEqN8u8nkJZcJns0oxYmtP7KbGAn': 'VXOEqQW9BtEVLajfZQSLEqxgS5B7qn2D',
'E7QNjrVY5u5mGvgu67IoDgV1CjEND8QR': 'rz8AaDmdKIkLmPNhB5ILPJnjS5PnlL8d',
'a4zrqjoKlfzg0dwHEWtP31VqcLBpjm4g': 'LY9J16gwETdGWa3hjBu5o0RzuoQDjqXQ',
'dQP5BZroMsMVLO1hbmT5r2Enu86GjxA6': '7XR3oOdbPF6x3PRFLDCq9RkgsRjAo48V',
'M4lKNBO1NFe0PjMCj1tzVXq0SeRVqzA9': 'n2zoRqGLRUv3GbLfBmTwW51LcveWOZYe',
'nAZ7MZdpGCGg1pqFEbsoJOz2C60mv143': 'dYJgdqA9aT4yojETqGi7yNgoFADxqmXP',
'3y1MERYgOuE9NzbFgwhV6Wv2F0YKvbyz': '081xpZDQgC4VadLTavhWQxrku56DAgXV',
'bmQvmEXr5HWklBMCZOcpE2Z3HBYwqGyl': 'zxXPbVNyMiMAZldhr9FkOmA0fl4aKr2v',
'wA7oDNYldfr6050Hwxi52lPZiVlB86Ap': 'ZYK16aA7ni0d3l3c34uwpxD7CbReMm8Q',
'g43MbKMWmFml7o7sJoSRkXxZiXRvJ3QK': 'RX3oBJonvs4Nr6rUWBCGn3matRGqJPXV',
'mA9VdlqpLS0raGaSDvtoqNrBTzb8XY4q': '0XN4OjBD3fnW7r7IbmtJB4AyfOmlrE2r',
'mAajOwgkGt17oGoFmEuklMP9H0GnW54d': 'lXbBLPGyzikNGeGujAuAJGjZiwLRxyXR',
'vy8vjJ9kbUwrRqRu59Cj5dWZfzYErlAb': 'K8l7gpwaGcBpnAnCLNCmPZRdin3eaQX0',
'xQMWBpR8oHEZaWaSMGUb0avOHjLVYn4Y': 'm2MrN4vEaf9jB7BFy5Srb40jTrN67AYl',
'xyKEmVO3miRr6D6UVkt7oB8jtD6aJEAv': 'g2ddDebqDfqdgKgswyUKwGjbTWwzq923',
'7Qk0wa2D9FjKapacoJF27aLvUDKkLGA0': 'b2kgBEkephJaMkMTL7s1PLe4Ua6WyP2P',
'3QLg6nqmNTJ5VvVTo7f508LPidz1xwyY': 'g2L1GgpraipmAOAUqmIbBnPxHOmw4MYa',
'3y1B7zZjXTE9NZNSzZSVNPZaTNLjo6Qz': '081b5G6wzH4VagaURmcWbN5mT4JGEe2V',
'lAqnwvkw6SG6D8DSqmUg6DRLUp0w3G4x': 'O2pbP0xPDFNJjpjIEvcdryOJtpkVM4X5',
'awA7xd1N0Hr6050Hw2c52lPZiVlB864p': 'GZYKpn4aoT0d3l3c3PiwpxD7CbReMmXQ',
'jQVqPLl9YHL1WGWtR1HDgWBGT63qRNyV': '6X03ne6vrU4oWyWUN7tQVoajikxJR3Ye',
'GQRMR8mL7uZK797t7xH3eNzPIP5dOny1': 'm2vqPWGd4U31zWzSyasDRAoMT1PKRp8o',
'zydq9RdmRhXLkNkfNoTJlMzaF0lWekQB': '3X7LnvE7vH5nkEkSqLiey793Un7dLB8e',
'VQrDzwkB2IdBzjzu9MHPbEYkSB50gR4x': 'j2VebLzoKUKQeEesmVh0gM1eIp9jKz8z',
'mAa2wMamBs17oGoFmktklMP9H0GnW54d': 'lXbgP74xZTkNGeGujVUAJGjZiwLRxy8R',
'7yjB6ZLG6sW8R6RF2xcan1KGfJ5dNoyd': 'wXQkPorvPHZ45N5t4Jf6qwg5Tp4xvw29',
'a4zPpNeWGuzg0m0iX3tPeanGSkRKWXQg': 'LY9oa3QAyHdGW9Wu3Ri5JGeEik7l1N8Q',
'k2rneA2M38k25cXDwwSknTJlxPxQLZ6M': '61lyA2aEVDzklfdwmmh31saPxQx2VRjp',
'bK9Zk4OvPnvxduLgxvi8VUeojnjA02eV': 'o5jANYjbeMb4nfBaQvcLAt1jzLzYx6ze',
'5VD6EydM3R9orHmNMGInGCJwbxbQvGRw': 'w3zjmX7g4vnxzCxElvUEOiewkokXprkZ',
'70X35QbVYVYNPUmP9YfbzI06YqYQk2R1': 'vG4Aj2BMjMjoztB7zeFOnCVPJpJ8lMOa',
'26qYwQVG9p1Bks2GgBckjfDJOXOAMgG1': 'r4ev9X0mv5zqJc0yk5IBDcQOwZw8mnwQ',
'rvVKpA56MBXWlSxMw3cobT5pdkd4Dm7q': '1J7ZkY53pZ645c93owcLZuveE7E8B3rL',
'qN1zdy1zlYL23IWZGWtDvfV6WeWQWkJo': 'qN1zdy1zlYL23IWZGWtDvfV6WeWQWkJo',
'jdKqRGF16dKsBviMDae7IGDl7oTjEbVV': 'Q09l7vhlNxPFErIOK6BVCe7KnwUW5DVV',
'3QLkogW1OUJ5VvPsrDH56DY2u7lgZWyY': 'g2LRE1V9espmAOPhE4ubj4ZdUA57yDXa',
'wyJvWbXGBSdbkEzhv0CW8meou82aqRy8': 'M2wolPvyBIpQGkbT4juedD4ruzQGdK2y',
'7QkdZrzEkFjKap6IYDU2PB0oCNZORmA0': 'b2kN1l96qhJaMkPs9dt1lpjBfwqZoA8P',
'pvA05113MHG1w3JTYxc6DVlRCjErVz4O': 'gQXeAbblBUnDJ7vujbHvbRd1cxlz3AXO',
'mA9blJDZwT0raG1cvkuoeVjLC7ZWd54q': '0XN9jRPwMHnW7rvumgfJZOD9CJgVkWYr',
'5QwRN5qKJTvGKlDTmnf7xwNZcjRmvEy9': 'R2GP6LWBJU1QlnytwGt0B9pytWwAdDYy',
'eyn5rPPbkfw2KYxH32fG1q58CbLJzM40': 'p2gyqooZnS56JWeiDgfmOy1VugOQEBXn',
'3BABn3b5RfPJGDwilbHe7l82uBoR05Am': '7OYZG7KMVhbPdKJS3xcWEN3AuDlLNmXj',
'xA5zNGXD3HrmqMlF6OS5pdMDuZO7RJ4w': 'yY5DAm6r1IOLE3BCVMFveEMAcqnx3r29',
'g43PgW3JZfml7o6fDEURL1ErCdeD8zyK': 'RX3aQn1zrS4Nr6whDgCGLv9WSbxEKo2V',
'lAqp8WbGgiG6D8LTKJcg3O72CDdre1Qx': 'O2pnm6473HNJjpKuVosd3vVeh975yrX5',
'wyJbYEDxKSdbkJ6S6RhW8meou82aqRy8': 'M2wPm7EgRSpQGlAh70CedD4ruzQGdKYy',
'M4lgW28nLCe0PVdtaXszVXq0SeRVqzA9': 'n2zmJvg4jHv3G0ETNgiwW51LcveWOZ8e',
'5Qw3OVvp9FvGKlDTmOC7xwNZcjRmvEQ9': 'R2GzDdml9F1Qlnytw9s0B9pytWwAdD8y',
'vy8a98X7zCwrRqbHrLUjYzwDiK2b70Qb': 'K8lVwzyjZiBpnAaSGeUmnAgxuGOBxmY0',
'g4eGjJLLoiqRD3Pf9oT5O03LuNbLRDQp': '6XqD59zzpfN4EwQuaGt67qNpSyRBlnYy',
'g43OPp9boIml7o6fDOIRL1ErCdeD8z4K': 'RX33alNB4s4Nr6whDPUGLv9WSbxEKoXV',
'xA2ng9OkBcGKzDbTkKsJlx7dUK8R3dA5': 'z2aPnJvzBfObkwGC3vFaPxeBhxoMqZ8K',
'xyKEgBajZuRr6DEC0Kt7XpD1cnNW9gAv': 'g2ddlEBvRsqdgKaI4jUK9PrgfMexGZ23',
'BAogww51jIMa2JnH1BcYpXM5F658RNAL': 'rYWDmm0KptlkGv4FGJFMdZmjs9RDE6XR',
'BAokpg62VtMa2JnH1mHYpXM5F658RNAL': 'rYWryDnlNslkGv4FG4HMdZmjs9RDE62R',
'a4z1Px5e2hzg0m0iMMCPeanGSkRKWXAg': 'LY9eorNQGUdGW9WuKKf5JGeEik7l1NYQ',
'kAx69R58kF9nY5YcdecJdl2pFXP53WyX': 'gXyRxELpbfPvLeLSaRil0mp6UEzbZJ8L',
'BAoY13nwViMa2J2uo2cY6BlETgmdwryL': 'rYWwKzJmNFlkGvGtNoUM9bzwIJVzB1YR',
} }
_MCP_TO_ACCESS_KEY_TABLE = { _MCP_TO_ACCESS_KEY_TABLE = {
@ -134,6 +204,10 @@ class AnvatoIE(InfoExtractor):
'telemundo': 'anvato_mcp_telemundo_web_prod_c5278d51ad46fda4b6ca3d0ea44a7846a054f582' 'telemundo': 'anvato_mcp_telemundo_web_prod_c5278d51ad46fda4b6ca3d0ea44a7846a054f582'
} }
_TOKEN_GENERATORS = {
# 'GXvEgwyJeWem8KCYXfeoHWknwP48Mboj': NFLTokenGenerator,
}
_API_KEY = '3hwbSuqqT690uxjNYBktSQpa5ZrpYYR0Iofx7NcJHyA' _API_KEY = '3hwbSuqqT690uxjNYBktSQpa5ZrpYYR0Iofx7NcJHyA'
_ANVP_RE = r'<script[^>]+\bdata-anvp\s*=\s*(["\'])(?P<anvp>(?:(?!\1).)+)\1' _ANVP_RE = r'<script[^>]+\bdata-anvp\s*=\s*(["\'])(?P<anvp>(?:(?!\1).)+)\1'
@ -189,19 +263,20 @@ class AnvatoIE(InfoExtractor):
video_data_url += '&X-Anvato-Adst-Auth=' + base64.b64encode(auth_secret).decode('ascii') video_data_url += '&X-Anvato-Adst-Auth=' + base64.b64encode(auth_secret).decode('ascii')
anvrid = md5_text(time.time() * 1000 * random.random())[:30] anvrid = md5_text(time.time() * 1000 * random.random())[:30]
payload = { api = {
'api': { 'anvrid': anvrid,
'anvrid': anvrid, 'anvts': server_time,
'anvstk': md5_text('%s|%s|%d|%s' % (
access_key, anvrid, server_time,
self._ANVACK_TABLE.get(access_key, self._API_KEY))),
'anvts': server_time,
},
} }
if access_key in self._TOKEN_GENERATORS:
api['anvstk2'] = self._TOKEN_GENERATORS[access_key].generate(self, access_key, video_id)
else:
api['anvstk'] = md5_text('%s|%s|%d|%s' % (
access_key, anvrid, server_time,
self._ANVACK_TABLE.get(access_key, self._API_KEY)))
return self._download_json( return self._download_json(
video_data_url, video_id, transform_source=strip_jsonp, video_data_url, video_id, transform_source=strip_jsonp,
data=json.dumps(payload).encode('utf-8')) data=json.dumps({'api': api}).encode('utf-8'))
def _get_anvato_videos(self, access_key, video_id): def _get_anvato_videos(self, access_key, video_id):
video_data = self._get_video_json(access_key, video_id) video_data = self._get_video_json(access_key, video_id)
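The hunk above replaces the single hard-coded 'anvstk' checksum with a choice between 'anvstk' and a generator-produced 'anvstk2'. A minimal standalone sketch of that request-body logic (function and variable names here are illustrative, not extractor code; server_time is an integer epoch timestamp):

import hashlib
import json


def md5_text(text):
    # same hashing helper the extractor uses for the anvstk checksum
    return hashlib.md5(str(text).encode('utf-8')).hexdigest()


def build_anvato_api_payload(access_key, anvrid, server_time,
                             anvack_table, api_key, token_generators,
                             ie=None, video_id=None):
    # anvrid/anvts always go in; the security token depends on whether a
    # token generator is registered for this access key.
    api = {'anvrid': anvrid, 'anvts': server_time}
    if access_key in token_generators:
        api['anvstk2'] = token_generators[access_key].generate(ie, access_key, video_id)
    else:
        api['anvstk'] = md5_text('%s|%s|%d|%s' % (
            access_key, anvrid, server_time,
            anvack_table.get(access_key, api_key)))
    return json.dumps({'api': api}).encode('utf-8')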
@ -259,7 +334,7 @@ class AnvatoIE(InfoExtractor):
'description': video_data.get('def_description'), 'description': video_data.get('def_description'),
'tags': video_data.get('def_tags', '').split(','), 'tags': video_data.get('def_tags', '').split(','),
'categories': video_data.get('categories'), 'categories': video_data.get('categories'),
'thumbnail': video_data.get('thumbnail'), 'thumbnail': video_data.get('src_image_url') or video_data.get('thumbnail'),
'timestamp': int_or_none(video_data.get( 'timestamp': int_or_none(video_data.get(
'ts_published') or video_data.get('ts_added')), 'ts_published') or video_data.get('ts_added')),
'uploader': video_data.get('mcp_id'), 'uploader': video_data.get('mcp_id'),

@ -0,0 +1,7 @@
from __future__ import unicode_literals
from .nfl import NFLTokenGenerator
__all__ = [
'NFLTokenGenerator',
]

@ -0,0 +1,6 @@
from __future__ import unicode_literals
class TokenGenerator:
def generate(self, anvack, mcp_id):
raise NotImplementedError('This method must be implemented by subclasses')

@ -0,0 +1,30 @@
from __future__ import unicode_literals
import json
from .common import TokenGenerator
class NFLTokenGenerator(TokenGenerator):
_AUTHORIZATION = None
def generate(ie, anvack, mcp_id):
if not NFLTokenGenerator._AUTHORIZATION:
reroute = ie._download_json(
'https://api.nfl.com/v1/reroute', mcp_id,
data=b'grant_type=client_credentials',
headers={'X-Domain-Id': 100})
NFLTokenGenerator._AUTHORIZATION = '%s %s' % (reroute.get('token_type') or 'Bearer', reroute['access_token'])
return ie._download_json(
'https://api.nfl.com/v3/shield/', mcp_id, data=json.dumps({
'query': '''{
viewer {
mediaToken(anvack: "%s", id: %s) {
token
}
}
}''' % (anvack, mcp_id),
}).encode(), headers={
'Authorization': NFLTokenGenerator._AUTHORIZATION,
'Content-Type': 'application/json',
})['data']['viewer']['mediaToken']['token']
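The new NFLTokenGenerator above makes two HTTP calls: it first fetches a bearer token from the reroute endpoint, then queries the Shield GraphQL API for a media token. A rough standalone sketch of the same flow using the third-party requests library (not part of this codebase; whether these endpoints accept the request outside the extractor's usual headers is not verified here):

import json
import requests  # third-party; the extractor itself goes through ie._download_json


def nfl_media_token(anvack, mcp_id):
    # 1. bearer token from the reroute endpoint
    reroute = requests.post(
        'https://api.nfl.com/v1/reroute',
        data={'grant_type': 'client_credentials'},
        headers={'X-Domain-Id': '100'}).json()
    authorization = '%s %s' % (
        reroute.get('token_type') or 'Bearer', reroute['access_token'])
    # 2. media token for this anvack/mcp_id from the Shield GraphQL API
    query = '''{
  viewer {
    mediaToken(anvack: "%s", id: %s) {
      token
    }
  }
}''' % (anvack, mcp_id)
    resp = requests.post(
        'https://api.nfl.com/v3/shield/',
        data=json.dumps({'query': query}).encode(),
        headers={'Authorization': authorization,
                 'Content-Type': 'application/json'}).json()
    return resp['data']['viewer']['mediaToken']['token']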

@ -1,27 +1,91 @@
# coding: utf-8 # coding: utf-8
from __future__ import unicode_literals from __future__ import unicode_literals
import functools
import re import re
from .common import InfoExtractor from .common import InfoExtractor
from .kaltura import KalturaIE from .kaltura import KalturaIE
from ..utils import extract_attributes from ..utils import (
extract_attributes,
int_or_none,
OnDemandPagedList,
parse_age_limit,
strip_or_none,
try_get,
)
class AsianCrushBaseIE(InfoExtractor):
_VALID_URL_BASE = r'https?://(?:www\.)?(?P<host>(?:(?:asiancrush|yuyutv|midnightpulp)\.com|(?:cocoro|retrocrush)\.tv))'
_KALTURA_KEYS = [
'video_url', 'progressive_url', 'download_url', 'thumbnail_url',
'widescreen_thumbnail_url', 'screencap_widescreen',
]
_API_SUFFIX = {'retrocrush.tv': '-ott'}
def _call_api(self, host, endpoint, video_id, query, resource):
return self._download_json(
'https://api%s.%s/%s' % (self._API_SUFFIX.get(host, ''), host, endpoint), video_id,
'Downloading %s JSON metadata' % resource, query=query,
headers=self.geo_verification_headers())['objects']
def _download_object_data(self, host, object_id, resource):
return self._call_api(
host, 'search', object_id, {'id': object_id}, resource)[0]
def _get_object_description(self, obj):
return strip_or_none(obj.get('long_description') or obj.get('short_description'))
def _parse_video_data(self, video):
title = video['name']
entry_id, partner_id = [None] * 2
for k in self._KALTURA_KEYS:
k_url = video.get(k)
if k_url:
mobj = re.search(r'/p/(\d+)/.+?/entryId/([^/]+)/', k_url)
if mobj:
partner_id, entry_id = mobj.groups()
break
meta_categories = try_get(video, lambda x: x['meta']['categories'], list) or []
categories = list(filter(None, [c.get('name') for c in meta_categories]))
show_info = video.get('show_info') or {}
return {
'_type': 'url_transparent',
'url': 'kaltura:%s:%s' % (partner_id, entry_id),
'ie_key': KalturaIE.ie_key(),
'id': entry_id,
'title': title,
'description': self._get_object_description(video),
'age_limit': parse_age_limit(video.get('mpaa_rating') or video.get('tv_rating')),
'categories': categories,
'series': show_info.get('show_name'),
'season_number': int_or_none(show_info.get('season_num')),
'season_id': show_info.get('season_id'),
'episode_number': int_or_none(show_info.get('episode_num')),
}
class AsianCrushIE(InfoExtractor): class AsianCrushIE(AsianCrushBaseIE):
_VALID_URL_BASE = r'https?://(?:www\.)?(?P<host>(?:(?:asiancrush|yuyutv|midnightpulp)\.com|cocoro\.tv))' _VALID_URL = r'%s/video/(?:[^/]+/)?0+(?P<id>\d+)v\b' % AsianCrushBaseIE._VALID_URL_BASE
_VALID_URL = r'%s/video/(?:[^/]+/)?0+(?P<id>\d+)v\b' % _VALID_URL_BASE
_TESTS = [{ _TESTS = [{
'url': 'https://www.asiancrush.com/video/012869v/women-who-flirt/', 'url': 'https://www.asiancrush.com/video/004289v/women-who-flirt',
'md5': 'c3b740e48d0ba002a42c0b72857beae6', 'md5': 'c3b740e48d0ba002a42c0b72857beae6',
'info_dict': { 'info_dict': {
'id': '1_y4tmjm5r', 'id': '1_y4tmjm5r',
'ext': 'mp4', 'ext': 'mp4',
'title': 'Women Who Flirt', 'title': 'Women Who Flirt',
'description': 'md5:7e986615808bcfb11756eb503a751487', 'description': 'md5:b65c7e0ae03a85585476a62a186f924c',
'timestamp': 1496936429, 'timestamp': 1496936429,
'upload_date': '20170608', 'upload_date': '20170608',
'uploader_id': 'craig@crifkin.com', 'uploader_id': 'craig@crifkin.com',
'age_limit': 13,
'categories': 'count:5',
'duration': 5812,
}, },
}, { }, {
'url': 'https://www.asiancrush.com/video/she-was-pretty/011886v-pretty-episode-3/', 'url': 'https://www.asiancrush.com/video/she-was-pretty/011886v-pretty-episode-3/',
@ -41,67 +105,35 @@ class AsianCrushIE(InfoExtractor):
}, { }, {
'url': 'https://www.cocoro.tv/video/the-wonderful-wizard-of-oz/008878v-the-wonderful-wizard-of-oz-ep01/', 'url': 'https://www.cocoro.tv/video/the-wonderful-wizard-of-oz/008878v-the-wonderful-wizard-of-oz-ep01/',
'only_matching': True, 'only_matching': True,
}, {
'url': 'https://www.retrocrush.tv/video/true-tears/012328v-i...gave-away-my-tears',
'only_matching': True,
}] }]
def _real_extract(self, url): def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url) host, video_id = re.match(self._VALID_URL, url).groups()
host = mobj.group('host')
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
entry_id, partner_id, title = [None] * 3 if host == 'cocoro.tv':
webpage = self._download_webpage(url, video_id)
vars = self._parse_json( embed_vars = self._parse_json(self._search_regex(
self._search_regex(
r'iEmbedVars\s*=\s*({.+?})', webpage, 'embed vars', r'iEmbedVars\s*=\s*({.+?})', webpage, 'embed vars',
default='{}'), video_id, fatal=False) default='{}'), video_id, fatal=False) or {}
if vars: video_id = embed_vars.get('entry_id') or video_id
entry_id = vars.get('entry_id')
partner_id = vars.get('partner_id')
title = vars.get('vid_label')
if not entry_id:
entry_id = self._search_regex(
r'\bentry_id["\']\s*:\s*["\'](\d+)', webpage, 'entry id')
player = self._download_webpage( video = self._download_object_data(host, video_id, 'video')
'https://api.%s/embeddedVideoPlayer' % host, video_id, return self._parse_video_data(video)
query={'id': entry_id})
kaltura_id = self._search_regex(
r'entry_id["\']\s*:\s*(["\'])(?P<id>(?:(?!\1).)+)\1', player,
'kaltura id', group='id')
if not partner_id: class AsianCrushPlaylistIE(AsianCrushBaseIE):
partner_id = self._search_regex( _VALID_URL = r'%s/series/0+(?P<id>\d+)s\b' % AsianCrushBaseIE._VALID_URL_BASE
r'/p(?:artner_id)?/(\d+)', player, 'partner id',
default='513551')
description = self._html_search_regex(
r'(?s)<div[^>]+\bclass=["\']description["\'][^>]*>(.+?)</div>',
webpage, 'description', fatal=False)
return {
'_type': 'url_transparent',
'url': 'kaltura:%s:%s' % (partner_id, kaltura_id),
'ie_key': KalturaIE.ie_key(),
'id': video_id,
'title': title,
'description': description,
}
class AsianCrushPlaylistIE(InfoExtractor):
_VALID_URL = r'%s/series/0+(?P<id>\d+)s\b' % AsianCrushIE._VALID_URL_BASE
_TESTS = [{ _TESTS = [{
'url': 'https://www.asiancrush.com/series/012481s/scholar-walks-night/', 'url': 'https://www.asiancrush.com/series/006447s/fruity-samurai',
'info_dict': { 'info_dict': {
'id': '12481', 'id': '6447',
'title': 'Scholar Who Walks the Night', 'title': 'Fruity Samurai',
'description': 'md5:7addd7c5132a09fd4741152d96cce886', 'description': 'md5:7535174487e4a202d3872a7fc8f2f154',
}, },
'playlist_count': 20, 'playlist_count': 13,
}, { }, {
'url': 'https://www.yuyutv.com/series/013920s/peep-show/', 'url': 'https://www.yuyutv.com/series/013920s/peep-show/',
'only_matching': True, 'only_matching': True,
@ -111,35 +143,58 @@ class AsianCrushPlaylistIE(InfoExtractor):
}, { }, {
'url': 'https://www.cocoro.tv/series/008549s/the-wonderful-wizard-of-oz/', 'url': 'https://www.cocoro.tv/series/008549s/the-wonderful-wizard-of-oz/',
'only_matching': True, 'only_matching': True,
}, {
'url': 'https://www.retrocrush.tv/series/012355s/true-tears',
'only_matching': True,
}] }]
_PAGE_SIZE = 1000000000
def _fetch_page(self, domain, parent_id, page):
videos = self._call_api(
domain, 'getreferencedobjects', parent_id, {
'max': self._PAGE_SIZE,
'object_type': 'video',
'parent_id': parent_id,
'start': page * self._PAGE_SIZE,
}, 'page %d' % (page + 1))
for video in videos:
yield self._parse_video_data(video)
def _real_extract(self, url): def _real_extract(self, url):
playlist_id = self._match_id(url) host, playlist_id = re.match(self._VALID_URL, url).groups()
webpage = self._download_webpage(url, playlist_id) if host == 'cocoro.tv':
webpage = self._download_webpage(url, playlist_id)
entries = []
entries = []
for mobj in re.finditer(
r'<a[^>]+href=(["\'])(?P<url>%s.*?)\1[^>]*>' % AsianCrushIE._VALID_URL, for mobj in re.finditer(
webpage): r'<a[^>]+href=(["\'])(?P<url>%s.*?)\1[^>]*>' % AsianCrushIE._VALID_URL,
attrs = extract_attributes(mobj.group(0)) webpage):
if attrs.get('class') == 'clearfix': attrs = extract_attributes(mobj.group(0))
entries.append(self.url_result( if attrs.get('class') == 'clearfix':
mobj.group('url'), ie=AsianCrushIE.ie_key())) entries.append(self.url_result(
mobj.group('url'), ie=AsianCrushIE.ie_key()))
title = self._html_search_regex(
r'(?s)<h1\b[^>]\bid=["\']movieTitle[^>]+>(.+?)</h1>', webpage, title = self._html_search_regex(
'title', default=None) or self._og_search_title( r'(?s)<h1\b[^>]\bid=["\']movieTitle[^>]+>(.+?)</h1>', webpage,
webpage, default=None) or self._html_search_meta( 'title', default=None) or self._og_search_title(
'twitter:title', webpage, 'title', webpage, default=None) or self._html_search_meta(
default=None) or self._search_regex( 'twitter:title', webpage, 'title',
r'<title>([^<]+)</title>', webpage, 'title', fatal=False) default=None) or self._search_regex(
if title: r'<title>([^<]+)</title>', webpage, 'title', fatal=False)
title = re.sub(r'\s*\|\s*.+?$', '', title) if title:
title = re.sub(r'\s*\|\s*.+?$', '', title)
description = self._og_search_description(
webpage, default=None) or self._html_search_meta( description = self._og_search_description(
'twitter:description', webpage, 'description', fatal=False) webpage, default=None) or self._html_search_meta(
'twitter:description', webpage, 'description', fatal=False)
else:
show = self._download_object_data(host, playlist_id, 'show')
title = show.get('name')
description = self._get_object_description(show)
entries = OnDemandPagedList(
functools.partial(self._fetch_page, host, playlist_id),
self._PAGE_SIZE)
return self.playlist_result(entries, playlist_id, title, description) return self.playlist_result(entries, playlist_id, title, description)
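For non-cocoro hosts the playlist extractor above now pages through the API lazily via OnDemandPagedList. A minimal sketch of that pattern (page size and catalog data are made up; the import path assumes the upstream youtube_dl package name, adjust for the fork):

import functools

from youtube_dl.utils import OnDemandPagedList  # adjust module name for the fork

PAGE_SIZE = 3
CATALOG = ['vid%02d' % i for i in range(10)]  # stand-in for the API's video objects


def fetch_page(parent_id, page):
    # called lazily with a 0-based page index; may return a list or yield items
    start = page * PAGE_SIZE
    for vid in CATALOG[start:start + PAGE_SIZE]:
        yield {'id': vid, 'series': parent_id}


entries = OnDemandPagedList(functools.partial(fetch_page, 'demo-show'), PAGE_SIZE)
# pages are fetched on demand; only as many as the requested slice needs
print([e['id'] for e in entries.getslice(0, 5)])  # first five entries, two "pages"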

@ -750,8 +750,14 @@ from .nexx import (
NexxIE, NexxIE,
NexxEmbedIE, NexxEmbedIE,
) )
from .nfl import NFLIE from .nfl import (
from .nhk import NhkVodIE NFLIE,
NFLArticleIE,
)
from .nhk import (
NhkVodIE,
NhkVodProgramIE,
)
from .nhl import NHLIE from .nhl import NHLIE
from .nick import ( from .nick import (
NickIE, NickIE,
@ -768,7 +774,6 @@ from .nintendo import NintendoIE
from .nitter import NitterIE from .nitter import NitterIE
from .njpwworld import NJPWWorldIE from .njpwworld import NJPWWorldIE
from .nobelprize import NobelPrizeIE from .nobelprize import NobelPrizeIE
from .noco import NocoIE
from .nonktube import NonkTubeIE from .nonktube import NonkTubeIE
from .noovo import NoovoIE from .noovo import NoovoIE
from .normalboots import NormalbootsIE from .normalboots import NormalbootsIE
@ -1253,6 +1258,10 @@ from .tv2dk import (
from .tv2hu import TV2HuIE from .tv2hu import TV2HuIE
from .tv4 import TV4IE from .tv4 import TV4IE
from .tv5mondeplus import TV5MondePlusIE from .tv5mondeplus import TV5MondePlusIE
from .tv5unis import (
TV5UnisVideoIE,
TV5UnisIE,
)
from .tva import ( from .tva import (
TVAIE, TVAIE,
QubIE, QubIE,
@ -1558,7 +1567,6 @@ from .youtube import (
YoutubeWatchLaterIE, YoutubeWatchLaterIE,
) )
from .zapiks import ZapiksIE from .zapiks import ZapiksIE
from .zaq1 import Zaq1IE
from .zattoo import ( from .zattoo import (
BBVTVIE, BBVTVIE,
EinsUndEinsTVIE, EinsUndEinsTVIE,

@ -4,19 +4,15 @@ from __future__ import unicode_literals
import re import re
from .common import InfoExtractor from .common import InfoExtractor
from ..compat import (
compat_urllib_parse_urlparse,
)
from ..utils import ( from ..utils import (
ExtractorError, clean_html,
int_or_none, determine_ext,
remove_end, get_element_by_class,
) )
class NFLIE(InfoExtractor): class NFLBaseIE(InfoExtractor):
IE_NAME = 'nfl.com' _VALID_URL_BASE = r'''(?x)
_VALID_URL = r'''(?x)
https?:// https?://
(?P<host> (?P<host>
(?:www\.)? (?:www\.)?
@ -34,15 +30,15 @@ class NFLIE(InfoExtractor):
houstontexans| houstontexans|
colts| colts|
jaguars| jaguars|
titansonline| (?:titansonline|tennesseetitans)|
denverbroncos| denverbroncos|
kcchiefs| (?:kc)?chiefs|
raiders| raiders|
chargers| chargers|
dallascowboys| dallascowboys|
giants| giants|
philadelphiaeagles| philadelphiaeagles|
redskins| (?:redskins|washingtonfootball)|
chicagobears| chicagobears|
detroitlions| detroitlions|
packers| packers|
@ -52,180 +48,113 @@ class NFLIE(InfoExtractor):
neworleanssaints| neworleanssaints|
buccaneers| buccaneers|
azcardinals| azcardinals|
stlouisrams| (?:stlouis|the)rams|
49ers| 49ers|
seahawks seahawks
)\.com| )\.com|
.+?\.clubs\.nfl\.com .+?\.clubs\.nfl\.com
) )
)/ )/
(?:.+?/)*
(?P<id>[^/#?&]+)
''' '''
_VIDEO_CONFIG_REGEX = r'<script[^>]+id="[^"]*video-config-[0-9a-f]{8}-(?:[0-9a-f]{4}-){3}[0-9a-f]{12}[^"]*"[^>]*>\s*({.+})'
_WORKING = False
def _parse_video_config(self, video_config, display_id):
video_config = self._parse_json(video_config, display_id)
item = video_config['playlist'][0]
mcp_id = item.get('mcpID')
if mcp_id:
info = self.url_result(
'anvato:GXvEgwyJeWem8KCYXfeoHWknwP48Mboj:' + mcp_id,
'Anvato', mcp_id)
else:
media_id = item.get('id') or item['entityId']
title = item['title']
item_url = item['url']
info = {'id': media_id}
ext = determine_ext(item_url)
if ext == 'm3u8':
info['formats'] = self._extract_m3u8_formats(item_url, media_id, 'mp4')
self._sort_formats(info['formats'])
else:
info['url'] = item_url
if item.get('audio') is True:
info['vcodec'] = 'none'
is_live = video_config.get('live') is True
thumbnails = None
image_url = item.get(item.get('imageSrc')) or item.get(item.get('posterImage'))
if image_url:
thumbnails = [{
'url': image_url,
'ext': determine_ext(image_url, 'jpg'),
}]
info.update({
'title': self._live_title(title) if is_live else title,
'is_live': is_live,
'description': clean_html(item.get('description')),
'thumbnails': thumbnails,
})
return info
class NFLIE(NFLBaseIE):
IE_NAME = 'nfl.com'
_VALID_URL = NFLBaseIE._VALID_URL_BASE + r'(?:videos?|listen|audio)/(?P<id>[^/#?&]+)'
_TESTS = [{ _TESTS = [{
'url': 'http://www.nfl.com/videos/nfl-game-highlights/0ap3000000398478/Week-3-Redskins-vs-Eagles-highlights', 'url': 'https://www.nfl.com/videos/baker-mayfield-s-game-changing-plays-from-3-td-game-week-14',
'md5': '394ef771ddcd1354f665b471d78ec4c6',
'info_dict': { 'info_dict': {
'id': '0ap3000000398478', 'id': '899441',
'ext': 'mp4', 'ext': 'mp4',
'title': 'Week 3: Redskins vs. Eagles highlights', 'title': "Baker Mayfield's game-changing plays from 3-TD game Week 14",
'description': 'md5:56323bfb0ac4ee5ab24bd05fdf3bf478', 'description': 'md5:85e05a3cc163f8c344340f220521136d',
'upload_date': '20140921', 'upload_date': '20201215',
'timestamp': 1411337580, 'timestamp': 1608009755,
'thumbnail': r're:^https?://.*\.jpg$', 'thumbnail': r're:^https?://.*\.jpg$',
'uploader': 'NFL',
} }
}, { }, {
'url': 'http://prod.www.steelers.clubs.nfl.com/video-and-audio/videos/LIVE_Post_Game_vs_Browns/9d72f26a-9e2b-4718-84d3-09fb4046c266', 'url': 'https://www.chiefs.com/listen/patrick-mahomes-travis-kelce-react-to-win-over-dolphins-the-breakdown',
'md5': 'cf85bdb4bc49f6e9d3816d130c78279c', 'md5': '6886b32c24b463038c760ceb55a34566',
'info_dict': { 'info_dict': {
'id': '9d72f26a-9e2b-4718-84d3-09fb4046c266', 'id': 'd87e8790-3e14-11eb-8ceb-ff05c2867f99',
'ext': 'mp4', 'ext': 'mp3',
'title': 'LIVE: Post Game vs. Browns', 'title': 'Patrick Mahomes, Travis Kelce React to Win Over Dolphins | The Breakdown',
'description': 'md5:6a97f7e5ebeb4c0e69a418a89e0636e8', 'description': 'md5:12ada8ee70e6762658c30e223e095075',
'upload_date': '20131229',
'timestamp': 1388354455,
'thumbnail': r're:^https?://.*\.jpg$',
} }
}, { }, {
'url': 'http://www.nfl.com/news/story/0ap3000000467586/article/patriots-seahawks-involved-in-lategame-skirmish', 'url': 'https://www.buffalobills.com/video/buffalo-bills-military-recognition-week-14',
'info_dict': {
'id': '0ap3000000467607',
'ext': 'mp4',
'title': 'Frustrations flare on the field',
'description': 'Emotions ran high at the end of the Super Bowl on both sides of the ball after a dramatic finish.',
'timestamp': 1422850320,
'upload_date': '20150202',
},
}, {
'url': 'http://www.patriots.com/video/2015/09/18/10-days-gillette',
'md5': '4c319e2f625ffd0b481b4382c6fc124c',
'info_dict': {
'id': 'n-238346',
'ext': 'mp4',
'title': '10 Days at Gillette',
'description': 'md5:8cd9cd48fac16de596eadc0b24add951',
'timestamp': 1442618809,
'upload_date': '20150918',
},
}, {
# lowercase data-contentid
'url': 'http://www.steelers.com/news/article-1/Tomlin-on-Ben-getting-Vick-ready/56399c96-4160-48cf-a7ad-1d17d4a3aef7',
'info_dict': {
'id': '12693586-6ea9-4743-9c1c-02c59e4a5ef2',
'ext': 'mp4',
'title': 'Tomlin looks ahead to Ravens on a short week',
'description': 'md5:32f3f7b139f43913181d5cbb24ecad75',
'timestamp': 1443459651,
'upload_date': '20150928',
},
'params': {
'skip_download': True,
},
}, {
'url': 'http://www.nfl.com/videos/nfl-network-top-ten/09000d5d810a6bd4/Top-10-Gutsiest-Performances-Jack-Youngblood',
'only_matching': True, 'only_matching': True,
}, { }, {
'url': 'http://www.buffalobills.com/video/videos/Rex_Ryan_Show_World_Wide_Rex/b1dcfab2-3190-4bb1-bfc0-d6e603d6601a', 'url': 'https://www.raiders.com/audio/instant-reactions-raiders-week-14-loss-to-indianapolis-colts-espn-jason-fitz',
'only_matching': True, 'only_matching': True,
}] }]
@staticmethod
def prepend_host(host, url):
if not url.startswith('http'):
if not url.startswith('/'):
url = '/%s' % url
url = 'http://{0:}{1:}'.format(host, url)
return url
@staticmethod
def format_from_stream(stream, protocol, host, path_prefix='',
preference=0, note=None):
url = '{protocol:}://{host:}/{prefix:}{path:}'.format(
protocol=protocol,
host=host,
prefix=path_prefix,
path=stream.get('path'),
)
return {
'url': url,
'vbr': int_or_none(stream.get('rate', 0), 1000),
'preference': preference,
'format_note': note,
}
def _real_extract(self, url): def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url) display_id = self._match_id(url)
video_id, host = mobj.group('id'), mobj.group('host') webpage = self._download_webpage(url, display_id)
return self._parse_video_config(self._search_regex(
webpage = self._download_webpage(url, video_id) self._VIDEO_CONFIG_REGEX, webpage, 'video config'), display_id)
config_url = NFLIE.prepend_host(host, self._search_regex(
r'(?:(?:config|configURL)\s*:\s*|<nflcs:avplayer[^>]+data-config\s*=\s*)(["\'])(?P<config>.+?)\1',
webpage, 'config URL', default='static/content/static/config/video/config.json',
group='config'))
# For articles, the id in the url is not the video id
video_id = self._search_regex(
r'(?:<nflcs:avplayer[^>]+data-content[Ii]d\s*=\s*|content[Ii]d\s*:\s*)(["\'])(?P<id>(?:(?!\1).)+)\1',
webpage, 'video id', default=video_id, group='id')
config = self._download_json(config_url, video_id, 'Downloading player config')
url_template = NFLIE.prepend_host(
host, '{contentURLTemplate:}'.format(**config))
video_data = self._download_json(
url_template.format(id=video_id), video_id)
formats = []
cdn_data = video_data.get('cdnData', {})
streams = cdn_data.get('bitrateInfo', [])
if cdn_data.get('format') == 'EXTERNAL_HTTP_STREAM':
parts = compat_urllib_parse_urlparse(cdn_data.get('uri'))
protocol, host = parts.scheme, parts.netloc
for stream in streams:
formats.append(
NFLIE.format_from_stream(stream, protocol, host))
else:
cdns = config.get('cdns')
if not cdns:
raise ExtractorError('Failed to get CDN data', expected=True)
for name, cdn in cdns.items():
# LimeLight streams don't seem to work
if cdn.get('name') == 'LIMELIGHT':
continue
protocol = cdn.get('protocol')
host = remove_end(cdn.get('host', ''), '/')
if not (protocol and host):
continue
prefix = cdn.get('pathprefix', '') class NFLArticleIE(NFLBaseIE):
if prefix and not prefix.endswith('/'): IE_NAME = 'nfl.com:article'
prefix = '%s/' % prefix _VALID_URL = NFLBaseIE._VALID_URL_BASE + r'news/(?P<id>[^/#?&]+)'
_TEST = {
preference = 0 'url': 'https://www.buffalobills.com/news/the-only-thing-we-ve-earned-is-the-noise-bills-coaches-discuss-handling-rising-e',
if protocol == 'rtmp': 'info_dict': {
preference = -2 'id': 'the-only-thing-we-ve-earned-is-the-noise-bills-coaches-discuss-handling-rising-e',
elif 'prog' in name.lower(): 'title': "'The only thing we've earned is the noise' | Bills coaches discuss handling rising expectations",
preference = 1 },
'playlist_count': 4,
for stream in streams: }
formats.append(
NFLIE.format_from_stream(stream, protocol, host,
prefix, preference, name))
self._sort_formats(formats)
thumbnail = None
for q in ('xl', 'l', 'm', 's', 'xs'):
thumbnail = video_data.get('imagePaths', {}).get(q)
if thumbnail:
break
return { def _real_extract(self, url):
'id': video_id, display_id = self._match_id(url)
'title': video_data.get('headline'), webpage = self._download_webpage(url, display_id)
'formats': formats, entries = []
'description': video_data.get('caption'), for video_config in re.findall(self._VIDEO_CONFIG_REGEX, webpage):
'duration': video_data.get('duration'), entries.append(self._parse_video_config(video_config, display_id))
'thumbnail': thumbnail, title = clean_html(get_element_by_class(
'timestamp': int_or_none(video_data.get('posted'), 1000), 'nfl-c-article__title', webpage)) or self._html_search_meta(
} ['og:title', 'twitter:title'], webpage)
return self.playlist_result(entries, display_id, title)
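Both NFLIE and NFLArticleIE above key off _VIDEO_CONFIG_REGEX. A self-contained demonstration against an invented script tag (the id and JSON payload are made up, but shaped like what the pattern expects):

import json
import re

_VIDEO_CONFIG_REGEX = r'<script[^>]+id="[^"]*video-config-[0-9a-f]{8}-(?:[0-9a-f]{4}-){3}[0-9a-f]{12}[^"]*"[^>]*>\s*({.+})'

webpage = ('<script type="application/json" '
           'id="video-config-12345678-1234-1234-1234-123456789abc">'
           '{"playlist": [{"mcpID": "899441", "title": "Sample clip"}], "live": false}</script>')

for video_config in re.findall(_VIDEO_CONFIG_REGEX, webpage):
    print(json.loads(video_config)['playlist'][0]['mcpID'])  # 899441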

@ -3,10 +3,88 @@ from __future__ import unicode_literals
import re import re
from .common import InfoExtractor from .common import InfoExtractor
from ..utils import urljoin
class NhkVodIE(InfoExtractor): class NhkBaseIE(InfoExtractor):
_VALID_URL = r'https?://www3\.nhk\.or\.jp/nhkworld/(?P<lang>[a-z]{2})/ondemand/(?P<type>video|audio)/(?P<id>\d{7}|[^/]+?-\d{8}-\d+)' _API_URL_TEMPLATE = 'https://api.nhk.or.jp/nhkworld/%sod%slist/v7a/%s/%s/%s/all%s.json'
_BASE_URL_REGEX = r'https?://www3\.nhk\.or\.jp/nhkworld/(?P<lang>[a-z]{2})/ondemand'
_TYPE_REGEX = r'/(?P<type>video|audio)/'
def _call_api(self, m_id, lang, is_video, is_episode, is_clip):
return self._download_json(
self._API_URL_TEMPLATE % (
'v' if is_video else 'r',
'clip' if is_clip else 'esd',
'episode' if is_episode else 'program',
m_id, lang, '/all' if is_video else ''),
m_id, query={'apikey': 'EJfK8jdS57GqlupFgAfAAwr573q01y6k'})['data']['episodes'] or []
def _extract_episode_info(self, url, episode=None):
fetch_episode = episode is None
lang, m_type, episode_id = re.match(NhkVodIE._VALID_URL, url).groups()
if episode_id.isdigit():
episode_id = episode_id[:4] + '-' + episode_id[4:]
is_video = m_type == 'video'
if fetch_episode:
episode = self._call_api(
episode_id, lang, is_video, True, episode_id[:4] == '9999')[0]
title = episode.get('sub_title_clean') or episode['sub_title']
def get_clean_field(key):
return episode.get(key + '_clean') or episode.get(key)
series = get_clean_field('title')
thumbnails = []
for s, w, h in [('', 640, 360), ('_l', 1280, 720)]:
img_path = episode.get('image' + s)
if not img_path:
continue
thumbnails.append({
'id': '%dp' % h,
'height': h,
'width': w,
'url': 'https://www3.nhk.or.jp' + img_path,
})
info = {
'id': episode_id + '-' + lang,
'title': '%s - %s' % (series, title) if series and title else title,
'description': get_clean_field('description'),
'thumbnails': thumbnails,
'series': series,
'episode': title,
}
if is_video:
vod_id = episode['vod_id']
info.update({
'_type': 'url_transparent',
'ie_key': 'Piksel',
'url': 'https://player.piksel.com/v/refid/nhkworld/prefid/' + vod_id,
'id': vod_id,
})
else:
if fetch_episode:
audio_path = episode['audio']['audio']
info['formats'] = self._extract_m3u8_formats(
'https://nhkworld-vh.akamaihd.net/i%s/master.m3u8' % audio_path,
episode_id, 'm4a', entry_protocol='m3u8_native',
m3u8_id='hls', fatal=False)
for f in info['formats']:
f['language'] = lang
else:
info.update({
'_type': 'url_transparent',
'ie_key': NhkVodIE.ie_key(),
'url': url,
})
return info
class NhkVodIE(NhkBaseIE):
_VALID_URL = r'%s%s(?P<id>\d{7}|[^/]+?-\d{8}-[0-9a-z]+)' % (NhkBaseIE._BASE_URL_REGEX, NhkBaseIE._TYPE_REGEX)
# Content available only for a limited period of time. Visit # Content available only for a limited period of time. Visit
# https://www3.nhk.or.jp/nhkworld/en/ondemand/ for working samples. # https://www3.nhk.or.jp/nhkworld/en/ondemand/ for working samples.
_TESTS = [{ _TESTS = [{
@ -47,60 +125,54 @@ class NhkVodIE(InfoExtractor):
'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/audio/j_art-20150903-1/', 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/audio/j_art-20150903-1/',
'only_matching': True, 'only_matching': True,
}] }]
_API_URL_TEMPLATE = 'https://api.nhk.or.jp/nhkworld/%sod%slist/v7a/episode/%s/%s/all%s.json'
def _real_extract(self, url): def _real_extract(self, url):
lang, m_type, episode_id = re.match(self._VALID_URL, url).groups() return self._extract_episode_info(url)
if episode_id.isdigit():
episode_id = episode_id[:4] + '-' + episode_id[4:]
is_video = m_type == 'video'
episode = self._download_json(
self._API_URL_TEMPLATE % (
'v' if is_video else 'r',
'clip' if episode_id[:4] == '9999' else 'esd',
episode_id, lang, '/all' if is_video else ''),
episode_id, query={'apikey': 'EJfK8jdS57GqlupFgAfAAwr573q01y6k'})['data']['episodes'][0]
title = episode.get('sub_title_clean') or episode['sub_title']
def get_clean_field(key): class NhkVodProgramIE(NhkBaseIE):
return episode.get(key + '_clean') or episode.get(key) _VALID_URL = r'%s/program%s(?P<id>[0-9a-z]+)(?:.+?\btype=(?P<episode_type>clip|(?:radio|tv)Episode))?' % (NhkBaseIE._BASE_URL_REGEX, NhkBaseIE._TYPE_REGEX)
_TESTS = [{
# video program episodes
'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/program/video/japanrailway',
'info_dict': {
'id': 'japanrailway',
'title': 'Japan Railway Journal',
},
'playlist_mincount': 1,
}, {
# video program clips
'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/program/video/japanrailway/?type=clip',
'info_dict': {
'id': 'japanrailway',
'title': 'Japan Railway Journal',
},
'playlist_mincount': 5,
}, {
'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/program/video/10yearshayaomiyazaki/',
'only_matching': True,
}, {
# audio program
'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/program/audio/listener/',
'only_matching': True,
}]
series = get_clean_field('title') def _real_extract(self, url):
lang, m_type, program_id, episode_type = re.match(self._VALID_URL, url).groups()
thumbnails = [] episodes = self._call_api(
for s, w, h in [('', 640, 360), ('_l', 1280, 720)]: program_id, lang, m_type == 'video', False, episode_type == 'clip')
img_path = episode.get('image' + s)
if not img_path: entries = []
for episode in episodes:
episode_path = episode.get('url')
if not episode_path:
continue continue
thumbnails.append({ entries.append(self._extract_episode_info(
'id': '%dp' % h, urljoin(url, episode_path), episode))
'height': h,
'width': w,
'url': 'https://www3.nhk.or.jp' + img_path,
})
info = { program_title = None
'id': episode_id + '-' + lang, if entries:
'title': '%s - %s' % (series, title) if series and title else title, program_title = entries[0].get('series')
'description': get_clean_field('description'),
'thumbnails': thumbnails, return self.playlist_result(entries, program_id, program_title)
'series': series,
'episode': title,
}
if is_video:
info.update({
'_type': 'url_transparent',
'ie_key': 'Piksel',
'url': 'https://player.piksel.com/v/refid/nhkworld/prefid/' + episode['vod_id'],
})
else:
audio = episode['audio']
audio_path = audio['audio']
info['formats'] = self._extract_m3u8_formats(
'https://nhkworld-vh.akamaihd.net/i%s/master.m3u8' % audio_path,
episode_id, 'm4a', entry_protocol='m3u8_native',
m3u8_id='hls', fatal=False)
for f in info['formats']:
f['language'] = lang
return info
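All NHK World listings now go through the single _API_URL_TEMPLATE; the positional fields select video ('v') vs. radio ('r'), clip vs. regular ('esd') listings, and episode vs. program lookups. A self-contained sketch of how the URL is assembled (the episode id below is illustrative; the program slug is taken from the tests above):

_API_URL_TEMPLATE = 'https://api.nhk.or.jp/nhkworld/%sod%slist/v7a/%s/%s/%s/all%s.json'


def nhk_list_url(m_id, lang, is_video, is_episode, is_clip):
    return _API_URL_TEMPLATE % (
        'v' if is_video else 'r',
        'clip' if is_clip else 'esd',
        'episode' if is_episode else 'program',
        m_id, lang, '/all' if is_video else '')


print(nhk_list_url('2015-173', 'en', True, True, False))
# https://api.nhk.or.jp/nhkworld/vodesdlist/v7a/episode/2015-173/en/all/all.json
print(nhk_list_url('japanrailway', 'en', True, False, False))
# https://api.nhk.or.jp/nhkworld/vodesdlist/v7a/program/japanrailway/en/all/all.json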

@ -7,6 +7,7 @@ from ..utils import (
ExtractorError, ExtractorError,
int_or_none, int_or_none,
float_or_none, float_or_none,
try_get,
url_or_none, url_or_none,
) )
@ -59,6 +60,7 @@ class RedditRIE(InfoExtractor):
'timestamp': 1501941939, 'timestamp': 1501941939,
'upload_date': '20170805', 'upload_date': '20170805',
'uploader': 'Antw87', 'uploader': 'Antw87',
'duration': 12,
'like_count': int, 'like_count': int,
'dislike_count': int, 'dislike_count': int,
'comment_count': int, 'comment_count': int,
@ -123,6 +125,10 @@ class RedditRIE(InfoExtractor):
'thumbnail': url_or_none(data.get('thumbnail')), 'thumbnail': url_or_none(data.get('thumbnail')),
'timestamp': float_or_none(data.get('created_utc')), 'timestamp': float_or_none(data.get('created_utc')),
'uploader': data.get('author'), 'uploader': data.get('author'),
'duration': int_or_none(try_get(
data,
(lambda x: x['media']['reddit_video']['duration'],
lambda x: x['secure_media']['reddit_video']['duration']))),
'like_count': int_or_none(data.get('ups')), 'like_count': int_or_none(data.get('ups')),
'dislike_count': int_or_none(data.get('downs')), 'dislike_count': int_or_none(data.get('downs')),
'comment_count': int_or_none(data.get('num_comments')), 'comment_count': int_or_none(data.get('num_comments')),
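The duration itself comes from either media.reddit_video or secure_media.reddit_video, whichever is present; try_get runs the getters in order and swallows missing keys. A quick illustration (sample data invented; the import assumes the upstream youtube_dl package name, adjust for the fork):

from youtube_dl.utils import int_or_none, try_get  # adjust module name for the fork

data = {'secure_media': {'reddit_video': {'duration': 12}}}  # no 'media' key

duration = int_or_none(try_get(
    data,
    (lambda x: x['media']['reddit_video']['duration'],
     lambda x: x['secure_media']['reddit_video']['duration'])))
print(duration)  # 12 -- the first getter raises KeyError and is skipped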

@ -41,8 +41,8 @@ class SkyBaseIE(InfoExtractor):
class SkySportsIE(SkyBaseIE): class SkySportsIE(SkyBaseIE):
_VALID_URL = r'https?://(?:www\.)?skysports\.com/watch/video/(?P<id>[0-9]+)' _VALID_URL = r'https?://(?:www\.)?skysports\.com/watch/video/([^/]+/)*(?P<id>[0-9]+)'
_TEST = { _TESTS = [{
'url': 'http://www.skysports.com/watch/video/10328419/bale-its-our-time-to-shine', 'url': 'http://www.skysports.com/watch/video/10328419/bale-its-our-time-to-shine',
'md5': '77d59166cddc8d3cb7b13e35eaf0f5ec', 'md5': '77d59166cddc8d3cb7b13e35eaf0f5ec',
'info_dict': { 'info_dict': {
@ -52,7 +52,13 @@ class SkySportsIE(SkyBaseIE):
'description': 'md5:e88bda94ae15f7720c5cb467e777bb6d', 'description': 'md5:e88bda94ae15f7720c5cb467e777bb6d',
}, },
'add_ie': ['Ooyala'], 'add_ie': ['Ooyala'],
} }, {
'url': 'https://www.skysports.com/watch/video/sports/f1/12160544/abu-dhabi-gp-the-notebook',
'only_matching': True,
}, {
'url': 'https://www.skysports.com/watch/video/tv-shows/12118508/rainford-brent-how-ace-programme-helps',
'only_matching': True,
}]
class SkyNewsIE(SkyBaseIE): class SkyNewsIE(SkyBaseIE):

@ -0,0 +1,121 @@
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_age_limit,
smuggle_url,
try_get,
)
class TV5UnisBaseIE(InfoExtractor):
_GEO_COUNTRIES = ['CA']
def _real_extract(self, url):
groups = re.match(self._VALID_URL, url).groups()
product = self._download_json(
'https://api.tv5unis.ca/graphql', groups[0], query={
'query': '''{
%s(%s) {
collection {
title
}
episodeNumber
rating {
name
}
seasonNumber
tags
title
videoElement {
... on Video {
mediaId
}
}
}
}''' % (self._GQL_QUERY_NAME, self._gql_args(groups)),
})['data'][self._GQL_QUERY_NAME]
media_id = product['videoElement']['mediaId']
return {
'_type': 'url_transparent',
'id': media_id,
'title': product.get('title'),
'url': smuggle_url('limelight:media:' + media_id, {'geo_countries': self._GEO_COUNTRIES}),
'age_limit': parse_age_limit(try_get(product, lambda x: x['rating']['name'])),
'tags': product.get('tags'),
'series': try_get(product, lambda x: x['collection']['title']),
'season_number': int_or_none(product.get('seasonNumber')),
'episode_number': int_or_none(product.get('episodeNumber')),
'ie_key': 'LimelightMedia',
}
class TV5UnisVideoIE(TV5UnisBaseIE):
IE_NAME = 'tv5unis:video'
_VALID_URL = r'https?://(?:www\.)?tv5unis\.ca/videos/[^/]+/(?P<id>\d+)'
_TEST = {
'url': 'https://www.tv5unis.ca/videos/bande-annonces/71843',
'md5': '3d794164928bda97fb87a17e89923d9b',
'info_dict': {
'id': 'a883684aecb2486cad9bdc7bbe17f861',
'ext': 'mp4',
'title': 'Watatatow',
'duration': 10.01,
}
}
_GQL_QUERY_NAME = 'productById'
@staticmethod
def _gql_args(groups):
return 'id: %s' % groups
class TV5UnisIE(TV5UnisBaseIE):
IE_NAME = 'tv5unis'
_VALID_URL = r'https?://(?:www\.)?tv5unis\.ca/videos/(?P<id>[^/]+)(?:/saisons/(?P<season_number>\d+)/episodes/(?P<episode_number>\d+))?/?(?:[?#&]|$)'
_TESTS = [{
'url': 'https://www.tv5unis.ca/videos/watatatow/saisons/6/episodes/1',
'md5': 'a479907d2e531a73e1f8dc48d6388d02',
'info_dict': {
'id': 'e5ee23a586c44612a56aad61accf16ef',
'ext': 'mp4',
'title': 'Je ne peux pas lui résister',
'description': "Atys, le nouveau concierge de l'école, a réussi à ébranler la confiance de Mado en affirmant qu\'une médaille, ce n'est que du métal. Comme Mado essaie de lui prouver que ses valeurs sont solides, il veut la mettre à l'épreuve...",
'subtitles': {
'fr': 'count:1',
},
'duration': 1370,
'age_limit': 8,
'tags': 'count:3',
'series': 'Watatatow',
'season_number': 6,
'episode_number': 1,
},
}, {
'url': 'https://www.tv5unis.ca/videos/le-voyage-de-fanny',
'md5': '9ca80ebb575c681d10cae1adff3d4774',
'info_dict': {
'id': '726188eefe094d8faefb13381d42bc06',
'ext': 'mp4',
'title': 'Le voyage de Fanny',
'description': "Fanny, 12 ans, cachée dans un foyer loin de ses parents, s'occupe de ses deux soeurs. Devant fuir, Fanny prend la tête d'un groupe de huit enfants et s'engage dans un dangereux périple à travers la France occupée pour rejoindre la frontière suisse.",
'subtitles': {
'fr': 'count:1',
},
'duration': 5587.034,
'tags': 'count:4',
},
}]
_GQL_QUERY_NAME = 'productByRootProductSlug'
@staticmethod
def _gql_args(groups):
args = 'rootProductSlug: "%s"' % groups[0]
if groups[1]:
args += ', seasonNumber: %s, episodeNumber: %s' % groups[1:]
return args
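How the two _gql_args helpers shape the GraphQL arguments, using the ids and slugs from the tests above (a worked example, not extractor code):

def video_args(groups):           # TV5UnisVideoIE: groups == (id,)
    return 'id: %s' % groups


def slug_args(groups):            # TV5UnisIE: groups == (slug, season, episode)
    args = 'rootProductSlug: "%s"' % groups[0]
    if groups[1]:
        args += ', seasonNumber: %s, episodeNumber: %s' % groups[1:]
    return args


print(video_args(('71843',)))                         # id: 71843
print(slug_args(('watatatow', '6', '1')))             # rootProductSlug: "watatatow", seasonNumber: 6, episodeNumber: 1
print(slug_args(('le-voyage-de-fanny', None, None)))  # rootProductSlug: "le-voyage-de-fanny"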

@ -4,30 +4,50 @@ from __future__ import unicode_literals
import re import re
from .common import InfoExtractor from .common import InfoExtractor
from ..compat import compat_str from ..compat import (
compat_parse_qs,
compat_str,
compat_urllib_parse_urlparse,
)
from ..utils import ( from ..utils import (
ExtractorError,
int_or_none, int_or_none,
orderedSet,
parse_duration,
str_or_none,
unified_strdate,
url_or_none,
xpath_element,
xpath_text,
) )
class VideomoreBaseIE(InfoExtractor):
_API_BASE_URL = 'https://more.tv/api/v3/web/'
_VALID_URL_BASE = r'https?://(?:videomore\.ru|more\.tv)/'
def _download_page_data(self, display_id):
return self._download_json(
self._API_BASE_URL + 'PageData', display_id, query={
'url': '/' + display_id,
})['attributes']['response']['data']
def _track_url_result(self, track):
track_vod = track['trackVod']
video_url = track_vod.get('playerLink') or track_vod['link']
return self.url_result(
video_url, VideomoreIE.ie_key(), track_vod.get('hubId'))
class VideomoreIE(InfoExtractor): class VideomoreIE(InfoExtractor):
IE_NAME = 'videomore' IE_NAME = 'videomore'
_VALID_URL = r'''(?x) _VALID_URL = r'''(?x)
videomore:(?P<sid>\d+)$| videomore:(?P<sid>\d+)$|
https?://(?:player\.)?videomore\.ru/ https?://
(?: (?:
videomore\.ru/
(?: (?:
embed| embed|
[^/]+/[^/]+ [^/]+/[^/]+
)/| )/|
[^/]*\?.*?\btrack_id= (?:
(?:player\.)?videomore\.ru|
siren\.more\.tv/player
)/[^/]*\?.*?\btrack_id=|
odysseus\.more.tv/player/(?P<partner_id>\d+)/
) )
(?P<id>\d+) (?P<id>\d+)
(?:[/?#&]|\.(?:xml|json)|$) (?:[/?#&]|\.(?:xml|json)|$)
@ -47,18 +67,19 @@ class VideomoreIE(InfoExtractor):
'comment_count': int, 'comment_count': int,
'age_limit': 16, 'age_limit': 16,
}, },
'skip': 'The video is not available for viewing.',
}, { }, {
'url': 'http://videomore.ru/embed/259974', 'url': 'http://videomore.ru/embed/259974',
'info_dict': { 'info_dict': {
'id': '259974', 'id': '259974',
'ext': 'flv', 'ext': 'mp4',
'title': 'Молодежка 2 сезон 40 серия', 'title': 'Молодежка 2 сезон 40 серия',
'series': 'Молодежка', 'series': 'Молодежка',
'season': '2 сезон',
'episode': '40 серия', 'episode': '40 серия',
'thumbnail': r're:^https?://.*\.jpg', 'thumbnail': r're:^https?://.*\.jpg',
'duration': 2809, 'duration': 2789,
'view_count': int, 'view_count': int,
'comment_count': int,
'age_limit': 16, 'age_limit': 16,
}, },
'params': { 'params': {
@ -79,6 +100,7 @@ class VideomoreIE(InfoExtractor):
'params': { 'params': {
'skip_download': True, 'skip_download': True,
}, },
'skip': 'The video is not available for viewing.',
}, { }, {
'url': 'http://videomore.ru/elki_3?track_id=364623', 'url': 'http://videomore.ru/elki_3?track_id=364623',
'only_matching': True, 'only_matching': True,
@ -100,7 +122,14 @@ class VideomoreIE(InfoExtractor):
}, { }, {
'url': 'https://player.videomore.ru/?partner_id=97&track_id=736234&autoplay=0&userToken=', 'url': 'https://player.videomore.ru/?partner_id=97&track_id=736234&autoplay=0&userToken=',
'only_matching': True, 'only_matching': True,
}, {
'url': 'https://odysseus.more.tv/player/1788/352317',
'only_matching': True,
}, {
'url': 'https://siren.more.tv/player/config?track_id=352317&partner_id=1788&user_token=',
'only_matching': True,
}] }]
_GEO_BYPASS = False
@staticmethod @staticmethod
def _extract_url(webpage): def _extract_url(webpage):
@ -118,46 +147,73 @@ class VideomoreIE(InfoExtractor):
def _real_extract(self, url): def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('sid') or mobj.group('id') video_id = mobj.group('sid') or mobj.group('id')
partner_id = mobj.group('partner_id') or compat_parse_qs(compat_urllib_parse_urlparse(url).query).get('partner_id', [None])[0] or '97'
video = self._download_xml(
'http://videomore.ru/video/tracks/%s.xml' % video_id, item = self._download_json(
video_id, 'Downloading video XML') 'https://siren.more.tv/player/config', video_id, query={
'partner_id': partner_id,
item = xpath_element(video, './/playlist/item', fatal=True) 'track_id': video_id,
})['data']['playlist']['items'][0]
title = xpath_text(
item, ('./title', './episode_name'), 'title', fatal=True) title = item.get('title')
series = item.get('project_name')
video_url = xpath_text(item, './video_url', 'video url', fatal=True) season = item.get('season_name')
formats = self._extract_f4m_formats(video_url, video_id, f4m_id='hds') episode = item.get('episode_name')
if not title:
title = []
for v in (series, season, episode):
if v:
title.append(v)
title = ' '.join(title)
streams = item.get('streams') or []
for protocol in ('DASH', 'HLS'):
stream_url = item.get(protocol.lower() + '_url')
if stream_url:
streams.append({'protocol': protocol, 'url': stream_url})
formats = []
for stream in streams:
stream_url = stream.get('url')
if not stream_url:
continue
protocol = stream.get('protocol')
if protocol == 'DASH':
formats.extend(self._extract_mpd_formats(
stream_url, video_id, mpd_id='dash', fatal=False))
elif protocol == 'HLS':
formats.extend(self._extract_m3u8_formats(
stream_url, video_id, 'mp4', 'm3u8_native',
m3u8_id='hls', fatal=False))
elif protocol == 'MSS':
formats.extend(self._extract_ism_formats(
stream_url, video_id, ism_id='mss', fatal=False))
if not formats:
error = item.get('error')
if error:
if error in ('Данное видео недоступно для просмотра на территории этой страны', 'Данное видео доступно для просмотра только на территории России'):
self.raise_geo_restricted(countries=['RU'])
raise ExtractorError(error, expected=True)
self._sort_formats(formats) self._sort_formats(formats)
thumbnail = xpath_text(item, './thumbnail_url')
duration = int_or_none(xpath_text(item, './duration'))
view_count = int_or_none(xpath_text(item, './views'))
comment_count = int_or_none(xpath_text(item, './count_comments'))
age_limit = int_or_none(xpath_text(item, './min_age'))
series = xpath_text(item, './project_name')
episode = xpath_text(item, './episode_name')
return { return {
'id': video_id, 'id': video_id,
'title': title, 'title': title,
'series': series, 'series': series,
'season': season,
'episode': episode, 'episode': episode,
'thumbnail': thumbnail, 'thumbnail': item.get('thumbnail_url'),
'duration': duration, 'duration': int_or_none(item.get('duration')),
'view_count': view_count, 'view_count': int_or_none(item.get('views')),
'comment_count': comment_count, 'age_limit': int_or_none(item.get('min_age')),
'age_limit': age_limit,
'formats': formats, 'formats': formats,
} }
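The rewritten _real_extract above derives partner_id from the odysseus player path when present, then from the ?partner_id= query parameter, then falls back to '97'. A small illustration of that fallback (compat imports assume the upstream youtube_dl package name, adjust for the fork):

from youtube_dl.compat import (  # adjust module name for the fork
    compat_parse_qs,
    compat_urllib_parse_urlparse,
)


def partner_id_for(url, path_partner_id=None):
    return (path_partner_id
            or compat_parse_qs(compat_urllib_parse_urlparse(url).query).get('partner_id', [None])[0]
            or '97')


print(partner_id_for('https://player.videomore.ru/?partner_id=97&track_id=736234&autoplay=0'))  # 97
print(partner_id_for('https://odysseus.more.tv/player/1788/352317', '1788'))                     # 1788
print(partner_id_for('http://videomore.ru/embed/259974'))                                        # 97 (default)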
class VideomoreVideoIE(InfoExtractor): class VideomoreVideoIE(VideomoreBaseIE):
IE_NAME = 'videomore:video' IE_NAME = 'videomore:video'
_VALID_URL = r'https?://videomore\.ru/(?:(?:[^/]+/){2})?(?P<id>[^/?#&]+)(?:/*|[?#&].*?)$' _VALID_URL = VideomoreBaseIE._VALID_URL_BASE + r'(?P<id>(?:(?:[^/]+/){2})?[^/?#&]+)(?:/*|[?#&].*?)$'
_TESTS = [{ _TESTS = [{
# single video with og:video:iframe # single video with og:video:iframe
'url': 'http://videomore.ru/elki_3', 'url': 'http://videomore.ru/elki_3',
@ -174,10 +230,25 @@ class VideomoreVideoIE(InfoExtractor):
'params': { 'params': {
'skip_download': True, 'skip_download': True,
}, },
'skip': 'Requires logging in',
}, { }, {
# season single series with og:video:iframe # season single series with og:video:iframe
'url': 'http://videomore.ru/poslednii_ment/1_sezon/14_seriya', 'url': 'http://videomore.ru/poslednii_ment/1_sezon/14_seriya',
'only_matching': True, 'info_dict': {
'id': '352317',
'ext': 'mp4',
'title': 'Последний мент 1 сезон 14 серия',
'series': 'Последний мент',
'season': '1 сезон',
'episode': '14 серия',
'thumbnail': r're:^https?://.*\.jpg',
'duration': 2464,
'age_limit': 16,
'view_count': int,
},
'params': {
'skip_download': True,
},
}, { }, {
'url': 'http://videomore.ru/sejchas_v_seti/serii_221-240/226_vypusk', 'url': 'http://videomore.ru/sejchas_v_seti/serii_221-240/226_vypusk',
'only_matching': True, 'only_matching': True,
@ -197,9 +268,13 @@ class VideomoreVideoIE(InfoExtractor):
'params': { 'params': {
'skip_download': True, 'skip_download': True,
}, },
'skip': 'redirects to https://more.tv/'
}, { }, {
'url': 'https://videomore.ru/molodezhka/6_sezon/29_seriya?utm_so', 'url': 'https://videomore.ru/molodezhka/6_sezon/29_seriya?utm_so',
'only_matching': True, 'only_matching': True,
}, {
'url': 'https://more.tv/poslednii_ment/1_sezon/14_seriya',
'only_matching': True,
}] }]
@classmethod @classmethod
@ -208,38 +283,25 @@ class VideomoreVideoIE(InfoExtractor):
def _real_extract(self, url): def _real_extract(self, url):
display_id = self._match_id(url) display_id = self._match_id(url)
return self._track_url_result(self._download_page_data(display_id))
webpage = self._download_webpage(url, display_id)
video_url = self._og_search_property(
'video:iframe', webpage, 'video url', default=None)
if not video_url:
video_id = self._search_regex(
(r'config\s*:\s*["\']https?://videomore\.ru/video/tracks/(\d+)\.xml',
r'track-id=["\'](\d+)',
r'xcnt_product_id\s*=\s*(\d+)'), webpage, 'video id')
video_url = 'videomore:%s' % video_id
else:
video_id = None
return self.url_result(
video_url, ie=VideomoreIE.ie_key(), video_id=video_id)
class VideomoreSeasonIE(InfoExtractor): class VideomoreSeasonIE(VideomoreBaseIE):
IE_NAME = 'videomore:season' IE_NAME = 'videomore:season'
_VALID_URL = r'https?://videomore\.ru/(?!embed)(?P<id>[^/]+/[^/?#&]+)(?:/*|[?#&].*?)$' _VALID_URL = VideomoreBaseIE._VALID_URL_BASE + r'(?!embed)(?P<id>[^/]+/[^/?#&]+)(?:/*|[?#&].*?)$'
_TESTS = [{ _TESTS = [{
'url': 'http://videomore.ru/molodezhka/sezon_promo', 'url': 'http://videomore.ru/molodezhka/film_o_filme',
'info_dict': { 'info_dict': {
'id': 'molodezhka/sezon_promo', 'id': 'molodezhka/film_o_filme',
'title': 'Молодежка Промо', 'title': 'Фильм о фильме',
}, },
'playlist_mincount': 12, 'playlist_mincount': 3,
}, { }, {
'url': 'http://videomore.ru/molodezhka/sezon_promo?utm_so', 'url': 'http://videomore.ru/molodezhka/sezon_promo?utm_so',
'only_matching': True, 'only_matching': True,
}, {
'url': 'https://more.tv/molodezhka/film_o_filme',
'only_matching': True,
}] }]
@classmethod @classmethod
@ -249,59 +311,12 @@ class VideomoreSeasonIE(InfoExtractor):
def _real_extract(self, url): def _real_extract(self, url):
display_id = self._match_id(url) display_id = self._match_id(url)
season = self._download_page_data(display_id)
webpage = self._download_webpage(url, display_id) season_id = compat_str(season['id'])
tracks = self._download_json(
title = self._og_search_title(webpage) self._API_BASE_URL + 'seasons/%s/tracks' % season_id,
season_id)['data']
data = self._parse_json(
self._html_search_regex(
r'\bclass=["\']seasons-tracks["\'][^>]+\bdata-custom-data=(["\'])(?P<value>{.+?})\1',
webpage, 'data', default='{}', group='value'),
display_id, fatal=False)
entries = [] entries = []
for track in tracks:
if data: entries.append(self._track_url_result(track))
episodes = data.get('episodes') return self.playlist_result(entries, display_id, season.get('title'))
if isinstance(episodes, list):
for ep in episodes:
if not isinstance(ep, dict):
continue
ep_id = int_or_none(ep.get('id'))
ep_url = url_or_none(ep.get('url'))
if ep_id:
e = {
'url': 'videomore:%s' % ep_id,
'id': compat_str(ep_id),
}
elif ep_url:
e = {'url': ep_url}
else:
continue
e.update({
'_type': 'url',
'ie_key': VideomoreIE.ie_key(),
'title': str_or_none(ep.get('title')),
'thumbnail': url_or_none(ep.get('image')),
'duration': parse_duration(ep.get('duration')),
'episode_number': int_or_none(ep.get('number')),
'upload_date': unified_strdate(ep.get('date')),
})
entries.append(e)
if not entries:
entries = [
self.url_result(
'videomore:%s' % video_id, ie=VideomoreIE.ie_key(),
video_id=video_id)
for video_id in orderedSet(re.findall(
r':(?:id|key)=["\'](\d+)["\']', webpage))]
if not entries:
entries = [
self.url_result(item) for item in re.findall(
r'<a[^>]+href="((?:https?:)?//videomore\.ru/%s/[^/]+)"[^>]+class="widget-item-desc"'
% display_id, webpage)]
return self.playlist_result(entries, display_id, title)

@ -15,6 +15,8 @@ from ..utils import (
class YandexMusicBaseIE(InfoExtractor): class YandexMusicBaseIE(InfoExtractor):
_VALID_URL_BASE = r'https?://music\.yandex\.(?P<tld>ru|kz|ua|by|com)'
@staticmethod @staticmethod
def _handle_error(response): def _handle_error(response):
if isinstance(response, dict): if isinstance(response, dict):
@ -62,7 +64,7 @@ class YandexMusicBaseIE(InfoExtractor):
class YandexMusicTrackIE(YandexMusicBaseIE): class YandexMusicTrackIE(YandexMusicBaseIE):
IE_NAME = 'yandexmusic:track' IE_NAME = 'yandexmusic:track'
IE_DESC = 'Яндекс.Музыка - Трек' IE_DESC = 'Яндекс.Музыка - Трек'
_VALID_URL = r'https?://music\.yandex\.(?P<tld>ru|kz|ua|by)/album/(?P<album_id>\d+)/track/(?P<id>\d+)' _VALID_URL = r'%s/album/(?P<album_id>\d+)/track/(?P<id>\d+)' % YandexMusicBaseIE._VALID_URL_BASE
_TESTS = [{ _TESTS = [{
'url': 'http://music.yandex.ru/album/540508/track/4878838', 'url': 'http://music.yandex.ru/album/540508/track/4878838',
@ -100,6 +102,9 @@ class YandexMusicTrackIE(YandexMusicBaseIE):
'track_number': 9, 'track_number': 9,
}, },
# 'skip': 'Travis CI servers blocked by YandexMusic', # 'skip': 'Travis CI servers blocked by YandexMusic',
}, {
'url': 'http://music.yandex.com/album/540508/track/4878838',
'only_matching': True,
}] }]
def _real_extract(self, url): def _real_extract(self, url):
@ -242,7 +247,7 @@ class YandexMusicPlaylistBaseIE(YandexMusicBaseIE):
class YandexMusicAlbumIE(YandexMusicPlaylistBaseIE): class YandexMusicAlbumIE(YandexMusicPlaylistBaseIE):
IE_NAME = 'yandexmusic:album' IE_NAME = 'yandexmusic:album'
IE_DESC = 'Яндекс.Музыка - Альбом' IE_DESC = 'Яндекс.Музыка - Альбом'
_VALID_URL = r'https?://music\.yandex\.(?P<tld>ru|kz|ua|by)/album/(?P<id>\d+)/?(\?|$)' _VALID_URL = r'%s/album/(?P<id>\d+)' % YandexMusicBaseIE._VALID_URL_BASE
_TESTS = [{ _TESTS = [{
'url': 'http://music.yandex.ru/album/540508', 'url': 'http://music.yandex.ru/album/540508',
@ -270,6 +275,10 @@ class YandexMusicAlbumIE(YandexMusicPlaylistBaseIE):
'playlist_count': 187, 'playlist_count': 187,
}] }]
@classmethod
def suitable(cls, url):
return False if YandexMusicTrackIE.suitable(url) else super(YandexMusicAlbumIE, cls).suitable(url)
def _real_extract(self, url): def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url) mobj = re.match(self._VALID_URL, url)
tld = mobj.group('tld') tld = mobj.group('tld')
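The suitable() override added above is presumably needed because the rebuilt album pattern no longer anchors after the album id, so it would also match track URLs that YandexMusicTrackIE should handle. A quick check with the regexes from above:

import re

_VALID_URL_BASE = r'https?://music\.yandex\.(?P<tld>ru|kz|ua|by|com)'
track_re = r'%s/album/(?P<album_id>\d+)/track/(?P<id>\d+)' % _VALID_URL_BASE
album_re = r'%s/album/(?P<id>\d+)' % _VALID_URL_BASE

url = 'http://music.yandex.ru/album/540508/track/4878838'
print(bool(re.match(track_re, url)))  # True
print(bool(re.match(album_re, url)))  # True -> without suitable(), both extractors would claim it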
@ -295,7 +304,7 @@ class YandexMusicAlbumIE(YandexMusicPlaylistBaseIE):
class YandexMusicPlaylistIE(YandexMusicPlaylistBaseIE): class YandexMusicPlaylistIE(YandexMusicPlaylistBaseIE):
IE_NAME = 'yandexmusic:playlist' IE_NAME = 'yandexmusic:playlist'
IE_DESC = 'Яндекс.Музыка - Плейлист' IE_DESC = 'Яндекс.Музыка - Плейлист'
_VALID_URL = r'https?://music\.yandex\.(?P<tld>ru|kz|ua|by)/users/(?P<user>[^/]+)/playlists/(?P<id>\d+)' _VALID_URL = r'%s/users/(?P<user>[^/]+)/playlists/(?P<id>\d+)' % YandexMusicBaseIE._VALID_URL_BASE
_TESTS = [{ _TESTS = [{
'url': 'http://music.yandex.ru/users/music.partners/playlists/1245', 'url': 'http://music.yandex.ru/users/music.partners/playlists/1245',
@ -374,7 +383,7 @@ class YandexMusicArtistBaseIE(YandexMusicPlaylistBaseIE):
class YandexMusicArtistTracksIE(YandexMusicArtistBaseIE): class YandexMusicArtistTracksIE(YandexMusicArtistBaseIE):
IE_NAME = 'yandexmusic:artist:tracks' IE_NAME = 'yandexmusic:artist:tracks'
IE_DESC = 'Яндекс.Музыка - Артист - Треки' IE_DESC = 'Яндекс.Музыка - Артист - Треки'
_VALID_URL = r'https?://music\.yandex\.(?P<tld>ru|kz|ua|by)/artist/(?P<id>\d+)/tracks' _VALID_URL = r'%s/artist/(?P<id>\d+)/tracks' % YandexMusicBaseIE._VALID_URL_BASE
_TESTS = [{ _TESTS = [{
'url': 'https://music.yandex.ru/artist/617526/tracks', 'url': 'https://music.yandex.ru/artist/617526/tracks',
@ -404,7 +413,7 @@ class YandexMusicArtistTracksIE(YandexMusicArtistBaseIE):
class YandexMusicArtistAlbumsIE(YandexMusicArtistBaseIE): class YandexMusicArtistAlbumsIE(YandexMusicArtistBaseIE):
IE_NAME = 'yandexmusic:artist:albums' IE_NAME = 'yandexmusic:artist:albums'
IE_DESC = 'Яндекс.Музыка - Артист - Альбомы' IE_DESC = 'Яндекс.Музыка - Артист - Альбомы'
_VALID_URL = r'https?://music\.yandex\.(?P<tld>ru|kz|ua|by)/artist/(?P<id>\d+)/albums' _VALID_URL = r'%s/artist/(?P<id>\d+)/albums' % YandexMusicBaseIE._VALID_URL_BASE
_TESTS = [{ _TESTS = [{
'url': 'https://music.yandex.ru/artist/617526/albums', 'url': 'https://music.yandex.ru/artist/617526/albums',
