mirror of https://github.com/yt-dlp/yt-dlp
Merge branch 'yt-dlp:master' into pr/live-sections
commit 6a84199473
@@ -0,0 +1,14 @@
repos:
- repo: local
  hooks:
  - id: linter
    name: Apply linter fixes
    entry: ruff check --fix .
    language: system
    types: [python]
    require_serial: true
  - id: format
    name: Apply formatting fixes
    entry: autopep8 --in-place .
    language: system
    types: [python]
@@ -0,0 +1,9 @@
repos:
- repo: local
  hooks:
  - id: fix
    name: Apply code fixes
    entry: hatch fmt
    language: system
    types: [python]
    require_serial: true
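The two configs above each register local hooks. A minimal sketch of driving them from Python, assuming `pre-commit` is installed and one of the configs above is saved as the repository's pre-commit configuration (the config file names are not shown in this diff):

import subprocess

# Run every configured hook against the full tree, as a CI job or `git commit` would.
subprocess.run(['pre-commit', 'run', '--all-files'], check=True)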
@@ -1,4 +0,0 @@
@echo off

>&2 echo run_tests.bat is deprecated. Please use `devscripts/run_tests.py` instead
python %~dp0run_tests.py %~1
@@ -1,4 +0,0 @@
#!/usr/bin/env sh

>&2 echo 'run_tests.sh is deprecated. Please use `devscripts/run_tests.py` instead'
python3 devscripts/run_tests.py "$1"
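Both removed wrappers point to devscripts/run_tests.py. A minimal sketch of the replacement invocation from Python; the 'core' argument is an illustrative stand-in for the single argument the wrappers used to forward:

import subprocess
import sys

# Equivalent of the removed wrappers: forward one optional argument to the new runner.
subprocess.run([sys.executable, 'devscripts/run_tests.py', 'core'], check=True)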
@@ -1,17 +0,0 @@
#!/usr/bin/env python3

# Allow execution from anywhere
import os
import sys

sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))

import warnings

from bundle.pyinstaller import main

warnings.warn(DeprecationWarning('`pyinst.py` is deprecated and will be removed in a future version. '
                                 'Use `bundle.pyinstaller` instead'))

if __name__ == '__main__':
    main()
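The removed pyinst.py only delegated to bundle.pyinstaller; per its deprecation message, that module is now called directly. A minimal sketch, run from the repository root (the `-m` invocation is assumed from the message):

import subprocess
import sys

# Call the PyInstaller bundling entry point that pyinst.py used to wrap.
subprocess.run([sys.executable, '-m', 'bundle.pyinstaller'], check=True)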
@@ -1,36 +0,0 @@
#!/usr/bin/env python3

# Allow execution from anywhere
import os
import sys

sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))

import warnings


if sys.argv[1:2] == ['py2exe']:
    warnings.warn(DeprecationWarning('`setup.py py2exe` is deprecated and will be removed in a future version. '
                                     'Use `bundle.py2exe` instead'))

    import bundle.py2exe

    bundle.py2exe.main()

elif 'build_lazy_extractors' in sys.argv:
    warnings.warn(DeprecationWarning('`setup.py build_lazy_extractors` is deprecated and will be removed in a future version. '
                                     'Use `devscripts.make_lazy_extractors` instead'))

    import subprocess

    os.chdir(sys.path[0])
    print('running build_lazy_extractors')
    subprocess.run([sys.executable, 'devscripts/make_lazy_extractors.py'])

else:

    print(
        'ERROR: Building by calling `setup.py` is deprecated. '
        'Use a build frontend like `build` instead. ',
        'Refer to https://build.pypa.io for more info', file=sys.stderr)
    sys.exit(1)
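The removed setup.py directs users to a PEP 517 build frontend (https://build.pypa.io). A minimal sketch of that replacement, assuming the `build` package is installed:

import subprocess
import sys

# Build sdist and wheel with the `build` frontend instead of invoking setup.py.
subprocess.run([sys.executable, '-m', 'build'], check=True)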
File diff suppressed because it is too large
@@ -0,0 +1,74 @@
from .common import InfoExtractor
from ..utils import (
    determine_ext,
    int_or_none,
    parse_iso8601,
    traverse_obj,
    urljoin,
)


class CaffeineTVIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?caffeine\.tv/[^/?#]+/video/(?P<id>[\da-f-]+)'
    _TESTS = [{
        'url': 'https://www.caffeine.tv/TsuSurf/video/cffc0a00-e73f-11ec-8080-80017d29f26e',
        'info_dict': {
            'id': 'cffc0a00-e73f-11ec-8080-80017d29f26e',
            'ext': 'mp4',
            'title': 'GOOOOD MORNINNNNN #highlights',
            'timestamp': 1654702180,
            'upload_date': '20220608',
            'uploader': 'RahJON Wicc',
            'uploader_id': 'TsuSurf',
            'duration': 3145,
            'age_limit': 17,
            'thumbnail': 'https://www.caffeine.tv/broadcasts/776b6f84-9cd5-42e3-af1d-4a776eeed697/replay/lobby.jpg',
            'comment_count': int,
            'view_count': int,
            'like_count': int,
            'tags': ['highlights', 'battlerap'],
        },
        'params': {
            'skip_download': 'm3u8',
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        json_data = self._download_json(
            f'https://api.caffeine.tv/social/public/activity/{video_id}', video_id)
        broadcast_info = traverse_obj(json_data, ('broadcast_info', {dict})) or {}

        video_url = broadcast_info['video_url']
        ext = determine_ext(video_url)
        if ext == 'm3u8':
            formats = self._extract_m3u8_formats(video_url, video_id, 'mp4')
        else:
            formats = [{'url': video_url}]

        return {
            'id': video_id,
            'formats': formats,
            **traverse_obj(json_data, {
                'like_count': ('like_count', {int_or_none}),
                'view_count': ('view_count', {int_or_none}),
                'comment_count': ('comment_count', {int_or_none}),
                'tags': ('tags', ..., {str}, {lambda x: x or None}),
                'uploader': ('user', 'name', {str}),
                'uploader_id': (((None, 'user'), 'username'), {str}, any),
                'is_live': ('is_live', {bool}),
            }),
            **traverse_obj(broadcast_info, {
                'title': ('broadcast_title', {str}),
                'duration': ('content_duration', {int_or_none}),
                'timestamp': ('broadcast_start_time', {parse_iso8601}),
                'thumbnail': ('preview_image_path', {lambda x: urljoin(url, x)}),
            }),
            'age_limit': {
                # assume Apple Store ratings: https://en.wikipedia.org/wiki/Mobile_software_content_rating_system
                'FOUR_PLUS': 0,
                'NINE_PLUS': 9,
                'TWELVE_PLUS': 12,
                'SEVENTEEN_PLUS': 17,
            }.get(broadcast_info.get('content_rating'), 17),
        }
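A minimal sketch of exercising the new Caffeine extractor through yt-dlp's embedding API, using the URL from the test case above (metadata only, no download):

import yt_dlp

with yt_dlp.YoutubeDL() as ydl:
    info = ydl.extract_info(
        'https://www.caffeine.tv/TsuSurf/video/cffc0a00-e73f-11ec-8080-80017d29f26e',
        download=False)
    # Fields populated by CaffeineTVIE._real_extract above.
    print(info['id'], info.get('title'), info.get('age_limit'))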
@@ -0,0 +1,197 @@
import hashlib
import json
import re
import time

from .common import InfoExtractor
from ..networking.exceptions import HTTPError
from ..utils import ExtractorError, int_or_none, join_nonempty, url_or_none
from ..utils.traversal import traverse_obj


class DangalPlayBaseIE(InfoExtractor):
    _NETRC_MACHINE = 'dangalplay'
    _OTV_USER_ID = None
    _LOGIN_HINT = 'Pass credentials as -u "token" -p "USER_ID" where USER_ID is the `otv_user_id` in browser local storage'
    _API_BASE = 'https://ottapi.dangalplay.com'
    _AUTH_TOKEN = 'jqeGWxRKK7FK5zEk3xCM'  # from https://www.dangalplay.com/main.48ad19e24eb46acccef3.js
    _SECRET_KEY = 'f53d31a4377e4ef31fa0'  # same as above

    def _perform_login(self, username, password):
        if self._OTV_USER_ID:
            return
        if username != 'token' or not re.fullmatch(r'[\da-f]{32}', password):
            raise ExtractorError(self._LOGIN_HINT, expected=True)
        self._OTV_USER_ID = password

    def _real_initialize(self):
        if not self._OTV_USER_ID:
            self.raise_login_required(f'Login required. {self._LOGIN_HINT}', method=None)

    def _extract_episode_info(self, metadata, episode_slug, series_slug):
        return {
            'display_id': episode_slug,
            'episode_number': int_or_none(self._search_regex(
                r'ep-(?:number-)?(\d+)', episode_slug, 'episode number', default=None)),
            'season_number': int_or_none(self._search_regex(
                r'season-(\d+)', series_slug, 'season number', default='1')),
            'series': series_slug,
            **traverse_obj(metadata, {
                'id': ('content_id', {str}),
                'title': ('display_title', {str}),
                'episode': ('title', {str}),
                'series': ('show_name', {str}, {lambda x: x or None}),
                'series_id': ('catalog_id', {str}),
                'duration': ('duration', {int_or_none}),
                'release_timestamp': ('release_date_uts', {int_or_none}),
            }),
        }

    def _call_api(self, path, display_id, note='Downloading JSON metadata', fatal=True, query={}):
        return self._download_json(
            f'{self._API_BASE}/{path}', display_id, note, fatal=fatal,
            headers={'Accept': 'application/json'}, query={
                'auth_token': self._AUTH_TOKEN,
                'region': 'IN',
                **query,
            })


class DangalPlayIE(DangalPlayBaseIE):
    IE_NAME = 'dangalplay'
    _VALID_URL = r'https?://(?:www\.)?dangalplay.com/shows/(?P<series>[^/?#]+)/(?P<id>(?!episodes)[^/?#]+)/?(?:$|[?#])'
    _TESTS = [{
        'url': 'https://www.dangalplay.com/shows/kitani-mohabbat-hai-season-2/kitani-mohabbat-hai-season-2-ep-number-01',
        'info_dict': {
            'id': '647c61dc1e7171310dcd49b4',
            'ext': 'mp4',
            'release_timestamp': 1262304000,
            'episode_number': 1,
            'episode': 'EP 1 | KITANI MOHABBAT HAI SEASON 2',
            'series': 'kitani-mohabbat-hai-season-2',
            'season_number': 2,
            'title': 'EP 1 | KITANI MOHABBAT HAI SEASON 2',
            'release_date': '20100101',
            'duration': 2325,
            'season': 'Season 2',
            'display_id': 'kitani-mohabbat-hai-season-2-ep-number-01',
            'series_id': '645c9ea41e717158ca574966',
        },
    }, {
        'url': 'https://www.dangalplay.com/shows/milke-bhi-hum-na-mile/milke-bhi-hum-na-mile-ep-number-01',
        'info_dict': {
            'id': '65d31d9ba73b9c3abd14a7f3',
            'ext': 'mp4',
            'episode': 'EP 1 | MILKE BHI HUM NA MILE',
            'release_timestamp': 1708367411,
            'episode_number': 1,
            'season': 'Season 1',
            'title': 'EP 1 | MILKE BHI HUM NA MILE',
            'duration': 156048,
            'release_date': '20240219',
            'season_number': 1,
            'series': 'MILKE BHI HUM NA MILE',
            'series_id': '645c9ea41e717158ca574966',
            'display_id': 'milke-bhi-hum-na-mile-ep-number-01',
        },
    }]

    def _generate_api_data(self, data):
        catalog_id = data['catalog_id']
        content_id = data['content_id']
        timestamp = str(int(time.time()))
        unhashed = ''.join((catalog_id, content_id, self._OTV_USER_ID, timestamp, self._SECRET_KEY))

        return json.dumps({
            'catalog_id': catalog_id,
            'content_id': content_id,
            'category': '',
            'region': 'IN',
            'auth_token': self._AUTH_TOKEN,
            'id': self._OTV_USER_ID,
            'md5': hashlib.md5(unhashed.encode()).hexdigest(),
            'ts': timestamp,
        }, separators=(',', ':')).encode()

    def _real_extract(self, url):
        series_slug, episode_slug = self._match_valid_url(url).group('series', 'id')
        metadata = self._call_api(
            f'catalogs/shows/{series_slug}/episodes/{episode_slug}.gzip',
            episode_slug, query={'item_language': ''})['data']

        try:
            details = self._download_json(
                f'{self._API_BASE}/v2/users/get_all_details.gzip', episode_slug,
                'Downloading playback details JSON', headers={
                    'Accept': 'application/json',
                    'Content-Type': 'application/json',
                }, data=self._generate_api_data(metadata))['data']
        except ExtractorError as e:
            if isinstance(e.cause, HTTPError) and e.cause.status == 422:
                error_info = traverse_obj(e.cause.response.read().decode(), ({json.loads}, 'error', {dict})) or {}
                if error_info.get('code') == '1016':
                    self.raise_login_required(
                        f'Your token has expired or is invalid. {self._LOGIN_HINT}', method=None)
                elif msg := error_info.get('message'):
                    raise ExtractorError(msg)
            raise

        m3u8_url = traverse_obj(details, (
            ('adaptive_url', ('adaptive_urls', 'hd', 'hls', ..., 'playback_url')), {url_or_none}, any))
        formats, subtitles = self._extract_m3u8_formats_and_subtitles(m3u8_url, episode_slug, 'mp4')

        return {
            'formats': formats,
            'subtitles': subtitles,
            **self._extract_episode_info(metadata, episode_slug, series_slug),
        }


class DangalPlaySeasonIE(DangalPlayBaseIE):
    IE_NAME = 'dangalplay:season'
    _VALID_URL = r'https?://(?:www\.)?dangalplay.com/shows/(?P<id>[^/?#]+)(?:/(?P<sub>ep-[^/?#]+)/episodes)?/?(?:$|[?#])'
    _TESTS = [{
        'url': 'https://www.dangalplay.com/shows/kitani-mohabbat-hai-season-1',
        'playlist_mincount': 170,
        'info_dict': {
            'id': 'kitani-mohabbat-hai-season-1',
        },
    }, {
        'url': 'https://www.dangalplay.com/shows/kitani-mohabbat-hai-season-1/ep-01-30-1/episodes',
        'playlist_count': 30,
        'info_dict': {
            'id': 'kitani-mohabbat-hai-season-1-ep-01-30-1',
        },
    }, {
        # 1 season only, series page is season page
        'url': 'https://www.dangalplay.com/shows/milke-bhi-hum-na-mile',
        'playlist_mincount': 15,
        'info_dict': {
            'id': 'milke-bhi-hum-na-mile',
        },
    }]

    def _entries(self, subcategories, series_slug):
        for subcategory in subcategories:
            data = self._call_api(
                f'catalogs/shows/items/{series_slug}/subcategories/{subcategory}/episodes.gzip',
                series_slug, f'Downloading episodes JSON for {subcategory}', fatal=False, query={
                    'order_by': 'asc',
                    'status': 'published',
                })
            for ep in traverse_obj(data, ('data', 'items', lambda _, v: v['friendly_id'])):
                episode_slug = ep['friendly_id']
                yield self.url_result(
                    f'https://www.dangalplay.com/shows/{series_slug}/{episode_slug}',
                    DangalPlayIE, **self._extract_episode_info(ep, episode_slug, series_slug))

    def _real_extract(self, url):
        series_slug, subcategory = self._match_valid_url(url).group('id', 'sub')
        subcategories = [subcategory] if subcategory else traverse_obj(
            self._call_api(
                f'catalogs/shows/items/{series_slug}.gzip', series_slug,
                'Downloading season info JSON', query={'item_language': ''}),
            ('data', 'subcategories', ..., 'friendly_id', {str}))

        return self.playlist_result(
            self._entries(subcategories, series_slug), join_nonempty(series_slug, subcategory))
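A sketch of the token login described by _LOGIN_HINT above, via the embedding API; the 32-hex-digit value is a placeholder for the real `otv_user_id` from browser local storage:

import yt_dlp

ydl_opts = {
    'username': 'token',
    'password': '0123456789abcdef0123456789abcdef',  # placeholder otv_user_id
}
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
    ydl.extract_info(
        'https://www.dangalplay.com/shows/kitani-mohabbat-hai-season-2/'
        'kitani-mohabbat-hai-season-2-ep-number-01',
        download=False)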
@@ -0,0 +1,107 @@
import functools

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    extract_attributes,
    get_elements_html_by_class,
    url_or_none,
)
from ..utils.traversal import traverse_obj


class GBNewsIE(InfoExtractor):
    IE_DESC = 'GB News clips, features and live streams'
    _VALID_URL = r'https?://(?:www\.)?gbnews\.(?:uk|com)/(?:\w+/)?(?P<id>[^#?]+)'

    _PLATFORM = 'safari'
    _SSMP_URL = 'https://mm-v2.simplestream.com/ssmp/api.php'
    _TESTS = [{
        'url': 'https://www.gbnews.com/news/bbc-claudine-gay-harvard-university-antisemitism-row',
        'info_dict': {
            'id': '52264136',
            'ext': 'mp4',
            'thumbnail': r're:https?://www\.gbnews\.\w+/.+\.(?:jpe?g|png|webp)',
            'display_id': 'bbc-claudine-gay-harvard-university-antisemitism-row',
            'description': 'The post was criticised by former employers of the broadcaster',
            'title': 'BBC deletes post after furious backlash over headline downplaying antisemitism',
        },
    }, {
        'url': 'https://www.gbnews.com/royal/prince-harry-in-love-with-kate-meghan-markle-jealous-royal',
        'info_dict': {
            'id': '52328390',
            'ext': 'mp4',
            'thumbnail': r're:https?://www\.gbnews\.\w+/.+\.(?:jpe?g|png|webp)',
            'display_id': 'prince-harry-in-love-with-kate-meghan-markle-jealous-royal',
            'description': 'Ingrid Seward has published 17 books documenting the highs and lows of the Royal Family',
            'title': 'Royal author claims Prince Harry was \'in love\' with Kate - Meghan was \'jealous\'',
        }
    }, {
        'url': 'https://www.gbnews.uk/watchlive',
        'info_dict': {
            'id': '1069',
            'ext': 'mp4',
            'thumbnail': r're:https?://www\.gbnews\.\w+/.+\.(?:jpe?g|png|webp)',
            'display_id': 'watchlive',
            'live_status': 'is_live',
            'title': r're:^GB News Live',
        },
        'params': {'skip_download': 'm3u8'},
    }]

    @functools.lru_cache
    def _get_ss_endpoint(self, data_id, data_env):
        if not data_id:
            data_id = 'GB003'
        if not data_env:
            data_env = 'production'

        json_data = self._download_json(
            self._SSMP_URL, None, 'Downloading Simplestream JSON metadata', query={
                'id': data_id,
                'env': data_env,
            })
        meta_url = traverse_obj(json_data, ('response', 'api_hostname', {url_or_none}))
        if not meta_url:
            raise ExtractorError('No API host found')

        return meta_url

    def _real_extract(self, url):
        display_id = self._match_id(url).rpartition('/')[2]
        webpage = self._download_webpage(url, display_id)

        video_data = None
        elements = get_elements_html_by_class('simplestream', webpage)
        for html_tag in elements:
            attributes = extract_attributes(html_tag)
            if 'sidebar' not in (attributes.get('class') or ''):
                video_data = attributes
        if not video_data:
            raise ExtractorError('Could not find video element', expected=True)

        endpoint_url = self._get_ss_endpoint(video_data.get('data-id'), video_data.get('data-env'))

        uvid = video_data['data-uvid']
        video_type = video_data.get('data-type')
        if not video_type or video_type == 'vod':
            video_type = 'show'
        stream_data = self._download_json(
            f'{endpoint_url}/api/{video_type}/stream/{uvid}',
            uvid, 'Downloading stream JSON', query={
                'key': video_data.get('data-key'),
                'platform': self._PLATFORM,
            })
        if traverse_obj(stream_data, 'drm'):
            self.report_drm(uvid)

        return {
            'id': uvid,
            'display_id': display_id,
            'title': self._og_search_title(webpage, default=None),
            'description': self._og_search_description(webpage, default=None),
            'formats': self._extract_m3u8_formats(traverse_obj(stream_data, (
                'response', 'stream', {url_or_none})), uvid, 'mp4'),
            'thumbnail': self._og_search_thumbnail(webpage, default=None),
            'is_live': video_type == 'live',
        }
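A minimal sketch of resolving the GB News live page from the last test case above through the embedding API (metadata only; the stream itself is not downloaded):

import yt_dlp

with yt_dlp.YoutubeDL() as ydl:
    info = ydl.extract_info('https://www.gbnews.uk/watchlive', download=False)
    # 'is_live' from the extractor is normalised to 'live_status' by YoutubeDL.
    print(info['id'], info.get('live_status'))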
Some files were not shown because too many files have changed in this diff