Update europa.py

pull/12742/head
edmundman 6 months ago committed by GitHub
parent 6e3ddbbe4d
commit db1f9be975
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

@ -1,32 +1,32 @@
# -*- coding: utf-8 -*-
import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
    orderedSet,
    parse_duration,
    parse_iso8601,
    parse_qs,
    qualities,
    traverse_obj,
    unified_strdate,
    xpath_text,
)
class EuropaIE(InfoExtractor):
    """Legacy extractor for ec.europa.eu/avservices video/audio pages.

    The avservices platform has been discontinued, so this extractor is
    marked as not working and kept only for reference.
    """
    _WORKING = False
    _VALID_URL = r'https?://ec\.europa\.eu/avservices/(?:video/player|audio/audioDetails)\.cfm\?.*?\bref=(?P<id>[A-Za-z0-9-]+)'
    _TESTS = [{
        'url': 'http://ec.europa.eu/avservices/video/player.cfm?ref=I107758',
        'md5': '574f080699ddd1e19a675b0ddf010371',
        'info_dict': {
            'id': 'I107758',
            'ext': 'mp4',
            'title': 'TRADE - Wikileaks on TTIP',
            'description': 'NEW LIVE EC Midday press briefing of 11/08/2015',
            'thumbnail': r're:^https?://.*\.jpg$',
            'upload_date': '20150811',
            'duration': 34,
            'view_count': int,
            'formats': 'mincount:3',
        },
    }, {
        'url': 'http://ec.europa.eu/avservices/video/player.cfm?sitelang=en&ref=I107786',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        playlist = self._download_xml(
            f'http://ec.europa.eu/avservices/video/player/playlist.cfm?ID={video_id}', video_id)

        def get_item(type_, preference):
            # Collect per-language values for the given node type and return
            # the first one matching the caller's language preference order.
            items = {}
            for item in playlist.findall(f'./info/{type_}/item'):
                lang = xpath_text(item, 'lg', default=None)
                label = xpath_text(item, 'label', default=None)
                if lang and label:
                    items[lang] = label.strip()
            for p in preference:
                if items.get(p):
                    return items[p]

        query = parse_qs(url)
        preferred_lang = query.get('sitelang', ('en', ))[0]
        # 'int' (international) is the site's generic fallback language code
        preferred_langs = orderedSet((preferred_lang, 'en', 'int'))

        title = get_item('title', preferred_langs) or video_id
        description = get_item('description', preferred_langs)
        thumbnail = xpath_text(playlist, './info/thumburl', 'thumbnail')
        upload_date = unified_strdate(xpath_text(playlist, './info/date', 'upload date'))
        duration = parse_duration(xpath_text(playlist, './info/duration', 'duration'))
        view_count = int_or_none(xpath_text(playlist, './info/views', 'views'))

        # Higher preference for languages earlier in preferred_langs
        language_preference = qualities(preferred_langs[::-1])

        formats = []
        for file_ in playlist.findall('./files/file'):
            video_url = xpath_text(file_, './url')
            if not video_url:
                continue
            lang = xpath_text(file_, './lg')
            formats.append({
                'url': video_url,
                'format_id': lang,
                'format_note': xpath_text(file_, './lglabel'),
                'language_preference': language_preference(lang),
            })

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'upload_date': upload_date,
            'duration': duration,
            'view_count': view_count,
            'formats': formats,
        }
class EuroParlWebstreamIE(InfoExtractor):
    """Extractor for European Parliament multimedia webstreaming pages.

    Tries, in order: a direct HLS URL from the page's Next.js metadata,
    a URL constructed from event/channel IDs, and finally a regex search
    over the raw HTML (the original method).
    """
    _VALID_URL = r'''(?x)
        https?://multimedia\.europarl\.europa\.eu/
        (?:\w+/)?webstreaming/(?:[\w-]+_)?(?P<id>[\w-]+)
    '''
    _TESTS = [{
        # VOD
        'url': 'https://multimedia.europarl.europa.eu/pl/webstreaming/plenary-session_20220914-0900-PLENARY',
        'info_dict': {
            'id': '62388b15-d85b-4add-99aa-ba12ccf64f0d',
            'display_id': '20220914-0900-PLENARY',
            'ext': 'mp4',
            'title': 'Plenary session',
            'release_timestamp': 1663139069,
            'release_date': '20220914',
        },
        'params': {'skip_download': True},
    }, {
        # Event page that previously failed with the regex-only method
        'url': 'https://multimedia.europarl.europa.eu/en/webstreaming/euroscola_20250328-1000-SPECIAL-EUROSCOLA',
        'info_dict': {
            'id': str,
            'display_id': '20250328-1000-SPECIAL-EUROSCOLA',
            'ext': 'mp4',
            'title': r're:Euroscola',
            'release_timestamp': int,
            'release_date': '20250328',
            'is_live': bool,
        },
        'params': {'skip_download': True},
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)

        # Metadata lives in the page's Next.js payload when present
        nextjs_data = self._search_nextjs_data(webpage, display_id, default={})
        media_info = traverse_obj(nextjs_data, ('props', 'pageProps', 'mediaItem')) or {}

        internal_id = media_info.get('id') or display_id
        title = media_info.get('title') or media_info.get('name') or display_id
        release_timestamp = traverse_obj(media_info, ('startDateTime', {parse_iso8601}))
        is_live = media_info.get('mediaSubType') == 'Live'

        # Attempt 1: a direct HLS manifest URL stored under a known metadata
        # key.  NOTE: alternative keys must be passed to traverse_obj as
        # SEPARATE path arguments — a single tuple would be interpreted as
        # one nested path (media_info['hlsUrl']['streamUrl']...).
        hls_url = traverse_obj(
            media_info, 'hlsUrl', 'streamUrl', 'manifestUrl', 'url', 'playerUrl', 'videoUrl')
        if not (isinstance(hls_url, str) and 'm3u8' in hls_url):
            # The value found was not an HLS URL; discard it
            hls_url = None
        else:
            self.to_screen(f'Found direct HLS URL in metadata: {hls_url}')

        # Attempt 2: construct the manifest URL from event/channel IDs
        if not hls_url:
            event_id = traverse_obj(media_info, 'id', 'eventId', 'event_id')
            channel_id = traverse_obj(
                media_info, 'channelId', 'channel_id', 'channelName', 'channel')
            if event_id and channel_id:
                # Assumes the live/default endpoint layout; archived events
                # may need '/index-archive.m3u8?startTime=...&endTime=...'
                # — TODO confirm against the site
                hls_url = f'https://live.media.eup.glcloud.eu/hls/live/{event_id}/{channel_id}/index.m3u8'
                self.to_screen(f'Constructed potential HLS URL: {hls_url}')
            else:
                self.to_screen('Could not find sufficient event/channel IDs in metadata to construct URL.')

        # Attempt 3: regex search over the raw webpage (original method)
        if not hls_url:
            hls_url = self._search_regex(
                r'(https?://[^"]*live\.media\.eup\.glcloud\.eu/hls/live/\d+/[^"]+\.m3u8[^"]*)',
                webpage, 'm3u8 URL (regex fallback)', default=None, fatal=False)
            if hls_url:
                self.to_screen(f'Found HLS URL via regex fallback: {hls_url}')

        if not hls_url:
            # expected=True: known site-structure failure, no stack trace
            raise ExtractorError(
                'No HLS URL (.m3u8) could be found or constructed. The website structure might have changed.',
                expected=True)

        formats, subtitles = self._extract_m3u8_formats_and_subtitles(
            hls_url, display_id, ext='mp4', live=is_live, fatal=False)
        if not formats:
            raise ExtractorError(
                f'HLS manifest found at {hls_url} but yielded no video formats.', expected=True)

        return {
            'id': internal_id,
            'display_id': display_id,
            'title': title,
            'formats': formats,
            'subtitles': subtitles,
            'release_timestamp': release_timestamp,
            # None (rather than False) when not explicitly marked live
            'is_live': is_live or None,
        }

Loading…
Cancel
Save