[extractor/iwara] Overhaul extractors (#6557)

Authored by: Lesmiscore
pull/6566/head
Lesmiscore authored 2 years ago · committed by GitHub
parent 9a06b7b189
commit c14af7a741

@@ -1,239 +1,184 @@
-import itertools
-import re
+import functools
 import urllib.parse
+import hashlib

 from .common import InfoExtractor
 from ..utils import (
+    OnDemandPagedList,
     int_or_none,
     mimetype2ext,
-    remove_end,
-    strip_or_none,
-    unified_strdate,
-    url_or_none,
-    urljoin,
+    traverse_obj,
+    unified_timestamp,
 )


-class IwaraBaseIE(InfoExtractor):
-    _BASE_REGEX = r'(?P<base_url>https?://(?:www\.|ecchi\.)?iwara\.tv)'
-
-    def _extract_playlist(self, base_url, webpage):
-        for path in re.findall(r'class="title">\s*<a[^<]+href="([^"]+)', webpage):
-            yield self.url_result(urljoin(base_url, path))
-
-
-class IwaraIE(IwaraBaseIE):
-    _VALID_URL = fr'{IwaraBaseIE._BASE_REGEX}/videos/(?P<id>[a-zA-Z0-9]+)'
+class IwaraIE(InfoExtractor):
+    IE_NAME = 'iwara'
+    _VALID_URL = r'https?://(?:www\.)?iwara\.tv/video/(?P<id>[a-zA-Z0-9]+)'
     _TESTS = [{
-        'url': 'http://iwara.tv/videos/amVwUl1EHpAD9RD',
-        # md5 is unstable
+        # this video cannot be played because of migration
+        'only_matching': True,
+        'url': 'https://www.iwara.tv/video/k2ayoueezfkx6gvq',
         'info_dict': {
-            'id': 'amVwUl1EHpAD9RD',
+            'id': 'k2ayoueezfkx6gvq',
             'ext': 'mp4',
-            'title': '【MMD R-18】ガールフレンド carry_me_off',
             'age_limit': 18,
-            'thumbnail': 'https://i.iwara.tv/sites/default/files/videos/thumbnails/7951/thumbnail-7951_0001.png',
-            'uploader': 'Reimu丨Action',
-            'upload_date': '20150828',
-            'description': 'md5:1d4905ce48c66c9299c617f08e106e0f',
+            'title': 'Defeat of Irybelda - アイリベルダの敗北',
+            'description': 'md5:70278abebe706647a8b4cb04cf23e0d3',
+            'uploader': 'Inwerwm',
+            'uploader_id': 'inwerwm',
+            'tags': 'count:1',
+            'like_count': 6133,
+            'view_count': 1050343,
+            'comment_count': 1,
+            'timestamp': 1677843869,
+            'modified_timestamp': 1679056362,
         },
     }, {
-        'url': 'http://ecchi.iwara.tv/videos/Vb4yf2yZspkzkBO',
-        'md5': '7e5f1f359cd51a027ba4a7b7710a50f0',
+        'url': 'https://iwara.tv/video/1ywe1sbkqwumpdxz5/',
+        'md5': '20691ce1473ec2766c0788e14c60ce66',
        'info_dict': {
-            'id': '0B1LvuHnL-sRFNXB1WHNqbGw4SXc',
+            'id': '1ywe1sbkqwumpdxz5',
             'ext': 'mp4',
-            'title': '[3D Hentai] Kyonyu × Genkai × Emaki Shinobi Girls.mp4',
             'age_limit': 18,
+            'title': 'Aponia 阿波尼亚SEX Party Tonight 手动脱衣 大奶 裸腿',
+            'description': 'md5:0c4c310f2e0592d68b9f771d348329ca',
+            'uploader': '龙也zZZ',
+            'uploader_id': 'user792540',
+            'tags': [
+                'uncategorized'
+            ],
+            'like_count': 1809,
+            'view_count': 25156,
+            'comment_count': 1,
+            'timestamp': 1678732213,
+            'modified_timestamp': 1679110271,
         },
-        'add_ie': ['GoogleDrive'],
-    }, {
-        'url': 'http://www.iwara.tv/videos/nawkaumd6ilezzgq',
-        # md5 is unstable
-        'info_dict': {
-            'id': '6liAP9s2Ojc',
-            'ext': 'mp4',
-            'age_limit': 18,
-            'title': '[MMD] Do It Again Ver.2 [1080p 60FPS] (Motion,Camera,Wav+DL)',
-            'description': 'md5:590c12c0df1443d833fbebe05da8c47a',
-            'upload_date': '20160910',
-            'uploader': 'aMMDsork',
-            'uploader_id': 'UCVOFyOSCyFkXTYYHITtqB7A',
-        },
-        'add_ie': ['Youtube'],
     }]

+    def _extract_formats(self, video_id, fileurl):
+        up = urllib.parse.urlparse(fileurl)
+        q = urllib.parse.parse_qs(up.query)
+        paths = up.path.rstrip('/').split('/')
+        # https://github.com/yt-dlp/yt-dlp/issues/6549#issuecomment-1473771047
+        x_version = hashlib.sha1('_'.join((paths[-1], q['expires'][0], '5nFp9kmbNnHdAFhaqMvt')).encode()).hexdigest()
+
+        files = self._download_json(fileurl, video_id, headers={'X-Version': x_version})
+        for fmt in files:
+            yield traverse_obj(fmt, {
+                'format_id': 'name',
+                'url': ('src', ('view', 'download'), {self._proto_relative_url}),
+                'ext': ('type', {mimetype2ext}),
+                'quality': ('name', {lambda x: int_or_none(x) or 1e4}),
+                'height': ('name', {int_or_none}),
+            }, get_all=False)
+
     def _real_extract(self, url):
         video_id = self._match_id(url)
-
-        webpage, urlh = self._download_webpage_handle(url, video_id)
-        hostname = urllib.parse.urlparse(urlh.geturl()).hostname
-        # ecchi is 'sexy' in Japanese
-        age_limit = 18 if hostname.split('.')[0] == 'ecchi' else 0
-
-        video_data = self._download_json('http://www.iwara.tv/api/video/%s' % video_id, video_id)
-
-        if not video_data:
-            iframe_url = self._html_search_regex(
-                r'<iframe[^>]+src=([\'"])(?P<url>[^\'"]+)\1',
-                webpage, 'iframe URL', group='url')
-            return {
-                '_type': 'url_transparent',
-                'url': iframe_url,
-                'age_limit': age_limit,
-            }
-
-        title = remove_end(self._html_extract_title(webpage), ' | Iwara')
-
-        thumbnail = self._html_search_regex(
-            r'poster=[\'"]([^\'"]+)', webpage, 'thumbnail', default=None)
-
-        uploader = self._html_search_regex(
-            r'class="username">([^<]+)', webpage, 'uploader', fatal=False)
-
-        upload_date = unified_strdate(self._html_search_regex(
-            r'作成日:([^\s]+)', webpage, 'upload_date', fatal=False))
-
-        description = strip_or_none(self._search_regex(
-            r'<p>(.+?(?=</div))', webpage, 'description', fatal=False,
-            flags=re.DOTALL))
-
-        formats = []
-        for a_format in video_data:
-            format_uri = url_or_none(a_format.get('uri'))
-            if not format_uri:
-                continue
-            format_id = a_format.get('resolution')
-            height = int_or_none(self._search_regex(
-                r'(\d+)p', format_id, 'height', default=None))
-            formats.append({
-                'url': self._proto_relative_url(format_uri, 'https:'),
-                'format_id': format_id,
-                'ext': mimetype2ext(a_format.get('mime')) or 'mp4',
-                'height': height,
-                'width': int_or_none(height / 9.0 * 16.0 if height else None),
-                'quality': 1 if format_id == 'Source' else 0,
-            })
+        video_data = self._download_json(f'http://api.iwara.tv/video/{video_id}', video_id)

         return {
             'id': video_id,
-            'title': title,
-            'age_limit': age_limit,
-            'formats': formats,
-            'thumbnail': self._proto_relative_url(thumbnail, 'https:'),
-            'uploader': uploader,
-            'upload_date': upload_date,
-            'description': description,
+            'age_limit': 18 if video_data.get('rating') == 'ecchi' else 0,  # ecchi is 'sexy' in Japanese
+            **traverse_obj(video_data, {
+                'title': 'title',
+                'description': 'body',
+                'uploader': ('user', 'name'),
+                'uploader_id': ('user', 'username'),
+                'tags': ('tags', ..., 'id'),
+                'like_count': 'numLikes',
+                'view_count': 'numViews',
+                'comment_count': 'numComments',
+                'timestamp': ('createdAt', {unified_timestamp}),
+                'modified_timestamp': ('updatedAt', {unified_timestamp}),
+                'thumbnail': ('file', 'id', {str}, {
+                    lambda x: f'https://files.iwara.tv/image/thumbnail/{x}/thumbnail-00.jpg'}),
+            }),
+            'formats': list(self._extract_formats(video_id, video_data.get('fileUrl'))),
         }


-class IwaraPlaylistIE(IwaraBaseIE):
-    _VALID_URL = fr'{IwaraBaseIE._BASE_REGEX}/playlist/(?P<id>[^/?#&]+)'
-    IE_NAME = 'iwara:playlist'
+class IwaraUserIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?iwara\.tv/profile/(?P<id>[^/?#&]+)'
+    IE_NAME = 'iwara:user'
+    _PER_PAGE = 32

     _TESTS = [{
-        'url': 'https://ecchi.iwara.tv/playlist/best-enf',
+        'url': 'https://iwara.tv/profile/user792540/videos',
         'info_dict': {
-            'title': 'Best enf',
-            'uploader': 'Jared98112',
-            'id': 'best-enf',
+            'id': 'user792540',
         },
-        'playlist_mincount': 1097,
+        'playlist_mincount': 80,
     }, {
-        # urlencoded
-        'url': 'https://ecchi.iwara.tv/playlist/%E3%83%97%E3%83%AC%E3%82%A4%E3%83%AA%E3%82%B9%E3%83%88-2',
+        'url': 'https://iwara.tv/profile/theblackbirdcalls/videos',
         'info_dict': {
-            'id': 'プレイリスト-2',
-            'title': 'プレイリスト',
-            'uploader': 'mainyu',
+            'id': 'theblackbirdcalls',
         },
-        'playlist_mincount': 91,
+        'playlist_mincount': 723,
+    }, {
+        'url': 'https://iwara.tv/profile/user792540',
+        'only_matching': True,
+    }, {
+        'url': 'https://iwara.tv/profile/theblackbirdcalls',
+        'only_matching': True,
     }]

+    def _entries(self, playlist_id, user_id, page):
+        videos = self._download_json(
+            'https://api.iwara.tv/videos', playlist_id,
+            note=f'Downloading page {page}',
+            query={
+                'page': page,
+                'sort': 'date',
+                'user': user_id,
+                'limit': self._PER_PAGE,
+            })
+        for x in traverse_obj(videos, ('results', ..., 'id')):
+            yield self.url_result(f'https://iwara.tv/video/{x}')
+
     def _real_extract(self, url):
-        playlist_id, base_url = self._match_valid_url(url).group('id', 'base_url')
-        playlist_id = urllib.parse.unquote(playlist_id)
-        webpage = self._download_webpage(url, playlist_id)
-
-        return {
-            '_type': 'playlist',
-            'id': playlist_id,
-            'title': self._html_search_regex(r'class="title"[^>]*>([^<]+)', webpage, 'title', fatal=False),
-            'uploader': self._html_search_regex(r'<h2>([^<]+)', webpage, 'uploader', fatal=False),
-            'entries': self._extract_playlist(base_url, webpage),
-        }
+        playlist_id = self._match_id(url)
+        user_info = self._download_json(
+            f'https://api.iwara.tv/profile/{playlist_id}', playlist_id,
+            note='Requesting user info')
+        user_id = traverse_obj(user_info, ('user', 'id'))
+
+        return self.playlist_result(
+            OnDemandPagedList(
+                functools.partial(self._entries, playlist_id, user_id),
+                self._PER_PAGE),
+            playlist_id, traverse_obj(user_info, ('user', 'name')))


-class IwaraUserIE(IwaraBaseIE):
-    _VALID_URL = fr'{IwaraBaseIE._BASE_REGEX}/users/(?P<id>[^/?#&]+)'
-    IE_NAME = 'iwara:user'
+class IwaraPlaylistIE(InfoExtractor):
+    # the ID is an UUID but I don't think it's necessary to write concrete regex
+    _VALID_URL = r'https?://(?:www\.)?iwara\.tv/playlist/(?P<id>[0-9a-f-]+)'
+    IE_NAME = 'iwara:playlist'
+    _PER_PAGE = 32

     _TESTS = [{
-        'note': 'number of all videos page is just 1 page. less than 40 videos',
-        'url': 'https://ecchi.iwara.tv/users/infinityyukarip',
-        'info_dict': {
-            'title': 'Uploaded videos from Infinity_YukariP',
-            'id': 'infinityyukarip',
-            'uploader': 'Infinity_YukariP',
-            'uploader_id': 'infinityyukarip',
-        },
-        'playlist_mincount': 39,
-    }, {
-        'note': 'no even all videos page. probably less than 10 videos',
-        'url': 'https://ecchi.iwara.tv/users/mmd-quintet',
+        'url': 'https://iwara.tv/playlist/458e5486-36a4-4ac0-b233-7e9eef01025f',
         'info_dict': {
-            'title': 'Uploaded videos from mmd quintet',
-            'id': 'mmd-quintet',
-            'uploader': 'mmd quintet',
-            'uploader_id': 'mmd-quintet',
+            'id': '458e5486-36a4-4ac0-b233-7e9eef01025f',
         },
-        'playlist_mincount': 6,
-    }, {
-        'note': 'has paging. more than 40 videos',
-        'url': 'https://ecchi.iwara.tv/users/theblackbirdcalls',
-        'info_dict': {
-            'title': 'Uploaded videos from TheBlackbirdCalls',
-            'id': 'theblackbirdcalls',
-            'uploader': 'TheBlackbirdCalls',
-            'uploader_id': 'theblackbirdcalls',
-        },
-        'playlist_mincount': 420,
-    }, {
-        'note': 'foreign chars in URL. there must be foreign characters in URL',
-        'url': 'https://ecchi.iwara.tv/users/ぶた丼',
-        'info_dict': {
-            'title': 'Uploaded videos from ぶた丼',
-            'id': 'ぶた丼',
-            'uploader': 'ぶた丼',
-            'uploader_id': 'ぶた丼',
-        },
-        'playlist_mincount': 170,
+        'playlist_mincount': 3,
     }]

-    def _entries(self, playlist_id, base_url):
-        webpage = self._download_webpage(
-            f'{base_url}/users/{playlist_id}', playlist_id)
-        videos_url = self._search_regex(r'<a href="(/users/[^/]+/videos)(?:\?[^"]+)?">', webpage, 'all videos url', default=None)
-        if not videos_url:
-            yield from self._extract_playlist(base_url, webpage)
-            return
-
-        videos_url = urljoin(base_url, videos_url)
-
-        for n in itertools.count(1):
-            page = self._download_webpage(
-                videos_url, playlist_id, note=f'Downloading playlist page {n}',
-                query={'page': str(n - 1)} if n > 1 else {})
-            yield from self._extract_playlist(
-                base_url, page)
-
-            if f'page={n}' not in page:
-                break
+    def _entries(self, playlist_id, first_page, page):
+        videos = self._download_json(
+            'https://api.iwara.tv/videos', playlist_id, f'Downloading page {page}',
+            query={'page': page, 'limit': self._PER_PAGE}) if page else first_page
+        for x in traverse_obj(videos, ('results', ..., 'id')):
+            yield self.url_result(f'https://iwara.tv/video/{x}')

     def _real_extract(self, url):
-        playlist_id, base_url = self._match_valid_url(url).group('id', 'base_url')
-        playlist_id = urllib.parse.unquote(playlist_id)
+        playlist_id = self._match_id(url)
+        page_0 = self._download_json(
+            f'https://api.iwara.tv/playlist/{playlist_id}?page=0&limit={self._PER_PAGE}', playlist_id,
+            note='Requesting playlist info')

         return self.playlist_result(
-            self._entries(playlist_id, base_url), playlist_id)
+            OnDemandPagedList(
+                functools.partial(self._entries, playlist_id, page_0),
+                self._PER_PAGE),
+            playlist_id, traverse_obj(page_0, ('title', 'name')))
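
For reference, the new _extract_formats above replaces the old HTML scraping with a signed request to the video's fileUrl endpoint. The following is a minimal standalone sketch of that signing step, not part of the commit: it assumes file_url is the fileUrl value returned by https://api.iwara.tv/video/<id>, that its path ends in a file ID, and that its query string carries an expires parameter; the salt constant is copied from the extractor.

# Standalone sketch of the X-Version signing used by _extract_formats above.
# Assumption: file_url is taken from the 'fileUrl' field of the video API response.
import hashlib
import json
import urllib.parse
import urllib.request

SALT = '5nFp9kmbNnHdAFhaqMvt'  # constant taken from _extract_formats


def fetch_sources(file_url):
    parsed = urllib.parse.urlparse(file_url)
    file_id = parsed.path.rstrip('/').split('/')[-1]  # last path segment
    expires = urllib.parse.parse_qs(parsed.query)['expires'][0]
    # X-Version is the SHA-1 hex digest of "<file_id>_<expires>_<salt>"
    x_version = hashlib.sha1(f'{file_id}_{expires}_{SALT}'.encode()).hexdigest()
    req = urllib.request.Request(file_url, headers={'X-Version': x_version})
    with urllib.request.urlopen(req) as resp:
        # expected: a list of dicts carrying 'name', 'type' and 'src' keys
        return json.load(resp)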
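
The test URLs above also double as a quick way to exercise all three rewritten extractors programmatically. A small smoke-test sketch, assuming a yt-dlp build that includes this commit (URLs taken from the tests; no media is downloaded):

# Smoke test for the iwara, iwara:user and iwara:playlist extractors.
# Assumes yt-dlp with this commit is installed; only metadata is fetched.
import yt_dlp

URLS = [
    'https://iwara.tv/video/1ywe1sbkqwumpdxz5/',                       # iwara
    'https://iwara.tv/profile/user792540/videos',                      # iwara:user
    'https://iwara.tv/playlist/458e5486-36a4-4ac0-b233-7e9eef01025f',  # iwara:playlist
]

with yt_dlp.YoutubeDL({'extract_flat': 'in_playlist'}) as ydl:
    for url in URLS:
        info = ydl.extract_info(url, download=False)
        print(info.get('extractor'), info.get('id'), info.get('title'))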
