from __future__ import unicode_literals

import itertools
import re

from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote
from ..utils import (
    int_or_none,
    parse_duration,
    sanitized_Request,
    str_to_int,
)
@ -88,45 +90,43 @@ class XTubeIE(InfoExtractor):
|
|
|
|
|
|
|
|
|
|
|
|
class XTubeUserIE(InfoExtractor):
    """Extract all videos from an XTube user profile as a playlist.

    Profile URLs have the form /profile/<name>-<numeric id>; the videos are
    fetched page by page from a JSON endpoint and each video is delegated to
    XTubeIE via an xtube:<id> URL result.
    """
    IE_DESC = 'XTube user profile'
    _VALID_URL = r'https?://(?:www\.)?xtube\.com/profile/(?P<id>[^/]+-\d+)'
    _TEST = {
        'url': 'http://www.xtube.com/profile/greenshowers-4056496',
        'info_dict': {
            'id': 'greenshowers-4056496',
            'age_limit': 18,
        },
        'playlist_mincount': 155,
    }

    def _real_extract(self, url):
        user_id = self._match_id(url)

        entries = []
        # The profile's video list is paginated; request JSON pages starting
        # at 1 until the server stops returning HTML or reports the last page.
        for pagenum in itertools.count(1):
            request = sanitized_Request(
                'http://www.xtube.com/profile/%s/videos/%d' % (user_id, pagenum),
                headers={
                    # Cookie pre-acknowledges the pop-under ad so the real
                    # page content is served.
                    'Cookie': 'popunder=4',
                    # The endpoint only answers AJAX-style requests.
                    'X-Requested-With': 'XMLHttpRequest',
                    'Referer': url,
                })

            page = self._download_json(
                request, user_id, 'Downloading videos JSON page %d' % pagenum)

            html = page.get('html')
            if not html:
                # No rendered markup means no (more) videos.
                break

            # Each video tile carries its id in a data-plid attribute;
            # the backreference \1 matches the opening quote style.
            for _, video_id in re.findall(r'data-plid=(["\'])(.+?)\1', html):
                entries.append(self.url_result(
                    'xtube:%s' % video_id, XTubeIE.ie_key()))

            # Stop when the reported page count is reached (or missing).
            page_count = int_or_none(page.get('pageCount'))
            if not page_count or pagenum == page_count:
                break

        playlist = self.playlist_result(entries, user_id)
        # All XTube content is adult-only.
        playlist['age_limit'] = 18
        return playlist