@@ -1,5 +1,4 @@
 # coding: utf-8
-from __future__ import unicode_literals
 
 import itertools
 import hashlib
@@ -9,7 +8,6 @@ import time
 
 from .common import InfoExtractor
 from ..compat import (
-    compat_str,
     compat_HTTPError,
 )
 from ..utils import (
@@ -19,9 +17,8 @@ from ..utils import (
     int_or_none,
     lowercase_escape,
     std_headers,
-    try_get,
+    traverse_obj,
     url_or_none,
-    variadic,
     urlencode_postdata,
 )
 
@@ -72,6 +69,58 @@ class InstagramBaseIE(InfoExtractor):
     def _real_initialize(self):
         self._login()
 
+    def _get_count(self, media, kind, *keys):
+        return traverse_obj(
+            media, (kind, 'count'), *((f'edge_media_{key}', 'count') for key in keys),
+            expected_type=int_or_none)
+
+    def _get_dimension(self, name, media, webpage=None):
+        return (
+            traverse_obj(media, ('dimensions', name), expected_type=int_or_none)
+            or int_or_none(self._html_search_meta(
+                (f'og:video:{name}', f'video:{name}'), webpage or '', default=None)))
+
+    def _extract_nodes(self, nodes, is_direct=False):
+        for idx, node in enumerate(nodes, start=1):
+            if node.get('__typename') != 'GraphVideo' and node.get('is_video') is not True:
+                continue
+
+            video_id = node.get('shortcode')
+
+            if is_direct:
+                info = {
+                    'id': video_id or node['id'],
+                    'url': node.get('video_url'),
+                    'width': self._get_dimension('width', node),
+                    'height': self._get_dimension('height', node),
+                    'http_headers': {
+                        'Referer': 'https://www.instagram.com/',
+                    }
+                }
+            elif not video_id:
+                continue
+            else:
+                info = {
+                    '_type': 'url',
+                    'ie_key': 'Instagram',
+                    'id': video_id,
+                    'url': f'https://instagram.com/p/{video_id}',
+                }
+
+            yield {
+                **info,
+                'title': node.get('title') or (f'Video {idx}' if is_direct else None),
+                'description': traverse_obj(
+                    node, ('edge_media_to_caption', 'edges', 0, 'node', 'text'), expected_type=str),
+                'thumbnail': traverse_obj(
+                    node, 'display_url', 'thumbnail_src', 'display_src', expected_type=url_or_none),
+                'duration': float_or_none(node.get('video_duration')),
+                'timestamp': int_or_none(node.get('taken_at_timestamp')),
+                'view_count': int_or_none(node.get('video_view_count')),
+                'comment_count': self._get_count(node, 'comments', 'preview_comment', 'to_comment', 'to_parent_comment'),
+                'like_count': self._get_count(node, 'likes', 'preview_like'),
+            }
+
 
 class InstagramIOSIE(InfoExtractor):
     IE_DESC = 'IOS instagram:// URL'
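For reference, the `_get_count` helper added above folds the old per-key `get_count` loop into a single `traverse_obj` call that tries `(kind, 'count')` first and then each `('edge_media_<key>', 'count')` path. A minimal standalone sketch of that lookup order, using a simplified `dig` stand-in rather than the real `yt_dlp.utils.traverse_obj` (all names here are illustrative, not part of the patch):

```python
# Illustration only: `dig` is a simplified stand-in for yt_dlp.utils.traverse_obj,
# limited to tuple-of-keys paths over nested dicts.
def dig(obj, *paths):
    for path in paths:
        cur = obj
        for key in path:
            if not isinstance(cur, dict):
                cur = None
                break
            cur = cur.get(key)
        if cur is not None:
            return cur
    return None


def get_count(media, kind, *keys):
    # Same lookup order as the new InstagramBaseIE._get_count:
    # media[kind]['count'] first, then media['edge_media_<key>']['count'] for each key.
    return dig(media, (kind, 'count'), *((f'edge_media_{key}', 'count') for key in keys))


media = {'edge_media_preview_like': {'count': 42}}
print(get_count(media, 'likes', 'preview_like'))  # -> 42
```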
@@ -234,29 +283,22 @@ class InstagramIE(InstagramBaseIE):
         return mobj.group('link')
 
     def _real_extract(self, url):
-        mobj = self._match_valid_url(url)
-        video_id = mobj.group('id')
-        url = mobj.group('url')
-
+        video_id, url = self._match_valid_url(url).group('id', 'url')
         webpage, urlh = self._download_webpage_handle(url, video_id)
-        if 'www.instagram.com/accounts/login' in urlh.geturl().rstrip('/'):
+        if 'www.instagram.com/accounts/login' in urlh.geturl():
             self.raise_login_required('You need to log in to access this content')
 
-        (media, video_url, description, thumbnails, timestamp, uploader,
-         uploader_id, like_count, comment_count, comments, height,
-         width) = [None] * 12
-
         shared_data = self._parse_json(
             self._search_regex(
                 r'window\._sharedData\s*=\s*({.+?});',
                 webpage, 'shared data', default='{}'),
             video_id, fatal=False)
-        if shared_data:
-            media = try_get(
-                shared_data,
-                (lambda x: x['entry_data']['PostPage'][0]['graphql']['shortcode_media'],
-                 lambda x: x['entry_data']['PostPage'][0]['media']),
-                dict)
+        media = traverse_obj(
+            shared_data,
+            ('entry_data', 'PostPage', 0, 'graphql', 'shortcode_media'),
+            ('entry_data', 'PostPage', 0, 'media'),
+            expected_type=dict)
+
         # _sharedData.entry_data.PostPage is empty when authenticated (see
         # https://github.com/ytdl-org/youtube-dl/pull/22880)
         if not media:
@@ -265,125 +307,71 @@ class InstagramIE(InstagramBaseIE):
                     r'window\.__additionalDataLoaded\s*\(\s*[^,]+,\s*({.+?})\s*\)\s*;',
                     webpage, 'additional data', default='{}'),
                 video_id, fatal=False)
-            if additional_data:
-                media = try_get(
-                    additional_data, lambda x: x['graphql']['shortcode_media'],
-                    dict)
-        if media:
-            video_url = media.get('video_url')
-            height = int_or_none(self._html_search_meta(('og:video:height', 'video:height'), webpage)) or try_get(media, lambda x: x['dimensions']['height'])
-            width = int_or_none(self._html_search_meta(('og:video:width', 'video:width'), webpage)) or try_get(media, lambda x: x['dimensions']['width'])
-            description = try_get(
-                media, lambda x: x['edge_media_to_caption']['edges'][0]['node']['text'],
-                compat_str) or media.get('caption')
-            title = media.get('title')
-            display_resources = media.get('display_resources')
-            if not display_resources:
-                display_resources = [{'src': media.get('display_src')}, {'src': media.get('display_url')}]
-            duration = float_or_none(media.get('video_duration'))
-            timestamp = int_or_none(media.get('taken_at_timestamp') or media.get('date'))
-            uploader = try_get(media, lambda x: x['owner']['full_name'])
-            uploader_id = try_get(media, lambda x: x['owner']['username'])
-
-            def get_count(keys, kind):
-                for key in variadic(keys):
-                    count = int_or_none(try_get(
-                        media, (lambda x: x['edge_media_%s' % key]['count'],
-                                lambda x: x['%ss' % kind]['count'])))
-                    if count is not None:
-                        return count
-
-            like_count = get_count('preview_like', 'like')
-            comment_count = get_count(
-                ('preview_comment', 'to_comment', 'to_parent_comment'), 'comment')
-
-            thumbnails = [{
-                'url': thumbnail['src'],
-                'width': thumbnail.get('config_width'),
-                'height': thumbnail.get('config_height'),
-            } for thumbnail in display_resources if thumbnail.get('src')]
-
-            comments = []
-            for comment in try_get(media, lambda x: x['edge_media_to_parent_comment']['edges']):
-                comment_dict = comment.get('node', {})
-                comment_text = comment_dict.get('text')
-                if comment_text:
-                    comments.append({
-                        'author': try_get(comment_dict, lambda x: x['owner']['username']),
-                        'author_id': try_get(comment_dict, lambda x: x['owner']['id']),
-                        'id': comment_dict.get('id'),
-                        'text': comment_text,
-                        'timestamp': int_or_none(comment_dict.get('created_at')),
-                    })
-            if not video_url:
-                edges = try_get(
-                    media, lambda x: x['edge_sidecar_to_children']['edges'],
-                    list) or []
-                if edges:
-                    entries = []
-                    for edge_num, edge in enumerate(edges, start=1):
-                        node = try_get(edge, lambda x: x['node'], dict)
-                        if not node:
-                            continue
-                        node_video_url = url_or_none(node.get('video_url'))
-                        if not node_video_url:
-                            continue
-                        entries.append({
-                            'id': node.get('shortcode') or node['id'],
-                            'title': node.get('title') or 'Video %d' % edge_num,
-                            'url': node_video_url,
-                            'thumbnail': node.get('display_url'),
-                            'duration': float_or_none(node.get('video_duration')),
-                            'width': int_or_none(try_get(node, lambda x: x['dimensions']['width'])),
-                            'height': int_or_none(try_get(node, lambda x: x['dimensions']['height'])),
-                            'view_count': int_or_none(node.get('video_view_count')),
-                        })
-                    return self.playlist_result(
-                        entries, video_id,
-                        'Post by %s' % uploader_id if uploader_id else None,
-                        description)
-
+            media = traverse_obj(additional_data, ('graphql', 'shortcode_media'), expected_type=dict) or {}
+
+        uploader_id = traverse_obj(media, ('owner', 'username')) or self._search_regex(
+            r'"owner"\s*:\s*{\s*"username"\s*:\s*"(.+?)"', webpage, 'uploader id', fatal=False)
+
+        description = (
+            traverse_obj(media, ('edge_media_to_caption', 'edges', 0, 'node', 'text'), expected_type=str)
+            or media.get('caption'))
+        if not description:
+            description = self._search_regex(
+                r'"caption"\s*:\s*"(.+?)"', webpage, 'description', default=None)
+            if description is not None:
+                description = lowercase_escape(description)
+
+        video_url = media.get('video_url')
         if not video_url:
+            nodes = traverse_obj(media, ('edge_sidecar_to_children', 'edges', ..., 'node'), expected_type=dict) or []
+            if nodes:
+                return self.playlist_result(
+                    self._extract_nodes(nodes, True), video_id,
+                    'Post by %s' % uploader_id if uploader_id else None, description)
+
             video_url = self._og_search_video_url(webpage, secure=False)
 
         formats = [{
             'url': video_url,
-            'width': width,
-            'height': height,
+            'width': self._get_dimension('width', media, webpage),
+            'height': self._get_dimension('height', media, webpage),
         }]
-        dash = try_get(media, lambda x: x['dash_info']['video_dash_manifest'])
+        dash = traverse_obj(media, ('dash_info', 'video_dash_manifest'))
         if dash:
             formats.extend(self._parse_mpd_formats(self._parse_xml(dash, video_id), mpd_id='dash'))
         self._sort_formats(formats)
 
-        if not uploader_id:
-            uploader_id = self._search_regex(
-                r'"owner"\s*:\s*{\s*"username"\s*:\s*"(.+?)"',
-                webpage, 'uploader id', fatal=False)
-
-        if not description:
-            description = self._search_regex(
-                r'"caption"\s*:\s*"(.+?)"', webpage, 'description', default=None)
-            if description is not None:
-                description = lowercase_escape(description)
-
-        if not thumbnails:
-            thumbnails = self._og_search_thumbnail(webpage)
+        comments = [{
+            'author': traverse_obj(comment_dict, ('node', 'owner', 'username')),
+            'author_id': traverse_obj(comment_dict, ('node', 'owner', 'id')),
+            'id': traverse_obj(comment_dict, ('node', 'id')),
+            'text': traverse_obj(comment_dict, ('node', 'text')),
+            'timestamp': traverse_obj(comment_dict, ('node', 'created_at'), expected_type=int_or_none),
+        } for comment_dict in traverse_obj(media, ('edge_media_to_parent_comment', 'edges'))]
+
+        display_resources = (
+            media.get('display_resources')
+            or [{'src': media.get(key)} for key in ('display_src', 'display_url')]
+            or [{'src': self._og_search_thumbnail(webpage)}])
+        thumbnails = [{
+            'url': thumbnail['src'],
+            'width': thumbnail.get('config_width'),
+            'height': thumbnail.get('config_height'),
+        } for thumbnail in display_resources if thumbnail.get('src')]
 
         return {
             'id': video_id,
             'formats': formats,
-            'ext': 'mp4',
-            'title': title or 'Video by %s' % uploader_id,
+            'title': media.get('title') or 'Video by %s' % uploader_id,
             'description': description,
-            'duration': duration,
-            'thumbnails': thumbnails,
-            'timestamp': timestamp,
+            'duration': float_or_none(media.get('video_duration')),
+            'timestamp': traverse_obj(media, 'taken_at_timestamp', 'date', expected_type=int_or_none),
             'uploader_id': uploader_id,
-            'uploader': uploader,
-            'like_count': like_count,
-            'comment_count': comment_count,
+            'uploader': traverse_obj(media, ('owner', 'full_name')),
+            'like_count': self._get_count(media, 'likes', 'preview_like'),
+            'comment_count': self._get_count(media, 'comments', 'preview_comment', 'to_comment', 'to_parent_comment'),
             'comments': comments,
+            'thumbnails': thumbnails,
             'http_headers': {
                 'Referer': 'https://www.instagram.com/',
             }
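Several scalar fields in the rewritten return dict rely on `traverse_obj` being given alternative keys plus an `expected_type` callable (the timestamp, for instance, falls back from `taken_at_timestamp` to `date` and is coerced with `int_or_none`). A rough standalone sketch of that pattern, with a simplified `first_of` helper standing in for the real `yt_dlp.utils.traverse_obj`:

```python
# Illustration only: `first_of` mimics traverse_obj(obj, key_a, key_b, expected_type=f)
# for plain top-level keys; the real helper supports far more path types.
def int_or_none(v):
    try:
        return int(v)
    except (TypeError, ValueError):
        return None


def first_of(obj, *keys, expected_type=None):
    for key in keys:
        value = obj.get(key)
        if expected_type is not None:
            value = expected_type(value) if value is not None else None
        if value is not None:
            return value
    return None


media = {'date': '1638316800'}
print(first_of(media, 'taken_at_timestamp', 'date', expected_type=int_or_none))  # -> 1638316800
```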
@@ -402,10 +390,6 @@ class InstagramPlaylistBaseIE(InstagramBaseIE):
     def _extract_graphql(self, data, url):
         # Parses GraphQL queries containing videos and generates a playlist.
 
-        def get_count(suffix):
-            return int_or_none(try_get(
-                node, lambda x: x['edge_media_' + suffix]['count']))
-
         uploader_id = self._match_id(url)
         csrf_token = data['config']['csrf_token']
         rhx_gis = data.get('rhx_gis') or '3c7ca9dcefcf966d11dacf1f151335e8'
@@ -454,55 +438,14 @@
                            continue
                    raise
 
-            edges = media.get('edges')
-            if not edges or not isinstance(edges, list):
+            nodes = traverse_obj(media, ('edges', ..., 'node'), expected_type=dict) or []
+            if not nodes:
                 break
+            yield from self._extract_nodes(nodes)
 
-            for edge in edges:
-                node = edge.get('node')
-                if not node or not isinstance(node, dict):
-                    continue
-                if node.get('__typename') != 'GraphVideo' and node.get('is_video') is not True:
-                    continue
-                video_id = node.get('shortcode')
-                if not video_id:
-                    continue
-
-                info = self.url_result(
-                    'https://instagram.com/p/%s/' % video_id,
-                    ie=InstagramIE.ie_key(), video_id=video_id)
-
-                description = try_get(
-                    node, lambda x: x['edge_media_to_caption']['edges'][0]['node']['text'],
-                    compat_str)
-                thumbnail = node.get('thumbnail_src') or node.get('display_src')
-                timestamp = int_or_none(node.get('taken_at_timestamp'))
-
-                comment_count = get_count('to_comment')
-                like_count = get_count('preview_like')
-                view_count = int_or_none(node.get('video_view_count'))
-
-                info.update({
-                    'description': description,
-                    'thumbnail': thumbnail,
-                    'timestamp': timestamp,
-                    'comment_count': comment_count,
-                    'like_count': like_count,
-                    'view_count': view_count,
-                })
-
-                yield info
-
-            page_info = media.get('page_info')
-            if not page_info or not isinstance(page_info, dict):
-                break
-
-            has_next_page = page_info.get('has_next_page')
-            if not has_next_page:
-                break
-
-            cursor = page_info.get('end_cursor')
-            if not cursor or not isinstance(cursor, compat_str):
+            has_next_page = traverse_obj(media, ('page_info', 'has_next_page'))
+            cursor = traverse_obj(media, ('page_info', 'end_cursor'), expected_type=str)
+            if not has_next_page or not cursor:
                 break
 
     def _real_extract(self, url):
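The pagination loop above now collects nodes with `traverse_obj(media, ('edges', ..., 'node'), expected_type=dict)`, where the `...` path component fans out over every list element. A rough plain-Python equivalent of just that one path, for readers unfamiliar with the helper (the real `yt_dlp.utils.traverse_obj` is considerably more general):

```python
def edges_to_nodes(media):
    # Plain-dict equivalent of traverse_obj(media, ('edges', ..., 'node'), expected_type=dict):
    # take media['edges'], fan out over each element, keep each element's 'node' if it is a dict.
    edges = media.get('edges') or []
    return [edge['node'] for edge in edges
            if isinstance(edge, dict) and isinstance(edge.get('node'), dict)]


media = {'edges': [{'node': {'shortcode': 'abc'}}, {'node': {'shortcode': 'def'}}, {'junk': 1}]}
print(edges_to_nodes(media))  # -> [{'shortcode': 'abc'}, {'shortcode': 'def'}]
```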