mirror of https://github.com/yt-dlp/yt-dlp
commit 34e14a9beb
Merge remote-tracking branch 'upstream/master'
@@ -1 +0,0 @@
2012.12.99
@@ -0,0 +1,5 @@

{{commands}}


complete --command youtube-dl --arguments ":ytfavorites :ytrecommended :ytsubscriptions :ytwatchlater :ythistory"
@@ -0,0 +1,48 @@
#!/usr/bin/env python
from __future__ import unicode_literals

import optparse
import os
from os.path import dirname as dirn
import sys

sys.path.append(dirn(dirn((os.path.abspath(__file__)))))
import youtube_dl
from youtube_dl.utils import shell_quote

FISH_COMPLETION_FILE = 'youtube-dl.fish'
FISH_COMPLETION_TEMPLATE = 'devscripts/fish-completion.in'

EXTRA_ARGS = {
    'recode-video': ['--arguments', 'mp4 flv ogg webm mkv', '--exclusive'],

    # Options that need a file parameter
    'download-archive': ['--require-parameter'],
    'cookies': ['--require-parameter'],
    'load-info': ['--require-parameter'],
    'batch-file': ['--require-parameter'],
}


def build_completion(opt_parser):
    commands = []

    for group in opt_parser.option_groups:
        for option in group.option_list:
            long_option = option.get_opt_string().strip('-')
            help_msg = shell_quote([option.help])
            complete_cmd = ['complete', '--command', 'youtube-dl', '--long-option', long_option]
            if option._short_opts:
                complete_cmd += ['--short-option', option._short_opts[0].strip('-')]
            if option.help != optparse.SUPPRESS_HELP:
                complete_cmd += ['--description', option.help]
            complete_cmd.extend(EXTRA_ARGS.get(long_option, []))
            commands.append(shell_quote(complete_cmd))

    with open(FISH_COMPLETION_TEMPLATE) as f:
        template = f.read()
    filled_template = template.replace('{{commands}}', '\n'.join(commands))
    with open(FISH_COMPLETION_FILE, 'w') as f:
        f.write(filled_template)


parser = youtube_dl.parseOpts()[0]
build_completion(parser)
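# Usage sketch (not part of this commit): what a single generated completion
# line looks like, assuming youtube_dl is importable. The option name, short
# flag and help text below are illustrative, mirroring -u/--username as
# defined in the option parser later in this diff.
from youtube_dl.utils import shell_quote

complete_cmd = [
    'complete', '--command', 'youtube-dl',
    '--long-option', 'username', '--short-option', 'u',
    '--description', 'account username',
]
print(complete_cmd and shell_quote(complete_cmd))
# -> complete --command youtube-dl --long-option username --short-option u --description 'account username'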
@@ -1,89 +0,0 @@
#!/usr/bin/env python

import sys, os
import json, hashlib

try:
    import urllib.request as compat_urllib_request
except ImportError: # Python 2
    import urllib2 as compat_urllib_request

def rsa_verify(message, signature, key):
    from struct import pack
    from hashlib import sha256
    from sys import version_info
    def b(x):
        if version_info[0] == 2: return x
        else: return x.encode('latin1')
    assert(type(message) == type(b('')))
    block_size = 0
    n = key[0]
    while n:
        block_size += 1
        n >>= 8
    signature = pow(int(signature, 16), key[1], key[0])
    raw_bytes = []
    while signature:
        raw_bytes.insert(0, pack("B", signature & 0xFF))
        signature >>= 8
    signature = (block_size - len(raw_bytes)) * b('\x00') + b('').join(raw_bytes)
    if signature[0:2] != b('\x00\x01'): return False
    signature = signature[2:]
    if not b('\x00') in signature: return False
    signature = signature[signature.index(b('\x00'))+1:]
    if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')): return False
    signature = signature[19:]
    if signature != sha256(message).digest(): return False
    return True

sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
sys.stderr.write(u'This will only happen once. Simply press enter to go on. Sorry for the trouble!\n')
sys.stderr.write(u'From now on, get the binaries from http://rg3.github.io/youtube-dl/download.html, not from the git repository.\n\n')

try:
    raw_input()
except NameError: # Python 3
    input()

filename = sys.argv[0]

UPDATE_URL = "http://rg3.github.io/youtube-dl/update/"
VERSION_URL = UPDATE_URL + 'LATEST_VERSION'
JSON_URL = UPDATE_URL + 'versions.json'
UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)

if not os.access(filename, os.W_OK):
    sys.exit('ERROR: no write permissions on %s' % filename)

try:
    versions_info = compat_urllib_request.urlopen(JSON_URL).read().decode('utf-8')
    versions_info = json.loads(versions_info)
except:
    sys.exit(u'ERROR: can\'t obtain versions info. Please try again later.')
if not 'signature' in versions_info:
    sys.exit(u'ERROR: the versions file is not signed or corrupted. Aborting.')
signature = versions_info['signature']
del versions_info['signature']
if not rsa_verify(json.dumps(versions_info, sort_keys=True).encode('utf-8'), signature, UPDATES_RSA_KEY):
    sys.exit(u'ERROR: the versions file signature is invalid. Aborting.')

version = versions_info['versions'][versions_info['latest']]

try:
    urlh = compat_urllib_request.urlopen(version['bin'][0])
    newcontent = urlh.read()
    urlh.close()
except (IOError, OSError) as err:
    sys.exit('ERROR: unable to download latest version')

newcontent_hash = hashlib.sha256(newcontent).hexdigest()
if newcontent_hash != version['bin'][1]:
    sys.exit(u'ERROR: the downloaded file hash does not match. Aborting.')

try:
    with open(filename, 'wb') as outf:
        outf.write(newcontent)
except (IOError, OSError) as err:
    sys.exit('ERROR: unable to overwrite current version')

sys.stderr.write(u'Done! Now you can run youtube-dl.\n')
Binary file not shown.
@@ -0,0 +1,53 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import url_basename


class BehindKinkIE(InfoExtractor):
    _VALID_URL = r'http://(?:www\.)?behindkink\.com/(?P<year>[0-9]{4})/(?P<month>[0-9]{2})/(?P<day>[0-9]{2})/(?P<id>[^/#?_]+)'
    _TEST = {
        'url': 'http://www.behindkink.com/2014/08/14/ab1576-performers-voice-finally-heard-the-bill-is-killed/',
        'md5': '41ad01222b8442089a55528fec43ec01',
        'info_dict': {
            'id': '36370',
            'ext': 'mp4',
            'title': 'AB1576 - PERFORMERS VOICE FINALLY HEARD - THE BILL IS KILLED!',
            'description': 'The adult industry voice was finally heard as Assembly Bill 1576 remained\xa0 in suspense today at the Senate Appropriations Hearing. AB1576 was, among other industry damaging issues, a condom mandate...',
            'upload_date': '20140814',
            'thumbnail': 'http://www.behindkink.com/wp-content/uploads/2014/08/36370_AB1576_Win.jpg',
            'age_limit': 18,
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        display_id = mobj.group('id')
        year = mobj.group('year')
        month = mobj.group('month')
        day = mobj.group('day')
        upload_date = year + month + day

        webpage = self._download_webpage(url, display_id)

        video_url = self._search_regex(
            r"'file':\s*'([^']+)'",
            webpage, 'URL base')

        video_id = url_basename(video_url)
        video_id = video_id.split('_')[0]

        return {
            'id': video_id,
            'url': video_url,
            'ext': 'mp4',
            'title': self._og_search_title(webpage),
            'display_id': display_id,
            'thumbnail': self._og_search_thumbnail(webpage),
            'description': self._og_search_description(webpage),
            'upload_date': upload_date,
            'age_limit': 18,
        }
@@ -0,0 +1,108 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    compat_parse_qs,
    compat_urllib_parse,
    remove_end,
    HEADRequest,
    compat_HTTPError,
)


class CloudyIE(InfoExtractor):
    _IE_DESC = 'cloudy.ec and videoraj.ch'
    _VALID_URL = r'''(?x)
        https?://(?:www\.)?(?P<host>cloudy\.ec|videoraj\.ch)/
        (?:v/|embed\.php\?id=)
        (?P<id>[A-Za-z0-9]+)
        '''
    _EMBED_URL = 'http://www.%s/embed.php?id=%s'
    _API_URL = 'http://www.%s/api/player.api.php?%s'
    _MAX_TRIES = 2
    _TESTS = [
        {
            'url': 'https://www.cloudy.ec/v/af511e2527aac',
            'md5': '5cb253ace826a42f35b4740539bedf07',
            'info_dict': {
                'id': 'af511e2527aac',
                'ext': 'flv',
                'title': 'Funny Cats and Animals Compilation june 2013',
            }
        },
        {
            'url': 'http://www.videoraj.ch/v/47f399fd8bb60',
            'md5': '7d0f8799d91efd4eda26587421c3c3b0',
            'info_dict': {
                'id': '47f399fd8bb60',
                'ext': 'flv',
                'title': 'Burning a New iPhone 5 with Gasoline - Will it Survive?',
            }
        }
    ]

    def _extract_video(self, video_host, video_id, file_key, error_url=None, try_num=0):

        if try_num > self._MAX_TRIES - 1:
            raise ExtractorError('Unable to extract video URL', expected=True)

        form = {
            'file': video_id,
            'key': file_key,
        }

        if error_url:
            form.update({
                'numOfErrors': try_num,
                'errorCode': '404',
                'errorUrl': error_url,
            })

        data_url = self._API_URL % (video_host, compat_urllib_parse.urlencode(form))
        player_data = self._download_webpage(
            data_url, video_id, 'Downloading player data')
        data = compat_parse_qs(player_data)

        try_num += 1

        if 'error' in data:
            raise ExtractorError(
                '%s error: %s' % (self.IE_NAME, ' '.join(data['error_msg'])),
                expected=True)

        title = data.get('title', [None])[0]
        if title:
            title = remove_end(title, '&asdasdas').strip()

        video_url = data.get('url', [None])[0]

        if video_url:
            try:
                self._request_webpage(HEADRequest(video_url), video_id, 'Checking video URL')
            except ExtractorError as e:
                if isinstance(e.cause, compat_HTTPError) and e.cause.code in [404, 410]:
                    self.report_warning('Invalid video URL, requesting another', video_id)
                    return self._extract_video(video_host, video_id, file_key, video_url, try_num)

        return {
            'id': video_id,
            'url': video_url,
            'title': title,
        }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_host = mobj.group('host')
        video_id = mobj.group('id')

        url = self._EMBED_URL % (video_host, video_id)
        webpage = self._download_webpage(url, video_id)

        file_key = self._search_regex(
            r'filekey\s*=\s*"([^"]+)"', webpage, 'file_key')

        return self._extract_video(video_host, video_id, file_key)
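# Usage sketch (not part of this commit): exercising one of the new extractors
# through the public YoutubeDL API instead of calling _real_extract() directly.
# The URL is taken from CloudyIE._TESTS above; skip_download keeps this to
# metadata extraction only.
import youtube_dl

ydl = youtube_dl.YoutubeDL({'skip_download': True})
info = ydl.extract_info('https://www.cloudy.ec/v/af511e2527aac', download=False)
print(info['id'], info.get('title'))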
@@ -0,0 +1,89 @@
from __future__ import unicode_literals

import json
import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
    orderedSet,
)


class DeezerPlaylistIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?deezer\.com/playlist/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://www.deezer.com/playlist/176747451',
        'info_dict': {
            'id': '176747451',
            'title': 'Best!',
            'uploader': 'Anonymous',
            'thumbnail': 're:^https?://cdn-images.deezer.com/images/cover/.*\.jpg$',
        },
        'playlist_count': 30,
        'skip': 'Only available in .de',
    }

    def _real_extract(self, url):
        if 'test' not in self._downloader.params:
            self._downloader.report_warning('For now, this extractor only supports the 30 second previews. Patches welcome!')

        mobj = re.match(self._VALID_URL, url)
        playlist_id = mobj.group('id')

        webpage = self._download_webpage(url, playlist_id)
        geoblocking_msg = self._html_search_regex(
            r'<p class="soon-txt">(.*?)</p>', webpage, 'geoblocking message',
            default=None)
        if geoblocking_msg is not None:
            raise ExtractorError(
                'Deezer said: %s' % geoblocking_msg, expected=True)

        data_json = self._search_regex(
            r'naboo\.display\(\'[^\']+\',\s*(.*?)\);\n', webpage, 'data JSON')
        data = json.loads(data_json)

        playlist_title = data.get('DATA', {}).get('TITLE')
        playlist_uploader = data.get('DATA', {}).get('PARENT_USERNAME')
        playlist_thumbnail = self._search_regex(
            r'<img id="naboo_playlist_image".*?src="([^"]+)"', webpage,
            'playlist thumbnail')

        preview_pattern = self._search_regex(
            r"var SOUND_PREVIEW_GATEWAY\s*=\s*'([^']+)';", webpage,
            'preview URL pattern', fatal=False)
        entries = []
        for s in data['SONGS']['data']:
            puid = s['MD5_ORIGIN']
            preview_video_url = preview_pattern.\
                replace('{0}', puid[0]).\
                replace('{1}', puid).\
                replace('{2}', s['MEDIA_VERSION'])
            formats = [{
                'format_id': 'preview',
                'url': preview_video_url,
                'preference': -100,  # Only the first 30 seconds
                'ext': 'mp3',
            }]
            self._sort_formats(formats)
            artists = ', '.join(
                orderedSet(a['ART_NAME'] for a in s['ARTISTS']))
            entries.append({
                'id': s['SNG_ID'],
                'duration': int_or_none(s.get('DURATION')),
                'title': '%s - %s' % (artists, s['SNG_TITLE']),
                'uploader': s['ART_NAME'],
                'uploader_id': s['ART_ID'],
                'age_limit': 16 if s.get('EXPLICIT_LYRICS') == '1' else 0,
                'formats': formats,
            })

        return {
            '_type': 'playlist',
            'id': playlist_id,
            'title': playlist_title,
            'uploader': playlist_uploader,
            'thumbnail': playlist_thumbnail,
            'entries': entries,
        }
@@ -0,0 +1,61 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor


class EinthusanIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?einthusan\.com/movies/watch.php\?([^#]*?)id=(?P<id>[0-9]+)'
    _TESTS = [
        {
            'url': 'http://www.einthusan.com/movies/watch.php?id=2447',
            'md5': 'af244f4458cd667205e513d75da5b8b1',
            'info_dict': {
                'id': '2447',
                'ext': 'mp4',
                'title': 'Ek Villain',
                'thumbnail': 're:^https?://.*\.jpg$',
                'description': 'md5:9d29fc91a7abadd4591fb862fa560d93',
            }
        },
        {
            'url': 'http://www.einthusan.com/movies/watch.php?id=1671',
            'md5': 'ef63c7a803e22315880ed182c10d1c5c',
            'info_dict': {
                'id': '1671',
                'ext': 'mp4',
                'title': 'Soodhu Kavvuum',
                'thumbnail': 're:^https?://.*\.jpg$',
                'description': 'md5:05d8a0c0281a4240d86d76e14f2f4d51',
            }
        },
    ]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        webpage = self._download_webpage(url, video_id)

        video_title = self._html_search_regex(
            r'<h1><a class="movie-title".*?>(.*?)</a></h1>', webpage, 'title')

        video_url = self._html_search_regex(
            r'''(?s)jwplayer\("mediaplayer"\)\.setup\({.*?'file': '([^']+)'.*?}\);''',
            webpage, 'video url')

        description = self._html_search_meta('description', webpage)
        thumbnail = self._html_search_regex(
            r'''<a class="movie-cover-wrapper".*?><img src=["'](.*?)["'].*?/></a>''',
            webpage, "thumbnail url", fatal=False)
        if thumbnail is not None:
            thumbnail = thumbnail.replace('..', 'http://www.einthusan.com')

        return {
            'id': video_id,
            'title': video_title,
            'url': video_url,
            'thumbnail': thumbnail,
            'description': description,
        }
@@ -0,0 +1,84 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    compat_urllib_request,
    int_or_none,
    urlencode_postdata,
)


class HostingBulkIE(InfoExtractor):
    _VALID_URL = r'''(?x)
        https?://(?:www\.)?hostingbulk\.com/
        (?:embed-)?(?P<id>[A-Za-z0-9]{12})(?:-\d+x\d+)?\.html'''
    _FILE_DELETED_REGEX = r'<b>File Not Found</b>'
    _TEST = {
        'url': 'http://hostingbulk.com/n0ulw1hv20fm.html',
        'md5': '6c8653c8ecf7ebfa83b76e24b7b2fe3f',
        'info_dict': {
            'id': 'n0ulw1hv20fm',
            'ext': 'mp4',
            'title': 'md5:5afeba33f48ec87219c269e054afd622',
            'filesize': 6816081,
            'thumbnail': 're:^http://.*\.jpg$',
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        url = 'http://hostingbulk.com/{0:}.html'.format(video_id)

        # Custom request with cookie to set language to English, so our file
        # deleted regex would work.
        request = compat_urllib_request.Request(
            url, headers={'Cookie': 'lang=english'})
        webpage = self._download_webpage(request, video_id)

        if re.search(self._FILE_DELETED_REGEX, webpage) is not None:
            raise ExtractorError('Video %s does not exist' % video_id,
                                 expected=True)

        title = self._html_search_regex(r'<h3>(.*?)</h3>', webpage, 'title')
        filesize = int_or_none(
            self._search_regex(
                r'<small>\((\d+)\sbytes?\)</small>',
                webpage,
                'filesize',
                fatal=False
            )
        )
        thumbnail = self._search_regex(
            r'<img src="([^"]+)".+?class="pic"',
            webpage, 'thumbnail', fatal=False)

        fields = dict(re.findall(r'''(?x)<input\s+
            type="hidden"\s+
            name="([^"]+)"\s+
            value="([^"]*)"
            ''', webpage))

        request = compat_urllib_request.Request(url, urlencode_postdata(fields))
        request.add_header('Content-type', 'application/x-www-form-urlencoded')
        response = self._request_webpage(request, video_id,
                                         'Submiting download request')
        video_url = response.geturl()

        formats = [{
            'format_id': 'sd',
            'filesize': filesize,
            'url': video_url,
        }]

        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'formats': formats,
        }
@@ -0,0 +1,112 @@
# coding: utf-8
from __future__ import unicode_literals

import json
import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    compat_urllib_parse,
    compat_urllib_request,
    int_or_none,
)


class MoeVideoIE(InfoExtractor):
    IE_DESC = 'LetitBit video services: moevideo.net, playreplay.net and videochart.net'
    _VALID_URL = r'''(?x)
        https?://(?P<host>(?:www\.)?
        (?:(?:moevideo|playreplay|videochart)\.net))/
        (?:video|framevideo)/(?P<id>[0-9]+\.[0-9A-Za-z]+)'''
    _API_URL = 'http://api.letitbit.net/'
    _API_KEY = 'tVL0gjqo5'
    _TESTS = [
        {
            'url': 'http://moevideo.net/video/00297.0036103fe3d513ef27915216fd29',
            'md5': '129f5ae1f6585d0e9bb4f38e774ffb3a',
            'info_dict': {
                'id': '00297.0036103fe3d513ef27915216fd29',
                'ext': 'flv',
                'title': 'Sink cut out machine',
                'description': 'md5:f29ff97b663aefa760bf7ca63c8ca8a8',
                'thumbnail': 're:^https?://.*\.jpg$',
                'width': 540,
                'height': 360,
                'duration': 179,
                'filesize': 17822500,
            }
        },
        {
            'url': 'http://playreplay.net/video/77107.7f325710a627383d40540d8e991a',
            'md5': '74f0a014d5b661f0f0e2361300d1620e',
            'info_dict': {
                'id': '77107.7f325710a627383d40540d8e991a',
                'ext': 'flv',
                'title': 'Operacion Condor.',
                'description': 'md5:7e68cb2fcda66833d5081c542491a9a3',
                'thumbnail': 're:^https?://.*\.jpg$',
                'width': 480,
                'height': 296,
                'duration': 6027,
                'filesize': 588257923,
            }
        },
    ]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        webpage = self._download_webpage(
            'http://%s/video/%s' % (mobj.group('host'), video_id),
            video_id, 'Downloading webpage')

        title = self._og_search_title(webpage)
        thumbnail = self._og_search_thumbnail(webpage)
        description = self._og_search_description(webpage)

        r = [
            self._API_KEY,
            [
                'preview/flv_link',
                {
                    'uid': video_id,
                },
            ],
        ]
        r_json = json.dumps(r)
        post = compat_urllib_parse.urlencode({'r': r_json})
        req = compat_urllib_request.Request(self._API_URL, post)
        req.add_header('Content-type', 'application/x-www-form-urlencoded')

        response = self._download_json(req, video_id)
        if response['status'] != 'OK':
            raise ExtractorError(
                '%s returned error: %s' % (self.IE_NAME, response['data']),
                expected=True
            )
        item = response['data'][0]
        video_url = item['link']
        duration = int_or_none(item['length'])
        width = int_or_none(item['width'])
        height = int_or_none(item['height'])
        filesize = int_or_none(item['convert_size'])

        formats = [{
            'format_id': 'sd',
            'http_headers': {'Range': 'bytes=0-'},  # Required to download
            'url': video_url,
            'width': width,
            'height': height,
            'filesize': filesize,
        }]

        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'description': description,
            'duration': duration,
            'formats': formats,
        }
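# Sketch (not part of this commit): MoeVideoIE posts a urlencoded JSON array to
# the LetitBit API; a minimal, self-contained illustration of that request body,
# reusing the API key and the first test video id from the code above.
import json
try:
    from urllib.parse import urlencode  # Python 3
except ImportError:
    from urllib import urlencode  # Python 2

r = ['tVL0gjqo5', ['preview/flv_link', {'uid': '00297.0036103fe3d513ef27915216fd29'}]]
post = urlencode({'r': json.dumps(r)})  # body POSTed to http://api.letitbit.net/
print(post)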
@@ -0,0 +1,70 @@
# coding: utf-8
from __future__ import unicode_literals

import os.path
import re

from .common import InfoExtractor
from ..utils import (
    compat_urllib_parse,
    compat_urllib_request,
)


class MonikerIE(InfoExtractor):
    IE_DESC = 'allmyvideos.net and vidspot.net'
    _VALID_URL = r'https?://(?:www\.)?(?:allmyvideos|vidspot)\.net/(?P<id>[a-zA-Z0-9_-]+)'

    _TESTS = [{
        'url': 'http://allmyvideos.net/jih3nce3x6wn',
        'md5': '710883dee1bfc370ecf9fa6a89307c88',
        'info_dict': {
            'id': 'jih3nce3x6wn',
            'ext': 'mp4',
            'title': 'youtube-dl test video',
        },
    }, {
        'url': 'http://vidspot.net/l2ngsmhs8ci5',
        'md5': '710883dee1bfc370ecf9fa6a89307c88',
        'info_dict': {
            'id': 'l2ngsmhs8ci5',
            'ext': 'mp4',
            'title': 'youtube-dl test video',
        },
    }, {
        'url': 'https://www.vidspot.net/l2ngsmhs8ci5',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        orig_webpage = self._download_webpage(url, video_id)
        fields = re.findall(r'type="hidden" name="(.+?)"\s* value="?(.+?)">', orig_webpage)
        data = dict(fields)

        post = compat_urllib_parse.urlencode(data)
        headers = {
            b'Content-Type': b'application/x-www-form-urlencoded',
        }
        req = compat_urllib_request.Request(url, post, headers)
        webpage = self._download_webpage(
            req, video_id, note='Downloading video page ...')

        title = os.path.splitext(data['fname'])[0]

        # Could be several links with different quality
        links = re.findall(r'"file" : "?(.+?)",', webpage)
        # Assume the links are ordered in quality
        formats = [{
            'url': l,
            'quality': i,
        } for i, l in enumerate(links)]
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
        }
@@ -0,0 +1,77 @@
# coding: utf-8
from __future__ import unicode_literals

import datetime
import json

from .common import InfoExtractor
from ..utils import (
    determine_ext,
    int_or_none,
    js_to_json,
)


class MuenchenTVIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?muenchen\.tv/livestream'
    IE_DESC = 'münchen.tv'
    _TEST = {
        'url': 'http://www.muenchen.tv/livestream/',
        'info_dict': {
            'id': '5334',
            'display_id': 'live',
            'ext': 'mp4',
            'title': 're:^münchen.tv-Livestream [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
            'is_live': True,
        },
        'params': {
            'skip_download': True,
        }
    }

    def _real_extract(self, url):
        display_id = 'live'
        webpage = self._download_webpage(url, display_id)

        now = datetime.datetime.now()
        now_str = now.strftime("%Y-%m-%d %H:%M")
        title = self._og_search_title(webpage) + ' ' + now_str

        data_js = self._search_regex(
            r'(?s)\nplaylist:\s*(\[.*?}\]),related:',
            webpage, 'playlist configuration')
        data_json = js_to_json(data_js)
        data = json.loads(data_json)[0]

        video_id = data['mediaid']
        thumbnail = data.get('image')

        formats = []
        for format_num, s in enumerate(data['sources']):
            ext = determine_ext(s['file'], None)
            label_str = s.get('label')
            if label_str is None:
                label_str = '_%d' % format_num

            if ext is None:
                format_id = label_str
            else:
                format_id = '%s-%s' % (ext, label_str)

            formats.append({
                'url': s['file'],
                'tbr': int_or_none(s.get('label')),
                'ext': 'mp4',
                'format_id': format_id,
                'preference': -100 if '.smil' in s['file'] else 0,
            })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'formats': formats,
            'is_live': True,
        }
@@ -0,0 +1,65 @@
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    str_to_int,
)


class PornoXOIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?pornoxo\.com/videos/(?P<id>\d+)/(?P<display_id>[^/]+)\.html'
    _TEST = {
        'url': 'http://www.pornoxo.com/videos/7564/striptease-from-sexy-secretary.html',
        'md5': '582f28ecbaa9e6e24cb90f50f524ce87',
        'info_dict': {
            'id': '7564',
            'ext': 'flv',
            'title': 'Striptease From Sexy Secretary!',
            'description': 'Striptease From Sexy Secretary!',
            'categories': list,  # NSFW
            'thumbnail': 're:https?://.*\.jpg$',
            'age_limit': 18,
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        webpage = self._download_webpage(url, video_id)

        video_url = self._html_search_regex(
            r'\'file\'\s*:\s*"([^"]+)"', webpage, 'video_url')

        title = self._html_search_regex(
            r'<title>([^<]+)\s*-\s*PornoXO', webpage, 'title')

        description = self._html_search_regex(
            r'<meta name="description" content="([^"]+)\s*featuring',
            webpage, 'description', fatal=False)

        thumbnail = self._html_search_regex(
            r'\'image\'\s*:\s*"([^"]+)"', webpage, 'thumbnail', fatal=False)

        view_count = str_to_int(self._html_search_regex(
            r'[vV]iews:\s*([0-9,]+)', webpage, 'view count', fatal=False))

        categories_str = self._html_search_regex(
            r'<meta name="description" content=".*featuring\s*([^"]+)"',
            webpage, 'categories', fatal=False)
        categories = (
            None if categories_str is None
            else categories_str.split(','))

        return {
            'id': video_id,
            'url': video_url,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'categories': categories,
            'view_count': view_count,
            'age_limit': 18,
        }
@@ -0,0 +1,91 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    compat_urllib_parse,
    compat_urllib_request,
    parse_duration,
)


class ShareSixIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?sharesix\.com/(?:f/)?(?P<id>[0-9a-zA-Z]+)'
    _TESTS = [
        {
            'url': 'http://sharesix.com/f/OXjQ7Y6',
            'md5': '9e8e95d8823942815a7d7c773110cc93',
            'info_dict': {
                'id': 'OXjQ7Y6',
                'ext': 'mp4',
                'title': 'big_buck_bunny_480p_surround-fix.avi',
                'duration': 596,
                'width': 854,
                'height': 480,
            },
        },
        {
            'url': 'http://sharesix.com/lfrwoxp35zdd',
            'md5': 'dd19f1435b7cec2d7912c64beeee8185',
            'info_dict': {
                'id': 'lfrwoxp35zdd',
                'ext': 'flv',
                'title': 'WhiteBoard___a_Mac_vs_PC_Parody_Cartoon.mp4.flv',
                'duration': 65,
                'width': 1280,
                'height': 720,
            },
        }
    ]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        fields = {
            'method_free': 'Free'
        }
        post = compat_urllib_parse.urlencode(fields)
        req = compat_urllib_request.Request(url, post)
        req.add_header('Content-type', 'application/x-www-form-urlencoded')

        webpage = self._download_webpage(req, video_id,
                                         'Downloading video page')

        video_url = self._search_regex(
            r"var\slnk1\s=\s'([^']+)'", webpage, 'video URL')
        title = self._html_search_regex(
            r'(?s)<dt>Filename:</dt>.+?<dd>(.+?)</dd>', webpage, 'title')
        duration = parse_duration(
            self._search_regex(
                r'(?s)<dt>Length:</dt>.+?<dd>(.+?)</dd>',
                webpage,
                'duration',
                fatal=False
            )
        )

        m = re.search(
            r'''(?xs)<dt>Width\sx\sHeight</dt>.+?
                <dd>(?P<width>\d+)\sx\s(?P<height>\d+)</dd>''',
            webpage
        )
        width = height = None
        if m:
            width, height = int(m.group('width')), int(m.group('height'))

        formats = [{
            'format_id': 'sd',
            'url': video_url,
            'width': width,
            'height': height,
        }]

        return {
            'id': video_id,
            'title': title,
            'duration': duration,
            'formats': formats,
        }
@@ -0,0 +1,78 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import remove_start


class TeleMBIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?telemb\.be/(?P<display_id>.+?)_d_(?P<id>\d+)\.html'
    _TESTS = [
        {
            'url': 'http://www.telemb.be/mons-cook-with-danielle-des-cours-de-cuisine-en-anglais-_d_13466.html',
            'md5': 'f45ea69878516ba039835794e0f8f783',
            'info_dict': {
                'id': '13466',
                'display_id': 'mons-cook-with-danielle-des-cours-de-cuisine-en-anglais-',
                'ext': 'mp4',
                'title': 'Mons - Cook with Danielle : des cours de cuisine en anglais ! - Les reportages',
                'description': 'md5:bc5225f47b17c309761c856ad4776265',
                'thumbnail': 're:^http://.*\.(?:jpg|png)$',
            }
        },
        {
            # non-ASCII characters in download URL
            'url': 'http://telemb.be/les-reportages-havre-incendie-mortel_d_13514.html',
            'md5': '6e9682736e5ccd4eab7f21e855350733',
            'info_dict': {
                'id': '13514',
                'display_id': 'les-reportages-havre-incendie-mortel',
                'ext': 'mp4',
                'title': 'Havré - Incendie mortel - Les reportages',
                'description': 'md5:5e54cb449acb029c2b7734e2d946bd4a',
                'thumbnail': 're:^http://.*\.(?:jpg|png)$',
            }
        },
    ]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        display_id = mobj.group('display_id')

        webpage = self._download_webpage(url, display_id)

        formats = []
        for video_url in re.findall(r'file\s*:\s*"([^"]+)"', webpage):
            fmt = {
                'url': video_url,
                'format_id': video_url.split(':')[0]
            }
            rtmp = re.search(r'^(?P<url>rtmp://[^/]+/(?P<app>.+))/(?P<playpath>mp4:.+)$', video_url)
            if rtmp:
                fmt.update({
                    'play_path': rtmp.group('playpath'),
                    'app': rtmp.group('app'),
                    'player_url': 'http://p.jwpcdn.com/6/10/jwplayer.flash.swf',
                    'page_url': 'http://www.telemb.be',
                    'preference': -1,
                })
            formats.append(fmt)
        self._sort_formats(formats)

        title = remove_start(self._og_search_title(webpage), 'TéléMB : ')
        description = self._html_search_regex(
            r'<meta property="og:description" content="(.+?)" />',
            webpage, 'description', fatal=False)
        thumbnail = self._og_search_thumbnail(webpage)

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'formats': formats,
        }
@@ -0,0 +1,67 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
    qualities,
    xpath_text,
)


class TurboIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?turbo\.fr/videos-voiture/(?P<id>[0-9]+)-'
    _API_URL = 'http://www.turbo.fr/api/tv/xml.php?player_generique=player_generique&id={0:}'
    _TEST = {
        'url': 'http://www.turbo.fr/videos-voiture/454443-turbo-du-07-09-2014-renault-twingo-3-bentley-continental-gt-speed-ces-guide-achat-dacia.html',
        'md5': '33f4b91099b36b5d5a91f84b5bcba600',
        'info_dict': {
            'id': '454443',
            'ext': 'mp4',
            'duration': 3715,
            'title': 'Turbo du 07/09/2014 : Renault Twingo 3, Bentley Continental GT Speed, CES, Guide Achat Dacia... ',
            'description': 'Retrouvez dans cette rubrique toutes les vidéos de l\'Turbo du 07/09/2014 : Renault Twingo 3, Bentley Continental GT Speed, CES, Guide Achat Dacia... ',
            'thumbnail': 're:^https?://.*\.jpg$',
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        webpage = self._download_webpage(url, video_id)

        playlist = self._download_xml(self._API_URL.format(video_id), video_id)
        item = playlist.find('./channel/item')
        if item is None:
            raise ExtractorError('Playlist item was not found', expected=True)

        title = xpath_text(item, './title', 'title')
        duration = int_or_none(xpath_text(item, './durate', 'duration'))
        thumbnail = xpath_text(item, './visuel_clip', 'thumbnail')
        description = self._og_search_description(webpage)

        formats = []
        get_quality = qualities(['3g', 'sd', 'hq'])
        for child in item:
            m = re.search(r'url_video_(?P<quality>.+)', child.tag)
            if m:
                quality = m.group('quality')
                formats.append({
                    'format_id': quality,
                    'url': child.text,
                    'quality': get_quality(quality),
                })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'duration': duration,
            'thumbnail': thumbnail,
            'description': description,
            'formats': formats,
        }
@@ -0,0 +1,57 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    compat_urllib_parse,
    remove_start,
)


class VideoMegaIE(InfoExtractor):
    _VALID_URL = r'''(?x)https?://
        (?:www\.)?videomega\.tv/
        (?:iframe\.php)?\?ref=(?P<id>[A-Za-z0-9]+)
        '''
    _TEST = {
        'url': 'http://videomega.tv/?ref=GKeGPVedBe',
        'md5': '240fb5bcf9199961f48eb17839b084d6',
        'info_dict': {
            'id': 'GKeGPVedBe',
            'ext': 'mp4',
            'title': 'XXL - All Sports United',
            'thumbnail': 're:^https?://.*\.jpg$',
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        url = 'http://videomega.tv/iframe.php?ref={0:}'.format(video_id)
        webpage = self._download_webpage(url, video_id)

        escaped_data = self._search_regex(
            r'unescape\("([^"]+)"\)', webpage, 'escaped data')
        playlist = compat_urllib_parse.unquote(escaped_data)

        thumbnail = self._search_regex(
            r'image:\s*"([^"]+)"', playlist, 'thumbnail', fatal=False)
        url = self._search_regex(r'file:\s*"([^"]+)"', playlist, 'URL')
        title = remove_start(self._html_search_regex(
            r'<title>(.*?)</title>', webpage, 'title'), 'VideoMega.tv - ')

        formats = [{
            'format_id': 'sd',
            'url': url,
        }]
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'thumbnail': thumbnail,
        }
@@ -0,0 +1,481 @@
from __future__ import unicode_literals

import os.path
import optparse
import shlex
import sys

from .utils import (
    get_term_width,
    write_string,
)
from .version import __version__


def parseOpts(overrideArguments=None):
    def _readOptions(filename_bytes, default=[]):
        try:
            optionf = open(filename_bytes)
        except IOError:
            return default  # silently skip if file is not present
        try:
            res = []
            for l in optionf:
                res += shlex.split(l, comments=True)
        finally:
            optionf.close()
        return res

    def _readUserConf():
        xdg_config_home = os.environ.get('XDG_CONFIG_HOME')
        if xdg_config_home:
            userConfFile = os.path.join(xdg_config_home, 'youtube-dl', 'config')
            if not os.path.isfile(userConfFile):
                userConfFile = os.path.join(xdg_config_home, 'youtube-dl.conf')
        else:
            userConfFile = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl', 'config')
            if not os.path.isfile(userConfFile):
                userConfFile = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl.conf')
        userConf = _readOptions(userConfFile, None)

        if userConf is None:
            appdata_dir = os.environ.get('appdata')
            if appdata_dir:
                userConf = _readOptions(
                    os.path.join(appdata_dir, 'youtube-dl', 'config'),
                    default=None)
                if userConf is None:
                    userConf = _readOptions(
                        os.path.join(appdata_dir, 'youtube-dl', 'config.txt'),
                        default=None)

        if userConf is None:
            userConf = _readOptions(
                os.path.join(os.path.expanduser('~'), 'youtube-dl.conf'),
                default=None)
        if userConf is None:
            userConf = _readOptions(
                os.path.join(os.path.expanduser('~'), 'youtube-dl.conf.txt'),
                default=None)

        if userConf is None:
            userConf = []

        return userConf

    def _format_option_string(option):
        ''' ('-o', '--option') -> -o, --format METAVAR'''

        opts = []

        if option._short_opts:
            opts.append(option._short_opts[0])
        if option._long_opts:
            opts.append(option._long_opts[0])
        if len(opts) > 1:
            opts.insert(1, ', ')

        if option.takes_value(): opts.append(' %s' % option.metavar)

        return "".join(opts)

    def _comma_separated_values_options_callback(option, opt_str, value, parser):
        setattr(parser.values, option.dest, value.split(','))

    def _hide_login_info(opts):
        opts = list(opts)
        for private_opt in ['-p', '--password', '-u', '--username', '--video-password']:
            try:
                i = opts.index(private_opt)
                opts[i+1] = '<PRIVATE>'
            except ValueError:
                pass
        return opts
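    # Sketch (not part of this commit): _hide_login_info masks credentials before
    # the command line is echoed back (e.g. in verbose output); illustratively,
    # with hypothetical values:
    #   _hide_login_info(['-u', 'alice', '-p', 'hunter2', 'https://example.com/v'])
    #   -> ['-u', '<PRIVATE>', '-p', '<PRIVATE>', 'https://example.com/v']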
|
||||
max_width = 80
|
||||
max_help_position = 80
|
||||
|
||||
# No need to wrap help messages if we're on a wide console
|
||||
columns = get_term_width()
|
||||
if columns: max_width = columns
|
||||
|
||||
fmt = optparse.IndentedHelpFormatter(width=max_width, max_help_position=max_help_position)
|
||||
fmt.format_option_strings = _format_option_string
|
||||
|
||||
kw = {
|
||||
'version' : __version__,
|
||||
'formatter' : fmt,
|
||||
'usage' : '%prog [options] url [url...]',
|
||||
'conflict_handler' : 'resolve',
|
||||
}
|
||||
|
||||
parser = optparse.OptionParser(**kw)
|
||||
|
||||
# option groups
|
||||
general = optparse.OptionGroup(parser, 'General Options')
|
||||
selection = optparse.OptionGroup(parser, 'Video Selection')
|
||||
authentication = optparse.OptionGroup(parser, 'Authentication Options')
|
||||
video_format = optparse.OptionGroup(parser, 'Video Format Options')
|
||||
subtitles = optparse.OptionGroup(parser, 'Subtitle Options')
|
||||
downloader = optparse.OptionGroup(parser, 'Download Options')
|
||||
postproc = optparse.OptionGroup(parser, 'Post-processing Options')
|
||||
filesystem = optparse.OptionGroup(parser, 'Filesystem Options')
|
||||
workarounds = optparse.OptionGroup(parser, 'Workarounds')
|
||||
verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
|
||||
|
||||
general.add_option('-h', '--help',
|
||||
action='help', help='print this help text and exit')
|
||||
general.add_option('-v', '--version',
|
||||
action='version', help='print program version and exit')
|
||||
general.add_option('-U', '--update',
|
||||
action='store_true', dest='update_self', help='update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)')
|
||||
general.add_option('-i', '--ignore-errors',
|
||||
action='store_true', dest='ignoreerrors', help='continue on download errors, for example to skip unavailable videos in a playlist', default=False)
|
||||
general.add_option('--abort-on-error',
|
||||
action='store_false', dest='ignoreerrors',
|
||||
help='Abort downloading of further videos (in the playlist or the command line) if an error occurs')
|
||||
general.add_option('--dump-user-agent',
|
||||
action='store_true', dest='dump_user_agent',
|
||||
help='display the current browser identification', default=False)
|
||||
general.add_option('--list-extractors',
|
||||
action='store_true', dest='list_extractors',
|
||||
help='List all supported extractors and the URLs they would handle', default=False)
|
||||
general.add_option('--extractor-descriptions',
|
||||
action='store_true', dest='list_extractor_descriptions',
|
||||
help='Output descriptions of all supported extractors', default=False)
|
||||
general.add_option(
|
||||
'--proxy', dest='proxy', default=None, metavar='URL',
|
||||
help='Use the specified HTTP/HTTPS proxy. Pass in an empty string (--proxy "") for direct connection')
|
||||
general.add_option(
|
||||
'--socket-timeout', dest='socket_timeout',
|
||||
type=float, default=None, help=u'Time to wait before giving up, in seconds')
|
||||
general.add_option(
|
||||
'--default-search',
|
||||
dest='default_search', metavar='PREFIX',
|
||||
help='Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for youtube-dl "large apple". Use the value "auto" to let youtube-dl guess ("auto_warning" to emit a warning when guessing). "error" just throws an error. The default value "fixup_error" repairs broken URLs, but emits an error if this is not possible instead of searching.')
|
||||
general.add_option(
|
||||
'--ignore-config',
|
||||
action='store_true',
|
||||
help='Do not read configuration files. When given in the global configuration file /etc/youtube-dl.conf: do not read the user configuration in ~/.config/youtube-dl.conf (%APPDATA%/youtube-dl/config.txt on Windows)')
|
||||
|
||||
selection.add_option(
|
||||
'--playlist-start',
|
||||
dest='playliststart', metavar='NUMBER', default=1, type=int,
|
||||
help='playlist video to start at (default is %default)')
|
||||
selection.add_option(
|
||||
'--playlist-end',
|
||||
dest='playlistend', metavar='NUMBER', default=None, type=int,
|
||||
help='playlist video to end at (default is last)')
|
||||
selection.add_option('--match-title', dest='matchtitle', metavar='REGEX',help='download only matching titles (regex or caseless sub-string)')
|
||||
selection.add_option('--reject-title', dest='rejecttitle', metavar='REGEX',help='skip download for matching titles (regex or caseless sub-string)')
|
||||
selection.add_option('--max-downloads', metavar='NUMBER',
|
||||
dest='max_downloads', type=int, default=None,
|
||||
help='Abort after downloading NUMBER files')
|
||||
selection.add_option('--min-filesize', metavar='SIZE', dest='min_filesize', help="Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)", default=None)
|
||||
selection.add_option('--max-filesize', metavar='SIZE', dest='max_filesize', help="Do not download any videos larger than SIZE (e.g. 50k or 44.6m)", default=None)
|
||||
selection.add_option('--date', metavar='DATE', dest='date', help='download only videos uploaded in this date', default=None)
|
||||
selection.add_option(
|
||||
'--datebefore', metavar='DATE', dest='datebefore', default=None,
|
||||
help='download only videos uploaded on or before this date (i.e. inclusive)')
|
||||
selection.add_option(
|
||||
'--dateafter', metavar='DATE', dest='dateafter', default=None,
|
||||
help='download only videos uploaded on or after this date (i.e. inclusive)')
|
||||
selection.add_option(
|
||||
'--min-views', metavar='COUNT', dest='min_views',
|
||||
default=None, type=int,
|
||||
help="Do not download any videos with less than COUNT views",)
|
||||
selection.add_option(
|
||||
'--max-views', metavar='COUNT', dest='max_views',
|
||||
default=None, type=int,
|
||||
help="Do not download any videos with more than COUNT views",)
|
||||
selection.add_option('--no-playlist', action='store_true', dest='noplaylist', help='download only the currently playing video', default=False)
|
||||
selection.add_option('--age-limit', metavar='YEARS', dest='age_limit',
|
||||
help='download only videos suitable for the given age',
|
||||
default=None, type=int)
|
||||
selection.add_option('--download-archive', metavar='FILE',
|
||||
dest='download_archive',
|
||||
help='Download only videos not listed in the archive file. Record the IDs of all downloaded videos in it.')
|
||||
selection.add_option(
|
||||
'--include-ads', dest='include_ads',
|
||||
action='store_true',
|
||||
help='Download advertisements as well (experimental)')
|
||||
selection.add_option(
|
||||
'--youtube-include-dash-manifest', action='store_true',
|
||||
dest='youtube_include_dash_manifest', default=False,
|
||||
help='Try to download the DASH manifest on YouTube videos (experimental)')
|
||||
|
||||
authentication.add_option('-u', '--username',
|
||||
dest='username', metavar='USERNAME', help='account username')
|
||||
authentication.add_option('-p', '--password',
|
||||
dest='password', metavar='PASSWORD', help='account password')
|
||||
authentication.add_option('-2', '--twofactor',
|
||||
dest='twofactor', metavar='TWOFACTOR', help='two-factor auth code')
|
||||
authentication.add_option('-n', '--netrc',
|
||||
action='store_true', dest='usenetrc', help='use .netrc authentication data', default=False)
|
||||
authentication.add_option('--video-password',
|
||||
dest='videopassword', metavar='PASSWORD', help='video password (vimeo, smotri)')
|
||||
|
||||
|
||||
video_format.add_option('-f', '--format',
|
||||
action='store', dest='format', metavar='FORMAT', default=None,
|
||||
help='video format code, specify the order of preference using slashes: -f 22/17/18 . -f mp4 , -f m4a and -f flv are also supported. You can also use the special names "best", "bestvideo", "bestaudio", "worst", "worstvideo" and "worstaudio". By default, youtube-dl will pick the best quality. Use commas to download multiple audio formats, such as -f 136/137/mp4/bestvideo,140/m4a/bestaudio')
|
||||
video_format.add_option('--all-formats',
|
||||
action='store_const', dest='format', help='download all available video formats', const='all')
|
||||
video_format.add_option('--prefer-free-formats',
|
||||
action='store_true', dest='prefer_free_formats', default=False, help='prefer free video formats unless a specific one is requested')
|
||||
video_format.add_option('--max-quality',
|
||||
action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')
|
||||
video_format.add_option('-F', '--list-formats',
|
||||
action='store_true', dest='listformats', help='list all available formats')
|
||||
|
||||
subtitles.add_option('--write-sub', '--write-srt',
|
||||
action='store_true', dest='writesubtitles',
|
||||
help='write subtitle file', default=False)
|
||||
subtitles.add_option('--write-auto-sub', '--write-automatic-sub',
|
||||
action='store_true', dest='writeautomaticsub',
|
||||
help='write automatic subtitle file (youtube only)', default=False)
|
||||
subtitles.add_option('--all-subs',
|
||||
action='store_true', dest='allsubtitles',
|
||||
help='downloads all the available subtitles of the video', default=False)
|
||||
subtitles.add_option('--list-subs',
|
||||
action='store_true', dest='listsubtitles',
|
||||
help='lists all available subtitles for the video', default=False)
|
||||
subtitles.add_option('--sub-format',
|
||||
action='store', dest='subtitlesformat', metavar='FORMAT',
|
||||
help='subtitle format (default=srt) ([sbv/vtt] youtube only)', default='srt')
|
||||
subtitles.add_option('--sub-lang', '--sub-langs', '--srt-lang',
|
||||
action='callback', dest='subtitleslangs', metavar='LANGS', type='str',
|
||||
default=[], callback=_comma_separated_values_options_callback,
|
||||
help='languages of the subtitles to download (optional) separated by commas, use IETF language tags like \'en,pt\'')
|
||||
|
||||
downloader.add_option('-r', '--rate-limit',
|
||||
dest='ratelimit', metavar='LIMIT', help='maximum download rate in bytes per second (e.g. 50K or 4.2M)')
|
||||
downloader.add_option('-R', '--retries',
|
||||
dest='retries', metavar='RETRIES', help='number of retries (default is %default)', default=10)
|
||||
downloader.add_option('--buffer-size',
|
||||
dest='buffersize', metavar='SIZE', help='size of download buffer (e.g. 1024 or 16K) (default is %default)', default="1024")
|
||||
downloader.add_option('--no-resize-buffer',
|
||||
action='store_true', dest='noresizebuffer',
|
||||
help='do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.', default=False)
|
||||
downloader.add_option('--test', action='store_true', dest='test', default=False, help=optparse.SUPPRESS_HELP)
|
||||
|
||||
workarounds.add_option(
|
||||
'--encoding', dest='encoding', metavar='ENCODING',
|
||||
help='Force the specified encoding (experimental)')
|
||||
workarounds.add_option(
|
||||
'--no-check-certificate', action='store_true',
|
||||
dest='no_check_certificate', default=False,
|
||||
help='Suppress HTTPS certificate validation.')
|
||||
workarounds.add_option(
|
||||
'--prefer-insecure', '--prefer-unsecure', action='store_true', dest='prefer_insecure',
|
||||
help='Use an unencrypted connection to retrieve information about the video. (Currently supported only for YouTube)')
|
||||
workarounds.add_option(
|
||||
'--user-agent', metavar='UA',
|
||||
dest='user_agent', help='specify a custom user agent')
|
||||
workarounds.add_option(
|
||||
'--referer', metavar='REF',
|
||||
dest='referer', default=None,
|
||||
help='specify a custom referer, use if the video access is restricted to one domain',
|
||||
)
|
||||
workarounds.add_option(
|
||||
'--add-header', metavar='FIELD:VALUE',
|
||||
dest='headers', action='append',
|
||||
help='specify a custom HTTP header and its value, separated by a colon \':\'. You can use this option multiple times',
|
||||
)
|
||||
workarounds.add_option(
|
||||
'--bidi-workaround', dest='bidi_workaround', action='store_true',
|
||||
help=u'Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH')
|
||||
|
||||
verbosity.add_option('-q', '--quiet',
|
||||
action='store_true', dest='quiet', help='activates quiet mode', default=False)
|
||||
verbosity.add_option(
|
||||
'--no-warnings',
|
||||
dest='no_warnings', action='store_true', default=False,
|
||||
help='Ignore warnings')
|
||||
verbosity.add_option('-s', '--simulate',
|
||||
action='store_true', dest='simulate', help='do not download the video and do not write anything to disk', default=False)
|
||||
verbosity.add_option('--skip-download',
|
||||
action='store_true', dest='skip_download', help='do not download the video', default=False)
|
||||
verbosity.add_option('-g', '--get-url',
|
||||
action='store_true', dest='geturl', help='simulate, quiet but print URL', default=False)
|
||||
verbosity.add_option('-e', '--get-title',
|
||||
action='store_true', dest='gettitle', help='simulate, quiet but print title', default=False)
|
||||
verbosity.add_option('--get-id',
|
||||
action='store_true', dest='getid', help='simulate, quiet but print id', default=False)
|
||||
verbosity.add_option('--get-thumbnail',
|
||||
action='store_true', dest='getthumbnail',
|
||||
help='simulate, quiet but print thumbnail URL', default=False)
|
||||
verbosity.add_option('--get-description',
|
||||
action='store_true', dest='getdescription',
|
||||
help='simulate, quiet but print video description', default=False)
|
||||
verbosity.add_option('--get-duration',
|
||||
action='store_true', dest='getduration',
|
||||
help='simulate, quiet but print video length', default=False)
|
||||
verbosity.add_option('--get-filename',
|
||||
action='store_true', dest='getfilename',
|
||||
help='simulate, quiet but print output filename', default=False)
|
||||
verbosity.add_option('--get-format',
|
||||
action='store_true', dest='getformat',
|
||||
help='simulate, quiet but print output format', default=False)
|
||||
verbosity.add_option('-j', '--dump-json',
|
||||
action='store_true', dest='dumpjson',
|
||||
help='simulate, quiet but print JSON information. See --output for a description of available keys.', default=False)
    verbosity.add_option('--newline',
        action='store_true', dest='progress_with_newline', help='output progress bar as new lines', default=False)
    verbosity.add_option('--no-progress',
        action='store_true', dest='noprogress', help='do not print progress bar', default=False)
    verbosity.add_option('--console-title',
        action='store_true', dest='consoletitle',
        help='display progress in console titlebar', default=False)
    verbosity.add_option('-v', '--verbose',
        action='store_true', dest='verbose', help='print various debugging information', default=False)
    verbosity.add_option('--dump-intermediate-pages',
        action='store_true', dest='dump_intermediate_pages', default=False,
        help='print downloaded pages to debug problems (very verbose)')
    verbosity.add_option('--write-pages',
        action='store_true', dest='write_pages', default=False,
        help='Write downloaded intermediary pages to files in the current directory to debug problems')
    verbosity.add_option('--youtube-print-sig-code',
        action='store_true', dest='youtube_print_sig_code', default=False,
        help=optparse.SUPPRESS_HELP)
    verbosity.add_option('--print-traffic',
        dest='debug_printtraffic', action='store_true', default=False,
        help='Display sent and read HTTP traffic')

    filesystem.add_option('-a', '--batch-file',
        dest='batchfile', metavar='FILE', help='file containing URLs to download (\'-\' for stdin)')
    filesystem.add_option('--id',
        action='store_true', dest='useid', help='use only video ID in file name', default=False)
    filesystem.add_option('-A', '--auto-number',
        action='store_true', dest='autonumber',
        help='number downloaded files starting from 00000', default=False)
    filesystem.add_option('-o', '--output',
        dest='outtmpl', metavar='TEMPLATE',
        help=('output filename template. Use %(title)s to get the title, '
              '%(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, '
              '%(autonumber)s to get an automatically incremented number, '
              '%(ext)s for the filename extension, '
              '%(format)s for the format description (like "22 - 1280x720" or "HD"), '
              '%(format_id)s for the unique id of the format (like Youtube\'s itags: "137"), '
              '%(upload_date)s for the upload date (YYYYMMDD), '
              '%(extractor)s for the provider (youtube, metacafe, etc), '
              '%(id)s for the video id, %(playlist)s for the playlist the video is in, '
              '%(playlist_index)s for the position in the playlist and %% for a literal percent. '
              '%(height)s and %(width)s for the height and width of the video format. '
              '%(resolution)s for a textual description of the resolution of the video format. '
              'Use - to output to stdout. Can also be used to download to a different directory, '
              'for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .'))
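    # Illustrative sketch (not part of the original file): the template above is
    # expanded with ordinary Python %-style formatting over the extracted
    # metadata dict, roughly:
    #   '%(uploader)s/%(title)s-%(id)s.%(ext)s' % {
    #       'uploader': 'SomeChannel', 'title': 'Demo', 'id': 'abc123', 'ext': 'mp4',
    #   }
    # which yields 'SomeChannel/Demo-abc123.mp4'; all values here are made up.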
    filesystem.add_option('--autonumber-size',
        dest='autonumber_size', metavar='NUMBER',
        help='Specifies the number of digits in %(autonumber)s when it is present in output filename template or --auto-number option is given')
    filesystem.add_option('--restrict-filenames',
        action='store_true', dest='restrictfilenames',
        help='Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames', default=False)
    filesystem.add_option('-t', '--title',
        action='store_true', dest='usetitle', help='[deprecated] use title in file name (default)', default=False)
    filesystem.add_option('-l', '--literal',
        action='store_true', dest='usetitle', help='[deprecated] alias of --title', default=False)
    filesystem.add_option('-w', '--no-overwrites',
        action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
    filesystem.add_option('-c', '--continue',
        action='store_true', dest='continue_dl', help='force resume of partially downloaded files. By default, youtube-dl will resume downloads if possible.', default=True)
    filesystem.add_option('--no-continue',
        action='store_false', dest='continue_dl',
        help='do not resume partially downloaded files (restart from beginning)')
    filesystem.add_option('--no-part',
        action='store_true', dest='nopart', help='do not use .part files', default=False)
    filesystem.add_option('--no-mtime',
        action='store_false', dest='updatetime',
        help='do not use the Last-modified header to set the file modification time', default=True)
    filesystem.add_option('--write-description',
        action='store_true', dest='writedescription',
        help='write video description to a .description file', default=False)
    filesystem.add_option('--write-info-json',
        action='store_true', dest='writeinfojson',
        help='write video metadata to a .info.json file', default=False)
    filesystem.add_option('--write-annotations',
        action='store_true', dest='writeannotations',
        help='write video annotations to a .annotation file', default=False)
    filesystem.add_option('--write-thumbnail',
        action='store_true', dest='writethumbnail',
        help='write thumbnail image to disk', default=False)
    filesystem.add_option('--load-info',
        dest='load_info_filename', metavar='FILE',
        help='json file containing the video information (created with the "--write-info-json" option)')
    filesystem.add_option('--cookies',
        dest='cookiefile', metavar='FILE', help='file to read cookies from and dump cookie jar in')
    filesystem.add_option(
        '--cache-dir', dest='cachedir', default=None, metavar='DIR',
        help='Location in the filesystem where youtube-dl can store some downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl . At the moment, only YouTube player files (for videos with obfuscated signatures) are cached, but that may change.')
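    # Illustrative sketch (not part of the original file) of how the documented
    # default location would typically be resolved; the paths and environment
    # lookup here simply mirror the help text above:
    #   cache_root = os.environ.get('XDG_CACHE_HOME') or os.path.expanduser('~/.cache')
    #   cache_dir = opts.cachedir or os.path.join(cache_root, 'youtube-dl')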
    filesystem.add_option(
        '--no-cache-dir', action='store_const', const=False, dest='cachedir',
        help='Disable filesystem caching')
    filesystem.add_option(
        '--rm-cache-dir', action='store_true', dest='rm_cachedir',
        help='Delete all filesystem cache files')

    postproc.add_option('-x', '--extract-audio', action='store_true', dest='extractaudio', default=False,
        help='convert video files to audio-only files (requires ffmpeg or avconv and ffprobe or avprobe)')
    postproc.add_option('--audio-format', metavar='FORMAT', dest='audioformat', default='best',
        help='"best", "aac", "vorbis", "mp3", "m4a", "opus", or "wav"; best by default')
    postproc.add_option('--audio-quality', metavar='QUALITY', dest='audioquality', default='5',
        help='ffmpeg/avconv audio quality specification, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default 5)')
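    # Illustrative usage note (not part of the original file; URL is a placeholder):
    #   youtube-dl -x --audio-format mp3 --audio-quality 0 'https://www.youtube.com/watch?v=XXXXXXXXXXX'
    # downloads the video, then re-encodes the audio track to MP3 at the highest
    # VBR quality, as described by the three options above.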
    postproc.add_option('--recode-video', metavar='FORMAT', dest='recodevideo', default=None,
        help='Encode the video to another format if necessary (currently supported: mp4|flv|ogg|webm|mkv)')
    postproc.add_option('-k', '--keep-video', action='store_true', dest='keepvideo', default=False,
        help='keeps the video file on disk after the post-processing; the video is erased by default')
    postproc.add_option('--no-post-overwrites', action='store_true', dest='nopostoverwrites', default=False,
        help='do not overwrite post-processed files; the post-processed files are overwritten by default')
    postproc.add_option('--embed-subs', action='store_true', dest='embedsubtitles', default=False,
        help='embed subtitles in the video (only for mp4 videos)')
    postproc.add_option('--embed-thumbnail', action='store_true', dest='embedthumbnail', default=False,
        help='embed thumbnail in the audio as cover art')
    postproc.add_option('--add-metadata', action='store_true', dest='addmetadata', default=False,
        help='write metadata to the video file')
    postproc.add_option('--xattrs', action='store_true', dest='xattrs', default=False,
        help='write metadata to the video file\'s xattrs (using dublin core and xdg standards)')
    postproc.add_option('--prefer-avconv', action='store_false', dest='prefer_ffmpeg',
        help='Prefer avconv over ffmpeg for running the postprocessors (default)')
    postproc.add_option('--prefer-ffmpeg', action='store_true', dest='prefer_ffmpeg',
        help='Prefer ffmpeg over avconv for running the postprocessors')
    postproc.add_option(
        '--exec', metavar='CMD', dest='exec_cmd',
        help='Execute a command on the file after downloading, similar to find\'s -exec syntax. Example: --exec \'adb push {} /sdcard/Music/ && rm {}\'')
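    # Illustrative sketch (not part of the original file): after a download
    # finishes, the '{}' placeholder is replaced with the quoted output filename
    # and the command is handed to the shell, roughly:
    #   cmd = opts.exec_cmd.replace('{}', pipes.quote(filename))
    #   subprocess.call(cmd, shell=True)
    # 'filename' and the exact quoting helper are assumptions for illustration.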
    parser.add_option_group(general)
    parser.add_option_group(selection)
    parser.add_option_group(downloader)
    parser.add_option_group(filesystem)
    parser.add_option_group(verbosity)
    parser.add_option_group(workarounds)
    parser.add_option_group(video_format)
    parser.add_option_group(subtitles)
    parser.add_option_group(authentication)
    parser.add_option_group(postproc)

    if overrideArguments is not None:
        opts, args = parser.parse_args(overrideArguments)
        if opts.verbose:
            write_string(u'[debug] Override config: ' + repr(overrideArguments) + '\n')
    else:
        commandLineConf = sys.argv[1:]
        if '--ignore-config' in commandLineConf:
            systemConf = []
            userConf = []
        else:
            systemConf = _readOptions('/etc/youtube-dl.conf')
            if '--ignore-config' in systemConf:
                userConf = []
            else:
                userConf = _readUserConf()
        argv = systemConf + userConf + commandLineConf
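        # Note (illustrative comment, not in the original): because the argument
        # lists are concatenated in this order and optparse lets later occurrences
        # of an option override earlier ones, command-line flags take precedence
        # over the user config, which in turn overrides /etc/youtube-dl.conf.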

        opts, args = parser.parse_args(argv)
        if opts.verbose:
            write_string(u'[debug] System config: ' + repr(_hide_login_info(systemConf)) + '\n')
            write_string(u'[debug] User config: ' + repr(_hide_login_info(userConf)) + '\n')
            write_string(u'[debug] Command-line args: ' + repr(_hide_login_info(commandLineConf)) + '\n')

    return parser, opts, args
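
# Illustrative call (not part of the original file): embedding code can bypass
# the config files entirely by passing its own argument list, for example:
#   parser, opts, args = parseOpts(['--simulate', '--dump-json', url])
# where 'url' is a placeholder variable holding the page to process.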
@ -1,2 +1,2 @@
__version__ = '2014.09.06'
__version__ = '2014.09.19'