Merge remote-tracking branch 'gcmalloc/master' into fork_master
commit 2b5b2cb84c
@@ -1,133 +0,0 @@
#!/usr/bin/env python3

import io  # for python 2
import json
import os
import sys
import unittest

# Allow direct execution
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import youtube_dl.InfoExtractors

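# HEADER is written verbatim at the top of the generated test_download.py;
# everything inside the triple-quoted string is code for the generated module,
# not for this script.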
HEADER = u'''#!/usr/bin/env python

# DO NOT EDIT THIS FILE BY HAND!
# It is auto-generated from tests.json and gentests.py.

import hashlib
import io
import os
import json
import unittest
import sys
import socket

# Allow direct execution
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import youtube_dl.FileDownloader
import youtube_dl.InfoExtractors
from youtube_dl.utils import *

# General configuration (from __init__, not very elegant...)
jar = compat_cookiejar.CookieJar()
cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
proxy_handler = compat_urllib_request.ProxyHandler()
opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
compat_urllib_request.install_opener(opener)
socket.setdefaulttimeout(300)  # 5 minutes should be enough (famous last words)

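# FileDownloader subclass used by the generated tests: route error output
# through to_screen so it shows up in normal test output.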
class FileDownloader(youtube_dl.FileDownloader):
    def __init__(self, *args, **kwargs):
        youtube_dl.FileDownloader.__init__(self, *args, **kwargs)
        self.to_stderr = self.to_screen

def _file_md5(fn):
    with open(fn, 'rb') as f:
        return hashlib.md5(f.read()).hexdigest()
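# unittest.skipUnless does not exist on Python 2.6, so fall back to a
# decorator that just prints that the test was skipped.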
try:
    _skip_unless = unittest.skipUnless
except AttributeError:  # Python 2.6
    def _skip_unless(cond, reason='No reason given'):
        def resfunc(f):
            # Start the function name with test to appease nosetests-2.6
            def test_wfunc(*args, **kwargs):
                if cond:
                    return f(*args, **kwargs)
                else:
                    print('Skipped test')
                    return
            test_wfunc.__name__ = f.__name__
            return test_wfunc
        return resfunc
_skip = lambda *args, **kwargs: _skip_unless(False, *args, **kwargs)

class DownloadTest(unittest.TestCase):
    PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json")

    def setUp(self):
        # Clear old files
        self.tearDown()

        with io.open(self.PARAMETERS_FILE, encoding='utf-8') as pf:
            self.parameters = json.load(pf)
'''

FOOTER = u'''

if __name__ == '__main__':
    unittest.main()
'''

DEF_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tests.json')
TEST_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'test_download.py')

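# gentests() reads the test definitions from tests.json and writes a complete
# unittest module to test_download.py: HEADER, one test method per definition,
# a generated tearDown(), then FOOTER.
# Each definition is a JSON object with "name", "url" and "file" keys, plus
# optional "md5", "params", "addIEs" and "skip"; for example (hypothetical
# values): {"name": "Youtube", "url": "...", "file": "...", "md5": "..."}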
def gentests():
    with io.open(DEF_FILE, encoding='utf-8') as deff:
        defs = json.load(deff)
    with io.open(TEST_FILE, 'w', encoding='utf-8') as testf:
        testf.write(HEADER)
        spaces = ' ' * 4
        write = lambda l: testf.write(spaces + l + u'\n')

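        # Each write() call emits one line at class-body indentation; lines
        # that belong inside a generated method carry four extra spaces in
        # the string literal itself.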
        for d in defs:
            name = d['name']
            ie = getattr(youtube_dl.InfoExtractors, name + 'IE')
            testf.write(u'\n')
            write('@_skip_unless(youtube_dl.InfoExtractors.' + name + 'IE._WORKING, "IE marked as not _WORKING")')
            if not d['file']:
                write('@_skip("No output file specified")')
            elif 'skip' in d:
                write('@_skip(' + repr(d['skip']) + ')')
            write('def test_' + name + '(self):')
            write('    filename = ' + repr(d['file']))
            write('    params = self.parameters')
            for p in d.get('params', {}):
                write('    params["' + p + '"] = ' + repr(d['params'][p]))
            write('    fd = FileDownloader(params)')
            write('    fd.add_info_extractor(youtube_dl.InfoExtractors.' + name + 'IE())')
            for ien in d.get('addIEs', []):
                write('    fd.add_info_extractor(youtube_dl.InfoExtractors.' + ien + 'IE())')
            write('    fd.download([' + repr(d['url']) + '])')
            write('    self.assertTrue(os.path.exists(filename))')
            if 'md5' in d:
                write('    md5_for_file = _file_md5(filename)')
                write('    self.assertEqual(md5_for_file, ' + repr(d['md5']) + ')')

        testf.write(u'\n\n')
        write('def tearDown(self):')
        for d in defs:
            if d['file']:
                write('    if os.path.exists(' + repr(d['file']) + '):')
                write('        os.remove(' + repr(d['file']) + ')')
            else:
                write('    # No file specified for ' + d['name'])
        testf.write(u'\n')
        testf.write(FOOTER)

if __name__ == '__main__':
    gentests()