diff --git a/devscripts/make_contributing.py b/devscripts/make_contributing.py index a06f8a616e..e76cf885c8 100755 --- a/devscripts/make_contributing.py +++ b/devscripts/make_contributing.py @@ -8,7 +8,7 @@ def main(): return # This is unused in yt-dlp parser = optparse.OptionParser(usage='%prog INFILE OUTFILE') - options, args = parser.parse_args() + _, args = parser.parse_args() if len(args) != 2: parser.error('Expected an input and an output filename') diff --git a/pyproject.toml b/pyproject.toml index 57b315feef..d402314bd3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -75,7 +75,7 @@ dev = [ ] static-analysis = [ "autopep8~=2.0", - "ruff~=0.12.0", + "ruff~=0.13.0", ] test = [ "pytest~=8.1", diff --git a/test/test_InfoExtractor.py b/test/test_InfoExtractor.py index 40dd05e136..c15dd8a617 100644 --- a/test/test_InfoExtractor.py +++ b/test/test_InfoExtractor.py @@ -1945,7 +1945,7 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/ server_thread.daemon = True server_thread.start() - (content, urlh) = self.ie._download_webpage_handle( + content, _ = self.ie._download_webpage_handle( f'http://127.0.0.1:{port}/teapot', None, expected_status=TEAPOT_RESPONSE_STATUS) self.assertEqual(content, TEAPOT_RESPONSE_BODY) diff --git a/test/test_overwrites.py b/test/test_overwrites.py index 0beafdf12e..96a77a0081 100644 --- a/test/test_overwrites.py +++ b/test/test_overwrites.py @@ -29,7 +29,7 @@ class TestOverwrites(unittest.TestCase): '-o', 'test.webm', 'https://www.youtube.com/watch?v=jNQXAC9IVRw', ], cwd=root_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - sout, serr = outp.communicate() + sout, _ = outp.communicate() self.assertTrue(b'has already been downloaded' in sout) # if the file has no content, it has not been redownloaded self.assertTrue(os.path.getsize(download_file) < 1) @@ -41,7 +41,7 @@ class TestOverwrites(unittest.TestCase): '-o', 'test.webm', 'https://www.youtube.com/watch?v=jNQXAC9IVRw', ], cwd=root_dir, 
stdout=subprocess.PIPE, stderr=subprocess.PIPE) - sout, serr = outp.communicate() + sout, _ = outp.communicate() self.assertTrue(b'has already been downloaded' not in sout) # if the file has no content, it has not been redownloaded self.assertTrue(os.path.getsize(download_file) > 1) diff --git a/test/test_pot/test_pot_framework.py b/test/test_pot/test_pot_framework.py index bc94653f4a..d2de1dd290 100644 --- a/test/test_pot/test_pot_framework.py +++ b/test/test_pot/test_pot_framework.py @@ -153,7 +153,7 @@ class TestPoTokenProvider: with pytest.raises( PoTokenProviderRejectedRequest, - match='External requests by "example" provider do not support proxy scheme "socks4". Supported proxy ' + match=r'External requests by "example" provider do not support proxy scheme "socks4"\. Supported proxy ' 'schemes: http, socks5h', ): provider.request_pot(pot_request) diff --git a/test/test_verbose_output.py b/test/test_verbose_output.py index 21ce10a1fb..e9559d33b1 100644 --- a/test/test_verbose_output.py +++ b/test/test_verbose_output.py @@ -22,7 +22,7 @@ class TestVerboseOutput(unittest.TestCase): '--username', 'johnsmith@gmail.com', '--password', 'my_secret_password', ], cwd=rootDir, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - sout, serr = outp.communicate() + _, serr = outp.communicate() self.assertTrue(b'--username' in serr) self.assertTrue(b'johnsmith' not in serr) self.assertTrue(b'--password' in serr) @@ -36,7 +36,7 @@ class TestVerboseOutput(unittest.TestCase): '-u', 'johnsmith@gmail.com', '-p', 'my_secret_password', ], cwd=rootDir, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - sout, serr = outp.communicate() + _, serr = outp.communicate() self.assertTrue(b'-u' in serr) self.assertTrue(b'johnsmith' not in serr) self.assertTrue(b'-p' in serr) @@ -50,7 +50,7 @@ class TestVerboseOutput(unittest.TestCase): '--username=johnsmith@gmail.com', '--password=my_secret_password', ], cwd=rootDir, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - sout, serr = 
outp.communicate() + _, serr = outp.communicate() self.assertTrue(b'--username' in serr) self.assertTrue(b'johnsmith' not in serr) self.assertTrue(b'--password' in serr) @@ -64,7 +64,7 @@ class TestVerboseOutput(unittest.TestCase): '-u=johnsmith@gmail.com', '-p=my_secret_password', ], cwd=rootDir, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - sout, serr = outp.communicate() + _, serr = outp.communicate() self.assertTrue(b'-u' in serr) self.assertTrue(b'johnsmith' not in serr) self.assertTrue(b'-p' in serr) diff --git a/yt_dlp/downloader/f4m.py b/yt_dlp/downloader/f4m.py index 22d0ebd265..3c8f0265e6 100644 --- a/yt_dlp/downloader/f4m.py +++ b/yt_dlp/downloader/f4m.py @@ -149,14 +149,14 @@ class FlvReader(io.BytesIO): segments_count = self.read_unsigned_char() segments = [] for _ in range(segments_count): - box_size, box_type, box_data = self.read_box_info() + _box_size, box_type, box_data = self.read_box_info() assert box_type == b'asrt' segment = FlvReader(box_data).read_asrt() segments.append(segment) fragments_run_count = self.read_unsigned_char() fragments = [] for _ in range(fragments_run_count): - box_size, box_type, box_data = self.read_box_info() + _box_size, box_type, box_data = self.read_box_info() assert box_type == b'afrt' fragments.append(FlvReader(box_data).read_afrt()) @@ -167,7 +167,7 @@ class FlvReader(io.BytesIO): } def read_bootstrap_info(self): - total_size, box_type, box_data = self.read_box_info() + _, box_type, box_data = self.read_box_info() assert box_type == b'abst' return FlvReader(box_data).read_abst() @@ -324,9 +324,9 @@ class F4mFD(FragmentFD): if requested_bitrate is None or len(formats) == 1: # get the best format formats = sorted(formats, key=lambda f: f[0]) - rate, media = formats[-1] + _, media = formats[-1] else: - rate, media = next(filter( + _, media = next(filter( lambda f: int(f[0]) == requested_bitrate, formats)) # Prefer baseURL for relative URLs as per 11.2 of F4M 3.0 spec. 
diff --git a/yt_dlp/extractor/bilibili.py b/yt_dlp/extractor/bilibili.py index cd9bf6b165..8675061d1a 100644 --- a/yt_dlp/extractor/bilibili.py +++ b/yt_dlp/extractor/bilibili.py @@ -1366,7 +1366,7 @@ class BilibiliSpaceVideoIE(BilibiliSpaceBaseIE): else: yield self.url_result(f'https://www.bilibili.com/video/{entry["bvid"]}', BiliBiliIE, entry['bvid']) - metadata, paged_list = self._extract_playlist(fetch_page, get_metadata, get_entries) + _, paged_list = self._extract_playlist(fetch_page, get_metadata, get_entries) return self.playlist_result(paged_list, playlist_id) @@ -1400,7 +1400,7 @@ class BilibiliSpaceAudioIE(BilibiliSpaceBaseIE): for entry in page_data.get('data') or []: yield self.url_result(f'https://www.bilibili.com/audio/au{entry["id"]}', BilibiliAudioIE, entry['id']) - metadata, paged_list = self._extract_playlist(fetch_page, get_metadata, get_entries) + _, paged_list = self._extract_playlist(fetch_page, get_metadata, get_entries) return self.playlist_result(paged_list, playlist_id) diff --git a/yt_dlp/extractor/brainpop.py b/yt_dlp/extractor/brainpop.py index df10299a0c..1e4fb2c8f5 100644 --- a/yt_dlp/extractor/brainpop.py +++ b/yt_dlp/extractor/brainpop.py @@ -174,7 +174,7 @@ class BrainPOPLegacyBaseIE(BrainPOPBaseIE): } def _real_extract(self, url): - slug, display_id = self._match_valid_url(url).group('slug', 'id') + display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) topic_data = self._search_json( r'var\s+content\s*=\s*', webpage, 'content data', diff --git a/yt_dlp/extractor/cnn.py b/yt_dlp/extractor/cnn.py index 8148762c54..a601e3eb53 100644 --- a/yt_dlp/extractor/cnn.py +++ b/yt_dlp/extractor/cnn.py @@ -272,6 +272,7 @@ class CNNIndonesiaIE(InfoExtractor): return merge_dicts(json_ld_data, { '_type': 'url_transparent', 'url': embed_url, + 'id': video_id, 'upload_date': upload_date, 'tags': try_call(lambda: self._html_search_meta('keywords', webpage).split(', ')), }) diff --git a/yt_dlp/extractor/fifa.py 
b/yt_dlp/extractor/fifa.py index ae837f6a02..e08b114023 100644 --- a/yt_dlp/extractor/fifa.py +++ b/yt_dlp/extractor/fifa.py @@ -7,7 +7,7 @@ from ..utils import ( class FifaIE(InfoExtractor): - _VALID_URL = r'https?://www\.fifa\.com/fifaplus/(?P<locale>\w{2})/watch/([^#?]+/)?(?P<id>\w+)' + _VALID_URL = r'https?://www\.fifa\.com/fifaplus/\w{2}/watch/([^#?]+/)?(?P<id>\w+)' _TESTS = [{ 'url': 'https://www.fifa.com/fifaplus/en/watch/7on10qPcnyLajDDU3ntg6y', 'info_dict': { @@ -51,7 +51,7 @@ class FifaIE(InfoExtractor): }] def _real_extract(self, url): - video_id, locale = self._match_valid_url(url).group('id', 'locale') + video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) preconnect_link = self._search_regex( diff --git a/yt_dlp/extractor/nowness.py b/yt_dlp/extractor/nowness.py index c001a82e9f..8d568aef7c 100644 --- a/yt_dlp/extractor/nowness.py +++ b/yt_dlp/extractor/nowness.py @@ -129,7 +129,7 @@ class NownessSeriesIE(NownessBaseIE): } def _real_extract(self, url): - display_id, series = self._api_request(url, 'series/getBySlug/%s') + _, series = self._api_request(url, 'series/getBySlug/%s') entries = [self._extract_url_result(post) for post in series['posts']] series_title = None series_description = None diff --git a/yt_dlp/extractor/radiofrance.py b/yt_dlp/extractor/radiofrance.py index 9d90439841..fe3fc17419 100644 --- a/yt_dlp/extractor/radiofrance.py +++ b/yt_dlp/extractor/radiofrance.py @@ -414,7 +414,7 @@ class RadioFranceProgramScheduleIE(RadioFranceBaseIE): _VALID_URL = rf'''(?x) {RadioFranceBaseIE._VALID_URL_BASE} /(?P<station>{RadioFranceBaseIE._STATIONS_RE}) - /grille-programmes(?:\?date=(?P<date>[\d-]+))? 
+ /grille-programmes ''' _TESTS = [{ @@ -463,7 +463,7 @@ class RadioFranceProgramScheduleIE(RadioFranceBaseIE): })) def _real_extract(self, url): - station, date = self._match_valid_url(url).group('station', 'date') + station = self._match_valid_url(url).group('station') webpage = self._download_webpage(url, station) grid_data = self._extract_data_from_webpage(webpage, station, 'grid') upload_date = strftime_or_none(grid_data.get('date'), '%Y%m%d') diff --git a/yt_dlp/extractor/rcti.py b/yt_dlp/extractor/rcti.py index 61b73a550c..c8e57e2aba 100644 --- a/yt_dlp/extractor/rcti.py +++ b/yt_dlp/extractor/rcti.py @@ -321,7 +321,7 @@ class RCTIPlusSeriesIE(RCTIPlusBaseIE): f'Only {video_type} will be downloaded. ' f'To download everything from the series, remove "/{video_type}" from the URL') - series_meta, meta_paths = self._call_api( + series_meta, _ = self._call_api( f'https://api.rctiplus.com/api/v1/program/{series_id}/detail', display_id, 'Downloading series metadata') metadata = { 'age_limit': try_get(series_meta, lambda x: self._AGE_RATINGS[x['age_restriction'][0]['code']]), diff --git a/yt_dlp/extractor/substack.py b/yt_dlp/extractor/substack.py index f0fa00ea57..efda234fd3 100644 --- a/yt_dlp/extractor/substack.py +++ b/yt_dlp/extractor/substack.py @@ -12,7 +12,7 @@ from ..utils.traversal import traverse_obj class SubstackIE(InfoExtractor): - _VALID_URL = r'https?://(?P<username>[\w-]+)\.substack\.com/p/(?P<id>[\w-]+)' + _VALID_URL = r'https?://[\w-]+\.substack\.com/p/(?P<id>[\w-]+)' _TESTS = [{ 'url': 'https://haleynahman.substack.com/p/i-made-a-vlog?s=r', 'md5': 'f27e4fc6252001d48d479f45e65cdfd5', @@ -116,7 +116,7 @@ class SubstackIE(InfoExtractor): return formats, subtitles def _real_extract(self, url): - display_id, username = self._match_valid_url(url).group('id', 'username') + display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) webpage_info = self._parse_json(self._search_json( diff --git a/yt_dlp/extractor/vk.py b/yt_dlp/extractor/vk.py index 
9adb3086a8..b5b8804cc6 100644 --- a/yt_dlp/extractor/vk.py +++ b/yt_dlp/extractor/vk.py @@ -723,7 +723,7 @@ class VKWallPostIE(VKBaseIE): def _unmask_url(self, mask_url, vk_id): if 'audio_api_unavailable' in mask_url: extra = mask_url.split('?extra=')[1].split('#') - func, base = self._decode(extra[1]).split(chr(11)) + _, base = self._decode(extra[1]).split(chr(11)) mask_url = list(self._decode(extra[0])) url_len = len(mask_url) indexes = [None] * url_len diff --git a/yt_dlp/extractor/youtube/_video.py b/yt_dlp/extractor/youtube/_video.py index afb1226cfa..0527368e77 100644 --- a/yt_dlp/extractor/youtube/_video.py +++ b/yt_dlp/extractor/youtube/_video.py @@ -2760,7 +2760,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor): if max_depth == 1 and parent: return - max_comments, max_parents, max_replies, max_replies_per_thread, *_ = ( + _max_comments, max_parents, max_replies, max_replies_per_thread, *_ = ( int_or_none(p, default=sys.maxsize) for p in self._configuration_arg('max_comments') + [''] * 4) continuation = self._extract_continuation(root_continuation_data) diff --git a/yt_dlp/extractor/zingmp3.py b/yt_dlp/extractor/zingmp3.py index 1685edb92f..c786c2f593 100644 --- a/yt_dlp/extractor/zingmp3.py +++ b/yt_dlp/extractor/zingmp3.py @@ -476,7 +476,7 @@ class ZingMp3UserIE(ZingMp3BaseIE): class ZingMp3HubIE(ZingMp3BaseIE): IE_NAME = 'zingmp3:hub' - _VALID_URL = r'https?://(?:mp3\.zing|zingmp3)\.vn/(?P<type>hub)/(?P<regions>[^/]+)/(?P<id>[^\.]+)' + _VALID_URL = r'https?://(?:mp3\.zing|zingmp3)\.vn/(?P<type>hub)/[^/?#]+/(?P<id>[^./?#]+)' _TESTS = [{ 'url': 'https://zingmp3.vn/hub/Nhac-Moi/IWZ9Z0CA.html', 'info_dict': { @@ -496,7 +496,7 @@ class ZingMp3HubIE(ZingMp3BaseIE): }] def _real_extract(self, url): - song_id, regions, url_type = self._match_valid_url(url).group('id', 'regions', 'type') + song_id, url_type = self._match_valid_url(url).group('id', 'type') hub_detail = self._call_api(url_type, {'id': song_id}) entries = self._parse_items(traverse_obj(hub_detail, ( 'sections', lambda _, v: 
v['sectionId'] == 'hub', 'items', ...))) diff --git a/yt_dlp/networking/_helper.py b/yt_dlp/networking/_helper.py index ef9c8bafab..661a2c3b51 100644 --- a/yt_dlp/networking/_helper.py +++ b/yt_dlp/networking/_helper.py @@ -200,7 +200,7 @@ def wrap_request_errors(func): def _socket_connect(ip_addr, timeout, source_address): - af, socktype, proto, canonname, sa = ip_addr + af, socktype, proto, _canonname, sa = ip_addr sock = socket.socket(af, socktype, proto) try: if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT: @@ -215,7 +215,7 @@ def _socket_connect(ip_addr, timeout, source_address): def create_socks_proxy_socket(dest_addr, proxy_args, proxy_ip_addr, timeout, source_address): - af, socktype, proto, canonname, sa = proxy_ip_addr + af, socktype, proto, _canonname, sa = proxy_ip_addr sock = sockssocket(af, socktype, proto) try: connect_proxy_args = proxy_args.copy() diff --git a/yt_dlp/utils/_utils.py b/yt_dlp/utils/_utils.py index 6f6d85a7fd..6942e7f298 100644 --- a/yt_dlp/utils/_utils.py +++ b/yt_dlp/utils/_utils.py @@ -4770,7 +4770,7 @@ def jwt_encode(payload_data, key, *, alg='HS256', headers=None): # can be extended in future to verify the signature and parse header and return the algorithm used if it's not HS256 def jwt_decode_hs256(jwt): - header_b64, payload_b64, signature_b64 = jwt.split('.') + _header_b64, payload_b64, _signature_b64 = jwt.split('.') # add trailing ='s that may have been stripped, superfluous ='s are ignored return json.loads(base64.urlsafe_b64decode(f'{payload_b64}==='))