more py3 fixes

pull/224/head
Mike Lang 3 years ago committed by Mike Lang
parent f2a8007bf7
commit a56f6859bb

@@ -231,7 +231,7 @@ class BackfillerManager(object):
     def stop(self):
         """Shut down all workers and stop backfilling."""
         self.logger.info('Stopping')
-        for node in self.workers.keys():
+        for node in list(self.workers.keys()):
            self.stop_worker(node)
        self.stopping.set()
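
Reviewer note: in Python 3, dict.keys() returns a live view, and stop_worker() presumably removes the node from self.workers, which would raise RuntimeError mid-iteration without the list() copy. A minimal sketch of the failure mode, with stand-in data:

    workers = {"node-a": object(), "node-b": object()}

    # Iterating the live view while deleting raises:
    # RuntimeError: dictionary changed size during iteration
    #for node in workers.keys():
    #    del workers[node]

    # Iterating a snapshot is safe:
    for node in list(workers.keys()):
        del workers[node]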
@@ -344,7 +344,7 @@ class BackfillerManager(object):
         self.stopping.wait(common.jitter(self.NODE_INTERVAL))
         #wait for all workers to finish
-        for worker in self.workers.values():
+        for worker in list(self.workers.values()):
            worker.done.wait()

    def get_nodes(self):
@@ -390,7 +390,7 @@ class BackfillerManager(object):
         for row in results:
             nodes[row.name] = row.url
         nodes.pop(self.localhost, None)
-        self.logger.info('Nodes fetched: {}'.format(nodes.keys()))
+        self.logger.info('Nodes fetched: {}'.format(list(nodes.keys())))
        return list(nodes.values())

 class BackfillerWorker(object):
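
Same pattern, but here the motivation is log readability rather than safety: formatting a py3 view directly prints its repr. For example:

    nodes = {"node-a": "http://node-a", "node-b": "http://node-b"}
    print('Nodes fetched: {}'.format(nodes.keys()))
    # Nodes fetched: dict_keys(['node-a', 'node-b'])
    print('Nodes fetched: {}'.format(list(nodes.keys())))
    # Nodes fetched: ['node-a', 'node-b']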

@@ -1,7 +1,7 @@
 """Code for instrumenting requests calls. Requires requests, obviously."""

-import urlparse
+import urllib.parse

 import requests.sessions
 import prometheus_client as prom
@@ -33,7 +33,7 @@ class InstrumentedSession(requests.sessions.Session):
     """
     def request(self, method, url, *args, **kwargs):
-        _, domain, _, _, _ = urlparse.urlsplit(url)
+        _, domain, _, _, _ = urllib.parse.urlsplit(url)
        name = kwargs.pop('metric_name', '')
        start = monotonic() # we only use our own measured latency if an error occurs
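
The urlparse module was renamed to urllib.parse in py3 (the functions themselves are unchanged), so this is a pure rename. A quick check of the domain extraction used here, with a made-up URL:

    from urllib.parse import urlsplit

    # urlsplit returns a 5-tuple: (scheme, netloc, path, query, fragment)
    _, domain, _, _, _ = urlsplit("https://example.com/path/stream.m3u8?sig=abc")
    print(domain)  # example.com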

@@ -544,7 +544,7 @@ class TranscodeChecker(object):
     def check_ids(self, ids):
         # Future work: Set error in DB if video id is not present,
         # and/or try to get more info from yt about what's wrong.
-        done = self.backend.check_status(ids.values())
+        done = self.backend.check_status(list(ids.values()))
        return {
            id: video_id for id, video_id in ids.items()
            if video_id in done
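
Here the list() wrap guards the backend call: py3's dict_values is a view, and consumers that expect a real sequence (len(), indexing, serialization) can break on it. json is one illustrative consumer, not necessarily what check_status actually does:

    import json

    ids = {1: "video-a", 2: "video-b"}
    # json.dumps(ids.values()) raises:
    # TypeError: Object of type dict_values is not JSON serializable
    print(json.dumps(list(ids.values())))  # ["video-a", "video-b"]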

@@ -32,7 +32,7 @@ import re
 from binascii import unhexlify
 from collections import namedtuple
 from itertools import starmap
-from urlparse import urljoin, urlparse
+from urllib.parse import urljoin, urlparse

 # EXT-X-BYTERANGE
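
Same urlparse to urllib.parse rename in the HLS playlist parser; urljoin's behavior is identical, which matters for resolving relative segment URIs against the playlist URL. Sketch with a hypothetical playlist:

    from urllib.parse import urljoin

    playlist_url = "https://example.com/vod/1234/chunked/index.m3u8"
    print(urljoin(playlist_url, "segment-001.ts"))
    # https://example.com/vod/1234/chunked/segment-001.ts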

@@ -17,11 +17,12 @@ import prometheus_client as prom
 import requests
 from monotonic import monotonic
-import twitch

 import common
 import common.dateutil
 import common.requests
+from . import twitch

 segments_downloaded = prom.Counter(
     "segments_downloaded",
@ -328,7 +329,7 @@ class StreamWorker(object):
else: else:
self.logger.info("Worker stopped") self.logger.info("Worker stopped")
finally: finally:
for getter in self.getters.values(): for getter in list(self.getters.values()):
getter.done.wait() getter.done.wait()
self.done.set() self.done.set()
self.manager.stream_workers[self.quality].remove(self) self.manager.stream_workers[self.quality].remove(self)
@ -392,8 +393,9 @@ class StreamWorker(object):
if date is not None: if date is not None:
date += datetime.timedelta(seconds=segment.duration) date += datetime.timedelta(seconds=segment.duration)
# Clean up any old segment getters # Clean up any old segment getters.
for url, getter in self.getters.items(): # Note use of list() to make a copy to avoid modification-during-iteration
for url, getter in list(self.getters.items()):
# If segment is done and wasn't in latest fetch # If segment is done and wasn't in latest fetch
if getter.done.is_set() and not any( if getter.done.is_set() and not any(
segment.uri == url for segment in playlist.segments segment.uri == url for segment in playlist.segments
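
The new comment says it: the loop body presumably deletes finished getters from self.getters, so it must iterate over a snapshot. The safe pattern in isolation, with stand-in values:

    getters = {"seg-1.ts": "done", "seg-2.ts": "pending"}

    # list() snapshots the items, so deleting from the dict is safe
    for url, state in list(getters.items()):
        if state == "done":
            del getters[url]

    print(getters)  # {'seg-2.ts': 'pending'}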
@@ -512,7 +514,7 @@ class SegmentGetter(object):
         partial: Segment is incomplete. Hash is included.
         temp: Segment has not been downloaded yet. A random uuid is added.
         """
-        arg = str(uuid.uuid4()) if type == "temp" else b64encode(hash.digest(), b"-_").encode().rstrip("=")
+        arg = str(uuid.uuid4()) if type == "temp" else b64encode(hash.digest(), b"-_").decode().rstrip("=")
        return "{}-{}-{}.ts".format(self.prefix, type, arg)

    def exists(self):
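
This one is a genuine bug fix rather than a view wrap: py3's b64encode returns bytes, which have no .encode() method, and bytes.rstrip("=") with a str argument raises TypeError. Decoding first yields a str that formats cleanly. A verification sketch:

    from base64 import b64encode
    import hashlib

    digest = hashlib.sha256(b"segment data").digest()
    # altchars b"-_" makes the encoding filename-safe; strip '=' padding
    arg = b64encode(digest, b"-_").decode().rstrip("=")
    print("prefix-full-{}.ts".format(arg))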

@@ -2,10 +2,10 @@
 import logging
 import random

-import hls_playlist

 from common.requests import InstrumentedSession
+from . import hls_playlist

 logger = logging.getLogger(__name__)

@ -195,7 +195,7 @@ class CoverageChecker(object):
self.logger.exception('Getting nodes failed.', exc_info=True) self.logger.exception('Getting nodes failed.', exc_info=True)
return return
self.logger.info('Nodes fetched: {}'.format(nodes.keys())) self.logger.info('Nodes fetched: {}'.format(list(nodes.keys())))
html = """<!DOCTYPE html> html = """<!DOCTYPE html>
<html> <html>
