mirror of https://github.com/ekimekim/wubloader
wip:
parent
3280517618
commit
dcb6d6ba6d
@ -0,0 +1,109 @@
|
||||
|
||||
import logging
|
||||
|
||||
from . import states
|
||||
|
||||
|
||||
class Job(object):
	"""A job wraps a row and represents a video cutting job to do.

	A job is classified as one of three types ('chunk', 'draft' or 'publish')
	based on whether it came from a chunks sheet and, if not, which state flow
	(states.FLOWS) its row's current state belongs to. Jobs are executed by
	calling process(), which also watches for the row being stolen or
	cancelled by another party.
	"""

	# How often (in seconds) to check if someone else has claimed a row out from underneath us.
	OWNERSHIP_CHECK_INTERVAL = 1

	def __init__(self, wubloader, is_chunk, sheet, row):
		"""
		wubloader: the top-level application object (provides .group, .heartbeat
			and .bustime_base — assumed from usage below; confirm against caller).
		is_chunk: True if this row comes from a chunks sheet.
		sheet: the sheet the row belongs to.
		row: the row describing the cut to perform.
		"""
		self.wubloader = wubloader
		# Classify the job: chunk rows are always 'chunk'; other rows are
		# 'draft' or 'publish' depending on which flow their state is in.
		if is_chunk:
			self.job_type = 'chunk'
		elif row.state in states.FLOWS['draft']:
			self.job_type = 'draft'
		else:
			assert row.state in states.FLOWS['publish']
			self.job_type = 'publish'
		self.sheet = sheet
		self.row = row
		# The worker greenlet, set by process(). Initialized to None so that
		# cancel() is safe to call even on a job that was never started
		# (previously this attribute didn't exist until process() ran,
		# so an early cancel() raised AttributeError).
		self.worker = None

	@property
	def priority(self):
		"""Sort key for pending jobs.

		User-set priority is most important, followed by type, then earliest first.
		"""
		type_priority = ['chunk', 'draft', 'publish'] # low to high priority
		return (
			getattr(self.row, 'priority', 0), # chunks don't have priority, default to 0
			type_priority.index(self.job_type),
			-self.sheet.id, # sheet index, low number is high priority
			-self.row.index, # row index, low number is high priority
		)

	@property
	def uploader(self):
		"""A processed uploader check that ignores dead bots.

		Returns the row's uploader name, or "" if that bot is not currently alive.
		"""
		return self.row.uploader if self.row.uploader in self.wubloader.heartbeat.alive else ""

	@property
	def excluded(self):
		"""Bots that may not claim this row. NOTE: All lowercase.

		Parsed from the row's comma-separated "excluded" field; returns [] when blank.
		"""
		if not self.row.excluded.strip():
			return []
		return [name.strip().lower() for name in self.row.excluded.split(',')]

	@property
	def start_time(self):
		"""The row's start time as an absolute time relative to wubloader.bustime_base.

		Raises ValueError with a field-specific message on a malformed value.
		"""
		try:
			return parse_bustime(self.wubloader.bustime_base, self.row.start_time)
		except ValueError as e:
			raise ValueError("Start time: {}".format(e))

	@property
	def end_time(self):
		"""The row's end time as an absolute time relative to wubloader.bustime_base.

		Raises ValueError with a field-specific message on a malformed value.
		"""
		try:
			return parse_bustime(self.wubloader.bustime_base, self.row.end_time)
		except ValueError as e:
			raise ValueError("End time: {}".format(e))

	@property
	def duration(self):
		"""Length of the cut in seconds (end_time - start_time)."""
		return self.end_time - self.start_time

	def cancel(self):
		"""Cancel job that is currently being processed, setting it back to its starting state.

		A no-op if the job was never started or has already finished.
		"""
		# self.worker is None until process() is called; nothing to cancel then.
		if self.worker is not None and not self.worker.ready():
			# By setting uploader to blank, the watchdog will stop the in-progress job.
			self.row.update(state=states.FLOWS[self.job_type][0], uploader="")

	def process(self):
		"""Call this to perform the job. Blocks until the work finishes or is aborted."""
		# We do the actual work in a separate greenlet so we can easily cancel it.
		self.worker = self.wubloader.group.spawn(self._process)
		# While that does the real work, we poll the uploader field to check no-one else has stolen it.
		while not self.worker.ready():
			row = self.row.refresh()
			if row is None or row.uploader != self.row.uploader:
				# Our row's been stolen, cancelled, or just plain lost.
				# Abort with no rollback - let them have it.
				logging.warning("Job {} aborted: Row {} is {}".format(self, self.row,
					"gone" if row is None
					else "cancelled" if row.uploader == ""
					else "claimed by {}".format(row.uploader)
				))
				self.worker.kill(block=True)
				return
			# Sleep until either worker is done or interval has passed
			self.worker.join(self.OWNERSHIP_CHECK_INTERVAL)

	def _process(self):
		"""Does the actual cutting work. You should call process() instead."""
		# TODO
|
||||
|
||||
|
||||
def parse_bustime(base, value):
	"""Parse a bustime offset string and return it added to base, in seconds.

	Accepts "HH:MM" (minutes may be fractional) or "HH:MM:SS" (seconds may be
	fractional). Leading/trailing whitespace is ignored.

	Raises ValueError if the string is not in one of the two accepted formats
	or a field is not numeric.
	"""
	fields = value.strip().split(':')
	if len(fields) not in (2, 3):
		raise ValueError("Bad format: Must be HH:MM or HH:MM:SS")
	hours = int(fields[0])
	if len(fields) == 2:
		mins = float(fields[1])
		secs = 0
	else:
		mins = int(fields[1])
		secs = float(fields[2])
	return base + 3600 * hours + 60 * mins + secs
|
@ -0,0 +1,46 @@
|
||||
# -*- coding: utf-8 -*-

# Sheet cell values for each state of a cutting job's lifecycle.
# Judging by the FLOWS structure below, [^] marks in-progress states and
# [✓] marks ready/complete states; [❌] marks the error state.
QUEUED = "[✓] Queued"
PROCESSING_VIDEO = "[^] Processing Video"
AWAITING_EDITS = "[✓] Awaiting Edits"
EDITS_QUEUED = "[✓] Edits Queued"
PROCESSING_EDITS = "[^] Processing Edits"
UPLOADING = "[^] Uploading"
PUBLISHED = "[✓] Published"
ERROR = "[❌] Error"


# Map {name: (ready, *in progress, complete)} state flows.
# Note that due to bot deaths, etc, it can be pretty easy for something to be in an in-progress state
# but not actually be in progress. We treat in progress and ready states as basically equivalent and only
# existing for human feedback. Our actual in progress indicator comes from the uploader field,
# which can be ignored if the uploader in question is dead.
FLOWS = {
	'draft': (QUEUED, PROCESSING_VIDEO, AWAITING_EDITS),
	'publish': (EDITS_QUEUED, PROCESSING_EDITS, UPLOADING, PUBLISHED),
	'chunk': (QUEUED, PROCESSING_VIDEO, UPLOADING, PUBLISHED),
}
# Which flow names apply to a given sheet type: chunks sheets use only the
# 'chunk' flow, all other sheets use the two-stage 'draft' then 'publish' flows.
CHUNK_FLOWS = ('chunk',)
MAIN_FLOWS = ('draft', 'publish')
|
||||
|
||||
|
||||
def is_actionable(sheet_type, state):
	"""Whether this is a state we want to act on, defined as any non-complete state.

	sheet_type selects which flows are considered: 'chunks' checks only the
	chunk flow, anything else checks the draft and publish flows.
	"""
	names = CHUNK_FLOWS if sheet_type == 'chunks' else MAIN_FLOWS
	# A state is actionable if it appears anywhere in a relevant flow except
	# as that flow's final (complete) state.
	return any(state in FLOWS[name][:-1] for name in names)
|
||||
|
||||
|
||||
def rollback(sheet_type, state):
	"""Map an in-progress state to the state it should be rolled back to.

	For in-progress states, returns the first (ready) state of the containing
	flow; any other state is returned unchanged. sheet_type selects which
	flows are searched ('chunks' vs everything else).
	"""
	names = CHUNK_FLOWS if sheet_type == 'chunks' else MAIN_FLOWS
	for name in names:
		flow = FLOWS[name]
		# flow[1:-1] is the run of in-progress states between ready and complete.
		if state in flow[1:-1]:
			return flow[0]
	# Not an in-progress state in any relevant flow: map it to itself.
	return state
|
Loading…
Reference in New Issue