@@ -457,7 +457,7 @@ def merge_batch_files(path, batch_time):
 		os.remove(batch_file)
 
 
-def main(channel, nick, oauth_token_path, base_dir='/mnt', name=None, merge_interval=60, metrics_port=8008):
+def main(nick, oauth_token_path, *channels, base_dir='/mnt', name=None, merge_interval=60, metrics_port=8008):
 	with open(oauth_token_path) as f:
 		oauth_token = f.read()
 	# To ensure uniqueness even if multiple instances are running on the same host,
@@ -470,23 +470,27 @@ def main(channel, nick, oauth_token_path, base_dir='/mnt', name=None, merge_inte
 	stopping = gevent.event.Event()
 	gevent.signal_handler(signal.SIGTERM, stopping.set)
 
-	merger = gevent.spawn(merge_all,
-		os.path.join(base_dir, channel, "chat"),
-		interval=merge_interval,
-		stopping=stopping
-	)
+	mergers = [
+		gevent.spawn(merge_all,
+			os.path.join(base_dir, channel, "chat"),
+			interval=merge_interval,
+			stopping=stopping
+		) for channel in channels
+	]
 
 	prom.start_http_server(metrics_port)
 
 	logging.info("Starting")
 	for index in count():
 		# To ensure uniqueness between clients, include a client number
-		archiver = Archiver("{}.{}".format(name, index), base_dir, [channel], nick, oauth_token)
-		worker = gevent.spawn(archiver.run)
+		archiver = Archiver("{}.{}".format(name, index), base_dir, channels, nick, oauth_token)
+		archive_worker = gevent.spawn(archiver.run)
+		workers = mergers + [archive_worker]
 		# wait for either graceful exit, error, or for a signal from the archiver that a reconnect was requested
-		gevent.wait([stopping, worker, merger, archiver.got_reconnect], count=1)
+		gevent.wait([stopping, archiver.got_reconnect] + workers, count=1)
 		if stopping.is_set():
 			archiver.stop()
-			worker.get()
+			for worker in workers:
+				worker.get()
 			break
 		# if got reconnect, discard the old archiver (we don't care even if it fails after this)
@@ -497,10 +501,11 @@ def main(channel, nick, oauth_token_path, base_dir='/mnt', name=None, merge_inte
 		# the only remaining case is that something failed. stop everything and re-raise.
 		logging.warning("Stopping due to worker dying")
 		stopping.set()
-		worker.join()
-		merger.join()
-		# at least one of these should raise
-		worker.get()
-		merger.get()
+		archiver.stop()
+		for worker in workers:
+			worker.join()
+		# at least one of these two should raise
+		for worker in workers:
+			worker.get()
 		assert False, "Worker unexpectedly exited successfully"
 	logging.info("Gracefully stopped")
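
Note (not part of the patch): the core of this change is a fan-out-and-wait pattern — one merger greenlet per channel, all sharing one stop event, with gevent.wait(count=1) returning as soon as any wakeup condition fires. A minimal, self-contained sketch of that pattern, assuming only gevent; the names dummy_merge_all and run_demo are illustrative and do not appear in the codebase:

import gevent
import gevent.event

def dummy_merge_all(path, interval, stopping):
	# Stand-in for merge_all(): do periodic work until the shared stopping
	# event is set. Event.wait(timeout) returns True once set, ending the loop.
	while not stopping.wait(interval):
		print("merging batches under", path)

def run_demo(*channels):
	stopping = gevent.event.Event()
	# One merger greenlet per channel, all sharing one stopping event,
	# mirroring the `mergers = [...]` list comprehension in the patch.
	mergers = [
		gevent.spawn(dummy_merge_all, "{}/chat".format(channel), 1, stopping)
		for channel in channels
	]
	gevent.spawn_later(3, stopping.set)  # demo only: request shutdown after 3s
	# gevent.wait accepts both events and greenlets; count=1 means "return as
	# soon as any one is ready", just like the patched gevent.wait call above.
	gevent.wait([stopping] + mergers, count=1)
	stopping.set()
	gevent.joinall(mergers)
	# Greenlet.get() re-raises any exception a greenlet died with, which is why
	# the patch calls worker.get() on every worker during shutdown.
	for merger in mergers:
		merger.get()

if __name__ == '__main__':
	run_demo("channel_one", "channel_two")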
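For completeness, a hypothetical call of the new entrypoint: every positional argument after the oauth token path becomes a channel to archive, so a deployment covering two channels might look like the following (nick, token path, and channel names are placeholders):

main("my_bot_nick", "/etc/wubloader/oauth_token", "channel_one", "channel_two", merge_interval=60)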