
Run replication streamers on workers (#7146)

Currently we never write to streams from workers, but that will change soon.
Erik Johnston 2020-04-28 13:34:12 +01:00 committed by GitHub
parent 07337fe30b
commit 38919b521e
3 changed files with 25 additions and 22 deletions

changelog.d/7146.misc (new file)

@@ -0,0 +1 @@
+Run replication streamers on workers.

synapse/app/generic_worker.py

@@ -960,17 +960,22 @@ def start(config_options):

     synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts

-    ss = GenericWorkerServer(
+    hs = GenericWorkerServer(
         config.server_name,
         config=config,
         version_string="Synapse/" + get_version_string(synapse),
     )

-    setup_logging(ss, config, use_worker_options=True)
+    setup_logging(hs, config, use_worker_options=True)
+
+    hs.setup()

-    ss.setup()
+    # Ensure the replication streamer is always started in case we write to any
+    # streams. Will no-op if no streams can be written to by this worker.
+    hs.get_replication_streamer()

     reactor.addSystemEventTrigger(
-        "before", "startup", _base.start, ss, config.worker_listeners
+        "before", "startup", _base.start, hs, config.worker_listeners
     )

     _base.start_worker_reactor("synapse-generic-worker", config)
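The hunk above relies on HomeServer component getters being memoized factories: the first call to hs.get_replication_streamer() constructs the streamer, and it is the constructor that registers the notifier callback (or skips doing so when this instance has nothing to publish, as the next file shows). Below is a minimal sketch of that pattern; FakeHomeServer, FakeNotifier and FakeReplicationStreamer are hypothetical stand-ins, not Synapse's real classes.

# Illustrative only: simplified stand-ins for Synapse's notifier/streamer wiring.
from typing import Callable, List, Optional


class FakeNotifier:
    def __init__(self) -> None:
        self.callbacks: List[Callable[[], None]] = []

    def add_replication_callback(self, cb: Callable[[], None]) -> None:
        self.callbacks.append(cb)


class FakeReplicationStreamer:
    def __init__(self, notifier: FakeNotifier, streams: List[str]) -> None:
        self.streams = streams
        if self.streams:
            # Only listen for pokes if this instance is the source of streams.
            notifier.add_replication_callback(self.on_notifier_poke)

    def on_notifier_poke(self) -> None:
        print("checking", self.streams, "for updates")


class FakeHomeServer:
    """Getters are memoized, so calling one is enough to 'start' the component."""

    def __init__(self, notifier: FakeNotifier, streams: List[str]) -> None:
        self._notifier = notifier
        self._streams = streams
        self._streamer: Optional[FakeReplicationStreamer] = None

    def get_replication_streamer(self) -> FakeReplicationStreamer:
        if self._streamer is None:
            self._streamer = FakeReplicationStreamer(self._notifier, self._streams)
        return self._streamer


worker_notifier = FakeNotifier()
FakeHomeServer(worker_notifier, streams=[]).get_replication_streamer()
assert worker_notifier.callbacks == []  # harmless no-op on a worker with no streams

master_notifier = FakeNotifier()
FakeHomeServer(master_notifier, streams=["events"]).get_replication_streamer()
assert len(master_notifier.callbacks) == 1  # streamer is now listening for pokes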

synapse/replication/tcp/resource.py

@@ -17,9 +17,7 @@

 import logging
 import random
-from typing import Dict
-
-from six import itervalues
+from typing import Dict, List

 from prometheus_client import Counter
@@ -71,28 +69,27 @@ class ReplicationStreamer(object):

     def __init__(self, hs):
         self.store = hs.get_datastore()
-        self.presence_handler = hs.get_presence_handler()
         self.clock = hs.get_clock()
         self.notifier = hs.get_notifier()
-        self._server_notices_sender = hs.get_server_notices_sender()

         self._replication_torture_level = hs.config.replication_torture_level

-        # List of streams that clients can subscribe to.
-        # We only support federation stream if federation sending has been
-        # disabled on the master.
-        self.streams = [
-            stream(hs)
-            for stream in itervalues(STREAMS_MAP)
-            if stream != FederationStream or not hs.config.send_federation
-        ]
+        # Work out list of streams that this instance is the source of.
+        self.streams = []  # type: List[Stream]
+        if hs.config.worker_app is None:
+            for stream in STREAMS_MAP.values():
+                if stream == FederationStream and hs.config.send_federation:
+                    # We only support federation stream if federation sending
+                    # has been disabled on the master.
+                    continue
+
+                self.streams.append(stream(hs))

         self.streams_by_name = {stream.NAME: stream for stream in self.streams}

-        self.federation_sender = None
-        if not hs.config.send_federation:
-            self.federation_sender = hs.get_federation_sender()
-
-        self.notifier.add_replication_callback(self.on_notifier_poke)
+        # Only bother registering the notifier callback if we have streams to
+        # publish.
+        if self.streams:
+            self.notifier.add_replication_callback(self.on_notifier_poke)

         # Keeps track of whether we are currently checking for updates
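The stream-selection rule introduced in the hunk above can be summarised in isolation: the master sources every stream except the federation stream while federation sending is still enabled on the master, and workers source none for now. The sketch below is a hedged illustration with stand-in stream classes, not Synapse's actual STREAMS_MAP or Stream types.

# Illustrative sketch of the stream-selection rule; names are stand-ins.
from typing import Dict, List, Optional, Type


class Stream:
    NAME = "base"


class EventsStream(Stream):
    NAME = "events"


class FederationStream(Stream):
    NAME = "federation"


STREAMS_MAP: Dict[str, Type[Stream]] = {
    s.NAME: s for s in (EventsStream, FederationStream)
}


def source_streams(worker_app: Optional[str], send_federation: bool) -> List[str]:
    """Names of the streams this instance is the source of."""
    if worker_app is not None:
        # Workers do not (yet) write to any stream.
        return []

    streams: List[str] = []
    for stream in STREAMS_MAP.values():
        if stream is FederationStream and send_federation:
            # The federation stream is only replicated from the master when
            # federation sending has been disabled there.
            continue
        streams.append(stream.NAME)
    return streams


assert source_streams("synapse.app.generic_worker", send_federation=False) == []
assert source_streams(None, send_federation=True) == ["events"]
assert source_streams(None, send_federation=False) == ["events", "federation"]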