forked from MirrorHub/synapse
Limit cache invalidation replication line length (#4748)
This commit is contained in:
parent
f191be822b
commit
6bb1c028f1
3 changed files with 28 additions and 5 deletions
1
changelog.d/4748.misc
Normal file
1
changelog.d/4748.misc
Normal file
|
@@ -0,0 +1 @@
|
||||||
|
Improve replication performance by reducing cache invalidation traffic.
|
|
@@ -268,7 +268,17 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
|
||||||
if "\n" in string:
|
if "\n" in string:
|
||||||
raise Exception("Unexpected newline in command: %r", string)
|
raise Exception("Unexpected newline in command: %r", string)
|
||||||
|
|
||||||
self.sendLine(string.encode("utf-8"))
|
encoded_string = string.encode("utf-8")
|
||||||
|
|
||||||
|
if len(encoded_string) > self.MAX_LENGTH:
|
||||||
|
raise Exception(
|
||||||
|
"Failed to send command %s as too long (%d > %d)" % (
|
||||||
|
cmd.NAME,
|
||||||
|
len(encoded_string), self.MAX_LENGTH,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
self.sendLine(encoded_string)
|
||||||
|
|
||||||
self.last_sent_command = self.clock.time_msec()
|
self.last_sent_command = self.clock.time_msec()
|
||||||
|
|
||||||
|
@@ -361,6 +371,11 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
|
||||||
def id(self):
|
def id(self):
|
||||||
return "%s-%s" % (self.name, self.conn_id)
|
return "%s-%s" % (self.name, self.conn_id)
|
||||||
|
|
||||||
|
def lineLengthExceeded(self, line):
    """Handle an incoming line that exceeds the protocol's maximum length.

    Twisted's ``LineOnlyReceiver`` invokes this hook when the peer sends a
    line longer than ``MAX_LENGTH``; we report the problem back to the
    remote side via an ERROR command.
    """
    self.send_error("Line length exceeded")
|
||||||
|
|
||||||
|
|
||||||
class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol):
|
class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol):
|
||||||
VALID_INBOUND_COMMANDS = VALID_CLIENT_COMMANDS
|
VALID_INBOUND_COMMANDS = VALID_CLIENT_COMMANDS
|
||||||
|
|
|
@@ -30,6 +30,7 @@ from synapse.api.errors import StoreError
|
||||||
from synapse.metrics.background_process_metrics import run_as_background_process
|
from synapse.metrics.background_process_metrics import run_as_background_process
|
||||||
from synapse.storage.engines import PostgresEngine, Sqlite3Engine
|
from synapse.storage.engines import PostgresEngine, Sqlite3Engine
|
||||||
from synapse.types import get_domain_from_id
|
from synapse.types import get_domain_from_id
|
||||||
|
from synapse.util import batch_iter
|
||||||
from synapse.util.caches.descriptors import Cache
|
from synapse.util.caches.descriptors import Cache
|
||||||
from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
|
from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
|
||||||
from synapse.util.stringutils import exception_to_unicode
|
from synapse.util.stringutils import exception_to_unicode
|
||||||
|
@@ -1327,7 +1328,13 @@ class SQLBaseStore(object):
|
||||||
"""
|
"""
|
||||||
txn.call_after(self._invalidate_state_caches, room_id, members_changed)
|
txn.call_after(self._invalidate_state_caches, room_id, members_changed)
|
||||||
|
|
||||||
keys = itertools.chain([room_id], members_changed)
|
# We need to be careful that the size of the `members_changed` list
|
||||||
|
# isn't so large that it causes problems sending over replication, so we
|
||||||
|
# send them in chunks.
|
||||||
|
# Max line length is 16K, and max user ID length is 255, so 50 should
|
||||||
|
# be safe.
|
||||||
|
for chunk in batch_iter(members_changed, 50):
|
||||||
|
keys = itertools.chain([room_id], chunk)
|
||||||
self._send_invalidation_to_replication(
|
self._send_invalidation_to_replication(
|
||||||
txn, _CURRENT_STATE_CACHE_NAME, keys,
|
txn, _CURRENT_STATE_CACHE_NAME, keys,
|
||||||
)
|
)
|
||||||
|
|
Loading…
Reference in a new issue