Mirror of https://mau.dev/maunium/synapse.git, synced 2024-12-14 20:13:50 +01:00

Commit 8bf285e082: Merge branch 'develop' of github.com:matrix-org/synapse into mysql

4 changed files with 62 additions and 27 deletions
synapse/events/__init__.py

@@ -46,9 +46,10 @@ def _event_dict_property(key):
 
 class EventBase(object):
     def __init__(self, event_dict, signatures={}, unsigned={},
-                 internal_metadata_dict={}):
+                 internal_metadata_dict={}, rejected_reason=None):
         self.signatures = signatures
         self.unsigned = unsigned
+        self.rejected_reason = rejected_reason
 
         self._event_dict = event_dict
 
@@ -109,7 +110,7 @@ class EventBase(object):
 
 
 class FrozenEvent(EventBase):
-    def __init__(self, event_dict, internal_metadata_dict={}):
+    def __init__(self, event_dict, internal_metadata_dict={}, rejected_reason=None):
        event_dict = dict(event_dict)
 
         # Signatures is a dict of dicts, and this is faster than doing a
@@ -128,6 +129,7 @@ class FrozenEvent(EventBase):
             signatures=signatures,
             unsigned=unsigned,
             internal_metadata_dict=internal_metadata_dict,
+            rejected_reason=rejected_reason,
         )
 
     @staticmethod
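The two hunks above thread a rejected_reason through event construction: None means the event was accepted, and any other value records why it was rejected, so read paths can filter rejected events without a second lookup. A minimal sketch of the pattern, using a simplified stand-in rather than synapse's actual EventBase:

class Event(object):
    # Simplified stand-in for EventBase: rejected_reason=None means accepted.
    def __init__(self, event_dict, rejected_reason=None):
        self._event_dict = dict(event_dict)
        self.rejected_reason = rejected_reason

def visible_event(event, allow_rejected=False):
    # Mirrors the check in _get_event_txn below: rejected events are
    # only returned when the caller explicitly opts in.
    if allow_rejected or not event.rejected_reason:
        return event
    return None

ev = Event({"type": "m.room.message"}, rejected_reason="auth_error")
assert visible_event(ev) is None
assert visible_event(ev, allow_rejected=True) is ev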
synapse/notifier.py

@@ -59,8 +59,8 @@ class _NotificationListener(object):
         self.limit = limit
         self.timeout = timeout
         self.deferred = deferred
-
         self.rooms = rooms
+        self.timer = None
 
     def notified(self):
         return self.deferred.called
@@ -93,6 +93,13 @@ class _NotificationListener(object):
                 self.appservice, set()
             ).discard(self)
 
+        # Cancel the timeout for this notifer if one exists.
+        if self.timer is not None:
+            try:
+                notifier.clock.cancel_call_later(self.timer)
+            except:
+                logger.exception("Failed to cancel notifier timer")
+
 
 class Notifier(object):
     """ This class is responsible for notifying any listeners when there are
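Deregistration now cancels any pending timeout, so a stale timer cannot fire later and notify a listener that is already gone. A sketch of the cancellation semantics, assuming synapse's Clock.cancel_call_later wraps Twisted's DelayedCall.cancel, demonstrated with Twisted's test Clock:

from twisted.internet import task

clock = task.Clock()
fired = []
timer = clock.callLater(30, fired.append, "timeout")

# The listener is notified at t=10, before the timeout; cancelling the
# pending call ensures it can never fire against a dead listener.
clock.advance(10)
if timer.active():
    timer.cancel()

clock.advance(60)
assert fired == []  # the timeout never ran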
@@ -325,14 +332,20 @@ class Notifier(object):
             self._register_with_keys(listener[0])
 
         result = yield callback()
+        timer = [None]
+
         if timeout:
             timed_out = [False]
+
             def _timeout_listener():
                 timed_out[0] = True
+                timer[0] = None
                 listener[0].notify(self, [], from_token, from_token)
 
-            self.clock.call_later(timeout/1000., _timeout_listener)
+            # We create multiple notification listeners so we have to manage
+            # canceling the timeout ourselves.
+            timer[0] = self.clock.call_later(timeout/1000., _timeout_listener)
 
         while not result and not timed_out[0]:
             yield deferred
             deferred = defer.Deferred()
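timer = [None] and timed_out = [False] are one-element lists because this is Python 2, where a nested function like _timeout_listener cannot rebind a variable in its enclosing scope (there is no nonlocal); mutating a list cell is the standard workaround:

def make_counter():
    count = [0]           # mutable cell shared with the closure

    def bump():
        count[0] += 1     # rebinding `count` itself would not work in Python 2
        return count[0]

    return bump

bump = make_counter()
assert bump() == 1
assert bump() == 2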
@@ -347,6 +360,12 @@ class Notifier(object):
                 self._register_with_keys(listener[0])
                 result = yield callback()
 
+        if timer[0] is not None:
+            try:
+                self.clock.cancel_call_later(timer[0])
+            except:
+                logger.exception("Failed to cancel notifer timer")
+
         defer.returnValue(result)
 
     def get_events_for(self, user, rooms, pagination_config, timeout):
@@ -385,6 +404,8 @@ class Notifier(object):
         def _timeout_listener():
             # TODO (erikj): We should probably set to_token to the current
             # max rather than reusing from_token.
+            # Remove the timer from the listener so we don't try to cancel it.
+            listener.timer = None
             listener.notify(
                 self,
                 [],
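Clearing listener.timer inside _timeout_listener matters because by the time the callback runs the timer has already fired, and cancelling a fired DelayedCall raises AlreadyCalled; the cleanup path therefore needs to distinguish "still pending" from "already fired". A demonstration with Twisted's test Clock:

from twisted.internet import task
from twisted.internet.error import AlreadyCalled

clock = task.Clock()
dc = clock.callLater(30, lambda: None)
clock.advance(30)        # the delayed call fires

try:
    dc.cancel()          # cancelling after the fact is an error
except AlreadyCalled:
    pass                 # exactly the case that listener.timer = None avoids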
@@ -400,8 +421,11 @@ class Notifier(object):
         if not timeout:
             _timeout_listener()
         else:
-            self.clock.call_later(timeout/1000.0, _timeout_listener)
-
+            # Only add the timer if the listener hasn't been notified
+            if not listener.notified():
+                listener.timer = self.clock.call_later(
+                    timeout/1000.0, _timeout_listener
+                )
         return
 
     @log_function
synapse/storage/_base.py

@@ -251,13 +251,11 @@ class SQLBaseStore(object):
         self._txn_perf_counters = PerformanceCounters()
         self._get_event_counters = PerformanceCounters()
 
-        self._get_event_cache = LruCache(hs.config.event_cache_size)
+        self._get_event_cache = Cache("*getEvent*", keylen=3, lru=True,
+                                      max_entries=hs.config.event_cache_size)
 
         self.database_engine = hs.database_engine
 
-        # Pretend the getEventCache is just another named cache
-        caches_by_name["*getEvent*"] = self._get_event_cache
-
         self._stream_id_gen = StreamIdGenerator()
         self._transaction_id_gen = IdGenerator("sent_transactions", "id", self)
         self._state_groups_id_gen = IdGenerator("state_groups", "id", self)
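The per-event LruCache becomes a named Cache with three-part keys, so each (event_id, check_redacted, get_prev_content) combination gets its own entry and the cache is reported like every other named cache. A minimal sketch of the contract the surrounding code relies on; the semantics are assumed from usage here (get raises KeyError on a miss, prefill takes the value as the last positional argument), and the real class also feeds the caches_by_name metrics registry:

from collections import OrderedDict

class Cache(object):
    def __init__(self, name, keylen=1, lru=True, max_entries=1000):
        self.name = name
        self.keylen = keylen           # number of components in each key
        self.max_entries = max_entries
        self._data = OrderedDict()     # insertion order doubles as LRU order

    def get(self, *key):
        value = self._data.pop(key)    # raises KeyError on a miss
        self._data[key] = value        # re-insert as most recently used
        return value

    def prefill(self, *args):
        key, value = args[:-1], args[-1]
        self._data[key] = value
        while len(self._data) > self.max_entries:
            self._data.popitem(last=False)  # evict the least recently used

    def invalidate(self, *key):
        self._data.pop(key, None)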
@@ -768,6 +766,12 @@ class SQLBaseStore(object):
 
         return [e for e in events if e]
 
+    def _invalidate_get_event_cache(self, event_id):
+        for check_redacted in (False, True):
+            for get_prev_content in (False, True):
+                self._get_event_cache.invalidate(event_id, check_redacted,
+                                                 get_prev_content)
+
     def _get_event_txn(self, txn, event_id, check_redacted=True,
                        get_prev_content=False, allow_rejected=False):
 
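Because the two flags are part of the cache key, a single event can occupy up to four slots, and _invalidate_get_event_cache has to enumerate all of them; allow_rejected is deliberately not part of the key, since rejected and accepted lookups share the cached event. For example, with a hypothetical event id:

from itertools import product

event_id = "$hypothetical_event:example.org"
keys = [(event_id, cr, gp) for cr, gp in product((False, True), repeat=2)]
assert len(keys) == 4  # all four variants must be dropped together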
@@ -778,16 +782,14 @@ class SQLBaseStore(object):
             sql_getevents_timer.inc_by(curr_time - last_time, desc)
             return curr_time
 
-        cache = self._get_event_cache.setdefault(event_id, {})
-
         try:
-            ret = cache[(check_redacted, get_prev_content, allow_rejected)]
-
-            cache_counter.inc_hits("*getEvent*")
-            return ret
+            # Separate cache entries for each way to invoke _get_event_txn
+            ret = self._get_event_cache.get(event_id, check_redacted, get_prev_content)
+
+            if allow_rejected or not ret.rejected_reason:
+                return ret
+            else:
+                return None
         except KeyError:
-            cache_counter.inc_misses("*getEvent*")
             pass
         finally:
             start_time = update_counter("event_cache", start_time)
@@ -812,19 +814,22 @@ class SQLBaseStore(object):
 
         start_time = update_counter("select_event", start_time)
 
-        if allow_rejected or not rejected_reason:
-            result = self._get_event_from_row_txn(
-                txn, internal_metadata, js, redacted,
-                check_redacted=check_redacted,
-                get_prev_content=get_prev_content,
-            )
-            cache[(check_redacted, get_prev_content, allow_rejected)] = result
+        result = self._get_event_from_row_txn(
+            txn, internal_metadata, js, redacted,
+            check_redacted=check_redacted,
+            get_prev_content=get_prev_content,
+            rejected_reason=rejected_reason,
+        )
+        self._get_event_cache.prefill(event_id, check_redacted, get_prev_content, result)
+
+        if allow_rejected or not rejected_reason:
             return result
         else:
             return None
 
     def _get_event_from_row_txn(self, txn, internal_metadata, js, redacted,
-                                check_redacted=True, get_prev_content=False):
+                                check_redacted=True, get_prev_content=False,
+                                rejected_reason=None):
 
         start_time = time.time() * 1000
 
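Together, the last two hunks turn _get_event_txn into a read-through cache: try the cache first, on a miss build the event from the database row and prefill the cache, and apply the allow_rejected filter only on the way out, so a rejected event is fetched and cached once and then filtered per caller. In outline, with db_fetch as a hypothetical stand-in for the SELECT path:

def get_event(cache, db_fetch, event_id, check_redacted,
              get_prev_content, allow_rejected=False):
    try:
        ev = cache.get(event_id, check_redacted, get_prev_content)
    except KeyError:
        ev = db_fetch(event_id, check_redacted, get_prev_content)
        # Rejected events are cached too; allow_rejected is not in the key.
        cache.prefill(event_id, check_redacted, get_prev_content, ev)
    if allow_rejected or not ev.rejected_reason:
        return ev
    return None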
@@ -841,7 +846,11 @@ class SQLBaseStore(object):
         internal_metadata = json.loads(str(internal_metadata).decode("utf8"))
         start_time = update_counter("decode_internal", start_time)
 
-        ev = FrozenEvent(d, internal_metadata_dict=internal_metadata)
+        ev = FrozenEvent(
+            d,
+            internal_metadata_dict=internal_metadata,
+            rejected_reason=rejected_reason,
+        )
         start_time = update_counter("build_frozen_event", start_time)
 
         if check_redacted and redacted:
synapse/storage/events.py

@@ -93,7 +93,7 @@ class EventsStore(SQLBaseStore):
                           current_state=None):
 
         # Remove the any existing cache entries for the event_id
-        self._get_event_cache.pop(event.event_id)
+        self._invalidate_get_event_cache(event.event_id)
 
         if stream_ordering is None:
             with self._stream_id_gen.get_next_txn(txn) as stream_ordering:
@@ -356,7 +356,7 @@ class EventsStore(SQLBaseStore):
 
     def _store_redaction(self, txn, event):
         # invalidate the cache for the redacted event
-        self._get_event_cache.pop(event.redacts)
+        self._invalidate_get_event_cache(event.redacts)
         txn.execute(
             "INSERT INTO redactions (event_id, redacts) VALUES (?,?)",
             (event.event_id, event.redacts)