
Merge pull request #114 from matrix-org/improve_get_event_cache

Improve get event cache
Mark Haines 2015-04-08 16:50:27 +01:00
commit 83f5125d52
3 changed files with 34 additions and 23 deletions


@@ -46,9 +46,10 @@ def _event_dict_property(key):
 class EventBase(object):
     def __init__(self, event_dict, signatures={}, unsigned={},
-                 internal_metadata_dict={}):
+                 internal_metadata_dict={}, rejected_reason=None):
         self.signatures = signatures
         self.unsigned = unsigned
+        self.rejected_reason = rejected_reason
 
         self._event_dict = event_dict
@@ -109,7 +110,7 @@ class EventBase(object):
 class FrozenEvent(EventBase):
-    def __init__(self, event_dict, internal_metadata_dict={}):
+    def __init__(self, event_dict, internal_metadata_dict={}, rejected_reason=None):
 
         event_dict = dict(event_dict)
 
         # Signatures is a dict of dicts, and this is faster than doing a
@@ -128,6 +129,7 @@ class FrozenEvent(EventBase):
             signatures=signatures,
             unsigned=unsigned,
             internal_metadata_dict=internal_metadata_dict,
+            rejected_reason=rejected_reason,
         )
 
     @staticmethod

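The net effect of the three hunks above is that an event object now carries its own rejection status, so code handing out cached events can decide at the call site whether a rejected event should be surfaced. A tiny stand-in illustration (not the real EventBase/FrozenEvent, which also handle signatures, unsigned data and freezing):

class StubEvent(object):
    """Stand-in for FrozenEvent: rejected_reason of None means the event was accepted."""

    def __init__(self, event_dict, internal_metadata_dict=None, rejected_reason=None):
        self._event_dict = dict(event_dict)  # defensive copy, as in the hunk above
        self.internal_metadata_dict = internal_metadata_dict or {}
        self.rejected_reason = rejected_reason


accepted = StubEvent({"type": "m.room.message"})
rejected = StubEvent({"type": "m.room.message"}, rejected_reason="auth_error")

# Callers can now ask the event itself whether it may be returned.
assert not accepted.rejected_reason
assert rejected.rejected_reason == "auth_error"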

@@ -242,10 +242,8 @@ class SQLBaseStore(object):
         self._txn_perf_counters = PerformanceCounters()
         self._get_event_counters = PerformanceCounters()
 
-        self._get_event_cache = LruCache(hs.config.event_cache_size)
-
-        # Pretend the getEventCache is just another named cache
-        caches_by_name["*getEvent*"] = self._get_event_cache
+        self._get_event_cache = Cache("*getEvent*", keylen=3, lru=True,
+                                      max_entries=hs.config.event_cache_size)
 
     def start_profiling(self):
         self._previous_loop_ts = self._clock.time_msec()
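
The Cache("*getEvent*", keylen=3, lru=True, ...) object introduced here is used by the later hunks through three calls, get, prefill and invalidate, each taking a fixed number of key arguments. A rough, self-contained sketch of that interface on top of collections.OrderedDict follows; it illustrates the keyed-LRU idea only and is not the codebase's actual Cache or LruCache implementation.

from collections import OrderedDict


class KeyedLruCache(object):
    """Illustrative named cache keyed on a fixed-length tuple of arguments.
    Only the get/prefill/invalidate interface used in this diff is sketched."""

    def __init__(self, name, keylen=1, max_entries=1000):
        self.name = name
        self.keylen = keylen
        self.max_entries = max_entries
        self.cache = OrderedDict()

    def _key(self, keyargs):
        if len(keyargs) != self.keylen:
            raise ValueError("expected %d key arguments" % self.keylen)
        return tuple(keyargs)

    def get(self, *keyargs):
        key = self._key(keyargs)
        value = self.cache.pop(key)  # raises KeyError on a miss, as callers expect
        self.cache[key] = value      # re-insert so the entry counts as recently used
        return value

    def prefill(self, *args):
        # The last positional argument is the value; the rest form the key.
        keyargs, value = args[:-1], args[-1]
        key = self._key(keyargs)
        self.cache.pop(key, None)
        self.cache[key] = value
        while len(self.cache) > self.max_entries:
            self.cache.popitem(last=False)  # evict the least recently used entry

    def invalidate(self, *keyargs):
        self.cache.pop(self._key(keyargs), None)


cache = KeyedLruCache("*getEvent*", keylen=3, max_entries=100)
cache.prefill("$ev:example.org", True, False, {"type": "m.room.message"})
assert cache.get("$ev:example.org", True, False) == {"type": "m.room.message"}
cache.invalidate("$ev:example.org", True, False)
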
@@ -733,6 +731,12 @@ class SQLBaseStore(object):
         return [e for e in events if e]
 
+    def _invalidate_get_event_cache(self, event_id):
+        for check_redacted in (False, True):
+            for get_prev_content in (False, True):
+                self._get_event_cache.invalidate(event_id, check_redacted,
+                                                 get_prev_content)
+
     def _get_event_txn(self, txn, event_id, check_redacted=True,
                        get_prev_content=False, allow_rejected=False):
@@ -743,16 +747,14 @@ class SQLBaseStore(object):
             sql_getevents_timer.inc_by(curr_time - last_time, desc)
             return curr_time
 
-        cache = self._get_event_cache.setdefault(event_id, {})
-
         try:
-            # Separate cache entries for each way to invoke _get_event_txn
-            ret = cache[(check_redacted, get_prev_content, allow_rejected)]
+            ret = self._get_event_cache.get(event_id, check_redacted, get_prev_content)
-            cache_counter.inc_hits("*getEvent*")
-            return ret
+            if allow_rejected or not ret.rejected_reason:
+                return ret
+            else:
+                return None
         except KeyError:
-            cache_counter.inc_misses("*getEvent*")
             pass
         finally:
             start_time = update_counter("event_cache", start_time)
@@ -777,19 +779,22 @@ class SQLBaseStore(object):
         start_time = update_counter("select_event", start_time)
 
+        result = self._get_event_from_row_txn(
+            txn, internal_metadata, js, redacted,
+            check_redacted=check_redacted,
+            get_prev_content=get_prev_content,
+            rejected_reason=rejected_reason,
+        )
+        self._get_event_cache.prefill(event_id, check_redacted, get_prev_content, result)
+
         if allow_rejected or not rejected_reason:
-            result = self._get_event_from_row_txn(
-                txn, internal_metadata, js, redacted,
-                check_redacted=check_redacted,
-                get_prev_content=get_prev_content,
-            )
-            cache[(check_redacted, get_prev_content, allow_rejected)] = result
             return result
         else:
             return None
 
     def _get_event_from_row_txn(self, txn, internal_metadata, js, redacted,
-                                check_redacted=True, get_prev_content=False):
+                                check_redacted=True, get_prev_content=False,
+                                rejected_reason=None):
 
         start_time = time.time() * 1000
@@ -804,7 +809,11 @@ class SQLBaseStore(object):
         internal_metadata = json.loads(internal_metadata)
         start_time = update_counter("decode_internal", start_time)
 
-        ev = FrozenEvent(d, internal_metadata_dict=internal_metadata)
+        ev = FrozenEvent(
+            d,
+            internal_metadata_dict=internal_metadata,
+            rejected_reason=rejected_reason,
+        )
 
         start_time = update_counter("build_frozen_event", start_time)
 
         if check_redacted and redacted:

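Taken together, the storage hunks above replace the old per-event dict of results with a single cache keyed on (event_id, check_redacted, get_prev_content). allow_rejected is deliberately left out of the key: the event is prefilled into the cache whether or not it was rejected, and rejected events are filtered out only at return time via rejected_reason, so callers with different allow_rejected values share one entry. A condensed, self-contained sketch of that flow, with a hypothetical stub store standing in for the real SQL path:

class Event(object):
    """Toy event: rejected_reason of None means the event was accepted."""

    def __init__(self, event_id, rejected_reason=None):
        self.event_id = event_id
        self.rejected_reason = rejected_reason


class StubStore(object):
    """Hypothetical stand-in for the store: a dict cache plus one fake database row."""

    def __init__(self, rejected_reason=None):
        self.cache = {}
        self.rejected_reason = rejected_reason
        self.db_lookups = 0

    def fetch_event_row(self, event_id):
        self.db_lookups += 1
        return {"event_id": event_id, "rejected_reason": self.rejected_reason}


def get_event(store, event_id, check_redacted=True,
              get_prev_content=False, allow_rejected=False):
    # allow_rejected is deliberately NOT part of the cache key.
    key = (event_id, check_redacted, get_prev_content)
    try:
        ev = store.cache[key]
    except KeyError:
        row = store.fetch_event_row(event_id)  # stand-in for the SQL lookup
        ev = Event(row["event_id"], rejected_reason=row["rejected_reason"])
        store.cache[key] = ev  # "prefill": cached even if rejected
    # Rejected events stay cached; they are only filtered at return time.
    if allow_rejected or not ev.rejected_reason:
        return ev
    return None


store = StubStore(rejected_reason="auth_error")
assert get_event(store, "$ev:example.org") is None  # rejected, so filtered out
assert get_event(store, "$ev:example.org", allow_rejected=True) is not None
assert store.db_lookups == 1  # the second call was served from the same cache entry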

@@ -94,7 +94,7 @@ class EventsStore(SQLBaseStore):
                            current_state=None):
 
         # Remove the any existing cache entries for the event_id
-        self._get_event_cache.pop(event.event_id)
+        self._invalidate_get_event_cache(event.event_id)
 
         # We purposefully do this first since if we include a `current_state`
         # key, we *want* to update the `current_state_events` table
@@ -356,7 +356,7 @@ class EventsStore(SQLBaseStore):
     def _store_redaction(self, txn, event):
         # invalidate the cache for the redacted event
-        self._get_event_cache.pop(event.redacts)
+        self._invalidate_get_event_cache(event.redacts)
         txn.execute(
             "INSERT INTO redactions (event_id, redacts) VALUES (?,?)",
             (event.event_id, event.redacts)
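
Because cache entries are keyed on (event_id, check_redacted, get_prev_content), dropping an event after a persist or a redaction means clearing every combination of the two boolean flags, which is exactly what the new _invalidate_get_event_cache helper does. A small self-contained check of that behaviour with a toy dict cache (hypothetical names, not the real store):

import itertools

# Toy cache keyed the same way as in the diff: (event_id, check_redacted, get_prev_content).
cache = {}
event_id = "$redacted:example.org"

# Populate an entry for every flag combination, as repeated lookups would.
for check_redacted, get_prev_content in itertools.product((False, True), repeat=2):
    cache[(event_id, check_redacted, get_prev_content)] = {"event_id": event_id}


def invalidate_get_event_cache(cache, event_id):
    """Mirrors _invalidate_get_event_cache above: clear all four keyings of the event."""
    for check_redacted in (False, True):
        for get_prev_content in (False, True):
            cache.pop((event_id, check_redacted, get_prev_content), None)


invalidate_get_event_cache(cache, event_id)
assert not cache  # every combination for the redacted event is gone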