Mirror of https://mau.dev/maunium/synapse.git

Bug fixes.

Erik Johnston 2015-02-06 16:52:22 +00:00
parent b4886264a3
commit 5bf318e9a6
2 changed files with 57 additions and 27 deletions

View file

@@ -274,6 +274,8 @@ def setup():
     hs.get_pusherpool().start()
     hs.get_state_handler().start_caching()
     if config.daemonize:
+        print config.pid_file
         daemon = Daemonize(
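For context on the added print: once Daemonize detaches the process, its standard file descriptors are closed, so the pid-file path is printed while it is still visible on the terminal. A minimal sketch of that ordering, assuming the daemonize package's Daemonize(app=..., pid=..., action=...) API that the hunk appears to call into; FakeConfig and run() are illustrative stand-ins, not Synapse code:

from daemonize import Daemonize


def run():
    # Illustrative long-running entry point; Synapse would start the
    # Twisted reactor here.
    pass


class FakeConfig(object):
    # Hypothetical stand-in for the parsed homeserver config.
    daemonize = True
    pid_file = "/tmp/example-homeserver.pid"


config = FakeConfig()

if config.daemonize:
    # Print the pid-file path while stdout is still attached; after
    # Daemonize.start() the process forks away and closes it.
    print config.pid_file

    daemon = Daemonize(
        app="example-homeserver",
        pid=config.pid_file,
        action=run,
    )
    daemon.start()
else:
    run()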

View file

@@ -43,6 +43,10 @@ AuthEventTypes = (
 )
+SIZE_OF_CACHE = 1000
+EVICTION_TIMEOUT_SECONDS = 20
 class _StateCacheEntry(object):
     def __init__(self, state, state_group, ts):
         self.state = state
@@ -59,13 +63,22 @@ class StateHandler(object):
         self.store = hs.get_datastore()
         self.hs = hs
-        # set of event_ids -> _StateCacheEntry.
+        # dict of set of event_ids -> _StateCacheEntry.
         self._state_cache = None
     def start_caching(self):
         logger.debug("start_caching")
         self._state_cache = {}
         def f():
-            self._prune_cache()
+            logger.debug("Pruning")
+            try:
+                self._prune_cache()
+            except:
+                logger.exception("Prune")
-        self.clock.looping_call(f, 10*1000)
+        self.clock.looping_call(f, 5*1000)
     @defer.inlineCallbacks
     def get_current_state(self, room_id, event_type=None, state_key=""):
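The start_caching() change above wraps the pruning callback in try/except and logs any failure. clock.looping_call wraps Twisted's LoopingCall, and a LoopingCall stops looping as soon as its callable raises, so without the guard one failed prune would silently end all future pruning. A standalone sketch of the same pattern using plain twisted.internet.task.LoopingCall; prune_cache and the 5-second interval are illustrative:

import logging

from twisted.internet import reactor, task

logger = logging.getLogger(__name__)


def prune_cache():
    # Stand-in for StateHandler._prune_cache; anything in here may raise.
    pass


def f():
    # If the callable handed to LoopingCall raises, the loop stops and
    # the deferred returned by start() errbacks, so later runs never
    # happen. Catching and logging keeps the periodic job alive.
    logger.debug("Pruning")
    try:
        prune_cache()
    except Exception:
        logger.exception("Prune")


loop = task.LoopingCall(f)
loop.start(5)  # seconds; clock.looping_call(f, 5*1000) takes milliseconds

reactor.run()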
@@ -86,7 +99,10 @@ class StateHandler(object):
             for e_id, _, _ in events
         ]
-        cache = self._state_cache.get(frozenset(event_ids), None)
+        cache = None
+        if self._state_cache is not None:
+            cache = self._state_cache.get(frozenset(event_ids), None)
         if cache:
             cache.ts = self.clock.time_msec()
             state = cache.state
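Both cache lookups key on frozenset(event_ids). A frozenset is hashable (a plain set is not) and ignores ordering and duplicates, so any permutation of the same event IDs resolves to the same cache entry. A tiny illustration; the event IDs are made up:

# Stand-in for StateHandler._state_cache once start_caching() has run.
state_cache = {}

state_cache[frozenset(["$abc:example.com", "$def:example.com"])] = "cached-state"

# Order does not matter: the same set of IDs finds the same entry.
assert state_cache.get(frozenset(["$def:example.com", "$abc:example.com"])) == "cached-state"

# A mutable set cannot be used as a dict key at all.
try:
    state_cache[set(["$abc:example.com"])] = "boom"
except TypeError:
    pass  # set objects are unhashable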
@@ -199,16 +215,19 @@ class StateHandler(object):
         """
         logger.debug("resolve_state_groups event_ids %s", event_ids)
-        cache = self._state_cache.get(frozenset(event_ids), None)
-        if cache and cache.state_group:
-            cache.ts = self.clock.time_msec()
-            prev_state = cache.state.get((event_type, state_key), None)
-            if prev_state:
-                prev_state = prev_state.event_id
-                prev_states = [prev_state]
-            else:
-                prev_states = []
-            defer.returnValue((cache.state_group, cache.state, prev_states))
+        if self._state_cache is not None:
+            cache = self._state_cache.get(frozenset(event_ids), None)
+            if cache and cache.state_group:
+                cache.ts = self.clock.time_msec()
+                prev_state = cache.state.get((event_type, state_key), None)
+                if prev_state:
+                    prev_state = prev_state.event_id
+                    prev_states = [prev_state]
+                else:
+                    prev_states = []
+                defer.returnValue(
+                    (cache.state_group, cache.state, prev_states)
+                )
         state_groups = yield self.store.get_state_groups(
             event_ids
@@ -233,15 +252,16 @@ class StateHandler(object):
             else:
                 prev_states = []
-            cache = _StateCacheEntry(
-                state=state,
-                state_group=name,
-                ts=self.clock.time_msec()
-            )
+            if self._state_cache is not None:
+                cache = _StateCacheEntry(
+                    state=state,
+                    state_group=name,
+                    ts=self.clock.time_msec()
+                )
-            self._state_cache[frozenset(event_ids)] = cache
+                self._state_cache[frozenset(event_ids)] = cache
             defer.returnValue((name, state, prev_states))
         state = {}
         for group, g_state in state_groups.items():
@@ -292,7 +312,8 @@ class StateHandler(object):
             ts=self.clock.time_msec()
         )
-        self._state_cache[frozenset(event_ids)] = cache
+        if self._state_cache is not None:
+            self._state_cache[frozenset(event_ids)] = cache
         defer.returnValue((None, new_state, prev_states))
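The repeated `if self._state_cache is not None:` checks in these hunks are the substance of the fix: _state_cache starts life as None and only becomes a dict once start_caching() runs, so until then calling .get() on it raises AttributeError and item assignment raises TypeError. A minimal sketch of that guard pattern in isolation; GuardedCache is an illustrative class, not Synapse code:

class GuardedCache(object):
    """Cache that is disabled until explicitly enabled, mirroring the
    None-until-start_caching() lifecycle guarded against above."""

    def __init__(self):
        self._state_cache = None  # disabled until start_caching()

    def start_caching(self):
        self._state_cache = {}

    def lookup(self, key):
        if self._state_cache is not None:
            return self._state_cache.get(key, None)
        return None  # cache disabled: treat every lookup as a miss

    def store(self, key, value):
        if self._state_cache is not None:
            self._state_cache[key] = value  # skipped while disabled


cache = GuardedCache()
assert cache.lookup("some-key") is None   # safe even before start_caching()
cache.store("some-key", "value")          # silently dropped while disabled
cache.start_caching()
cache.store("some-key", "value")
assert cache.lookup("some-key") == "value"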
@@ -379,26 +400,33 @@ class StateHandler(object):
         return sorted(events, key=key_func)
     def _prune_cache(self):
-        logger.debug("_prune_cache. before len: ", len(self._state_cache))
+        logger.debug("_prune_cache")
+        logger.debug(
+            "_prune_cache. before len: %d",
+            len(self._state_cache.keys())
+        )
         now = self.clock.time_msec()
-        if len(self._state_cache) > 100:
+        if len(self._state_cache.keys()) > SIZE_OF_CACHE:
             sorted_entries = sorted(
                 self._state_cache.items(),
                 key=lambda k, v: v.ts,
             )
-            for k, _ in sorted_entries[100:]:
+            for k, _ in sorted_entries[SIZE_OF_CACHE:]:
                 self._state_cache.pop(k)
         keys_to_delete = set()
         for key, cache_entry in self._state_cache.items():
-            if now - cache_entry.ts > 60*1000:
+            if now - cache_entry.ts > EVICTION_TIMEOUT_SECONDS*1000:
                 keys_to_delete.add(key)
         for k in keys_to_delete:
             self._state_cache.pop(k)
-        logger.debug("_prune_cache. after len: ", len(self._state_cache))
+        logger.debug(
+            "_prune_cache. after len: %d",
+            len(self._state_cache.keys())
+        )
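Taken together, _prune_cache now enforces two policies: a hard size cap (SIZE_OF_CACHE entries, chosen by recency of each entry's ts) and an idle timeout (anything untouched for more than EVICTION_TIMEOUT_SECONDS is dropped). A standalone sketch of that combined policy, assuming ts is refreshed on every cache hit; note that sorted() hands the key function each (key, value) pair as a single tuple, so the sketch indexes into it, and it sorts newest-first so the size cap keeps the most recently used entries:

import time

SIZE_OF_CACHE = 1000
EVICTION_TIMEOUT_SECONDS = 20


class _Entry(object):
    def __init__(self, state, ts):
        self.state = state
        self.ts = ts  # millisecond timestamp of the last hit


def prune_cache(cache, now_ms):
    """Apply both eviction rules to `cache`, a dict of key -> _Entry."""
    # Rule 1: size cap. Sort newest-first on the last-touched timestamp
    # and drop everything past the cap, i.e. the least recently used.
    if len(cache) > SIZE_OF_CACHE:
        by_recency = sorted(
            cache.items(),
            key=lambda item: item[1].ts,  # one (key, entry) tuple per call
            reverse=True,
        )
        for key, _ in by_recency[SIZE_OF_CACHE:]:
            cache.pop(key)

    # Rule 2: idle timeout. Collect keys first so the dict is not
    # mutated while it is being iterated.
    stale = [
        key for key, entry in cache.items()
        if now_ms - entry.ts > EVICTION_TIMEOUT_SECONDS * 1000
    ]
    for key in stale:
        cache.pop(key)


now_ms = int(time.time() * 1000)
cache = {"key%d" % i: _Entry(state=None, ts=now_ms - i) for i in range(5)}
prune_cache(cache, now_ms)
assert len(cache) == 5  # all entries are recent and under the size cap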