
Merge pull request #3932 from matrix-org/erikj/auto_start_expiring_caches

Fix some instances of ExpiringCache not expiring cache items
commit 4c3e7eeec5
Richard van der Hoff, 2018-09-25 12:02:57 +01:00 (committed by GitHub)
17 changed files with 13 additions and 39 deletions
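
All of the deletions below follow one pattern: the separate start() / start_caching() / start_get_pdu_cache() shims go away because an ExpiringCache now begins pruning as soon as it is constructed, which is how some instances ended up never expiring anything, per the commit message. As a hedged, self-contained sketch of that pattern (illustrative class and timer machinery only, not Synapse's actual ExpiringCache):

# Illustrative sketch only -- not Synapse's ExpiringCache.
# It shows the shape the PR moves to: pruning is scheduled from __init__,
# so no caller can forget to "start" the cache.
import threading
import time


class AutoExpiringCache:
    def __init__(self, expiry_ms):
        self._expiry_ms = expiry_ms
        self._entries = {}  # key -> (value, inserted_at_ms)
        self._lock = threading.Lock()
        if expiry_ms:
            # Previously this scheduling lived in a separate start() method;
            # any instance whose start() was never called grew without bound.
            self._schedule_prune()

    def _schedule_prune(self):
        timer = threading.Timer(self._expiry_ms / 2000.0, self._prune)
        timer.daemon = True
        timer.start()

    def _prune(self):
        cutoff = time.monotonic() * 1000 - self._expiry_ms
        with self._lock:
            for key, (_, inserted_at) in list(self._entries.items()):
                if inserted_at < cutoff:
                    del self._entries[key]
        self._schedule_prune()

    def __setitem__(self, key, value):
        with self._lock:
            self._entries[key] = (value, time.monotonic() * 1000)

    def get(self, key, default=None):
        with self._lock:
            entry = self._entries.get(key)
        return default if entry is None else entry[0]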

changelog.d/3932.bugfix (new file)

@@ -0,0 +1 @@
+Fix some instances of ExpiringCache not expiring cache items


@@ -172,7 +172,6 @@ def start(config_options):
     def start():
         ps.get_datastore().start_profiling()
-        ps.get_state_handler().start_caching()
     reactor.callWhenRunning(start)


@@ -181,7 +181,6 @@ def start(config_options):
     ss.start_listening(config.worker_listeners)
     def start():
-        ss.get_state_handler().start_caching()
         ss.get_datastore().start_profiling()
     reactor.callWhenRunning(start)


@@ -199,7 +199,6 @@ def start(config_options):
     ss.start_listening(config.worker_listeners)
     def start():
-        ss.get_state_handler().start_caching()
         ss.get_datastore().start_profiling()
     reactor.callWhenRunning(start)


@@ -168,7 +168,6 @@ def start(config_options):
     ss.start_listening(config.worker_listeners)
     def start():
-        ss.get_state_handler().start_caching()
         ss.get_datastore().start_profiling()
     reactor.callWhenRunning(start)


@@ -201,7 +201,6 @@ def start(config_options):
     def start():
         ps.get_datastore().start_profiling()
-        ps.get_state_handler().start_caching()
     reactor.callWhenRunning(start)
     _base.start_worker_reactor("synapse-federation-sender", config)


@@ -258,7 +258,6 @@ def start(config_options):
     ss.start_listening(config.worker_listeners)
     def start():
-        ss.get_state_handler().start_caching()
         ss.get_datastore().start_profiling()
     reactor.callWhenRunning(start)


@@ -384,7 +384,6 @@ def setup(config_options):
     def start():
         hs.get_pusherpool().start()
-        hs.get_state_handler().start_caching()
         hs.get_datastore().start_profiling()
         hs.get_datastore().start_doing_background_updates()
         hs.get_federation_client().start_get_pdu_cache()


@@ -168,7 +168,6 @@ def start(config_options):
     ss.start_listening(config.worker_listeners)
     def start():
-        ss.get_state_handler().start_caching()
         ss.get_datastore().start_profiling()
     reactor.callWhenRunning(start)


@@ -228,7 +228,6 @@ def start(config_options):
     def start():
         ps.get_pusherpool().start()
         ps.get_datastore().start_profiling()
-        ps.get_state_handler().start_caching()
     reactor.callWhenRunning(start)


@@ -435,7 +435,6 @@ def start(config_options):
     def start():
         ss.get_datastore().start_profiling()
-        ss.get_state_handler().start_caching()
     reactor.callWhenRunning(start)


@@ -229,7 +229,6 @@ def start(config_options):
     def start():
         ps.get_datastore().start_profiling()
-        ps.get_state_handler().start_caching()
     reactor.callWhenRunning(start)


@@ -66,6 +66,14 @@ class FederationClient(FederationBase):
         self.state = hs.get_state_handler()
         self.transport_layer = hs.get_federation_transport_client()
+        self._get_pdu_cache = ExpiringCache(
+            cache_name="get_pdu_cache",
+            clock=self._clock,
+            max_len=1000,
+            expiry_ms=120 * 1000,
+            reset_expiry_on_get=False,
+        )
     def _clear_tried_cache(self):
         """Clear pdu_destination_tried cache"""
         now = self._clock.time_msec()
@@ -82,17 +90,6 @@ class FederationClient(FederationBase):
             if destination_dict:
                 self.pdu_destination_tried[event_id] = destination_dict
-    def start_get_pdu_cache(self):
-        self._get_pdu_cache = ExpiringCache(
-            cache_name="get_pdu_cache",
-            clock=self._clock,
-            max_len=1000,
-            expiry_ms=120 * 1000,
-            reset_expiry_on_get=False,
-        )
-        self._get_pdu_cache.start()
     @log_function
     def make_query(self, destination, query_type, args,
                    retry_on_dns_fail=False, ignore_backoff=False):
@@ -229,10 +226,9 @@ class FederationClient(FederationBase):
         # TODO: Rate limit the number of times we try and get the same event.
-        if self._get_pdu_cache:
-            ev = self._get_pdu_cache.get(event_id)
-            if ev:
-                defer.returnValue(ev)
+        ev = self._get_pdu_cache.get(event_id)
+        if ev:
+            defer.returnValue(ev)
         pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {})
@@ -285,7 +281,7 @@ class FederationClient(FederationBase):
                 )
                 continue
-        if self._get_pdu_cache is not None and signed_pdu:
+        if signed_pdu:
             self._get_pdu_cache[event_id] = signed_pdu
         defer.returnValue(signed_pdu)
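
Because the get_pdu cache is now built unconditionally in __init__ (first hunk of this file), the lookup and store paths above can drop their None checks. A hedged sketch of that design choice, with hypothetical names rather than the real FederationClient:

class PduFetcher:
    def __init__(self, cache_factory):
        # Constructed eagerly, so every method below may assume it exists.
        self._pdu_cache = cache_factory()

    def lookup(self, event_id):
        # No "if self._pdu_cache is not None" branch required any more.
        return self._pdu_cache.get(event_id)

    def record(self, event_id, pdu):
        if pdu is not None:
            self._pdu_cache[event_id] = pdu


# Example wiring, with a plain dict standing in for the real cache:
fetcher = PduFetcher(dict)
fetcher.record("$event:example.org", {"seen": True})
assert fetcher.lookup("$event:example.org") == {"seen": True}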


@@ -79,7 +79,6 @@ class PreviewUrlResource(Resource):
             # don't spider URLs more often than once an hour
             expiry_ms=60 * 60 * 1000,
         )
-        self._cache.start()
         self._cleaner_loop = self.clock.looping_call(
             self._start_expire_url_cache_data, 10 * 1000,


@@ -95,10 +95,6 @@ class StateHandler(object):
         self.hs = hs
         self._state_resolution_handler = hs.get_state_resolution_handler()
-    def start_caching(self):
-        # TODO: remove this shim
-        self._state_resolution_handler.start_caching()
     @defer.inlineCallbacks
     def get_current_state(self, room_id, event_type=None, state_key="",
                           latest_event_ids=None):
@@ -428,9 +424,6 @@ class StateResolutionHandler(object):
         self._state_cache = None
         self.resolve_linearizer = Linearizer(name="state_resolve_lock")
-    def start_caching(self):
-        logger.debug("start_caching")
         self._state_cache = ExpiringCache(
             cache_name="state_cache",
             clock=self.clock,
@@ -440,8 +433,6 @@ class StateResolutionHandler(object):
             reset_expiry_on_get=True,
         )
-        self._state_cache.start()
     @defer.inlineCallbacks
     @log_function
     def resolve_state_groups(


@@ -58,7 +58,6 @@ class ExpiringCache(object):
         self.metrics = register_cache("expiring", cache_name, self)
-    def start(self):
         if not self._expiry_ms:
             # Don't bother starting the loop if things never expire
             return


@@ -65,7 +65,6 @@ class ExpiringCacheTestCase(unittest.TestCase):
     def test_time_eviction(self):
         clock = MockClock()
         cache = ExpiringCache("test", clock, expiry_ms=1000)
-        cache.start()
         cache["key"] = 1
         clock.advance_time(0.5)
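
The test above drives expiry through a mock clock rather than sleeping. A self-contained sketch of that technique (FakeClock and TtlCache below are stand-ins; they are not Synapse's MockClock or ExpiringCache, whose APIs beyond what the hunk shows are not assumed here):

import unittest


class FakeClock:
    """Deterministic clock: time only moves when advance_time() is called."""

    def __init__(self):
        self.now_ms = 0.0
        self._loops = []  # each entry: [interval_ms, callback, next_due_ms]

    def looping_call(self, callback, interval_ms):
        self._loops.append([interval_ms, callback, interval_ms])

    def advance_time(self, seconds):
        self.now_ms += seconds * 1000
        for loop in self._loops:
            interval_ms, callback, next_due = loop
            while self.now_ms >= next_due:
                callback()
                next_due += interval_ms
            loop[2] = next_due

    def time_msec(self):
        return self.now_ms


class TtlCache:
    """Toy cache that, like the patched ExpiringCache, prunes from __init__."""

    def __init__(self, clock, expiry_ms):
        self._clock = clock
        self._expiry_ms = expiry_ms
        self._entries = {}  # key -> (value, inserted_at_ms)
        clock.looping_call(self._prune, expiry_ms / 2)

    def __setitem__(self, key, value):
        self._entries[key] = (value, self._clock.time_msec())

    def get(self, key, default=None):
        entry = self._entries.get(key)
        return default if entry is None else entry[0]

    def _prune(self):
        now = self._clock.time_msec()
        self._entries = {
            k: (v, ts)
            for k, (v, ts) in self._entries.items()
            if now - ts < self._expiry_ms
        }


class TtlCacheTestCase(unittest.TestCase):
    def test_time_eviction(self):
        clock = FakeClock()
        cache = TtlCache(clock, expiry_ms=1000)
        cache["key"] = 1

        clock.advance_time(0.5)   # prune loop fires once; entry still fresh
        self.assertEqual(cache.get("key"), 1)

        clock.advance_time(2)     # well past expiry_ms; entry gets pruned
        self.assertIsNone(cache.get("key"))


if __name__ == "__main__":
    unittest.main()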