From f93ecf87837567a540904e5d6a4280cf453ce7a8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 26 Jan 2016 18:27:23 +0000 Subject: [PATCH 01/30] Don't turn on profiling --- synapse/app/homeserver.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index fb76be58a..504557b2f 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -651,7 +651,7 @@ def _resource_id(resource, path_seg): def run(hs): - PROFILE_SYNAPSE = True + PROFILE_SYNAPSE = False if PROFILE_SYNAPSE: def profile(func): from cProfile import Profile From b97f6626b6f9b91498d06a7ae113b9d20f1fc2ef Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 27 Jan 2016 09:54:30 +0000 Subject: [PATCH 02/30] Add cache to room stream --- synapse/handlers/sync.py | 42 +++++-- synapse/storage/events.py | 2 + synapse/storage/receipts.py | 65 +---------- synapse/storage/stream.py | 133 +++++++++++++++++++++++ synapse/util/caches/room_change_cache.py | 86 +++++++++++++++ 5 files changed, 254 insertions(+), 74 deletions(-) create mode 100644 synapse/util/caches/room_change_cache.py diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 328c049b0..1fdf97831 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -514,13 +514,6 @@ class SyncHandler(BaseHandler): timeline_limit = sync_config.filter_collection.timeline_limit() - room_events, _ = yield self.store.get_room_events_stream( - sync_config.user.to_string(), - from_key=since_token.room_key, - to_key=now_token.room_key, - limit=timeline_limit + 1, - ) - tags_by_room = yield self.store.get_updated_tags( sync_config.user.to_string(), since_token.account_data_key, @@ -533,6 +526,32 @@ class SyncHandler(BaseHandler): ) ) + rooms_changed = yield self.store.get_room_changes_for_user( + sync_config.user.to_string(), since_token.room_key, now_token.room_key + ) + + room_to_events = yield self.store.get_room_events_stream_for_rooms( + room_ids=room_ids, + from_key=since_token.room_key, + to_key=now_token.room_key, + limit=timeline_limit + 1, + ) + + room_events = [ + event + for events, _ in room_to_events.values() + for event in events + ] + + room_events.extend(rooms_changed) + + # room_events, _ = yield self.store.get_room_events_stream( + # sync_config.user.to_string(), + # from_key=since_token.room_key, + # to_key=now_token.room_key, + # limit=timeline_limit + 1, + # ) + joined = [] archived = [] if len(room_events) <= timeline_limit: @@ -694,14 +713,12 @@ class SyncHandler(BaseHandler): end_key = room_key while limited and len(recents) < timeline_limit and max_repeat: - events, keys = yield self.store.get_recent_events_for_room( + events, end_key = yield self.store.get_recent_room_events_stream_for_room( room_id, limit=load_limit + 1, - from_token=since_token.room_key if since_token else None, - end_token=end_key, + from_key=since_token.room_key if since_token else None, + to_key=end_key, ) - room_key, _ = keys - end_key = "s" + room_key.split('-')[-1] loaded_recents = sync_config.filter_collection.filter_room_timeline(events) loaded_recents = yield self._filter_events_for_client( sync_config.user.to_string(), @@ -712,6 +729,7 @@ class SyncHandler(BaseHandler): recents = loaded_recents if len(events) <= load_limit: limited = False + break max_repeat -= 1 if len(recents) > timeline_limit: diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 298cb9bad..d96ea3a30 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -128,6 +128,8 
@@ class EventsStore(SQLBaseStore): is_new_state=is_new_state, current_state=current_state, ) + logger.info("Invalidating %r at %r", event.room_id, stream_ordering) + self._events_stream_cache.room_has_changed(None, event.room_id, stream_ordering) except _RollbackButIsFineException: pass diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py index c0593e23e..b7a4e7774 100644 --- a/synapse/storage/receipts.py +++ b/synapse/storage/receipts.py @@ -16,6 +16,7 @@ from ._base import SQLBaseStore from synapse.util.caches.descriptors import cachedInlineCallbacks, cachedList, cached from synapse.util.caches import cache_counter, caches_by_name +from synapse.util.caches.room_change_cache import RoomStreamChangeCache from twisted.internet import defer @@ -31,8 +32,8 @@ class ReceiptsStore(SQLBaseStore): def __init__(self, hs): super(ReceiptsStore, self).__init__(hs) - self._receipts_stream_cache = _RoomStreamChangeCache( - self._receipts_id_gen.get_max_token(None) + self._receipts_stream_cache = RoomStreamChangeCache( + "ReceiptsRoomChangeCache", self._receipts_id_gen.get_max_token(None) ) @cached(num_args=2) @@ -370,63 +371,3 @@ class ReceiptsStore(SQLBaseStore): "data": json.dumps(data), } ) - - -class _RoomStreamChangeCache(object): - """Keeps track of the stream_id of the latest change in rooms. - - Given a list of rooms and stream key, it will give a subset of rooms that - may have changed since that key. If the key is too old then the cache - will simply return all rooms. - """ - def __init__(self, current_key, size_of_cache=10000): - self._size_of_cache = size_of_cache - self._room_to_key = {} - self._cache = sorteddict() - self._earliest_key = current_key - self.name = "ReceiptsRoomChangeCache" - caches_by_name[self.name] = self._cache - - @defer.inlineCallbacks - def get_rooms_changed(self, store, room_ids, key): - """Returns subset of room ids that have had new receipts since the - given key. If the key is too old it will just return the given list. - """ - if key > (yield self._get_earliest_key(store)): - keys = self._cache.keys() - i = keys.bisect_right(key) - - result = set( - self._cache[k] for k in keys[i:] - ).intersection(room_ids) - - cache_counter.inc_hits(self.name) - else: - result = room_ids - cache_counter.inc_misses(self.name) - - defer.returnValue(result) - - @defer.inlineCallbacks - def room_has_changed(self, store, room_id, key): - """Informs the cache that the room has been changed at the given key. 
- """ - if key > (yield self._get_earliest_key(store)): - old_key = self._room_to_key.get(room_id, None) - if old_key: - key = max(key, old_key) - self._cache.pop(old_key, None) - self._cache[key] = room_id - - while len(self._cache) > self._size_of_cache: - k, r = self._cache.popitem() - self._earliest_key = max(k, self._earliest_key) - self._room_to_key.pop(r, None) - - @defer.inlineCallbacks - def _get_earliest_key(self, store): - if self._earliest_key is None: - self._earliest_key = yield store.get_max_receipt_stream_id() - self._earliest_key = int(self._earliest_key) - - defer.returnValue(self._earliest_key) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index e31bad258..3a32a0019 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -37,6 +37,7 @@ from twisted.internet import defer from ._base import SQLBaseStore from synapse.util.caches.descriptors import cachedInlineCallbacks +from synapse.util.caches.room_change_cache import RoomStreamChangeCache from synapse.api.constants import EventTypes from synapse.types import RoomStreamToken from synapse.util.logutils import log_function @@ -77,6 +78,12 @@ def upper_bound(token): class StreamStore(SQLBaseStore): + def __init__(self, hs): + super(StreamStore, self).__init__(hs) + + self._events_stream_cache = RoomStreamChangeCache( + "EventsRoomStreamChangeCache", self._stream_id_gen.get_max_token(None) + ) @defer.inlineCallbacks def get_appservice_room_stream(self, service, from_key, to_key, limit=0): @@ -157,6 +164,132 @@ class StreamStore(SQLBaseStore): results = yield self.runInteraction("get_appservice_room_stream", f) defer.returnValue(results) + @defer.inlineCallbacks + def get_room_events_stream_for_rooms(self, room_ids, from_key, to_key, limit=0): + from_id = RoomStreamToken.parse_stream_token(from_key).stream + + room_ids = yield self._events_stream_cache.get_rooms_changed( + self, room_ids, from_id + ) + + if not room_ids: + defer.returnValue({}) + + results = {} + room_ids = list(room_ids) + for rm_ids in (room_ids[i:i+20] for i in xrange(0, len(room_ids), 20)): + res = yield defer.gatherResults([ + self.get_recent_room_events_stream_for_room( + room_id, from_key, to_key, limit + ).addCallback(lambda r, rm: (rm, r), room_id) + for room_id in room_ids + ]) + results.update(dict(res)) + + defer.returnValue(results) + + @defer.inlineCallbacks + def get_recent_room_events_stream_for_room(self, room_id, from_key, to_key, limit=0): + if from_key is not None: + from_id = RoomStreamToken.parse_stream_token(from_key).stream + else: + from_id = None + to_id = RoomStreamToken.parse_stream_token(to_key).stream + + if from_key == to_key: + defer.returnValue(([], from_key)) + + has_changed = yield self._events_stream_cache.get_room_has_changed( + room_id, from_id + ) + + if not has_changed: + defer.returnValue(([], from_key)) + + def f(txn): + if from_id is not None: + sql = ( + "SELECT event_id, stream_ordering FROM events WHERE" + " room_id = ?" + " AND not outlier" + " AND stream_ordering > ? AND stream_ordering <= ?" + " ORDER BY stream_ordering DESC LIMIT ?" + ) + txn.execute(sql, (room_id, from_id, to_id, limit)) + else: + sql = ( + "SELECT event_id, stream_ordering FROM events WHERE" + " room_id = ?" + " AND not outlier" + " AND stream_ordering <= ?" + " ORDER BY stream_ordering DESC LIMIT ?" 
+ ) + txn.execute(sql, (room_id, to_id, limit)) + + rows = self.cursor_to_dict(txn) + + ret = self._get_events_txn( + txn, + [r["event_id"] for r in rows], + get_prev_content=True + ) + + ret.reverse() + + self._set_before_and_after(ret, rows) + + if rows: + key = "s%d" % min(r["stream_ordering"] for r in rows) + else: + # Assume we didn't get anything because there was nothing to + # get. + key = from_key + + return ret, key + res = yield self.runInteraction("get_recent_room_events_stream_for_room", f) + defer.returnValue(res) + + def get_room_changes_for_user(self, user_id, from_key, to_key): + if from_key is not None: + from_id = RoomStreamToken.parse_stream_token(from_key).stream + else: + from_id = None + to_id = RoomStreamToken.parse_stream_token(to_key).stream + + if from_key == to_key: + return defer.succeed([]) + + def f(txn): + if from_id is not None: + sql = ( + "SELECT m.event_id, stream_ordering FROM events AS e, room_memberships AS m" + " WHERE e.event_id = m.event_id" + " AND m.user_id = ?" + " AND e.stream_ordering > ? AND e.stream_ordering <= ?" + " ORDER BY e.stream_ordering ASC" + ) + txn.execute(sql, (user_id, from_id, to_id,)) + else: + sql = ( + "SELECT m.event_id, stream_ordering FROM events AS e, room_memberships AS m" + " WHERE e.event_id = m.event_id" + " AND m.user_id = ?" + " AND stream_ordering <= ?" + " ORDER BY stream_ordering ASC" + ) + txn.execute(sql, (user_id, to_id,)) + rows = self.cursor_to_dict(txn) + + ret = self._get_events_txn( + txn, + [r["event_id"] for r in rows], + get_prev_content=True + ) + + return ret + + return self.runInteraction("get_room_changes_for_user", f) + @log_function def get_room_events_stream( self, diff --git a/synapse/util/caches/room_change_cache.py b/synapse/util/caches/room_change_cache.py new file mode 100644 index 000000000..3a873c9c3 --- /dev/null +++ b/synapse/util/caches/room_change_cache.py @@ -0,0 +1,86 @@ +# -*- coding: utf-8 -*- +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from synapse.util.caches import cache_counter, caches_by_name + + +from blist import sorteddict +import logging + + +logger = logging.getLogger(__name__) + + +class RoomStreamChangeCache(object): + """Keeps track of the stream_id of the latest change in rooms. + + Given a list of rooms and stream key, it will give a subset of rooms that + may have changed since that key. If the key is too old then the cache + will simply return all rooms. 
+ """ + def __init__(self, name, current_key, size_of_cache=10000): + self._size_of_cache = size_of_cache + self._room_to_key = {} + self._cache = sorteddict() + self._earliest_known_key = current_key + self.name = name + caches_by_name[self.name] = self._cache + + def get_room_has_changed(self, room_id, key): + if key <= self._earliest_known_key: + return True + + room_key = self._room_to_key.get(room_id, None) + if room_key is None: + return True + + if key < room_key: + return True + + return False + + def get_rooms_changed(self, store, room_ids, key): + """Returns subset of room ids that have had new things since the + given key. If the key is too old it will just return the given list. + """ + if key > self._earliest_known_key: + keys = self._cache.keys() + i = keys.bisect_right(key) + + result = set( + self._cache[k] for k in keys[i:] + ).intersection(room_ids) + + cache_counter.inc_hits(self.name) + else: + result = room_ids + cache_counter.inc_misses(self.name) + + return result + + def room_has_changed(self, store, room_id, key): + """Informs the cache that the room has been changed at the given key. + """ + if key > self._earliest_known_key: + old_key = self._room_to_key.get(room_id, None) + if old_key: + key = max(key, old_key) + self._cache.pop(old_key, None) + self._cache[key] = room_id + + while len(self._cache) > self._size_of_cache: + k, r = self._cache.popitem() + self._earliest_key = max(k, self._earliest_key) + self._room_to_key.pop(r, None) From aca3193efb8c5f9f20049f61c96e5ff12f328b05 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 27 Jan 2016 17:06:52 +0000 Subject: [PATCH 03/30] Use the same path for incremental with gap or without gap --- synapse/handlers/sync.py | 352 ++++++++++++++++---------------------- synapse/storage/events.py | 1 - synapse/storage/stream.py | 6 +- 3 files changed, 147 insertions(+), 212 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 1fdf97831..f5e20d6a6 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -72,7 +72,7 @@ class JoinedSyncResult(collections.namedtuple("JoinedSyncResult", [ ) -class ArchivedSyncResult(collections.namedtuple("JoinedSyncResult", [ +class ArchivedSyncResult(collections.namedtuple("ArchivedSyncResult", [ "room_id", # str "timeline", # TimelineBatch "state", # dict[(str, str), FrozenEvent] @@ -429,44 +429,20 @@ class SyncHandler(BaseHandler): defer.returnValue((now_token, ephemeral_by_room)) - @defer.inlineCallbacks def full_state_sync_for_archived_room(self, room_id, sync_config, leave_event_id, leave_token, timeline_since_token, tags_by_room, account_data_by_room): """Sync a room for a client which is starting without any state Returns: - A Deferred JoinedSyncResult. + A Deferred ArchivedSyncResult. 
""" - batch = yield self.load_filtered_recents( - room_id, sync_config, leave_token, since_token=timeline_since_token + return self.incremental_sync_for_archived_room( + sync_config, room_id, leave_event_id, timeline_since_token, tags_by_room, + account_data_by_room, full_state=True, leave_token=leave_token, ) - leave_state = yield self.store.get_state_for_event(leave_event_id) - - leave_state = { - (e.type, e.state_key): e - for e in sync_config.filter_collection.filter_room_state( - leave_state.values() - ) - } - - account_data = self.account_data_for_room( - room_id, tags_by_room, account_data_by_room - ) - - account_data = sync_config.filter_collection.filter_room_account_data( - account_data - ) - - defer.returnValue(ArchivedSyncResult( - room_id=room_id, - timeline=batch, - state=leave_state, - account_data=account_data, - )) - @defer.inlineCallbacks def incremental_sync_with_gap(self, sync_config, since_token): """ Get the incremental delta needed to bring the client up to @@ -512,173 +488,127 @@ class SyncHandler(BaseHandler): sync_config.user ) + user_id = sync_config.user.to_string() + timeline_limit = sync_config.filter_collection.timeline_limit() tags_by_room = yield self.store.get_updated_tags( - sync_config.user.to_string(), + user_id, since_token.account_data_key, ) account_data, account_data_by_room = ( yield self.store.get_updated_account_data_for_user( - sync_config.user.to_string(), + user_id, since_token.account_data_key, ) ) + # Get a list of membership change events that have happened. rooms_changed = yield self.store.get_room_changes_for_user( - sync_config.user.to_string(), since_token.room_key, now_token.room_key + user_id, since_token.room_key, now_token.room_key ) + mem_change_events_by_room_id = {} + for event in rooms_changed: + mem_change_events_by_room_id.setdefault(event.room_id, []).append(event) + + newly_joined_rooms = [] + archived = [] + invited = [] + for room_id, events in mem_change_events_by_room_id.items(): + non_joins = [e for e in events if e.membership != Membership.JOIN] + has_join = len(non_joins) != len(events) + + # We want to figure out if we joined the room at some point since + # the last sync (even if we have since left). This is to make sure + # we do send down the room, and with full state, where necessary + if room_id in joined_room_ids or has_join: + old_state = yield self.get_state_at(room_id, since_token) + old_mem_ev = old_state.get((EventTypes.Member, user_id), None) + if not old_mem_ev or old_mem_ev.membership != Membership.JOIN: + newly_joined_rooms.append(room_id) + + if room_id in joined_room_ids: + continue + + if not non_joins: + continue + + # Only bother if we're still currently invited + should_invite = non_joins[-1].membership == Membership.INVITE + if should_invite: + room_sync = InvitedSyncResult(room_id, invite=non_joins[-1]) + if room_sync: + invited.append(room_sync) + + # Always include leave/ban events. Just take the last one. + # TODO: How do we handle ban -> leave in same batch? + leave_events = [ + e for e in non_joins + if e.membership in (Membership.LEAVE, Membership.BAN) + ] + + if leave_events: + leave_event = leave_events[-1] + room_sync = yield self.incremental_sync_for_archived_room( + sync_config, room_id, leave_event.event_id, since_token, + tags_by_room, account_data_by_room, + full_state=room_id in newly_joined_rooms + ) + if room_sync: + archived.append(room_sync) + + # Get all events for rooms we're currently joined to. 
room_to_events = yield self.store.get_room_events_stream_for_rooms( - room_ids=room_ids, + room_ids=joined_room_ids, from_key=since_token.room_key, to_key=now_token.room_key, limit=timeline_limit + 1, ) - room_events = [ - event - for events, _ in room_to_events.values() - for event in events - ] - - room_events.extend(rooms_changed) - - # room_events, _ = yield self.store.get_room_events_stream( - # sync_config.user.to_string(), - # from_key=since_token.room_key, - # to_key=now_token.room_key, - # limit=timeline_limit + 1, - # ) - joined = [] - archived = [] - if len(room_events) <= timeline_limit: - # There is no gap in any of the rooms. Therefore we can just - # partition the new events by room and return them. - logger.debug("Got %i events for incremental sync - not limited", - len(room_events)) + # We loop through all room ids, even if there are no new events, in case + # there are non-room events that we need to notify about. + for room_id in joined_room_ids: + room_entry = room_to_events.get(room_id, None) - invite_events = [] - leave_events = [] - events_by_room_id = {} - for event in room_events: - events_by_room_id.setdefault(event.room_id, []).append(event) - if event.room_id not in joined_room_ids: - if (event.type == EventTypes.Member - and event.state_key == sync_config.user.to_string()): - if event.membership == Membership.INVITE: - invite_events.append(event) - elif event.membership in (Membership.LEAVE, Membership.BAN): - leave_events.append(event) + if room_entry: + events, start_key = room_entry - for room_id in joined_room_ids: - recents = events_by_room_id.get(room_id, []) - logger.debug("Events for room %s: %r", room_id, recents) - state = { - (event.type, event.state_key): event - for event in recents if event.is_state()} - limited = False + prev_batch_token = now_token.copy_and_replace("room_key", start_key) - if recents: - prev_batch = now_token.copy_and_replace( - "room_key", recents[0].internal_metadata.before - ) - else: - prev_batch = now_token + newly_joined_room = room_id in newly_joined_rooms + full_state = newly_joined_room - just_joined = yield self.check_joined_room(sync_config, state) - if just_joined: - logger.debug("User has just joined %s: needs full state", - room_id) - state = yield self.get_state_at(room_id, now_token) - # the timeline is inherently limited if we've just joined - limited = True - - recents = sync_config.filter_collection.filter_room_timeline(recents) - - state = { - (e.type, e.state_key): e - for e in sync_config.filter_collection.filter_room_state( - state.values() - ) - } - - acc_data = self.account_data_for_room( - room_id, tags_by_room, account_data_by_room + batch = yield self.load_filtered_recents( + room_id, sync_config, prev_batch_token, + since_token=since_token, + recents=events, + newly_joined_room=newly_joined_room, ) - - acc_data = sync_config.filter_collection.filter_room_account_data( - acc_data + else: + batch = TimelineBatch( + events=[], + prev_batch=since_token, + limited=False, ) + full_state = False - ephemeral = sync_config.filter_collection.filter_room_ephemeral( - ephemeral_by_room.get(room_id, []) - ) - - room_sync = JoinedSyncResult( - room_id=room_id, - timeline=TimelineBatch( - events=recents, - prev_batch=prev_batch, - limited=limited, - ), - state=state, - ephemeral=ephemeral, - account_data=acc_data, - unread_notifications={}, - ) - logger.debug("Result for room %s: %r", room_id, room_sync) - - if room_sync: - notifs = yield self.unread_notifs_for_room_id( - room_id, sync_config, all_ephemeral_by_room
- ) - - if notifs is not None: - notif_dict = room_sync.unread_notifications - notif_dict["notification_count"] = len(notifs) - notif_dict["highlight_count"] = len([ - 1 for notif in notifs - if _action_has_highlight(notif["actions"]) - ]) - - joined.append(room_sync) - - else: - logger.debug("Got %i events for incremental sync - hit limit", - len(room_events)) - - invite_events = yield self.store.get_invites_for_user( - sync_config.user.to_string() - ) - - leave_events = yield self.store.get_leave_and_ban_events_for_user( - sync_config.user.to_string() - ) - - for room_id in joined_room_ids: - room_sync = yield self.incremental_sync_with_gap_for_room( - room_id, sync_config, since_token, now_token, - ephemeral_by_room, tags_by_room, account_data_by_room, - all_ephemeral_by_room=all_ephemeral_by_room, - ) - if room_sync: - joined.append(room_sync) - - for leave_event in leave_events: - room_sync = yield self.incremental_sync_for_archived_room( - sync_config, leave_event, since_token, tags_by_room, - account_data_by_room + room_sync = yield self.incremental_sync_with_gap_for_room( + room_id=room_id, + sync_config=sync_config, + since_token=since_token, + now_token=now_token, + ephemeral_by_room=ephemeral_by_room, + tags_by_room=tags_by_room, + account_data_by_room=account_data_by_room, + all_ephemeral_by_room=all_ephemeral_by_room, + batch=batch, + full_state=full_state, ) if room_sync: - archived.append(room_sync) - - invited = [ - InvitedSyncResult(room_id=event.room_id, invite=event) - for event in invite_events - ] + joined.append(room_sync) account_data_for_user = sync_config.filter_collection.filter_account_data( self.account_data_for_user(account_data) @@ -699,12 +629,10 @@ class SyncHandler(BaseHandler): @defer.inlineCallbacks def load_filtered_recents(self, room_id, sync_config, now_token, - since_token=None): + since_token=None, recents=None, newly_joined_room=False): """ :returns a Deferred TimelineBatch """ - limited = True - recents = [] filtering_factor = 2 timeline_limit = sync_config.filter_collection.timeline_limit() load_limit = max(timeline_limit * filtering_factor, 100) @@ -712,11 +640,27 @@ class SyncHandler(BaseHandler): room_key = now_token.room_key end_key = room_key + limited = recents is None or newly_joined_room or timeline_limit < len(recents) + + if recents is not None: + recents = sync_config.filter_collection.filter_room_timeline(recents) + recents = yield self._filter_events_for_client( + sync_config.user.to_string(), + recents, + is_peeking=sync_config.is_guest, + ) + else: + recents = [] + + since_key = None + if since_token and not newly_joined_room: + since_key = since_token.room_key + while limited and len(recents) < timeline_limit and max_repeat: - events, end_key = yield self.store.get_recent_room_events_stream_for_room( + events, end_key = yield self.store.get_room_events_stream_for_room( room_id, limit=load_limit + 1, - from_key=since_token.room_key if since_token else None, + from_key=since_key, to_key=end_key, ) loaded_recents = sync_config.filter_collection.filter_room_timeline(events) @@ -727,6 +671,7 @@ class SyncHandler(BaseHandler): ) loaded_recents.extend(recents) recents = loaded_recents + if len(events) <= load_limit: limited = False break @@ -742,7 +687,9 @@ class SyncHandler(BaseHandler): ) defer.returnValue(TimelineBatch( - events=recents, prev_batch=prev_batch_token, limited=limited + events=recents, + prev_batch=prev_batch_token, + limited=limited or newly_joined_room )) @defer.inlineCallbacks @@ -750,24 +697,8 @@ class 
SyncHandler(BaseHandler): since_token, now_token, ephemeral_by_room, tags_by_room, account_data_by_room, - all_ephemeral_by_room): - """ Get the incremental delta needed to bring the client up to date for - the room. Gives the client the most recent events and the changes to - state. - Returns: - A Deferred JoinedSyncResult - """ - logger.debug("Doing incremental sync for room %s between %s and %s", - room_id, since_token, now_token) - - # TODO(mjark): Check for redactions we might have missed. - - batch = yield self.load_filtered_recents( - room_id, sync_config, now_token, since_token, - ) - - logger.debug("Recents %r", batch) - + all_ephemeral_by_room, + batch, full_state=False): if batch.limited: current_state = yield self.get_state_at(room_id, now_token) @@ -832,43 +763,48 @@ class SyncHandler(BaseHandler): defer.returnValue(room_sync) @defer.inlineCallbacks - def incremental_sync_for_archived_room(self, sync_config, leave_event, + def incremental_sync_for_archived_room(self, sync_config, room_id, leave_event_id, since_token, tags_by_room, - account_data_by_room): + account_data_by_room, full_state, + leave_token=None): """ Get the incremental delta needed to bring the client up to date for the archived room. Returns: A Deferred ArchivedSyncResult """ - stream_token = yield self.store.get_stream_token_for_event( - leave_event.event_id - ) + if not leave_token: + stream_token = yield self.store.get_stream_token_for_event( + leave_event_id + ) - leave_token = since_token.copy_and_replace("room_key", stream_token) + leave_token = since_token.copy_and_replace("room_key", stream_token) - if since_token.is_after(leave_token): + if since_token and since_token.is_after(leave_token): defer.returnValue(None) batch = yield self.load_filtered_recents( - leave_event.room_id, sync_config, leave_token, since_token, + room_id, sync_config, leave_token, since_token, ) logger.debug("Recents %r", batch) state_events_at_leave = yield self.store.get_state_for_event( - leave_event.event_id + leave_event_id ) - state_at_previous_sync = yield self.get_state_at( - leave_event.room_id, stream_position=since_token - ) + if not full_state: + state_at_previous_sync = yield self.get_state_at( + room_id, stream_position=since_token + ) - state_events_delta = yield self.compute_state_delta( - since_token=since_token, - previous_state=state_at_previous_sync, - current_state=state_events_at_leave, - ) + state_events_delta = yield self.compute_state_delta( + since_token=since_token, + previous_state=state_at_previous_sync, + current_state=state_events_at_leave, + ) + else: + state_events_delta = state_events_at_leave state_events_delta = { (e.type, e.state_key): e @@ -878,7 +814,7 @@ class SyncHandler(BaseHandler): } account_data = self.account_data_for_room( - leave_event.room_id, tags_by_room, account_data_by_room + room_id, tags_by_room, account_data_by_room ) account_data = sync_config.filter_collection.filter_room_account_data( @@ -886,7 +822,7 @@ class SyncHandler(BaseHandler): ) room_sync = ArchivedSyncResult( - room_id=leave_event.room_id, + room_id=room_id, timeline=batch, state=state_events_delta, account_data=account_data, diff --git a/synapse/storage/events.py b/synapse/storage/events.py index d96ea3a30..0dd1daaa2 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -128,7 +128,6 @@ class EventsStore(SQLBaseStore): is_new_state=is_new_state, current_state=current_state, ) - logger.info("Invalidating %r at %r", event.room_id, stream_ordering) 
self._events_stream_cache.room_has_changed(None, event.room_id, stream_ordering) except _RollbackButIsFineException: pass diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 3a32a0019..563e289c4 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -179,7 +179,7 @@ class StreamStore(SQLBaseStore): room_ids = list(room_ids) for rm_ids in (room_ids[i:i+20] for i in xrange(0, len(room_ids), 20)): res = yield defer.gatherResults([ - self.get_recent_room_events_stream_for_room( + self.get_room_events_stream_for_room( room_id, from_key, to_key, limit ).addCallback(lambda r, rm: (rm, r), room_id) for room_id in room_ids @@ -189,7 +189,7 @@ class StreamStore(SQLBaseStore): defer.returnValue(results) @defer.inlineCallbacks - def get_recent_room_events_stream_for_room(self, room_id, from_key, to_key, limit=0): + def get_room_events_stream_for_room(self, room_id, from_key, to_key, limit=0): if from_key is not None: from_id = RoomStreamToken.parse_stream_token(from_key).stream else: @@ -246,7 +246,7 @@ class StreamStore(SQLBaseStore): key = from_key return ret, key - res = yield self.runInteraction("get_recent_room_events_stream_for_room", f) + res = yield self.runInteraction("get_room_events_stream_for_room", f) defer.returnValue(res) def get_room_changes_for_user(self, user_id, from_key, to_key): From e7febf4fbb1f1beb11e7a03252f6844f84af7f30 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 27 Jan 2016 17:11:04 +0000 Subject: [PATCH 04/30] PEP8 --- synapse/storage/events.py | 4 +++- synapse/storage/receipts.py | 2 -- synapse/storage/stream.py | 9 ++++++--- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 0dd1daaa2..80187722e 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -128,7 +128,9 @@ class EventsStore(SQLBaseStore): is_new_state=is_new_state, current_state=current_state, ) - self._events_stream_cache.room_has_changed(None, event.room_id, stream_ordering) + self._events_stream_cache.room_has_changed( + None, event.room_id, stream_ordering + ) except _RollbackButIsFineException: pass diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py index b7a4e7774..7118368d9 100644 --- a/synapse/storage/receipts.py +++ b/synapse/storage/receipts.py @@ -15,12 +15,10 @@ from ._base import SQLBaseStore from synapse.util.caches.descriptors import cachedInlineCallbacks, cachedList, cached -from synapse.util.caches import cache_counter, caches_by_name from synapse.util.caches.room_change_cache import RoomStreamChangeCache from twisted.internet import defer -from blist import sorteddict import logging import ujson as json diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 563e289c4..0b2225179 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -262,7 +262,8 @@ class StreamStore(SQLBaseStore): def f(txn): if from_id is not None: sql = ( - "SELECT m.event_id, stream_ordering FROM events AS e, room_memberships AS m" + "SELECT m.event_id, stream_ordering FROM events AS e," + " room_memberships AS m" " WHERE e.event_id = m.event_id" " AND m.user_id = ?" " AND e.stream_ordering > ? AND e.stream_ordering <= ?" 
@@ -271,7 +272,8 @@ class StreamStore(SQLBaseStore): txn.execute(sql, (user_id, from_id, to_id,)) else: sql = ( - "SELECT m.event_id, stream_ordering FROM events AS e, room_memberships AS m" + "SELECT m.event_id, stream_ordering FROM events AS e," + " room_memberships AS m" " WHERE e.event_id = m.event_id" " AND m.user_id = ?" " AND stream_ordering <= ?" @@ -307,7 +309,8 @@ class StreamStore(SQLBaseStore): "SELECT c.room_id FROM history_visibility AS h" " INNER JOIN current_state_events AS c" " ON h.event_id = c.event_id" - " WHERE c.room_id IN (%s) AND h.history_visibility = 'world_readable'" % ( + " WHERE c.room_id IN (%s)" + " AND h.history_visibility = 'world_readable'" % ( ",".join(map(lambda _: "?", room_ids)) ) ) From 5fc9b17518008e08a420b94a8f77955440c52b2b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 27 Jan 2016 17:39:20 +0000 Subject: [PATCH 05/30] No chdir --- synapse/app/homeserver.py | 1 - 1 file changed, 1 deletion(-) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 504557b2f..56a34bd50 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -728,7 +728,6 @@ def run(hs): auto_close_fds=False, verbose=True, logger=logger, - chdir=os.path.dirname(os.path.abspath(__file__)), ) daemon.start() From ba8931829b0b601eb14049c92e0f21a10772576d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 11:34:17 +0000 Subject: [PATCH 06/30] Return correct type of token --- synapse/storage/stream.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 0b2225179..28721e699 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -236,7 +236,7 @@ class StreamStore(SQLBaseStore): ret.reverse() - self._set_before_and_after(ret, rows) + self._set_before_and_after(ret, rows, topo_order=False) if rows: key = "s%d" % min(r["stream_ordering"] for r in rows) @@ -581,10 +581,13 @@ class StreamStore(SQLBaseStore): return rows[0][0] if rows else 0 @staticmethod - def _set_before_and_after(events, rows): + def _set_before_and_after(events, rows, topo_order=True): for event, row in zip(events, rows): stream = row["stream_ordering"] - topo = event.depth + if topo_order: + topo = event.depth + else: + topo = None internal = event.internal_metadata internal.before = str(RoomStreamToken(topo, stream - 1)) internal.after = str(RoomStreamToken(topo, stream)) From 4e7948b47a3f197682de82fc0cda07ebb08a581d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 11:52:34 +0000 Subject: [PATCH 07/30] Allow paginating backwards from stream token --- synapse/handlers/message.py | 15 +++++++++------ synapse/storage/stream.py | 16 ++++++++++++++-- tests/rest/client/v1/test_rooms.py | 9 +-------- 3 files changed, 24 insertions(+), 16 deletions(-) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index b73ad6214..82c8cb5f0 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -16,7 +16,7 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, Membership -from synapse.api.errors import SynapseError, AuthError, Codes +from synapse.api.errors import AuthError, Codes from synapse.streams.config import PaginationConfig from synapse.events.utils import serialize_event from synapse.events.validator import EventValidator @@ -119,9 +119,12 @@ class MessageHandler(BaseHandler): if source_config.direction == 'b': # if we're going backwards, we might need to backfill. 
This # requires that we have a topo token. - if room_token.topological is None: - raise SynapseError(400, "Invalid token: cannot paginate " - "backwards from a stream token") + if room_token.topological: + max_topo = room_token.topological + else: + max_topo = yield self.store.get_max_topological_token_for_stream_and_room( + room_id, room_token.stream + ) if membership == Membership.LEAVE: # If they have left the room then clamp the token to be before @@ -131,11 +134,11 @@ class MessageHandler(BaseHandler): member_event_id ) leave_token = RoomStreamToken.parse(leave_token) - if leave_token.topological < room_token.topological: + if leave_token.topological < max_topo: source_config.from_key = str(leave_token) yield self.hs.get_handlers().federation_handler.maybe_backfill( - room_id, room_token.topological + room_id, max_topo ) events, next_key = yield data_source.get_pagination_rows( diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 28721e699..5096b4686 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -234,10 +234,10 @@ class StreamStore(SQLBaseStore): get_prev_content=True ) - ret.reverse() - self._set_before_and_after(ret, rows, topo_order=False) + ret.reverse() + if rows: key = "s%d" % min(r["stream_ordering"] for r in rows) else: @@ -570,6 +570,18 @@ class StreamStore(SQLBaseStore): row["topological_ordering"], row["stream_ordering"],) ) + def get_max_topological_token_for_stream_and_room(self, room_id, stream_key): + sql = ( + "SELECT max(topological_ordering) FROM events" + " WHERE room_id = ? AND stream_ordering < ?" + ) + return self._execute( + "get_max_topological_token_for_stream_and_room", None, + sql, room_id, stream_key, + ).addCallback( + lambda r: r[0][0] if r else 0 + ) + def _get_max_topological_txn(self, txn): txn.execute( "SELECT MAX(topological_ordering) FROM events" diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index 2fe6f695f..ad5dd3bd6 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -1044,13 +1044,6 @@ class RoomMessageListTestCase(RestTestCase): self.assertTrue("chunk" in response) self.assertTrue("end" in response) - @defer.inlineCallbacks - def test_stream_token_is_rejected_for_back_pagination(self): - (code, response) = yield self.mock_resource.trigger_get( - "/rooms/%s/messages?access_token=x&from=s0_0_0_0_0&dir=b" % - self.room_id) - self.assertEquals(400, code) - @defer.inlineCallbacks def test_stream_token_is_accepted_for_fwd_pagianation(self): token = "s0_0_0_0_0" @@ -1061,4 +1054,4 @@ class RoomMessageListTestCase(RestTestCase): self.assertTrue("start" in response) self.assertEquals(token, response['start']) self.assertTrue("chunk" in response) - self.assertTrue("end" in response) \ No newline at end of file + self.assertTrue("end" in response) From 3c6518ddbfb6f74eb5c0d1d0e4e8159e4ae45b04 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 14:03:48 +0000 Subject: [PATCH 08/30] Amalgamate incremental and full sync for user --- synapse/handlers/sync.py | 56 ++++++++++++---------------------------- 1 file changed, 16 insertions(+), 40 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index f5e20d6a6..2bd83e4b5 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -298,46 +298,19 @@ class SyncHandler(BaseHandler): room_id, sync_config, now_token, since_token=timeline_since_token ) - notifs = yield self.unread_notifs_for_room_id( - room_id, sync_config, ephemeral_by_room 
+ room_sync = yield self.incremental_sync_with_gap_for_room( + room_id, sync_config, + now_token=now_token, + since_token=timeline_since_token, + ephemeral_by_room=ephemeral_by_room, + tags_by_room=tags_by_room, + account_data_by_room=account_data_by_room, + all_ephemeral_by_room=ephemeral_by_room, + batch=batch, + full_state=True, ) - unread_notifications = {} - if notifs is not None: - unread_notifications["notification_count"] = len(notifs) - unread_notifications["highlight_count"] = len([ - 1 for notif in notifs if _action_has_highlight(notif["actions"]) - ]) - - current_state = yield self.get_state_at(room_id, now_token) - - current_state = { - (e.type, e.state_key): e - for e in sync_config.filter_collection.filter_room_state( - current_state.values() - ) - } - - account_data = self.account_data_for_room( - room_id, tags_by_room, account_data_by_room - ) - - account_data = sync_config.filter_collection.filter_room_account_data( - account_data - ) - - ephemeral = sync_config.filter_collection.filter_room_ephemeral( - ephemeral_by_room.get(room_id, []) - ) - - defer.returnValue(JoinedSyncResult( - room_id=room_id, - timeline=batch, - state=current_state, - ephemeral=ephemeral, - account_data=account_data, - unread_notifications=unread_notifications, - )) + defer.returnValue(room_sync) def account_data_for_user(self, account_data): account_data_events = [] @@ -635,7 +608,7 @@ class SyncHandler(BaseHandler): """ filtering_factor = 2 timeline_limit = sync_config.filter_collection.timeline_limit() - load_limit = max(timeline_limit * filtering_factor, 100) + load_limit = timeline_limit * filtering_factor max_repeat = 3 # Only try a few times per room, otherwise room_key = now_token.room_key end_key = room_key @@ -699,7 +672,10 @@ class SyncHandler(BaseHandler): account_data_by_room, all_ephemeral_by_room, batch, full_state=False): - if batch.limited: + if full_state: + state = yield self.get_state_at(room_id, now_token) + + elif batch.limited: current_state = yield self.get_state_at(room_id, now_token) state_at_previous_sync = yield self.get_state_at( From 571a5663993fe68e85da76f9df47c2f215a84403 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 14:11:16 +0000 Subject: [PATCH 09/30] Change load limit params --- synapse/handlers/sync.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 2bd83e4b5..51ec4702d 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -608,8 +608,8 @@ class SyncHandler(BaseHandler): """ filtering_factor = 2 timeline_limit = sync_config.filter_collection.timeline_limit() - load_limit = timeline_limit * filtering_factor - max_repeat = 3 # Only try a few times per room, otherwise + load_limit = max(timeline_limit * filtering_factor, 10) + max_repeat = 5 # Only try a few times per room, otherwise room_key = now_token.room_key end_key = room_key From e1941442d442fe62570551071edfd936304697e7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 15:02:37 +0000 Subject: [PATCH 10/30] Invalidate caches properly. 
Remove unused arg --- synapse/storage/events.py | 9 ++++++--- synapse/storage/receipts.py | 10 ++++++---- synapse/storage/stream.py | 2 +- synapse/util/caches/room_change_cache.py | 4 ++-- 4 files changed, 15 insertions(+), 10 deletions(-) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 80187722e..2d2270b29 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -128,9 +128,6 @@ class EventsStore(SQLBaseStore): is_new_state=is_new_state, current_state=current_state, ) - self._events_stream_cache.room_has_changed( - None, event.room_id, stream_ordering - ) except _RollbackButIsFineException: pass @@ -213,6 +210,12 @@ class EventsStore(SQLBaseStore): for event, _ in events_and_contexts: txn.call_after(self._invalidate_get_event_cache, event.event_id) + if not backfilled: + txn.call_after( + self._events_stream_cache.room_has_changed, + event.room_id, event.internal_metadata.stream_ordering, + ) + depth_updates = {} for event, _ in events_and_contexts: if event.internal_metadata.is_outlier(): diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py index 7118368d9..5ffbfdec5 100644 --- a/synapse/storage/receipts.py +++ b/synapse/storage/receipts.py @@ -78,7 +78,7 @@ class ReceiptsStore(SQLBaseStore): if from_key: room_ids = yield self._receipts_stream_cache.get_rooms_changed( - self, room_ids, from_key + room_ids, from_key ) results = yield self._get_linearized_receipts_for_rooms( @@ -221,6 +221,11 @@ class ReceiptsStore(SQLBaseStore): # FIXME: This shouldn't invalidate the whole cache txn.call_after(self.get_linearized_receipts_for_room.invalidate_all) + txn.call_after( + self._receipts_stream_cache.room_has_changed, + room_id, stream_id + ) + # We don't want to clobber receipts for more recent events, so we # have to compare orderings of existing receipts sql = ( @@ -308,9 +313,6 @@ class ReceiptsStore(SQLBaseStore): stream_id_manager = yield self._receipts_id_gen.get_next(self) with stream_id_manager as stream_id: - yield self._receipts_stream_cache.room_has_changed( - self, room_id, stream_id - ) have_persisted = yield self.runInteraction( "insert_linearized_receipt", self.insert_linearized_receipt_txn, diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 5096b4686..67e7e6a76 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -169,7 +169,7 @@ class StreamStore(SQLBaseStore): from_id = RoomStreamToken.parse_stream_token(from_key).stream room_ids = yield self._events_stream_cache.get_rooms_changed( - self, room_ids, from_id + room_ids, from_id ) if not room_ids: diff --git a/synapse/util/caches/room_change_cache.py b/synapse/util/caches/room_change_cache.py index 3a873c9c3..eb2ab5f1e 100644 --- a/synapse/util/caches/room_change_cache.py +++ b/synapse/util/caches/room_change_cache.py @@ -51,7 +51,7 @@ class RoomStreamChangeCache(object): return False - def get_rooms_changed(self, store, room_ids, key): + def get_rooms_changed(self, room_ids, key): """Returns subset of room ids that have had new things since the given key. If the key is too old it will just return the given list. """ @@ -70,7 +70,7 @@ class RoomStreamChangeCache(object): return result - def room_has_changed(self, store, room_id, key): + def room_has_changed(self, room_id, key): """Informs the cache that the room has been changed at the given key. 
""" if key > self._earliest_known_key: From c23a8c783382a0789c757e16e104cf08654e6cf8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 15:55:26 +0000 Subject: [PATCH 11/30] Ensure keys to RoomStreamChangeCache are ints --- synapse/storage/stream.py | 11 ++++++----- synapse/util/caches/room_change_cache.py | 6 ++++++ 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 67e7e6a76..6a724193e 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -199,12 +199,13 @@ class StreamStore(SQLBaseStore): if from_key == to_key: defer.returnValue(([], from_key)) - has_changed = yield self._events_stream_cache.get_room_has_changed( - room_id, from_id - ) + if from_id: + has_changed = yield self._events_stream_cache.get_room_has_changed( + room_id, from_id + ) - if not has_changed: - defer.returnValue(([], from_key)) + if not has_changed: + defer.returnValue(([], from_key)) def f(txn): if from_id is not None: diff --git a/synapse/util/caches/room_change_cache.py b/synapse/util/caches/room_change_cache.py index eb2ab5f1e..e8bfedd72 100644 --- a/synapse/util/caches/room_change_cache.py +++ b/synapse/util/caches/room_change_cache.py @@ -39,6 +39,8 @@ class RoomStreamChangeCache(object): caches_by_name[self.name] = self._cache def get_room_has_changed(self, room_id, key): + assert type(key) is int + if key <= self._earliest_known_key: return True @@ -55,6 +57,8 @@ class RoomStreamChangeCache(object): """Returns subset of room ids that have had new things since the given key. If the key is too old it will just return the given list. """ + assert type(key) is int + if key > self._earliest_known_key: keys = self._cache.keys() i = keys.bisect_right(key) @@ -73,6 +77,8 @@ class RoomStreamChangeCache(object): def room_has_changed(self, room_id, key): """Informs the cache that the room has been changed at the given key. """ + assert type(key) is int + if key > self._earliest_known_key: old_key = self._room_to_key.get(room_id, None) if old_key: From 00cb3eb24b277bb37bd1b7d8449c08a37cb4b014 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 16:37:41 +0000 Subject: [PATCH 12/30] Cache tags and account data --- synapse/storage/account_data.py | 20 ++++- synapse/storage/events.py | 2 +- synapse/storage/receipts.py | 8 +- synapse/storage/stream.py | 8 +- synapse/storage/tags.py | 14 ++++ synapse/util/caches/room_change_cache.py | 92 --------------------- synapse/util/caches/stream_change_cache.py | 95 ++++++++++++++++++++++ 7 files changed, 137 insertions(+), 102 deletions(-) delete mode 100644 synapse/util/caches/room_change_cache.py create mode 100644 synapse/util/caches/stream_change_cache.py diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py index 9c6597e01..95294c3f6 100644 --- a/synapse/storage/account_data.py +++ b/synapse/storage/account_data.py @@ -14,6 +14,7 @@ # limitations under the License. from ._base import SQLBaseStore +from synapse.util.caches.stream_change_cache import StreamChangeCache from twisted.internet import defer import ujson as json @@ -23,6 +24,13 @@ logger = logging.getLogger(__name__) class AccountDataStore(SQLBaseStore): + def __init__(self, hs): + super(AccountDataStore, self).__init__(hs) + + self._account_data_stream_cache = StreamChangeCache( + "AccountDataChangeCache", self._account_data_id_gen.get_max_token(None), + max_size=1000, + ) def get_account_data_for_user(self, user_id): """Get all the client account_data for a user. 
@@ -83,7 +91,7 @@ class AccountDataStore(SQLBaseStore): "get_account_data_for_room", get_account_data_for_room_txn ) - def get_updated_account_data_for_user(self, user_id, stream_id): + def get_updated_account_data_for_user(self, user_id, stream_id, room_ids=None): """Get all the client account_data for a user that's changed. Args: @@ -120,6 +128,12 @@ class AccountDataStore(SQLBaseStore): return (global_account_data, account_data_by_room) + changed = self._account_data_stream_cache.get_entity_has_changed( + user_id, int(stream_id) + ) + if not changed: + defer.returnValue(({}, {})) + return self.runInteraction( "get_updated_account_data_for_user", get_updated_account_data_for_user_txn ) @@ -186,6 +200,10 @@ class AccountDataStore(SQLBaseStore): "content": content_json, } ) + txn.call_after( + self._account_data_stream_cache.entity_has_changed, + user_id, next_id, + ) self._update_max_stream_id(txn, next_id) with (yield self._account_data_id_gen.get_next(self)) as next_id: diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 2d2270b29..5e8555202 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -212,7 +212,7 @@ class EventsStore(SQLBaseStore): if not backfilled: txn.call_after( - self._events_stream_cache.room_has_changed, + self._events_stream_cache.entity_has_changed, event.room_id, event.internal_metadata.stream_ordering, ) diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py index 5ffbfdec5..8068c7374 100644 --- a/synapse/storage/receipts.py +++ b/synapse/storage/receipts.py @@ -15,7 +15,7 @@ from ._base import SQLBaseStore from synapse.util.caches.descriptors import cachedInlineCallbacks, cachedList, cached -from synapse.util.caches.room_change_cache import RoomStreamChangeCache +from synapse.util.caches.stream_change_cache import StreamChangeCache from twisted.internet import defer @@ -30,7 +30,7 @@ class ReceiptsStore(SQLBaseStore): def __init__(self, hs): super(ReceiptsStore, self).__init__(hs) - self._receipts_stream_cache = RoomStreamChangeCache( + self._receipts_stream_cache = StreamChangeCache( "ReceiptsRoomChangeCache", self._receipts_id_gen.get_max_token(None) ) @@ -77,7 +77,7 @@ class ReceiptsStore(SQLBaseStore): room_ids = set(room_ids) if from_key: - room_ids = yield self._receipts_stream_cache.get_rooms_changed( + room_ids = yield self._receipts_stream_cache.get_entities_changed( room_ids, from_key ) @@ -222,7 +222,7 @@ class ReceiptsStore(SQLBaseStore): txn.call_after(self.get_linearized_receipts_for_room.invalidate_all) txn.call_after( - self._receipts_stream_cache.room_has_changed, + self._receipts_stream_cache.entity_has_changed, room_id, stream_id ) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 6a724193e..c7d789332 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -37,7 +37,7 @@ from twisted.internet import defer from ._base import SQLBaseStore from synapse.util.caches.descriptors import cachedInlineCallbacks -from synapse.util.caches.room_change_cache import RoomStreamChangeCache +from synapse.util.caches.stream_change_cache import StreamChangeCache from synapse.api.constants import EventTypes from synapse.types import RoomStreamToken from synapse.util.logutils import log_function @@ -81,7 +81,7 @@ class StreamStore(SQLBaseStore): def __init__(self, hs): super(StreamStore, self).__init__(hs) - self._events_stream_cache = RoomStreamChangeCache( + self._events_stream_cache = StreamChangeCache( "EventsRoomStreamChangeCache",
self._stream_id_gen.get_max_token(None) ) @@ -168,7 +168,7 @@ class StreamStore(SQLBaseStore): def get_room_events_stream_for_rooms(self, room_ids, from_key, to_key, limit=0): from_id = RoomStreamToken.parse_stream_token(from_key).stream - room_ids = yield self._events_stream_cache.get_rooms_changed( + room_ids = yield self._events_stream_cache.get_entities_changed( room_ids, from_id ) @@ -200,7 +200,7 @@ class StreamStore(SQLBaseStore): defer.returnValue(([], from_key)) if from_id: - has_changed = yield self._events_stream_cache.get_room_has_changed( + has_changed = yield self._events_stream_cache.get_entity_has_changed( room_id, from_id ) diff --git a/synapse/storage/tags.py b/synapse/storage/tags.py index 4c39e07cb..50af89919 100644 --- a/synapse/storage/tags.py +++ b/synapse/storage/tags.py @@ -15,6 +15,7 @@ from ._base import SQLBaseStore from synapse.util.caches.descriptors import cached +from synapse.util.caches.stream_change_cache import StreamChangeCache from twisted.internet import defer import ujson as json @@ -24,6 +25,13 @@ logger = logging.getLogger(__name__) class TagsStore(SQLBaseStore): + def __init__(self, hs): + super(TagsStore, self).__init__(hs) + + self._tags_stream_cache = StreamChangeCache( + "TagsChangeCache", self._account_data_id_gen.get_max_token(None), + max_size=1000, + ) def get_max_account_data_stream_id(self): """Get the current max stream id for the private user data stream @@ -80,6 +88,10 @@ class TagsStore(SQLBaseStore): room_ids = [row[0] for row in txn.fetchall()] return room_ids + changed = self._tags_stream_cache.get_entity_has_changed(user_id, int(stream_id)) + if not changed: + defer.returnValue({}) + room_ids = yield self.runInteraction( "get_updated_tags", get_updated_tags_txn ) @@ -177,6 +189,8 @@ class TagsStore(SQLBaseStore): next_id(int): The the revision to advance to. """ + txn.call_after(self._tags_stream_cache.entity_has_changed, user_id, next_id) + update_max_id_sql = ( "UPDATE account_data_max_stream_id" " SET stream_id = ?" diff --git a/synapse/util/caches/room_change_cache.py b/synapse/util/caches/room_change_cache.py deleted file mode 100644 index e8bfedd72..000000000 --- a/synapse/util/caches/room_change_cache.py +++ /dev/null @@ -1,92 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from synapse.util.caches import cache_counter, caches_by_name - - -from blist import sorteddict -import logging - - -logger = logging.getLogger(__name__) - - -class RoomStreamChangeCache(object): - """Keeps track of the stream_id of the latest change in rooms. - - Given a list of rooms and stream key, it will give a subset of rooms that - may have changed since that key. If the key is too old then the cache - will simply return all rooms. 
- """ - def __init__(self, name, current_key, size_of_cache=10000): - self._size_of_cache = size_of_cache - self._room_to_key = {} - self._cache = sorteddict() - self._earliest_known_key = current_key - self.name = name - caches_by_name[self.name] = self._cache - - def get_room_has_changed(self, room_id, key): - assert type(key) is int - - if key <= self._earliest_known_key: - return True - - room_key = self._room_to_key.get(room_id, None) - if room_key is None: - return True - - if key < room_key: - return True - - return False - - def get_rooms_changed(self, room_ids, key): - """Returns subset of room ids that have had new things since the - given key. If the key is too old it will just return the given list. - """ - assert type(key) is int - - if key > self._earliest_known_key: - keys = self._cache.keys() - i = keys.bisect_right(key) - - result = set( - self._cache[k] for k in keys[i:] - ).intersection(room_ids) - - cache_counter.inc_hits(self.name) - else: - result = room_ids - cache_counter.inc_misses(self.name) - - return result - - def room_has_changed(self, room_id, key): - """Informs the cache that the room has been changed at the given key. - """ - assert type(key) is int - - if key > self._earliest_known_key: - old_key = self._room_to_key.get(room_id, None) - if old_key: - key = max(key, old_key) - self._cache.pop(old_key, None) - self._cache[key] = room_id - - while len(self._cache) > self._size_of_cache: - k, r = self._cache.popitem() - self._earliest_key = max(k, self._earliest_key) - self._room_to_key.pop(r, None) diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py new file mode 100644 index 000000000..33b37f7f2 --- /dev/null +++ b/synapse/util/caches/stream_change_cache.py @@ -0,0 +1,95 @@ +# -*- coding: utf-8 -*- +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from synapse.util.caches import cache_counter, caches_by_name + + +from blist import sorteddict +import logging + + +logger = logging.getLogger(__name__) + + +class StreamChangeCache(object): + """Keeps track of the stream positions of the latest change in a set of entities. + + Typically the entity will be a room or user id. + + Given a list of entities and a stream position, it will give a subset of + entities that may have changed since that position. If position key is too + old then the cache will simply return all given entities. 
+ """ + def __init__(self, name, current_stream_pos, max_size=10000): + self._max_size = max_size + self._entity_to_key = {} + self._cache = sorteddict() + self._earliest_known_stream_pos = current_stream_pos + self.name = name + caches_by_name[self.name] = self._cache + + def get_entity_has_changed(self, entity, stream_pos): + assert type(stream_pos) is int + + if stream_pos <= self._earliest_known_stream_pos: + return True + + latest_entity_change_pos = self._entity_to_key.get(entity, None) + if latest_entity_change_pos is None: + return True + + if stream_pos < latest_entity_change_pos: + return True + + return False + + def get_entities_changed(self, entities, stream_pos): + """Returns subset of entities that have had new things since the + given position. If the position is too old it will just return the given list. + """ + assert type(stream_pos) is int + + if stream_pos > self._earliest_known_stream_pos: + keys = self._cache.keys() + i = keys.bisect_right(stream_pos) + + result = set( + self._cache[k] for k in keys[i:] + ).intersection(entities) + + cache_counter.inc_hits(self.name) + else: + result = entities + cache_counter.inc_misses(self.name) + + return result + + def entity_has_changed(self, entitiy, stream_pos): + """Informs the cache that the entitiy has been changed at the given + position. + """ + assert type(stream_pos) is int + + if stream_pos > self._earliest_known_stream_pos: + old_pos = self._entity_to_key.get(entitiy, None) + if old_pos: + stream_pos = max(stream_pos, old_pos) + self._cache.pop(old_pos, None) + self._cache[stream_pos] = entitiy + + while len(self._cache) > self._max_size: + k, r = self._cache.popitem() + self._earliest_known_stream_pos = max(k, self._earliest_known_stream_pos) + self._entity_to_key.pop(r, None) From 45cf827c8fe7163a51f1d0d7c9e2531da9b58c8d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 16:39:18 +0000 Subject: [PATCH 13/30] Change name and doc has_entity_changed --- synapse/storage/account_data.py | 2 +- synapse/storage/stream.py | 2 +- synapse/storage/tags.py | 2 +- synapse/util/caches/stream_change_cache.py | 4 +++- 4 files changed, 6 insertions(+), 4 deletions(-) diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py index 95294c3f6..62e49e1c0 100644 --- a/synapse/storage/account_data.py +++ b/synapse/storage/account_data.py @@ -128,7 +128,7 @@ class AccountDataStore(SQLBaseStore): return (global_account_data, account_data_by_room) - changed = self._account_data_stream_cache.get_entity_has_changed( + changed = self._account_data_stream_cache.has_entity_changed( user_id, int(stream_id) ) if not changed: diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index c7d789332..6e81d46c6 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -200,7 +200,7 @@ class StreamStore(SQLBaseStore): defer.returnValue(([], from_key)) if from_id: - has_changed = yield self._events_stream_cache.get_entity_has_changed( + has_changed = yield self._events_stream_cache.has_entity_changed( room_id, from_id ) diff --git a/synapse/storage/tags.py b/synapse/storage/tags.py index 50af89919..75ce04092 100644 --- a/synapse/storage/tags.py +++ b/synapse/storage/tags.py @@ -88,7 +88,7 @@ class TagsStore(SQLBaseStore): room_ids = [row[0] for row in txn.fetchall()] return room_ids - changed = self._tags_stream_cache.get_entity_has_changed(user_id, int(stream_id)) + changed = self._tags_stream_cache.has_entity_changed(user_id, int(stream_id)) if not changed: defer.returnValue({}) diff 
--git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index 33b37f7f2..3ca0e5778 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -40,7 +40,9 @@ class StreamChangeCache(object): self.name = name caches_by_name[self.name] = self._cache - def get_entity_has_changed(self, entity, stream_pos): + def has_entity_changed(self, entity, stream_pos): + """Returns True if the entity may have been updated since stream_pos + """ assert type(stream_pos) is int if stream_pos <= self._earliest_known_stream_pos: From fdca8ec4187e1b4ea93cbfe17ece6ca4cbadd519 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 16:41:59 +0000 Subject: [PATCH 14/30] Add events index --- .../schema/delta/28/events_room_stream.sql | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 synapse/storage/schema/delta/28/events_room_stream.sql diff --git a/synapse/storage/schema/delta/28/events_room_stream.sql b/synapse/storage/schema/delta/28/events_room_stream.sql new file mode 100644 index 000000000..200c35e6e --- /dev/null +++ b/synapse/storage/schema/delta/28/events_room_stream.sql @@ -0,0 +1,16 @@ +/* Copyright 2016 OpenMarket Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+*/
+
+CREATE INDEX events_room_stream on events(room_id, stream_ordering);
From 8fe8951a8d997a652d16758a7abed2b8afd117e2 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 28 Jan 2016 17:09:09 +0000
Subject: [PATCH 15/30] Cache filters

---
 synapse/storage/filtering.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/synapse/storage/filtering.py b/synapse/storage/filtering.py
index f8fc9bddd..524873681 100644
--- a/synapse/storage/filtering.py
+++ b/synapse/storage/filtering.py
@@ -16,12 +16,13 @@
 from twisted.internet import defer
 
 from ._base import SQLBaseStore
+from synapse.util.caches.descriptors import cachedInlineCallbacks
 
 import simplejson as json
 
 
 class FilteringStore(SQLBaseStore):
-    @defer.inlineCallbacks
+    @cachedInlineCallbacks(num_args=2)
     def get_user_filter(self, user_localpart, filter_id):
         def_json = yield self._simple_select_one_onecol(
             table="user_filters",
From 35981c8b71a2ce675f3b8414ca0e7920a5d1658e Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 28 Jan 2016 17:19:51 +0000
Subject: [PATCH 16/30] Fix test

---
 synapse/api/filtering.py    | 5 +++++
 tests/api/test_filtering.py | 7 ++++---
 2 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py
index 6c13ada5d..6eff83e5f 100644
--- a/synapse/api/filtering.py
+++ b/synapse/api/filtering.py
@@ -15,6 +15,8 @@
 from synapse.api.errors import SynapseError
 from synapse.types import UserID, RoomID
 
+import ujson as json
+
 
 class Filtering(object):
 
@@ -149,6 +151,9 @@ class FilterCollection(object):
             "include_leave", False
         )
 
+    def __repr__(self):
+        return "<FilterCollection %s>" % (json.dumps(self._filter_json),)
+
     def get_filter_json(self):
         return self._filter_json
 
diff --git a/tests/api/test_filtering.py b/tests/api/test_filtering.py
index 1a4e439d3..ceb008926 100644
--- a/tests/api/test_filtering.py
+++ b/tests/api/test_filtering.py
@@ -382,19 +382,20 @@ class FilteringTestCase(unittest.TestCase):
                 "types": ["m.*"]
             }
         }
-        user = UserID.from_string("@" + user_localpart + ":test")
+
         filter_id = yield self.datastore.add_user_filter(
-            user_localpart=user_localpart,
+            user_localpart=user_localpart + "2",
             user_filter=user_filter_json,
         )
         event = MockEvent(
+            event_id="$asdasd:localhost",
             sender="@foo:bar",
             type="custom.avatar.3d.crazy",
         )
         events = [event]
 
         user_filter = yield self.filtering.get_user_filter(
-            user_localpart=user_localpart,
+            user_localpart=user_localpart + "2",
             filter_id=filter_id,
         )
From 0663c5bd52d52e095258e312fb62c2a9cb3f200a Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 28 Jan 2016 17:27:28 +0000
Subject: [PATCH 17/30] Include cache hits with has_entity_changed

---
 synapse/util/caches/stream_change_cache.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py
index 3ca0e5778..b48f5cb27 100644
--- a/synapse/util/caches/stream_change_cache.py
+++ b/synapse/util/caches/stream_change_cache.py
@@ -46,15 +46,19 @@ class StreamChangeCache(object):
         assert type(stream_pos) is int
 
         if stream_pos <= self._earliest_known_stream_pos:
+            cache_counter.inc_misses(self.name)
             return True
 
         latest_entity_change_pos = self._entity_to_key.get(entity, None)
         if latest_entity_change_pos is None:
+            cache_counter.inc_misses(self.name)
             return True
 
         if stream_pos < latest_entity_change_pos:
+            cache_counter.inc_misses(self.name)
             return True
 
+        cache_counter.inc_hits(self.name)
         return False
 
     def get_entities_changed(self, entities, stream_pos):
From 03b2c2577cbf51c80a42319689e1cb4903b8c4af Mon
Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 17:29:24 +0000 Subject: [PATCH 18/30] Don't use defer.returnValue --- synapse/storage/account_data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py index 62e49e1c0..88404059e 100644 --- a/synapse/storage/account_data.py +++ b/synapse/storage/account_data.py @@ -132,7 +132,7 @@ class AccountDataStore(SQLBaseStore): user_id, int(stream_id) ) if not changed: - defer.returnValue(({}, {})) + return ({}, {}) return self.runInteraction( "get_updated_account_data_for_user", get_updated_account_data_for_user_txn From 82cf3a8043a6ffe1f57d78bf88eba06ad0c53fbe Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 17:44:04 +0000 Subject: [PATCH 19/30] Fix inequalities --- synapse/util/caches/stream_change_cache.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index b48f5cb27..483e0cdf9 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -45,7 +45,7 @@ class StreamChangeCache(object): """ assert type(stream_pos) is int - if stream_pos <= self._earliest_known_stream_pos: + if stream_pos < self._earliest_known_stream_pos: cache_counter.inc_misses(self.name) return True @@ -67,7 +67,7 @@ class StreamChangeCache(object): """ assert type(stream_pos) is int - if stream_pos > self._earliest_known_stream_pos: + if stream_pos >= self._earliest_known_stream_pos: keys = self._cache.keys() i = keys.bisect_right(stream_pos) From 40431251cba7ef1b623559db972600ece40818a4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 18:05:43 +0000 Subject: [PATCH 20/30] Correctly update _entity_to_key --- synapse/util/caches/stream_change_cache.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index 483e0cdf9..22a9f8f46 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -82,18 +82,19 @@ class StreamChangeCache(object): return result - def entity_has_changed(self, entitiy, stream_pos): - """Informs the cache that the entitiy has been changed at the given + def entity_has_changed(self, entity, stream_pos): + """Informs the cache that the entity has been changed at the given position. """ assert type(stream_pos) is int if stream_pos > self._earliest_known_stream_pos: - old_pos = self._entity_to_key.get(entitiy, None) + old_pos = self._entity_to_key.get(entity, None) if old_pos: stream_pos = max(stream_pos, old_pos) self._cache.pop(old_pos, None) - self._cache[stream_pos] = entitiy + self._cache[stream_pos] = entity + self._entity_to_key[entity] = stream_pos while len(self._cache) > self._max_size: k, r = self._cache.popitem() From 3f5dd18bd44ae426d3b1ff062dd64acbad72f8ae Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 18:11:41 +0000 Subject: [PATCH 21/30] If the same as the earliest key, assume nothing has changed. 
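
Together with the inequality fixes two patches back, this pins down the
cache's contract: stale or unknown positions may yield false positives
("may have changed"), but never false negatives. The snippet below is an
illustrative sketch only, not the module's actual code: it mirrors the
intended semantics using a plain dict in place of blist.sorteddict and
omits the cache_counter metrics and max_size eviction (the
SketchStreamChangeCache name is made up for the example):

    class SketchStreamChangeCache(object):
        def __init__(self, current_stream_pos):
            # entity -> latest stream position at which it is known to
            # have changed
            self._entity_to_pos = {}
            self._earliest_known_stream_pos = current_stream_pos

        def entity_has_changed(self, entity, stream_pos):
            # Record that `entity` changed at `stream_pos`.
            if stream_pos > self._earliest_known_stream_pos:
                old = self._entity_to_pos.get(entity, stream_pos)
                self._entity_to_pos[entity] = max(stream_pos, old)

        def has_entity_changed(self, entity, stream_pos):
            if stream_pos < self._earliest_known_stream_pos:
                return True   # position predates the cache: assume changed
            if stream_pos == self._earliest_known_stream_pos:
                return False  # equal to the earliest key: nothing newer known
            last = self._entity_to_pos.get(entity)
            if last is None:
                return True   # entity not tracked (e.g. evicted): play safe
            return stream_pos < last

        def get_entities_changed(self, entities, stream_pos):
            # Subset of `entities` that may have changed since `stream_pos`;
            # if the position is too old, fall back to the whole list.
            if stream_pos < self._earliest_known_stream_pos:
                return set(entities)
            return set(
                e for e in entities
                if self._entity_to_pos.get(e, stream_pos) > stream_pos
            )

    cache = SketchStreamChangeCache(current_stream_pos=10)
    cache.entity_has_changed("@user:hs", 12)
    assert cache.has_entity_changed("@user:hs", 11)      # changed at 12
    assert not cache.has_entity_changed("@user:hs", 12)  # nothing after 12
    assert not cache.has_entity_changed("@user:hs", 10)  # the earliest key
    assert cache.has_entity_changed("@user:hs", 9)       # too old to answer
    assert cache.get_entities_changed(["@user:hs", "@a:hs"], 11) == {"@user:hs"}
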
--- synapse/util/caches/stream_change_cache.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index 22a9f8f46..c673b1bdf 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -49,6 +49,11 @@ class StreamChangeCache(object): cache_counter.inc_misses(self.name) return True + if stream_pos == self._earliest_known_stream_pos: + # If the same as the earliest key, assume nothing has changed. + cache_counter.inc_hits(self.name) + return False + latest_entity_change_pos = self._entity_to_key.get(entity, None) if latest_entity_change_pos is None: cache_counter.inc_misses(self.name) From 467c27a1f90b873d6838ad1351399551cfa9cc07 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Jan 2016 18:20:00 +0000 Subject: [PATCH 22/30] Amalgamate tags and account data stream caches --- synapse/storage/account_data.py | 3 ++- synapse/storage/tags.py | 18 +++++++----------- 2 files changed, 9 insertions(+), 12 deletions(-) diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py index 88404059e..822c8bbe0 100644 --- a/synapse/storage/account_data.py +++ b/synapse/storage/account_data.py @@ -28,7 +28,8 @@ class AccountDataStore(SQLBaseStore): super(AccountDataStore, self).__init__(hs) self._account_data_stream_cache = StreamChangeCache( - "AccountDataChangeCache", self._account_data_id_gen.get_max_token(None), + "AccountDataAndTagsChangeCache", + self._account_data_id_gen.get_max_token(None), max_size=1000, ) diff --git a/synapse/storage/tags.py b/synapse/storage/tags.py index 75ce04092..e1a9c0c26 100644 --- a/synapse/storage/tags.py +++ b/synapse/storage/tags.py @@ -15,7 +15,6 @@ from ._base import SQLBaseStore from synapse.util.caches.descriptors import cached -from synapse.util.caches.stream_change_cache import StreamChangeCache from twisted.internet import defer import ujson as json @@ -25,14 +24,6 @@ logger = logging.getLogger(__name__) class TagsStore(SQLBaseStore): - def __init__(self, hs): - super(TagsStore, self).__init__(hs) - - self._tags_stream_cache = StreamChangeCache( - "TagsChangeCache", self._account_data_id_gen.get_max_token(None), - max_size=1000, - ) - def get_max_account_data_stream_id(self): """Get the current max stream id for the private user data stream @@ -88,7 +79,9 @@ class TagsStore(SQLBaseStore): room_ids = [row[0] for row in txn.fetchall()] return room_ids - changed = self._tags_stream_cache.has_entity_changed(user_id, int(stream_id)) + changed = self._account_data_stream_cache.has_entity_changed( + user_id, int(stream_id) + ) if not changed: defer.returnValue({}) @@ -189,7 +182,10 @@ class TagsStore(SQLBaseStore): next_id(int): The the revision to advance to. 
""" - txn.call_after(self._tags_stream_cache.entity_has_changed, user_id, next_id) + txn.call_after( + self._account_data_stream_cache.entity_has_changed, + user_id, next_id + ) update_max_id_sql = ( "UPDATE account_data_max_stream_id" From 50e18938a96583b2dc8cbc69aa7182a35e3435af Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 29 Jan 2016 10:00:45 +0000 Subject: [PATCH 23/30] Reset size on clear --- synapse/util/caches/lrucache.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index e6a66dc04..5182cadb9 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -37,6 +37,7 @@ class LruCache(object): """ def __init__(self, max_size, keylen=1, cache_type=dict): cache = cache_type() + self.cache = cache # Used for introspection. self.size = 0 list_root = [] list_root[:] = [list_root, list_root, None, None] @@ -142,6 +143,7 @@ class LruCache(object): list_root[NEXT] = list_root list_root[PREV] = list_root cache.clear() + self.size = 0 @synchronized def cache_len(): From 766526e1142e7ad0ffb43bd075b0ff2d6265e4cb Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 29 Jan 2016 10:11:21 +0000 Subject: [PATCH 24/30] Make TreeCache keep track of its own size. --- synapse/util/caches/lrucache.py | 9 +++------ synapse/util/caches/treecache.py | 7 +++++++ 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index 5182cadb9..ca9ffdf1b 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -38,7 +38,6 @@ class LruCache(object): def __init__(self, max_size, keylen=1, cache_type=dict): cache = cache_type() self.cache = cache # Used for introspection. - self.size = 0 list_root = [] list_root[:] = [list_root, list_root, None, None] @@ -61,7 +60,6 @@ class LruCache(object): prev_node[NEXT] = node next_node[PREV] = node cache[key] = node - self.size += 1 def move_node_to_front(node): prev_node = node[PREV] @@ -80,7 +78,6 @@ class LruCache(object): next_node = node[NEXT] prev_node[NEXT] = next_node next_node[PREV] = prev_node - self.size -= 1 @synchronized def cache_get(key, default=None): @@ -99,7 +96,7 @@ class LruCache(object): node[VALUE] = value else: add_node(key, value) - if self.size > max_size: + if len(cache) > max_size: todelete = list_root[PREV] delete_node(todelete) cache.pop(todelete[KEY], None) @@ -111,7 +108,7 @@ class LruCache(object): return node[VALUE] else: add_node(key, value) - if self.size > max_size: + if len(cache) > max_size: todelete = list_root[PREV] delete_node(todelete) cache.pop(todelete[KEY], None) @@ -147,7 +144,7 @@ class LruCache(object): @synchronized def cache_len(): - return self.size + return len(cache) @synchronized def cache_contains(key): diff --git a/synapse/util/caches/treecache.py b/synapse/util/caches/treecache.py index 3b5886091..a29ea8144 100644 --- a/synapse/util/caches/treecache.py +++ b/synapse/util/caches/treecache.py @@ -8,6 +8,7 @@ class TreeCache(object): Keys must be tuples. 
""" def __init__(self): + self.size = 0 self.root = {} def __setitem__(self, key, value): @@ -21,6 +22,7 @@ class TreeCache(object): for k in key[:-1]: node = node.setdefault(k, {}) node[key[-1]] = value + self.size += 1 def get(self, key, default=None): node = self.root @@ -31,6 +33,7 @@ class TreeCache(object): return node.get(key[-1], default) def clear(self): + self.size = 0 self.root = {} def pop(self, key, default=None): @@ -57,4 +60,8 @@ class TreeCache(object): break node_and_keys[i+1][0].pop(k) + self.size -= 1 return popped + + def __len__(self): + return self.size From a30364c1f99bdd7d5cb0fe82ebdfe52d996defef Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 29 Jan 2016 10:44:46 +0000 Subject: [PATCH 25/30] Correctly bookkeep the size of TreeCache --- synapse/util/caches/treecache.py | 31 ++++++++++++++++++++++++++++--- 1 file changed, 28 insertions(+), 3 deletions(-) diff --git a/synapse/util/caches/treecache.py b/synapse/util/caches/treecache.py index a29ea8144..3331ea9eb 100644 --- a/synapse/util/caches/treecache.py +++ b/synapse/util/caches/treecache.py @@ -21,7 +21,7 @@ class TreeCache(object): node = self.root for k in key[:-1]: node = node.setdefault(k, {}) - node[key[-1]] = value + node[key[-1]] = _Entry(value) self.size += 1 def get(self, key, default=None): @@ -30,7 +30,7 @@ class TreeCache(object): node = node.get(k, None) if node is None: return default - return node.get(key[-1], default) + return node.get(key[-1], _Entry(default)).value def clear(self): self.size = 0 @@ -60,8 +60,33 @@ class TreeCache(object): break node_and_keys[i+1][0].pop(k) - self.size -= 1 + popped, cnt = _strip_and_count_entires(popped) + self.size -= cnt return popped def __len__(self): return self.size + + +class _Entry(object): + __slots__ = ["value"] + + def __init__(self, value): + object.__setattr__(self, "value", value) + + +def _strip_and_count_entires(d): + """Takes an _Entry or dict with leaves of _Entry's, and either returns the + value or a dictionary with _Entry's replaced by their values. 
+ + Also returns the count of _Entry's + """ + if isinstance(d, dict): + cnt = 0 + for key, value in d.items(): + v, n = _strip_and_count_entires(value) + d[key] = v + cnt += n + return d, cnt + else: + return d.value, 1 From c046630c330b5abc4403f15afa2ffb13b7b8aa08 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 29 Jan 2016 11:17:54 +0000 Subject: [PATCH 26/30] Remove spurious self.size --- synapse/util/caches/lrucache.py | 1 - 1 file changed, 1 deletion(-) diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index ca9ffdf1b..f7423f2fa 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -140,7 +140,6 @@ class LruCache(object): list_root[NEXT] = list_root list_root[PREV] = list_root cache.clear() - self.size = 0 @synchronized def cache_len(): From fb7299800feb0b8bc0f8429fc5b840abea0609d3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 29 Jan 2016 11:29:14 +0000 Subject: [PATCH 27/30] Directly set self.value --- synapse/util/caches/treecache.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/util/caches/treecache.py b/synapse/util/caches/treecache.py index 3331ea9eb..29d02f7e9 100644 --- a/synapse/util/caches/treecache.py +++ b/synapse/util/caches/treecache.py @@ -72,7 +72,7 @@ class _Entry(object): __slots__ = ["value"] def __init__(self, value): - object.__setattr__(self, "value", value) + self.value = value def _strip_and_count_entires(d): From 4fce59f2747d9c73784470e84396a09ae70bbda7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 29 Jan 2016 11:33:11 +0000 Subject: [PATCH 28/30] Add tests --- tests/util/test_lrucache.py | 7 +++++++ tests/util/test_treecache.py | 12 ++++++++++++ 2 files changed, 19 insertions(+) diff --git a/tests/util/test_lrucache.py b/tests/util/test_lrucache.py index 2cd3d2645..bab366fb7 100644 --- a/tests/util/test_lrucache.py +++ b/tests/util/test_lrucache.py @@ -19,6 +19,7 @@ from .. import unittest from synapse.util.caches.lrucache import LruCache from synapse.util.caches.treecache import TreeCache + class LruCacheTestCase(unittest.TestCase): def test_get_set(self): @@ -72,3 +73,9 @@ class LruCacheTestCase(unittest.TestCase): self.assertEquals(cache.get(("vehicles", "car")), "vroom") self.assertEquals(cache.get(("vehicles", "train")), "chuff") # Man from del_multi say "Yes". 
+ + def test_clear(self): + cache = LruCache(1) + cache["key"] = 1 + cache.clear() + self.assertEquals(len(cache), 0) diff --git a/tests/util/test_treecache.py b/tests/util/test_treecache.py index 9946ceb3f..1efbeb6b3 100644 --- a/tests/util/test_treecache.py +++ b/tests/util/test_treecache.py @@ -25,6 +25,7 @@ class TreeCacheTestCase(unittest.TestCase): cache[("b",)] = "B" self.assertEquals(cache.get(("a",)), "A") self.assertEquals(cache.get(("b",)), "B") + self.assertEquals(len(cache), 2) def test_pop_onelevel(self): cache = TreeCache() @@ -33,6 +34,7 @@ class TreeCacheTestCase(unittest.TestCase): self.assertEquals(cache.pop(("a",)), "A") self.assertEquals(cache.pop(("a",)), None) self.assertEquals(cache.get(("b",)), "B") + self.assertEquals(len(cache), 1) def test_get_set_twolevel(self): cache = TreeCache() @@ -42,6 +44,7 @@ class TreeCacheTestCase(unittest.TestCase): self.assertEquals(cache.get(("a", "a")), "AA") self.assertEquals(cache.get(("a", "b")), "AB") self.assertEquals(cache.get(("b", "a")), "BA") + self.assertEquals(len(cache), 3) def test_pop_twolevel(self): cache = TreeCache() @@ -53,6 +56,7 @@ class TreeCacheTestCase(unittest.TestCase): self.assertEquals(cache.get(("a", "b")), "AB") self.assertEquals(cache.pop(("b", "a")), "BA") self.assertEquals(cache.pop(("b", "a")), None) + self.assertEquals(len(cache), 1) def test_pop_mixedlevel(self): cache = TreeCache() @@ -64,3 +68,11 @@ class TreeCacheTestCase(unittest.TestCase): self.assertEquals(cache.get(("a", "a")), None) self.assertEquals(cache.get(("a", "b")), None) self.assertEquals(cache.get(("b", "a")), "BA") + self.assertEquals(len(cache), 1) + + def test_clear(self): + cache = TreeCache() + cache[("a",)] = "A" + cache[("b",)] = "B" + cache.clear() + self.assertEquals(len(cache), 0) From ea320d34641f98c7aa477e2340ff265c1ac63419 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 29 Jan 2016 13:34:48 +0000 Subject: [PATCH 29/30] Don't work out unread_notifs_for_room_id unless needed --- synapse/handlers/sync.py | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 51ec4702d..075566417 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -697,17 +697,6 @@ class SyncHandler(BaseHandler): if just_joined: state = yield self.get_state_at(room_id, now_token) - notifs = yield self.unread_notifs_for_room_id( - room_id, sync_config, all_ephemeral_by_room - ) - - unread_notifications = {} - if notifs is not None: - unread_notifications["notification_count"] = len(notifs) - unread_notifications["highlight_count"] = len([ - 1 for notif in notifs if _action_has_highlight(notif["actions"]) - ]) - state = { (e.type, e.state_key): e for e in sync_config.filter_collection.filter_room_state(state.values()) @@ -725,6 +714,7 @@ class SyncHandler(BaseHandler): ephemeral_by_room.get(room_id, []) ) + unread_notifications = {} room_sync = JoinedSyncResult( room_id=room_id, timeline=batch, @@ -734,6 +724,17 @@ class SyncHandler(BaseHandler): unread_notifications=unread_notifications, ) + if room_sync: + notifs = yield self.unread_notifs_for_room_id( + room_id, sync_config, all_ephemeral_by_room + ) + + if notifs is not None: + unread_notifications["notification_count"] = len(notifs) + unread_notifications["highlight_count"] = len([ + 1 for notif in notifs if _action_has_highlight(notif["actions"]) + ]) + logger.debug("Room sync: %r", room_sync) defer.returnValue(room_sync) From ebc5f00efed5c7f72601933f55032947077c50a0 Mon Sep 17 
00:00:00 2001 From: Erik Johnston Date: Fri, 29 Jan 2016 13:37:40 +0000 Subject: [PATCH 30/30] Bump AccountDataAndTagsChangeCache size --- synapse/storage/account_data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py index 822c8bbe0..ed6587429 100644 --- a/synapse/storage/account_data.py +++ b/synapse/storage/account_data.py @@ -30,7 +30,7 @@ class AccountDataStore(SQLBaseStore): self._account_data_stream_cache = StreamChangeCache( "AccountDataAndTagsChangeCache", self._account_data_id_gen.get_max_token(None), - max_size=1000, + max_size=10000, ) def get_account_data_for_user(self, user_id):
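
A note on the TreeCache bookkeeping above (patches 24-27): the reason every
leaf is wrapped in an _Entry marker is that popping a key prefix removes a
whole subtree, and the cache must know how many leaves it just lost for
len() to stay accurate. The snippet below is a self-contained sketch of that
counting step; the strip_and_count name is simplified from the patch's
_strip_and_count_entires, and the hand-built dict stands in for what
TreeCache stores internally:

    class _Entry(object):
        __slots__ = ["value"]

        def __init__(self, value):
            self.value = value

    def strip_and_count(d):
        # Replace _Entry leaves with their raw values, returning
        # (stripped structure, number of leaves seen).
        if isinstance(d, dict):
            cnt = 0
            for key, value in d.items():
                v, n = strip_and_count(value)
                d[key] = v
                cnt += n
            return d, cnt
        return d.value, 1

    # The subtree TreeCache would hold after cache[("a", "a")] = "AA" and
    # cache[("a", "b")] = "AB"; popping the ("a",) prefix returns it whole.
    popped = {"a": _Entry("AA"), "b": _Entry("AB")}
    stripped, removed = strip_and_count(popped)
    assert stripped == {"a": "AA", "b": "AB"}
    assert removed == 2  # the amount pop() subtracts from self.size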