From 07340cdacad901a55abd0811a1fca86061b752bd Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Fri, 28 Sep 2018 01:42:53 +0100 Subject: [PATCH 01/85] untested stab at autocreating autojoin rooms --- synapse/config/registration.py | 4 ++++ synapse/handlers/register.py | 18 ++++++++++++++++++ 2 files changed, 22 insertions(+) diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 0fb964eb6..686c7fa9f 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -44,6 +44,7 @@ class RegistrationConfig(Config): ) self.auto_join_rooms = config.get("auto_join_rooms", []) + self.autocreate_auto_join_rooms = config.get("autocreate_auto_join_rooms", true) def default_config(self, **kwargs): registration_shared_secret = random_string_with_symbols(50) @@ -98,6 +99,9 @@ class RegistrationConfig(Config): # to these rooms #auto_join_rooms: # - "#example:example.com" + + # Have first user on server autocreate autojoin rooms + autocreate_auto_join_rooms: true """ % locals() def add_arguments(self, parser): diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index da914c46f..0e5337d26 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -50,6 +50,8 @@ class RegistrationHandler(BaseHandler): self._auth_handler = hs.get_auth_handler() self.profile_handler = hs.get_profile_handler() self.user_directory_handler = hs.get_user_directory_handler() + self._room_creation_handler = hs.get_room_creation_handler() + self._directory_handler = hs.get_handlers().directory_handler self.captcha_client = CaptchaServerHttpClient(hs) self._next_generated_user_id = None @@ -513,6 +515,22 @@ class RegistrationHandler(BaseHandler): @defer.inlineCallbacks def _join_user_to_room(self, requester, room_identifier): + + # try to create the room if we're the first user on the server + if self.config.autocreate_auto_join_rooms: + count = yield self.store.count_all_users() + if count == 1 and RoomAlias.is_valid(room_identifier): + info = yield self._room_creation_handler.create_room( + requester, + config={ + "preset": "public_chat", + }, + ratelimit=False, + ) + room_id = info["room_id"] + + yield create_association(self, requester.user, room_identifier, room_id) + room_id = None room_member_handler = self.hs.get_room_member_handler() if RoomID.is_valid(room_identifier): From 8f646f2d04e7d21aa0570826bd78a0bbc3bb706b Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Fri, 28 Sep 2018 15:37:28 +0100 Subject: [PATCH 02/85] fix UTs --- synapse/config/registration.py | 2 +- synapse/handlers/register.py | 13 +++++++++---- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 686c7fa9f..dcf2374ed 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -44,7 +44,7 @@ class RegistrationConfig(Config): ) self.auto_join_rooms = config.get("auto_join_rooms", []) - self.autocreate_auto_join_rooms = config.get("autocreate_auto_join_rooms", true) + self.autocreate_auto_join_rooms = config.get("autocreate_auto_join_rooms", True) def default_config(self, **kwargs): registration_shared_secret = random_string_with_symbols(50) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 0e5337d26..a358bfc72 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -50,8 +50,6 @@ class RegistrationHandler(BaseHandler): self._auth_handler = hs.get_auth_handler() self.profile_handler = hs.get_profile_handler() 
self.user_directory_handler = hs.get_user_directory_handler() - self._room_creation_handler = hs.get_room_creation_handler() - self._directory_handler = hs.get_handlers().directory_handler self.captcha_client = CaptchaServerHttpClient(hs) self._next_generated_user_id = None @@ -520,7 +518,8 @@ class RegistrationHandler(BaseHandler): if self.config.autocreate_auto_join_rooms: count = yield self.store.count_all_users() if count == 1 and RoomAlias.is_valid(room_identifier): - info = yield self._room_creation_handler.create_room( + room_creation_handler = hs.get_room_creation_handler() + info = yield room_creation_handler.create_room( requester, config={ "preset": "public_chat", @@ -529,7 +528,13 @@ class RegistrationHandler(BaseHandler): ) room_id = info["room_id"] - yield create_association(self, requester.user, room_identifier, room_id) + directory_handler = hs.get_handlers().directory_handler + yield directory_handler.create_association( + self, + requester.user, + room_identifier, + room_id + ) room_id = None room_member_handler = self.hs.get_room_member_handler() From 5b68f29f48acfa45e671af1fb325d0c0d532f3d6 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sat, 29 Sep 2018 02:14:40 +0100 Subject: [PATCH 03/85] fix thinkos --- synapse/handlers/register.py | 12 ++++++------ synapse/storage/directory.py | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index a358bfc72..05e8f4ea7 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -515,10 +515,10 @@ class RegistrationHandler(BaseHandler): def _join_user_to_room(self, requester, room_identifier): # try to create the room if we're the first user on the server - if self.config.autocreate_auto_join_rooms: + if self.hs.config.autocreate_auto_join_rooms: count = yield self.store.count_all_users() if count == 1 and RoomAlias.is_valid(room_identifier): - room_creation_handler = hs.get_room_creation_handler() + room_creation_handler = self.hs.get_room_creation_handler() info = yield room_creation_handler.create_room( requester, config={ @@ -528,11 +528,11 @@ class RegistrationHandler(BaseHandler): ) room_id = info["room_id"] - directory_handler = hs.get_handlers().directory_handler + directory_handler = self.hs.get_handlers().directory_handler + room_alias = RoomAlias.from_string(room_identifier) yield directory_handler.create_association( - self, - requester.user, - room_identifier, + requester.user.to_string(), + room_alias, room_id ) diff --git a/synapse/storage/directory.py b/synapse/storage/directory.py index cfb687cb5..61a029a53 100644 --- a/synapse/storage/directory.py +++ b/synapse/storage/directory.py @@ -90,7 +90,7 @@ class DirectoryWorkerStore(SQLBaseStore): class DirectoryStore(DirectoryWorkerStore): @defer.inlineCallbacks def create_room_alias_association(self, room_alias, room_id, servers, creator=None): - """ Creates an associatin between a room alias and room_id/servers + """ Creates an association between a room alias and room_id/servers Args: room_alias (RoomAlias) From 23b6a0537f9e2b46a5857a7c7bd0a03d04d4095d Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sat, 29 Sep 2018 02:19:37 +0100 Subject: [PATCH 04/85] emit room aliases event --- synapse/handlers/register.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 05e8f4ea7..7584064d5 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -531,9 
+531,14 @@ class RegistrationHandler(BaseHandler): directory_handler = self.hs.get_handlers().directory_handler room_alias = RoomAlias.from_string(room_identifier) yield directory_handler.create_association( - requester.user.to_string(), - room_alias, - room_id + user_id=requester.user.to_string(), + room_alias=room_alias, + room_id=room_id, + servers=[self.hs.hostname], + ) + + yield directory_handler.send_room_alias_update_event( + requester, requester.user.to_string(), room_id ) room_id = None From faa462ef79529f6b1802764038fd7365dfc37385 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Sat, 29 Sep 2018 02:21:01 +0100 Subject: [PATCH 05/85] changelog --- changelog.d/3975.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3975.feature diff --git a/changelog.d/3975.feature b/changelog.d/3975.feature new file mode 100644 index 000000000..5cd8ad6cc --- /dev/null +++ b/changelog.d/3975.feature @@ -0,0 +1 @@ +First user should autocreate autojoin rooms From 17d585753f1df2b2c2b13ddb8171e174cef97aac Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 4 Oct 2018 15:18:52 +0100 Subject: [PATCH 06/85] Delete unreferened state groups during purge --- synapse/storage/events.py | 33 +++++++++++++++++++++----- synapse/storage/state.py | 50 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 77 insertions(+), 6 deletions(-) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index e7487311c..0fb190530 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -2025,6 +2025,7 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore logger.info("[purge] finding state groups which depend on redundant" " state groups") remaining_state_groups = [] + unreferenced_state_groups = 0 for i in range(0, len(state_rows), 100): chunk = [sg for sg, in state_rows[i:i + 100]] # look for state groups whose prev_state_group is one we are about @@ -2037,13 +2038,33 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore retcols=["state_group"], keyvalues={}, ) - remaining_state_groups.extend( - row["state_group"] for row in rows - # exclude state groups we are about to delete: no point in - # updating them - if row["state_group"] not in state_groups_to_delete - ) + for row in rows: + sg = row["state_group"] + + if sg in state_groups_to_delete: + # exclude state groups we are about to delete: no point in + # updating them + continue + + if not self._is_state_group_referenced(txn, sg): + # Let's also delete unreferenced state groups while we're + # here, since otherwise we'd need to de-delta them + state_groups_to_delete.add(sg) + unreferenced_state_groups += 1 + continue + + remaining_state_groups.append(sg) + + logger.info( + "[purge] found %i extra unreferenced state groups to delete", + unreferenced_state_groups, + ) + + logger.info( + "[purge] de-delta-ing %i remaining state groups", + len(remaining_state_groups), + ) # Now we turn the state groups that reference to-be-deleted state # groups to non delta versions. diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 3f4cbd61c..b88c7dc09 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -1041,6 +1041,56 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): return count + def _is_state_group_referenced(self, txn, state_group): + """Checks if a given state group is referenced, or is safe to delete. + + A state groups is referenced if it or any of its descendants are + pointed at by an event. 
(A descendant is a group which has the given + state_group as a prev group) + """ + + # We check this by doing a depth first search to look for any + # descendant referenced by `event_to_state_groups`. + + # State groups we need to check, contains state groups that are + # descendants of `state_group` + state_groups_to_search = [state_group] + + # Set of state groups we've already checked + state_groups_searched = set() + + while state_groups_to_search: + state_group = state_groups_to_search.pop() # Next state group to check + + is_referenced = self._simple_select_one_onecol_txn( + txn, + table="event_to_state_groups", + keyvalues={"state_group": state_group}, + retcol="event_id", + allow_none=True, + ) + if is_referenced: + # A descendant is referenced by event_to_state_groups, so + # original state group is referenced. + return True + + state_groups_searched.add(state_group) + + # Find all children of current state group and add to search + references = self._simple_select_onecol_txn( + txn, + table="state_group_edges", + keyvalues={"prev_state_group": state_group}, + retcol="state_group", + ) + state_groups_to_search.extend(references) + + # Lets be paranoid and check for cycles + if state_groups_searched.intersection(references): + raise Exception("State group %s has cyclic dependency", state_group) + + return False + class StateStore(StateGroupWorkerStore, BackgroundUpdateStore): """ Keeps track of the state at a given event. From 4917ff55234718c0e650c6dc2a1117304465b9be Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 4 Oct 2018 15:24:01 +0100 Subject: [PATCH 07/85] Add state_group index to event_to_state_groups This is needed to efficiently check for unreferenced state groups during purge. --- synapse/storage/prepare_database.py | 2 +- .../52/add_event_to_state_group_index.sql | 19 +++++++++++++++++++ synapse/storage/state.py | 7 +++++++ 3 files changed, 27 insertions(+), 1 deletion(-) create mode 100644 synapse/storage/schema/delta/52/add_event_to_state_group_index.sql diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index b36471931..bd740e1e4 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -25,7 +25,7 @@ logger = logging.getLogger(__name__) # Remember to update this number every time a change is made to database # schema files, so the users will be informed on server restarts. -SCHEMA_VERSION = 51 +SCHEMA_VERSION = 52 dir_path = os.path.abspath(os.path.dirname(__file__)) diff --git a/synapse/storage/schema/delta/52/add_event_to_state_group_index.sql b/synapse/storage/schema/delta/52/add_event_to_state_group_index.sql new file mode 100644 index 000000000..91e03d13e --- /dev/null +++ b/synapse/storage/schema/delta/52/add_event_to_state_group_index.sql @@ -0,0 +1,19 @@ +/* Copyright 2018 New Vector Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- This is needed to efficiently check for unreferenced state groups during +-- purge. 
Added events_to_state_group(state_group) index +INSERT into background_updates (update_name, progress_json) + VALUES ('event_to_state_groups_sg_index', '{}'); diff --git a/synapse/storage/state.py b/synapse/storage/state.py index b88c7dc09..3f08f447a 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -1114,6 +1114,7 @@ class StateStore(StateGroupWorkerStore, BackgroundUpdateStore): STATE_GROUP_DEDUPLICATION_UPDATE_NAME = "state_group_state_deduplication" STATE_GROUP_INDEX_UPDATE_NAME = "state_group_state_type_index" CURRENT_STATE_INDEX_UPDATE_NAME = "current_state_members_idx" + EVENT_STATE_GROUP_INDEX_UPDATE_NAME = "event_to_state_groups_sg_index" def __init__(self, db_conn, hs): super(StateStore, self).__init__(db_conn, hs) @@ -1132,6 +1133,12 @@ class StateStore(StateGroupWorkerStore, BackgroundUpdateStore): columns=["state_key"], where_clause="type='m.room.member'", ) + self.register_background_index_update( + self.EVENT_STATE_GROUP_INDEX_UPDATE_NAME, + index_name="event_to_state_groups_sg_index", + table="event_to_state_groups", + columns=["state_group"], + ) def _store_event_state_mappings_txn(self, txn, events_and_contexts): state_groups = {} From d9f3db5081a4306520cb3c3208564d3c923fbf0a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 4 Oct 2018 15:29:30 +0100 Subject: [PATCH 08/85] Newsfile --- changelog.d/4006.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/4006.misc diff --git a/changelog.d/4006.misc b/changelog.d/4006.misc new file mode 100644 index 000000000..35ffa1c2d --- /dev/null +++ b/changelog.d/4006.misc @@ -0,0 +1 @@ +Delete unreferenced state groups during history purge From 2dadc092b8f0e4cc1ac037ea54324efd906d4caf Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Thu, 4 Oct 2018 17:00:27 +0100 Subject: [PATCH 09/85] move logic into register, fix room alias localpart bug, tests --- synapse/handlers/register.py | 45 ++++++++++-------------- tests/handlers/test_register.py | 62 +++++++++++++++++++++++---------- tests/utils.py | 1 + 3 files changed, 62 insertions(+), 46 deletions(-) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 7584064d5..01cf7ab58 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -220,8 +220,26 @@ class RegistrationHandler(BaseHandler): # auto-join the user to any rooms we're supposed to dump them into fake_requester = create_requester(user_id) + + # try to create the room if we're the first user on the server + if self.hs.config.autocreate_auto_join_rooms: + count = yield self.store.count_all_users() + auto_create_rooms = count == 1 + for r in self.hs.config.auto_join_rooms: try: + if auto_create_rooms and RoomAlias.is_valid(r): + room_creation_handler = self.hs.get_room_creation_handler() + # create room expects the localpart of the room alias + room_alias_localpart = RoomAlias.from_string(r).localpart + yield room_creation_handler.create_room( + fake_requester, + config={ + "preset": "public_chat", + "room_alias_name": room_alias_localpart + }, + ratelimit=False, + ) yield self._join_user_to_room(fake_requester, r) except Exception as e: logger.error("Failed to join new user to %r: %r", r, e) @@ -514,33 +532,6 @@ class RegistrationHandler(BaseHandler): @defer.inlineCallbacks def _join_user_to_room(self, requester, room_identifier): - # try to create the room if we're the first user on the server - if self.hs.config.autocreate_auto_join_rooms: - count = yield self.store.count_all_users() - if count == 1 and 
RoomAlias.is_valid(room_identifier): - room_creation_handler = self.hs.get_room_creation_handler() - info = yield room_creation_handler.create_room( - requester, - config={ - "preset": "public_chat", - }, - ratelimit=False, - ) - room_id = info["room_id"] - - directory_handler = self.hs.get_handlers().directory_handler - room_alias = RoomAlias.from_string(room_identifier) - yield directory_handler.create_association( - user_id=requester.user.to_string(), - room_alias=room_alias, - room_id=room_id, - servers=[self.hs.hostname], - ) - - yield directory_handler.send_room_alias_update_event( - requester, requester.user.to_string(), room_id - ) - room_id = None room_member_handler = self.hs.get_room_member_handler() if RoomID.is_valid(room_identifier): diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index 7b4ade3df..a150a897a 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -19,7 +19,7 @@ from twisted.internet import defer from synapse.api.errors import ResourceLimitError from synapse.handlers.register import RegistrationHandler -from synapse.types import UserID, create_requester +from synapse.types import RoomAlias, UserID, create_requester from tests.utils import setup_test_homeserver @@ -41,30 +41,28 @@ class RegistrationTestCase(unittest.TestCase): self.mock_captcha_client = Mock() self.hs = yield setup_test_homeserver( self.addCleanup, - handlers=None, - http_client=None, expire_access_token=True, - profile_handler=Mock(), ) self.macaroon_generator = Mock( generate_access_token=Mock(return_value='secret') ) self.hs.get_macaroon_generator = Mock(return_value=self.macaroon_generator) - self.hs.handlers = RegistrationHandlers(self.hs) + # self.hs.handlers = RegistrationHandlers(self.hs) self.handler = self.hs.get_handlers().registration_handler self.store = self.hs.get_datastore() self.hs.config.max_mau_value = 50 self.lots_of_users = 100 self.small_number_of_users = 1 + self.requester = create_requester("@requester:test") + @defer.inlineCallbacks def test_user_is_created_and_logged_in_if_doesnt_exist(self): - local_part = "someone" - display_name = "someone" - user_id = "@someone:test" - requester = create_requester("@as:test") + frank = UserID.from_string("@frank:test") + user_id = frank.to_string() + requester = create_requester(user_id) result_user_id, result_token = yield self.handler.get_or_create_user( - requester, local_part, display_name + requester, frank.localpart, "Frankie" ) self.assertEquals(result_user_id, user_id) self.assertEquals(result_token, 'secret') @@ -78,12 +76,11 @@ class RegistrationTestCase(unittest.TestCase): token="jkv;g498752-43gj['eamb!-5", password_hash=None, ) - local_part = "frank" - display_name = "Frank" - user_id = "@frank:test" - requester = create_requester("@as:test") + local_part = frank.localpart + user_id = frank.to_string() + requester = create_requester(user_id) result_user_id, result_token = yield self.handler.get_or_create_user( - requester, local_part, display_name + requester, local_part, None ) self.assertEquals(result_user_id, user_id) self.assertEquals(result_token, 'secret') @@ -92,7 +89,7 @@ class RegistrationTestCase(unittest.TestCase): def test_mau_limits_when_disabled(self): self.hs.config.limit_usage_by_mau = False # Ensure does not throw exception - yield self.handler.get_or_create_user("requester", 'a', "display_name") + yield self.handler.get_or_create_user(self.requester, 'a', "display_name") @defer.inlineCallbacks def test_get_or_create_user_mau_not_blocked(self): 
@@ -101,7 +98,7 @@ class RegistrationTestCase(unittest.TestCase): return_value=defer.succeed(self.hs.config.max_mau_value - 1) ) # Ensure does not throw exception - yield self.handler.get_or_create_user("@user:server", 'c', "User") + yield self.handler.get_or_create_user(self.requester, 'c', "User") @defer.inlineCallbacks def test_get_or_create_user_mau_blocked(self): @@ -110,13 +107,13 @@ class RegistrationTestCase(unittest.TestCase): return_value=defer.succeed(self.lots_of_users) ) with self.assertRaises(ResourceLimitError): - yield self.handler.get_or_create_user("requester", 'b', "display_name") + yield self.handler.get_or_create_user(self.requester, 'b', "display_name") self.store.get_monthly_active_count = Mock( return_value=defer.succeed(self.hs.config.max_mau_value) ) with self.assertRaises(ResourceLimitError): - yield self.handler.get_or_create_user("requester", 'b', "display_name") + yield self.handler.get_or_create_user(self.requester, 'b', "display_name") @defer.inlineCallbacks def test_register_mau_blocked(self): @@ -147,3 +144,30 @@ class RegistrationTestCase(unittest.TestCase): ) with self.assertRaises(ResourceLimitError): yield self.handler.register_saml2(localpart="local_part") + + @defer.inlineCallbacks + def test_auto_create_auto_join_rooms(self): + room_alias_str = "#room:test" + self.hs.config.autocreate_auto_join_rooms = True + self.hs.config.auto_join_rooms = [room_alias_str] + + res = yield self.handler.register(localpart='jeff') + rooms = yield self.store.get_rooms_for_user(res[0]) + + directory_handler = self.hs.get_handlers().directory_handler + room_alias = RoomAlias.from_string(room_alias_str) + room_id = yield directory_handler.get_association(room_alias) + + self.assertTrue(room_id['room_id'] in rooms) + self.assertEqual(len(rooms), 1) + + @defer.inlineCallbacks + def test_auto_create_auto_join_rooms_with_no_rooms(self): + self.hs.config.autocreate_auto_join_rooms = True + self.hs.config.auto_join_rooms = [] + frank = UserID.from_string("@frank:test") + res = yield self.handler.register(frank.localpart) + self.assertEqual(res[0], frank.to_string()) + rooms = yield self.store.get_rooms_for_user(res[0]) + + self.assertEqual(len(rooms), 0) diff --git a/tests/utils.py b/tests/utils.py index aaed1149c..d34c224fb 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -149,6 +149,7 @@ def setup_test_homeserver( config.block_events_without_consent_error = None config.media_storage_providers = [] config.auto_join_rooms = [] + config.autocreate_auto_join_rooms = True config.limit_usage_by_mau = False config.hs_disabled = False config.hs_disabled_message = "" From 67a1e315cc7f8b270ec87e0ab6e58aa2adaee32d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 12 Oct 2018 13:49:48 +0100 Subject: [PATCH 10/85] Fix up comments --- synapse/storage/state.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 3f08f447a..f7cf5c86c 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -1044,9 +1044,9 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): def _is_state_group_referenced(self, txn, state_group): """Checks if a given state group is referenced, or is safe to delete. - A state groups is referenced if it or any of its descendants are - pointed at by an event. (A descendant is a group which has the given - state_group as a prev group) + A state group is referenced if it or any of its descendants are + pointed at by an event. 
(A descendant is a state_group whose chain of + prev_groups includes the given state_group.) """ # We check this by doing a depth first search to look for any From a2bfb778c8a2a91bdff1a5824571d91f4f4536d3 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Fri, 12 Oct 2018 18:17:36 +0100 Subject: [PATCH 11/85] improve auto room join logic, comments and tests --- changelog.d/3975.feature | 2 +- synapse/config/registration.py | 11 ++++++++++- synapse/handlers/register.py | 11 ++++++++--- tests/handlers/test_register.py | 23 ++++++++++++++++++----- 4 files changed, 37 insertions(+), 10 deletions(-) diff --git a/changelog.d/3975.feature b/changelog.d/3975.feature index 5cd8ad6cc..496ba4f4a 100644 --- a/changelog.d/3975.feature +++ b/changelog.d/3975.feature @@ -1 +1 @@ -First user should autocreate autojoin rooms +Servers with auto join rooms, should autocreate those rooms when first user registers diff --git a/synapse/config/registration.py b/synapse/config/registration.py index dcf2374ed..43ff20a63 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -15,6 +15,8 @@ from distutils.util import strtobool +from synapse.config._base import ConfigError +from synapse.types import RoomAlias from synapse.util.stringutils import random_string_with_symbols from ._base import Config @@ -44,6 +46,9 @@ class RegistrationConfig(Config): ) self.auto_join_rooms = config.get("auto_join_rooms", []) + for room_alias in self.auto_join_rooms: + if not RoomAlias.is_valid(room_alias): + raise ConfigError('Invalid auto_join_rooms entry %s' % room_alias) self.autocreate_auto_join_rooms = config.get("autocreate_auto_join_rooms", True) def default_config(self, **kwargs): @@ -100,7 +105,11 @@ class RegistrationConfig(Config): #auto_join_rooms: # - "#example:example.com" - # Have first user on server autocreate autojoin rooms + # Where auto_join_rooms are specified, setting this flag ensures that the + # the rooms exists by creating them when the first user on the + # homeserver registers. + # Setting to false means that if the rooms are not manually created, + # users cannot be auto joined since they do not exist. 
autocreate_auto_join_rooms: true """ % locals() diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 01cf7ab58..2b269ab69 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -26,6 +26,7 @@ from synapse.api.errors import ( RegistrationError, SynapseError, ) +from synapse.config._base import ConfigError from synapse.http.client import CaptchaServerHttpClient from synapse.types import RoomAlias, RoomID, UserID, create_requester from synapse.util.async_helpers import Linearizer @@ -222,14 +223,19 @@ class RegistrationHandler(BaseHandler): fake_requester = create_requester(user_id) # try to create the room if we're the first user on the server + should_auto_create_rooms = False if self.hs.config.autocreate_auto_join_rooms: count = yield self.store.count_all_users() - auto_create_rooms = count == 1 + should_auto_create_rooms = count == 1 for r in self.hs.config.auto_join_rooms: try: - if auto_create_rooms and RoomAlias.is_valid(r): + if should_auto_create_rooms: room_creation_handler = self.hs.get_room_creation_handler() + if self.hs.hostname != RoomAlias.from_string(r).domain: + raise ConfigError( + 'Cannot create room alias %s, it does not match server domain' + ) # create room expects the localpart of the room alias room_alias_localpart = RoomAlias.from_string(r).localpart yield room_creation_handler.create_room( @@ -531,7 +537,6 @@ class RegistrationHandler(BaseHandler): @defer.inlineCallbacks def _join_user_to_room(self, requester, room_identifier): - room_id = None room_member_handler = self.hs.get_room_member_handler() if RoomID.is_valid(room_identifier): diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index a150a897a..3e9a19072 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -47,7 +47,6 @@ class RegistrationTestCase(unittest.TestCase): generate_access_token=Mock(return_value='secret') ) self.hs.get_macaroon_generator = Mock(return_value=self.macaroon_generator) - # self.hs.handlers = RegistrationHandlers(self.hs) self.handler = self.hs.get_handlers().registration_handler self.store = self.hs.get_datastore() self.hs.config.max_mau_value = 50 @@ -148,9 +147,7 @@ class RegistrationTestCase(unittest.TestCase): @defer.inlineCallbacks def test_auto_create_auto_join_rooms(self): room_alias_str = "#room:test" - self.hs.config.autocreate_auto_join_rooms = True self.hs.config.auto_join_rooms = [room_alias_str] - res = yield self.handler.register(localpart='jeff') rooms = yield self.store.get_rooms_for_user(res[0]) @@ -163,11 +160,27 @@ class RegistrationTestCase(unittest.TestCase): @defer.inlineCallbacks def test_auto_create_auto_join_rooms_with_no_rooms(self): - self.hs.config.autocreate_auto_join_rooms = True self.hs.config.auto_join_rooms = [] frank = UserID.from_string("@frank:test") res = yield self.handler.register(frank.localpart) self.assertEqual(res[0], frank.to_string()) rooms = yield self.store.get_rooms_for_user(res[0]) - + self.assertEqual(len(rooms), 0) + + @defer.inlineCallbacks + def test_auto_create_auto_join_where_room_is_another_domain(self): + self.hs.config.auto_join_rooms = ["#room:another"] + frank = UserID.from_string("@frank:test") + res = yield self.handler.register(frank.localpart) + self.assertEqual(res[0], frank.to_string()) + rooms = yield self.store.get_rooms_for_user(res[0]) + self.assertEqual(len(rooms), 0) + + @defer.inlineCallbacks + def test_auto_create_auto_join_where_auto_create_is_false(self): + self.hs.config.autocreate_auto_join_rooms = 
False + room_alias_str = "#room:test" + self.hs.config.auto_join_rooms = [room_alias_str] + res = yield self.handler.register(localpart='jeff') + rooms = yield self.store.get_rooms_for_user(res[0]) self.assertEqual(len(rooms), 0) From 1ccafb0c5e3e6c9c64ce231163e7c73f05b910fa Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Sat, 13 Oct 2018 21:14:21 +0100 Subject: [PATCH 12/85] no need to join room if creator --- synapse/handlers/register.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 2b269ab69..2f7bdb0a2 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -246,7 +246,8 @@ class RegistrationHandler(BaseHandler): }, ratelimit=False, ) - yield self._join_user_to_room(fake_requester, r) + else: + yield self._join_user_to_room(fake_requester, r) except Exception as e: logger.error("Failed to join new user to %r: %r", r, e) From c6584f4b5f9cc495478e03e01f85fd2399cf6f8d Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 17 Oct 2018 11:36:41 +0100 Subject: [PATCH 13/85] clean up config error logic and imports --- changelog.d/3975.feature | 2 +- synapse/config/registration.py | 9 ++++----- synapse/handlers/register.py | 30 ++++++++++++++++-------------- 3 files changed, 21 insertions(+), 20 deletions(-) diff --git a/changelog.d/3975.feature b/changelog.d/3975.feature index 496ba4f4a..79c2711fb 100644 --- a/changelog.d/3975.feature +++ b/changelog.d/3975.feature @@ -1 +1 @@ -Servers with auto join rooms, should autocreate those rooms when first user registers +Servers with auto-join rooms, should automatically create those rooms when first user registers diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 43ff20a63..4b9bf6f2d 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -15,11 +15,10 @@ from distutils.util import strtobool -from synapse.config._base import ConfigError from synapse.types import RoomAlias from synapse.util.stringutils import random_string_with_symbols -from ._base import Config +from ._base import Config, ConfigError class RegistrationConfig(Config): @@ -48,7 +47,7 @@ class RegistrationConfig(Config): self.auto_join_rooms = config.get("auto_join_rooms", []) for room_alias in self.auto_join_rooms: if not RoomAlias.is_valid(room_alias): - raise ConfigError('Invalid auto_join_rooms entry %s' % room_alias) + raise ConfigError('Invalid auto_join_rooms entry %s' % (room_alias,)) self.autocreate_auto_join_rooms = config.get("autocreate_auto_join_rooms", True) def default_config(self, **kwargs): @@ -106,10 +105,10 @@ class RegistrationConfig(Config): # - "#example:example.com" # Where auto_join_rooms are specified, setting this flag ensures that the - # the rooms exists by creating them when the first user on the + # the rooms exist by creating them when the first user on the # homeserver registers. # Setting to false means that if the rooms are not manually created, - # users cannot be auto joined since they do not exist. + # users cannot be auto-joined since they do not exist. 
autocreate_auto_join_rooms: true """ % locals() diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 2f7bdb0a2..1b5873c8d 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -26,7 +26,6 @@ from synapse.api.errors import ( RegistrationError, SynapseError, ) -from synapse.config._base import ConfigError from synapse.http.client import CaptchaServerHttpClient from synapse.types import RoomAlias, RoomID, UserID, create_requester from synapse.util.async_helpers import Linearizer @@ -51,6 +50,7 @@ class RegistrationHandler(BaseHandler): self._auth_handler = hs.get_auth_handler() self.profile_handler = hs.get_profile_handler() self.user_directory_handler = hs.get_user_directory_handler() + self.room_creation_handler = self.hs.get_room_creation_handler() self.captcha_client = CaptchaServerHttpClient(hs) self._next_generated_user_id = None @@ -231,21 +231,23 @@ class RegistrationHandler(BaseHandler): for r in self.hs.config.auto_join_rooms: try: if should_auto_create_rooms: - room_creation_handler = self.hs.get_room_creation_handler() if self.hs.hostname != RoomAlias.from_string(r).domain: - raise ConfigError( - 'Cannot create room alias %s, it does not match server domain' + logger.warn( + 'Cannot create room alias %s, ' + 'it does not match server domain' % (r,) + ) + raise SynapseError() + else: + # create room expects the localpart of the room alias + room_alias_localpart = RoomAlias.from_string(r).localpart + yield self.room_creation_handler.create_room( + fake_requester, + config={ + "preset": "public_chat", + "room_alias_name": room_alias_localpart + }, + ratelimit=False, ) - # create room expects the localpart of the room alias - room_alias_localpart = RoomAlias.from_string(r).localpart - yield room_creation_handler.create_room( - fake_requester, - config={ - "preset": "public_chat", - "room_alias_name": room_alias_localpart - }, - ratelimit=False, - ) else: yield self._join_user_to_room(fake_requester, r) except Exception as e: From 084046456ec88588779a62f9378c1a8e911bfc7c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 17 Oct 2018 16:14:04 +0100 Subject: [PATCH 14/85] Add config option to control alias creation --- synapse/config/homeserver.py | 3 +- synapse/config/room_directory.py | 101 ++++++++++++++++++++++++ synapse/federation/federation_server.py | 16 +--- synapse/handlers/directory.py | 9 +++ synapse/util/__init__.py | 21 +++++ 5 files changed, 135 insertions(+), 15 deletions(-) create mode 100644 synapse/config/room_directory.py diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py index b8d5690f2..10dd40159 100644 --- a/synapse/config/homeserver.py +++ b/synapse/config/homeserver.py @@ -31,6 +31,7 @@ from .push import PushConfig from .ratelimiting import RatelimitConfig from .registration import RegistrationConfig from .repository import ContentRepositoryConfig +from .room_directory import RoomDirectoryConfig from .saml2 import SAML2Config from .server import ServerConfig from .server_notices_config import ServerNoticesConfig @@ -49,7 +50,7 @@ class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig, WorkerConfig, PasswordAuthProviderConfig, PushConfig, SpamCheckerConfig, GroupsConfig, UserDirectoryConfig, ConsentConfig, - ServerNoticesConfig, + ServerNoticesConfig, RoomDirectoryConfig, ): pass diff --git a/synapse/config/room_directory.py b/synapse/config/room_directory.py new file mode 100644 index 000000000..41ef3217e --- /dev/null +++ b/synapse/config/room_directory.py @@ 
-0,0 +1,101 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from synapse.util import glob_to_regex + +from ._base import Config, ConfigError + + +class RoomDirectoryConfig(Config): + def read_config(self, config): + alias_creation_rules = config["alias_creation_rules"] + + self._alias_creation_rules = [ + _AliasRule(rule) + for rule in alias_creation_rules + ] + + def default_config(self, config_dir_path, server_name, **kwargs): + return """ + # The `alias_creation` option controls who's allowed to create aliases + # on this server. + # + # The format of this option is a list of rules that contain globs that + # match against user_id and the new alias (fully qualified with server + # name). The action in the first rule that matches is taken, which can + # currently either be "allowed" or "denied". + # + # If no rules match the request is denied. + alias_creation_rules: + - user_id: "*" + alias: "*" + action: allowed + """ + + def is_alias_creation_allowed(self, user_id, alias): + """Checks if the given user is allowed to create the given alias + + Args: + user_id (str) + alias (str) + + Returns: + boolean: True if user is allowed to crate the alias + """ + for rule in self._alias_creation_rules: + if rule.matches(user_id, alias): + return rule.action == "allowed" + + return False + + +class _AliasRule(object): + def __init__(self, rule): + action = rule["action"] + user_id = rule["user_id"] + alias = rule["alias"] + + if action in ("allowed", "denied"): + self.action = action + else: + raise ConfigError( + "alias_creation_rules rules can only have action of 'allowed'" + " or 'denied'" + ) + + try: + self._user_id_regex = glob_to_regex(user_id) + self._alias_regex = glob_to_regex(alias) + except Exception as e: + raise ConfigError("Failed to parse glob into regex: %s", e) + + def matches(self, user_id, alias): + """Tests if this rule matches the given user_id and alias. + + Args: + user_id (str) + alias (str) + + Returns: + boolean + """ + + if not self._user_id_regex.search(user_id): + return False + + if not self._alias_regex.search(alias): + return False + + return True diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 4efe95faa..d041c2682 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -14,7 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import logging -import re import six from six import iteritems @@ -44,6 +43,7 @@ from synapse.replication.http.federation import ( ReplicationGetQueryRestServlet, ) from synapse.types import get_domain_from_id +from synapse.util import glob_to_regex from synapse.util.async_helpers import Linearizer, concurrently_execute from synapse.util.caches.response_cache import ResponseCache from synapse.util.logcontext import nested_logging_context @@ -729,22 +729,10 @@ def _acl_entry_matches(server_name, acl_entry): if not isinstance(acl_entry, six.string_types): logger.warn("Ignoring non-str ACL entry '%s' (is %s)", acl_entry, type(acl_entry)) return False - regex = _glob_to_regex(acl_entry) + regex = glob_to_regex(acl_entry) return regex.match(server_name) -def _glob_to_regex(glob): - res = '' - for c in glob: - if c == '*': - res = res + '.*' - elif c == '?': - res = res + '.' - else: - res = res + re.escape(c) - return re.compile(res + "\\Z", re.IGNORECASE) - - class FederationHandlerRegistry(object): """Allows classes to register themselves as handlers for a given EDU or query type for incoming federation traffic. diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 02f12f664..7d67bf803 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -43,6 +43,7 @@ class DirectoryHandler(BaseHandler): self.state = hs.get_state_handler() self.appservice_handler = hs.get_application_service_handler() self.event_creation_handler = hs.get_event_creation_handler() + self.config = hs.config self.federation = hs.get_federation_client() hs.get_federation_registry().register_query_handler( @@ -111,6 +112,14 @@ class DirectoryHandler(BaseHandler): 403, "This user is not permitted to create this alias", ) + if not self.config.is_alias_creation_allowed(user_id, room_alias.to_string()): + # Lets just return a generic message, as there may be all sorts of + # reasons why we said no. TODO: Allow configurable error messages + # per alias creation rule? + raise SynapseError( + 403, "Not allowed to create alias", + ) + can_create = yield self.can_modify_alias( room_alias, user_id=user_id diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index 9a8fae049..163e4b35f 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -14,6 +14,7 @@ # limitations under the License. import logging +import re from itertools import islice import attr @@ -138,3 +139,23 @@ def log_failure(failure, msg, consumeErrors=True): if not consumeErrors: return failure + + +def glob_to_regex(glob): + """Converts a glob to a compiled regex object + + Args: + glob (str) + + Returns: + re.RegexObject + """ + res = '' + for c in glob: + if c == '*': + res = res + '.*' + elif c == '?': + res = res + '.' 
+ else: + res = res + re.escape(c) + return re.compile(res + "\\Z", re.IGNORECASE) From f9d6c677eac35c926339a904b1f0c8c9dbd9049a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 17 Oct 2018 16:36:39 +0100 Subject: [PATCH 15/85] Newsfile --- changelog.d/4051.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/4051.feature diff --git a/changelog.d/4051.feature b/changelog.d/4051.feature new file mode 100644 index 000000000..9c1b3a72a --- /dev/null +++ b/changelog.d/4051.feature @@ -0,0 +1 @@ +Add config option to control alias creation From 9fafdfa97d87006177d13d4b80aeebfc4ded4bee Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 18 Oct 2018 14:21:09 +0100 Subject: [PATCH 16/85] Anchor returned regex to start and end of string --- synapse/util/__init__.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index 163e4b35f..0ae7e2ef3 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -142,7 +142,9 @@ def log_failure(failure, msg, consumeErrors=True): def glob_to_regex(glob): - """Converts a glob to a compiled regex object + """Converts a glob to a compiled regex object. + + The regex is anchored at the beginning and end of the string. Args: glob (str) @@ -158,4 +160,6 @@ def glob_to_regex(glob): res = res + '.' else: res = res + re.escape(c) - return re.compile(res + "\\Z", re.IGNORECASE) + + # \A anchors at start of string, \Z at end of string + return re.compile(r"\A" + res + r"\Z", re.IGNORECASE) From 1b4bf232b9fd348a94b8bc4e9c851ed5b6d8e801 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 18 Oct 2018 16:04:50 +0100 Subject: [PATCH 17/85] Add tests for config generation --- tests/config/test_room_directory.py | 67 +++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) create mode 100644 tests/config/test_room_directory.py diff --git a/tests/config/test_room_directory.py b/tests/config/test_room_directory.py new file mode 100644 index 000000000..75021a5f0 --- /dev/null +++ b/tests/config/test_room_directory.py @@ -0,0 +1,67 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import yaml + +from synapse.config.room_directory import RoomDirectoryConfig + +from tests import unittest + + +class RoomDirectoryConfigTestCase(unittest.TestCase): + def test_alias_creation_acl(self): + config = yaml.load(""" + alias_creation_rules: + - user_id: "*bob*" + alias: "*" + action: "denied" + - user_id: "*" + alias: "#unofficial_*" + action: "allowed" + - user_id: "@foo*:example.com" + alias: "*" + action: "allowed" + - user_id: "@gah:example.com" + alias: "#goo:example.com" + action: "allowed" + """) + + rd_config = RoomDirectoryConfig() + rd_config.read_config(config) + + self.assertFalse(rd_config.is_alias_creation_allowed( + user_id="@bob:example.com", + alias="#test:example.com", + )) + + self.assertTrue(rd_config.is_alias_creation_allowed( + user_id="@test:example.com", + alias="#unofficial_st:example.com", + )) + + self.assertTrue(rd_config.is_alias_creation_allowed( + user_id="@foobar:example.com", + alias="#test:example.com", + )) + + self.assertTrue(rd_config.is_alias_creation_allowed( + user_id="@gah:example.com", + alias="#goo:example.com", + )) + + self.assertFalse(rd_config.is_alias_creation_allowed( + user_id="@test:example.com", + alias="#test:example.com", + )) From 3c580c2b47fab5f85c76a7061065e11b7d0eaeb8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 18 Oct 2018 16:14:41 +0100 Subject: [PATCH 18/85] Add tests for alias creation rules --- tests/handlers/test_directory.py | 48 ++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py index ec7355688..4f299b74b 100644 --- a/tests/handlers/test_directory.py +++ b/tests/handlers/test_directory.py @@ -18,7 +18,9 @@ from mock import Mock from twisted.internet import defer +from synapse.config.room_directory import RoomDirectoryConfig from synapse.handlers.directory import DirectoryHandler +from synapse.rest.client.v1 import directory, room from synapse.types import RoomAlias from tests import unittest @@ -102,3 +104,49 @@ class DirectoryTestCase(unittest.TestCase): ) self.assertEquals({"room_id": "!8765asdf:test", "servers": ["test"]}, response) + + +class TestCreateAliasACL(unittest.HomeserverTestCase): + user_id = "@test:test" + + servlets = [directory.register_servlets, room.register_servlets] + + def prepare(self, hs, reactor, clock): + # We cheekily override the config to add custom alias creation rules + config = {} + config["alias_creation_rules"] = [ + { + "user_id": "*", + "alias": "#unofficial_*", + "action": "allowed", + } + ] + + rd_config = RoomDirectoryConfig() + rd_config.read_config(config) + + self.hs.config.is_alias_creation_allowed = rd_config.is_alias_creation_allowed + + return hs + + def test_denied(self): + room_id = self.helper.create_room_as(self.user_id) + + request, channel = self.make_request( + "PUT", + b"directory/room/%23test%3Atest", + ('{"room_id":"%s"}' % (room_id,)).encode('ascii'), + ) + self.render(request) + self.assertEquals(403, channel.code, channel.result) + + def test_allowed(self): + room_id = self.helper.create_room_as(self.user_id) + + request, channel = self.make_request( + "PUT", + b"directory/room/%23unofficial_test%3Atest", + ('{"room_id":"%s"}' % (room_id,)).encode('ascii'), + ) + self.render(request) + self.assertEquals(200, channel.code, channel.result) From 47a9da28caa6a4f27d2df31f043971a2c9c7b555 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 12 Oct 2018 20:43:18 +0100 Subject: [PATCH 19/85] Batch process handling state groups --- 
synapse/storage/events.py | 81 ++++++++---------------------- synapse/storage/state.py | 100 +++++++++++++++++++++++++------------- 2 files changed, 86 insertions(+), 95 deletions(-) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 0fb190530..e4d0f8b1a 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -37,6 +37,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.background_updates import BackgroundUpdateStore from synapse.storage.event_federation import EventFederationStore from synapse.storage.events_worker import EventsWorkerStore +from synapse.storage.state import StateGroupWorkerStore from synapse.types import RoomStreamToken, get_domain_from_id from synapse.util.async_helpers import ObservableDeferred from synapse.util.caches.descriptors import cached, cachedInlineCallbacks @@ -203,7 +204,8 @@ def _retry_on_integrity_error(func): # inherits from EventFederationStore so that we can call _update_backward_extremities # and _handle_mult_prev_events (though arguably those could both be moved in here) -class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore): +class EventsStore(StateGroupWorkerStore, EventFederationStore, EventsWorkerStore, + BackgroundUpdateStore): EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts" EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url" @@ -1995,70 +1997,29 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore logger.info("[purge] finding redundant state groups") - # Get all state groups that are only referenced by events that are - # to be deleted. - # This works by first getting state groups that we may want to delete, - # joining against event_to_state_groups to get events that use that - # state group, then left joining against events_to_purge again. Any - # state group where the left join produce *no nulls* are referenced - # only by events that are going to be purged. + # Get all state groups that are referenced by events that are to be + # deleted. We then go and check if they are referenced by other events + # or state groups, and if not we delete them. 
txn.execute(""" - SELECT state_group FROM - ( - SELECT DISTINCT state_group FROM events_to_purge - INNER JOIN event_to_state_groups USING (event_id) - ) AS sp - INNER JOIN event_to_state_groups USING (state_group) - LEFT JOIN events_to_purge AS ep USING (event_id) - GROUP BY state_group - HAVING SUM(CASE WHEN ep.event_id IS NULL THEN 1 ELSE 0 END) = 0 + SELECT DISTINCT state_group FROM events_to_purge + INNER JOIN event_to_state_groups USING (event_id) """) - state_rows = txn.fetchall() - logger.info("[purge] found %i redundant state groups", len(state_rows)) + referenced_state_groups = set(sg for sg, in txn) + logger.info( + "[purge] found %i referenced state groups", + len(referenced_state_groups), + ) - # make a set of the redundant state groups, so that we can look them up - # efficiently - state_groups_to_delete = set([sg for sg, in state_rows]) + logger.info("[purge] finding state groups that can be deleted") - # Now we get all the state groups that rely on these state groups - logger.info("[purge] finding state groups which depend on redundant" - " state groups") - remaining_state_groups = [] - unreferenced_state_groups = 0 - for i in range(0, len(state_rows), 100): - chunk = [sg for sg, in state_rows[i:i + 100]] - # look for state groups whose prev_state_group is one we are about - # to delete - rows = self._simple_select_many_txn( - txn, - table="state_group_edges", - column="prev_state_group", - iterable=chunk, - retcols=["state_group"], - keyvalues={}, - ) - - for row in rows: - sg = row["state_group"] - - if sg in state_groups_to_delete: - # exclude state groups we are about to delete: no point in - # updating them - continue - - if not self._is_state_group_referenced(txn, sg): - # Let's also delete unreferenced state groups while we're - # here, since otherwise we'd need to de-delta them - state_groups_to_delete.add(sg) - unreferenced_state_groups += 1 - continue - - remaining_state_groups.append(sg) + state_groups_to_delete, remaining_state_groups = self._find_unreferenced_groups( + txn, referenced_state_groups, + ) logger.info( - "[purge] found %i extra unreferenced state groups to delete", - unreferenced_state_groups, + "[purge] found %i state groups to delete", + len(state_groups_to_delete), ) logger.info( @@ -2109,11 +2070,11 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore logger.info("[purge] removing redundant state groups") txn.executemany( "DELETE FROM state_groups_state WHERE state_group = ?", - state_rows + ((sg,) for sg in state_groups_to_delete), ) txn.executemany( "DELETE FROM state_groups WHERE id = ?", - state_rows + ((sg,) for sg in state_groups_to_delete), ) logger.info("[purge] removing events from event_to_state_groups") diff --git a/synapse/storage/state.py b/synapse/storage/state.py index f7cf5c86c..0f86311ed 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -1041,55 +1041,85 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): return count - def _is_state_group_referenced(self, txn, state_group): - """Checks if a given state group is referenced, or is safe to delete. + def _find_unreferenced_groups(self, txn, state_groups): + """Used when purging history to figure out which state groups can be + deleted and which need to be de-delta'ed (due to one of its prev groups + being scheduled for deletion). - A state group is referenced if it or any of its descendants are - pointed at by an event. (A descendant is a state_group whose chain of - prev_groups includes the given state_group.) 
+ Args: + txn + state_groups (set[int]): Set of state groups referenced by events + that are going to be deleted. + + Returns: + tuple[set[int], set[int]]: The set of state groups that can be + deleted and the set of state groups that need to be de-delta'ed """ + # Graph of state group -> previous group + graph = {} - # We check this by doing a depth first search to look for any - # descendant referenced by `event_to_state_groups`. + # Set of events that we have found to be referenced by events + referenced_groups = set() - # State groups we need to check, contains state groups that are - # descendants of `state_group` - state_groups_to_search = [state_group] + # Set of state groups we've already seen + state_groups_seen = set(state_groups) - # Set of state groups we've already checked - state_groups_searched = set() + # Set of state groups to handle next. + next_to_search = set(state_groups) + while next_to_search: + # We bound size of groups we're looking up at once, to stop the + # SQL query getting too big + if len(next_to_search) < 100: + current_search = next_to_search + next_to_search = set() + else: + lst = list(next_to_search) + current_search = set(lst[:100]) + next_to_search = set(lst[100:]) - while state_groups_to_search: - state_group = state_groups_to_search.pop() # Next state group to check + # Check if state groups are referenced + sql = """ + SELECT state_group, count(*) FROM event_to_state_groups + LEFT JOIN events_to_purge AS ep USING (event_id) + WHERE state_group IN (%s) AND ep.event_id IS NULL + GROUP BY state_group + """ % (",".join("?" for _ in current_search),) + txn.execute(sql, list(current_search)) - is_referenced = self._simple_select_one_onecol_txn( - txn, - table="event_to_state_groups", - keyvalues={"state_group": state_group}, - retcol="event_id", - allow_none=True, - ) - if is_referenced: - # A descendant is referenced by event_to_state_groups, so - # original state group is referenced. - return True + referenced = set(sg for sg, cnt in txn if cnt > 0) + referenced_groups |= referenced - state_groups_searched.add(state_group) + # We don't continue iterating up the state group graphs for state + # groups that are referenced. 
+ current_search -= referenced - # Find all children of current state group and add to search - references = self._simple_select_onecol_txn( + rows = self._simple_select_many_txn( txn, table="state_group_edges", - keyvalues={"prev_state_group": state_group}, - retcol="state_group", + column="prev_state_group", + iterable=current_search, + keyvalues={}, + retcols=("prev_state_group", "state_group",), ) - state_groups_to_search.extend(references) - # Lets be paranoid and check for cycles - if state_groups_searched.intersection(references): - raise Exception("State group %s has cyclic dependency", state_group) + next_to_search.update(row["state_group"] for row in rows) + # We don't bother re-handling groups we've already seen + next_to_search -= state_groups_seen + state_groups_seen |= next_to_search - return False + for row in rows: + # Note: Each state group can have at most one prev group + graph[row["state_group"]] = row["prev_state_group"] + + to_delete = state_groups_seen - referenced_groups + + to_dedelta = set() + for sg in referenced_groups: + prev_sg = graph.get(sg) + if prev_sg and prev_sg in to_delete: + to_dedelta.add(sg) + + return to_delete, to_dedelta class StateStore(StateGroupWorkerStore, BackgroundUpdateStore): From 67f7b9cb50c246226b94027e3c1d4b958ff9f840 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 19 Oct 2018 16:06:59 +0100 Subject: [PATCH 20/85] pep8 --- synapse/storage/events.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index af822fb69..379b5c514 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -2041,7 +2041,7 @@ class EventsStore(StateGroupWorkerStore, EventFederationStore, EventsWorkerStore INNER JOIN event_to_state_groups USING (event_id) """) - referenced_state_groups = set(sg for sg, in txn) + referenced_state_groups = set(sg for sg, in txn) logger.info( "[purge] found %i referenced state groups", len(referenced_state_groups), From eba48c0f16d08d5806da73d0b51d62de6aaee118 Mon Sep 17 00:00:00 2001 From: steamport Date: Fri, 19 Oct 2018 19:58:28 +0000 Subject: [PATCH 21/85] Add Caddy example to README --- README.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.rst b/README.rst index e1ea351f8..de133f674 100644 --- a/README.rst +++ b/README.rst @@ -652,6 +652,7 @@ Using a reverse proxy with Synapse It is recommended to put a reverse proxy such as `nginx `_, `Apache `_ or +`Caddy `_ or `HAProxy `_ in front of Synapse. One advantage of doing so is that it means that you can expose the default https port (443) to Matrix clients without needing to run Synapse with root privileges. @@ -682,6 +683,11 @@ so an example nginx configuration might look like:: } } +an example caddy configuration may look like:: + proxy /_matrix http://localhost:8008 { + transparent + } + and an example apache configuration may look like:: From b85fe45f46abd7ff1c42851ab6dbb2f4ff33758f Mon Sep 17 00:00:00 2001 From: steamport Date: Fri, 19 Oct 2018 21:55:38 +0000 Subject: [PATCH 22/85] Add CL --- changelog.d/4072.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/4072.misc diff --git a/changelog.d/4072.misc b/changelog.d/4072.misc new file mode 100644 index 000000000..9d7279fd2 --- /dev/null +++ b/changelog.d/4072.misc @@ -0,0 +1 @@ +The README now contains example for the Caddy web server. Contributed by steamp0rt. 
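To see the shape of the traversal that `_find_unreferenced_groups` introduces in patches 19-20 above, here is a minimal in-memory sketch with the SQL replaced by dict/set lookups (`children` maps a group to the groups that use it as their prev_group; these names are illustrative, not Synapse APIs):

    import itertools

    def find_unreferenced_groups(purged_groups, children, referenced):
        # purged_groups: groups referenced by the events being purged
        # children:      group -> set of groups that have it as prev_group
        # referenced:    groups still referenced by events we are keeping
        graph = {}                 # child group -> its prev group
        referenced_groups = set()  # groups we must keep
        seen = set(purged_groups)
        next_to_search = set(purged_groups)

        while next_to_search:
            # Bounded batches, mirroring the 100-element SQL chunks.
            batch = set(itertools.islice(next_to_search, 100))
            next_to_search -= batch

            still_used = batch & referenced
            referenced_groups |= still_used
            # Don't keep walking past groups we are going to keep.
            batch -= still_used

            for prev in batch:
                for child in children.get(prev, ()):
                    graph[child] = prev  # at most one prev per group
                    if child not in seen:
                        seen.add(child)
                        next_to_search.add(child)

        to_delete = seen - referenced_groups
        # Kept groups whose prev is going away must be de-delta'ed.
        to_dedelta = set(
            sg for sg in referenced_groups if graph.get(sg) in to_delete
        )
        return to_delete, to_dedelta

The two results line up with the two phases of the purge: rows for `to_delete` groups are removed outright, while `to_dedelta` groups have their full state written out before their prev groups disappear.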
From 08760b0d9ac7d1ecf48b6aaef662cf346da7368d Mon Sep 17 00:00:00 2001 From: steamport Date: Fri, 19 Oct 2018 21:57:28 +0000 Subject: [PATCH 23/85] Fix formatting. --- README.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/README.rst b/README.rst index de133f674..bb99f4fe2 100644 --- a/README.rst +++ b/README.rst @@ -684,6 +684,7 @@ so an example nginx configuration might look like:: } an example caddy configuration may look like:: + proxy /_matrix http://localhost:8008 { transparent } From 9c2f99a3b74ebd84c14314344bf7ff785b8ff1d1 Mon Sep 17 00:00:00 2001 From: steamport Date: Fri, 19 Oct 2018 21:59:14 +0000 Subject: [PATCH 24/85] Fix --- README.rst | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/README.rst b/README.rst index bb99f4fe2..b42a96a34 100644 --- a/README.rst +++ b/README.rst @@ -684,9 +684,10 @@ so an example nginx configuration might look like:: } an example caddy configuration may look like:: - - proxy /_matrix http://localhost:8008 { - transparent + example.com { + proxy /_matrix http://localhost:8008 { + transparent + } } and an example apache configuration may look like:: From 3f357583ce17e0816acbc373bc2179293212ba4e Mon Sep 17 00:00:00 2001 From: steamport Date: Fri, 19 Oct 2018 21:59:39 +0000 Subject: [PATCH 25/85] I HATE RST --- README.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/README.rst b/README.rst index b42a96a34..df9430f4c 100644 --- a/README.rst +++ b/README.rst @@ -684,6 +684,7 @@ so an example nginx configuration might look like:: } an example caddy configuration may look like:: + example.com { proxy /_matrix http://localhost:8008 { transparent From 5c3d6ea9c72125ad152823e73f52667e845e6a61 Mon Sep 17 00:00:00 2001 From: steamport Date: Fri, 19 Oct 2018 22:00:27 +0000 Subject: [PATCH 26/85] Whoops! --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index df9430f4c..209313ba3 100644 --- a/README.rst +++ b/README.rst @@ -685,7 +685,7 @@ so an example nginx configuration might look like:: an example caddy configuration may look like:: - example.com { + matrix.example.com { proxy /_matrix http://localhost:8008 { transparent } From 6340141300ea9370651b9dbcc2574fff96f45d48 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 22 Oct 2018 16:17:27 +0100 Subject: [PATCH 27/85] README.rst: fix minor grammar --- README.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.rst b/README.rst index 209313ba3..9f27c14c4 100644 --- a/README.rst +++ b/README.rst @@ -651,7 +651,7 @@ Using a reverse proxy with Synapse It is recommended to put a reverse proxy such as `nginx `_, -`Apache `_ or +`Apache `_, `Caddy `_ or `HAProxy `_ in front of Synapse. 
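Whichever proxy is used, it is easy to sanity-check that `/_matrix` is actually being forwarded. A sketch using the `requests` library against a placeholder hostname (`/_matrix/client/versions` is served by Synapse without authentication, so a 200 and a JSON body mean requests are reaching Synapse through the proxy):

    import requests

    # Placeholder hostname -- substitute whatever name the proxy serves.
    resp = requests.get("https://matrix.example.com/_matrix/client/versions")
    print(resp.status_code, resp.json())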
One advantage of doing so is that it means that you can expose the default https port (443) to @@ -683,7 +683,7 @@ so an example nginx configuration might look like:: } } -an example caddy configuration may look like:: +an example Caddy configuration might look like:: matrix.example.com { proxy /_matrix http://localhost:8008 { @@ -691,7 +691,7 @@ an example caddy configuration may look like:: } } -and an example apache configuration may look like:: +and an example Apache configuration might look like:: SSLEngine on From 6105c6101fa0f4560daf7d23dedcfd217530b02a Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Tue, 23 Oct 2018 15:24:58 +0100 Subject: [PATCH 28/85] fix race condiftion in calling initialise_reserved_users --- changelog.d/4081.bugfix | 2 + synapse/app/homeserver.py | 8 ---- synapse/storage/monthly_active_users.py | 46 ++++++++++++++++------ synapse/storage/registration.py | 16 ++++++-- tests/storage/test_monthly_active_users.py | 10 ++++- 5 files changed, 55 insertions(+), 27 deletions(-) create mode 100644 changelog.d/4081.bugfix diff --git a/changelog.d/4081.bugfix b/changelog.d/4081.bugfix new file mode 100644 index 000000000..f275acb61 --- /dev/null +++ b/changelog.d/4081.bugfix @@ -0,0 +1,2 @@ +Fix race condition in populating reserved users + diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 0b85b377e..593e1e75d 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -553,14 +553,6 @@ def run(hs): generate_monthly_active_users, ) - # XXX is this really supposed to be a background process? it looks - # like it needs to complete before some of the other stuff runs. - run_as_background_process( - "initialise_reserved_users", - hs.get_datastore().initialise_reserved_users, - hs.config.mau_limits_reserved_threepids, - ) - start_generate_monthly_active_users() if hs.config.limit_usage_by_mau: clock.looping_call(start_generate_monthly_active_users, 5 * 60 * 1000) diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py index 0fe8c8e24..26e577814 100644 --- a/synapse/storage/monthly_active_users.py +++ b/synapse/storage/monthly_active_users.py @@ -33,19 +33,28 @@ class MonthlyActiveUsersStore(SQLBaseStore): self._clock = hs.get_clock() self.hs = hs self.reserved_users = () + self.initialise_reserved_users( + dbconn.cursor(), hs.config.mau_limits_reserved_threepids + ) - @defer.inlineCallbacks - def initialise_reserved_users(self, threepids): - store = self.hs.get_datastore() + def initialise_reserved_users(self, txn, threepids): + """ + Ensures that reserved threepids are accounted for in the MAU table, should + be called on start up. + + Arguments: + threepids []: List of threepid dicts to reserve + """ reserved_user_list = [] # Do not add more reserved users than the total allowable number for tp in threepids[:self.hs.config.max_mau_value]: - user_id = yield store.get_user_id_by_threepid( + user_id = self.get_user_id_by_threepid_txn( + txn, tp["medium"], tp["address"] ) if user_id: - yield self.upsert_monthly_active_user(user_id) + self.upsert_monthly_active_user_txn(txn, user_id) reserved_user_list.append(user_id) else: logger.warning( @@ -55,8 +64,7 @@ class MonthlyActiveUsersStore(SQLBaseStore): @defer.inlineCallbacks def reap_monthly_active_users(self): - """ - Cleans out monthly active user table to ensure that no stale + """Cleans out monthly active user table to ensure that no stale entries exist. 
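Stepping back, the shape of this fix (as it ends up after the follow-up cleanups later in the series): instead of scheduling a background process after startup, the store now drives transaction-style helpers straight from its constructor, so the reserved users are in the MAU table before the reactor starts and nothing can race with them. A condensed sketch, with the surrounding store machinery elided:

    class MonthlyActiveUsersStore(SQLBaseStore):
        def __init__(self, dbconn, hs):
            super(MonthlyActiveUsersStore, self).__init__(dbconn, hs)
            # Synchronous, on the startup connection -- unlike the old
            # run_as_background_process call, which raced with
            # generate_monthly_active_users.
            self._initialise_reserved_users(
                dbconn.cursor(),
                hs.config.mau_limits_reserved_threepids[:hs.config.max_mau_value],
            )

        def _initialise_reserved_users(self, txn, threepids):
            for tp in threepids:
                user_id = self.get_user_id_by_threepid_txn(
                    txn, tp["medium"], tp["address"],
                )
                if user_id:
                    self.upsert_monthly_active_user_txn(txn, user_id)

The enabling refactor, visible in the hunks below, is the usual `foo()`-wraps-`foo_txn()` split: the Deferred-returning method becomes a thin `runInteraction` wrapper, and the `_txn` variant can also be driven synchronously with a plain cursor.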
Returns: @@ -165,19 +173,33 @@ class MonthlyActiveUsersStore(SQLBaseStore): @defer.inlineCallbacks def upsert_monthly_active_user(self, user_id): + """Updates or inserts monthly active user member + Arguments: + user_id (str): user to add/update + """ + is_insert = yield self.runInteraction( + "upsert_monthly_active_user", self.upsert_monthly_active_user_txn, + user_id + ) + if is_insert: + self.user_last_seen_monthly_active.invalidate((user_id,)) + self.get_monthly_active_count.invalidate(()) + + def upsert_monthly_active_user_txn(self, txn, user_id): """ Updates or inserts monthly active user member Arguments: + txn (cursor): user_id (str): user to add/update - Deferred[bool]: True if a new entry was created, False if an + bool: True if a new entry was created, False if an existing one was updated. """ # Am consciously deciding to lock the table on the basis that is ought # never be a big table and alternative approaches (batching multiple # upserts into a single txn) introduced a lot of extra complexity. # See https://github.com/matrix-org/synapse/issues/3854 for more - is_insert = yield self._simple_upsert( - desc="upsert_monthly_active_user", + is_insert = self._simple_upsert_txn( + txn, table="monthly_active_users", keyvalues={ "user_id": user_id, @@ -186,9 +208,7 @@ class MonthlyActiveUsersStore(SQLBaseStore): "timestamp": int(self._clock.time_msec()), }, ) - if is_insert: - self.user_last_seen_monthly_active.invalidate((user_id,)) - self.get_monthly_active_count.invalidate(()) + return is_insert @cached(num_args=1) def user_last_seen_monthly_active(self, user_id): diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 26b429e30..01931f29c 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -474,17 +474,25 @@ class RegistrationStore(RegistrationWorkerStore, @defer.inlineCallbacks def get_user_id_by_threepid(self, medium, address): - ret = yield self._simple_select_one( + user_id = yield self.runInteraction( + "get_user_id_by_threepid", self.get_user_id_by_threepid_txn, + medium, address + ) + defer.returnValue(user_id) + + def get_user_id_by_threepid_txn(self, txn, medium, address): + ret = self._simple_select_one_txn( + txn, "user_threepids", { "medium": medium, "address": address }, - ['user_id'], True, 'get_user_id_by_threepid' + ['user_id'], True ) if ret: - defer.returnValue(ret['user_id']) - defer.returnValue(None) + return ret['user_id'] + return None def user_delete_threepid(self, user_id, medium, address): return self._simple_delete( diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py index 686f12a0d..0c17745ae 100644 --- a/tests/storage/test_monthly_active_users.py +++ b/tests/storage/test_monthly_active_users.py @@ -52,7 +52,10 @@ class MonthlyActiveUsersTestCase(HomeserverTestCase): now = int(self.hs.get_clock().time_msec()) self.store.user_add_threepid(user1, "email", user1_email, now, now) self.store.user_add_threepid(user2, "email", user2_email, now, now) - self.store.initialise_reserved_users(threepids) + + self.store.runInteraction( + "initialise", self.store.initialise_reserved_users, threepids + ) self.pump() active_count = self.store.get_monthly_active_count() @@ -199,7 +202,10 @@ class MonthlyActiveUsersTestCase(HomeserverTestCase): {'medium': 'email', 'address': user2_email}, ] self.hs.config.mau_limits_reserved_threepids = threepids - self.store.initialise_reserved_users(threepids) + self.store.runInteraction( + "initialise", 
self.store.initialise_reserved_users, threepids + ) + self.pump() count = self.store.get_registered_reserved_users_count() self.assertEquals(self.get_success(count), 0) From 329d18b39cbc1bc9eba8ae9de1fcf734d0cf1a78 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Tue, 23 Oct 2018 15:27:20 +0100 Subject: [PATCH 29/85] remove white space --- changelog.d/4081.bugfix | 1 - synapse/storage/monthly_active_users.py | 3 +-- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/changelog.d/4081.bugfix b/changelog.d/4081.bugfix index f275acb61..13dad5884 100644 --- a/changelog.d/4081.bugfix +++ b/changelog.d/4081.bugfix @@ -1,2 +1 @@ Fix race condition in populating reserved users - diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py index 26e577814..cf15f8c5b 100644 --- a/synapse/storage/monthly_active_users.py +++ b/synapse/storage/monthly_active_users.py @@ -38,8 +38,7 @@ class MonthlyActiveUsersStore(SQLBaseStore): ) def initialise_reserved_users(self, txn, threepids): - """ - Ensures that reserved threepids are accounted for in the MAU table, should + """Ensures that reserved threepids are accounted for in the MAU table, should be called on start up. Arguments: From a67d8ace9bc8b5f5ab953fdcfd6ade077337782d Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Tue, 23 Oct 2018 17:44:39 +0100 Subject: [PATCH 30/85] remove errant exception and style --- changelog.d/3975.feature | 2 +- synapse/config/registration.py | 2 +- synapse/handlers/register.py | 10 +++++----- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/changelog.d/3975.feature b/changelog.d/3975.feature index 79c2711fb..1e33f1b3b 100644 --- a/changelog.d/3975.feature +++ b/changelog.d/3975.feature @@ -1 +1 @@ -Servers with auto-join rooms, should automatically create those rooms when first user registers +Servers with auto-join rooms, will now automatically create those rooms when the first user registers diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 4b9bf6f2d..5df321b28 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -18,7 +18,7 @@ from distutils.util import strtobool from synapse.types import RoomAlias from synapse.util.stringutils import random_string_with_symbols -from ._base import Config, ConfigError +from synapse.config._base import Config, ConfigError class RegistrationConfig(Config): diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 1b5873c8d..9615dd552 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -231,15 +231,15 @@ class RegistrationHandler(BaseHandler): for r in self.hs.config.auto_join_rooms: try: if should_auto_create_rooms: - if self.hs.hostname != RoomAlias.from_string(r).domain: - logger.warn( + room_alias = RoomAlias.from_string(r) + if self.hs.hostname != room_alias.domain: + logger.warning( 'Cannot create room alias %s, ' - 'it does not match server domain' % (r,) + 'it does not match server domain', (r,) ) - raise SynapseError() else: # create room expects the localpart of the room alias - room_alias_localpart = RoomAlias.from_string(r).localpart + room_alias_localpart = room_alias.localpart yield self.room_creation_handler.create_room( fake_requester, config={ From 47a9ba435d9a4b9e311d9d9a3c02be105942f357 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 19 Oct 2018 10:26:50 +0100 Subject: [PATCH 31/85] Use match rather than search --- synapse/config/room_directory.py | 5 +++-- 1 file changed, 3 insertions(+), 2 
deletions(-) diff --git a/synapse/config/room_directory.py b/synapse/config/room_directory.py index 41ef3217e..2ca010afd 100644 --- a/synapse/config/room_directory.py +++ b/synapse/config/room_directory.py @@ -92,10 +92,11 @@ class _AliasRule(object): boolean """ - if not self._user_id_regex.search(user_id): + # Note: The regexes are anchored at both ends + if not self._user_id_regex.match(user_id): return False - if not self._alias_regex.search(alias): + if not self._alias_regex.match(alias): return False return True From 9f72c209eed4bec78133ca9821961eb9a3cf0dad Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 24 Oct 2018 14:37:36 +0100 Subject: [PATCH 32/85] Update changelog.d/3975.feature Co-Authored-By: neilisfragile --- changelog.d/3975.feature | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog.d/3975.feature b/changelog.d/3975.feature index 1e33f1b3b..162f30a53 100644 --- a/changelog.d/3975.feature +++ b/changelog.d/3975.feature @@ -1 +1 @@ -Servers with auto-join rooms, will now automatically create those rooms when the first user registers +Servers with auto-join rooms will now automatically create those rooms when the first user registers From 94a49e0636313507fab2e43b99f969385f535ea2 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 24 Oct 2018 14:39:23 +0100 Subject: [PATCH 33/85] fix tuple Co-Authored-By: neilisfragile --- synapse/handlers/register.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 9615dd552..217928add 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -235,7 +235,8 @@ class RegistrationHandler(BaseHandler): if self.hs.hostname != room_alias.domain: logger.warning( 'Cannot create room alias %s, ' - 'it does not match server domain', (r,) + 'it does not match server domain', + r, ) else: # create room expects the localpart of the room alias From ab96ee29c96d10638d6ae69a80521aa8c4f19113 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 24 Oct 2018 13:41:31 +0100 Subject: [PATCH 34/85] reduce git clone depth --- .travis.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.travis.yml b/.travis.yml index 197dec2bc..5b6fc47ce 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,6 +4,10 @@ language: python # tell travis to cache ~/.cache/pip cache: pip +# don't clone the whole repo history, one commit will do +git: + depth: 1 + # only build branches we care about (PRs are built seperately) branches: only: From 480d98c91f32da5127b695047b912b889d0b9dc2 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 24 Oct 2018 14:49:25 +0100 Subject: [PATCH 35/85] Disable newsfragment checks on branch builds --- .travis.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 5b6fc47ce..107b3ab70 100644 --- a/.travis.yml +++ b/.travis.yml @@ -53,7 +53,9 @@ matrix: - python: 3.6 env: TOX_ENV=check_isort - - python: 3.6 + - # we only need to check for the newsfragment if it's a PR build + if: type = pull_request + python: 3.6 env: TOX_ENV=check-newsfragment install: From 83d9ca71225239e2f4ba27dc79907e93d8573d1b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 24 Oct 2018 13:48:03 +0100 Subject: [PATCH 36/85] only fetch develop for check-newsfragments --- .travis.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.travis.yml 
b/.travis.yml index 107b3ab70..56e10dbdc 100644 --- a/.travis.yml +++ b/.travis.yml @@ -15,10 +15,6 @@ branches: - develop - /^release-v/ -before_script: - - git remote set-branches --add origin develop - - git fetch origin develop - matrix: fast_finish: true include: @@ -57,6 +53,10 @@ matrix: if: type = pull_request python: 3.6 env: TOX_ENV=check-newsfragment + script: + - git remote set-branches --add origin develop + - git fetch origin develop + - tox -e $TOX_ENV install: - pip install tox From 9532caf6ef3cdc6dba80b11a920410b50d3490dd Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 24 Oct 2018 16:08:25 +0100 Subject: [PATCH 37/85] remove trailing whiter space --- synapse/handlers/register.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 217928add..e9d7b25a3 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -235,7 +235,7 @@ class RegistrationHandler(BaseHandler): if self.hs.hostname != room_alias.domain: logger.warning( 'Cannot create room alias %s, ' - 'it does not match server domain', + 'it does not match server domain', r, ) else: From 9ec218658650558a704e939e56f87d1df1a84423 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 24 Oct 2018 16:09:21 +0100 Subject: [PATCH 38/85] isort --- synapse/config/registration.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 5df321b28..7480ed514 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -15,11 +15,10 @@ from distutils.util import strtobool +from synapse.config._base import Config, ConfigError from synapse.types import RoomAlias from synapse.util.stringutils import random_string_with_symbols -from synapse.config._base import Config, ConfigError - class RegistrationConfig(Config): From 663d9db8e7892b79a927de70ec9add64312827d5 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 24 Oct 2018 17:17:30 +0100 Subject: [PATCH 39/85] commit transaction before closing --- synapse/server.py | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/server.py b/synapse/server.py index 3e9d3d825..cf6b872cb 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -207,6 +207,7 @@ class HomeServer(object): logger.info("Setting up.") with self.get_db_conn() as conn: self.datastore = self.DATASTORE_CLASS(conn, self) + conn.commit() logger.info("Finished setting up.") def get_reactor(self): From ea69a84bbb2fc9c1da6db0384a98f577f3bc95a7 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 24 Oct 2018 17:18:08 +0100 Subject: [PATCH 40/85] fix style inconsistencies --- changelog.d/4081.bugfix | 3 +- synapse/storage/monthly_active_users.py | 43 ++++++++++++++-------- synapse/storage/registration.py | 19 ++++++++++ tests/storage/test_monthly_active_users.py | 4 +- 4 files changed, 51 insertions(+), 18 deletions(-) diff --git a/changelog.d/4081.bugfix b/changelog.d/4081.bugfix index 13dad5884..cfe4b3e9d 100644 --- a/changelog.d/4081.bugfix +++ b/changelog.d/4081.bugfix @@ -1 +1,2 @@ -Fix race condition in populating reserved users +Fix race condition where config defined reserved users were not being added to +the monthly active user list prior to the homeserver reactor firing up diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py index cf15f8c5b..9a5c3b7ed 100644 --- a/synapse/storage/monthly_active_users.py +++ b/synapse/storage/monthly_active_users.py @@ -33,21 
+33,23 @@ class MonthlyActiveUsersStore(SQLBaseStore): self._clock = hs.get_clock() self.hs = hs self.reserved_users = () - self.initialise_reserved_users( - dbconn.cursor(), hs.config.mau_limits_reserved_threepids + # Do not add more reserved users than the total allowable number + self._initialise_reserved_users( + dbconn.cursor(), + hs.config.mau_limits_reserved_threepids[:self.hs.config.max_mau_value], ) - def initialise_reserved_users(self, txn, threepids): + def _initialise_reserved_users(self, txn, threepids): """Ensures that reserved threepids are accounted for in the MAU table, should be called on start up. - Arguments: - threepids []: List of threepid dicts to reserve + Args: + txn (cursor): + threepids (list[dict]): List of threepid dicts to reserve """ reserved_user_list = [] - # Do not add more reserved users than the total allowable number - for tp in threepids[:self.hs.config.max_mau_value]: + for tp in threepids: user_id = self.get_user_id_by_threepid_txn( txn, tp["medium"], tp["address"] @@ -172,26 +174,36 @@ class MonthlyActiveUsersStore(SQLBaseStore): @defer.inlineCallbacks def upsert_monthly_active_user(self, user_id): - """Updates or inserts monthly active user member - Arguments: + """Updates or inserts the user into the monthly active user table, which + is used to track the current MAU usage of the server + + Args: user_id (str): user to add/update """ is_insert = yield self.runInteraction( "upsert_monthly_active_user", self.upsert_monthly_active_user_txn, user_id ) + # Considered pushing cache invalidation down into txn method, but + # did not because txn is not a LoggingTransaction. This means I could not + # call txn.call_after(). Therefore cache is altered in background thread + # and calls from elsewhere to user_last_seen_monthly_active and + # get_monthly_active_count fail with ValueError in + # synapse/util/caches/descriptors.py#check_thread if is_insert: self.user_last_seen_monthly_active.invalidate((user_id,)) self.get_monthly_active_count.invalidate(()) def upsert_monthly_active_user_txn(self, txn, user_id): - """ - Updates or inserts monthly active user member - Arguments: - txn (cursor): - user_id (str): user to add/update + """Updates or inserts monthly active user member + + Args: + txn (cursor): + user_id (str): user to add/update + + Returns: bool: True if a new entry was created, False if an - existing one was updated. + existing one was updated. """ # Am consciously deciding to lock the table on the basis that is ought # never be a big table and alternative approaches (batching multiple @@ -207,6 +219,7 @@ class MonthlyActiveUsersStore(SQLBaseStore): "timestamp": int(self._clock.time_msec()), }, ) + return is_insert @cached(num_args=1) diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 0f970850e..80d76bf9d 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -474,6 +474,15 @@ class RegistrationStore(RegistrationWorkerStore, @defer.inlineCallbacks def get_user_id_by_threepid(self, medium, address): + """Returns user id from threepid + + Args: + medium (str): threepid medium e.g. email + address (str): threepid address e.g. 
me@example.com + + Returns: + Deferred[str|None]: user id or None if no user id/threepid mapping exists + """ user_id = yield self.runInteraction( "get_user_id_by_threepid", self.get_user_id_by_threepid_txn, medium, address @@ -481,6 +490,16 @@ class RegistrationStore(RegistrationWorkerStore, defer.returnValue(user_id) def get_user_id_by_threepid_txn(self, txn, medium, address): + """Returns user id from threepid + + Args: + txn (cursor): + medium (str): threepid medium e.g. email + address (str): threepid address e.g. me@example.com + + Returns: + str|None: user id or None if no user id/threepid mapping exists + """ ret = self._simple_select_one_txn( txn, "user_threepids", diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py index 0c17745ae..832e379a8 100644 --- a/tests/storage/test_monthly_active_users.py +++ b/tests/storage/test_monthly_active_users.py @@ -54,7 +54,7 @@ class MonthlyActiveUsersTestCase(HomeserverTestCase): self.store.user_add_threepid(user2, "email", user2_email, now, now) self.store.runInteraction( - "initialise", self.store.initialise_reserved_users, threepids + "initialise", self.store._initialise_reserved_users, threepids ) self.pump() @@ -203,7 +203,7 @@ class MonthlyActiveUsersTestCase(HomeserverTestCase): ] self.hs.config.mau_limits_reserved_threepids = threepids self.store.runInteraction( - "initialise", self.store.initialise_reserved_users, threepids + "initialise", self.store._initialise_reserved_users, threepids ) self.pump() From 77d3b5772fc53fdc4461ebb3e6bd2c6c8f7e78bf Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 24 Oct 2018 21:59:26 +0100 Subject: [PATCH 41/85] disable coverage checking I don't think we ever use this, and it slows things down. If we want to use it, we should just do so on a couple of builds rather than all of them. --- tox.ini | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/tox.ini b/tox.ini index 04d2f721b..9de5a5704 100644 --- a/tox.ini +++ b/tox.ini @@ -3,7 +3,6 @@ envlist = packaging, py27, py36, pep8, check_isort [base] deps = - coverage Twisted>=17.1 mock python-subunit @@ -26,9 +25,7 @@ passenv = * commands = /usr/bin/find "{toxinidir}" -name '*.pyc' -delete - coverage run {env:COVERAGE_OPTS:} --source="{toxinidir}/synapse" \ - "{envbindir}/trial" {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:} - {env:DUMP_COVERAGE_COMMAND:coverage report -m} + "{envbindir}/trial" {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:} [testenv:py27] From fc33e813231a9a78012080193a4a2a96951515da Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 25 Oct 2018 00:44:55 +0100 Subject: [PATCH 42/85] Combine the pep8 and check_isort builds into one there's really no point spinning up two separate jobs for these. 
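Patch 31 above ("Use match rather than search") is a one-word change with a real behavioural point behind it. A sketch of the difference, using a deliberately unanchored, made-up pattern (the real rules are compiled from globs and anchored at both ends, so `match` states the intent directly):

    import re

    pat = re.compile("#unofficial_.*")  # made-up alias pattern

    alias = "evil-prefix #unofficial_foo"
    print(bool(pat.search(alias)))  # True  -- matches anywhere in the string
    print(bool(pat.match(alias)))   # False -- must match from position 0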
--- .travis.yml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/.travis.yml b/.travis.yml index 56e10dbdc..64079b56f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -22,7 +22,7 @@ matrix: env: TOX_ENV=packaging - python: 3.6 - env: TOX_ENV=pep8 + env: TOX_ENV="pep8, check_isort" - python: 2.7 env: TOX_ENV=py27 @@ -46,9 +46,6 @@ matrix: services: - postgresql - - python: 3.6 - env: TOX_ENV=check_isort - - # we only need to check for the newsfragment if it's a PR build if: type = pull_request python: 3.6 From 46f98a6a297ff014e7e88fb056e59755fd18cf58 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 25 Oct 2018 01:00:58 +0100 Subject: [PATCH 43/85] Only cache the wheels --- .travis.yml | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index 64079b56f..ac34c3672 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,8 +1,16 @@ sudo: false language: python -# tell travis to cache ~/.cache/pip -cache: pip +cache: + directories: + # we only bother to cache the wheels; parts of the http cache get + # invalidated every build (because they get served with a max-age of 600 + # seconds), which means that we end up re-uploading the whole cache for + # every build, which is time-consuming In any case, it's not obvious that + # downloading the cache from S3 would be much faster than downloading the + # originals from pypi. + # + - $HOME/.cache/pip/wheels # don't clone the whole repo history, one commit will do git: From edd2d828095c5e9352d2755af19c0be5e4dc9e0d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 25 Oct 2018 01:06:39 +0100 Subject: [PATCH 44/85] oops, run the check_isort build --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index ac34c3672..fd41841c7 100644 --- a/.travis.yml +++ b/.travis.yml @@ -30,7 +30,7 @@ matrix: env: TOX_ENV=packaging - python: 3.6 - env: TOX_ENV="pep8, check_isort" + env: TOX_ENV="pep8,check_isort" - python: 2.7 env: TOX_ENV=py27 From f8fe98812be74e37432f11508c07488079bf949d Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Thu, 25 Oct 2018 14:58:59 +0100 Subject: [PATCH 45/85] improve comments --- synapse/storage/monthly_active_users.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py index 9a5c3b7ed..01963c879 100644 --- a/synapse/storage/monthly_active_users.py +++ b/synapse/storage/monthly_active_users.py @@ -184,18 +184,18 @@ class MonthlyActiveUsersStore(SQLBaseStore): "upsert_monthly_active_user", self.upsert_monthly_active_user_txn, user_id ) - # Considered pushing cache invalidation down into txn method, but - # did not because txn is not a LoggingTransaction. This means I could not - # call txn.call_after(). Therefore cache is altered in background thread - # and calls from elsewhere to user_last_seen_monthly_active and - # get_monthly_active_count fail with ValueError in - # synapse/util/caches/descriptors.py#check_thread + if is_insert: self.user_last_seen_monthly_active.invalidate((user_id,)) self.get_monthly_active_count.invalidate(()) def upsert_monthly_active_user_txn(self, txn, user_id): """Updates or inserts monthly active user member + Note that, after calling this method, it will generally be necessary + to invalidate the caches on user_last_seen_monthly_active and + get_monthly_active_count. 
We can't do that here, because we are running + in a database thread rather than the main thread, and we can't call + txn.call_after because txn may not be a LoggingTransaction. Args: txn (cursor): From e5481b22aac800b016e05f50a50ced85a226b364 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 25 Oct 2018 15:25:21 +0100 Subject: [PATCH 46/85] Use allow/deny --- synapse/config/room_directory.py | 12 ++++++------ tests/config/test_room_directory.py | 8 ++++---- tests/handlers/test_directory.py | 2 +- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/synapse/config/room_directory.py b/synapse/config/room_directory.py index 2ca010afd..9da13ab11 100644 --- a/synapse/config/room_directory.py +++ b/synapse/config/room_directory.py @@ -35,13 +35,13 @@ class RoomDirectoryConfig(Config): # The format of this option is a list of rules that contain globs that # match against user_id and the new alias (fully qualified with server # name). The action in the first rule that matches is taken, which can - # currently either be "allowed" or "denied". + # currently either be "allow" or "deny". # # If no rules match the request is denied. alias_creation_rules: - user_id: "*" alias: "*" - action: allowed + action: allow """ def is_alias_creation_allowed(self, user_id, alias): @@ -56,7 +56,7 @@ class RoomDirectoryConfig(Config): """ for rule in self._alias_creation_rules: if rule.matches(user_id, alias): - return rule.action == "allowed" + return rule.action == "allow" return False @@ -67,12 +67,12 @@ class _AliasRule(object): user_id = rule["user_id"] alias = rule["alias"] - if action in ("allowed", "denied"): + if action in ("allow", "deny"): self.action = action else: raise ConfigError( - "alias_creation_rules rules can only have action of 'allowed'" - " or 'denied'" + "alias_creation_rules rules can only have action of 'allow'" + " or 'deny'" ) try: diff --git a/tests/config/test_room_directory.py b/tests/config/test_room_directory.py index 75021a5f0..f37a17d61 100644 --- a/tests/config/test_room_directory.py +++ b/tests/config/test_room_directory.py @@ -26,16 +26,16 @@ class RoomDirectoryConfigTestCase(unittest.TestCase): alias_creation_rules: - user_id: "*bob*" alias: "*" - action: "denied" + action: "deny" - user_id: "*" alias: "#unofficial_*" - action: "allowed" + action: "allow" - user_id: "@foo*:example.com" alias: "*" - action: "allowed" + action: "allow" - user_id: "@gah:example.com" alias: "#goo:example.com" - action: "allowed" + action: "allow" """) rd_config = RoomDirectoryConfig() diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py index 4f299b74b..8ae6556c0 100644 --- a/tests/handlers/test_directory.py +++ b/tests/handlers/test_directory.py @@ -118,7 +118,7 @@ class TestCreateAliasACL(unittest.HomeserverTestCase): { "user_id": "*", "alias": "#unofficial_*", - "action": "allowed", + "action": "allow", } ] From fcbd488e9a60a70a81cd7a94a6adebe6e06c525a Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Thu, 25 Oct 2018 16:13:43 +0100 Subject: [PATCH 47/85] add new line --- synapse/storage/monthly_active_users.py | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py index 01963c879..cf4104dc2 100644 --- a/synapse/storage/monthly_active_users.py +++ b/synapse/storage/monthly_active_users.py @@ -191,6 +191,7 @@ class MonthlyActiveUsersStore(SQLBaseStore): def upsert_monthly_active_user_txn(self, txn, user_id): """Updates or inserts monthly active user member + Note that, after 
calling this method, it will generally be necessary to invalidate the caches on user_last_seen_monthly_active and get_monthly_active_count. We can't do that here, because we are running From cb53ce9d6429252d5ee012f5a476cc834251c27d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 25 Oct 2018 17:49:55 +0100 Subject: [PATCH 48/85] Refactor state group lookup to reduce DB hits (#4011) Currently when fetching state groups from the data store we make two hits two the database: once for members and once for non-members (unless request is filtered to one or the other). This adds needless load to the datbase, so this PR refactors the lookup to make only a single database hit. --- changelog.d/4011.misc | 1 + synapse/handlers/initial_sync.py | 4 +- synapse/handlers/message.py | 20 +- synapse/handlers/pagination.py | 15 +- synapse/handlers/room.py | 22 +- synapse/handlers/sync.py | 97 ++-- synapse/rest/client/v1/room.py | 3 +- synapse/storage/events.py | 2 +- synapse/storage/state.py | 851 +++++++++++++++++++------------ synapse/visibility.py | 15 +- tests/storage/test_state.py | 175 ++++--- 11 files changed, 717 insertions(+), 488 deletions(-) create mode 100644 changelog.d/4011.misc diff --git a/changelog.d/4011.misc b/changelog.d/4011.misc new file mode 100644 index 000000000..ad7768c4c --- /dev/null +++ b/changelog.d/4011.misc @@ -0,0 +1 @@ +Reduce database load when fetching state groups diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index e00939520..563bb3cea 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -156,7 +156,7 @@ class InitialSyncHandler(BaseHandler): room_end_token = "s%d" % (event.stream_ordering,) deferred_room_state = run_in_background( self.store.get_state_for_events, - [event.event_id], None, + [event.event_id], ) deferred_room_state.addCallback( lambda states: states[event.event_id] @@ -301,7 +301,7 @@ class InitialSyncHandler(BaseHandler): def _room_initial_sync_parted(self, user_id, room_id, pagin_config, membership, member_event_id, is_peeking): room_state = yield self.store.get_state_for_events( - [member_event_id], None + [member_event_id], ) room_state = room_state[member_event_id] diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 6c4fcfb10..969e588e7 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -35,6 +35,7 @@ from synapse.crypto.event_signing import add_hashes_and_signatures from synapse.events.utils import serialize_event from synapse.events.validator import EventValidator from synapse.replication.http.send_event import ReplicationSendEventRestServlet +from synapse.storage.state import StateFilter from synapse.types import RoomAlias, UserID from synapse.util.async_helpers import Linearizer from synapse.util.frozenutils import frozendict_json_encoder @@ -80,7 +81,7 @@ class MessageHandler(object): elif membership == Membership.LEAVE: key = (event_type, state_key) room_state = yield self.store.get_state_for_events( - [membership_event_id], [key] + [membership_event_id], StateFilter.from_types([key]) ) data = room_state[membership_event_id].get(key) @@ -88,7 +89,7 @@ class MessageHandler(object): @defer.inlineCallbacks def get_state_events( - self, user_id, room_id, types=None, filtered_types=None, + self, user_id, room_id, state_filter=StateFilter.all(), at_token=None, is_guest=False, ): """Retrieve all state events for a given room. 
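The heart of patch 48 is replacing the loose `types` / `filtered_types` argument pair with a single `StateFilter` value. Roughly, the correspondence for the common lazy-loading case (the user ID is a placeholder):

    from synapse.storage.state import StateFilter

    # Before: "all state, but only Alice's membership" took two arguments:
    #   types=[("m.room.member", "@alice:example.com")],
    #   filtered_types=["m.room.member"],
    #
    # After: the same request is a single value, via the new helper...
    state_filter = StateFilter.from_lazy_load_member_list(
        ["@alice:example.com"],
    )
    # ...which is equivalent to spelling it out:
    state_filter = StateFilter(
        types={"m.room.member": {"@alice:example.com"}},
        include_others=True,  # what filtered_types=["m.room.member"] meant
    )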
If the user is @@ -100,13 +101,8 @@ class MessageHandler(object): Args: user_id(str): The user requesting state events. room_id(str): The room ID to get all state events from. - types(list[(str, str|None)]|None): List of (type, state_key) tuples - which are used to filter the state fetched. If `state_key` is None, - all events are returned of the given type. - May be None, which matches any key. - filtered_types(list[str]|None): Only apply filtering via `types` to this - list of event types. Other types of events are returned unfiltered. - If None, `types` filtering is applied to all events. + state_filter (StateFilter): The state filter used to fetch state + from the database. at_token(StreamToken|None): the stream token of the at which we are requesting the stats. If the user is not allowed to view the state as of that stream token, we raise a 403 SynapseError. If None, returns the current @@ -139,7 +135,7 @@ class MessageHandler(object): event = last_events[0] if visible_events: room_state = yield self.store.get_state_for_events( - [event.event_id], types, filtered_types=filtered_types, + [event.event_id], state_filter=state_filter, ) room_state = room_state[event.event_id] else: @@ -158,12 +154,12 @@ class MessageHandler(object): if membership == Membership.JOIN: state_ids = yield self.store.get_filtered_current_state_ids( - room_id, types, filtered_types=filtered_types, + room_id, state_filter=state_filter, ) room_state = yield self.store.get_events(state_ids.values()) elif membership == Membership.LEAVE: room_state = yield self.store.get_state_for_events( - [membership_event_id], types, filtered_types=filtered_types, + [membership_event_id], state_filter=state_filter, ) room_state = room_state[membership_event_id] diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index a155b6e93..43f81bd60 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -21,6 +21,7 @@ from twisted.python.failure import Failure from synapse.api.constants import EventTypes, Membership from synapse.api.errors import SynapseError from synapse.events.utils import serialize_event +from synapse.storage.state import StateFilter from synapse.types import RoomStreamToken from synapse.util.async_helpers import ReadWriteLock from synapse.util.logcontext import run_in_background @@ -255,16 +256,14 @@ class PaginationHandler(object): if event_filter and event_filter.lazy_load_members(): # TODO: remove redundant members - types = [ - (EventTypes.Member, state_key) - for state_key in set( - event.sender # FIXME: we also care about invite targets etc. - for event in events - ) - ] + # FIXME: we also care about invite targets etc. 
+ state_filter = StateFilter.from_types( + (EventTypes.Member, event.sender) + for event in events + ) state_ids = yield self.store.get_state_ids_for_event( - events[0].event_id, types=types, + events[0].event_id, state_filter=state_filter, ) if state_ids: diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index ab1571b27..3ba92bdb4 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -33,6 +33,7 @@ from synapse.api.constants import ( RoomCreationPreset, ) from synapse.api.errors import AuthError, Codes, StoreError, SynapseError +from synapse.storage.state import StateFilter from synapse.types import RoomAlias, RoomID, RoomStreamToken, StreamToken, UserID from synapse.util import stringutils from synapse.visibility import filter_events_for_client @@ -489,23 +490,24 @@ class RoomContextHandler(object): else: last_event_id = event_id - types = None - filtered_types = None if event_filter and event_filter.lazy_load_members(): - members = set(ev.sender for ev in itertools.chain( - results["events_before"], - (results["event"],), - results["events_after"], - )) - filtered_types = [EventTypes.Member] - types = [(EventTypes.Member, member) for member in members] + state_filter = StateFilter.from_lazy_load_member_list( + ev.sender + for ev in itertools.chain( + results["events_before"], + (results["event"],), + results["events_after"], + ) + ) + else: + state_filter = StateFilter.all() # XXX: why do we return the state as of the last event rather than the # first? Shouldn't we be consistent with /sync? # https://github.com/matrix-org/matrix-doc/issues/687 state = yield self.store.get_state_for_events( - [last_event_id], types, filtered_types=filtered_types, + [last_event_id], state_filter=state_filter, ) results["state"] = list(state[last_event_id].values()) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 351892a94..09739f286 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -27,6 +27,7 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, Membership from synapse.push.clientformat import format_push_rules_for_user from synapse.storage.roommember import MemberSummary +from synapse.storage.state import StateFilter from synapse.types import RoomStreamToken from synapse.util.async_helpers import concurrently_execute from synapse.util.caches.expiringcache import ExpiringCache @@ -469,25 +470,20 @@ class SyncHandler(object): )) @defer.inlineCallbacks - def get_state_after_event(self, event, types=None, filtered_types=None): + def get_state_after_event(self, event, state_filter=StateFilter.all()): """ Get the room state after the given event Args: event(synapse.events.EventBase): event of interest - types(list[(str, str|None)]|None): List of (type, state_key) tuples - which are used to filter the state fetched. If `state_key` is None, - all events are returned of the given type. - May be None, which matches any key. - filtered_types(list[str]|None): Only apply filtering via `types` to this - list of event types. Other types of events are returned unfiltered. - If None, `types` filtering is applied to all events. + state_filter (StateFilter): The state filter used to fetch state + from the database. 
Returns: A Deferred map from ((type, state_key)->Event) """ state_ids = yield self.store.get_state_ids_for_event( - event.event_id, types, filtered_types=filtered_types, + event.event_id, state_filter=state_filter, ) if event.is_state(): state_ids = state_ids.copy() @@ -495,18 +491,14 @@ class SyncHandler(object): defer.returnValue(state_ids) @defer.inlineCallbacks - def get_state_at(self, room_id, stream_position, types=None, filtered_types=None): + def get_state_at(self, room_id, stream_position, state_filter=StateFilter.all()): """ Get the room state at a particular stream position Args: room_id(str): room for which to get state stream_position(StreamToken): point at which to get state - types(list[(str, str|None)]|None): List of (type, state_key) tuples - which are used to filter the state fetched. If `state_key` is None, - all events are returned of the given type. - filtered_types(list[str]|None): Only apply filtering via `types` to this - list of event types. Other types of events are returned unfiltered. - If None, `types` filtering is applied to all events. + state_filter (StateFilter): The state filter used to fetch state + from the database. Returns: A Deferred map from ((type, state_key)->Event) @@ -522,7 +514,7 @@ class SyncHandler(object): if last_events: last_event = last_events[-1] state = yield self.get_state_after_event( - last_event, types, filtered_types=filtered_types, + last_event, state_filter=state_filter, ) else: @@ -563,10 +555,11 @@ class SyncHandler(object): last_event = last_events[-1] state_ids = yield self.store.get_state_ids_for_event( - last_event.event_id, [ + last_event.event_id, + state_filter=StateFilter.from_types([ (EventTypes.Name, ''), (EventTypes.CanonicalAlias, ''), - ] + ]), ) # this is heavily cached, thus: fast. @@ -717,8 +710,7 @@ class SyncHandler(object): with Measure(self.clock, "compute_state_delta"): - types = None - filtered_types = None + members_to_fetch = None lazy_load_members = sync_config.filter_collection.lazy_load_members() include_redundant_members = ( @@ -729,16 +721,21 @@ class SyncHandler(object): # We only request state for the members needed to display the # timeline: - types = [ - (EventTypes.Member, state_key) - for state_key in set( - event.sender # FIXME: we also care about invite targets etc. - for event in batch.events - ) - ] + members_to_fetch = set( + event.sender # FIXME: we also care about invite targets etc. + for event in batch.events + ) - # only apply the filtering to room members - filtered_types = [EventTypes.Member] + if full_state: + # always make sure we LL ourselves so we know we're in the room + # (if we are) to fix https://github.com/vector-im/riot-web/issues/7209 + # We only need apply this on full state syncs given we disabled + # LL for incr syncs in #3840. + members_to_fetch.add(sync_config.user.to_string()) + + state_filter = StateFilter.from_lazy_load_member_list(members_to_fetch) + else: + state_filter = StateFilter.all() timeline_state = { (event.type, event.state_key): event.event_id @@ -746,28 +743,19 @@ class SyncHandler(object): } if full_state: - if lazy_load_members: - # always make sure we LL ourselves so we know we're in the room - # (if we are) to fix https://github.com/vector-im/riot-web/issues/7209 - # We only need apply this on full state syncs given we disabled - # LL for incr syncs in #3840. 
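Pulling the lazy-loading branch above together, what `compute_state_delta` now computes is, in effect, the following (a sketch; `batch_events`, `user_id`, `full_state` and `lazy_load_members` stand in for the handler's locals):

    from synapse.storage.state import StateFilter

    def timeline_state_filter(batch_events, user_id, full_state,
                              lazy_load_members):
        if not lazy_load_members:
            return StateFilter.all()

        # Only the members needed to display the timeline...
        members_to_fetch = set(event.sender for event in batch_events)

        if full_state:
            # ...plus ourselves on full-state syncs, so we always learn
            # our own membership (vector-im/riot-web#7209).
            members_to_fetch.add(user_id)

        return StateFilter.from_lazy_load_member_list(members_to_fetch)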
- types.append((EventTypes.Member, sync_config.user.to_string())) - if batch: current_state_ids = yield self.store.get_state_ids_for_event( - batch.events[-1].event_id, types=types, - filtered_types=filtered_types, + batch.events[-1].event_id, state_filter=state_filter, ) state_ids = yield self.store.get_state_ids_for_event( - batch.events[0].event_id, types=types, - filtered_types=filtered_types, + batch.events[0].event_id, state_filter=state_filter, ) else: current_state_ids = yield self.get_state_at( - room_id, stream_position=now_token, types=types, - filtered_types=filtered_types, + room_id, stream_position=now_token, + state_filter=state_filter, ) state_ids = current_state_ids @@ -781,8 +769,7 @@ class SyncHandler(object): ) elif batch.limited: state_at_timeline_start = yield self.store.get_state_ids_for_event( - batch.events[0].event_id, types=types, - filtered_types=filtered_types, + batch.events[0].event_id, state_filter=state_filter, ) # for now, we disable LL for gappy syncs - see @@ -797,17 +784,15 @@ class SyncHandler(object): # members to just be ones which were timeline senders, which then ensures # all of the rest get included in the state block (if we need to know # about them). - types = None - filtered_types = None + state_filter = StateFilter.all() state_at_previous_sync = yield self.get_state_at( - room_id, stream_position=since_token, types=types, - filtered_types=filtered_types, + room_id, stream_position=since_token, + state_filter=state_filter, ) current_state_ids = yield self.store.get_state_ids_for_event( - batch.events[-1].event_id, types=types, - filtered_types=filtered_types, + batch.events[-1].event_id, state_filter=state_filter, ) state_ids = _calculate_state( @@ -821,7 +806,7 @@ class SyncHandler(object): else: state_ids = {} if lazy_load_members: - if types and batch.events: + if members_to_fetch and batch.events: # We're returning an incremental sync, with no # "gap" since the previous sync, so normally there would be # no state to return. @@ -831,8 +816,12 @@ class SyncHandler(object): # timeline here, and then dedupe any redundant ones below. state_ids = yield self.store.get_state_ids_for_event( - batch.events[0].event_id, types=types, - filtered_types=None, # we only want members! + batch.events[0].event_id, + # we only want members! 
+ state_filter=StateFilter.from_types( + (EventTypes.Member, member) + for member in members_to_fetch + ), ) if lazy_load_members and not include_redundant_members: diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 663934efd..fcfe7857f 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -33,6 +33,7 @@ from synapse.http.servlet import ( parse_json_object_from_request, parse_string, ) +from synapse.storage.state import StateFilter from synapse.streams.config import PaginationConfig from synapse.types import RoomAlias, RoomID, StreamToken, ThirdPartyInstanceID, UserID @@ -409,7 +410,7 @@ class RoomMemberListRestServlet(ClientV1RestServlet): room_id=room_id, user_id=requester.user.to_string(), at_token=at_token, - types=[(EventTypes.Member, None)], + state_filter=StateFilter.from_types([(EventTypes.Member, None)]), ) chunk = [] diff --git a/synapse/storage/events.py b/synapse/storage/events.py index c780f5527..8881b009d 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -2089,7 +2089,7 @@ class EventsStore(EventFederationStore, EventsWorkerStore, BackgroundUpdateStore for sg in remaining_state_groups: logger.info("[purge] de-delta-ing remaining state group %s", sg) curr_state = self._get_state_groups_from_groups_txn( - txn, [sg], types=None + txn, [sg], ) curr_state = curr_state[sg] diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 3f4cbd61c..ef65929bb 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -19,6 +19,8 @@ from collections import namedtuple from six import iteritems, itervalues from six.moves import range +import attr + from twisted.internet import defer from synapse.api.constants import EventTypes @@ -48,6 +50,318 @@ class _GetStateGroupDelta(namedtuple("_GetStateGroupDelta", ("prev_group", "delt return len(self.delta_ids) if self.delta_ids else 0 +@attr.s(slots=True) +class StateFilter(object): + """A filter used when querying for state. + + Attributes: + types (dict[str, set[str]|None]): Map from type to set of state keys (or + None). This specifies which state_keys for the given type to fetch + from the DB. If None then all events with that type are fetched. If + the set is empty then no events with that type are fetched. + include_others (bool): Whether to fetch events with types that do not + appear in `types`. + """ + + types = attr.ib() + include_others = attr.ib(default=False) + + def __attrs_post_init__(self): + # If `include_others` is set we canonicalise the filter by removing + # wildcards from the types dictionary + if self.include_others: + self.types = { + k: v for k, v in iteritems(self.types) + if v is not None + } + + @staticmethod + def all(): + """Creates a filter that fetches everything. + + Returns: + StateFilter + """ + return StateFilter(types={}, include_others=True) + + @staticmethod + def none(): + """Creates a filter that fetches nothing. + + Returns: + StateFilter + """ + return StateFilter(types={}, include_others=False) + + @staticmethod + def from_types(types): + """Creates a filter that only fetches the given types + + Args: + types (Iterable[tuple[str, str|None]]): A list of type and state + keys to fetch. 
A state_key of None fetches everything for + that type + + Returns: + StateFilter + """ + type_dict = {} + for typ, s in types: + if typ in type_dict: + if type_dict[typ] is None: + continue + + if s is None: + type_dict[typ] = None + continue + + type_dict.setdefault(typ, set()).add(s) + + return StateFilter(types=type_dict) + + @staticmethod + def from_lazy_load_member_list(members): + """Creates a filter that returns all non-member events, plus the member + events for the given users + + Args: + members (iterable[str]): Set of user IDs + + Returns: + StateFilter + """ + return StateFilter( + types={EventTypes.Member: set(members)}, + include_others=True, + ) + + def return_expanded(self): + """Creates a new StateFilter where type wild cards have been removed + (except for memberships). The returned filter is a superset of the + current one, i.e. anything that passes the current filter will pass + the returned filter. + + This helps the caching as the DictionaryCache knows if it has *all* the + state, but does not know if it has all of the keys of a particular type, + which makes wildcard lookups expensive unless we have a complete cache. + Hence, if we are doing a wildcard lookup, populate the cache fully so + that we can do an efficient lookup next time. + + Note that since we have two caches, one for membership events and one for + other events, we can be a bit more clever than simply returning + `StateFilter.all()` if `has_wildcards()` is True. + + We return a StateFilter where: + 1. the list of membership events to return is the same + 2. if there is a wildcard that matches non-member events we + return all non-member events + + Returns: + StateFilter + """ + + if self.is_full(): + # If we're going to return everything then there's nothing to do + return self + + if not self.has_wildcards(): + # If there are no wild cards, there's nothing to do + return self + + if EventTypes.Member in self.types: + get_all_members = self.types[EventTypes.Member] is None + else: + get_all_members = self.include_others + + has_non_member_wildcard = self.include_others or any( + state_keys is None + for t, state_keys in iteritems(self.types) + if t != EventTypes.Member + ) + + if not has_non_member_wildcard: + # If there are no non-member wild cards we can just return ourselves + return self + + if get_all_members: + # We want to return everything. + return StateFilter.all() + else: + # We want to return all non-members, but only particular + # memberships + return StateFilter( + types={EventTypes.Member: self.types[EventTypes.Member]}, + include_others=True, + ) + + def make_sql_filter_clause(self): + """Converts the filter to an SQL clause. + + For example: + + f = StateFilter.from_types([("m.room.create", "")]) + clause, args = f.make_sql_filter_clause() + clause == "(type = ? AND state_key = ?)" + args == ['m.room.create', ''] + + + Returns: + tuple[str, list]: The SQL string (may be empty) and arguments. An + empty SQL string is returned when the filter matches everything + (i.e. is "full"). + """ + + where_clause = "" + where_args = [] + + if self.is_full(): + return where_clause, where_args + + if not self.include_others and not self.types: + # i.e. 
this is an empty filter, so we need to return a clause that + # will match nothing + return "1 = 2", [] + + # First we build up a lost of clauses for each type/state_key combo + clauses = [] + for etype, state_keys in iteritems(self.types): + if state_keys is None: + clauses.append("(type = ?)") + where_args.append(etype) + continue + + for state_key in state_keys: + clauses.append("(type = ? AND state_key = ?)") + where_args.extend((etype, state_key)) + + # This will match anything that appears in `self.types` + where_clause = " OR ".join(clauses) + + # If we want to include stuff that's not in the types dict then we add + # a `OR type NOT IN (...)` clause to the end. + if self.include_others: + if where_clause: + where_clause += " OR " + + where_clause += "type NOT IN (%s)" % ( + ",".join(["?"] * len(self.types)), + ) + where_args.extend(self.types) + + return where_clause, where_args + + def max_entries_returned(self): + """Returns the maximum number of entries this filter will return if + known, otherwise returns None. + + For example a simple state filter asking for `("m.room.create", "")` + will return 1, whereas the default state filter will return None. + + This is used to bail out early if the right number of entries have been + fetched. + """ + if self.has_wildcards(): + return None + + return len(self.concrete_types()) + + def filter_state(self, state_dict): + """Returns the state filtered with by this StateFilter + + Args: + state (dict[tuple[str, str], Any]): The state map to filter + + Returns: + dict[tuple[str, str], Any]: The filtered state map + """ + if self.is_full(): + return dict(state_dict) + + filtered_state = {} + for k, v in iteritems(state_dict): + typ, state_key = k + if typ in self.types: + state_keys = self.types[typ] + if state_keys is None or state_key in state_keys: + filtered_state[k] = v + elif self.include_others: + filtered_state[k] = v + + return filtered_state + + def is_full(self): + """Whether this filter fetches everything or not + + Returns: + bool + """ + return self.include_others and not self.types + + def has_wildcards(self): + """Whether the filter includes wildcards or is attempting to fetch + specific state. + + Returns: + bool + """ + + return ( + self.include_others + or any( + state_keys is None + for state_keys in itervalues(self.types) + ) + ) + + def concrete_types(self): + """Returns a list of concrete type/state_keys (i.e. not None) that + will be fetched. This will be a complete list if `has_wildcards` + returns False, but otherwise will be a subset (or even empty). + + Returns: + list[tuple[str,str]] + """ + return [ + (t, s) + for t, state_keys in iteritems(self.types) + if state_keys is not None + for s in state_keys + ] + + def get_member_split(self): + """Return the filter split into two: one which assumes it's exclusively + matching against member state, and one which assumes it's matching + against non member state. + + This is useful due to the returned filters giving correct results for + `is_full()`, `has_wildcards()`, etc, when operating against maps that + either exclusively contain member events or only contain non-member + events. (Which is the case when dealing with the member vs non-member + state caches). 
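Taken together, the factory methods and predicates above give call sites a single vocabulary for describing a state query. A minimal sketch of how they compose (illustrative room state; the string literals are the wire names behind the EventTypes constants):

    from synapse.storage.state import StateFilter

    f = StateFilter.from_types([
        ("m.room.name", ""),      # one concrete (type, state_key) pair
        ("m.room.member", None),  # wildcard: every member event
    ])

    assert f.has_wildcards()                 # the member wildcard
    assert f.max_entries_returned() is None  # unbounded, because of it

    # filter_state() applies the same predicate to an in-memory state map
    state_ids = {
        ("m.room.name", ""): "$name",
        ("m.room.member", "@alice:example.com"): "$alice",
        ("m.room.topic", ""): "$topic",
    }
    filtered = f.filter_state(state_ids)
    assert ("m.room.topic", "") not in filtered  # include_others defaults to False
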
+ + Returns: + tuple[StateFilter, StateFilter]: The member and non member filters + """ + + if EventTypes.Member in self.types: + state_keys = self.types[EventTypes.Member] + if state_keys is None: + member_filter = StateFilter.all() + else: + member_filter = StateFilter({EventTypes.Member: state_keys}) + elif self.include_others: + member_filter = StateFilter.all() + else: + member_filter = StateFilter.none() + + non_member_filter = StateFilter( + types={k: v for k, v in iteritems(self.types) if k != EventTypes.Member}, + include_others=self.include_others, + ) + + return member_filter, non_member_filter + + # this inherits from EventsWorkerStore because it calls self.get_events class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): """The parts of StateGroupStore that can be called from workers. @@ -152,61 +466,41 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): ) # FIXME: how should this be cached? - def get_filtered_current_state_ids(self, room_id, types, filtered_types=None): + def get_filtered_current_state_ids(self, room_id, state_filter=StateFilter.all()): """Get the current state event of a given type for a room based on the current_state_events table. This may not be as up-to-date as the result of doing a fresh state resolution as per state_handler.get_current_state + Args: room_id (str) - types (list[(Str, (Str|None))]): List of (type, state_key) tuples - which are used to filter the state fetched. `state_key` may be - None, which matches any `state_key` - filtered_types (list[Str]|None): List of types to apply the above filter to. - Returns: - deferred: dict of (type, state_key) -> event - """ + state_filter (StateFilter): The state filter used to fetch state + from the database. - include_other_types = False if filtered_types is None else True + Returns: + Deferred[dict[tuple[str, str], str]]: Map from type/state_key to + event ID. + """ def _get_filtered_current_state_ids_txn(txn): results = {} - sql = """SELECT type, state_key, event_id FROM current_state_events - WHERE room_id = ? %s""" - # Turns out that postgres doesn't like doing a list of OR's and - # is about 1000x slower, so we just issue a query for each specific - # type seperately. - if types: - clause_to_args = [ - ( - "AND type = ? AND state_key = ?", - (etype, state_key) - ) if state_key is not None else ( - "AND type = ?", - (etype,) - ) - for etype, state_key in types - ] + sql = """ + SELECT type, state_key, event_id FROM current_state_events + WHERE room_id = ? + """ + + where_clause, where_args = state_filter.make_sql_filter_clause() + + if where_clause: + sql += " AND (%s)" % (where_clause,) + + args = [room_id] + args.extend(where_args) + txn.execute(sql, args) + for row in txn: + typ, state_key, event_id = row + key = (intern_string(typ), intern_string(state_key)) + results[key] = event_id - if include_other_types: - unique_types = set(filtered_types) - clause_to_args.append( - ( - "AND type <> ? " * len(unique_types), - list(unique_types) - ) - ) - else: - # If types is None we fetch all the state, and so just use an - # empty where clause with no extra args. 
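For reference, the WHERE fragments that make_sql_filter_clause() produces in the common cases look like this (a sketch: the first case is the method's own docstring example, the second is traced from the code above):

    from synapse.storage.state import StateFilter

    f = StateFilter.from_types([("m.room.create", "")])
    f.make_sql_filter_clause()
    # -> ("(type = ? AND state_key = ?)", ["m.room.create", ""])

    f = StateFilter(
        types={"m.room.member": {"@alice:example.com"}},
        include_others=True,
    )
    f.make_sql_filter_clause()
    # -> ("(type = ? AND state_key = ?) OR type NOT IN (?)",
    #     ["m.room.member", "@alice:example.com", "m.room.member"])
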
- clause_to_args = [("", [])] - for where_clause, where_args in clause_to_args: - args = [room_id] - args.extend(where_args) - txn.execute(sql % (where_clause,), args) - for row in txn: - typ, state_key, event_id = row - key = (intern_string(typ), intern_string(state_key)) - results[key] = event_id return results return self.runInteraction( @@ -322,20 +616,14 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): }) @defer.inlineCallbacks - def _get_state_groups_from_groups(self, groups, types, members=None): + def _get_state_groups_from_groups(self, groups, state_filter): """Returns the state groups for a given set of groups, filtering on types of state events. Args: groups(list[int]): list of state group IDs to query - types (Iterable[str, str|None]|None): list of 2-tuples of the form - (`type`, `state_key`), where a `state_key` of `None` matches all - state_keys for the `type`. If None, all types are returned. - members (bool|None): If not None, then, in addition to any filtering - implied by types, the results are also filtered to only include - member events (if True), or to exclude member events (if False) - - Returns: + state_filter (StateFilter): The state filter used to fetch state + from the database. Returns: Deferred[dict[int, dict[tuple[str, str], str]]]: dict of state_group_id -> (dict of (type, state_key) -> event id) @@ -346,19 +634,23 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): for chunk in chunks: res = yield self.runInteraction( "_get_state_groups_from_groups", - self._get_state_groups_from_groups_txn, chunk, types, members, + self._get_state_groups_from_groups_txn, chunk, state_filter, ) results.update(res) defer.returnValue(results) def _get_state_groups_from_groups_txn( - self, txn, groups, types=None, members=None, + self, txn, groups, state_filter=StateFilter.all(), ): results = {group: {} for group in groups} - if types is not None: - types = list(set(types)) # deduplicate types list + where_clause, where_args = state_filter.make_sql_filter_clause() + + # Unless the filter clause is empty, we're going to append it after an + # existing where clause + if where_clause: + where_clause = " AND (%s)" % (where_clause,) if isinstance(self.database_engine, PostgresEngine): # Temporarily disable sequential scans in this transaction. This is @@ -374,79 +666,33 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): # group for the given type, state_key. # This may return multiple rows per (type, state_key), but last_value # should be the same. - sql = (""" + sql = """ WITH RECURSIVE state(state_group) AS ( VALUES(?::bigint) UNION ALL SELECT prev_state_group FROM state_group_edges e, state s WHERE s.state_group = e.state_group ) - SELECT type, state_key, last_value(event_id) OVER ( + SELECT DISTINCT type, state_key, last_value(event_id) OVER ( PARTITION BY type, state_key ORDER BY state_group ASC ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING ) AS event_id FROM state_groups_state WHERE state_group IN ( SELECT state_group FROM state ) - %s - """) + """ - if members is True: - sql += " AND type = '%s'" % (EventTypes.Member,) - elif members is False: - sql += " AND type <> '%s'" % (EventTypes.Member,) + for group in groups: + args = [group] + args.extend(where_args) - # Turns out that postgres doesn't like doing a list of OR's and - # is about 1000x slower, so we just issue a query for each specific - # type seperately. - if types is not None: - clause_to_args = [ - ( - "AND type = ? 
AND state_key = ?", - (etype, state_key) - ) if state_key is not None else ( - "AND type = ?", - (etype,) - ) - for etype, state_key in types - ] - else: - # If types is None we fetch all the state, and so just use an - # empty where clause with no extra args. - clause_to_args = [("", [])] - - for where_clause, where_args in clause_to_args: - for group in groups: - args = [group] - args.extend(where_args) - - txn.execute(sql % (where_clause,), args) - for row in txn: - typ, state_key, event_id = row - key = (typ, state_key) - results[group][key] = event_id + txn.execute(sql + where_clause, args) + for row in txn: + typ, state_key, event_id = row + key = (typ, state_key) + results[group][key] = event_id else: - where_args = [] - where_clauses = [] - wildcard_types = False - if types is not None: - for typ in types: - if typ[1] is None: - where_clauses.append("(type = ?)") - where_args.append(typ[0]) - wildcard_types = True - else: - where_clauses.append("(type = ? AND state_key = ?)") - where_args.extend([typ[0], typ[1]]) - - where_clause = "AND (%s)" % (" OR ".join(where_clauses)) - else: - where_clause = "" - - if members is True: - where_clause += " AND type = '%s'" % EventTypes.Member - elif members is False: - where_clause += " AND type <> '%s'" % EventTypes.Member + max_entries_returned = state_filter.max_entries_returned() # We don't use WITH RECURSIVE on sqlite3 as there are distributions # that ship with an sqlite3 version that doesn't support it (e.g. wheezy) @@ -460,12 +706,11 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): # without the right indices (which we can't add until # after we finish deduping state, which requires this func) args = [next_group] - if types: - args.extend(where_args) + args.extend(where_args) txn.execute( "SELECT type, state_key, event_id FROM state_groups_state" - " WHERE state_group = ? %s" % (where_clause,), + " WHERE state_group = ? " + where_clause, args ) results[group].update( @@ -481,9 +726,8 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): # wildcards (i.e. Nones) in which case we have to do an exhaustive # search if ( - types is not None and - not wildcard_types and - len(results[group]) == len(types) + max_entries_returned is not None and + len(results[group]) == max_entries_returned ): break @@ -498,20 +742,14 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): return results @defer.inlineCallbacks - def get_state_for_events(self, event_ids, types, filtered_types=None): + def get_state_for_events(self, event_ids, state_filter=StateFilter.all()): """Given a list of event_ids and type tuples, return a list of state - dicts for each event. The state dicts will only have the type/state_keys - that are in the `types` list. + dicts for each event. Args: event_ids (list[string]) - types (list[(str, str|None)]|None): List of (type, state_key) tuples - which are used to filter the state fetched. If `state_key` is None, - all events are returned of the given type. - May be None, which matches any key. - filtered_types(list[str]|None): Only apply filtering via `types` to this - list of event types. Other types of events are returned unfiltered. - If None, `types` filtering is applied to all events. + state_filter (StateFilter): The state filter used to fetch state + from the database. 
Returns: deferred: A dict of (event_id) -> (type, state_key) -> [state_events] @@ -521,7 +759,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): ) groups = set(itervalues(event_to_groups)) - group_to_state = yield self._get_state_for_groups(groups, types, filtered_types) + group_to_state = yield self._get_state_for_groups(groups, state_filter) state_event_map = yield self.get_events( [ev_id for sd in itervalues(group_to_state) for ev_id in itervalues(sd)], @@ -540,20 +778,15 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): defer.returnValue({event: event_to_state[event] for event in event_ids}) @defer.inlineCallbacks - def get_state_ids_for_events(self, event_ids, types=None, filtered_types=None): + def get_state_ids_for_events(self, event_ids, state_filter=StateFilter.all()): """ Get the state dicts corresponding to a list of events, containing the event_ids of the state events (as opposed to the events themselves) Args: event_ids(list(str)): events whose state should be returned - types(list[(str, str|None)]|None): List of (type, state_key) tuples - which are used to filter the state fetched. If `state_key` is None, - all events are returned of the given type. - May be None, which matches any key. - filtered_types(list[str]|None): Only apply filtering via `types` to this - list of event types. Other types of events are returned unfiltered. - If None, `types` filtering is applied to all events. + state_filter (StateFilter): The state filter used to fetch state + from the database. Returns: A deferred dict from event_id -> (type, state_key) -> event_id @@ -563,7 +796,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): ) groups = set(itervalues(event_to_groups)) - group_to_state = yield self._get_state_for_groups(groups, types, filtered_types) + group_to_state = yield self._get_state_for_groups(groups, state_filter) event_to_state = { event_id: group_to_state[group] @@ -573,45 +806,35 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): defer.returnValue({event: event_to_state[event] for event in event_ids}) @defer.inlineCallbacks - def get_state_for_event(self, event_id, types=None, filtered_types=None): + def get_state_for_event(self, event_id, state_filter=StateFilter.all()): """ Get the state dict corresponding to a particular event Args: event_id(str): event whose state should be returned - types(list[(str, str|None)]|None): List of (type, state_key) tuples - which are used to filter the state fetched. If `state_key` is None, - all events are returned of the given type. - May be None, which matches any key. - filtered_types(list[str]|None): Only apply filtering via `types` to this - list of event types. Other types of events are returned unfiltered. - If None, `types` filtering is applied to all events. + state_filter (StateFilter): The state filter used to fetch state + from the database. 
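The lazy-loading read path reduces to the same shape via the from_lazy_load_member_list factory above; a sketch with illustrative user IDs (again assuming a store in scope):

    # all non-member state, plus membership for two specific users
    f = StateFilter.from_lazy_load_member_list([
        "@alice:example.com",
        "@bob:example.com",
    ])
    # i.e. StateFilter(types={"m.room.member": {...}}, include_others=True)
    state_ids = yield store.get_state_ids_for_event(event_id, state_filter=f)
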
Returns: A deferred dict from (type, state_key) -> state_event """ - state_map = yield self.get_state_for_events([event_id], types, filtered_types) + state_map = yield self.get_state_for_events([event_id], state_filter) defer.returnValue(state_map[event_id]) @defer.inlineCallbacks - def get_state_ids_for_event(self, event_id, types=None, filtered_types=None): + def get_state_ids_for_event(self, event_id, state_filter=StateFilter.all()): """ Get the state dict corresponding to a particular event Args: event_id(str): event whose state should be returned - types(list[(str, str|None)]|None): List of (type, state_key) tuples - which are used to filter the state fetched. If `state_key` is None, - all events are returned of the given type. - May be None, which matches any key. - filtered_types(list[str]|None): Only apply filtering via `types` to this - list of event types. Other types of events are returned unfiltered. - If None, `types` filtering is applied to all events. + state_filter (StateFilter): The state filter used to fetch state + from the database. Returns: A deferred dict from (type, state_key) -> state_event """ - state_map = yield self.get_state_ids_for_events([event_id], types, filtered_types) + state_map = yield self.get_state_ids_for_events([event_id], state_filter) defer.returnValue(state_map[event_id]) @cached(max_entries=50000) @@ -642,18 +865,14 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): defer.returnValue({row["event_id"]: row["state_group"] for row in rows}) - def _get_some_state_from_cache(self, cache, group, types, filtered_types=None): + def _get_state_for_group_using_cache(self, cache, group, state_filter): """Checks if group is in cache. See `_get_state_for_groups` Args: cache(DictionaryCache): the state group cache to use group(int): The state group to lookup - types(list[str, str|None]): List of 2-tuples of the form - (`type`, `state_key`), where a `state_key` of `None` matches all - state_keys for the `type`. - filtered_types(list[str]|None): Only apply filtering via `types` to this - list of event types. Other types of events are returned unfiltered. - If None, `types` filtering is applied to all events. + state_filter (StateFilter): The state filter used to fetch state + from the database. Returns 2-tuple (`state_dict`, `got_all`). `got_all` is a bool indicating if we successfully retrieved all @@ -662,124 +881,102 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): """ is_all, known_absent, state_dict_ids = cache.get(group) - type_to_key = {} + if is_all or state_filter.is_full(): + # Either we have everything or want everything, either way + # `is_all` tells us whether we've gotten everything. + return state_filter.filter_state(state_dict_ids), is_all # tracks whether any of our requested types are missing from the cache missing_types = False - for typ, state_key in types: - key = (typ, state_key) - - if ( - state_key is None or - (filtered_types is not None and typ not in filtered_types) - ): - type_to_key[typ] = None - # we mark the type as missing from the cache because - # when the cache was populated it might have been done with a - # restricted set of state_keys, so the wildcard will not work - # and the cache may be incomplete. 
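Condensed, the rule the rewritten lookup implements is: a partial cache can prove a concrete key present (or known-absent), but it can never prove a wildcard complete. A paraphrase of the surrounding diff, not a drop-in:

    is_all, known_absent, state_dict_ids = cache.get(group)

    if is_all or state_filter.is_full():
        got_all = is_all                  # trivially decided either way
    elif state_filter.has_wildcards():
        got_all = False                   # a partial cache can't be trusted
    else:
        got_all = all(
            key in state_dict_ids or key in known_absent
            for key in state_filter.concrete_types()
        )
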
- missing_types = True - else: - if type_to_key.get(typ, object()) is not None: - type_to_key.setdefault(typ, set()).add(state_key) - + if state_filter.has_wildcards(): + # We don't know if we fetched all the state keys for the types in + # the filter that are wildcards, so we have to assume that we may + # have missed some. + missing_types = True + else: + # There aren't any wild cards, so `concrete_types()` returns the + # complete list of event types we're wanting. + for key in state_filter.concrete_types(): if key not in state_dict_ids and key not in known_absent: missing_types = True + break - sentinel = object() - - def include(typ, state_key): - valid_state_keys = type_to_key.get(typ, sentinel) - if valid_state_keys is sentinel: - return filtered_types is not None and typ not in filtered_types - if valid_state_keys is None: - return True - if state_key in valid_state_keys: - return True - return False - - got_all = is_all - if not got_all: - # the cache is incomplete. We may still have got all the results we need, if - # we don't have any wildcards in the match list. - if not missing_types and filtered_types is None: - got_all = True - - return { - k: v for k, v in iteritems(state_dict_ids) - if include(k[0], k[1]) - }, got_all - - def _get_all_state_from_cache(self, cache, group): - """Checks if group is in cache. See `_get_state_for_groups` - - Returns 2-tuple (`state_dict`, `got_all`). `got_all` is a bool - indicating if we successfully retrieved all requests state from the - cache, if False we need to query the DB for the missing state. - - Args: - cache(DictionaryCache): the state group cache to use - group: The state group to lookup - """ - is_all, _, state_dict_ids = cache.get(group) - - return state_dict_ids, is_all + return state_filter.filter_state(state_dict_ids), not missing_types @defer.inlineCallbacks - def _get_state_for_groups(self, groups, types=None, filtered_types=None): + def _get_state_for_groups(self, groups, state_filter=StateFilter.all()): """Gets the state at each of a list of state groups, optionally filtering by type/state_key Args: groups (iterable[int]): list of state groups for which we want to get the state. - types (None|iterable[(str, None|str)]): - indicates the state type/keys required. If None, the whole - state is fetched and returned. - - Otherwise, each entry should be a `(type, state_key)` tuple to - include in the response. A `state_key` of None is a wildcard - meaning that we require all state with that type. - filtered_types(list[str]|None): Only apply filtering via `types` to this - list of event types. Other types of events are returned unfiltered. - If None, `types` filtering is applied to all events. - + state_filter (StateFilter): The state filter used to fetch state + from the database. 
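The member/non-member split that this code path leans on looks like this in isolation (a sketch; the types and state keys are illustrative):

    from synapse.storage.state import StateFilter

    f = StateFilter(
        types={
            "m.room.member": {"@alice:example.com"},
            "m.room.name": {""},
        },
        include_others=False,
    )
    member_f, non_member_f = f.get_member_split()
    # member_f     == StateFilter(types={"m.room.member": {"@alice:example.com"}},
    #                             include_others=False)
    # non_member_f == StateFilter(types={"m.room.name": {""}},
    #                             include_others=False)
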
Returns: Deferred[dict[int, dict[tuple[str, str], str]]]: dict of state_group_id -> (dict of (type, state_key) -> event id) """ - if types is not None: - non_member_types = [t for t in types if t[0] != EventTypes.Member] - if filtered_types is not None and EventTypes.Member not in filtered_types: - # we want all of the membership events - member_types = None - else: - member_types = [t for t in types if t[0] == EventTypes.Member] + member_filter, non_member_filter = state_filter.get_member_split() - else: - non_member_types = None - member_types = None - - non_member_state = yield self._get_state_for_groups_using_cache( - groups, self._state_group_cache, non_member_types, filtered_types, - ) - # XXX: we could skip this entirely if member_types is [] - member_state = yield self._get_state_for_groups_using_cache( - # we set filtered_types=None as member_state only ever contain members. - groups, self._state_group_members_cache, member_types, None, + # Now we look them up in the member and non-member caches + non_member_state, incomplete_groups_nm, = ( + yield self._get_state_for_groups_using_cache( + groups, self._state_group_cache, + state_filter=non_member_filter, + ) ) - state = non_member_state + member_state, incomplete_groups_m, = ( + yield self._get_state_for_groups_using_cache( + groups, self._state_group_members_cache, + state_filter=member_filter, + ) + ) + + state = dict(non_member_state) for group in groups: state[group].update(member_state[group]) + # Now fetch any missing groups from the database + + incomplete_groups = incomplete_groups_m | incomplete_groups_nm + + if not incomplete_groups: + defer.returnValue(state) + + cache_sequence_nm = self._state_group_cache.sequence + cache_sequence_m = self._state_group_members_cache.sequence + + # Help the cache hit ratio by expanding the filter a bit + db_state_filter = state_filter.return_expanded() + + group_to_state_dict = yield self._get_state_groups_from_groups( + list(incomplete_groups), + state_filter=db_state_filter, + ) + + # Now lets update the caches + self._insert_into_cache( + group_to_state_dict, + db_state_filter, + cache_seq_num_members=cache_sequence_m, + cache_seq_num_non_members=cache_sequence_nm, + ) + + # And finally update the result dict, by filtering out any extra + # stuff we pulled out of the database. + for group, group_state_dict in iteritems(group_to_state_dict): + # We just replace any existing entries, as we will have loaded + # everything we need from the database anyway. + state[group] = state_filter.filter_state(group_state_dict) + defer.returnValue(state) - @defer.inlineCallbacks def _get_state_for_groups_using_cache( - self, groups, cache, types=None, filtered_types=None + self, groups, cache, state_filter, ): """Gets the state at each of a list of state groups, optionally filtering by type/state_key, querying from a specific cache. @@ -790,89 +987,85 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): cache (DictionaryCache): the cache of group ids to state dicts which we will pass through - either the normal state cache or the specific members state cache. - types (None|iterable[(str, None|str)]): - indicates the state type/keys required. If None, the whole - state is fetched and returned. - - Otherwise, each entry should be a `(type, state_key)` tuple to - include in the response. A `state_key` of None is a wildcard - meaning that we require all state with that type. - filtered_types(list[str]|None): Only apply filtering via `types` to this - list of event types. 
Other types of events are returned unfiltered. - If None, `types` filtering is applied to all events. + state_filter (StateFilter): The state filter used to fetch state + from the database. Returns: - Deferred[dict[int, dict[tuple[str, str], str]]]: - dict of state_group_id -> (dict of (type, state_key) -> event id) + tuple[dict[int, dict[tuple[str, str], str]], set[int]]: Tuple of + dict of state_group_id -> (dict of (type, state_key) -> event id) + of entries in the cache, and the state group ids either missing + from the cache or incomplete. """ - if types: - types = frozenset(types) results = {} - missing_groups = [] - if types is not None: - for group in set(groups): - state_dict_ids, got_all = self._get_some_state_from_cache( - cache, group, types, filtered_types - ) - results[group] = state_dict_ids + incomplete_groups = set() + for group in set(groups): + state_dict_ids, got_all = self._get_state_for_group_using_cache( + cache, group, state_filter + ) + results[group] = state_dict_ids - if not got_all: - missing_groups.append(group) + if not got_all: + incomplete_groups.add(group) + + return results, incomplete_groups + + def _insert_into_cache(self, group_to_state_dict, state_filter, + cache_seq_num_members, cache_seq_num_non_members): + """Inserts results from querying the database into the relevant cache. + + Args: + group_to_state_dict (dict): The new entries pulled from database. + Map from state group to state dict + state_filter (StateFilter): The state filter used to fetch state + from the database. + cache_seq_num_members (int): Sequence number of member cache since + last lookup in cache + cache_seq_num_non_members (int): Sequence number of member cache since + last lookup in cache + """ + + # We need to work out which types we've fetched from the DB for the + # member vs non-member caches. This should be as accurate as possible, + # but can be an underestimate (e.g. when we have wild cards) + + member_filter, non_member_filter = state_filter.get_member_split() + if member_filter.is_full(): + # We fetched all member events + member_types = None else: - for group in set(groups): - state_dict_ids, got_all = self._get_all_state_from_cache( - cache, group - ) + # `concrete_types()` will only return a subset when there are wild + # cards in the filter, but that's fine. + member_types = member_filter.concrete_types() - results[group] = state_dict_ids + if non_member_filter.is_full(): + # We fetched all non member events + non_member_types = None + else: + non_member_types = non_member_filter.concrete_types() - if not got_all: - missing_groups.append(group) + for group, group_state_dict in iteritems(group_to_state_dict): + state_dict_members = {} + state_dict_non_members = {} - if missing_groups: - # Okay, so we have some missing_types, let's fetch them. - cache_seq_num = cache.sequence + for k, v in iteritems(group_state_dict): + if k[0] == EventTypes.Member: + state_dict_members[k] = v + else: + state_dict_non_members[k] = v - # the DictionaryCache knows if it has *all* the state, but - # does not know if it has all of the keys of a particular type, - # which makes wildcard lookups expensive unless we have a complete - # cache. Hence, if we are doing a wildcard lookup, populate the - # cache fully so that we can do an efficient lookup next time. 
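return_expanded() is what keeps that promise. Its effect on a mixed filter, traced from the implementation above (types are illustrative):

    from synapse.storage.state import StateFilter

    f = StateFilter(
        types={
            "m.room.member": {"@alice:example.com"},  # members stay narrow
            "m.room.name": None,                      # a non-member wildcard
        },
        include_others=False,
    )
    f.return_expanded()
    # the member part is preserved, but the non-member wildcard forces the
    # whole non-member side open, so the cache ends up fully populated:
    # -> StateFilter(types={"m.room.member": {"@alice:example.com"}},
    #                include_others=True)
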
- - if filtered_types or (types and any(k is None for (t, k) in types)): - types_to_fetch = None - else: - types_to_fetch = types - - group_to_state_dict = yield self._get_state_groups_from_groups( - missing_groups, types_to_fetch, cache == self._state_group_members_cache, + self._state_group_members_cache.update( + cache_seq_num_members, + key=group, + value=state_dict_members, + fetched_keys=member_types, ) - for group, group_state_dict in iteritems(group_to_state_dict): - state_dict = results[group] - - # update the result, filtering by `types`. - if types: - for k, v in iteritems(group_state_dict): - (typ, _) = k - if ( - (k in types or (typ, None) in types) or - (filtered_types and typ not in filtered_types) - ): - state_dict[k] = v - else: - state_dict.update(group_state_dict) - - # update the cache with all the things we fetched from the - # database. - cache.update( - cache_seq_num, - key=group, - value=group_state_dict, - fetched_keys=types_to_fetch, - ) - - defer.returnValue(results) + self._state_group_cache.update( + cache_seq_num_non_members, + key=group, + value=state_dict_non_members, + fetched_keys=non_member_types, + ) def store_state_group(self, event_id, room_id, prev_group, delta_ids, current_state_ids): @@ -1181,12 +1374,12 @@ class StateStore(StateGroupWorkerStore, BackgroundUpdateStore): continue prev_state = self._get_state_groups_from_groups_txn( - txn, [prev_group], types=None + txn, [prev_group], ) prev_state = prev_state[prev_group] curr_state = self._get_state_groups_from_groups_txn( - txn, [state_group], types=None + txn, [state_group], ) curr_state = curr_state[state_group] diff --git a/synapse/visibility.py b/synapse/visibility.py index 43f48196b..0281a7c91 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -23,6 +23,7 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, Membership from synapse.events.utils import prune_event +from synapse.storage.state import StateFilter from synapse.types import get_domain_from_id logger = logging.getLogger(__name__) @@ -72,7 +73,7 @@ def filter_events_for_client(store, user_id, events, is_peeking=False, ) event_id_to_state = yield store.get_state_for_events( frozenset(e.event_id for e in events), - types=types, + state_filter=StateFilter.from_types(types), ) ignore_dict_content = yield store.get_global_account_data_by_type_for_user( @@ -273,8 +274,8 @@ def filter_events_for_server(store, server_name, events): # need to check membership (as we know the server is in the room). event_to_state_ids = yield store.get_state_ids_for_events( frozenset(e.event_id for e in events), - types=( - (EventTypes.RoomHistoryVisibility, ""), + state_filter=StateFilter.from_types( + types=((EventTypes.RoomHistoryVisibility, ""),), ) ) @@ -314,9 +315,11 @@ def filter_events_for_server(store, server_name, events): # of the history vis and membership state at those events. 
event_to_state_ids = yield store.get_state_ids_for_events( frozenset(e.event_id for e in events), - types=( - (EventTypes.RoomHistoryVisibility, ""), - (EventTypes.Member, None), + state_filter=StateFilter.from_types( + types=( + (EventTypes.RoomHistoryVisibility, ""), + (EventTypes.Member, None), + ), ) ) diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py index b9c5b39d5..086a39d83 100644 --- a/tests/storage/test_state.py +++ b/tests/storage/test_state.py @@ -18,6 +18,7 @@ import logging from twisted.internet import defer from synapse.api.constants import EventTypes, Membership +from synapse.storage.state import StateFilter from synapse.types import RoomID, UserID import tests.unittest @@ -148,7 +149,7 @@ class StateStoreTestCase(tests.unittest.TestCase): # check we get the full state as of the final event state = yield self.store.get_state_for_event( - e5.event_id, None, filtered_types=None + e5.event_id, ) self.assertIsNotNone(e4) @@ -166,33 +167,35 @@ class StateStoreTestCase(tests.unittest.TestCase): # check we can filter to the m.room.name event (with a '' state key) state = yield self.store.get_state_for_event( - e5.event_id, [(EventTypes.Name, '')], filtered_types=None + e5.event_id, StateFilter.from_types([(EventTypes.Name, '')]) ) self.assertStateMapEqual({(e2.type, e2.state_key): e2}, state) # check we can filter to the m.room.name event (with a wildcard None state key) state = yield self.store.get_state_for_event( - e5.event_id, [(EventTypes.Name, None)], filtered_types=None + e5.event_id, StateFilter.from_types([(EventTypes.Name, None)]) ) self.assertStateMapEqual({(e2.type, e2.state_key): e2}, state) # check we can grab the m.room.member events (with a wildcard None state key) state = yield self.store.get_state_for_event( - e5.event_id, [(EventTypes.Member, None)], filtered_types=None + e5.event_id, StateFilter.from_types([(EventTypes.Member, None)]) ) self.assertStateMapEqual( {(e3.type, e3.state_key): e3, (e5.type, e5.state_key): e5}, state ) - # check we can use filtered_types to grab a specific room member - # without filtering out the other event types + # check we can grab a specific room member without filtering out the + # other event types state = yield self.store.get_state_for_event( e5.event_id, - [(EventTypes.Member, self.u_alice.to_string())], - filtered_types=[EventTypes.Member], + state_filter=StateFilter( + types={EventTypes.Member: {self.u_alice.to_string()}}, + include_others=True, + ) ) self.assertStateMapEqual( @@ -204,10 +207,12 @@ class StateStoreTestCase(tests.unittest.TestCase): state, ) - # check that types=[], filtered_types=[EventTypes.Member] - # doesn't return all members + # check that we can grab everything except members state = yield self.store.get_state_for_event( - e5.event_id, [], filtered_types=[EventTypes.Member] + e5.event_id, state_filter=StateFilter( + types={EventTypes.Member: set()}, + include_others=True, + ), ) self.assertStateMapEqual( @@ -215,16 +220,21 @@ class StateStoreTestCase(tests.unittest.TestCase): ) ####################################################### - # _get_some_state_from_cache tests against a full cache + # _get_state_for_group_using_cache tests against a full cache ####################################################### room_id = self.room.to_string() group_ids = yield self.store.get_state_groups_ids(room_id, [e5.event_id]) group = list(group_ids.keys())[0] - # test _get_some_state_from_cache correctly filters out members with types=[] - (state_dict, is_all) = yield 
self.store._get_some_state_from_cache( - self.store._state_group_cache, group, [], filtered_types=[EventTypes.Member] + # test _get_state_for_group_using_cache correctly filters out members + # with types=[] + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( + self.store._state_group_cache, group, + state_filter=StateFilter( + types={EventTypes.Member: set()}, + include_others=True, + ), ) self.assertEqual(is_all, True) @@ -236,22 +246,27 @@ class StateStoreTestCase(tests.unittest.TestCase): state_dict, ) - (state_dict, is_all) = yield self.store._get_some_state_from_cache( + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( self.store._state_group_members_cache, group, - [], - filtered_types=[EventTypes.Member], + state_filter=StateFilter( + types={EventTypes.Member: set()}, + include_others=True, + ), ) self.assertEqual(is_all, True) self.assertDictEqual({}, state_dict) - # test _get_some_state_from_cache correctly filters in members with wildcard types - (state_dict, is_all) = yield self.store._get_some_state_from_cache( + # test _get_state_for_group_using_cache correctly filters in members + # with wildcard types + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( self.store._state_group_cache, group, - [(EventTypes.Member, None)], - filtered_types=[EventTypes.Member], + state_filter=StateFilter( + types={EventTypes.Member: None}, + include_others=True, + ), ) self.assertEqual(is_all, True) @@ -263,11 +278,13 @@ class StateStoreTestCase(tests.unittest.TestCase): state_dict, ) - (state_dict, is_all) = yield self.store._get_some_state_from_cache( + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( self.store._state_group_members_cache, group, - [(EventTypes.Member, None)], - filtered_types=[EventTypes.Member], + state_filter=StateFilter( + types={EventTypes.Member: None}, + include_others=True, + ), ) self.assertEqual(is_all, True) @@ -280,12 +297,15 @@ class StateStoreTestCase(tests.unittest.TestCase): state_dict, ) - # test _get_some_state_from_cache correctly filters in members with specific types - (state_dict, is_all) = yield self.store._get_some_state_from_cache( + # test _get_state_for_group_using_cache correctly filters in members + # with specific types + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( self.store._state_group_cache, group, - [(EventTypes.Member, e5.state_key)], - filtered_types=[EventTypes.Member], + state_filter=StateFilter( + types={EventTypes.Member: {e5.state_key}}, + include_others=True, + ), ) self.assertEqual(is_all, True) @@ -297,23 +317,27 @@ class StateStoreTestCase(tests.unittest.TestCase): state_dict, ) - (state_dict, is_all) = yield self.store._get_some_state_from_cache( + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( self.store._state_group_members_cache, group, - [(EventTypes.Member, e5.state_key)], - filtered_types=[EventTypes.Member], + state_filter=StateFilter( + types={EventTypes.Member: {e5.state_key}}, + include_others=True, + ), ) self.assertEqual(is_all, True) self.assertDictEqual({(e5.type, e5.state_key): e5.event_id}, state_dict) - # test _get_some_state_from_cache correctly filters in members with specific types - # and no filtered_types - (state_dict, is_all) = yield self.store._get_some_state_from_cache( + # test _get_state_for_group_using_cache correctly filters in members + # with specific types + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( 
self.store._state_group_members_cache, group, - [(EventTypes.Member, e5.state_key)], - filtered_types=None, + state_filter=StateFilter( + types={EventTypes.Member: {e5.state_key}}, + include_others=False, + ), ) self.assertEqual(is_all, True) @@ -357,42 +381,54 @@ class StateStoreTestCase(tests.unittest.TestCase): ############################################ # test that things work with a partial cache - # test _get_some_state_from_cache correctly filters out members with types=[] + # test _get_state_for_group_using_cache correctly filters out members + # with types=[] room_id = self.room.to_string() - (state_dict, is_all) = yield self.store._get_some_state_from_cache( - self.store._state_group_cache, group, [], filtered_types=[EventTypes.Member] + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( + self.store._state_group_cache, group, + state_filter=StateFilter( + types={EventTypes.Member: set()}, + include_others=True, + ), ) self.assertEqual(is_all, False) self.assertDictEqual({(e1.type, e1.state_key): e1.event_id}, state_dict) room_id = self.room.to_string() - (state_dict, is_all) = yield self.store._get_some_state_from_cache( + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( self.store._state_group_members_cache, group, - [], - filtered_types=[EventTypes.Member], + state_filter=StateFilter( + types={EventTypes.Member: set()}, + include_others=True, + ), ) self.assertEqual(is_all, True) self.assertDictEqual({}, state_dict) - # test _get_some_state_from_cache correctly filters in members wildcard types - (state_dict, is_all) = yield self.store._get_some_state_from_cache( + # test _get_state_for_group_using_cache correctly filters in members + # wildcard types + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( self.store._state_group_cache, group, - [(EventTypes.Member, None)], - filtered_types=[EventTypes.Member], + state_filter=StateFilter( + types={EventTypes.Member: None}, + include_others=True, + ), ) self.assertEqual(is_all, False) self.assertDictEqual({(e1.type, e1.state_key): e1.event_id}, state_dict) - (state_dict, is_all) = yield self.store._get_some_state_from_cache( + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( self.store._state_group_members_cache, group, - [(EventTypes.Member, None)], - filtered_types=[EventTypes.Member], + state_filter=StateFilter( + types={EventTypes.Member: None}, + include_others=True, + ), ) self.assertEqual(is_all, True) @@ -404,44 +440,53 @@ class StateStoreTestCase(tests.unittest.TestCase): state_dict, ) - # test _get_some_state_from_cache correctly filters in members with specific types - (state_dict, is_all) = yield self.store._get_some_state_from_cache( + # test _get_state_for_group_using_cache correctly filters in members + # with specific types + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( self.store._state_group_cache, group, - [(EventTypes.Member, e5.state_key)], - filtered_types=[EventTypes.Member], + state_filter=StateFilter( + types={EventTypes.Member: {e5.state_key}}, + include_others=True, + ), ) self.assertEqual(is_all, False) self.assertDictEqual({(e1.type, e1.state_key): e1.event_id}, state_dict) - (state_dict, is_all) = yield self.store._get_some_state_from_cache( + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( self.store._state_group_members_cache, group, - [(EventTypes.Member, e5.state_key)], - filtered_types=[EventTypes.Member], + state_filter=StateFilter( + 
types={EventTypes.Member: {e5.state_key}}, + include_others=True, + ), ) self.assertEqual(is_all, True) self.assertDictEqual({(e5.type, e5.state_key): e5.event_id}, state_dict) - # test _get_some_state_from_cache correctly filters in members with specific types - # and no filtered_types - (state_dict, is_all) = yield self.store._get_some_state_from_cache( + # test _get_state_for_group_using_cache correctly filters in members + # with specific types + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( self.store._state_group_cache, group, - [(EventTypes.Member, e5.state_key)], - filtered_types=None, + state_filter=StateFilter( + types={EventTypes.Member: {e5.state_key}}, + include_others=False, + ), ) self.assertEqual(is_all, False) self.assertDictEqual({}, state_dict) - (state_dict, is_all) = yield self.store._get_some_state_from_cache( + (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( self.store._state_group_members_cache, group, - [(EventTypes.Member, e5.state_key)], - filtered_types=None, + state_filter=StateFilter( + types={EventTypes.Member: {e5.state_key}}, + include_others=False, + ), ) self.assertEqual(is_all, True) From 871c4abfecfd14acda13e3f25c7d040f848a9a32 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 25 Oct 2018 17:40:41 +0100 Subject: [PATCH 49/85] Factor _generate_room_id out of create_room we're going to need this for room upgrades. --- synapse/handlers/room.py | 45 ++++++++++++++++++++-------------------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 3ba92bdb4..000a22b07 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -165,28 +165,7 @@ class RoomCreationHandler(BaseHandler): visibility = config.get("visibility", None) is_public = visibility == "public" - # autogen room IDs and try to create it. We may clash, so just - # try a few times till one goes through, giving up eventually. - attempts = 0 - room_id = None - while attempts < 5: - try: - random_string = stringutils.random_string(18) - gen_room_id = RoomID( - random_string, - self.hs.hostname, - ) - yield self.store.store_room( - room_id=gen_room_id.to_string(), - room_creator_user_id=user_id, - is_public=is_public - ) - room_id = gen_room_id.to_string() - break - except StoreError: - attempts += 1 - if not room_id: - raise StoreError(500, "Couldn't generate a room ID.") + room_id = yield self._generate_room_id(creator_id=user_id, is_public=is_public) if room_alias: directory_handler = self.hs.get_handlers().directory_handler @@ -427,6 +406,28 @@ class RoomCreationHandler(BaseHandler): content=content, ) + @defer.inlineCallbacks + def _generate_room_id(self, creator_id, is_public): + # autogen room IDs and try to create it. We may clash, so just + # try a few times till one goes through, giving up eventually. 
+ attempts = 0 + while attempts < 5: + try: + random_string = stringutils.random_string(18) + gen_room_id = RoomID( + random_string, + self.hs.hostname, + ).to_string() + yield self.store.store_room( + room_id=gen_room_id, + room_creator_user_id=creator_id, + is_public=is_public, + ) + defer.returnValue(gen_room_id) + except StoreError: + attempts += 1 + raise StoreError(500, "Couldn't generate a room ID.") + class RoomContextHandler(object): def __init__(self, hs): From 379376e5e6ae242d9a69f0ec738758a649058a82 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Laudrel?= Date: Thu, 25 Oct 2018 16:36:07 +0200 Subject: [PATCH 50/85] Make Docker image listening on ipv6 as well as ipv4 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Cédric Laudrel --- changelog.d/4089.feature | 1 + docker/conf/homeserver.yaml | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/4089.feature diff --git a/changelog.d/4089.feature b/changelog.d/4089.feature new file mode 100644 index 000000000..62c9d839b --- /dev/null +++ b/changelog.d/4089.feature @@ -0,0 +1 @@ + Configure Docker image to listen on both ipv4 and ipv6. diff --git a/docker/conf/homeserver.yaml b/docker/conf/homeserver.yaml index a38b929f5..1b0f655d2 100644 --- a/docker/conf/homeserver.yaml +++ b/docker/conf/homeserver.yaml @@ -21,7 +21,7 @@ listeners: {% if not SYNAPSE_NO_TLS %} - port: 8448 - bind_addresses: ['0.0.0.0'] + bind_addresses: ['::'] type: http tls: true x_forwarded: false @@ -34,7 +34,7 @@ listeners: - port: 8008 tls: false - bind_addresses: ['0.0.0.0'] + bind_addresses: ['::'] type: http x_forwarded: false From 7f7b2cd3de192816bcb0225774a22617989aec37 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 25 Oct 2018 17:42:37 +0100 Subject: [PATCH 51/85] Make room_member_handler a member of RoomCreationHandler ... 
to save passing it into `_send_events_for_new_room` --- synapse/handlers/register.py | 6 ++++-- synapse/handlers/room.py | 9 +++------ 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index e9d7b25a3..7b4549223 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -50,7 +50,6 @@ class RegistrationHandler(BaseHandler): self._auth_handler = hs.get_auth_handler() self.profile_handler = hs.get_profile_handler() self.user_directory_handler = hs.get_user_directory_handler() - self.room_creation_handler = self.hs.get_room_creation_handler() self.captcha_client = CaptchaServerHttpClient(hs) self._next_generated_user_id = None @@ -241,7 +240,10 @@ class RegistrationHandler(BaseHandler): else: # create room expects the localpart of the room alias room_alias_localpart = room_alias.localpart - yield self.room_creation_handler.create_room( + + # getting the RoomCreationHandler during init gives a dependency + # loop + yield self.hs.get_room_creation_handler().create_room( fake_requester, config={ "preset": "public_chat", diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 000a22b07..d03d2cd7b 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -73,6 +73,7 @@ class RoomCreationHandler(BaseHandler): self.spam_checker = hs.get_spam_checker() self.event_creation_handler = hs.get_event_creation_handler() + self.room_member_handler = hs.get_room_member_handler() @defer.inlineCallbacks def create_room(self, requester, config, ratelimit=True, @@ -195,12 +196,9 @@ class RoomCreationHandler(BaseHandler): # override any attempt to set room versions via the creation_content creation_content["room_version"] = room_version - room_member_handler = self.hs.get_room_member_handler() - yield self._send_events_for_new_room( requester, room_id, - room_member_handler, preset_config=preset_config, invite_list=invite_list, initial_state=initial_state, @@ -242,7 +240,7 @@ class RoomCreationHandler(BaseHandler): if is_direct: content["is_direct"] = is_direct - yield room_member_handler.update_membership( + yield self.room_member_handler.update_membership( requester, UserID.from_string(invitee), room_id, @@ -280,7 +278,6 @@ class RoomCreationHandler(BaseHandler): self, creator, # A Requester object. 
room_id, - room_member_handler, preset_config, invite_list, initial_state, @@ -325,7 +322,7 @@ class RoomCreationHandler(BaseHandler): content=creation_content, ) - yield room_member_handler.update_membership( + yield self.room_member_handler.update_membership( creator, creator.user, room_id, From e1948175ee7fc469c985b58a01ecc2eb577e5e0a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 25 Oct 2018 17:50:06 +0100 Subject: [PATCH 52/85] Allow power_level_content_override=None for _send_events_for_new_room --- synapse/handlers/room.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index d03d2cd7b..d42c2c41c 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -204,7 +204,7 @@ class RoomCreationHandler(BaseHandler): initial_state=initial_state, creation_content=creation_content, room_alias=room_alias, - power_level_content_override=config.get("power_level_content_override", {}), + power_level_content_override=config.get("power_level_content_override"), creator_join_profile=creator_join_profile, ) @@ -282,9 +282,9 @@ class RoomCreationHandler(BaseHandler): invite_list, initial_state, creation_content, - room_alias, - power_level_content_override, - creator_join_profile, + room_alias=None, + power_level_content_override=None, + creator_join_profile=None, ): def create(etype, content, **kwargs): e = { @@ -364,7 +364,8 @@ class RoomCreationHandler(BaseHandler): for invitee in invite_list: power_level_content["users"][invitee] = 100 - power_level_content.update(power_level_content_override) + if power_level_content_override: + power_level_content.update(power_level_content_override) yield send( etype=EventTypes.PowerLevels, From 0f7d1c99061075fe54a37cfe785184f095addf78 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 22 Aug 2018 10:57:54 +0100 Subject: [PATCH 53/85] Basic initial support for room upgrades Currently just creates a new, empty, room, and sends a tombstone in the old room. 
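In other words, the whole upgrade is expressed through two linked events. Their approximate shapes, with made-up identifiers and room version (see the diff below for the authoritative construction):

    # in the old room: a tombstone pointing forwards
    tombstone = {
        "type": "m.room.tombstone",
        "state_key": "",
        "sender": "@admin:example.com",
        "content": {
            "body": "This room has been replaced",
            "replacement_room": "!new:example.com",
        },
    }

    # in the new room: creation content pointing backwards
    create_content = {
        "room_version": "2",
        "predecessor": {
            "room_id": "!old:example.com",
            "event_id": "$tombstone_event_id",
        },
    }
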
--- synapse/api/constants.py | 1 + synapse/handlers/room.py | 121 ++++++++++++++++++ synapse/rest/__init__.py | 2 + .../v2_alpha/room_upgrade_rest_servlet.py | 78 +++++++++++ synapse/server.pyi | 6 + 5 files changed, 208 insertions(+) create mode 100644 synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py diff --git a/synapse/api/constants.py b/synapse/api/constants.py index c2630c4c6..5565e516d 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -61,6 +61,7 @@ class LoginType(object): class EventTypes(object): Member = "m.room.member" Create = "m.room.create" + Tombstone = "m.room.tombstone" JoinRules = "m.room.join_rules" PowerLevels = "m.room.power_levels" Aliases = "m.room.aliases" diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index d42c2c41c..3cce6f615 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -36,6 +36,7 @@ from synapse.api.errors import AuthError, Codes, StoreError, SynapseError from synapse.storage.state import StateFilter from synapse.types import RoomAlias, RoomID, RoomStreamToken, StreamToken, UserID from synapse.util import stringutils +from synapse.util.async_helpers import Linearizer from synapse.visibility import filter_events_for_client from ._base import BaseHandler @@ -75,6 +76,124 @@ class RoomCreationHandler(BaseHandler): self.event_creation_handler = hs.get_event_creation_handler() self.room_member_handler = hs.get_room_member_handler() + # linearizer to stop two upgrades happening at once + self._upgrade_linearizer = Linearizer("room_upgrade_linearizer") + + @defer.inlineCallbacks + def upgrade_room(self, requester, old_room_id, new_version): + """Replace a room with a new room with a different version + + Args: + requester (synapse.types.Requester): the user requesting the upgrade + old_room_id (unicode): the id of the room to be replaced + new_version (unicode): the new room version to use + + Returns: + Deferred[unicode]: the new room id + """ + yield self.ratelimit(requester) + + user_id = requester.user.to_string() + + with (yield self._upgrade_linearizer.queue(old_room_id)): + # start by allocating a new room id + is_public = False # XXX fixme + new_room_id = yield self._generate_room_id( + creator_id=user_id, is_public=is_public, + ) + + # we create and auth the tombstone event before properly creating the new + # room, to check our user has perms in the old room. 
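As an aside on the _upgrade_linearizer above: the Linearizer gives per-key mutual exclusion, so concurrent upgrade requests for the same room queue behind one another while different rooms proceed in parallel. The pattern in isolation (do_upgrade is a hypothetical stand-in):

    from twisted.internet import defer
    from synapse.util.async_helpers import Linearizer

    linearizer = Linearizer("room_upgrade_linearizer")

    @defer.inlineCallbacks
    def upgrade(old_room_id):
        with (yield linearizer.queue(old_room_id)):
            # at most one upgrade per room id runs inside this block
            yield do_upgrade(old_room_id)  # hypothetical helper
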
+ tombstone_event, tombstone_context = ( + yield self.event_creation_handler.create_event( + requester, { + "type": EventTypes.Tombstone, + "state_key": "", + "room_id": old_room_id, + "sender": user_id, + "content": { + "body": "This room has been replaced", + "replacement_room": new_room_id, + } + }, + token_id=requester.access_token_id, + ) + ) + yield self.auth.check_from_context(tombstone_event, tombstone_context) + + yield self.clone_exiting_room( + requester, + old_room_id=old_room_id, + new_room_id=new_room_id, + new_room_version=new_version, + tombstone_event_id=tombstone_event.event_id, + ) + + # now send the tombstone + yield self.event_creation_handler.send_nonmember_event( + requester, tombstone_event, tombstone_context, + ) + + # XXX send a power_levels in the old room, if possible + + defer.returnValue(new_room_id) + + @defer.inlineCallbacks + def clone_exiting_room( + self, requester, old_room_id, new_room_id, new_room_version, + tombstone_event_id, + ): + """Populate a new room based on an old room + + Args: + requester (synapse.types.Requester): the user requesting the upgrade + old_room_id (unicode): the id of the room to be replaced + new_room_id (unicode): the id to give the new room (should already have been + created with _gemerate_room_id()) + new_room_version (unicode): the new room version to use + tombstone_event_id (unicode|str): the ID of the tombstone event in the old + room. + Returns: + Deferred[None] + """ + user_id = requester.user.to_string() + + if not self.spam_checker.user_may_create_room(user_id): + raise SynapseError(403, "You are not permitted to create rooms") + + # XXX check alias is free + # canonical_alias = None + + # XXX create association in directory handler + # XXX preset + + preset_config = RoomCreationPreset.PRIVATE_CHAT + + creation_content = { + "room_version": new_room_version, + "predecessor": { + "room_id": old_room_id, + "event_id": tombstone_event_id, + } + } + + initial_state = OrderedDict() + + yield self._send_events_for_new_room( + requester, + new_room_id, + preset_config=preset_config, + invite_list=[], + initial_state=initial_state, + creation_content=creation_content, + ) + + # XXX name + # XXX topic + # XXX invites/joins + # XXX 3pid invites + # XXX directory_handler.send_room_alias_update_event + @defer.inlineCallbacks def create_room(self, requester, config, ratelimit=True, creator_join_profile=None): @@ -416,6 +535,8 @@ class RoomCreationHandler(BaseHandler): random_string, self.hs.hostname, ).to_string() + if isinstance(gen_room_id, bytes): + gen_room_id = gen_room_id.decode('utf-8') yield self.store.store_room( room_id=gen_room_id, room_creator_user_id=creator_id, diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py index 4856822a5..5f35c2d1b 100644 --- a/synapse/rest/__init__.py +++ b/synapse/rest/__init__.py @@ -47,6 +47,7 @@ from synapse.rest.client.v2_alpha import ( register, report_event, room_keys, + room_upgrade_rest_servlet, sendtodevice, sync, tags, @@ -116,3 +117,4 @@ class ClientRestResource(JsonResource): sendtodevice.register_servlets(hs, client_resource) user_directory.register_servlets(hs, client_resource) groups.register_servlets(hs, client_resource) + room_upgrade_rest_servlet.register_servlets(hs, client_resource) diff --git a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py new file mode 100644 index 000000000..1b195f90c --- /dev/null +++ b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py @@ -0,0 +1,78 @@ +# 
-*- coding: utf-8 -*- +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +from twisted.internet import defer + +from synapse.api.constants import KNOWN_ROOM_VERSIONS +from synapse.api.errors import Codes, SynapseError +from synapse.http.servlet import ( + RestServlet, + assert_params_in_dict, + parse_json_object_from_request, +) + +from ._base import client_v2_patterns + +logger = logging.getLogger(__name__) + + +class RoomUpgradeRestServlet(RestServlet): + PATTERNS = client_v2_patterns( + # /rooms/$roomid/upgrade + "/rooms/(?P<room_id>[^/]*)/upgrade$", + v2_alpha=False, + ) + + def __init__(self, hs): + """ + + Args: + hs (synapse.server.HomeServer): + """ + super(RoomUpgradeRestServlet, self).__init__() + self._hs = hs + self._room_creation_handler = hs.get_room_creation_handler() + self._auth = hs.get_auth() + + @defer.inlineCallbacks + def on_POST(self, request, room_id): + requester = yield self._auth.get_user_by_req(request) + + content = parse_json_object_from_request(request) + assert_params_in_dict(content, ("new_version", )) + new_version = content["new_version"] + + if new_version not in KNOWN_ROOM_VERSIONS: + raise SynapseError( + 400, + "Your homeserver does not support this room version", + Codes.UNSUPPORTED_ROOM_VERSION, + ) + + new_room_id = yield self._room_creation_handler.upgrade_room( + requester, room_id, new_version + ) + + ret = { + "replacement_room": new_room_id, + } + + defer.returnValue((200, ret)) + + +def register_servlets(hs, http_server): + RoomUpgradeRestServlet(hs).register(http_server) diff --git a/synapse/server.pyi b/synapse/server.pyi index ce2848623..06cd083a7 100644 --- a/synapse/server.pyi +++ b/synapse/server.pyi @@ -7,6 +7,9 @@ import synapse.handlers.auth import synapse.handlers.deactivate_account import synapse.handlers.device import synapse.handlers.e2e_keys +import synapse.handlers.room +import synapse.handlers.room_member +import synapse.handlers.message import synapse.handlers.set_password import synapse.rest.media.v1.media_repository import synapse.server_notices.server_notices_manager @@ -50,6 +53,9 @@ class HomeServer(object): def get_room_creation_handler(self) -> synapse.handlers.room.RoomCreationHandler: pass + def get_room_member_handler(self) -> synapse.handlers.room_member.RoomMemberHandler: + pass + def get_event_creation_handler(self) -> synapse.handlers.message.EventCreationHandler: pass From 4cda300058ba68f97c032923ebf429f437eddd8e Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 12 Oct 2018 11:13:40 +0100 Subject: [PATCH 54/85] preserve room visibility --- synapse/handlers/room.py | 8 +++++--- synapse/storage/room.py | 2 +- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 3cce6f615..2f9eb8ef4 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -32,7 +32,7 @@ from synapse.api.constants import ( JoinRules, RoomCreationPreset, ) -from synapse.api.errors import AuthError, Codes, StoreError,
SynapseError +from synapse.api.errors import AuthError, Codes, NotFoundError, StoreError, SynapseError from synapse.storage.state import StateFilter from synapse.types import RoomAlias, RoomID, RoomStreamToken, StreamToken, UserID from synapse.util import stringutils @@ -97,9 +97,11 @@ class RoomCreationHandler(BaseHandler): with (yield self._upgrade_linearizer.queue(old_room_id)): # start by allocating a new room id - is_public = False # XXX fixme + r = yield self.store.get_room(old_room_id) + if r is None: + raise NotFoundError("Unknown room id %s" % (old_room_id,)) new_room_id = yield self._generate_room_id( - creator_id=user_id, is_public=is_public, + creator_id=user_id, is_public=r["is_public"], ) # we create and auth the tombstone event before properly creating the new diff --git a/synapse/storage/room.py b/synapse/storage/room.py index 61013b891..41c65e112 100644 --- a/synapse/storage/room.py +++ b/synapse/storage/room.py @@ -47,7 +47,7 @@ class RoomWorkerStore(SQLBaseStore): Args: room_id (str): The ID of the room to retrieve. Returns: - A namedtuple containing the room information, or an empty list. + A dict containing the room information, or None if the room is unknown. """ return self._simple_select_one( table="rooms", From 1b9f253e208ea3a471594bde52366e3abf54fc1a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 12 Oct 2018 12:05:18 +0100 Subject: [PATCH 55/85] preserve PLs --- synapse/handlers/room.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 2f9eb8ef4..40ca12f1b 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -179,7 +179,13 @@ class RoomCreationHandler(BaseHandler): } } - initial_state = OrderedDict() + initial_state = dict() + + old_room_state_ids = yield self.store.get_current_state_ids(old_room_id) + pl_event_id = old_room_state_ids.get((EventTypes.PowerLevels, "")) + if pl_event_id: + pl_event = yield self.store.get_event(pl_event_id) + initial_state[(EventTypes.PowerLevels, "")] = pl_event.content yield self._send_events_for_new_room( requester, From 3a263bf3aec6b9709fed391671f8faec334dc739 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 12 Oct 2018 17:05:48 +0100 Subject: [PATCH 56/85] copy state --- synapse/handlers/room.py | 37 ++++++++++++++++++++++++++----------- 1 file changed, 26 insertions(+), 11 deletions(-) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 40ca12f1b..ab92ca5e7 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -167,9 +167,6 @@ class RoomCreationHandler(BaseHandler): # canonical_alias = None # XXX create association in directory handler - # XXX preset - - preset_config = RoomCreationPreset.PRIVATE_CHAT creation_content = { "room_version": new_room_version, @@ -181,23 +178,41 @@ class RoomCreationHandler(BaseHandler): initial_state = dict() - old_room_state_ids = yield self.store.get_current_state_ids(old_room_id) - pl_event_id = old_room_state_ids.get((EventTypes.PowerLevels, "")) - if pl_event_id: - pl_event = yield self.store.get_event(pl_event_id) - initial_state[(EventTypes.PowerLevels, "")] = pl_event.content + types_to_copy = ( + (EventTypes.PowerLevels, ""), + (EventTypes.JoinRules, ""), + (EventTypes.Name, ""), + (EventTypes.Topic, ""), + (EventTypes.RoomHistoryVisibility, ""), + (EventTypes.GuestAccess, "") + ) + + old_room_state_ids = yield self.store.get_filtered_current_state_ids( + old_room_id, StateFilter.from_types(types_to_copy), + ) + # map from 
event_id to BaseEvent + old_room_state_events = yield self.store.get_events(old_room_state_ids.values()) + + for k in types_to_copy: + old_event_id = old_room_state_ids.get(k) + if old_event_id: + old_event = old_room_state_events.get(old_event_id) + if old_event: + initial_state[k] = old_event.content yield self._send_events_for_new_room( requester, new_room_id, - preset_config=preset_config, + + # we expect to override all the presets with initial_state, so this is + # somewhat arbitrary. + preset_config=RoomCreationPreset.PRIVATE_CHAT, + invite_list=[], initial_state=initial_state, creation_content=creation_content, ) - # XXX name - # XXX topic # XXX invites/joins # XXX 3pid invites # XXX directory_handler.send_room_alias_update_event From e6babc27d51c3de04cdaedc40439b7ddb56b2e12 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 24 Oct 2018 23:14:36 +0100 Subject: [PATCH 57/85] restrict PLs in old room --- synapse/handlers/room.py | 44 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 43 insertions(+), 1 deletion(-) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index ab92ca5e7..d016f0e8b 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -136,7 +136,49 @@ class RoomCreationHandler(BaseHandler): requester, tombstone_event, tombstone_context, ) - # XXX send a power_levels in the old room, if possible + # ... and restrict the PLs in the old room, if possible. + old_room_pl_state = yield self.state_handler.get_current_state( + old_room_id, + event_type=EventTypes.PowerLevels, + latest_event_ids=(tombstone_event.event_id, ), + ) + + if old_room_pl_state is None: + logger.warning( + "Not supported: upgrading a room with no PL event. Not setting PLs " + "in old room.", + ) + else: + pl_content = dict(old_room_pl_state.content) + users_default = int(pl_content.get("users_default", 0)) + restricted_level = max(users_default + 1, 50) + + updated = False + for v in ("invite", "events_default"): + current = int(pl_content.get(v, 0)) + if current < restricted_level: + logger.debug( + "Setting level for %s in %s to %i (was %i)", + v, old_room_id, restricted_level, current, + ) + pl_content[v] = restricted_level + updated = True + else: + logger.debug( + "Not setting level for %s (already %i)", + v, current, + ) + + if updated: + yield self.event_creation_handler.create_and_send_nonmember_event( + requester, { + "type": EventTypes.PowerLevels, + "state_key": '', + "room_id": old_room_id, + "sender": user_id, + "content": pl_content, + }, ratelimit=False, + ) defer.returnValue(new_room_id) From 68c0ce62d839fe3a2014c1df32b761eeb0916b9c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 25 Oct 2018 18:17:00 +0100 Subject: [PATCH 58/85] changelog --- changelog.d/4091.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/4091.feature diff --git a/changelog.d/4091.feature b/changelog.d/4091.feature new file mode 100644 index 000000000..a3f7dbdcd --- /dev/null +++ b/changelog.d/4091.feature @@ -0,0 +1 @@ +Support for replacing rooms with new ones From 474810d9d545c07cfac567be7b29100a66cb2b7c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 25 Oct 2018 23:15:03 +0100 Subject: [PATCH 59/85] fix broken test This test stubbed out some stuff in a very weird way. I have no idea why. It broke. 
--- .../test_resource_limits_server_notices.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py index 4701eedd4..b1551df7c 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -4,7 +4,6 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, ServerNoticeMsgType from synapse.api.errors import ResourceLimitError -from synapse.handlers.auth import AuthHandler from synapse.server_notices.resource_limits_server_notices import ( ResourceLimitsServerNotices, ) @@ -13,17 +12,10 @@ from tests import unittest from tests.utils import setup_test_homeserver -class AuthHandlers(object): - def __init__(self, hs): - self.auth_handler = AuthHandler(hs) - - class TestResourceLimitsServerNotices(unittest.TestCase): @defer.inlineCallbacks def setUp(self): - self.hs = yield setup_test_homeserver(self.addCleanup, handlers=None) - self.hs.handlers = AuthHandlers(self.hs) - self.auth_handler = self.hs.handlers.auth_handler + self.hs = yield setup_test_homeserver(self.addCleanup) self.server_notices_sender = self.hs.get_server_notices_sender() # relying on [1] is far from ideal, but the only case where From 77d70a7646bd30315393b9aec44e452d5739a266 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Fri, 26 Oct 2018 22:05:22 +1100 Subject: [PATCH 60/85] Port register_new_matrix_user to Python 3 and add tests (#4085) --- changelog.d/4085.feature | 1 + scripts/register_new_matrix_user | 204 +----------------- synapse/_scripts/__init__.py | 0 synapse/_scripts/register_new_matrix_user.py | 215 +++++++++++++++++++ tests/scripts/__init__.py | 0 tests/scripts/test_new_matrix_user.py | 160 ++++++++++++++ 6 files changed, 378 insertions(+), 202 deletions(-) create mode 100644 changelog.d/4085.feature create mode 100644 synapse/_scripts/__init__.py create mode 100644 synapse/_scripts/register_new_matrix_user.py create mode 100644 tests/scripts/__init__.py create mode 100644 tests/scripts/test_new_matrix_user.py diff --git a/changelog.d/4085.feature b/changelog.d/4085.feature new file mode 100644 index 000000000..4bd3ddcf2 --- /dev/null +++ b/changelog.d/4085.feature @@ -0,0 +1 @@ +The register_new_matrix_user script is now ported to Python 3. diff --git a/scripts/register_new_matrix_user b/scripts/register_new_matrix_user index 89143c5d5..b450712ab 100755 --- a/scripts/register_new_matrix_user +++ b/scripts/register_new_matrix_user @@ -16,207 +16,7 @@ from __future__ import print_function -import argparse -import getpass -import hashlib -import hmac -import json -import sys -import urllib2 - -from six import input - -import yaml - - -def request_registration(user, password, server_location, shared_secret, admin=False): - req = urllib2.Request( - "%s/_matrix/client/r0/admin/register" % (server_location,), - headers={'Content-Type': 'application/json'}, - ) - - try: - if sys.version_info[:3] >= (2, 7, 9): - # As of version 2.7.9, urllib2 now checks SSL certs - import ssl - - f = urllib2.urlopen(req, context=ssl.SSLContext(ssl.PROTOCOL_SSLv23)) - else: - f = urllib2.urlopen(req) - body = f.read() - f.close() - nonce = json.loads(body)["nonce"] - except urllib2.HTTPError as e: - print("ERROR! 
Received %d %s" % (e.code, e.reason)) - if 400 <= e.code < 500: - if e.info().type == "application/json": - resp = json.load(e) - if "error" in resp: - print(resp["error"]) - sys.exit(1) - - mac = hmac.new(key=shared_secret, digestmod=hashlib.sha1) - - mac.update(nonce) - mac.update("\x00") - mac.update(user) - mac.update("\x00") - mac.update(password) - mac.update("\x00") - mac.update("admin" if admin else "notadmin") - - mac = mac.hexdigest() - - data = { - "nonce": nonce, - "username": user, - "password": password, - "mac": mac, - "admin": admin, - } - - server_location = server_location.rstrip("/") - - print("Sending registration request...") - - req = urllib2.Request( - "%s/_matrix/client/r0/admin/register" % (server_location,), - data=json.dumps(data), - headers={'Content-Type': 'application/json'}, - ) - try: - if sys.version_info[:3] >= (2, 7, 9): - # As of version 2.7.9, urllib2 now checks SSL certs - import ssl - - f = urllib2.urlopen(req, context=ssl.SSLContext(ssl.PROTOCOL_SSLv23)) - else: - f = urllib2.urlopen(req) - f.read() - f.close() - print("Success.") - except urllib2.HTTPError as e: - print("ERROR! Received %d %s" % (e.code, e.reason)) - if 400 <= e.code < 500: - if e.info().type == "application/json": - resp = json.load(e) - if "error" in resp: - print(resp["error"]) - sys.exit(1) - - -def register_new_user(user, password, server_location, shared_secret, admin): - if not user: - try: - default_user = getpass.getuser() - except Exception: - default_user = None - - if default_user: - user = input("New user localpart [%s]: " % (default_user,)) - if not user: - user = default_user - else: - user = input("New user localpart: ") - - if not user: - print("Invalid user name") - sys.exit(1) - - if not password: - password = getpass.getpass("Password: ") - - if not password: - print("Password cannot be blank.") - sys.exit(1) - - confirm_password = getpass.getpass("Confirm password: ") - - if password != confirm_password: - print("Passwords do not match") - sys.exit(1) - - if admin is None: - admin = input("Make admin [no]: ") - if admin in ("y", "yes", "true"): - admin = True - else: - admin = False - - request_registration(user, password, server_location, shared_secret, bool(admin)) - +from synapse._scripts.register_new_matrix_user import main if __name__ == "__main__": - parser = argparse.ArgumentParser( - description="Used to register new users with a given home server when" - " registration has been disabled. The home server must be" - " configured with the 'registration_shared_secret' option" - " set." - ) - parser.add_argument( - "-u", - "--user", - default=None, - help="Local part of the new user. Will prompt if omitted.", - ) - parser.add_argument( - "-p", - "--password", - default=None, - help="New password for user. Will prompt if omitted.", - ) - admin_group = parser.add_mutually_exclusive_group() - admin_group.add_argument( - "-a", - "--admin", - action="store_true", - help=( - "Register new user as an admin. " - "Will prompt if --no-admin is not set either." - ), - ) - admin_group.add_argument( - "--no-admin", - action="store_true", - help=( - "Register new user as a regular user. " - "Will prompt if --admin is not set either." - ), - ) - - group = parser.add_mutually_exclusive_group(required=True) - group.add_argument( - "-c", - "--config", - type=argparse.FileType('r'), - help="Path to server config file. Used to read in shared secret.", - ) - - group.add_argument( - "-k", "--shared-secret", help="Shared secret as defined in server config file." 
- ) - - parser.add_argument( - "server_url", - default="https://localhost:8448", - nargs='?', - help="URL to use to talk to the home server. Defaults to " - " 'https://localhost:8448'.", - ) - - args = parser.parse_args() - - if "config" in args and args.config: - config = yaml.safe_load(args.config) - secret = config.get("registration_shared_secret", None) - if not secret: - print("No 'registration_shared_secret' defined in config.") - sys.exit(1) - else: - secret = args.shared_secret - - admin = None - if args.admin or args.no_admin: - admin = args.admin - - register_new_user(args.user, args.password, args.server_url, secret, admin) + main() diff --git a/synapse/_scripts/__init__.py b/synapse/_scripts/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/synapse/_scripts/register_new_matrix_user.py b/synapse/_scripts/register_new_matrix_user.py new file mode 100644 index 000000000..70cecde48 --- /dev/null +++ b/synapse/_scripts/register_new_matrix_user.py @@ -0,0 +1,215 @@ +# -*- coding: utf-8 -*- +# Copyright 2015, 2016 OpenMarket Ltd +# Copyright 2018 New Vector +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import argparse +import getpass +import hashlib +import hmac +import logging +import sys + +from six.moves import input + +import requests as _requests +import yaml + + +def request_registration( +    user, +    password, +    server_location, +    shared_secret, +    admin=False, +    requests=_requests, +    _print=print, +    exit=sys.exit, +): + +    url = "%s/_matrix/client/r0/admin/register" % (server_location,) + +    # Get the nonce +    r = requests.get(url, verify=False) + +    if r.status_code != 200: +        _print("ERROR! Received %d %s" % (r.status_code, r.reason)) +        if 400 <= r.status_code < 500: +            try: +                _print(r.json()["error"]) +            except Exception: +                pass +        return exit(1) + +    nonce = r.json()["nonce"] + +    mac = hmac.new(key=shared_secret.encode('utf8'), digestmod=hashlib.sha1) + +    mac.update(nonce.encode('utf8')) +    mac.update(b"\x00") +    mac.update(user.encode('utf8')) +    mac.update(b"\x00") +    mac.update(password.encode('utf8')) +    mac.update(b"\x00") +    mac.update(b"admin" if admin else b"notadmin") + +    mac = mac.hexdigest() + +    data = { +        "nonce": nonce, +        "username": user, +        "password": password, +        "mac": mac, +        "admin": admin, +    } + +    _print("Sending registration request...") +    r = requests.post(url, json=data, verify=False) + +    if r.status_code != 200: +        _print("ERROR!
Received %d %s" % (r.status_code, r.reason)) + if 400 <= r.status_code < 500: + try: + _print(r.json()["error"]) + except Exception: + pass + return exit(1) + + _print("Success!") + + +def register_new_user(user, password, server_location, shared_secret, admin): + if not user: + try: + default_user = getpass.getuser() + except Exception: + default_user = None + + if default_user: + user = input("New user localpart [%s]: " % (default_user,)) + if not user: + user = default_user + else: + user = input("New user localpart: ") + + if not user: + print("Invalid user name") + sys.exit(1) + + if not password: + password = getpass.getpass("Password: ") + + if not password: + print("Password cannot be blank.") + sys.exit(1) + + confirm_password = getpass.getpass("Confirm password: ") + + if password != confirm_password: + print("Passwords do not match") + sys.exit(1) + + if admin is None: + admin = input("Make admin [no]: ") + if admin in ("y", "yes", "true"): + admin = True + else: + admin = False + + request_registration(user, password, server_location, shared_secret, bool(admin)) + + +def main(): + + logging.captureWarnings(True) + + parser = argparse.ArgumentParser( + description="Used to register new users with a given home server when" + " registration has been disabled. The home server must be" + " configured with the 'registration_shared_secret' option" + " set." + ) + parser.add_argument( + "-u", + "--user", + default=None, + help="Local part of the new user. Will prompt if omitted.", + ) + parser.add_argument( + "-p", + "--password", + default=None, + help="New password for user. Will prompt if omitted.", + ) + admin_group = parser.add_mutually_exclusive_group() + admin_group.add_argument( + "-a", + "--admin", + action="store_true", + help=( + "Register new user as an admin. " + "Will prompt if --no-admin is not set either." + ), + ) + admin_group.add_argument( + "--no-admin", + action="store_true", + help=( + "Register new user as a regular user. " + "Will prompt if --admin is not set either." + ), + ) + + group = parser.add_mutually_exclusive_group(required=True) + group.add_argument( + "-c", + "--config", + type=argparse.FileType('r'), + help="Path to server config file. Used to read in shared secret.", + ) + + group.add_argument( + "-k", "--shared-secret", help="Shared secret as defined in server config file." + ) + + parser.add_argument( + "server_url", + default="https://localhost:8448", + nargs='?', + help="URL to use to talk to the home server. 
Defaults to " + " 'https://localhost:8448'.", + ) + + args = parser.parse_args() + + if "config" in args and args.config: + config = yaml.safe_load(args.config) + secret = config.get("registration_shared_secret", None) + if not secret: + print("No 'registration_shared_secret' defined in config.") + sys.exit(1) + else: + secret = args.shared_secret + + admin = None + if args.admin or args.no_admin: + admin = args.admin + + register_new_user(args.user, args.password, args.server_url, secret, admin) + + +if __name__ == "__main__": + main() diff --git a/tests/scripts/__init__.py b/tests/scripts/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/scripts/test_new_matrix_user.py b/tests/scripts/test_new_matrix_user.py new file mode 100644 index 000000000..6f56893f5 --- /dev/null +++ b/tests/scripts/test_new_matrix_user.py @@ -0,0 +1,160 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from mock import Mock + +from synapse._scripts.register_new_matrix_user import request_registration + +from tests.unittest import TestCase + + +class RegisterTestCase(TestCase): + def test_success(self): + """ + The script will fetch a nonce, and then generate a MAC with it, and then + post that MAC. + """ + + def get(url, verify=None): + r = Mock() + r.status_code = 200 + r.json = lambda: {"nonce": "a"} + return r + + def post(url, json=None, verify=None): + # Make sure we are sent the correct info + self.assertEqual(json["username"], "user") + self.assertEqual(json["password"], "pass") + self.assertEqual(json["nonce"], "a") + # We want a 40-char hex MAC + self.assertEqual(len(json["mac"]), 40) + + r = Mock() + r.status_code = 200 + return r + + requests = Mock() + requests.get = get + requests.post = post + + # The fake stdout will be written here + out = [] + err_code = [] + + request_registration( + "user", + "pass", + "matrix.org", + "shared", + admin=False, + requests=requests, + _print=out.append, + exit=err_code.append, + ) + + # We should get the success message making sure everything is OK. + self.assertIn("Success!", out) + + # sys.exit shouldn't have been called. + self.assertEqual(err_code, []) + + def test_failure_nonce(self): + """ + If the script fails to fetch a nonce, it throws an error and quits. + """ + + def get(url, verify=None): + r = Mock() + r.status_code = 404 + r.reason = "Not Found" + r.json = lambda: {"not": "error"} + return r + + requests = Mock() + requests.get = get + + # The fake stdout will be written here + out = [] + err_code = [] + + request_registration( + "user", + "pass", + "matrix.org", + "shared", + admin=False, + requests=requests, + _print=out.append, + exit=err_code.append, + ) + + # Exit was called + self.assertEqual(err_code, [1]) + + # We got an error message + self.assertIn("ERROR! Received 404 Not Found", out) + self.assertNotIn("Success!", out) + + def test_failure_post(self): + """ + The script will fetch a nonce, and then if the final POST fails, will + report an error and quit. 
+        """ + +        def get(url, verify=None): +            r = Mock() +            r.status_code = 200 +            r.json = lambda: {"nonce": "a"} +            return r + +        def post(url, json=None, verify=None): +            # Make sure we are sent the correct info +            self.assertEqual(json["username"], "user") +            self.assertEqual(json["password"], "pass") +            self.assertEqual(json["nonce"], "a") +            # We want a 40-char hex MAC +            self.assertEqual(len(json["mac"]), 40) + +            r = Mock() +            # Then 500 because we're jerks +            r.status_code = 500 +            r.reason = "Broken" +            return r + +        requests = Mock() +        requests.get = get +        requests.post = post + +        # The fake stdout will be written here +        out = [] +        err_code = [] + +        request_registration( +            "user", +            "pass", +            "matrix.org", +            "shared", +            admin=False, +            requests=requests, +            _print=out.append, +            exit=err_code.append, +        ) + +        # Exit was called +        self.assertEqual(err_code, [1]) + +        # We got an error message +        self.assertIn("ERROR! Received 500 Broken", out) +        self.assertNotIn("Success!", out) From 193cadc988801d9035124d1fd3ca23607b9b1f25 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 26 Oct 2018 17:10:30 +0100 Subject: [PATCH 61/85] Address review comments Improve comments, get old room state from the context we already have --- synapse/handlers/room.py | 16 +++++++------- .../v2_alpha/room_upgrade_rest_servlet.py | 21 ++++++++++++++----- 2 files changed, 25 insertions(+), 12 deletions(-) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index d016f0e8b..145b5b19e 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -136,19 +136,21 @@ class RoomCreationHandler(BaseHandler): requester, tombstone_event, tombstone_context, ) - # ... and restrict the PLs in the old room, if possible. - old_room_pl_state = yield self.state_handler.get_current_state( - old_room_id, - event_type=EventTypes.PowerLevels, - latest_event_ids=(tombstone_event.event_id, ), - ) + old_room_state = yield tombstone_context.get_current_state_ids(self.store) + old_room_pl_event_id = old_room_state.get((EventTypes.PowerLevels, "")) - if old_room_pl_state is None: + if old_room_pl_event_id is None: logger.warning( "Not supported: upgrading a room with no PL event. Not setting PLs " "in old room.", ) else: + # we try to stop regular users from speaking by setting the PL required + # to send regular events and invites to 'Moderator' level. That's normally + # 50, but if the default PL in a room is 50 or more, then we set the + # required PL above that. + + old_room_pl_state = yield self.store.get_event(old_room_pl_event_id) pl_content = dict(old_room_pl_state.content) users_default = int(pl_content.get("users_default", 0)) restricted_level = max(users_default + 1, 50) diff --git a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py index 1b195f90c..e6356101f 100644 --- a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py +++ b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py @@ -31,6 +31,22 @@ logger = logging.getLogger(__name__) class RoomUpgradeRestServlet(RestServlet): +    """Handler for room upgrade requests. + +    Handles requests of the form: + +        POST /_matrix/client/r0/rooms/$roomid/upgrade HTTP/1.1 +        Content-Type: application/json + +        { +            "new_version": "2", +        } + +    Creates a new room and shuts down the old one. Returns the ID of the new room.
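+    For example, a successful upgrade responds with a body of the following +    shape (the room ID shown is illustrative): + +        { "replacement_room": "!newroomid:example.com" }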
+ +    Args: +        hs (synapse.server.HomeServer): +    """ PATTERNS = client_v2_patterns( # /rooms/$roomid/upgrade "/rooms/(?P<room_id>[^/]*)/upgrade$", v2_alpha=False, ) @@ -38,11 +54,6 @@ class RoomUpgradeRestServlet(RestServlet): def __init__(self, hs): - """ - - Args: - hs (synapse.server.HomeServer): - """ super(RoomUpgradeRestServlet, self).__init__() self._hs = hs self._room_creation_handler = hs.get_room_creation_handler() self._auth = hs.get_auth() From 54bbe71867fb3de2e3984e2b3eb909845c2448b3 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 26 Oct 2018 22:51:34 +0100 Subject: [PATCH 62/85] optimise state copying --- synapse/handlers/room.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 145b5b19e..8e48c1ca6 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -21,7 +21,7 @@ import math import string from collections import OrderedDict -from six import string_types +from six import iteritems, string_types from twisted.internet import defer @@ -237,12 +237,10 @@ class RoomCreationHandler(BaseHandler): # map from event_id to BaseEvent old_room_state_events = yield self.store.get_events(old_room_state_ids.values()) - for k in types_to_copy: - old_event_id = old_room_state_ids.get(k) - if old_event_id: - old_event = old_room_state_events.get(old_event_id) - if old_event: - initial_state[k] = old_event.content + for k, old_event_id in iteritems(old_room_state_ids): + old_event = old_room_state_events.get(old_event_id) + if old_event: + initial_state[k] = old_event.content yield self._send_events_for_new_room( requester, From 5caf79b312947c823977c89275c1ea5750aeec92 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 26 Oct 2018 23:56:40 +0100 Subject: [PATCH 63/85] Remember to copy the avatar on room upgrades --- changelog.d/4100.feature | 1 + synapse/handlers/room.py | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelog.d/4100.feature diff --git a/changelog.d/4100.feature b/changelog.d/4100.feature new file mode 100644 index 000000000..a3f7dbdcd --- /dev/null +++ b/changelog.d/4100.feature @@ -0,0 +1 @@ +Support for replacing rooms with new ones diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 8e48c1ca6..c59c02527 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -228,7 +228,8 @@ class RoomCreationHandler(BaseHandler): (EventTypes.Name, ""), (EventTypes.Topic, ""), (EventTypes.RoomHistoryVisibility, ""), - (EventTypes.GuestAccess, "") + (EventTypes.GuestAccess, ""), + (EventTypes.RoomAvatar, ""), ) old_room_state_ids = yield self.store.get_filtered_current_state_ids( From db24d7f15e406390d57b23d48a78fa33604a47e7 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 26 Oct 2018 23:47:37 +0100 Subject: [PATCH 64/85] Better handling of odd PLs during room upgrades Fixes handling of rooms where we have permission to send the tombstone, but not other state. We need to (a) fail more gracefully when we can't send the PLs in the old room, and (b) not set the PLs in the new room until we are done with the other stuff.
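For reference, the muting rule this patch preserves reduces to the following sketch (the helper name is illustrative, not something defined in the codebase):

    def _restricted_level(pl_content):
        # regular users need 'Moderator' (50) to keep sending events and
        # invites; if the room's users_default is already 50 or above,
        # require one level more than that instead
        users_default = int(pl_content.get("users_default", 0))
        return max(users_default + 1, 50)

    # e.g. _restricted_level({}) == 50
    #      _restricted_level({"users_default": 75}) == 76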
--- changelog.d/4099.feature | 1 + synapse/handlers/room.py | 121 +++++++++++++++++++++++++-------------- 2 files changed, 80 insertions(+), 42 deletions(-) create mode 100644 changelog.d/4099.feature diff --git a/changelog.d/4099.feature b/changelog.d/4099.feature new file mode 100644 index 000000000..a3f7dbdcd --- /dev/null +++ b/changelog.d/4099.feature @@ -0,0 +1 @@ +Support for replacing rooms with new ones diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 8e48c1ca6..70085db62 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -136,53 +136,91 @@ class RoomCreationHandler(BaseHandler): requester, tombstone_event, tombstone_context, ) + # and finally, shut down the PLs in the old room, and update them in the new + # room. old_room_state = yield tombstone_context.get_current_state_ids(self.store) - old_room_pl_event_id = old_room_state.get((EventTypes.PowerLevels, "")) - if old_room_pl_event_id is None: - logger.warning( - "Not supported: upgrading a room with no PL event. Not setting PLs " - "in old room.", + yield self._update_upgraded_room_pls( + requester, old_room_id, new_room_id, old_room_state, + ) + + defer.returnValue(new_room_id) + + @defer.inlineCallbacks + def _update_upgraded_room_pls( + self, requester, old_room_id, new_room_id, old_room_state, + ): + """Send updated power levels in both rooms after an upgrade + + Args: + requester (synapse.types.Requester): the user requesting the upgrade + old_room_id (unicode): the id of the room to be replaced + new_room_id (unicode): the id of the replacement room + old_room_state (dict[tuple[str, str], str]): the state map for the old room + + Returns: + Deferred + """ + old_room_pl_event_id = old_room_state.get((EventTypes.PowerLevels, "")) + + if old_room_pl_event_id is None: + logger.warning( + "Not supported: upgrading a room with no PL event. Not setting PLs " + "in old room.", + ) + return + + old_room_pl_state = yield self.store.get_event(old_room_pl_event_id) + + # we try to stop regular users from speaking by setting the PL required + # to send regular events and invites to 'Moderator' level. That's normally + # 50, but if the default PL in a room is 50 or more, then we set the + # required PL above that. + + pl_content = dict(old_room_pl_state.content) + users_default = int(pl_content.get("users_default", 0)) + restricted_level = max(users_default + 1, 50) + + updated = False + for v in ("invite", "events_default"): + current = int(pl_content.get(v, 0)) + if current < restricted_level: + logger.info( + "Setting level for %s in %s to %i (was %i)", + v, old_room_id, restricted_level, current, ) + pl_content[v] = restricted_level + updated = True else: - # we try to stop regular users from speaking by setting the PL required - # to send regular events and invites to 'Moderator' level. That's normally - # 50, but if the default PL in a room is 50 or more, then we set the - # required PL above that. 
+                logger.info( +                    "Not setting level for %s (already %i)", +                    v, current, ) - old_room_pl_state = yield self.store.get_event(old_room_pl_event_id) - pl_content = dict(old_room_pl_state.content) - users_default = int(pl_content.get("users_default", 0)) - restricted_level = max(users_default + 1, 50) +        if updated: +            try: +                yield self.event_creation_handler.create_and_send_nonmember_event( +                    requester, { +                        "type": EventTypes.PowerLevels, +                        "state_key": '', +                        "room_id": old_room_id, +                        "sender": requester.user.to_string(), +                        "content": pl_content, +                    }, ratelimit=False, +                ) +            except AuthError as e: +                logger.warning("Unable to update PLs in old room: %s", e) - updated = False - for v in ("invite", "events_default"): - current = int(pl_content.get(v, 0)) - if current < restricted_level: - logger.debug( - "Setting level for %s in %s to %i (was %i)", - v, old_room_id, restricted_level, current, - ) - pl_content[v] = restricted_level - updated = True - else: - logger.debug( - "Not setting level for %s (already %i)", - v, current, - ) - - if updated: - yield self.event_creation_handler.create_and_send_nonmember_event( - requester, { - "type": EventTypes.PowerLevels, - "state_key": '', - "room_id": old_room_id, - "sender": user_id, - "content": pl_content, - }, ratelimit=False, - ) - - defer.returnValue(new_room_id) +        logger.info("Setting correct PLs in new room") +        yield self.event_creation_handler.create_and_send_nonmember_event( +            requester, { +                "type": EventTypes.PowerLevels, +                "state_key": '', +                "room_id": new_room_id, +                "sender": requester.user.to_string(), +                "content": old_room_pl_state.content, +            }, ratelimit=False, +        ) @defer.inlineCallbacks def clone_exiting_room( @@ -223,7 +261,6 @@ class RoomCreationHandler(BaseHandler): initial_state = dict() types_to_copy = ( - (EventTypes.PowerLevels, ""), (EventTypes.JoinRules, ""), (EventTypes.Name, ""), (EventTypes.Topic, ""), From c4b3698a80468957c63b2a79685ac06f76cabae1 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 29 Oct 2018 22:59:44 +1100 Subject: [PATCH 65/85] Make the replication logger quieter (#4108) --- changelog.d/4108.misc | 1 + synapse/replication/tcp/client.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/4108.misc diff --git a/changelog.d/4108.misc b/changelog.d/4108.misc new file mode 100644 index 000000000..85810c3d8 --- /dev/null +++ b/changelog.d/4108.misc @@ -0,0 +1 @@ +The "Received rdata" log messages on workers are now logged at DEBUG, not INFO. diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index cbe964581..586dddb40 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -106,7 +106,7 @@ class ReplicationClientHandler(object): Can be overriden in subclasses to handle more.
""" - logger.info("Received rdata %s -> %s", stream_name, token) + logger.debug("Received rdata %s -> %s", stream_name, token) return self.store.process_replication_rows(stream_name, token, rows) def on_position(self, stream_name, token): From 4cd1c9f2ffa46bc8ed258da200ae3b8ba25fcbb5 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 29 Oct 2018 23:57:24 +1100 Subject: [PATCH 66/85] Delete the disused & unspecced identicon functionality (#4106) --- changelog.d/4106.removal | 1 + scripts-dev/make_identicons.pl | 39 ------------ synapse/handlers/register.py | 3 - synapse/python_dependencies.py | 1 - synapse/rest/media/v1/identicon_resource.py | 68 --------------------- synapse/rest/media/v1/media_repository.py | 2 - 6 files changed, 1 insertion(+), 113 deletions(-) create mode 100644 changelog.d/4106.removal delete mode 100755 scripts-dev/make_identicons.pl delete mode 100644 synapse/rest/media/v1/identicon_resource.py diff --git a/changelog.d/4106.removal b/changelog.d/4106.removal new file mode 100644 index 000000000..7e63208da --- /dev/null +++ b/changelog.d/4106.removal @@ -0,0 +1 @@ +The disused and un-specced identicon generator has been removed. diff --git a/scripts-dev/make_identicons.pl b/scripts-dev/make_identicons.pl deleted file mode 100755 index cbff63e29..000000000 --- a/scripts-dev/make_identicons.pl +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env perl - -use strict; -use warnings; - -use DBI; -use DBD::SQLite; -use JSON; -use Getopt::Long; - -my $db; # = "homeserver.db"; -my $server = "http://localhost:8008"; -my $size = 320; - -GetOptions("db|d=s", \$db, - "server|s=s", \$server, - "width|w=i", \$size) or usage(); - -usage() unless $db; - -my $dbh = DBI->connect("dbi:SQLite:dbname=$db","","") || die $DBI::errstr; - -my $res = $dbh->selectall_arrayref("select token, name from access_tokens, users where access_tokens.user_id = users.id group by user_id") || die $DBI::errstr; - -foreach (@$res) { - my ($token, $mxid) = ($_->[0], $_->[1]); - my ($user_id) = ($mxid =~ m/@(.*):/); - my ($url) = $dbh->selectrow_array("select avatar_url from profiles where user_id=?", undef, $user_id); - if (!$url || $url =~ /#auto$/) { - `curl -s -o tmp.png "$server/_matrix/media/v1/identicon?name=${mxid}&width=$size&height=$size"`; - my $json = `curl -s -X POST -H "Content-Type: image/png" -T "tmp.png" $server/_matrix/media/v1/upload?access_token=$token`; - my $content_uri = from_json($json)->{content_uri}; - `curl -X PUT -H "Content-Type: application/json" --data '{ "avatar_url": "${content_uri}#auto"}' $server/_matrix/client/api/v1/profile/${mxid}/avatar_url?access_token=$token`; - } -} - -sub usage { - die "usage: ./make-identicons.pl\n\t-d database [e.g. homeserver.db]\n\t-s homeserver (default: http://localhost:8008)\n\t-w identicon size in pixels (default 320)"; -} \ No newline at end of file diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 7b4549223..d2beb275c 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -256,9 +256,6 @@ class RegistrationHandler(BaseHandler): except Exception as e: logger.error("Failed to join new user to %r: %r", r, e) - # We used to generate default identicons here, but nowadays - # we want clients to generate their own as part of their branding - # rather than there being consistent matrix-wide ones, so we don't. 
defer.returnValue((user_id, token)) @defer.inlineCallbacks diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 943876456..ca62ee763 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -51,7 +51,6 @@ REQUIREMENTS = { "daemonize>=2.3.1": ["daemonize"], "bcrypt>=3.1.0": ["bcrypt>=3.1.0"], "pillow>=3.1.2": ["PIL"], - "pydenticon>=0.2": ["pydenticon"], "sortedcontainers>=1.4.4": ["sortedcontainers"], "psutil>=2.0.0": ["psutil>=2.0.0"], "pysaml2>=3.0.0": ["saml2"], diff --git a/synapse/rest/media/v1/identicon_resource.py b/synapse/rest/media/v1/identicon_resource.py deleted file mode 100644 index bdbd8d50d..000000000 --- a/synapse/rest/media/v1/identicon_resource.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright 2015, 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from pydenticon import Generator - -from twisted.web.resource import Resource - -from synapse.http.servlet import parse_integer - -FOREGROUND = [ - "rgb(45,79,255)", - "rgb(254,180,44)", - "rgb(226,121,234)", - "rgb(30,179,253)", - "rgb(232,77,65)", - "rgb(49,203,115)", - "rgb(141,69,170)" -] - -BACKGROUND = "rgb(224,224,224)" -SIZE = 5 - - -class IdenticonResource(Resource): - isLeaf = True - - def __init__(self): - Resource.__init__(self) - self.generator = Generator( - SIZE, SIZE, foreground=FOREGROUND, background=BACKGROUND, - ) - - def generate_identicon(self, name, width, height): - v_padding = width % SIZE - h_padding = height % SIZE - top_padding = v_padding // 2 - left_padding = h_padding // 2 - bottom_padding = v_padding - top_padding - right_padding = h_padding - left_padding - width -= v_padding - height -= h_padding - padding = (top_padding, bottom_padding, left_padding, right_padding) - identicon = self.generator.generate( - name, width, height, padding=padding - ) - return identicon - - def render_GET(self, request): - name = "/".join(request.postpath) - width = parse_integer(request, "width", default=96) - height = parse_integer(request, "height", default=96) - identicon_bytes = self.generate_identicon(name, width, height) - request.setHeader(b"Content-Type", b"image/png") - request.setHeader( - b"Cache-Control", b"public,max-age=86400,s-maxage=86400" - ) - return identicon_bytes diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 08b1867fa..d6c5f07af 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -45,7 +45,6 @@ from ._base import FileInfo, respond_404, respond_with_responder from .config_resource import MediaConfigResource from .download_resource import DownloadResource from .filepath import MediaFilePaths -from .identicon_resource import IdenticonResource from .media_storage import MediaStorage from .preview_url_resource import PreviewUrlResource from .storage_provider import StorageProviderWrapper @@ -769,7 +768,6 @@ class MediaRepositoryResource(Resource): self.putChild(b"thumbnail", ThumbnailResource( hs, media_repo, 
media_repo.media_storage, )) - self.putChild(b"identicon", IdenticonResource()) if hs.config.url_preview_enabled: self.putChild(b"preview_url", PreviewUrlResource( hs, media_repo, media_repo.media_storage, From b2399f6281d7cd11e7762b683bdd5a4f0c24927e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 29 Oct 2018 14:01:11 +0000 Subject: [PATCH 67/85] Make SQL a bit cleaner --- synapse/storage/state.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 59a50a5df..45afd42b3 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -1272,14 +1272,13 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): # Check if state groups are referenced sql = """ - SELECT state_group, count(*) FROM event_to_state_groups + SELECT DISTINCT state_group FROM event_to_state_groups LEFT JOIN events_to_purge AS ep USING (event_id) WHERE state_group IN (%s) AND ep.event_id IS NULL - GROUP BY state_group """ % (",".join("?" for _ in current_search),) txn.execute(sql, list(current_search)) - referenced = set(sg for sg, cnt in txn if cnt > 0) + referenced = set(sg for sg, in txn) referenced_groups |= referenced # We don't continue iterating up the state group graphs for state From f4f223aa4455bea3aa642c23ae957932b1168ba3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 29 Oct 2018 14:01:49 +0000 Subject: [PATCH 68/85] Don't make temporary list --- synapse/storage/state.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 45afd42b3..947d3fc17 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -1266,9 +1266,8 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): current_search = next_to_search next_to_search = set() else: - lst = list(next_to_search) - current_search = set(lst[:100]) - next_to_search = set(lst[100:]) + current_search = set(islice(next_to_search, 100)) + next_to_search -= current_search # Check if state groups are referenced sql = """ From 664b192a3b4aa57597d9832361b025bc078ea87a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 29 Oct 2018 14:21:43 +0000 Subject: [PATCH 69/85] Fix set operations thinko --- synapse/storage/state.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 947d3fc17..dfec57c04 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -1293,10 +1293,11 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): retcols=("prev_state_group", "state_group",), ) - next_to_search.update(row["state_group"] for row in rows) + prevs = set(row["state_group"] for row in rows) # We don't bother re-handling groups we've already seen - next_to_search -= state_groups_seen - state_groups_seen |= next_to_search + prevs -= state_groups_seen + next_to_search |= prevs + state_groups_seen |= prevs for row in rows: # Note: Each state group can have at most one prev group From ad88460e0d2e0a7c2cf39ec5539d5c4ff030bbbd Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 29 Oct 2018 14:23:34 +0000 Subject: [PATCH 70/85] Move _find_unreferenced_groups --- synapse/storage/events.py | 85 ++++++++++++++++++++++++++++++++++++++- synapse/storage/state.py | 79 ------------------------------------ 2 files changed, 83 insertions(+), 81 deletions(-) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index e79192273..919e855f3 100644 --- a/synapse/storage/events.py +++ 
b/synapse/storage/events.py @@ -2052,8 +2052,10 @@ class EventsStore(StateGroupWorkerStore, EventFederationStore, EventsWorkerStore logger.info("[purge] finding state groups that can be deleted") - state_groups_to_delete, remaining_state_groups = self._find_unreferenced_groups( - txn, referenced_state_groups, + state_groups_to_delete, remaining_state_groups = ( + self._find_unreferenced_groups_during_purge( + txn, referenced_state_groups, + ) ) logger.info( @@ -2209,6 +2211,85 @@ class EventsStore(StateGroupWorkerStore, EventFederationStore, EventsWorkerStore logger.info("[purge] done") + def _find_unreferenced_groups_during_purge(self, txn, state_groups): + """Used when purging history to figure out which state groups can be + deleted and which need to be de-delta'ed (due to one of its prev groups + being scheduled for deletion). + + Args: + txn + state_groups (set[int]): Set of state groups referenced by events + that are going to be deleted. + + Returns: + tuple[set[int], set[int]]: The set of state groups that can be + deleted and the set of state groups that need to be de-delta'ed + """ + # Graph of state group -> previous group + graph = {} + + # Set of events that we have found to be referenced by events + referenced_groups = set() + + # Set of state groups we've already seen + state_groups_seen = set(state_groups) + + # Set of state groups to handle next. + next_to_search = set(state_groups) + while next_to_search: + # We bound size of groups we're looking up at once, to stop the + # SQL query getting too big + if len(next_to_search) < 100: + current_search = next_to_search + next_to_search = set() + else: + current_search = set(itertools.islice(next_to_search, 100)) + next_to_search -= current_search + + # Check if state groups are referenced + sql = """ + SELECT DISTINCT state_group FROM event_to_state_groups + LEFT JOIN events_to_purge AS ep USING (event_id) + WHERE state_group IN (%s) AND ep.event_id IS NULL + """ % (",".join("?" for _ in current_search),) + txn.execute(sql, list(current_search)) + + referenced = set(sg for sg, in txn) + referenced_groups |= referenced + + # We don't continue iterating up the state group graphs for state + # groups that are referenced. 
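+            # (a referenced group may still need de-delta'ing at the end, +            # if its prev group turns out to be scheduled for deletion)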
+ current_search -= referenced + + rows = self._simple_select_many_txn( + txn, + table="state_group_edges", + column="prev_state_group", + iterable=current_search, + keyvalues={}, + retcols=("prev_state_group", "state_group",), + ) + + prevs = set(row["state_group"] for row in rows) + # We don't bother re-handling groups we've already seen + prevs -= state_groups_seen + next_to_search |= prevs + state_groups_seen |= prevs + + for row in rows: + # Note: Each state group can have at most one prev group + graph[row["state_group"]] = row["prev_state_group"] + + to_delete = state_groups_seen - referenced_groups + + to_dedelta = set() + for sg in referenced_groups: + prev_sg = graph.get(sg) + if prev_sg and prev_sg in to_delete: + to_dedelta.add(sg) + + return to_delete, to_dedelta + @defer.inlineCallbacks def is_event_after(self, event_id1, event_id2): """Returns True if event_id1 is after event_id2 in the stream diff --git a/synapse/storage/state.py b/synapse/storage/state.py index dfec57c04..d737bd677 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -1234,85 +1234,6 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): return count - def _find_unreferenced_groups(self, txn, state_groups): - """Used when purging history to figure out which state groups can be - deleted and which need to be de-delta'ed (due to one of its prev groups - being scheduled for deletion). - - Args: - txn - state_groups (set[int]): Set of state groups referenced by events - that are going to be deleted. - - Returns: - tuple[set[int], set[int]]: The set of state groups that can be - deleted and the set of state groups that need to be de-delta'ed - """ - # Graph of state group -> previous group - graph = {} - - # Set of events that we have found to be referenced by events - referenced_groups = set() - - # Set of state groups we've already seen - state_groups_seen = set(state_groups) - - # Set of state groups to handle next. - next_to_search = set(state_groups) - while next_to_search: - # We bound size of groups we're looking up at once, to stop the - # SQL query getting too big - if len(next_to_search) < 100: - current_search = next_to_search - next_to_search = set() - else: - current_search = set(islice(next_to_search, 100)) - next_to_search -= current_search - - # Check if state groups are referenced - sql = """ - SELECT DISTINCT state_group FROM event_to_state_groups - LEFT JOIN events_to_purge AS ep USING (event_id) - WHERE state_group IN (%s) AND ep.event_id IS NULL - """ % (",".join("?" for _ in current_search),) - txn.execute(sql, list(current_search)) - - referenced = set(sg for sg, in txn) - referenced_groups |= referenced - - # We don't continue iterating up the state group graphs for state - # groups that are referenced. 
- current_search -= referenced - - rows = self._simple_select_many_txn( - txn, - table="state_group_edges", - column="prev_state_group", - iterable=current_search, - keyvalues={}, - retcols=("prev_state_group", "state_group",), - ) - - prevs = set(row["state_group"] for row in rows) - # We don't bother re-handling groups we've already seen - prevs -= state_groups_seen - next_to_search |= prevs - state_groups_seen |= prevs - - for row in rows: - # Note: Each state group can have at most one prev group - graph[row["state_group"]] = row["prev_state_group"] - - to_delete = state_groups_seen - referenced_groups - - to_dedelta = set() - for sg in referenced_groups: - prev_sg = graph.get(sg) - if prev_sg and prev_sg in to_delete: - to_dedelta.add(sg) - - return to_delete, to_dedelta - class StateStore(StateGroupWorkerStore, BackgroundUpdateStore): """ Keeps track of the state at a given event. From a163b748a5ca37853f440c5c46d2da80f738a9e0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 29 Oct 2018 17:34:21 +0000 Subject: [PATCH 71/85] Don't truncate command name in metrics --- synapse/replication/tcp/protocol.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py index 5dc7b3fff..0b3fe6cbf 100644 --- a/synapse/replication/tcp/protocol.py +++ b/synapse/replication/tcp/protocol.py @@ -656,7 +656,7 @@ tcp_inbound_commands = LaterGauge( "", ["command", "name"], lambda: { - (k[0], p.name,): count + (k, p.name,): count for p in connected_connections for k, count in iteritems(p.inbound_commands_counter) }, @@ -667,7 +667,7 @@ tcp_outbound_commands = LaterGauge( "", ["command", "name"], lambda: { - (k[0], p.name,): count + (k, p.name,): count for p in connected_connections for k, count in iteritems(p.outbound_commands_counter) }, From 88e5ffe6fe816e54a5471728e93fde63353d9a70 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 29 Oct 2018 17:34:34 +0000 Subject: [PATCH 72/85] Deduplicate device updates sent over replication We currently send several kHz of device list updates over replication occasionally, which often causes the replication streams to lag and then get dropped. A lot of those updates will actually be duplicates, since we don't send e.g. device_ids across replication, so let's deduplicate them when we pull them out of the database. --- synapse/storage/devices.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py index d10ff9e4b..62497ab63 100644 --- a/synapse/storage/devices.py +++ b/synapse/storage/devices.py @@ -589,10 +589,14 @@ class DeviceStore(SQLBaseStore): combined list of changes to devices, and which destinations need to be poked. `destination` may be None if no destinations need to be poked. """ + # We do a group by here as there can be a large number of duplicate + # entries, since we throw away device IDs. sql = """ - SELECT stream_id, user_id, destination FROM device_lists_stream + SELECT MAX(stream_id) AS stream_id, user_id, destination + FROM device_lists_stream LEFT JOIN device_lists_outbound_pokes USING (stream_id, user_id, device_id) WHERE ? < stream_id AND stream_id <= ?
+ GROUP BY user_id, destination """ return self._execute( "get_all_device_list_changes_for_remotes", None, From 39f419868fa8df56e3d9df9ba8e153884fe3ea55 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 29 Oct 2018 17:38:09 +0000 Subject: [PATCH 73/85] Newsfile --- changelog.d/4109.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/4109.misc diff --git a/changelog.d/4109.misc b/changelog.d/4109.misc new file mode 100644 index 000000000..566c68311 --- /dev/null +++ b/changelog.d/4109.misc @@ -0,0 +1 @@ +Reduce replication traffic for device lists From 4f0fa7a1201b0b76763dc146db9af12c4dd29494 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 29 Oct 2018 18:15:42 +0000 Subject: [PATCH 74/85] Newsfile --- changelog.d/4110.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/4110.misc diff --git a/changelog.d/4110.misc b/changelog.d/4110.misc new file mode 100644 index 000000000..a50327ae3 --- /dev/null +++ b/changelog.d/4110.misc @@ -0,0 +1 @@ +Fix `synapse_replication_tcp_protocol_*_commands` metric label to be full command name, rather than just the first character From 0dce9e1379ea867c9a00c8e6cf1d42badb52601d Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 30 Oct 2018 23:55:43 +1100 Subject: [PATCH 75/85] Write some tests for the email pusher (#4095) --- .travis.yml | 11 ++- changelog.d/4095.bugfix | 1 + synapse/push/emailpusher.py | 5 +- synapse/push/mailer.py | 10 +-- synapse/server.py | 5 ++ tests/push/__init__.py | 0 tests/push/test_email.py | 148 ++++++++++++++++++++++++++++++++++++ tests/server.py | 4 +- tests/test_mau.py | 2 +- tests/unittest.py | 9 ++- 10 files changed, 182 insertions(+), 13 deletions(-) create mode 100644 changelog.d/4095.bugfix create mode 100644 tests/push/__init__.py create mode 100644 tests/push/test_email.py diff --git a/.travis.yml b/.travis.yml index fd41841c7..655fab9d8 100644 --- a/.travis.yml +++ b/.travis.yml @@ -23,6 +23,9 @@ branches: - develop - /^release-v/ +# When running the tox environments that call Twisted Trial, we can pass the -j +# flag to run the tests concurrently. We set this to 2 for CPU bound tests +# (SQLite) and 4 for I/O bound tests (PostgreSQL). matrix: fast_finish: true include: @@ -33,10 +36,10 @@ matrix: env: TOX_ENV="pep8,check_isort" - python: 2.7 - env: TOX_ENV=py27 + env: TOX_ENV=py27 TRIAL_FLAGS="-j 2" - python: 2.7 - env: TOX_ENV=py27-old + env: TOX_ENV=py27-old TRIAL_FLAGS="-j 2" - python: 2.7 env: TOX_ENV=py27-postgres TRIAL_FLAGS="-j 4" @@ -44,10 +47,10 @@ matrix: - postgresql - python: 3.5 - env: TOX_ENV=py35 + env: TOX_ENV=py35 TRIAL_FLAGS="-j 2" - python: 3.6 - env: TOX_ENV=py36 + env: TOX_ENV=py36 TRIAL_FLAGS="-j 2" - python: 3.6 env: TOX_ENV=py36-postgres TRIAL_FLAGS="-j 4" diff --git a/changelog.d/4095.bugfix b/changelog.d/4095.bugfix new file mode 100644 index 000000000..76ee7148c --- /dev/null +++ b/changelog.d/4095.bugfix @@ -0,0 +1 @@ +Fix exceptions when using the email mailer on Python 3. 
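The first hunk below stops `on_new_notifications` from folding a `None` starting value into `max()`: Python 2 silently ordered `None` below any integer, but on Python 3 comparing an `int` with `None` raises `TypeError`. A minimal standalone sketch of the guarded pattern (written with an explicit `None` check; illustrative only, not Synapse code):

```python
def update_max(current, candidate):
    """Running maximum where `current` may still be None ("no value yet")."""
    # On Python 3, max(7, None) raises TypeError, so handle None explicitly.
    if current is None:
        return candidate
    return max(current, candidate)

assert update_max(None, 7) == 7
assert update_max(7, 3) == 7
```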
diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index f36912425..50e1007d8 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -85,7 +85,10 @@ class EmailPusher(object): self.timed_call = None def on_new_notifications(self, min_stream_ordering, max_stream_ordering): - self.max_stream_ordering = max(max_stream_ordering, self.max_stream_ordering) + if self.max_stream_ordering: + self.max_stream_ordering = max(max_stream_ordering, self.max_stream_ordering) + else: + self.max_stream_ordering = max_stream_ordering self._start_processing() def on_new_receipts(self, min_stream_id, max_stream_id): diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 16fb5e847..ebcb93bfc 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -26,7 +26,6 @@ import bleach import jinja2 from twisted.internet import defer -from twisted.mail.smtp import sendmail from synapse.api.constants import EventTypes from synapse.api.errors import StoreError @@ -85,6 +84,7 @@ class Mailer(object): self.notif_template_html = notif_template_html self.notif_template_text = notif_template_text + self.sendmail = self.hs.get_sendmail() self.store = self.hs.get_datastore() self.macaroon_gen = self.hs.get_macaroon_generator() self.state_handler = self.hs.get_state_handler() @@ -191,11 +191,11 @@ class Mailer(object): multipart_msg.attach(html_part) logger.info("Sending email push notification to %s" % email_address) - # logger.debug(html_text) - yield sendmail( + yield self.sendmail( self.hs.config.email_smtp_host, - raw_from, raw_to, multipart_msg.as_string(), + raw_from, raw_to, multipart_msg.as_string().encode('utf8'), + reactor=self.hs.get_reactor(), port=self.hs.config.email_smtp_port, requireAuthentication=self.hs.config.email_smtp_user is not None, username=self.hs.config.email_smtp_user, @@ -333,7 +333,7 @@ class Mailer(object): notif_events, user_id, reason): if len(notifs_by_room) == 1: # Only one room has new stuff - room_id = notifs_by_room.keys()[0] + room_id = list(notifs_by_room.keys())[0] # If the room has some kind of name, use it, but we don't # want the generated-from-names one here otherwise we'll diff --git a/synapse/server.py b/synapse/server.py index cf6b872cb..9985687b9 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -23,6 +23,7 @@ import abc import logging from twisted.enterprise import adbapi +from twisted.mail.smtp import sendmail from twisted.web.client import BrowserLikePolicyForHTTPS from synapse.api.auth import Auth @@ -174,6 +175,7 @@ class HomeServer(object): 'message_handler', 'pagination_handler', 'room_context_handler', + 'sendmail', ] # This is overridden in derived application classes @@ -269,6 +271,9 @@ class HomeServer(object): def build_room_creation_handler(self): return RoomCreationHandler(self) + def build_sendmail(self): + return sendmail + def build_state_handler(self): return StateHandler(self) diff --git a/tests/push/__init__.py b/tests/push/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/push/test_email.py b/tests/push/test_email.py new file mode 100644 index 000000000..50ee6910d --- /dev/null +++ b/tests/push/test_email.py @@ -0,0 +1,148 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +import pkg_resources + +from twisted.internet.defer import Deferred + +from synapse.rest.client.v1 import admin, login, room + +from tests.unittest import HomeserverTestCase + +try: + from synapse.push.mailer import load_jinja2_templates +except Exception: + load_jinja2_templates = None + + +class EmailPusherTests(HomeserverTestCase): + + skip = "No Jinja installed" if not load_jinja2_templates else None + servlets = [ + admin.register_servlets, + room.register_servlets, + login.register_servlets, + ] + user_id = True + hijack_auth = False + + def make_homeserver(self, reactor, clock): + + # List[Tuple[Deferred, args, kwargs]] + self.email_attempts = [] + + def sendmail(*args, **kwargs): + d = Deferred() + self.email_attempts.append((d, args, kwargs)) + return d + + config = self.default_config() + config.email_enable_notifs = True + config.start_pushers = True + + config.email_template_dir = os.path.abspath( + pkg_resources.resource_filename('synapse', 'res/templates') + ) + config.email_notif_template_html = "notif_mail.html" + config.email_notif_template_text = "notif_mail.txt" + config.email_smtp_host = "127.0.0.1" + config.email_smtp_port = 20 + config.require_transport_security = False + config.email_smtp_user = None + config.email_app_name = "Matrix" + config.email_notif_from = "test@example.com" + + hs = self.setup_test_homeserver(config=config, sendmail=sendmail) + + return hs + + def test_sends_email(self): + + # Register the user who gets notified + user_id = self.register_user("user", "pass") + access_token = self.login("user", "pass") + + # Register the user who sends the message + other_user_id = self.register_user("otheruser", "pass") + other_access_token = self.login("otheruser", "pass") + + # Register the pusher + user_tuple = self.get_success( + self.hs.get_datastore().get_user_by_access_token(access_token) + ) + token_id = user_tuple["token_id"] + + self.get_success( + self.hs.get_pusherpool().add_pusher( + user_id=user_id, + access_token=token_id, + kind="email", + app_id="m.email", + app_display_name="Email Notifications", + device_display_name="a@example.com", + pushkey="a@example.com", + lang=None, + data={}, + ) + ) + + # Create a room + room = self.helper.create_room_as(user_id, tok=access_token) + + # Invite the other person + self.helper.invite(room=room, src=user_id, tok=access_token, targ=other_user_id) + + # The other user joins + self.helper.join(room=room, user=other_user_id, tok=other_access_token) + + # The other user sends some messages + self.helper.send(room, body="Hi!", tok=other_access_token) + self.helper.send(room, body="There!", tok=other_access_token) + + # Get the stream ordering before it gets sent + pushers = self.get_success( + self.hs.get_datastore().get_pushers_by(dict(user_name=user_id)) + ) + self.assertEqual(len(pushers), 1) + last_stream_ordering = pushers[0]["last_stream_ordering"] + + # Advance time a bit, so the pusher will register something has happened + self.pump(100) + + # It hasn't succeeded yet, so the stream ordering shouldn't have moved + pushers = self.get_success( + 
self.hs.get_datastore().get_pushers_by(dict(user_name=user_id)) + ) + self.assertEqual(len(pushers), 1) + self.assertEqual(last_stream_ordering, pushers[0]["last_stream_ordering"]) + + # One email was attempted to be sent + self.assertEqual(len(self.email_attempts), 1) + + # Make the email succeed + self.email_attempts[0][0].callback(True) + self.pump() + + # One email was attempted to be sent + self.assertEqual(len(self.email_attempts), 1) + + # The stream ordering has increased + pushers = self.get_success( + self.hs.get_datastore().get_pushers_by(dict(user_name=user_id)) + ) + self.assertEqual(len(pushers), 1) + self.assertTrue(pushers[0]["last_stream_ordering"] > last_stream_ordering) diff --git a/tests/server.py b/tests/server.py index 7bee58dff..819c85444 100644 --- a/tests/server.py +++ b/tests/server.py @@ -125,7 +125,9 @@ def make_request(method, path, content=b"", access_token=None, request=SynapseRe req.content = BytesIO(content) if access_token: - req.requestHeaders.addRawHeader(b"Authorization", b"Bearer " + access_token) + req.requestHeaders.addRawHeader( + b"Authorization", b"Bearer " + access_token.encode('ascii') + ) if content: req.requestHeaders.addRawHeader(b"Content-Type", b"application/json") diff --git a/tests/test_mau.py b/tests/test_mau.py index bdbacb844..5d387851c 100644 --- a/tests/test_mau.py +++ b/tests/test_mau.py @@ -207,7 +207,7 @@ class TestMauLimit(unittest.TestCase): def do_sync_for_user(self, token): request, channel = make_request( - "GET", "/sync", access_token=token.encode('ascii') + "GET", "/sync", access_token=token ) render(request, self.resource, self.reactor) diff --git a/tests/unittest.py b/tests/unittest.py index a59291cc6..4d40bdb6a 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -146,6 +146,13 @@ def DEBUG(target): return target +def INFO(target): + """A decorator to set the .loglevel attribute to logging.INFO. + Can apply to either a TestCase or an individual test method.""" + target.loglevel = logging.INFO + return target + + class HomeserverTestCase(TestCase): """ A base TestCase that reduces boilerplate for HomeServer-using test cases. @@ -373,5 +380,5 @@ class HomeserverTestCase(TestCase): self.render(request) self.assertEqual(channel.code, 200) - access_token = channel.json_body["access_token"].encode('ascii') + access_token = channel.json_body["access_token"] return access_token From 2e223a8c22ef7f65aa42fd149f178a842b60e3c7 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 31 Oct 2018 04:24:59 +1100 Subject: [PATCH 76/85] Remove the unused /pull federation API (#4118) --- changelog.d/4118.removal | 1 + synapse/federation/federation_server.py | 5 ----- synapse/federation/transport/server.py | 9 --------- 3 files changed, 1 insertion(+), 14 deletions(-) create mode 100644 changelog.d/4118.removal diff --git a/changelog.d/4118.removal b/changelog.d/4118.removal new file mode 100644 index 000000000..6fb1d67b4 --- /dev/null +++ b/changelog.d/4118.removal @@ -0,0 +1 @@ +The obsolete and non-functional /pull federation endpoint has been removed. 
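Deleting `FederationPullServlet` from the `FEDERATION_SERVLET_CLASSES` tuple in `synapse/federation/transport/server.py` below is enough to unregister the route, since the transport layer instantiates each class in that tuple and asks it to register itself. A toy sketch of that registry pattern, using hypothetical names rather than Synapse's actual classes:

```python
class EchoServlet(object):
    """Hypothetical servlet that registers its own path on a route table."""
    PATH = "/echo"

    def register(self, routes):
        routes[self.PATH] = self

# Dropping a class from this tuple is all that's needed to remove its route.
SERVLET_CLASSES = (EchoServlet,)

def register_servlets(routes):
    for servlet_class in SERVLET_CLASSES:
        servlet_class().register(routes)

routes = {}
register_servlets(routes)
assert "/echo" in routes
```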
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 0f9302a6a..fa2cc550e 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -323,11 +323,6 @@ class FederationServer(FederationBase):
         else:
             defer.returnValue((404, ""))
 
-    @defer.inlineCallbacks
-    @log_function
-    def on_pull_request(self, origin, versions):
-        raise NotImplementedError("Pull transactions not implemented")
-
     @defer.inlineCallbacks
     def on_query_request(self, query_type, args):
         received_queries_counter.labels(query_type).inc()
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index 7288d4907..3553f418f 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -362,14 +362,6 @@ class FederationSendServlet(BaseFederationServlet):
         defer.returnValue((code, response))
 
 
-class FederationPullServlet(BaseFederationServlet):
-    PATH = "/pull/"
-
-    # This is for when someone asks us for everything since version X
-    def on_GET(self, origin, content, query):
-        return self.handler.on_pull_request(query["origin"][0], query["v"])
-
-
 class FederationEventServlet(BaseFederationServlet):
     PATH = "/event/(?P<event_id>[^/]*)/"
 
@@ -1261,7 +1253,6 @@ class FederationGroupsSettingJoinPolicyServlet(BaseFederationServlet):
 
 FEDERATION_SERVLET_CLASSES = (
     FederationSendServlet,
-    FederationPullServlet,
     FederationEventServlet,
     FederationStateServlet,
     FederationStateIdsServlet,

From 3bade14ec0aa7e56c84d30241bd86a177f0699d6 Mon Sep 17 00:00:00 2001
From: Amber Brown
Date: Wed, 31 Oct 2018 04:33:41 +1100
Subject: [PATCH 77/85] Fix search 500ing (#4122)

---
 changelog.d/4122.bugfix            |   1 +
 synapse/handlers/search.py         |   8 ++-
 tests/rest/client/v1/test_rooms.py | 106 ++++++++++++++++++++++++++++-
 3 files changed, 112 insertions(+), 3 deletions(-)
 create mode 100644 changelog.d/4122.bugfix

diff --git a/changelog.d/4122.bugfix b/changelog.d/4122.bugfix
new file mode 100644
index 000000000..66dcfb18b
--- /dev/null
+++ b/changelog.d/4122.bugfix
@@ -0,0 +1 @@
+Searches that request profile info now no longer fail with a 500.
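The handler below was still passing the old `types=` keyword to `get_state_for_event`; the fix wraps the same `(event type, state key)` pairs in a `StateFilter` instead. As a rough illustration of what such a filter selects, a toy dict-based sketch (not Synapse's actual `StateFilter` implementation):

```python
# Current room state, keyed by (event_type, state_key) -- toy data.
state = {
    ("m.room.member", "@alice:example.com"): {"displayname": "Alice"},
    ("m.room.member", "@bob:example.com"): {"displayname": "Bob"},
    ("m.room.name", ""): {"name": "Example room"},
}

# Only membership events for the senders of the returned results are wanted.
senders = ["@alice:example.com"]
wanted = {("m.room.member", sender) for sender in senders}

filtered = {key: event for key, event in state.items() if key in wanted}
assert filtered == {
    ("m.room.member", "@alice:example.com"): {"displayname": "Alice"},
}
```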
diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index 0c1d52fd1..80e7b15de 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -24,6 +24,7 @@ from synapse.api.constants import EventTypes, Membership from synapse.api.errors import SynapseError from synapse.api.filtering import Filter from synapse.events.utils import serialize_event +from synapse.storage.state import StateFilter from synapse.visibility import filter_events_for_client from ._base import BaseHandler @@ -324,9 +325,12 @@ class SearchHandler(BaseHandler): else: last_event_id = event.event_id + state_filter = StateFilter.from_types( + [(EventTypes.Member, sender) for sender in senders] + ) + state = yield self.store.get_state_for_event( - last_event_id, - types=[(EventTypes.Member, sender) for sender in senders] + last_event_id, state_filter ) res["profile_info"] = { diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index 359f7777f..a824be9a6 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -23,7 +23,7 @@ from six.moves.urllib import parse as urlparse from twisted.internet import defer from synapse.api.constants import Membership -from synapse.rest.client.v1 import room +from synapse.rest.client.v1 import admin, login, room from tests import unittest @@ -799,3 +799,107 @@ class RoomMessageListTestCase(RoomBase): self.assertEquals(token, channel.json_body['start']) self.assertTrue("chunk" in channel.json_body) self.assertTrue("end" in channel.json_body) + + +class RoomSearchTestCase(unittest.HomeserverTestCase): + servlets = [ + admin.register_servlets, + room.register_servlets, + login.register_servlets, + ] + user_id = True + hijack_auth = False + + def prepare(self, reactor, clock, hs): + + # Register the user who does the searching + self.user_id = self.register_user("user", "pass") + self.access_token = self.login("user", "pass") + + # Register the user who sends the message + self.other_user_id = self.register_user("otheruser", "pass") + self.other_access_token = self.login("otheruser", "pass") + + # Create a room + self.room = self.helper.create_room_as(self.user_id, tok=self.access_token) + + # Invite the other person + self.helper.invite( + room=self.room, + src=self.user_id, + tok=self.access_token, + targ=self.other_user_id, + ) + + # The other user joins + self.helper.join( + room=self.room, user=self.other_user_id, tok=self.other_access_token + ) + + def test_finds_message(self): + """ + The search functionality will search for content in messages if asked to + do so. + """ + # The other user sends some messages + self.helper.send(self.room, body="Hi!", tok=self.other_access_token) + self.helper.send(self.room, body="There!", tok=self.other_access_token) + + request, channel = self.make_request( + "POST", + "/search?access_token=%s" % (self.access_token,), + { + "search_categories": { + "room_events": {"keys": ["content.body"], "search_term": "Hi"} + } + }, + ) + self.render(request) + + # Check we get the results we expect -- one search result, of the sent + # messages + self.assertEqual(channel.code, 200) + results = channel.json_body["search_categories"]["room_events"] + self.assertEqual(results["count"], 1) + self.assertEqual(results["results"][0]["result"]["content"]["body"], "Hi!") + + # No context was requested, so we should get none. 
+        self.assertEqual(results["results"][0]["context"], {})
+
+    def test_include_context(self):
+        """
+        When event_context includes include_profile, profile information will be
+        included in the search response.
+        """
+        # The other user sends some messages
+        self.helper.send(self.room, body="Hi!", tok=self.other_access_token)
+        self.helper.send(self.room, body="There!", tok=self.other_access_token)
+
+        request, channel = self.make_request(
+            "POST",
+            "/search?access_token=%s" % (self.access_token,),
+            {
+                "search_categories": {
+                    "room_events": {
+                        "keys": ["content.body"],
+                        "search_term": "Hi",
+                        "event_context": {"include_profile": True},
+                    }
+                }
+            },
+        )
+        self.render(request)
+
+        # Check we get the results we expect -- one search result, of the sent
+        # messages
+        self.assertEqual(channel.code, 200)
+        results = channel.json_body["search_categories"]["room_events"]
+        self.assertEqual(results["count"], 1)
+        self.assertEqual(results["results"][0]["result"]["content"]["body"], "Hi!")
+
+        # We should get context info, like the two users, and the display names.
+        context = results["results"][0]["context"]
+        self.assertEqual(len(context["profile_info"].keys()), 2)
+        self.assertEqual(
+            context["profile_info"][self.other_user_id]["displayname"], "otheruser"
+        )

From 0f6ec6d1aedc88a2057f50b77ce9d6a405177096 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Tue, 30 Oct 2018 21:00:31 +0000
Subject: [PATCH 78/85] Attempt to fix tox installs

It seems that, at some point, the ability to run tox on old servers (with
old setuptools) got broken - and it was only working on our Jenkins
instance by dint of reusing the tox environments.

Let's try to get tox to do the right thing, and remove the guff from
jenkins/prepare_synapse.sh.

(There is a separate question about whether the jenkins builds should be
using tox to prepare the virtualenv at all here, but that is somewhat
orthogonal).
---
 jenkins/prepare_synapse.sh | 19 -------------------
 tox.ini                    | 14 ++++++++++++++
 2 files changed, 14 insertions(+), 19 deletions(-)

diff --git a/jenkins/prepare_synapse.sh b/jenkins/prepare_synapse.sh
index d95ca846c..016afb8ba 100755
--- a/jenkins/prepare_synapse.sh
+++ b/jenkins/prepare_synapse.sh
@@ -14,22 +14,3 @@ fi
 
 # set up the virtualenv
 tox -e py27 --notest -v
-
-TOX_BIN=$TOX_DIR/py27/bin
-
-# cryptography 2.2 requires setuptools >= 18.5.
-#
-# older versions of virtualenv (?) give us a virtualenv with the same version
-# of setuptools as is installed on the system python (and tox runs virtualenv
-# under python3, so we get the version of setuptools that is installed on that).
-#
-# anyway, make sure that we have a recent enough setuptools.
-$TOX_BIN/pip install 'setuptools>=18.5'
-
-# we also need a semi-recent version of pip, because old ones fail to install
-# the "enum34" dependency of cryptography.
-$TOX_BIN/pip install 'pip>=10'
-
-{ python synapse/python_dependencies.py
-  echo lxml
-} | xargs $TOX_BIN/pip install
diff --git a/tox.ini b/tox.ini
index 9de5a5704..920211bf5 100644
--- a/tox.ini
+++ b/tox.ini
@@ -11,6 +11,20 @@ deps =
     # needed by some of the tests
     lxml
 
+    # cryptography 2.2 requires setuptools >= 18.5
+    #
+    # older versions of virtualenv (?) give us a virtualenv with the same
+    # version of setuptools as is installed on the system python (and tox runs
+    # virtualenv under python3, so we get the version of setuptools that is
+    # installed on that).
+    #
+    # anyway, make sure that we have a recent enough setuptools.
+ setuptools>=18.5 + + # we also need a semi-recent version of pip, because old ones fail to + # install the "enum34" dependency of cryptography. + pip>=10 + setenv = PYTHONDONTWRITEBYTECODE = no_byte_code From a2d8bff0dc430f9e0a980535dd4330ff420118ee Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 30 Oct 2018 21:21:05 +0000 Subject: [PATCH 79/85] changelog --- changelog.d/4124.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/4124.misc diff --git a/changelog.d/4124.misc b/changelog.d/4124.misc new file mode 100644 index 000000000..28f438b9b --- /dev/null +++ b/changelog.d/4124.misc @@ -0,0 +1 @@ +Fix `tox` failure on old systems From f79f45448527f22f3813e38233521a5e13e9223e Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 31 Oct 2018 22:29:02 +1100 Subject: [PATCH 80/85] Remove deprecated v1 key exchange endpoint (#4119) --- changelog.d/4119.removal | 1 + synapse/api/urls.py | 1 - synapse/app/homeserver.py | 7 +- synapse/rest/key/v1/__init__.py | 14 ---- synapse/rest/key/v1/server_key_resource.py | 92 ---------------------- 5 files changed, 2 insertions(+), 113 deletions(-) create mode 100644 changelog.d/4119.removal delete mode 100644 synapse/rest/key/v1/__init__.py delete mode 100644 synapse/rest/key/v1/server_key_resource.py diff --git a/changelog.d/4119.removal b/changelog.d/4119.removal new file mode 100644 index 000000000..81383ece6 --- /dev/null +++ b/changelog.d/4119.removal @@ -0,0 +1 @@ +The deprecated v1 key exchange endpoints have been removed. diff --git a/synapse/api/urls.py b/synapse/api/urls.py index 6d9f1ca0e..f78695b65 100644 --- a/synapse/api/urls.py +++ b/synapse/api/urls.py @@ -28,7 +28,6 @@ FEDERATION_PREFIX = "/_matrix/federation/v1" STATIC_PREFIX = "/_matrix/static" WEB_CLIENT_PREFIX = "/_matrix/client" CONTENT_REPO_PREFIX = "/_matrix/content" -SERVER_KEY_PREFIX = "/_matrix/key/v1" SERVER_KEY_V2_PREFIX = "/_matrix/key/v2" MEDIA_PREFIX = "/_matrix/media/r0" LEGACY_MEDIA_PREFIX = "/_matrix/media/v1" diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 593e1e75d..415374a2c 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -37,7 +37,6 @@ from synapse.api.urls import ( FEDERATION_PREFIX, LEGACY_MEDIA_PREFIX, MEDIA_PREFIX, - SERVER_KEY_PREFIX, SERVER_KEY_V2_PREFIX, STATIC_PREFIX, WEB_CLIENT_PREFIX, @@ -59,7 +58,6 @@ from synapse.python_dependencies import CONDITIONAL_REQUIREMENTS, check_requirem from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory from synapse.rest import ClientRestResource -from synapse.rest.key.v1.server_key_resource import LocalKey from synapse.rest.key.v2 import KeyApiV2Resource from synapse.rest.media.v0.content_repository import ContentRepoResource from synapse.server import HomeServer @@ -236,10 +234,7 @@ class SynapseHomeServer(HomeServer): ) if name in ["keys", "federation"]: - resources.update({ - SERVER_KEY_PREFIX: LocalKey(self), - SERVER_KEY_V2_PREFIX: KeyApiV2Resource(self), - }) + resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self) if name == "webclient": resources[WEB_CLIENT_PREFIX] = build_resource_for_web_client(self) diff --git a/synapse/rest/key/v1/__init__.py b/synapse/rest/key/v1/__init__.py deleted file mode 100644 index fe0ac3f8e..000000000 --- a/synapse/rest/key/v1/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2015, 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the 
"License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/synapse/rest/key/v1/server_key_resource.py b/synapse/rest/key/v1/server_key_resource.py deleted file mode 100644 index 38eb2ee23..000000000 --- a/synapse/rest/key/v1/server_key_resource.py +++ /dev/null @@ -1,92 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2014-2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import logging - -from canonicaljson import encode_canonical_json -from signedjson.sign import sign_json -from unpaddedbase64 import encode_base64 - -from OpenSSL import crypto -from twisted.web.resource import Resource - -from synapse.http.server import respond_with_json_bytes - -logger = logging.getLogger(__name__) - - -class LocalKey(Resource): - """HTTP resource containing encoding the TLS X.509 certificate and NACL - signature verification keys for this server:: - - GET /key HTTP/1.1 - - HTTP/1.1 200 OK - Content-Type: application/json - { - "server_name": "this.server.example.com" - "verify_keys": { - "algorithm:version": # base64 encoded NACL verification key. - }, - "tls_certificate": # base64 ASN.1 DER encoded X.509 tls cert. - "signatures": { - "this.server.example.com": { - "algorithm:version": # NACL signature for this server. 
- } - } - } - """ - - def __init__(self, hs): - self.response_body = encode_canonical_json( - self.response_json_object(hs.config) - ) - Resource.__init__(self) - - @staticmethod - def response_json_object(server_config): - verify_keys = {} - for key in server_config.signing_key: - verify_key_bytes = key.verify_key.encode() - key_id = "%s:%s" % (key.alg, key.version) - verify_keys[key_id] = encode_base64(verify_key_bytes) - - x509_certificate_bytes = crypto.dump_certificate( - crypto.FILETYPE_ASN1, - server_config.tls_certificate - ) - json_object = { - u"server_name": server_config.server_name, - u"verify_keys": verify_keys, - u"tls_certificate": encode_base64(x509_certificate_bytes) - } - for key in server_config.signing_key: - json_object = sign_json( - json_object, - server_config.server_name, - key, - ) - - return json_object - - def render_GET(self, request): - return respond_with_json_bytes( - request, 200, self.response_body, - ) - - def getChild(self, name, request): - if name == b'': - return self From 916efc824950f924c3f7bced09b9cd5759b1532e Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 31 Oct 2018 23:14:39 +1100 Subject: [PATCH 81/85] Remove fetching keys via the deprecated v1 kex method (#4120) --- changelog.d/4120.removal | 1 + synapse/crypto/keyclient.py | 8 ++- synapse/crypto/keyring.py | 110 +++--------------------------------- 3 files changed, 13 insertions(+), 106 deletions(-) create mode 100644 changelog.d/4120.removal diff --git a/changelog.d/4120.removal b/changelog.d/4120.removal new file mode 100644 index 000000000..a7a567098 --- /dev/null +++ b/changelog.d/4120.removal @@ -0,0 +1 @@ +Synapse will no longer fetch keys using the fallback deprecated v1 key exchange method and will now always use v2. diff --git a/synapse/crypto/keyclient.py b/synapse/crypto/keyclient.py index 080c81f14..d40e4b859 100644 --- a/synapse/crypto/keyclient.py +++ b/synapse/crypto/keyclient.py @@ -15,6 +15,8 @@ import logging +from six.moves import urllib + from canonicaljson import json from twisted.internet import defer, reactor @@ -28,15 +30,15 @@ from synapse.util import logcontext logger = logging.getLogger(__name__) -KEY_API_V1 = b"/_matrix/key/v1/" +KEY_API_V2 = "/_matrix/key/v2/server/%s" @defer.inlineCallbacks -def fetch_server_key(server_name, tls_client_options_factory, path=KEY_API_V1): +def fetch_server_key(server_name, tls_client_options_factory, key_id): """Fetch the keys for a remote server.""" factory = SynapseKeyClientFactory() - factory.path = path + factory.path = KEY_API_V2 % (urllib.parse.quote(key_id), ) factory.host = server_name endpoint = matrix_federation_endpoint( reactor, server_name, tls_client_options_factory, timeout=30 diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index d89f94c21..515ebbc14 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd -# Copyright 2017 New Vector Ltd. +# Copyright 2017, 2018 New Vector Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -18,8 +18,6 @@ import hashlib import logging from collections import namedtuple -from six.moves import urllib - from signedjson.key import ( decode_verify_key_bytes, encode_verify_key_base64, @@ -395,32 +393,13 @@ class Keyring(object): @defer.inlineCallbacks def get_keys_from_server(self, server_name_and_key_ids): - @defer.inlineCallbacks - def get_key(server_name, key_ids): - keys = None - try: - keys = yield self.get_server_verify_key_v2_direct( - server_name, key_ids - ) - except Exception as e: - logger.info( - "Unable to get key %r for %r directly: %s %s", - key_ids, server_name, - type(e).__name__, str(e), - ) - - if not keys: - keys = yield self.get_server_verify_key_v1_direct( - server_name, key_ids - ) - - keys = {server_name: keys} - - defer.returnValue(keys) - results = yield logcontext.make_deferred_yieldable(defer.gatherResults( [ - run_in_background(get_key, server_name, key_ids) + run_in_background( + self.get_server_verify_key_v2_direct, + server_name, + key_ids, + ) for server_name, key_ids in server_name_and_key_ids ], consumeErrors=True, @@ -525,10 +504,7 @@ class Keyring(object): continue (response, tls_certificate) = yield fetch_server_key( - server_name, self.hs.tls_client_options_factory, - path=("/_matrix/key/v2/server/%s" % ( - urllib.parse.quote(requested_key_id), - )).encode("ascii"), + server_name, self.hs.tls_client_options_factory, requested_key_id ) if (u"signatures" not in response @@ -657,78 +633,6 @@ class Keyring(object): defer.returnValue(results) - @defer.inlineCallbacks - def get_server_verify_key_v1_direct(self, server_name, key_ids): - """Finds a verification key for the server with one of the key ids. - Args: - server_name (str): The name of the server to fetch a key for. - keys_ids (list of str): The key_ids to check for. - """ - - # Try to fetch the key from the remote server. - - (response, tls_certificate) = yield fetch_server_key( - server_name, self.hs.tls_client_options_factory - ) - - # Check the response. - - x509_certificate_bytes = crypto.dump_certificate( - crypto.FILETYPE_ASN1, tls_certificate - ) - - if ("signatures" not in response - or server_name not in response["signatures"]): - raise KeyLookupError("Key response not signed by remote server") - - if "tls_certificate" not in response: - raise KeyLookupError("Key response missing TLS certificate") - - tls_certificate_b64 = response["tls_certificate"] - - if encode_base64(x509_certificate_bytes) != tls_certificate_b64: - raise KeyLookupError("TLS certificate doesn't match") - - # Cache the result in the datastore. 
-
-        time_now_ms = self.clock.time_msec()
-
-        verify_keys = {}
-        for key_id, key_base64 in response["verify_keys"].items():
-            if is_signing_algorithm_supported(key_id):
-                key_bytes = decode_base64(key_base64)
-                verify_key = decode_verify_key_bytes(key_id, key_bytes)
-                verify_key.time_added = time_now_ms
-                verify_keys[key_id] = verify_key
-
-        for key_id in response["signatures"][server_name]:
-            if key_id not in response["verify_keys"]:
-                raise KeyLookupError(
-                    "Key response must include verification keys for all"
-                    " signatures"
-                )
-            if key_id in verify_keys:
-                verify_signed_json(
-                    response,
-                    server_name,
-                    verify_keys[key_id]
-                )
-
-        yield self.store.store_server_certificate(
-            server_name,
-            server_name,
-            time_now_ms,
-            tls_certificate,
-        )
-
-        yield self.store_keys(
-            server_name=server_name,
-            from_server=server_name,
-            verify_keys=verify_keys,
-        )
-
-        defer.returnValue(verify_keys)
-
     def store_keys(self, server_name, from_server, verify_keys):
         """Store a collection of verify keys for a given server
         Args:

From e3758c8c929be11fe38cebb2b4f7d43185f80197 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jonas=20Sch=C3=BCrmann?=
Date: Wed, 31 Oct 2018 15:46:47 +0100
Subject: [PATCH 82/85] Fix typo in docker-compose.yml
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Jonas Schürmann
---
 contrib/docker/docker-compose.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/contrib/docker/docker-compose.yml b/contrib/docker/docker-compose.yml
index 3a8dfbae3..b1f6fcb7d 100644
--- a/contrib/docker/docker-compose.yml
+++ b/contrib/docker/docker-compose.yml
@@ -47,4 +47,4 @@ services:
       # You may store the database tables in a local folder..
       - ./schemas:/var/lib/postgresql/data
       # .. or store them on some high performance storage for better results
-      # - /path/to/ssd/storage:/var/lib/postfesql/data
+      # - /path/to/ssd/storage:/var/lib/postgresql/data

From 9b827c40ca71510390c92472f7ec5cfcff9e69b2 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Wed, 31 Oct 2018 15:42:23 +0000
Subject: [PATCH 83/85] Log some bits about event creation (#4121)

I found these helpful in debugging my room upgrade tests.
---
 changelog.d/4121.misc       | 1 +
 synapse/handlers/message.py | 3 +++
 synapse/handlers/room.py    | 4 ++++
 3 files changed, 8 insertions(+)
 create mode 100644 changelog.d/4121.misc

diff --git a/changelog.d/4121.misc b/changelog.d/4121.misc
new file mode 100644
index 000000000..9c29d80c3
--- /dev/null
+++ b/changelog.d/4121.misc
@@ -0,0 +1 @@
+Log some bits about room creation
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 969e588e7..a7cd779b0 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -427,6 +427,9 @@ class EventCreationHandler(object):
 
         if event.is_state():
             prev_state = yield self.deduplicate_state_event(event, context)
             if prev_state is not None:
+                logger.info(
+                    "Not bothering to persist duplicate state event %s", event.event_id,
+                )
                 defer.returnValue(prev_state)
 
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 1d9417ff1..fe960342b 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -104,6 +104,8 @@ class RoomCreationHandler(BaseHandler):
             creator_id=user_id, is_public=r["is_public"],
         )
 
+        logger.info("Creating new room %s to replace %s", new_room_id, old_room_id)
+
         # we create and auth the tombstone event before properly creating the new
         # room, to check our user has perms in the old room.
         tombstone_event, tombstone_context = (
@@ -522,6 +524,7 @@ class RoomCreationHandler(BaseHandler):
         @defer.inlineCallbacks
         def send(etype, content, **kwargs):
             event = create(etype, content, **kwargs)
+            logger.info("Sending %s in new room", etype)
             yield self.event_creation_handler.create_and_send_nonmember_event(
                 creator,
                 event,
@@ -544,6 +547,7 @@ class RoomCreationHandler(BaseHandler):
             content=creation_content,
         )
 
+        logger.info("Sending %s in new room", EventTypes.Member)
         yield self.room_member_handler.update_membership(
             creator,
             creator.user,

From 94c7fadc98542d582ff67c5ac788081c0d836e6b Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Fri, 26 Oct 2018 15:11:35 +0100
Subject: [PATCH 84/85] Attempt to move room aliases on room upgrades

---
 changelog.d/4101.feature      |   1 +
 synapse/handlers/directory.py |  34 ++++++++--
 synapse/handlers/room.py      | 121 +++++++++++++++++++++++++++++++---
 3 files changed, 142 insertions(+), 14 deletions(-)
 create mode 100644 changelog.d/4101.feature

diff --git a/changelog.d/4101.feature b/changelog.d/4101.feature
new file mode 100644
index 000000000..a3f7dbdcd
--- /dev/null
+++ b/changelog.d/4101.feature
@@ -0,0 +1 @@
+Support for replacing rooms with new ones
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index 7d67bf803..0699731c1 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -138,9 +138,30 @@ class DirectoryHandler(BaseHandler):
         )
 
     @defer.inlineCallbacks
-    def delete_association(self, requester, room_alias):
-        # association deletion for human users
+    def delete_association(self, requester, room_alias, send_event=True):
+        """Remove an alias from the directory
+        (this is only meant for human users; AS users should call
+        delete_appservice_association)
+
+        Args:
+            requester (Requester):
+            room_alias (RoomAlias):
+            send_event (bool): Whether to send an updated m.room.aliases event.
+                Note that, if we delete the canonical alias, we will always attempt
+                to send an m.room.canonical_alias event
+
+        Returns:
+            Deferred[unicode]: room id that the alias used to point to
+
+        Raises:
+            NotFoundError: if the alias doesn't exist
+
+            AuthError: if the user doesn't have perms to delete the alias (ie, the user
+                is neither the creator of the alias, nor a server admin).
+
+            SynapseError: if the alias belongs to an AS
+        """
         user_id = requester.user.to_string()
 
         try:
@@ -168,10 +189,11 @@ class DirectoryHandler(BaseHandler):
         room_id = yield self._delete_association(room_alias)
 
         try:
-            yield self.send_room_alias_update_event(
-                requester,
-                room_id
-            )
+            if send_event:
+                yield self.send_room_alias_update_event(
+                    requester,
+                    room_id
+                )
 
             yield self._update_canonical_alias(
                 requester,
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 1d9417ff1..76811050a 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -136,10 +136,15 @@ class RoomCreationHandler(BaseHandler):
             requester, tombstone_event, tombstone_context,
         )
 
-        # and finally, shut down the PLs in the old room, and update them in the new
-        # room.
         old_room_state = yield tombstone_context.get_current_state_ids(self.store)
 
+        # update any aliases
+        yield self._move_aliases_to_new_room(
+            requester, old_room_id, new_room_id, old_room_state,
+        )
+
+        # and finally, shut down the PLs in the old room, and update them in the new
+        # room.
         yield self._update_upgraded_room_pls(
             requester, old_room_id, new_room_id, old_room_state,
         )
 
@@ -245,11 +250,6 @@ class RoomCreationHandler(BaseHandler):
         if not self.spam_checker.user_may_create_room(user_id):
             raise SynapseError(403, "You are not permitted to create rooms")
 
-        # XXX check alias is free
-        # canonical_alias = None
-
-        # XXX create association in directory handler
-
         creation_content = {
             "room_version": new_room_version,
             "predecessor": {
@@ -295,7 +295,112 @@ class RoomCreationHandler(BaseHandler):
 
         # XXX invites/joins
         # XXX 3pid invites
-        # XXX directory_handler.send_room_alias_update_event
+
+    @defer.inlineCallbacks
+    def _move_aliases_to_new_room(
+        self, requester, old_room_id, new_room_id, old_room_state,
+    ):
+        directory_handler = self.hs.get_handlers().directory_handler
+
+        aliases = yield self.store.get_aliases_for_room(old_room_id)
+
+        # check to see if we have a canonical alias.
+        canonical_alias = None
+        canonical_alias_event_id = old_room_state.get((EventTypes.CanonicalAlias, ""))
+        if canonical_alias_event_id:
+            canonical_alias_event = yield self.store.get_event(canonical_alias_event_id)
+            if canonical_alias_event:
+                canonical_alias = canonical_alias_event.content.get("alias", "")
+
+        # first we try to remove the aliases from the old room (we suppress sending
+        # the room_aliases event until the end).
+        #
+        # Note that we'll only be able to remove aliases that (a) aren't owned by an AS,
+        # and (b) the user created (unless the user is a server admin).
+        #
+        # This is probably correct - given we don't allow such aliases to be deleted
+        # normally, it would be odd to allow it in the case of doing a room upgrade -
+        # but it makes the upgrade less effective, and you have to wonder why a room
+        # admin can't remove aliases that point to that room anyway.
+        # (cf https://github.com/matrix-org/synapse/issues/2360)
+        #
+        removed_aliases = []
+        for alias_str in aliases:
+            alias = RoomAlias.from_string(alias_str)
+            try:
+                yield directory_handler.delete_association(
+                    requester, alias, send_event=False,
+                )
+            except SynapseError as e:
+                logger.warning(
+                    "Unable to remove alias %s from old room: %s",
+                    alias, e,
+                )
+            else:
+                removed_aliases.append(alias_str)
+
+        # if we didn't find any aliases, or couldn't remove any of them, we can
+        # skip the rest of this.
+        if not removed_aliases:
+            return
+
+        try:
+            # this can fail if, for some reason, our user doesn't have perms to send
+            # m.room.aliases events in the old room (note that we've already checked that
+            # they have perms to send a tombstone event, so that's not terribly likely).
+            #
+            # If that happens, it's regrettable, but we should carry on: it's the same
+            # as when you remove an alias from the directory normally - it just means that
+            # the aliases event gets out of sync with the directory
+            # (cf https://github.com/vector-im/riot-web/issues/2369)
+            yield directory_handler.send_room_alias_update_event(
+                requester, old_room_id,
+            )
+        except AuthError as e:
+            logger.warning(
+                "Failed to send updated alias event on old room: %s", e,
+            )
+
+        # we can now add any aliases we successfully removed to the new room.
+        for alias in removed_aliases:
+            try:
+                yield directory_handler.create_association(
+                    requester, RoomAlias.from_string(alias),
+                    new_room_id, servers=(self.hs.hostname, ),
+                    send_event=False,
+                )
+                logger.info("Moved alias %s to new room", alias)
+            except SynapseError as e:
+                # I'm not really expecting this to happen, but it could if the spam
+                # checking module decides it shouldn't, or similar.
+                logger.error(
+                    "Error adding alias %s to new room: %s",
+                    alias, e,
+                )
+
+        try:
+            if canonical_alias and (canonical_alias in removed_aliases):
+                yield self.event_creation_handler.create_and_send_nonmember_event(
+                    requester,
+                    {
+                        "type": EventTypes.CanonicalAlias,
+                        "state_key": "",
+                        "room_id": new_room_id,
+                        "sender": requester.user.to_string(),
+                        "content": {"alias": canonical_alias, },
+                    },
+                    ratelimit=False
+                )
+
+            yield directory_handler.send_room_alias_update_event(
+                requester, new_room_id,
+            )
+        except SynapseError as e:
+            # again I'm not really expecting this to fail, but if it does, I'd rather
+            # we returned the new room to the client at this point.
+            logger.error(
+                "Unable to send updated alias events in new room: %s", e,
+            )
 
     @defer.inlineCallbacks
     def create_room(self, requester, config, ratelimit=True,

From 0f8591a5a8695aa176736c651a361c40cf228b6d Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Mon, 29 Oct 2018 15:20:19 +0000
Subject: [PATCH 85/85] Avoid else clause on exception for clarity

---
 synapse/handlers/room.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 76811050a..9ff465671 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -331,13 +331,12 @@ class RoomCreationHandler(BaseHandler):
             yield directory_handler.delete_association(
                 requester, alias, send_event=False,
             )
+            removed_aliases.append(alias_str)
         except SynapseError as e:
             logger.warning(
                 "Unable to remove alias %s from old room: %s",
                 alias, e,
             )
-        else:
-            removed_aliases.append(alias_str)
 
         # if we didn't find any aliases, or couldn't remove any of them, we can
         # skip the rest of this.
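The final patch moves the `append` into the `try` body in place of the `else:` clause. The two forms behave identically here: a statement placed directly after the call that may raise only runs when that call succeeds, and `list.append` cannot itself raise the caught `SynapseError`, so nothing new can be swallowed by the `except`. A minimal self-contained sketch of the equivalence (illustrative only, with `RuntimeError` standing in for `SynapseError`):

```python
def remove_aliases(aliases, delete_association):
    """Try to delete each alias; return those that were actually removed."""
    removed = []
    for alias in aliases:
        try:
            delete_association(alias)
            # Only reached if delete_association() did not raise -- the same
            # effect as an `else:` clause on the try block.
            removed.append(alias)
        except RuntimeError as e:
            print("Unable to remove alias %s: %s" % (alias, e))
    return removed

def delete_or_fail(alias):
    if alias == "#bad:example.com":
        raise RuntimeError("not allowed")

result = remove_aliases(["#ok:example.com", "#bad:example.com"], delete_or_fail)
assert result == ["#ok:example.com"]
```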