From 9dc3293e0b3a5cbf6fcc4a0cef7386b531190882 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Tue, 7 Jun 2022 07:43:35 -0400
Subject: [PATCH 01/85] Consolidate the logic of delete_device/delete_devices. (#12970)

By always using delete_devices and sometimes passing a list with a single
device ID. Previously these methods had gotten out of sync with each other
and it seems there's little benefit to the single-device variant.
---
 changelog.d/12970.misc                    |  1 +
 synapse/handlers/device.py                | 33 ++---------------------
 synapse/module_api/__init__.py            |  2 +-
 synapse/rest/admin/devices.py             |  2 +-
 synapse/rest/client/devices.py            |  4 ++-
 synapse/rest/client/logout.py             |  4 +--
 synapse/storage/databases/main/devices.py | 10 -------
 tests/handlers/test_device.py             |  4 +--
 8 files changed, 12 insertions(+), 48 deletions(-)
 create mode 100644 changelog.d/12970.misc

diff --git a/changelog.d/12970.misc b/changelog.d/12970.misc
new file mode 100644
index 000000000..8f874aa07
--- /dev/null
+++ b/changelog.d/12970.misc
@@ -0,0 +1 @@
+Remove the `delete_device` method and always call `delete_devices`.
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index a0cbeedc3..b79c55170 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -397,35 +397,6 @@ class DeviceHandler(DeviceWorkerHandler):
         for user_id, user_devices in devices.items():
             await self.delete_devices(user_id, user_devices)

-    @trace
-    async def delete_device(self, user_id: str, device_id: str) -> None:
-        """Delete the given device
-
-        Args:
-            user_id: The user to delete the device from.
-            device_id: The device to delete.
-        """
-
-        try:
-            await self.store.delete_device(user_id, device_id)
-        except errors.StoreError as e:
-            if e.code == 404:
-                # no match
-                set_tag("error", True)
-                log_kv(
-                    {"reason": "User doesn't have device id.", "device_id": device_id}
-                )
-            else:
-                raise
-
-        await self._auth_handler.delete_access_tokens_for_user(
-            user_id, device_id=device_id
-        )
-
-        await self.store.delete_e2e_keys_by_device(user_id=user_id, device_id=device_id)
-
-        await self.notify_device_update(user_id, [device_id])
-
     @trace
     async def delete_all_devices_for_user(
         self, user_id: str, except_device_id: Optional[str] = None
@@ -591,7 +562,7 @@ class DeviceHandler(DeviceWorkerHandler):
             user_id, device_id, device_data
         )
         if old_device_id is not None:
-            await self.delete_device(user_id, old_device_id)
+            await self.delete_devices(user_id, [old_device_id])
         return device_id

     async def get_dehydrated_device(
@@ -638,7 +609,7 @@ class DeviceHandler(DeviceWorkerHandler):
         await self.store.update_device(user_id, device_id, old_device["display_name"])
         # can't call self.delete_device because that will clobber the
         # access token so call the storage layer directly
-        await self.store.delete_device(user_id, old_device_id)
+        await self.store.delete_devices(user_id, [old_device_id])
         await self.store.delete_e2e_keys_by_device(
             user_id=user_id, device_id=old_device_id
         )
diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py
index a8ad575fc..30b2aeffd 100644
--- a/synapse/module_api/__init__.py
+++ b/synapse/module_api/__init__.py
@@ -799,7 +799,7 @@ class ModuleApi:
         if device_id:
             # delete the device, which will also delete its access tokens
             yield defer.ensureDeferred(
-                self._hs.get_device_handler().delete_device(user_id, device_id)
+                self._hs.get_device_handler().delete_devices(user_id, [device_id])
             )
         else:
             # no associated device. Just delete the access token.
diff --git a/synapse/rest/admin/devices.py b/synapse/rest/admin/devices.py
index cef46ba0d..d93488010 100644
--- a/synapse/rest/admin/devices.py
+++ b/synapse/rest/admin/devices.py
@@ -80,7 +80,7 @@ class DeviceRestServlet(RestServlet):
         if u is None:
             raise NotFoundError("Unknown user")

-        await self.device_handler.delete_device(target_user.to_string(), device_id)
+        await self.device_handler.delete_devices(target_user.to_string(), [device_id])
         return HTTPStatus.OK, {}

     async def on_PUT(
diff --git a/synapse/rest/client/devices.py b/synapse/rest/client/devices.py
index ad6fd6492..6fab10243 100644
--- a/synapse/rest/client/devices.py
+++ b/synapse/rest/client/devices.py
@@ -147,7 +147,9 @@ class DeviceRestServlet(RestServlet):
             can_skip_ui_auth=True,
         )

-        await self.device_handler.delete_device(requester.user.to_string(), device_id)
+        await self.device_handler.delete_devices(
+            requester.user.to_string(), [device_id]
+        )
         return 200, {}

     async def on_PUT(
diff --git a/synapse/rest/client/logout.py b/synapse/rest/client/logout.py
index 193a6951b..23dfa4518 100644
--- a/synapse/rest/client/logout.py
+++ b/synapse/rest/client/logout.py
@@ -45,8 +45,8 @@ class LogoutRestServlet(RestServlet):
             access_token = self.auth.get_access_token_from_request(request)
             await self._auth_handler.delete_access_token(access_token)
         else:
-            await self._device_handler.delete_device(
-                requester.user.to_string(), requester.device_id
+            await self._device_handler.delete_devices(
+                requester.user.to_string(), [requester.device_id]
             )

         return 200, {}
diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py
index d900064c0..71e7863dd 100644
--- a/synapse/storage/databases/main/devices.py
+++ b/synapse/storage/databases/main/devices.py
@@ -1433,16 +1433,6 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
             )
             raise StoreError(500, "Problem storing device.")

-    async def delete_device(self, user_id: str, device_id: str) -> None:
-        """Delete a device and its device_inbox.
-
-        Args:
-            user_id: The ID of the user which owns the device
-            device_id: The ID of the device to delete
-        """
-
-        await self.delete_devices(user_id, [device_id])
-
     async def delete_devices(self, user_id: str, device_ids: List[str]) -> None:
         """Deletes several devices.
diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py
index 01ea7d2a4..b8b465d35 100644
--- a/tests/handlers/test_device.py
+++ b/tests/handlers/test_device.py
@@ -154,7 +154,7 @@ class DeviceTestCase(unittest.HomeserverTestCase):
         self._record_users()

         # delete the device
-        self.get_success(self.handler.delete_device(user1, "abc"))
+        self.get_success(self.handler.delete_devices(user1, ["abc"]))

         # check the device was deleted
         self.get_failure(self.handler.get_device(user1, "abc"), NotFoundError)
@@ -179,7 +179,7 @@ class DeviceTestCase(unittest.HomeserverTestCase):
         )

         # delete the device
-        self.get_success(self.handler.delete_device(user1, "abc"))
+        self.get_success(self.handler.delete_devices(user1, ["abc"]))

         # check that the device_inbox was deleted
         res = self.get_success(
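The consolidation above leaves `delete_devices` as the single code path, with
single-device callers passing a one-element list. A rough, self-contained
sketch of that calling convention (the class and data here are illustrative,
not Synapse's real handler):

    import asyncio
    from typing import Dict, List, Set

    class DeviceHandlerSketch:
        def __init__(self) -> None:
            self.devices: Dict[str, Set[str]] = {"@alice:example.org": {"abc", "def"}}

        async def delete_devices(self, user_id: str, device_ids: List[str]) -> None:
            # One code path serves both single- and multi-device deletion, so
            # the two variants can no longer drift out of sync.
            for device_id in device_ids:
                self.devices[user_id].discard(device_id)

    async def demo() -> None:
        handler = DeviceHandlerSketch()
        # Before this patch: await handler.delete_device("@alice:example.org", "abc")
        await handler.delete_devices("@alice:example.org", ["abc"])       # one device
        await handler.delete_devices("@alice:example.org", ["def", "xyz"])  # several

    asyncio.run(demo())
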
From d2fd7f7b5c6adabd1cb323a2107a16bbb3b4506d Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Tue, 7 Jun 2022 07:44:31 -0400
Subject: [PATCH 02/85] Fix a stale comment in get_room_version_id_txn. (#12969)

---
 changelog.d/12969.misc                  | 1 +
 synapse/storage/databases/main/state.py | 7 +------
 2 files changed, 2 insertions(+), 6 deletions(-)
 create mode 100644 changelog.d/12969.misc

diff --git a/changelog.d/12969.misc b/changelog.d/12969.misc
new file mode 100644
index 000000000..05de7ce83
--- /dev/null
+++ b/changelog.d/12969.misc
@@ -0,0 +1 @@
+Fix an inaccurate comment.
diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py
index bdd00273c..5e6efbd0f 100644
--- a/synapse/storage/databases/main/state.py
+++ b/synapse/storage/databases/main/state.py
@@ -127,13 +127,8 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
             NotFoundError: if the room is unknown
         """

-        # First we try looking up room version from the database, but for old
-        # rooms we might not have added the room version to it yet so we fall
-        # back to previous behaviour and look in current state events.
-        #
         # We really should have an entry in the rooms table for every room we
-        # care about, but let's be a bit paranoid (at least while the background
-        # update is happening) to avoid breaking existing rooms.
+        # care about, but let's be a bit paranoid.
         room_version = self.db_pool.simple_select_one_onecol_txn(
             txn,
             table="rooms",

From a7e506ddee16919cf543370899c756ec2d6685a8 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 7 Jun 2022 16:35:56 +0300
Subject: [PATCH 03/85] Reduce amount of state we pull out when attempting to send catchup PDUs. (#12963)

* Don't pull out state for catchup

* Newsfile

* Merge newsfile

---
 changelog.d/12963.misc                        |  1 +
 .../sender/per_destination_queue.py           | 31 ++++++++++++-------
 2 files changed, 21 insertions(+), 11 deletions(-)
 create mode 100644 changelog.d/12963.misc

diff --git a/changelog.d/12963.misc b/changelog.d/12963.misc
new file mode 100644
index 000000000..d57e1aca6
--- /dev/null
+++ b/changelog.d/12963.misc
@@ -0,0 +1 @@
+Reduce the amount of state we pull from the DB.
diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py
index 333ca9a97..41d8b937a 100644
--- a/synapse/federation/sender/per_destination_queue.py
+++ b/synapse/federation/sender/per_destination_queue.py
@@ -37,6 +37,7 @@ from synapse.metrics import sent_transactions_counter
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.types import ReadReceipt
 from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter
+from synapse.visibility import filter_events_for_server

 if TYPE_CHECKING:
     import synapse.server
@@ -77,6 +78,7 @@ class PerDestinationQueue:
     ):
         self._server_name = hs.hostname
         self._clock = hs.get_clock()
+        self._storage_controllers = hs.get_storage_controllers()
         self._store = hs.get_datastores().main
         self._transaction_manager = transaction_manager
         self._instance_name = hs.get_instance_name()
@@ -442,6 +444,12 @@ class PerDestinationQueue:
                 "This should not happen." % event_ids
             )

+        logger.info(
+            "Catching up destination %s with %d PDUs",
+            self._destination,
+            len(catchup_pdus),
+        )
+
         # We send transactions with events from one room only, as its likely
         # that the remote will have to do additional processing, which may
         # take some time. It's better to give it small amounts of work
@@ -487,19 +495,20 @@ class PerDestinationQueue:
                 ):
                     continue

-                # Filter out events where the server is not in the room,
-                # e.g. it may have left/been kicked. *Ideally* we'd pull
-                # out the kick and send that, but it's a rare edge case
-                # so we don't bother for now (the server that sent the
-                # kick should send it out if its online).
-                hosts = await self._state.get_hosts_in_room_at_events(
-                    p.room_id, [p.event_id]
-                )
-                if self._destination not in hosts:
-                    continue
-
                 new_pdus.append(p)

+            # Filter out events where the server is not in the room,
+            # e.g. it may have left/been kicked. *Ideally* we'd pull
+            # out the kick and send that, but it's a rare edge case
+            # so we don't bother for now (the server that sent the
+            # kick should send it out if its online).
+            new_pdus = await filter_events_for_server(
+                self._storage_controllers,
+                self._destination,
+                new_pdus,
+                redact=False,
+            )
+
             # If we've filtered out all the extremities, fall back to
             # sending the original event. This should ensure that the
             # server gets at least some of missed events (especially if
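The key change in patch 03 is replacing a per-event `get_hosts_in_room_at_events`
lookup, which pulls room state from the database for every candidate PDU, with a
single batched `filter_events_for_server` call over the whole list. A toy sketch
of that batching idea, using made-up event and host data rather than Synapse's
real types:

    from typing import Dict, List, Set

    def filter_events_for_server(
        destination: str, events: List[str], hosts_at_event: Dict[str, Set[str]]
    ) -> List[str]:
        # Stand-in for synapse.visibility.filter_events_for_server: drop events
        # the destination should not receive, in one pass over the whole batch.
        return [e for e in events if destination in hosts_at_event[e]]

    hosts_at_event = {
        "$event1": {"example.org", "remote.test"},
        "$event2": {"example.org"},  # remote.test has left / been kicked
    }
    new_pdus = filter_events_for_server(
        "remote.test", ["$event1", "$event2"], hosts_at_event
    )
    assert new_pdus == ["$event1"]

From f30bcbd84a651de59777b2a749850f6ca56ce3f0 Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Tue, 7 Jun 2022 15:24:11 +0100
Subject: [PATCH 04/85] Fix Synapse git info missing in version strings (#12973)

---
 changelog.d/12973.bugfix                           |  1 +
 poetry.lock                                        | 10 +++++-----
 pyproject.toml                                     |  2 +-
 synapse/__init__.py                                |  6 +++---
 synapse/_scripts/synapse_port_db.py                |  7 ++-----
 synapse/_scripts/update_synapse_database.py        |  5 ++---
 synapse/app/_base.py                               |  4 ++--
 synapse/app/admin_cmd.py                           |  5 ++---
 synapse/app/generic_worker.py                      |  5 ++---
 synapse/app/homeserver.py                          |  6 ++----
 synapse/config/logger.py                           |  4 ++--
 synapse/federation/transport/server/federation.py  |  4 ++--
 synapse/metrics/__init__.py                        |  4 ++--
 synapse/rest/admin/__init__.py                     |  5 ++---
 synapse/util/__init__.py                           |  6 ++++++
 15 files changed, 36 insertions(+), 38 deletions(-)
 create mode 100644 changelog.d/12973.bugfix

diff --git a/changelog.d/12973.bugfix b/changelog.d/12973.bugfix
new file mode 100644
index 000000000..1bf45854f
--- /dev/null
+++ b/changelog.d/12973.bugfix
@@ -0,0 +1 @@
+Fix a bug introduced in Synapse 1.58 where Synapse would not report full version information when installed from a git checkout. This is a best-effort affair and not guaranteed to be stable.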
diff --git a/poetry.lock b/poetry.lock
index 7c561e318..8a54a939f 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -524,7 +524,7 @@ python-versions = ">=3.7"

 [[package]]
 name = "matrix-common"
-version = "1.1.0"
+version = "1.2.1"
 description = "Common utilities for Synapse, Sydent and Sygnal"
 category = "main"
 optional = false
@@ -535,7 +535,7 @@ attrs = "*"
 importlib-metadata = {version = ">=1.4", markers = "python_version < \"3.8\""}

 [package.extras]
-dev = ["tox", "twisted", "aiounittest", "mypy (==0.910)", "black (==21.9b0)", "flake8 (==4.0.1)", "isort (==5.9.3)"]
+dev = ["tox", "twisted", "aiounittest", "mypy (==0.910)", "black (==22.3.0)", "flake8 (==4.0.1)", "isort (==5.9.3)", "build (==0.8.0)", "twine (==4.0.1)"]
 test = ["tox", "twisted", "aiounittest"]

 [[package]]
@@ -1563,7 +1563,7 @@ url_preview = ["lxml"]
 [metadata]
 lock-version = "1.1"
 python-versions = "^3.7.1"
-content-hash = "539e5326f401472d1ffc8325d53d72e544cd70156b3f43f32f1285c4c131f831"
+content-hash = "c1bb4dabba1e87517e25ca7bf778e8082fbc960a51d83819aec3a154110a374f"

 [metadata.files]
 attrs = [
@@ -2042,8 +2042,8 @@ markupsafe = [
     {file = "MarkupSafe-2.1.0.tar.gz", hash = "sha256:80beaf63ddfbc64a0452b841d8036ca0611e049650e20afcb882f5d3c266d65f"},
 ]
 matrix-common = [
-    {file = "matrix_common-1.1.0-py3-none-any.whl", hash = "sha256:5d6dfd777503b2f3a031b566e6af25b6e95f9c0818ef57d954c3190fce5eb407"},
-    {file = "matrix_common-1.1.0.tar.gz", hash = "sha256:a8238748afc2b37079818367fed5156f355771b07c8ff0a175934f47e0ff3276"},
+    {file = "matrix_common-1.2.1-py3-none-any.whl", hash = "sha256:946709c405944a0d4b1d73207b77eb064b6dbfc5d70a69471320b06d8ce98b20"},
+    {file = "matrix_common-1.2.1.tar.gz", hash = "sha256:a99dcf02a6bd95b24a5a61b354888a2ac92bf2b4b839c727b8dd9da2cdfa3853"},
 ]
 matrix-synapse-ldap3 = [
     {file = "matrix-synapse-ldap3-0.2.0.tar.gz", hash = "sha256:91a0715b43a41ec3033244174fca20846836da98fda711fb01687f7199eecd2e"},
diff --git a/pyproject.toml b/pyproject.toml
index ec6e81f25..a6f3169e7 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -150,7 +150,7 @@ typing-extensions = ">=3.10.0.1"
 cryptography = ">=3.4.7"
 # ijson 3.1.4 fixes a bug with "." in property names
 ijson = ">=3.1.4"
-matrix-common = "~=1.1.0"
+matrix-common = "~=1.2.1"
 # We need packaging.requirements.Requirement, added in 16.1.
 packaging = ">=16.1"
 # At the time of writing, we only use functions from the version `importlib.metadata`
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 161394175..b1369aca8 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -20,8 +20,6 @@ import json
 import os
 import sys

-from matrix_common.versionstring import get_distribution_version_string
-
 # Check that we're not running on an unsupported Python version.
 if sys.version_info < (3, 7):
     print("Synapse requires Python 3.7 or above.")
@@ -70,7 +68,9 @@ try:
 except ImportError:
     pass

-__version__ = get_distribution_version_string("matrix-synapse")
+import synapse.util
+
+__version__ = synapse.util.SYNAPSE_VERSION

 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when
diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py
index 361b51d2f..c753dfa7c 100755
--- a/synapse/_scripts/synapse_port_db.py
+++ b/synapse/_scripts/synapse_port_db.py
@@ -40,7 +40,6 @@ from typing import (
 )

 import yaml
-from matrix_common.versionstring import get_distribution_version_string
 from typing_extensions import TypedDict

 from twisted.internet import defer, reactor as reactor_
@@ -84,7 +83,7 @@ from synapse.storage.databases.state.bg_updates import StateBackgroundUpdateStor
 from synapse.storage.engines import create_engine
 from synapse.storage.prepare_database import prepare_database
 from synapse.types import ISynapseReactor
-from synapse.util import Clock
+from synapse.util import SYNAPSE_VERSION, Clock

 # Cast safety: Twisted does some naughty magic which replaces the
 # twisted.internet.reactor module with a Reactor instance at runtime.
@@ -258,9 +257,7 @@ class MockHomeserver:
         self.clock = Clock(reactor)
         self.config = config
         self.hostname = config.server.server_name
-        self.version_string = "Synapse/" + get_distribution_version_string(
-            "matrix-synapse"
-        )
+        self.version_string = SYNAPSE_VERSION

     def get_clock(self) -> Clock:
         return self.clock
diff --git a/synapse/_scripts/update_synapse_database.py b/synapse/_scripts/update_synapse_database.py
index c443522c0..b4aeae6dd 100755
--- a/synapse/_scripts/update_synapse_database.py
+++ b/synapse/_scripts/update_synapse_database.py
@@ -19,7 +19,6 @@ import sys
 from typing import cast

 import yaml
-from matrix_common.versionstring import get_distribution_version_string

 from twisted.internet import defer, reactor as reactor_

@@ -28,6 +27,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.server import HomeServer
 from synapse.storage import DataStore
 from synapse.types import ISynapseReactor
+from synapse.util import SYNAPSE_VERSION

 # Cast safety: Twisted does some naughty magic which replaces the
 # twisted.internet.reactor module with a Reactor instance at runtime.
@@ -43,8 +43,7 @@ class MockHomeserver(HomeServer):
             hostname=config.server.server_name,
             config=config,
             reactor=reactor,
-            version_string="Synapse/"
-            + get_distribution_version_string("matrix-synapse"),
+            version_string=f"Synapse/{SYNAPSE_VERSION}",
         )
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index a3446ac6e..84e389a6c 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -37,7 +37,6 @@ from typing import (
 )

 from cryptography.utils import CryptographyDeprecationWarning
-from matrix_common.versionstring import get_distribution_version_string
 from typing_extensions import ParamSpec

 import twisted
@@ -68,6 +67,7 @@ from synapse.metrics import install_gc_manager, register_threadpool
 from synapse.metrics.background_process_metrics import wrap_as_background_process
 from synapse.metrics.jemalloc import setup_jemalloc_stats
 from synapse.types import ISynapseReactor
+from synapse.util import SYNAPSE_VERSION
 from synapse.util.caches.lrucache import setup_expire_lru_cache_entries
 from synapse.util.daemonize import daemonize_process
 from synapse.util.gai_resolver import GAIResolver
@@ -540,7 +540,7 @@ def setup_sentry(hs: "HomeServer") -> None:

     sentry_sdk.init(
         dsn=hs.config.metrics.sentry_dsn,
-        release=get_distribution_version_string("matrix-synapse"),
+        release=SYNAPSE_VERSION,
     )

     # We set some default tags that give some context to this instance
diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py
index 6fedf681f..561621a28 100644
--- a/synapse/app/admin_cmd.py
+++ b/synapse/app/admin_cmd.py
@@ -19,8 +19,6 @@ import sys
 import tempfile
 from typing import List, Optional

-from matrix_common.versionstring import get_distribution_version_string
-
 from twisted.internet import defer, task

 import synapse
@@ -43,6 +41,7 @@ from synapse.replication.slave.storage.registration import SlavedRegistrationStore
 from synapse.server import HomeServer
 from synapse.storage.databases.main.room import RoomWorkerStore
 from synapse.types import StateMap
+from synapse.util import SYNAPSE_VERSION
 from synapse.util.logcontext import LoggingContext

 logger = logging.getLogger("synapse.app.admin_cmd")
@@ -220,7 +219,7 @@ def start(config_options: List[str]) -> None:
     ss = AdminCmdServer(
         config.server.server_name,
         config=config,
-        version_string="Synapse/" + get_distribution_version_string("matrix-synapse"),
+        version_string=f"Synapse/{SYNAPSE_VERSION}",
     )

     setup_logging(ss, config, use_worker_options=True)
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
index 89f8998f0..4a987fb75 100644
--- a/synapse/app/generic_worker.py
+++ b/synapse/app/generic_worker.py
@@ -16,8 +16,6 @@ import logging
 import sys
 from typing import Dict, List, Optional, Tuple

-from matrix_common.versionstring import get_distribution_version_string
-
 from twisted.internet import address
 from twisted.web.resource import Resource

@@ -121,6 +119,7 @@ from synapse.storage.databases.main.transactions import TransactionWorkerStore
 from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore
 from synapse.storage.databases.main.user_directory import UserDirectoryStore
 from synapse.types import JsonDict
+from synapse.util import SYNAPSE_VERSION
 from synapse.util.httpresourcetree import create_resource_tree

 logger = logging.getLogger("synapse.app.generic_worker")
@@ -447,7 +446,7 @@ def start(config_options: List[str]) -> None:
     hs = GenericWorkerServer(
         config.server.server_name,
         config=config,
-        version_string="Synapse/" + get_distribution_version_string("matrix-synapse"),
+        version_string=f"Synapse/{SYNAPSE_VERSION}",
     )

     setup_logging(hs, config, use_worker_options=True)
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 4c6c0658a..745e70414 100644
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -18,8 +18,6 @@ import os
 import sys
 from typing import Dict, Iterable, List

-from matrix_common.versionstring import get_distribution_version_string
-
 from twisted.internet.tcp import Port
 from twisted.web.resource import EncodingResourceWrapper, Resource
 from twisted.web.server import GzipEncoderFactory
@@ -69,7 +67,7 @@ from synapse.rest.synapse.client import build_synapse_client_resource_tree
 from synapse.rest.well_known import well_known_resource
 from synapse.server import HomeServer
 from synapse.storage import DataStore
-from synapse.util.check_dependencies import check_requirements
+from synapse.util.check_dependencies import VERSION, check_requirements
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.module_loader import load_module

@@ -371,7 +369,7 @@ def setup(config_options: List[str]) -> SynapseHomeServer:
     hs = SynapseHomeServer(
         config.server.server_name,
         config=config,
-        version_string="Synapse/" + get_distribution_version_string("matrix-synapse"),
+        version_string=f"Synapse/{VERSION}",
     )

     synapse.config.logger.setup_logging(hs, config, use_worker_options=False)
diff --git a/synapse/config/logger.py b/synapse/config/logger.py
index 470b8b449..82a5b5fa1 100644
--- a/synapse/config/logger.py
+++ b/synapse/config/logger.py
@@ -22,7 +22,6 @@ from string import Template
 from typing import TYPE_CHECKING, Any, Dict, Optional

 import yaml
-from matrix_common.versionstring import get_distribution_version_string
 from zope.interface import implementer

 from twisted.logger import (
@@ -37,6 +36,7 @@ from synapse.logging.context import LoggingContextFilter
 from synapse.logging.filter import MetadataFilter
 from synapse.types import JsonDict

+from ..util import SYNAPSE_VERSION
 from ._base import Config, ConfigError

 if TYPE_CHECKING:
@@ -349,7 +349,7 @@ def setup_logging(
     logging.warning(
         "Server %s version %s",
         sys.argv[0],
-        get_distribution_version_string("matrix-synapse"),
+        SYNAPSE_VERSION,
     )
     logging.info("Server hostname: %s", config.server.server_name)
     logging.info("Instance name: %s", hs.get_instance_name())
diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py
index 7dfb89066..f7884bfbe 100644
--- a/synapse/federation/transport/server/federation.py
+++ b/synapse/federation/transport/server/federation.py
@@ -24,7 +24,6 @@ from typing import (
     Union,
 )

-from matrix_common.versionstring import get_distribution_version_string
 from typing_extensions import Literal

 from synapse.api.constants import EduTypes
@@ -42,6 +41,7 @@ from synapse.http.servlet import (
     parse_strings_from_args,
 )
 from synapse.types import JsonDict
+from synapse.util import SYNAPSE_VERSION
 from synapse.util.ratelimitutils import FederationRateLimiter

 if TYPE_CHECKING:
@@ -622,7 +622,7 @@ class FederationVersionServlet(BaseFederationServlet):
             {
                 "server": {
                     "name": "Synapse",
-                    "version": get_distribution_version_string("matrix-synapse"),
+                    "version": SYNAPSE_VERSION,
                 }
             },
         )
diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py
index fffd83546..496fce2ec 100644
--- a/synapse/metrics/__init__.py
+++ b/synapse/metrics/__init__.py
@@ -35,7 +35,6 @@ from typing import (
 )

 import attr
-from matrix_common.versionstring import get_distribution_version_string
 from prometheus_client import CollectorRegistry, Counter, Gauge, Histogram, Metric
 from prometheus_client.core import (
     REGISTRY,
@@ -54,6 +53,7 @@ from synapse.metrics._exposition import (
 )
 from synapse.metrics._gc import MIN_TIME_BETWEEN_GCS, install_gc_manager
 from synapse.metrics._types import Collector
+from synapse.util import SYNAPSE_VERSION

 logger = logging.getLogger(__name__)

@@ -419,7 +419,7 @@ build_info = Gauge(
 )
 build_info.labels(
     " ".join([platform.python_implementation(), platform.python_version()]),
-    get_distribution_version_string("matrix-synapse"),
+    SYNAPSE_VERSION,
     " ".join([platform.system(), platform.release()]),
 ).set(1)
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
index 1aa08f8d9..fa3266720 100644
--- a/synapse/rest/admin/__init__.py
+++ b/synapse/rest/admin/__init__.py
@@ -20,8 +20,6 @@ import platform
 from http import HTTPStatus
 from typing import TYPE_CHECKING, Optional, Tuple

-from matrix_common.versionstring import get_distribution_version_string
-
 from synapse.api.errors import Codes, NotFoundError, SynapseError
 from synapse.http.server import HttpServer, JsonResource
 from synapse.http.servlet import RestServlet, parse_json_object_from_request
@@ -88,6 +86,7 @@ from synapse.rest.admin.users import (
     WhoisRestServlet,
 )
 from synapse.types import JsonDict, RoomStreamToken
+from synapse.util import SYNAPSE_VERSION

 if TYPE_CHECKING:
     from synapse.server import HomeServer
@@ -100,7 +99,7 @@ class VersionServlet(RestServlet):

     def __init__(self, hs: "HomeServer"):
         self.res = {
-            "server_version": get_distribution_version_string("matrix-synapse"),
+            "server_version": SYNAPSE_VERSION,
             "python_version": platform.python_version(),
         }
diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py
index d8046b755..6323d452e 100644
--- a/synapse/util/__init__.py
+++ b/synapse/util/__init__.py
@@ -19,6 +19,7 @@ from typing import Any, Callable, Dict, Generator, Optional

 import attr
 from frozendict import frozendict
+from matrix_common.versionstring import get_distribution_version_string

 from twisted.internet import defer, task
 from twisted.internet.defer import Deferred
@@ -183,3 +184,8 @@ def log_failure(
     if not consumeErrors:
         return failure
     return None
+
+
+# Version string with git info. Computed here once so that we don't invoke git multiple
+# times.
+SYNAPSE_VERSION = get_distribution_version_string("matrix-synapse", __file__)
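The fix in patch 04 hinges on computing the version string exactly once, at
import time of `synapse.util`, and having every consumer import the
`SYNAPSE_VERSION` constant instead of re-running the lookup. A minimal sketch
of the compute-once pattern; the stdlib fallback below is purely illustrative,
whereas Synapse itself uses `matrix_common.versionstring`, which can also
append git commit info from a checkout:

    from importlib.metadata import PackageNotFoundError, version

    def _compute_version() -> str:
        try:
            return version("matrix-synapse")
        except PackageNotFoundError:
            return "unknown"

    # Evaluated once at module import, so callers such as the federation
    # /version servlet or the Prometheus build_info gauge never repeat the
    # (potentially slow, git-invoking) lookup.
    SYNAPSE_VERSION = _compute_version()

    user_agent = f"Synapse/{SYNAPSE_VERSION}"

From b5a3aecf18740fb699f871c8e1d110d847fea6d3 Mon Sep 17 00:00:00 2001
From: Daniel Aloni <74783603+Danieloni1@users.noreply.github.com>
Date: Tue, 7 Jun 2022 17:58:48 +0300
Subject: [PATCH 05/85] Return the same error message from `/login` when password is incorrect and when account doesn't exist. (#12738)

---
 changelog.d/12738.misc   | 1 +
 synapse/handlers/auth.py | 8 ++++++--
 2 files changed, 7 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/12738.misc

diff --git a/changelog.d/12738.misc b/changelog.d/12738.misc
new file mode 100644
index 000000000..825222347
--- /dev/null
+++ b/changelog.d/12738.misc
@@ -0,0 +1 @@
+Report login failures due to unknown third party identifiers in the same way as failures due to invalid passwords. This prevents an attacker from using the error response to determine if the identifier exists. Contributed by Daniel Aloni.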
\ No newline at end of file
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index fbafbbee6..6e15028b0 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -81,6 +81,8 @@ if TYPE_CHECKING:

 logger = logging.getLogger(__name__)

+INVALID_USERNAME_OR_PASSWORD = "Invalid username or password"
+

 def convert_client_dict_legacy_fields_to_identifier(
     submission: JsonDict,
@@ -1215,7 +1217,9 @@ class AuthHandler:
             await self._failed_login_attempts_ratelimiter.can_do_action(
                 None, (medium, address)
             )
-            raise LoginError(403, "", errcode=Codes.FORBIDDEN)
+            raise LoginError(
+                403, msg=INVALID_USERNAME_OR_PASSWORD, errcode=Codes.FORBIDDEN
+            )

         identifier_dict = {"type": "m.id.user", "user": user_id}

@@ -1341,7 +1345,7 @@ class AuthHandler:

         # We raise a 403 here, but note that if we're doing user-interactive
         # login, it turns all LoginErrors into a 401 anyway.
-        raise LoginError(403, "Invalid password", errcode=Codes.FORBIDDEN)
+        raise LoginError(403, msg=INVALID_USERNAME_OR_PASSWORD, errcode=Codes.FORBIDDEN)

     async def check_password_provider_3pid(
         self, medium: str, address: str, password: str
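The security idea behind patch 05 is a standard anti-enumeration measure:
login must fail identically whether the account is unknown or the password is
wrong, so the error response cannot be used as an existence oracle. A
self-contained toy illustration (plain-text credential comparison purely for
brevity; a real system would hash and compare in constant time):

    INVALID_USERNAME_OR_PASSWORD = "Invalid username or password"

    class LoginError(Exception):
        def __init__(self, code: int, msg: str) -> None:
            super().__init__(msg)
            self.code = code

    USERS = {"@alice:example.org": "hunter2"}  # toy credential store

    def check_login(user_id: str, password: str) -> None:
        stored = USERS.get(user_id)
        if stored is None or stored != password:
            # Same status code and same message on both failure modes.
            raise LoginError(403, INVALID_USERNAME_OR_PASSWORD)

    # Unknown user and wrong password are indistinguishable to the caller:
    for attempt in [("@bob:example.org", "x"), ("@alice:example.org", "wrong")]:
        try:
            check_login(*attempt)
        except LoginError as e:
            assert (e.code, str(e)) == (403, INVALID_USERNAME_OR_PASSWORD)

From a10cc5f82480c4905979f753d3734e822a064669 Mon Sep 17 00:00:00 2001
From: Sean Quah <8349537+squahtx@users.noreply.github.com>
Date: Tue, 7 Jun 2022 17:14:47 +0100
Subject: [PATCH 06/85] Test cancellation at every `await` during request handling (#12674)

* Add tests for `/rooms/<room_id>/members` cancellation.

* Add tests for `/rooms/<room_id>/state` cancellation.

Signed-off-by: Sean Quah
---
 changelog.d/12674.misc          |   1 +
 tests/http/server/_base.py      | 452 +++++++++++++++++++++++++++++++-
 tests/rest/client/test_rooms.py | 100 +++++++
 3 files changed, 551 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/12674.misc

diff --git a/changelog.d/12674.misc b/changelog.d/12674.misc
new file mode 100644
index 000000000..c8a8f32f0
--- /dev/null
+++ b/changelog.d/12674.misc
@@ -0,0 +1 @@
+Add tests for cancellation of `GET /rooms/$room_id/members` and `GET /rooms/$room_id/state` requests.
diff --git a/tests/http/server/_base.py b/tests/http/server/_base.py
index b9f1a381a..57b92beb8 100644
--- a/tests/http/server/_base.py
+++ b/tests/http/server/_base.py
@@ -12,21 +12,48 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
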
+import inspect
+import itertools
+import logging
 from http import HTTPStatus
-from typing import Any, Callable, Optional, Union
+from typing import (
+    Any,
+    Callable,
+    ContextManager,
+    Dict,
+    List,
+    Optional,
+    Set,
+    Tuple,
+    TypeVar,
+    Union,
+)
 from unittest import mock
+from unittest.mock import Mock

+from twisted.internet.defer import Deferred
 from twisted.internet.error import ConnectionDone
+from twisted.python.failure import Failure
+from twisted.test.proto_helpers import MemoryReactorClock
+from twisted.web.server import Site

 from synapse.http.server import (
     HTTP_STATUS_REQUEST_CANCELLED,
     respond_with_html_bytes,
     respond_with_json,
 )
+from synapse.http.site import SynapseRequest
+from synapse.logging.context import LoggingContext, make_deferred_yieldable
 from synapse.types import JsonDict

 from tests import unittest
-from tests.server import FakeChannel, ThreadedMemoryReactorClock
+from tests.server import FakeChannel, ThreadedMemoryReactorClock, make_request
+from tests.unittest import logcontext_clean
+
+logger = logging.getLogger(__name__)
+
+
+T = TypeVar("T")


 class EndpointCancellationTestHelperMixin(unittest.TestCase):
@@ -98,3 +125,424 @@ class EndpointCancellationTestHelperMixin(unittest.TestCase):
             self.assertEqual(code, expected_code)
             self.assertEqual(request.code, expected_code)
             self.assertEqual(body, expected_body)
+
+
+@logcontext_clean
+def make_request_with_cancellation_test(
+    test_name: str,
+    reactor: MemoryReactorClock,
+    site: Site,
+    method: str,
+    path: str,
+    content: Union[bytes, str, JsonDict] = b"",
+) -> FakeChannel:
+    """Performs a request repeatedly, disconnecting at successive `await`s, until
+    one completes.
+
+    Fails if:
+        * A logging context is lost during cancellation.
+        * A logging context get restarted after it is marked as finished, eg. if
+            a request's logging context is used by some processing started by the
+            request, but the request neglects to cancel that processing or wait for it
+            to complete.
+
+            Note that "Re-starting finished log context" errors get raised within the
+            request handling code and may or may not get caught. These errors will
+            likely manifest as a different logging context error at a later point. When
+            debugging logging context failures, setting a breakpoint in
+            `logcontext_error` can prove useful.
+        * A request gets stuck, possibly due to a previous cancellation.
+        * The request does not return a 499 when the client disconnects.
+            This implies that a `CancelledError` was swallowed somewhere.
+
+    It is up to the caller to verify that the request returns the correct data when
+    it finally runs to completion.
+
+    Note that this function can only cover a single code path and does not guarantee
+    that an endpoint is compatible with cancellation on every code path.
+    To allow inspection of the code path that is being tested, this function will
+    log the stack trace at every `await` that gets cancelled. To view these log
+    lines, `trial` can be run with the `SYNAPSE_TEST_LOG_LEVEL=INFO` environment
+    variable, which will include the log lines in `_trial_temp/test.log`.
+    Alternatively, `_log_for_request` can be modified to write to `sys.stdout`.
+
+    Args:
+        test_name: The name of the test, which will be logged.
+        reactor: The twisted reactor running the request handler.
+        site: The twisted `Site` to use to render the request.
+        method: The HTTP request method ("verb").
+        path: The HTTP path, suitably URL encoded (e.g. escaped UTF-8 & spaces and
+            such).
+        content: The body of the request.
+
+    Returns:
+        The `FakeChannel` object which stores the result of the final request that
+        runs to completion.
+    """
+    # To process a request, a coroutine run is created for the async method handling
+    # the request. That method may then start other coroutine runs, wrapped in
+    # `Deferred`s.
+    #
+    # We would like to trigger a cancellation at the first `await`, re-run the
+    # request and cancel at the second `await`, and so on. By patching
+    # `Deferred.__next__`, we can intercept `await`s, track which ones we have or
+    # have not seen, and force them to block when they wouldn't have.
+
+    # The set of previously seen `await`s.
+    # Each element is a stringified stack trace.
+    seen_awaits: Set[Tuple[str, ...]] = set()
+
+    _log_for_request(
+        0, f"Running make_request_with_cancellation_test for {test_name}..."
+    )
+
+    for request_number in itertools.count(1):
+        deferred_patch = Deferred__next__Patch(seen_awaits, request_number)
+
+        try:
+            with mock.patch(
+                "synapse.http.server.respond_with_json", wraps=respond_with_json
+            ) as respond_mock:
+                with deferred_patch.patch():
+                    # Start the request.
+                    channel = make_request(
+                        reactor, site, method, path, content, await_result=False
+                    )
+                    request = channel.request
+
+                    # Run the request until we see a new `await` which we have not
+                    # yet cancelled at, or it completes.
+                    while not respond_mock.called and not deferred_patch.new_await_seen:
+                        previous_awaits_seen = deferred_patch.awaits_seen
+
+                        reactor.advance(0.0)
+
+                        if deferred_patch.awaits_seen == previous_awaits_seen:
+                            # We didn't see any progress. Try advancing the clock.
+                            reactor.advance(1.0)
+
+                        if deferred_patch.awaits_seen == previous_awaits_seen:
+                            # We still didn't see any progress. The request might be
+                            # stuck.
+                            raise AssertionError(
+                                "Request appears to be stuck, possibly due to a "
+                                "previous cancelled request"
+                            )
+
+                if respond_mock.called:
+                    # The request ran to completion and we are done with testing it.
+
+                    # `respond_with_json` writes the response asynchronously, so we
+                    # might have to give the reactor a kick before the channel gets
+                    # the response.
+                    deferred_patch.unblock_awaits()
+                    channel.await_result()
+
+                    return channel
+
+                # Disconnect the client and wait for the response.
+                request.connectionLost(reason=ConnectionDone())
+
+                _log_for_request(request_number, "--- disconnected ---")
+
+                # Advance the reactor just enough to get a response.
+                # We don't want to advance the reactor too far, because we can only
+                # detect re-starts of finished logging contexts after we set the
+                # finished flag below.
+                for _ in range(2):
+                    # We may need to pump the reactor to allow `delay_cancellation`s to
+                    # finish.
+                    if not respond_mock.called:
+                        reactor.advance(0.0)
+
+                    # Try advancing the clock if that didn't work.
+                    if not respond_mock.called:
+                        reactor.advance(1.0)
+
+                    # `delay_cancellation`s may be waiting for processing that we've
+                    # forced to block. Try unblocking them, followed by another round of
+                    # pumping the reactor.
+                    if not respond_mock.called:
+                        deferred_patch.unblock_awaits()
+
+                # Mark the request's logging context as finished. If it gets
+                # activated again, an `AssertionError` will be raised and bubble up
+                # through request handling code. This `AssertionError` may or may not be
+                # caught. Eventually some other code will deactivate the logging
+                # context which will raise a different `AssertionError` because
+                # resource usage won't have been correctly tracked.
+                if isinstance(request, SynapseRequest) and request.logcontext:
+                    request.logcontext.finished = True
+
+                # Check that the request finished with a 499,
+                # ie. the `CancelledError` wasn't swallowed.
+                respond_mock.assert_called_once()
+
+                if request.code != HTTP_STATUS_REQUEST_CANCELLED:
+                    raise AssertionError(
+                        f"{request.code} != {HTTP_STATUS_REQUEST_CANCELLED} : "
+                        "Cancelled request did not finish with the correct status code."
+                    )
+        finally:
+            # Unblock any processing that might be shared between requests, if we
+            # haven't already done so.
+            deferred_patch.unblock_awaits()
+
+    assert False, "unreachable"  # noqa: B011
+
+
+class Deferred__next__Patch:
+    """A `Deferred.__next__` patch that will intercept `await`s and force them
+    to block once it sees a new `await`.
+
+    When done with the patch, `unblock_awaits()` must be called to clean up after any
+    `await`s that were forced to block, otherwise processing shared between multiple
+    requests, such as database queries started by `@cached`, will become permanently
+    stuck.
+
+    Usage:
+        seen_awaits = set()
+        deferred_patch = Deferred__next__Patch(seen_awaits, 1)
+        try:
+            with deferred_patch.patch():
+                # do things
+                ...
+        finally:
+            deferred_patch.unblock_awaits()
+    """
+
+    def __init__(self, seen_awaits: Set[Tuple[str, ...]], request_number: int):
+        """
+        Args:
+            seen_awaits: The set of stack traces of `await`s that have been previously
+                seen. When the `Deferred.__next__` patch sees a new `await`, it will add
+                it to the set.
+            request_number: The request number to log against.
+        """
+        self._request_number = request_number
+        self._seen_awaits = seen_awaits
+
+        self._original_Deferred___next__ = Deferred.__next__
+
+        # The number of `await`s on `Deferred`s we have seen so far.
+        self.awaits_seen = 0
+
+        # Whether we have seen a new `await` not in `seen_awaits`.
+        self.new_await_seen = False
+
+        # To force `await`s on resolved `Deferred`s to block, we make up a new
+        # unresolved `Deferred` and return it out of `Deferred.__next__` /
+        # `coroutine.send()`. We have to resolve it later, in case the `await`ing
+        # coroutine is part of some shared processing, such as `@cached`.
+        self._to_unblock: Dict[Deferred, Union[object, Failure]] = {}
+
+        # The last stack we logged.
+        self._previous_stack: List[inspect.FrameInfo] = []
+
+    def patch(self) -> ContextManager[Mock]:
+        """Returns a context manager which patches `Deferred.__next__`."""
+
+        def Deferred___next__(
+            deferred: "Deferred[T]", value: object = None
+        ) -> "Deferred[T]":
+            """Intercepts `await`s on `Deferred`s and rigs them to block once we have
+            seen enough of them.
+
+            `Deferred.__next__` will normally:
+                * return `self` if the `Deferred` is unresolved, in which case
+                  `coroutine.send()` will return the `Deferred`, and
+                  `_defer.inlineCallbacks` will stop running the coroutine until the
+                  `Deferred` is resolved.
+                * raise a `StopIteration(result)`, containing the result of the `await`.
+                * raise another exception, which will come out of the `await`.
+            """
+            self.awaits_seen += 1
+
+            stack = _get_stack(skip_frames=1)
+            stack_hash = _hash_stack(stack)
+
+            if stack_hash not in self._seen_awaits:
+                # Block at the current `await` onwards.
+                self._seen_awaits.add(stack_hash)
+                self.new_await_seen = True
+
+            if not self.new_await_seen:
+                # This `await` isn't interesting. Let it proceed normally.
+
+                # Don't log the stack. It's been seen before in a previous run.
+                self._previous_stack = stack
+
+                return self._original_Deferred___next__(deferred, value)
+
+            # We want to block at the current `await`.
+            if deferred.called and not deferred.paused:
+                # This `Deferred` already has a result.
+                # We return a new, unresolved, `Deferred` for `_inlineCallbacks` to wait
+                # on. This blocks the coroutine that did this `await`.
+                # We queue it up for unblocking later.
+                new_deferred: "Deferred[T]" = Deferred()
+                self._to_unblock[new_deferred] = deferred.result
+
+                _log_await_stack(
+                    stack,
+                    self._previous_stack,
+                    self._request_number,
+                    "force-blocked await",
+                )
+                self._previous_stack = stack
+
+                return make_deferred_yieldable(new_deferred)
+
+            # This `Deferred` does not have a result yet.
+            # The `await` will block normally, so we don't have to do anything.
+            _log_await_stack(
+                stack,
+                self._previous_stack,
+                self._request_number,
+                "blocking await",
+            )
+            self._previous_stack = stack
+
+            return self._original_Deferred___next__(deferred, value)
+
+        return mock.patch.object(Deferred, "__next__", new=Deferred___next__)
+
+    def unblock_awaits(self) -> None:
+        """Unblocks any shared processing that we forced to block.
+
+        Must be called when done, otherwise processing shared between multiple requests,
+        such as database queries started by `@cached`, will become permanently stuck.
+        """
+        to_unblock = self._to_unblock
+        self._to_unblock = {}
+        for deferred, result in to_unblock.items():
+            deferred.callback(result)
+
+
+def _log_for_request(request_number: int, message: str) -> None:
+    """Logs a message for an iteration of `make_request_with_cancellation_test`."""
+    # We want consistent alignment when logging stack traces, so ensure the logging
+    # context has a fixed width name.
+    with LoggingContext(name=f"request-{request_number:<2}"):
+        logger.info(message)
+
+
+def _log_await_stack(
+    stack: List[inspect.FrameInfo],
+    previous_stack: List[inspect.FrameInfo],
+    request_number: int,
+    note: str,
+) -> None:
+    """Logs the stack for an `await` in `make_request_with_cancellation_test`.
+
+    Only logs the part of the stack that has changed since the previous call.
+
+    Example output looks like:
+    ```
+    delay_cancellation:750 (synapse/util/async_helpers.py:750)
+        DatabasePool._runInteraction:768 (synapse/storage/database.py:768)
+            > *blocked on await* at DatabasePool.runWithConnection:891 (synapse/storage/database.py:891)
+    ```
+
+    Args:
+        stack: The stack to log, as returned by `_get_stack()`.
+        previous_stack: The previous stack logged, with callers appearing before
+            callees.
+        request_number: The request number to log against.
+        note: A note to attach to the last stack frame, eg. "blocked on await".
+    """
+    for i, frame_info in enumerate(stack[:-1]):
+        # Skip any frames in common with the previous logging.
+        if i < len(previous_stack) and frame_info == previous_stack[i]:
+            continue
+
+        frame = _format_stack_frame(frame_info)
+        message = f"{' ' * i}{frame}"
+        _log_for_request(request_number, message)
+
+    # Always print the final frame with the `await`.
+    # If the frame with the `await` started another coroutine run, we may have already
+    # printed a deeper stack which includes our final frame. We want to log where all
+    # `await`s happen, so we reprint the frame in this case.
+    i = len(stack) - 1
+    frame_info = stack[i]
+    frame = _format_stack_frame(frame_info)
+    message = f"{' ' * i}> *{note}* at {frame}"
+    _log_for_request(request_number, message)
+
+
+def _format_stack_frame(frame_info: inspect.FrameInfo) -> str:
+    """Returns a string representation of a stack frame.
+
+    Used for debug logging.
+
+    Returns:
+        A string, formatted like
+        "JsonResource._async_render:559 (synapse/http/server.py:559)".
+    """
+    method_name = _get_stack_frame_method_name(frame_info)
+
+    return (
+        f"{method_name}:{frame_info.lineno} ({frame_info.filename}:{frame_info.lineno})"
+    )
+
+
+def _get_stack(skip_frames: int) -> List[inspect.FrameInfo]:
+    """Captures the stack for a request.
+
+    Skips any twisted frames and stops at `JsonResource.wrapped_async_request_handler`.
+
+    Used for debug logging.
+
+    Returns:
+        A list of `inspect.FrameInfo`s, with callers appearing before callees.
+    """
+    stack = []
+
+    skip_frames += 1  # Also skip `get_stack` itself.
+
+    for frame_info in inspect.stack()[skip_frames:]:
+        # Skip any twisted `inlineCallbacks` gunk.
+        if "/twisted/" in frame_info.filename:
+            continue
+
+        # Exclude the reactor frame, upwards.
+        method_name = _get_stack_frame_method_name(frame_info)
+        if method_name == "ThreadedMemoryReactorClock.advance":
+            break
+
+        stack.append(frame_info)
+
+        # Stop at `JsonResource`'s `wrapped_async_request_handler`, which is the entry
+        # point for request handling.
+        if frame_info.function == "wrapped_async_request_handler":
+            break
+
+    return stack[::-1]
+
+
+def _get_stack_frame_method_name(frame_info: inspect.FrameInfo) -> str:
+    """Returns the name of a stack frame's method.
+
+    eg. "JsonResource._async_render".
+    """
+    method_name = frame_info.function
+
+    # Prefix the class name for instance methods.
+    frame_self = frame_info.frame.f_locals.get("self")
+    if frame_self:
+        method = getattr(frame_self, method_name, None)
+        if method:
+            method_name = method.__qualname__
+        else:
+            # We couldn't find the method on `self`.
+            # Make something up. It's useful to know which class "contains" a
+            # function anyway.
+            method_name = f"{type(frame_self).__name__} {method_name}"
+
+    return method_name
+
+
+def _hash_stack(stack: List[inspect.FrameInfo]):
+    """Turns a stack into a hashable value that can be put into a set."""
+    return tuple(_format_stack_frame(frame) for frame in stack)
diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py
index f523d89b8..4be83dfd6 100644
--- a/tests/rest/client/test_rooms.py
+++ b/tests/rest/client/test_rooms.py
@@ -42,6 +42,7 @@ from synapse.util import Clock
 from synapse.util.stringutils import random_string

 from tests import unittest
+from tests.http.server._base import make_request_with_cancellation_test
 from tests.test_utils import make_awaitable

 PATH_PREFIX = b"/_matrix/client/api/v1"
@@ -471,6 +472,49 @@ class RoomPermissionsTestCase(RoomBase):
         )


+class RoomStateTestCase(RoomBase):
+    """Tests /rooms/$room_id/state."""
+
+    user_id = "@sid1:red"
+
+    def test_get_state_cancellation(self) -> None:
+        """Test cancellation of a `/rooms/$room_id/state` request."""
+        room_id = self.helper.create_room_as(self.user_id)
+        channel = make_request_with_cancellation_test(
+            "test_state_cancellation",
+            self.reactor,
+            self.site,
+            "GET",
+            "/rooms/%s/state" % room_id,
+        )
+
+        self.assertEqual(200, channel.code, msg=channel.result["body"])
+        self.assertCountEqual(
+            [state_event["type"] for state_event in channel.json_body],
+            {
+                "m.room.create",
+                "m.room.power_levels",
+                "m.room.join_rules",
+                "m.room.member",
+                "m.room.history_visibility",
+            },
+        )
+
+    def test_get_state_event_cancellation(self) -> None:
+        """Test cancellation of a `/rooms/$room_id/state/$event_type` request."""
+        room_id = self.helper.create_room_as(self.user_id)
+        channel = make_request_with_cancellation_test(
+            "test_state_cancellation",
+            self.reactor,
+            self.site,
+            "GET",
+            "/rooms/%s/state/m.room.member/%s" % (room_id, self.user_id),
+        )
+
+        self.assertEqual(200, channel.code, msg=channel.result["body"])
+        self.assertEqual(channel.json_body, {"membership": "join"})
+
+
 class RoomsMemberListTestCase(RoomBase):
     """Tests /rooms/$room_id/members/list REST events."""

@@ -591,6 +635,62 @@ class RoomsMemberListTestCase(RoomBase):
         channel = self.make_request("GET", room_path)
         self.assertEqual(200, channel.code, msg=channel.result["body"])

+    def test_get_member_list_cancellation(self) -> None:
+        """Test cancellation of a `/rooms/$room_id/members` request."""
+        room_id = self.helper.create_room_as(self.user_id)
+        channel = make_request_with_cancellation_test(
+            "test_get_member_list_cancellation",
+            self.reactor,
+            self.site,
+            "GET",
+            "/rooms/%s/members" % room_id,
+        )
+
+        self.assertEqual(200, channel.code, msg=channel.result["body"])
+        self.assertEqual(len(channel.json_body["chunk"]), 1)
+        self.assertLessEqual(
+            {
+                "content": {"membership": "join"},
+                "room_id": room_id,
+                "sender": self.user_id,
+                "state_key": self.user_id,
+                "type": "m.room.member",
+                "user_id": self.user_id,
+            }.items(),
+            channel.json_body["chunk"][0].items(),
+        )
+
+    def test_get_member_list_with_at_token_cancellation(self) -> None:
+        """Test cancellation of a `/rooms/$room_id/members?at=<sync token>` request."""
+        room_id = self.helper.create_room_as(self.user_id)
+
+        # first sync to get an at token
+        channel = self.make_request("GET", "/sync")
+        self.assertEqual(200, channel.code)
+        sync_token = channel.json_body["next_batch"]
+
+        channel = make_request_with_cancellation_test(
+            "test_get_member_list_with_at_token_cancellation",
+            self.reactor,
+            self.site,
+            "GET",
+            "/rooms/%s/members?at=%s" % (room_id, sync_token),
+        )
+
+        self.assertEqual(200, channel.code, msg=channel.result["body"])
+        self.assertEqual(len(channel.json_body["chunk"]), 1)
+        self.assertLessEqual(
+            {
+                "content": {"membership": "join"},
+                "room_id": room_id,
+                "sender": self.user_id,
+                "state_key": self.user_id,
+                "type": "m.room.member",
+                "user_id": self.user_id,
+            }.items(),
+            channel.json_body["chunk"][0].items(),
+        )
+

 class RoomsCreateTestCase(RoomBase):
     """Tests /rooms and /rooms/$room_id REST events."""
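The helper's core strategy in patch 06 — re-run the request, forcing a
cancellation at each successive `await` point, until one run completes — can
be illustrated without Twisted at all. A toy asyncio version, with three
`await` points standing in for a real handler's awaits:

    import asyncio
    import itertools

    async def handler(cancel_at: int) -> str:
        for step in range(3):  # three await points
            if step == cancel_at:
                raise asyncio.CancelledError()  # simulate a client disconnect here
            await asyncio.sleep(0)
        return "done"

    async def exercise_every_await() -> None:
        for attempt in itertools.count():
            try:
                result = await handler(cancel_at=attempt)
            except asyncio.CancelledError:
                # This await point is now covered; move on to the next one.
                continue
            assert result == "done"
            break

    asyncio.run(exercise_every_await())

From 586bfc6dc0241bdd40376c3314f13b82a5593538 Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Tue, 7 Jun 2022 17:33:55 +0100
Subject: [PATCH 07/85] Use dummy fallback engines if imports fail (#12979)

---
 changelog.d/12979.bugfix                 |  1 +
 synapse/storage/databases/main/events.py |  2 +-
 synapse/storage/engines/__init__.py      | 38 +++++++++++++++++++++---
 synapse/storage/engines/postgres.py      | 24 +++++++--------
 synapse/storage/prepare_database.py      |  3 +-
 5 files changed, 47 insertions(+), 21 deletions(-)
 create mode 100644 changelog.d/12979.bugfix

diff --git a/changelog.d/12979.bugfix b/changelog.d/12979.bugfix
new file mode 100644
index 000000000..6b5440802
--- /dev/null
+++ b/changelog.d/12979.bugfix
@@ -0,0 +1 @@
+Fix a bug introduced in Synapse 1.60 where Synapse would fail to start if the `sqlite3` module was not available.
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index 17e35cf63..a8773374b 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -46,7 +46,7 @@ from synapse.storage.database import (
 )
 from synapse.storage.databases.main.events_worker import EventCacheEntry
 from synapse.storage.databases.main.search import SearchEntry
-from synapse.storage.engines.postgres import PostgresEngine
+from synapse.storage.engines import PostgresEngine
 from synapse.storage.util.id_generators import AbstractStreamIdGenerator
 from synapse.storage.util.sequence import SequenceGenerator
 from synapse.types import JsonDict, StateMap, get_domain_from_id
diff --git a/synapse/storage/engines/__init__.py b/synapse/storage/engines/__init__.py
index f51b3d228..a182e8a09 100644
--- a/synapse/storage/engines/__init__.py
+++ b/synapse/storage/engines/__init__.py
@@ -11,11 +11,35 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from typing import Any, Mapping
+from typing import Any, Mapping, NoReturn

 from ._base import BaseDatabaseEngine, IncorrectDatabaseSetup
-from .postgres import PostgresEngine
-from .sqlite import Sqlite3Engine
+
+# The classes `PostgresEngine` and `Sqlite3Engine` must always be importable, because
+# we use `isinstance(engine, PostgresEngine)` to write different queries for postgres
+# and sqlite. But the database driver modules are both optional: they may not be
+# installed. To account for this, create dummy classes on import failure so we can
+# still run `isinstance()` checks.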
+try:
+    from .postgres import PostgresEngine
+except ImportError:
+
+    class PostgresEngine(BaseDatabaseEngine):  # type: ignore[no-redef]
+        def __new__(cls, *args: object, **kwargs: object) -> NoReturn:  # type: ignore[misc]
+            raise RuntimeError(
+                f"Cannot create {cls.__name__} -- psycopg2 module is not installed"
+            )
+
+
+try:
+    from .sqlite import Sqlite3Engine
+except ImportError:
+
+    class Sqlite3Engine(BaseDatabaseEngine):  # type: ignore[no-redef]
+        def __new__(cls, *args: object, **kwargs: object) -> NoReturn:  # type: ignore[misc]
+            raise RuntimeError(
+                f"Cannot create {cls.__name__} -- sqlite3 module is not installed"
+            )


 def create_engine(database_config: Mapping[str, Any]) -> BaseDatabaseEngine:
@@ -30,4 +54,10 @@ def create_engine(database_config: Mapping[str, Any]) -> BaseDatabaseEngine:

     raise RuntimeError("Unsupported database engine '%s'" % (name,))


-__all__ = ["create_engine", "BaseDatabaseEngine", "IncorrectDatabaseSetup"]
+__all__ = [
+    "create_engine",
+    "BaseDatabaseEngine",
+    "PostgresEngine",
+    "Sqlite3Engine",
+    "IncorrectDatabaseSetup",
+]
diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py
index 391f8ed24..517f9d5f9 100644
--- a/synapse/storage/engines/postgres.py
+++ b/synapse/storage/engines/postgres.py
@@ -15,6 +15,8 @@
 import logging
 from typing import TYPE_CHECKING, Any, Mapping, NoReturn, Optional, Tuple, cast

+import psycopg2.extensions
+
 from synapse.storage.engines._base import (
     BaseDatabaseEngine,
     IncorrectDatabaseSetup,
@@ -23,18 +25,14 @@ from synapse.storage.engines._base import (
 from synapse.storage.types import Cursor

 if TYPE_CHECKING:
-    import psycopg2  # noqa: F401
-
     from synapse.storage.database import LoggingDatabaseConnection


 logger = logging.getLogger(__name__)


-class PostgresEngine(BaseDatabaseEngine["psycopg2.connection"]):
+class PostgresEngine(BaseDatabaseEngine[psycopg2.extensions.connection]):
     def __init__(self, database_config: Mapping[str, Any]):
-        import psycopg2.extensions
-
         super().__init__(psycopg2, database_config)
         psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)

@@ -69,7 +67,9 @@ class PostgresEngine(BaseDatabaseEngine["psycopg2.connection"]):
         return collation, ctype

     def check_database(
-        self, db_conn: "psycopg2.connection", allow_outdated_version: bool = False
+        self,
+        db_conn: psycopg2.extensions.connection,
+        allow_outdated_version: bool = False,
     ) -> None:
         # Get the version of PostgreSQL that we're using. As per the psycopg2
         # docs: The number is formed by converting the major, minor, and
@@ -176,8 +176,6 @@ class PostgresEngine(BaseDatabaseEngine["psycopg2.connection"]):
         return True

     def is_deadlock(self, error: Exception) -> bool:
-        import psycopg2.extensions
-
         if isinstance(error, psycopg2.DatabaseError):
             # https://www.postgresql.org/docs/current/static/errcodes-appendix.html
             # "40001" serialization_failure
@@ -185,7 +183,7 @@ class PostgresEngine(BaseDatabaseEngine["psycopg2.connection"]):
             return error.pgcode in ["40001", "40P01"]
         return False

-    def is_connection_closed(self, conn: "psycopg2.connection") -> bool:
+    def is_connection_closed(self, conn: psycopg2.extensions.connection) -> bool:
         return bool(conn.closed)

     def lock_table(self, txn: Cursor, table: str) -> None:
@@ -205,18 +203,16 @@ class PostgresEngine(BaseDatabaseEngine["psycopg2.connection"]):
         else:
             return "%i.%i.%i" % (numver / 10000, (numver % 10000) / 100, numver % 100)

-    def in_transaction(self, conn: "psycopg2.connection") -> bool:
-        import psycopg2.extensions
-
+    def in_transaction(self, conn: psycopg2.extensions.connection) -> bool:
         return conn.status != psycopg2.extensions.STATUS_READY

     def attempt_to_set_autocommit(
-        self, conn: "psycopg2.connection", autocommit: bool
+        self, conn: psycopg2.extensions.connection, autocommit: bool
     ) -> None:
         return conn.set_session(autocommit=autocommit)

     def attempt_to_set_isolation_level(
-        self, conn: "psycopg2.connection", isolation_level: Optional[int]
+        self, conn: psycopg2.extensions.connection, isolation_level: Optional[int]
     ) -> None:
         if isolation_level is None:
             isolation_level = self.default_isolation_level
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index c33df4208..09a2b58f4 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -23,8 +23,7 @@ from typing_extensions import Counter as CounterType

 from synapse.config.homeserver import HomeServerConfig
 from synapse.storage.database import LoggingDatabaseConnection
-from synapse.storage.engines import BaseDatabaseEngine
-from synapse.storage.engines.postgres import PostgresEngine
+from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine
 from synapse.storage.schema import SCHEMA_COMPAT_VERSION, SCHEMA_VERSION
 from synapse.storage.types import Cursor
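The pattern patch 07 uses for the engines — keep a name importable so
`isinstance()` checks work even when its backing driver is missing, but make
construction fail loudly — applies to any optional dependency. A stand-alone
sketch with an invented module name standing in for psycopg2/sqlite3:

    from typing import NoReturn

    try:
        import some_optional_driver  # invented name; plays the role of psycopg2

        class Engine:
            def __init__(self) -> None:
                self.conn = some_optional_driver.connect()

    except ImportError:

        class Engine:  # type: ignore[no-redef]
            def __new__(cls, *args: object, **kwargs: object) -> NoReturn:
                raise RuntimeError(
                    f"Cannot create {cls.__name__} -- driver module is not installed"
                )

    # isinstance() checks keep working even without the driver installed...
    assert not isinstance(object(), Engine)
    # ...while attempting to construct the engine fails with a clear error.
    try:
        Engine()
    except RuntimeError as e:
        print(e)

From 3c1c40d843575edba27c56ef82146cced09bcf8f Mon Sep 17 00:00:00 2001
From: Sean Quah <8349537+squahtx@users.noreply.github.com>
Date: Tue, 7 Jun 2022 18:17:32 +0100
Subject: [PATCH 08/85] Clean up the test code for client disconnections (#12929)

* Reword failure message about `await_result=False`

* Use `reactor.advance()` instead of `reactor.pump()`

* Raise `AssertionError`s ourselves

* Un-instance method `_test_disconnect`

* Replace `ThreadedMemoryReactorClock` with `MemoryReactorClock`

---
 changelog.d/12929.misc                        |   1 +
 .../federation/transport/server/test__base.py |  10 +-
 tests/http/server/_base.py                    | 130 +++++++++---------
 tests/http/test_servlet.py                    |  10 +-
 tests/replication/http/test__base.py          |  10 +-
 tests/test_server.py                          |  14 +-
 6 files changed, 88 insertions(+), 87 deletions(-)
 create mode 100644 changelog.d/12929.misc

diff --git a/changelog.d/12929.misc b/changelog.d/12929.misc
new file mode 100644
index 000000000..20718d258
--- /dev/null
+++ b/changelog.d/12929.misc
@@ -0,0 +1 @@
+Clean up the test code for client disconnection.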
diff --git a/tests/federation/transport/server/test__base.py b/tests/federation/transport/server/test__base.py index e63885c1c..d33e86db4 100644 --- a/tests/federation/transport/server/test__base.py +++ b/tests/federation/transport/server/test__base.py @@ -24,7 +24,7 @@ from synapse.types import JsonDict from synapse.util.ratelimitutils import FederationRateLimiter from tests import unittest -from tests.http.server._base import EndpointCancellationTestHelperMixin +from tests.http.server._base import test_disconnect class CancellableFederationServlet(BaseFederationServlet): @@ -54,9 +54,7 @@ class CancellableFederationServlet(BaseFederationServlet): return HTTPStatus.OK, {"result": True} -class BaseFederationServletCancellationTests( - unittest.FederatingHomeserverTestCase, EndpointCancellationTestHelperMixin -): +class BaseFederationServletCancellationTests(unittest.FederatingHomeserverTestCase): """Tests for `BaseFederationServlet` cancellation.""" skip = "`BaseFederationServlet` does not support cancellation yet." @@ -86,7 +84,7 @@ class BaseFederationServletCancellationTests( # request won't be processed. self.pump() - self._test_disconnect( + test_disconnect( self.reactor, channel, expect_cancellation=True, @@ -106,7 +104,7 @@ class BaseFederationServletCancellationTests( # request won't be processed. self.pump() - self._test_disconnect( + test_disconnect( self.reactor, channel, expect_cancellation=False, diff --git a/tests/http/server/_base.py b/tests/http/server/_base.py index 57b92beb8..994d8880b 100644 --- a/tests/http/server/_base.py +++ b/tests/http/server/_base.py @@ -46,8 +46,7 @@ from synapse.http.site import SynapseRequest from synapse.logging.context import LoggingContext, make_deferred_yieldable from synapse.types import JsonDict -from tests import unittest -from tests.server import FakeChannel, ThreadedMemoryReactorClock, make_request +from tests.server import FakeChannel, make_request from tests.unittest import logcontext_clean logger = logging.getLogger(__name__) @@ -56,75 +55,82 @@ logger = logging.getLogger(__name__) T = TypeVar("T") -class EndpointCancellationTestHelperMixin(unittest.TestCase): - """Provides helper methods for testing cancellation of endpoints.""" +def test_disconnect( + reactor: MemoryReactorClock, + channel: FakeChannel, + expect_cancellation: bool, + expected_body: Union[bytes, JsonDict], + expected_code: Optional[int] = None, +) -> None: + """Disconnects an in-flight request and checks the response. - def _test_disconnect( - self, - reactor: ThreadedMemoryReactorClock, - channel: FakeChannel, - expect_cancellation: bool, - expected_body: Union[bytes, JsonDict], - expected_code: Optional[int] = None, - ) -> None: - """Disconnects an in-flight request and checks the response. + Args: + reactor: The twisted reactor running the request handler. + channel: The `FakeChannel` for the request. + expect_cancellation: `True` if request processing is expected to be cancelled, + `False` if the request should run to completion. + expected_body: The expected response for the request. + expected_code: The expected status code for the request. Defaults to `200` or + `499` depending on `expect_cancellation`. + """ + # Determine the expected status code. + if expected_code is None: + if expect_cancellation: + expected_code = HTTP_STATUS_REQUEST_CANCELLED + else: + expected_code = HTTPStatus.OK - Args: - reactor: The twisted reactor running the request handler. - channel: The `FakeChannel` for the request. 
-        expect_cancellation: `True` if request processing is expected to be
-            cancelled, `False` if the request should run to completion.
-        expected_body: The expected response for the request.
-        expected_code: The expected status code for the request. Defaults to `200`
-            or `499` depending on `expect_cancellation`.
-        """
-        # Determine the expected status code.
-        if expected_code is None:
-            if expect_cancellation:
-                expected_code = HTTP_STATUS_REQUEST_CANCELLED
-            else:
-                expected_code = HTTPStatus.OK
-
-        request = channel.request
-        self.assertFalse(
-            channel.is_finished(),
+    request = channel.request
+    if channel.is_finished():
+        raise AssertionError(
             "Request finished before we could disconnect - "
-            "was `await_result=False` passed to `make_request`?",
+            "ensure `await_result=False` is passed to `make_request`.",
         )
 
-        # We're about to disconnect the request. This also disconnects the channel, so
-        # we have to rely on mocks to extract the response.
-        respond_method: Callable[..., Any]
-        if isinstance(expected_body, bytes):
-            respond_method = respond_with_html_bytes
+    # We're about to disconnect the request. This also disconnects the channel, so we
+    # have to rely on mocks to extract the response.
+    respond_method: Callable[..., Any]
+    if isinstance(expected_body, bytes):
+        respond_method = respond_with_html_bytes
+    else:
+        respond_method = respond_with_json
+
+    with mock.patch(
+        f"synapse.http.server.{respond_method.__name__}", wraps=respond_method
+    ) as respond_mock:
+        # Disconnect the request.
+        request.connectionLost(reason=ConnectionDone())
+
+        if expect_cancellation:
+            # An immediate cancellation is expected.
+            respond_mock.assert_called_once()
         else:
-            respond_method = respond_with_json
+            respond_mock.assert_not_called()
 
-        with mock.patch(
-            f"synapse.http.server.{respond_method.__name__}", wraps=respond_method
-        ) as respond_mock:
-            # Disconnect the request.
-            request.connectionLost(reason=ConnectionDone())
+            # The handler is expected to run to completion.
+            reactor.advance(1.0)
+            respond_mock.assert_called_once()
 
-            if expect_cancellation:
-                # An immediate cancellation is expected.
-                respond_mock.assert_called_once()
-                args, _kwargs = respond_mock.call_args
-                code, body = args[1], args[2]
-                self.assertEqual(code, expected_code)
-                self.assertEqual(request.code, expected_code)
-                self.assertEqual(body, expected_body)
-            else:
-                respond_mock.assert_not_called()
+        args, _kwargs = respond_mock.call_args
+        code, body = args[1], args[2]
 
-                # The handler is expected to run to completion.
-                reactor.pump([1.0])
-                respond_mock.assert_called_once()
-                args, _kwargs = respond_mock.call_args
-                code, body = args[1], args[2]
-                self.assertEqual(code, expected_code)
-                self.assertEqual(request.code, expected_code)
-                self.assertEqual(body, expected_body)
+        if code != expected_code:
+            raise AssertionError(
+                f"{code} != {expected_code} : "
+                "Request did not finish with the expected status code."
+            )
+
+        if request.code != expected_code:
+            raise AssertionError(
+                f"{request.code} != {expected_code} : "
+                "Request did not finish with the expected status code."
+            )
+
+        if body != expected_body:
+            raise AssertionError(
+                f"{body!r} != {expected_body!r} : "
+                "Request did not finish with the expected body."
+ ) @logcontext_clean diff --git a/tests/http/test_servlet.py b/tests/http/test_servlet.py index b3655d7b4..bb966c80c 100644 --- a/tests/http/test_servlet.py +++ b/tests/http/test_servlet.py @@ -30,7 +30,7 @@ from synapse.server import HomeServer from synapse.types import JsonDict from tests import unittest -from tests.http.server._base import EndpointCancellationTestHelperMixin +from tests.http.server._base import test_disconnect def make_request(content): @@ -108,9 +108,7 @@ class CancellableRestServlet(RestServlet): return HTTPStatus.OK, {"result": True} -class TestRestServletCancellation( - unittest.HomeserverTestCase, EndpointCancellationTestHelperMixin -): +class TestRestServletCancellation(unittest.HomeserverTestCase): """Tests for `RestServlet` cancellation.""" servlets = [ @@ -120,7 +118,7 @@ class TestRestServletCancellation( def test_cancellable_disconnect(self) -> None: """Test that handlers with the `@cancellable` flag can be cancelled.""" channel = self.make_request("GET", "/sleep", await_result=False) - self._test_disconnect( + test_disconnect( self.reactor, channel, expect_cancellation=True, @@ -130,7 +128,7 @@ class TestRestServletCancellation( def test_uncancellable_disconnect(self) -> None: """Test that handlers without the `@cancellable` flag cannot be cancelled.""" channel = self.make_request("POST", "/sleep", await_result=False) - self._test_disconnect( + test_disconnect( self.reactor, channel, expect_cancellation=False, diff --git a/tests/replication/http/test__base.py b/tests/replication/http/test__base.py index a5ab093a2..822a957c3 100644 --- a/tests/replication/http/test__base.py +++ b/tests/replication/http/test__base.py @@ -25,7 +25,7 @@ from synapse.server import HomeServer from synapse.types import JsonDict from tests import unittest -from tests.http.server._base import EndpointCancellationTestHelperMixin +from tests.http.server._base import test_disconnect class CancellableReplicationEndpoint(ReplicationEndpoint): @@ -69,9 +69,7 @@ class UncancellableReplicationEndpoint(ReplicationEndpoint): return HTTPStatus.OK, {"result": True} -class ReplicationEndpointCancellationTestCase( - unittest.HomeserverTestCase, EndpointCancellationTestHelperMixin -): +class ReplicationEndpointCancellationTestCase(unittest.HomeserverTestCase): """Tests for `ReplicationEndpoint` cancellation.""" def create_test_resource(self): @@ -87,7 +85,7 @@ class ReplicationEndpointCancellationTestCase( """Test that handlers with the `@cancellable` flag can be cancelled.""" path = f"{REPLICATION_PREFIX}/{CancellableReplicationEndpoint.NAME}/" channel = self.make_request("POST", path, await_result=False) - self._test_disconnect( + test_disconnect( self.reactor, channel, expect_cancellation=True, @@ -98,7 +96,7 @@ class ReplicationEndpointCancellationTestCase( """Test that handlers without the `@cancellable` flag cannot be cancelled.""" path = f"{REPLICATION_PREFIX}/{UncancellableReplicationEndpoint.NAME}/" channel = self.make_request("POST", path, await_result=False) - self._test_disconnect( + test_disconnect( self.reactor, channel, expect_cancellation=False, diff --git a/tests/test_server.py b/tests/test_server.py index 0f1eb43cb..847432f79 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -34,7 +34,7 @@ from synapse.types import JsonDict from synapse.util import Clock from tests import unittest -from tests.http.server._base import EndpointCancellationTestHelperMixin +from tests.http.server._base import test_disconnect from tests.server import ( FakeSite, 
    ThreadedMemoryReactorClock,
@@ -407,7 +407,7 @@ class CancellableDirectServeHtmlResource(DirectServeHtmlResource):
         return HTTPStatus.OK, b"ok"
 
 
-class DirectServeJsonResourceCancellationTests(EndpointCancellationTestHelperMixin):
+class DirectServeJsonResourceCancellationTests(unittest.TestCase):
     """Tests for `DirectServeJsonResource` cancellation."""
 
     def setUp(self):
@@ -421,7 +421,7 @@ class DirectServeJsonResourceCancellationTests(EndpointCancellationTestHelperMix
         channel = make_request(
             self.reactor, self.site, "GET", "/sleep", await_result=False
         )
-        self._test_disconnect(
+        test_disconnect(
             self.reactor,
             channel,
             expect_cancellation=True,
@@ -433,7 +433,7 @@ class DirectServeJsonResourceCancellationTests(EndpointCancellationTestHelperMix
         channel = make_request(
             self.reactor, self.site, "POST", "/sleep", await_result=False
         )
-        self._test_disconnect(
+        test_disconnect(
             self.reactor,
             channel,
             expect_cancellation=False,
@@ -441,7 +441,7 @@ class DirectServeJsonResourceCancellationTests(EndpointCancellationTestHelperMix
         )
 
 
-class DirectServeHtmlResourceCancellationTests(EndpointCancellationTestHelperMixin):
+class DirectServeHtmlResourceCancellationTests(unittest.TestCase):
     """Tests for `DirectServeHtmlResource` cancellation."""
 
     def setUp(self):
@@ -455,7 +455,7 @@ class DirectServeHtmlResourceCancellationTests(EndpointCancellationTestHelperMix
         channel = make_request(
             self.reactor, self.site, "GET", "/sleep", await_result=False
         )
-        self._test_disconnect(
+        test_disconnect(
             self.reactor,
             channel,
             expect_cancellation=True,
@@ -467,6 +467,6 @@ class DirectServeHtmlResourceCancellationTests(EndpointCancellationTestHelperMix
         channel = make_request(
             self.reactor, self.site, "POST", "/sleep", await_result=False
         )
-        self._test_disconnect(
+        test_disconnect(
             self.reactor, channel, expect_cancellation=False, expected_body=b"ok"
         )

From c316fe8d4a71260ff3c81495e32aac149f32bdc1 Mon Sep 17 00:00:00 2001
From: James
Date: Wed, 8 Jun 2022 10:26:42 +0100
Subject: [PATCH 09/85] Docker Compose Worker Documentation and Examples
 (#12737)

---
 changelog.d/12737.doc | 1 +
 contrib/docker_compose_workers/README.md | 125 ++++++++++++++++++
 .../docker-compose.yaml | 77 +++++++++++
 .../workers/synapse-federation-sender-1.yaml | 14 ++
 .../workers/synapse-generic-worker-1.yaml | 19 +++
 5 files changed, 236 insertions(+)
 create mode 100644 changelog.d/12737.doc
 create mode 100644 contrib/docker_compose_workers/README.md
 create mode 100644 contrib/docker_compose_workers/docker-compose.yaml
 create mode 100644 contrib/docker_compose_workers/workers/synapse-federation-sender-1.yaml
 create mode 100644 contrib/docker_compose_workers/workers/synapse-generic-worker-1.yaml

diff --git a/changelog.d/12737.doc b/changelog.d/12737.doc
new file mode 100644
index 000000000..ab2d1f2fd
--- /dev/null
+++ b/changelog.d/12737.doc
@@ -0,0 +1 @@
+Add documentation for how to configure Synapse with Workers using Docker Compose. Includes example worker config and docker-compose.yaml. Contributed by @Thumbscrew.
\ No newline at end of file
diff --git a/contrib/docker_compose_workers/README.md b/contrib/docker_compose_workers/README.md
new file mode 100644
index 000000000..4dbfee285
--- /dev/null
+++ b/contrib/docker_compose_workers/README.md
@@ -0,0 +1,125 @@
+# Setting up Synapse with Workers using Docker Compose
+
+This directory describes how to deploy and manage Synapse and workers via [Docker Compose](https://docs.docker.com/compose/).
+
+Example worker configuration files can be found [here](workers).
+
+All examples and snippets assume that your Synapse service is called `synapse` in your Docker Compose file.
+
+An example Docker Compose file can be found [here](docker-compose.yaml).
+
+## Worker Service Examples in Docker Compose
+
+In order to start the Synapse container as a worker, you must specify an `entrypoint` that loads both the `homeserver.yaml` and the configuration for the worker (`synapse-generic-worker-1.yaml` in the example below). You must also include the worker type in the environment variable `SYNAPSE_WORKER` (or alternatively pass `-m synapse.app.generic_worker` as part of the `entrypoint` after `"/start.py", "run"`).
+
+### Generic Worker Example
+
+```yaml
+synapse-generic-worker-1:
+  image: matrixdotorg/synapse:latest
+  container_name: synapse-generic-worker-1
+  restart: unless-stopped
+  entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-generic-worker-1.yaml"]
+  healthcheck:
+    test: ["CMD-SHELL", "curl -fSs http://localhost:8081/health || exit 1"]
+    start_period: "5s"
+    interval: "15s"
+    timeout: "5s"
+  volumes:
+    - ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume
+  environment:
+    SYNAPSE_WORKER: synapse.app.generic_worker
+  # Expose port if required so your reverse proxy can send requests to this worker
+  # Port configuration will depend on how the http listener is defined in the worker configuration file
+  ports:
+    - 8081:8081
+  depends_on:
+    - synapse
+```
+
+### Federation Sender Example
+
+Please note: The federation sender does not receive REST API calls, so no exposed ports are required.
+
+```yaml
+synapse-federation-sender-1:
+  image: matrixdotorg/synapse:latest
+  container_name: synapse-federation-sender-1
+  restart: unless-stopped
+  entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-federation-sender-1.yaml"]
+  healthcheck:
+    disable: true
+  volumes:
+    - ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume
+  environment:
+    SYNAPSE_WORKER: synapse.app.federation_sender
+  depends_on:
+    - synapse
+```
+
+## `homeserver.yaml` Configuration
+
+### Enable Redis
+
+Locate the `redis` section of your `homeserver.yaml` and enable and configure it:
+
+```yaml
+redis:
+  enabled: true
+  host: redis
+  port: 6379
+  # password:
+```
+
+This assumes that your Redis service is called `redis` in your Docker Compose file.
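+
+For reference, the matching Redis service in the example [docker-compose.yaml](docker-compose.yaml) is minimal; a service definition along the following lines is all that is needed:
+
+```yaml
+redis:
+  image: redis:latest    # Official Redis image; no extra configuration required
+  restart: unless-stopped
+  networks:
+    - backend            # Must be reachable from the Synapse containers
+```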
+
+### Add a Replication Listener
+
+Locate the `listeners` section of your `homeserver.yaml` and add the following replication listener:
+
+```yaml
+listeners:
+  # Other listeners
+
+  - port: 9093
+    type: http
+    resources:
+      - names: [replication]
+```
+
+This listener is used by the workers for replication and is referred to in worker config files using the following settings:
+
+```yaml
+worker_replication_host: synapse
+worker_replication_http_port: 9093
+```
+
+### Add Workers to `instance_map`
+
+Locate the `instance_map` section of your `homeserver.yaml` and populate it with your workers:
+
+```yaml
+instance_map:
+  synapse-generic-worker-1: # The worker_name setting in your worker configuration file
+    host: synapse-generic-worker-1 # The name of the worker service in your Docker Compose file
+    port: 8034 # The port assigned to the replication listener in your worker config file
+  synapse-federation-sender-1:
+    host: synapse-federation-sender-1
+    port: 8034
+```
+
+### Configure Federation Senders
+
+This section is applicable if you are using federation senders (`synapse.app.federation_sender`). Locate the `send_federation` and `federation_sender_instances` settings in your `homeserver.yaml` and configure them:
+
+```yaml
+# This will disable federation sending on the main Synapse instance
+send_federation: false
+
+federation_sender_instances:
+  - synapse-federation-sender-1 # The worker_name setting in your federation sender worker configuration file
+```
+
+## Other Worker Types
+
+Using the concepts shown here, it is possible to create other worker types in Docker Compose. See the [Workers](https://matrix-org.github.io/synapse/latest/workers.html#available-worker-applications) documentation for a list of available workers.
\ No newline at end of file
diff --git a/contrib/docker_compose_workers/docker-compose.yaml b/contrib/docker_compose_workers/docker-compose.yaml
new file mode 100644
index 000000000..eaf02c2af
--- /dev/null
+++ b/contrib/docker_compose_workers/docker-compose.yaml
@@ -0,0 +1,77 @@
+networks:
+  backend:
+
+services:
+  postgres:
+    image: postgres:latest
+    restart: unless-stopped
+    volumes:
+      - ${VOLUME_PATH}/var/lib/postgresql/data:/var/lib/postgresql/data:rw
+    networks:
+      - backend
+    environment:
+      POSTGRES_DB: synapse
+      POSTGRES_USER: synapse_user
+      POSTGRES_PASSWORD: postgres
+      POSTGRES_INITDB_ARGS: --encoding=UTF8 --locale=C
+
+  redis:
+    image: redis:latest
+    restart: unless-stopped
+    networks:
+      - backend
+
+  synapse:
+    image: matrixdotorg/synapse:latest
+    container_name: synapse
+    restart: unless-stopped
+    volumes:
+      - ${VOLUME_PATH}/data:/data:rw
+    ports:
+      - 8008:8008
+    networks:
+      - backend
+    environment:
+      SYNAPSE_CONFIG_DIR: /data
+      SYNAPSE_CONFIG_PATH: /data/homeserver.yaml
+    depends_on:
+      - postgres
+
+  synapse-generic-worker-1:
+    image: matrixdotorg/synapse:latest
+    container_name: synapse-generic-worker-1
+    restart: unless-stopped
+    entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-generic-worker-1.yaml"]
+    healthcheck:
+      test: ["CMD-SHELL", "curl -fSs http://localhost:8081/health || exit 1"]
+      start_period: "5s"
+      interval: "15s"
+      timeout: "5s"
+    networks:
+      - backend
+    volumes:
+      - ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume
+    environment:
+      SYNAPSE_WORKER: synapse.app.generic_worker
+    # Expose port if required so your reverse proxy can send requests to this worker
+    # Port configuration will depend on how the http listener is defined in the
worker configuration file + ports: + - 8081:8081 + depends_on: + - synapse + + synapse-federation-sender-1: + image: matrixdotorg/synapse:latest + container_name: synapse-federation-sender-1 + restart: unless-stopped + entrypoint: ["/start.py", "run", "--config-path=/data/homeserver.yaml", "--config-path=/data/workers/synapse-federation-sender-1.yaml"] + healthcheck: + disable: true + networks: + - backend + volumes: + - ${VOLUME_PATH}/data:/data:rw # Replace VOLUME_PATH with the path to your Synapse volume + environment: + SYNAPSE_WORKER: synapse.app.federation_sender + depends_on: + - synapse diff --git a/contrib/docker_compose_workers/workers/synapse-federation-sender-1.yaml b/contrib/docker_compose_workers/workers/synapse-federation-sender-1.yaml new file mode 100644 index 000000000..5ba42a92d --- /dev/null +++ b/contrib/docker_compose_workers/workers/synapse-federation-sender-1.yaml @@ -0,0 +1,14 @@ +worker_app: synapse.app.federation_sender +worker_name: synapse-federation-sender-1 + +# The replication listener on the main synapse process. +worker_replication_host: synapse +worker_replication_http_port: 9093 + +worker_listeners: + - type: http + port: 8034 + resources: + - names: [replication] + +worker_log_config: /data/federation_sender.log.config diff --git a/contrib/docker_compose_workers/workers/synapse-generic-worker-1.yaml b/contrib/docker_compose_workers/workers/synapse-generic-worker-1.yaml new file mode 100644 index 000000000..694584105 --- /dev/null +++ b/contrib/docker_compose_workers/workers/synapse-generic-worker-1.yaml @@ -0,0 +1,19 @@ +worker_app: synapse.app.generic_worker +worker_name: synapse-generic-worker-1 + +# The replication listener on the main synapse process. +worker_replication_host: synapse +worker_replication_http_port: 9093 + +worker_listeners: + - type: http + port: 8034 + resources: + - names: [replication] + - type: http + port: 8081 + x_forwarded: true + resources: + - names: [client, federation] + +worker_log_config: /data/worker.log.config From 67f51c84f828c2043f37b987b42323e8d740bad0 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Wed, 8 Jun 2022 10:57:05 +0100 Subject: [PATCH 10/85] Merge the Complement testing Docker images into a single, multi-purpose image. 
 (#12881)

Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
---
 changelog.d/12881.misc | 1 +
 docker/Dockerfile-workers | 3 +-
 docker/README-testing.md | 101 +++++++-------
 docker/complement/Dockerfile | 45 ++++--
 docker/complement/README.md | 33 ++++-
 docker/complement/SynapseWorkers.Dockerfile | 40 ------
 .../start-complement-synapse-workers.sh | 61 ---------
 docker/complement/conf/homeserver.yaml | 129 ------------------
 docker/complement/conf/log_config.yaml | 24 ----
 .../postgres.supervisord.conf | 3 +
 docker/complement/conf/start.sh | 30 ----
 .../complement/conf/start_for_complement.sh | 90 ++++++++++++
 .../workers-shared-extra.yaml.j2} | 20 ++-
 docker/conf-workers/shared.yaml.j2 | 2 +
 docker/conf-workers/supervisord.conf.j2 | 5 +-
 docker/configure_workers_and_start.py | 22 +--
 docs/development/contributing_guide.md | 5 +
 scripts-dev/complement.sh | 35 +++--
 18 files changed, 277 insertions(+), 372 deletions(-)
 create mode 100644 changelog.d/12881.misc
 delete mode 100644 docker/complement/SynapseWorkers.Dockerfile
 delete mode 100755 docker/complement/conf-workers/start-complement-synapse-workers.sh
 delete mode 100644 docker/complement/conf/homeserver.yaml
 delete mode 100644 docker/complement/conf/log_config.yaml
 rename docker/complement/{conf-workers => conf}/postgres.supervisord.conf (88%)
 delete mode 100755 docker/complement/conf/start.sh
 create mode 100755 docker/complement/conf/start_for_complement.sh
 rename docker/complement/{conf-workers/workers-shared.yaml => conf/workers-shared-extra.yaml.j2} (75%)

diff --git a/changelog.d/12881.misc b/changelog.d/12881.misc
new file mode 100644
index 000000000..8a83182bd
--- /dev/null
+++ b/changelog.d/12881.misc
@@ -0,0 +1 @@
+Merge the Complement testing Docker images into a single, multi-purpose image.
\ No newline at end of file
diff --git a/docker/Dockerfile-workers b/docker/Dockerfile-workers
index 24b03585f..83db0a95b 100644
--- a/docker/Dockerfile-workers
+++ b/docker/Dockerfile-workers
@@ -1,5 +1,6 @@
 # Inherit from the official Synapse docker image
-FROM matrixdotorg/synapse
+ARG SYNAPSE_VERSION=latest
+FROM matrixdotorg/synapse:$SYNAPSE_VERSION
 
 # Install deps
 RUN \
diff --git a/docker/README-testing.md b/docker/README-testing.md
index c38cae753..1f0423f09 100644
--- a/docker/README-testing.md
+++ b/docker/README-testing.md
@@ -8,13 +8,19 @@ docker images that can be run inside Complement for testing purposes.
 
 Note that running Synapse's unit tests from within the docker image is not supported.
 
-## Testing with SQLite and single-process Synapse
+## Using the Complement launch script
 
-> Note that `scripts-dev/complement.sh` is a script that will automatically build
-> and run an SQLite-based, single-process of Synapse against Complement.
+`scripts-dev/complement.sh` is a script that will automatically build
+and run Synapse against Complement.
+Consult the [contributing guide][guideComplementSh] for instructions on how to use it.
 
-The instructions below will set up Complement testing for a single-process,
-SQLite-based Synapse deployment.
+
+[guideComplementSh]: https://matrix-org.github.io/synapse/latest/development/contributing_guide.html#run-the-integration-tests-complement
+
+## Building and running the images manually
+
+Under some circumstances, you may wish to build the images manually.
+The instructions below will walk you through doing that.
 
 Start by building the base Synapse docker image.
If you wish to run tests with the latest release of Synapse, instead of your current checkout, you can skip this step. From the @@ -24,12 +30,17 @@ root of the repository: docker build -t matrixdotorg/synapse -f docker/Dockerfile . ``` -This will build an image with the tag `matrixdotorg/synapse`. - -Next, build the Synapse image for Complement. +Next, build the workerised Synapse docker image, which is a layer over the base +image. ```sh -docker build -t complement-synapse -f "docker/complement/Dockerfile" docker/complement +docker build -t matrixdotorg/synapse-workers -f docker/Dockerfile-workers . +``` + +Finally, build the multi-purpose image for Complement, which is a layer over the workers image. + +```sh +docker build -t complement-synapse -f docker/complement/Dockerfile docker/complement ``` This will build an image with the tag `complement-synapse`, which can be handed to @@ -37,49 +48,9 @@ Complement for testing via the `COMPLEMENT_BASE_IMAGE` environment variable. Ref [Complement's documentation](https://github.com/matrix-org/complement/#running) for how to run the tests, as well as the various available command line flags. -## Testing with PostgreSQL and single or multi-process Synapse +See [the Complement image README](./complement/README.md) for information about the +expected environment variables. -The above docker image only supports running Synapse with SQLite and in a -single-process topology. The following instructions are used to build a Synapse image for -Complement that supports either single or multi-process topology with a PostgreSQL -database backend. - -As with the single-process image, build the base Synapse docker image. If you wish to run -tests with the latest release of Synapse, instead of your current checkout, you can skip -this step. From the root of the repository: - -```sh -docker build -t matrixdotorg/synapse -f docker/Dockerfile . -``` - -This will build an image with the tag `matrixdotorg/synapse`. - -Next, we build a new image with worker support based on `matrixdotorg/synapse:latest`. -Again, from the root of the repository: - -```sh -docker build -t matrixdotorg/synapse-workers -f docker/Dockerfile-workers . -``` - -This will build an image with the tag` matrixdotorg/synapse-workers`. - -It's worth noting at this point that this image is fully functional, and -can be used for testing against locally. See instructions for using the container -under -[Running the Dockerfile-worker image standalone](#running-the-dockerfile-worker-image-standalone) -below. - -Finally, build the Synapse image for Complement, which is based on -`matrixdotorg/synapse-workers`. - -```sh -docker build -t matrixdotorg/complement-synapse-workers -f docker/complement/SynapseWorkers.Dockerfile docker/complement -``` - -This will build an image with the tag `complement-synapse-workers`, which can be handed to -Complement for testing via the `COMPLEMENT_BASE_IMAGE` environment variable. Refer to -[Complement's documentation](https://github.com/matrix-org/complement/#running) for -how to run the tests, as well as the various available command line flags. ## Running the Dockerfile-worker image standalone @@ -113,6 +84,9 @@ docker run -d --name synapse \ ...substituting `POSTGRES*` variables for those that match a postgres host you have available (usually a running postgres docker container). + +### Workers + The `SYNAPSE_WORKER_TYPES` environment variable is a comma-separated list of workers to use when running the container. 
 All possible worker names are defined by the keys of the `WORKERS_CONFIG`
 variable in [this script](configure_workers_and_start.py), which the
@@ -125,8 +99,11 @@ type, simply specify the type multiple times in `SYNAPSE_WORKER_TYPES`
 (e.g `SYNAPSE_WORKER_TYPES=event_creator,event_creator...`).
 
 Otherwise, `SYNAPSE_WORKER_TYPES` can either be left empty or unset to spawn no workers
-(leaving only the main process). The container is configured to use redis-based worker
-mode.
+(leaving only the main process).
+The container will only be configured to use Redis-based worker mode if there are
+workers enabled.
+
+### Logging
 
 Logs for workers and the main process are logged to stdout and can be viewed
 with standard `docker logs` tooling. Worker logs contain their worker name
@@ -136,3 +113,21 @@ Setting `SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK=1` will cause worker logs to be writ
 `/logs/<worker name>.log`. Logs are kept for 1 week and rotate every day at
 00:00, according to the container's clock. Logging for the main process must still
 be configured by modifying the homeserver's log config in your Synapse data volume.
+
+
+### Application Services
+
+Setting the `SYNAPSE_AS_REGISTRATION_DIR` environment variable to the path of
+a directory (within the container) will cause the configuration script to scan
+that directory for `.yaml`/`.yml` registration files.
+Synapse will be configured to load these configuration files.
+
+
+### TLS Termination
+
+Nginx is present in the image to route requests to the appropriate workers,
+but it does not serve TLS by default.
+
+You can configure `SYNAPSE_TLS_CERT` and `SYNAPSE_TLS_KEY` to point to a
+TLS certificate and key (respectively), both in PEM (textual) format.
+In this case, Nginx will additionally serve using HTTPS on port 8448.
diff --git a/docker/complement/Dockerfile b/docker/complement/Dockerfile
index 4823ce736..50684c956 100644
--- a/docker/complement/Dockerfile
+++ b/docker/complement/Dockerfile
@@ -1,22 +1,45 @@
-# A dockerfile which builds an image suitable for testing Synapse under
-# complement.
-
+# This dockerfile builds on top of 'docker/Dockerfile-workers' in matrix-org/synapse
+# by including a built-in postgres instance, as well as setting up the homeserver so
+# that it is ready for testing via Complement.
+#
+# Instructions for building this image from those it depends on are detailed in this guide:
+# https://github.com/matrix-org/synapse/blob/develop/docker/README-testing.md#testing-with-postgresql-and-single-or-multi-process-synapse
 ARG SYNAPSE_VERSION=latest
+FROM matrixdotorg/synapse-workers:$SYNAPSE_VERSION
 
-FROM matrixdotorg/synapse:${SYNAPSE_VERSION}
+# Install postgresql
+RUN apt-get update && \
+    DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y postgresql-13
 
-ENV SERVER_NAME=localhost
+# Configure a user and create a database for Synapse
+RUN pg_ctlcluster 13 main start && su postgres -c "echo \
+ \"ALTER USER postgres PASSWORD 'somesecret'; \
+ CREATE DATABASE synapse \
+  ENCODING 'UTF8' \
+  LC_COLLATE='C' \
+  LC_CTYPE='C' \
+  template=template0;\" | psql" && pg_ctlcluster 13 main stop
 
-COPY conf/* /conf/
-
-# generate a signing key
-RUN generate_signing_key -o /conf/server.signing.key
+# Extend the shared homeserver config to disable rate-limiting,
+# set Complement's static shared secret, enable registration, amongst other
+# tweaks to get Synapse ready for testing.
+# To do this, we copy the old template out of the way and then include it
+# with Jinja2.
+RUN mv /conf/shared.yaml.j2 /conf/shared-orig.yaml.j2 +COPY conf/workers-shared-extra.yaml.j2 /conf/shared.yaml.j2 WORKDIR /data +COPY conf/postgres.supervisord.conf /etc/supervisor/conf.d/postgres.conf + +# Copy the entrypoint +COPY conf/start_for_complement.sh / + +# Expose nginx's listener ports EXPOSE 8008 8448 -ENTRYPOINT ["/conf/start.sh"] +ENTRYPOINT ["/start_for_complement.sh"] +# Update the healthcheck to have a shorter check interval HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \ - CMD curl -fSs http://localhost:8008/health || exit 1 + CMD /bin/sh /healthcheck.sh diff --git a/docker/complement/README.md b/docker/complement/README.md index e075418e4..37c39e2df 100644 --- a/docker/complement/README.md +++ b/docker/complement/README.md @@ -1 +1,32 @@ -Stuff for building the docker image used for testing under complement. +# Unified Complement image for Synapse + +This is an image for testing Synapse with [the *Complement* integration test suite][complement]. +It contains some insecure defaults that are only suitable for testing purposes, +so **please don't use this image for a production server**. + +This multi-purpose image is built on top of `Dockerfile-workers` in the parent directory +and can be switched using environment variables between the following configurations: + +- Monolithic Synapse with SQLite (`SYNAPSE_COMPLEMENT_DATABASE=sqlite`) +- Monolithic Synapse with Postgres (`SYNAPSE_COMPLEMENT_DATABASE=postgres`) +- Workerised Synapse with Postgres (`SYNAPSE_COMPLEMENT_DATABASE=postgres` and `SYNAPSE_COMPLEMENT_USE_WORKERS=true`) + +The image is self-contained; it contains an integrated Postgres, Redis and Nginx. + + +## How to get Complement to pass the environment variables through + +To pass these environment variables, use [Complement's `COMPLEMENT_SHARE_ENV_PREFIX`][complementEnv] +variable to configure an environment prefix to pass through, then prefix the above options +with that prefix. + +Example: +``` +COMPLEMENT_SHARE_ENV_PREFIX=PASS_ PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres +``` + +Consult `scripts-dev/complement.sh` in the repository root for a real example. + + +[complement]: https://github.com/matrix-org/complement +[complementEnv]: https://github.com/matrix-org/complement/pull/382 diff --git a/docker/complement/SynapseWorkers.Dockerfile b/docker/complement/SynapseWorkers.Dockerfile deleted file mode 100644 index 99a09cbc2..000000000 --- a/docker/complement/SynapseWorkers.Dockerfile +++ /dev/null @@ -1,40 +0,0 @@ -# This dockerfile builds on top of 'docker/Dockerfile-worker' in matrix-org/synapse -# by including a built-in postgres instance, as well as setting up the homeserver so -# that it is ready for testing via Complement. 
-# -# Instructions for building this image from those it depends on is detailed in this guide: -# https://github.com/matrix-org/synapse/blob/develop/docker/README-testing.md#testing-with-postgresql-and-single-or-multi-process-synapse -FROM matrixdotorg/synapse-workers - -# Install postgresql -RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y postgresql-13 - -# Configure a user and create a database for Synapse -RUN pg_ctlcluster 13 main start && su postgres -c "echo \ - \"ALTER USER postgres PASSWORD 'somesecret'; \ - CREATE DATABASE synapse \ - ENCODING 'UTF8' \ - LC_COLLATE='C' \ - LC_CTYPE='C' \ - template=template0;\" | psql" && pg_ctlcluster 13 main stop - -# Modify the shared homeserver config with postgres support, certificate setup -# and the disabling of rate-limiting -COPY conf-workers/workers-shared.yaml /conf/workers/shared.yaml - -WORKDIR /data - -COPY conf-workers/postgres.supervisord.conf /etc/supervisor/conf.d/postgres.conf - -# Copy the entrypoint -COPY conf-workers/start-complement-synapse-workers.sh / - -# Expose nginx's listener ports -EXPOSE 8008 8448 - -ENTRYPOINT ["/start-complement-synapse-workers.sh"] - -# Update the healthcheck to have a shorter check interval -HEALTHCHECK --start-period=5s --interval=1s --timeout=1s \ - CMD /bin/sh /healthcheck.sh diff --git a/docker/complement/conf-workers/start-complement-synapse-workers.sh b/docker/complement/conf-workers/start-complement-synapse-workers.sh deleted file mode 100755 index b7e244400..000000000 --- a/docker/complement/conf-workers/start-complement-synapse-workers.sh +++ /dev/null @@ -1,61 +0,0 @@ -#!/bin/bash -# -# Default ENTRYPOINT for the docker image used for testing synapse with workers under complement - -set -e - -function log { - d=$(date +"%Y-%m-%d %H:%M:%S,%3N") - echo "$d $@" -} - -# Set the server name of the homeserver -export SYNAPSE_SERVER_NAME=${SERVER_NAME} - -# No need to report stats here -export SYNAPSE_REPORT_STATS=no - -# Set postgres authentication details which will be placed in the homeserver config file -export POSTGRES_PASSWORD=somesecret -export POSTGRES_USER=postgres -export POSTGRES_HOST=localhost - -# Specify the workers to test with -export SYNAPSE_WORKER_TYPES="\ - event_persister, \ - event_persister, \ - background_worker, \ - frontend_proxy, \ - event_creator, \ - user_dir, \ - media_repository, \ - federation_inbound, \ - federation_reader, \ - federation_sender, \ - synchrotron, \ - appservice, \ - pusher" - -# Add Complement's appservice registration directory, if there is one -# (It can be absent when there are no application services in this test!) -if [ -d /complement/appservice ]; then - export SYNAPSE_AS_REGISTRATION_DIR=/complement/appservice -fi - -# Generate a TLS key, then generate a certificate by having Complement's CA sign it -# Note that both the key and certificate are in PEM format (not DER). 
-openssl genrsa -out /conf/server.tls.key 2048 - -openssl req -new -key /conf/server.tls.key -out /conf/server.tls.csr \ - -subj "/CN=${SERVER_NAME}" - -openssl x509 -req -in /conf/server.tls.csr \ - -CA /complement/ca/ca.crt -CAkey /complement/ca/ca.key -set_serial 1 \ - -out /conf/server.tls.crt - -export SYNAPSE_TLS_CERT=/conf/server.tls.crt -export SYNAPSE_TLS_KEY=/conf/server.tls.key - -# Run the script that writes the necessary config files and starts supervisord, which in turn -# starts everything else -exec /configure_workers_and_start.py diff --git a/docker/complement/conf/homeserver.yaml b/docker/complement/conf/homeserver.yaml deleted file mode 100644 index e2be540bb..000000000 --- a/docker/complement/conf/homeserver.yaml +++ /dev/null @@ -1,129 +0,0 @@ -## Server ## - -server_name: SERVER_NAME -log_config: /conf/log_config.yaml -report_stats: False -signing_key_path: /conf/server.signing.key -trusted_key_servers: [] -enable_registration: true -enable_registration_without_verification: true - -## Listeners ## - -tls_certificate_path: /conf/server.tls.crt -tls_private_key_path: /conf/server.tls.key -bcrypt_rounds: 4 -registration_shared_secret: complement - -listeners: - - port: 8448 - bind_addresses: ['::'] - type: http - tls: true - resources: - - names: [federation] - - - port: 8008 - bind_addresses: ['::'] - type: http - - resources: - - names: [client] - -## Database ## - -database: - name: "sqlite3" - args: - # We avoid /data, as it is a volume and is not transferred when the container is committed, - # which is a fundamental necessity in complement. - database: "/conf/homeserver.db" - -## Federation ## - -# trust certs signed by the complement CA -federation_custom_ca_list: -- /complement/ca/ca.crt - -# unblacklist RFC1918 addresses -ip_range_blacklist: [] - -# Disable server rate-limiting -rc_federation: - window_size: 1000 - sleep_limit: 10 - sleep_delay: 500 - reject_limit: 99999 - concurrent: 3 - -rc_message: - per_second: 9999 - burst_count: 9999 - -rc_registration: - per_second: 9999 - burst_count: 9999 - -rc_login: - address: - per_second: 9999 - burst_count: 9999 - account: - per_second: 9999 - burst_count: 9999 - failed_attempts: - per_second: 9999 - burst_count: 9999 - -rc_admin_redaction: - per_second: 9999 - burst_count: 9999 - -rc_joins: - local: - per_second: 9999 - burst_count: 9999 - remote: - per_second: 9999 - burst_count: 9999 - -rc_3pid_validation: - per_second: 1000 - burst_count: 1000 - -rc_invites: - per_room: - per_second: 1000 - burst_count: 1000 - per_user: - per_second: 1000 - burst_count: 1000 - -federation_rr_transactions_per_room_per_second: 9999 - -## API Configuration ## - -# A list of application service config files to use -# -app_service_config_files: -AS_REGISTRATION_FILES - -## Experimental Features ## - -experimental_features: - # Enable spaces support - spaces_enabled: true - # Enable history backfilling support - msc2716_enabled: true - # server-side support for partial state in /send_join responses - msc3706_enabled: true - # client-side support for partial state in /send_join responses - faster_joins: true - # Enable jump to date endpoint - msc3030_enabled: true - -server_notices: - system_mxid_localpart: _server - system_mxid_display_name: "Server Alert" - system_mxid_avatar_url: "" - room_name: "Server Alert" diff --git a/docker/complement/conf/log_config.yaml b/docker/complement/conf/log_config.yaml deleted file mode 100644 index c33fd6cd0..000000000 --- a/docker/complement/conf/log_config.yaml +++ /dev/null @@ -1,24 +0,0 @@ 
-version: 1 - -formatters: - precise: - format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s' - -filters: - context: - (): synapse.logging.context.LoggingContextFilter - request: "" - -handlers: - console: - class: logging.StreamHandler - formatter: precise - filters: [context] - # log to stdout, for easier use with 'docker logs' - stream: 'ext://sys.stdout' - -root: - level: INFO - handlers: [console] - -disable_existing_loggers: false diff --git a/docker/complement/conf-workers/postgres.supervisord.conf b/docker/complement/conf/postgres.supervisord.conf similarity index 88% rename from docker/complement/conf-workers/postgres.supervisord.conf rename to docker/complement/conf/postgres.supervisord.conf index 5608342d1..5dae3e633 100644 --- a/docker/complement/conf-workers/postgres.supervisord.conf +++ b/docker/complement/conf/postgres.supervisord.conf @@ -1,6 +1,9 @@ [program:postgres] command=/usr/local/bin/prefix-log /usr/bin/pg_ctlcluster 13 main start --foreground +# Only start if START_POSTGRES=1 +autostart=%(ENV_START_POSTGRES)s + # Lower priority number = starts first priority=1 diff --git a/docker/complement/conf/start.sh b/docker/complement/conf/start.sh deleted file mode 100755 index 5d8d0fe01..000000000 --- a/docker/complement/conf/start.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/sh - -set -e - -sed -i "s/SERVER_NAME/${SERVER_NAME}/g" /conf/homeserver.yaml - -# Add the application service registration files to the homeserver.yaml config -for filename in /complement/appservice/*.yaml; do - [ -f "$filename" ] || break - - as_id=$(basename "$filename" .yaml) - - # Insert the path to the registration file and the AS_REGISTRATION_FILES marker after - # so we can add the next application service in the next iteration of this for loop - sed -i "s/AS_REGISTRATION_FILES/ - \/complement\/appservice\/${as_id}.yaml\nAS_REGISTRATION_FILES/g" /conf/homeserver.yaml -done -# Remove the AS_REGISTRATION_FILES entry -sed -i "s/AS_REGISTRATION_FILES//g" /conf/homeserver.yaml - -# generate an ssl key and cert for the server, signed by the complement CA -openssl genrsa -out /conf/server.tls.key 2048 - -openssl req -new -key /conf/server.tls.key -out /conf/server.tls.csr \ - -subj "/CN=${SERVER_NAME}" -openssl x509 -req -in /conf/server.tls.csr \ - -CA /complement/ca/ca.crt -CAkey /complement/ca/ca.key -set_serial 1 \ - -out /conf/server.tls.crt - -exec python -m synapse.app.homeserver -c /conf/homeserver.yaml "$@" - diff --git a/docker/complement/conf/start_for_complement.sh b/docker/complement/conf/start_for_complement.sh new file mode 100755 index 000000000..b9c97ab68 --- /dev/null +++ b/docker/complement/conf/start_for_complement.sh @@ -0,0 +1,90 @@ +#!/bin/bash +# +# Default ENTRYPOINT for the docker image used for testing synapse with workers under complement + +set -e + +echo "Complement Synapse launcher" +echo " Args: $@" +echo " Env: SYNAPSE_COMPLEMENT_DATABASE=$SYNAPSE_COMPLEMENT_DATABASE SYNAPSE_COMPLEMENT_USE_WORKERS=$SYNAPSE_COMPLEMENT_USE_WORKERS" + +function log { + d=$(date +"%Y-%m-%d %H:%M:%S,%3N") + echo "$d $@" +} + +# Set the server name of the homeserver +export SYNAPSE_SERVER_NAME=${SERVER_NAME} + +# No need to report stats here +export SYNAPSE_REPORT_STATS=no + + +case "$SYNAPSE_COMPLEMENT_DATABASE" in + postgres) + # Set postgres authentication details which will be placed in the homeserver config file + export POSTGRES_PASSWORD=somesecret + export POSTGRES_USER=postgres + export POSTGRES_HOST=localhost + + # configure supervisord to start 
postgres
+        export START_POSTGRES=true
+        ;;
+
+    sqlite)
+        # Configure supervisord not to start Postgres, as we don't need it
+        export START_POSTGRES=false
+        ;;
+
+    *)
+        echo "Unknown Synapse database: SYNAPSE_COMPLEMENT_DATABASE=$SYNAPSE_COMPLEMENT_DATABASE" >&2
+        exit 1
+        ;;
+esac
+
+
+if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then
+    # Specify the workers to test with
+    export SYNAPSE_WORKER_TYPES="\
+        event_persister, \
+        event_persister, \
+        background_worker, \
+        frontend_proxy, \
+        event_creator, \
+        user_dir, \
+        media_repository, \
+        federation_inbound, \
+        federation_reader, \
+        federation_sender, \
+        synchrotron, \
+        appservice, \
+        pusher"
+else
+    # Empty string here means 'main process only'
+    export SYNAPSE_WORKER_TYPES=""
+fi
+
+
+# Add Complement's appservice registration directory, if there is one
+# (It can be absent when there are no application services in this test!)
+if [ -d /complement/appservice ]; then
+    export SYNAPSE_AS_REGISTRATION_DIR=/complement/appservice
+fi
+
+# Generate a TLS key, then generate a certificate by having Complement's CA sign it
+# Note that both the key and certificate are in PEM format (not DER).
+openssl genrsa -out /conf/server.tls.key 2048
+
+openssl req -new -key /conf/server.tls.key -out /conf/server.tls.csr \
+  -subj "/CN=${SERVER_NAME}"
+
+openssl x509 -req -in /conf/server.tls.csr \
+  -CA /complement/ca/ca.crt -CAkey /complement/ca/ca.key -set_serial 1 \
+  -out /conf/server.tls.crt
+
+export SYNAPSE_TLS_CERT=/conf/server.tls.crt
+export SYNAPSE_TLS_KEY=/conf/server.tls.key
+
+# Run the script that writes the necessary config files and starts supervisord, which in turn
+# starts everything else
+exec /configure_workers_and_start.py
diff --git a/docker/complement/conf-workers/workers-shared.yaml b/docker/complement/conf/workers-shared-extra.yaml.j2
similarity index 75%
rename from docker/complement/conf-workers/workers-shared.yaml
rename to docker/complement/conf/workers-shared-extra.yaml.j2
index cd7b50c65..a5b1b6bb8 100644
--- a/docker/complement/conf-workers/workers-shared.yaml
+++ b/docker/complement/conf/workers-shared-extra.yaml.j2
@@ -1,3 +1,11 @@
+{#
+  This file extends the default 'shared' configuration file (from the 'synapse-workers'
+  docker image) with Complement-specific tweaks.
+
+  The base configuration is moved out of the default path to `shared-orig.yaml.j2`
+  in the Complement Dockerfile and below we include that original file.
+#} + ## Server ## report_stats: False trusted_key_servers: [] @@ -76,10 +84,16 @@ federation_rr_transactions_per_room_per_second: 9999 ## Experimental Features ## experimental_features: - # Enable history backfilling support - msc2716_enabled: true # Enable spaces support spaces_enabled: true + # Enable history backfilling support + msc2716_enabled: true + # server-side support for partial state in /send_join responses + msc3706_enabled: true + {% if not workers_in_use %} + # client-side support for partial state in /send_join responses + faster_joins: true + {% endif %} # Enable jump to date endpoint msc3030_enabled: true @@ -88,3 +102,5 @@ server_notices: system_mxid_display_name: "Server Alert" system_mxid_avatar_url: "" room_name: "Server Alert" + +{% include "shared-orig.yaml.j2" %} diff --git a/docker/conf-workers/shared.yaml.j2 b/docker/conf-workers/shared.yaml.j2 index 644ed788f..92d25386d 100644 --- a/docker/conf-workers/shared.yaml.j2 +++ b/docker/conf-workers/shared.yaml.j2 @@ -3,8 +3,10 @@ # configure_workers_and_start.py uses and amends to this file depending on the workers # that have been selected. +{% if enable_redis %} redis: enabled: true +{% endif %} {% if appservice_registrations is not none %} ## Application Services ## diff --git a/docker/conf-workers/supervisord.conf.j2 b/docker/conf-workers/supervisord.conf.j2 index ca1f7aef8..7afab0513 100644 --- a/docker/conf-workers/supervisord.conf.j2 +++ b/docker/conf-workers/supervisord.conf.j2 @@ -28,6 +28,9 @@ stderr_logfile_maxbytes=0 username=redis autorestart=true +# Redis can be disabled if the image is being used without workers +autostart={{ enable_redis }} + [program:synapse_main] command=/usr/local/bin/prefix-log /usr/local/bin/python -m synapse.app.homeserver --config-path="{{ main_config_path }}" --config-path=/conf/workers/shared.yaml priority=10 @@ -41,4 +44,4 @@ autorestart=unexpected exitcodes=0 # Additional process blocks -{{ worker_config }} \ No newline at end of file +{{ worker_config }} diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py index f7dac9022..64697e035 100755 --- a/docker/configure_workers_and_start.py +++ b/docker/configure_workers_and_start.py @@ -37,8 +37,8 @@ import sys from pathlib import Path from typing import Any, Dict, List, Mapping, MutableMapping, NoReturn, Set -import jinja2 import yaml +from jinja2 import Environment, FileSystemLoader MAIN_PROCESS_HTTP_LISTENER_PORT = 8080 @@ -236,12 +236,13 @@ def convert(src: str, dst: str, **template_vars: object) -> None: template_vars: The arguments to replace placeholder variables in the template with. """ # Read the template file - with open(src) as infile: - template = infile.read() + # We disable autoescape to prevent template variables from being escaped, + # as we're not using HTML. + env = Environment(loader=FileSystemLoader(os.path.dirname(src)), autoescape=False) + template = env.get_template(os.path.basename(src)) - # Generate a string from the template. We disable autoescape to prevent template - # variables from being escaped. - rendered = jinja2.Template(template, autoescape=False).render(**template_vars) + # Generate a string from the template. 
+ rendered = template.render(**template_vars) # Write the generated contents to a file # @@ -378,8 +379,8 @@ def generate_worker_files( nginx_locations = {} # Read the desired worker configuration from the environment - worker_types_env = environ.get("SYNAPSE_WORKER_TYPES") - if worker_types_env is None: + worker_types_env = environ.get("SYNAPSE_WORKER_TYPES", "").strip() + if not worker_types_env: # No workers, just the main process worker_types = [] else: @@ -506,12 +507,16 @@ def generate_worker_files( if reg_path.suffix.lower() in (".yaml", ".yml") ] + workers_in_use = len(worker_types) > 0 + # Shared homeserver config convert( "/conf/shared.yaml.j2", "/conf/workers/shared.yaml", shared_worker_config=yaml.dump(shared_config), appservice_registrations=appservice_registrations, + enable_redis=workers_in_use, + workers_in_use=workers_in_use, ) # Nginx config @@ -531,6 +536,7 @@ def generate_worker_files( "/etc/supervisor/supervisord.conf", main_config_path=config_path, worker_config=supervisord_config, + enable_redis=workers_in_use, ) # healthcheck config diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md index 2b3714df6..c2f04a390 100644 --- a/docs/development/contributing_guide.md +++ b/docs/development/contributing_guide.md @@ -304,6 +304,11 @@ To run a specific test, you can specify the whole name structure: COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh -run TestImportHistoricalMessages/parallel/Historical_events_resolve_in_the_correct_order ``` +The above will run a monolithic (single-process) Synapse with SQLite as the database. For other configurations, try: + +- Passing `POSTGRES=1` as an environment variable to use the Postgres database instead. +- Passing `WORKERS=1` as an environment variable to use a workerised setup instead. This option implies the use of Postgres. + ### Access database for homeserver after Complement test runs. diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index 3c472c576..ffd399c39 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -43,17 +43,29 @@ fi # Build the base Synapse image from the local checkout docker build -t matrixdotorg/synapse -f "docker/Dockerfile" . +# Build the workers docker image (from the base Synapse image we just built). +docker build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" . + +# Build the unified Complement image (from the worker Synapse image we just built). +docker build -t complement-synapse \ + -f "docker/complement/Dockerfile" "docker/complement" + +export COMPLEMENT_BASE_IMAGE=complement-synapse + extra_test_args=() test_tags="synapse_blacklist,msc2716,msc3030,msc3787" -# If we're using workers, modify the docker files slightly. -if [[ -n "$WORKERS" ]]; then - # Build the workers docker image (from the base Synapse image). - docker build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" . +# All environment variables starting with PASS_ will be shared. +# (The prefix is stripped off before reaching the container.) +export COMPLEMENT_SHARE_ENV_PREFIX=PASS_ - export COMPLEMENT_BASE_IMAGE=complement-synapse-workers - COMPLEMENT_DOCKERFILE=SynapseWorkers.Dockerfile +if [[ -n "$WORKERS" ]]; then + # Use workers. + export PASS_SYNAPSE_COMPLEMENT_USE_WORKERS=true + + # Workers can only use Postgres as a database. + export PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres # And provide some more configuration to complement. @@ -65,17 +77,18 @@ if [[ -n "$WORKERS" ]]; then # ... 
and it takes longer than 10m to run the whole suite.
     extra_test_args+=("-timeout=60m")
 else
-    export COMPLEMENT_BASE_IMAGE=complement-synapse
-    COMPLEMENT_DOCKERFILE=Dockerfile
+    export PASS_SYNAPSE_COMPLEMENT_USE_WORKERS=
+    if [[ -n "$POSTGRES" ]]; then
+        export PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres
+    else
+        export PASS_SYNAPSE_COMPLEMENT_DATABASE=sqlite
+    fi
 
     # We only test faster room joins on monoliths, because they are purposefully
     # being developed without worker support to start with.
     test_tags="$test_tags,faster_joins"
 fi
 
-# Build the Complement image from the Synapse image we just built.
-docker build -t $COMPLEMENT_BASE_IMAGE -f "docker/complement/$COMPLEMENT_DOCKERFILE" "docker/complement"
-
 # Run the tests!
 echo "Images built; running complement"
 cd "$COMPLEMENT_DIR"

From 3c8f1290b8ccf52462baa6b3e78d1caae2906395 Mon Sep 17 00:00:00 2001
From: reivilibre
Date: Wed, 8 Jun 2022 12:11:02 +0100
Subject: [PATCH 11/85] Use a GitHub form for issues rather than a
 hard-to-read, easy-to-ignore template. (#12982)

Co-authored-by: Brendan Abolivier
---
 .github/ISSUE_TEMPLATE/BUG_REPORT.md | 72 ------------
 .github/ISSUE_TEMPLATE/BUG_REPORT.yml | 103 ++++++++++++++++++++
 changelog.d/12982.misc | 1 +
 3 files changed, 104 insertions(+), 72 deletions(-)
 delete mode 100644 .github/ISSUE_TEMPLATE/BUG_REPORT.md
 create mode 100644 .github/ISSUE_TEMPLATE/BUG_REPORT.yml
 create mode 100644 changelog.d/12982.misc

diff --git a/.github/ISSUE_TEMPLATE/BUG_REPORT.md b/.github/ISSUE_TEMPLATE/BUG_REPORT.md
deleted file mode 100644
index 978b69988..000000000
--- a/.github/ISSUE_TEMPLATE/BUG_REPORT.md
+++ /dev/null
@@ -1,72 +0,0 @@
----
-name: Bug report
-about: Create a report to help us improve
-
----
-
-
-
-### Description
-
-
-
-### Steps to reproduce
-
-- list the steps
-- that reproduce the bug
-- using hyphens as bullet points
-
-
-
-### Version information
-
-
-
-
-- **Homeserver**:
-
-If not matrix.org:
-
-
-- **Version**:
-
-- **Install method**:
-
-
-- **Platform**:
-
diff --git a/.github/ISSUE_TEMPLATE/BUG_REPORT.yml b/.github/ISSUE_TEMPLATE/BUG_REPORT.yml
new file mode 100644
index 000000000..1b304198b
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/BUG_REPORT.yml
@@ -0,0 +1,103 @@
+name: Bug report
+description: Create a report to help us improve
+body:
+  - type: markdown
+    attributes:
+      value: |
+        **THIS IS NOT A SUPPORT CHANNEL!**
+        **IF YOU HAVE SUPPORT QUESTIONS ABOUT RUNNING OR CONFIGURING YOUR OWN HOME SERVER**, please ask in **[#synapse:matrix.org](https://matrix.to/#/#synapse:matrix.org)** (using a matrix.org account if necessary).
+
+        If you want to report a security issue, please see https://matrix.org/security-disclosure-policy/
+
+        This is a bug report form. By following the instructions below and completing the sections with your information, you will help us to get all the necessary data to fix your issue.
+
+        You can also preview your report before submitting it.
+  - type: textarea
+    id: description
+    attributes:
+      label: Description
+      description: Describe the problem that you are experiencing
+    validations:
+      required: true
+  - type: textarea
+    id: reproduction_steps
+    attributes:
+      label: Steps to reproduce
+      description: |
+        Describe the series of steps that leads you to the problem.
+
+        Describe how what happens differs from what you expected.
+      placeholder: Tell us what you see!
+ value: | + - list the steps + - that reproduce the bug + - using hyphens as bullet points + validations: + required: true + - type: markdown + attributes: + value: | + --- + + **IMPORTANT**: please answer the following questions, to help us narrow down the problem. + - type: input + id: homeserver + attributes: + label: Homeserver + description: Which homeserver was this issue identified on? (matrix.org, another homeserver, etc) + validations: + required: true + - type: input + id: version + attributes: + label: Synapse Version + description: | + What version of Synapse is this homeserver running? + + You can find the Synapse version by visiting https://yourserver.example.com/_matrix/federation/v1/version + + or with this command: + + ``` + $ curl http://localhost:8008/_synapse/admin/v1/server_version + ``` + + (You may need to replace `localhost:8008` if Synapse is not configured to listen on that port.) + validations: + required: true + - type: dropdown + id: install_method + attributes: + label: Installation Method + options: + - Docker (matrixdotorg/synapse) + - Debian packages from packages.matrix.org + - pip (from PyPI) + - Other (please mention below) + - type: textarea + id: platform + attributes: + label: Platform + description: | + Tell us about the environment in which your homeserver is operating... + e.g. distro, hardware, if it's running in a vm/container, etc. + validations: + required: true + - type: textarea + id: logs + attributes: + label: Relevant log output + description: | + Please copy and paste any relevant log output, ideally at INFO or DEBUG log level. + This will be automatically formatted into code, so there is no need for backticks. + + Please be careful to remove any personal or private data. + + **Bug reports are usually very difficult to diagnose without logging.** + render: shell + validations: + required: true + - type: textarea + id: anything_else + attributes: + label: Anything else that would be useful to know? diff --git a/changelog.d/12982.misc b/changelog.d/12982.misc new file mode 100644 index 000000000..036b69efe --- /dev/null +++ b/changelog.d/12982.misc @@ -0,0 +1 @@ +Use a GitHub form for issues rather than a hard-to-read, easy-to-ignore template. \ No newline at end of file From dd2d66b0c9f3ff734314fcddb7f90f3225ddb555 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 8 Jun 2022 09:00:35 -0400 Subject: [PATCH 12/85] Move the (unstable) `dir` parameter for /relations behind an experimental flag. (#12984) MSC3715 defines this parameter, but the unstable version of it should be behind an experimental flag. --- changelog.d/12984.misc | 1 + synapse/config/experimental.py | 3 +++ synapse/rest/client/relations.py | 13 ++++++++++--- tests/rest/client/test_relations.py | 1 + 4 files changed, 15 insertions(+), 3 deletions(-) create mode 100644 changelog.d/12984.misc diff --git a/changelog.d/12984.misc b/changelog.d/12984.misc new file mode 100644 index 000000000..a90201718 --- /dev/null +++ b/changelog.d/12984.misc @@ -0,0 +1 @@ +Move [MSC3715](https://github.com/matrix-org/matrix-spec-proposals/pull/3715) behind an experimental config flag. diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index f2dfd49b0..0a285dba3 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -84,3 +84,6 @@ class ExperimentalConfig(Config): # MSC3772: A push rule for mutual relations. self.msc3772_enabled: bool = experimental.get("msc3772_enabled", False) + + # MSC3715: dir param on /relations. 
+        self.msc3715_enabled: bool = experimental.get("msc3715_enabled", False)
diff --git a/synapse/rest/client/relations.py b/synapse/rest/client/relations.py
index 3cae6d2b5..ce9708001 100644
--- a/synapse/rest/client/relations.py
+++ b/synapse/rest/client/relations.py
@@ -43,6 +43,7 @@ class RelationPaginationServlet(RestServlet):
         self.auth = hs.get_auth()
         self.store = hs.get_datastores().main
         self._relations_handler = hs.get_relations_handler()
+        self._msc3715_enabled = hs.config.experimental.msc3715_enabled
 
     async def on_GET(
         self,
@@ -55,9 +56,15 @@ class RelationPaginationServlet(RestServlet):
         requester = await self.auth.get_user_by_req(request, allow_guest=True)
 
         limit = parse_integer(request, "limit", default=5)
-        direction = parse_string(
-            request, "org.matrix.msc3715.dir", default="b", allowed_values=["f", "b"]
-        )
+        if self._msc3715_enabled:
+            direction = parse_string(
+                request,
+                "org.matrix.msc3715.dir",
+                default="b",
+                allowed_values=["f", "b"],
+            )
+        else:
+            direction = "b"
         from_token_str = parse_string(request, "from")
         to_token_str = parse_string(request, "to")
diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py
index 62e4db23e..aa8490654 100644
--- a/tests/rest/client/test_relations.py
+++ b/tests/rest/client/test_relations.py
@@ -728,6 +728,7 @@ class RelationsTestCase(BaseRelationsTestCase):
 
 
 class RelationPaginationTestCase(BaseRelationsTestCase):
+    @unittest.override_config({"experimental_features": {"msc3715_enabled": True}})
     def test_basic_paginate_relations(self) -> None:
         """Tests that calling the pagination API correctly returns the latest relations."""
         channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a")

From 04ca3a52f68275ce85355fb4c56f656080b20c92 Mon Sep 17 00:00:00 2001
From: Nick Mills-Barrett
Date: Thu, 9 Jun 2022 09:44:16 +0100
Subject: [PATCH 13/85] Use READ COMMITTED isolation level when inserting read receipts (#12957)

---
 changelog.d/12957.misc                     | 1 +
 synapse/storage/databases/main/receipts.py | 5 +++++
 2 files changed, 6 insertions(+)
 create mode 100644 changelog.d/12957.misc

diff --git a/changelog.d/12957.misc b/changelog.d/12957.misc
new file mode 100644
index 000000000..0c075276e
--- /dev/null
+++ b/changelog.d/12957.misc
@@ -0,0 +1 @@
+Use lower isolation level when inserting read receipts to avoid serialization errors. Contributed by Nick @ Beeper.
diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py
index 21e954ccc..b6106affa 100644
--- a/synapse/storage/databases/main/receipts.py
+++ b/synapse/storage/databases/main/receipts.py
@@ -36,6 +36,7 @@ from synapse.storage.database import (
     LoggingTransaction,
 )
 from synapse.storage.engines import PostgresEngine
+from synapse.storage.engines._base import IsolationLevel
 from synapse.storage.util.id_generators import (
     AbstractStreamIdTracker,
     MultiWriterIdGenerator,
@@ -764,6 +765,10 @@ class ReceiptsWorkerStore(SQLBaseStore):
             linearized_event_id,
             data,
             stream_id=stream_id,
+            # Read committed is actually beneficial here because we check for a receipt with
+            # greater stream order, and checking the very latest data at select time is better
+            # than the data at transaction start time.
+            isolation_level=IsolationLevel.READ_COMMITTED,
         )
 
         # If the receipt was older than the currently persisted one, nothing to do.
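To make the effect of the lower isolation level concrete, here is a minimal standalone sketch of the check-then-upsert pattern the patch above relies on. It uses psycopg2 directly rather than Synapse's storage layer, and the `receipts` table, its columns, and the unique constraint are invented for the example; only the isolation-level behaviour is the point.

```python
# A hedged sketch, not Synapse's storage API: upsert a receipt only if it is
# newer than the one already stored, running at READ COMMITTED so the check
# sees the latest *committed* row instead of a snapshot from transaction start.
import psycopg2
from psycopg2.extensions import ISOLATION_LEVEL_READ_COMMITTED


def insert_receipt_if_newer(
    dsn: str, room_id: str, user_id: str, event_id: str, stream_id: int
) -> None:
    conn = psycopg2.connect(dsn)
    try:
        # At REPEATABLE READ (Synapse's default on Postgres), two workers
        # doing this concurrently can hit serialization errors; READ
        # COMMITTED avoids that for this pattern.
        conn.set_isolation_level(ISOLATION_LEVEL_READ_COMMITTED)
        with conn, conn.cursor() as cur:
            cur.execute(
                "SELECT stream_id FROM receipts WHERE room_id = %s AND user_id = %s",
                (room_id, user_id),
            )
            row = cur.fetchone()
            if row is not None and row[0] >= stream_id:
                return  # the persisted receipt is already newer: nothing to do
            # Assumes a UNIQUE (room_id, user_id) constraint on the table.
            cur.execute(
                "INSERT INTO receipts (room_id, user_id, event_id, stream_id)"
                " VALUES (%s, %s, %s, %s)"
                " ON CONFLICT (room_id, user_id) DO UPDATE"
                " SET event_id = EXCLUDED.event_id, stream_id = EXCLUDED.stream_id",
                (room_id, user_id, event_id, stream_id),
            )
    finally:
        conn.close()
```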
From 97053c94060ea31d3b9d41a129221ad4b2a76865 Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Thu, 9 Jun 2022 09:48:04 +0100
Subject: [PATCH 14/85] Type annotations for `test_v2` (#12985)

---
 changelog.d/12985.misc |   1 +
 mypy.ini               |   4 +-
 synapse/state/v2.py    |  57 ++++++++++++-----
 tests/state/test_v2.py | 125 +++++++++++++++++++++--------
 4 files changed, 129 insertions(+), 58 deletions(-)
 create mode 100644 changelog.d/12985.misc

diff --git a/changelog.d/12985.misc b/changelog.d/12985.misc
new file mode 100644
index 000000000..d5ab9eede
--- /dev/null
+++ b/changelog.d/12985.misc
@@ -0,0 +1 @@
+Add type annotations to `tests.state.test_v2`.
diff --git a/mypy.ini b/mypy.ini
index fe3e3f9b8..7973f2ac0 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -56,7 +56,6 @@ exclude = (?x)
   |tests/rest/media/v1/test_media_storage.py
   |tests/server.py
   |tests/server_notices/test_resource_limits_server_notices.py
-  |tests/state/test_v2.py
   |tests/test_metrics.py
   |tests/test_server.py
   |tests/test_state.py
@@ -115,6 +114,9 @@ disallow_untyped_defs = False
 [mypy-tests.handlers.test_user_directory]
 disallow_untyped_defs = True
 
+[mypy-tests.state.test_v2]
+disallow_untyped_defs = True
+
 [mypy-tests.storage.test_profile]
 disallow_untyped_defs = True
 
diff --git a/synapse/state/v2.py b/synapse/state/v2.py
index c618df2fd..0e609114e 100644
--- a/synapse/state/v2.py
+++ b/synapse/state/v2.py
@@ -17,12 +17,14 @@ import itertools
 import logging
 from typing import (
     Any,
+    Awaitable,
     Callable,
     Collection,
     Dict,
     Generator,
     Iterable,
     List,
+    Mapping,
     Optional,
     Sequence,
     Set,
@@ -30,33 +32,58 @@ from typing import (
     overload,
 )
 
-from typing_extensions import Literal
+from typing_extensions import Literal, Protocol
 
-import synapse.state
 from synapse import event_auth
 from synapse.api.constants import EventTypes
 from synapse.api.errors import AuthError
 from synapse.api.room_versions import RoomVersion
 from synapse.events import EventBase
 from synapse.types import MutableStateMap, StateMap
-from synapse.util import Clock
 
 logger = logging.getLogger(__name__)
 
 
+class Clock(Protocol):
+    # This is usually synapse.util.Clock, but it's replaced with a FakeClock in tests.
+    # We only ever sleep(0) though, so that other async functions can make forward
+    # progress without waiting for stateres to complete.
+    def sleep(self, duration_ms: float) -> Awaitable[None]:
+        ...
+
+
+class StateResolutionStore(Protocol):
+    # This is usually synapse.state.StateResolutionStore, but it's replaced with a
+    # TestStateResolutionStore in tests.
+    def get_events(
+        self, event_ids: Collection[str], allow_rejected: bool = False
+    ) -> Awaitable[Dict[str, EventBase]]:
+        ...
+
+    def get_auth_chain_difference(
+        self, room_id: str, state_sets: List[Set[str]]
+    ) -> Awaitable[Set[str]]:
+        ...
+
+
 # We want to yield to the reactor occasionally during state res when dealing
 # with large data sets, so that we don't exhaust the reactor. This is done by
 # awaiting the reactor during loops every N iterations.
_AWAIT_AFTER_ITERATIONS = 100 +__all__ = [ + "resolve_events_with_store", +] + + async def resolve_events_with_store( clock: Clock, room_id: str, room_version: RoomVersion, state_sets: Sequence[StateMap[str]], event_map: Optional[Dict[str, EventBase]], - state_res_store: "synapse.state.StateResolutionStore", + state_res_store: StateResolutionStore, ) -> StateMap[str]: """Resolves the state using the v2 state resolution algorithm @@ -194,7 +221,7 @@ async def _get_power_level_for_sender( room_id: str, event_id: str, event_map: Dict[str, EventBase], - state_res_store: "synapse.state.StateResolutionStore", + state_res_store: StateResolutionStore, ) -> int: """Return the power level of the sender of the given event according to their auth events. @@ -243,9 +270,9 @@ async def _get_power_level_for_sender( async def _get_auth_chain_difference( room_id: str, - state_sets: Sequence[StateMap[str]], + state_sets: Sequence[Mapping[Any, str]], event_map: Dict[str, EventBase], - state_res_store: "synapse.state.StateResolutionStore", + state_res_store: StateResolutionStore, ) -> Set[str]: """Compare the auth chains of each state set and return the set of events that only appear in some but not all of the auth chains. @@ -406,7 +433,7 @@ async def _add_event_and_auth_chain_to_graph( room_id: str, event_id: str, event_map: Dict[str, EventBase], - state_res_store: "synapse.state.StateResolutionStore", + state_res_store: StateResolutionStore, auth_diff: Set[str], ) -> None: """Helper function for _reverse_topological_power_sort that add the event @@ -440,7 +467,7 @@ async def _reverse_topological_power_sort( room_id: str, event_ids: Iterable[str], event_map: Dict[str, EventBase], - state_res_store: "synapse.state.StateResolutionStore", + state_res_store: StateResolutionStore, auth_diff: Set[str], ) -> List[str]: """Returns a list of the event_ids sorted by reverse topological ordering, @@ -501,7 +528,7 @@ async def _iterative_auth_checks( event_ids: List[str], base_state: StateMap[str], event_map: Dict[str, EventBase], - state_res_store: "synapse.state.StateResolutionStore", + state_res_store: StateResolutionStore, ) -> MutableStateMap[str]: """Sequentially apply auth checks to each event in given list, updating the state as it goes along. @@ -570,7 +597,7 @@ async def _mainline_sort( event_ids: List[str], resolved_power_event_id: Optional[str], event_map: Dict[str, EventBase], - state_res_store: "synapse.state.StateResolutionStore", + state_res_store: StateResolutionStore, ) -> List[str]: """Returns a sorted list of event_ids sorted by mainline ordering based on the given event resolved_power_event_id @@ -639,7 +666,7 @@ async def _get_mainline_depth_for_event( event: EventBase, mainline_map: Dict[str, int], event_map: Dict[str, EventBase], - state_res_store: "synapse.state.StateResolutionStore", + state_res_store: StateResolutionStore, ) -> int: """Get the mainline depths for the given event based on the mainline map @@ -683,7 +710,7 @@ async def _get_event( room_id: str, event_id: str, event_map: Dict[str, EventBase], - state_res_store: "synapse.state.StateResolutionStore", + state_res_store: StateResolutionStore, allow_none: Literal[False] = False, ) -> EventBase: ... @@ -694,7 +721,7 @@ async def _get_event( room_id: str, event_id: str, event_map: Dict[str, EventBase], - state_res_store: "synapse.state.StateResolutionStore", + state_res_store: StateResolutionStore, allow_none: Literal[True], ) -> Optional[EventBase]: ... 
@@ -704,7 +731,7 @@ async def _get_event( room_id: str, event_id: str, event_map: Dict[str, EventBase], - state_res_store: "synapse.state.StateResolutionStore", + state_res_store: StateResolutionStore, allow_none: bool = False, ) -> Optional[EventBase]: """Helper function to look up event in event_map, falling back to looking diff --git a/tests/state/test_v2.py b/tests/state/test_v2.py index 8370a2719..78b83d97b 100644 --- a/tests/state/test_v2.py +++ b/tests/state/test_v2.py @@ -13,7 +13,17 @@ # limitations under the License. import itertools -from typing import List +from typing import ( + Collection, + Dict, + Iterable, + List, + Mapping, + Optional, + Set, + Tuple, + TypeVar, +) import attr @@ -22,13 +32,13 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, JoinRules, Membership from synapse.api.room_versions import RoomVersions from synapse.event_auth import auth_types_for_event -from synapse.events import make_event_from_dict +from synapse.events import EventBase, make_event_from_dict from synapse.state.v2 import ( _get_auth_chain_difference, lexicographical_topological_sort, resolve_events_with_store, ) -from synapse.types import EventID +from synapse.types import EventID, StateMap from tests import unittest @@ -48,7 +58,7 @@ ORIGIN_SERVER_TS = 0 class FakeClock: - def sleep(self, msec): + def sleep(self, msec: float) -> "defer.Deferred[None]": return defer.succeed(None) @@ -60,7 +70,14 @@ class FakeEvent: as domain. """ - def __init__(self, id, sender, type, state_key, content): + def __init__( + self, + id: str, + sender: str, + type: str, + state_key: Optional[str], + content: Mapping[str, object], + ): self.node_id = id self.event_id = EventID(id, "example.com").to_string() self.sender = sender @@ -69,12 +86,12 @@ class FakeEvent: self.content = content self.room_id = ROOM_ID - def to_event(self, auth_events, prev_events): + def to_event(self, auth_events: List[str], prev_events: List[str]) -> EventBase: """Given the auth_events and prev_events, convert to a Frozen Event Args: - auth_events (list[str]): list of event_ids - prev_events (list[str]): list of event_ids + auth_events: list of event_ids + prev_events: list of event_ids Returns: FrozenEvent @@ -164,7 +181,7 @@ INITIAL_EDGES = ["START", "IMZ", "IMC", "IMB", "IJR", "IPOWER", "IMA", "CREATE"] class StateTestCase(unittest.TestCase): - def test_ban_vs_pl(self): + def test_ban_vs_pl(self) -> None: events = [ FakeEvent( id="PA", @@ -202,7 +219,7 @@ class StateTestCase(unittest.TestCase): self.do_check(events, edges, expected_state_ids) - def test_join_rule_evasion(self): + def test_join_rule_evasion(self) -> None: events = [ FakeEvent( id="JR", @@ -226,7 +243,7 @@ class StateTestCase(unittest.TestCase): self.do_check(events, edges, expected_state_ids) - def test_offtopic_pl(self): + def test_offtopic_pl(self) -> None: events = [ FakeEvent( id="PA", @@ -257,7 +274,7 @@ class StateTestCase(unittest.TestCase): self.do_check(events, edges, expected_state_ids) - def test_topic_basic(self): + def test_topic_basic(self) -> None: events = [ FakeEvent( id="T1", sender=ALICE, type=EventTypes.Topic, state_key="", content={} @@ -297,7 +314,7 @@ class StateTestCase(unittest.TestCase): self.do_check(events, edges, expected_state_ids) - def test_topic_reset(self): + def test_topic_reset(self) -> None: events = [ FakeEvent( id="T1", sender=ALICE, type=EventTypes.Topic, state_key="", content={} @@ -327,7 +344,7 @@ class StateTestCase(unittest.TestCase): self.do_check(events, edges, expected_state_ids) - def 
test_topic(self): + def test_topic(self) -> None: events = [ FakeEvent( id="T1", sender=ALICE, type=EventTypes.Topic, state_key="", content={} @@ -380,7 +397,7 @@ class StateTestCase(unittest.TestCase): self.do_check(events, edges, expected_state_ids) - def test_mainline_sort(self): + def test_mainline_sort(self) -> None: """Tests that the mainline ordering works correctly.""" events = [ @@ -434,22 +451,26 @@ class StateTestCase(unittest.TestCase): self.do_check(events, edges, expected_state_ids) - def do_check(self, events, edges, expected_state_ids): + def do_check( + self, + events: List[FakeEvent], + edges: List[List[str]], + expected_state_ids: List[str], + ) -> None: """Take a list of events and edges and calculate the state of the graph at END, and asserts it matches `expected_state_ids` Args: - events (list[FakeEvent]) - edges (list[list[str]]): A list of chains of event edges, e.g. + events + edges: A list of chains of event edges, e.g. `[[A, B, C]]` are edges A->B and B->C. - expected_state_ids (list[str]): The expected state at END, (excluding + expected_state_ids: The expected state at END, (excluding the keys that haven't changed since START). """ # We want to sort the events into topological order for processing. - graph = {} + graph: Dict[str, Set[str]] = {} - # node_id -> FakeEvent - fake_event_map = {} + fake_event_map: Dict[str, FakeEvent] = {} for ev in itertools.chain(INITIAL_EVENTS, events): graph[ev.node_id] = set() @@ -462,10 +483,8 @@ class StateTestCase(unittest.TestCase): for a, b in pairwise(edge_list): graph[a].add(b) - # event_id -> FrozenEvent - event_map = {} - # node_id -> state - state_at_event = {} + event_map: Dict[str, EventBase] = {} + state_at_event: Dict[str, StateMap[str]] = {} # We copy the map as the sort consumes the graph graph_copy = {k: set(v) for k, v in graph.items()} @@ -496,7 +515,16 @@ class StateTestCase(unittest.TestCase): if fake_event.state_key is not None: state_after[(fake_event.type, fake_event.state_key)] = event_id - auth_types = set(auth_types_for_event(RoomVersions.V6, fake_event)) + # This type ignore is a bit sad. Things we have tried: + # 1. Define a `GenericEvent` Protocol satisfied by FakeEvent, EventBase and + # EventBuilder. But this is Hard because the relevant attributes are + # DictProperty[T] descriptors on EventBase but normal Ts on FakeEvent. + # 2. Define a `GenericEvent` Protocol describing `FakeEvent` only, and + # change this function to accept Union[Event, EventBase, EventBuilder]. + # This seems reasonable to me, but mypy isn't happy. I think that's + # a mypy bug, see https://github.com/python/mypy/issues/5570 + # Instead, resort to a type-ignore. + auth_types = set(auth_types_for_event(RoomVersions.V6, fake_event)) # type: ignore[arg-type] auth_events = [] for key in auth_types: @@ -530,8 +558,14 @@ class StateTestCase(unittest.TestCase): class LexicographicalTestCase(unittest.TestCase): - def test_simple(self): - graph = {"l": {"o"}, "m": {"n", "o"}, "n": {"o"}, "o": set(), "p": {"o"}} + def test_simple(self) -> None: + graph: Dict[str, Set[str]] = { + "l": {"o"}, + "m": {"n", "o"}, + "n": {"o"}, + "o": set(), + "p": {"o"}, + } res = list(lexicographical_topological_sort(graph, key=lambda x: x)) @@ -539,7 +573,7 @@ class LexicographicalTestCase(unittest.TestCase): class SimpleParamStateTestCase(unittest.TestCase): - def setUp(self): + def setUp(self) -> None: # We build up a simple DAG. 
event_map = {} @@ -627,7 +661,7 @@ class SimpleParamStateTestCase(unittest.TestCase): ] } - def test_event_map_none(self): + def test_event_map_none(self) -> None: # Test that we correctly handle passing `None` as the event_map state_d = resolve_events_with_store( @@ -649,7 +683,7 @@ class AuthChainDifferenceTestCase(unittest.TestCase): events. """ - def test_simple(self): + def test_simple(self) -> None: # Test getting the auth difference for a simple chain with a single # unpersisted event: # @@ -695,7 +729,7 @@ class AuthChainDifferenceTestCase(unittest.TestCase): self.assertEqual(difference, {c.event_id}) - def test_multiple_unpersisted_chain(self): + def test_multiple_unpersisted_chain(self) -> None: # Test getting the auth difference for a simple chain with multiple # unpersisted events: # @@ -752,7 +786,7 @@ class AuthChainDifferenceTestCase(unittest.TestCase): self.assertEqual(difference, {d.event_id, c.event_id}) - def test_unpersisted_events_different_sets(self): + def test_unpersisted_events_different_sets(self) -> None: # Test getting the auth difference for with multiple unpersisted events # in different branches: # @@ -820,7 +854,10 @@ class AuthChainDifferenceTestCase(unittest.TestCase): self.assertEqual(difference, {d.event_id, e.event_id}) -def pairwise(iterable): +T = TypeVar("T") + + +def pairwise(iterable: Iterable[T]) -> Iterable[Tuple[T, T]]: "s -> (s0,s1), (s1,s2), (s2, s3), ..." a, b = itertools.tee(iterable) next(b, None) @@ -829,24 +866,26 @@ def pairwise(iterable): @attr.s class TestStateResolutionStore: - event_map = attr.ib() + event_map: Dict[str, EventBase] = attr.ib() - def get_events(self, event_ids, allow_rejected=False): + def get_events( + self, event_ids: Collection[str], allow_rejected: bool = False + ) -> "defer.Deferred[Dict[str, EventBase]]": """Get events from the database Args: - event_ids (list): The event_ids of the events to fetch - allow_rejected (bool): If True return rejected events. + event_ids: The event_ids of the events to fetch + allow_rejected: If True return rejected events. Returns: - Deferred[dict[str, FrozenEvent]]: Dict from event_id to event. + Dict from event_id to event. """ return defer.succeed( {eid: self.event_map[eid] for eid in event_ids if eid in self.event_map} ) - def _get_auth_chain(self, event_ids: List[str]) -> List[str]: + def _get_auth_chain(self, event_ids: Iterable[str]) -> List[str]: """Gets the full auth chain for a set of events (including rejected events). @@ -880,7 +919,9 @@ class TestStateResolutionStore: return list(result) - def get_auth_chain_difference(self, room_id, auth_sets): + def get_auth_chain_difference( + self, room_id: str, auth_sets: List[Set[str]] + ) -> "defer.Deferred[Set[str]]": chains = [frozenset(self._get_auth_chain(a)) for a in auth_sets] common = set(chains[0]).intersection(*chains[1:]) From 7c6b2204d143550d81e5bf9612c4e69fe0866b4c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 9 Jun 2022 11:13:03 +0100 Subject: [PATCH 15/85] Faster joins: add issue links to the TODOs (#13004) ... 
to help us keep track of these things --- changelog.d/13004.misc | 1 + synapse/handlers/federation.py | 13 ++++++++++++- synapse/handlers/federation_event.py | 2 ++ synapse/handlers/message.py | 1 + synapse/storage/controllers/persist_events.py | 5 ++++- synapse/storage/controllers/state.py | 3 +++ synapse/storage/databases/main/room.py | 2 ++ synapse/storage/databases/main/state.py | 1 + synapse/storage/state.py | 1 + 9 files changed, 27 insertions(+), 2 deletions(-) create mode 100644 changelog.d/13004.misc diff --git a/changelog.d/13004.misc b/changelog.d/13004.misc new file mode 100644 index 000000000..d8e93d87a --- /dev/null +++ b/changelog.d/13004.misc @@ -0,0 +1 @@ +Faster joins: add issue links to the TODO comments in the code. diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 6a143440d..5e1613962 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -545,6 +545,7 @@ class FederationHandler: if ret.partial_state: # TODO(faster_joins): roll this back if we don't manage to start the # background resync (eg process_remote_join fails) + # https://github.com/matrix-org/synapse/issues/12998 await self.store.store_partial_state_room(room_id, ret.servers_in_room) max_stream_id = await self._federation_event_handler.process_remote_join( @@ -1506,14 +1507,17 @@ class FederationHandler: # TODO(faster_joins): do we need to lock to avoid races? What happens if other # worker processes kick off a resync in parallel? Perhaps we should just elect # a single worker to do the resync. + # https://github.com/matrix-org/synapse/issues/12994 # # TODO(faster_joins): what happens if we leave the room during a resync? if we # really leave, that might mean we have difficulty getting the room state over # federation. + # https://github.com/matrix-org/synapse/issues/12802 # # TODO(faster_joins): we need some way of prioritising which homeservers in # `other_destinations` to try first, otherwise we'll spend ages trying dead # homeservers for large rooms. + # https://github.com/matrix-org/synapse/issues/12999 if initial_destination is None and len(other_destinations) == 0: raise ValueError( @@ -1543,9 +1547,11 @@ class FederationHandler: # all the events are updated, so we can update current state and # clear the lazy-loading flag. logger.info("Updating current state for %s", room_id) + # TODO(faster_joins): support workers + # https://github.com/matrix-org/synapse/issues/12994 assert ( self._storage_controllers.persistence is not None - ), "TODO(faster_joins): support for workers" + ), "worker-mode deployments not currently supported here" await self._storage_controllers.persistence.update_current_state( room_id ) @@ -1559,6 +1565,8 @@ class FederationHandler: ) # TODO(faster_joins) update room stats and user directory? + # https://github.com/matrix-org/synapse/issues/12814 + # https://github.com/matrix-org/synapse/issues/12815 return # we raced against more events arriving with partial state. Go round @@ -1566,6 +1574,8 @@ class FederationHandler: # TODO(faster_joins): there is still a race here, whereby incoming events which raced # with us will fail to be persisted after the call to `clear_partial_state_room` due to # having partial state. + # https://github.com/matrix-org/synapse/issues/12988 + # continue events = await self.store.get_events_as_list( @@ -1588,6 +1598,7 @@ class FederationHandler: # indefinitely is also not the right thing to do if we can # reach all homeservers and they all claim they don't have # the state we want. 
+ # https://github.com/matrix-org/synapse/issues/13000 logger.error( "Failed to get state for %s at %s from %s because %s, " "giving up!", diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 87a060835..9889d1cb4 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -532,6 +532,7 @@ class FederationEventHandler: # # TODO(faster_joins): we probably need to be more intelligent, and # exclude partial-state prev_events from consideration + # https://github.com/matrix-org/synapse/issues/13001 logger.warning( "%s still has partial state: can't de-partial-state it yet", event.event_id, @@ -777,6 +778,7 @@ class FederationEventHandler: state_ids = await self._resolve_state_at_missing_prevs(origin, event) # TODO(faster_joins): make sure that _resolve_state_at_missing_prevs does # not return partial state + # https://github.com/matrix-org/synapse/issues/13002 await self._process_received_pdu( origin, event, state_ids=state_ids, backfilled=backfilled diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index f455158a2..294217cc2 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1102,6 +1102,7 @@ class EventCreationHandler: # # TODO(faster_joins): figure out how this works, and make sure that the # old state is complete. + # https://github.com/matrix-org/synapse/issues/13003 metadata = await self.store.get_metadata_for_events(state_event_ids) state_map_for_event: MutableStateMap[str] = {} diff --git a/synapse/storage/controllers/persist_events.py b/synapse/storage/controllers/persist_events.py index 4caaa8180..4bcb99d06 100644 --- a/synapse/storage/controllers/persist_events.py +++ b/synapse/storage/controllers/persist_events.py @@ -388,10 +388,13 @@ class EventsPersistenceStorageController: # TODO(faster_joins): get a real stream ordering, to make this work correctly # across workers. + # https://github.com/matrix-org/synapse/issues/12994 # # TODO(faster_joins): this can race against event persistence, in which case we # will end up with incorrect state. Perhaps we should make this a job we - # farm out to the event persister, somehow. + # farm out to the event persister thread, somehow. + # https://github.com/matrix-org/synapse/issues/13007 + # stream_id = self.main_store.get_room_max_stream_ordering() await self.persist_events_store.update_current_state(room_id, delta, stream_id) diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py index 3b4cdb67e..d3a44bc87 100644 --- a/synapse/storage/controllers/state.py +++ b/synapse/storage/controllers/state.py @@ -452,6 +452,9 @@ class StateStorageController: up to date. """ # FIXME(faster_joins): what do we do here? + # https://github.com/matrix-org/synapse/issues/12814 + # https://github.com/matrix-org/synapse/issues/12815 + # https://github.com/matrix-org/synapse/issues/13008 return await self.stores.main.get_partial_current_state_deltas( prev_stream_id, max_stream_id diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 68d4fc2e6..5760d3428 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -1112,6 +1112,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): # this can race with incoming events, so we watch out for FK errors. # TODO(faster_joins): this still doesn't completely fix the race, since the persist process # is not atomic. I fear we need an application-level lock. 
+ # https://github.com/matrix-org/synapse/issues/12988 try: await self.db_pool.runInteraction( "clear_partial_state_room", self._clear_partial_state_room_txn, room_id @@ -1119,6 +1120,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore): return True except self.db_pool.engine.module.DatabaseError as e: # TODO(faster_joins): how do we distinguish between FK errors and other errors? + # https://github.com/matrix-org/synapse/issues/12988 logger.warning( "Exception while clearing lazy partial-state-room %s, retrying: %s", room_id, diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py index 5e6efbd0f..9674c4a75 100644 --- a/synapse/storage/databases/main/state.py +++ b/synapse/storage/databases/main/state.py @@ -435,6 +435,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): ) # TODO(faster_joins): need to do something about workers here + # https://github.com/matrix-org/synapse/issues/12994 txn.call_after(self.is_partial_state_event.invalidate, (event.event_id,)) txn.call_after( self._get_state_group_for_event.prefill, diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 96aaffb53..af3bab2c1 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -546,6 +546,7 @@ class StateFilter: # the sender of a piece of state wasn't actually in the room, then clearly that # state shouldn't have been returned. # We should at least add some tests around this to see what happens. + # https://github.com/matrix-org/synapse/issues/13006 # if we haven't requested membership events, then it depends on the value of # 'include_others' From 1df22e16dfa5534120d83302f87693954f18bf7f Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 9 Jun 2022 07:21:15 -0400 Subject: [PATCH 16/85] Fix example of running complement.sh. (#12990) --- changelog.d/12990.misc | 1 + scripts-dev/complement.sh | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/12990.misc diff --git a/changelog.d/12990.misc b/changelog.d/12990.misc new file mode 100644 index 000000000..c68f6a731 --- /dev/null +++ b/changelog.d/12990.misc @@ -0,0 +1 @@ +Fix documentation for running complement tests. diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index ffd399c39..30b974b95 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -18,7 +18,7 @@ # argument to the script. Complement will then only run those tests. If # no regex is supplied, all tests are run. For example; # -# ./complement.sh "TestOutboundFederation(Profile|Send)" +# ./complement.sh -run "TestOutboundFederation(Profile|Send)" # # Exit if a line returns a non-zero exit code From 3d1d510fa9d6d46e08b347dd8c803e5dd9b8a5fe Mon Sep 17 00:00:00 2001 From: reivilibre Date: Thu, 9 Jun 2022 14:16:34 +0100 Subject: [PATCH 17/85] Enable testing against PostgreSQL databases in Complement CI. (#12965) --- .github/workflows/tests.yml | 12 +++++++++++- changelog.d/12965.misc | 1 + 2 files changed, 12 insertions(+), 1 deletion(-) create mode 100644 changelog.d/12965.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 83ab72737..0b70ffc64 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -310,6 +310,16 @@ jobs: needs: linting-done runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + include: + - arrangement: monolith + database: SQLite + + - arrangement: monolith + database: Postgres + steps: # The path is set via a file given by $GITHUB_PATH. 
We need both Go 1.17 and GOPATH on the path to run Complement.
      # See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path

@@ -337,7 +347,7 @@ jobs:
       - run: |
           set -o pipefail
-          COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
+          POSTGRES=${{ (matrix.database == 'Postgres') && 1 }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
         shell: bash
         name: Run Complement Tests

diff --git a/changelog.d/12965.misc b/changelog.d/12965.misc
new file mode 100644
index 000000000..cc2823e12
--- /dev/null
+++ b/changelog.d/12965.misc
@@ -0,0 +1 @@
+Enable testing against PostgreSQL databases in Complement CI.
\ No newline at end of file

From 81608490e364d5a332b06606aba0703f2fb5d0a9 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Fri, 10 Jun 2022 07:15:51 -0400
Subject: [PATCH 18/85] Stop depending on `room_id` to be returned for children
 state in the hierarchy response. (#12991)

The `room_id` field was removed from MSC2946 before it was accepted. It
was initially kept for backwards compatibility and should be removed now
that the stable form of the API is used.

This change only stops Synapse from validating that it is returned; a
future PR will remove returning it as part of the response.
---
 changelog.d/12991.bugfix                | 2 ++
 synapse/federation/federation_client.py | 4 ----
 tests/handlers/test_room_summary.py     | 2 +-
 3 files changed, 3 insertions(+), 5 deletions(-)
 create mode 100644 changelog.d/12991.bugfix

diff --git a/changelog.d/12991.bugfix b/changelog.d/12991.bugfix
new file mode 100644
index 000000000..c6e388d5b
--- /dev/null
+++ b/changelog.d/12991.bugfix
@@ -0,0 +1,2 @@
+Fix a bug where non-standard information was required when requesting the `/hierarchy` API over federation. Introduced
+in Synapse v1.41.0.
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index ad475a913..66e630556 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -1642,10 +1642,6 @@ def _validate_hierarchy_event(d: JsonDict) -> None:
     if not isinstance(event_type, str):
         raise ValueError("Invalid event: 'event_type' must be a str")
 
-    room_id = d.get("room_id")
-    if not isinstance(room_id, str):
-        raise ValueError("Invalid event: 'room_id' must be a str")
-
     state_key = d.get("state_key")
     if not isinstance(state_key, str):
         raise ValueError("Invalid event: 'state_key' must be a str")
diff --git a/tests/handlers/test_room_summary.py b/tests/handlers/test_room_summary.py
index 054665569..aa650756e 100644
--- a/tests/handlers/test_room_summary.py
+++ b/tests/handlers/test_room_summary.py
@@ -178,7 +178,7 @@ class SpaceSummaryTestCase(unittest.HomeserverTestCase):
             result_room_ids.append(result_room["room_id"])
             result_children_ids.append(
                 [
-                    (cs["room_id"], cs["state_key"])
+                    (result_room["room_id"], cs["state_key"])
                     for cs in result_room["children_state"]
                 ]
             )

From 84cd0fe4e2c45fe3aaa03e74b5c90fc8382ac277 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Fri, 10 Jun 2022 08:30:14 -0400
Subject: [PATCH 19/85] Fix-up the contrib/graph scripts. (#13013)

* Clarifies comments and documentation.
* Adds type-hints.
* Fixes Python 3 compatibility (and runs pyupgrade); see the sketch below.
* Updates for changes in Synapse internals.
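The "Fixes Python 3 compatibility" bullet above boils down to the mechanical substitutions visible in the diff that follows: `urllib2` becomes `urllib.request`, the removed `cgi.escape` becomes `html.escape`, and `FrozenEvent` gives way to `make_event_from_dict`. A small self-contained sketch of the first two (the URL and value here are purely illustrative):

```python
# Python 2 -> Python 3 equivalents used by the updated scripts (sketch only):
import html            # html.escape replaces the removed cgi.escape
import json
import urllib.request  # urllib.request.urlopen replaces urllib2.urlopen


def fetch_json(url: str) -> dict:
    # Python 2: json.loads(urllib2.urlopen(url).read())
    with urllib.request.urlopen(url) as resp:
        return json.loads(resp.read())


# Python 2: cgi.escape(value, quote=True)
label = html.escape("<m.room.member>", quote=True)
```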
--- changelog.d/13013.misc | 1 + contrib/graph/graph.py | 35 ++++++++++++++++++-------------- contrib/graph/graph2.py | 32 ++++++++++++++++++++--------- contrib/graph/graph3.py | 45 ++++++++++++++++++++++++----------------- 4 files changed, 70 insertions(+), 43 deletions(-) create mode 100644 changelog.d/13013.misc diff --git a/changelog.d/13013.misc b/changelog.d/13013.misc new file mode 100644 index 000000000..903c6a3c8 --- /dev/null +++ b/changelog.d/13013.misc @@ -0,0 +1 @@ +Modernize the `contrib/graph/` scripts. diff --git a/contrib/graph/graph.py b/contrib/graph/graph.py index fdbac087b..3c4f47dbd 100644 --- a/contrib/graph/graph.py +++ b/contrib/graph/graph.py @@ -1,11 +1,3 @@ -import argparse -import cgi -import datetime -import json - -import pydot -import urllib2 - # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -20,12 +12,25 @@ import urllib2 # See the License for the specific language governing permissions and # limitations under the License. +import argparse +import cgi +import datetime +import json +import urllib.request +from typing import List -def make_name(pdu_id, origin): - return "%s@%s" % (pdu_id, origin) +import pydot -def make_graph(pdus, room, filename_prefix): +def make_name(pdu_id: str, origin: str) -> str: + return f"{pdu_id}@{origin}" + + +def make_graph(pdus: List[dict], filename_prefix: str) -> None: + """ + Generate a dot and SVG file for a graph of events in the room based on the + topological ordering by querying a homeserver. + """ pdu_map = {} node_map = {} @@ -111,10 +116,10 @@ def make_graph(pdus, room, filename_prefix): graph.write_svg("%s.svg" % filename_prefix, prog="dot") -def get_pdus(host, room): +def get_pdus(host: str, room: str) -> List[dict]: transaction = json.loads( - urllib2.urlopen( - "http://%s/_matrix/federation/v1/context/%s/" % (host, room) + urllib.request.urlopen( + f"http://{host}/_matrix/federation/v1/context/{room}/" ).read() ) @@ -141,4 +146,4 @@ if __name__ == "__main__": pdus = get_pdus(host, room) - make_graph(pdus, room, prefix) + make_graph(pdus, prefix) diff --git a/contrib/graph/graph2.py b/contrib/graph/graph2.py index 0980231e4..b46094ce0 100644 --- a/contrib/graph/graph2.py +++ b/contrib/graph/graph2.py @@ -14,22 +14,31 @@ import argparse -import cgi import datetime +import html import json import sqlite3 import pydot -from synapse.events import FrozenEvent +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS +from synapse.events import make_event_from_dict from synapse.util.frozenutils import unfreeze -def make_graph(db_name, room_id, file_prefix, limit): +def make_graph(db_name: str, room_id: str, file_prefix: str, limit: int) -> None: + """ + Generate a dot and SVG file for a graph of events in the room based on the + topological ordering by reading from a Synapse SQLite database. + """ conn = sqlite3.connect(db_name) + sql = "SELECT room_version FROM rooms WHERE room_id = ?" + c = conn.execute(sql, (room_id,)) + room_version = KNOWN_ROOM_VERSIONS[c.fetchone()[0]] + sql = ( - "SELECT json FROM event_json as j " + "SELECT json, internal_metadata FROM event_json as j " "INNER JOIN events as e ON e.event_id = j.event_id " "WHERE j.room_id = ?" 
 )
@@ -43,7 +52,10 @@ def make_graph(db_name, room_id, file_prefix, limit):
 
     c = conn.execute(sql, args)
 
-    events = [FrozenEvent(json.loads(e[0])) for e in c.fetchall()]
+    events = [
+        make_event_from_dict(json.loads(e[0]), room_version, json.loads(e[1]))
+        for e in c.fetchall()
+    ]
 
     events.sort(key=lambda e: e.depth)
 
@@ -84,7 +96,7 @@ def make_graph(db_name, room_id, file_prefix, limit):
             "name": event.event_id,
             "type": event.type,
             "state_key": event.get("state_key", None),
-            "content": cgi.escape(content, quote=True),
+            "content": html.escape(content, quote=True),
             "time": t,
             "depth": event.depth,
             "state_group": state_group,
@@ -96,11 +108,11 @@ def make_graph(db_name, room_id, file_prefix, limit):
         graph.add_node(node)
 
     for event in events:
-        for prev_id, _ in event.prev_events:
+        for prev_id in event.prev_event_ids():
             try:
                 end_node = node_map[prev_id]
             except Exception:
-                end_node = pydot.Node(name=prev_id, label="<%s>" % (prev_id,))
+                end_node = pydot.Node(name=prev_id, label=f"<{prev_id}>")
 
                 node_map[prev_id] = end_node
                 graph.add_node(end_node)
@@ -112,7 +124,7 @@ def make_graph(db_name, room_id, file_prefix, limit):
         if len(event_ids) <= 1:
             continue
 
-        cluster = pydot.Cluster(str(group), label="<State Group: %s>" % (str(group),))
+        cluster = pydot.Cluster(str(group), label=f"<State Group: {str(group)}>")
 
         for event_id in event_ids:
             cluster.add_node(node_map[event_id])
@@ -126,7 +138,7 @@ def make_graph(db_name, room_id, file_prefix, limit):
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(
         description="Generate a PDU graph for a given room by talking "
-        "to the given homeserver to get the list of PDUs. \n"
+        "to the given Synapse SQLite file to get the list of PDUs. \n"
         "Requires pydot."
     )
     parser.add_argument(
diff --git a/contrib/graph/graph3.py b/contrib/graph/graph3.py
index dd0c19368..a28a1594c 100644
--- a/contrib/graph/graph3.py
+++ b/contrib/graph/graph3.py
@@ -1,13 +1,3 @@
-import argparse
-import cgi
-import datetime
-
-import pydot
-import simplejson as json
-
-from synapse.events import FrozenEvent
-from synapse.util.frozenutils import unfreeze
-
 # Copyright 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -22,15 +12,35 @@ from synapse.util.frozenutils import unfreeze
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import argparse
+import datetime
+import html
+import json
 
-def make_graph(file_name, room_id, file_prefix, limit):
+import pydot
+
+from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
+from synapse.events import make_event_from_dict
+from synapse.util.frozenutils import unfreeze
+
+
+def make_graph(file_name: str, file_prefix: str, limit: int) -> None:
+    """
+    Generate a dot and SVG file for a graph of events in the room based on the
+    topological ordering by reading line-delimited JSON from a file.
+    """
     print("Reading lines")
     with open(file_name) as f:
         lines = f.readlines()
     print("Read lines")
 
-    events = [FrozenEvent(json.loads(line)) for line in lines]
+    # Figure out the room version, assume the first line is the create event.
+ room_version = KNOWN_ROOM_VERSIONS[ + json.loads(lines[0]).get("content", {}).get("room_version") + ] + + events = [make_event_from_dict(json.loads(line), room_version) for line in lines] print("Loaded events.") @@ -66,8 +76,8 @@ def make_graph(file_name, room_id, file_prefix, limit): content.append( "%s: %s," % ( - cgi.escape(key, quote=True).encode("ascii", "xmlcharrefreplace"), - cgi.escape(value, quote=True).encode("ascii", "xmlcharrefreplace"), + html.escape(key, quote=True).encode("ascii", "xmlcharrefreplace"), + html.escape(value, quote=True).encode("ascii", "xmlcharrefreplace"), ) ) @@ -101,11 +111,11 @@ def make_graph(file_name, room_id, file_prefix, limit): print("Created Nodes") for event in events: - for prev_id, _ in event.prev_events: + for prev_id in event.prev_event_ids(): try: end_node = node_map[prev_id] except Exception: - end_node = pydot.Node(name=prev_id, label="<%s>" % (prev_id,)) + end_node = pydot.Node(name=prev_id, label=f"<{prev_id}>") node_map[prev_id] = end_node graph.add_node(end_node) @@ -139,8 +149,7 @@ if __name__ == "__main__": ) parser.add_argument("-l", "--limit", help="Only retrieve the last N events.") parser.add_argument("event_file") - parser.add_argument("room") args = parser.parse_args() - make_graph(args.event_file, args.room, args.prefix, args.limit) + make_graph(args.event_file, args.prefix, args.limit) From 4579445cc54640341ef23cddad9c0518e90be63a Mon Sep 17 00:00:00 2001 From: David Robertson Date: Fri, 10 Jun 2022 15:47:49 +0100 Subject: [PATCH 20/85] Add missing TOC link to poetry cheat sheet (#13022) Missed by #12475. --- changelog.d/13022.doc | 1 + docs/SUMMARY.md | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/13022.doc diff --git a/changelog.d/13022.doc b/changelog.d/13022.doc new file mode 100644 index 000000000..4d6ac7ae9 --- /dev/null +++ b/changelog.d/13022.doc @@ -0,0 +1 @@ +Ensure the [Poetry cheat sheet](https://matrix-org.github.io/synapse/develop/development/dependencies.html) is available in the online documentation. diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 8400a6539..d7cf2df11 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -88,6 +88,7 @@ - [OpenTracing](opentracing.md) - [Database Schemas](development/database_schema.md) - [Experimental features](development/experimental_features.md) + - [Dependency management](development/dependencies.md) - [Synapse Architecture]() - [Cancellation](development/synapse_architecture/cancellation.md) - [Log Contexts](log_contexts.md) From ae09cc2ee45715f3e5c14a5549b3d142a026406f Mon Sep 17 00:00:00 2001 From: David Robertson Date: Fri, 10 Jun 2022 19:32:40 +0100 Subject: [PATCH 21/85] Changelog --- changelog.d/13025.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/13025.misc diff --git a/changelog.d/13025.misc b/changelog.d/13025.misc new file mode 100644 index 000000000..7cb0d174b --- /dev/null +++ b/changelog.d/13025.misc @@ -0,0 +1 @@ +Add type annotations to `synapse.storage.databases.main.devices`. From cfff055fa298d84014aa5cf15c9a09953eefd143 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Fri, 10 Jun 2022 19:33:21 +0100 Subject: [PATCH 22/85] Revert "Changelog" This reverts commit ae09cc2ee45715f3e5c14a5549b3d142a026406f. This commit was intended for a different branch. 
--- changelog.d/13025.misc | 1 - 1 file changed, 1 deletion(-) delete mode 100644 changelog.d/13025.misc diff --git a/changelog.d/13025.misc b/changelog.d/13025.misc deleted file mode 100644 index 7cb0d174b..000000000 --- a/changelog.d/13025.misc +++ /dev/null @@ -1 +0,0 @@ -Add type annotations to `synapse.storage.databases.main.devices`. From 2959184a42398277ff916206235b844a8f7be5d7 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 10 Jun 2022 10:44:19 +0100 Subject: [PATCH 23/85] EventAuthTestCase: build events for the right room version In practice, when we run the auth rules, all of the events have the right room version. Let's stop building Room V1 events for these tests and use the right version. --- tests/test_event_auth.py | 324 +++++++++++++++++++++++++-------------- 1 file changed, 205 insertions(+), 119 deletions(-) diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py index e2c506e5a..1e11fb5da 100644 --- a/tests/test_event_auth.py +++ b/tests/test_event_auth.py @@ -15,10 +15,12 @@ import unittest from typing import Optional +from parameterized import parameterized + from synapse import event_auth from synapse.api.constants import EventContentFields from synapse.api.errors import AuthError -from synapse.api.room_versions import RoomVersions +from synapse.api.room_versions import EventFormatVersions, RoomVersion, RoomVersions from synapse.events import EventBase, make_event_from_dict from synapse.types import JsonDict, get_domain_from_id @@ -30,19 +32,23 @@ class EventAuthTestCase(unittest.TestCase): """ creator = "@creator:example.com" auth_events = [ - _create_event(creator), - _join_event(creator), + _create_event(RoomVersions.V9, creator), + _join_event(RoomVersions.V9, creator), ] # creator should be able to send state event_auth.check_auth_rules_for_event( RoomVersions.V9, - _random_state_event(creator), + _random_state_event(RoomVersions.V9, creator), auth_events, ) # ... but a rejected join_rules event should cause it to be rejected - rejected_join_rules = _join_rules_event(creator, "public") + rejected_join_rules = _join_rules_event( + RoomVersions.V9, + creator, + "public", + ) rejected_join_rules.rejected_reason = "stinky" auth_events.append(rejected_join_rules) @@ -50,18 +56,18 @@ class EventAuthTestCase(unittest.TestCase): AuthError, event_auth.check_auth_rules_for_event, RoomVersions.V9, - _random_state_event(creator), + _random_state_event(RoomVersions.V9, creator), auth_events, ) # ... 
even if there is *also* a good join rules - auth_events.append(_join_rules_event(creator, "public")) + auth_events.append(_join_rules_event(RoomVersions.V9, creator, "public")) self.assertRaises( AuthError, event_auth.check_auth_rules_for_event, RoomVersions.V9, - _random_state_event(creator), + _random_state_event(RoomVersions.V9, creator), auth_events, ) @@ -73,15 +79,15 @@ class EventAuthTestCase(unittest.TestCase): creator = "@creator:example.com" joiner = "@joiner:example.com" auth_events = [ - _create_event(creator), - _join_event(creator), - _join_event(joiner), + _create_event(RoomVersions.V1, creator), + _join_event(RoomVersions.V1, creator), + _join_event(RoomVersions.V1, joiner), ] # creator should be able to send state event_auth.check_auth_rules_for_event( RoomVersions.V1, - _random_state_event(creator), + _random_state_event(RoomVersions.V1, creator), auth_events, ) @@ -90,7 +96,7 @@ class EventAuthTestCase(unittest.TestCase): AuthError, event_auth.check_auth_rules_for_event, RoomVersions.V1, - _random_state_event(joiner), + _random_state_event(RoomVersions.V1, joiner), auth_events, ) @@ -104,13 +110,15 @@ class EventAuthTestCase(unittest.TestCase): king = "@joiner2:example.com" auth_events = [ - _create_event(creator), - _join_event(creator), + _create_event(RoomVersions.V1, creator), + _join_event(RoomVersions.V1, creator), _power_levels_event( - creator, {"state_default": "30", "users": {pleb: "29", king: "30"}} + RoomVersions.V1, + creator, + {"state_default": "30", "users": {pleb: "29", king: "30"}}, ), - _join_event(pleb), - _join_event(king), + _join_event(RoomVersions.V1, pleb), + _join_event(RoomVersions.V1, king), ] # pleb should not be able to send state @@ -118,14 +126,14 @@ class EventAuthTestCase(unittest.TestCase): AuthError, event_auth.check_auth_rules_for_event, RoomVersions.V1, - _random_state_event(pleb), + _random_state_event(RoomVersions.V1, pleb), auth_events, ), # king should be able to send state event_auth.check_auth_rules_for_event( RoomVersions.V1, - _random_state_event(king), + _random_state_event(RoomVersions.V1, king), auth_events, ) @@ -134,14 +142,14 @@ class EventAuthTestCase(unittest.TestCase): creator = "@creator:example.com" other = "@other:example.com" auth_events = [ - _create_event(creator), - _join_event(creator), + _create_event(RoomVersions.V1, creator), + _join_event(RoomVersions.V1, creator), ] # creator should be able to send aliases event_auth.check_auth_rules_for_event( RoomVersions.V1, - _alias_event(creator), + _alias_event(RoomVersions.V1, creator), auth_events, ) @@ -149,7 +157,7 @@ class EventAuthTestCase(unittest.TestCase): with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( RoomVersions.V1, - _alias_event(creator, state_key=""), + _alias_event(RoomVersions.V1, creator, state_key=""), auth_events, ) @@ -157,14 +165,14 @@ class EventAuthTestCase(unittest.TestCase): with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( RoomVersions.V1, - _alias_event(creator, state_key="test.com"), + _alias_event(RoomVersions.V1, creator, state_key="test.com"), auth_events, ) # Note that the member does *not* need to be in the room. 
event_auth.check_auth_rules_for_event( RoomVersions.V1, - _alias_event(other), + _alias_event(RoomVersions.V1, other), auth_events, ) @@ -173,26 +181,26 @@ class EventAuthTestCase(unittest.TestCase): creator = "@creator:example.com" other = "@other:example.com" auth_events = [ - _create_event(creator), - _join_event(creator), + _create_event(RoomVersions.V6, creator), + _join_event(RoomVersions.V6, creator), ] # creator should be able to send aliases event_auth.check_auth_rules_for_event( RoomVersions.V6, - _alias_event(creator), + _alias_event(RoomVersions.V6, creator), auth_events, ) # No particular checks are done on the state key. event_auth.check_auth_rules_for_event( RoomVersions.V6, - _alias_event(creator, state_key=""), + _alias_event(RoomVersions.V6, creator, state_key=""), auth_events, ) event_auth.check_auth_rules_for_event( RoomVersions.V6, - _alias_event(creator, state_key="test.com"), + _alias_event(RoomVersions.V6, creator, state_key="test.com"), auth_events, ) @@ -200,11 +208,12 @@ class EventAuthTestCase(unittest.TestCase): with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( RoomVersions.V6, - _alias_event(other), + _alias_event(RoomVersions.V6, other), auth_events, ) - def test_msc2209(self): + @parameterized.expand([(RoomVersions.V1, True), (RoomVersions.V6, False)]) + def test_notifications(self, room_version: RoomVersion, allow_modification: bool): """ Notifications power levels get checked due to MSC2209. """ @@ -212,28 +221,28 @@ class EventAuthTestCase(unittest.TestCase): pleb = "@joiner:example.com" auth_events = [ - _create_event(creator), - _join_event(creator), + _create_event(room_version, creator), + _join_event(room_version, creator), _power_levels_event( - creator, {"state_default": "30", "users": {pleb: "30"}} + room_version, creator, {"state_default": "30", "users": {pleb: "30"}} ), - _join_event(pleb), + _join_event(room_version, pleb), ] - # pleb should be able to modify the notifications power level. - event_auth.check_auth_rules_for_event( - RoomVersions.V1, - _power_levels_event(pleb, {"notifications": {"room": 100}}), - auth_events, + pl_event = _power_levels_event( + room_version, pleb, {"notifications": {"room": 100}} ) - # But an MSC2209 room rejects this change. - with self.assertRaises(AuthError): - event_auth.check_auth_rules_for_event( - RoomVersions.V6, - _power_levels_event(pleb, {"notifications": {"room": 100}}), - auth_events, - ) + # on room V1, pleb should be able to modify the notifications power level. + if allow_modification: + event_auth.check_auth_rules_for_event(room_version, pl_event, auth_events) + + else: + # But an MSC2209 room rejects this change. + with self.assertRaises(AuthError): + event_auth.check_auth_rules_for_event( + room_version, pl_event, auth_events + ) def test_join_rules_public(self): """ @@ -243,15 +252,17 @@ class EventAuthTestCase(unittest.TestCase): pleb = "@joiner:example.com" auth_events = { - ("m.room.create", ""): _create_event(creator), - ("m.room.member", creator): _join_event(creator), - ("m.room.join_rules", ""): _join_rules_event(creator, "public"), + ("m.room.create", ""): _create_event(RoomVersions.V6, creator), + ("m.room.member", creator): _join_event(RoomVersions.V6, creator), + ("m.room.join_rules", ""): _join_rules_event( + RoomVersions.V6, creator, "public" + ), } # Check join. 
event_auth.check_auth_rules_for_event( RoomVersions.V6, - _join_event(pleb), + _join_event(RoomVersions.V6, pleb), auth_events.values(), ) @@ -259,42 +270,48 @@ class EventAuthTestCase(unittest.TestCase): with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( RoomVersions.V6, - _member_event(pleb, "join", sender=creator), + _member_event(RoomVersions.V6, pleb, "join", sender=creator), auth_events.values(), ) # Banned should be rejected. - auth_events[("m.room.member", pleb)] = _member_event(pleb, "ban") + auth_events[("m.room.member", pleb)] = _member_event( + RoomVersions.V6, pleb, "ban" + ) with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( RoomVersions.V6, - _join_event(pleb), + _join_event(RoomVersions.V6, pleb), auth_events.values(), ) # A user who left can re-join. - auth_events[("m.room.member", pleb)] = _member_event(pleb, "leave") + auth_events[("m.room.member", pleb)] = _member_event( + RoomVersions.V6, pleb, "leave" + ) event_auth.check_auth_rules_for_event( RoomVersions.V6, - _join_event(pleb), + _join_event(RoomVersions.V6, pleb), auth_events.values(), ) # A user can send a join if they're in the room. - auth_events[("m.room.member", pleb)] = _member_event(pleb, "join") + auth_events[("m.room.member", pleb)] = _member_event( + RoomVersions.V6, pleb, "join" + ) event_auth.check_auth_rules_for_event( RoomVersions.V6, - _join_event(pleb), + _join_event(RoomVersions.V6, pleb), auth_events.values(), ) # A user can accept an invite. auth_events[("m.room.member", pleb)] = _member_event( - pleb, "invite", sender=creator + RoomVersions.V6, pleb, "invite", sender=creator ) event_auth.check_auth_rules_for_event( RoomVersions.V6, - _join_event(pleb), + _join_event(RoomVersions.V6, pleb), auth_events.values(), ) @@ -306,16 +323,18 @@ class EventAuthTestCase(unittest.TestCase): pleb = "@joiner:example.com" auth_events = { - ("m.room.create", ""): _create_event(creator), - ("m.room.member", creator): _join_event(creator), - ("m.room.join_rules", ""): _join_rules_event(creator, "invite"), + ("m.room.create", ""): _create_event(RoomVersions.V6, creator), + ("m.room.member", creator): _join_event(RoomVersions.V6, creator), + ("m.room.join_rules", ""): _join_rules_event( + RoomVersions.V6, creator, "invite" + ), } # A join without an invite is rejected. with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( RoomVersions.V6, - _join_event(pleb), + _join_event(RoomVersions.V6, pleb), auth_events.values(), ) @@ -323,47 +342,76 @@ class EventAuthTestCase(unittest.TestCase): with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( RoomVersions.V6, - _member_event(pleb, "join", sender=creator), + _member_event(RoomVersions.V6, pleb, "join", sender=creator), auth_events.values(), ) # Banned should be rejected. - auth_events[("m.room.member", pleb)] = _member_event(pleb, "ban") + auth_events[("m.room.member", pleb)] = _member_event( + RoomVersions.V6, pleb, "ban" + ) with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( RoomVersions.V6, - _join_event(pleb), + _join_event(RoomVersions.V6, pleb), auth_events.values(), ) # A user who left cannot re-join. 
- auth_events[("m.room.member", pleb)] = _member_event(pleb, "leave") + auth_events[("m.room.member", pleb)] = _member_event( + RoomVersions.V6, pleb, "leave" + ) with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( RoomVersions.V6, - _join_event(pleb), + _join_event(RoomVersions.V6, pleb), auth_events.values(), ) # A user can send a join if they're in the room. - auth_events[("m.room.member", pleb)] = _member_event(pleb, "join") + auth_events[("m.room.member", pleb)] = _member_event( + RoomVersions.V6, pleb, "join" + ) event_auth.check_auth_rules_for_event( RoomVersions.V6, - _join_event(pleb), + _join_event(RoomVersions.V6, pleb), auth_events.values(), ) # A user can accept an invite. auth_events[("m.room.member", pleb)] = _member_event( - pleb, "invite", sender=creator + RoomVersions.V6, pleb, "invite", sender=creator ) event_auth.check_auth_rules_for_event( RoomVersions.V6, - _join_event(pleb), + _join_event(RoomVersions.V6, pleb), auth_events.values(), ) - def test_join_rules_msc3083_restricted(self): + def test_join_rules_restricted_old_room(self) -> None: + """Old room versions should reject joins to restricted rooms""" + creator = "@creator:example.com" + pleb = "@joiner:example.com" + + auth_events = { + ("m.room.create", ""): _create_event(RoomVersions.V6, creator), + ("m.room.member", creator): _join_event(RoomVersions.V6, creator), + ("m.room.power_levels", ""): _power_levels_event( + RoomVersions.V6, creator, {"invite": 0} + ), + ("m.room.join_rules", ""): _join_rules_event( + RoomVersions.V6, creator, "restricted" + ), + } + + with self.assertRaises(AuthError): + event_auth.check_auth_rules_for_event( + RoomVersions.V6, + _join_event(RoomVersions.V6, pleb), + auth_events.values(), + ) + + def test_join_rules_msc3083_restricted(self) -> None: """ Test joining a restricted room from MSC3083. @@ -377,22 +425,19 @@ class EventAuthTestCase(unittest.TestCase): pleb = "@joiner:example.com" auth_events = { - ("m.room.create", ""): _create_event(creator), - ("m.room.member", creator): _join_event(creator), - ("m.room.power_levels", ""): _power_levels_event(creator, {"invite": 0}), - ("m.room.join_rules", ""): _join_rules_event(creator, "restricted"), + ("m.room.create", ""): _create_event(RoomVersions.V8, creator), + ("m.room.member", creator): _join_event(RoomVersions.V8, creator), + ("m.room.power_levels", ""): _power_levels_event( + RoomVersions.V8, creator, {"invite": 0} + ), + ("m.room.join_rules", ""): _join_rules_event( + RoomVersions.V8, creator, "restricted" + ), } - # Older room versions don't understand this join rule - with self.assertRaises(AuthError): - event_auth.check_auth_rules_for_event( - RoomVersions.V6, - _join_event(pleb), - auth_events.values(), - ) - # A properly formatted join event should work. authorised_join_event = _join_event( + RoomVersions.V8, pleb, additional_content={ EventContentFields.AUTHORISING_USER: "@creator:example.com" @@ -408,14 +453,17 @@ class EventAuthTestCase(unittest.TestCase): # are done properly). 
pl_auth_events = auth_events.copy() pl_auth_events[("m.room.power_levels", "")] = _power_levels_event( - creator, {"invite": 100, "users": {"@inviter:foo.test": 150}} + RoomVersions.V8, + creator, + {"invite": 100, "users": {"@inviter:foo.test": 150}}, ) pl_auth_events[("m.room.member", "@inviter:foo.test")] = _join_event( - "@inviter:foo.test" + RoomVersions.V8, "@inviter:foo.test" ) event_auth.check_auth_rules_for_event( RoomVersions.V8, _join_event( + RoomVersions.V8, pleb, additional_content={ EventContentFields.AUTHORISING_USER: "@inviter:foo.test" @@ -428,19 +476,22 @@ class EventAuthTestCase(unittest.TestCase): with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( RoomVersions.V8, - _join_event(pleb), + _join_event(RoomVersions.V8, pleb), auth_events.values(), ) # An join authorised by a user who is not in the room is rejected. pl_auth_events = auth_events.copy() pl_auth_events[("m.room.power_levels", "")] = _power_levels_event( - creator, {"invite": 100, "users": {"@other:example.com": 150}} + RoomVersions.V8, + creator, + {"invite": 100, "users": {"@other:example.com": 150}}, ) with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( RoomVersions.V8, _join_event( + RoomVersions.V8, pleb, additional_content={ EventContentFields.AUTHORISING_USER: "@other:example.com" @@ -455,6 +506,7 @@ class EventAuthTestCase(unittest.TestCase): event_auth.check_auth_rules_for_event( RoomVersions.V8, _member_event( + RoomVersions.V8, pleb, "join", sender=creator, @@ -466,7 +518,9 @@ class EventAuthTestCase(unittest.TestCase): ) # Banned should be rejected. - auth_events[("m.room.member", pleb)] = _member_event(pleb, "ban") + auth_events[("m.room.member", pleb)] = _member_event( + RoomVersions.V8, pleb, "ban" + ) with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( RoomVersions.V8, @@ -475,7 +529,9 @@ class EventAuthTestCase(unittest.TestCase): ) # A user who left can re-join. - auth_events[("m.room.member", pleb)] = _member_event(pleb, "leave") + auth_events[("m.room.member", pleb)] = _member_event( + RoomVersions.V8, pleb, "leave" + ) event_auth.check_auth_rules_for_event( RoomVersions.V8, authorised_join_event, @@ -484,21 +540,23 @@ class EventAuthTestCase(unittest.TestCase): # A user can send a join if they're in the room. (This doesn't need to # be authorised since the user is already joined.) - auth_events[("m.room.member", pleb)] = _member_event(pleb, "join") + auth_events[("m.room.member", pleb)] = _member_event( + RoomVersions.V8, pleb, "join" + ) event_auth.check_auth_rules_for_event( RoomVersions.V8, - _join_event(pleb), + _join_event(RoomVersions.V8, pleb), auth_events.values(), ) # A user can accept an invite. (This doesn't need to be authorised since # the user was invited.) 
auth_events[("m.room.member", pleb)] = _member_event( - pleb, "invite", sender=creator + RoomVersions.V8, pleb, "invite", sender=creator ) event_auth.check_auth_rules_for_event( RoomVersions.V8, - _join_event(pleb), + _join_event(RoomVersions.V8, pleb), auth_events.values(), ) @@ -508,20 +566,25 @@ class EventAuthTestCase(unittest.TestCase): TEST_ROOM_ID = "!test:room" -def _create_event(user_id: str) -> EventBase: +def _create_event( + room_version: RoomVersion, + user_id: str, +) -> EventBase: return make_event_from_dict( { "room_id": TEST_ROOM_ID, - "event_id": _get_event_id(), + **_maybe_get_event_id_dict_for_room_version(room_version), "type": "m.room.create", "state_key": "", "sender": user_id, "content": {"creator": user_id}, - } + }, + room_version=room_version, ) def _member_event( + room_version: RoomVersion, user_id: str, membership: str, sender: Optional[str] = None, @@ -530,79 +593,102 @@ def _member_event( return make_event_from_dict( { "room_id": TEST_ROOM_ID, - "event_id": _get_event_id(), + **_maybe_get_event_id_dict_for_room_version(room_version), "type": "m.room.member", "sender": sender or user_id, "state_key": user_id, "content": {"membership": membership, **(additional_content or {})}, "prev_events": [], - } + }, + room_version=room_version, ) -def _join_event(user_id: str, additional_content: Optional[dict] = None) -> EventBase: - return _member_event(user_id, "join", additional_content=additional_content) +def _join_event( + room_version: RoomVersion, + user_id: str, + additional_content: Optional[dict] = None, +) -> EventBase: + return _member_event( + room_version, + user_id, + "join", + additional_content=additional_content, + ) -def _power_levels_event(sender: str, content: JsonDict) -> EventBase: +def _power_levels_event( + room_version: RoomVersion, + sender: str, + content: JsonDict, +) -> EventBase: return make_event_from_dict( { "room_id": TEST_ROOM_ID, - "event_id": _get_event_id(), + **_maybe_get_event_id_dict_for_room_version(room_version), "type": "m.room.power_levels", "sender": sender, "state_key": "", "content": content, - } + }, + room_version=room_version, ) -def _alias_event(sender: str, **kwargs) -> EventBase: +def _alias_event(room_version: RoomVersion, sender: str, **kwargs) -> EventBase: data = { "room_id": TEST_ROOM_ID, - "event_id": _get_event_id(), + **_maybe_get_event_id_dict_for_room_version(room_version), "type": "m.room.aliases", "sender": sender, "state_key": get_domain_from_id(sender), "content": {"aliases": []}, } data.update(**kwargs) - return make_event_from_dict(data) + return make_event_from_dict(data, room_version=room_version) -def _random_state_event(sender: str) -> EventBase: +def _random_state_event(room_version: RoomVersion, sender: str) -> EventBase: return make_event_from_dict( { "room_id": TEST_ROOM_ID, - "event_id": _get_event_id(), + **_maybe_get_event_id_dict_for_room_version(room_version), "type": "test.state", "sender": sender, "state_key": "", "content": {"membership": "join"}, - } + }, + room_version=room_version, ) -def _join_rules_event(sender: str, join_rule: str) -> EventBase: +def _join_rules_event( + room_version: RoomVersion, sender: str, join_rule: str +) -> EventBase: return make_event_from_dict( { "room_id": TEST_ROOM_ID, - "event_id": _get_event_id(), + **_maybe_get_event_id_dict_for_room_version(room_version), "type": "m.room.join_rules", "sender": sender, "state_key": "", "content": { "join_rule": join_rule, }, - } + }, + room_version=room_version, ) event_count = 0 -def _get_event_id() -> str: +def 
_maybe_get_event_id_dict_for_room_version(room_version: RoomVersion) -> dict: + """If this room version needs it, generate an event id""" + if room_version.event_format != EventFormatVersions.V1: + return {} + global event_count c = event_count event_count += 1 - return "!%i:example.com" % (c,) + return {"event_id": "!%i:example.com" % (c,)} From 68be42f6b6433e93c7dccc0eae70177500ca60bc Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 9 Jun 2022 15:51:34 +0100 Subject: [PATCH 24/85] Remove `room_version` param from `validate_event_for_room_version` Instead, use the `room_version` property of the event we're validating. The `room_version` was originally added as a parameter somewhere around #4482, but really it's been redundant since #6875 added a `room_version` field to `EventBase`. --- synapse/event_auth.py | 12 ++++-------- synapse/events/validator.py | 4 ++++ synapse/handlers/federation.py | 4 ++-- synapse/handlers/federation_event.py | 4 ++-- synapse/handlers/message.py | 2 +- synapse/handlers/room.py | 2 +- 6 files changed, 14 insertions(+), 14 deletions(-) diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 4c0b587a7..77f90558d 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -45,9 +45,7 @@ if typing.TYPE_CHECKING: logger = logging.getLogger(__name__) -def validate_event_for_room_version( - room_version_obj: RoomVersion, event: "EventBase" -) -> None: +def validate_event_for_room_version(event: "EventBase") -> None: """Ensure that the event complies with the limits, and has the right signatures NB: does not *validate* the signatures - it assumes that any signatures present @@ -60,12 +58,10 @@ def validate_event_for_room_version( NB: This is used to check events that have been received over federation. As such, it can only enforce the checks specified in the relevant room version, to avoid a split-brain situation where some servers accept such events, and others reject - them. - - TODO: consider moving this into EventValidator + them. See also EventValidator, which contains extra checks which are applied only to + locally-generated events. Args: - room_version_obj: the version of the room which contains this event event: the event to be checked Raises: @@ -103,7 +99,7 @@ def validate_event_for_room_version( raise AuthError(403, "Event not signed by sending server") is_invite_via_allow_rule = ( - room_version_obj.msc3083_join_rules + event.room_version.msc3083_join_rules and event.type == EventTypes.Member and event.membership == Membership.JOIN and EventContentFields.AUTHORISING_USER in event.content diff --git a/synapse/events/validator.py b/synapse/events/validator.py index 29fa9b388..27c8beba2 100644 --- a/synapse/events/validator.py +++ b/synapse/events/validator.py @@ -35,6 +35,10 @@ class EventValidator: def validate_new(self, event: EventBase, config: HomeServerConfig) -> None: """Validates the event has roughly the right format + Suitable for checking a locally-created event. It has stricter checks than + is appropriate for an event received over federation (for which, see + event_auth.validate_event_for_room_version) + Args: event: The event to validate. config: The homeserver's configuration. 
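As a hypothetical sketch of the two validation paths described in the
docstrings above (the surrounding glue is invented for illustration; only the
two calls themselves are taken from this patch):

    # Event received over federation: apply only the checks mandated by the
    # event's own room version, to avoid a split-brain between servers.
    validate_event_for_room_version(event)

    # Locally-generated event: additionally apply the stricter checks.
    EventValidator().validate_new(event, config)

Here `event` is an EventBase and `config` the homeserver's configuration, as
in the signatures above.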
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 6a143440d..b59641776 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1206,7 +1206,7 @@ class FederationHandler: event.internal_metadata.send_on_behalf_of = self.hs.hostname try: - validate_event_for_room_version(room_version_obj, event) + validate_event_for_room_version(event) await self._event_auth_handler.check_auth_rules_from_context( room_version_obj, event, context ) @@ -1258,7 +1258,7 @@ class FederationHandler: ) try: - validate_event_for_room_version(room_version_obj, event) + validate_event_for_room_version(event) await self._event_auth_handler.check_auth_rules_from_context( room_version_obj, event, context ) diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 87a060835..420ad8b96 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -1453,7 +1453,7 @@ class FederationEventHandler: context = EventContext.for_outlier(self._storage_controllers) try: - validate_event_for_room_version(room_version_obj, event) + validate_event_for_room_version(event) check_auth_rules_for_event(room_version_obj, event, auth) except AuthError as e: logger.warning("Rejecting %r because %s", event, e) @@ -1501,7 +1501,7 @@ class FederationEventHandler: room_version_obj = KNOWN_ROOM_VERSIONS[room_version] try: - validate_event_for_room_version(room_version_obj, event) + validate_event_for_room_version(event) except AuthError as e: logger.warning("While validating received event %r: %s", event, e) # TODO: use a different rejected reason here? diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index f455158a2..b078e2424 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1297,7 +1297,7 @@ class EventCreationHandler: assert event.content["membership"] == Membership.LEAVE else: try: - validate_event_for_room_version(room_version_obj, event) + validate_event_for_room_version(event) await self._event_auth_handler.check_auth_rules_from_context( room_version_obj, event, context ) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 520663f17..44d978407 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -227,7 +227,7 @@ class RoomCreationHandler: }, ) old_room_version = await self.store.get_room_version(old_room_id) - validate_event_for_room_version(old_room_version, tombstone_event) + validate_event_for_room_version(tombstone_event) await self._event_auth_handler.check_auth_rules_from_context( old_room_version, tombstone_event, tombstone_context ) From 0d9d36b15c3cd2851f1cf3452c096480f568e6cb Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 10 Jun 2022 10:48:25 +0100 Subject: [PATCH 25/85] Remove `room_version` param from `check_auth_rules_for_event` Instead, use the `room_version` property of the event we're checking. The `room_version` was originally added as a parameter somewhere around #4482, but really it's been redundant since #6875 added a `room_version` field to `EventBase`. 
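Illustratively, a call site changes roughly like this (a sketch only, assuming
an async caller with a `store` handle like those used in the diff below; the
diff itself is authoritative):

    # Before: the caller had to look up the room version and pass it in.
    room_version_id = await store.get_room_version_id(event.room_id)
    room_version_obj = KNOWN_ROOM_VERSIONS[room_version_id]
    check_auth_rules_for_event(room_version_obj, event, auth_events)

    # After: the event's own `room_version` property is used internally.
    check_auth_rules_for_event(event, auth_events)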
--- synapse/event_auth.py | 15 +++++----- synapse/handlers/event_auth.py | 2 +- synapse/handlers/federation_event.py | 16 +++-------- synapse/state/v1.py | 4 +-- synapse/state/v2.py | 1 - tests/test_event_auth.py | 43 ++-------------------------- 6 files changed, 16 insertions(+), 65 deletions(-) diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 77f90558d..e23503c1e 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -113,7 +113,6 @@ def validate_event_for_room_version(event: "EventBase") -> None: def check_auth_rules_for_event( - room_version_obj: RoomVersion, event: "EventBase", auth_events: Iterable["EventBase"], ) -> None: @@ -132,7 +131,6 @@ def check_auth_rules_for_event( a bunch of other tests. Args: - room_version_obj: the version of the room event: the event being checked. auth_events: the room state to check the events against. @@ -201,7 +199,10 @@ def check_auth_rules_for_event( raise AuthError(403, "This room has been marked as unfederatable.") # 4. If type is m.room.aliases - if event.type == EventTypes.Aliases and room_version_obj.special_case_aliases_auth: + if ( + event.type == EventTypes.Aliases + and event.room_version.special_case_aliases_auth + ): # 4a. If event has no state_key, reject if not event.is_state(): raise AuthError(403, "Alias event must be a state event") @@ -221,7 +222,7 @@ def check_auth_rules_for_event( # 5. If type is m.room.membership if event.type == EventTypes.Member: - _is_membership_change_allowed(room_version_obj, event, auth_dict) + _is_membership_change_allowed(event.room_version, event, auth_dict) logger.debug("Allowing! %s", event) return @@ -243,17 +244,17 @@ def check_auth_rules_for_event( _can_send_event(event, auth_dict) if event.type == EventTypes.PowerLevels: - _check_power_levels(room_version_obj, event, auth_dict) + _check_power_levels(event.room_version, event, auth_dict) if event.type == EventTypes.Redaction: - check_redaction(room_version_obj, event, auth_dict) + check_redaction(event.room_version, event, auth_dict) if ( event.type == EventTypes.MSC2716_INSERTION or event.type == EventTypes.MSC2716_BATCH or event.type == EventTypes.MSC2716_MARKER ): - check_historical(room_version_obj, event, auth_dict) + check_historical(event.room_version, event, auth_dict) logger.debug("Allowing! 
%s", event) diff --git a/synapse/handlers/event_auth.py b/synapse/handlers/event_auth.py index 6bed46435..7bbb833f3 100644 --- a/synapse/handlers/event_auth.py +++ b/synapse/handlers/event_auth.py @@ -55,7 +55,7 @@ class EventAuthHandler: """Check an event passes the auth rules at its own auth events""" auth_event_ids = event.auth_event_ids() auth_events_by_id = await self._store.get_events(auth_event_ids) - check_auth_rules_for_event(room_version_obj, event, auth_events_by_id.values()) + check_auth_rules_for_event(event, auth_events_by_id.values()) def compute_auth_events( self, diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 420ad8b96..9488fef29 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -1428,9 +1428,6 @@ class FederationEventHandler: allow_rejected=True, ) - room_version = await self._store.get_room_version_id(room_id) - room_version_obj = KNOWN_ROOM_VERSIONS[room_version] - def prep(event: EventBase) -> Optional[Tuple[EventBase, EventContext]]: with nested_logging_context(suffix=event.event_id): auth = [] @@ -1454,7 +1451,7 @@ class FederationEventHandler: context = EventContext.for_outlier(self._storage_controllers) try: validate_event_for_room_version(event) - check_auth_rules_for_event(room_version_obj, event, auth) + check_auth_rules_for_event(event, auth) except AuthError as e: logger.warning("Rejecting %r because %s", event, e) context.rejected = RejectedReason.AUTH_ERROR @@ -1497,9 +1494,6 @@ class FederationEventHandler: assert not event.internal_metadata.outlier # first of all, check that the event itself is valid. - room_version = await self._store.get_room_version_id(event.room_id) - room_version_obj = KNOWN_ROOM_VERSIONS[room_version] - try: validate_event_for_room_version(event) except AuthError as e: @@ -1519,7 +1513,7 @@ class FederationEventHandler: # ... and check that the event passes auth at those auth events. 
try: - check_auth_rules_for_event(room_version_obj, event, claimed_auth_events) + check_auth_rules_for_event(event, claimed_auth_events) except AuthError as e: logger.warning( "While checking auth of %r against auth_events: %s", event, e @@ -1567,9 +1561,7 @@ class FederationEventHandler: auth_events_for_auth = calculated_auth_event_map try: - check_auth_rules_for_event( - room_version_obj, event, auth_events_for_auth.values() - ) + check_auth_rules_for_event(event, auth_events_for_auth.values()) except AuthError as e: logger.warning("Failed auth resolution for %r because %s", event, e) context.rejected = RejectedReason.AUTH_ERROR @@ -1669,7 +1661,7 @@ class FederationEventHandler: ) try: - check_auth_rules_for_event(room_version_obj, event, current_auth_events) + check_auth_rules_for_event(event, current_auth_events) except AuthError as e: logger.warning( "Soft-failing %r (from %s) because %s", diff --git a/synapse/state/v1.py b/synapse/state/v1.py index 499a32820..8bbb4ce41 100644 --- a/synapse/state/v1.py +++ b/synapse/state/v1.py @@ -30,7 +30,7 @@ from typing import ( from synapse import event_auth from synapse.api.constants import EventTypes from synapse.api.errors import AuthError -from synapse.api.room_versions import RoomVersion, RoomVersions +from synapse.api.room_versions import RoomVersion from synapse.events import EventBase from synapse.types import MutableStateMap, StateMap @@ -331,7 +331,6 @@ def _resolve_auth_events( try: # The signatures have already been checked at this point event_auth.check_auth_rules_for_event( - RoomVersions.V1, event, auth_events.values(), ) @@ -349,7 +348,6 @@ def _resolve_normal_events( try: # The signatures have already been checked at this point event_auth.check_auth_rules_for_event( - RoomVersions.V1, event, auth_events.values(), ) diff --git a/synapse/state/v2.py b/synapse/state/v2.py index c618df2fd..041ccac59 100644 --- a/synapse/state/v2.py +++ b/synapse/state/v2.py @@ -547,7 +547,6 @@ async def _iterative_auth_checks( try: event_auth.check_auth_rules_for_event( - room_version, event, auth_events.values(), ) diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py index 1e11fb5da..229ecd84a 100644 --- a/tests/test_event_auth.py +++ b/tests/test_event_auth.py @@ -38,7 +38,6 @@ class EventAuthTestCase(unittest.TestCase): # creator should be able to send state event_auth.check_auth_rules_for_event( - RoomVersions.V9, _random_state_event(RoomVersions.V9, creator), auth_events, ) @@ -55,7 +54,6 @@ class EventAuthTestCase(unittest.TestCase): self.assertRaises( AuthError, event_auth.check_auth_rules_for_event, - RoomVersions.V9, _random_state_event(RoomVersions.V9, creator), auth_events, ) @@ -66,7 +64,6 @@ class EventAuthTestCase(unittest.TestCase): self.assertRaises( AuthError, event_auth.check_auth_rules_for_event, - RoomVersions.V9, _random_state_event(RoomVersions.V9, creator), auth_events, ) @@ -86,7 +83,6 @@ class EventAuthTestCase(unittest.TestCase): # creator should be able to send state event_auth.check_auth_rules_for_event( - RoomVersions.V1, _random_state_event(RoomVersions.V1, creator), auth_events, ) @@ -95,7 +91,6 @@ class EventAuthTestCase(unittest.TestCase): self.assertRaises( AuthError, event_auth.check_auth_rules_for_event, - RoomVersions.V1, _random_state_event(RoomVersions.V1, joiner), auth_events, ) @@ -125,14 +120,12 @@ class EventAuthTestCase(unittest.TestCase): self.assertRaises( AuthError, event_auth.check_auth_rules_for_event, - RoomVersions.V1, _random_state_event(RoomVersions.V1, pleb), auth_events, ), # king 
should be able to send state event_auth.check_auth_rules_for_event( - RoomVersions.V1, _random_state_event(RoomVersions.V1, king), auth_events, ) @@ -148,7 +141,6 @@ class EventAuthTestCase(unittest.TestCase): # creator should be able to send aliases event_auth.check_auth_rules_for_event( - RoomVersions.V1, _alias_event(RoomVersions.V1, creator), auth_events, ) @@ -156,7 +148,6 @@ class EventAuthTestCase(unittest.TestCase): # Reject an event with no state key. with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( - RoomVersions.V1, _alias_event(RoomVersions.V1, creator, state_key=""), auth_events, ) @@ -164,14 +155,12 @@ class EventAuthTestCase(unittest.TestCase): # If the domain of the sender does not match the state key, reject. with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( - RoomVersions.V1, _alias_event(RoomVersions.V1, creator, state_key="test.com"), auth_events, ) # Note that the member does *not* need to be in the room. event_auth.check_auth_rules_for_event( - RoomVersions.V1, _alias_event(RoomVersions.V1, other), auth_events, ) @@ -187,19 +176,16 @@ class EventAuthTestCase(unittest.TestCase): # creator should be able to send aliases event_auth.check_auth_rules_for_event( - RoomVersions.V6, _alias_event(RoomVersions.V6, creator), auth_events, ) # No particular checks are done on the state key. event_auth.check_auth_rules_for_event( - RoomVersions.V6, _alias_event(RoomVersions.V6, creator, state_key=""), auth_events, ) event_auth.check_auth_rules_for_event( - RoomVersions.V6, _alias_event(RoomVersions.V6, creator, state_key="test.com"), auth_events, ) @@ -207,7 +193,6 @@ class EventAuthTestCase(unittest.TestCase): # Per standard auth rules, the member must be in the room. with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( - RoomVersions.V6, _alias_event(RoomVersions.V6, other), auth_events, ) @@ -235,14 +220,12 @@ class EventAuthTestCase(unittest.TestCase): # on room V1, pleb should be able to modify the notifications power level. if allow_modification: - event_auth.check_auth_rules_for_event(room_version, pl_event, auth_events) + event_auth.check_auth_rules_for_event(pl_event, auth_events) else: # But an MSC2209 room rejects this change. with self.assertRaises(AuthError): - event_auth.check_auth_rules_for_event( - room_version, pl_event, auth_events - ) + event_auth.check_auth_rules_for_event(pl_event, auth_events) def test_join_rules_public(self): """ @@ -261,7 +244,6 @@ class EventAuthTestCase(unittest.TestCase): # Check join. event_auth.check_auth_rules_for_event( - RoomVersions.V6, _join_event(RoomVersions.V6, pleb), auth_events.values(), ) @@ -269,7 +251,6 @@ class EventAuthTestCase(unittest.TestCase): # A user cannot be force-joined to a room. 
with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( - RoomVersions.V6, _member_event(RoomVersions.V6, pleb, "join", sender=creator), auth_events.values(), ) @@ -280,7 +261,6 @@ class EventAuthTestCase(unittest.TestCase): ) with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( - RoomVersions.V6, _join_event(RoomVersions.V6, pleb), auth_events.values(), ) @@ -290,7 +270,6 @@ class EventAuthTestCase(unittest.TestCase): RoomVersions.V6, pleb, "leave" ) event_auth.check_auth_rules_for_event( - RoomVersions.V6, _join_event(RoomVersions.V6, pleb), auth_events.values(), ) @@ -300,7 +279,6 @@ class EventAuthTestCase(unittest.TestCase): RoomVersions.V6, pleb, "join" ) event_auth.check_auth_rules_for_event( - RoomVersions.V6, _join_event(RoomVersions.V6, pleb), auth_events.values(), ) @@ -310,7 +288,6 @@ class EventAuthTestCase(unittest.TestCase): RoomVersions.V6, pleb, "invite", sender=creator ) event_auth.check_auth_rules_for_event( - RoomVersions.V6, _join_event(RoomVersions.V6, pleb), auth_events.values(), ) @@ -333,7 +310,6 @@ class EventAuthTestCase(unittest.TestCase): # A join without an invite is rejected. with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( - RoomVersions.V6, _join_event(RoomVersions.V6, pleb), auth_events.values(), ) @@ -341,7 +317,6 @@ class EventAuthTestCase(unittest.TestCase): # A user cannot be force-joined to a room. with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( - RoomVersions.V6, _member_event(RoomVersions.V6, pleb, "join", sender=creator), auth_events.values(), ) @@ -352,7 +327,6 @@ class EventAuthTestCase(unittest.TestCase): ) with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( - RoomVersions.V6, _join_event(RoomVersions.V6, pleb), auth_events.values(), ) @@ -363,7 +337,6 @@ class EventAuthTestCase(unittest.TestCase): ) with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( - RoomVersions.V6, _join_event(RoomVersions.V6, pleb), auth_events.values(), ) @@ -373,7 +346,6 @@ class EventAuthTestCase(unittest.TestCase): RoomVersions.V6, pleb, "join" ) event_auth.check_auth_rules_for_event( - RoomVersions.V6, _join_event(RoomVersions.V6, pleb), auth_events.values(), ) @@ -383,7 +355,6 @@ class EventAuthTestCase(unittest.TestCase): RoomVersions.V6, pleb, "invite", sender=creator ) event_auth.check_auth_rules_for_event( - RoomVersions.V6, _join_event(RoomVersions.V6, pleb), auth_events.values(), ) @@ -406,7 +377,6 @@ class EventAuthTestCase(unittest.TestCase): with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( - RoomVersions.V6, _join_event(RoomVersions.V6, pleb), auth_events.values(), ) @@ -444,7 +414,6 @@ class EventAuthTestCase(unittest.TestCase): }, ) event_auth.check_auth_rules_for_event( - RoomVersions.V8, authorised_join_event, auth_events.values(), ) @@ -461,7 +430,6 @@ class EventAuthTestCase(unittest.TestCase): RoomVersions.V8, "@inviter:foo.test" ) event_auth.check_auth_rules_for_event( - RoomVersions.V8, _join_event( RoomVersions.V8, pleb, @@ -475,7 +443,6 @@ class EventAuthTestCase(unittest.TestCase): # A join which is missing an authorised server is rejected. 
with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( - RoomVersions.V8, _join_event(RoomVersions.V8, pleb), auth_events.values(), ) @@ -489,7 +456,6 @@ class EventAuthTestCase(unittest.TestCase): ) with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( - RoomVersions.V8, _join_event( RoomVersions.V8, pleb, @@ -504,7 +470,6 @@ class EventAuthTestCase(unittest.TestCase): # *would* be valid, but is sent be a different user.) with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( - RoomVersions.V8, _member_event( RoomVersions.V8, pleb, @@ -523,7 +488,6 @@ class EventAuthTestCase(unittest.TestCase): ) with self.assertRaises(AuthError): event_auth.check_auth_rules_for_event( - RoomVersions.V8, authorised_join_event, auth_events.values(), ) @@ -533,7 +497,6 @@ class EventAuthTestCase(unittest.TestCase): RoomVersions.V8, pleb, "leave" ) event_auth.check_auth_rules_for_event( - RoomVersions.V8, authorised_join_event, auth_events.values(), ) @@ -544,7 +507,6 @@ class EventAuthTestCase(unittest.TestCase): RoomVersions.V8, pleb, "join" ) event_auth.check_auth_rules_for_event( - RoomVersions.V8, _join_event(RoomVersions.V8, pleb), auth_events.values(), ) @@ -555,7 +517,6 @@ class EventAuthTestCase(unittest.TestCase): RoomVersions.V8, pleb, "invite", sender=creator ) event_auth.check_auth_rules_for_event( - RoomVersions.V8, _join_event(RoomVersions.V8, pleb), auth_events.values(), ) From c1b28b8842849a2b3e62025d378997861c041932 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 10 Jun 2022 11:01:55 +0100 Subject: [PATCH 26/85] Remove redundant `room_version` param from `check_auth_rules_from_context` It's now implied by the room_version property on the event. --- synapse/handlers/event_auth.py | 1 - synapse/handlers/federation.py | 18 +++++------------- synapse/handlers/message.py | 21 ++------------------- synapse/handlers/room.py | 3 +-- 4 files changed, 8 insertions(+), 35 deletions(-) diff --git a/synapse/handlers/event_auth.py b/synapse/handlers/event_auth.py index 7bbb833f3..ed4149bd5 100644 --- a/synapse/handlers/event_auth.py +++ b/synapse/handlers/event_auth.py @@ -48,7 +48,6 @@ class EventAuthHandler: async def check_auth_rules_from_context( self, - room_version_obj: RoomVersion, event: EventBase, context: EventContext, ) -> None: diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index b59641776..6310f0ef2 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -799,9 +799,7 @@ class FederationHandler: # The remote hasn't signed it yet, obviously. We'll do the full checks # when we get the event back in `on_send_join_request` - await self._event_auth_handler.check_auth_rules_from_context( - room_version, event, context - ) + await self._event_auth_handler.check_auth_rules_from_context(event, context) return event async def on_invite_request( @@ -972,9 +970,7 @@ class FederationHandler: try: # The remote hasn't signed it yet, obviously. We'll do the full checks # when we get the event back in `on_send_leave_request` - await self._event_auth_handler.check_auth_rules_from_context( - room_version_obj, event, context - ) + await self._event_auth_handler.check_auth_rules_from_context(event, context) except AuthError as e: logger.warning("Failed to create new leave %r because %s", event, e) raise e @@ -1033,9 +1029,7 @@ class FederationHandler: try: # The remote hasn't signed it yet, obviously. 
We'll do the full checks # when we get the event back in `on_send_knock_request` - await self._event_auth_handler.check_auth_rules_from_context( - room_version_obj, event, context - ) + await self._event_auth_handler.check_auth_rules_from_context(event, context) except AuthError as e: logger.warning("Failed to create new knock %r because %s", event, e) raise e @@ -1208,7 +1202,7 @@ class FederationHandler: try: validate_event_for_room_version(event) await self._event_auth_handler.check_auth_rules_from_context( - room_version_obj, event, context + event, context ) except AuthError as e: logger.warning("Denying new third party invite %r because %s", event, e) @@ -1259,9 +1253,7 @@ class FederationHandler: try: validate_event_for_room_version(event) - await self._event_auth_handler.check_auth_rules_from_context( - room_version_obj, event, context - ) + await self._event_auth_handler.check_auth_rules_from_context(event, context) except AuthError as e: logger.warning("Denying third party invite %r because %s", event, e) raise e diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index b078e2424..c8bbcfd8c 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -42,7 +42,7 @@ from synapse.api.errors import ( SynapseError, UnsupportedRoomVersionError, ) -from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.api.urls import ConsentURIBuilder from synapse.event_auth import validate_event_for_room_version from synapse.events import EventBase, relation_from_event @@ -1273,23 +1273,6 @@ class EventCreationHandler: ) return prev_event - if event.is_state() and (event.type, event.state_key) == ( - EventTypes.Create, - "", - ): - room_version_id = event.content.get( - "room_version", RoomVersions.V1.identifier - ) - maybe_room_version_obj = KNOWN_ROOM_VERSIONS.get(room_version_id) - if not maybe_room_version_obj: - raise UnsupportedRoomVersionError( - "Attempt to create a room with unsupported room version %s" - % (room_version_id,) - ) - room_version_obj = maybe_room_version_obj - else: - room_version_obj = await self.store.get_room_version(event.room_id) - if event.internal_metadata.is_out_of_band_membership(): # the only sort of out-of-band-membership events we expect to see here are # invite rejections and rescinded knocks that we have generated ourselves. 
@@ -1299,7 +1282,7 @@ class EventCreationHandler: try: validate_event_for_room_version(event) await self._event_auth_handler.check_auth_rules_from_context( - room_version_obj, event, context + event, context ) except AuthError as err: logger.warning("Denying new event %r because %s", event, err) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 44d978407..d8918ee1a 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -226,10 +226,9 @@ class RoomCreationHandler: }, }, ) - old_room_version = await self.store.get_room_version(old_room_id) validate_event_for_room_version(tombstone_event) await self._event_auth_handler.check_auth_rules_from_context( - old_room_version, tombstone_event, tombstone_context + tombstone_event, tombstone_context ) # Upgrade the room From a6173a16fe308291054b5fb507e6c160161ed0a5 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 10 Jun 2022 11:08:01 +0100 Subject: [PATCH 27/85] changelog --- changelog.d/13017.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/13017.misc diff --git a/changelog.d/13017.misc b/changelog.d/13017.misc new file mode 100644 index 000000000..b314687f9 --- /dev/null +++ b/changelog.d/13017.misc @@ -0,0 +1 @@ +Remove redundant `room_version` parameters from event auth functions. From 53b77b203ac12f20a6534393464588d5d49435f5 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 13 Jun 2022 14:06:27 -0400 Subject: [PATCH 28/85] Replace noop background updates with DELETE. (#12954) Removes the `register_noop_background_update` and deletes the background updates directly in a delta file. --- changelog.d/12954.misc | 1 + synapse/_scripts/synapse_port_db.py | 2 - synapse/storage/background_updates.py | 19 ------ synapse/storage/databases/main/__init__.py | 2 - synapse/storage/databases/main/deviceinbox.py | 11 ---- synapse/storage/databases/main/devices.py | 9 --- .../databases/main/events_bg_updates.py | 5 -- .../storage/databases/main/group_server.py | 34 ----------- .../databases/main/media_repository.py | 10 --- .../storage/databases/main/registration.py | 11 ---- synapse/storage/databases/main/search.py | 10 --- synapse/storage/databases/main/stats.py | 5 -- .../70/02remove_noop_background_updates.sql | 61 +++++++++++++++++++ tests/handlers/test_stats.py | 28 --------- 14 files changed, 62 insertions(+), 146 deletions(-) create mode 100644 changelog.d/12954.misc delete mode 100644 synapse/storage/databases/main/group_server.py create mode 100644 synapse/storage/schema/main/delta/70/02remove_noop_background_updates.sql diff --git a/changelog.d/12954.misc b/changelog.d/12954.misc new file mode 100644 index 000000000..20bf13673 --- /dev/null +++ b/changelog.d/12954.misc @@ -0,0 +1 @@ +Replace noop background updates with `DELETE` delta. 
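The shape of this change, sketched with a made-up update name (the real
statements are in the new delta file below):

    # Before: retiring a background update meant keeping a no-op handler
    # registered forever, so that still-queued rows would be marked done.
    self.db_pool.updates.register_noop_background_update("some_retired_update")

    # After: the queued row is removed once, in a schema delta, e.g.
    #   DELETE FROM background_updates WHERE update_name = 'some_retired_update';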
diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py index c753dfa7c..9586086c0 100755 --- a/synapse/_scripts/synapse_port_db.py +++ b/synapse/_scripts/synapse_port_db.py @@ -61,7 +61,6 @@ from synapse.storage.databases.main.end_to_end_keys import EndToEndKeyBackground from synapse.storage.databases.main.events_bg_updates import ( EventsBackgroundUpdatesStore, ) -from synapse.storage.databases.main.group_server import GroupServerStore from synapse.storage.databases.main.media_repository import ( MediaRepositoryBackgroundUpdateStore, ) @@ -218,7 +217,6 @@ class Store( PushRuleStore, PusherWorkerStore, PresenceBackgroundUpdateStore, - GroupServerStore, ): def execute(self, f: Callable[..., R], *args: Any, **kwargs: Any) -> Awaitable[R]: return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs) diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index b1e5208c7..555b4e77d 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -507,25 +507,6 @@ class BackgroundUpdater: update_handler ) - def register_noop_background_update(self, update_name: str) -> None: - """Register a noop handler for a background update. - - This is useful when we previously did a background update, but no - longer wish to do the update. In this case the background update should - be removed from the schema delta files, but there may still be some - users who have the background update queued, so this method should - also be called to clear the update. - - Args: - update_name: Name of update - """ - - async def noop_update(progress: JsonDict, batch_size: int) -> int: - await self._end_background_update(update_name) - return 1 - - self.register_background_update_handler(update_name, noop_update) - def register_background_index_update( self, update_name: str, diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index 11d9d16c1..9121badb3 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -45,7 +45,6 @@ from .event_push_actions import EventPushActionsStore from .events_bg_updates import EventsBackgroundUpdatesStore from .events_forward_extremities import EventForwardExtremitiesStore from .filtering import FilteringStore -from .group_server import GroupServerStore from .keys import KeyStore from .lock import LockStore from .media_repository import MediaRepositoryStore @@ -117,7 +116,6 @@ class DataStore( DeviceStore, DeviceInboxStore, UserDirectoryStore, - GroupServerStore, UserErasureStore, MonthlyActiveUsersWorkerStore, StatsStore, diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index 599b41838..422e0e65c 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -834,8 +834,6 @@ class DeviceInboxWorkerStore(SQLBaseStore): class DeviceInboxBackgroundUpdateStore(SQLBaseStore): DEVICE_INBOX_STREAM_ID = "device_inbox_stream_drop" - REMOVE_DELETED_DEVICES = "remove_deleted_devices_from_device_inbox" - REMOVE_HIDDEN_DEVICES = "remove_hidden_devices_from_device_inbox" REMOVE_DEAD_DEVICES_FROM_INBOX = "remove_dead_devices_from_device_inbox" def __init__( @@ -857,15 +855,6 @@ class DeviceInboxBackgroundUpdateStore(SQLBaseStore): self.DEVICE_INBOX_STREAM_ID, self._background_drop_index_device_inbox ) - # Used to be a background update that deletes all device_inboxes for deleted - # devices. 
- self.db_pool.updates.register_noop_background_update( - self.REMOVE_DELETED_DEVICES - ) - # Used to be a background update that deletes all device_inboxes for hidden - # devices. - self.db_pool.updates.register_noop_background_update(self.REMOVE_HIDDEN_DEVICES) - self.db_pool.updates.register_background_update_handler( self.REMOVE_DEAD_DEVICES_FROM_INBOX, self._remove_dead_devices_from_device_inbox, diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 71e7863dd..2414a7dc3 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -1240,15 +1240,6 @@ class DeviceBackgroundUpdateStore(SQLBaseStore): self._remove_duplicate_outbound_pokes, ) - # a pair of background updates that were added during the 1.14 release cycle, - # but replaced with 58/06dlols_unique_idx.py - self.db_pool.updates.register_noop_background_update( - "device_lists_outbound_last_success_unique_idx", - ) - self.db_pool.updates.register_noop_background_update( - "drop_device_lists_outbound_last_success_non_unique_idx", - ) - async def _drop_device_list_streams_non_unique_indexes( self, progress: JsonDict, batch_size: int ) -> int: diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index d5f005966..bea34a4c4 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -177,11 +177,6 @@ class EventsBackgroundUpdatesStore(SQLBaseStore): self._purged_chain_cover_index, ) - # The event_thread_relation background update was replaced with the - # event_arbitrary_relations one, which handles any relation to avoid - # needed to potentially crawl the entire events table in the future. - self.db_pool.updates.register_noop_background_update("event_thread_relation") - self.db_pool.updates.register_background_update_handler( "event_arbitrary_relations", self._event_arbitrary_relations, diff --git a/synapse/storage/databases/main/group_server.py b/synapse/storage/databases/main/group_server.py deleted file mode 100644 index c15a7136b..000000000 --- a/synapse/storage/databases/main/group_server.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright 2017 Vector Creations Ltd -# Copyright 2018 New Vector Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from typing import TYPE_CHECKING - -from synapse.storage._base import SQLBaseStore -from synapse.storage.database import DatabasePool, LoggingDatabaseConnection - -if TYPE_CHECKING: - from synapse.server import HomeServer - - -class GroupServerStore(SQLBaseStore): - def __init__( - self, - database: DatabasePool, - db_conn: LoggingDatabaseConnection, - hs: "HomeServer", - ): - # Register a legacy groups background update as a no-op. 
- database.updates.register_noop_background_update("local_group_updates_index") - super().__init__(database, db_conn, hs) diff --git a/synapse/storage/databases/main/media_repository.py b/synapse/storage/databases/main/media_repository.py index d028be16d..9b172a64d 100644 --- a/synapse/storage/databases/main/media_repository.py +++ b/synapse/storage/databases/main/media_repository.py @@ -37,9 +37,6 @@ from synapse.types import JsonDict, UserID if TYPE_CHECKING: from synapse.server import HomeServer -BG_UPDATE_REMOVE_MEDIA_REPO_INDEX_WITHOUT_METHOD = ( - "media_repository_drop_index_wo_method" -) BG_UPDATE_REMOVE_MEDIA_REPO_INDEX_WITHOUT_METHOD_2 = ( "media_repository_drop_index_wo_method_2" ) @@ -111,13 +108,6 @@ class MediaRepositoryBackgroundUpdateStore(SQLBaseStore): unique=True, ) - # the original impl of _drop_media_index_without_method was broken (see - # https://github.com/matrix-org/synapse/issues/8649), so we replace the original - # impl with a no-op and run the fixed migration as - # media_repository_drop_index_wo_method_2. - self.db_pool.updates.register_noop_background_update( - BG_UPDATE_REMOVE_MEDIA_REPO_INDEX_WITHOUT_METHOD - ) self.db_pool.updates.register_background_update_handler( BG_UPDATE_REMOVE_MEDIA_REPO_INDEX_WITHOUT_METHOD_2, self._drop_media_index_without_method, diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 4991360b7..cb63cd9b7 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -1805,21 +1805,10 @@ class RegistrationBackgroundUpdateStore(RegistrationWorkerStore): columns=["creation_ts"], ) - # we no longer use refresh tokens, but it's possible that some people - # might have a background update queued to build this index. Just - # clear the background update. - self.db_pool.updates.register_noop_background_update( - "refresh_tokens_device_index" - ) - self.db_pool.updates.register_background_update_handler( "users_set_deactivated_flag", self._background_update_set_deactivated_flag ) - self.db_pool.updates.register_noop_background_update( - "user_threepids_grandfather" - ) - self.db_pool.updates.register_background_index_update( "user_external_ids_user_id_idx", index_name="user_external_ids_user_id_idx", diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py index 78e0773b2..f6e24b68d 100644 --- a/synapse/storage/databases/main/search.py +++ b/synapse/storage/databases/main/search.py @@ -113,7 +113,6 @@ class SearchBackgroundUpdateStore(SearchWorkerStore): EVENT_SEARCH_UPDATE_NAME = "event_search" EVENT_SEARCH_ORDER_UPDATE_NAME = "event_search_order" - EVENT_SEARCH_USE_GIST_POSTGRES_NAME = "event_search_postgres_gist" EVENT_SEARCH_USE_GIN_POSTGRES_NAME = "event_search_postgres_gin" EVENT_SEARCH_DELETE_NON_STRINGS = "event_search_sqlite_delete_non_strings" @@ -132,15 +131,6 @@ class SearchBackgroundUpdateStore(SearchWorkerStore): self.EVENT_SEARCH_ORDER_UPDATE_NAME, self._background_reindex_search_order ) - # we used to have a background update to turn the GIN index into a - # GIST one; we no longer do that (obviously) because we actually want - # a GIN index. However, it's possible that some people might still have - # the background update queued, so we register a handler to clear the - # background update. 
-        self.db_pool.updates.register_noop_background_update(
-            self.EVENT_SEARCH_USE_GIST_POSTGRES_NAME
-        )
-
         self.db_pool.updates.register_background_update_handler(
             self.EVENT_SEARCH_USE_GIN_POSTGRES_NAME, self._background_reindex_gin_search
         )
diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py
index b95dbef67..538451b05 100644
--- a/synapse/storage/databases/main/stats.py
+++ b/synapse/storage/databases/main/stats.py
@@ -120,11 +120,6 @@ class StatsStore(StateDeltasStore):
         self.db_pool.updates.register_background_update_handler(
             "populate_stats_process_users", self._populate_stats_process_users
         )
-        # we no longer need to perform clean-up, but we will give ourselves
-        # the potential to reintroduce it in the future – so documentation
-        # will still encourage the use of this no-op handler.
-        self.db_pool.updates.register_noop_background_update("populate_stats_cleanup")
-        self.db_pool.updates.register_noop_background_update("populate_stats_prepare")
 
     async def _populate_stats_process_users(
         self, progress: JsonDict, batch_size: int
diff --git a/synapse/storage/schema/main/delta/70/02remove_noop_background_updates.sql b/synapse/storage/schema/main/delta/70/02remove_noop_background_updates.sql
new file mode 100644
index 000000000..fa96ac50c
--- /dev/null
+++ b/synapse/storage/schema/main/delta/70/02remove_noop_background_updates.sql
@@ -0,0 +1,61 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Clean-up background updates which should no longer be run. Previously these
+-- used the (now removed) register_noop_background_update method.
+
+-- Used to be a background update that deletes all device_inboxes for deleted
+-- devices.
+DELETE FROM background_updates WHERE update_name = 'remove_deleted_devices_from_device_inbox';
+-- Used to be a background update that deletes all device_inboxes for hidden
+-- devices.
+DELETE FROM background_updates WHERE update_name = 'remove_hidden_devices_from_device_inbox';
+
+-- A pair of background updates that were added during the 1.14 release cycle,
+-- but replaced with 58/06dlols_unique_idx.py
+DELETE FROM background_updates WHERE update_name = 'device_lists_outbound_last_success_unique_idx';
+DELETE FROM background_updates WHERE update_name = 'drop_device_lists_outbound_last_success_non_unique_idx';
+
+-- The event_thread_relation background update was replaced with the
+-- event_arbitrary_relations one, which handles any relation to avoid
+-- needing to potentially crawl the entire events table in the future.
+DELETE FROM background_updates WHERE update_name = 'event_thread_relation';
+
+-- A legacy groups background update. 
+DELETE FROM background_updates WHERE update_name = 'local_group_updates_index'; + +-- The original impl of _drop_media_index_without_method was broken (see +-- https://github.com/matrix-org/synapse/issues/8649), so we replace the original +-- impl with a no-op and run the fixed migration as +-- media_repository_drop_index_wo_method_2. +DELETE FROM background_updates WHERE update_name = 'media_repository_drop_index_wo_method'; + +-- We no longer use refresh tokens, but it's possible that some people +-- might have a background update queued to build this index. Just +-- clear the background update. +DELETE FROM background_updates WHERE update_name = 'refresh_tokens_device_index'; + +DELETE FROM background_updates WHERE update_name = 'user_threepids_grandfather'; + +-- We used to have a background update to turn the GIN index into a +-- GIST one; we no longer do that (obviously) because we actually want +-- a GIN index. However, it's possible that some people might still have +-- the background update queued, so we register a handler to clear the +-- background update. +DELETE FROM background_updates WHERE update_name = 'event_search_postgres_gist'; + +-- We no longer need to perform clean-up. +DELETE FROM background_updates WHERE update_name = 'populate_stats_cleanup'; +DELETE FROM background_updates WHERE update_name = 'populate_stats_prepare'; diff --git a/tests/handlers/test_stats.py b/tests/handlers/test_stats.py index ecd78fa36..05f9ec3c5 100644 --- a/tests/handlers/test_stats.py +++ b/tests/handlers/test_stats.py @@ -43,19 +43,12 @@ class StatsRoomTests(unittest.HomeserverTestCase): # Ugh, have to reset this flag self.store.db_pool.updates._all_done = False - self.get_success( - self.store.db_pool.simple_insert( - "background_updates", - {"update_name": "populate_stats_prepare", "progress_json": "{}"}, - ) - ) self.get_success( self.store.db_pool.simple_insert( "background_updates", { "update_name": "populate_stats_process_rooms", "progress_json": "{}", - "depends_on": "populate_stats_prepare", }, ) ) @@ -69,16 +62,6 @@ class StatsRoomTests(unittest.HomeserverTestCase): }, ) ) - self.get_success( - self.store.db_pool.simple_insert( - "background_updates", - { - "update_name": "populate_stats_cleanup", - "progress_json": "{}", - "depends_on": "populate_stats_process_users", - }, - ) - ) async def get_all_room_state(self): return await self.store.db_pool.simple_select_list( @@ -533,7 +516,6 @@ class StatsRoomTests(unittest.HomeserverTestCase): { "update_name": "populate_stats_process_rooms", "progress_json": "{}", - "depends_on": "populate_stats_prepare", }, ) ) @@ -547,16 +529,6 @@ class StatsRoomTests(unittest.HomeserverTestCase): }, ) ) - self.get_success( - self.store.db_pool.simple_insert( - "background_updates", - { - "update_name": "populate_stats_cleanup", - "progress_json": "{}", - "depends_on": "populate_stats_process_users", - }, - ) - ) self.wait_for_background_updates() From a164a46038b0e51142781619db0e6dec8e0c2aaa Mon Sep 17 00:00:00 2001 From: David Teller Date: Mon, 13 Jun 2022 20:16:16 +0200 Subject: [PATCH 29/85] Uniformize spam-checker API, part 4: port other spam-checker callbacks to return `Union[Allow, Codes]`. 
(#12857) Co-authored-by: Brendan Abolivier --- changelog.d/12857.feature | 1 + docs/modules/spam_checker_callbacks.md | 190 ++++++++++++----- docs/upgrade.md | 41 ++++ synapse/events/spamcheck.py | 277 ++++++++++++++++++------- synapse/handlers/directory.py | 19 +- synapse/handlers/federation.py | 10 +- synapse/handlers/message.py | 12 +- synapse/handlers/room.py | 20 +- synapse/handlers/room_member.py | 32 +-- synapse/module_api/__init__.py | 2 + synapse/rest/media/v1/media_storage.py | 7 +- tests/rest/client/test_rooms.py | 175 +++++++++++++++- 12 files changed, 604 insertions(+), 182 deletions(-) create mode 100644 changelog.d/12857.feature diff --git a/changelog.d/12857.feature b/changelog.d/12857.feature new file mode 100644 index 000000000..ddd1dbe68 --- /dev/null +++ b/changelog.d/12857.feature @@ -0,0 +1 @@ +Port spam-checker API callbacks to a new, richer API. This is part of an ongoing change to let spam-checker modules inform users of the reason their event or operation is rejected. diff --git a/docs/modules/spam_checker_callbacks.md b/docs/modules/spam_checker_callbacks.md index ad35e667e..8ca7d5bdb 100644 --- a/docs/modules/spam_checker_callbacks.md +++ b/docs/modules/spam_checker_callbacks.md @@ -38,15 +38,13 @@ this callback. _First introduced in Synapse v1.37.0_ +_Changed in Synapse v1.61.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._ + ```python -async def user_may_join_room(user: str, room: str, is_invited: bool) -> bool +async def user_may_join_room(user: str, room: str, is_invited: bool) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool] ``` -Called when a user is trying to join a room. The module must return a `bool` to indicate -whether the user can join the room. Return `False` to prevent the user from joining the -room; otherwise return `True` to permit the joining. - -The user is represented by their Matrix user ID (e.g. +Called when a user is trying to join a room. The user is represented by their Matrix user ID (e.g. `@alice:example.com`) and the room is represented by its Matrix ID (e.g. `!room:example.com`). The module is also given a boolean to indicate whether the user currently has a pending invite in the room. @@ -54,46 +52,67 @@ currently has a pending invite in the room. This callback isn't called if the join is performed by a server administrator, or in the context of a room creation. +The callback must return one of: + - `synapse.module_api.NOT_SPAM`, to allow the operation. Other callbacks may still + decide to reject it. + - `synapse.module_api.errors.Codes` to reject the operation with an error code. In case + of doubt, `synapse.module_api.errors.Codes.FORBIDDEN` is a good error code. + - (deprecated) `False`, which is the same as returning `synapse.module_api.NOT_SPAM`. + - (deprecated) `True`, which is the same as returning `synapse.module_api.errors.Codes.FORBIDDEN`. + If multiple modules implement this callback, they will be considered in order. If a -callback returns `True`, Synapse falls through to the next one. The value of the first -callback that does not return `True` will be used. If this happens, Synapse will not call -any of the subsequent implementations of this callback. +callback returns `synapse.module_api.NOT_SPAM`, Synapse falls through to the next one. +The value of the first callback that does not return `synapse.module_api.NOT_SPAM` will +be used. 
If this happens, Synapse will not call any of the subsequent implementations of +this callback. ### `user_may_invite` _First introduced in Synapse v1.37.0_ +_Changed in Synapse v1.61.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._ + ```python -async def user_may_invite(inviter: str, invitee: str, room_id: str) -> bool +async def user_may_invite(inviter: str, invitee: str, room_id: str) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool] ``` -Called when processing an invitation. The module must return a `bool` indicating whether -the inviter can invite the invitee to the given room. Both inviter and invitee are -represented by their Matrix user ID (e.g. `@alice:example.com`). Return `False` to prevent -the invitation; otherwise return `True` to permit it. +Called when processing an invitation. Both inviter and invitee are +represented by their Matrix user ID (e.g. `@alice:example.com`). + + +The callback must return one of: + - `synapse.module_api.NOT_SPAM`, to allow the operation. Other callbacks may still + decide to reject it. + - `synapse.module_api.errors.Codes` to reject the operation with an error code. In case + of doubt, `synapse.module_api.errors.Codes.FORBIDDEN` is a good error code. + + - (deprecated) `False`, which is the same as returning `synapse.module_api.NOT_SPAM`. + - (deprecated) `True`, which is the same as returning `synapse.module_api.errors.Codes.FORBIDDEN`. If multiple modules implement this callback, they will be considered in order. If a -callback returns `True`, Synapse falls through to the next one. The value of the first -callback that does not return `True` will be used. If this happens, Synapse will not call -any of the subsequent implementations of this callback. +callback returns `synapse.module_api.NOT_SPAM`, Synapse falls through to the next one. +The value of the first callback that does not return `synapse.module_api.NOT_SPAM` will +be used. If this happens, Synapse will not call any of the subsequent implementations of +this callback. + ### `user_may_send_3pid_invite` _First introduced in Synapse v1.45.0_ +_Changed in Synapse v1.61.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._ + ```python async def user_may_send_3pid_invite( inviter: str, medium: str, address: str, room_id: str, -) -> bool +) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool] ``` Called when processing an invitation using a third-party identifier (also called a 3PID, -e.g. an email address or a phone number). The module must return a `bool` indicating -whether the inviter can invite the invitee to the given room. Return `False` to prevent -the invitation; otherwise return `True` to permit it. +e.g. an email address or a phone number). The inviter is represented by their Matrix user ID (e.g. `@alice:example.com`), and the invitee is represented by its medium (e.g. "email") and its address @@ -115,63 +134,108 @@ await user_may_send_3pid_invite( **Note**: If the third-party identifier is already associated with a matrix user ID, [`user_may_invite`](#user_may_invite) will be used instead. +The callback must return one of: + - `synapse.module_api.NOT_SPAM`, to allow the operation. Other callbacks may still + decide to reject it. + - `synapse.module_api.errors.Codes` to reject the operation with an error code. 
In case + of doubt, `synapse.module_api.errors.Codes.FORBIDDEN` is a good error code. + + - (deprecated) `False`, which is the same as returning `synapse.module_api.NOT_SPAM`. + - (deprecated) `True`, which is the same as returning `synapse.module_api.errors.Codes.FORBIDDEN`. + If multiple modules implement this callback, they will be considered in order. If a -callback returns `True`, Synapse falls through to the next one. The value of the first -callback that does not return `True` will be used. If this happens, Synapse will not call -any of the subsequent implementations of this callback. +callback returns `synapse.module_api.NOT_SPAM`, Synapse falls through to the next one. +The value of the first callback that does not return `synapse.module_api.NOT_SPAM` will +be used. If this happens, Synapse will not call any of the subsequent implementations of +this callback. + ### `user_may_create_room` _First introduced in Synapse v1.37.0_ +_Changed in Synapse v1.61.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._ + ```python -async def user_may_create_room(user: str) -> bool +async def user_may_create_room(user_id: str) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool] ``` -Called when processing a room creation request. The module must return a `bool` indicating -whether the given user (represented by their Matrix user ID) is allowed to create a room. -Return `False` to prevent room creation; otherwise return `True` to permit it. +Called when processing a room creation request. + +The callback must return one of: + - `synapse.module_api.NOT_SPAM`, to allow the operation. Other callbacks may still + decide to reject it. + - `synapse.module_api.errors.Codes` to reject the operation with an error code. In case + of doubt, `synapse.module_api.errors.Codes.FORBIDDEN` is a good error code. + + - (deprecated) `False`, which is the same as returning `synapse.module_api.NOT_SPAM`. + - (deprecated) `True`, which is the same as returning `synapse.module_api.errors.Codes.FORBIDDEN`. If multiple modules implement this callback, they will be considered in order. If a -callback returns `True`, Synapse falls through to the next one. The value of the first -callback that does not return `True` will be used. If this happens, Synapse will not call -any of the subsequent implementations of this callback. +callback returns `synapse.module_api.NOT_SPAM`, Synapse falls through to the next one. +The value of the first callback that does not return `synapse.module_api.NOT_SPAM` will +be used. If this happens, Synapse will not call any of the subsequent implementations of +this callback. + + ### `user_may_create_room_alias` _First introduced in Synapse v1.37.0_ +_Changed in Synapse v1.61.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._ + ```python -async def user_may_create_room_alias(user: str, room_alias: "synapse.types.RoomAlias") -> bool +async def user_may_create_room_alias(user_id: str, room_alias: "synapse.module_api.RoomAlias") -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool] ``` -Called when trying to associate an alias with an existing room. The module must return a -`bool` indicating whether the given user (represented by their Matrix user ID) is allowed -to set the given alias. 
Return `False` to prevent the alias creation; otherwise return -`True` to permit it. +Called when trying to associate an alias with an existing room. + +The callback must return one of: + - `synapse.module_api.NOT_SPAM`, to allow the operation. Other callbacks may still + decide to reject it. + - `synapse.module_api.errors.Codes` to reject the operation with an error code. In case + of doubt, `synapse.module_api.errors.Codes.FORBIDDEN` is a good error code. + + - (deprecated) `False`, which is the same as returning `synapse.module_api.NOT_SPAM`. + - (deprecated) `True`, which is the same as returning `synapse.module_api.errors.Codes.FORBIDDEN`. If multiple modules implement this callback, they will be considered in order. If a -callback returns `True`, Synapse falls through to the next one. The value of the first -callback that does not return `True` will be used. If this happens, Synapse will not call -any of the subsequent implementations of this callback. +callback returns `synapse.module_api.NOT_SPAM`, Synapse falls through to the next one. +The value of the first callback that does not return `synapse.module_api.NOT_SPAM` will +be used. If this happens, Synapse will not call any of the subsequent implementations of +this callback. + + ### `user_may_publish_room` _First introduced in Synapse v1.37.0_ +_Changed in Synapse v1.61.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._ + ```python -async def user_may_publish_room(user: str, room_id: str) -> bool +async def user_may_publish_room(user_id: str, room_id: str) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool] ``` -Called when trying to publish a room to the homeserver's public rooms directory. The -module must return a `bool` indicating whether the given user (represented by their -Matrix user ID) is allowed to publish the given room. Return `False` to prevent the -room from being published; otherwise return `True` to permit its publication. +Called when trying to publish a room to the homeserver's public rooms directory. + +The callback must return one of: + - `synapse.module_api.NOT_SPAM`, to allow the operation. Other callbacks may still + decide to reject it. + - `synapse.module_api.errors.Codes` to reject the operation with an error code. In case + of doubt, `synapse.module_api.errors.Codes.FORBIDDEN` is a good error code. + + - (deprecated) `False`, which is the same as returning `synapse.module_api.NOT_SPAM`. + - (deprecated) `True`, which is the same as returning `synapse.module_api.errors.Codes.FORBIDDEN`. If multiple modules implement this callback, they will be considered in order. If a -callback returns `True`, Synapse falls through to the next one. The value of the first -callback that does not return `True` will be used. If this happens, Synapse will not call -any of the subsequent implementations of this callback. +callback returns `synapse.module_api.NOT_SPAM`, Synapse falls through to the next one. +The value of the first callback that does not return `synapse.module_api.NOT_SPAM` will +be used. If this happens, Synapse will not call any of the subsequent implementations of +this callback. + + ### `check_username_for_spam` @@ -239,21 +303,32 @@ this callback. _First introduced in Synapse v1.37.0_ +_Changed in Synapse v1.61.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. 
Returning a boolean is now deprecated._ + ```python async def check_media_file_for_spam( file_wrapper: "synapse.rest.media.v1.media_storage.ReadableFileWrapper", file_info: "synapse.rest.media.v1._base.FileInfo", -) -> bool +) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool] ``` -Called when storing a local or remote file. The module must return a `bool` indicating -whether the given file should be excluded from the homeserver's media store. Return -`True` to prevent this file from being stored; otherwise return `False`. +Called when storing a local or remote file. + +The callback must return one of: + - `synapse.module_api.NOT_SPAM`, to allow the operation. Other callbacks may still + decide to reject it. + - `synapse.module_api.errors.Codes` to reject the operation with an error code. In case + of doubt, `synapse.module_api.errors.Codes.FORBIDDEN` is a good error code. + + - (deprecated) `False`, which is the same as returning `synapse.module_api.NOT_SPAM`. + - (deprecated) `True`, which is the same as returning `synapse.module_api.errors.Codes.FORBIDDEN`. If multiple modules implement this callback, they will be considered in order. If a -callback returns `False`, Synapse falls through to the next one. The value of the first -callback that does not return `False` will be used. If this happens, Synapse will not call -any of the subsequent implementations of this callback. +callback returns `synapse.module_api.NOT_SPAM`, Synapse falls through to the next one. +The value of the first callback that does not return `synapse.module_api.NOT_SPAM` will +be used. If this happens, Synapse will not call any of the subsequent implementations of +this callback. + ### `should_drop_federated_event` @@ -316,6 +391,9 @@ class ListSpamChecker: resource=IsUserEvilResource(config), ) - async def check_event_for_spam(self, event: "synapse.events.EventBase") -> Union[bool, str]: - return event.sender not in self.evil_users + async def check_event_for_spam(self, event: "synapse.events.EventBase") -> Union[Literal["NOT_SPAM"], Codes]: + if event.sender in self.evil_users: + return Codes.FORBIDDEN + else: + return synapse.module_api.NOT_SPAM ``` diff --git a/docs/upgrade.md b/docs/upgrade.md index e3c64da17..3ade86b1a 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -89,6 +89,47 @@ process, for example: dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb ``` +# Upgrading to v1.61.0 + +## New signatures for spam checker callbacks + +As a followup to changes in v1.60.0, the following spam-checker callbacks have changed signature: + +- `user_may_join_room` +- `user_may_invite` +- `user_may_send_3pid_invite` +- `user_may_create_room` +- `user_may_create_room_alias` +- `user_may_publish_room` +- `check_media_file_for_spam` + +For each of these methods, the previous callback signature has been deprecated. + +Whereas callbacks used to return `bool`, they should now return `Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes"]`. + +For instance, if your module implements `user_may_join_room` as follows: + +```python +async def user_may_join_room(self, user_id: str, room_id: str, is_invited: bool) + if ...: + # Request is spam + return False + # Request is not spam + return True +``` + +you should rewrite it as follows: + +```python +async def user_may_join_room(self, user_id: str, room_id: str, is_invited: bool) + if ...: + # Request is spam, mark it as forbidden (you may use some more precise error + # code if it is useful). 
+ return synapse.module_api.errors.Codes.FORBIDDEN + # Request is not spam, mark it as such. + return synapse.module_api.NOT_SPAM +``` + # Upgrading to v1.60.0 ## Adding a new unique index to `state_group_edges` could fail if your database is corrupted diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py index d2e06c754..32712d204 100644 --- a/synapse/events/spamcheck.py +++ b/synapse/events/spamcheck.py @@ -28,7 +28,10 @@ from typing import ( Union, ) -from synapse.api.errors import Codes +# `Literal` appears with Python 3.8. +from typing_extensions import Literal + +import synapse from synapse.rest.media.v1._base import FileInfo from synapse.rest.media.v1.media_storage import ReadableFileWrapper from synapse.spam_checker_api import RegistrationBehaviour @@ -47,12 +50,12 @@ CHECK_EVENT_FOR_SPAM_CALLBACK = Callable[ Awaitable[ Union[ str, - Codes, + "synapse.api.errors.Codes", # Highly experimental, not officially part of the spamchecker API, may # disappear without warning depending on the results of ongoing # experiments. # Use this to return additional information as part of an error. - Tuple[Codes, Dict], + Tuple["synapse.api.errors.Codes", Dict], # Deprecated bool, ] @@ -62,12 +65,72 @@ SHOULD_DROP_FEDERATED_EVENT_CALLBACK = Callable[ ["synapse.events.EventBase"], Awaitable[Union[bool, str]], ] -USER_MAY_JOIN_ROOM_CALLBACK = Callable[[str, str, bool], Awaitable[bool]] -USER_MAY_INVITE_CALLBACK = Callable[[str, str, str], Awaitable[bool]] -USER_MAY_SEND_3PID_INVITE_CALLBACK = Callable[[str, str, str, str], Awaitable[bool]] -USER_MAY_CREATE_ROOM_CALLBACK = Callable[[str], Awaitable[bool]] -USER_MAY_CREATE_ROOM_ALIAS_CALLBACK = Callable[[str, RoomAlias], Awaitable[bool]] -USER_MAY_PUBLISH_ROOM_CALLBACK = Callable[[str, str], Awaitable[bool]] +USER_MAY_JOIN_ROOM_CALLBACK = Callable[ + [str, str, bool], + Awaitable[ + Union[ + Literal["NOT_SPAM"], + "synapse.api.errors.Codes", + # Deprecated + bool, + ] + ], +] +USER_MAY_INVITE_CALLBACK = Callable[ + [str, str, str], + Awaitable[ + Union[ + Literal["NOT_SPAM"], + "synapse.api.errors.Codes", + # Deprecated + bool, + ] + ], +] +USER_MAY_SEND_3PID_INVITE_CALLBACK = Callable[ + [str, str, str, str], + Awaitable[ + Union[ + Literal["NOT_SPAM"], + "synapse.api.errors.Codes", + # Deprecated + bool, + ] + ], +] +USER_MAY_CREATE_ROOM_CALLBACK = Callable[ + [str], + Awaitable[ + Union[ + Literal["NOT_SPAM"], + "synapse.api.errors.Codes", + # Deprecated + bool, + ] + ], +] +USER_MAY_CREATE_ROOM_ALIAS_CALLBACK = Callable[ + [str, RoomAlias], + Awaitable[ + Union[ + Literal["NOT_SPAM"], + "synapse.api.errors.Codes", + # Deprecated + bool, + ] + ], +] +USER_MAY_PUBLISH_ROOM_CALLBACK = Callable[ + [str, str], + Awaitable[ + Union[ + Literal["NOT_SPAM"], + "synapse.api.errors.Codes", + # Deprecated + bool, + ] + ], +] CHECK_USERNAME_FOR_SPAM_CALLBACK = Callable[[UserProfile], Awaitable[bool]] LEGACY_CHECK_REGISTRATION_FOR_SPAM_CALLBACK = Callable[ [ @@ -88,7 +151,14 @@ CHECK_REGISTRATION_FOR_SPAM_CALLBACK = Callable[ ] CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK = Callable[ [ReadableFileWrapper, FileInfo], - Awaitable[bool], + Awaitable[ + Union[ + Literal["NOT_SPAM"], + "synapse.api.errors.Codes", + # Deprecated + bool, + ] + ], ] @@ -181,7 +251,7 @@ def load_legacy_spam_checkers(hs: "synapse.server.HomeServer") -> None: class SpamChecker: - NOT_SPAM = "NOT_SPAM" + NOT_SPAM: Literal["NOT_SPAM"] = "NOT_SPAM" def __init__(self, hs: "synapse.server.HomeServer") -> None: self.hs = hs @@ -275,7 +345,7 @@ class SpamChecker: async def 
check_event_for_spam( self, event: "synapse.events.EventBase" - ) -> Union[Tuple[Codes, Dict], str]: + ) -> Union[Tuple["synapse.api.errors.Codes", Dict], str]: """Checks if a given event is considered "spammy" by this server. If the server considers an event spammy, then it will be rejected if @@ -306,7 +376,7 @@ class SpamChecker: elif res is True: # This spam-checker rejects the event with deprecated # return value `True` - return Codes.FORBIDDEN + return (synapse.api.errors.Codes.FORBIDDEN, {}) elif not isinstance(res, str): # mypy complains that we can't reach this code because of the # return type in CHECK_EVENT_FOR_SPAM_CALLBACK, but we don't know @@ -352,7 +422,7 @@ class SpamChecker: async def user_may_join_room( self, user_id: str, room_id: str, is_invited: bool - ) -> bool: + ) -> Union["synapse.api.errors.Codes", Literal["NOT_SPAM"]]: """Checks if a given users is allowed to join a room. Not called when a user creates a room. @@ -362,54 +432,70 @@ class SpamChecker: is_invited: Whether the user is invited into the room Returns: - Whether the user may join the room + NOT_SPAM if the operation is permitted, Codes otherwise. """ for callback in self._user_may_join_room_callbacks: with Measure( self.clock, "{}.{}".format(callback.__module__, callback.__qualname__) ): - may_join_room = await delay_cancellation( - callback(user_id, room_id, is_invited) - ) - if may_join_room is False: - return False + res = await delay_cancellation(callback(user_id, room_id, is_invited)) + # Normalize return values to `Codes` or `"NOT_SPAM"`. + if res is True or res is self.NOT_SPAM: + continue + elif res is False: + return synapse.api.errors.Codes.FORBIDDEN + elif isinstance(res, synapse.api.errors.Codes): + return res + else: + logger.warning( + "Module returned invalid value, rejecting join as spam" + ) + return synapse.api.errors.Codes.FORBIDDEN - return True + # No spam-checker has rejected the request, let it pass. + return self.NOT_SPAM async def user_may_invite( self, inviter_userid: str, invitee_userid: str, room_id: str - ) -> bool: + ) -> Union["synapse.api.errors.Codes", Literal["NOT_SPAM"]]: """Checks if a given user may send an invite - If this method returns false, the invite will be rejected. - Args: inviter_userid: The user ID of the sender of the invitation invitee_userid: The user ID targeted in the invitation room_id: The room ID Returns: - True if the user may send an invite, otherwise False + NOT_SPAM if the operation is permitted, Codes otherwise. """ for callback in self._user_may_invite_callbacks: with Measure( self.clock, "{}.{}".format(callback.__module__, callback.__qualname__) ): - may_invite = await delay_cancellation( + res = await delay_cancellation( callback(inviter_userid, invitee_userid, room_id) ) - if may_invite is False: - return False + # Normalize return values to `Codes` or `"NOT_SPAM"`. + if res is True or res is self.NOT_SPAM: + continue + elif res is False: + return synapse.api.errors.Codes.FORBIDDEN + elif isinstance(res, synapse.api.errors.Codes): + return res + else: + logger.warning( + "Module returned invalid value, rejecting invite as spam" + ) + return synapse.api.errors.Codes.FORBIDDEN - return True + # No spam-checker has rejected the request, let it pass. 
+ return self.NOT_SPAM async def user_may_send_3pid_invite( self, inviter_userid: str, medium: str, address: str, room_id: str - ) -> bool: + ) -> Union["synapse.api.errors.Codes", Literal["NOT_SPAM"]]: """Checks if a given user may invite a given threepid into the room - If this method returns false, the threepid invite will be rejected. - Note that if the threepid is already associated with a Matrix user ID, Synapse will call user_may_invite with said user ID instead. @@ -420,88 +506,113 @@ class SpamChecker: room_id: The room ID Returns: - True if the user may send the invite, otherwise False + NOT_SPAM if the operation is permitted, Codes otherwise. """ for callback in self._user_may_send_3pid_invite_callbacks: with Measure( self.clock, "{}.{}".format(callback.__module__, callback.__qualname__) ): - may_send_3pid_invite = await delay_cancellation( + res = await delay_cancellation( callback(inviter_userid, medium, address, room_id) ) - if may_send_3pid_invite is False: - return False + # Normalize return values to `Codes` or `"NOT_SPAM"`. + if res is True or res is self.NOT_SPAM: + continue + elif res is False: + return synapse.api.errors.Codes.FORBIDDEN + elif isinstance(res, synapse.api.errors.Codes): + return res + else: + logger.warning( + "Module returned invalid value, rejecting 3pid invite as spam" + ) + return synapse.api.errors.Codes.FORBIDDEN - return True + return self.NOT_SPAM - async def user_may_create_room(self, userid: str) -> bool: + async def user_may_create_room( + self, userid: str + ) -> Union["synapse.api.errors.Codes", Literal["NOT_SPAM"]]: """Checks if a given user may create a room - If this method returns false, the creation request will be rejected. - Args: userid: The ID of the user attempting to create a room - - Returns: - True if the user may create a room, otherwise False """ for callback in self._user_may_create_room_callbacks: with Measure( self.clock, "{}.{}".format(callback.__module__, callback.__qualname__) ): - may_create_room = await delay_cancellation(callback(userid)) - if may_create_room is False: - return False + res = await delay_cancellation(callback(userid)) + if res is True or res is self.NOT_SPAM: + continue + elif res is False: + return synapse.api.errors.Codes.FORBIDDEN + elif isinstance(res, synapse.api.errors.Codes): + return res + else: + logger.warning( + "Module returned invalid value, rejecting room creation as spam" + ) + return synapse.api.errors.Codes.FORBIDDEN - return True + return self.NOT_SPAM async def user_may_create_room_alias( self, userid: str, room_alias: RoomAlias - ) -> bool: + ) -> Union["synapse.api.errors.Codes", Literal["NOT_SPAM"]]: """Checks if a given user may create a room alias - If this method returns false, the association request will be rejected. 
- Args: userid: The ID of the user attempting to create a room alias room_alias: The alias to be created - Returns: - True if the user may create a room alias, otherwise False """ for callback in self._user_may_create_room_alias_callbacks: with Measure( self.clock, "{}.{}".format(callback.__module__, callback.__qualname__) ): - may_create_room_alias = await delay_cancellation( - callback(userid, room_alias) - ) - if may_create_room_alias is False: - return False + res = await delay_cancellation(callback(userid, room_alias)) + if res is True or res is self.NOT_SPAM: + continue + elif res is False: + return synapse.api.errors.Codes.FORBIDDEN + elif isinstance(res, synapse.api.errors.Codes): + return res + else: + logger.warning( + "Module returned invalid value, rejecting room create as spam" + ) + return synapse.api.errors.Codes.FORBIDDEN - return True + return self.NOT_SPAM - async def user_may_publish_room(self, userid: str, room_id: str) -> bool: + async def user_may_publish_room( + self, userid: str, room_id: str + ) -> Union["synapse.api.errors.Codes", Literal["NOT_SPAM"]]: """Checks if a given user may publish a room to the directory - If this method returns false, the publish request will be rejected. - Args: userid: The user ID attempting to publish the room room_id: The ID of the room that would be published - - Returns: - True if the user may publish the room, otherwise False """ for callback in self._user_may_publish_room_callbacks: with Measure( self.clock, "{}.{}".format(callback.__module__, callback.__qualname__) ): - may_publish_room = await delay_cancellation(callback(userid, room_id)) - if may_publish_room is False: - return False + res = await delay_cancellation(callback(userid, room_id)) + if res is True or res is self.NOT_SPAM: + continue + elif res is False: + return synapse.api.errors.Codes.FORBIDDEN + elif isinstance(res, synapse.api.errors.Codes): + return res + else: + logger.warning( + "Module returned invalid value, rejecting room publication as spam" + ) + return synapse.api.errors.Codes.FORBIDDEN - return True + return self.NOT_SPAM async def check_username_for_spam(self, user_profile: UserProfile) -> bool: """Checks if a user ID or display name are considered "spammy" by this server. @@ -567,7 +678,7 @@ class SpamChecker: async def check_media_file_for_spam( self, file_wrapper: ReadableFileWrapper, file_info: FileInfo - ) -> bool: + ) -> Union["synapse.api.errors.Codes", Literal["NOT_SPAM"]]: """Checks if a piece of newly uploaded media should be blocked. This will be called for local uploads, downloads of remote media, each @@ -580,31 +691,37 @@ class SpamChecker: async def check_media_file_for_spam( self, file: ReadableFileWrapper, file_info: FileInfo - ) -> bool: + ) -> Union[Codes, Literal["NOT_SPAM"]]: buffer = BytesIO() await file.write_chunks_to(buffer.write) if buffer.getvalue() == b"Hello World": - return True + return synapse.module_api.NOT_SPAM - return False + return Codes.FORBIDDEN Args: file: An object that allows reading the contents of the media. file_info: Metadata about the file. - - Returns: - True if the media should be blocked or False if it should be - allowed. """ for callback in self._check_media_file_for_spam_callbacks: with Measure( self.clock, "{}.{}".format(callback.__module__, callback.__qualname__) ): - spam = await delay_cancellation(callback(file_wrapper, file_info)) - if spam: - return True + res = await delay_cancellation(callback(file_wrapper, file_info)) + # Normalize return values to `Codes` or `"NOT_SPAM"`. 
+ if res is False or res is self.NOT_SPAM: + continue + elif res is True: + return synapse.api.errors.Codes.FORBIDDEN + elif isinstance(res, synapse.api.errors.Codes): + return res + else: + logger.warning( + "Module returned invalid value, rejecting media file as spam" + ) + return synapse.api.errors.Codes.FORBIDDEN - return False + return self.NOT_SPAM diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 1459a046d..8b0f16f96 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -28,6 +28,7 @@ from synapse.api.errors import ( SynapseError, ) from synapse.appservice import ApplicationService +from synapse.module_api import NOT_SPAM from synapse.storage.databases.main.directory import RoomAliasMapping from synapse.types import JsonDict, Requester, RoomAlias, UserID, get_domain_from_id @@ -141,10 +142,15 @@ class DirectoryHandler: 403, "You must be in the room to create an alias for it" ) - if not await self.spam_checker.user_may_create_room_alias( + spam_check = await self.spam_checker.user_may_create_room_alias( user_id, room_alias - ): - raise AuthError(403, "This user is not permitted to create this alias") + ) + if spam_check != self.spam_checker.NOT_SPAM: + raise AuthError( + 403, + "This user is not permitted to create this alias", + spam_check, + ) if not self.config.roomdirectory.is_alias_creation_allowed( user_id, room_id, room_alias_str @@ -430,9 +436,12 @@ class DirectoryHandler: """ user_id = requester.user.to_string() - if not await self.spam_checker.user_may_publish_room(user_id, room_id): + spam_check = await self.spam_checker.user_may_publish_room(user_id, room_id) + if spam_check != NOT_SPAM: raise AuthError( - 403, "This user is not permitted to publish rooms to the room list" + 403, + "This user is not permitted to publish rooms to the room list", + spam_check, ) if requester.is_guest: diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 1e5694244..34cc5ecd1 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -59,6 +59,7 @@ from synapse.federation.federation_client import InvalidResponseError from synapse.http.servlet import assert_params_in_dict from synapse.logging.context import nested_logging_context from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.module_api import NOT_SPAM from synapse.replication.http.federation import ( ReplicationCleanRoomRestServlet, ReplicationStoreRoomOnOutlierMembershipRestServlet, @@ -820,11 +821,14 @@ class FederationHandler: if self.hs.config.server.block_non_admin_invites: raise SynapseError(403, "This server does not accept room invites") - if not await self.spam_checker.user_may_invite( + spam_check = await self.spam_checker.user_may_invite( event.sender, event.state_key, event.room_id - ): + ) + if spam_check != NOT_SPAM: raise SynapseError( - 403, "This user is not permitted to send invites to this server/user" + 403, + "This user is not permitted to send invites to this server/user", + spam_check, ) membership = event.content.get("membership") diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 9b1793916..ad87c4178 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -954,14 +954,12 @@ class EventCreationHandler: "Spam-check module returned invalid error value. 
Expecting [code, dict], got %s", spam_check_result, ) - spam_check_result = Codes.FORBIDDEN - if isinstance(spam_check_result, Codes): - raise SynapseError( - 403, - "This message has been rejected as probable spam", - spam_check_result, - ) + raise SynapseError( + 403, + "This message has been rejected as probable spam", + Codes.FORBIDDEN, + ) # Backwards compatibility: if the return value is not an error code, it # means the module returned an error message to be included in the diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index d8918ee1a..42aae4a21 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -62,6 +62,7 @@ from synapse.events.utils import copy_and_fixup_power_levels_contents from synapse.federation.federation_client import InvalidResponseError from synapse.handlers.federation import get_domains_from_state from synapse.handlers.relations import BundledAggregations +from synapse.module_api import NOT_SPAM from synapse.rest.admin._base import assert_user_is_admin from synapse.storage.state import StateFilter from synapse.streams import EventSource @@ -436,10 +437,9 @@ class RoomCreationHandler: """ user_id = requester.user.to_string() - if not await self.spam_checker.user_may_create_room(user_id): - raise SynapseError( - 403, "You are not permitted to create rooms", Codes.FORBIDDEN - ) + spam_check = await self.spam_checker.user_may_create_room(user_id) + if spam_check != NOT_SPAM: + raise SynapseError(403, "You are not permitted to create rooms", spam_check) creation_content: JsonDict = { "room_version": new_room_version.identifier, @@ -726,12 +726,12 @@ class RoomCreationHandler: invite_3pid_list = config.get("invite_3pid", []) invite_list = config.get("invite", []) - if not is_requester_admin and not ( - await self.spam_checker.user_may_create_room(user_id) - ): - raise SynapseError( - 403, "You are not permitted to create rooms", Codes.FORBIDDEN - ) + if not is_requester_admin: + spam_check = await self.spam_checker.user_may_create_room(user_id) + if spam_check != NOT_SPAM: + raise SynapseError( + 403, "You are not permitted to create rooms", spam_check + ) if ratelimit: await self.request_ratelimiter.ratelimit(requester) diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index d1199a064..e89b7441a 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -38,6 +38,7 @@ from synapse.event_auth import get_named_level, get_power_level_event from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.handlers.profile import MAX_AVATAR_URL_LEN, MAX_DISPLAYNAME_LEN +from synapse.module_api import NOT_SPAM from synapse.storage.state import StateFilter from synapse.types import ( JsonDict, @@ -683,7 +684,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): if target_id == self._server_notices_mxid: raise SynapseError(HTTPStatus.FORBIDDEN, "Cannot invite this user") - block_invite = False + block_invite_code = None if ( self._server_notices_mxid is not None @@ -701,16 +702,19 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): "Blocking invite: user is not admin and non-admin " "invites disabled" ) - block_invite = True + block_invite_code = Codes.FORBIDDEN - if not await self.spam_checker.user_may_invite( + spam_check = await self.spam_checker.user_may_invite( requester.user.to_string(), target_id, room_id - ): + ) + if spam_check != NOT_SPAM: logger.info("Blocking invite due to spam checker") - block_invite = True + block_invite_code = spam_check - if 
block_invite: - raise SynapseError(403, "Invites have been disabled on this server") + if block_invite_code is not None: + raise SynapseError( + 403, "Invites have been disabled on this server", block_invite_code + ) # An empty prev_events list is allowed as long as the auth_event_ids are present if prev_event_ids is not None: @@ -818,11 +822,12 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): # We assume that if the spam checker allowed the user to create # a room then they're allowed to join it. and not new_room - and not await self.spam_checker.user_may_join_room( + ): + spam_check = await self.spam_checker.user_may_join_room( target.to_string(), room_id, is_invited=inviter is not None ) - ): - raise SynapseError(403, "Not allowed to join this room") + if spam_check != NOT_SPAM: + raise SynapseError(403, "Not allowed to join this room", spam_check) # Check if a remote join should be performed. remote_join, remote_room_hosts = await self._should_perform_remote_join( @@ -1369,13 +1374,14 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): ) else: # Check if the spamchecker(s) allow this invite to go through. - if not await self.spam_checker.user_may_send_3pid_invite( + spam_check = await self.spam_checker.user_may_send_3pid_invite( inviter_userid=requester.user.to_string(), medium=medium, address=address, room_id=room_id, - ): - raise SynapseError(403, "Cannot send threepid invite") + ) + if spam_check != NOT_SPAM: + raise SynapseError(403, "Cannot send threepid invite", spam_check) stream_id = await self._make_and_store_3pid_invite( requester, diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 30b2aeffd..6191c2dc9 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -115,6 +115,7 @@ from synapse.types import ( JsonDict, JsonMapping, Requester, + RoomAlias, StateMap, UserID, UserInfo, @@ -163,6 +164,7 @@ __all__ = [ "EventBase", "StateMap", "ProfileInfo", + "RoomAlias", "UserProfile", ] diff --git a/synapse/rest/media/v1/media_storage.py b/synapse/rest/media/v1/media_storage.py index 604f18bf5..913741734 100644 --- a/synapse/rest/media/v1/media_storage.py +++ b/synapse/rest/media/v1/media_storage.py @@ -36,6 +36,7 @@ from twisted.internet.defer import Deferred from twisted.internet.interfaces import IConsumer from twisted.protocols.basic import FileSender +import synapse from synapse.api.errors import NotFoundError from synapse.logging.context import defer_to_thread, make_deferred_yieldable from synapse.util import Clock @@ -145,15 +146,15 @@ class MediaStorage: f.flush() f.close() - spam = await self.spam_checker.check_media_file_for_spam( + spam_check = await self.spam_checker.check_media_file_for_spam( ReadableFileWrapper(self.clock, fname), file_info ) - if spam: + if spam_check != synapse.module_api.NOT_SPAM: logger.info("Blocking media due to spam checker") # Note that we'll delete the stored media, due to the # try/except below. The media also won't be stored in # the DB. 
- raise SpamMediaException() + raise SpamMediaException(errcode=spam_check) for provider in self.storage_providers: await provider.store_file(path, file_info) diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index 4be83dfd6..35c59ee9e 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -18,10 +18,13 @@ """Tests REST events for /rooms paths.""" import json -from typing import Any, Dict, Iterable, List, Optional +from typing import Any, Dict, Iterable, List, Optional, Union from unittest.mock import Mock, call from urllib import parse as urlparse +# `Literal` appears with Python 3.8. +from typing_extensions import Literal + from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin @@ -777,9 +780,11 @@ class RoomsCreateTestCase(RoomBase): channel = self.make_request("POST", "/createRoom", content) self.assertEqual(200, channel.code) - def test_spam_checker_may_join_room(self) -> None: + def test_spam_checker_may_join_room_deprecated(self) -> None: """Tests that the user_may_join_room spam checker callback is correctly bypassed when creating a new room. + + In this test, we use the deprecated API in which callbacks return a bool. """ async def user_may_join_room( @@ -801,6 +806,32 @@ class RoomsCreateTestCase(RoomBase): self.assertEqual(join_mock.call_count, 0) + def test_spam_checker_may_join_room(self) -> None: + """Tests that the user_may_join_room spam checker callback is correctly bypassed + when creating a new room. + + In this test, we use the more recent API in which callbacks return a `Union[Codes, Literal["NOT_SPAM"]]`. + """ + + async def user_may_join_room( + mxid: str, + room_id: str, + is_invite: bool, + ) -> Codes: + return Codes.CONSENT_NOT_GIVEN + + join_mock = Mock(side_effect=user_may_join_room) + self.hs.get_spam_checker()._user_may_join_room_callbacks.append(join_mock) + + channel = self.make_request( + "POST", + "/createRoom", + {}, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + self.assertEqual(join_mock.call_count, 0) + class RoomTopicTestCase(RoomBase): """Tests /rooms/$room_id/topic REST events.""" @@ -1011,9 +1042,11 @@ class RoomJoinTestCase(RoomBase): self.room2 = self.helper.create_room_as(room_creator=self.user1, tok=self.tok1) self.room3 = self.helper.create_room_as(room_creator=self.user1, tok=self.tok1) - def test_spam_checker_may_join_room(self) -> None: + def test_spam_checker_may_join_room_deprecated(self) -> None: """Tests that the user_may_join_room spam checker callback is correctly called and blocks room joins when needed. + + This test uses the deprecated API, in which callbacks return booleans. """ # Register a dummy callback. Make it allow all room joins for now. @@ -1026,6 +1059,8 @@ class RoomJoinTestCase(RoomBase): ) -> bool: return return_value + # `spec` argument is needed for this function mock to have `__qualname__`, which + # is needed for `Measure` metrics buried in SpamChecker. callback_mock = Mock(side_effect=user_may_join_room, spec=lambda *x: None) self.hs.get_spam_checker()._user_may_join_room_callbacks.append(callback_mock) @@ -1068,6 +1103,67 @@ class RoomJoinTestCase(RoomBase): return_value = False self.helper.join(self.room3, self.user2, expect_code=403, tok=self.tok2) + def test_spam_checker_may_join_room(self) -> None: + """Tests that the user_may_join_room spam checker callback is correctly called + and blocks room joins when needed. + + This test uses the latest API to this day, in which callbacks return `NOT_SPAM` or `Codes`. 
+ """ + + # Register a dummy callback. Make it allow all room joins for now. + return_value: Union[Literal["NOT_SPAM"], Codes] = synapse.module_api.NOT_SPAM + + async def user_may_join_room( + userid: str, + room_id: str, + is_invited: bool, + ) -> Union[Literal["NOT_SPAM"], Codes]: + return return_value + + # `spec` argument is needed for this function mock to have `__qualname__`, which + # is needed for `Measure` metrics buried in SpamChecker. + callback_mock = Mock(side_effect=user_may_join_room, spec=lambda *x: None) + self.hs.get_spam_checker()._user_may_join_room_callbacks.append(callback_mock) + + # Join a first room, without being invited to it. + self.helper.join(self.room1, self.user2, tok=self.tok2) + + # Check that the callback was called with the right arguments. + expected_call_args = ( + ( + self.user2, + self.room1, + False, + ), + ) + self.assertEqual( + callback_mock.call_args, + expected_call_args, + callback_mock.call_args, + ) + + # Join a second room, this time with an invite for it. + self.helper.invite(self.room2, self.user1, self.user2, tok=self.tok1) + self.helper.join(self.room2, self.user2, tok=self.tok2) + + # Check that the callback was called with the right arguments. + expected_call_args = ( + ( + self.user2, + self.room2, + True, + ), + ) + self.assertEqual( + callback_mock.call_args, + expected_call_args, + callback_mock.call_args, + ) + + # Now make the callback deny all room joins, and check that a join actually fails. + return_value = Codes.CONSENT_NOT_GIVEN + self.helper.join(self.room3, self.user2, expect_code=403, tok=self.tok2) + class RoomJoinRatelimitTestCase(RoomBase): user_id = "@sid1:red" @@ -2945,9 +3041,14 @@ class ThreepidInviteTestCase(unittest.HomeserverTestCase): self.room_id = self.helper.create_room_as(self.user_id, tok=self.tok) - def test_threepid_invite_spamcheck(self) -> None: + def test_threepid_invite_spamcheck_deprecated(self) -> None: + """ + Test allowing/blocking threepid invites with a spam-check module. + + In this test, we use the deprecated API in which callbacks return a bool. + """ # Mock a few functions to prevent the test from failing due to failing to talk to - # a remote IS. We keep the mock for _mock_make_and_store_3pid_invite around so we + # a remote IS. We keep the mock for make_and_store_3pid_invite around so we # can check its call_count later on during the test. make_invite_mock = Mock(return_value=make_awaitable(0)) self.hs.get_room_member_handler()._make_and_store_3pid_invite = make_invite_mock @@ -3001,3 +3102,67 @@ class ThreepidInviteTestCase(unittest.HomeserverTestCase): # Also check that it stopped before calling _make_and_store_3pid_invite. make_invite_mock.assert_called_once() + + def test_threepid_invite_spamcheck(self) -> None: + """ + Test allowing/blocking threepid invites with a spam-check module. + + In this test, we use the more recent API in which callbacks return a `Union[Codes, Literal["NOT_SPAM"]]`.""" + # Mock a few functions to prevent the test from failing due to failing to talk to + # a remote IS. We keep the mock for make_and_store_3pid_invite around so we + # can check its call_count later on during the test. + make_invite_mock = Mock(return_value=make_awaitable(0)) + self.hs.get_room_member_handler()._make_and_store_3pid_invite = make_invite_mock + self.hs.get_identity_handler().lookup_3pid = Mock( + return_value=make_awaitable(None), + ) + + # Add a mock to the spamchecker callbacks for user_may_send_3pid_invite. Make it + # allow everything for now. 
+ # `spec` argument is needed for this function mock to have `__qualname__`, which + # is needed for `Measure` metrics buried in SpamChecker. + mock = Mock( + return_value=make_awaitable(synapse.module_api.NOT_SPAM), + spec=lambda *x: None, + ) + self.hs.get_spam_checker()._user_may_send_3pid_invite_callbacks.append(mock) + + # Send a 3PID invite into the room and check that it succeeded. + email_to_invite = "teresa@example.com" + channel = self.make_request( + method="POST", + path="/rooms/" + self.room_id + "/invite", + content={ + "id_server": "example.com", + "id_access_token": "sometoken", + "medium": "email", + "address": email_to_invite, + }, + access_token=self.tok, + ) + self.assertEqual(channel.code, 200) + + # Check that the callback was called with the right params. + mock.assert_called_with(self.user_id, "email", email_to_invite, self.room_id) + + # Check that the call to send the invite was made. + make_invite_mock.assert_called_once() + + # Now change the return value of the callback to deny any invite and test that + # we can't send the invite. + mock.return_value = make_awaitable(Codes.CONSENT_NOT_GIVEN) + channel = self.make_request( + method="POST", + path="/rooms/" + self.room_id + "/invite", + content={ + "id_server": "example.com", + "id_access_token": "sometoken", + "medium": "email", + "address": email_to_invite, + }, + access_token=self.tok, + ) + self.assertEqual(channel.code, 403) + + # Also check that it stopped before calling _make_and_store_3pid_invite. + make_invite_mock.assert_called_once() From 92103cb2c8b8bff6b522a7bfa8a3a776b4821b11 Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Tue, 14 Jun 2022 10:51:15 +0200 Subject: [PATCH 30/85] Decouple `synapse.api.auth_blocking.AuthBlocking` from `synapse.api.auth.Auth`. (#13021) --- changelog.d/13021.misc | 1 + synapse/api/auth.py | 14 ------- synapse/handlers/auth.py | 5 ++- synapse/handlers/message.py | 4 +- synapse/handlers/register.py | 3 +- synapse/handlers/room.py | 3 +- synapse/handlers/sync.py | 4 +- synapse/server.py | 5 +++ .../resource_limits_server_notices.py | 4 +- tests/api/test_auth.py | 42 ++++++++++++------- tests/handlers/test_auth.py | 2 +- tests/handlers/test_register.py | 2 +- tests/handlers/test_sync.py | 2 +- .../test_resource_limits_server_notices.py | 22 ++++++---- 14 files changed, 63 insertions(+), 50 deletions(-) create mode 100644 changelog.d/13021.misc diff --git a/changelog.d/13021.misc b/changelog.d/13021.misc new file mode 100644 index 000000000..84c41cdf5 --- /dev/null +++ b/changelog.d/13021.misc @@ -0,0 +1 @@ +Decouple `synapse.api.auth_blocking.AuthBlocking` from `synapse.api.auth.Auth`. 
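
The hunks that follow all apply the same mechanical substitution, so a condensed sketch may help when reading them: callers stop reaching the blocking checks through `Auth` and instead take `AuthBlocking` as a dependency of its own. This is an editorial illustration, not part of the patch; the `MyHandler` class and `do_something` method are hypothetical, while `hs.get_auth_blocking()` and `check_auth_blocking(requester=...)` are taken from the diff itself.

```python
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from synapse.server import HomeServer
    from synapse.types import Requester


class MyHandler:
    """Illustrative only: the dependency pattern this patch establishes."""

    def __init__(self, hs: "HomeServer") -> None:
        # Previously handlers went through Auth, which wrapped an internal
        # AuthBlocking instance (hs.get_auth().check_auth_blocking(...)).
        # After this patch, AuthBlocking is a separately cached dependency
        # obtained directly from the HomeServer.
        self._auth_blocking = hs.get_auth_blocking()

    async def do_something(self, requester: "Requester") -> None:
        # Raises ResourceLimitError when the homeserver is disabled or over
        # its monthly-active-user limit, as in the handlers updated below.
        await self._auth_blocking.check_auth_blocking(requester=requester)
```

The same substitution appears across the handler hunks below (`AuthHandler`, `EventCreationHandler`, `RegistrationHandler`, `RoomCreationHandler`, `SyncHandler`, and `ResourceLimitsServerNotices`), and the tests switch from poking at the private `Auth._auth_blocking` attribute to using the new accessor.
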
diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 5a410f805..c037ccb98 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -20,7 +20,6 @@ from netaddr import IPAddress from twisted.web.server import Request from synapse import event_auth -from synapse.api.auth_blocking import AuthBlocking from synapse.api.constants import EventTypes, HistoryVisibility, Membership from synapse.api.errors import ( AuthError, @@ -67,8 +66,6 @@ class Auth: 10000, "token_cache" ) - self._auth_blocking = AuthBlocking(self.hs) - self._track_appservice_user_ips = hs.config.appservice.track_appservice_user_ips self._track_puppeted_user_ips = hs.config.api.track_puppeted_user_ips self._macaroon_secret_key = hs.config.key.macaroon_secret_key @@ -711,14 +708,3 @@ class Auth: "User %s not in room %s, and room previews are disabled" % (user_id, room_id), ) - - async def check_auth_blocking( - self, - user_id: Optional[str] = None, - threepid: Optional[dict] = None, - user_type: Optional[str] = None, - requester: Optional[Requester] = None, - ) -> None: - await self._auth_blocking.check_auth_blocking( - user_id=user_id, threepid=threepid, user_type=user_type, requester=requester - ) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 6e15028b0..60d13040a 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -199,6 +199,7 @@ class AuthHandler: def __init__(self, hs: "HomeServer"): self.store = hs.get_datastores().main self.auth = hs.get_auth() + self.auth_blocking = hs.get_auth_blocking() self.clock = hs.get_clock() self.checkers: Dict[str, UserInteractiveAuthChecker] = {} for auth_checker_class in INTERACTIVE_AUTH_CHECKERS: @@ -985,7 +986,7 @@ class AuthHandler: not is_appservice_ghost or self.hs.config.appservice.track_appservice_user_ips ): - await self.auth.check_auth_blocking(user_id) + await self.auth_blocking.check_auth_blocking(user_id) access_token = self.generate_access_token(target_user_id_obj) await self.store.add_access_token_to_user( @@ -1439,7 +1440,7 @@ class AuthHandler: except Exception: raise AuthError(403, "Invalid login token", errcode=Codes.FORBIDDEN) - await self.auth.check_auth_blocking(res.user_id) + await self.auth_blocking.check_auth_blocking(res.user_id) return res async def delete_access_token(self, access_token: str) -> None: diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index ad87c4178..189f52fe5 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -444,7 +444,7 @@ _DUMMY_EVENT_ROOM_EXCLUSION_EXPIRY = 7 * 24 * 60 * 60 * 1000 class EventCreationHandler: def __init__(self, hs: "HomeServer"): self.hs = hs - self.auth = hs.get_auth() + self.auth_blocking = hs.get_auth_blocking() self._event_auth_handler = hs.get_event_auth_handler() self.store = hs.get_datastores().main self._storage_controllers = hs.get_storage_controllers() @@ -605,7 +605,7 @@ class EventCreationHandler: Returns: Tuple of created event, Context """ - await self.auth.check_auth_blocking(requester=requester) + await self.auth_blocking.check_auth_blocking(requester=requester) if event_dict["type"] == EventTypes.Create and event_dict["state_key"] == "": room_version_id = event_dict["content"]["room_version"] diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 338204287..c77d18172 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -91,6 +91,7 @@ class RegistrationHandler: self.clock = hs.get_clock() self.hs = hs self.auth = hs.get_auth() + self.auth_blocking = 
hs.get_auth_blocking() self._auth_handler = hs.get_auth_handler() self.profile_handler = hs.get_profile_handler() self.user_directory_handler = hs.get_user_directory_handler() @@ -276,7 +277,7 @@ class RegistrationHandler: # do not check_auth_blocking if the call is coming through the Admin API if not by_admin: - await self.auth.check_auth_blocking(threepid=threepid) + await self.auth_blocking.check_auth_blocking(threepid=threepid) if localpart is not None: await self.check_username(localpart, guest_access_token=guest_access_token) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 42aae4a21..75c0be8c3 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -110,6 +110,7 @@ class RoomCreationHandler: self.store = hs.get_datastores().main self._storage_controllers = hs.get_storage_controllers() self.auth = hs.get_auth() + self.auth_blocking = hs.get_auth_blocking() self.clock = hs.get_clock() self.hs = hs self.spam_checker = hs.get_spam_checker() @@ -706,7 +707,7 @@ class RoomCreationHandler: """ user_id = requester.user.to_string() - await self.auth.check_auth_blocking(requester=requester) + await self.auth_blocking.check_auth_blocking(requester=requester) if ( self._server_notices_mxid is not None diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index b4ead79f9..af19c513b 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -237,7 +237,7 @@ class SyncHandler: self.event_sources = hs.get_event_sources() self.clock = hs.get_clock() self.state = hs.get_state_handler() - self.auth = hs.get_auth() + self.auth_blocking = hs.get_auth_blocking() self._storage_controllers = hs.get_storage_controllers() self._state_storage_controller = self._storage_controllers.state @@ -280,7 +280,7 @@ class SyncHandler: # not been exceeded (if not part of the group by this point, almost certain # auth_blocking will occur) user_id = sync_config.user.to_string() - await self.auth.check_auth_blocking(requester=requester) + await self.auth_blocking.check_auth_blocking(requester=requester) res = await self.response_cache.wrap( sync_config.request_key, diff --git a/synapse/server.py b/synapse/server.py index a66ec228d..a6a415aea 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -29,6 +29,7 @@ from twisted.web.iweb import IPolicyForHTTPS from twisted.web.resource import Resource from synapse.api.auth import Auth +from synapse.api.auth_blocking import AuthBlocking from synapse.api.filtering import Filtering from synapse.api.ratelimiting import Ratelimiter, RequestRatelimiter from synapse.appservice.api import ApplicationServiceApi @@ -379,6 +380,10 @@ class HomeServer(metaclass=abc.ABCMeta): def get_auth(self) -> Auth: return Auth(self) + @cache_in_self + def get_auth_blocking(self) -> AuthBlocking: + return AuthBlocking(self) + @cache_in_self def get_http_client_context_factory(self) -> IPolicyForHTTPS: if self.config.tls.use_insecure_ssl_client_just_for_testing_do_not_use: diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py index 686302077..3134cd2d3 100644 --- a/synapse/server_notices/resource_limits_server_notices.py +++ b/synapse/server_notices/resource_limits_server_notices.py @@ -37,7 +37,7 @@ class ResourceLimitsServerNotices: self._server_notices_manager = hs.get_server_notices_manager() self._store = hs.get_datastores().main self._storage_controllers = hs.get_storage_controllers() - self._auth = hs.get_auth() + self._auth_blocking = hs.get_auth_blocking() 
self._config = hs.config self._resouce_limited = False self._account_data_handler = hs.get_account_data_handler() @@ -91,7 +91,7 @@ class ResourceLimitsServerNotices: # Normally should always pass in user_id to check_auth_blocking # if you have it, but in this case are checking what would happen # to other users if they were to arrive. - await self._auth.check_auth_blocking() + await self._auth_blocking.check_auth_blocking() except ResourceLimitError as e: limit_msg = e.msg limit_type = e.limit_type diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index bc75ddd3e..54af9089e 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -19,6 +19,7 @@ import pymacaroons from twisted.test.proto_helpers import MemoryReactor from synapse.api.auth import Auth +from synapse.api.auth_blocking import AuthBlocking from synapse.api.constants import UserTypes from synapse.api.errors import ( AuthError, @@ -49,7 +50,7 @@ class AuthTestCase(unittest.HomeserverTestCase): # AuthBlocking reads from the hs' config on initialization. We need to # modify its config instead of the hs' - self.auth_blocking = self.auth._auth_blocking + self.auth_blocking = AuthBlocking(hs) self.test_user = "@foo:bar" self.test_token = b"_test_token_" @@ -362,20 +363,22 @@ class AuthTestCase(unittest.HomeserverTestCase): small_number_of_users = 1 # Ensure no error thrown - self.get_success(self.auth.check_auth_blocking()) + self.get_success(self.auth_blocking.check_auth_blocking()) self.auth_blocking._limit_usage_by_mau = True self.store.get_monthly_active_count = simple_async_mock(lots_of_users) - e = self.get_failure(self.auth.check_auth_blocking(), ResourceLimitError) + e = self.get_failure( + self.auth_blocking.check_auth_blocking(), ResourceLimitError + ) self.assertEqual(e.value.admin_contact, self.hs.config.server.admin_contact) self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) self.assertEqual(e.value.code, 403) # Ensure does not throw an error self.store.get_monthly_active_count = simple_async_mock(small_number_of_users) - self.get_success(self.auth.check_auth_blocking()) + self.get_success(self.auth_blocking.check_auth_blocking()) def test_blocking_mau__depending_on_user_type(self): self.auth_blocking._max_mau_value = 50 @@ -383,15 +386,18 @@ class AuthTestCase(unittest.HomeserverTestCase): self.store.get_monthly_active_count = simple_async_mock(100) # Support users allowed - self.get_success(self.auth.check_auth_blocking(user_type=UserTypes.SUPPORT)) + self.get_success( + self.auth_blocking.check_auth_blocking(user_type=UserTypes.SUPPORT) + ) self.store.get_monthly_active_count = simple_async_mock(100) # Bots not allowed self.get_failure( - self.auth.check_auth_blocking(user_type=UserTypes.BOT), ResourceLimitError + self.auth_blocking.check_auth_blocking(user_type=UserTypes.BOT), + ResourceLimitError, ) self.store.get_monthly_active_count = simple_async_mock(100) # Real users not allowed - self.get_failure(self.auth.check_auth_blocking(), ResourceLimitError) + self.get_failure(self.auth_blocking.check_auth_blocking(), ResourceLimitError) def test_blocking_mau__appservice_requester_allowed_when_not_tracking_ips(self): self.auth_blocking._max_mau_value = 50 @@ -419,7 +425,7 @@ class AuthTestCase(unittest.HomeserverTestCase): app_service=appservice, authenticated_entity="@appservice:server", ) - self.get_success(self.auth.check_auth_blocking(requester=requester)) + self.get_success(self.auth_blocking.check_auth_blocking(requester=requester)) def 
test_blocking_mau__appservice_requester_disallowed_when_tracking_ips(self): self.auth_blocking._max_mau_value = 50 @@ -448,7 +454,8 @@ class AuthTestCase(unittest.HomeserverTestCase): authenticated_entity="@appservice:server", ) self.get_failure( - self.auth.check_auth_blocking(requester=requester), ResourceLimitError + self.auth_blocking.check_auth_blocking(requester=requester), + ResourceLimitError, ) def test_reserved_threepid(self): @@ -459,18 +466,21 @@ class AuthTestCase(unittest.HomeserverTestCase): unknown_threepid = {"medium": "email", "address": "unreserved@server.com"} self.auth_blocking._mau_limits_reserved_threepids = [threepid] - self.get_failure(self.auth.check_auth_blocking(), ResourceLimitError) + self.get_failure(self.auth_blocking.check_auth_blocking(), ResourceLimitError) self.get_failure( - self.auth.check_auth_blocking(threepid=unknown_threepid), ResourceLimitError + self.auth_blocking.check_auth_blocking(threepid=unknown_threepid), + ResourceLimitError, ) - self.get_success(self.auth.check_auth_blocking(threepid=threepid)) + self.get_success(self.auth_blocking.check_auth_blocking(threepid=threepid)) def test_hs_disabled(self): self.auth_blocking._hs_disabled = True self.auth_blocking._hs_disabled_message = "Reason for being disabled" - e = self.get_failure(self.auth.check_auth_blocking(), ResourceLimitError) + e = self.get_failure( + self.auth_blocking.check_auth_blocking(), ResourceLimitError + ) self.assertEqual(e.value.admin_contact, self.hs.config.server.admin_contact) self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) self.assertEqual(e.value.code, 403) @@ -485,7 +495,9 @@ class AuthTestCase(unittest.HomeserverTestCase): self.auth_blocking._hs_disabled = True self.auth_blocking._hs_disabled_message = "Reason for being disabled" - e = self.get_failure(self.auth.check_auth_blocking(), ResourceLimitError) + e = self.get_failure( + self.auth_blocking.check_auth_blocking(), ResourceLimitError + ) self.assertEqual(e.value.admin_contact, self.hs.config.server.admin_contact) self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) self.assertEqual(e.value.code, 403) @@ -495,4 +507,4 @@ class AuthTestCase(unittest.HomeserverTestCase): user = "@user:server" self.auth_blocking._server_notices_mxid = user self.auth_blocking._hs_disabled_message = "Reason for being disabled" - self.get_success(self.auth.check_auth_blocking(user)) + self.get_success(self.auth_blocking.check_auth_blocking(user)) diff --git a/tests/handlers/test_auth.py b/tests/handlers/test_auth.py index 67a782976..7106799d4 100644 --- a/tests/handlers/test_auth.py +++ b/tests/handlers/test_auth.py @@ -38,7 +38,7 @@ class AuthTestCase(unittest.HomeserverTestCase): # MAU tests # AuthBlocking reads from the hs' config on initialization. 
We need to # modify its config instead of the hs' - self.auth_blocking = hs.get_auth()._auth_blocking + self.auth_blocking = hs.get_auth_blocking() self.auth_blocking._max_mau_value = 50 self.small_number_of_users = 1 diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index b6ba19c73..23f35d5bf 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -699,7 +699,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase): """ if localpart is None: raise SynapseError(400, "Request must include user id") - await self.hs.get_auth().check_auth_blocking() + await self.hs.get_auth_blocking().check_auth_blocking() need_register = True try: diff --git a/tests/handlers/test_sync.py b/tests/handlers/test_sync.py index db3302a4c..ecc7cc646 100644 --- a/tests/handlers/test_sync.py +++ b/tests/handlers/test_sync.py @@ -45,7 +45,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase): # AuthBlocking reads from the hs' config on initialization. We need to # modify its config instead of the hs' - self.auth_blocking = self.hs.get_auth()._auth_blocking + self.auth_blocking = self.hs.get_auth_blocking() def test_wait_for_sync_for_user_auth_blocking(self): user_id1 = "@user1:test" diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py index 07e29788e..e07ae78fc 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -96,7 +96,9 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): def test_maybe_send_server_notice_to_user_remove_blocked_notice(self): """Test when user has blocked notice, but should have it removed""" - self._rlsn._auth.check_auth_blocking = Mock(return_value=make_awaitable(None)) + self._rlsn._auth_blocking.check_auth_blocking = Mock( + return_value=make_awaitable(None) + ) mock_event = Mock( type=EventTypes.Message, content={"msgtype": ServerNoticeMsgType} ) @@ -112,7 +114,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): """ Test when user has blocked notice, but notice ought to be there (NOOP) """ - self._rlsn._auth.check_auth_blocking = Mock( + self._rlsn._auth_blocking.check_auth_blocking = Mock( return_value=make_awaitable(None), side_effect=ResourceLimitError(403, "foo"), ) @@ -132,7 +134,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): """ Test when user does not have blocked notice, but should have one """ - self._rlsn._auth.check_auth_blocking = Mock( + self._rlsn._auth_blocking.check_auth_blocking = Mock( return_value=make_awaitable(None), side_effect=ResourceLimitError(403, "foo"), ) @@ -145,7 +147,9 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): """ Test when user does not have blocked notice, nor should they (NOOP) """ - self._rlsn._auth.check_auth_blocking = Mock(return_value=make_awaitable(None)) + self._rlsn._auth_blocking.check_auth_blocking = Mock( + return_value=make_awaitable(None) + ) self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id)) @@ -156,7 +160,9 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): Test when user is not part of the MAU cohort - this should not ever happen - but ... 
""" - self._rlsn._auth.check_auth_blocking = Mock(return_value=make_awaitable(None)) + self._rlsn._auth_blocking.check_auth_blocking = Mock( + return_value=make_awaitable(None) + ) self._rlsn._store.user_last_seen_monthly_active = Mock( return_value=make_awaitable(None) ) @@ -170,7 +176,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): Test that when server is over MAU limit and alerting is suppressed, then an alert message is not sent into the room """ - self._rlsn._auth.check_auth_blocking = Mock( + self._rlsn._auth_blocking.check_auth_blocking = Mock( return_value=make_awaitable(None), side_effect=ResourceLimitError( 403, "foo", limit_type=LimitBlockingTypes.MONTHLY_ACTIVE_USER @@ -185,7 +191,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): """ Test that when a server is disabled, that MAU limit alerting is ignored. """ - self._rlsn._auth.check_auth_blocking = Mock( + self._rlsn._auth_blocking.check_auth_blocking = Mock( return_value=make_awaitable(None), side_effect=ResourceLimitError( 403, "foo", limit_type=LimitBlockingTypes.HS_DISABLED @@ -202,7 +208,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): When the room is already in a blocked state, test that when alerting is suppressed that the room is returned to an unblocked state. """ - self._rlsn._auth.check_auth_blocking = Mock( + self._rlsn._auth_blocking.check_auth_blocking = Mock( return_value=make_awaitable(None), side_effect=ResourceLimitError( 403, "foo", limit_type=LimitBlockingTypes.MONTHLY_ACTIVE_USER From a542a92c5742464712b2660abbda6c3c73c93a9f Mon Sep 17 00:00:00 2001 From: Sami Olmari Date: Tue, 14 Jun 2022 13:35:22 +0300 Subject: [PATCH 31/85] Mention removed community/group worker endpoints in upgrade.md (#13023) --- changelog.d/13023.doc | 1 + docs/upgrade.md | 16 ++++++++++++++++ 2 files changed, 17 insertions(+) create mode 100644 changelog.d/13023.doc diff --git a/changelog.d/13023.doc b/changelog.d/13023.doc new file mode 100644 index 000000000..5589c7492 --- /dev/null +++ b/changelog.d/13023.doc @@ -0,0 +1 @@ +Mention removed community/group worker endpoints in upgrade.md. Contributed by @olmari. \ No newline at end of file diff --git a/docs/upgrade.md b/docs/upgrade.md index 3ade86b1a..2803de8b8 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -91,6 +91,22 @@ process, for example: # Upgrading to v1.61.0 +## Removal of depracated community/groups + +This release of Synapse will remove deprecated community/groups from codebase. 
+
+### Worker endpoints
+
+For those who have deployed workers, the following worker endpoints no longer
+exist and should be removed from reverse proxy configurations:
+
+- `^/_matrix/federation/v1/get_groups_publicised$`
+- `^/_matrix/client/(r0|v3|unstable)/joined_groups$`
+- `^/_matrix/client/(r0|v3|unstable)/publicised_groups$`
+- `^/_matrix/client/(r0|v3|unstable)/publicised_groups/`
+- `^/_matrix/federation/v1/groups/`
+- `^/_matrix/client/(r0|v3|unstable)/groups/`
+
 ## New signatures for spam checker callbacks
 
 As a followup to changes in v1.60.0, the following spam-checker callbacks have changed signature:

From 09a3c5ce0b9daeda30f7168897be26912a489da7 Mon Sep 17 00:00:00 2001
From: reivilibre
Date: Tue, 14 Jun 2022 13:13:35 +0100
Subject: [PATCH 32/85] Fix Complement runs always being Postgres (#13034)

* Fix Complement runs always being Postgres

* Newsfile

Signed-off-by: Olivier Wilkinson (reivilibre)
---
 .github/workflows/tests.yml | 2 +-
 changelog.d/13034.misc      | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/13034.misc

diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 0b70ffc64..4ce27ff41 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -347,7 +347,7 @@ jobs:
       - run: |
           set -o pipefail
-          POSTGRES=${{ (matrix.database == 'Postgres') && 1 }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
+          POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
         shell: bash
         name: Run Complement Tests

diff --git a/changelog.d/13034.misc b/changelog.d/13034.misc
new file mode 100644
index 000000000..cc2823e12
--- /dev/null
+++ b/changelog.d/13034.misc
@@ -0,0 +1 @@
+Enable testing against PostgreSQL databases in Complement CI.
\ No newline at end of file

From fe1daad67237c2154a3d8d8cdf6c603f0d33682e Mon Sep 17 00:00:00 2001
From: Quentin Gliech
Date: Tue, 14 Jun 2022 15:12:08 +0200
Subject: [PATCH 33/85] Move the "email unsubscribe" resource, refactor the
 macaroon generator & simplify the access token verification logic. (#12986)

This simplifies the access token verification logic by removing the
`rights` parameter, which was only ever used for the unsubscribe link in
email notifications. The latter has been moved under the `/_synapse`
namespace, since it is not a standard API.

This also makes the email unsubscribe link more secure, by embedding the
app_id and pushkey in the macaroon and verifying it. This prevents the
user from tampering with the query parameters of that unsubscribe link.

Macaroon generation is refactored:

- Centralised all macaroon generation and verification logic in the
  `MacaroonGenerator`
- Moved it to `synapse.util.macaroons`
- Changed the constructor to require only a `Clock`, hostname, and a
  secret key (instead of a full `HomeServer`).
- Added tests for all methods.
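
For illustration, the consolidated generator can be exercised much as the
new unit tests do. The server name and secret below are placeholder values,
and `get_clock` is the existing test helper:

    from tests.server import get_clock
    from synapse.util.macaroons import MacaroonGenerator

    reactor, clock = get_clock()

    # The generator now needs only a clock, the server name and the secret
    # key, rather than a full HomeServer.
    generator = MacaroonGenerator(clock, "example.com", b"macaroon_secret")

    # Mint a token for an email unsubscribe link; the app_id and pushkey are
    # embedded as caveats, so the link's query parameters cannot be tampered
    # with.
    token = generator.generate_delete_pusher_token(
        "@user:example.com", "m.email", "user@example.com"
    )

    # Verification checks the embedded app_id/pushkey and returns the user_id.
    user_id = generator.verify_delete_pusher_token(
        token, "m.email", "user@example.com"
    )
    assert user_id == "@user:example.com"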
---
 changelog.d/12986.misc                     |   1 +
 synapse/api/auth.py                        | 191 +++----------
 synapse/config/key.py                      |   6 +-
 synapse/handlers/auth.py                   | 109 +-------
 synapse/handlers/oidc.py                   | 131 +--------
 synapse/push/mailer.py                     |   7 +-
 synapse/rest/client/pusher.py              |  50 +---
 synapse/rest/synapse/client/__init__.py    |   3 +
 synapse/rest/synapse/client/unsubscribe.py |  64 +++++
 synapse/server.py                          |   7 +-
 synapse/util/macaroons.py                  | 308 +++++++++++++++++++++
 tests/api/test_auth.py                     |  15 +-
 tests/handlers/test_oidc.py                |   7 +-
 tests/test_state.py                        |  11 +-
 tests/unittest.py                          |   2 +-
 tests/util/test_macaroons.py               | 146 ++++++++++
 16 files changed, 618 insertions(+), 440 deletions(-)
 create mode 100644 changelog.d/12986.misc
 create mode 100644 synapse/rest/synapse/client/unsubscribe.py
 create mode 100644 tests/util/test_macaroons.py

diff --git a/changelog.d/12986.misc b/changelog.d/12986.misc
new file mode 100644
index 000000000..937b88802
--- /dev/null
+++ b/changelog.d/12986.misc
@@ -0,0 +1 @@
+Refactor macaroon token generation and move the unsubscribe link in notification emails to `/_synapse/client/unsubscribe`.
diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index c037ccb98..6e6eaf380 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -33,8 +33,6 @@ from synapse.http.site import SynapseRequest
 from synapse.logging.opentracing import active_span, force_tracing, start_active_span
 from synapse.storage.databases.main.registration import TokenLookupResult
 from synapse.types import Requester, UserID, create_requester
-from synapse.util.caches.lrucache import LruCache
-from synapse.util.macaroons import get_value_from_macaroon, satisfy_expiry
 
 if TYPE_CHECKING:
     from synapse.server import HomeServer
@@ -46,10 +44,6 @@ logger = logging.getLogger(__name__)
 GUEST_DEVICE_ID = "guest_device"
 
 
-class _InvalidMacaroonException(Exception):
-    pass
-
-
 class Auth:
     """
     This class contains functions for authenticating users of our client-server API.
@@ -61,14 +55,10 @@ class Auth:
         self.store = hs.get_datastores().main
         self._account_validity_handler = hs.get_account_validity_handler()
         self._storage_controllers = hs.get_storage_controllers()
-
-        self.token_cache: LruCache[str, Tuple[str, bool]] = LruCache(
-            10000, "token_cache"
-        )
+        self._macaroon_generator = hs.get_macaroon_generator()
 
         self._track_appservice_user_ips = hs.config.appservice.track_appservice_user_ips
         self._track_puppeted_user_ips = hs.config.api.track_puppeted_user_ips
-        self._macaroon_secret_key = hs.config.key.macaroon_secret_key
         self._force_tracing_for_users = hs.config.tracing.force_tracing_for_users
 
     async def check_user_in_room(
@@ -123,7 +113,6 @@ class Auth:
         self,
         request: SynapseRequest,
         allow_guest: bool = False,
-        rights: str = "access",
         allow_expired: bool = False,
     ) -> Requester:
         """Get a registered user's ID.
@@ -132,7 +121,6 @@ class Auth:
             request: An HTTP request with an access_token query parameter.
             allow_guest: If False, will raise an AuthError if the user making the
                 request is a guest.
-            rights: The operation being performed; the access token must allow this
             allow_expired: If True, allow the request through even if the account
                 is expired, or session token lifetime has ended. Note that
                 /login will deliver access tokens regardless of expiration.
@@ -147,7 +135,7 @@ class Auth: parent_span = active_span() with start_active_span("get_user_by_req"): requester = await self._wrapped_get_user_by_req( - request, allow_guest, rights, allow_expired + request, allow_guest, allow_expired ) if parent_span: @@ -173,7 +161,6 @@ class Auth: self, request: SynapseRequest, allow_guest: bool, - rights: str, allow_expired: bool, ) -> Requester: """Helper for get_user_by_req @@ -211,7 +198,7 @@ class Auth: return requester user_info = await self.get_user_by_access_token( - access_token, rights, allow_expired=allow_expired + access_token, allow_expired=allow_expired ) token_id = user_info.token_id is_guest = user_info.is_guest @@ -391,15 +378,12 @@ class Auth: async def get_user_by_access_token( self, token: str, - rights: str = "access", allow_expired: bool = False, ) -> TokenLookupResult: """Validate access token and get user_id from it Args: token: The access token to get the user by - rights: The operation being performed; the access token must - allow this allow_expired: If False, raises an InvalidClientTokenError if the token is expired @@ -410,70 +394,55 @@ class Auth: is invalid """ - if rights == "access": - # First look in the database to see if the access token is present - # as an opaque token. - r = await self.store.get_user_by_access_token(token) - if r: - valid_until_ms = r.valid_until_ms - if ( - not allow_expired - and valid_until_ms is not None - and valid_until_ms < self.clock.time_msec() - ): - # there was a valid access token, but it has expired. - # soft-logout the user. - raise InvalidClientTokenError( - msg="Access token has expired", soft_logout=True - ) + # First look in the database to see if the access token is present + # as an opaque token. + r = await self.store.get_user_by_access_token(token) + if r: + valid_until_ms = r.valid_until_ms + if ( + not allow_expired + and valid_until_ms is not None + and valid_until_ms < self.clock.time_msec() + ): + # there was a valid access token, but it has expired. + # soft-logout the user. + raise InvalidClientTokenError( + msg="Access token has expired", soft_logout=True + ) - return r + return r # If the token isn't found in the database, then it could still be a - # macaroon, so we check that here. + # macaroon for a guest, so we check that here. try: - user_id, guest = self._parse_and_validate_macaroon(token, rights) + user_id = self._macaroon_generator.verify_guest_token(token) - if rights == "access": - if not guest: - # non-guest access tokens must be in the database - logger.warning("Unrecognised access token - not in store.") - raise InvalidClientTokenError() - - # Guest access tokens are not stored in the database (there can - # only be one access token per guest, anyway). - # - # In order to prevent guest access tokens being used as regular - # user access tokens (and hence getting around the invalidation - # process), we look up the user id and check that it is indeed - # a guest user. - # - # It would of course be much easier to store guest access - # tokens in the database as well, but that would break existing - # guest tokens. 
- stored_user = await self.store.get_user_by_id(user_id) - if not stored_user: - raise InvalidClientTokenError("Unknown user_id %s" % user_id) - if not stored_user["is_guest"]: - raise InvalidClientTokenError( - "Guest access token used for regular user" - ) - - ret = TokenLookupResult( - user_id=user_id, - is_guest=True, - # all guests get the same device id - device_id=GUEST_DEVICE_ID, + # Guest access tokens are not stored in the database (there can + # only be one access token per guest, anyway). + # + # In order to prevent guest access tokens being used as regular + # user access tokens (and hence getting around the invalidation + # process), we look up the user id and check that it is indeed + # a guest user. + # + # It would of course be much easier to store guest access + # tokens in the database as well, but that would break existing + # guest tokens. + stored_user = await self.store.get_user_by_id(user_id) + if not stored_user: + raise InvalidClientTokenError("Unknown user_id %s" % user_id) + if not stored_user["is_guest"]: + raise InvalidClientTokenError( + "Guest access token used for regular user" ) - elif rights == "delete_pusher": - # We don't store these tokens in the database - ret = TokenLookupResult(user_id=user_id, is_guest=False) - else: - raise RuntimeError("Unknown rights setting %s", rights) - return ret + return TokenLookupResult( + user_id=user_id, + is_guest=True, + # all guests get the same device id + device_id=GUEST_DEVICE_ID, + ) except ( - _InvalidMacaroonException, pymacaroons.exceptions.MacaroonException, TypeError, ValueError, @@ -485,78 +454,6 @@ class Auth: ) raise InvalidClientTokenError("Invalid access token passed.") - def _parse_and_validate_macaroon( - self, token: str, rights: str = "access" - ) -> Tuple[str, bool]: - """Takes a macaroon and tries to parse and validate it. This is cached - if and only if rights == access and there isn't an expiry. - - On invalid macaroon raises _InvalidMacaroonException - - Returns: - (user_id, is_guest) - """ - if rights == "access": - cached = self.token_cache.get(token, None) - if cached: - return cached - - try: - macaroon = pymacaroons.Macaroon.deserialize(token) - except Exception: # deserialize can throw more-or-less anything - # The access token doesn't look like a macaroon. - raise _InvalidMacaroonException() - - try: - user_id = get_value_from_macaroon(macaroon, "user_id") - - guest = False - for caveat in macaroon.caveats: - if caveat.caveat_id == "guest = true": - guest = True - - self.validate_macaroon(macaroon, rights, user_id=user_id) - except ( - pymacaroons.exceptions.MacaroonException, - KeyError, - TypeError, - ValueError, - ): - raise InvalidClientTokenError("Invalid macaroon passed.") - - if rights == "access": - self.token_cache[token] = (user_id, guest) - - return user_id, guest - - def validate_macaroon( - self, macaroon: pymacaroons.Macaroon, type_string: str, user_id: str - ) -> None: - """ - validate that a Macaroon is understood by and was signed by this server. - - Args: - macaroon: The macaroon to validate - type_string: The kind of token required (e.g. "access", "delete_pusher") - user_id: The user_id required - """ - v = pymacaroons.Verifier() - - # the verifier runs a test for every caveat on the macaroon, to check - # that it is met for the current request. Each caveat must match at - # least one of the predicates specified by satisfy_exact or - # specify_general. 
- v.satisfy_exact("gen = 1") - v.satisfy_exact("type = " + type_string) - v.satisfy_exact("user_id = %s" % user_id) - v.satisfy_exact("guest = true") - satisfy_expiry(v, self.clock.time_msec) - - # access_tokens include a nonce for uniqueness: any value is acceptable - v.satisfy_general(lambda c: c.startswith("nonce = ")) - - v.verify(macaroon, self._macaroon_secret_key) - def get_appservice_by_req(self, request: SynapseRequest) -> ApplicationService: token = self.get_access_token_from_request(request) service = self.store.get_app_service_by_token(token) diff --git a/synapse/config/key.py b/synapse/config/key.py index ada65f6dd..b250912e3 100644 --- a/synapse/config/key.py +++ b/synapse/config/key.py @@ -159,16 +159,18 @@ class KeyConfig(Config): ) ) - self.macaroon_secret_key = config.get( + macaroon_secret_key: Optional[str] = config.get( "macaroon_secret_key", self.root.registration.registration_shared_secret ) - if not self.macaroon_secret_key: + if not macaroon_secret_key: # Unfortunately, there are people out there that don't have this # set. Lets just be "nice" and derive one from their secret key. logger.warning("Config is missing macaroon_secret_key") seed = bytes(self.signing_key[0]) self.macaroon_secret_key = hashlib.sha256(seed).digest() + else: + self.macaroon_secret_key = macaroon_secret_key.encode("utf-8") # a secret which is used to calculate HMACs for form values, to stop # falsification of values diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 60d13040a..3d83236b0 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -37,9 +37,7 @@ from typing import ( import attr import bcrypt -import pymacaroons import unpaddedbase64 -from pymacaroons.exceptions import MacaroonVerificationFailedException from twisted.internet.defer import CancelledError from twisted.web.server import Request @@ -69,7 +67,7 @@ from synapse.storage.roommember import ProfileInfo from synapse.types import JsonDict, Requester, UserID from synapse.util import stringutils as stringutils from synapse.util.async_helpers import delay_cancellation, maybe_awaitable -from synapse.util.macaroons import get_value_from_macaroon, satisfy_expiry +from synapse.util.macaroons import LoginTokenAttributes from synapse.util.msisdn import phone_number_to_msisdn from synapse.util.stringutils import base62_encode from synapse.util.threepids import canonicalise_email @@ -180,19 +178,6 @@ class SsoLoginExtraAttributes: extra_attributes: JsonDict -@attr.s(slots=True, frozen=True, auto_attribs=True) -class LoginTokenAttributes: - """Data we store in a short-term login token""" - - user_id: str - - auth_provider_id: str - """The SSO Identity Provider that the user authenticated with, to get this token.""" - - auth_provider_session_id: Optional[str] - """The session ID advertised by the SSO Identity Provider.""" - - class AuthHandler: SESSION_EXPIRE_MS = 48 * 60 * 60 * 1000 @@ -1831,98 +1816,6 @@ class AuthHandler: return urllib.parse.urlunparse(url_parts) -@attr.s(slots=True, auto_attribs=True) -class MacaroonGenerator: - hs: "HomeServer" - - def generate_guest_access_token(self, user_id: str) -> str: - macaroon = self._generate_base_macaroon(user_id) - macaroon.add_first_party_caveat("type = access") - # Include a nonce, to make sure that each login gets a different - # access token. 
- macaroon.add_first_party_caveat( - "nonce = %s" % (stringutils.random_string_with_symbols(16),) - ) - macaroon.add_first_party_caveat("guest = true") - return macaroon.serialize() - - def generate_short_term_login_token( - self, - user_id: str, - auth_provider_id: str, - auth_provider_session_id: Optional[str] = None, - duration_in_ms: int = (2 * 60 * 1000), - ) -> str: - macaroon = self._generate_base_macaroon(user_id) - macaroon.add_first_party_caveat("type = login") - now = self.hs.get_clock().time_msec() - expiry = now + duration_in_ms - macaroon.add_first_party_caveat("time < %d" % (expiry,)) - macaroon.add_first_party_caveat("auth_provider_id = %s" % (auth_provider_id,)) - if auth_provider_session_id is not None: - macaroon.add_first_party_caveat( - "auth_provider_session_id = %s" % (auth_provider_session_id,) - ) - return macaroon.serialize() - - def verify_short_term_login_token(self, token: str) -> LoginTokenAttributes: - """Verify a short-term-login macaroon - - Checks that the given token is a valid, unexpired short-term-login token - minted by this server. - - Args: - token: the login token to verify - - Returns: - the user_id that this token is valid for - - Raises: - MacaroonVerificationFailedException if the verification failed - """ - macaroon = pymacaroons.Macaroon.deserialize(token) - user_id = get_value_from_macaroon(macaroon, "user_id") - auth_provider_id = get_value_from_macaroon(macaroon, "auth_provider_id") - - auth_provider_session_id: Optional[str] = None - try: - auth_provider_session_id = get_value_from_macaroon( - macaroon, "auth_provider_session_id" - ) - except MacaroonVerificationFailedException: - pass - - v = pymacaroons.Verifier() - v.satisfy_exact("gen = 1") - v.satisfy_exact("type = login") - v.satisfy_general(lambda c: c.startswith("user_id = ")) - v.satisfy_general(lambda c: c.startswith("auth_provider_id = ")) - v.satisfy_general(lambda c: c.startswith("auth_provider_session_id = ")) - satisfy_expiry(v, self.hs.get_clock().time_msec) - v.verify(macaroon, self.hs.config.key.macaroon_secret_key) - - return LoginTokenAttributes( - user_id=user_id, - auth_provider_id=auth_provider_id, - auth_provider_session_id=auth_provider_session_id, - ) - - def generate_delete_pusher_token(self, user_id: str) -> str: - macaroon = self._generate_base_macaroon(user_id) - macaroon.add_first_party_caveat("type = delete_pusher") - return macaroon.serialize() - - def _generate_base_macaroon(self, user_id: str) -> pymacaroons.Macaroon: - macaroon = pymacaroons.Macaroon( - location=self.hs.config.server.server_name, - identifier="key", - key=self.hs.config.key.macaroon_secret_key, - ) - macaroon.add_first_party_caveat("gen = 1") - macaroon.add_first_party_caveat("user_id = %s" % (user_id,)) - return macaroon - - def load_legacy_password_auth_providers(hs: "HomeServer") -> None: module_api = hs.get_module_api() for module, config in hs.config.authproviders.password_providers: diff --git a/synapse/handlers/oidc.py b/synapse/handlers/oidc.py index 9de61d554..d7a822690 100644 --- a/synapse/handlers/oidc.py +++ b/synapse/handlers/oidc.py @@ -18,7 +18,6 @@ from typing import TYPE_CHECKING, Any, Dict, Generic, List, Optional, TypeVar, U from urllib.parse import urlencode, urlparse import attr -import pymacaroons from authlib.common.security import generate_token from authlib.jose import JsonWebToken, jwt from authlib.oauth2.auth import ClientAuth @@ -44,7 +43,7 @@ from synapse.logging.context import make_deferred_yieldable from synapse.types import JsonDict, UserID, 
map_username_to_mxid_localpart
 from synapse.util import Clock, json_decoder
 from synapse.util.caches.cached_call import RetryOnExceptionCachedCall
-from synapse.util.macaroons import get_value_from_macaroon, satisfy_expiry
+from synapse.util.macaroons import MacaroonGenerator, OidcSessionData
 from synapse.util.templates import _localpart_from_email_filter
 
 if TYPE_CHECKING:
@@ -105,9 +104,10 @@ class OidcHandler:
         # we should not have been instantiated if there is no configured provider.
         assert provider_confs
 
-        self._token_generator = OidcSessionTokenGenerator(hs)
+        self._macaroon_generator = hs.get_macaroon_generator()
         self._providers: Dict[str, "OidcProvider"] = {
-            p.idp_id: OidcProvider(hs, self._token_generator, p) for p in provider_confs
+            p.idp_id: OidcProvider(hs, self._macaroon_generator, p)
+            for p in provider_confs
         }
 
     async def load_metadata(self) -> None:
@@ -216,7 +216,7 @@ class OidcHandler:
 
         # Deserialize the session token and verify it.
         try:
-            session_data = self._token_generator.verify_oidc_session_token(
+            session_data = self._macaroon_generator.verify_oidc_session_token(
                 session, state
             )
         except (MacaroonInitException, MacaroonDeserializationException, KeyError) as e:
@@ -271,12 +271,12 @@ class OidcProvider:
     def __init__(
         self,
         hs: "HomeServer",
-        token_generator: "OidcSessionTokenGenerator",
+        macaroon_generator: MacaroonGenerator,
         provider: OidcProviderConfig,
     ):
         self._store = hs.get_datastores().main
 
-        self._token_generator = token_generator
+        self._macaroon_generator = macaroon_generator
 
         self._config = provider
         self._callback_url: str = hs.config.oidc.oidc_callback_url
@@ -761,7 +761,7 @@ class OidcProvider:
         if not client_redirect_url:
             client_redirect_url = b""
 
-        cookie = self._token_generator.generate_oidc_session_token(
+        cookie = self._macaroon_generator.generate_oidc_session_token(
             state=state,
             session_data=OidcSessionData(
                 idp_id=self.idp_id,
@@ -1112,121 +1112,6 @@ class JwtClientSecret:
             return self._cached_secret
 
 
-class OidcSessionTokenGenerator:
-    """Methods for generating and checking OIDC Session cookies."""
-
-    def __init__(self, hs: "HomeServer"):
-        self._clock = hs.get_clock()
-        self._server_name = hs.hostname
-        self._macaroon_secret_key = hs.config.key.macaroon_secret_key
-
-    def generate_oidc_session_token(
-        self,
-        state: str,
-        session_data: "OidcSessionData",
-        duration_in_ms: int = (60 * 60 * 1000),
-    ) -> str:
-        """Generates a signed token storing data about an OIDC session.
-
-        When Synapse initiates an authorization flow, it creates a random state
-        and a random nonce. Those parameters are given to the provider and
-        should be verified when the client comes back from the provider.
-        It is also used to store the client_redirect_url, which is used to
-        complete the SSO login flow.
-
-        Args:
-            state: The ``state`` parameter passed to the OIDC provider.
-            session_data: data to include in the session token.
-            duration_in_ms: An optional duration for the token in milliseconds.
-                Defaults to an hour.
-
-        Returns:
-            A signed macaroon token with the session information.
- """ - macaroon = pymacaroons.Macaroon( - location=self._server_name, - identifier="key", - key=self._macaroon_secret_key, - ) - macaroon.add_first_party_caveat("gen = 1") - macaroon.add_first_party_caveat("type = session") - macaroon.add_first_party_caveat("state = %s" % (state,)) - macaroon.add_first_party_caveat("idp_id = %s" % (session_data.idp_id,)) - macaroon.add_first_party_caveat("nonce = %s" % (session_data.nonce,)) - macaroon.add_first_party_caveat( - "client_redirect_url = %s" % (session_data.client_redirect_url,) - ) - macaroon.add_first_party_caveat( - "ui_auth_session_id = %s" % (session_data.ui_auth_session_id,) - ) - now = self._clock.time_msec() - expiry = now + duration_in_ms - macaroon.add_first_party_caveat("time < %d" % (expiry,)) - - return macaroon.serialize() - - def verify_oidc_session_token( - self, session: bytes, state: str - ) -> "OidcSessionData": - """Verifies and extract an OIDC session token. - - This verifies that a given session token was issued by this homeserver - and extract the nonce and client_redirect_url caveats. - - Args: - session: The session token to verify - state: The state the OIDC provider gave back - - Returns: - The data extracted from the session cookie - - Raises: - KeyError if an expected caveat is missing from the macaroon. - """ - macaroon = pymacaroons.Macaroon.deserialize(session) - - v = pymacaroons.Verifier() - v.satisfy_exact("gen = 1") - v.satisfy_exact("type = session") - v.satisfy_exact("state = %s" % (state,)) - v.satisfy_general(lambda c: c.startswith("nonce = ")) - v.satisfy_general(lambda c: c.startswith("idp_id = ")) - v.satisfy_general(lambda c: c.startswith("client_redirect_url = ")) - v.satisfy_general(lambda c: c.startswith("ui_auth_session_id = ")) - satisfy_expiry(v, self._clock.time_msec) - - v.verify(macaroon, self._macaroon_secret_key) - - # Extract the session data from the token. - nonce = get_value_from_macaroon(macaroon, "nonce") - idp_id = get_value_from_macaroon(macaroon, "idp_id") - client_redirect_url = get_value_from_macaroon(macaroon, "client_redirect_url") - ui_auth_session_id = get_value_from_macaroon(macaroon, "ui_auth_session_id") - return OidcSessionData( - nonce=nonce, - idp_id=idp_id, - client_redirect_url=client_redirect_url, - ui_auth_session_id=ui_auth_session_id, - ) - - -@attr.s(frozen=True, slots=True, auto_attribs=True) -class OidcSessionData: - """The attributes which are stored in a OIDC session cookie""" - - # the Identity Provider being used - idp_id: str - - # The `nonce` parameter passed to the OIDC provider. - nonce: str - - # The URL the client gave when it initiated the flow. ("" if this is a UI Auth) - client_redirect_url: str - - # The session ID of the ongoing UI Auth ("" if this is a login) - ui_auth_session_id: str - - class UserAttributeDict(TypedDict): localpart: Optional[str] confirm_localpart: bool diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 015c19b2d..c2575ba3d 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -860,13 +860,14 @@ class Mailer: A link to unsubscribe from email notifications. 
""" params = { - "access_token": self.macaroon_gen.generate_delete_pusher_token(user_id), + "access_token": self.macaroon_gen.generate_delete_pusher_token( + user_id, app_id, email_address + ), "app_id": app_id, "pushkey": email_address, } - # XXX: make r0 once API is stable - return "%s_matrix/client/unstable/pushers/remove?%s" % ( + return "%s_synapse/client/unsubscribe?%s" % ( self.hs.config.server.public_baseurl, urllib.parse.urlencode(params), ) diff --git a/synapse/rest/client/pusher.py b/synapse/rest/client/pusher.py index d6487c31d..9a1f10f4b 100644 --- a/synapse/rest/client/pusher.py +++ b/synapse/rest/client/pusher.py @@ -1,4 +1,5 @@ # Copyright 2014-2016 OpenMarket Ltd +# Copyright 2022 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,17 +16,17 @@ import logging from typing import TYPE_CHECKING, Tuple -from synapse.api.errors import Codes, StoreError, SynapseError -from synapse.http.server import HttpServer, respond_with_html_bytes +from synapse.api.errors import Codes, SynapseError +from synapse.http.server import HttpServer from synapse.http.servlet import ( RestServlet, assert_params_in_dict, parse_json_object_from_request, - parse_string, ) from synapse.http.site import SynapseRequest from synapse.push import PusherConfigException from synapse.rest.client._base import client_patterns +from synapse.rest.synapse.client.unsubscribe import UnsubscribeResource from synapse.types import JsonDict if TYPE_CHECKING: @@ -132,48 +133,21 @@ class PushersSetRestServlet(RestServlet): return 200, {} -class PushersRemoveRestServlet(RestServlet): +class LegacyPushersRemoveRestServlet(UnsubscribeResource, RestServlet): """ - To allow pusher to be delete by clicking a link (ie. GET request) + A servlet to handle legacy "email unsubscribe" links, forwarding requests to the ``UnsubscribeResource`` + + This should be kept for some time, so unsubscribe links in past emails stay valid. 
""" - PATTERNS = client_patterns("/pushers/remove$", v1=True) - SUCCESS_HTML = b"You have been unsubscribed" - - def __init__(self, hs: "HomeServer"): - super().__init__() - self.hs = hs - self.notifier = hs.get_notifier() - self.auth = hs.get_auth() - self.pusher_pool = self.hs.get_pusherpool() + PATTERNS = client_patterns("/pushers/remove$", releases=[], v1=False, unstable=True) async def on_GET(self, request: SynapseRequest) -> None: - requester = await self.auth.get_user_by_req(request, rights="delete_pusher") - user = requester.user - - app_id = parse_string(request, "app_id", required=True) - pushkey = parse_string(request, "pushkey", required=True) - - try: - await self.pusher_pool.remove_pusher( - app_id=app_id, pushkey=pushkey, user_id=user.to_string() - ) - except StoreError as se: - if se.code != 404: - # This is fine: they're already unsubscribed - raise - - self.notifier.on_new_replication_data() - - respond_with_html_bytes( - request, - 200, - PushersRemoveRestServlet.SUCCESS_HTML, - ) - return None + # Forward the request to the UnsubscribeResource + await self._async_render(request) def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: PushersRestServlet(hs).register(http_server) PushersSetRestServlet(hs).register(http_server) - PushersRemoveRestServlet(hs).register(http_server) + LegacyPushersRemoveRestServlet(hs).register(http_server) diff --git a/synapse/rest/synapse/client/__init__.py b/synapse/rest/synapse/client/__init__.py index 6ad558f5d..e55924f59 100644 --- a/synapse/rest/synapse/client/__init__.py +++ b/synapse/rest/synapse/client/__init__.py @@ -20,6 +20,7 @@ from synapse.rest.synapse.client.new_user_consent import NewUserConsentResource from synapse.rest.synapse.client.pick_idp import PickIdpResource from synapse.rest.synapse.client.pick_username import pick_username_resource from synapse.rest.synapse.client.sso_register import SsoRegisterResource +from synapse.rest.synapse.client.unsubscribe import UnsubscribeResource if TYPE_CHECKING: from synapse.server import HomeServer @@ -41,6 +42,8 @@ def build_synapse_client_resource_tree(hs: "HomeServer") -> Mapping[str, Resourc "/_synapse/client/pick_username": pick_username_resource(hs), "/_synapse/client/new_user_consent": NewUserConsentResource(hs), "/_synapse/client/sso_register": SsoRegisterResource(hs), + # Unsubscribe to notification emails link + "/_synapse/client/unsubscribe": UnsubscribeResource(hs), } # provider-specific SSO bits. Only load these if they are enabled, since they diff --git a/synapse/rest/synapse/client/unsubscribe.py b/synapse/rest/synapse/client/unsubscribe.py new file mode 100644 index 000000000..60321018f --- /dev/null +++ b/synapse/rest/synapse/client/unsubscribe.py @@ -0,0 +1,64 @@ +# Copyright 2022 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+from typing import TYPE_CHECKING
+
+from synapse.api.errors import StoreError
+from synapse.http.server import DirectServeHtmlResource, respond_with_html_bytes
+from synapse.http.servlet import parse_string
+from synapse.http.site import SynapseRequest
+
+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
+
+class UnsubscribeResource(DirectServeHtmlResource):
+    """
+    To allow a pusher to be deleted by clicking a link (i.e. a GET request)
+    """
+
+    SUCCESS_HTML = b"You have been unsubscribed"
+
+    def __init__(self, hs: "HomeServer"):
+        super().__init__()
+        self.notifier = hs.get_notifier()
+        self.auth = hs.get_auth()
+        self.pusher_pool = hs.get_pusherpool()
+        self.macaroon_generator = hs.get_macaroon_generator()
+
+    async def _async_render_GET(self, request: SynapseRequest) -> None:
+        token = parse_string(request, "access_token", required=True)
+        app_id = parse_string(request, "app_id", required=True)
+        pushkey = parse_string(request, "pushkey", required=True)
+
+        user_id = self.macaroon_generator.verify_delete_pusher_token(
+            token, app_id, pushkey
+        )
+
+        try:
+            await self.pusher_pool.remove_pusher(
+                app_id=app_id, pushkey=pushkey, user_id=user_id
+            )
+        except StoreError as se:
+            if se.code != 404:
+                # This is fine: they're already unsubscribed
+                raise
+
+        self.notifier.on_new_replication_data()
+
+        respond_with_html_bytes(
+            request,
+            200,
+            UnsubscribeResource.SUCCESS_HTML,
+        )
diff --git a/synapse/server.py b/synapse/server.py
index a6a415aea..181984a1a 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -56,7 +56,7 @@ from synapse.handlers.account_data import AccountDataHandler
 from synapse.handlers.account_validity import AccountValidityHandler
 from synapse.handlers.admin import AdminHandler
 from synapse.handlers.appservice import ApplicationServicesHandler
-from synapse.handlers.auth import AuthHandler, MacaroonGenerator, PasswordAuthProvider
+from synapse.handlers.auth import AuthHandler, PasswordAuthProvider
 from synapse.handlers.cas import CasHandler
 from synapse.handlers.deactivate_account import DeactivateAccountHandler
 from synapse.handlers.device import DeviceHandler, DeviceWorkerHandler
@@ -130,6 +130,7 @@ from synapse.streams.events import EventSources
 from synapse.types import DomainSpecificString, ISynapseReactor
 from synapse.util import Clock
 from synapse.util.distributor import Distributor
+from synapse.util.macaroons import MacaroonGenerator
 from synapse.util.ratelimitutils import FederationRateLimiter
 from synapse.util.stringutils import random_string
@@ -492,7 +493,9 @@ class HomeServer(metaclass=abc.ABCMeta):
 
     @cache_in_self
     def get_macaroon_generator(self) -> MacaroonGenerator:
-        return MacaroonGenerator(self)
+        return MacaroonGenerator(
+            self.get_clock(), self.hostname, self.config.key.macaroon_secret_key
+        )
 
     @cache_in_self
     def get_device_handler(self):
diff --git a/synapse/util/macaroons.py b/synapse/util/macaroons.py
index 84e4f6ff5..df77edcce 100644
--- a/synapse/util/macaroons.py
+++ b/synapse/util/macaroons.py
@@ -17,8 +17,14 @@
 
 from typing import Callable, Optional
 
+import attr
 import pymacaroons
 from pymacaroons.exceptions import MacaroonVerificationFailedException
+from typing_extensions import Literal
+
+from synapse.util import Clock, stringutils
+
+MacaroonType = Literal["access", "delete_pusher", "session", "login"]
 
 
 def get_value_from_macaroon(macaroon: pymacaroons.Macaroon, key: str) -> str:
@@ -86,3 +92,305 @@ def satisfy_expiry(v: pymacaroons.Verifier, get_time_ms: Callable[[], int]) -> N
         return time_msec < expiry
     v.satisfy_general(verify_expiry_caveat)
+
+
+@attr.s(frozen=True, slots=True, auto_attribs=True)
+class OidcSessionData:
+    """The attributes which are stored in an OIDC session cookie"""
+
+    idp_id: str
+    """The Identity Provider being used"""
+
+    nonce: str
+    """The `nonce` parameter passed to the OIDC provider."""
+
+    client_redirect_url: str
+    """The URL the client gave when it initiated the flow. ("" if this is a UI Auth)"""
+
+    ui_auth_session_id: str
+    """The session ID of the ongoing UI Auth ("" if this is a login)"""
+
+
+@attr.s(slots=True, frozen=True, auto_attribs=True)
+class LoginTokenAttributes:
+    """Data we store in a short-term login token"""
+
+    user_id: str
+
+    auth_provider_id: str
+    """The SSO Identity Provider that the user authenticated with, to get this token."""
+
+    auth_provider_session_id: Optional[str]
+    """The session ID advertised by the SSO Identity Provider."""
+
+
+class MacaroonGenerator:
+    def __init__(self, clock: Clock, location: str, secret_key: bytes):
+        self._clock = clock
+        self._location = location
+        self._secret_key = secret_key
+
+    def generate_guest_access_token(self, user_id: str) -> str:
+        """Generate a guest access token for the given user ID
+
+        Args:
+            user_id: The user ID for which the guest token should be generated.
+
+        Returns:
+            A signed access token for that guest user.
+        """
+        nonce = stringutils.random_string_with_symbols(16)
+        macaroon = self._generate_base_macaroon("access")
+        macaroon.add_first_party_caveat(f"user_id = {user_id}")
+        macaroon.add_first_party_caveat(f"nonce = {nonce}")
+        macaroon.add_first_party_caveat("guest = true")
+        return macaroon.serialize()
+
+    def generate_delete_pusher_token(
+        self, user_id: str, app_id: str, pushkey: str
+    ) -> str:
+        """Generate a signed token used for unsubscribing from email notifications
+
+        Args:
+            user_id: The user for which this token will be valid.
+            app_id: The app_id for this pusher.
+            pushkey: The unique identifier of this pusher.
+
+        Returns:
+            A signed token which can be used in unsubscribe links.
+        """
+        macaroon = self._generate_base_macaroon("delete_pusher")
+        macaroon.add_first_party_caveat(f"user_id = {user_id}")
+        macaroon.add_first_party_caveat(f"app_id = {app_id}")
+        macaroon.add_first_party_caveat(f"pushkey = {pushkey}")
+        return macaroon.serialize()
+
+    def generate_short_term_login_token(
+        self,
+        user_id: str,
+        auth_provider_id: str,
+        auth_provider_session_id: Optional[str] = None,
+        duration_in_ms: int = (2 * 60 * 1000),
+    ) -> str:
+        """Generate a short-term login token used during SSO logins
+
+        Args:
+            user_id: The user for which the token is valid.
+            auth_provider_id: The SSO IdP the user used.
+            auth_provider_session_id: The session ID obtained from the SSO IdP during login.
+
+        Returns:
+            A signed token valid for use as an ``m.login.token`` token.
+        """
+        now = self._clock.time_msec()
+        expiry = now + duration_in_ms
+        macaroon = self._generate_base_macaroon("login")
+        macaroon.add_first_party_caveat(f"user_id = {user_id}")
+        macaroon.add_first_party_caveat(f"time < {expiry}")
+        macaroon.add_first_party_caveat(f"auth_provider_id = {auth_provider_id}")
+        if auth_provider_session_id is not None:
+            macaroon.add_first_party_caveat(
+                f"auth_provider_session_id = {auth_provider_session_id}"
+            )
+        return macaroon.serialize()
+
+    def generate_oidc_session_token(
+        self,
+        state: str,
+        session_data: OidcSessionData,
+        duration_in_ms: int = (60 * 60 * 1000),
+    ) -> str:
+        """Generates a signed token storing data about an OIDC session.
+
+        When Synapse initiates an authorization flow, it creates a random state
+        and a random nonce. Those parameters are given to the provider and
+        should be verified when the client comes back from the provider.
+        It is also used to store the client_redirect_url, which is used to
+        complete the SSO login flow.
+
+        Args:
+            state: The ``state`` parameter passed to the OIDC provider.
+            session_data: data to include in the session token.
+            duration_in_ms: An optional duration for the token in milliseconds.
+                Defaults to an hour.
+
+        Returns:
+            A signed macaroon token with the session information.
+        """
+        now = self._clock.time_msec()
+        expiry = now + duration_in_ms
+        macaroon = self._generate_base_macaroon("session")
+        macaroon.add_first_party_caveat(f"state = {state}")
+        macaroon.add_first_party_caveat(f"idp_id = {session_data.idp_id}")
+        macaroon.add_first_party_caveat(f"nonce = {session_data.nonce}")
+        macaroon.add_first_party_caveat(
+            f"client_redirect_url = {session_data.client_redirect_url}"
+        )
+        macaroon.add_first_party_caveat(
+            f"ui_auth_session_id = {session_data.ui_auth_session_id}"
+        )
+        macaroon.add_first_party_caveat(f"time < {expiry}")
+
+        return macaroon.serialize()
+
+    def verify_short_term_login_token(self, token: str) -> LoginTokenAttributes:
+        """Verify a short-term-login macaroon
+
+        Checks that the given token is a valid, unexpired short-term-login token
+        minted by this server.
+
+        Args:
+            token: The login token to verify.
+
+        Returns:
+            A set of attributes carried by this token, including the
+            ``user_id`` and information about the SSO IdP used during that
+            login.
+
+        Raises:
+            MacaroonVerificationFailedException if the verification failed
+        """
+        macaroon = pymacaroons.Macaroon.deserialize(token)
+
+        v = self._base_verifier("login")
+        v.satisfy_general(lambda c: c.startswith("user_id = "))
+        v.satisfy_general(lambda c: c.startswith("auth_provider_id = "))
+        v.satisfy_general(lambda c: c.startswith("auth_provider_session_id = "))
+        satisfy_expiry(v, self._clock.time_msec)
+        v.verify(macaroon, self._secret_key)
+
+        user_id = get_value_from_macaroon(macaroon, "user_id")
+        auth_provider_id = get_value_from_macaroon(macaroon, "auth_provider_id")
+
+        auth_provider_session_id: Optional[str] = None
+        try:
+            auth_provider_session_id = get_value_from_macaroon(
+                macaroon, "auth_provider_session_id"
+            )
+        except MacaroonVerificationFailedException:
+            pass
+
+        return LoginTokenAttributes(
+            user_id=user_id,
+            auth_provider_id=auth_provider_id,
+            auth_provider_session_id=auth_provider_session_id,
+        )
+
+    def verify_guest_token(self, token: str) -> str:
+        """Verify a guest access token macaroon
+
+        Checks that the given token is a valid, unexpired guest access token
+        minted by this server.
+
+        Args:
+            token: The access token to verify.
+
+        Returns:
+            The ``user_id`` that this token is valid for.
+
+        Raises:
+            MacaroonVerificationFailedException if the verification failed
+        """
+        macaroon = pymacaroons.Macaroon.deserialize(token)
+        user_id = get_value_from_macaroon(macaroon, "user_id")
+
+        # At some point, Synapse would generate macaroons without the "guest"
+        # caveat for regular users. Because of how macaroon verification works,
+        # to avoid validating those as guest tokens, we explicitly check that
+        # the macaroon includes the "guest = true" caveat.
+        is_guest = any(
+            (caveat.caveat_id == "guest = true" for caveat in macaroon.caveats)
+        )
+
+        if not is_guest:
+            raise MacaroonVerificationFailedException("Macaroon is not a guest token")
+
+        v = self._base_verifier("access")
+        v.satisfy_exact("guest = true")
+        v.satisfy_general(lambda c: c.startswith("user_id = "))
+        v.satisfy_general(lambda c: c.startswith("nonce = "))
+        satisfy_expiry(v, self._clock.time_msec)
+        v.verify(macaroon, self._secret_key)
+
+        return user_id
+
+    def verify_delete_pusher_token(self, token: str, app_id: str, pushkey: str) -> str:
+        """Verify a token from an email unsubscribe link
+
+        Args:
+            token: The token to verify.
+            app_id: The app_id of the pusher to delete.
+            pushkey: The unique identifier of the pusher to delete.
+
+        Returns:
+            The ``user_id`` for which this token is valid.
+
+        Raises:
+            MacaroonVerificationFailedException if the verification failed
+        """
+        macaroon = pymacaroons.Macaroon.deserialize(token)
+        user_id = get_value_from_macaroon(macaroon, "user_id")
+
+        v = self._base_verifier("delete_pusher")
+        v.satisfy_exact(f"app_id = {app_id}")
+        v.satisfy_exact(f"pushkey = {pushkey}")
+        v.satisfy_general(lambda c: c.startswith("user_id = "))
+        v.verify(macaroon, self._secret_key)
+
+        return user_id
+
+    def verify_oidc_session_token(self, session: bytes, state: str) -> OidcSessionData:
+        """Verifies and extracts an OIDC session token.
+
+        This verifies that a given session token was issued by this homeserver
+        and extracts the nonce and client_redirect_url caveats.
+
+        Args:
+            session: The session token to verify
+            state: The state the OIDC provider gave back
+
+        Returns:
+            The data extracted from the session cookie
+
+        Raises:
+            KeyError if an expected caveat is missing from the macaroon.
+        """
+        macaroon = pymacaroons.Macaroon.deserialize(session)
+
+        v = self._base_verifier("session")
+        v.satisfy_exact(f"state = {state}")
+        v.satisfy_general(lambda c: c.startswith("nonce = "))
+        v.satisfy_general(lambda c: c.startswith("idp_id = "))
+        v.satisfy_general(lambda c: c.startswith("client_redirect_url = "))
+        v.satisfy_general(lambda c: c.startswith("ui_auth_session_id = "))
+        satisfy_expiry(v, self._clock.time_msec)
+
+        v.verify(macaroon, self._secret_key)
+
+        # Extract the session data from the token.
+ nonce = get_value_from_macaroon(macaroon, "nonce") + idp_id = get_value_from_macaroon(macaroon, "idp_id") + client_redirect_url = get_value_from_macaroon(macaroon, "client_redirect_url") + ui_auth_session_id = get_value_from_macaroon(macaroon, "ui_auth_session_id") + return OidcSessionData( + nonce=nonce, + idp_id=idp_id, + client_redirect_url=client_redirect_url, + ui_auth_session_id=ui_auth_session_id, + ) + + def _generate_base_macaroon(self, type: MacaroonType) -> pymacaroons.Macaroon: + macaroon = pymacaroons.Macaroon( + location=self._location, + identifier="key", + key=self._secret_key, + ) + macaroon.add_first_party_caveat("gen = 1") + macaroon.add_first_party_caveat(f"type = {type}") + return macaroon + + def _base_verifier(self, type: MacaroonType) -> pymacaroons.Verifier: + v = pymacaroons.Verifier() + v.satisfy_exact("gen = 1") + v.satisfy_exact(f"type = {type}") + return v diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index 54af9089e..dfcfaf79b 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -313,9 +313,7 @@ class AuthTestCase(unittest.HomeserverTestCase): self.assertEqual(self.store.insert_client_ip.call_count, 2) def test_get_user_from_macaroon(self): - self.store.get_user_by_access_token = simple_async_mock( - TokenLookupResult(user_id="@baldrick:matrix.org", device_id="device") - ) + self.store.get_user_by_access_token = simple_async_mock(None) user_id = "@baldrick:matrix.org" macaroon = pymacaroons.Macaroon( @@ -323,17 +321,14 @@ class AuthTestCase(unittest.HomeserverTestCase): identifier="key", key=self.hs.config.key.macaroon_secret_key, ) + # "Legacy" macaroons should not work for regular users not in the database macaroon.add_first_party_caveat("gen = 1") macaroon.add_first_party_caveat("type = access") macaroon.add_first_party_caveat("user_id = %s" % (user_id,)) - user_info = self.get_success( - self.auth.get_user_by_access_token(macaroon.serialize()) + serialized = macaroon.serialize() + self.get_failure( + self.auth.get_user_by_access_token(serialized), InvalidClientTokenError ) - self.assertEqual(user_id, user_info.user_id) - - # TODO: device_id should come from the macaroon, but currently comes - # from the db. - self.assertEqual(user_info.device_id, "device") def test_get_guest_user_from_macaroon(self): self.store.get_user_by_id = simple_async_mock({"is_guest": True}) diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py index 1231aed94..e6cd3af7b 100644 --- a/tests/handlers/test_oidc.py +++ b/tests/handlers/test_oidc.py @@ -25,7 +25,7 @@ from synapse.handlers.sso import MappingException from synapse.server import HomeServer from synapse.types import JsonDict, UserID from synapse.util import Clock -from synapse.util.macaroons import get_value_from_macaroon +from synapse.util.macaroons import OidcSessionData, get_value_from_macaroon from tests.test_utils import FakeResponse, get_awaitable_result, simple_async_mock from tests.unittest import HomeserverTestCase, override_config @@ -1227,7 +1227,7 @@ class OidcHandlerTestCase(HomeserverTestCase): ) -> str: from synapse.handlers.oidc import OidcSessionData - return self.handler._token_generator.generate_oidc_session_token( + return self.handler._macaroon_generator.generate_oidc_session_token( state=state, session_data=OidcSessionData( idp_id="oidc", @@ -1251,7 +1251,6 @@ async def _make_callback_with_userinfo( userinfo: the OIDC userinfo dict client_redirect_url: the URL to redirect to on success. 
""" - from synapse.handlers.oidc import OidcSessionData handler = hs.get_oidc_handler() provider = handler._providers["oidc"] @@ -1260,7 +1259,7 @@ async def _make_callback_with_userinfo( provider._fetch_userinfo = simple_async_mock(return_value=userinfo) # type: ignore[assignment] state = "state" - session = handler._token_generator.generate_oidc_session_token( + session = handler._macaroon_generator.generate_oidc_session_token( state=state, session_data=OidcSessionData( idp_id="oidc", diff --git a/tests/test_state.py b/tests/test_state.py index 95f81beba..b005dd8d0 100644 --- a/tests/test_state.py +++ b/tests/test_state.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Collection, Dict, List, Optional +from typing import Collection, Dict, List, Optional, cast from unittest.mock import Mock from twisted.internet import defer @@ -22,6 +22,8 @@ from synapse.api.room_versions import RoomVersions from synapse.events import make_event_from_dict from synapse.events.snapshot import EventContext from synapse.state import StateHandler, StateResolutionHandler +from synapse.util import Clock +from synapse.util.macaroons import MacaroonGenerator from tests import unittest @@ -190,13 +192,18 @@ class StateTestCase(unittest.TestCase): "get_clock", "get_state_resolution_handler", "get_account_validity_handler", + "get_macaroon_generator", "hostname", ] ) + clock = cast(Clock, MockClock()) hs.config = default_config("tesths", True) hs.get_datastores.return_value = Mock(main=self.dummy_store) hs.get_state_handler.return_value = None - hs.get_clock.return_value = MockClock() + hs.get_clock.return_value = clock + hs.get_macaroon_generator.return_value = MacaroonGenerator( + clock, "tesths", b"verysecret" + ) hs.get_auth.return_value = Auth(hs) hs.get_state_resolution_handler = lambda: StateResolutionHandler(hs) hs.get_storage_controllers.return_value = storage_controllers diff --git a/tests/unittest.py b/tests/unittest.py index e7f255b4f..c645dd356 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -315,7 +315,7 @@ class HomeserverTestCase(TestCase): "is_guest": False, } - async def get_user_by_req(request, allow_guest=False, rights="access"): + async def get_user_by_req(request, allow_guest=False): assert self.helper.auth_user_id is not None return create_requester( UserID.from_string(self.helper.auth_user_id), diff --git a/tests/util/test_macaroons.py b/tests/util/test_macaroons.py new file mode 100644 index 000000000..32125f7bb --- /dev/null +++ b/tests/util/test_macaroons.py @@ -0,0 +1,146 @@ +# Copyright 2022 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from pymacaroons.exceptions import MacaroonVerificationFailedException + +from synapse.util.macaroons import MacaroonGenerator, OidcSessionData + +from tests.server import get_clock +from tests.unittest import TestCase + + +class MacaroonGeneratorTestCase(TestCase): + def setUp(self): + self.reactor, hs_clock = get_clock() + self.macaroon_generator = MacaroonGenerator(hs_clock, "tesths", b"verysecret") + self.other_macaroon_generator = MacaroonGenerator( + hs_clock, "tesths", b"anothersecretkey" + ) + + def test_guest_access_token(self): + """Test the generation and verification of guest access tokens""" + token = self.macaroon_generator.generate_guest_access_token("@user:tesths") + user_id = self.macaroon_generator.verify_guest_token(token) + self.assertEqual(user_id, "@user:tesths") + + # Raises with another secret key + with self.assertRaises(MacaroonVerificationFailedException): + self.other_macaroon_generator.verify_guest_token(token) + + # Check that an old access token without the guest caveat does not work + macaroon = self.macaroon_generator._generate_base_macaroon("access") + macaroon.add_first_party_caveat(f"user_id = {user_id}") + macaroon.add_first_party_caveat("nonce = 0123456789abcdef") + token = macaroon.serialize() + + with self.assertRaises(MacaroonVerificationFailedException): + self.macaroon_generator.verify_guest_token(token) + + def test_delete_pusher_token(self): + """Test the generation and verification of delete_pusher tokens""" + token = self.macaroon_generator.generate_delete_pusher_token( + "@user:tesths", "m.mail", "john@example.com" + ) + user_id = self.macaroon_generator.verify_delete_pusher_token( + token, "m.mail", "john@example.com" + ) + self.assertEqual(user_id, "@user:tesths") + + # Raises with another secret key + with self.assertRaises(MacaroonVerificationFailedException): + self.other_macaroon_generator.verify_delete_pusher_token( + token, "m.mail", "john@example.com" + ) + + # Raises when verifying for another pushkey + with self.assertRaises(MacaroonVerificationFailedException): + self.macaroon_generator.verify_delete_pusher_token( + token, "m.mail", "other@example.com" + ) + + # Raises when verifying for another app_id + with self.assertRaises(MacaroonVerificationFailedException): + self.macaroon_generator.verify_delete_pusher_token( + token, "somethingelse", "john@example.com" + ) + + # Check that an old token without the app_id and pushkey still works + macaroon = self.macaroon_generator._generate_base_macaroon("delete_pusher") + macaroon.add_first_party_caveat("user_id = @user:tesths") + token = macaroon.serialize() + user_id = self.macaroon_generator.verify_delete_pusher_token( + token, "m.mail", "john@example.com" + ) + self.assertEqual(user_id, "@user:tesths") + + def test_short_term_login_token(self): + """Test the generation and verification of short-term login tokens""" + token = self.macaroon_generator.generate_short_term_login_token( + user_id="@user:tesths", + auth_provider_id="oidc", + auth_provider_session_id="sid", + duration_in_ms=2 * 60 * 1000, + ) + + info = self.macaroon_generator.verify_short_term_login_token(token) + self.assertEqual(info.user_id, "@user:tesths") + self.assertEqual(info.auth_provider_id, "oidc") + self.assertEqual(info.auth_provider_session_id, "sid") + + # Raises with another secret key + with self.assertRaises(MacaroonVerificationFailedException): + self.other_macaroon_generator.verify_short_term_login_token(token) + + # Wait a minute + self.reactor.pump([60]) + # Shouldn't raise + 
self.macaroon_generator.verify_short_term_login_token(token) + # Wait another minute + self.reactor.pump([60]) + # Should raise since it expired + with self.assertRaises(MacaroonVerificationFailedException): + self.macaroon_generator.verify_short_term_login_token(token) + + def test_oidc_session_token(self): + """Test the generation and verification of OIDC session cookies""" + state = "arandomstate" + session_data = OidcSessionData( + idp_id="oidc", + nonce="nonce", + client_redirect_url="https://example.com/", + ui_auth_session_id="", + ) + token = self.macaroon_generator.generate_oidc_session_token( + state, session_data, duration_in_ms=2 * 60 * 1000 + ).encode("utf-8") + info = self.macaroon_generator.verify_oidc_session_token(token, state) + self.assertEqual(session_data, info) + + # Raises with another secret key + with self.assertRaises(MacaroonVerificationFailedException): + self.other_macaroon_generator.verify_oidc_session_token(token, state) + + # Should raise with another state + with self.assertRaises(MacaroonVerificationFailedException): + self.macaroon_generator.verify_oidc_session_token(token, "anotherstate") + + # Wait a minute + self.reactor.pump([60]) + # Shouldn't raise + self.macaroon_generator.verify_oidc_session_token(token, state) + # Wait another minute + self.reactor.pump([60]) + # Should raise since it expired + with self.assertRaises(MacaroonVerificationFailedException): + self.macaroon_generator.verify_oidc_session_token(token, state) From 5f4ecf759d5d4a63cb0f6f7dfe4ab65b8569e1ee Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 14 Jun 2022 10:34:04 -0400 Subject: [PATCH 34/85] Rename delta to apply in the proper schema version. (#13050) --- changelog.d/13050.misc | 1 + .../01remove_noop_background_updates.sql} | 0 2 files changed, 1 insertion(+) create mode 100644 changelog.d/13050.misc rename synapse/storage/schema/main/delta/{70/02remove_noop_background_updates.sql => 71/01remove_noop_background_updates.sql} (100%) diff --git a/changelog.d/13050.misc b/changelog.d/13050.misc new file mode 100644 index 000000000..20bf13673 --- /dev/null +++ b/changelog.d/13050.misc @@ -0,0 +1 @@ +Replace noop background updates with `DELETE` delta. 
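As context for the macaroon changes above: every token the new MacaroonGenerator issues is built from _generate_base_macaroon (which stamps the `gen = 1` and `type = ...` first-party caveats) and checked with _base_verifier plus type-specific satisfiers. The standalone sketch below is not part of these patches; it shows the same caveat scheme driven through pymacaroons directly. The location ("tesths") and secret (b"verysecret") mirror the test fixtures, and the two helper function names are invented for illustration.

import pymacaroons
from pymacaroons.exceptions import MacaroonVerificationFailedException

SECRET = b"verysecret"  # mirrors the test fixture; not a real signing key


def make_guest_token(user_id: str) -> str:
    # Same caveat scheme as MacaroonGenerator.generate_guest_access_token
    # (minus the random nonce caveat, omitted here for brevity).
    macaroon = pymacaroons.Macaroon(location="tesths", identifier="key", key=SECRET)
    macaroon.add_first_party_caveat("gen = 1")
    macaroon.add_first_party_caveat("type = access")
    macaroon.add_first_party_caveat(f"user_id = {user_id}")
    macaroon.add_first_party_caveat("guest = true")
    return macaroon.serialize()


def verify_guest_token(token: str) -> str:
    macaroon = pymacaroons.Macaroon.deserialize(token)
    user_id = None

    def extract_user_id(caveat: str) -> bool:
        # A general satisfier: return True to accept the caveat.
        nonlocal user_id
        if caveat.startswith("user_id = "):
            user_id = caveat[len("user_id = "):]
            return True
        return False

    v = pymacaroons.Verifier()
    v.satisfy_exact("gen = 1")
    v.satisfy_exact("type = access")
    v.satisfy_exact("guest = true")
    v.satisfy_general(extract_user_id)
    # Raises MacaroonVerificationFailedException on a bad signature or an
    # unsatisfied caveat, which is what the tests above assert.
    v.verify(macaroon, SECRET)
    assert user_id is not None
    return user_id


token = make_guest_token("@user:tesths")
assert verify_guest_token(token) == "@user:tesths"

The real generator additionally stamps a random `nonce = ...` caveat on guest tokens, and a `time < ...` expiry caveat on short-term login and OIDC session tokens, which is why the tests above pump the reactor clock to exercise expiry.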
diff --git a/synapse/storage/schema/main/delta/70/02remove_noop_background_updates.sql b/synapse/storage/schema/main/delta/71/01remove_noop_background_updates.sql similarity index 100% rename from synapse/storage/schema/main/delta/70/02remove_noop_background_updates.sql rename to synapse/storage/schema/main/delta/71/01remove_noop_background_updates.sql From 493c2fc44abcf3457953cc2f6f23509ff7855253 Mon Sep 17 00:00:00 2001 From: Shay Date: Tue, 14 Jun 2022 07:53:42 -0700 Subject: [PATCH 35/85] Remove code generating comments in configuration file (#12941) --- changelog.d/12941.misc | 1 + docs/.sample_config_header.yaml | 22 +- docs/sample_config.yaml | 2828 +------------------------- synapse/config/_base.py | 10 +- synapse/config/api.py | 48 - synapse/config/appservice.py | 14 - synapse/config/auth.py | 75 - synapse/config/background_updates.py | 34 - synapse/config/cache.py | 91 - synapse/config/captcha.py | 27 - synapse/config/cas.py | 31 - synapse/config/consent.py | 55 - synapse/config/database.py | 50 - synapse/config/emailconfig.py | 154 -- synapse/config/federation.py | 39 - synapse/config/groups.py | 27 + synapse/config/jwt.py | 64 - synapse/config/key.py | 93 +- synapse/config/logger.py | 5 - synapse/config/metrics.py | 44 +- synapse/config/modules.py | 17 - synapse/config/oembed.py | 23 - synapse/config/oidc.py | 197 -- synapse/config/push.py | 33 - synapse/config/ratelimiting.py | 122 -- synapse/config/redis.py | 21 - synapse/config/registration.py | 279 +-- synapse/config/repository.py | 166 +- synapse/config/retention.py | 72 - synapse/config/room.py | 56 - synapse/config/room_directory.py | 66 - synapse/config/saml2.py | 183 -- synapse/config/server.py | 494 +---- synapse/config/server_notices.py | 24 - synapse/config/sso.py | 40 - synapse/config/stats.py | 13 - synapse/config/tls.py | 90 +- synapse/config/tracer.py | 50 - synapse/config/user_directory.py | 39 - synapse/config/voip.py | 31 - synapse/config/workers.py | 49 - 41 files changed, 66 insertions(+), 5711 deletions(-) create mode 100644 changelog.d/12941.misc create mode 100644 synapse/config/groups.py diff --git a/changelog.d/12941.misc b/changelog.d/12941.misc new file mode 100644 index 000000000..6a74f255d --- /dev/null +++ b/changelog.d/12941.misc @@ -0,0 +1 @@ +Remove code generating comments in configuration. diff --git a/docs/.sample_config_header.yaml b/docs/.sample_config_header.yaml index 09e86ca0c..2355337e6 100644 --- a/docs/.sample_config_header.yaml +++ b/docs/.sample_config_header.yaml @@ -1,26 +1,12 @@ # This file is maintained as an up-to-date snapshot of the default -# homeserver.yaml configuration generated by Synapse. -# -# It is intended to act as a reference for the default configuration, -# helping admins keep track of new options and other changes, and compare -# their configs with the current default. As such, many of the actual -# config values shown are placeholders. +# homeserver.yaml configuration generated by Synapse. You can find a +# complete accounting of possible configuration options at +# https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html # # It is *not* intended to be copied and used as the basis for a real # homeserver.yaml. Instead, if you are starting from scratch, please generate # a fresh config using Synapse by following the instructions in # https://matrix-org.github.io/synapse/latest/setup/installation.html. - -# Configuration options that take a time period can be set using a number -# followed by a letter. 
Letters have the following meanings: -# s = second -# m = minute -# h = hour -# d = day -# w = week -# y = year -# For example, setting redaction_retention_period: 5m would remove redacted -# messages from the database after 5 minutes, rather than 5 months. - +# ################################################################################ diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 56a25c534..6578ec022 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1,27 +1,13 @@ # This file is maintained as an up-to-date snapshot of the default -# homeserver.yaml configuration generated by Synapse. -# -# It is intended to act as a reference for the default configuration, -# helping admins keep track of new options and other changes, and compare -# their configs with the current default. As such, many of the actual -# config values shown are placeholders. +# homeserver.yaml configuration generated by Synapse. You can find a +# complete accounting of possible configuration options at +# https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html # # It is *not* intended to be copied and used as the basis for a real # homeserver.yaml. Instead, if you are starting from scratch, please generate # a fresh config using Synapse by following the instructions in # https://matrix-org.github.io/synapse/latest/setup/installation.html. - -# Configuration options that take a time period can be set using a number -# followed by a letter. Letters have the following meanings: -# s = second -# m = minute -# h = hour -# d = day -# w = week -# y = year -# For example, setting redaction_retention_period: 5m would remove redacted -# messages from the database after 5 minutes, rather than 5 months. - +# ################################################################################ # Configuration file for Synapse. @@ -31,2825 +17,27 @@ # should have the same indentation. # # [1] https://docs.ansible.com/ansible/latest/reference_appendices/YAMLSyntax.html - - -## Modules ## - -# Server admins can expand Synapse's functionality with external modules. -# -# See https://matrix-org.github.io/synapse/latest/modules/index.html for more -# documentation on how to configure or create custom modules for Synapse. -# -modules: - #- module: my_super_module.MySuperClass - # config: - # do_thing: true - #- module: my_other_super_module.SomeClass - # config: {} - - -## Server ## - -# The public-facing domain of the server -# -# The server_name name will appear at the end of usernames and room addresses -# created on this server. For example if the server_name was example.com, -# usernames on this server would be in the format @user:example.com -# -# In most cases you should avoid using a matrix specific subdomain such as -# matrix.example.com or synapse.example.com as the server_name for the same -# reasons you wouldn't use user@email.example.com as your email address. -# See https://matrix-org.github.io/synapse/latest/delegate.html -# for information on how to host Synapse on a subdomain while preserving -# a clean server_name. -# -# The server_name cannot be changed later so it is important to -# configure this correctly before you start Synapse. It should be all -# lowercase and may contain an explicit port. 
-# Examples: matrix.org, localhost:8080
 #
+# For more information on how to configure Synapse, including a complete accounting of
+# each option, go to docs/usage/configuration/config_documentation.md or
+# https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html
server_name: "SERVERNAME"
-
-# When running as a daemon, the file to store the pid in
-#
pid_file: DATADIR/homeserver.pid
-
-# The absolute URL to the web client which / will redirect to.
-#
-#web_client_location: https://riot.example.com/
-
-# The public-facing base URL that clients use to access this Homeserver (not
-# including _matrix/...). This is the same URL a user might enter into the
-# 'Custom Homeserver URL' field on their client. If you use Synapse with a
-# reverse proxy, this should be the URL to reach Synapse via the proxy.
-# Otherwise, it should be the URL to reach Synapse's client HTTP listener (see
-# 'listeners' below).
-#
-# Defaults to 'https://<server_name>/'.
-#
-#public_baseurl: https://example.com/
-
-# Uncomment the following to tell other servers to send federation traffic on
-# port 443.
-#
-# By default, other servers will try to reach our server on port 8448, which can
-# be inconvenient in some environments.
-#
-# Provided 'https://<server_name>/' on port 443 is routed to Synapse, this
-# option configures Synapse to serve a file at
-# 'https://<server_name>/.well-known/matrix/server'. This will tell other
-# servers to send traffic to port 443 instead.
-#
-# See https://matrix-org.github.io/synapse/latest/delegate.html for more
-# information.
-#
-# Defaults to 'false'.
-#
-#serve_server_wellknown: true
-
-# Set the soft limit on the number of file descriptors synapse can use
-# Zero is used to indicate synapse should set the soft limit to the
-# hard limit.
-#
-#soft_file_limit: 0
-
-# Presence tracking allows users to see the state (e.g. online/offline)
-# of other local and remote users.
-#
-presence:
-  # Uncomment to disable presence tracking on this homeserver. This option
-  # replaces the previous top-level 'use_presence' option.
-  #
-  #enabled: false
-
-# Whether to require authentication to retrieve profile data (avatars,
-# display names) of other users through the client API. Defaults to
-# 'false'. Note that profile data is also available via the federation
-# API, unless allow_profile_lookup_over_federation is set to false.
-#
-#require_auth_for_profile_requests: true
-
-# Uncomment to require a user to share a room with another user in order
-# to retrieve their profile information. Only checked on Client-Server
-# requests. Profile requests from other servers should be checked by the
-# requesting server. Defaults to 'false'.
-#
-#limit_profile_requests_to_users_who_share_rooms: true
-
-# Uncomment to prevent a user's profile data from being retrieved and
-# displayed in a room until they have joined it. By default, a user's
-# profile data is included in an invite event, regardless of the values
-# of the above two settings, and whether or not the users share a server.
-# Defaults to 'true'.
-#
-#include_profile_data_on_invite: false
-
-# If set to 'true', removes the need for authentication to access the server's
-# public rooms directory through the client API, meaning that anyone can
-# query the room directory. Defaults to 'false'.
-#
-#allow_public_rooms_without_auth: true
-
-# If set to 'true', allows any other homeserver to fetch the server's public
-# rooms directory via federation. Defaults to 'false'.
-# -#allow_public_rooms_over_federation: true - -# The default room version for newly created rooms. -# -# Known room versions are listed here: -# https://spec.matrix.org/latest/rooms/#complete-list-of-room-versions -# -# For example, for room version 1, default_room_version should be set -# to "1". -# -#default_room_version: "9" - -# The GC threshold parameters to pass to `gc.set_threshold`, if defined -# -#gc_thresholds: [700, 10, 10] - -# The minimum time in seconds between each GC for a generation, regardless of -# the GC thresholds. This ensures that we don't do GC too frequently. -# -# A value of `[1s, 10s, 30s]` indicates that a second must pass between consecutive -# generation 0 GCs, etc. -# -# Defaults to `[1s, 10s, 30s]`. -# -#gc_min_interval: [0.5s, 30s, 1m] - -# Set the limit on the returned events in the timeline in the get -# and sync operations. The default value is 100. -1 means no upper limit. -# -# Uncomment the following to increase the limit to 5000. -# -#filter_timeline_limit: 5000 - -# Whether room invites to users on this server should be blocked -# (except those sent by local server admins). The default is False. -# -#block_non_admin_invites: true - -# Room searching -# -# If disabled, new messages will not be indexed for searching and users -# will receive errors when searching for messages. Defaults to enabled. -# -#enable_search: false - -# Prevent outgoing requests from being sent to the following blacklisted IP address -# CIDR ranges. If this option is not specified then it defaults to private IP -# address ranges (see the example below). -# -# The blacklist applies to the outbound requests for federation, identity servers, -# push servers, and for checking key validity for third-party invite events. -# -# (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly -# listed here, since they correspond to unroutable addresses.) -# -# This option replaces federation_ip_range_blacklist in Synapse v1.25.0. -# -# Note: The value is ignored when an HTTP proxy is in use -# -#ip_range_blacklist: -# - '127.0.0.0/8' -# - '10.0.0.0/8' -# - '172.16.0.0/12' -# - '192.168.0.0/16' -# - '100.64.0.0/10' -# - '192.0.0.0/24' -# - '169.254.0.0/16' -# - '192.88.99.0/24' -# - '198.18.0.0/15' -# - '192.0.2.0/24' -# - '198.51.100.0/24' -# - '203.0.113.0/24' -# - '224.0.0.0/4' -# - '::1/128' -# - 'fe80::/10' -# - 'fc00::/7' -# - '2001:db8::/32' -# - 'ff00::/8' -# - 'fec0::/10' - -# List of IP address CIDR ranges that should be allowed for federation, -# identity servers, push servers, and for checking key validity for -# third-party invite events. This is useful for specifying exceptions to -# wide-ranging blacklisted target IP ranges - e.g. for communication with -# a push server only visible in your network. -# -# This whitelist overrides ip_range_blacklist and defaults to an empty -# list. -# -#ip_range_whitelist: -# - '192.168.1.1' - -# List of ports that Synapse should listen on, their purpose and their -# configuration. -# -# Options for each listener include: -# -# port: the TCP port to bind to -# -# bind_addresses: a list of local addresses to listen on. The default is -# 'all local interfaces'. -# -# type: the type of listener. Normally 'http', but other valid options are: -# 'manhole' (see https://matrix-org.github.io/synapse/latest/manhole.html), -# 'metrics' (see https://matrix-org.github.io/synapse/latest/metrics-howto.html), -# 'replication' (see https://matrix-org.github.io/synapse/latest/workers.html). 
-# -# tls: set to true to enable TLS for this listener. Will use the TLS -# key/cert specified in tls_private_key_path / tls_certificate_path. -# -# x_forwarded: Only valid for an 'http' listener. Set to true to use the -# X-Forwarded-For header as the client IP. Useful when Synapse is -# behind a reverse-proxy. -# -# resources: Only valid for an 'http' listener. A list of resources to host -# on this port. Options for each resource are: -# -# names: a list of names of HTTP resources. See below for a list of -# valid resource names. -# -# compress: set to true to enable HTTP compression for this resource. -# -# additional_resources: Only valid for an 'http' listener. A map of -# additional endpoints which should be loaded via dynamic modules. -# -# Valid resource names are: -# -# client: the client-server API (/_matrix/client), and the synapse admin -# API (/_synapse/admin). Also implies 'media' and 'static'. -# -# consent: user consent forms (/_matrix/consent). -# See https://matrix-org.github.io/synapse/latest/consent_tracking.html. -# -# federation: the server-server API (/_matrix/federation). Also implies -# 'media', 'keys', 'openid' -# -# keys: the key discovery API (/_matrix/key). -# -# media: the media API (/_matrix/media). -# -# metrics: the metrics interface. -# See https://matrix-org.github.io/synapse/latest/metrics-howto.html. -# -# openid: OpenID authentication. -# -# replication: the HTTP replication API (/_synapse/replication). -# See https://matrix-org.github.io/synapse/latest/workers.html. -# -# static: static resources under synapse/static (/_matrix/static). (Mostly -# useful for 'fallback authentication'.) -# listeners: - # TLS-enabled listener: for when matrix traffic is sent directly to synapse. - # - # Disabled by default. To enable it, uncomment the following. (Note that you - # will also need to give Synapse a TLS key and certificate: see the TLS section - # below.) - # - #- port: 8448 - # type: http - # tls: true - # resources: - # - names: [client, federation] - - # Unsecure HTTP listener: for when matrix traffic passes through a reverse proxy - # that unwraps TLS. - # - # If you plan to use a reverse proxy, please see - # https://matrix-org.github.io/synapse/latest/reverse_proxy.html. - # - port: 8008 tls: false type: http x_forwarded: true bind_addresses: ['::1', '127.0.0.1'] - resources: - names: [client, federation] compress: false - - # example additional_resources: - # - #additional_resources: - # "/_matrix/my/custom/endpoint": - # module: my_module.CustomRequestHandler - # config: {} - - # Turn on the twisted ssh manhole service on localhost on the given - # port. - # - #- port: 9000 - # bind_addresses: ['::1', '127.0.0.1'] - # type: manhole - -# Connection settings for the manhole -# -manhole_settings: - # The username for the manhole. This defaults to 'matrix'. - # - #username: manhole - - # The password for the manhole. This defaults to 'rabbithole'. - # - #password: mypassword - - # The private and public SSH key pair used to encrypt the manhole traffic. - # If these are left unset, then hardcoded and non-secret keys are used, - # which could allow traffic to be intercepted if sent over a public network. - # - #ssh_priv_key_path: CONFDIR/id_rsa - #ssh_pub_key_path: CONFDIR/id_rsa.pub - -# Forward extremities can build up in a room due to networking delays between -# homeservers. Once this happens in a large room, calculation of the state of -# that room can become quite expensive. 
To mitigate this, once the number of
-# forward extremities reaches a given threshold, Synapse will send an
-# org.matrix.dummy_event event, which will reduce the forward extremities
-# in the room.
-#
-# This setting defines the threshold (i.e. number of forward extremities in the
-# room) at which dummy events are sent. The default value is 10.
-#
-#dummy_events_threshold: 5
-
-
-## Homeserver blocking ##
-
-# How to reach the server admin, used in ResourceLimitError
-#
-#admin_contact: 'mailto:admin@server.com'
-
-# Global blocking
-#
-#hs_disabled: false
-#hs_disabled_message: 'Human readable reason for why the HS is blocked'
-
-# Monthly Active User Blocking
-#
-# Used in cases where the admin or server owner wants to limit the
-# number of monthly active users.
-#
-# 'limit_usage_by_mau' disables/enables monthly active user blocking. When
-# enabled and a limit is reached the server returns a 'ResourceLimitError'
-# with error type Codes.RESOURCE_LIMIT_EXCEEDED.
-#
-# 'max_mau_value' is the hard limit of monthly active users above which
-# the server will start blocking user actions.
-#
-# 'mau_trial_days' is a means to add a grace period for active users. It
-# means that users must be active for this number of days before they
-# can be considered active and guards against the case where lots of users
-# sign up in a short space of time never to return after their initial
-# session.
-#
-# The option `mau_appservice_trial_days` is similar to `mau_trial_days`, but
-# applies a different trial number if the user was registered by an appservice.
-# A value of 0 means no trial days are applied. Appservices not listed in this
-# dictionary use the value of `mau_trial_days` instead.
-#
-# 'mau_limit_alerting' is a means of limiting client side alerting
-# should the mau limit be reached. This is useful for small instances
-# where the admin has 5 mau seats (say) for 5 specific people and no
-# interest in increasing the mau limit further. Defaults to True, which
-# means that alerting is enabled.
-#
-#limit_usage_by_mau: false
-#max_mau_value: 50
-#mau_trial_days: 2
-#mau_limit_alerting: false
-#mau_appservice_trial_days:
-#  "appservice-id": 1
-
-# If enabled, the metrics for the number of monthly active users will
-# be populated, however no one will be limited. If limit_usage_by_mau
-# is true, this is implied to be true.
-#
-#mau_stats_only: false
-
-# Sometimes the server admin will want to ensure certain accounts are
-# never blocked by mau checking. These accounts are specified here.
-#
-#mau_limit_reserved_threepids:
-#  - medium: 'email'
-#    address: 'reserved_user@example.com'
-
-# Used by phonehome stats to group together related servers.
-#server_context: context
-
-# Resource-constrained homeserver settings
-#
-# When this is enabled, the room "complexity" will be checked before a user
-# joins a new remote room. If it is above the complexity limit, the server will
-# disallow joining, or will instantly leave.
-#
-# Room complexity is an arbitrary measure based on factors such as the number of
-# users in the room.
-#
-limit_remote_rooms:
-  # Uncomment to enable room complexity checking.
-  #
-  #enabled: true
-
-  # the limit above which rooms cannot be joined. The default is 1.0.
-  #
-  #complexity: 0.5
-
-  # override the error which is returned when the room is too complex.
-  #
-  #complexity_error: "This room is too complex."
-
-  # allow server admins to join complex rooms. Default is false.
-  #
-  #admins_can_join: true
-
-# Whether to require a user to be in the room to add an alias to it.
-# Defaults to 'true'.
-#
-#require_membership_for_aliases: false
-
-# Whether to allow per-room membership profiles through the sending of membership
-# events with profile information that differs from the target's global profile.
-# Defaults to 'true'.
-#
-#allow_per_room_profiles: false
-
-# The largest allowed file size for a user avatar. Defaults to no restriction.
-#
-# Note that user avatar changes will not work if this is set without
-# using Synapse's media repository.
-#
-#max_avatar_size: 10M
-
-# The MIME types allowed for user avatars. Defaults to no restriction.
-#
-# Note that user avatar changes will not work if this is set without
-# using Synapse's media repository.
-#
-#allowed_avatar_mimetypes: ["image/png", "image/jpeg", "image/gif"]
-
-# How long to keep redacted events in unredacted form in the database. After
-# this period redacted events get replaced with their redacted form in the DB.
-#
-# Defaults to `7d`. Set to `null` to disable.
-#
-#redaction_retention_period: 28d
-
-# How long to track users' last seen time and IPs in the database.
-#
-# Defaults to `28d`. Set to `null` to disable clearing out of old rows.
-#
-#user_ips_max_age: 14d
-
-# Inhibits the /requestToken endpoints from returning an error that might leak
-# information about whether an e-mail address is in use or not on this
-# homeserver.
-# Note that for some endpoints the error situation is the e-mail already being
-# used, and for others the error is entering the e-mail being unused.
-# If this option is enabled, instead of returning an error, these endpoints will
-# act as if no error happened and return a fake session ID ('sid') to clients.
-#
-#request_token_inhibit_3pid_errors: true
-
-# A list of domains that the domain portion of 'next_link' parameters
-# must match.
-#
-# This parameter is optionally provided by clients while requesting
-# validation of an email or phone number, and maps to a link that
-# users will be automatically redirected to after validation
-# succeeds. Clients can make use of this parameter to aid the validation
-# process.
-#
-# The whitelist is applied whether the homeserver or an
-# identity server is handling validation.
-#
-# The default value is no whitelist functionality; all domains are
-# allowed. Setting this value to an empty list will instead disallow
-# all domains.
-#
-#next_link_domain_whitelist: ["matrix.org"]
-
-# Templates to use when generating email or HTML page contents.
-#
-templates:
-  # Directory in which Synapse will try to find template files to use to generate
-  # email or HTML page contents.
-  # If not set, or a file is not found within the template directory, a default
-  # template from within the Synapse package will be used.
-  #
-  # See https://matrix-org.github.io/synapse/latest/templates.html for more
-  # information about using custom templates.
-  #
-  #custom_template_directory: /path/to/custom/templates/
-
-# List of rooms to exclude from sync responses. This is useful for server
-# administrators wishing to group users into a room without these users being able
-# to see it from their client.
-#
-# By default, no room is excluded.
-#
-#exclude_rooms_from_sync:
-#  - !foo:example.com
-
-
-# Message retention policy at the server level.
-#
-# Room admins and mods can define a retention period for their rooms using the
-# 'm.room.retention' state event, and server admins can cap this period by setting
-# the 'allowed_lifetime_min' and 'allowed_lifetime_max' config options.
-#
-# If this feature is enabled, Synapse will regularly look for and purge events
-# which are older than the room's maximum retention period. Synapse will also
-# filter events received over federation so that events that should have been
-# purged are ignored and not stored again.
-#
-retention:
-  # The message retention policies feature is disabled by default. Uncomment the
-  # following line to enable it.
-  #
-  #enabled: true
-
-  # Default retention policy. If set, Synapse will apply it to rooms that lack the
-  # 'm.room.retention' state event. Currently, the value of 'min_lifetime' doesn't
-  # matter much because Synapse doesn't take it into account yet.
-  #
-  #default_policy:
-  #  min_lifetime: 1d
-  #  max_lifetime: 1y
-
-  # Retention policy limits. If set, and a room's state contains an
-  # 'm.room.retention' event which contains a 'min_lifetime' or a
-  # 'max_lifetime' that's out of these bounds, Synapse will cap the room's policy
-  # to these limits when running purge jobs.
-  #
-  #allowed_lifetime_min: 1d
-  #allowed_lifetime_max: 1y
-
-  # Server admins can define the settings of the background jobs purging the
-  # events whose lifetime has expired under the 'purge_jobs' section.
-  #
-  # If no configuration is provided, a single job will be set up to delete expired
-  # events in every room daily.
-  #
-  # Each job's configuration defines which range of message lifetimes the job
-  # takes care of. For example, if 'shortest_max_lifetime' is '2d' and
-  # 'longest_max_lifetime' is '3d', the job will handle purging expired events in
-  # rooms whose state defines a 'max_lifetime' that's both higher than 2 days, and
-  # lower than or equal to 3 days. Both the minimum and the maximum value of a
-  # range are optional, e.g. a job with no 'shortest_max_lifetime' and a
-  # 'longest_max_lifetime' of '3d' will handle every room with a retention policy
-  # whose 'max_lifetime' is lower than or equal to three days.
-  #
-  # The rationale for this per-job configuration is that some rooms might have a
-  # retention policy with a low 'max_lifetime', where history needs to be purged
-  # of outdated messages on a more frequent basis than for the rest of the rooms
-  # (e.g. every 12h), but not want that purge to be performed by a job that's
-  # iterating over every room it knows, which could be heavy on the server.
-  #
-  # If any purge job is configured, it is strongly recommended to have at least
-  # a single job with neither 'shortest_max_lifetime' nor 'longest_max_lifetime'
-  # set, or one job without 'shortest_max_lifetime' and one job without
-  # 'longest_max_lifetime' set. Otherwise some rooms might be ignored, even if
-  # 'allowed_lifetime_min' and 'allowed_lifetime_max' are set, because capping a
-  # room's policy to these values is done after the policies are retrieved from
-  # Synapse's database (which is done using the range specified in a purge job's
-  # configuration).
-  #
-  #purge_jobs:
-  #  - longest_max_lifetime: 3d
-  #    interval: 12h
-  #  - shortest_max_lifetime: 3d
-  #    interval: 1d
-
-
-## TLS ##
-
-# PEM-encoded X509 certificate for TLS.
-# This certificate, as of Synapse 1.0, will need to be a valid and verifiable
-# certificate, signed by a recognised Certificate Authority.
-#
-# Be sure to use a `.pem` file that includes the full certificate chain including
-# any intermediate certificates (for instance, if using certbot, use
-# `fullchain.pem` as your certificate, not `cert.pem`).
-#
-#tls_certificate_path: "CONFDIR/SERVERNAME.tls.crt"
-
-# PEM-encoded private key for TLS
-#
-#tls_private_key_path: "CONFDIR/SERVERNAME.tls.key"
-
-# Whether to verify TLS server certificates for outbound federation requests.
-#
-# Defaults to `true`. To disable certificate verification, uncomment the
-# following line.
-#
-#federation_verify_certificates: false
-
-# The minimum TLS version that will be used for outbound federation requests.
-#
-# Defaults to `1`. Configurable to `1`, `1.1`, `1.2`, or `1.3`. Note
-# that setting this value higher than `1.2` will prevent federation to most
-# of the public Matrix network: only configure it to `1.3` if you have an
-# entirely private federation setup and you can ensure TLS 1.3 support.
-#
-#federation_client_minimum_tls_version: 1.2
-
-# Skip federation certificate verification on the following whitelist
-# of domains.
-#
-# This setting should only be used in very specific cases, such as
-# federation over Tor hidden services and similar. For private networks
-# of homeservers, you likely want to use a private CA instead.
-#
-# Only effective if federation_verify_certificates is `true`.
-#
-#federation_certificate_verification_whitelist:
-#  - lon.example.com
-#  - "*.domain.com"
-#  - "*.onion"
-
-# List of custom certificate authorities for federation traffic.
-#
-# This setting should only normally be used within a private network of
-# homeservers.
-#
-# Note that this list will replace those that are provided by your
-# operating environment. Certificates must be in PEM format.
-#
-#federation_custom_ca_list:
-#  - myCA1.pem
-#  - myCA2.pem
-#  - myCA3.pem
-
-
-## Federation ##
-
-# Restrict federation to the following whitelist of domains.
-# N.B. we recommend also firewalling your federation listener to limit
-# inbound federation traffic as early as possible, rather than relying
-# purely on this application-layer restriction. If not specified, the
-# default is to whitelist everything.
-#
-#federation_domain_whitelist:
-#  - lon.example.com
-#  - nyc.example.com
-#  - syd.example.com
-
-# Report prometheus metrics on the age of PDUs being sent to and received from
-# the following domains. This can be used to give an idea of "delay" on inbound
-# and outbound federation, though be aware that any delay can be due to problems
-# at either end or with the intermediate network.
-#
-# By default, no domains are monitored in this way.
-#
-#federation_metrics_domains:
-#  - matrix.org
-#  - example.com
-
-# Uncomment to disable profile lookup over federation. By default, the
-# Federation API allows other homeservers to obtain profile data of any user
-# on this homeserver. Defaults to 'true'.
-#
-#allow_profile_lookup_over_federation: false
-
-# Uncomment to allow device display name lookup over federation. By default, the
-# Federation API prevents other homeservers from obtaining the display names of
-# user devices on this homeserver. Defaults to 'false'.
-#
-#allow_device_name_lookup_over_federation: true
-
-
-## Caching ##
-
-# Caching can be configured through the following options.
-#
-# A cache 'factor' is a multiplier that can be applied to each of
-# Synapse's caches in order to increase or decrease the maximum
-# number of entries that can be stored.
-#
-# The configuration for cache factors (caches.global_factor and
-# caches.per_cache_factors) can be reloaded while the application is running,
-# by sending a SIGHUP signal to the Synapse process. Changes to other parts of
-# the caching config will NOT be applied after a SIGHUP is received; a restart
-# is necessary.
-
-# The number of events to cache in memory. Not affected by
-# caches.global_factor.
-#
-#event_cache_size: 10K
-
-caches:
-  # Controls the global cache factor, which is the default cache factor
-  # for all caches if a specific factor for that cache is not otherwise
-  # set.
-  #
-  # This can also be set by the "SYNAPSE_CACHE_FACTOR" environment
-  # variable. Setting by environment variable takes priority over
-  # setting through the config file.
-  #
-  # Defaults to 0.5, which will halve the size of all caches.
-  #
-  #global_factor: 1.0
-
-  # A dictionary of cache name to cache factor for that individual
-  # cache. Overrides the global cache factor for a given cache.
-  #
-  # These can also be set through environment variables comprised
-  # of "SYNAPSE_CACHE_FACTOR_" + the name of the cache in capital
-  # letters and underscores. Setting by environment variable
-  # takes priority over setting through the config file.
-  # Ex. SYNAPSE_CACHE_FACTOR_GET_USERS_WHO_SHARE_ROOM_WITH_USER=2.0
-  #
-  # Some caches have '*' and other characters that are not
-  # alphanumeric or underscores. These caches can be named with or
-  # without the special characters stripped. For example, to specify
-  # the cache factor for `*stateGroupCache*` via an environment
-  # variable would be `SYNAPSE_CACHE_FACTOR_STATEGROUPCACHE=2.0`.
-  #
-  per_cache_factors:
-    #get_users_who_share_room_with_user: 2.0
-
-  # Controls whether cache entries are evicted after a specified time
-  # period. Defaults to true. Uncomment to disable this feature.
-  #
-  #expire_caches: false
-
-  # If expire_caches is enabled, this flag controls how long an entry can
-  # be in a cache without having been accessed before being evicted.
-  # Defaults to 30m. Uncomment to set a different time to live for cache entries.
-  #
-  #cache_entry_ttl: 30m
-
-  # This flag enables cache autotuning, and is further specified by the sub-options `max_cache_memory_usage`,
-  # `target_cache_memory_usage`, `min_cache_ttl`. These flags work in conjunction with each other to maintain
-  # a balance between cache memory usage and cache entry availability. You must be using jemalloc to utilize
-  # this option, and all three of the options must be specified for this feature to work.
-  #cache_autotuning:
-    # This flag sets a ceiling on how much memory the cache can use before caches begin to be continuously evicted.
-    # They will continue to be evicted until the memory usage drops below the `target_cache_memory_usage`, set in
-    # the flag below, or until the `min_cache_ttl` is hit.
-    #max_cache_memory_usage: 1024M
-
-    # This flag sets a rough target for the desired memory usage of the caches.
-    #target_cache_memory_usage: 758M
-
-    # `min_cache_ttl` sets a limit under which newer cache entries are not evicted and is only applied when
-    # caches are actively being evicted/`max_cache_memory_usage` has been exceeded. This is to protect hot caches
-    # from being emptied while Synapse is evicting due to memory.
-    #min_cache_ttl: 5m
-
-  # Controls how long the results of a /sync request are cached for after
-  # a successful response is returned. A higher duration can help clients with
-  # intermittent connections, at the cost of higher memory usage.
- # - # By default, this is zero, which means that sync responses are not cached - # at all. - # - #sync_response_cache_duration: 2m - - -## Database ## - -# The 'database' setting defines the database that synapse uses to store all of -# its data. -# -# 'name' gives the database engine to use: either 'sqlite3' (for SQLite) or -# 'psycopg2' (for PostgreSQL). -# -# 'txn_limit' gives the maximum number of transactions to run per connection -# before reconnecting. Defaults to 0, which means no limit. -# -# 'allow_unsafe_locale' is an option specific to Postgres. Under the default behavior, Synapse will refuse to -# start if the postgres db is set to a non-C locale. You can override this behavior (which is *not* recommended) -# by setting 'allow_unsafe_locale' to true. Note that doing so may corrupt your database. You can find more information -# here: https://matrix-org.github.io/synapse/latest/postgres.html#fixing-incorrect-collate-or-ctype and here: -# https://wiki.postgresql.org/wiki/Locale_data_changes -# -# 'args' gives options which are passed through to the database engine, -# except for options starting 'cp_', which are used to configure the Twisted -# connection pool. For a reference to valid arguments, see: -# * for sqlite: https://docs.python.org/3/library/sqlite3.html#sqlite3.connect -# * for postgres: https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-PARAMKEYWORDS -# * for the connection pool: https://twistedmatrix.com/documents/current/api/twisted.enterprise.adbapi.ConnectionPool.html#__init__ -# -# -# Example SQLite configuration: -# -#database: -# name: sqlite3 -# args: -# database: /path/to/homeserver.db -# -# -# Example Postgres configuration: -# -#database: -# name: psycopg2 -# txn_limit: 10000 -# args: -# user: synapse_user -# password: secretpassword -# database: synapse -# host: localhost -# port: 5432 -# cp_min: 5 -# cp_max: 10 -# -# For more information on using Synapse with Postgres, -# see https://matrix-org.github.io/synapse/latest/postgres.html. -# database: name: sqlite3 args: database: DATADIR/homeserver.db - - -## Logging ## - -# A yaml python logging config file as described by -# https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema -# log_config: "CONFDIR/SERVERNAME.log.config" - - -## Ratelimiting ## - -# Ratelimiting settings for client actions (registration, login, messaging). -# -# Each ratelimiting configuration is made of two parameters: -# - per_second: number of requests a client can send per second. -# - burst_count: number of requests a client can send before being throttled. -# -# Synapse currently uses the following configurations: -# - one for messages that ratelimits sending based on the account the client -# is using -# - one for registration that ratelimits registration requests based on the -# client's IP address. -# - one for checking the validity of registration tokens that ratelimits -# requests based on the client's IP address. -# - one for login that ratelimits login requests based on the client's IP -# address. -# - one for login that ratelimits login requests based on the account the -# client is attempting to log into. -# - one for login that ratelimits login requests based on the account the -# client is attempting to log into, based on the amount of failed login -# attempts for this account. -# - one for ratelimiting redactions by room admins. If this is not explicitly -# set then it uses the same ratelimiting as per rc_message. 
This is useful -# to allow room admins to deal with abuse quickly. -# - two for ratelimiting number of rooms a user can join, "local" for when -# users are joining rooms the server is already in (this is cheap) vs -# "remote" for when users are trying to join rooms not on the server (which -# can be more expensive) -# - one for ratelimiting how often a user or IP can attempt to validate a 3PID. -# - two for ratelimiting how often invites can be sent in a room or to a -# specific user. -# - one for ratelimiting 3PID invites (i.e. invites sent to a third-party ID -# such as an email address or a phone number) based on the account that's -# sending the invite. -# -# The defaults are as shown below. -# -#rc_message: -# per_second: 0.2 -# burst_count: 10 -# -#rc_registration: -# per_second: 0.17 -# burst_count: 3 -# -#rc_registration_token_validity: -# per_second: 0.1 -# burst_count: 5 -# -#rc_login: -# address: -# per_second: 0.17 -# burst_count: 3 -# account: -# per_second: 0.17 -# burst_count: 3 -# failed_attempts: -# per_second: 0.17 -# burst_count: 3 -# -#rc_admin_redaction: -# per_second: 1 -# burst_count: 50 -# -#rc_joins: -# local: -# per_second: 0.1 -# burst_count: 10 -# remote: -# per_second: 0.01 -# burst_count: 10 -# -#rc_3pid_validation: -# per_second: 0.003 -# burst_count: 5 -# -#rc_invites: -# per_room: -# per_second: 0.3 -# burst_count: 10 -# per_user: -# per_second: 0.003 -# burst_count: 5 -# -#rc_third_party_invite: -# per_second: 0.2 -# burst_count: 10 - -# Ratelimiting settings for incoming federation -# -# The rc_federation configuration is made up of the following settings: -# - window_size: window size in milliseconds -# - sleep_limit: number of federation requests from a single server in -# a window before the server will delay processing the request. -# - sleep_delay: duration in milliseconds to delay processing events -# from remote servers by if they go over the sleep limit. -# - reject_limit: maximum number of concurrent federation requests -# allowed from a single server -# - concurrent: number of federation requests to concurrently process -# from a single server -# -# The defaults are as shown below. -# -#rc_federation: -# window_size: 1000 -# sleep_limit: 10 -# sleep_delay: 500 -# reject_limit: 50 -# concurrent: 3 - -# Target outgoing federation transaction frequency for sending read-receipts, -# per-room. -# -# If we end up trying to send out more read-receipts, they will get buffered up -# into fewer transactions. -# -#federation_rr_transactions_per_room_per_second: 50 - - - -## Media Store ## - -# Enable the media store service in the Synapse master. Uncomment the -# following if you are using a separate media store worker. -# -#enable_media_repo: false - -# Directory where uploaded images and attachments are stored. -# -media_store_path: "DATADIR/media_store" - -# Media storage providers allow media to be stored in different -# locations. -# -#media_storage_providers: -# - module: file_system -# # Whether to store newly uploaded local files -# store_local: false -# # Whether to store newly downloaded remote files -# store_remote: false -# # Whether to wait for successful storage for local uploads -# store_synchronous: false -# config: -# directory: /mnt/some/other/directory - -# The largest allowed upload size in bytes -# -# If you are using a reverse proxy you may also need to set this value in -# your reverse proxy's config. Notably Nginx has a small max body size by default. -# See https://matrix-org.github.io/synapse/latest/reverse_proxy.html. 
-#
-#max_upload_size: 50M
-
-# Maximum number of pixels that will be thumbnailed
-#
-#max_image_pixels: 32M
-
-# Whether to generate new thumbnails on the fly to precisely match
-# the resolution requested by the client. If true then whenever
-# a new resolution is requested by the client the server will
-# generate a new thumbnail. If false the server will pick a thumbnail
-# from a precalculated list.
-#
-#dynamic_thumbnails: false
-
-# List of thumbnails to precalculate when an image is uploaded.
-#
-#thumbnail_sizes:
-#  - width: 32
-#    height: 32
-#    method: crop
-#  - width: 96
-#    height: 96
-#    method: crop
-#  - width: 320
-#    height: 240
-#    method: scale
-#  - width: 640
-#    height: 480
-#    method: scale
-#  - width: 800
-#    height: 600
-#    method: scale
-
-# Is the preview URL API enabled?
-#
-# 'false' by default: uncomment the following to enable it (and specify a
-# url_preview_ip_range_blacklist blacklist).
-#
-#url_preview_enabled: true
-
-# List of IP address CIDR ranges that the URL preview spider is denied
-# from accessing. There are no defaults: you must explicitly
-# specify a list for URL previewing to work. You should specify any
-# internal services in your network that you do not want synapse to try
-# to connect to, otherwise anyone in any Matrix room could cause your
-# synapse to issue arbitrary GET requests to your internal services,
-# causing serious security issues.
-#
-# (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly
-# listed here, since they correspond to unroutable addresses.)
-#
-# This must be specified if url_preview_enabled is set. It is recommended that
-# you uncomment the following list as a starting point.
-#
-# Note: The value is ignored when an HTTP proxy is in use
-#
-#url_preview_ip_range_blacklist:
-#  - '127.0.0.0/8'
-#  - '10.0.0.0/8'
-#  - '172.16.0.0/12'
-#  - '192.168.0.0/16'
-#  - '100.64.0.0/10'
-#  - '192.0.0.0/24'
-#  - '169.254.0.0/16'
-#  - '192.88.99.0/24'
-#  - '198.18.0.0/15'
-#  - '192.0.2.0/24'
-#  - '198.51.100.0/24'
-#  - '203.0.113.0/24'
-#  - '224.0.0.0/4'
-#  - '::1/128'
-#  - 'fe80::/10'
-#  - 'fc00::/7'
-#  - '2001:db8::/32'
-#  - 'ff00::/8'
-#  - 'fec0::/10'
-
-# List of IP address CIDR ranges that the URL preview spider is allowed
-# to access even if they are specified in url_preview_ip_range_blacklist.
-# This is useful for specifying exceptions to wide-ranging blacklisted
-# target IP ranges - e.g. for enabling URL previews for a specific private
-# website only visible in your network.
-#
-#url_preview_ip_range_whitelist:
-#  - '192.168.1.1'
-
-# Optional list of URL matches that the URL preview spider is
-# denied from accessing. You should use url_preview_ip_range_blacklist
-# in preference to this, otherwise someone could define a public DNS
-# entry that points to a private IP address and circumvent the blacklist.
-# This is more useful if you know there is an entire shape of URL that
-# you know you will never want synapse to try to spider.
-#
-# Each list entry is a dictionary of url component attributes as returned
-# by urlparse.urlsplit as applied to the absolute form of the URL. See
-# https://docs.python.org/2/library/urlparse.html#urlparse.urlsplit
-# The values of the dictionary are treated as a filename match pattern
-# applied to that component of URLs, unless they start with a ^ in which
-# case they are treated as a regular expression match. If all the
-# specified component matches for a given list item succeed, the URL is
-# blacklisted.
-#
-#url_preview_url_blacklist:
-#  # blacklist any URL with a username in its URI
-#  - username: '*'
-#
-#  # blacklist all *.google.com URLs
-#  - netloc: 'google.com'
-#  - netloc: '*.google.com'
-#
-#  # blacklist all plain HTTP URLs
-#  - scheme: 'http'
-#
-#  # blacklist http(s)://www.acme.com/foo
-#  - netloc: 'www.acme.com'
-#    path: '/foo'
-#
-#  # blacklist any URL with a literal IPv4 address
-#  - netloc: '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$'
-
-# The largest allowed URL preview spidering size in bytes
-#
-#max_spider_size: 10M
-
-# A list of values for the Accept-Language HTTP header used when
-# downloading webpages during URL preview generation. This allows
-# Synapse to specify the preferred languages that URL previews should
-# be in when communicating with remote servers.
-#
-# Each value is an IETF language tag; a 2-3 letter identifier for a
-# language, optionally followed by subtags separated by '-', specifying
-# a country or region variant.
-#
-# Multiple values can be provided, and a weight can be added to each by
-# using quality value syntax (;q=). '*' translates to any language.
-#
-# Defaults to "en".
-#
-# Example:
-#
-# url_preview_accept_language:
-#   - en-UK
-#   - en-US;q=0.9
-#   - fr;q=0.8
-#   - *;q=0.7
-#
-url_preview_accept_language:
-#   - en
-
-
-# oEmbed allows for easier embedding of content from a website. It can be
-# used for generating URL previews of services which support it.
-#
-oembed:
-  # A default list of oEmbed providers is included with Synapse.
-  #
-  # Uncomment the following to disable using these default oEmbed URLs.
-  # Defaults to 'false'.
-  #
-  #disable_default_providers: true
-
-  # Additional files with oEmbed configuration (each should be in the
-  # form of providers.json).
-  #
-  # By default, this list is empty (so only the default providers.json
-  # is used).
-  #
-  #additional_providers:
-  #  - oembed/my_providers.json
-
-
-## Captcha ##
-# See docs/CAPTCHA_SETUP.md for full details of configuring this.
-
-# This homeserver's ReCAPTCHA public key. Must be specified if
-# enable_registration_captcha is enabled.
-#
-#recaptcha_public_key: "YOUR_PUBLIC_KEY"
-
-# This homeserver's ReCAPTCHA private key. Must be specified if
-# enable_registration_captcha is enabled.
-#
-#recaptcha_private_key: "YOUR_PRIVATE_KEY"
-
-# Uncomment to enable ReCaptcha checks when registering, preventing signup
-# unless a captcha is answered. Requires a valid ReCaptcha
-# public/private key. Defaults to 'false'.
-#
-#enable_registration_captcha: true
-
-# The API endpoint to use for verifying m.login.recaptcha responses.
-# Defaults to "https://www.recaptcha.net/recaptcha/api/siteverify".
-#
-#recaptcha_siteverify_api: "https://my.recaptcha.site"
-
-
-## TURN ##
-
-# The public URIs of the TURN server to give to clients
-#
-#turn_uris: []
-
-# The shared secret used to compute passwords for the TURN server
-#
-#turn_shared_secret: "YOUR_SHARED_SECRET"
-
-# The username and password if the TURN server needs them and
-# does not use a token
-#
-#turn_username: "TURNSERVER_USERNAME"
-#turn_password: "TURNSERVER_PASSWORD"
-
-# How long generated TURN credentials last
-#
-#turn_user_lifetime: 1h
-
-# Whether guests should be allowed to use the TURN server.
-# This defaults to True, otherwise VoIP will be unreliable for guests.
-# However, it does introduce a slight security risk as it allows users to
-# connect to arbitrary endpoints without having first signed up for a
-# valid account (e.g. by passing a CAPTCHA).
-# -#turn_allow_guests: true - - -## Registration ## -# -# Registration can be rate-limited using the parameters in the "Ratelimiting" -# section of this file. - -# Enable registration for new users. Defaults to 'false'. It is highly recommended that if you enable registration, -# you use either captcha, email, or token-based verification to verify that new users are not bots. In order to enable registration -# without any verification, you must also set `enable_registration_without_verification`, found below. -# -#enable_registration: false - -# Enable registration without email or captcha verification. Note: this option is *not* recommended, -# as registration without verification is a known vector for spam and abuse. Defaults to false. Has no effect -# unless `enable_registration` is also enabled. -# -#enable_registration_without_verification: true - -# Time that a user's session remains valid for, after they log in. -# -# Note that this is not currently compatible with guest logins. -# -# Note also that this is calculated at login time: changes are not applied -# retrospectively to users who have already logged in. -# -# By default, this is infinite. -# -#session_lifetime: 24h - -# Time that an access token remains valid for, if the session is -# using refresh tokens. -# For more information about refresh tokens, please see the manual. -# Note that this only applies to clients which advertise support for -# refresh tokens. -# -# Note also that this is calculated at login time and refresh time: -# changes are not applied to existing sessions until they are refreshed. -# -# By default, this is 5 minutes. -# -#refreshable_access_token_lifetime: 5m - -# Time that a refresh token remains valid for (provided that it is not -# exchanged for another one first). -# This option can be used to automatically log-out inactive sessions. -# Please see the manual for more information. -# -# Note also that this is calculated at login time and refresh time: -# changes are not applied to existing sessions until they are refreshed. -# -# By default, this is infinite. -# -#refresh_token_lifetime: 24h - -# Time that an access token remains valid for, if the session is NOT -# using refresh tokens. -# Please note that not all clients support refresh tokens, so setting -# this to a short value may be inconvenient for some users who will -# then be logged out frequently. -# -# Note also that this is calculated at login time: changes are not applied -# retrospectively to existing sessions for users that have already logged in. -# -# By default, this is infinite. -# -#nonrefreshable_access_token_lifetime: 24h - -# The user must provide all of the below types of 3PID when registering. -# -#registrations_require_3pid: -# - email -# - msisdn - -# Explicitly disable asking for MSISDNs from the registration -# flow (overrides registrations_require_3pid if MSISDNs are set as required) -# -#disable_msisdn_registration: true - -# Mandate that users are only allowed to associate certain formats of -# 3PIDs with accounts on this server. -# -#allowed_local_3pids: -# - medium: email -# pattern: '^[^@]+@matrix\.org$' -# - medium: email -# pattern: '^[^@]+@vector\.im$' -# - medium: msisdn -# pattern: '\+44' - -# Enable 3PIDs lookup requests to identity servers from this server. -# -#enable_3pid_lookup: true - -# Require users to submit a token during registration. 
-
-# Allow users to submit a token during registration to bypass any required 3pid
-# steps configured in `registrations_require_3pid`.
-# Defaults to false, requiring that registration tokens (if enabled) complete a 3pid flow.
-#
-#enable_registration_token_3pid_bypass: false
-
-# If set, allows registration of standard or admin accounts by anyone who
-# has the shared secret, even if registration is otherwise disabled.
-#
-#registration_shared_secret: <PRIVATE STRING>
-
-# Set the number of bcrypt rounds used to generate a password hash.
-# Larger numbers increase the work factor needed to generate the hash.
-# The default number is 12 (which equates to 2^12 rounds).
-# N.B. that increasing this will exponentially increase the time required
-# to register or log in - e.g. 24 => 2^24 rounds which will take >20 mins.
-#
-#bcrypt_rounds: 12
-
-# Allows users to register as guests without a password/email/etc, and
-# participate in rooms hosted on this server which have been made
-# accessible to anonymous users.
-#
-#allow_guest_access: false
-
-# The identity server which we suggest that clients should use when users log
-# in on this server.
-#
-# (By default, no suggestion is made, so it is left up to the client.
-# This setting is ignored unless public_baseurl is also explicitly set.)
-#
-#default_identity_server: https://matrix.org
-
-# Handle threepid (email/phone etc) registration and password resets through a set of
-# *trusted* identity servers. Note that this allows the configured identity server to
-# reset passwords for accounts!
-#
-# Be aware that if `email` is not set, and SMTP options have not been
-# configured in the email config block, registration and user password resets via
-# email will be globally disabled.
-#
-# Additionally, if `msisdn` is not set, registration and password resets via msisdn
-# will be disabled regardless, and users will not be able to associate an msisdn
-# identifier to their account. This is due to Synapse currently not supporting
-# any method of sending SMS messages on its own.
-#
-# To enable using an identity server for operations regarding a particular third-party
-# identifier type, set the value to the URL of that identity server as shown in the
-# examples below.
-#
-# Servers handling these requests must answer the `/requestToken` endpoints defined
-# by the Matrix Identity Service API specification:
-# https://matrix.org/docs/spec/identity_service/latest
-#
-account_threepid_delegates:
-    #email: https://example.com     # Delegate email sending to example.com
-    #msisdn: http://localhost:8090  # Delegate SMS sending to this local process
-
-# Whether users are allowed to change their displayname after it has
-# been initially set. Useful when provisioning users based on the
-# contents of a third-party directory.
-#
-# Does not apply to server administrators. Defaults to 'true'
-#
-#enable_set_displayname: false
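On the `bcrypt_rounds` note above: each extra round doubles the hashing work, which is why the comment warns that 24 rounds takes tens of minutes. A quick, hedged way to gauge the cost on your own hardware (requires the `bcrypt` package; note that Synapse also mixes the configured `pepper` into the password before hashing):

```python
import time

import bcrypt

password = b"correct horse battery staple"
for rounds in (4, 8, 12, 14):
    start = time.monotonic()
    # gensalt() embeds the work factor in the salt; hashpw() does the work.
    bcrypt.hashpw(password, bcrypt.gensalt(rounds=rounds))
    print(f"{rounds} rounds: {time.monotonic() - start:.3f}s")
```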
-
-# Whether users are allowed to change their avatar after it has been
-# initially set. Useful when provisioning users based on the contents
-# of a third-party directory.
-#
-# Does not apply to server administrators. Defaults to 'true'
-#
-#enable_set_avatar_url: false
-
-# Whether users can change the 3PIDs associated with their accounts
-# (email address and msisdn).
-#
-# Defaults to 'true'
-#
-#enable_3pid_changes: false
-
-# Users who register on this homeserver will automatically be joined
-# to these rooms.
-#
-# By default, any room aliases included in this list will be created
-# as a publicly joinable room when the first user registers for the
-# homeserver. This behaviour can be customised with the settings below.
-# If the room already exists, make certain it is a publicly joinable
-# room. The join rule of the room must be set to 'public'.
-#
-#auto_join_rooms:
-#  - "#example:example.com"
-
-# Where auto_join_rooms is specified, setting this flag ensures that
-# the rooms exist by creating them when the first user on the
-# homeserver registers.
-#
-# By default the auto-created rooms are publicly joinable from any federated
-# server. Use the autocreate_auto_join_rooms_federated and
-# autocreate_auto_join_room_preset settings below to customise this behaviour.
-#
-# Setting to false means that if the rooms are not manually created,
-# users cannot be auto-joined since they do not exist.
-#
-# Defaults to true. Uncomment the following line to disable automatically
-# creating auto-join rooms.
-#
-#autocreate_auto_join_rooms: false
-
-# Whether the auto_join_rooms that are auto-created are available via
-# federation. Only has an effect if autocreate_auto_join_rooms is true.
-#
-# Note that whether a room is federated cannot be modified after
-# creation.
-#
-# Defaults to true: the room will be joinable from other servers.
-# Uncomment the following to prevent users from other homeservers from
-# joining these rooms.
-#
-#autocreate_auto_join_rooms_federated: false
-
-# The room preset to use when auto-creating one of auto_join_rooms. Only has an
-# effect if autocreate_auto_join_rooms is true.
-#
-# This can be one of "public_chat", "private_chat", or "trusted_private_chat".
-# If a value of "private_chat" or "trusted_private_chat" is used then
-# auto_join_mxid_localpart must also be configured.
-#
-# Defaults to "public_chat", meaning that the room is joinable by anyone, including
-# federated servers if autocreate_auto_join_rooms_federated is true (the default).
-# Uncomment the following to require an invitation to join these rooms.
-#
-#autocreate_auto_join_room_preset: private_chat
-
-# The local part of the user id which is used to create auto_join_rooms if
-# autocreate_auto_join_rooms is true. If this is not provided then the
-# initial user account that registers will be used to create the rooms.
-#
-# The user id is also used to invite new users to any auto-join rooms which
-# are set to invite-only.
-#
-# It *must* be configured if autocreate_auto_join_room_preset is set to
-# "private_chat" or "trusted_private_chat".
-#
-# Note that this must be specified in order for new users to be correctly
-# invited to any auto-join rooms which have been set to invite-only (either
-# at the time of creation or subsequently).
-#
-# Note that, if the room already exists, this user must be joined and
-# have the appropriate permissions to invite new members.
-#
-#auto_join_mxid_localpart: system
-
-# When auto_join_rooms is specified, setting this flag to false prevents
-# guest accounts from being automatically joined to the rooms.
-#
-# Defaults to true.
-#
-#auto_join_rooms_for_guests: false
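Several of the registration lifetimes above (`session_lifetime`, `refresh_token_lifetime`, and friends) take duration strings such as `24h` or `5m`. Roughly, Synapse's config layer turns these into milliseconds along the following lines; this is a simplified sketch, not the real `parse_duration` in `synapse/config/_base.py`:

```python
import re

UNITS_MS = {
    "s": 1_000,
    "m": 60 * 1_000,
    "h": 60 * 60 * 1_000,
    "d": 24 * 60 * 60 * 1_000,
    "w": 7 * 24 * 60 * 60 * 1_000,
    "y": 365 * 24 * 60 * 60 * 1_000,
}

def parse_duration(value: str) -> int:
    """Turn e.g. '24h' into milliseconds; bare integers are treated as ms."""
    match = re.fullmatch(r"(\d+)([smhdwy]?)", value)
    if not match:
        raise ValueError(f"invalid duration: {value!r}")
    number, unit = match.groups()
    return int(number) * (UNITS_MS[unit] if unit else 1)

assert parse_duration("5m") == 300_000
```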
-
-# Whether to inhibit errors raised when registering a new account if the user ID
-# already exists. If turned on, requests to /register/available will always
-# show a user ID as available, and Synapse won't raise an error when starting
-# a registration with a user ID that already exists. However, Synapse will still
-# raise an error if the registration completes and the username conflicts.
-#
-# Defaults to false.
-#
-#inhibit_user_in_use_error: true
-
-
-## Metrics ##
-
-# Enable collection and rendering of performance metrics
-#
-#enable_metrics: false
-
-# Enable sentry integration
-# NOTE: While attempts are made to ensure that the logs don't contain
-# any sensitive information, this cannot be guaranteed. By enabling
-# this option the sentry server may therefore receive sensitive
-# information, and it in turn may then disseminate sensitive information
-# through insecure notification channels if so configured.
-#
-#sentry:
-#    dsn: "..."
-
-# Flags to enable Prometheus metrics which are not suitable to be
-# enabled by default, either for performance reasons or limited use.
-#
-metrics_flags:
-    # Publish synapse_federation_known_servers, a gauge of the number of
-    # servers this homeserver knows about, including itself. May cause
-    # performance problems on large homeservers.
-    #
-    #known_servers: true
-
-# Whether or not to report anonymized homeserver usage statistics.
-#
-#report_stats: true|false
-
-# The endpoint to report the anonymized homeserver usage statistics to.
-# Defaults to https://matrix.org/report-usage-stats/push
-#
-#report_stats_endpoint: https://example.com/report-usage-stats/push
-
-
-## API Configuration ##
-
-# Controls for the state that is shared with users who receive an invite
-# to a room
-#
-room_prejoin_state:
-   # By default, the following state event types are shared with users who
-   # receive invites to the room:
-   #
-   # - m.room.join_rules
-   # - m.room.canonical_alias
-   # - m.room.avatar
-   # - m.room.encryption
-   # - m.room.name
-   # - m.room.create
-   # - m.room.topic
-   #
-   # Uncomment the following to disable these defaults (so that only the event
-   # types listed in 'additional_event_types' are shared). Defaults to 'false'.
-   #
-   #disable_default_event_types: true
-
-   # Additional state event types to share with users when they are invited
-   # to a room.
-   #
-   # By default, this list is empty (so only the default event types are shared).
-   #
-   #additional_event_types:
-   #  - org.example.custom.event.type
-
-# We record the IP address of clients used to access the API for various
-# reasons, including displaying it to the user in the "Where you're signed in"
-# dialog.
-#
-# By default, when puppeting another user via the admin API, the client IP
-# address is recorded against the user who created the access token (ie, the
-# admin user), and *not* the puppeted user.
-#
-# Uncomment the following to also record the IP address against the puppeted
-# user. (This also means that the puppeted user will count as an "active" user
-# for the purpose of monthly active user tracking - see 'limit_usage_by_mau' etc
-# above.)
-#
-#track_puppeted_user_ips: true
-
-
-# A list of application service config files to use
-#
-#app_service_config_files:
-#  - app_service_1.yaml
-#  - app_service_2.yaml
-
-# Uncomment to enable tracking of application service IP addresses. Implicitly
-# enables MAU tracking for application service users.
-#
-#track_appservice_user_ips: true
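If `enable_metrics` (above) is turned on together with a metrics listener, gauges such as `synapse_federation_known_servers` can be scraped with the standard Prometheus client. A sketch, assuming a metrics listener on port 9000 (which is not configured in this file):

```python
import requests
from prometheus_client.parser import text_string_to_metric_families

# Fetch the Prometheus exposition text from the (assumed) metrics listener.
body = requests.get("http://localhost:9000/_synapse/metrics", timeout=5).text
for family in text_string_to_metric_families(body):
    if family.name == "synapse_federation_known_servers":
        for sample in family.samples:
            print(sample.name, sample.value)
```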
-
-
-# a secret which is used to sign access tokens. If none is specified,
-# the registration_shared_secret is used, if one is given; otherwise,
-# a secret key is derived from the signing key.
-#
-#macaroon_secret_key: <PRIVATE STRING>
-
-# a secret which is used to calculate HMACs for form values, to stop
-# falsification of values. Must be specified for the User Consent
-# forms to work.
-#
-#form_secret: <PRIVATE STRING>
-
-## Signing Keys ##
-
-# Path to the signing key to sign messages with
-#
+media_store_path: DATADIR/media_store
 signing_key_path: "CONFDIR/SERVERNAME.signing.key"
-
-# The keys that the server used to sign messages with but won't use
-# to sign new messages.
-#
-old_signing_keys:
-  # For each key, `key` should be the base64-encoded public key, and
-  # `expired_ts` should be the time (in milliseconds since the unix epoch) that
-  # it was last used.
-  #
-  # It is possible to build an entry from an old signing.key file using the
-  # `export_signing_key` script which is provided with synapse.
-  #
-  # For example:
-  #
-  #"ed25519:id": { key: "base64string", expired_ts: 123456789123 }
-
-# How long a key response published by this server is valid for.
-# Used to set the valid_until_ts in /key/v2 APIs.
-# Determines how quickly servers will query to check which keys
-# are still valid.
-#
-#key_refresh_interval: 1d
-
-# The trusted servers to download signing keys from.
-#
-# When we need to fetch a signing key, each server is tried in parallel.
-#
-# Normally, the connection to the key server is validated via TLS certificates.
-# Additional security can be provided by configuring a `verify key`, which
-# will make synapse check that the response is signed by that key.
-#
-# This setting supersedes an older setting named `perspectives`. The old format
-# is still supported for backwards-compatibility, but it is deprecated.
-#
-# 'trusted_key_servers' defaults to matrix.org, but using it will generate a
-# warning on start-up. To suppress this warning, set
-# 'suppress_key_server_warning' to true.
-#
-# Options for each entry in the list include:
-#
-#    server_name: the name of the server. required.
-#
-#    verify_keys: an optional map from key id to base64-encoded public key.
-#       If specified, we will check that the response is signed by at least
-#       one of the given keys.
-#
-#    accept_keys_insecurely: a boolean. Normally, if `verify_keys` is unset,
-#       and federation_verify_certificates is not `true`, synapse will refuse
-#       to start, because this would allow anyone who can spoof DNS responses
-#       to masquerade as the trusted key server. If you know what you are doing
-#       and are sure that your network environment provides a secure connection
-#       to the key server, you can set this to `true` to override this
-#       behaviour.
-#
-# An example configuration might look like:
-#
-#trusted_key_servers:
-#  - server_name: "my_trusted_server.example.com"
-#    verify_keys:
-#      "ed25519:auto": "abcdefghijklmnopqrstuvwxyzabcdefghijklmopqr"
-#  - server_name: "my_other_trusted_server.example.com"
-#
 trusted_key_servers:
   - server_name: "matrix.org"
-
-# Uncomment the following to disable the warning that is emitted when the
-# trusted_key_servers include 'matrix.org'. See above.
-#
-#suppress_key_server_warning: true
-
-# The signing keys to use when acting as a trusted key server. If not specified
-# defaults to the server signing key.
-#
-# Can contain multiple keys, one per line.
-#
-#key_server_signing_keys_path: "key_server_signing_keys.key"
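For context on the signing-key options above: Synapse's keys are ed25519 keys handled by the `signedjson` library. A sketch of generating one and deriving the base64 verify key that a peer could pin under `verify_keys` in `trusted_key_servers` (illustrative only; the real key handling lives in Synapse's key config and scripts):

```python
from signedjson.key import (
    encode_verify_key_base64,
    generate_signing_key,
    get_verify_key,
)

# The version string is arbitrary; it becomes the "id" part of "ed25519:id".
signing_key = generate_signing_key("a_key_version")
verify_key = get_verify_key(signing_key)
print(f"ed25519:{signing_key.version}", encode_verify_key_base64(verify_key))
```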
-
-
-## Single sign-on integration ##
-
-# The following settings can be used to make Synapse use a single sign-on
-# provider for authentication, instead of its internal password database.
-#
-# You will probably also want to set the following options to `false` to
-# disable the regular login/registration flows:
-#   * enable_registration
-#   * password_config.enabled
-#
-# You will also want to investigate the settings under the "sso" configuration
-# section below.
-
-# Enable SAML2 for registration and login. Uses pysaml2.
-#
-# At least one of `sp_config` or `config_path` must be set in this section to
-# enable SAML login.
-#
-# Once SAML support is enabled, a metadata file will be exposed at
-# https://<server>:<port>/_synapse/client/saml2/metadata.xml, which you may be able to
-# use to configure your SAML IdP with. Alternatively, you can manually configure
-# the IdP to use an ACS location of
-# https://<server>:<port>/_synapse/client/saml2/authn_response.
-#
-saml2_config:
-  # `sp_config` is the configuration for the pysaml2 Service Provider.
-  # See pysaml2 docs for format of config.
-  #
-  # Default values will be used for the 'entityid' and 'service' settings,
-  # so it is not normally necessary to specify them unless you need to
-  # override them.
-  #
-  sp_config:
-    # Point this to the IdP's metadata. You must provide either a local
-    # file via the `local` attribute or (preferably) a URL via the
-    # `remote` attribute.
-    #
-    #metadata:
-    #  local: ["saml2/idp.xml"]
-    #  remote:
-    #    - url: https://our_idp/metadata.xml
-
-    # Allowed clock difference in seconds between the homeserver and IdP.
-    #
-    # Uncomment the below to increase the accepted time difference from 0 to 3 seconds.
-    #
-    #accepted_time_diff: 3
-
-    # By default, the user has to go to our login page first. If you'd like
-    # to allow IdP-initiated login, set 'allow_unsolicited: true' in a
-    # 'service.sp' section:
-    #
-    #service:
-    #  sp:
-    #    allow_unsolicited: true
-
-    # The examples below are just used to generate our metadata xml, and you
-    # may well not need them, depending on your setup. Alternatively you
-    # may need a whole lot more detail - see the pysaml2 docs!
-
-    #description: ["My awesome SP", "en"]
-    #name: ["Test SP", "en"]
-
-    #ui_info:
-    #  display_name:
-    #    - lang: en
-    #      text: "Display Name is the descriptive name of your service."
-    #  description:
-    #    - lang: en
-    #      text: "Description should be a short paragraph explaining the purpose of the service."
-    #  information_url:
-    #    - lang: en
-    #      text: "https://example.com/terms-of-service"
-    #  privacy_statement_url:
-    #    - lang: en
-    #      text: "https://example.com/privacy-policy"
-    #  keywords:
-    #    - lang: en
-    #      text: ["Matrix", "Element"]
-    #  logo:
-    #    - lang: en
-    #      text: "https://example.com/logo.svg"
-    #      width: "200"
-    #      height: "80"
-
-    #organization:
-    #  name: Example com
-    #  display_name:
-    #    - ["Example co", "en"]
-    #  url: "http://example.com"
-
-    #contact_person:
-    #  - given_name: Bob
-    #    sur_name: "the Sysadmin"
-    #    email_address: ["admin@example.com"]
-    #    contact_type: technical
-
-  # Instead of putting the config inline as above, you can specify a
-  # separate pysaml2 configuration file:
-  #
-  #config_path: "CONFDIR/sp_conf.py"
-
-  # The lifetime of a SAML session. This defines how long a user has to
-  # complete the authentication process, if allow_unsolicited is unset.
-  # The default is 15 minutes.
-  #
-  #saml_session_lifetime: 5m
-
-  # An external module can be provided here as a custom solution to
-  # mapping attributes returned from a saml provider onto a matrix user.
-  #
-  user_mapping_provider:
-    # The custom module's class. Uncomment to use a custom module.
-    #
-    #module: mapping_provider.SamlMappingProvider
-
-    # Custom configuration values for the module. Below options are
-    # intended for the built-in provider, they should be changed if
-    # using a custom module. This section will be passed as a Python
-    # dictionary to the module's `parse_config` method.
-    #
-    config:
-      # The SAML attribute (after mapping via the attribute maps) to use
-      # to derive the Matrix ID from. 'uid' by default.
-      #
-      # Note: This used to be configured by the
-      # saml2_config.mxid_source_attribute option. If that is still
-      # defined, its value will be used instead.
-      #
-      #mxid_source_attribute: displayName
-
-      # The mapping system to use for mapping the saml attribute onto a
-      # matrix ID.
-      #
-      # Options include:
-      #  * 'hexencode' (which maps unpermitted characters to '=xx')
-      #  * 'dotreplace' (which replaces unpermitted characters with
-      #     '.').
-      # The default is 'hexencode'.
-      #
-      # Note: This used to be configured by the
-      # saml2_config.mxid_mapping option. If that is still defined, its
-      # value will be used instead.
-      #
-      #mxid_mapping: dotreplace
-
-      # In previous versions of synapse, the mapping from SAML attribute to
-      # MXID was always calculated dynamically rather than stored in a
-      # table. For backwards-compatibility, we will look for user_ids
-      # matching such a pattern before creating a new account.
-      #
-      # This setting controls the SAML attribute which will be used for this
-      # backwards-compatibility lookup. Typically it should be 'uid', but if
-      # the attribute maps are changed, it may be necessary to change it.
-      #
-      # The default is 'uid'.
-      #
-      #grandfathered_mxid_source_attribute: upn
-
-  # It is possible to configure Synapse to only allow logins if SAML attributes
-  # match particular values. The requirements can be listed under
-  # `attribute_requirements` as shown below. All of the listed attributes must
-  # match for the login to be permitted.
-  #
-  #attribute_requirements:
-  #  - attribute: userGroup
-  #    value: "staff"
-  #  - attribute: department
-  #    value: "sales"
-
-  # If the metadata XML contains multiple IdP entities then the `idp_entityid`
-  # option must be set to the entity to redirect users to.
-  #
-  # Most deployments only have a single IdP entity and so should omit this
-  # option.
-  #
-  #idp_entityid: 'https://our_idp/entityid'
-
-
-# List of OpenID Connect (OIDC) / OAuth 2.0 identity providers, for registration
-# and login.
-#
-# Options for each entry include:
-#
-#   idp_id: a unique identifier for this identity provider. Used internally
-#       by Synapse; should be a single word such as 'github'.
-#
-#       Note that, if this is changed, users authenticating via that provider
-#       will no longer be recognised as the same user!
-#
-#       (Use "oidc" here if you are migrating from an old "oidc_config"
-#       configuration.)
-#
-#   idp_name: A user-facing name for this identity provider, which is used to
-#       offer the user a choice of login mechanisms.
-#
-#   idp_icon: An optional icon for this identity provider, which is presented
-#       by clients and Synapse's own IdP picker page. If given, must be an
-#       MXC URI of the format mxc://<server-name>/<media-id>. (An easy way to
-#       obtain such an MXC URI is to upload an image to an (unencrypted) room
-#       and then copy the "url" from the source of the event.)
-# -# idp_brand: An optional brand for this identity provider, allowing clients -# to style the login flow according to the identity provider in question. -# See the spec for possible options here. -# -# discover: set to 'false' to disable the use of the OIDC discovery mechanism -# to discover endpoints. Defaults to true. -# -# issuer: Required. The OIDC issuer. Used to validate tokens and (if discovery -# is enabled) to discover the provider's endpoints. -# -# client_id: Required. oauth2 client id to use. -# -# client_secret: oauth2 client secret to use. May be omitted if -# client_secret_jwt_key is given, or if client_auth_method is 'none'. -# -# client_secret_jwt_key: Alternative to client_secret: details of a key used -# to create a JSON Web Token to be used as an OAuth2 client secret. If -# given, must be a dictionary with the following properties: -# -# key: a pem-encoded signing key. Must be a suitable key for the -# algorithm specified. Required unless 'key_file' is given. -# -# key_file: the path to file containing a pem-encoded signing key file. -# Required unless 'key' is given. -# -# jwt_header: a dictionary giving properties to include in the JWT -# header. Must include the key 'alg', giving the algorithm used to -# sign the JWT, such as "ES256", using the JWA identifiers in -# RFC7518. -# -# jwt_payload: an optional dictionary giving properties to include in -# the JWT payload. Normally this should include an 'iss' key. -# -# client_auth_method: auth method to use when exchanging the token. Valid -# values are 'client_secret_basic' (default), 'client_secret_post' and -# 'none'. -# -# scopes: list of scopes to request. This should normally include the "openid" -# scope. Defaults to ["openid"]. -# -# authorization_endpoint: the oauth2 authorization endpoint. Required if -# provider discovery is disabled. -# -# token_endpoint: the oauth2 token endpoint. Required if provider discovery is -# disabled. -# -# userinfo_endpoint: the OIDC userinfo endpoint. Required if discovery is -# disabled and the 'openid' scope is not requested. -# -# jwks_uri: URI where to fetch the JWKS. Required if discovery is disabled and -# the 'openid' scope is used. -# -# skip_verification: set to 'true' to skip metadata verification. Use this if -# you are connecting to a provider that is not OpenID Connect compliant. -# Defaults to false. Avoid this in production. -# -# user_profile_method: Whether to fetch the user profile from the userinfo -# endpoint, or to rely on the data returned in the id_token from the -# token_endpoint. -# -# Valid values are: 'auto' or 'userinfo_endpoint'. -# -# Defaults to 'auto', which uses the userinfo endpoint if 'openid' is -# not included in 'scopes'. Set to 'userinfo_endpoint' to always use the -# userinfo endpoint. -# -# allow_existing_users: set to 'true' to allow a user logging in via OIDC to -# match a pre-existing account instead of failing. This could be used if -# switching from password logins to OIDC. Defaults to false. -# -# user_mapping_provider: Configuration for how attributes returned from a OIDC -# provider are mapped onto a matrix user. This setting has the following -# sub-properties: -# -# module: The class name of a custom mapping module. Default is -# 'synapse.handlers.oidc.JinjaOidcMappingProvider'. -# See https://matrix-org.github.io/synapse/latest/sso_mapping_providers.html#openid-mapping-providers -# for information on implementing a custom mapping provider. -# -# config: Configuration for the mapping provider module. 
This section will -# be passed as a Python dictionary to the user mapping provider -# module's `parse_config` method. -# -# For the default provider, the following settings are available: -# -# subject_claim: name of the claim containing a unique identifier -# for the user. Defaults to 'sub', which OpenID Connect -# compliant providers should provide. -# -# localpart_template: Jinja2 template for the localpart of the MXID. -# If this is not set, the user will be prompted to choose their -# own username (see the documentation for the -# 'sso_auth_account_details.html' template). This template can -# use the 'localpart_from_email' filter. -# -# confirm_localpart: Whether to prompt the user to validate (or -# change) the generated localpart (see the documentation for the -# 'sso_auth_account_details.html' template), instead of -# registering the account right away. -# -# display_name_template: Jinja2 template for the display name to set -# on first login. If unset, no displayname will be set. -# -# email_template: Jinja2 template for the email address of the user. -# If unset, no email address will be added to the account. -# -# extra_attributes: a map of Jinja2 templates for extra attributes -# to send back to the client during login. -# Note that these are non-standard and clients will ignore them -# without modifications. -# -# When rendering, the Jinja2 templates are given a 'user' variable, -# which is set to the claims returned by the UserInfo Endpoint and/or -# in the ID Token. -# -# It is possible to configure Synapse to only allow logins if certain attributes -# match particular values in the OIDC userinfo. The requirements can be listed under -# `attribute_requirements` as shown below. All of the listed attributes must -# match for the login to be permitted. Additional attributes can be added to -# userinfo by expanding the `scopes` section of the OIDC config to retrieve -# additional information from the OIDC provider. -# -# If the OIDC claim is a list, then the attribute must match any value in the list. -# Otherwise, it must exactly match the value of the claim. Using the example -# below, the `family_name` claim MUST be "Stephensson", but the `groups` -# claim MUST contain "admin". -# -# attribute_requirements: -# - attribute: family_name -# value: "Stephensson" -# - attribute: groups -# value: "admin" -# -# See https://matrix-org.github.io/synapse/latest/openid.html -# for information on how to configure these options. -# -# For backwards compatibility, it is also possible to configure a single OIDC -# provider via an 'oidc_config' setting. This is now deprecated and admins are -# advised to migrate to the 'oidc_providers' format. (When doing that migration, -# use 'oidc' for the idp_id to ensure that existing users continue to be -# recognised.) 
-# -oidc_providers: - # Generic example - # - #- idp_id: my_idp - # idp_name: "My OpenID provider" - # idp_icon: "mxc://example.com/mediaid" - # discover: false - # issuer: "https://accounts.example.com/" - # client_id: "provided-by-your-issuer" - # client_secret: "provided-by-your-issuer" - # client_auth_method: client_secret_post - # scopes: ["openid", "profile"] - # authorization_endpoint: "https://accounts.example.com/oauth2/auth" - # token_endpoint: "https://accounts.example.com/oauth2/token" - # userinfo_endpoint: "https://accounts.example.com/userinfo" - # jwks_uri: "https://accounts.example.com/.well-known/jwks.json" - # skip_verification: true - # user_mapping_provider: - # config: - # subject_claim: "id" - # localpart_template: "{{ user.login }}" - # display_name_template: "{{ user.name }}" - # email_template: "{{ user.email }}" - # attribute_requirements: - # - attribute: userGroup - # value: "synapseUsers" - - -# Enable Central Authentication Service (CAS) for registration and login. -# -cas_config: - # Uncomment the following to enable authorization against a CAS server. - # Defaults to false. - # - #enabled: true - - # The URL of the CAS authorization endpoint. - # - #server_url: "https://cas-server.com" - - # The attribute of the CAS response to use as the display name. - # - # If unset, no displayname will be set. - # - #displayname_attribute: name - - # It is possible to configure Synapse to only allow logins if CAS attributes - # match particular values. All of the keys in the mapping below must exist - # and the values must match the given value. Alternately if the given value - # is None then any value is allowed (the attribute just must exist). - # All of the listed attributes must match for the login to be permitted. - # - #required_attributes: - # userGroup: "staff" - # department: None - - -# Additional settings to use with single-sign on systems such as OpenID Connect, -# SAML2 and CAS. -# -# Server admins can configure custom templates for pages related to SSO. See -# https://matrix-org.github.io/synapse/latest/templates.html for more information. -# -sso: - # A list of client URLs which are whitelisted so that the user does not - # have to confirm giving access to their account to the URL. Any client - # whose URL starts with an entry in the following list will not be subject - # to an additional confirmation step after the SSO login is completed. - # - # WARNING: An entry such as "https://my.client" is insecure, because it - # will also match "https://my.client.evil.site", exposing your users to - # phishing attacks from evil.site. To avoid this, include a slash after the - # hostname: "https://my.client/". - # - # The login fallback page (used by clients that don't natively support the - # required login flows) is whitelisted in addition to any URLs in this list. - # - # By default, this list contains only the login fallback page. - # - #client_whitelist: - # - https://riot.im/develop - # - https://my.custom.client/ - - # Uncomment to keep a user's profile fields in sync with information from - # the identity provider. Currently only syncing the displayname is - # supported. Fields are checked on every SSO login, and are updated - # if necessary. - # - # Note that enabling this option will override user profile information, - # regardless of whether users have opted-out of syncing that - # information when first signing in. Defaults to false. - # - #update_profile_information: true - - -# JSON web token integration. 
The following settings can be used to make -# Synapse JSON web tokens for authentication, instead of its internal -# password database. -# -# Each JSON Web Token needs to contain a "sub" (subject) claim, which is -# used as the localpart of the mxid. -# -# Additionally, the expiration time ("exp"), not before time ("nbf"), -# and issued at ("iat") claims are validated if present. -# -# Note that this is a non-standard login type and client support is -# expected to be non-existent. -# -# See https://matrix-org.github.io/synapse/latest/jwt.html. -# -#jwt_config: - # Uncomment the following to enable authorization using JSON web - # tokens. Defaults to false. - # - #enabled: true - - # This is either the private shared secret or the public key used to - # decode the contents of the JSON web token. - # - # Required if 'enabled' is true. - # - #secret: "provided-by-your-issuer" - - # The algorithm used to sign the JSON web token. - # - # Supported algorithms are listed at - # https://pyjwt.readthedocs.io/en/latest/algorithms.html - # - # Required if 'enabled' is true. - # - #algorithm: "provided-by-your-issuer" - - # Name of the claim containing a unique identifier for the user. - # - # Optional, defaults to `sub`. - # - #subject_claim: "sub" - - # The issuer to validate the "iss" claim against. - # - # Optional, if provided the "iss" claim will be required and - # validated for all JSON web tokens. - # - #issuer: "provided-by-your-issuer" - - # A list of audiences to validate the "aud" claim against. - # - # Optional, if provided the "aud" claim will be required and - # validated for all JSON web tokens. - # - # Note that if the "aud" claim is included in a JSON web token then - # validation will fail without configuring audiences. - # - #audiences: - # - "provided-by-your-issuer" - - -password_config: - # Uncomment to disable password login. - # Set to `only_for_reauth` to permit reauthentication for users that - # have passwords and are already logged in. - # - #enabled: false - - # Uncomment to disable authentication against the local password - # database. This is ignored if `enabled` is false, and is only useful - # if you have other password_providers. - # - #localdb_enabled: false - - # Uncomment and change to a secret random string for extra security. - # DO NOT CHANGE THIS AFTER INITIAL SETUP! - # - #pepper: "EVEN_MORE_SECRET" - - # Define and enforce a password policy. Each parameter is optional. - # This is an implementation of MSC2000. - # - policy: - # Whether to enforce the password policy. - # Defaults to 'false'. - # - #enabled: true - - # Minimum accepted length for a password. - # Defaults to 0. - # - #minimum_length: 15 - - # Whether a password must contain at least one digit. - # Defaults to 'false'. - # - #require_digit: true - - # Whether a password must contain at least one symbol. - # A symbol is any character that's not a number or a letter. - # Defaults to 'false'. - # - #require_symbol: true - - # Whether a password must contain at least one lowercase letter. - # Defaults to 'false'. - # - #require_lowercase: true - - # Whether a password must contain at least one uppercase letter. - # Defaults to 'false'. - # - #require_uppercase: true - -ui_auth: - # The amount of time to allow a user-interactive authentication session - # to be active. - # - # This defaults to 0, meaning the user is queried for their credentials - # before every action, but this can be overridden to allow a single - # validation to be re-used. 
This weakens the protections afforded by
-  # the user-interactive authentication process, by allowing for multiple
-  # (and potentially different) operations to use the same validation session.
-  #
-  # This is ignored for potentially "dangerous" operations (including
-  # deactivating an account, modifying an account password, and
-  # adding a 3PID).
-  #
-  # Uncomment below to allow for credential validation to last for 15
-  # seconds.
-  #
-  #session_timeout: "15s"
-
-
-# Configuration for sending emails from Synapse.
-#
-# Server admins can configure custom templates for email content. See
-# https://matrix-org.github.io/synapse/latest/templates.html for more information.
-#
-email:
-  # The hostname of the outgoing SMTP server to use. Defaults to 'localhost'.
-  #
-  #smtp_host: mail.server
-
-  # The port on the mail server for outgoing SMTP. Defaults to 25.
-  #
-  #smtp_port: 587
-
-  # Username/password for authentication to the SMTP server. By default, no
-  # authentication is attempted.
-  #
-  #smtp_user: "exampleusername"
-  #smtp_pass: "examplepassword"
-
-  # Uncomment the following to require TLS transport security for SMTP.
-  # By default, Synapse will connect over plain text, and will then switch to
-  # TLS via STARTTLS *if the SMTP server supports it*. If this option is set,
-  # Synapse will refuse to connect unless the server supports STARTTLS.
-  #
-  #require_transport_security: true
-
-  # Uncomment the following to disable TLS for SMTP.
-  #
-  # By default, if the server supports TLS, it will be used, and the server
-  # must present a certificate that is valid for 'smtp_host'. If this option
-  # is set to false, TLS will not be used.
-  #
-  #enable_tls: false
-
-  # notif_from defines the "From" address to use when sending emails.
-  # It must be set if email sending is enabled.
-  #
-  # The placeholder '%(app)s' will be replaced by the application name,
-  # which is normally 'app_name' (below), but may be overridden by the
-  # Matrix client application.
-  #
-  # Note that the placeholder must be written '%(app)s', including the
-  # trailing 's'.
-  #
-  #notif_from: "Your Friendly %(app)s homeserver <noreply@example.com>"
-
-  # app_name defines the default value for '%(app)s' in notif_from and email
-  # subjects. It defaults to 'Matrix'.
-  #
-  #app_name: my_branded_matrix_server
-
-  # Uncomment the following to enable sending emails for messages that the user
-  # has missed. Disabled by default.
-  #
-  #enable_notifs: true
-
-  # Uncomment the following to disable automatic subscription to email
-  # notifications for new users. Enabled by default.
-  #
-  #notif_for_new_users: false
-
-  # Custom URL for client links within the email notifications. By default
-  # links will be based on "https://matrix.to".
-  #
-  # (This setting used to be called riot_base_url; the old name is still
-  # supported for backwards-compatibility but is now deprecated.)
-  #
-  #client_base_url: "http://localhost/riot"
-
-  # Configure the time that a validation email will expire after sending.
-  # Defaults to 1h.
-  #
-  #validation_token_lifetime: 15m
-
-  # The web client location to direct users to during an invite. This is passed
-  # to the identity server as the org.matrix.web_client_location key. Defaults
-  # to unset, giving no guidance to the identity server.
-  #
-  #invite_client_location: https://app.element.io
-
-  # Subjects to use when sending emails from Synapse.
-  #
-  # The placeholder '%(app)s' will be replaced with the value of the 'app_name'
-  # setting above, or by a value dictated by the Matrix client application.
-  #
-  # If a subject isn't overridden in this configuration file, the example value
-  # shown for it below will be used.
-  #
-  #subjects:
-
-    # Subjects for notification emails.
-    #
-    # On top of the '%(app)s' placeholder, these can use the following
-    # placeholders:
-    #
-    #  * '%(person)s', which will be replaced by the display name of the user(s)
-    #     that sent the message(s), e.g. "Alice and Bob".
-    #  * '%(room)s', which will be replaced by the name of the room the
-    #     message(s) have been sent to, e.g. "My super room".
-    #
-    # See the example provided for each setting to see which placeholder can be
-    # used and how to use them.
-    #
-    # Subject to use to notify about one message from one or more user(s) in a
-    # room which has a name.
-    #message_from_person_in_room: "[%(app)s] You have a message on %(app)s from %(person)s in the %(room)s room..."
-    #
-    # Subject to use to notify about one message from one or more user(s) in a
-    # room which doesn't have a name.
-    #message_from_person: "[%(app)s] You have a message on %(app)s from %(person)s..."
-    #
-    # Subject to use to notify about multiple messages from one or more users in
-    # a room which doesn't have a name.
-    #messages_from_person: "[%(app)s] You have messages on %(app)s from %(person)s..."
-    #
-    # Subject to use to notify about multiple messages in a room which has a
-    # name.
-    #messages_in_room: "[%(app)s] You have messages on %(app)s in the %(room)s room..."
-    #
-    # Subject to use to notify about multiple messages in multiple rooms.
-    #messages_in_room_and_others: "[%(app)s] You have messages on %(app)s in the %(room)s room and others..."
-    #
-    # Subject to use to notify about multiple messages from multiple persons in
-    # multiple rooms. This is similar to the setting above except it's used when
-    # the room in which the notification was triggered has no name.
-    #messages_from_person_and_others: "[%(app)s] You have messages on %(app)s from %(person)s and others..."
-    #
-    # Subject to use to notify about an invite to a room which has a name.
-    #invite_from_person_to_room: "[%(app)s] %(person)s has invited you to join the %(room)s room on %(app)s..."
-    #
-    # Subject to use to notify about an invite to a room which doesn't have a
-    # name.
-    #invite_from_person: "[%(app)s] %(person)s has invited you to chat on %(app)s..."
-
-    # Subject for emails related to account administration.
-    #
-    # On top of the '%(app)s' placeholder, these can use the
-    # '%(server_name)s' placeholder, which will be replaced by the value of the
-    # 'server_name' setting in your Synapse configuration.
-    #
-    # Subject to use when sending a password reset email.
-    #password_reset: "[%(server_name)s] Password reset"
-    #
-    # Subject to use when sending a verification email to assert an address's
-    # ownership.
-    #email_validation: "[%(server_name)s] Validate your email"
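The `%(app)s`-style placeholders in `notif_from` and the subjects above are plain Python %-formatting applied with a dict of values, e.g.:

```python
# How a subject template is rendered; keys correspond to the placeholders.
subject = "[%(app)s] You have messages on %(app)s from %(person)s..."
print(subject % {"app": "Matrix", "person": "Alice and Bob"})
# -> [Matrix] You have messages on Matrix from Alice and Bob...
```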
-
-
-## Push ##
-
-push:
-  # Clients requesting push notifications can either have the body of
-  # the message sent in the notification poke along with other details
-  # like the sender, or just the event ID and room ID (`event_id_only`).
-  # If clients choose the former, this option controls whether the
-  # notification request includes the content of the event (other details
-  # like the sender are still included). For `event_id_only` push, it
-  # has no effect.
-  #
-  # For modern Android devices the notification content will still appear
-  # because it is loaded by the app. iPhone, however, will send a
-  # notification saying only that a message arrived and who it came from.
- # - # The default value is "true" to include message details. Uncomment to only - # include the event ID and room ID in push notification payloads. - # - #include_content: false - - # When a push notification is received, an unread count is also sent. - # This number can either be calculated as the number of unread messages - # for the user, or the number of *rooms* the user has unread messages in. - # - # The default value is "true", meaning push clients will see the number of - # rooms with unread messages in them. Uncomment to instead send the number - # of unread messages. - # - #group_unread_count_by_room: false - - -## Rooms ## - -# Controls whether locally-created rooms should be end-to-end encrypted by -# default. -# -# Possible options are "all", "invite", and "off". They are defined as: -# -# * "all": any locally-created room -# * "invite": any room created with the "private_chat" or "trusted_private_chat" -# room creation presets -# * "off": this option will take no effect -# -# The default value is "off". -# -# Note that this option will only affect rooms created after it is set. It -# will also not affect rooms created by other servers. -# -#encryption_enabled_by_default_for_room_type: invite - -# Override the default power levels for rooms created on this server, per -# room creation preset. -# -# The appropriate dictionary for the room preset will be applied on top -# of the existing power levels content. -# -# Useful if you know that your users need special permissions in rooms -# that they create (e.g. to send particular types of state events without -# needing an elevated power level). This takes the same shape as the -# `power_level_content_override` parameter in the /createRoom API, but -# is applied before that parameter. -# -# Valid keys are some or all of `private_chat`, `trusted_private_chat` -# and `public_chat`. Inside each of those should be any of the -# properties allowed in `power_level_content_override` in the -# /createRoom API. If any property is missing, its default value will -# continue to be used. If any property is present, it will overwrite -# the existing default completely (so if the `events` property exists, -# the default event power levels will be ignored). -# -#default_power_level_content_override: -# private_chat: -# "events": -# "com.example.myeventtype" : 0 -# "m.room.avatar": 50 -# "m.room.canonical_alias": 50 -# "m.room.encryption": 100 -# "m.room.history_visibility": 100 -# "m.room.name": 50 -# "m.room.power_levels": 100 -# "m.room.server_acl": 100 -# "m.room.tombstone": 100 -# "events_default": 1 - - - -# User Directory configuration -# -user_directory: - # Defines whether users can search the user directory. If false then - # empty responses are returned to all queries. Defaults to true. - # - # Uncomment to disable the user directory. - # - #enabled: false - - # Defines whether to search all users visible to your HS when searching - # the user directory. If false, search results will only contain users - # visible in public rooms and users sharing a room with the requester. - # Defaults to false. - # - # NB. If you set this to true, and the last time the user_directory search - # indexes were (re)built was before Synapse 1.44, you'll have to - # rebuild the indexes in order to search through all known users. 
- # These indexes are built the first time Synapse starts; admins can - # manually trigger a rebuild via API following the instructions at - # https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/background_updates.html#run - # - # Uncomment to return search results containing all known users, even if that - # user does not share a room with the requester. - # - #search_all_users: true - - # Defines whether to prefer local users in search query results. - # If True, local users are more likely to appear above remote users - # when searching the user directory. Defaults to false. - # - # Uncomment to prefer local over remote users in user directory search - # results. - # - #prefer_local_users: true - - -# User Consent configuration -# -# for detailed instructions, see -# https://matrix-org.github.io/synapse/latest/consent_tracking.html -# -# Parts of this section are required if enabling the 'consent' resource under -# 'listeners', in particular 'template_dir' and 'version'. -# -# 'template_dir' gives the location of the templates for the HTML forms. -# This directory should contain one subdirectory per language (eg, 'en', 'fr'), -# and each language directory should contain the policy document (named as -# '.html') and a success page (success.html). -# -# 'version' specifies the 'current' version of the policy document. It defines -# the version to be served by the consent resource if there is no 'v' -# parameter. -# -# 'server_notice_content', if enabled, will send a user a "Server Notice" -# asking them to consent to the privacy policy. The 'server_notices' section -# must also be configured for this to work. Notices will *not* be sent to -# guest users unless 'send_server_notice_to_guests' is set to true. -# -# 'block_events_error', if set, will block any attempts to send events -# until the user consents to the privacy policy. The value of the setting is -# used as the text of the error. -# -# 'require_at_registration', if enabled, will add a step to the registration -# process, similar to how captcha works. Users will be required to accept the -# policy before their account is created. -# -# 'policy_name' is the display name of the policy users will see when registering -# for an account. Has no effect unless `require_at_registration` is enabled. -# Defaults to "Privacy Policy". -# -#user_consent: -# template_dir: res/templates/privacy -# version: 1.0 -# server_notice_content: -# msgtype: m.text -# body: >- -# To continue using this homeserver you must review and agree to the -# terms and conditions at %(consent_uri)s -# send_server_notice_to_guests: true -# block_events_error: >- -# To continue using this homeserver you must review and agree to the -# terms and conditions at %(consent_uri)s -# require_at_registration: false -# policy_name: Privacy Policy -# - - - -# Settings for local room and user statistics collection. See -# https://matrix-org.github.io/synapse/latest/room_and_user_statistics.html. -# -stats: - # Uncomment the following to disable room and user statistics. Note that doing - # so may cause certain features (such as the room directory) not to work - # correctly. - # - #enabled: false - - -# Server Notices room configuration -# -# Uncomment this section to enable a room which can be used to send notices -# from the server to users. It is a special room which cannot be left; notices -# come from a special "notices" user id. 
-#
-# If you uncomment this section, you *must* define the system_mxid_localpart
-# setting, which defines the id of the user which will be used to send the
-# notices.
-#
-# It's also possible to override the room name, the display name of the
-# "notices" user, and the avatar for the user.
-#
-#server_notices:
-#  system_mxid_localpart: notices
-#  system_mxid_display_name: "Server Notices"
-#  system_mxid_avatar_url: "mxc://server.com/oumMVlgDnLYFaPVkExemNVVZ"
-#  room_name: "Server Notices"
-
-
-
-# Uncomment to disable searching the public room list. When disabled,
-# this blocks searching of local and remote room lists for local and remote
-# users by always returning an empty list for all queries.
-#
-#enable_room_list_search: false
-
-# The `alias_creation_rules` option controls who is allowed to create aliases
-# on this server.
-#
-# The format of this option is a list of rules that contain globs that
-# match against user_id, room_id and the new alias (fully qualified with
-# server name). The action in the first rule that matches is taken,
-# which can currently either be "allow" or "deny".
-#
-# Missing user_id/room_id/alias fields default to "*".
-#
-# If no rules match the request is denied. An empty list means no one
-# can create aliases.
-#
-# Options for the rules include:
-#
-#   user_id: Matches against the creator of the alias
-#   alias: Matches against the alias being created
-#   room_id: Matches against the room ID the alias is being pointed at
-#   action: Whether to "allow" or "deny" the request if the rule matches
-#
-# The default is:
-#
-#alias_creation_rules:
-#  - user_id: "*"
-#    alias: "*"
-#    room_id: "*"
-#    action: allow
-
-# The `room_list_publication_rules` option controls who can publish and
-# which rooms can be published in the public room list.
-#
-# The format of this option is the same as that for
-# `alias_creation_rules`.
-#
-# If the room has one or more aliases associated with it, only one of
-# the aliases needs to match the alias rule. If there are no aliases
-# then only rules with `alias: *` match.
-#
-# If no rules match the request is denied. An empty list means no one
-# can publish rooms.
-#
-# Options for the rules include:
-#
-#   user_id: Matches against the creator of the alias
-#   room_id: Matches against the room ID being published
-#   alias: Matches against any current local or canonical aliases
-#     associated with the room
-#   action: Whether to "allow" or "deny" the request if the rule matches
-#
-# The default is:
-#
-#room_list_publication_rules:
-#  - user_id: "*"
-#    alias: "*"
-#    room_id: "*"
-#    action: allow
-
-
-## Opentracing ##
-
-# These settings enable opentracing, which implements distributed tracing.
-# This allows you to observe the causal chains of events across servers
-# including requests, key lookups etc., across any server running
-# synapse or any other services which support opentracing
-# (specifically those implemented with Jaeger).
-#
-opentracing:
-    # tracing is disabled by default. Uncomment the following line to enable it.
-    #
-    #enabled: true
-
-    # The list of homeservers we wish to send and receive span contexts and span baggage.
-    # See https://matrix-org.github.io/synapse/latest/opentracing.html.
-    #
-    # This is a list of regexes which are matched against the server_name of the
-    # homeserver.
-    #
-    # By default, it is empty, so no servers are matched.
- # - #homeserver_whitelist: - # - ".*" - - # A list of the matrix IDs of users whose requests will always be traced, - # even if the tracing system would otherwise drop the traces due to - # probabilistic sampling. - # - # By default, the list is empty. - # - #force_tracing_for_users: - # - "@user1:server_name" - # - "@user2:server_name" - - # Jaeger can be configured to sample traces at different rates. - # All configuration options provided by Jaeger can be set here. - # Jaeger's configuration is mostly related to trace sampling which - # is documented here: - # https://www.jaegertracing.io/docs/latest/sampling/. - # - #jaeger_config: - # sampler: - # type: const - # param: 1 - # logging: - # false - - -## Workers ## - -# Disables sending of outbound federation transactions on the main process. -# Uncomment if using a federation sender worker. -# -#send_federation: false - -# It is possible to run multiple federation sender workers, in which case the -# work is balanced across them. -# -# This configuration must be shared between all federation sender workers, and if -# changed all federation sender workers must be stopped at the same time and then -# started, to ensure that all instances are running with the same config (otherwise -# events may be dropped). -# -#federation_sender_instances: -# - federation_sender1 - -# When using workers this should be a map from `worker_name` to the -# HTTP replication listener of the worker, if configured. -# -#instance_map: -# worker1: -# host: localhost -# port: 8034 - -# Experimental: When using workers you can define which workers should -# handle event persistence and typing notifications. Any worker -# specified here must also be in the `instance_map`. -# -#stream_writers: -# events: worker1 -# typing: worker1 - -# The worker that is used to run background tasks (e.g. cleaning up expired -# data). If not provided this defaults to the main process. -# -#run_background_tasks_on: worker1 - -# A shared secret used by the replication APIs to authenticate HTTP requests -# from workers. -# -# By default this is unused and traffic is not authenticated. -# -#worker_replication_secret: "" - - -# Configuration for Redis when using workers. This *must* be enabled when -# using workers (unless using old style direct TCP configuration). -# -redis: - # Uncomment the below to enable Redis support. - # - #enabled: true - - # Optional host and port to use to connect to redis. Defaults to - # localhost and 6379 - # - #host: localhost - #port: 6379 - - # Optional password if configured on the Redis instance - # - #password: - - -## Background Updates ## - -# Background updates are database updates that are run in the background in batches. -# The duration, minimum batch size, default batch size, whether to sleep between batches and if so, how long to -# sleep can all be configured. This is helpful to speed up or slow down the updates. -# -background_updates: - # How long in milliseconds to run a batch of background updates for. Defaults to 100. Uncomment and set - # a time to change the default. - # - #background_update_duration_ms: 500 - - # Whether to sleep between updates. Defaults to True. Uncomment to change the default. - # - #sleep_enabled: false - - # If sleeping between updates, how long in milliseconds to sleep for. Defaults to 1000. Uncomment - # and set a duration to change the default. - # - #sleep_duration_ms: 300 - - # Minimum size a batch of background updates can be. Must be greater than 0. Defaults to 1. 
Uncomment and - # set a size to change the default. - # - #min_batch_size: 10 - - # The batch size to use for the first iteration of a new background update. The default is 100. - # Uncomment and set a size to change the default. - # - #default_batch_size: 50 diff --git a/synapse/config/_base.py b/synapse/config/_base.py index 42364fc13..095eca16c 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -18,6 +18,7 @@ import argparse import errno import logging import os +import re from collections import OrderedDict from hashlib import sha256 from textwrap import dedent @@ -123,7 +124,10 @@ CONFIG_FILE_HEADER = """\ # should have the same indentation. # # [1] https://docs.ansible.com/ansible/latest/reference_appendices/YAMLSyntax.html - +# +# For more information on how to configure Synapse, including a complete accounting of +# each option, go to docs/usage/configuration/config_documentation.md or +# https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html """ @@ -470,7 +474,7 @@ class RootConfig: The yaml config file """ - return CONFIG_FILE_HEADER + "\n\n".join( + conf = CONFIG_FILE_HEADER + "\n".join( dedent(conf) for conf in self.invoke_all( "generate_config_section", @@ -485,6 +489,8 @@ class RootConfig: tls_private_key_path=tls_private_key_path, ).values() ) + conf = re.sub("\n{2,}", "\n", conf) + return conf @classmethod def load_config( diff --git a/synapse/config/api.py b/synapse/config/api.py index 2cc630534..e46728e73 100644 --- a/synapse/config/api.py +++ b/synapse/config/api.py @@ -31,54 +31,6 @@ class ApiConfig(Config): self.room_prejoin_state = list(self._get_prejoin_state_types(config)) self.track_puppeted_user_ips = config.get("track_puppeted_user_ips", False) - def generate_config_section(cls, **kwargs: Any) -> str: - formatted_default_state_types = "\n".join( - " # - %s" % (t,) for t in _DEFAULT_PREJOIN_STATE_TYPES - ) - - return """\ - ## API Configuration ## - - # Controls for the state that is shared with users who receive an invite - # to a room - # - room_prejoin_state: - # By default, the following state event types are shared with users who - # receive invites to the room: - # -%(formatted_default_state_types)s - # - # Uncomment the following to disable these defaults (so that only the event - # types listed in 'additional_event_types' are shared). Defaults to 'false'. - # - #disable_default_event_types: true - - # Additional state event types to share with users when they are invited - # to a room. - # - # By default, this list is empty (so only the default event types are shared). - # - #additional_event_types: - # - org.example.custom.event.type - - # We record the IP address of clients used to access the API for various - # reasons, including displaying it to the user in the "Where you're signed in" - # dialog. - # - # By default, when puppeting another user via the admin API, the client IP - # address is recorded against the user who created the access token (ie, the - # admin user), and *not* the puppeted user. - # - # Uncomment the following to also record the IP address against the puppeted - # user. (This also means that the puppeted user will count as an "active" user - # for the purpose of monthly active user tracking - see 'limit_usage_by_mau' etc - # above.) 
- # - #track_puppeted_user_ips: true - """ % { - "formatted_default_state_types": formatted_default_state_types - } - def _get_prejoin_state_types(self, config: JsonDict) -> Iterable[str]: """Get the event types to include in the prejoin state diff --git a/synapse/config/appservice.py b/synapse/config/appservice.py index 16f93273b..00182090b 100644 --- a/synapse/config/appservice.py +++ b/synapse/config/appservice.py @@ -35,20 +35,6 @@ class AppServiceConfig(Config): self.app_service_config_files = config.get("app_service_config_files", []) self.track_appservice_user_ips = config.get("track_appservice_user_ips", False) - def generate_config_section(cls, **kwargs: Any) -> str: - return """\ - # A list of application service config files to use - # - #app_service_config_files: - # - app_service_1.yaml - # - app_service_2.yaml - - # Uncomment to enable tracking of application service IP addresses. Implicitly - # enables MAU tracking for application service users. - # - #track_appservice_user_ips: true - """ - def load_appservices( hostname: str, config_files: List[str] diff --git a/synapse/config/auth.py b/synapse/config/auth.py index 265a554a5..35774962c 100644 --- a/synapse/config/auth.py +++ b/synapse/config/auth.py @@ -53,78 +53,3 @@ class AuthConfig(Config): self.ui_auth_session_timeout = self.parse_duration( ui_auth.get("session_timeout", 0) ) - - def generate_config_section(self, **kwargs: Any) -> str: - return """\ - password_config: - # Uncomment to disable password login. - # Set to `only_for_reauth` to permit reauthentication for users that - # have passwords and are already logged in. - # - #enabled: false - - # Uncomment to disable authentication against the local password - # database. This is ignored if `enabled` is false, and is only useful - # if you have other password_providers. - # - #localdb_enabled: false - - # Uncomment and change to a secret random string for extra security. - # DO NOT CHANGE THIS AFTER INITIAL SETUP! - # - #pepper: "EVEN_MORE_SECRET" - - # Define and enforce a password policy. Each parameter is optional. - # This is an implementation of MSC2000. - # - policy: - # Whether to enforce the password policy. - # Defaults to 'false'. - # - #enabled: true - - # Minimum accepted length for a password. - # Defaults to 0. - # - #minimum_length: 15 - - # Whether a password must contain at least one digit. - # Defaults to 'false'. - # - #require_digit: true - - # Whether a password must contain at least one symbol. - # A symbol is any character that's not a number or a letter. - # Defaults to 'false'. - # - #require_symbol: true - - # Whether a password must contain at least one lowercase letter. - # Defaults to 'false'. - # - #require_lowercase: true - - # Whether a password must contain at least one uppercase letter. - # Defaults to 'false'. - # - #require_uppercase: true - - ui_auth: - # The amount of time to allow a user-interactive authentication session - # to be active. - # - # This defaults to 0, meaning the user is queried for their credentials - # before every action, but this can be overridden to allow a single - # validation to be re-used. This weakens the protections afforded by - # the user-interactive authentication process, by allowing for multiple - # (and potentially different) operations to use the same validation session. - # - # This is ignored for potentially "dangerous" operations (including - # deactivating an account, modifying an account password, and - # adding a 3PID). 
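The `ui_auth.session_timeout` value removed above is read via `self.parse_duration(...)` in the retained `read_config`, which accepts either a bare number of milliseconds or a suffixed string such as "15s". A simplified illustration of that parsing, assuming only the single-suffix forms used in the examples (the real `Config.parse_duration` supports more):

    from typing import Union

    _UNITS_MS = {"s": 1000, "m": 60_000, "h": 3_600_000, "d": 86_400_000}

    def parse_duration(value: Union[int, str]) -> int:
        if isinstance(value, int):
            return value  # already milliseconds
        suffix = value[-1]
        if suffix in _UNITS_MS:
            return int(value[:-1]) * _UNITS_MS[suffix]
        return int(value)  # bare string of milliseconds

    assert parse_duration("15s") == 15_000
    assert parse_duration(0) == 0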
- # - # Uncomment below to allow for credential validation to last for 15 - # seconds. - # - #session_timeout: "15s" - """ diff --git a/synapse/config/background_updates.py b/synapse/config/background_updates.py index 07fadbe04..1c6cd97de 100644 --- a/synapse/config/background_updates.py +++ b/synapse/config/background_updates.py @@ -21,40 +21,6 @@ from ._base import Config class BackgroundUpdateConfig(Config): section = "background_updates" - def generate_config_section(self, **kwargs: Any) -> str: - return """\ - ## Background Updates ## - - # Background updates are database updates that are run in the background in batches. - # The duration, minimum batch size, default batch size, whether to sleep between batches and if so, how long to - # sleep can all be configured. This is helpful to speed up or slow down the updates. - # - background_updates: - # How long in milliseconds to run a batch of background updates for. Defaults to 100. Uncomment and set - # a time to change the default. - # - #background_update_duration_ms: 500 - - # Whether to sleep between updates. Defaults to True. Uncomment to change the default. - # - #sleep_enabled: false - - # If sleeping between updates, how long in milliseconds to sleep for. Defaults to 1000. Uncomment - # and set a duration to change the default. - # - #sleep_duration_ms: 300 - - # Minimum size a batch of background updates can be. Must be greater than 0. Defaults to 1. Uncomment and - # set a size to change the default. - # - #min_batch_size: 10 - - # The batch size to use for the first iteration of a new background update. The default is 100. - # Uncomment and set a size to change the default. - # - #default_batch_size: 50 - """ - def read_config(self, config: JsonDict, **kwargs: Any) -> None: bg_update_config = config.get("background_updates") or {} diff --git a/synapse/config/cache.py b/synapse/config/cache.py index d2f55534d..d0b491ea6 100644 --- a/synapse/config/cache.py +++ b/synapse/config/cache.py @@ -113,97 +113,6 @@ class CacheConfig(Config): with _CACHES_LOCK: _CACHES.clear() - def generate_config_section(self, **kwargs: Any) -> str: - return """\ - ## Caching ## - - # Caching can be configured through the following options. - # - # A cache 'factor' is a multiplier that can be applied to each of - # Synapse's caches in order to increase or decrease the maximum - # number of entries that can be stored. - # - # The configuration for cache factors (caches.global_factor and - # caches.per_cache_factors) can be reloaded while the application is running, - # by sending a SIGHUP signal to the Synapse process. Changes to other parts of - # the caching config will NOT be applied after a SIGHUP is received; a restart - # is necessary. - - # The number of events to cache in memory. Not affected by - # caches.global_factor. - # - #event_cache_size: 10K - - caches: - # Controls the global cache factor, which is the default cache factor - # for all caches if a specific factor for that cache is not otherwise - # set. - # - # This can also be set by the "SYNAPSE_CACHE_FACTOR" environment - # variable. Setting by environment variable takes priority over - # setting through the config file. - # - # Defaults to 0.5, which will half the size of all caches. - # - #global_factor: 1.0 - - # A dictionary of cache name to cache factor for that individual - # cache. Overrides the global cache factor for a given cache. 
- # - # These can also be set through environment variables comprised - # of "SYNAPSE_CACHE_FACTOR_" + the name of the cache in capital - # letters and underscores. Setting by environment variable - # takes priority over setting through the config file. - # Ex. SYNAPSE_CACHE_FACTOR_GET_USERS_WHO_SHARE_ROOM_WITH_USER=2.0 - # - # Some caches have '*' and other characters that are not - # alphanumeric or underscores. These caches can be named with or - # without the special characters stripped. For example, to specify - # the cache factor for `*stateGroupCache*` via an environment - # variable would be `SYNAPSE_CACHE_FACTOR_STATEGROUPCACHE=2.0`. - # - per_cache_factors: - #get_users_who_share_room_with_user: 2.0 - - # Controls whether cache entries are evicted after a specified time - # period. Defaults to true. Uncomment to disable this feature. - # - #expire_caches: false - - # If expire_caches is enabled, this flag controls how long an entry can - # be in a cache without having been accessed before being evicted. - # Defaults to 30m. Uncomment to set a different time to live for cache entries. - # - #cache_entry_ttl: 30m - - # This flag enables cache autotuning, and is further specified by the sub-options `max_cache_memory_usage`, - # `target_cache_memory_usage`, `min_cache_ttl`. These flags work in conjunction with each other to maintain - # a balance between cache memory usage and cache entry availability. You must be using jemalloc to utilize - # this option, and all three of the options must be specified for this feature to work. - #cache_autotuning: - # This flag sets a ceiling on much memory the cache can use before caches begin to be continuously evicted. - # They will continue to be evicted until the memory usage drops below the `target_memory_usage`, set in - # the flag below, or until the `min_cache_ttl` is hit. - #max_cache_memory_usage: 1024M - - # This flag sets a rough target for the desired memory usage of the caches. - #target_cache_memory_usage: 758M - - # 'min_cache_ttl` sets a limit under which newer cache entries are not evicted and is only applied when - # caches are actively being evicted/`max_cache_memory_usage` has been exceeded. This is to protect hot caches - # from being emptied while Synapse is evicting due to memory. - #min_cache_ttl: 5m - - # Controls how long the results of a /sync request are cached for after - # a successful response is returned. A higher duration can help clients with - # intermittent connections, at the cost of higher memory usage. - # - # By default, this is zero, which means that sync responses are not cached - # at all. - # - #sync_response_cache_duration: 2m - """ - def read_config(self, config: JsonDict, **kwargs: Any) -> None: """Populate this config object with values from `config`. diff --git a/synapse/config/captcha.py b/synapse/config/captcha.py index 92c603f22..1737d5e32 100644 --- a/synapse/config/captcha.py +++ b/synapse/config/captcha.py @@ -45,30 +45,3 @@ class CaptchaConfig(Config): "https://www.recaptcha.net/recaptcha/api/siteverify", ) self.recaptcha_template = self.read_template("recaptcha.html") - - def generate_config_section(self, **kwargs: Any) -> str: - return """\ - ## Captcha ## - # See docs/CAPTCHA_SETUP.md for full details of configuring this. - - # This homeserver's ReCAPTCHA public key. Must be specified if - # enable_registration_captcha is enabled. - # - #recaptcha_public_key: "YOUR_PUBLIC_KEY" - - # This homeserver's ReCAPTCHA private key. Must be specified if - # enable_registration_captcha is enabled. 
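The cache comments removed above describe a precedence order for cache factors: environment variable over config file, per-cache factor over the global one. An illustrative resolver under those rules (not Synapse's implementation, and omitting the special-character stripping the comments mention):

    import os

    def resolve_cache_factor(name: str, global_factor: float,
                             per_cache_factors: dict) -> float:
        # Environment variables such as
        # SYNAPSE_CACHE_FACTOR_GET_USERS_WHO_SHARE_ROOM_WITH_USER
        # take priority over the config file.
        env = os.environ.get("SYNAPSE_CACHE_FACTOR_" + name.upper())
        if env is not None:
            return float(env)
        return per_cache_factors.get(name, global_factor)

    factor = resolve_cache_factor(
        "get_users_who_share_room_with_user", 0.5, {"stateGroupCache": 2.0}
    )
    assert factor == 0.5  # no env override, no per-cache entry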
- # - #recaptcha_private_key: "YOUR_PRIVATE_KEY" - - # Uncomment to enable ReCaptcha checks when registering, preventing signup - # unless a captcha is answered. Requires a valid ReCaptcha - # public/private key. Defaults to 'false'. - # - #enable_registration_captcha: true - - # The API endpoint to use for verifying m.login.recaptcha responses. - # Defaults to "https://www.recaptcha.net/recaptcha/api/siteverify". - # - #recaptcha_siteverify_api: "https://my.recaptcha.site" - """ diff --git a/synapse/config/cas.py b/synapse/config/cas.py index 8af0794ba..9152c06bd 100644 --- a/synapse/config/cas.py +++ b/synapse/config/cas.py @@ -53,37 +53,6 @@ class CasConfig(Config): self.cas_displayname_attribute = None self.cas_required_attributes = [] - def generate_config_section(self, **kwargs: Any) -> str: - return """\ - # Enable Central Authentication Service (CAS) for registration and login. - # - cas_config: - # Uncomment the following to enable authorization against a CAS server. - # Defaults to false. - # - #enabled: true - - # The URL of the CAS authorization endpoint. - # - #server_url: "https://cas-server.com" - - # The attribute of the CAS response to use as the display name. - # - # If unset, no displayname will be set. - # - #displayname_attribute: name - - # It is possible to configure Synapse to only allow logins if CAS attributes - # match particular values. All of the keys in the mapping below must exist - # and the values must match the given value. Alternately if the given value - # is None then any value is allowed (the attribute just must exist). - # All of the listed attributes must match for the login to be permitted. - # - #required_attributes: - # userGroup: "staff" - # department: None - """ - # CAS uses a legacy required attributes mapping, not the one provided by # SsoAttributeRequirement. diff --git a/synapse/config/consent.py b/synapse/config/consent.py index 8ee3d3452..be74609dc 100644 --- a/synapse/config/consent.py +++ b/synapse/config/consent.py @@ -20,58 +20,6 @@ from synapse.types import JsonDict from ._base import Config -DEFAULT_CONFIG = """\ -# User Consent configuration -# -# for detailed instructions, see -# https://matrix-org.github.io/synapse/latest/consent_tracking.html -# -# Parts of this section are required if enabling the 'consent' resource under -# 'listeners', in particular 'template_dir' and 'version'. -# -# 'template_dir' gives the location of the templates for the HTML forms. -# This directory should contain one subdirectory per language (eg, 'en', 'fr'), -# and each language directory should contain the policy document (named as -# '.html') and a success page (success.html). -# -# 'version' specifies the 'current' version of the policy document. It defines -# the version to be served by the consent resource if there is no 'v' -# parameter. -# -# 'server_notice_content', if enabled, will send a user a "Server Notice" -# asking them to consent to the privacy policy. The 'server_notices' section -# must also be configured for this to work. Notices will *not* be sent to -# guest users unless 'send_server_notice_to_guests' is set to true. -# -# 'block_events_error', if set, will block any attempts to send events -# until the user consents to the privacy policy. The value of the setting is -# used as the text of the error. -# -# 'require_at_registration', if enabled, will add a step to the registration -# process, similar to how captcha works. Users will be required to accept the -# policy before their account is created. 
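The removed `cas_config` comments state the matching rule for `required_attributes`: every listed key must exist in the CAS response, and a required value of None accepts any value. A small sketch that encodes exactly that rule:

    from typing import Dict, Optional

    def cas_attributes_match(required: Dict[str, Optional[str]],
                             response: Dict[str, str]) -> bool:
        for key, value in required.items():
            if key not in response:
                return False  # attribute must exist
            if value is not None and response[key] != value:
                return False  # non-None values must match exactly
        return True

    assert cas_attributes_match({"userGroup": "staff", "department": None},
                                {"userGroup": "staff", "department": "sales"})
    assert not cas_attributes_match({"userGroup": "staff"}, {})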
-# -# 'policy_name' is the display name of the policy users will see when registering -# for an account. Has no effect unless `require_at_registration` is enabled. -# Defaults to "Privacy Policy". -# -#user_consent: -# template_dir: res/templates/privacy -# version: 1.0 -# server_notice_content: -# msgtype: m.text -# body: >- -# To continue using this homeserver you must review and agree to the -# terms and conditions at %(consent_uri)s -# send_server_notice_to_guests: true -# block_events_error: >- -# To continue using this homeserver you must review and agree to the -# terms and conditions at %(consent_uri)s -# require_at_registration: false -# policy_name: Privacy Policy -# -""" - class ConsentConfig(Config): @@ -118,6 +66,3 @@ class ConsentConfig(Config): self.user_consent_policy_name = consent_config.get( "policy_name", "Privacy Policy" ) - - def generate_config_section(self, **kwargs: Any) -> str: - return DEFAULT_CONFIG diff --git a/synapse/config/database.py b/synapse/config/database.py index de0d3ca0f..928fec8df 100644 --- a/synapse/config/database.py +++ b/synapse/config/database.py @@ -28,56 +28,6 @@ Ignoring 'database_path' setting: not using a sqlite3 database. """ DEFAULT_CONFIG = """\ -## Database ## - -# The 'database' setting defines the database that synapse uses to store all of -# its data. -# -# 'name' gives the database engine to use: either 'sqlite3' (for SQLite) or -# 'psycopg2' (for PostgreSQL). -# -# 'txn_limit' gives the maximum number of transactions to run per connection -# before reconnecting. Defaults to 0, which means no limit. -# -# 'allow_unsafe_locale' is an option specific to Postgres. Under the default behavior, Synapse will refuse to -# start if the postgres db is set to a non-C locale. You can override this behavior (which is *not* recommended) -# by setting 'allow_unsafe_locale' to true. Note that doing so may corrupt your database. You can find more information -# here: https://matrix-org.github.io/synapse/latest/postgres.html#fixing-incorrect-collate-or-ctype and here: -# https://wiki.postgresql.org/wiki/Locale_data_changes -# -# 'args' gives options which are passed through to the database engine, -# except for options starting 'cp_', which are used to configure the Twisted -# connection pool. For a reference to valid arguments, see: -# * for sqlite: https://docs.python.org/3/library/sqlite3.html#sqlite3.connect -# * for postgres: https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-PARAMKEYWORDS -# * for the connection pool: https://twistedmatrix.com/documents/current/api/twisted.enterprise.adbapi.ConnectionPool.html#__init__ -# -# -# Example SQLite configuration: -# -#database: -# name: sqlite3 -# args: -# database: /path/to/homeserver.db -# -# -# Example Postgres configuration: -# -#database: -# name: psycopg2 -# txn_limit: 10000 -# args: -# user: synapse_user -# password: secretpassword -# database: synapse -# host: localhost -# port: 5432 -# cp_min: 5 -# cp_max: 10 -# -# For more information on using Synapse with Postgres, -# see https://matrix-org.github.io/synapse/latest/postgres.html. -# database: name: sqlite3 args: diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index 5b5c2f4ff..c82f3ee7a 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -357,160 +357,6 @@ class EmailConfig(Config): path=("email", "invite_client_location"), ) - def generate_config_section(self, **kwargs: Any) -> str: - return ( - """\ - # Configuration for sending emails from Synapse. 
- # - # Server admins can configure custom templates for email content. See - # https://matrix-org.github.io/synapse/latest/templates.html for more information. - # - email: - # The hostname of the outgoing SMTP server to use. Defaults to 'localhost'. - # - #smtp_host: mail.server - - # The port on the mail server for outgoing SMTP. Defaults to 25. - # - #smtp_port: 587 - - # Username/password for authentication to the SMTP server. By default, no - # authentication is attempted. - # - #smtp_user: "exampleusername" - #smtp_pass: "examplepassword" - - # Uncomment the following to require TLS transport security for SMTP. - # By default, Synapse will connect over plain text, and will then switch to - # TLS via STARTTLS *if the SMTP server supports it*. If this option is set, - # Synapse will refuse to connect unless the server supports STARTTLS. - # - #require_transport_security: true - - # Uncomment the following to disable TLS for SMTP. - # - # By default, if the server supports TLS, it will be used, and the server - # must present a certificate that is valid for 'smtp_host'. If this option - # is set to false, TLS will not be used. - # - #enable_tls: false - - # notif_from defines the "From" address to use when sending emails. - # It must be set if email sending is enabled. - # - # The placeholder '%%(app)s' will be replaced by the application name, - # which is normally 'app_name' (below), but may be overridden by the - # Matrix client application. - # - # Note that the placeholder must be written '%%(app)s', including the - # trailing 's'. - # - #notif_from: "Your Friendly %%(app)s homeserver " - - # app_name defines the default value for '%%(app)s' in notif_from and email - # subjects. It defaults to 'Matrix'. - # - #app_name: my_branded_matrix_server - - # Uncomment the following to enable sending emails for messages that the user - # has missed. Disabled by default. - # - #enable_notifs: true - - # Uncomment the following to disable automatic subscription to email - # notifications for new users. Enabled by default. - # - #notif_for_new_users: false - - # Custom URL for client links within the email notifications. By default - # links will be based on "https://matrix.to". - # - # (This setting used to be called riot_base_url; the old name is still - # supported for backwards-compatibility but is now deprecated.) - # - #client_base_url: "http://localhost/riot" - - # Configure the time that a validation email will expire after sending. - # Defaults to 1h. - # - #validation_token_lifetime: 15m - - # The web client location to direct users to during an invite. This is passed - # to the identity server as the org.matrix.web_client_location key. Defaults - # to unset, giving no guidance to the identity server. - # - #invite_client_location: https://app.element.io - - # Subjects to use when sending emails from Synapse. - # - # The placeholder '%%(app)s' will be replaced with the value of the 'app_name' - # setting above, or by a value dictated by the Matrix client application. - # - # If a subject isn't overridden in this configuration file, the value used as - # its example will be used. - # - #subjects: - - # Subjects for notification emails. - # - # On top of the '%%(app)s' placeholder, these can use the following - # placeholders: - # - # * '%%(person)s', which will be replaced by the display name of the user(s) - # that sent the message(s), e.g. "Alice and Bob". - # * '%%(room)s', which will be replaced by the name of the room the - # message(s) have been sent to, e.g. "My super room". 
- # - # See the example provided for each setting to see which placeholder can be - # used and how to use them. - # - # Subject to use to notify about one message from one or more user(s) in a - # room which has a name. - #message_from_person_in_room: "%(message_from_person_in_room)s" - # - # Subject to use to notify about one message from one or more user(s) in a - # room which doesn't have a name. - #message_from_person: "%(message_from_person)s" - # - # Subject to use to notify about multiple messages from one or more users in - # a room which doesn't have a name. - #messages_from_person: "%(messages_from_person)s" - # - # Subject to use to notify about multiple messages in a room which has a - # name. - #messages_in_room: "%(messages_in_room)s" - # - # Subject to use to notify about multiple messages in multiple rooms. - #messages_in_room_and_others: "%(messages_in_room_and_others)s" - # - # Subject to use to notify about multiple messages from multiple persons in - # multiple rooms. This is similar to the setting above except it's used when - # the room in which the notification was triggered has no name. - #messages_from_person_and_others: "%(messages_from_person_and_others)s" - # - # Subject to use to notify about an invite to a room which has a name. - #invite_from_person_to_room: "%(invite_from_person_to_room)s" - # - # Subject to use to notify about an invite to a room which doesn't have a - # name. - #invite_from_person: "%(invite_from_person)s" - - # Subject for emails related to account administration. - # - # On top of the '%%(app)s' placeholder, these one can use the - # '%%(server_name)s' placeholder, which will be replaced by the value of the - # 'server_name' setting in your Synapse configuration. - # - # Subject to use when sending a password reset email. - #password_reset: "%(password_reset)s" - # - # Subject to use when sending a verification email to assert an address's - # ownership. - #email_validation: "%(email_validation)s" - """ - % DEFAULT_SUBJECTS - ) - class ThreepidBehaviour(Enum): """ diff --git a/synapse/config/federation.py b/synapse/config/federation.py index f83f93c0e..336fca578 100644 --- a/synapse/config/federation.py +++ b/synapse/config/federation.py @@ -49,44 +49,5 @@ class FederationConfig(Config): "allow_device_name_lookup_over_federation", False ) - def generate_config_section(self, **kwargs: Any) -> str: - return """\ - ## Federation ## - - # Restrict federation to the following whitelist of domains. - # N.B. we recommend also firewalling your federation listener to limit - # inbound federation traffic as early as possible, rather than relying - # purely on this application-layer restriction. If not specified, the - # default is to whitelist everything. - # - #federation_domain_whitelist: - # - lon.example.com - # - nyc.example.com - # - syd.example.com - - # Report prometheus metrics on the age of PDUs being sent to and received from - # the following domains. This can be used to give an idea of "delay" on inbound - # and outbound federation, though be aware that any delay can be due to problems - # at either end or with the intermediate network. - # - # By default, no domains are monitored in this way. - # - #federation_metrics_domains: - # - matrix.org - # - example.com - - # Uncomment to disable profile lookup over federation. By default, the - # Federation API allows other homeservers to obtain profile data of any user - # on this homeserver. Defaults to 'true'. 
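The doubled percent signs in the email section removed above exist because the whole template string is itself %-formatted against DEFAULT_SUBJECTS: `%%(app)s` survives as a literal placeholder for later substitution, while single-percent keys are filled in immediately. A sketch of the mechanism (the real default subject values live in DEFAULT_SUBJECTS; the one here is made up):

    DEFAULT_SUBJECTS = {"password_reset": "[%(server_name)s] Password reset"}

    template = ('#notif_from: "Your %%(app)s homeserver"\n'
                '#password_reset: "%(password_reset)s"')

    print(template % DEFAULT_SUBJECTS)
    # #notif_from: "Your %(app)s homeserver"
    # #password_reset: "[%(server_name)s] Password reset"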
- # - #allow_profile_lookup_over_federation: false - - # Uncomment to allow device display name lookup over federation. By default, the - # Federation API prevents other homeservers from obtaining the display names of - # user devices on this homeserver. Defaults to 'false'. - # - #allow_device_name_lookup_over_federation: true - """ - _METRICS_FOR_DOMAINS_SCHEMA = {"type": "array", "items": {"type": "string"}} diff --git a/synapse/config/groups.py b/synapse/config/groups.py new file mode 100644 index 000000000..baa051fdd --- /dev/null +++ b/synapse/config/groups.py @@ -0,0 +1,27 @@ +# Copyright 2017 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Any + +from synapse.types import JsonDict + +from ._base import Config + + +class GroupsConfig(Config): + section = "groups" + + def read_config(self, config: JsonDict, **kwargs: Any) -> None: + self.enable_group_creation = config.get("enable_group_creation", False) + self.group_creation_prefix = config.get("group_creation_prefix", "") diff --git a/synapse/config/jwt.py b/synapse/config/jwt.py index 2a756d1a7..7e3c764b2 100644 --- a/synapse/config/jwt.py +++ b/synapse/config/jwt.py @@ -55,67 +55,3 @@ class JWTConfig(Config): self.jwt_subject_claim = None self.jwt_issuer = None self.jwt_audiences = None - - def generate_config_section(self, **kwargs: Any) -> str: - return """\ - # JSON web token integration. The following settings can be used to make - # Synapse JSON web tokens for authentication, instead of its internal - # password database. - # - # Each JSON Web Token needs to contain a "sub" (subject) claim, which is - # used as the localpart of the mxid. - # - # Additionally, the expiration time ("exp"), not before time ("nbf"), - # and issued at ("iat") claims are validated if present. - # - # Note that this is a non-standard login type and client support is - # expected to be non-existent. - # - # See https://matrix-org.github.io/synapse/latest/jwt.html. - # - #jwt_config: - # Uncomment the following to enable authorization using JSON web - # tokens. Defaults to false. - # - #enabled: true - - # This is either the private shared secret or the public key used to - # decode the contents of the JSON web token. - # - # Required if 'enabled' is true. - # - #secret: "provided-by-your-issuer" - - # The algorithm used to sign the JSON web token. - # - # Supported algorithms are listed at - # https://pyjwt.readthedocs.io/en/latest/algorithms.html - # - # Required if 'enabled' is true. - # - #algorithm: "provided-by-your-issuer" - - # Name of the claim containing a unique identifier for the user. - # - # Optional, defaults to `sub`. - # - #subject_claim: "sub" - - # The issuer to validate the "iss" claim against. - # - # Optional, if provided the "iss" claim will be required and - # validated for all JSON web tokens. - # - #issuer: "provided-by-your-issuer" - - # A list of audiences to validate the "aud" claim against. 
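The removed `jwt_config` comments describe the validation contract: the "sub" claim becomes the mxid localpart, "exp"/"nbf"/"iat" are checked when present, and "iss"/"aud" are enforced only when configured. A minimal sketch using PyJWT (an assumption for illustration; Synapse's actual validation lives in its JWT login handler, and all values below are made up):

    import jwt  # PyJWT

    secret, issuer, audiences = "provided-by-your-issuer", "my-issuer", ["synapse"]

    token = jwt.encode(
        {"sub": "alice", "iss": issuer, "aud": audiences[0]},
        secret, algorithm="HS256",
    )
    claims = jwt.decode(
        token, secret, algorithms=["HS256"],
        issuer=issuer, audience=audiences,
    )
    localpart = claims["sub"]  # -> "alice", the local part of the mxid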
- # - # Optional, if provided the "aud" claim will be required and - # validated for all JSON web tokens. - # - # Note that if the "aud" claim is included in a JSON web token then - # validation will fail without configuring audiences. - # - #audiences: - # - "provided-by-your-issuer" - """ diff --git a/synapse/config/key.py b/synapse/config/key.py index b250912e3..cc75efdf8 100644 --- a/synapse/config/key.py +++ b/synapse/config/key.py @@ -184,111 +184,22 @@ class KeyConfig(Config): **kwargs: Any, ) -> str: base_key_name = os.path.join(config_dir_path, server_name) + macaroon_secret_key = "" + form_secret = "" if generate_secrets: macaroon_secret_key = 'macaroon_secret_key: "%s"' % ( random_string_with_symbols(50), ) form_secret = 'form_secret: "%s"' % random_string_with_symbols(50) - else: - macaroon_secret_key = "#macaroon_secret_key: " - form_secret = "#form_secret: " return ( """\ - # a secret which is used to sign access tokens. If none is specified, - # the registration_shared_secret is used, if one is given; otherwise, - # a secret key is derived from the signing key. - # %(macaroon_secret_key)s - - # a secret which is used to calculate HMACs for form values, to stop - # falsification of values. Must be specified for the User Consent - # forms to work. - # %(form_secret)s - - ## Signing Keys ## - - # Path to the signing key to sign messages with - # signing_key_path: "%(base_key_name)s.signing.key" - - # The keys that the server used to sign messages with but won't use - # to sign new messages. - # - old_signing_keys: - # For each key, `key` should be the base64-encoded public key, and - # `expired_ts`should be the time (in milliseconds since the unix epoch) that - # it was last used. - # - # It is possible to build an entry from an old signing.key file using the - # `export_signing_key` script which is provided with synapse. - # - # For example: - # - #"ed25519:id": { key: "base64string", expired_ts: 123456789123 } - - # How long key response published by this server is valid for. - # Used to set the valid_until_ts in /key/v2 APIs. - # Determines how quickly servers will query to check which keys - # are still valid. - # - #key_refresh_interval: 1d - - # The trusted servers to download signing keys from. - # - # When we need to fetch a signing key, each server is tried in parallel. - # - # Normally, the connection to the key server is validated via TLS certificates. - # Additional security can be provided by configuring a `verify key`, which - # will make synapse check that the response is signed by that key. - # - # This setting supercedes an older setting named `perspectives`. The old format - # is still supported for backwards-compatibility, but it is deprecated. - # - # 'trusted_key_servers' defaults to matrix.org, but using it will generate a - # warning on start-up. To suppress this warning, set - # 'suppress_key_server_warning' to true. - # - # Options for each entry in the list include: - # - # server_name: the name of the server. required. - # - # verify_keys: an optional map from key id to base64-encoded public key. - # If specified, we will check that the response is signed by at least - # one of the given keys. - # - # accept_keys_insecurely: a boolean. Normally, if `verify_keys` is unset, - # and federation_verify_certificates is not `true`, synapse will refuse - # to start, because this would allow anyone who can spoof DNS responses - # to masquerade as the trusted key server. 
If you know what you are doing - # and are sure that your network environment provides a secure connection - # to the key server, you can set this to `true` to override this - # behaviour. - # - # An example configuration might look like: - # - #trusted_key_servers: - # - server_name: "my_trusted_server.example.com" - # verify_keys: - # "ed25519:auto": "abcdefghijklmnopqrstuvwxyzabcdefghijklmopqr" - # - server_name: "my_other_trusted_server.example.com" - # trusted_key_servers: - server_name: "matrix.org" - - # Uncomment the following to disable the warning that is emitted when the - # trusted_key_servers include 'matrix.org'. See above. - # - #suppress_key_server_warning: true - - # The signing keys to use when acting as a trusted key server. If not specified - # defaults to the server signing key. - # - # Can contain multiple keys, one per line. - # - #key_server_signing_keys_path: "key_server_signing_keys.key" """ % locals() ) diff --git a/synapse/config/logger.py b/synapse/config/logger.py index 82a5b5fa1..6c1f78f8d 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -153,11 +153,6 @@ class LoggingConfig(Config): log_config = os.path.join(config_dir_path, server_name + ".log.config") return ( """\ - ## Logging ## - - # A yaml python logging config file as described by - # https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema - # log_config: "%(log_config)s" """ % locals() diff --git a/synapse/config/metrics.py b/synapse/config/metrics.py index aa360a417..d63650788 100644 --- a/synapse/config/metrics.py +++ b/synapse/config/metrics.py @@ -73,46 +73,8 @@ class MetricsConfig(Config): def generate_config_section( self, report_stats: Optional[bool] = None, **kwargs: Any ) -> str: - res = """\ - ## Metrics ### - - # Enable collection and rendering of performance metrics - # - #enable_metrics: false - - # Enable sentry integration - # NOTE: While attempts are made to ensure that the logs don't contain - # any sensitive information, this cannot be guaranteed. By enabling - # this option the sentry server may therefore receive sensitive - # information, and it in turn may then diseminate sensitive information - # through insecure notification channels if so configured. - # - #sentry: - # dsn: "..." - - # Flags to enable Prometheus metrics which are not suitable to be - # enabled by default, either for performance reasons or limited use. - # - metrics_flags: - # Publish synapse_federation_known_servers, a gauge of the number of - # servers this homeserver knows about, including itself. May cause - # performance problems on large homeservers. - # - #known_servers: true - - # Whether or not to report anonymized homeserver usage statistics. - # - """ - - if report_stats is None: - res += "#report_stats: true|false\n" + if report_stats is not None: + res = "report_stats: %s\n" % ("true" if report_stats else "false") else: - res += "report_stats: %s\n" % ("true" if report_stats else "false") - - res += """ - # The endpoint to report the anonymized homeserver usage statistics to. 
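After the `metrics.py` hunk above, the generated metrics section reduces to a single explicit `report_stats` line when the admin answered the stats question at config-generation time, and effectively nothing otherwise. The new logic, restated as a standalone function:

    from typing import Optional

    def metrics_section(report_stats: Optional[bool]) -> str:
        if report_stats is not None:
            return "report_stats: %s\n" % ("true" if report_stats else "false")
        return "\n"

    assert metrics_section(True) == "report_stats: true\n"
    # A lone newline is collapsed by the re.sub added in _base.py.
    assert metrics_section(None) == "\n"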
- # Defaults to https://matrix.org/report-usage-stats/push - # - #report_stats_endpoint: https://example.com/report-usage-stats/push - """ + res = "\n" return res diff --git a/synapse/config/modules.py b/synapse/config/modules.py index 0915014f7..903637be8 100644 --- a/synapse/config/modules.py +++ b/synapse/config/modules.py @@ -31,20 +31,3 @@ class ModulesConfig(Config): raise ConfigError("expected a mapping", config_path) self.loaded_modules.append(load_module(module, config_path)) - - def generate_config_section(self, **kwargs: Any) -> str: - return """ - ## Modules ## - - # Server admins can expand Synapse's functionality with external modules. - # - # See https://matrix-org.github.io/synapse/latest/modules/index.html for more - # documentation on how to configure or create custom modules for Synapse. - # - modules: - #- module: my_super_module.MySuperClass - # config: - # do_thing: true - #- module: my_other_super_module.SomeClass - # config: {} - """ diff --git a/synapse/config/oembed.py b/synapse/config/oembed.py index e9edea073..0d32aba70 100644 --- a/synapse/config/oembed.py +++ b/synapse/config/oembed.py @@ -143,29 +143,6 @@ class OembedConfig(Config): ) return re.compile(pattern) - def generate_config_section(self, **kwargs: Any) -> str: - return """\ - # oEmbed allows for easier embedding content from a website. It can be - # used for generating URLs previews of services which support it. - # - oembed: - # A default list of oEmbed providers is included with Synapse. - # - # Uncomment the following to disable using these default oEmbed URLs. - # Defaults to 'false'. - # - #disable_default_providers: true - - # Additional files with oEmbed configuration (each should be in the - # form of providers.json). - # - # By default, this list is empty (so only the default providers.json - # is used). - # - #additional_providers: - # - oembed/my_providers.json - """ - _OEMBED_PROVIDER_SCHEMA = { "type": "array", diff --git a/synapse/config/oidc.py b/synapse/config/oidc.py index b9c40522d..98e8cd8b5 100644 --- a/synapse/config/oidc.py +++ b/synapse/config/oidc.py @@ -66,203 +66,6 @@ class OIDCConfig(Config): # OIDC is enabled if we have a provider return bool(self.oidc_providers) - def generate_config_section(self, **kwargs: Any) -> str: - return """\ - # List of OpenID Connect (OIDC) / OAuth 2.0 identity providers, for registration - # and login. - # - # Options for each entry include: - # - # idp_id: a unique identifier for this identity provider. Used internally - # by Synapse; should be a single word such as 'github'. - # - # Note that, if this is changed, users authenticating via that provider - # will no longer be recognised as the same user! - # - # (Use "oidc" here if you are migrating from an old "oidc_config" - # configuration.) - # - # idp_name: A user-facing name for this identity provider, which is used to - # offer the user a choice of login mechanisms. - # - # idp_icon: An optional icon for this identity provider, which is presented - # by clients and Synapse's own IdP picker page. If given, must be an - # MXC URI of the format mxc:///. (An easy way to - # obtain such an MXC URI is to upload an image to an (unencrypted) room - # and then copy the "url" from the source of the event.) - # - # idp_brand: An optional brand for this identity provider, allowing clients - # to style the login flow according to the identity provider in question. - # See the spec for possible options here. 
- # - # discover: set to 'false' to disable the use of the OIDC discovery mechanism - # to discover endpoints. Defaults to true. - # - # issuer: Required. The OIDC issuer. Used to validate tokens and (if discovery - # is enabled) to discover the provider's endpoints. - # - # client_id: Required. oauth2 client id to use. - # - # client_secret: oauth2 client secret to use. May be omitted if - # client_secret_jwt_key is given, or if client_auth_method is 'none'. - # - # client_secret_jwt_key: Alternative to client_secret: details of a key used - # to create a JSON Web Token to be used as an OAuth2 client secret. If - # given, must be a dictionary with the following properties: - # - # key: a pem-encoded signing key. Must be a suitable key for the - # algorithm specified. Required unless 'key_file' is given. - # - # key_file: the path to file containing a pem-encoded signing key file. - # Required unless 'key' is given. - # - # jwt_header: a dictionary giving properties to include in the JWT - # header. Must include the key 'alg', giving the algorithm used to - # sign the JWT, such as "ES256", using the JWA identifiers in - # RFC7518. - # - # jwt_payload: an optional dictionary giving properties to include in - # the JWT payload. Normally this should include an 'iss' key. - # - # client_auth_method: auth method to use when exchanging the token. Valid - # values are 'client_secret_basic' (default), 'client_secret_post' and - # 'none'. - # - # scopes: list of scopes to request. This should normally include the "openid" - # scope. Defaults to ["openid"]. - # - # authorization_endpoint: the oauth2 authorization endpoint. Required if - # provider discovery is disabled. - # - # token_endpoint: the oauth2 token endpoint. Required if provider discovery is - # disabled. - # - # userinfo_endpoint: the OIDC userinfo endpoint. Required if discovery is - # disabled and the 'openid' scope is not requested. - # - # jwks_uri: URI where to fetch the JWKS. Required if discovery is disabled and - # the 'openid' scope is used. - # - # skip_verification: set to 'true' to skip metadata verification. Use this if - # you are connecting to a provider that is not OpenID Connect compliant. - # Defaults to false. Avoid this in production. - # - # user_profile_method: Whether to fetch the user profile from the userinfo - # endpoint, or to rely on the data returned in the id_token from the - # token_endpoint. - # - # Valid values are: 'auto' or 'userinfo_endpoint'. - # - # Defaults to 'auto', which uses the userinfo endpoint if 'openid' is - # not included in 'scopes'. Set to 'userinfo_endpoint' to always use the - # userinfo endpoint. - # - # allow_existing_users: set to 'true' to allow a user logging in via OIDC to - # match a pre-existing account instead of failing. This could be used if - # switching from password logins to OIDC. Defaults to false. - # - # user_mapping_provider: Configuration for how attributes returned from a OIDC - # provider are mapped onto a matrix user. This setting has the following - # sub-properties: - # - # module: The class name of a custom mapping module. Default is - # {mapping_provider!r}. - # See https://matrix-org.github.io/synapse/latest/sso_mapping_providers.html#openid-mapping-providers - # for information on implementing a custom mapping provider. - # - # config: Configuration for the mapping provider module. This section will - # be passed as a Python dictionary to the user mapping provider - # module's `parse_config` method. 
- # - # For the default provider, the following settings are available: - # - # subject_claim: name of the claim containing a unique identifier - # for the user. Defaults to 'sub', which OpenID Connect - # compliant providers should provide. - # - # localpart_template: Jinja2 template for the localpart of the MXID. - # If this is not set, the user will be prompted to choose their - # own username (see the documentation for the - # 'sso_auth_account_details.html' template). This template can - # use the 'localpart_from_email' filter. - # - # confirm_localpart: Whether to prompt the user to validate (or - # change) the generated localpart (see the documentation for the - # 'sso_auth_account_details.html' template), instead of - # registering the account right away. - # - # display_name_template: Jinja2 template for the display name to set - # on first login. If unset, no displayname will be set. - # - # email_template: Jinja2 template for the email address of the user. - # If unset, no email address will be added to the account. - # - # extra_attributes: a map of Jinja2 templates for extra attributes - # to send back to the client during login. - # Note that these are non-standard and clients will ignore them - # without modifications. - # - # When rendering, the Jinja2 templates are given a 'user' variable, - # which is set to the claims returned by the UserInfo Endpoint and/or - # in the ID Token. - # - # It is possible to configure Synapse to only allow logins if certain attributes - # match particular values in the OIDC userinfo. The requirements can be listed under - # `attribute_requirements` as shown below. All of the listed attributes must - # match for the login to be permitted. Additional attributes can be added to - # userinfo by expanding the `scopes` section of the OIDC config to retrieve - # additional information from the OIDC provider. - # - # If the OIDC claim is a list, then the attribute must match any value in the list. - # Otherwise, it must exactly match the value of the claim. Using the example - # below, the `family_name` claim MUST be "Stephensson", but the `groups` - # claim MUST contain "admin". - # - # attribute_requirements: - # - attribute: family_name - # value: "Stephensson" - # - attribute: groups - # value: "admin" - # - # See https://matrix-org.github.io/synapse/latest/openid.html - # for information on how to configure these options. - # - # For backwards compatibility, it is also possible to configure a single OIDC - # provider via an 'oidc_config' setting. This is now deprecated and admins are - # advised to migrate to the 'oidc_providers' format. (When doing that migration, - # use 'oidc' for the idp_id to ensure that existing users continue to be - # recognised.) 
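The removed OIDC comments above give a precise rule for `attribute_requirements`: a list-valued userinfo claim matches if it contains the required value; any other claim must match it exactly. A sketch encoding that rule, using the Stephensson/admin example from the removed text:

    from typing import Any, Dict, List

    def userinfo_matches(requirements: List[Dict[str, Any]],
                         userinfo: Dict[str, Any]) -> bool:
        for req in requirements:
            claim = userinfo.get(req["attribute"])
            if isinstance(claim, list):
                if req["value"] not in claim:
                    return False  # list claims: membership test
            elif claim != req["value"]:
                return False      # scalar claims: exact match
        return True

    assert userinfo_matches(
        [{"attribute": "family_name", "value": "Stephensson"},
         {"attribute": "groups", "value": "admin"}],
        {"family_name": "Stephensson", "groups": ["admin", "staff"]},
    )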
- # - oidc_providers: - # Generic example - # - #- idp_id: my_idp - # idp_name: "My OpenID provider" - # idp_icon: "mxc://example.com/mediaid" - # discover: false - # issuer: "https://accounts.example.com/" - # client_id: "provided-by-your-issuer" - # client_secret: "provided-by-your-issuer" - # client_auth_method: client_secret_post - # scopes: ["openid", "profile"] - # authorization_endpoint: "https://accounts.example.com/oauth2/auth" - # token_endpoint: "https://accounts.example.com/oauth2/token" - # userinfo_endpoint: "https://accounts.example.com/userinfo" - # jwks_uri: "https://accounts.example.com/.well-known/jwks.json" - # skip_verification: true - # user_mapping_provider: - # config: - # subject_claim: "id" - # localpart_template: "{{{{ user.login }}}}" - # display_name_template: "{{{{ user.name }}}}" - # email_template: "{{{{ user.email }}}}" - # attribute_requirements: - # - attribute: userGroup - # value: "synapseUsers" - """.format( - mapping_provider=DEFAULT_USER_MAPPING_PROVIDER - ) - # jsonschema definition of the configuration settings for an oidc identity provider OIDC_PROVIDER_CONFIG_SCHEMA = { diff --git a/synapse/config/push.py b/synapse/config/push.py index 2e796d1c4..979b128ea 100644 --- a/synapse/config/push.py +++ b/synapse/config/push.py @@ -49,36 +49,3 @@ class PushConfig(Config): "please set push.include_content instead" ) self.push_include_content = not redact_content - - def generate_config_section(self, **kwargs: Any) -> str: - return """ - ## Push ## - - push: - # Clients requesting push notifications can either have the body of - # the message sent in the notification poke along with other details - # like the sender, or just the event ID and room ID (`event_id_only`). - # If clients choose the former, this option controls whether the - # notification request includes the content of the event (other details - # like the sender are still included). For `event_id_only` push, it - # has no effect. - # - # For modern android devices the notification content will still appear - # because it is loaded by the app. iPhone, however will send a - # notification saying only that a message arrived and who it came from. - # - # The default value is "true" to include message details. Uncomment to only - # include the event ID and room ID in push notification payloads. - # - #include_content: false - - # When a push notification is received, an unread count is also sent. - # This number can either be calculated as the number of unread messages - # for the user, or the number of *rooms* the user has unread messages in. - # - # The default value is "true", meaning push clients will see the number of - # rooms with unread messages in them. Uncomment to instead send the number - # of unread messages. - # - #group_unread_count_by_room: false - """ diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py index 0587f5c10..d4090a1f9 100644 --- a/synapse/config/ratelimiting.py +++ b/synapse/config/ratelimiting.py @@ -143,125 +143,3 @@ class RatelimitConfig(Config): "burst_count": self.rc_message.burst_count, }, ) - - def generate_config_section(self, **kwargs: Any) -> str: - return """\ - ## Ratelimiting ## - - # Ratelimiting settings for client actions (registration, login, messaging). - # - # Each ratelimiting configuration is made of two parameters: - # - per_second: number of requests a client can send per second. - # - burst_count: number of requests a client can send before being throttled. 
- # - # Synapse currently uses the following configurations: - # - one for messages that ratelimits sending based on the account the client - # is using - # - one for registration that ratelimits registration requests based on the - # client's IP address. - # - one for checking the validity of registration tokens that ratelimits - # requests based on the client's IP address. - # - one for login that ratelimits login requests based on the client's IP - # address. - # - one for login that ratelimits login requests based on the account the - # client is attempting to log into. - # - one for login that ratelimits login requests based on the account the - # client is attempting to log into, based on the amount of failed login - # attempts for this account. - # - one for ratelimiting redactions by room admins. If this is not explicitly - # set then it uses the same ratelimiting as per rc_message. This is useful - # to allow room admins to deal with abuse quickly. - # - two for ratelimiting number of rooms a user can join, "local" for when - # users are joining rooms the server is already in (this is cheap) vs - # "remote" for when users are trying to join rooms not on the server (which - # can be more expensive) - # - one for ratelimiting how often a user or IP can attempt to validate a 3PID. - # - two for ratelimiting how often invites can be sent in a room or to a - # specific user. - # - one for ratelimiting 3PID invites (i.e. invites sent to a third-party ID - # such as an email address or a phone number) based on the account that's - # sending the invite. - # - # The defaults are as shown below. - # - #rc_message: - # per_second: 0.2 - # burst_count: 10 - # - #rc_registration: - # per_second: 0.17 - # burst_count: 3 - # - #rc_registration_token_validity: - # per_second: 0.1 - # burst_count: 5 - # - #rc_login: - # address: - # per_second: 0.17 - # burst_count: 3 - # account: - # per_second: 0.17 - # burst_count: 3 - # failed_attempts: - # per_second: 0.17 - # burst_count: 3 - # - #rc_admin_redaction: - # per_second: 1 - # burst_count: 50 - # - #rc_joins: - # local: - # per_second: 0.1 - # burst_count: 10 - # remote: - # per_second: 0.01 - # burst_count: 10 - # - #rc_3pid_validation: - # per_second: 0.003 - # burst_count: 5 - # - #rc_invites: - # per_room: - # per_second: 0.3 - # burst_count: 10 - # per_user: - # per_second: 0.003 - # burst_count: 5 - # - #rc_third_party_invite: - # per_second: 0.2 - # burst_count: 10 - - # Ratelimiting settings for incoming federation - # - # The rc_federation configuration is made up of the following settings: - # - window_size: window size in milliseconds - # - sleep_limit: number of federation requests from a single server in - # a window before the server will delay processing the request. - # - sleep_delay: duration in milliseconds to delay processing events - # from remote servers by if they go over the sleep limit. - # - reject_limit: maximum number of concurrent federation requests - # allowed from a single server - # - concurrent: number of federation requests to concurrently process - # from a single server - # - # The defaults are as shown below. - # - #rc_federation: - # window_size: 1000 - # sleep_limit: 10 - # sleep_delay: 500 - # reject_limit: 50 - # concurrent: 3 - - # Target outgoing federation transaction frequency for sending read-receipts, - # per-room. - # - # If we end up trying to send out more read-receipts, they will get buffered up - # into fewer transactions. 
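The `per_second`/`burst_count` pairs documented above behave like a token bucket: a client may burst up to `burst_count` requests, then is throttled to `per_second`. This sketch shows the semantics only, not Synapse's ratelimiter implementation:

    import time

    class Bucket:
        def __init__(self, per_second: float, burst_count: int) -> None:
            self.rate, self.capacity = per_second, burst_count
            self.tokens, self.last = float(burst_count), time.monotonic()

        def allow(self) -> bool:
            now = time.monotonic()
            # Refill at `per_second`, capped at `burst_count`.
            self.tokens = min(self.capacity,
                              self.tokens + (now - self.last) * self.rate)
            self.last = now
            if self.tokens >= 1:
                self.tokens -= 1
                return True
            return False

    rc_message = Bucket(per_second=0.2, burst_count=10)  # documented default
    assert all(rc_message.allow() for _ in range(10))    # burst is allowed
    assert not rc_message.allow()                        # then throttled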
- # - #federation_rr_transactions_per_room_per_second: 50 - """ diff --git a/synapse/config/redis.py b/synapse/config/redis.py index ec7a73541..b42dd2e93 100644 --- a/synapse/config/redis.py +++ b/synapse/config/redis.py @@ -34,24 +34,3 @@ class RedisConfig(Config): self.redis_host = redis_config.get("host", "localhost") self.redis_port = redis_config.get("port", 6379) self.redis_password = redis_config.get("password") - - def generate_config_section(self, **kwargs: Any) -> str: - return """\ - # Configuration for Redis when using workers. This *must* be enabled when - # using workers (unless using old style direct TCP configuration). - # - redis: - # Uncomment the below to enable Redis support. - # - #enabled: true - - # Optional host and port to use to connect to redis. Defaults to - # localhost and 6379 - # - #host: localhost - #port: 6379 - - # Optional password if configured on the Redis instance - # - #password: - """ diff --git a/synapse/config/registration.py b/synapse/config/registration.py index d2d0425e6..fcf99be09 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -206,284 +206,9 @@ class RegistrationConfig(Config): registration_shared_secret = 'registration_shared_secret: "%s"' % ( random_string_with_symbols(50), ) + return registration_shared_secret else: - registration_shared_secret = "#registration_shared_secret: " - - return ( - """\ - ## Registration ## - # - # Registration can be rate-limited using the parameters in the "Ratelimiting" - # section of this file. - - # Enable registration for new users. Defaults to 'false'. It is highly recommended that if you enable registration, - # you use either captcha, email, or token-based verification to verify that new users are not bots. In order to enable registration - # without any verification, you must also set `enable_registration_without_verification`, found below. - # - #enable_registration: false - - # Enable registration without email or captcha verification. Note: this option is *not* recommended, - # as registration without verification is a known vector for spam and abuse. Defaults to false. Has no effect - # unless `enable_registration` is also enabled. - # - #enable_registration_without_verification: true - - # Time that a user's session remains valid for, after they log in. - # - # Note that this is not currently compatible with guest logins. - # - # Note also that this is calculated at login time: changes are not applied - # retrospectively to users who have already logged in. - # - # By default, this is infinite. - # - #session_lifetime: 24h - - # Time that an access token remains valid for, if the session is - # using refresh tokens. - # For more information about refresh tokens, please see the manual. - # Note that this only applies to clients which advertise support for - # refresh tokens. - # - # Note also that this is calculated at login time and refresh time: - # changes are not applied to existing sessions until they are refreshed. - # - # By default, this is 5 minutes. - # - #refreshable_access_token_lifetime: 5m - - # Time that a refresh token remains valid for (provided that it is not - # exchanged for another one first). - # This option can be used to automatically log-out inactive sessions. - # Please see the manual for more information. - # - # Note also that this is calculated at login time and refresh time: - # changes are not applied to existing sessions until they are refreshed. - # - # By default, this is infinite. 
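The `registration.py` hunk above trims `generate_config_section` to emit only the shared-secret line when secrets are being generated, and nothing at all otherwise. Restated standalone, with a stand-in for Synapse's `random_string_with_symbols` helper so the sketch runs on its own:

    import secrets

    def random_string_with_symbols(length: int) -> str:  # stand-in only
        return secrets.token_urlsafe(length)[:length]

    def registration_section(generate_secrets: bool) -> str:
        if generate_secrets:
            return 'registration_shared_secret: "%s"' % (
                random_string_with_symbols(50),
            )
        return ""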
- # - #refresh_token_lifetime: 24h - - # Time that an access token remains valid for, if the session is NOT - # using refresh tokens. - # Please note that not all clients support refresh tokens, so setting - # this to a short value may be inconvenient for some users who will - # then be logged out frequently. - # - # Note also that this is calculated at login time: changes are not applied - # retrospectively to existing sessions for users that have already logged in. - # - # By default, this is infinite. - # - #nonrefreshable_access_token_lifetime: 24h - - # The user must provide all of the below types of 3PID when registering. - # - #registrations_require_3pid: - # - email - # - msisdn - - # Explicitly disable asking for MSISDNs from the registration - # flow (overrides registrations_require_3pid if MSISDNs are set as required) - # - #disable_msisdn_registration: true - - # Mandate that users are only allowed to associate certain formats of - # 3PIDs with accounts on this server. - # - #allowed_local_3pids: - # - medium: email - # pattern: '^[^@]+@matrix\\.org$' - # - medium: email - # pattern: '^[^@]+@vector\\.im$' - # - medium: msisdn - # pattern: '\\+44' - - # Enable 3PIDs lookup requests to identity servers from this server. - # - #enable_3pid_lookup: true - - # Require users to submit a token during registration. - # Tokens can be managed using the admin API: - # https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/registration_tokens.html - # Note that `enable_registration` must be set to `true`. - # Disabling this option will not delete any tokens previously generated. - # Defaults to false. Uncomment the following to require tokens: - # - #registration_requires_token: true - - # Allow users to submit a token during registration to bypass any required 3pid - # steps configured in `registrations_require_3pid`. - # Defaults to false, requiring that registration tokens (if enabled) complete a 3pid flow. - # - #enable_registration_token_3pid_bypass: false - - # If set, allows registration of standard or admin accounts by anyone who - # has the shared secret, even if registration is otherwise disabled. - # - %(registration_shared_secret)s - - # Set the number of bcrypt rounds used to generate password hash. - # Larger numbers increase the work factor needed to generate the hash. - # The default number is 12 (which equates to 2^12 rounds). - # N.B. that increasing this will exponentially increase the time required - # to register or login - e.g. 24 => 2^24 rounds which will take >20 mins. - # - #bcrypt_rounds: 12 - - # Allows users to register as guests without a password/email/etc, and - # participate in rooms hosted on this server which have been made - # accessible to anonymous users. - # - #allow_guest_access: false - - # The identity server which we suggest that clients should use when users log - # in on this server. - # - # (By default, no suggestion is made, so it is left up to the client. - # This setting is ignored unless public_baseurl is also explicitly set.) - # - #default_identity_server: https://matrix.org - - # Handle threepid (email/phone etc) registration and password resets through a set of - # *trusted* identity servers. Note that this allows the configured identity server to - # reset passwords for accounts! - # - # Be aware that if `email` is not set, and SMTP options have not been - # configured in the email config block, registration and user password resets via - # email will be globally disabled. 
- # - # Additionally, if `msisdn` is not set, registration and password resets via msisdn - # will be disabled regardless, and users will not be able to associate an msisdn - # identifier to their account. This is due to Synapse currently not supporting - # any method of sending SMS messages on its own. - # - # To enable using an identity server for operations regarding a particular third-party - # identifier type, set the value to the URL of that identity server as shown in the - # examples below. - # - # Servers handling the these requests must answer the `/requestToken` endpoints defined - # by the Matrix Identity Service API specification: - # https://matrix.org/docs/spec/identity_service/latest - # - account_threepid_delegates: - #email: https://example.com # Delegate email sending to example.com - #msisdn: http://localhost:8090 # Delegate SMS sending to this local process - - # Whether users are allowed to change their displayname after it has - # been initially set. Useful when provisioning users based on the - # contents of a third-party directory. - # - # Does not apply to server administrators. Defaults to 'true' - # - #enable_set_displayname: false - - # Whether users are allowed to change their avatar after it has been - # initially set. Useful when provisioning users based on the contents - # of a third-party directory. - # - # Does not apply to server administrators. Defaults to 'true' - # - #enable_set_avatar_url: false - - # Whether users can change the 3PIDs associated with their accounts - # (email address and msisdn). - # - # Defaults to 'true' - # - #enable_3pid_changes: false - - # Users who register on this homeserver will automatically be joined - # to these rooms. - # - # By default, any room aliases included in this list will be created - # as a publicly joinable room when the first user registers for the - # homeserver. This behaviour can be customised with the settings below. - # If the room already exists, make certain it is a publicly joinable - # room. The join rule of the room must be set to 'public'. - # - #auto_join_rooms: - # - "#example:example.com" - - # Where auto_join_rooms are specified, setting this flag ensures that the - # the rooms exist by creating them when the first user on the - # homeserver registers. - # - # By default the auto-created rooms are publicly joinable from any federated - # server. Use the autocreate_auto_join_rooms_federated and - # autocreate_auto_join_room_preset settings below to customise this behaviour. - # - # Setting to false means that if the rooms are not manually created, - # users cannot be auto-joined since they do not exist. - # - # Defaults to true. Uncomment the following line to disable automatically - # creating auto-join rooms. - # - #autocreate_auto_join_rooms: false - - # Whether the auto_join_rooms that are auto-created are available via - # federation. Only has an effect if autocreate_auto_join_rooms is true. - # - # Note that whether a room is federated cannot be modified after - # creation. - # - # Defaults to true: the room will be joinable from other servers. - # Uncomment the following to prevent users from other homeservers from - # joining these rooms. - # - #autocreate_auto_join_rooms_federated: false - - # The room preset to use when auto-creating one of auto_join_rooms. Only has an - # effect if autocreate_auto_join_rooms is true. - # - # This can be one of "public_chat", "private_chat", or "trusted_private_chat". 
- # If a value of "private_chat" or "trusted_private_chat" is used then - # auto_join_mxid_localpart must also be configured. - # - # Defaults to "public_chat", meaning that the room is joinable by anyone, including - # federated servers if autocreate_auto_join_rooms_federated is true (the default). - # Uncomment the following to require an invitation to join these rooms. - # - #autocreate_auto_join_room_preset: private_chat - - # The local part of the user id which is used to create auto_join_rooms if - # autocreate_auto_join_rooms is true. If this is not provided then the - # initial user account that registers will be used to create the rooms. - # - # The user id is also used to invite new users to any auto-join rooms which - # are set to invite-only. - # - # It *must* be configured if autocreate_auto_join_room_preset is set to - # "private_chat" or "trusted_private_chat". - # - # Note that this must be specified in order for new users to be correctly - # invited to any auto-join rooms which have been set to invite-only (either - # at the time of creation or subsequently). - # - # Note that, if the room already exists, this user must be joined and - # have the appropriate permissions to invite new members. - # - #auto_join_mxid_localpart: system - - # When auto_join_rooms is specified, setting this flag to false prevents - # guest accounts from being automatically joined to the rooms. - # - # Defaults to true. - # - #auto_join_rooms_for_guests: false - - # Whether to inhibit errors raised when registering a new account if the user ID - # already exists. If turned on, that requests to /register/available will always - # show a user ID as available, and Synapse won't raise an error when starting - # a registration with a user ID that already exists. However, Synapse will still - # raise an error if the registration completes and the username conflicts. - # - # Defaults to false. - # - #inhibit_user_in_use_error: true - """ - % locals() - ) + return "" @staticmethod def add_arguments(parser: argparse.ArgumentParser) -> None: diff --git a/synapse/config/repository.py b/synapse/config/repository.py index f9c55143c..aadec1e54 100644 --- a/synapse/config/repository.py +++ b/synapse/config/repository.py @@ -19,7 +19,7 @@ from urllib.request import getproxies_environment # type: ignore import attr -from synapse.config.server import DEFAULT_IP_RANGE_BLACKLIST, generate_ip_set +from synapse.config.server import generate_ip_set from synapse.types import JsonDict from synapse.util.check_dependencies import DependencyException, check_requirements from synapse.util.module_loader import load_module @@ -242,166 +242,4 @@ class ContentRepositoryConfig(Config): def generate_config_section(self, data_dir_path: str, **kwargs: Any) -> str: assert data_dir_path is not None media_store = os.path.join(data_dir_path, "media_store") - - formatted_thumbnail_sizes = "".join( - THUMBNAIL_SIZE_YAML % s for s in DEFAULT_THUMBNAIL_SIZES - ) - # strip final NL - formatted_thumbnail_sizes = formatted_thumbnail_sizes[:-1] - - ip_range_blacklist = "\n".join( - " # - '%s'" % ip for ip in DEFAULT_IP_RANGE_BLACKLIST - ) - - return ( - r""" - ## Media Store ## - - # Enable the media store service in the Synapse master. Uncomment the - # following if you are using a separate media store worker. - # - #enable_media_repo: false - - # Directory where uploaded images and attachments are stored. - # - media_store_path: "%(media_store)s" - - # Media storage providers allow media to be stored in different - # locations. 
- # - #media_storage_providers: - # - module: file_system - # # Whether to store newly uploaded local files - # store_local: false - # # Whether to store newly downloaded remote files - # store_remote: false - # # Whether to wait for successful storage for local uploads - # store_synchronous: false - # config: - # directory: /mnt/some/other/directory - - # The largest allowed upload size in bytes - # - # If you are using a reverse proxy you may also need to set this value in - # your reverse proxy's config. Notably Nginx has a small max body size by default. - # See https://matrix-org.github.io/synapse/latest/reverse_proxy.html. - # - #max_upload_size: 50M - - # Maximum number of pixels that will be thumbnailed - # - #max_image_pixels: 32M - - # Whether to generate new thumbnails on the fly to precisely match - # the resolution requested by the client. If true then whenever - # a new resolution is requested by the client the server will - # generate a new thumbnail. If false the server will pick a thumbnail - # from a precalculated list. - # - #dynamic_thumbnails: false - - # List of thumbnails to precalculate when an image is uploaded. - # - #thumbnail_sizes: -%(formatted_thumbnail_sizes)s - - # Is the preview URL API enabled? - # - # 'false' by default: uncomment the following to enable it (and specify a - # url_preview_ip_range_blacklist blacklist). - # - #url_preview_enabled: true - - # List of IP address CIDR ranges that the URL preview spider is denied - # from accessing. There are no defaults: you must explicitly - # specify a list for URL previewing to work. You should specify any - # internal services in your network that you do not want synapse to try - # to connect to, otherwise anyone in any Matrix room could cause your - # synapse to issue arbitrary GET requests to your internal services, - # causing serious security issues. - # - # (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly - # listed here, since they correspond to unroutable addresses.) - # - # This must be specified if url_preview_enabled is set. It is recommended that - # you uncomment the following list as a starting point. - # - # Note: The value is ignored when an HTTP proxy is in use - # - #url_preview_ip_range_blacklist: -%(ip_range_blacklist)s - - # List of IP address CIDR ranges that the URL preview spider is allowed - # to access even if they are specified in url_preview_ip_range_blacklist. - # This is useful for specifying exceptions to wide-ranging blacklisted - # target IP ranges - e.g. for enabling URL previews for a specific private - # website only visible in your network. - # - #url_preview_ip_range_whitelist: - # - '192.168.1.1' - - # Optional list of URL matches that the URL preview spider is - # denied from accessing. You should use url_preview_ip_range_blacklist - # in preference to this, otherwise someone could define a public DNS - # entry that points to a private IP address and circumvent the blacklist. - # This is more useful if you know there is an entire shape of URL that - # you know that will never want synapse to try to spider. - # - # Each list entry is a dictionary of url component attributes as returned - # by urlparse.urlsplit as applied to the absolute form of the URL. See - # https://docs.python.org/2/library/urlparse.html#urlparse.urlsplit - # The values of the dictionary are treated as an filename match pattern - # applied to that component of URLs, unless they start with a ^ in which - # case they are treated as a regular expression match. 
If all the - # specified component matches for a given list item succeed, the URL is - # blacklisted. - # - #url_preview_url_blacklist: - # # blacklist any URL with a username in its URI - # - username: '*' - # - # # blacklist all *.google.com URLs - # - netloc: 'google.com' - # - netloc: '*.google.com' - # - # # blacklist all plain HTTP URLs - # - scheme: 'http' - # - # # blacklist http(s)://www.acme.com/foo - # - netloc: 'www.acme.com' - # path: '/foo' - # - # # blacklist any URL with a literal IPv4 address - # - netloc: '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$' - - # The largest allowed URL preview spidering size in bytes - # - #max_spider_size: 10M - - # A list of values for the Accept-Language HTTP header used when - # downloading webpages during URL preview generation. This allows - # Synapse to specify the preferred languages that URL previews should - # be in when communicating with remote servers. - # - # Each value is a IETF language tag; a 2-3 letter identifier for a - # language, optionally followed by subtags separated by '-', specifying - # a country or region variant. - # - # Multiple values can be provided, and a weight can be added to each by - # using quality value syntax (;q=). '*' translates to any language. - # - # Defaults to "en". - # - # Example: - # - # url_preview_accept_language: - # - en-UK - # - en-US;q=0.9 - # - fr;q=0.8 - # - *;q=0.7 - # - url_preview_accept_language: - # - en - """ - % locals() - ) + return f"media_store_path: {media_store}" diff --git a/synapse/config/retention.py b/synapse/config/retention.py index 03b723b84..033051a9c 100644 --- a/synapse/config/retention.py +++ b/synapse/config/retention.py @@ -153,75 +153,3 @@ class RetentionConfig(Config): self.retention_purge_jobs = [ RetentionPurgeJob(self.parse_duration("1d"), None, None) ] - - def generate_config_section(self, **kwargs: Any) -> str: - return """\ - # Message retention policy at the server level. - # - # Room admins and mods can define a retention period for their rooms using the - # 'm.room.retention' state event, and server admins can cap this period by setting - # the 'allowed_lifetime_min' and 'allowed_lifetime_max' config options. - # - # If this feature is enabled, Synapse will regularly look for and purge events - # which are older than the room's maximum retention period. Synapse will also - # filter events received over federation so that events that should have been - # purged are ignored and not stored again. - # - retention: - # The message retention policies feature is disabled by default. Uncomment the - # following line to enable it. - # - #enabled: true - - # Default retention policy. If set, Synapse will apply it to rooms that lack the - # 'm.room.retention' state event. Currently, the value of 'min_lifetime' doesn't - # matter much because Synapse doesn't take it into account yet. - # - #default_policy: - # min_lifetime: 1d - # max_lifetime: 1y - - # Retention policy limits. If set, and the state of a room contains a - # 'm.room.retention' event in its state which contains a 'min_lifetime' or a - # 'max_lifetime' that's out of these bounds, Synapse will cap the room's policy - # to these limits when running purge jobs. - # - #allowed_lifetime_min: 1d - #allowed_lifetime_max: 1y - - # Server admins can define the settings of the background jobs purging the - # events which lifetime has expired under the 'purge_jobs' section. - # - # If no configuration is provided, a single job will be set up to delete expired - # events in every room daily. 
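The `url_preview_url_blacklist` matching semantics described above (urlsplit components, filename-style globs, `^`-prefixed regexes, and all specified components having to match) can be sketched as follows. This is a hedged approximation, not Synapse's actual matcher; `is_url_blacklisted` is a hypothetical helper.

```python
import re
from fnmatch import fnmatch
from typing import Dict, List
from urllib.parse import urlsplit


def is_url_blacklisted(url: str, blacklist: List[Dict[str, str]]) -> bool:
    parts = urlsplit(url)  # exposes scheme, netloc, path, username, ...
    for entry in blacklist:
        for attribute, pattern in entry.items():
            value = getattr(parts, attribute, None)
            if not value:
                break  # component absent, so this entry cannot match
            if pattern.startswith("^"):
                if not re.match(pattern, value):  # regular expression match
                    break
            elif not fnmatch(value, pattern):  # filename-style glob match
                break
        else:
            return True  # every specified component matched
    return False


blacklist = [
    {"username": "*"},  # any URL with a username in its URI
    {"netloc": "www.acme.com", "path": "/foo"},
]
print(is_url_blacklisted("http://www.acme.com/foo", blacklist))  # True
print(is_url_blacklisted("https://example.org/bar", blacklist))  # False
```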
- # - # Each job's configuration defines which range of message lifetimes the job - # takes care of. For example, if 'shortest_max_lifetime' is '2d' and - # 'longest_max_lifetime' is '3d', the job will handle purging expired events in - # rooms whose state defines a 'max_lifetime' that's both higher than 2 days, and - # lower than or equal to 3 days. Both the minimum and the maximum value of a - # range are optional, e.g. a job with no 'shortest_max_lifetime' and a - # 'longest_max_lifetime' of '3d' will handle every room with a retention policy - # which 'max_lifetime' is lower than or equal to three days. - # - # The rationale for this per-job configuration is that some rooms might have a - # retention policy with a low 'max_lifetime', where history needs to be purged - # of outdated messages on a more frequent basis than for the rest of the rooms - # (e.g. every 12h), but not want that purge to be performed by a job that's - # iterating over every room it knows, which could be heavy on the server. - # - # If any purge job is configured, it is strongly recommended to have at least - # a single job with neither 'shortest_max_lifetime' nor 'longest_max_lifetime' - # set, or one job without 'shortest_max_lifetime' and one job without - # 'longest_max_lifetime' set. Otherwise some rooms might be ignored, even if - # 'allowed_lifetime_min' and 'allowed_lifetime_max' are set, because capping a - # room's policy to these values is done after the policies are retrieved from - # Synapse's database (which is done using the range specified in a purge job's - # configuration). - # - #purge_jobs: - # - longest_max_lifetime: 3d - # interval: 12h - # - shortest_max_lifetime: 3d - # interval: 1d - """ diff --git a/synapse/config/room.py b/synapse/config/room.py index 462d85ac1..4a7ac0054 100644 --- a/synapse/config/room.py +++ b/synapse/config/room.py @@ -75,59 +75,3 @@ class RoomConfig(Config): % preset ) # We validate the actual overrides when we try to apply them. - - def generate_config_section(self, **kwargs: Any) -> str: - return """\ - ## Rooms ## - - # Controls whether locally-created rooms should be end-to-end encrypted by - # default. - # - # Possible options are "all", "invite", and "off". They are defined as: - # - # * "all": any locally-created room - # * "invite": any room created with the "private_chat" or "trusted_private_chat" - # room creation presets - # * "off": this option will take no effect - # - # The default value is "off". - # - # Note that this option will only affect rooms created after it is set. It - # will also not affect rooms created by other servers. - # - #encryption_enabled_by_default_for_room_type: invite - - # Override the default power levels for rooms created on this server, per - # room creation preset. - # - # The appropriate dictionary for the room preset will be applied on top - # of the existing power levels content. - # - # Useful if you know that your users need special permissions in rooms - # that they create (e.g. to send particular types of state events without - # needing an elevated power level). This takes the same shape as the - # `power_level_content_override` parameter in the /createRoom API, but - # is applied before that parameter. - # - # Valid keys are some or all of `private_chat`, `trusted_private_chat` - # and `public_chat`. Inside each of those should be any of the - # properties allowed in `power_level_content_override` in the - # /createRoom API. If any property is missing, its default value will - # continue to be used. 
If any property is present, it will overwrite - # the existing default completely (so if the `events` property exists, - # the default event power levels will be ignored). - # - #default_power_level_content_override: - # private_chat: - # "events": - # "com.example.myeventtype" : 0 - # "m.room.avatar": 50 - # "m.room.canonical_alias": 50 - # "m.room.encryption": 100 - # "m.room.history_visibility": 100 - # "m.room.name": 50 - # "m.room.power_levels": 100 - # "m.room.server_acl": 100 - # "m.room.tombstone": 100 - # "events_default": 1 - """ diff --git a/synapse/config/room_directory.py b/synapse/config/room_directory.py index 717ba70e1..3ed236217 100644 --- a/synapse/config/room_directory.py +++ b/synapse/config/room_directory.py @@ -52,72 +52,6 @@ class RoomDirectoryConfig(Config): _RoomDirectoryRule("room_list_publication_rules", {"action": "allow"}) ] - def generate_config_section(self, **kwargs: Any) -> str: - return """ - # Uncomment to disable searching the public room list. When disabled - # blocks searching local and remote room lists for local and remote - # users by always returning an empty list for all queries. - # - #enable_room_list_search: false - - # The `alias_creation` option controls who's allowed to create aliases - # on this server. - # - # The format of this option is a list of rules that contain globs that - # match against user_id, room_id and the new alias (fully qualified with - # server name). The action in the first rule that matches is taken, - # which can currently either be "allow" or "deny". - # - # Missing user_id/room_id/alias fields default to "*". - # - # If no rules match the request is denied. An empty list means no one - # can create aliases. - # - # Options for the rules include: - # - # user_id: Matches against the creator of the alias - # alias: Matches against the alias being created - # room_id: Matches against the room ID the alias is being pointed at - # action: Whether to "allow" or "deny" the request if the rule matches - # - # The default is: - # - #alias_creation_rules: - # - user_id: "*" - # alias: "*" - # room_id: "*" - # action: allow - - # The `room_list_publication_rules` option controls who can publish and - # which rooms can be published in the public room list. - # - # The format of this option is the same as that for - # `alias_creation_rules`. - # - # If the room has one or more aliases associated with it, only one of - # the aliases needs to match the alias rule. If there are no aliases - # then only rules with `alias: *` match. - # - # If no rules match the request is denied. An empty list means no one - # can publish rooms. 
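As a hedged sketch of the first-match evaluation just described for `alias_creation_rules` (and, with different fields, for `room_list_publication_rules`, whose options follow below), assuming a standalone hypothetical helper rather than Synapse's internal `_RoomDirectoryRule`:

```python
from fnmatch import fnmatch
from typing import Dict, List


def alias_creation_allowed(
    rules: List[Dict[str, str]], user_id: str, room_id: str, alias: str
) -> bool:
    subject = {"user_id": user_id, "room_id": room_id, "alias": alias}
    for rule in rules:
        # missing user_id/room_id/alias fields default to "*"
        if all(fnmatch(subject[k], rule.get(k, "*")) for k in subject):
            # the action in the first rule that matches is taken
            return rule.get("action") == "allow"
    return False  # if no rules match, the request is denied


rules = [
    {"user_id": "@spammer:*", "action": "deny"},
    {"action": "allow"},  # equivalent to the documented default
]
print(alias_creation_allowed(rules, "@alice:hs.test", "!room:hs.test", "#hi:hs.test"))
# True: the first rule does not match, the catch-all allows
```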
- # - # Options for the rules include: - # - # user_id: Matches against the creator of the alias - # room_id: Matches against the room ID being published - # alias: Matches against any current local or canonical aliases - # associated with the room - # action: Whether to "allow" or "deny" the request if the rule matches - # - # The default is: - # - #room_list_publication_rules: - # - user_id: "*" - # alias: "*" - # room_id: "*" - # action: allow - """ - def is_alias_creation_allowed(self, user_id: str, room_id: str, alias: str) -> bool: """Checks if the given user is allowed to create the given alias diff --git a/synapse/config/saml2.py b/synapse/config/saml2.py index 19b2f1b21..bd7c234d3 100644 --- a/synapse/config/saml2.py +++ b/synapse/config/saml2.py @@ -223,189 +223,6 @@ class SAML2Config(Config): }, } - def generate_config_section(self, config_dir_path: str, **kwargs: Any) -> str: - return """\ - ## Single sign-on integration ## - - # The following settings can be used to make Synapse use a single sign-on - # provider for authentication, instead of its internal password database. - # - # You will probably also want to set the following options to `false` to - # disable the regular login/registration flows: - # * enable_registration - # * password_config.enabled - # - # You will also want to investigate the settings under the "sso" configuration - # section below. - - # Enable SAML2 for registration and login. Uses pysaml2. - # - # At least one of `sp_config` or `config_path` must be set in this section to - # enable SAML login. - # - # Once SAML support is enabled, a metadata file will be exposed at - # https://:/_synapse/client/saml2/metadata.xml, which you may be able to - # use to configure your SAML IdP with. Alternatively, you can manually configure - # the IdP to use an ACS location of - # https://:/_synapse/client/saml2/authn_response. - # - saml2_config: - # `sp_config` is the configuration for the pysaml2 Service Provider. - # See pysaml2 docs for format of config. - # - # Default values will be used for the 'entityid' and 'service' settings, - # so it is not normally necessary to specify them unless you need to - # override them. - # - sp_config: - # Point this to the IdP's metadata. You must provide either a local - # file via the `local` attribute or (preferably) a URL via the - # `remote` attribute. - # - #metadata: - # local: ["saml2/idp.xml"] - # remote: - # - url: https://our_idp/metadata.xml - - # Allowed clock difference in seconds between the homeserver and IdP. - # - # Uncomment the below to increase the accepted time difference from 0 to 3 seconds. - # - #accepted_time_diff: 3 - - # By default, the user has to go to our login page first. If you'd like - # to allow IdP-initiated login, set 'allow_unsolicited: true' in a - # 'service.sp' section: - # - #service: - # sp: - # allow_unsolicited: true - - # The examples below are just used to generate our metadata xml, and you - # may well not need them, depending on your setup. Alternatively you - # may need a whole lot more detail - see the pysaml2 docs! - - #description: ["My awesome SP", "en"] - #name: ["Test SP", "en"] - - #ui_info: - # display_name: - # - lang: en - # text: "Display Name is the descriptive name of your service." - # description: - # - lang: en - # text: "Description should be a short paragraph explaining the purpose of the service." 
- # information_url: - # - lang: en - # text: "https://example.com/terms-of-service" - # privacy_statement_url: - # - lang: en - # text: "https://example.com/privacy-policy" - # keywords: - # - lang: en - # text: ["Matrix", "Element"] - # logo: - # - lang: en - # text: "https://example.com/logo.svg" - # width: "200" - # height: "80" - - #organization: - # name: Example com - # display_name: - # - ["Example co", "en"] - # url: "http://example.com" - - #contact_person: - # - given_name: Bob - # sur_name: "the Sysadmin" - # email_address": ["admin@example.com"] - # contact_type": technical - - # Instead of putting the config inline as above, you can specify a - # separate pysaml2 configuration file: - # - #config_path: "%(config_dir_path)s/sp_conf.py" - - # The lifetime of a SAML session. This defines how long a user has to - # complete the authentication process, if allow_unsolicited is unset. - # The default is 15 minutes. - # - #saml_session_lifetime: 5m - - # An external module can be provided here as a custom solution to - # mapping attributes returned from a saml provider onto a matrix user. - # - user_mapping_provider: - # The custom module's class. Uncomment to use a custom module. - # - #module: mapping_provider.SamlMappingProvider - - # Custom configuration values for the module. Below options are - # intended for the built-in provider, they should be changed if - # using a custom module. This section will be passed as a Python - # dictionary to the module's `parse_config` method. - # - config: - # The SAML attribute (after mapping via the attribute maps) to use - # to derive the Matrix ID from. 'uid' by default. - # - # Note: This used to be configured by the - # saml2_config.mxid_source_attribute option. If that is still - # defined, its value will be used instead. - # - #mxid_source_attribute: displayName - - # The mapping system to use for mapping the saml attribute onto a - # matrix ID. - # - # Options include: - # * 'hexencode' (which maps unpermitted characters to '=xx') - # * 'dotreplace' (which replaces unpermitted characters with - # '.'). - # The default is 'hexencode'. - # - # Note: This used to be configured by the - # saml2_config.mxid_mapping option. If that is still defined, its - # value will be used instead. - # - #mxid_mapping: dotreplace - - # In previous versions of synapse, the mapping from SAML attribute to - # MXID was always calculated dynamically rather than stored in a - # table. For backwards- compatibility, we will look for user_ids - # matching such a pattern before creating a new account. - # - # This setting controls the SAML attribute which will be used for this - # backwards-compatibility lookup. Typically it should be 'uid', but if - # the attribute maps are changed, it may be necessary to change it. - # - # The default is 'uid'. - # - #grandfathered_mxid_source_attribute: upn - - # It is possible to configure Synapse to only allow logins if SAML attributes - # match particular values. The requirements can be listed under - # `attribute_requirements` as shown below. All of the listed attributes must - # match for the login to be permitted. - # - #attribute_requirements: - # - attribute: userGroup - # value: "staff" - # - attribute: department - # value: "sales" - - # If the metadata XML contains multiple IdP entities then the `idp_entityid` - # option must be set to the entity to redirect users to. - # - # Most deployments only have a single IdP entity and so should omit this - # option. 
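The `attribute_requirements` check described above ("all of the listed attributes must match") reduces to a small predicate. A minimal sketch, assuming the IdP's attributes arrive as a mapping of names to lists of values, as is typical for SAML:

```python
from typing import Dict, List


def check_attribute_requirements(
    attributes: Dict[str, List[str]],  # attributes returned by the IdP
    requirements: List[Dict[str, str]],
) -> bool:
    # every listed requirement must be satisfied for login to be permitted
    return all(
        req["value"] in attributes.get(req["attribute"], [])
        for req in requirements
    )


requirements = [
    {"attribute": "userGroup", "value": "staff"},
    {"attribute": "department", "value": "sales"},
]
print(check_attribute_requirements(
    {"userGroup": ["staff", "admin"], "department": ["sales"]}, requirements
))  # True: both requirements are met
```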
- # - #idp_entityid: 'https://our_idp/entityid' - """ % { - "config_dir_path": config_dir_path - } - ATTRIBUTE_REQUIREMENTS_SCHEMA = { "type": "array", diff --git a/synapse/config/server.py b/synapse/config/server.py index 657322cb1..828938e5e 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -16,7 +16,6 @@ import argparse import itertools import logging import os.path -import re import urllib.parse from textwrap import indent from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union @@ -702,9 +701,6 @@ class ServerConfig(Config): listeners: Optional[List[dict]], **kwargs: Any, ) -> str: - ip_range_blacklist = "\n".join( - " # - '%s'" % ip for ip in DEFAULT_IP_RANGE_BLACKLIST - ) _, bind_port = parse_and_validate_server_name(server_name) if bind_port is not None: @@ -715,9 +711,6 @@ class ServerConfig(Config): pid_file = os.path.join(data_dir_path, "homeserver.pid") - # Bring DEFAULT_ROOM_VERSION into the local-scope for use in the - # default config string - default_room_version = DEFAULT_ROOM_VERSION secure_listeners = [] unsecure_listeners = [] private_addresses = ["::1", "127.0.0.1"] @@ -765,501 +758,18 @@ class ServerConfig(Config): compress: false""" if listeners: - # comment out this block - unsecure_http_bindings = "#" + re.sub( - "\n {10}", - lambda match: match.group(0) + "#", - unsecure_http_bindings, - ) + unsecure_http_bindings = "" if not secure_listeners: - secure_http_bindings = ( - """#- port: %(bind_port)s - # type: http - # tls: true - # resources: - # - names: [client, federation]""" - % locals() - ) + secure_http_bindings = "" return ( """\ - ## Server ## - - # The public-facing domain of the server - # - # The server_name name will appear at the end of usernames and room addresses - # created on this server. For example if the server_name was example.com, - # usernames on this server would be in the format @user:example.com - # - # In most cases you should avoid using a matrix specific subdomain such as - # matrix.example.com or synapse.example.com as the server_name for the same - # reasons you wouldn't use user@email.example.com as your email address. - # See https://matrix-org.github.io/synapse/latest/delegate.html - # for information on how to host Synapse on a subdomain while preserving - # a clean server_name. - # - # The server_name cannot be changed later so it is important to - # configure this correctly before you start Synapse. It should be all - # lowercase and may contain an explicit port. - # Examples: matrix.org, localhost:8080 - # server_name: "%(server_name)s" - - # When running as a daemon, the file to store the pid in - # pid_file: %(pid_file)s - - # The absolute URL to the web client which / will redirect to. - # - #web_client_location: https://riot.example.com/ - - # The public-facing base URL that clients use to access this Homeserver (not - # including _matrix/...). This is the same URL a user might enter into the - # 'Custom Homeserver URL' field on their client. If you use Synapse with a - # reverse proxy, this should be the URL to reach Synapse via the proxy. - # Otherwise, it should be the URL to reach Synapse's client HTTP listener (see - # 'listeners' below). - # - # Defaults to 'https:///'. - # - #public_baseurl: https://example.com/ - - # Uncomment the following to tell other servers to send federation traffic on - # port 443. - # - # By default, other servers will try to reach our server on port 8448, which can - # be inconvenient in some environments. 
- # - # Provided 'https:///' on port 443 is routed to Synapse, this - # option configures Synapse to serve a file at - # 'https:///.well-known/matrix/server'. This will tell other - # servers to send traffic to port 443 instead. - # - # See https://matrix-org.github.io/synapse/latest/delegate.html for more - # information. - # - # Defaults to 'false'. - # - #serve_server_wellknown: true - - # Set the soft limit on the number of file descriptors synapse can use - # Zero is used to indicate synapse should set the soft limit to the - # hard limit. - # - #soft_file_limit: 0 - - # Presence tracking allows users to see the state (e.g online/offline) - # of other local and remote users. - # - presence: - # Uncomment to disable presence tracking on this homeserver. This option - # replaces the previous top-level 'use_presence' option. - # - #enabled: false - - # Whether to require authentication to retrieve profile data (avatars, - # display names) of other users through the client API. Defaults to - # 'false'. Note that profile data is also available via the federation - # API, unless allow_profile_lookup_over_federation is set to false. - # - #require_auth_for_profile_requests: true - - # Uncomment to require a user to share a room with another user in order - # to retrieve their profile information. Only checked on Client-Server - # requests. Profile requests from other servers should be checked by the - # requesting server. Defaults to 'false'. - # - #limit_profile_requests_to_users_who_share_rooms: true - - # Uncomment to prevent a user's profile data from being retrieved and - # displayed in a room until they have joined it. By default, a user's - # profile data is included in an invite event, regardless of the values - # of the above two settings, and whether or not the users share a server. - # Defaults to 'true'. - # - #include_profile_data_on_invite: false - - # If set to 'true', removes the need for authentication to access the server's - # public rooms directory through the client API, meaning that anyone can - # query the room directory. Defaults to 'false'. - # - #allow_public_rooms_without_auth: true - - # If set to 'true', allows any other homeserver to fetch the server's public - # rooms directory via federation. Defaults to 'false'. - # - #allow_public_rooms_over_federation: true - - # The default room version for newly created rooms. - # - # Known room versions are listed here: - # https://spec.matrix.org/latest/rooms/#complete-list-of-room-versions - # - # For example, for room version 1, default_room_version should be set - # to "1". - # - #default_room_version: "%(default_room_version)s" - - # The GC threshold parameters to pass to `gc.set_threshold`, if defined - # - #gc_thresholds: [700, 10, 10] - - # The minimum time in seconds between each GC for a generation, regardless of - # the GC thresholds. This ensures that we don't do GC too frequently. - # - # A value of `[1s, 10s, 30s]` indicates that a second must pass between consecutive - # generation 0 GCs, etc. - # - # Defaults to `[1s, 10s, 30s]`. - # - #gc_min_interval: [0.5s, 30s, 1m] - - # Set the limit on the returned events in the timeline in the get - # and sync operations. The default value is 100. -1 means no upper limit. - # - # Uncomment the following to increase the limit to 5000. - # - #filter_timeline_limit: 5000 - - # Whether room invites to users on this server should be blocked - # (except those sent by local server admins). The default is False. 
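The `gc_thresholds` option mentioned above maps directly onto the standard library's `gc.set_threshold`, so its effect can be checked from a Python shell:

```python
import gc

# gc_thresholds: [700, 10, 10] in the config corresponds to:
gc.set_threshold(700, 10, 10)
print(gc.get_threshold())  # (700, 10, 10)
```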
- # - #block_non_admin_invites: true - - # Room searching - # - # If disabled, new messages will not be indexed for searching and users - # will receive errors when searching for messages. Defaults to enabled. - # - #enable_search: false - - # Prevent outgoing requests from being sent to the following blacklisted IP address - # CIDR ranges. If this option is not specified then it defaults to private IP - # address ranges (see the example below). - # - # The blacklist applies to the outbound requests for federation, identity servers, - # push servers, and for checking key validity for third-party invite events. - # - # (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly - # listed here, since they correspond to unroutable addresses.) - # - # This option replaces federation_ip_range_blacklist in Synapse v1.25.0. - # - # Note: The value is ignored when an HTTP proxy is in use - # - #ip_range_blacklist: -%(ip_range_blacklist)s - - # List of IP address CIDR ranges that should be allowed for federation, - # identity servers, push servers, and for checking key validity for - # third-party invite events. This is useful for specifying exceptions to - # wide-ranging blacklisted target IP ranges - e.g. for communication with - # a push server only visible in your network. - # - # This whitelist overrides ip_range_blacklist and defaults to an empty - # list. - # - #ip_range_whitelist: - # - '192.168.1.1' - - # List of ports that Synapse should listen on, their purpose and their - # configuration. - # - # Options for each listener include: - # - # port: the TCP port to bind to - # - # bind_addresses: a list of local addresses to listen on. The default is - # 'all local interfaces'. - # - # type: the type of listener. Normally 'http', but other valid options are: - # 'manhole' (see https://matrix-org.github.io/synapse/latest/manhole.html), - # 'metrics' (see https://matrix-org.github.io/synapse/latest/metrics-howto.html), - # 'replication' (see https://matrix-org.github.io/synapse/latest/workers.html). - # - # tls: set to true to enable TLS for this listener. Will use the TLS - # key/cert specified in tls_private_key_path / tls_certificate_path. - # - # x_forwarded: Only valid for an 'http' listener. Set to true to use the - # X-Forwarded-For header as the client IP. Useful when Synapse is - # behind a reverse-proxy. - # - # resources: Only valid for an 'http' listener. A list of resources to host - # on this port. Options for each resource are: - # - # names: a list of names of HTTP resources. See below for a list of - # valid resource names. - # - # compress: set to true to enable HTTP compression for this resource. - # - # additional_resources: Only valid for an 'http' listener. A map of - # additional endpoints which should be loaded via dynamic modules. - # - # Valid resource names are: - # - # client: the client-server API (/_matrix/client), and the synapse admin - # API (/_synapse/admin). Also implies 'media' and 'static'. - # - # consent: user consent forms (/_matrix/consent). - # See https://matrix-org.github.io/synapse/latest/consent_tracking.html. - # - # federation: the server-server API (/_matrix/federation). Also implies - # 'media', 'keys', 'openid' - # - # keys: the key discovery API (/_matrix/key). - # - # media: the media API (/_matrix/media). - # - # metrics: the metrics interface. - # See https://matrix-org.github.io/synapse/latest/metrics-howto.html. - # - # openid: OpenID authentication. - # - # replication: the HTTP replication API (/_synapse/replication). 
- # See https://matrix-org.github.io/synapse/latest/workers.html. - # - # static: static resources under synapse/static (/_matrix/static). (Mostly - # useful for 'fallback authentication'.) - # listeners: - # TLS-enabled listener: for when matrix traffic is sent directly to synapse. - # - # Disabled by default. To enable it, uncomment the following. (Note that you - # will also need to give Synapse a TLS key and certificate: see the TLS section - # below.) - # %(secure_http_bindings)s - - # Unsecure HTTP listener: for when matrix traffic passes through a reverse proxy - # that unwraps TLS. - # - # If you plan to use a reverse proxy, please see - # https://matrix-org.github.io/synapse/latest/reverse_proxy.html. - # %(unsecure_http_bindings)s - - # example additional_resources: - # - #additional_resources: - # "/_matrix/my/custom/endpoint": - # module: my_module.CustomRequestHandler - # config: {} - - # Turn on the twisted ssh manhole service on localhost on the given - # port. - # - #- port: 9000 - # bind_addresses: ['::1', '127.0.0.1'] - # type: manhole - - # Connection settings for the manhole - # - manhole_settings: - # The username for the manhole. This defaults to 'matrix'. - # - #username: manhole - - # The password for the manhole. This defaults to 'rabbithole'. - # - #password: mypassword - - # The private and public SSH key pair used to encrypt the manhole traffic. - # If these are left unset, then hardcoded and non-secret keys are used, - # which could allow traffic to be intercepted if sent over a public network. - # - #ssh_priv_key_path: %(config_dir_path)s/id_rsa - #ssh_pub_key_path: %(config_dir_path)s/id_rsa.pub - - # Forward extremities can build up in a room due to networking delays between - # homeservers. Once this happens in a large room, calculation of the state of - # that room can become quite expensive. To mitigate this, once the number of - # forward extremities reaches a given threshold, Synapse will send an - # org.matrix.dummy_event event, which will reduce the forward extremities - # in the room. - # - # This setting defines the threshold (i.e. number of forward extremities in the - # room) at which dummy events are sent. The default value is 10. - # - #dummy_events_threshold: 5 - - - ## Homeserver blocking ## - - # How to reach the server admin, used in ResourceLimitError - # - #admin_contact: 'mailto:admin@server.com' - - # Global blocking - # - #hs_disabled: false - #hs_disabled_message: 'Human readable reason for why the HS is blocked' - - # Monthly Active User Blocking - # - # Used in cases where the admin or server owner wants to limit to the - # number of monthly active users. - # - # 'limit_usage_by_mau' disables/enables monthly active user blocking. When - # enabled and a limit is reached the server returns a 'ResourceLimitError' - # with error type Codes.RESOURCE_LIMIT_EXCEEDED - # - # 'max_mau_value' is the hard limit of monthly active users above which - # the server will start blocking user actions. - # - # 'mau_trial_days' is a means to add a grace period for active users. It - # means that users must be active for this number of days before they - # can be considered active and guards against the case where lots of users - # sign up in a short space of time never to return after their initial - # session. - # - # The option `mau_appservice_trial_days` is similar to `mau_trial_days`, but - # applies a different trial number if the user was registered by an appservice. - # A value of 0 means no trial days are applied. 
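A minimal sketch of the monthly-active-user gate described above: when the limit is reached, requests fail with `Codes.RESOURCE_LIMIT_EXCEEDED` and the configured `admin_contact`. The `ResourceLimitError` below is a simplified stand-in for `synapse.api.errors.ResourceLimitError`, and `assert_mau_not_exceeded` is hypothetical.

```python
class ResourceLimitError(Exception):
    """Simplified stand-in for synapse.api.errors.ResourceLimitError."""

    def __init__(self, code: int, msg: str, errcode: str):
        super().__init__(msg)
        self.code = code
        self.errcode = errcode


def assert_mau_not_exceeded(
    limit_usage_by_mau: bool,
    current_mau: int,
    max_mau_value: int,
    admin_contact: str = "mailto:admin@server.com",
) -> None:
    if limit_usage_by_mau and current_mau >= max_mau_value:
        # Codes.RESOURCE_LIMIT_EXCEEDED is the "M_RESOURCE_LIMIT_EXCEEDED"
        # error code that clients receive in this situation.
        raise ResourceLimitError(
            403,
            f"Monthly Active User limit reached; contact {admin_contact}",
            errcode="M_RESOURCE_LIMIT_EXCEEDED",
        )


assert_mau_not_exceeded(True, current_mau=49, max_mau_value=50)  # no error
```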
Appservices not listed in this - # dictionary use the value of `mau_trial_days` instead. - # - # 'mau_limit_alerting' is a means of limiting client side alerting - # should the mau limit be reached. This is useful for small instances - # where the admin has 5 mau seats (say) for 5 specific people and no - # interest increasing the mau limit further. Defaults to True, which - # means that alerting is enabled - # - #limit_usage_by_mau: false - #max_mau_value: 50 - #mau_trial_days: 2 - #mau_limit_alerting: false - #mau_appservice_trial_days: - # "appservice-id": 1 - - # If enabled, the metrics for the number of monthly active users will - # be populated, however no one will be limited. If limit_usage_by_mau - # is true, this is implied to be true. - # - #mau_stats_only: false - - # Sometimes the server admin will want to ensure certain accounts are - # never blocked by mau checking. These accounts are specified here. - # - #mau_limit_reserved_threepids: - # - medium: 'email' - # address: 'reserved_user@example.com' - - # Used by phonehome stats to group together related servers. - #server_context: context - - # Resource-constrained homeserver settings - # - # When this is enabled, the room "complexity" will be checked before a user - # joins a new remote room. If it is above the complexity limit, the server will - # disallow joining, or will instantly leave. - # - # Room complexity is an arbitrary measure based on factors such as the number of - # users in the room. - # - limit_remote_rooms: - # Uncomment to enable room complexity checking. - # - #enabled: true - - # the limit above which rooms cannot be joined. The default is 1.0. - # - #complexity: 0.5 - - # override the error which is returned when the room is too complex. - # - #complexity_error: "This room is too complex." - - # allow server admins to join complex rooms. Default is false. - # - #admins_can_join: true - - # Whether to require a user to be in the room to add an alias to it. - # Defaults to 'true'. - # - #require_membership_for_aliases: false - - # Whether to allow per-room membership profiles through the send of membership - # events with profile information that differ from the target's global profile. - # Defaults to 'true'. - # - #allow_per_room_profiles: false - - # The largest allowed file size for a user avatar. Defaults to no restriction. - # - # Note that user avatar changes will not work if this is set without - # using Synapse's media repository. - # - #max_avatar_size: 10M - - # The MIME types allowed for user avatars. Defaults to no restriction. - # - # Note that user avatar changes will not work if this is set without - # using Synapse's media repository. - # - #allowed_avatar_mimetypes: ["image/png", "image/jpeg", "image/gif"] - - # How long to keep redacted events in unredacted form in the database. After - # this period redacted events get replaced with their redacted form in the DB. - # - # Defaults to `7d`. Set to `null` to disable. - # - #redaction_retention_period: 28d - - # How long to track users' last seen time and IPs in the database. - # - # Defaults to `28d`. Set to `null` to disable clearing out of old rows. - # - #user_ips_max_age: 14d - - # Inhibits the /requestToken endpoints from returning an error that might leak - # information about whether an e-mail address is in use or not on this - # homeserver. - # Note that for some endpoints the error situation is the e-mail already being - # used, and for others the error is entering the e-mail being unused. 
- # If this option is enabled, instead of returning an error, these endpoints will - # act as if no error happened and return a fake session ID ('sid') to clients. - # - #request_token_inhibit_3pid_errors: true - - # A list of domains that the domain portion of 'next_link' parameters - # must match. - # - # This parameter is optionally provided by clients while requesting - # validation of an email or phone number, and maps to a link that - # users will be automatically redirected to after validation - # succeeds. Clients can make use this parameter to aid the validation - # process. - # - # The whitelist is applied whether the homeserver or an - # identity server is handling validation. - # - # The default value is no whitelist functionality; all domains are - # allowed. Setting this value to an empty list will instead disallow - # all domains. - # - #next_link_domain_whitelist: ["matrix.org"] - - # Templates to use when generating email or HTML page contents. - # - templates: - # Directory in which Synapse will try to find template files to use to generate - # email or HTML page contents. - # If not set, or a file is not found within the template directory, a default - # template from within the Synapse package will be used. - # - # See https://matrix-org.github.io/synapse/latest/templates.html for more - # information about using custom templates. - # - #custom_template_directory: /path/to/custom/templates/ - - # List of rooms to exclude from sync responses. This is useful for server - # administrators wishing to group users into a room without these users being able - # to see it from their client. - # - # By default, no room is excluded. - # - #exclude_rooms_from_sync: - # - !foo:example.com """ % locals() ) diff --git a/synapse/config/server_notices.py b/synapse/config/server_notices.py index 505b4f6c6..ce041abe9 100644 --- a/synapse/config/server_notices.py +++ b/synapse/config/server_notices.py @@ -18,27 +18,6 @@ from synapse.types import JsonDict, UserID from ._base import Config -DEFAULT_CONFIG = """\ -# Server Notices room configuration -# -# Uncomment this section to enable a room which can be used to send notices -# from the server to users. It is a special room which cannot be left; notices -# come from a special "notices" user id. -# -# If you uncomment this section, you *must* define the system_mxid_localpart -# setting, which defines the id of the user which will be used to send the -# notices. -# -# It's also possible to override the room name, the display name of the -# "notices" user, and the avatar for the user. -# -#server_notices: -# system_mxid_localpart: notices -# system_mxid_display_name: "Server Notices" -# system_mxid_avatar_url: "mxc://server.com/oumMVlgDnLYFaPVkExemNVVZ" -# room_name: "Server Notices" -""" - class ServerNoticesConfig(Config): """Configuration for the server notices room. 
@@ -83,6 +62,3 @@ class ServerNoticesConfig(Config): self.server_notices_mxid_avatar_url = c.get("system_mxid_avatar_url", None) # todo: i18n self.server_notices_room_name = c.get("room_name", "Server Notices") - - def generate_config_section(self, **kwargs: Any) -> str: - return DEFAULT_CONFIG diff --git a/synapse/config/sso.py b/synapse/config/sso.py index f88eba77d..2178cbf98 100644 --- a/synapse/config/sso.py +++ b/synapse/config/sso.py @@ -107,43 +107,3 @@ class SSOConfig(Config): self.root.server.public_baseurl + "_matrix/static/client/login" ) self.sso_client_whitelist.append(login_fallback_url) - - def generate_config_section(self, **kwargs: Any) -> str: - return """\ - # Additional settings to use with single-sign on systems such as OpenID Connect, - # SAML2 and CAS. - # - # Server admins can configure custom templates for pages related to SSO. See - # https://matrix-org.github.io/synapse/latest/templates.html for more information. - # - sso: - # A list of client URLs which are whitelisted so that the user does not - # have to confirm giving access to their account to the URL. Any client - # whose URL starts with an entry in the following list will not be subject - # to an additional confirmation step after the SSO login is completed. - # - # WARNING: An entry such as "https://my.client" is insecure, because it - # will also match "https://my.client.evil.site", exposing your users to - # phishing attacks from evil.site. To avoid this, include a slash after the - # hostname: "https://my.client/". - # - # The login fallback page (used by clients that don't natively support the - # required login flows) is whitelisted in addition to any URLs in this list. - # - # By default, this list contains only the login fallback page. - # - #client_whitelist: - # - https://riot.im/develop - # - https://my.custom.client/ - - # Uncomment to keep a user's profile fields in sync with information from - # the identity provider. Currently only syncing the displayname is - # supported. Fields are checked on every SSO login, and are updated - # if necessary. - # - # Note that enabling this option will override user profile information, - # regardless of whether users have opted-out of syncing that - # information when first signing in. Defaults to false. - # - #update_profile_information: true - """ diff --git a/synapse/config/stats.py b/synapse/config/stats.py index ed1f416e4..9621acd77 100644 --- a/synapse/config/stats.py +++ b/synapse/config/stats.py @@ -46,16 +46,3 @@ class StatsConfig(Config): self.stats_enabled = stats_config.get("enabled", self.stats_enabled) if not self.stats_enabled: logger.warning(ROOM_STATS_DISABLED_WARN) - - def generate_config_section(self, **kwargs: Any) -> str: - return """ - # Settings for local room and user statistics collection. See - # https://matrix-org.github.io/synapse/latest/room_and_user_statistics.html. - # - stats: - # Uncomment the following to disable room and user statistics. Note that doing - # so may cause certain features (such as the room directory) not to work - # correctly. - # - #enabled: false - """ diff --git a/synapse/config/tls.py b/synapse/config/tls.py index cb17950d2..336fe3e0d 100644 --- a/synapse/config/tls.py +++ b/synapse/config/tls.py @@ -13,7 +13,6 @@ # limitations under the License. 
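The `sso.client_whitelist` comparison described in the section above is a plain prefix match, which is exactly why the comment warns to include a trailing slash after the hostname. A small sketch of the pitfall:

```python
from typing import List


def is_client_whitelisted(redirect_url: str, client_whitelist: List[str]) -> bool:
    # any client whose URL *starts with* a whitelist entry skips confirmation
    return any(redirect_url.startswith(entry) for entry in client_whitelist)


# Without the trailing slash, a look-alike domain slips through:
print(is_client_whitelisted("https://my.client.evil.site/cb", ["https://my.client"]))   # True
print(is_client_whitelisted("https://my.client.evil.site/cb", ["https://my.client/"]))  # False
print(is_client_whitelisted("https://my.client/cb", ["https://my.client/"]))            # True
```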
import logging -import os from typing import Any, List, Optional, Pattern from matrix_common.regex import glob_to_regex @@ -143,9 +142,6 @@ class TlsConfig(Config): def generate_config_section( self, - config_dir_path: str, - data_dir_path: str, - server_name: str, tls_certificate_path: Optional[str], tls_private_key_path: Optional[str], **kwargs: Any, @@ -153,90 +149,18 @@ class TlsConfig(Config): """If the TLS paths are not specified the default will be certs in the config directory""" - base_key_name = os.path.join(config_dir_path, server_name) - if bool(tls_certificate_path) != bool(tls_private_key_path): raise ConfigError( "Please specify both a cert path and a key path or neither." ) - tls_enabled = "" if tls_certificate_path and tls_private_key_path else "#" - - if not tls_certificate_path: - tls_certificate_path = base_key_name + ".tls.crt" - if not tls_private_key_path: - tls_private_key_path = base_key_name + ".tls.key" - - # flake8 doesn't recognise that variables are used in the below string - _ = tls_enabled - - return ( - """\ - ## TLS ## - - # PEM-encoded X509 certificate for TLS. - # This certificate, as of Synapse 1.0, will need to be a valid and verifiable - # certificate, signed by a recognised Certificate Authority. - # - # Be sure to use a `.pem` file that includes the full certificate chain including - # any intermediate certificates (for instance, if using certbot, use - # `fullchain.pem` as your certificate, not `cert.pem`). - # - %(tls_enabled)stls_certificate_path: "%(tls_certificate_path)s" - - # PEM-encoded private key for TLS - # - %(tls_enabled)stls_private_key_path: "%(tls_private_key_path)s" - - # Whether to verify TLS server certificates for outbound federation requests. - # - # Defaults to `true`. To disable certificate verification, uncomment the - # following line. - # - #federation_verify_certificates: false - - # The minimum TLS version that will be used for outbound federation requests. - # - # Defaults to `1`. Configurable to `1`, `1.1`, `1.2`, or `1.3`. Note - # that setting this value higher than `1.2` will prevent federation to most - # of the public Matrix network: only configure it to `1.3` if you have an - # entirely private federation setup and you can ensure TLS 1.3 support. - # - #federation_client_minimum_tls_version: 1.2 - - # Skip federation certificate verification on the following whitelist - # of domains. - # - # This setting should only be used in very specific cases, such as - # federation over Tor hidden services and similar. For private networks - # of homeservers, you likely want to use a private CA instead. - # - # Only effective if federation_verify_certicates is `true`. - # - #federation_certificate_verification_whitelist: - # - lon.example.com - # - "*.domain.com" - # - "*.onion" - - # List of custom certificate authorities for federation traffic. - # - # This setting should only normally be used within a private network of - # homeservers. - # - # Note that this list will replace those that are provided by your - # operating environment. Certificates must be in PEM format. 
- # - #federation_custom_ca_list: - # - myCA1.pem - # - myCA2.pem - # - myCA3.pem - """ - # Lowercase the string representation of boolean values - % { - x[0]: str(x[1]).lower() if isinstance(x[1], bool) else x[1] - for x in locals().items() - } - ) + if tls_certificate_path and tls_private_key_path: + return f"""\ + tls_certificate_path: {tls_certificate_path} + tls_private_key_path: {tls_private_key_path} + """ + else: + return "" def read_tls_certificate(self) -> crypto.X509: """Reads the TLS certificate from the configured file, and returns it diff --git a/synapse/config/tracer.py b/synapse/config/tracer.py index ae68a3dd1..6fbf927f1 100644 --- a/synapse/config/tracer.py +++ b/synapse/config/tracer.py @@ -67,53 +67,3 @@ class TracerConfig(Config): ("opentracing", "force_tracing_for_users", f"index {i}"), ) self.force_tracing_for_users.add(u) - - def generate_config_section(cls, **kwargs: Any) -> str: - return """\ - ## Opentracing ## - - # These settings enable opentracing, which implements distributed tracing. - # This allows you to observe the causal chains of events across servers - # including requests, key lookups etc., across any server running - # synapse or any other other services which supports opentracing - # (specifically those implemented with Jaeger). - # - opentracing: - # tracing is disabled by default. Uncomment the following line to enable it. - # - #enabled: true - - # The list of homeservers we wish to send and receive span contexts and span baggage. - # See https://matrix-org.github.io/synapse/latest/opentracing.html. - # - # This is a list of regexes which are matched against the server_name of the - # homeserver. - # - # By default, it is empty, so no servers are matched. - # - #homeserver_whitelist: - # - ".*" - - # A list of the matrix IDs of users whose requests will always be traced, - # even if the tracing system would otherwise drop the traces due to - # probabilistic sampling. - # - # By default, the list is empty. - # - #force_tracing_for_users: - # - "@user1:server_name" - # - "@user2:server_name" - - # Jaeger can be configured to sample traces at different rates. - # All configuration options provided by Jaeger can be set here. - # Jaeger's configuration is mostly related to trace sampling which - # is documented here: - # https://www.jaegertracing.io/docs/latest/sampling/. - # - #jaeger_config: - # sampler: - # type: const - # param: 1 - # logging: - # false - """ diff --git a/synapse/config/user_directory.py b/synapse/config/user_directory.py index 010e79192..c9e18b91e 100644 --- a/synapse/config/user_directory.py +++ b/synapse/config/user_directory.py @@ -35,42 +35,3 @@ class UserDirectoryConfig(Config): self.user_directory_search_prefer_local_users = user_directory_config.get( "prefer_local_users", False ) - - def generate_config_section(self, **kwargs: Any) -> str: - return """ - # User Directory configuration - # - user_directory: - # Defines whether users can search the user directory. If false then - # empty responses are returned to all queries. Defaults to true. - # - # Uncomment to disable the user directory. - # - #enabled: false - - # Defines whether to search all users visible to your HS when searching - # the user directory. If false, search results will only contain users - # visible in public rooms and users sharing a room with the requester. - # Defaults to false. - # - # NB. 
If you set this to true, and the last time the user_directory search - # indexes were (re)built was before Synapse 1.44, you'll have to - # rebuild the indexes in order to search through all known users. - # These indexes are built the first time Synapse starts; admins can - # manually trigger a rebuild via API following the instructions at - # https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/background_updates.html#run - # - # Uncomment to return search results containing all known users, even if that - # user does not share a room with the requester. - # - #search_all_users: true - - # Defines whether to prefer local users in search query results. - # If True, local users are more likely to appear above remote users - # when searching the user directory. Defaults to false. - # - # Uncomment to prefer local over remote users in user directory search - # results. - # - #prefer_local_users: true - """ diff --git a/synapse/config/voip.py b/synapse/config/voip.py index 87c09abe2..43f0a0fa1 100644 --- a/synapse/config/voip.py +++ b/synapse/config/voip.py @@ -31,34 +31,3 @@ class VoipConfig(Config): config.get("turn_user_lifetime", "1h") ) self.turn_allow_guests = config.get("turn_allow_guests", True) - - def generate_config_section(self, **kwargs: Any) -> str: - return """\ - ## TURN ## - - # The public URIs of the TURN server to give to clients - # - #turn_uris: [] - - # The shared secret used to compute passwords for the TURN server - # - #turn_shared_secret: "YOUR_SHARED_SECRET" - - # The Username and password if the TURN server needs them and - # does not use a token - # - #turn_username: "TURNSERVER_USERNAME" - #turn_password: "TURNSERVER_PASSWORD" - - # How long generated TURN credentials last - # - #turn_user_lifetime: 1h - - # Whether guests should be allowed to use the TURN server. - # This defaults to True, otherwise VoIP will be unreliable for guests. - # However, it does introduce a slight security risk as it allows users to - # connect to arbitrary endpoints without having first signed up for a - # valid account (e.g. by passing a CAPTCHA). - # - #turn_allow_guests: true - """ diff --git a/synapse/config/workers.py b/synapse/config/workers.py index e1569b3c1..f2716422b 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -410,55 +410,6 @@ class WorkerConfig(Config): # (By this point, these are either the same value or only one is not None.) return bool(new_option_should_run_here or legacy_option_should_run_here) - def generate_config_section(self, **kwargs: Any) -> str: - return """\ - ## Workers ## - - # Disables sending of outbound federation transactions on the main process. - # Uncomment if using a federation sender worker. - # - #send_federation: false - - # It is possible to run multiple federation sender workers, in which case the - # work is balanced across them. - # - # This configuration must be shared between all federation sender workers, and if - # changed all federation sender workers must be stopped at the same time and then - # started, to ensure that all instances are running with the same config (otherwise - # events may be dropped). - # - #federation_sender_instances: - # - federation_sender1 - - # When using workers this should be a map from `worker_name` to the - # HTTP replication listener of the worker, if configured. 
- # - #instance_map: - # worker1: - # host: localhost - # port: 8034 - - # Experimental: When using workers you can define which workers should - # handle event persistence and typing notifications. Any worker - # specified here must also be in the `instance_map`. - # - #stream_writers: - # events: worker1 - # typing: worker1 - - # The worker that is used to run background tasks (e.g. cleaning up expired - # data). If not provided this defaults to the main process. - # - #run_background_tasks_on: worker1 - - # A shared secret used by the replication APIs to authenticate HTTP requests - # from workers. - # - # By default this is unused and traffic is not authenticated. - # - #worker_replication_secret: "" - """ - def read_arguments(self, args: argparse.Namespace) -> None: # We support a bunch of command line arguments that override options in # the config. A lot of these options have a worker_* prefix when running From bdb6628dcf303e38960a56a9f97da71033826287 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 14 Jun 2022 17:24:25 +0200 Subject: [PATCH 36/85] Fix version number in spam checker callbacks doc (#13047) --- changelog.d/13047.feature | 1 + docs/modules/spam_checker_callbacks.md | 12 ++++++------ 2 files changed, 7 insertions(+), 6 deletions(-) create mode 100644 changelog.d/13047.feature diff --git a/changelog.d/13047.feature b/changelog.d/13047.feature new file mode 100644 index 000000000..ddd1dbe68 --- /dev/null +++ b/changelog.d/13047.feature @@ -0,0 +1 @@ +Port spam-checker API callbacks to a new, richer API. This is part of an ongoing change to let spam-checker modules inform users of the reason their event or operation is rejected. diff --git a/docs/modules/spam_checker_callbacks.md b/docs/modules/spam_checker_callbacks.md index 8ca7d5bdb..50969edd4 100644 --- a/docs/modules/spam_checker_callbacks.md +++ b/docs/modules/spam_checker_callbacks.md @@ -70,7 +70,7 @@ this callback. _First introduced in Synapse v1.37.0_ -_Changed in Synapse v1.61.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._ +_Changed in Synapse v1.62.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._ ```python async def user_may_invite(inviter: str, invitee: str, room_id: str) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool] @@ -100,7 +100,7 @@ this callback. _First introduced in Synapse v1.45.0_ -_Changed in Synapse v1.61.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._ +_Changed in Synapse v1.62.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._ ```python async def user_may_send_3pid_invite( @@ -154,7 +154,7 @@ this callback. _First introduced in Synapse v1.37.0_ -_Changed in Synapse v1.61.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._ +_Changed in Synapse v1.62.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._ ```python async def user_may_create_room(user_id: str) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool] @@ -183,7 +183,7 @@ this callback. 
_First introduced in Synapse v1.37.0_ -_Changed in Synapse v1.61.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._ +_Changed in Synapse v1.62.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._ ```python async def user_may_create_room_alias(user_id: str, room_alias: "synapse.module_api.RoomAlias") -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool] @@ -212,7 +212,7 @@ this callback. _First introduced in Synapse v1.37.0_ -_Changed in Synapse v1.61.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._ +_Changed in Synapse v1.62.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._ ```python async def user_may_publish_room(user_id: str, room_id: str) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool] @@ -303,7 +303,7 @@ this callback. _First introduced in Synapse v1.37.0_ -_Changed in Synapse v1.61.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._ +_Changed in Synapse v1.62.0: `synapse.module_api.NOT_SPAM` and `synapse.module_api.errors.Codes` can be returned by this callback. Returning a boolean is now deprecated._ ```python async def check_media_file_for_spam( From aef398457fff33d6ac079e2665ed725397ade7f5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 14 Jun 2022 17:59:06 +0100 Subject: [PATCH 37/85] Up complement time outs (#13048) --- changelog.d/13048.misc | 1 + scripts-dev/complement.sh | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/13048.misc diff --git a/changelog.d/13048.misc b/changelog.d/13048.misc new file mode 100644 index 000000000..073c8b1a9 --- /dev/null +++ b/changelog.d/13048.misc @@ -0,0 +1 @@ +Increase timeout of complement CI test runs. diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index 30b974b95..52ef1fd07 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -60,6 +60,9 @@ test_tags="synapse_blacklist,msc2716,msc3030,msc3787" # (The prefix is stripped off before reaching the container.) export COMPLEMENT_SHARE_ENV_PREFIX=PASS_ +# It takes longer than 10m to run the whole suite. +extra_test_args+=("-timeout=60m") + if [[ -n "$WORKERS" ]]; then # Use workers. export PASS_SYNAPSE_COMPLEMENT_USE_WORKERS=true @@ -73,9 +76,6 @@ if [[ -n "$WORKERS" ]]; then # time (the main problem is that we start 14 python processes for each test, # and complement likes to do two of them in parallel). export COMPLEMENT_SPAWN_HS_TIMEOUT_SECS=120 - - # ... and it takes longer than 10m to run the whole suite. - extra_test_args+=("-timeout=60m") else export PASS_SYNAPSE_COMPLEMENT_USE_WORKERS= if [[ -n "$POSTGRES" ]]; then From c99b511db950bff5129e717a225de78b95b9b5ad Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 14 Jun 2022 18:28:26 +0100 Subject: [PATCH 38/85] Fix `destination_is` errors seen in sentry. 
(#13041)

* Rename test_fedclient to match its source file
* Require at least one destination to be truthy
* Explicitly validate user ID in profile endpoint GETs

Co-authored-by: Patrick Cloke
---
 changelog.d/13041.bugfix                      |  2 ++
 synapse/http/matrixfederationclient.py        |  7 +++++--
 synapse/rest/client/profile.py                | 20 +++++++++++++++----
 synapse/types.py                              |  3 ++-
 ...ient.py => test_matrixfederationclient.py} | 14 +++++++++++++
 tests/rest/client/test_profile.py             |  8 ++++++++
 tests/test_types.py                           | 13 +++++++++++-
 7 files changed, 59 insertions(+), 8 deletions(-)
 create mode 100644 changelog.d/13041.bugfix
 rename tests/http/{test_fedclient.py => test_matrixfederationclient.py} (96%)

diff --git a/changelog.d/13041.bugfix b/changelog.d/13041.bugfix
new file mode 100644
index 000000000..edb1635eb
--- /dev/null
+++ b/changelog.d/13041.bugfix
@@ -0,0 +1,2 @@
+Fix a bug introduced in Synapse 1.58 where profile requests for a malformed user ID would cause an internal error. Synapse now returns 400 Bad Request in this situation.
+
diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index 776ed43f0..c63d068f7 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -731,8 +731,11 @@ class MatrixFederationHttpClient:
         Returns:
             A list of headers to be added as "Authorization:" headers
         """
-        if destination is None and destination_is is None:
-            raise ValueError("destination and destination_is cannot both be None!")
+        if not destination and not destination_is:
+            raise ValueError(
+                "At least one of the arguments destination and destination_is "
+                "must be a nonempty bytestring."
+            )

         request: JsonDict = {
             "method": method.decode("ascii"),
diff --git a/synapse/rest/client/profile.py b/synapse/rest/client/profile.py
index c684636c0..c16d70790 100644
--- a/synapse/rest/client/profile.py
+++ b/synapse/rest/client/profile.py
@@ -13,7 +13,7 @@
 # limitations under the License.
""" This module contains REST servlets to do with profile: /profile/ """ - +from http import HTTPStatus from typing import TYPE_CHECKING, Tuple from synapse.api.errors import Codes, SynapseError @@ -45,8 +45,12 @@ class ProfileDisplaynameRestServlet(RestServlet): requester = await self.auth.get_user_by_req(request) requester_user = requester.user - user = UserID.from_string(user_id) + if not UserID.is_valid(user_id): + raise SynapseError( + HTTPStatus.BAD_REQUEST, "Invalid user id", Codes.INVALID_PARAM + ) + user = UserID.from_string(user_id) await self.profile_handler.check_profile_query_allowed(user, requester_user) displayname = await self.profile_handler.get_displayname(user) @@ -98,8 +102,12 @@ class ProfileAvatarURLRestServlet(RestServlet): requester = await self.auth.get_user_by_req(request) requester_user = requester.user - user = UserID.from_string(user_id) + if not UserID.is_valid(user_id): + raise SynapseError( + HTTPStatus.BAD_REQUEST, "Invalid user id", Codes.INVALID_PARAM + ) + user = UserID.from_string(user_id) await self.profile_handler.check_profile_query_allowed(user, requester_user) avatar_url = await self.profile_handler.get_avatar_url(user) @@ -150,8 +158,12 @@ class ProfileRestServlet(RestServlet): requester = await self.auth.get_user_by_req(request) requester_user = requester.user - user = UserID.from_string(user_id) + if not UserID.is_valid(user_id): + raise SynapseError( + HTTPStatus.BAD_REQUEST, "Invalid user id", Codes.INVALID_PARAM + ) + user = UserID.from_string(user_id) await self.profile_handler.check_profile_query_allowed(user, requester_user) displayname = await self.profile_handler.get_displayname(user) diff --git a/synapse/types.py b/synapse/types.py index 0586d2cbb..668d48d64 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -267,7 +267,6 @@ class DomainSpecificString(metaclass=abc.ABCMeta): ) domain = parts[1] - # This code will need changing if we want to support multiple domain # names on one HS return cls(localpart=parts[0], domain=domain) @@ -279,6 +278,8 @@ class DomainSpecificString(metaclass=abc.ABCMeta): @classmethod def is_valid(cls: Type[DS], s: str) -> bool: """Parses the input string and attempts to ensure it is valid.""" + # TODO: this does not reject an empty localpart or an overly-long string. + # See https://spec.matrix.org/v1.2/appendices/#identifier-grammar try: obj = cls.from_string(s) # Apply additional validation to the domain. 
This is only done diff --git a/tests/http/test_fedclient.py b/tests/http/test_matrixfederationclient.py similarity index 96% rename from tests/http/test_fedclient.py rename to tests/http/test_matrixfederationclient.py index 006dbab09..be9eaf34e 100644 --- a/tests/http/test_fedclient.py +++ b/tests/http/test_matrixfederationclient.py @@ -617,3 +617,17 @@ class FederationClientTests(HomeserverTestCase): self.assertIsInstance(f.value, RequestSendFailed) self.assertTrue(transport.disconnecting) + + def test_build_auth_headers_rejects_falsey_destinations(self) -> None: + with self.assertRaises(ValueError): + self.cl.build_auth_headers(None, b"GET", b"https://example.com") + with self.assertRaises(ValueError): + self.cl.build_auth_headers(b"", b"GET", b"https://example.com") + with self.assertRaises(ValueError): + self.cl.build_auth_headers( + None, b"GET", b"https://example.com", destination_is=b"" + ) + with self.assertRaises(ValueError): + self.cl.build_auth_headers( + b"", b"GET", b"https://example.com", destination_is=b"" + ) diff --git a/tests/rest/client/test_profile.py b/tests/rest/client/test_profile.py index 77c3ced42..29bed0e87 100644 --- a/tests/rest/client/test_profile.py +++ b/tests/rest/client/test_profile.py @@ -13,6 +13,8 @@ # limitations under the License. """Tests REST events for /profile paths.""" +import urllib.parse +from http import HTTPStatus from typing import Any, Dict, Optional from twisted.test.proto_helpers import MemoryReactor @@ -49,6 +51,12 @@ class ProfileTestCase(unittest.HomeserverTestCase): res = self._get_displayname() self.assertEqual(res, "owner") + def test_get_displayname_rejects_bad_username(self) -> None: + channel = self.make_request( + "GET", f"/profile/{urllib.parse.quote('@alice:')}/displayname" + ) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) + def test_set_displayname(self) -> None: channel = self.make_request( "PUT", diff --git a/tests/test_types.py b/tests/test_types.py index 0b10dae84..d8d82a517 100644 --- a/tests/test_types.py +++ b/tests/test_types.py @@ -26,10 +26,21 @@ class UserIDTestCase(unittest.HomeserverTestCase): self.assertEqual("test", user.domain) self.assertEqual(True, self.hs.is_mine(user)) - def test_pase_empty(self): + def test_parse_rejects_empty_id(self): with self.assertRaises(SynapseError): UserID.from_string("") + def test_parse_rejects_missing_sigil(self): + with self.assertRaises(SynapseError): + UserID.from_string("alice:example.com") + + def test_parse_rejects_missing_separator(self): + with self.assertRaises(SynapseError): + UserID.from_string("@alice.example.com") + + def test_validation_rejects_missing_domain(self): + self.assertFalse(UserID.is_valid("@alice:")) + def test_build(self): user = UserID("5678efgh", "my.domain") From 5b645ae2ad9c3bc84bf95a30a77089b3f5fbc548 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Tue, 14 Jun 2022 18:41:06 +0100 Subject: [PATCH 39/85] Refactor entry points so that they all have a `main` function. 
(#13052) --- changelog.d/13052.misc | 1 + synapse/app/appservice.py | 7 ++++++- synapse/app/client_reader.py | 7 ++++++- synapse/app/event_creator.py | 7 ++++++- synapse/app/federation_reader.py | 7 ++++++- synapse/app/federation_sender.py | 7 ++++++- synapse/app/frontend_proxy.py | 7 ++++++- synapse/app/media_repository.py | 7 ++++++- synapse/app/pusher.py | 7 ++++++- synapse/app/synchrotron.py | 7 ++++++- synapse/app/user_dir.py | 7 ++++++- 11 files changed, 61 insertions(+), 10 deletions(-) create mode 100644 changelog.d/13052.misc diff --git a/changelog.d/13052.misc b/changelog.d/13052.misc new file mode 100644 index 000000000..0d11dfb12 --- /dev/null +++ b/changelog.d/13052.misc @@ -0,0 +1 @@ +Refactor entry points so that they all have a `main` function. \ No newline at end of file diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py index de1bcee0a..b6aed651e 100644 --- a/synapse/app/appservice.py +++ b/synapse/app/appservice.py @@ -17,6 +17,11 @@ import sys from synapse.app.generic_worker import start from synapse.util.logcontext import LoggingContext -if __name__ == "__main__": + +def main() -> None: with LoggingContext("main"): start(sys.argv[1:]) + + +if __name__ == "__main__": + main() diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py index de1bcee0a..b6aed651e 100644 --- a/synapse/app/client_reader.py +++ b/synapse/app/client_reader.py @@ -17,6 +17,11 @@ import sys from synapse.app.generic_worker import start from synapse.util.logcontext import LoggingContext -if __name__ == "__main__": + +def main() -> None: with LoggingContext("main"): start(sys.argv[1:]) + + +if __name__ == "__main__": + main() diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py index 885454ed4..7995d9982 100644 --- a/synapse/app/event_creator.py +++ b/synapse/app/event_creator.py @@ -17,6 +17,11 @@ import sys from synapse.app.generic_worker import start from synapse.util.logcontext import LoggingContext -if __name__ == "__main__": + +def main() -> None: with LoggingContext("main"): start(sys.argv[1:]) + + +if __name__ == "__main__": + main() diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py index de1bcee0a..b6aed651e 100644 --- a/synapse/app/federation_reader.py +++ b/synapse/app/federation_reader.py @@ -17,6 +17,11 @@ import sys from synapse.app.generic_worker import start from synapse.util.logcontext import LoggingContext -if __name__ == "__main__": + +def main() -> None: with LoggingContext("main"): start(sys.argv[1:]) + + +if __name__ == "__main__": + main() diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py index de1bcee0a..b6aed651e 100644 --- a/synapse/app/federation_sender.py +++ b/synapse/app/federation_sender.py @@ -17,6 +17,11 @@ import sys from synapse.app.generic_worker import start from synapse.util.logcontext import LoggingContext -if __name__ == "__main__": + +def main() -> None: with LoggingContext("main"): start(sys.argv[1:]) + + +if __name__ == "__main__": + main() diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py index de1bcee0a..b6aed651e 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py @@ -17,6 +17,11 @@ import sys from synapse.app.generic_worker import start from synapse.util.logcontext import LoggingContext -if __name__ == "__main__": + +def main() -> None: with LoggingContext("main"): start(sys.argv[1:]) + + +if __name__ == "__main__": + main() diff --git a/synapse/app/media_repository.py 
b/synapse/app/media_repository.py index de1bcee0a..b6aed651e 100644 --- a/synapse/app/media_repository.py +++ b/synapse/app/media_repository.py @@ -17,6 +17,11 @@ import sys from synapse.app.generic_worker import start from synapse.util.logcontext import LoggingContext -if __name__ == "__main__": + +def main() -> None: with LoggingContext("main"): start(sys.argv[1:]) + + +if __name__ == "__main__": + main() diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index de1bcee0a..b6aed651e 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -17,6 +17,11 @@ import sys from synapse.app.generic_worker import start from synapse.util.logcontext import LoggingContext -if __name__ == "__main__": + +def main() -> None: with LoggingContext("main"): start(sys.argv[1:]) + + +if __name__ == "__main__": + main() diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index de1bcee0a..b6aed651e 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -17,6 +17,11 @@ import sys from synapse.app.generic_worker import start from synapse.util.logcontext import LoggingContext -if __name__ == "__main__": + +def main() -> None: with LoggingContext("main"): start(sys.argv[1:]) + + +if __name__ == "__main__": + main() diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py index 14bde2717..34f23c4e5 100644 --- a/synapse/app/user_dir.py +++ b/synapse/app/user_dir.py @@ -17,6 +17,11 @@ import sys from synapse.app.generic_worker import start from synapse.util.logcontext import LoggingContext -if __name__ == "__main__": + +def main() -> None: with LoggingContext("main"): start(sys.argv[1:]) + + +if __name__ == "__main__": + main() From a4ae1406d15280156510d0a07e85de8203ae1e6c Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 15 Jun 2022 11:49:58 +0100 Subject: [PATCH 40/85] Fix typechecks against twisted trunk (#13061) --- changelog.d/13061.misc | 1 + tests/handlers/test_federation.py | 8 ++++---- tests/state/test_v2.py | 1 + 3 files changed, 6 insertions(+), 4 deletions(-) create mode 100644 changelog.d/13061.misc diff --git a/changelog.d/13061.misc b/changelog.d/13061.misc new file mode 100644 index 000000000..4c55e2b4e --- /dev/null +++ b/changelog.d/13061.misc @@ -0,0 +1 @@ +Fix type checking errors against Twisted trunk. diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index e0eda545b..9afba7b0e 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -119,7 +119,7 @@ class FederationTestCase(unittest.FederatingHomeserverTestCase): join_event = self._build_and_send_join_event(OTHER_SERVER, OTHER_USER, room_id) # check the state group - sg = self.successResultOf( + sg = self.get_success( self.store._get_state_group_for_event(join_event.event_id) ) @@ -149,7 +149,7 @@ class FederationTestCase(unittest.FederatingHomeserverTestCase): self.assertIsNotNone(e.rejected_reason) # ... 
and the state group should be the same as before
-        sg2 = self.successResultOf(self.store._get_state_group_for_event(ev.event_id))
+        sg2 = self.get_success(self.store._get_state_group_for_event(ev.event_id))

         self.assertEqual(sg, sg2)

@@ -172,7 +172,7 @@ class FederationTestCase(unittest.FederatingHomeserverTestCase):
         join_event = self._build_and_send_join_event(OTHER_SERVER, OTHER_USER, room_id)

         # check the state group
-        sg = self.successResultOf(
+        sg = self.get_success(
             self.store._get_state_group_for_event(join_event.event_id)
         )

@@ -203,7 +203,7 @@ class FederationTestCase(unittest.FederatingHomeserverTestCase):
         self.assertIsNotNone(e.rejected_reason)

         # ... and the state group should be the same as before
-        sg2 = self.successResultOf(self.store._get_state_group_for_event(ev.event_id))
+        sg2 = self.get_success(self.store._get_state_group_for_event(ev.event_id))

         self.assertEqual(sg, sg2)

diff --git a/tests/state/test_v2.py b/tests/state/test_v2.py
index 78b83d97b..2e3f2318d 100644
--- a/tests/state/test_v2.py
+++ b/tests/state/test_v2.py
@@ -495,6 +495,7 @@ class StateTestCase(unittest.TestCase):

         prev_events = list(graph[node_id])

+        state_before: StateMap[str]
         if len(prev_events) == 0:
             state_before = {}
         elif len(prev_events) == 1:

From 75fb10ee45950a175ee286b36fb5a46f123d7db5 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Wed, 15 Jun 2022 12:29:42 +0100
Subject: [PATCH 41/85] Clean up schema for `event_edges` (#12893)

* Remove redundant references to `event_edges.room_id`

We don't need to care about the room_id here, because we are already checking
the event id.

* Clean up the event_edges table

We make a number of changes to `event_edges`:

 * We give the `room_id` and `is_state` columns defaults (null and false
   respectively) so that we can stop populating them.
 * We drop any rows that have `is_state` set true - they should no longer
   exist.
 * We drop any rows that do not exist in `events` - these should not exist
   either.
 * We drop the old unique constraint on all the columns, which wasn't much
   use.
 * We create a new unique index on `(event_id, prev_event_id)`.
 * We add a foreign key constraint to `events`.

These happen rather differently depending on whether we are on Postgres or
SQLite. For SQLite, we just rebuild the whole table, copying only the rows we
want to keep. For Postgres, we try to do things in the background as much as
possible.

* Stop populating `event_edges.room_id` and `is_state`

We can just rely on the defaults.
---
 changelog.d/12893.misc                        |   1 +
 synapse/storage/databases/main/events.py      |   6 +-
 .../databases/main/events_bg_updates.py       | 116 +++++++++++++++++-
 .../storage/databases/main/purge_events.py    |   2 +-
 synapse/storage/schema/__init__.py            |  12 +-
 .../71/01rebuild_event_edges.sql.postgres     |  43 +++++++
 .../delta/71/01rebuild_event_edges.sql.sqlite |  47 +++++++
 7 files changed, 216 insertions(+), 11 deletions(-)
 create mode 100644 changelog.d/12893.misc
 create mode 100644 synapse/storage/schema/main/delta/71/01rebuild_event_edges.sql.postgres
 create mode 100644 synapse/storage/schema/main/delta/71/01rebuild_event_edges.sql.sqlite

diff --git a/changelog.d/12893.misc b/changelog.d/12893.misc
new file mode 100644
index 000000000..570521030
--- /dev/null
+++ b/changelog.d/12893.misc
@@ -0,0 +1 @@
+Simplify the database schema for `event_edges`.
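The Postgres half of this migration relies on a standard two-step trick: adding a foreign key normally validates every existing row while holding a lock that blocks writes, so the constraint is instead created as `NOT VALID` (enforced for new rows immediately, with no scan), the offending rows are removed by the background update, and the constraint is validated at the end. A minimal sketch of the pattern, assuming psycopg2 and a scratch database — the DSN and connection handling are illustrative only, though the table and constraint names match the delta files in this patch:

```python
import psycopg2

conn = psycopg2.connect("dbname=scratch")  # hypothetical DSN, illustration only
conn.autocommit = True  # each ALTER runs (and commits) in its own transaction
with conn.cursor() as cur:
    # NOT VALID skips the scan of existing rows: the constraint applies to
    # new writes immediately, and only a brief lock is needed.
    cur.execute(
        "ALTER TABLE event_edges"
        " ADD CONSTRAINT event_edges_event_id_fkey"
        " FOREIGN KEY (event_id) REFERENCES events(event_id) NOT VALID"
    )

    # ... a background job then deletes the rows that would violate the
    # constraint, in batches, taking as long as it needs ...

    # Validation does scan the old rows, but under SHARE UPDATE EXCLUSIVE,
    # which lets normal reads and writes carry on in the meantime.
    cur.execute(
        "ALTER TABLE event_edges VALIDATE CONSTRAINT event_edges_event_id_fkey"
    )
conn.close()
```

SQLite cannot add a constraint to an existing table at all, hence the wholesale table rebuild in the `.sql.sqlite` delta further down.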
diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index a8773374b..a3e12f1e9 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -2296,11 +2296,9 @@ class PersistEventsStore: self.db_pool.simple_insert_many_txn( txn, table="event_edges", - keys=("event_id", "prev_event_id", "room_id", "is_state"), + keys=("event_id", "prev_event_id"), values=[ - (ev.event_id, e_id, ev.room_id, False) - for ev in events - for e_id in ev.prev_event_ids() + (ev.event_id, e_id) for ev in events for e_id in ev.prev_event_ids() ], ) diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index bea34a4c4..eeca85fc9 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -1,4 +1,4 @@ -# Copyright 2019-2021 The Matrix.org Foundation C.I.C. +# Copyright 2019-2022 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -64,6 +64,9 @@ class _BackgroundUpdates: INDEX_STREAM_ORDERING2_TS = "index_stream_ordering2_ts" REPLACE_STREAM_ORDERING_COLUMN = "replace_stream_ordering_column" + EVENT_EDGES_DROP_INVALID_ROWS = "event_edges_drop_invalid_rows" + EVENT_EDGES_REPLACE_INDEX = "event_edges_replace_index" + @attr.s(slots=True, frozen=True, auto_attribs=True) class _CalculateChainCover: @@ -235,6 +238,21 @@ class EventsBackgroundUpdatesStore(SQLBaseStore): ################################################################################ + self.db_pool.updates.register_background_update_handler( + _BackgroundUpdates.EVENT_EDGES_DROP_INVALID_ROWS, + self._background_drop_invalid_event_edges_rows, + ) + + self.db_pool.updates.register_background_index_update( + _BackgroundUpdates.EVENT_EDGES_REPLACE_INDEX, + index_name="event_edges_event_id_prev_event_id_idx", + table="event_edges", + columns=["event_id", "prev_event_id"], + unique=True, + # the old index which just covered event_id is now redundant. + replaces_index="ev_edges_id", + ) + async def _background_reindex_fields_sender( self, progress: JsonDict, batch_size: int ) -> int: @@ -1285,3 +1303,99 @@ class EventsBackgroundUpdatesStore(SQLBaseStore): ) return 0 + + async def _background_drop_invalid_event_edges_rows( + self, progress: JsonDict, batch_size: int + ) -> int: + """Drop invalid rows from event_edges + + This only runs for postgres. For SQLite, it all happens synchronously. + + Firstly, drop any rows with is_state=True. These may have been added a long time + ago, but they are no longer used. + + We also drop rows that do not correspond to entries in `events`, and add a + foreign key. + """ + + last_event_id = progress.get("last_event_id", "") + + def drop_invalid_event_edges_txn(txn: LoggingTransaction) -> bool: + """Returns True if we're done.""" + + # first we need to find an endpoint. + txn.execute( + """ + SELECT event_id FROM event_edges + WHERE event_id > ? + ORDER BY event_id + LIMIT 1 OFFSET ? + """, + (last_event_id, batch_size), + ) + + endpoint = None + row = txn.fetchone() + + if row: + endpoint = row[0] + + where_clause = "ee.event_id > ?" + args = [last_event_id] + if endpoint: + where_clause += " AND ee.event_id <= ?" 
+ args.append(endpoint) + + # now delete any that: + # - have is_state=TRUE, or + # - do not correspond to a row in `events` + txn.execute( + f""" + DELETE FROM event_edges + WHERE event_id IN ( + SELECT ee.event_id + FROM event_edges ee + LEFT JOIN events ev USING (event_id) + WHERE ({where_clause}) AND + (is_state OR ev.event_id IS NULL) + )""", + args, + ) + + logger.info( + "cleaned up event_edges up to %s: removed %i/%i rows", + endpoint, + txn.rowcount, + batch_size, + ) + + if endpoint is not None: + self.db_pool.updates._background_update_progress_txn( + txn, + _BackgroundUpdates.EVENT_EDGES_DROP_INVALID_ROWS, + {"last_event_id": endpoint}, + ) + return False + + # if that was the final batch, we validate the foreign key. + # + # The constraint should have been in place and enforced for new rows since + # before we started deleting invalid rows, so there's no chance for any + # invalid rows to have snuck in the meantime. In other words, this really + # ought to succeed. + logger.info("cleaned up event_edges; enabling foreign key") + txn.execute( + "ALTER TABLE event_edges VALIDATE CONSTRAINT event_edges_event_id_fkey" + ) + return True + + done = await self.db_pool.runInteraction( + desc="drop_invalid_event_edges", func=drop_invalid_event_edges_txn + ) + + if done: + await self.db_pool.updates._end_background_update( + _BackgroundUpdates.EVENT_EDGES_DROP_INVALID_ROWS + ) + + return batch_size diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py index ba385f9fc..87b0d0903 100644 --- a/synapse/storage/databases/main/purge_events.py +++ b/synapse/storage/databases/main/purge_events.py @@ -214,10 +214,10 @@ class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore): # Delete all remote non-state events for table in ( + "event_edges", "events", "event_json", "event_auth", - "event_edges", "event_forward_extremities", "event_relations", "event_search", diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py index 5843fae60..dc237e303 100644 --- a/synapse/storage/schema/__init__.py +++ b/synapse/storage/schema/__init__.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -SCHEMA_VERSION = 71 # remember to update the list below when updating +SCHEMA_VERSION = 72 # remember to update the list below when updating """Represents the expectations made by the codebase about the database schema This should be incremented whenever the codebase changes its requirements on the @@ -71,14 +71,16 @@ Changes in SCHEMA_VERSION = 70: Changes in SCHEMA_VERSION = 71: - event_edges.room_id is no longer read from. - Tables related to groups are no longer accessed. + +Changes in SCHEMA_VERSION = 72: + - event_edges.(room_id, is_state) are no longer written to. """ SCHEMA_COMPAT_VERSION = ( - # We now assume that `device_lists_changes_in_room` has been filled out for - # recent device_list_updates. - # ... and that `application_services_state.last_txn` is not used. - 69 + # We no longer maintain `event_edges.room_id`, so synapses with SCHEMA_VERSION < 71 + # will break. 
+    71
 )
 """Limit on how far the synapse codebase can be rolled back without breaking db compat
diff --git a/synapse/storage/schema/main/delta/71/01rebuild_event_edges.sql.postgres b/synapse/storage/schema/main/delta/71/01rebuild_event_edges.sql.postgres
new file mode 100644
index 000000000..f32f44585
--- /dev/null
+++ b/synapse/storage/schema/main/delta/71/01rebuild_event_edges.sql.postgres
@@ -0,0 +1,43 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- We're going to stop populating event_edges.room_id and event_edges.is_state,
+-- which means we now need to give them defaults.
+
+-- We also drop the existing unique constraint which spans all four columns. Frankly
+-- it's not doing much, and there are other indexes on event_id and prev_event_id.
+-- Later on we introduce a proper unique constraint on (event_id, prev_event_id).
+--
+-- We also add a foreign key constraint (which will be enforced for new rows), but
+-- don't yet validate it for existing rows (since that's slow, and we haven't yet
+-- checked that all the rows are valid)
+
+ALTER TABLE event_edges
+    ALTER room_id DROP NOT NULL,
+    ALTER is_state SET DEFAULT FALSE,
+    DROP CONSTRAINT IF EXISTS event_edges_event_id_prev_event_id_room_id_is_state_key,
+    ADD CONSTRAINT event_edges_event_id_fkey FOREIGN KEY (event_id) REFERENCES events(event_id) NOT VALID;
+
+-- In the background, we drop any rows with is_state=True. These may have been
+-- added a long time ago, but they are no longer used.
+--
+-- We also drop rows that do not correspond to entries in `events`, and finally
+-- validate the foreign key.
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+  (7101, 'event_edges_drop_invalid_rows', '{}');
+
+-- We'll then create a new unique index on (event_id, prev_event_id).
+INSERT INTO background_updates (ordering, update_name, progress_json, depends_on) VALUES
+  (7101, 'event_edges_replace_index', '{}', 'event_edges_drop_invalid_rows');
diff --git a/synapse/storage/schema/main/delta/71/01rebuild_event_edges.sql.sqlite b/synapse/storage/schema/main/delta/71/01rebuild_event_edges.sql.sqlite
new file mode 100644
index 000000000..0bb86edd2
--- /dev/null
+++ b/synapse/storage/schema/main/delta/71/01rebuild_event_edges.sql.sqlite
@@ -0,0 +1,47 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- We're going to stop populating event_edges.room_id and event_edges.is_state,
+-- which means we now need to give them defaults.
+--
+-- We also take the opportunity to:
+--  - drop any rows with is_state=True (these were populated a long time ago, but
+--    are no longer used.)
+--  - drop any rows which do not correspond to entries in `events`
+--  - tighten the unique index so that it applies just to (event_id, prev_event_id)
+--  - drop the "ev_edges_id" index, which is redundant to the above.
+--  - add a foreign key constraint from event_id to `events`
+
+CREATE TABLE new_event_edges (
+    event_id TEXT NOT NULL,
+    prev_event_id TEXT NOT NULL,
+    room_id TEXT NULL,
+    is_state BOOL NOT NULL DEFAULT 0,
+    FOREIGN KEY(event_id) REFERENCES events(event_id)
+);
+
+INSERT INTO new_event_edges
+    SELECT ee.event_id, ee.prev_event_id, ee.room_id, ee.is_state
+    FROM event_edges ee JOIN events ev USING (event_id)
+    WHERE NOT ee.is_state;
+
+DROP TABLE event_edges;
+
+ALTER TABLE new_event_edges RENAME TO event_edges;
+
+CREATE UNIQUE INDEX event_edges_event_id_prev_event_id_idx
+    ON event_edges (event_id, prev_event_id);
+
+CREATE INDEX ev_edges_prev_id ON event_edges (prev_event_id);

From 417f4cf40b3f2dc19cab6b9d17d15e43af679ae6 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Wed, 15 Jun 2022 15:36:16 +0200
Subject: [PATCH 42/85] Don't use keyword arguments when initialising modules
 (#13060)

---
 changelog.d/13060.misc | 1 +
 synapse/app/_base.py   | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/13060.misc

diff --git a/changelog.d/13060.misc b/changelog.d/13060.misc
new file mode 100644
index 000000000..c2376701f
--- /dev/null
+++ b/changelog.d/13060.misc
@@ -0,0 +1 @@
+Don't instantiate modules with keyword arguments.
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index 84e389a6c..363ac98ea 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -450,7 +450,7 @@ async def start(hs: "HomeServer") -> None:
     # before we start the listeners.
     module_api = hs.get_module_api()
     for module, config in hs.config.modules.loaded_modules:
-        m = module(config=config, api=module_api)
+        m = module(config, module_api)
         logger.info("Loaded module %s", m)

     load_legacy_spam_checkers(hs)

From 0dbdc3994063245900501a95b348f50d943fd72b Mon Sep 17 00:00:00 2001
From: reivilibre
Date: Wed, 15 Jun 2022 15:11:55 +0100
Subject: [PATCH 43/85] Fix a long-standing bug which meant that rate limiting
 was not restrictive enough in some cases. (#13018)

---
 changelog.d/13018.bugfix       |  1 +
 synapse/api/ratelimiting.py    |  5 +++-
 tests/api/test_ratelimiting.py | 51 ++++++++++++++++++++++++++--------
 3 files changed, 45 insertions(+), 12 deletions(-)
 create mode 100644 changelog.d/13018.bugfix

diff --git a/changelog.d/13018.bugfix b/changelog.d/13018.bugfix
new file mode 100644
index 000000000..a84657f04
--- /dev/null
+++ b/changelog.d/13018.bugfix
@@ -0,0 +1 @@
+Fix a long-standing bug which meant that rate limiting was not restrictive enough in some cases.
\ No newline at end of file
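The `ratelimiting.py` fix below is a single line, but the accounting is subtle. Previously each check stored the freshly decayed count while leaving the stored start time alone (the start time was only reset once the count had decayed all the way to zero), so the next check applied the full elapsed decay to an already-decayed value; by timing requests carefully a client could have the same recovery credited repeatedly and so build up extra burst capacity. A toy counter showing the corrected accounting — this mirrors the 0.1 Hz rate and burst of 3 used by the new regression test, but is an illustration rather than Synapse's actual `Ratelimiter`:

```python
# A toy re-implementation of the corrected accounting; this is not
# Synapse's Ratelimiter, just the arithmetic at the heart of the fix.
class LeakyCounter:
    def __init__(self, rate_hz: float, burst_count: int) -> None:
        self.rate_hz = rate_hz          # actions forgiven per second
        self.burst_count = burst_count  # max actions before waiting
        self.count = 0.0                # actions recorded since `self.start`
        self.start = 0.0

    def can_do_action(self, now: float) -> bool:
        # Decay is always computed relative to the *original* start time...
        performed = self.count - (now - self.start) * self.rate_hz
        if performed < 0:
            # ...and the counter is only re-based once it has fully decayed.
            self.count = 0.0
            self.start = now
            performed = 0.0
        if performed + 1 > self.burst_count:
            return False
        # Record against the stored (undecayed) count. Storing
        # `performed + 1` instead, as the old code effectively did, would
        # credit the same recovery again on the next call.
        self.count += 1
        return True


limiter = LeakyCounter(rate_hz=0.1, burst_count=3)
assert all(limiter.can_do_action(t) for t in (0.0, 0.1, 0.2))  # burst spent
assert limiter.can_do_action(10.1)      # one action recovered after ~10s
assert not limiter.can_do_action(11.1)  # ...but only the one
```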
diff --git a/synapse/api/ratelimiting.py b/synapse/api/ratelimiting.py
index 849c18ced..54d13026c 100644
--- a/synapse/api/ratelimiting.py
+++ b/synapse/api/ratelimiting.py
@@ -128,6 +128,9 @@ class Ratelimiter:
             performed_count = action_count - time_delta * rate_hz
             if performed_count < 0:
                 performed_count = 0
+
+                # Reset the start time and forgive all actions
+                action_count = 0
                 time_start = time_now_s

         # This check would be easier read as performed_count + n_actions > burst_count,
@@ -140,7 +143,7 @@ class Ratelimiter:
         else:
             # We haven't reached our limit yet
             allowed = True
-            action_count = performed_count + n_actions
+            action_count = action_count + n_actions

         if update:
             self.actions[key] = (action_count, time_start, rate_hz)
diff --git a/tests/api/test_ratelimiting.py b/tests/api/test_ratelimiting.py
index f661a9ff8..18649c2c0 100644
--- a/tests/api/test_ratelimiting.py
+++ b/tests/api/test_ratelimiting.py
@@ -246,7 +246,7 @@ class TestRatelimiter(unittest.HomeserverTestCase):
         self.assertTrue(allowed)
         self.assertEqual(10.0, time_allowed)

-        # Test that, after doing these 3 actions, we can't do any more action without
+        # Test that, after doing these 3 actions, we can't do any more actions without
         # waiting.
         allowed, time_allowed = self.get_success_or_raise(
             limiter.can_do_action(None, key="test_id", n_actions=1, _time_now_s=0)
         )
         self.assertFalse(allowed)
         self.assertEqual(10.0, time_allowed)

-        # Test that after waiting we can do only 1 action.
+        # Test that after waiting we would be able to do only 1 action.
+        # Note that we don't actually do it (update=False) here.
         allowed, time_allowed = self.get_success_or_raise(
             limiter.can_do_action(
                 None,
                 key="test_id",
                 update=False,
                 n_actions=1,
                 _time_now_s=10,
             )
         )
         self.assertTrue(allowed)
-        # The time allowed is the current time because we could still repeat the action
-        # once.
-        self.assertEqual(10.0, time_allowed)
+        # We would be able to do the 5th action at t=20.
+        self.assertEqual(20.0, time_allowed)

+        # Attempt (but fail) to perform TWO actions at t=10.
+        # Those would be the 4th and 5th actions.
         allowed, time_allowed = self.get_success_or_raise(
             limiter.can_do_action(None, key="test_id", n_actions=2, _time_now_s=10)
         )
         self.assertFalse(allowed)
-        # The time allowed doesn't change despite allowed being False because, while we
-        # don't allow 2 actions, we could still do 1.
+        # The returned time allowed for the next action is now even though we weren't
+        # allowed to perform the action because whilst we don't allow 2 actions,
+        # we could still do 1.
         self.assertEqual(10.0, time_allowed)

-        # Test that after waiting a bit more we can do 2 actions.
+        # Test that after waiting until t=20, we can perform 2 actions.
+        # These are the 4th and 5th actions.
         allowed, time_allowed = self.get_success_or_raise(
             limiter.can_do_action(None, key="test_id", n_actions=2, _time_now_s=20)
         )
         self.assertTrue(allowed)
-        # The time allowed is the current time because we could still repeat the action
-        # once.
-        self.assertEqual(20.0, time_allowed)
+        # We would be able to do the 6th action at t=30.
+        self.assertEqual(30.0, time_allowed)
+
+    def test_rate_limit_burst_only_given_once(self) -> None:
+        """
+        Regression test against a bug that meant that you could build up
+        extra tokens by timing requests.
+ """ + limiter = Ratelimiter( + store=self.hs.get_datastores().main, clock=None, rate_hz=0.1, burst_count=3 + ) + + def consume_at(time: float) -> bool: + success, _ = self.get_success_or_raise( + limiter.can_do_action(requester=None, key="a", _time_now_s=time) + ) + return success + + # Use all our 3 burst tokens + self.assertTrue(consume_at(0.0)) + self.assertTrue(consume_at(0.1)) + self.assertTrue(consume_at(0.2)) + + # Wait to recover 1 token (10 seconds at 0.1 Hz). + self.assertTrue(consume_at(10.1)) + + # Check that we get rate limited after using that token. + self.assertFalse(consume_at(11.1)) From 941dc3db13f1c4c4b89da14a0dc60b4f7b54228c Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 15 Jun 2022 15:19:49 +0100 Subject: [PATCH 44/85] Track a histogram of state res durations (#13036) --- changelog.d/13036.feature | 1 + synapse/state/__init__.py | 12 ++++++++++++ 2 files changed, 13 insertions(+) create mode 100644 changelog.d/13036.feature diff --git a/changelog.d/13036.feature b/changelog.d/13036.feature new file mode 100644 index 000000000..71e5a29fe --- /dev/null +++ b/changelog.d/13036.feature @@ -0,0 +1 @@ +Add metrics measuring the CPU and DB time spent in state resolution. diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index da25f20ae..9d3fe6610 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -444,6 +444,15 @@ _biggest_room_by_db_counter = Counter( "expensive room for state resolution", ) +_cpu_times = Histogram( + "synapse_state_res_cpu_for_all_rooms_seconds", + "CPU time (utime+stime) spent computing a single state resolution", +) +_db_times = Histogram( + "synapse_state_res_db_for_all_rooms_seconds", + "Database time spent computing a single state resolution", +) + class StateResolutionHandler: """Responsible for doing state conflict resolution. @@ -609,6 +618,9 @@ class StateResolutionHandler: room_metrics.db_time += rusage.db_txn_duration_sec room_metrics.db_events += rusage.evt_db_fetch_count + _cpu_times.observe(rusage.ru_utime + rusage.ru_stime) + _db_times.observe(rusage.db_txn_duration_sec) + def _report_metrics(self) -> None: if not self._state_res_metrics: # no state res has happened since the last iteration: don't bother logging. From 538044ac01696c1b55d0e60418bc9a6bc2b03411 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Wed, 15 Jun 2022 15:42:27 +0100 Subject: [PATCH 45/85] Collapse Docker build commands in Complement CI runs to make the logs easier to read. (#13058) --- .github/workflows/tests.yml | 2 +- changelog.d/13058.misc | 1 + docker/Dockerfile | 6 +++--- docker/Dockerfile-workers | 4 ++-- docker/complement/Dockerfile | 2 +- scripts-dev/complement.sh | 15 +++++++++++++++ 6 files changed, 23 insertions(+), 7 deletions(-) create mode 100644 changelog.d/13058.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 4ce27ff41..56f3e4646 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -371,7 +371,7 @@ jobs: - name: "Install Complement Dependencies" run: | - sudo apt-get update && sudo apt-get install -y libolm3 libolm-dev + sudo apt-get -qq update && sudo apt-get install -qqy libolm3 libolm-dev go get -v github.com/haveyoudebuggedit/gotestfmt/v2/cmd/gotestfmt@latest - name: Run actions/checkout@v2 for synapse diff --git a/changelog.d/13058.misc b/changelog.d/13058.misc new file mode 100644 index 000000000..4102bf96b --- /dev/null +++ b/changelog.d/13058.misc @@ -0,0 +1 @@ +Make Complement CI logs easier to read. 
\ No newline at end of file diff --git a/docker/Dockerfile b/docker/Dockerfile index 7af0e51f9..c676f8377 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -40,7 +40,7 @@ FROM docker.io/python:${PYTHON_VERSION}-slim as requirements RUN \ --mount=type=cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,target=/var/lib/apt,sharing=locked \ - apt-get update && apt-get install -y git \ + apt-get update -qq && apt-get install -yqq git \ && rm -rf /var/lib/apt/lists/* # We install poetry in its own build stage to avoid its dependencies conflicting with @@ -73,7 +73,7 @@ FROM docker.io/python:${PYTHON_VERSION}-slim as builder RUN \ --mount=type=cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,target=/var/lib/apt,sharing=locked \ - apt-get update && apt-get install -y \ + apt-get update -qq && apt-get install -yqq \ build-essential \ libffi-dev \ libjpeg-dev \ @@ -118,7 +118,7 @@ LABEL org.opencontainers.image.licenses='Apache-2.0' RUN \ --mount=type=cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,target=/var/lib/apt,sharing=locked \ - apt-get update && apt-get install -y \ + apt-get update -qq && apt-get install -yqq \ curl \ gosu \ libjpeg62-turbo \ diff --git a/docker/Dockerfile-workers b/docker/Dockerfile-workers index 83db0a95b..0f1570cfb 100644 --- a/docker/Dockerfile-workers +++ b/docker/Dockerfile-workers @@ -6,8 +6,8 @@ FROM matrixdotorg/synapse:$SYNAPSE_VERSION RUN \ --mount=type=cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,target=/var/lib/apt,sharing=locked \ - apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ + apt-get update -qq && \ + DEBIAN_FRONTEND=noninteractive apt-get install -yqq --no-install-recommends \ redis-server nginx-light # Install supervisord with pip instead of apt, to avoid installing a second diff --git a/docker/complement/Dockerfile b/docker/complement/Dockerfile index 50684c956..8bec0f611 100644 --- a/docker/complement/Dockerfile +++ b/docker/complement/Dockerfile @@ -9,7 +9,7 @@ FROM matrixdotorg/synapse-workers:$SYNAPSE_VERSION # Install postgresql RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y postgresql-13 + DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -yqq postgresql-13 # Configure a user and create a database for Synapse RUN pg_ctlcluster 13 main start && su postgres -c "echo \ diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index 52ef1fd07..f1843717c 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -24,6 +24,15 @@ # Exit if a line returns a non-zero exit code set -e + +# Helper to emit annotations that collapse portions of the log in GitHub Actions +echo_if_github() { + if [[ -n "$GITHUB_WORKFLOW" ]]; then + echo $* + fi +} + + # enable buildkit for the docker builds export DOCKER_BUILDKIT=1 @@ -41,14 +50,20 @@ if [[ -z "$COMPLEMENT_DIR" ]]; then fi # Build the base Synapse image from the local checkout +echo_if_github "::group::Build Docker image: matrixdotorg/synapse" docker build -t matrixdotorg/synapse -f "docker/Dockerfile" . +echo_if_github "::endgroup::" # Build the workers docker image (from the base Synapse image we just built). +echo_if_github "::group::Build Docker image: matrixdotorg/synapse-workers" docker build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" . +echo_if_github "::endgroup::" # Build the unified Complement image (from the worker Synapse image we just built). 
+echo_if_github "::group::Build Docker image: complement/Dockerfile" docker build -t complement-synapse \ -f "docker/complement/Dockerfile" "docker/complement" +echo_if_github "::endgroup::" export COMPLEMENT_BASE_IMAGE=complement-synapse From 212be2edc17fba65e2d1ac0099346e92f2e6bc49 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Wed, 15 Jun 2022 15:54:32 +0100 Subject: [PATCH 46/85] Use updated `update_user_directory_from_worker` options in Complement tests. (#13069) --- changelog.d/13069.misc | 1 + docker/configure_workers_and_start.py | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/13069.misc diff --git a/changelog.d/13069.misc b/changelog.d/13069.misc new file mode 100644 index 000000000..4102bf96b --- /dev/null +++ b/changelog.d/13069.misc @@ -0,0 +1 @@ +Make Complement CI logs easier to read. \ No newline at end of file diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py index 64697e035..2a2c13f77 100755 --- a/docker/configure_workers_and_start.py +++ b/docker/configure_workers_and_start.py @@ -52,12 +52,12 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = { "worker_extra_conf": "", }, "user_dir": { - "app": "synapse.app.user_dir", + "app": "synapse.app.generic_worker", "listener_resources": ["client"], "endpoint_patterns": [ "^/_matrix/client/(api/v1|r0|v3|unstable)/user_directory/search$" ], - "shared_extra_conf": {"update_user_directory": False}, + "shared_extra_conf": {"update_user_directory_from_worker": "user_dir1"}, "worker_extra_conf": "", }, "media_repository": { @@ -78,7 +78,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = { "app": "synapse.app.generic_worker", "listener_resources": [], "endpoint_patterns": [], - "shared_extra_conf": {"notify_appservices_from_worker": "appservice"}, + "shared_extra_conf": {"notify_appservices_from_worker": "appservice1"}, "worker_extra_conf": "", }, "federation_sender": { From 9ad2197fa7b1f9f0fd308b37cefc78061e95c04b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 15 Jun 2022 16:11:42 +0100 Subject: [PATCH 47/85] Rename complement-developonly (#13046) --- .github/workflows/tests.yml | 2 ++ changelog.d/13046.misc | 1 + 2 files changed, 3 insertions(+) create mode 100644 changelog.d/13046.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 56f3e4646..5355bfa20 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -358,6 +358,8 @@ jobs: if: "${{ !failure() && !cancelled() && (github.ref == 'refs/heads/develop') }}" needs: linting-done runs-on: ubuntu-latest + + name: "Complement Workers (develop only)" steps: # The path is set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on the path to run Complement. diff --git a/changelog.d/13046.misc b/changelog.d/13046.misc new file mode 100644 index 000000000..1248c34d3 --- /dev/null +++ b/changelog.d/13046.misc @@ -0,0 +1 @@ +Rename CI test runs. From 0d1d3e070886694eff1fa862cd203206b1a63372 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 15 Jun 2022 16:17:14 +0100 Subject: [PATCH 48/85] Speed up `get_unread_event_push_actions_by_room` (#13005) Fixes #11887 hopefully. The core change here is that `event_push_summary` now holds a summary of counts up until a much more recent point, meaning that the range of rows we need to count in `event_push_actions` is much smaller. This needs two major changes: 1. When we get a receipt we need to recalculate `event_push_summary` rather than just delete it 2. 
The logic for deleting `event_push_actions` is now divorced from calculating `event_push_summary`. In future it would be good to calculate `event_push_summary` while we persist a new event (it should just be a case of adding one to the relevant rows in `event_push_summary`), as that will further simplify the get counts logic and remove the need for us to periodically update `event_push_summary` in a background job. --- changelog.d/13005.misc | 1 + synapse/_scripts/synapse_port_db.py | 4 + synapse/handlers/sync.py | 10 +- synapse/push/push_tools.py | 33 +-- synapse/storage/database.py | 1 + synapse/storage/databases/main/__init__.py | 4 +- .../databases/main/event_push_actions.py | 256 +++++++++++++----- synapse/storage/databases/main/push_rule.py | 2 +- synapse/storage/databases/main/receipts.py | 74 ++--- .../main/delta/40/event_push_summary.sql | 7 +- .../delta/71/02event_push_summary_unique.sql | 18 ++ tests/push/test_http.py | 16 +- .../replication/slave/storage/test_events.py | 23 +- tests/storage/test_event_push_actions.py | 24 +- 14 files changed, 322 insertions(+), 151 deletions(-) create mode 100644 changelog.d/13005.misc create mode 100644 synapse/storage/schema/main/delta/71/02event_push_summary_unique.sql diff --git a/changelog.d/13005.misc b/changelog.d/13005.misc new file mode 100644 index 000000000..3bb51962e --- /dev/null +++ b/changelog.d/13005.misc @@ -0,0 +1 @@ +Reduce DB usage of `/sync` when a large number of unread messages have recently been sent in a room. diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py index 9586086c0..9c06c837d 100755 --- a/synapse/_scripts/synapse_port_db.py +++ b/synapse/_scripts/synapse_port_db.py @@ -58,6 +58,9 @@ from synapse.storage.databases.main.client_ips import ClientIpBackgroundUpdateSt from synapse.storage.databases.main.deviceinbox import DeviceInboxBackgroundUpdateStore from synapse.storage.databases.main.devices import DeviceBackgroundUpdateStore from synapse.storage.databases.main.end_to_end_keys import EndToEndKeyBackgroundStore +from synapse.storage.databases.main.event_push_actions import ( + EventPushActionsWorkerStore, +) from synapse.storage.databases.main.events_bg_updates import ( EventsBackgroundUpdatesStore, ) @@ -199,6 +202,7 @@ R = TypeVar("R") class Store( + EventPushActionsWorkerStore, ClientIpBackgroundUpdateStore, DeviceInboxBackgroundUpdateStore, DeviceBackgroundUpdateStore, diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index af19c513b..6ad053f67 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -18,7 +18,7 @@ from typing import TYPE_CHECKING, Any, Dict, FrozenSet, List, Optional, Set, Tup import attr from prometheus_client import Counter -from synapse.api.constants import EventTypes, Membership, ReceiptTypes +from synapse.api.constants import EventTypes, Membership from synapse.api.filtering import FilterCollection from synapse.api.presence import UserPresenceState from synapse.api.room_versions import KNOWN_ROOM_VERSIONS @@ -1054,14 +1054,10 @@ class SyncHandler: self, room_id: str, sync_config: SyncConfig ) -> NotifCounts: with Measure(self.clock, "unread_notifs_for_room_id"): - last_unread_event_id = await self.store.get_last_receipt_event_id_for_user( - user_id=sync_config.user.to_string(), - room_id=room_id, - receipt_types=(ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE), - ) return await self.store.get_unread_event_push_actions_by_room_for_user( - room_id, sync_config.user.to_string(), last_unread_event_id + room_id, + 
sync_config.user.to_string(), ) async def generate_sync_result( diff --git a/synapse/push/push_tools.py b/synapse/push/push_tools.py index 8397229cc..6661887d9 100644 --- a/synapse/push/push_tools.py +++ b/synapse/push/push_tools.py @@ -13,7 +13,6 @@ # limitations under the License. from typing import Dict -from synapse.api.constants import ReceiptTypes from synapse.events import EventBase from synapse.push.presentable_names import calculate_room_name, name_from_member_event from synapse.storage.controllers import StorageControllers @@ -24,30 +23,24 @@ async def get_badge_count(store: DataStore, user_id: str, group_by_room: bool) - invites = await store.get_invited_rooms_for_local_user(user_id) joins = await store.get_rooms_for_user(user_id) - my_receipts_by_room = await store.get_receipts_for_user( - user_id, (ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE) - ) - badge = len(invites) for room_id in joins: - if room_id in my_receipts_by_room: - last_unread_event_id = my_receipts_by_room[room_id] - - notifs = await ( - store.get_unread_event_push_actions_by_room_for_user( - room_id, user_id, last_unread_event_id - ) + notifs = await ( + store.get_unread_event_push_actions_by_room_for_user( + room_id, + user_id, ) - if notifs.notify_count == 0: - continue + ) + if notifs.notify_count == 0: + continue - if group_by_room: - # return one badge count per conversation - badge += 1 - else: - # increment the badge count by the number of unread messages in the room - badge += notifs.notify_count + if group_by_room: + # return one badge count per conversation + badge += 1 + else: + # increment the badge count by the number of unread messages in the room + badge += notifs.notify_count return badge diff --git a/synapse/storage/database.py b/synapse/storage/database.py index a78d68a9d..e8c63cf56 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -92,6 +92,7 @@ UNIQUE_INDEX_BACKGROUND_UPDATES = { "event_search": "event_search_event_id_idx", "local_media_repository_thumbnails": "local_media_repository_thumbnails_method_idx", "remote_media_cache_thumbnails": "remote_media_repository_thumbnails_method_idx", + "event_push_summary": "event_push_summary_unique_index", } diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index 9121badb3..cb3d1242b 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -104,13 +104,14 @@ class DataStore( PusherStore, PushRuleStore, ApplicationServiceTransactionStore, + EventPushActionsStore, + ServerMetricsStore, ReceiptsStore, EndToEndKeyStore, EndToEndRoomKeyStore, SearchStore, TagsStore, AccountDataStore, - EventPushActionsStore, OpenIdStore, ClientIpWorkerStore, DeviceStore, @@ -124,7 +125,6 @@ class DataStore( UIAuthStore, EventForwardExtremitiesStore, CacheInvalidationWorkerStore, - ServerMetricsStore, LockStore, SessionStore, ): diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index b01997935..ae705889a 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -17,6 +17,7 @@ from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union, cast import attr +from synapse.api.constants import ReceiptTypes from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.storage._base import SQLBaseStore, db_to_json from synapse.storage.database import ( @@ -24,6 +25,8 @@ from 
synapse.storage.database import ( LoggingDatabaseConnection, LoggingTransaction, ) +from synapse.storage.databases.main.events_worker import EventsWorkerStore +from synapse.storage.databases.main.receipts import ReceiptsWorkerStore from synapse.util import json_encoder from synapse.util.caches.descriptors import cached @@ -79,15 +82,15 @@ class UserPushAction(EmailPushAction): profile_tag: str -@attr.s(slots=True, frozen=True, auto_attribs=True) +@attr.s(slots=True, auto_attribs=True) class NotifCounts: """ The per-user, per-room count of notifications. Used by sync and push. """ - notify_count: int - unread_count: int - highlight_count: int + notify_count: int = 0 + unread_count: int = 0 + highlight_count: int = 0 def _serialize_action(actions: List[Union[dict, str]], is_highlight: bool) -> str: @@ -119,7 +122,7 @@ def _deserialize_action(actions: str, is_highlight: bool) -> List[Union[dict, st return DEFAULT_NOTIF_ACTION -class EventPushActionsWorkerStore(SQLBaseStore): +class EventPushActionsWorkerStore(ReceiptsWorkerStore, EventsWorkerStore, SQLBaseStore): def __init__( self, database: DatabasePool, @@ -148,12 +151,20 @@ class EventPushActionsWorkerStore(SQLBaseStore): self._rotate_notifs, 30 * 60 * 1000 ) - @cached(num_args=3, tree=True, max_entries=5000) + self.db_pool.updates.register_background_index_update( + "event_push_summary_unique_index", + index_name="event_push_summary_unique_index", + table="event_push_summary", + columns=["user_id", "room_id"], + unique=True, + replaces_index="event_push_summary_user_rm", + ) + + @cached(tree=True, max_entries=5000) async def get_unread_event_push_actions_by_room_for_user( self, room_id: str, user_id: str, - last_read_event_id: Optional[str], ) -> NotifCounts: """Get the notification count, the highlight count and the unread message count for a given user in a given room after the given read receipt. @@ -165,8 +176,6 @@ class EventPushActionsWorkerStore(SQLBaseStore): Args: room_id: The room to retrieve the counts in. user_id: The user to retrieve the counts for. - last_read_event_id: The event associated with the latest read receipt for - this user in this room. None if no receipt for this user in this room. Returns A dict containing the counts mentioned earlier in this docstring, @@ -178,7 +187,6 @@ class EventPushActionsWorkerStore(SQLBaseStore): self._get_unread_counts_by_receipt_txn, room_id, user_id, - last_read_event_id, ) def _get_unread_counts_by_receipt_txn( @@ -186,16 +194,17 @@ class EventPushActionsWorkerStore(SQLBaseStore): txn: LoggingTransaction, room_id: str, user_id: str, - last_read_event_id: Optional[str], ) -> NotifCounts: - stream_ordering = None + result = self.get_last_receipt_for_user_txn( + txn, + user_id, + room_id, + receipt_types=(ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE), + ) - if last_read_event_id is not None: - stream_ordering = self.get_stream_id_for_event_txn( # type: ignore[attr-defined] - txn, - last_read_event_id, - allow_none=True, - ) + stream_ordering = None + if result: + _, stream_ordering = result if stream_ordering is None: # Either last_read_event_id is None, or it's an event we don't have (e.g. 
@@ -218,49 +227,95 @@ class EventPushActionsWorkerStore(SQLBaseStore): def _get_unread_counts_by_pos_txn( self, txn: LoggingTransaction, room_id: str, user_id: str, stream_ordering: int ) -> NotifCounts: - sql = ( - "SELECT" - " COUNT(CASE WHEN notif = 1 THEN 1 END)," - " COUNT(CASE WHEN highlight = 1 THEN 1 END)," - " COUNT(CASE WHEN unread = 1 THEN 1 END)" - " FROM event_push_actions ea" - " WHERE user_id = ?" - " AND room_id = ?" - " AND stream_ordering > ?" - ) + """Get the number of unread messages for a user/room that have happened + since the given stream ordering. + """ - txn.execute(sql, (user_id, room_id, stream_ordering)) - row = txn.fetchone() - - (notif_count, highlight_count, unread_count) = (0, 0, 0) - - if row: - (notif_count, highlight_count, unread_count) = row + counts = NotifCounts() + # First we pull the counts from the summary table txn.execute( """ - SELECT notif_count, unread_count FROM event_push_summary + SELECT stream_ordering, notif_count, COALESCE(unread_count, 0) + FROM event_push_summary WHERE room_id = ? AND user_id = ? AND stream_ordering > ? """, (room_id, user_id, stream_ordering), ) row = txn.fetchone() + summary_stream_ordering = 0 if row: - notif_count += row[0] + summary_stream_ordering = row[0] + counts.notify_count += row[1] + counts.unread_count += row[2] - if row[1] is not None: - # The unread_count column of event_push_summary is NULLable, so we need - # to make sure we don't try increasing the unread counts if it's NULL - # for this row. - unread_count += row[1] + # Next we need to count highlights, which aren't summarized + sql = """ + SELECT COUNT(*) FROM event_push_actions + WHERE user_id = ? + AND room_id = ? + AND stream_ordering > ? + AND highlight = 1 + """ + txn.execute(sql, (user_id, room_id, stream_ordering)) + row = txn.fetchone() + if row: + counts.highlight_count += row[0] - return NotifCounts( - notify_count=notif_count, - unread_count=unread_count, - highlight_count=highlight_count, + # Finally we need to count push actions that haven't been summarized + # yet. + # We only want to pull out push actions that we haven't summarized yet. + stream_ordering = max(stream_ordering, summary_stream_ordering) + notify_count, unread_count = self._get_notif_unread_count_for_user_room( + txn, room_id, user_id, stream_ordering ) + counts.notify_count += notify_count + counts.unread_count += unread_count + + return counts + + def _get_notif_unread_count_for_user_room( + self, + txn: LoggingTransaction, + room_id: str, + user_id: str, + stream_ordering: int, + max_stream_ordering: Optional[int] = None, + ) -> Tuple[int, int]: + """Returns the notify and unread counts from `event_push_actions` for + the given user/room in the given range. + + Does not consult `event_push_summary` table, which may include push + actions that have been deleted from `event_push_actions` table. + """ + + clause = "" + args = [user_id, room_id, stream_ordering] + if max_stream_ordering is not None: + clause = "AND ea.stream_ordering <= ?" + args.append(max_stream_ordering) + + sql = f""" + SELECT + COUNT(CASE WHEN notif = 1 THEN 1 END), + COUNT(CASE WHEN unread = 1 THEN 1 END) + FROM event_push_actions ea + WHERE user_id = ? + AND room_id = ? + AND ea.stream_ordering > ? 
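+                    -- (optional upper bound below; only set when summarising a
+                    -- fixed range into event_push_summary)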
+ {clause} + """ + + txn.execute(sql, args) + row = txn.fetchone() + + if row: + return cast(Tuple[int, int], row) + + return 0, 0 + async def get_push_action_users_in_range( self, min_stream_ordering: int, max_stream_ordering: int ) -> List[str]: @@ -754,6 +809,8 @@ class EventPushActionsWorkerStore(SQLBaseStore): if caught_up: break await self.hs.get_clock().sleep(self._rotate_delay) + + await self._remove_old_push_actions_that_have_rotated() finally: self._doing_notif_rotation = False @@ -782,20 +839,16 @@ class EventPushActionsWorkerStore(SQLBaseStore): stream_row = txn.fetchone() if stream_row: (offset_stream_ordering,) = stream_row - assert self.stream_ordering_day_ago is not None - rotate_to_stream_ordering = min( - self.stream_ordering_day_ago, offset_stream_ordering - ) - caught_up = offset_stream_ordering >= self.stream_ordering_day_ago + rotate_to_stream_ordering = offset_stream_ordering + caught_up = False else: - rotate_to_stream_ordering = self.stream_ordering_day_ago + rotate_to_stream_ordering = self._stream_id_gen.get_current_token() caught_up = True logger.info("Rotating notifications up to: %s", rotate_to_stream_ordering) self._rotate_notifs_before_txn(txn, rotate_to_stream_ordering) - # We have caught up iff we were limited by `stream_ordering_day_ago` return caught_up def _rotate_notifs_before_txn( @@ -819,7 +872,6 @@ class EventPushActionsWorkerStore(SQLBaseStore): max(stream_ordering) as stream_ordering FROM event_push_actions WHERE ? <= stream_ordering AND stream_ordering < ? - AND highlight = 0 AND %s = 1 GROUP BY user_id, room_id ) AS upd @@ -914,19 +966,73 @@ class EventPushActionsWorkerStore(SQLBaseStore): ), ) - txn.execute( - "DELETE FROM event_push_actions" - " WHERE ? <= stream_ordering AND stream_ordering < ? AND highlight = 0", - (old_rotate_stream_ordering, rotate_to_stream_ordering), - ) - - logger.info("Rotating notifications, deleted %s push actions", txn.rowcount) - txn.execute( "UPDATE event_push_summary_stream_ordering SET stream_ordering = ?", (rotate_to_stream_ordering,), ) + async def _remove_old_push_actions_that_have_rotated( + self, + ) -> None: + """Clear out old push actions that have been summarized.""" + + # We want to clear out anything that older than a day that *has* already + # been rotated. + rotated_upto_stream_ordering = await self.db_pool.simple_select_one_onecol( + table="event_push_summary_stream_ordering", + keyvalues={}, + retcol="stream_ordering", + ) + + max_stream_ordering_to_delete = min( + rotated_upto_stream_ordering, self.stream_ordering_day_ago + ) + + def remove_old_push_actions_that_have_rotated_txn( + txn: LoggingTransaction, + ) -> bool: + # We don't want to clear out too much at a time, so we bound our + # deletes. + batch_size = 10000 + + txn.execute( + """ + SELECT stream_ordering FROM event_push_actions + WHERE stream_ordering < ? AND highlight = 0 + ORDER BY stream_ordering ASC LIMIT 1 OFFSET ? + """, + ( + max_stream_ordering_to_delete, + batch_size, + ), + ) + stream_row = txn.fetchone() + + if stream_row: + (stream_ordering,) = stream_row + else: + stream_ordering = max_stream_ordering_to_delete + + txn.execute( + """ + DELETE FROM event_push_actions + WHERE stream_ordering < ? 
AND highlight = 0 + """, + (stream_ordering,), + ) + + logger.info("Rotating notifications, deleted %s push actions", txn.rowcount) + + return txn.rowcount < batch_size + + while True: + done = await self.db_pool.runInteraction( + "_remove_old_push_actions_that_have_rotated", + remove_old_push_actions_that_have_rotated_txn, + ) + if done: + break + def _remove_old_push_actions_before_txn( self, txn: LoggingTransaction, room_id: str, user_id: str, stream_ordering: int ) -> None: @@ -965,12 +1071,26 @@ class EventPushActionsWorkerStore(SQLBaseStore): (user_id, room_id, stream_ordering, self.stream_ordering_month_ago), ) - txn.execute( - """ - DELETE FROM event_push_summary - WHERE room_id = ? AND user_id = ? AND stream_ordering <= ? - """, - (room_id, user_id, stream_ordering), + old_rotate_stream_ordering = self.db_pool.simple_select_one_onecol_txn( + txn, + table="event_push_summary_stream_ordering", + keyvalues={}, + retcol="stream_ordering", + ) + + notif_count, unread_count = self._get_notif_unread_count_for_user_room( + txn, room_id, user_id, stream_ordering, old_rotate_stream_ordering + ) + + self.db_pool.simple_upsert_txn( + txn, + table="event_push_summary", + keyvalues={"room_id": room_id, "user_id": user_id}, + values={ + "notif_count": notif_count, + "unread_count": unread_count, + "stream_ordering": old_rotate_stream_ordering, + }, ) diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index d5aefe02b..86649c1e6 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -110,9 +110,9 @@ def _load_rules( # the abstract methods being implemented. class PushRulesWorkerStore( ApplicationServiceWorkerStore, - ReceiptsWorkerStore, PusherWorkerStore, RoomMemberWorkerStore, + ReceiptsWorkerStore, EventsWorkerStore, SQLBaseStore, metaclass=abc.ABCMeta, diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index b6106affa..bec6d6057 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -118,7 +118,7 @@ class ReceiptsWorkerStore(SQLBaseStore): return self._receipts_id_gen.get_current_token() async def get_last_receipt_event_id_for_user( - self, user_id: str, room_id: str, receipt_types: Iterable[str] + self, user_id: str, room_id: str, receipt_types: Collection[str] ) -> Optional[str]: """ Fetch the event ID for the latest receipt in a room with one of the given receipt types. @@ -126,58 +126,63 @@ class ReceiptsWorkerStore(SQLBaseStore): Args: user_id: The user to fetch receipts for. room_id: The room ID to fetch the receipt for. - receipt_type: The receipt types to fetch. Earlier receipt types - are given priority if multiple receipts point to the same event. + receipt_type: The receipt types to fetch. Returns: The latest receipt, if one exists. 
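+            That is, the ID of the event the latest receipt refers to, or
+            None if the user has no receipt of the given types in the room.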
""" - latest_event_id: Optional[str] = None - latest_stream_ordering = 0 - for receipt_type in receipt_types: - result = await self._get_last_receipt_event_id_for_user( - user_id, room_id, receipt_type - ) - if result is None: - continue - event_id, stream_ordering = result + result = await self.db_pool.runInteraction( + "get_last_receipt_event_id_for_user", + self.get_last_receipt_for_user_txn, + user_id, + room_id, + receipt_types, + ) + if not result: + return None - if latest_event_id is None or latest_stream_ordering < stream_ordering: - latest_event_id = event_id - latest_stream_ordering = stream_ordering + event_id, _ = result + return event_id - return latest_event_id - - @cached() - async def _get_last_receipt_event_id_for_user( - self, user_id: str, room_id: str, receipt_type: str + def get_last_receipt_for_user_txn( + self, + txn: LoggingTransaction, + user_id: str, + room_id: str, + receipt_types: Collection[str], ) -> Optional[Tuple[str, int]]: """ - Fetch the event ID and stream ordering for the latest receipt. + Fetch the event ID and stream_ordering for the latest receipt in a room + with one of the given receipt types. Args: user_id: The user to fetch receipts for. room_id: The room ID to fetch the receipt for. - receipt_type: The receipt type to fetch. + receipt_type: The receipt types to fetch. Returns: - The event ID and stream ordering of the latest receipt, if one exists; - otherwise `None`. + The latest receipt, if one exists. """ - sql = """ + + clause, args = make_in_list_sql_clause( + self.database_engine, "receipt_type", receipt_types + ) + + sql = f""" SELECT event_id, stream_ordering FROM receipts_linearized INNER JOIN events USING (room_id, event_id) - WHERE user_id = ? + WHERE {clause} + AND user_id = ? AND room_id = ? - AND receipt_type = ? + ORDER BY stream_ordering DESC + LIMIT 1 """ - def f(txn: LoggingTransaction) -> Optional[Tuple[str, int]]: - txn.execute(sql, (user_id, room_id, receipt_type)) - return cast(Optional[Tuple[str, int]], txn.fetchone()) + args.extend((user_id, room_id)) + txn.execute(sql, args) - return await self.db_pool.runInteraction("get_own_receipt_for_user", f) + return cast(Optional[Tuple[str, int]], txn.fetchone()) async def get_receipts_for_user( self, user_id: str, receipt_types: Iterable[str] @@ -577,8 +582,11 @@ class ReceiptsWorkerStore(SQLBaseStore): ) -> None: self._get_receipts_for_user_with_orderings.invalidate((user_id, receipt_type)) self._get_linearized_receipts_for_room.invalidate((room_id,)) - self._get_last_receipt_event_id_for_user.invalidate( - (user_id, room_id, receipt_type) + + # We use this method to invalidate so that we don't end up with circular + # dependencies between the receipts and push action stores. + self._attempt_to_invalidate_cache( + "get_unread_event_push_actions_by_room_for_user", (room_id,) ) def process_replication_rows( diff --git a/synapse/storage/schema/main/delta/40/event_push_summary.sql b/synapse/storage/schema/main/delta/40/event_push_summary.sql index 3918f0b79..499bf6017 100644 --- a/synapse/storage/schema/main/delta/40/event_push_summary.sql +++ b/synapse/storage/schema/main/delta/40/event_push_summary.sql @@ -13,9 +13,10 @@ * limitations under the License. */ --- Aggregate of old notification counts that have been deleted out of the --- main event_push_actions table. This count does not include those that were --- highlights, as they remain in the event_push_actions table. 
+-- Aggregate of notification counts up to `stream_ordering`, including those +-- that may have been deleted out of the main event_push_actions table. This +-- count does not include those that were highlights, as they remain in the +-- event_push_actions table. CREATE TABLE event_push_summary ( user_id TEXT NOT NULL, room_id TEXT NOT NULL, diff --git a/synapse/storage/schema/main/delta/71/02event_push_summary_unique.sql b/synapse/storage/schema/main/delta/71/02event_push_summary_unique.sql new file mode 100644 index 000000000..9cdcea21a --- /dev/null +++ b/synapse/storage/schema/main/delta/71/02event_push_summary_unique.sql @@ -0,0 +1,18 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Add a unique index to `event_push_summary` +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES + (7002, 'event_push_summary_unique_index', '{}'); diff --git a/tests/push/test_http.py b/tests/push/test_http.py index ba158f5d9..d9c68cdd2 100644 --- a/tests/push/test_http.py +++ b/tests/push/test_http.py @@ -577,7 +577,7 @@ class HTTPPusherTests(HomeserverTestCase): # Carry out our option-value specific test # # This push should still only contain an unread count of 1 (for 1 unread room) - self._check_push_attempt(6, 1) + self._check_push_attempt(7, 1) @override_config({"push": {"group_unread_count_by_room": False}}) def test_push_unread_count_message_count(self) -> None: @@ -591,7 +591,7 @@ class HTTPPusherTests(HomeserverTestCase): # # We're counting every unread message, so there should now be 3 since the # last read receipt - self._check_push_attempt(6, 3) + self._check_push_attempt(7, 3) def _test_push_unread_count(self) -> None: """ @@ -641,18 +641,18 @@ class HTTPPusherTests(HomeserverTestCase): response = self.helper.send( room_id, body="Hello there!", tok=other_access_token ) - # To get an unread count, the user who is getting notified has to have a read - # position in the room. We'll set the read position to this event in a moment + first_message_event_id = response["event_id"] expected_push_attempts = 1 - self._check_push_attempt(expected_push_attempts, 0) + self._check_push_attempt(expected_push_attempts, 1) self._send_read_request(access_token, first_message_event_id, room_id) - # Unread count has not changed. Therefore, ensure that read request does not - # trigger a push notification. - self.assertEqual(len(self.push_attempts), 1) + # Unread count has changed. Therefore, ensure that read request triggers + # a push notification. 
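+        # (With the event_push_summary changes, an unread count exists even
+        # before the user has a read receipt, so this receipt genuinely
+        # changes the badge.)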
+ expected_push_attempts += 1 + self.assertEqual(len(self.push_attempts), expected_push_attempts) # Send another message response2 = self.helper.send( diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py index 6d3d4afe5..531a0db2d 100644 --- a/tests/replication/slave/storage/test_events.py +++ b/tests/replication/slave/storage/test_events.py @@ -15,7 +15,9 @@ import logging from typing import Iterable, Optional from canonicaljson import encode_canonical_json +from parameterized import parameterized +from synapse.api.constants import ReceiptTypes from synapse.api.room_versions import RoomVersions from synapse.events import FrozenEvent, _EventInternalMetadata, make_event_from_dict from synapse.handlers.room import RoomEventSource @@ -156,17 +158,26 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase): ], ) - def test_push_actions_for_user(self): + @parameterized.expand([(True,), (False,)]) + def test_push_actions_for_user(self, send_receipt: bool): self.persist(type="m.room.create", key="", creator=USER_ID) - self.persist(type="m.room.join", key=USER_ID, membership="join") + self.persist(type="m.room.member", key=USER_ID, membership="join") self.persist( - type="m.room.join", sender=USER_ID, key=USER_ID_2, membership="join" + type="m.room.member", sender=USER_ID, key=USER_ID_2, membership="join" ) event1 = self.persist(type="m.room.message", msgtype="m.text", body="hello") self.replicate() + + if send_receipt: + self.get_success( + self.master_store.insert_receipt( + ROOM_ID, ReceiptTypes.READ, USER_ID_2, [event1.event_id], {} + ) + ) + self.check( "get_unread_event_push_actions_by_room_for_user", - [ROOM_ID, USER_ID_2, event1.event_id], + [ROOM_ID, USER_ID_2], NotifCounts(highlight_count=0, unread_count=0, notify_count=0), ) @@ -179,7 +190,7 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase): self.replicate() self.check( "get_unread_event_push_actions_by_room_for_user", - [ROOM_ID, USER_ID_2, event1.event_id], + [ROOM_ID, USER_ID_2], NotifCounts(highlight_count=0, unread_count=0, notify_count=1), ) @@ -194,7 +205,7 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase): self.replicate() self.check( "get_unread_event_push_actions_by_room_for_user", - [ROOM_ID, USER_ID_2, event1.event_id], + [ROOM_ID, USER_ID_2], NotifCounts(highlight_count=1, unread_count=0, notify_count=2), ) diff --git a/tests/storage/test_event_push_actions.py b/tests/storage/test_event_push_actions.py index 0f9add484..4273524c4 100644 --- a/tests/storage/test_event_push_actions.py +++ b/tests/storage/test_event_push_actions.py @@ -51,10 +51,16 @@ class EventPushActionsStoreTestCase(HomeserverTestCase): room_id = "!foo:example.com" user_id = "@user1235:example.com" + last_read_stream_ordering = [0] + def _assert_counts(noitf_count, highlight_count): counts = self.get_success( self.store.db_pool.runInteraction( - "", self.store._get_unread_counts_by_pos_txn, room_id, user_id, 0 + "", + self.store._get_unread_counts_by_pos_txn, + room_id, + user_id, + last_read_stream_ordering[0], ) ) self.assertEqual( @@ -98,6 +104,7 @@ class EventPushActionsStoreTestCase(HomeserverTestCase): ) def _mark_read(stream, depth): + last_read_stream_ordering[0] = stream self.get_success( self.store.db_pool.runInteraction( "", @@ -144,8 +151,19 @@ class EventPushActionsStoreTestCase(HomeserverTestCase): _assert_counts(1, 1) _rotate(9) _assert_counts(1, 1) - _rotate(10) - _assert_counts(1, 1) + + # Check that adding another notification and rotating after highlight + # 
works. + _inject_actions(10, PlAIN_NOTIF) + _rotate(11) + _assert_counts(2, 1) + + # Check that sending read receipts at different points results in the + # right counts. + _mark_read(8, 8) + _assert_counts(1, 0) + _mark_read(10, 10) + _assert_counts(0, 0) def test_find_first_stream_ordering_after_ts(self): def add_event(so, ts): From 97e9fbe1b233d94325c23b9a27bc9d2d9d615f9e Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 15 Jun 2022 16:20:04 +0100 Subject: [PATCH 49/85] Type annotations in `synapse.databases.main.devices` (#13025) Co-authored-by: Patrick Cloke --- changelog.d/13025.misc | 1 + mypy.ini | 1 - synapse/replication/slave/storage/devices.py | 3 +- synapse/storage/databases/main/__init__.py | 1 + synapse/storage/databases/main/devices.py | 51 +++++++++++++------- 5 files changed, 36 insertions(+), 21 deletions(-) create mode 100644 changelog.d/13025.misc diff --git a/changelog.d/13025.misc b/changelog.d/13025.misc new file mode 100644 index 000000000..7cb0d174b --- /dev/null +++ b/changelog.d/13025.misc @@ -0,0 +1 @@ +Add type annotations to `synapse.storage.databases.main.devices`. diff --git a/mypy.ini b/mypy.ini index 7973f2ac0..c5130feae 100644 --- a/mypy.ini +++ b/mypy.ini @@ -27,7 +27,6 @@ exclude = (?x) ^( |synapse/storage/databases/__init__.py |synapse/storage/databases/main/cache.py - |synapse/storage/databases/main/devices.py |synapse/storage/schema/ |tests/api/test_auth.py diff --git a/synapse/replication/slave/storage/devices.py b/synapse/replication/slave/storage/devices.py index 30717c2bd..a48cc0206 100644 --- a/synapse/replication/slave/storage/devices.py +++ b/synapse/replication/slave/storage/devices.py @@ -19,13 +19,12 @@ from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker from synapse.replication.tcp.streams._base import DeviceListsStream, UserSignatureStream from synapse.storage.database import DatabasePool, LoggingDatabaseConnection from synapse.storage.databases.main.devices import DeviceWorkerStore -from synapse.storage.databases.main.end_to_end_keys import EndToEndKeyWorkerStore if TYPE_CHECKING: from synapse.server import HomeServer -class SlavedDeviceStore(EndToEndKeyWorkerStore, DeviceWorkerStore, BaseSlavedStore): +class SlavedDeviceStore(DeviceWorkerStore, BaseSlavedStore): def __init__( self, database: DatabasePool, diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index cb3d1242b..57aaf778e 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -195,6 +195,7 @@ class DataStore( self._min_stream_order_on_start = self.get_room_min_stream_ordering() def get_device_stream_token(self) -> int: + # TODO: shouldn't this be moved to `DeviceWorkerStore`? 
return self._device_list_id_gen.get_current_token() async def get_users(self) -> List[JsonDict]: diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 2414a7dc3..03d1334e0 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -28,6 +28,8 @@ from typing import ( cast, ) +from typing_extensions import Literal + from synapse.api.constants import EduTypes from synapse.api.errors import Codes, StoreError from synapse.logging.opentracing import ( @@ -44,6 +46,8 @@ from synapse.storage.database import ( LoggingTransaction, make_tuple_comparison_clause, ) +from synapse.storage.databases.main.end_to_end_keys import EndToEndKeyWorkerStore +from synapse.storage.types import Cursor from synapse.types import JsonDict, get_verify_key_from_cross_signing_key from synapse.util import json_decoder, json_encoder from synapse.util.caches.descriptors import cached, cachedList @@ -65,7 +69,7 @@ DROP_DEVICE_LIST_STREAMS_NON_UNIQUE_INDEXES = ( BG_UPDATE_REMOVE_DUP_OUTBOUND_POKES = "remove_dup_outbound_pokes" -class DeviceWorkerStore(SQLBaseStore): +class DeviceWorkerStore(EndToEndKeyWorkerStore): def __init__( self, database: DatabasePool, @@ -74,7 +78,9 @@ class DeviceWorkerStore(SQLBaseStore): ): super().__init__(database, db_conn, hs) - device_list_max = self._device_list_id_gen.get_current_token() + # Type-ignore: _device_list_id_gen is mixed in from either DataStore (as a + # StreamIdGenerator) or SlavedDataStore (as a SlavedIdTracker). + device_list_max = self._device_list_id_gen.get_current_token() # type: ignore[attr-defined] device_list_prefill, min_device_list_id = self.db_pool.get_cache_dict( db_conn, "device_lists_stream", @@ -339,8 +345,9 @@ class DeviceWorkerStore(SQLBaseStore): # following this stream later. last_processed_stream_id = from_stream_id - query_map = {} - cross_signing_keys_by_user = {} + # A map of (user ID, device ID) to (stream ID, context). + query_map: Dict[Tuple[str, str], Tuple[int, Optional[str]]] = {} + cross_signing_keys_by_user: Dict[str, Dict[str, object]] = {} for user_id, device_id, update_stream_id, update_context in updates: # Calculate the remaining length budget. # Note that, for now, each entry in `cross_signing_keys_by_user` @@ -596,7 +603,7 @@ class DeviceWorkerStore(SQLBaseStore): txn=txn, table="device_lists_outbound_last_success", key_names=("destination", "user_id"), - key_values=((destination, user_id) for user_id, _ in rows), + key_values=[(destination, user_id) for user_id, _ in rows], value_names=("stream_id",), value_values=((stream_id,) for _, stream_id in rows), ) @@ -621,7 +628,9 @@ class DeviceWorkerStore(SQLBaseStore): The new stream ID. """ - async with self._device_list_id_gen.get_next() as stream_id: + # TODO: this looks like it's _writing_. Should this be on DeviceStore rather + # than DeviceWorkerStore? 
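+        # (As above, `_device_list_id_gen` is mixed in by the concrete store,
+        # so mypy cannot see it on this class; hence the ignore below.)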
+ async with self._device_list_id_gen.get_next() as stream_id: # type: ignore[attr-defined] await self.db_pool.runInteraction( "add_user_sig_change_to_streams", self._add_user_signature_change_txn, @@ -686,7 +695,7 @@ class DeviceWorkerStore(SQLBaseStore): } - users_needing_resync user_ids_not_in_cache = user_ids - user_ids_in_cache - results = {} + results: Dict[str, Dict[str, JsonDict]] = {} for user_id, device_id in query_list: if user_id not in user_ids_in_cache: continue @@ -727,7 +736,7 @@ class DeviceWorkerStore(SQLBaseStore): def get_cached_device_list_changes( self, from_key: int, - ) -> Optional[Set[str]]: + ) -> Optional[List[str]]: """Get set of users whose devices have changed since `from_key`, or None if that information is not in our cache. """ @@ -737,7 +746,7 @@ class DeviceWorkerStore(SQLBaseStore): async def get_users_whose_devices_changed( self, from_key: int, - user_ids: Optional[Iterable[str]] = None, + user_ids: Optional[Collection[str]] = None, to_key: Optional[int] = None, ) -> Set[str]: """Get set of users whose devices have changed since `from_key` that @@ -757,6 +766,7 @@ class DeviceWorkerStore(SQLBaseStore): """ # Get set of users who *may* have changed. Users not in the returned # list have definitely not changed. + user_ids_to_check: Optional[Collection[str]] if user_ids is None: # Get set of all users that have had device list changes since 'from_key' user_ids_to_check = self._device_list_stream_cache.get_all_entities_changed( @@ -772,7 +782,7 @@ class DeviceWorkerStore(SQLBaseStore): return set() def _get_users_whose_devices_changed_txn(txn: LoggingTransaction) -> Set[str]: - changes = set() + changes: Set[str] = set() stream_id_where_clause = "stream_id > ?" sql_args = [from_key] @@ -788,6 +798,9 @@ class DeviceWorkerStore(SQLBaseStore): """ # Query device changes with a batch of users at a time + # Assertion for mypy's benefit; see also + # https://mypy.readthedocs.io/en/stable/common_issues.html#narrowing-and-inner-functions + assert user_ids_to_check is not None for chunk in batch_iter(user_ids_to_check, 100): clause, args = make_in_list_sql_clause( txn.database_engine, "user_id", chunk @@ -854,7 +867,9 @@ class DeviceWorkerStore(SQLBaseStore): if last_id == current_id: return [], current_id, False - def _get_all_device_list_changes_for_remotes(txn): + def _get_all_device_list_changes_for_remotes( + txn: Cursor, + ) -> Tuple[List[Tuple[int, tuple]], int, bool]: # This query Does The Right Thing where it'll correctly apply the # bounds to the inner queries. sql = """ @@ -913,7 +928,7 @@ class DeviceWorkerStore(SQLBaseStore): desc="get_device_list_last_stream_id_for_remotes", ) - results = {user_id: None for user_id in user_ids} + results: Dict[str, Optional[str]] = {user_id: None for user_id in user_ids} results.update({row["user_id"]: row["stream_id"] for row in rows}) return results @@ -1337,9 +1352,9 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): # Map of (user_id, device_id) -> bool. If there is an entry that implies # the device exists. 
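+        # (Literal[True] rather than bool: entries are only ever set to True,
+        # so absence of a key, not a False value, is what means "unknown".)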
- self.device_id_exists_cache = LruCache( - cache_name="device_id_exists", max_size=10000 - ) + self.device_id_exists_cache: LruCache[ + Tuple[str, str], Literal[True] + ] = LruCache(cache_name="device_id_exists", max_size=10000) async def store_device( self, @@ -1651,7 +1666,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): context, ) - async with self._device_list_id_gen.get_next_mult( + async with self._device_list_id_gen.get_next_mult( # type: ignore[attr-defined] len(device_ids) ) as stream_ids: await self.db_pool.runInteraction( @@ -1704,7 +1719,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): device_ids: Iterable[str], hosts: Collection[str], stream_ids: List[int], - context: Dict[str, str], + context: Optional[Dict[str, str]], ) -> None: for host in hosts: txn.call_after( @@ -1875,7 +1890,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): [], ) - async with self._device_list_id_gen.get_next_mult(len(hosts)) as stream_ids: + async with self._device_list_id_gen.get_next_mult(len(hosts)) as stream_ids: # type: ignore[attr-defined] return await self.db_pool.runInteraction( "add_device_list_outbound_pokes", add_device_list_outbound_pokes_txn, From de334ac183f4b17c8dcec8dd70275dc5a19cebfa Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 15 Jun 2022 16:27:18 +0100 Subject: [PATCH 50/85] Add a CI job to check that schema deltas are in the correct folder. (#13063) --- .github/workflows/tests.yml | 10 ++- changelog.d/13063.misc | 1 + poetry.lock | 8 +-- pyproject.toml | 2 +- scripts-dev/check_schema_delta.py | 111 ++++++++++++++++++++++++++++++ 5 files changed, 126 insertions(+), 6 deletions(-) create mode 100644 changelog.d/13063.misc create mode 100755 scripts-dev/check_schema_delta.py diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 5355bfa20..193cb505c 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -19,6 +19,14 @@ jobs: - run: scripts-dev/generate_sample_config.sh --check - run: scripts-dev/config-lint.sh + check-schema-delta: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-python@v2 + - run: "pip install 'click==8.1.1' 'GitPython>=3.1.20'" + - run: scripts-dev/check_schema_delta.py --force-colors + lint: uses: "matrix-org/backend-meta/.github/workflows/python-poetry-ci.yml@v1" with: @@ -48,7 +56,7 @@ jobs: # Dummy step to gate other tests on without repeating the whole list linting-done: if: ${{ !cancelled() }} # Run this even if prior jobs were skipped - needs: [lint, lint-crlf, lint-newsfile, check-sampleconfig] + needs: [lint, lint-crlf, lint-newsfile, check-sampleconfig, check-schema-delta] runs-on: ubuntu-latest steps: - run: "true" diff --git a/changelog.d/13063.misc b/changelog.d/13063.misc new file mode 100644 index 000000000..167d6d2cd --- /dev/null +++ b/changelog.d/13063.misc @@ -0,0 +1 @@ +Add a CI job to check that schema deltas are in the correct folder. 
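The new `scripts-dev/check_schema_delta.py` (full listing in the hunks below) works by matching each changed path against a single regex and comparing the captured folder name with the current `SCHEMA_VERSION`. As a quick illustration of what the regex captures (a standalone sketch, not part of the patch), using the regex verbatim from the script and the delta file added earlier in this series:

```python
import re

# Regex copied verbatim from scripts-dev/check_schema_delta.py.
SCHEMA_FILE_REGEX = re.compile(r"^synapse/storage/schema/(.*)/delta/(.*)/(.*)$")

match = SCHEMA_FILE_REGEX.match(
    "synapse/storage/schema/main/delta/71/02event_push_summary_unique.sql"
)
assert match is not None
# The middle group is the version folder the script compares against
# SCHEMA_VERSION:
print(match.groups())  # ('main', '71', '02event_push_summary_unique.sql')
```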
diff --git a/poetry.lock b/poetry.lock index 8a54a939f..6a67f59bc 100644 --- a/poetry.lock +++ b/poetry.lock @@ -139,7 +139,7 @@ unicode_backport = ["unicodedata2"] [[package]] name = "click" -version = "8.1.0" +version = "8.1.1" description = "Composable command line interface toolkit" category = "dev" optional = false @@ -1563,7 +1563,7 @@ url_preview = ["lxml"] [metadata] lock-version = "1.1" python-versions = "^3.7.1" -content-hash = "c1bb4dabba1e87517e25ca7bf778e8082fbc960a51d83819aec3a154110a374f" +content-hash = "37bd4bccfdb5a869635f2135a85bea4a0729af7375a27de153b4fd9a4aebc195" [metadata.files] attrs = [ @@ -1684,8 +1684,8 @@ charset-normalizer = [ {file = "charset_normalizer-2.0.12-py3-none-any.whl", hash = "sha256:6881edbebdb17b39b4eaaa821b438bf6eddffb4468cf344f09f89def34a8b1df"}, ] click = [ - {file = "click-8.1.0-py3-none-any.whl", hash = "sha256:19a4baa64da924c5e0cd889aba8e947f280309f1a2ce0947a3e3a7bcb7cc72d6"}, - {file = "click-8.1.0.tar.gz", hash = "sha256:977c213473c7665d3aa092b41ff12063227751c41d7b17165013e10069cc5cd2"}, + {file = "click-8.1.1-py3-none-any.whl", hash = "sha256:5e0d195c2067da3136efb897449ec1e9e6c98282fbf30d7f9e164af9be901a6b"}, + {file = "click-8.1.1.tar.gz", hash = "sha256:7ab900e38149c9872376e8f9b5986ddcaf68c0f413cf73678a0bca5547e6f976"}, ] click-default-group = [ {file = "click-default-group-1.2.2.tar.gz", hash = "sha256:d9560e8e8dfa44b3562fbc9425042a0fd6d21956fcc2db0077f63f34253ab904"}, diff --git a/pyproject.toml b/pyproject.toml index 3c64e248a..85c2c9534 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -272,7 +272,7 @@ parameterized = ">=0.7.4" idna = ">=2.5" # The following are used by the release script -click = "==8.1.0" +click = "==8.1.1" # GitPython was == 3.1.14; bumped to 3.1.20, the first release with type hints. GitPython = ">=3.1.20" commonmark = "==0.9.1" diff --git a/scripts-dev/check_schema_delta.py b/scripts-dev/check_schema_delta.py new file mode 100755 index 000000000..32fe7f50d --- /dev/null +++ b/scripts-dev/check_schema_delta.py @@ -0,0 +1,111 @@ +#!/usr/bin/env python3 + +# Check that no schema deltas have been added to the wrong version. 
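+#
+# Intended to be run from a checkout of the repository; CI installs `click`
+# and `GitPython` first (see .github/workflows/tests.yml above) and the
+# script exits with a non-zero status if any new delta file lives outside
+# the folder for the current SCHEMA_VERSION.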
+ +import re +from typing import Any, Dict, List + +import click +import git + +SCHEMA_FILE_REGEX = re.compile(r"^synapse/storage/schema/(.*)/delta/(.*)/(.*)$") + + +@click.command() +@click.option( + "--force-colors", + is_flag=True, + flag_value=True, + default=None, + help="Always output ANSI colours", +) +def main(force_colors: bool) -> None: + click.secho( + "+++ Checking schema deltas are in the right folder", + fg="green", + bold=True, + color=force_colors, + ) + + click.secho("Updating repo...") + + repo = git.Repo() + repo.remote().fetch() + + click.secho("Getting current schema version...") + + r = repo.git.show("origin/develop:synapse/storage/schema/__init__.py") + + locals: Dict[str, Any] = {} + exec(r, locals) + current_schema_version = locals["SCHEMA_VERSION"] + + click.secho(f"Current schema version: {current_schema_version}") + + diffs: List[git.Diff] = repo.remote().refs.develop.commit.diff(None) + + seen_deltas = False + bad_files = [] + for diff in diffs: + if not diff.new_file or diff.b_path is None: + continue + + match = SCHEMA_FILE_REGEX.match(diff.b_path) + if not match: + continue + + seen_deltas = True + + _, delta_version, _ = match.groups() + + if delta_version != str(current_schema_version): + bad_files.append(diff.b_path) + + if not seen_deltas: + click.secho( + "No deltas found.", + fg="green", + bold=True, + color=force_colors, + ) + return + + if not bad_files: + click.secho( + f"All deltas are in the correct folder: {current_schema_version}!", + fg="green", + bold=True, + color=force_colors, + ) + return + + bad_files.sort() + + click.secho( + "Found deltas in the wrong folder!", + fg="red", + bold=True, + color=force_colors, + ) + + for f in bad_files: + click.secho( + f"\t{f}", + fg="red", + bold=True, + color=force_colors, + ) + + click.secho() + click.secho( + f"Please move these files to delta/{current_schema_version}/", + fg="red", + bold=True, + color=force_colors, + ) + + click.get_current_context().exit(1) + + +if __name__ == "__main__": + main() From e12ff697a49f94aac86aeaf3192b432042d199c5 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Wed, 15 Jun 2022 17:13:36 +0100 Subject: [PATCH 51/85] Sort failing jobs in Complement CI to the top of the logs to make them easier to read. (#13057) --- .ci/complement_package.gotpl | 93 ++++++++++++++++++++++++++++++++++++ .github/workflows/tests.yml | 10 ++++ changelog.d/13057.misc | 1 + 3 files changed, 104 insertions(+) create mode 100644 .ci/complement_package.gotpl create mode 100644 changelog.d/13057.misc diff --git a/.ci/complement_package.gotpl b/.ci/complement_package.gotpl new file mode 100644 index 000000000..e1625fd31 --- /dev/null +++ b/.ci/complement_package.gotpl @@ -0,0 +1,93 @@ +{{- /*gotype: github.com/haveyoudebuggedit/gotestfmt/parser.Package*/ -}} +{{- /* +This template contains the format for an individual package. GitHub actions does not currently support nested groups so +we are creating a stylized header for each package. + +This template is based on https://github.com/haveyoudebuggedit/gotestfmt/blob/f179b0e462a9dcf7101515d87eec4e4d7e58b92a/.gotestfmt/github/package.gotpl +which is under the Unlicense licence. 
+*/ -}} +{{- $settings := .Settings -}} +{{- if and (or (not $settings.HideSuccessfulPackages) (ne .Result "PASS")) (or (not $settings.HideEmptyPackages) (ne .Result "SKIP") (ne (len .TestCases) 0)) -}} + {{- if eq .Result "PASS" -}} + {{ "\033" }}[0;32m + {{- else if eq .Result "SKIP" -}} + {{ "\033" }}[0;33m + {{- else -}} + {{ "\033" }}[0;31m + {{- end -}} + 📦 {{ .Name }}{{- "\033" }}[0m + {{- with .Coverage -}} + {{- "\033" -}}[0;37m ({{ . }}% coverage){{- "\033" -}}[0m + {{- end -}} + {{- "\n" -}} + {{- with .Reason -}} + {{- " " -}}🛑 {{ . -}}{{- "\n" -}} + {{- end -}} + {{- with .Output -}} + {{- . -}}{{- "\n" -}} + {{- end -}} + {{- with .TestCases -}} + {{- /* Failing tests are first */ -}} + {{- range . -}} + {{- if and (ne .Result "PASS") (ne .Result "SKIP") -}} + ::group::{{ "\033" }}[0;31m❌{{ " " }}{{- .Name -}} + {{- "\033" -}}[0;37m ({{if $settings.ShowTestStatus}}{{.Result}}; {{end}}{{ .Duration -}} + {{- with .Coverage -}} + , coverage: {{ . }}% + {{- end -}}) + {{- "\033" -}}[0m + {{- "\n" -}} + + {{- with .Output -}} + {{- formatTestOutput . $settings -}} + {{- "\n" -}} + {{- end -}} + + ::endgroup::{{- "\n" -}} + {{- end -}} + {{- end -}} + + + {{- /* Then skipped tests are second */ -}} + {{- range . -}} + {{- if eq .Result "SKIP" -}} + ::group::{{ "\033" }}[0;33m🚧{{ " " }}{{- .Name -}} + {{- "\033" -}}[0;37m ({{if $settings.ShowTestStatus}}{{.Result}}; {{end}}{{ .Duration -}} + {{- with .Coverage -}} + , coverage: {{ . }}% + {{- end -}}) + {{- "\033" -}}[0m + {{- "\n" -}} + + {{- with .Output -}} + {{- formatTestOutput . $settings -}} + {{- "\n" -}} + {{- end -}} + + ::endgroup::{{- "\n" -}} + {{- end -}} + {{- end -}} + + + {{- /* Then passing tests are last */ -}} + {{- range . -}} + {{- if eq .Result "PASS" -}} + ::group::{{ "\033" }}[0;32m✅{{ " " }}{{- .Name -}} + {{- "\033" -}}[0;37m ({{if $settings.ShowTestStatus}}{{.Result}}; {{end}}{{ .Duration -}} + {{- with .Coverage -}} + , coverage: {{ . }}% + {{- end -}}) + {{- "\033" -}}[0m + {{- "\n" -}} + + {{- with .Output -}} + {{- formatTestOutput . $settings -}} + {{- "\n" -}} + {{- end -}} + + ::endgroup::{{- "\n" -}} + {{- end -}} + {{- end -}} + {{- end -}} + {{- "\n" -}} +{{- end -}} diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 193cb505c..2e4ee723d 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -348,6 +348,11 @@ jobs: with: path: synapse + - name: "Install custom gotestfmt template" + run: | + mkdir .gotestfmt/github -p + cp synapse/.ci/complement_package.gotpl .gotestfmt/github/package.gotpl + # Attempt to check out the same branch of Complement as the PR. If it # doesn't exist, fallback to HEAD. - name: Checkout complement @@ -389,6 +394,11 @@ jobs: with: path: synapse + - name: "Install custom gotestfmt template" + run: | + mkdir .gotestfmt/github -p + cp synapse/.ci/complement_package.gotpl .gotestfmt/github/package.gotpl + # Attempt to check out the same branch of Complement as the PR. If it # doesn't exist, fallback to HEAD. - name: Checkout complement diff --git a/changelog.d/13057.misc b/changelog.d/13057.misc new file mode 100644 index 000000000..4102bf96b --- /dev/null +++ b/changelog.d/13057.misc @@ -0,0 +1 @@ +Make Complement CI logs easier to read. 
\ No newline at end of file From 7d99414edf2c5c7e602a88c72245add665e6afb4 Mon Sep 17 00:00:00 2001 From: Hannes Lerchl Date: Wed, 15 Jun 2022 18:45:16 +0200 Subject: [PATCH 52/85] Replace pyjwt with authlib in `org.matrix.login.jwt` (#13011) --- changelog.d/13011.misc | 1 + docs/jwt.md | 35 +++++++++----- .../configuration/config_documentation.md | 6 ++- poetry.lock | 8 ++-- pyproject.toml | 7 +-- synapse/config/jwt.py | 10 ++-- synapse/rest/client/login.py | 46 +++++++++++++++---- tests/rest/client/test_login.py | 44 +++++++++--------- 8 files changed, 100 insertions(+), 57 deletions(-) create mode 100644 changelog.d/13011.misc diff --git a/changelog.d/13011.misc b/changelog.d/13011.misc new file mode 100644 index 000000000..4da223219 --- /dev/null +++ b/changelog.d/13011.misc @@ -0,0 +1 @@ +Replaced usage of PyJWT with methods from Authlib in `org.matrix.login.jwt`. Contributed by Hannes Lerchl. diff --git a/docs/jwt.md b/docs/jwt.md index 346daf78a..8f859d59a 100644 --- a/docs/jwt.md +++ b/docs/jwt.md @@ -37,19 +37,19 @@ As with other login types, there are additional fields (e.g. `device_id` and ## Preparing Synapse The JSON Web Token integration in Synapse uses the -[`PyJWT`](https://pypi.org/project/pyjwt/) library, which must be installed +[`Authlib`](https://docs.authlib.org/en/latest/index.html) library, which must be installed as follows: - * The relevant libraries are included in the Docker images and Debian packages - provided by `matrix.org` so no further action is needed. +* The relevant libraries are included in the Docker images and Debian packages + provided by `matrix.org` so no further action is needed. - * If you installed Synapse into a virtualenv, run `/path/to/env/bin/pip - install synapse[pyjwt]` to install the necessary dependencies. +* If you installed Synapse into a virtualenv, run `/path/to/env/bin/pip + install synapse[jwt]` to install the necessary dependencies. - * For other installation mechanisms, see the documentation provided by the - maintainer. +* For other installation mechanisms, see the documentation provided by the + maintainer. -To enable the JSON web token integration, you should then add an `jwt_config` section +To enable the JSON web token integration, you should then add a `jwt_config` section to your configuration file (or uncomment the `enabled: true` line in the existing section). See [sample_config.yaml](./sample_config.yaml) for some sample settings. @@ -57,7 +57,7 @@ sample settings. ## How to test JWT as a developer Although JSON Web Tokens are typically generated from an external server, the -examples below use [PyJWT](https://pyjwt.readthedocs.io/en/latest/) directly. +example below uses a locally generated JWT. 1. Configure Synapse with JWT logins, note that this example uses a pre-shared secret and an algorithm of HS256: @@ -70,10 +70,21 @@ examples below use [PyJWT](https://pyjwt.readthedocs.io/en/latest/) directly. ``` 2. Generate a JSON web token: - ```bash - $ pyjwt --key=my-secret-token --alg=HS256 encode sub=test-user - eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ0ZXN0LXVzZXIifQ.Ag71GT8v01UO3w80aqRPTeuVPBIBZkYhNTJJ-_-zQIc + You can use the following short Python snippet to generate a JWT + protected by an HMAC. + Take care that the `secret` and the algorithm given in the `header` match + the entries from `jwt_config` above. 
+ + ```python + from authlib.jose import jwt + + header = {"alg": "HS256"} + payload = {"sub": "user1", "aud": ["audience"]} + secret = "my-secret-token" + result = jwt.encode(header, payload, secret) + print(result.decode("ascii")) ``` + 3. Query for the login types and ensure `org.matrix.login.jwt` is there: ```bash diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 392ae80a7..e88f68d2b 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -2946,8 +2946,10 @@ Additional sub-options for this setting include: tokens. Defaults to false. * `secret`: This is either the private shared secret or the public key used to decode the contents of the JSON web token. Required if `enabled` is set to true. -* `algorithm`: The algorithm used to sign the JSON web token. Supported algorithms are listed at - https://pyjwt.readthedocs.io/en/latest/algorithms.html Required if `enabled` is set to true. +* `algorithm`: The algorithm used to sign (or HMAC) the JSON web token. + Supported algorithms are listed + [here (section JWS)](https://docs.authlib.org/en/latest/specs/rfc7518.html). + Required if `enabled` is set to true. * `subject_claim`: Name of the claim containing a unique identifier for the user. Optional, defaults to `sub`. * `issuer`: The issuer to validate the "iss" claim against. Optional. If provided the diff --git a/poetry.lock b/poetry.lock index 6a67f59bc..849e8a7a9 100644 --- a/poetry.lock +++ b/poetry.lock @@ -815,7 +815,7 @@ python-versions = ">=3.5" name = "pyjwt" version = "2.4.0" description = "JSON Web Token implementation in Python" -category = "main" +category = "dev" optional = false python-versions = ">=3.6" @@ -1546,9 +1546,9 @@ docs = ["sphinx", "repoze.sphinx.autointerface"] test = ["zope.i18nmessageid", "zope.testing", "zope.testrunner"] [extras] -all = ["matrix-synapse-ldap3", "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", "pysaml2", "authlib", "lxml", "sentry-sdk", "jaeger-client", "opentracing", "pyjwt", "txredisapi", "hiredis", "Pympler"] +all = ["matrix-synapse-ldap3", "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", "pysaml2", "authlib", "lxml", "sentry-sdk", "jaeger-client", "opentracing", "txredisapi", "hiredis", "Pympler"] cache_memory = ["Pympler"] -jwt = ["pyjwt"] +jwt = ["authlib"] matrix-synapse-ldap3 = ["matrix-synapse-ldap3"] oidc = ["authlib"] opentracing = ["jaeger-client", "opentracing"] @@ -1563,7 +1563,7 @@ url_preview = ["lxml"] [metadata] lock-version = "1.1" python-versions = "^3.7.1" -content-hash = "37bd4bccfdb5a869635f2135a85bea4a0729af7375a27de153b4fd9a4aebc195" +content-hash = "73882e279e0379482f2fc7414cb71addfd408ca48ad508ff8a02b0cb544762af" [metadata.files] attrs = [ diff --git a/pyproject.toml b/pyproject.toml index 85c2c9534..44aa775c3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -175,7 +175,6 @@ lxml = { version = ">=4.2.0", optional = true } sentry-sdk = { version = ">=0.7.2", optional = true } opentracing = { version = ">=2.2.0", optional = true } jaeger-client = { version = ">=4.0.0", optional = true } -pyjwt = { version = ">=1.6.4", optional = true } txredisapi = { version = ">=1.4.7", optional = true } hiredis = { version = "*", optional = true } Pympler = { version = "*", optional = true } @@ -196,7 +195,7 @@ systemd = ["systemd-python"] url_preview = ["lxml"] sentry = ["sentry-sdk"] opentracing = ["jaeger-client", "opentracing"] -jwt = ["pyjwt"] +jwt = ["authlib"] # hiredis is not a *strict* 
dependency, but it makes things much faster. # (if it is not installed, we fall back to slow code.) redis = ["txredisapi", "hiredis"] @@ -222,7 +221,7 @@ all = [ "psycopg2", "psycopg2cffi", "psycopg2cffi-compat", # saml2 "pysaml2", - # oidc + # oidc and jwt "authlib", # url_preview "lxml", @@ -230,8 +229,6 @@ all = [ "sentry-sdk", # opentracing "jaeger-client", "opentracing", - # jwt - "pyjwt", # redis "txredisapi", "hiredis", # cache_memory diff --git a/synapse/config/jwt.py b/synapse/config/jwt.py index 7e3c764b2..49aaca7cf 100644 --- a/synapse/config/jwt.py +++ b/synapse/config/jwt.py @@ -18,10 +18,10 @@ from synapse.types import JsonDict from ._base import Config, ConfigError -MISSING_JWT = """Missing jwt library. This is required for jwt login. +MISSING_AUTHLIB = """Missing authlib library. This is required for jwt login. Install by running: - pip install pyjwt + pip install synapse[jwt] """ @@ -43,11 +43,11 @@ class JWTConfig(Config): self.jwt_audiences = jwt_config.get("audiences") try: - import jwt + from authlib.jose import JsonWebToken - jwt # To stop unused lint. + JsonWebToken # To stop unused lint. except ImportError: - raise ConfigError(MISSING_JWT) + raise ConfigError(MISSING_AUTHLIB) else: self.jwt_enabled = False self.jwt_secret = None diff --git a/synapse/rest/client/login.py b/synapse/rest/client/login.py index cf4196ac0..dd75e40f3 100644 --- a/synapse/rest/client/login.py +++ b/synapse/rest/client/login.py @@ -420,17 +420,31 @@ class LoginRestServlet(RestServlet): 403, "Token field for JWT is missing", errcode=Codes.FORBIDDEN ) - import jwt + from authlib.jose import JsonWebToken, JWTClaims + from authlib.jose.errors import BadSignatureError, InvalidClaimError, JoseError + + jwt = JsonWebToken([self.jwt_algorithm]) + claim_options = {} + if self.jwt_issuer is not None: + claim_options["iss"] = {"value": self.jwt_issuer, "essential": True} + if self.jwt_audiences is not None: + claim_options["aud"] = {"values": self.jwt_audiences, "essential": True} try: - payload = jwt.decode( + claims = jwt.decode( token, - self.jwt_secret, - algorithms=[self.jwt_algorithm], - issuer=self.jwt_issuer, - audience=self.jwt_audiences, + key=self.jwt_secret, + claims_cls=JWTClaims, + claims_options=claim_options, ) - except jwt.PyJWTError as e: + except BadSignatureError: + # We handle this case separately to provide a better error message + raise LoginError( + 403, + "JWT validation failed: Signature verification failed", + errcode=Codes.FORBIDDEN, + ) + except JoseError as e: # A JWT error occurred, return some info back to the client. 
raise LoginError( 403, @@ -438,7 +452,23 @@ class LoginRestServlet(RestServlet): errcode=Codes.FORBIDDEN, ) - user = payload.get(self.jwt_subject_claim, None) + try: + claims.validate(leeway=120) # allows 2 min of clock skew + + # Enforce the old behavior which is rolled out in productive + # servers: if the JWT contains an 'aud' claim but none is + # configured, the login attempt will fail + if claims.get("aud") is not None: + if self.jwt_audiences is None or len(self.jwt_audiences) == 0: + raise InvalidClaimError("aud") + except JoseError as e: + raise LoginError( + 403, + "JWT validation failed: %s" % (str(e),), + errcode=Codes.FORBIDDEN, + ) + + user = claims.get(self.jwt_subject_claim, None) if user is None: raise LoginError(403, "Invalid JWT", errcode=Codes.FORBIDDEN) diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py index f4ea1209d..f6efa5fe3 100644 --- a/tests/rest/client/test_login.py +++ b/tests/rest/client/test_login.py @@ -14,7 +14,7 @@ import json import time import urllib.parse -from typing import Any, Dict, List, Optional, Union +from typing import Any, Dict, List, Optional from unittest.mock import Mock from urllib.parse import urlencode @@ -41,7 +41,7 @@ from tests.test_utils.html_parsers import TestHtmlParser from tests.unittest import HomeserverTestCase, override_config, skip_unless try: - import jwt + from authlib.jose import jwk, jwt HAS_JWT = True except ImportError: @@ -841,7 +841,7 @@ class CASTestCase(unittest.HomeserverTestCase): self.assertIn(b"SSO account deactivated", channel.result["body"]) -@skip_unless(HAS_JWT, "requires jwt") +@skip_unless(HAS_JWT, "requires authlib") class JWTTestCase(unittest.HomeserverTestCase): servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, @@ -866,11 +866,9 @@ class JWTTestCase(unittest.HomeserverTestCase): return config def jwt_encode(self, payload: Dict[str, Any], secret: str = jwt_secret) -> str: - # PyJWT 2.0.0 changed the return type of jwt.encode from bytes to str. 
- result: Union[str, bytes] = jwt.encode(payload, secret, self.jwt_algorithm) - if isinstance(result, bytes): - return result.decode("ascii") - return result + header = {"alg": self.jwt_algorithm} + result: bytes = jwt.encode(header, payload, secret) + return result.decode("ascii") def jwt_login(self, *args: Any) -> FakeChannel: params = {"type": "org.matrix.login.jwt", "token": self.jwt_encode(*args)} @@ -902,7 +900,8 @@ class JWTTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.result["code"], b"403", channel.result) self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") self.assertEqual( - channel.json_body["error"], "JWT validation failed: Signature has expired" + channel.json_body["error"], + "JWT validation failed: expired_token: The token is expired", ) def test_login_jwt_not_before(self) -> None: @@ -912,7 +911,7 @@ class JWTTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") self.assertEqual( channel.json_body["error"], - "JWT validation failed: The token is not yet valid (nbf)", + "JWT validation failed: invalid_token: The token is not valid yet", ) def test_login_no_sub(self) -> None: @@ -934,7 +933,8 @@ class JWTTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.result["code"], b"403", channel.result) self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") self.assertEqual( - channel.json_body["error"], "JWT validation failed: Invalid issuer" + channel.json_body["error"], + 'JWT validation failed: invalid_claim: Invalid claim "iss"', ) # Not providing an issuer. @@ -943,7 +943,7 @@ class JWTTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") self.assertEqual( channel.json_body["error"], - 'JWT validation failed: Token is missing the "iss" claim', + 'JWT validation failed: missing_claim: Missing "iss" claim', ) def test_login_iss_no_config(self) -> None: @@ -965,7 +965,8 @@ class JWTTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.result["code"], b"403", channel.result) self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") self.assertEqual( - channel.json_body["error"], "JWT validation failed: Invalid audience" + channel.json_body["error"], + 'JWT validation failed: invalid_claim: Invalid claim "aud"', ) # Not providing an audience. @@ -974,7 +975,7 @@ class JWTTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") self.assertEqual( channel.json_body["error"], - 'JWT validation failed: Token is missing the "aud" claim', + 'JWT validation failed: missing_claim: Missing "aud" claim', ) def test_login_aud_no_config(self) -> None: @@ -983,7 +984,8 @@ class JWTTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.result["code"], b"403", channel.result) self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") self.assertEqual( - channel.json_body["error"], "JWT validation failed: Invalid audience" + channel.json_body["error"], + 'JWT validation failed: invalid_claim: Invalid claim "aud"', ) def test_login_default_sub(self) -> None: @@ -1010,7 +1012,7 @@ class JWTTestCase(unittest.HomeserverTestCase): # The JWTPubKeyTestCase is a complement to JWTTestCase where we instead use # RSS256, with a public key configured in synapse as "jwt_secret", and tokens # signed by the private key. 
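Before moving on to the public-key variant, a minimal round-trip sketch of the new symmetric (HS256) flow may help. authlib's `jwt.encode` takes the JOSE header and the payload as separate arguments and always returns bytes, which is why the `Union[str, bytes]` handling needed for PyJWT could be dropped. The secret and claims below are hypothetical, purely for illustration:

```python
from authlib.jose import JsonWebToken
from authlib.jose.errors import JoseError

jwt = JsonWebToken(["HS256"])
secret = "top secret"  # hypothetical stand-in for the configured jwt_secret

# Encode: header and payload are separate arguments; the result is bytes.
token = jwt.encode({"alg": "HS256"}, {"sub": "frog", "iss": "test-issuer"}, secret)

# Decode with claim options shaped like those built in the login servlet above.
claims = jwt.decode(
    token,
    key=secret,
    claims_options={"iss": {"value": "test-issuer", "essential": True}},
)
try:
    claims.validate(leeway=120)  # allows 2 min of clock skew
except JoseError as e:
    print("JWT validation failed: %s" % (str(e),))
else:
    print("Login accepted for", claims["sub"])
```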
-@skip_unless(HAS_JWT, "requires jwt") +@skip_unless(HAS_JWT, "requires authlib") class JWTPubKeyTestCase(unittest.HomeserverTestCase): servlets = [ login.register_servlets, @@ -1071,11 +1073,11 @@ class JWTPubKeyTestCase(unittest.HomeserverTestCase): return config def jwt_encode(self, payload: Dict[str, Any], secret: str = jwt_privatekey) -> str: - # PyJWT 2.0.0 changed the return type of jwt.encode from bytes to str. - result: Union[bytes, str] = jwt.encode(payload, secret, "RS256") - if isinstance(result, bytes): - return result.decode("ascii") - return result + header = {"alg": "RS256"} + if secret.startswith("-----BEGIN RSA PRIVATE KEY-----"): + secret = jwk.dumps(secret, kty="RSA") + result: bytes = jwt.encode(header, payload, secret) + return result.decode("ascii") def jwt_login(self, *args: Any) -> FakeChannel: params = {"type": "org.matrix.login.jwt", "token": self.jwt_encode(*args)} From c95b04bb0e719d3f5de1714b442f95a39c6e3634 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 15 Jun 2022 17:55:20 +0100 Subject: [PATCH 53/85] Change default `sync_response_cache_duration` (#13042) --- changelog.d/13042.misc | 1 + docker/complement/conf/workers-shared-extra.yaml.j2 | 6 ++++++ docs/usage/configuration/config_documentation.md | 4 ++-- synapse/config/cache.py | 2 +- tests/utils.py | 2 +- 5 files changed, 11 insertions(+), 4 deletions(-) create mode 100644 changelog.d/13042.misc diff --git a/changelog.d/13042.misc b/changelog.d/13042.misc new file mode 100644 index 000000000..745d5fcf8 --- /dev/null +++ b/changelog.d/13042.misc @@ -0,0 +1 @@ +Set default `sync_response_cache_duration` to two minutes. diff --git a/docker/complement/conf/workers-shared-extra.yaml.j2 b/docker/complement/conf/workers-shared-extra.yaml.j2 index a5b1b6bb8..7c6a0fd75 100644 --- a/docker/complement/conf/workers-shared-extra.yaml.j2 +++ b/docker/complement/conf/workers-shared-extra.yaml.j2 @@ -103,4 +103,10 @@ server_notices: system_mxid_avatar_url: "" room_name: "Server Alert" + +# Disable sync cache so that initial `/sync` requests are up-to-date. +caches: + sync_response_cache_duration: 0 + + {% include "shared-orig.yaml.j2" %} diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index e88f68d2b..4e6880193 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -1137,8 +1137,8 @@ Caching can be configured through the following sub-options: * `sync_response_cache_duration`: Controls how long the results of a /sync request are cached for after a successful response is returned. A higher duration can help clients with intermittent connections, at the cost of higher memory usage. - By default, this is zero, which means that sync responses are not cached - at all. + A value of zero means that sync responses are not cached. + Defaults to 2m. * `cache_autotuning` and its sub-options `max_cache_memory_usage`, `target_cache_memory_usage`, and `min_cache_ttl` work in conjunction with each other to maintain a balance between cache memory usage and cache entry availability. 
You must be using [jemalloc](https://github.com/matrix-org/synapse#help-synapse-is-slow-and-eats-all-my-ramcpu) diff --git a/synapse/config/cache.py b/synapse/config/cache.py index d0b491ea6..63310c8d0 100644 --- a/synapse/config/cache.py +++ b/synapse/config/cache.py @@ -206,7 +206,7 @@ class CacheConfig(Config): self.cache_autotuning["min_cache_ttl"] = self.parse_duration(min_cache_ttl) self.sync_response_cache_duration = self.parse_duration( - cache_config.get("sync_response_cache_duration", 0) + cache_config.get("sync_response_cache_duration", "2m") ) def resize_all_caches(self) -> None: diff --git a/tests/utils.py b/tests/utils.py index 3059c453d..cabb2c0de 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -169,7 +169,7 @@ def default_config(name, parse=False): # disable user directory updates, because they get done in the # background, which upsets the test runner. "update_user_directory": False, - "caches": {"global_factor": 1}, + "caches": {"global_factor": 1, "sync_response_cache_duration": 0}, "listeners": [{"port": 0, "type": "http"}], } From 99d3931974e65865d1102ee79d7b7e2b017a3180 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Wed, 15 Jun 2022 18:58:23 +0100 Subject: [PATCH 54/85] Add more tests for room upgrades (#13074) Signed-off-by: Sean Quah --- changelog.d/13074.misc | 1 + tests/rest/client/test_upgrade_room.py | 83 ++++++++++++++++++++++++-- 2 files changed, 79 insertions(+), 5 deletions(-) create mode 100644 changelog.d/13074.misc diff --git a/changelog.d/13074.misc b/changelog.d/13074.misc new file mode 100644 index 000000000..a502e44d9 --- /dev/null +++ b/changelog.d/13074.misc @@ -0,0 +1 @@ +Add more tests for room upgrades. diff --git a/tests/rest/client/test_upgrade_room.py b/tests/rest/client/test_upgrade_room.py index 98c1039d3..5e7bf9748 100644 --- a/tests/rest/client/test_upgrade_room.py +++ b/tests/rest/client/test_upgrade_room.py @@ -48,10 +48,14 @@ class UpgradeRoomTest(unittest.HomeserverTestCase): self.helper.join(self.room_id, self.other, tok=self.other_token) def _upgrade_room( - self, token: Optional[str] = None, room_id: Optional[str] = None + self, + token: Optional[str] = None, + room_id: Optional[str] = None, + expire_cache: bool = True, ) -> FakeChannel: - # We never want a cached response. - self.reactor.advance(5 * 60 + 1) + if expire_cache: + # We don't want a cached response. + self.reactor.advance(5 * 60 + 1) if room_id is None: room_id = self.room_id @@ -72,9 +76,24 @@ class UpgradeRoomTest(unittest.HomeserverTestCase): self.assertEqual(200, channel.code, channel.result) self.assertIn("replacement_room", channel.json_body) - def test_not_in_room(self) -> None: + new_room_id = channel.json_body["replacement_room"] + + # Check that the tombstone event points to the new room. + tombstone_event = self.get_success( + self.hs.get_storage_controllers().state.get_current_state_event( + self.room_id, EventTypes.Tombstone, "" + ) + ) + self.assertIsNotNone(tombstone_event) + self.assertEqual(new_room_id, tombstone_event.content["replacement_room"]) + + # Check that the new room exists. + room = self.get_success(self.store.get_room(new_room_id)) + self.assertIsNotNone(room) + + def test_never_in_room(self) -> None: """ - Upgrading a room should work fine. + A user who has never been in the room cannot upgrade the room. """ # The user isn't in the room. 
roomless = self.register_user("roomless", "pass") @@ -83,6 +102,16 @@ class UpgradeRoomTest(unittest.HomeserverTestCase): channel = self._upgrade_room(roomless_token) self.assertEqual(403, channel.code, channel.result) + def test_left_room(self) -> None: + """ + A user who is no longer in the room cannot upgrade the room. + """ + # Remove the user from the room. + self.helper.leave(self.room_id, self.creator, tok=self.creator_token) + + channel = self._upgrade_room(self.creator_token) + self.assertEqual(403, channel.code, channel.result) + def test_power_levels(self) -> None: """ Another user can upgrade the room if their power level is increased. @@ -297,3 +326,47 @@ class UpgradeRoomTest(unittest.HomeserverTestCase): self.assertEqual( create_event.content.get(EventContentFields.ROOM_TYPE), test_room_type ) + + def test_second_upgrade_from_same_user(self) -> None: + """A second room upgrade from the same user is deduplicated.""" + channel1 = self._upgrade_room() + self.assertEqual(200, channel1.code, channel1.result) + + channel2 = self._upgrade_room(expire_cache=False) + self.assertEqual(200, channel2.code, channel2.result) + + self.assertEqual( + channel1.json_body["replacement_room"], + channel2.json_body["replacement_room"], + ) + + def test_second_upgrade_after_delay(self) -> None: + """A second room upgrade is not deduplicated after some time has passed.""" + channel1 = self._upgrade_room() + self.assertEqual(200, channel1.code, channel1.result) + + channel2 = self._upgrade_room(expire_cache=True) + self.assertEqual(200, channel2.code, channel2.result) + + self.assertNotEqual( + channel1.json_body["replacement_room"], + channel2.json_body["replacement_room"], + ) + + def test_second_upgrade_from_different_user(self) -> None: + """A second room upgrade from a different user is blocked.""" + channel = self._upgrade_room() + self.assertEqual(200, channel.code, channel.result) + + channel = self._upgrade_room(self.other_token, expire_cache=False) + self.assertEqual(400, channel.code, channel.result) + + def test_first_upgrade_does_not_block_second(self) -> None: + """A second room upgrade is not blocked when a previous upgrade attempt was not + allowed. + """ + channel = self._upgrade_room(self.other_token) + self.assertEqual(403, channel.code, channel.result) + + channel = self._upgrade_room(expire_cache=False) + self.assertEqual(200, channel.code, channel.result) From cba1c5cbc293b2601d81b0cb9b1a28ec6f1e26a1 Mon Sep 17 00:00:00 2001 From: Shay Date: Wed, 15 Jun 2022 11:31:46 -0700 Subject: [PATCH 55/85] Add headers to individual options in config documentation to allow for linking. (#13055) --- changelog.d/13055.misc | 1 + .../configuration/config_documentation.md | 326 +++++++++--------- 2 files changed, 164 insertions(+), 163 deletions(-) create mode 100644 changelog.d/13055.misc diff --git a/changelog.d/13055.misc b/changelog.d/13055.misc new file mode 100644 index 000000000..92a02a608 --- /dev/null +++ b/changelog.d/13055.misc @@ -0,0 +1 @@ +Add headers to individual options in config documentation to allow for linking. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 4e6880193..7c9860c3e 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -67,7 +67,7 @@ apply if you want your config file to be read properly. 
A few helpful things to enabled: false ``` In this manual, all top-level settings (ones with no indentation) are identified - at the beginning of their section (i.e. "Config option: `example_setting`") and + at the beginning of their section (i.e. "### `example_setting`") and the sub-options, if any, are identified and listed in the body of the section. In addition, each setting has an example of its usage, with the proper indentation shown. @@ -124,7 +124,7 @@ documentation on how to configure or create custom modules for Synapse. --- -Config option: `modules` +### `modules` Use the `module` sub-option to add modules under this option to extend functionality. The `module` setting then has a sub-option, `config`, which can be used to define some configuration @@ -147,7 +147,7 @@ modules: Define your homeserver name and other base options. --- -Config option: `server_name` +### `server_name` This sets the public-facing domain of the server. @@ -177,7 +177,7 @@ Example configuration #2: server_name: localhost:8080 ``` --- -Config option: `pid_file` +### `pid_file` When running Synapse as a daemon, the file to store the pid in. Defaults to none. @@ -186,7 +186,7 @@ Example configuration: pid_file: DATADIR/homeserver.pid ``` --- -Config option: `web_client_location` +### `web_client_location` The absolute URL to the web client which `/` will redirect to. Defaults to none. @@ -195,7 +195,7 @@ Example configuration: web_client_location: https://riot.example.com/ ``` --- -Config option: `public_baseurl` +### `public_baseurl` The public-facing base URL that clients use to access this Homeserver (not including _matrix/...). This is the same URL a user might enter into the @@ -211,7 +211,7 @@ Example configuration: public_baseurl: https://example.com/ ``` --- -Config option: `serve_server_wellknown` +### `serve_server_wellknown` By default, other servers will try to reach our server on port 8448, which can be inconvenient in some environments. @@ -230,7 +230,7 @@ Example configuration: serve_server_wellknown: true ``` --- -Config option: `soft_file_limit` +### `soft_file_limit` Set the soft limit on the number of file descriptors synapse can use. Zero is used to indicate synapse should set the soft limit to the hard limit. @@ -241,7 +241,7 @@ Example configuration: soft_file_limit: 3 ``` --- -Config option: `presence` +### `presence` Presence tracking allows users to see the state (e.g online/offline) of other local and remote users. Set the `enabled` sub-option to false to @@ -254,7 +254,7 @@ presence: enabled: false ``` --- -Config option: `require_auth_for_profile_requests` +### `require_auth_for_profile_requests` Whether to require authentication to retrieve profile data (avatars, display names) of other users through the client API. Defaults to false. Note that profile data is also available @@ -265,7 +265,7 @@ Example configuration: require_auth_for_profile_requests: true ``` --- -Config option: `limit_profile_requests_to_users_who_share_rooms` +### `limit_profile_requests_to_users_who_share_rooms` Use this option to require a user to share a room with another user in order to retrieve their profile information. Only checked on Client-Server @@ -277,7 +277,7 @@ Example configuration: limit_profile_requests_to_users_who_share_rooms: true ``` --- -Config option: `include_profile_data_on_invite` +### `include_profile_data_on_invite` Use this option to prevent a user's profile data from being retrieved and displayed in a room until they have joined it. 
By default, a user's @@ -290,7 +290,7 @@ Example configuration: include_profile_data_on_invite: false ``` --- -Config option: `allow_public_rooms_without_auth` +### `allow_public_rooms_without_auth` If set to true, removes the need for authentication to access the server's public rooms directory through the client API, meaning that anyone can @@ -301,7 +301,7 @@ Example configuration: allow_public_rooms_without_auth: true ``` --- -Config option: `allow_public_rooms_without_auth` +### `allow_public_rooms_without_auth` If set to true, allows any other homeserver to fetch the server's public rooms directory via federation. Defaults to false. @@ -311,7 +311,7 @@ Example configuration: allow_public_rooms_over_federation: true ``` --- -Config option: `default_room_version` +### `default_room_version` The default room version for newly created rooms on this server. @@ -327,7 +327,7 @@ Example configuration: default_room_version: "8" ``` --- -Config option: `gc_thresholds` +### `gc_thresholds` The garbage collection threshold parameters to pass to `gc.set_threshold`, if defined. Defaults to none. @@ -337,7 +337,7 @@ Example configuration: gc_thresholds: [700, 10, 10] ``` --- -Config option: `gc_min_interval` +### `gc_min_interval` The minimum time in seconds between each GC for a generation, regardless of the GC thresholds. This ensures that we don't do GC too frequently. A value of `[1s, 10s, 30s]` @@ -350,7 +350,7 @@ Example configuration: gc_min_interval: [0.5s, 30s, 1m] ``` --- -Config option: `filter_timeline_limit` +### `filter_timeline_limit` Set the limit on the returned events in the timeline in the get and sync operations. Defaults to 100. A value of -1 means no upper limit. @@ -361,7 +361,7 @@ Example configuration: filter_timeline_limit: 5000 ``` --- -Config option: `block_non_admin_invites` +### `block_non_admin_invites` Whether room invites to users on this server should be blocked (except those sent by local server admins). Defaults to false. @@ -371,7 +371,7 @@ Example configuration: block_non_admin_invites: true ``` --- -Config option: `enable_search` +### `enable_search` If set to false, new messages will not be indexed for searching and users will receive errors when searching for messages. Defaults to true. @@ -381,7 +381,7 @@ Example configuration: enable_search: false ``` --- -Config option: `ip_range_blacklist` +### `ip_range_blacklist` This option prevents outgoing requests from being sent to the specified blacklisted IP address CIDR ranges. If this option is not specified then it defaults to private IP @@ -421,7 +421,7 @@ ip_range_blacklist: - 'fec0::/10' ``` --- -Config option: `ip_range_whitelist` +### `ip_range_whitelist` List of IP address CIDR ranges that should be allowed for federation, identity servers, push servers, and for checking key validity for @@ -438,7 +438,7 @@ ip_range_whitelist: - '192.168.1.1' ``` --- -Config option: `listeners` +### `listeners` List of ports that Synapse should listen on, their purpose and their configuration. @@ -539,7 +539,7 @@ listeners: type: manhole ``` --- -Config option: `manhole_settings` +### `manhole_settings` Connection settings for the manhole. You can find more information on the manhole [here](../../manhole.md). Manhole sub-options include: @@ -558,7 +558,7 @@ manhole_settings: ssh_pub_key_path: CONFDIR/id_rsa.pub ``` --- -Config option: `dummy_events_threshold` +### `dummy_events_threshold` Forward extremities can build up in a room due to networking delays between homeservers. 
Once this happens in a large room, calculation of the state of @@ -592,7 +592,7 @@ Useful options for Synapse admins. --- -Config option: `admin_contact` +### `admin_contact` How to reach the server admin, used in `ResourceLimitError`. Defaults to none. @@ -601,7 +601,7 @@ Example configuration: admin_contact: 'mailto:admin@server.com' ``` --- -Config option: `hs_disabled` and `hs_disabled_message` +### `hs_disabled` and `hs_disabled_message` Blocks users from connecting to the homeserver and provides a human-readable reason why the connection was blocked. Defaults to false. @@ -612,7 +612,7 @@ hs_disabled: true hs_disabled_message: 'Reason for why the HS is blocked' ``` --- -Config option: `limit_usage_by_mau` +### `limit_usage_by_mau` This option disables/enables monthly active user blocking. Used in cases where the admin or server owner wants to limit to the number of monthly active users. When enabled and a limit is @@ -624,7 +624,7 @@ Example configuration: limit_usage_by_mau: true ``` --- -Config option: `max_mau_value` +### `max_mau_value` This option sets the hard limit of monthly active users above which the server will start blocking user actions if `limit_usage_by_mau` is enabled. Defaults to 0. @@ -634,7 +634,7 @@ Example configuration: max_mau_value: 50 ``` --- -Config option: `mau_trial_days` +### `mau_trial_days` The option `mau_trial_days` is a means to add a grace period for active users. It means that users must be active for the specified number of days before they @@ -647,7 +647,7 @@ Example configuration: mau_trial_days: 5 ``` --- -Config option: `mau_appservice_trial_days` +### `mau_appservice_trial_days` The option `mau_appservice_trial_days` is similar to `mau_trial_days`, but applies a different trial number if the user was registered by an appservice. A value @@ -661,7 +661,7 @@ mau_appservice_trial_days: another_appservice_id: 6 ``` --- -Config option: `mau_limit_alerting` +### `mau_limit_alerting` The option `mau_limit_alerting` is a means of limiting client-side alerting should the mau limit be reached. This is useful for small instances @@ -674,7 +674,7 @@ Example configuration: mau_limit_alerting: false ``` --- -Config option: `mau_stats_only` +### `mau_stats_only` If enabled, the metrics for the number of monthly active users will be populated, however no one will be limited based on these numbers. If `limit_usage_by_mau` @@ -685,7 +685,7 @@ Example configuration: mau_stats_only: true ``` --- -Config option: `mau_limit_reserved_threepids` +### `mau_limit_reserved_threepids` Sometimes the server admin will want to ensure certain accounts are never blocked by mau checking. These accounts are specified by this option. @@ -699,7 +699,7 @@ mau_limit_reserved_threepids: address: 'reserved_user@example.com' ``` --- -Config option: `server_context` +### `server_context` This option is used by phonehome stats to group together related servers. Defaults to none. @@ -709,7 +709,7 @@ Example configuration: server_context: context ``` --- -Config option: `limit_remote_rooms` +### `limit_remote_rooms` When this option is enabled, the room "complexity" will be checked before a user joins a new remote room. If it is above the complexity limit, the server will @@ -733,7 +733,7 @@ limit_remote_rooms: admins_can_join: true ``` --- -Config option: `require_membership_for_aliases` +### `require_membership_for_aliases` Whether to require a user to be in the room to add an alias to it. Defaults to true. 
@@ -743,7 +743,7 @@ Example configuration: require_membership_for_aliases: false ``` --- -Config option: `allow_per_room_profiles` +### `allow_per_room_profiles` Whether to allow per-room membership profiles through the sending of membership events with profile information that differs from the target's global profile. @@ -754,7 +754,7 @@ Example configuration: allow_per_room_profiles: false ``` --- -Config option: `max_avatar_size` +### `max_avatar_size` The largest permissible file size in bytes for a user avatar. Defaults to no restriction. Use M for MB and K for KB. @@ -766,7 +766,7 @@ Example configuration: max_avatar_size: 10M ``` --- -Config option: `allowed_avatar_mimetypes` +### `allowed_avatar_mimetypes` The MIME types allowed for user avatars. Defaults to no restriction. @@ -778,7 +778,7 @@ Example configuration: allowed_avatar_mimetypes: ["image/png", "image/jpeg", "image/gif"] ``` --- -Config option: `redaction_retention_period` +### `redaction_retention_period` How long to keep redacted events in unredacted form in the database. After this period redacted events get replaced with their redacted form in the DB. @@ -790,7 +790,7 @@ Example configuration: redaction_retention_period: 28d ``` --- -Config option: `user_ips_max_age` +### `user_ips_max_age` How long to track users' last seen time and IPs in the database. @@ -801,7 +801,7 @@ Example configuration: user_ips_max_age: 14d ``` --- -Config option: `request_token_inhibit_3pid_errors` +### `request_token_inhibit_3pid_errors` Inhibits the `/requestToken` endpoints from returning an error that might leak information about whether an e-mail address is in use or not on this @@ -816,7 +816,7 @@ Example configuration: request_token_inhibit_3pid_errors: true ``` --- -Config option: `next_link_domain_whitelist` +### `next_link_domain_whitelist` A list of domains that the domain portion of `next_link` parameters must match. @@ -838,7 +838,7 @@ Example configuration: next_link_domain_whitelist: ["matrix.org"] ``` --- -Config option: `templates` and `custom_template_directory` +### `templates` and `custom_template_directory` These options define templates to use when generating email or HTML page contents. The `custom_template_directory` determines which directory Synapse will try to @@ -855,7 +855,7 @@ templates: custom_template_directory: /path/to/custom/templates/ ``` --- -Config option: `retention` +### `retention` This option and the associated options determine message retention policy at the server level. @@ -934,7 +934,7 @@ retention: Options related to TLS. --- -Config option: `tls_certificate_path` +### `tls_certificate_path` This option specifies a PEM-encoded X509 certificate for TLS. This certificate, as of Synapse 1.0, will need to be a valid and verifiable @@ -949,7 +949,7 @@ Example configuration: tls_certificate_path: "CONFDIR/SERVERNAME.tls.crt" ``` --- -Config option: `tls_private_key_path` +### `tls_private_key_path` PEM-encoded private key for TLS. Defaults to none. @@ -958,7 +958,7 @@ Example configuration: tls_private_key_path: "CONFDIR/SERVERNAME.tls.key" ``` --- -Config option: `federation_verify_certificates` +### `federation_verify_certificates` Whether to verify TLS server certificates for outbound federation requests. Defaults to true. To disable certificate verification, set the option to false. 
@@ -968,7 +968,7 @@ Example configuration: federation_verify_certificates: false ``` --- -Config option: `federation_client_minimum_tls_version` +### `federation_client_minimum_tls_version` The minimum TLS version that will be used for outbound federation requests. @@ -982,7 +982,7 @@ Example configuration: federation_client_minimum_tls_version: 1.2 ``` --- -Config option: `federation_certificate_verification_whitelist` +### `federation_certificate_verification_whitelist` Skip federation certificate verification on a given whitelist of domains. @@ -1001,7 +1001,7 @@ federation_certificate_verification_whitelist: - "*.onion" ``` --- -Config option: `federation_custom_ca_list` +### `federation_custom_ca_list` List of custom certificate authorities for federation traffic. @@ -1024,7 +1024,7 @@ federation_custom_ca_list: Options related to federation. --- -Config option: `federation_domain_whitelist` +### `federation_domain_whitelist` Restrict federation to the given whitelist of domains. N.B. we recommend also firewalling your federation listener to limit @@ -1040,7 +1040,7 @@ federation_domain_whitelist: - syd.example.com ``` --- -Config option: `federation_metrics_domains` +### `federation_metrics_domains` Report prometheus metrics on the age of PDUs being sent to and received from the given domains. This can be used to give an idea of "delay" on inbound @@ -1056,7 +1056,7 @@ federation_metrics_domains: - example.com ``` --- -Config option: `allow_profile_lookup_over_federation` +### `allow_profile_lookup_over_federation` Set to false to disable profile lookup over federation. By default, the Federation API allows other homeservers to obtain profile data of any user @@ -1067,7 +1067,7 @@ Example configuration: allow_profile_lookup_over_federation: false ``` --- -Config option: `allow_device_name_lookup_over_federation` +### `allow_device_name_lookup_over_federation` Set this option to true to allow device display name lookup over federation. By default, the Federation API prevents other homeservers from obtaining the display names of any user devices @@ -1083,7 +1083,7 @@ allow_device_name_lookup_over_federation: true Options related to caching --- -Config option: `event_cache_size` +### `event_cache_size` The number of events to cache in memory. Not affected by `caches.global_factor`. Defaults to 10K. @@ -1093,7 +1093,7 @@ Example configuration: event_cache_size: 15K ``` --- -Config option: `cache` and associated values +### `cache` and associated values A cache 'factor' is a multiplier that can be applied to each of Synapse's caches in order to increase or decrease the maximum @@ -1190,7 +1190,7 @@ file in Synapse's `contrib` directory, you can send a `SIGHUP` signal by using Config options related to database settings. --- -Config option: `database` +### `database` The `database` setting defines the database that synapse uses to store all of its data. @@ -1245,7 +1245,7 @@ database: Config options related to logging. --- -Config option: `log_config` +### `log_config` This option specifies a yaml python logging config file as described [here](https://docs.python.org/3.7/library/logging.config.html#configuration-dictionary-schema). @@ -1261,7 +1261,7 @@ Each ratelimiting configuration is made of two parameters: - `per_second`: number of requests a client can send per second. - `burst_count`: number of requests a client can send before being throttled. --- -Config option: `rc_message` +### `rc_message` Ratelimiting settings for client messaging. 
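Before the individual `rc_*` options, it may help to see how the two parameters interact: a client may burst up to `burst_count` requests, after which it is throttled to roughly `per_second`. A toy token-bucket model of these semantics (an illustration only, not Synapse's actual `Ratelimiter`):

```python
import time

class TokenBucket:
    """Toy model of the per_second/burst_count semantics of the rc_* options."""

    def __init__(self, per_second: float, burst_count: int) -> None:
        self.per_second = per_second
        self.burst_count = burst_count
        self.tokens = float(burst_count)
        self.last = time.monotonic()

    def allow(self) -> bool:
        now = time.monotonic()
        # Refill at `per_second` tokens/sec, capped at `burst_count`.
        self.tokens = min(self.burst_count, self.tokens + (now - self.last) * self.per_second)
        self.last = now
        if self.tokens >= 1.0:
            self.tokens -= 1.0
            return True
        return False

# With rc_message's defaults (0.2/s, burst 10), an 11th rapid-fire
# request is rejected until enough time has passed to refill a token.
bucket = TokenBucket(per_second=0.2, burst_count=10)
print([bucket.allow() for _ in range(11)])  # ten True, then False
```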
@@ -1276,7 +1276,7 @@ rc_message: burst_count: 15 ``` --- -Config option: `rc_registration` +### `rc_registration` This option ratelimits registration requests based on the client's IP address. It defaults to `per_second: 0.17`, `burst_count: 3`. @@ -1288,7 +1288,7 @@ rc_registration: burst_count: 2 ``` --- -Config option: `rc_registration_token_validity` +### `rc_registration_token_validity` This option checks the validity of registration tokens that ratelimits requests based on the client's IP address. @@ -1301,7 +1301,7 @@ rc_registration_token_validity: burst_count: 6 ``` --- -Config option: `rc_login` +### `rc_login` This option specifies several limits for login: * `address` ratelimits login requests based on the client's IP @@ -1329,7 +1329,7 @@ rc_login: burst_count: 7 ``` --- -Config option: `rc_admin_redaction` +### `rc_admin_redaction` This option sets ratelimiting redactions by room admins. If this is not explicitly set then it uses the same ratelimiting as per `rc_message`. This is useful @@ -1342,7 +1342,7 @@ rc_admin_redaction: burst_count: 50 ``` --- -Config option: `rc_joins` +### `rc_joins` This option allows for ratelimiting number of rooms a user can join. This setting has the following sub-options: @@ -1364,7 +1364,7 @@ rc_joins: burst_count: 12 ``` --- -Config option: `rc_3pid_validation` +### `rc_3pid_validation` This option ratelimits how often a user or IP can attempt to validate a 3PID. Defaults to `per_second: 0.003`, `burst_count: 5`. @@ -1376,7 +1376,7 @@ rc_3pid_validation: burst_count: 5 ``` --- -Config option: `rc_invites` +### `rc_invites` This option sets ratelimiting how often invites can be sent in a room or to a specific user. `per_room` defaults to `per_second: 0.3`, `burst_count: 10` and @@ -1407,7 +1407,7 @@ rc_invites: burst_count: 3 ``` --- -Config option: `rc_third_party_invite` +### `rc_third_party_invite` This option ratelimits 3PID invites (i.e. invites sent to a third-party ID such as an email address or a phone number) based on the account that's @@ -1420,7 +1420,7 @@ rc_third_party_invite: burst_count: 10 ``` --- -Config option: `rc_federation` +### `rc_federation` Defines limits on federation requests. @@ -1445,7 +1445,7 @@ rc_federation: concurrent: 5 ``` --- -Config option: `federation_rr_transactions_per_room_per_second` +### `federation_rr_transactions_per_room_per_second` Sets outgoing federation transaction frequency for sending read-receipts, per-room. @@ -1462,7 +1462,7 @@ federation_rr_transactions_per_room_per_second: 40 Config options related to Synapse's media store. --- -Config option: `enable_media_repo` +### `enable_media_repo` Enable the media store service in the Synapse master. Defaults to true. Set to false if you are using a separate media store worker. @@ -1472,7 +1472,7 @@ Example configuration: enable_media_repo: false ``` --- -Config option: `media_store_path` +### `media_store_path` Directory where uploaded images and attachments are stored. @@ -1481,7 +1481,7 @@ Example configuration: media_store_path: "DATADIR/media_store" ``` --- -Config option: `media_storage_providers` +### `media_storage_providers` Media storage providers allow media to be stored in different locations. Defaults to none. Associated sub-options are: @@ -1502,7 +1502,7 @@ media_storage_providers: directory: /mnt/some/other/directory ``` --- -Config option: `max_upload_size` +### `max_upload_size` The largest allowed upload size in bytes. 
@@ -1515,7 +1515,7 @@ Example configuration: max_upload_size: 60M ``` --- -Config option: `max_image_pixels` +### `max_image_pixels` Maximum number of pixels that will be thumbnailed. Defaults to 32M. @@ -1524,7 +1524,7 @@ Example configuration: max_image_pixels: 35M ``` --- -Config option: `dynamic_thumbnails` +### `dynamic_thumbnails` Whether to generate new thumbnails on the fly to precisely match the resolution requested by the client. If true then whenever @@ -1537,7 +1537,7 @@ Example configuration: dynamic_thumbnails: true ``` --- -Config option: `thumbnail_sizes` +### `thumbnail_sizes` List of thumbnails to precalculate when an image is uploaded. Associated sub-options are: * `width` @@ -1564,7 +1564,7 @@ thumbnail_sizes: method: scale ``` --- -Config option: `media_retention` +### `media_retention` Controls whether local media and entries in the remote media cache (media that is downloaded from other homeservers) should be removed @@ -1596,7 +1596,7 @@ media_retention: remote_media_lifetime: 14d ``` --- -Config option: `url_preview_enabled` +### `url_preview_enabled` This setting determines whether the preview URL API is enabled. It is disabled by default. Set to true to enable. If enabled you must specify a @@ -1607,7 +1607,7 @@ Example configuration: url_preview_enabled: true ``` --- -Config option: `url_preview_ip_range_blacklist` +### `url_preview_ip_range_blacklist` List of IP address CIDR ranges that the URL preview spider is denied from accessing. There are no defaults: you must explicitly @@ -1649,7 +1649,7 @@ url_preview_ip_range_blacklist: - 'fec0::/10' ``` ---- -Config option: `url_preview_ip_range_whitelist` +### `url_preview_ip_range_whitelist` This option sets a list of IP address CIDR ranges that the URL preview spider is allowed to access even if they are specified in `url_preview_ip_range_blacklist`. @@ -1663,7 +1663,7 @@ url_preview_ip_range_whitelist: - '192.168.1.1' ``` --- -Config option: `url_preview_url_blacklist` +### `url_preview_url_blacklist` Optional list of URL matches that the URL preview spider is denied from accessing. You should use `url_preview_ip_range_blacklist` @@ -1709,7 +1709,7 @@ url_preview_url_blacklist: - netloc: '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$' ``` --- -Config option: `max_spider_size` +### `max_spider_size` The largest allowed URL preview spidering size in bytes. Defaults to 10M. @@ -1718,7 +1718,7 @@ Example configuration: max_spider_size: 8M ``` --- -Config option: `url_preview_language` +### `url_preview_language` A list of values for the Accept-Language HTTP header used when downloading webpages during URL preview generation. This allows @@ -1743,7 +1743,7 @@ Example configuration: - '*;q=0.7' ``` ---- -Config option: `oembed` +### `oembed` oEmbed allows for easier embedding content from a website. It can be used for generating URLs previews of services which support it. A default list of oEmbed providers @@ -1764,7 +1764,7 @@ oembed: See [here](../../CAPTCHA_SETUP.md) for full details on setting up captcha. --- -Config option: `recaptcha_public_key` +### `recaptcha_public_key` This homeserver's ReCAPTCHA public key. Must be specified if `enable_registration_captcha` is enabled. @@ -1774,7 +1774,7 @@ Example configuration: recaptcha_public_key: "YOUR_PUBLIC_KEY" ``` --- -Config option: `recaptcha_private_key` +### `recaptcha_private_key` This homeserver's ReCAPTCHA private key. Must be specified if `enable_registration_captcha` is enabled. 
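Captcha checking is a server-side exchange: the user's captcha response is posted together with this private key to the siteverify endpoint (see `recaptcha_siteverify_api` below). A rough standalone sketch of that protocol, with a hypothetical helper name; this is not Synapse's actual handler:

```python
import json
from urllib.parse import urlencode
from urllib.request import urlopen

def siteverify(private_key: str, client_response: str, remote_ip: str) -> bool:
    # Hypothetical check against the default siteverify endpoint.
    body = urlencode(
        {"secret": private_key, "response": client_response, "remoteip": remote_ip}
    ).encode("ascii")
    with urlopen("https://www.recaptcha.net/recaptcha/api/siteverify", body) as resp:
        return bool(json.load(resp).get("success"))
```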
@@ -1784,7 +1784,7 @@ Example configuration: recaptcha_private_key: "YOUR_PRIVATE_KEY" ``` --- -Config option: `enable_registration_captcha` +### `enable_registration_captcha` Set to true to enable ReCaptcha checks when registering, preventing signup unless a captcha is answered. Requires a valid ReCaptcha public/private key. @@ -1795,7 +1795,7 @@ Example configuration: enable_registration_captcha: true ``` --- -Config option: `recaptcha_siteverify_api` +### `recaptcha_siteverify_api` The API endpoint to use for verifying `m.login.recaptcha` responses. Defaults to `https://www.recaptcha.net/recaptcha/api/siteverify`. @@ -1809,7 +1809,7 @@ recaptcha_siteverify_api: "https://my.recaptcha.site" Options related to adding a TURN server to Synapse. --- -Config option: `turn_uris` +### `turn_uris` The public URIs of the TURN server to give to clients. @@ -1818,7 +1818,7 @@ Example configuration: turn_uris: [turn:example.org] ``` --- -Config option: `turn_shared_secret` +### `turn_shared_secret` The shared secret used to compute passwords for the TURN server. @@ -1837,7 +1837,7 @@ turn_username: "TURNSERVER_USERNAME" turn_password: "TURNSERVER_PASSWORD" ``` --- -Config option: `turn_user_lifetime` +### `turn_user_lifetime` How long generated TURN credentials last. Defaults to 1h. @@ -1846,7 +1846,7 @@ Example configuration: turn_user_lifetime: 2h ``` --- -Config option: `turn_allow_guests` +### `turn_allow_guests` Whether guests should be allowed to use the TURN server. This defaults to true, otherwise VoIP will be unreliable for guests. However, it does introduce a slight security risk as @@ -1862,7 +1862,7 @@ turn_allow_guests: false Registration can be rate-limited using the parameters in the [Ratelimiting](#ratelimiting) section of this manual. --- -Config option: `enable_registration` +### `enable_registration` Enable registration for new users. Defaults to false. It is highly recommended that if you enable registration, you use either captcha, email, or token-based verification to verify that new users are not bots. In order to enable registration @@ -1873,7 +1873,7 @@ Example configuration: enable_registration: true ``` --- -Config option: `enable_registration_without_verification` +### `enable_registration_without_verification` Enable registration without email or captcha verification. Note: this option is *not* recommended, as registration without verification is a known vector for spam and abuse. Defaults to false. Has no effect unless `enable_registration` is also enabled. @@ -1883,7 +1883,7 @@ Example configuration: enable_registration_without_verification: true ``` --- -Config option: `session_lifetime` +### `session_lifetime` Time that a user's session remains valid for, after they log in. @@ -1899,7 +1899,7 @@ Example configuration: session_lifetime: 24h ``` ---- -Config option: `refresh_access_token_lifetime` +### `refresh_access_token_lifetime` Time that an access token remains valid for, if the session is using refresh tokens. @@ -1917,7 +1917,7 @@ Example configuration: refreshable_access_token_lifetime: 10m ``` --- -Config option: `refresh_token_lifetime: 24h` +### `refresh_token_lifetime: 24h` Time that a refresh token remains valid for (provided that it is not exchanged for another one first). @@ -1934,7 +1934,7 @@ Example configuration: refresh_token_lifetime: 24h ``` --- -Config option: `nonrefreshable_access_token_lifetime` +### `nonrefreshable_access_token_lifetime` Time that an access token remains valid for, if the session is NOT using refresh tokens. 
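Since the lifetime options in this section interact, a combined sketch of a refresh-token deployment may help (the values below are purely illustrative; each option is described individually in this section):

```yaml
session_lifetime: 24h                      # hard cap on the whole session
refreshable_access_token_lifetime: 5m      # access tokens when refresh tokens are in use
refresh_token_lifetime: 24h                # how long a refresh token may be exchanged
nonrefreshable_access_token_lifetime: 24h  # access tokens for clients without refresh support
```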
@@ -1953,7 +1953,7 @@ Example configuration: nonrefreshable_access_token_lifetime: 24h ``` --- -Config option: `registrations_require_3pid` +### `registrations_require_3pid` If this is set, the user must provide all of the specified types of 3PID when registering. @@ -1964,7 +1964,7 @@ registrations_require_3pid: - msisdn ``` --- -Config option: `disable_msisdn_registration` +### `disable_msisdn_registration` Explicitly disable asking for MSISDNs from the registration flow (overrides `registrations_require_3pid` if MSISDNs are set as required). @@ -1974,7 +1974,7 @@ Example configuration: disable_msisdn_registration: true ``` --- -Config option: `allowed_local_3pids` +### `allowed_local_3pids` Mandate that users are only allowed to associate certain formats of 3PIDs with accounts on this server, as specified by the `medium` and `pattern` sub-options. @@ -1990,7 +1990,7 @@ allowed_local_3pids: pattern: '\+44' ``` --- -Config option: `enable_3pid_lookup` +### `enable_3pid_lookup` Enable 3PIDs lookup requests to identity servers from this server. Defaults to true. @@ -1999,7 +1999,7 @@ Example configuration: enable_3pid_lookup: false ``` --- -Config option: `registration_requires_token` +### `registration_requires_token` Require users to submit a token during registration. Tokens can be managed using the admin [API](../administration/admin_api/registration_tokens.md). @@ -2012,7 +2012,7 @@ Example configuration: registration_requires_token: true ``` --- -Config option: `registration_shared_secret` +### `registration_shared_secret` If set, allows registration of standard or admin accounts by anyone who has the shared secret, even if registration is otherwise disabled. @@ -2022,7 +2022,7 @@ Example configuration: registration_shared_secret: ``` --- -Config option: `bcrypt_rounds` +### `bcrypt_rounds` Set the number of bcrypt rounds used to generate password hash. Larger numbers increase the work factor needed to generate the hash. @@ -2034,7 +2034,7 @@ Example configuration: bcrypt_rounds: 14 ``` --- -Config option: `allow_guest_access` +### `allow_guest_access` Allows users to register as guests without a password/email/etc, and participate in rooms hosted on this server which have been made @@ -2045,7 +2045,7 @@ Example configuration: allow_guest_access: true ``` --- -Config option: `default_identity_server` +### `default_identity_server` The identity server which we suggest that clients should use when users log in on this server. @@ -2058,7 +2058,7 @@ Example configuration: default_identity_server: https://matrix.org ``` --- -Config option: `account_threepid_delegates` +### `account_threepid_delegates` Handle threepid (email/phone etc) registration and password resets through a set of *trusted* identity servers. Note that this allows the configured identity server to @@ -2087,7 +2087,7 @@ account_threepid_delegates: msisdn: http://localhost:8090 # Delegate SMS sending to this local process ``` --- -Config option: `enable_set_displayname` +### `enable_set_displayname` Whether users are allowed to change their displayname after it has been initially set. Useful when provisioning users based on the @@ -2100,7 +2100,7 @@ Example configuration: enable_set_displayname: false ``` --- -Config option: `enable_set_avatar_url` +### `enable_set_avatar_url` Whether users are allowed to change their avatar after it has been initially set. 
Useful when provisioning users based on the contents @@ -2113,7 +2113,7 @@ Example configuration: enable_set_avatar_url: false ``` --- -Config option: `enable_3pid_changes` +### `enable_3pid_changes` Whether users can change the third-party IDs associated with their accounts (email address and msisdn). @@ -2125,7 +2125,7 @@ Example configuration: enable_3pid_changes: false ``` --- -Config option: `auto_join_rooms` +### `auto_join_rooms` Users who register on this homeserver will automatically be joined to the rooms listed under this option. @@ -2143,7 +2143,7 @@ auto_join_rooms: - "#anotherexampleroom:example.com" ``` --- -Config option: `autocreate_auto_join_rooms` +### `autocreate_auto_join_rooms` Where `auto_join_rooms` are specified, setting this flag ensures that the rooms exist by creating them when the first user on the @@ -2163,7 +2163,7 @@ Example configuration: autocreate_auto_join_rooms: false ``` --- -Config option: `autocreate_auto_join_rooms_federated` +### `autocreate_auto_join_rooms_federated` Whether the rooms listen in `auto_join_rooms` that are auto-created are available via federation. Only has an effect if `autocreate_auto_join_rooms` is true. @@ -2180,7 +2180,7 @@ Example configuration: autocreate_auto_join_rooms_federated: false ``` --- -Config option: `autocreate_auto_join_room_preset` +### `autocreate_auto_join_room_preset` The room preset to use when auto-creating one of `auto_join_rooms`. Only has an effect if `autocreate_auto_join_rooms` is true. @@ -2202,7 +2202,7 @@ Example configuration: autocreate_auto_join_room_preset: private_chat ``` --- -Config option: `auto_join_mxid_localpart` +### `auto_join_mxid_localpart` The local part of the user id which is used to create `auto_join_rooms` if `autocreate_auto_join_rooms` is true. If this is not provided then the @@ -2226,7 +2226,7 @@ Example configuration: auto_join_mxid_localpart: system ``` --- -Config option: `auto_join_rooms_for_guests` +### `auto_join_rooms_for_guests` When `auto_join_rooms` is specified, setting this flag to false prevents guest accounts from being automatically joined to the rooms. @@ -2238,7 +2238,7 @@ Example configuration: auto_join_rooms_for_guests: false ``` --- -Config option: `inhibit_user_in_use_error` +### `inhibit_user_in_use_error` Whether to inhibit errors raised when registering a new account if the user ID already exists. If turned on, requests to `/register/available` will always @@ -2257,7 +2257,7 @@ inhibit_user_in_use_error: true Config options related to metrics. --- -Config option: `enable_metrics` +### `enable_metrics` Set to true to enable collection and rendering of performance metrics. Defaults to false. @@ -2267,7 +2267,7 @@ Example configuration: enable_metrics: true ``` --- -Config option: `sentry` +### `sentry` Use this option to enable sentry integration. Provide the DSN assigned to you by sentry with the `dsn` setting. @@ -2284,7 +2284,7 @@ sentry: dsn: "..." ``` --- -Config option: `metrics_flags` +### `metrics_flags` Flags to enable Prometheus metrics which are not suitable to be enabled by default, either for performance reasons or limited use. @@ -2299,7 +2299,7 @@ metrics_flags: known_servers: true ``` --- -Config option: `report_stats` +### `report_stats` Whether or not to report anonymized homeserver usage statistics. This is originally set when generating the config. 
Set this option to true or false to change the current @@ -2310,7 +2310,7 @@ Example configuration: report_stats: true ``` --- -Config option: `report_stats_endpoint` +### `report_stats_endpoint` The endpoint to report the anonymized homeserver usage statistics to. Defaults to https://matrix.org/report-usage-stats/push @@ -2324,7 +2324,7 @@ report_stats_endpoint: https://example.com/report-usage-stats/push Config settings related to the client/server API --- -Config option: `room_prejoin_state:` +### `room_prejoin_state:` Controls for the state that is shared with users who receive an invite to a room. By default, the following state event types are shared with users who @@ -2353,7 +2353,7 @@ room_prejoin_state: - m.room.join_rules ``` --- -Config option: `track_puppeted_user_ips` +### `track_puppeted_user_ips` We record the IP address of clients used to access the API for various reasons, including displaying it to the user in the "Where you're signed in" @@ -2373,7 +2373,7 @@ Example configuration: track_puppeted_user_ips: true ``` --- -Config option: `app_service_config_files` +### `app_service_config_files` A list of application service config files to use. @@ -2384,7 +2384,7 @@ app_service_config_files: - app_service_2.yaml ``` --- -Config option: `track_appservice_user_ips` +### `track_appservice_user_ips` Defaults to false. Set to true to enable tracking of application service IP addresses. Implicitly enables MAU tracking for application service users. @@ -2394,7 +2394,7 @@ Example configuration: track_appservice_user_ips: true ``` --- -Config option: `macaroon_secret_key` +### `macaroon_secret_key` A secret which is used to sign access tokens. If none is specified, the `registration_shared_secret` is used, if one is given; otherwise, @@ -2405,7 +2405,7 @@ Example configuration: macaroon_secret_key: ``` --- -Config option: `form_secret` +### `form_secret` A secret which is used to calculate HMACs for form values, to stop falsification of values. Must be specified for the User Consent @@ -2420,7 +2420,7 @@ form_secret: Config options relating to signing keys --- -Config option: `signing_key_path` +### `signing_key_path` Path to the signing key to sign messages with. @@ -2429,7 +2429,7 @@ Example configuration: signing_key_path: "CONFDIR/SERVERNAME.signing.key" ``` --- -Config option: `old_signing_keys` +### `old_signing_keys` The keys that the server used to sign messages with but won't use to sign new messages. For each key, `key` should be the base64-encoded public key, and @@ -2445,7 +2445,7 @@ old_signing_keys: "ed25519:id": { key: "base64string", expired_ts: 123456789123 } ``` --- -Config option: `key_refresh_interval` +### `key_refresh_interval` How long key response published by this server is valid for. Used to set the `valid_until_ts` in `/key/v2` APIs. @@ -2457,7 +2457,7 @@ Example configuration: key_refresh_interval: 2d ``` --- -Config option: `trusted_key_servers:` +### `trusted_key_servers:` The trusted servers to download signing keys from. @@ -2500,7 +2500,7 @@ trusted_key_servers: - server_name: "matrix.org" ``` --- -Config option: `suppress_key_server_warning` +### `suppress_key_server_warning` Set the following to true to disable the warning that is emitted when the `trusted_key_servers` include 'matrix.org'. See above. @@ -2510,7 +2510,7 @@ Example configuration: suppress_key_server_warning: true ``` --- -Config option: `key_server_signing_keys_path` +### `key_server_signing_keys_path` The signing keys to use when acting as a trusted key server. 
If not specified defaults to the server signing key. @@ -2536,7 +2536,7 @@ You will also want to investigate the settings under the "sso" configuration section below. --- -Config option: `saml2_config` +### `saml2_config` Enable SAML2 for registration and login. Uses pysaml2. To learn more about pysaml and to find a full list options for configuring pysaml, read the docs [here](https://pysaml2.readthedocs.io/en/latest/). @@ -2673,7 +2673,7 @@ saml2_config: idp_entityid: 'https://our_idp/entityid' ``` --- -Config option: `oidc_providers` +### `oidc_providers` List of OpenID Connect (OIDC) / OAuth 2.0 identity providers, for registration and login. See [here](../../openid.md) @@ -2861,7 +2861,7 @@ oidc_providers: value: "synapseUsers" ``` --- -Config option: `cas_config` +### `cas_config` Enable Central Authentication Service (CAS) for registration and login. Has the following sub-options: @@ -2887,7 +2887,7 @@ cas_config: department: None ``` --- -Config option: `sso` +### `sso` Additional settings to use with single-sign on systems such as OpenID Connect, SAML2 and CAS. @@ -2924,7 +2924,7 @@ sso: update_profile_information: true ``` --- -Config option: `jwt_config` +### `jwt_config` JSON web token integration. The following settings can be used to make Synapse JSON web tokens for authentication, instead of its internal @@ -2971,7 +2971,7 @@ jwt_config: - "provided-by-your-issuer" ``` --- -Config option: `password_config` +### `password_config` Use this setting to enable password-based logins. @@ -3015,7 +3015,7 @@ password_config: require_uppercase: true ``` --- -Config option: `ui_auth` +### `ui_auth` The amount of time to allow a user-interactive authentication session to be active. @@ -3037,7 +3037,7 @@ ui_auth: session_timeout: "15s" ``` --- -Config option: `email` +### `email` Configuration for sending emails from Synapse. @@ -3140,7 +3140,7 @@ email: Configuration settings related to push notifications --- -Config option: `push` +### `push` This setting defines options for push notifications. @@ -3173,7 +3173,7 @@ push: Config options relating to rooms. --- -Config option: `encryption_enabled_by_default` +### `encryption_enabled_by_default` Controls whether locally-created rooms should be end-to-end encrypted by default. @@ -3195,7 +3195,7 @@ Example configuration: encryption_enabled_by_default_for_room_type: invite ``` --- -Config option: `user_directory` +### `user_directory` This setting defines options related to the user directory. @@ -3226,7 +3226,7 @@ user_directory: prefer_local_users: true ``` --- -Config option: `user_consent` +### `user_consent` For detailed instructions on user consent configuration, see [here](../../consent_tracking.md). @@ -3277,7 +3277,7 @@ user_consent: policy_name: Privacy Policy ``` --- -Config option: `stats` +### `stats` Settings for local room and user statistics collection. See [here](../../room_and_user_statistics.md) for more. @@ -3292,7 +3292,7 @@ stats: enabled: false ``` --- -Config option: `server_notices` +### `server_notices` Use this setting to enable a room which can be used to send notices from the server to users. It is a special room which users cannot leave; notices @@ -3316,7 +3316,7 @@ server_notices: room_name: "Server Notices" ``` --- -Config option: `enable_room_list_search` +### `enable_room_list_search` Set to false to disable searching the public room list. 
When disabled blocks searching local and remote room lists for local and remote @@ -3327,7 +3327,7 @@ Example configuration: enable_room_list_search: false ``` --- -Config option: `alias_creation` +### `alias_creation` The `alias_creation` option controls who is allowed to create aliases on this server. @@ -3388,7 +3388,7 @@ room_list_publication_rules: ``` --- -Config option: `default_power_level_content_override` +### `default_power_level_content_override` The `default_power_level_content_override` option controls the default power levels for rooms. @@ -3417,7 +3417,7 @@ default_power_level_content_override: Configuration options related to Opentracing support. --- -Config option: `opentracing` +### `opentracing` These settings enable and configure opentracing, which implements distributed tracing. This allows you to observe the causal chains of events across servers @@ -3460,7 +3460,7 @@ opentracing: Configuration options related to workers. --- -Config option: `send_federation` +### `send_federation` Controls sending of outbound federation transactions on the main process. Set to false if using a federation sender worker. Defaults to true. @@ -3470,7 +3470,7 @@ Example configuration: send_federation: false ``` --- -Config option: `federation_sender_instances` +### `federation_sender_instances` It is possible to run multiple federation sender workers, in which case the work is balanced across them. Use this setting to list the senders. @@ -3486,7 +3486,7 @@ federation_sender_instances: - federation_sender1 ``` --- -Config option: `instance_map` +### `instance_map` When using workers this should be a map from worker name to the HTTP replication listener of the worker, if configured. @@ -3499,7 +3499,7 @@ instance_map: port: 8034 ``` --- -Config option: `stream_writers` +### `stream_writers` Experimental: When using workers you can define which workers should handle event persistence and typing notifications. Any worker @@ -3512,7 +3512,7 @@ stream_writers: typing: worker1 ``` --- -Config option: `run_background_tasks_on` +### `run_background_tasks_on` The worker that is used to run background tasks (e.g. cleaning up expired data). If not provided this defaults to the main process. @@ -3522,7 +3522,7 @@ Example configuration: run_background_tasks_on: worker1 ``` --- -Config option: `worker_replication_secret` +### `worker_replication_secret` A shared secret used by the replication APIs to authenticate HTTP requests from workers. @@ -3533,7 +3533,7 @@ Example configuration: ```yaml worker_replication_secret: "secret_secret" ``` -Config option: `redis` +### `redis` Configuration for Redis when using workers. This *must* be enabled when using workers (unless using old style direct TCP configuration). @@ -3555,7 +3555,7 @@ redis: Configuration settings related to background updates. --- -Config option: `background_updates` +### `background_updates` Background updates are database updates that are run in the background in batches. 
The duration, minimum batch size, default batch size, whether to sleep between batches and if so, how long to From 8ecf6be1e1a737a09f51137302ad0d9ae4ed519b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 15 Jun 2022 19:48:22 +0100 Subject: [PATCH 56/85] Move some event auth checks out to a different method (#13065) * Add auth events to events used in tests * Move some event auth checks out to a different method Some of the event auth checks apply to an event's auth_events, rather than the state at the event - which means they can play no part in state resolution. Move them out to a separate method. * Rename check_auth_rules_for_event Now it only checks the state-dependent auth rules, it needs a better name. --- changelog.d/13065.misc | 1 + synapse/event_auth.py | 108 ++++++++++++----- synapse/handlers/event_auth.py | 8 +- synapse/handlers/federation_event.py | 27 +++-- synapse/state/v1.py | 4 +- synapse/state/v2.py | 2 +- tests/test_event_auth.py | 167 ++++++++++++++++++--------- 7 files changed, 219 insertions(+), 98 deletions(-) create mode 100644 changelog.d/13065.misc diff --git a/changelog.d/13065.misc b/changelog.d/13065.misc new file mode 100644 index 000000000..e9e8a7659 --- /dev/null +++ b/changelog.d/13065.misc @@ -0,0 +1 @@ +Avoid rechecking event auth rules which are independent of room state. diff --git a/synapse/event_auth.py b/synapse/event_auth.py index e23503c1e..360a50cc7 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -15,11 +15,12 @@ import logging import typing -from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union +from typing import Any, Collection, Dict, Iterable, List, Optional, Set, Tuple, Union from canonicaljson import encode_canonical_json from signedjson.key import decode_verify_key_bytes from signedjson.sign import SignatureVerifyException, verify_signed_json +from typing_extensions import Protocol from unpaddedbase64 import decode_base64 from synapse.api.constants import ( @@ -35,7 +36,8 @@ from synapse.api.room_versions import ( EventFormatVersions, RoomVersion, ) -from synapse.types import StateMap, UserID, get_domain_from_id +from synapse.storage.databases.main.events_worker import EventRedactBehaviour +from synapse.types import MutableStateMap, StateMap, UserID, get_domain_from_id if typing.TYPE_CHECKING: # conditional imports to avoid import cycle @@ -45,6 +47,17 @@ if typing.TYPE_CHECKING: logger = logging.getLogger(__name__) +class _EventSourceStore(Protocol): + async def get_events( + self, + event_ids: Collection[str], + redact_behaviour: EventRedactBehaviour, + get_prev_content: bool = False, + allow_rejected: bool = False, + ) -> Dict[str, "EventBase"]: + ... 
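The `_EventSourceStore` Protocol above exists so that callers can inject any object exposing a matching `get_events` coroutine — the tests later in this patch do exactly that with a stub. Below is a minimal, self-contained sketch of that pattern; `Event` stands in for Synapse's `EventBase` and a plain string replaces `EventRedactBehaviour`, so none of the names here are Synapse's actual API:

```python
import asyncio
from typing import Collection, Dict, Protocol


class Event:
    """Stand-in for synapse.events.EventBase (assumption: only event_id matters here)."""

    def __init__(self, event_id: str) -> None:
        self.event_id = event_id


class EventSourceStore(Protocol):
    # Structural interface: anything with a matching get_events() conforms.
    async def get_events(
        self,
        event_ids: Collection[str],
        redact_behaviour: str,
        get_prev_content: bool = False,
        allow_rejected: bool = False,
    ) -> Dict[str, Event]:
        ...


class InMemoryEventStore:
    """Conforms to EventSourceStore without inheriting from it."""

    def __init__(self) -> None:
        self._events: Dict[str, Event] = {}

    def add(self, event: Event) -> None:
        self._events[event.event_id] = event

    async def get_events(
        self,
        event_ids: Collection[str],
        redact_behaviour: str,
        get_prev_content: bool = False,
        allow_rejected: bool = False,
    ) -> Dict[str, Event]:
        # Return only the events we hold; the caller decides whether a missing
        # auth event is an error (the new check raises RuntimeError for that).
        return {e: self._events[e] for e in event_ids if e in self._events}


async def fetch_auth_events(
    store: EventSourceStore, event_ids: Collection[str]
) -> Dict[str, Event]:
    # Typed against the Protocol, so the real store and the stub both work.
    return await store.get_events(event_ids, "as_is", allow_rejected=True)


async def main() -> None:
    store = InMemoryEventStore()
    store.add(Event("$create"))
    found = await fetch_auth_events(store, ["$create", "$missing"])
    assert set(found) == {"$create"}


asyncio.run(main())
```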
+
+
 def validate_event_for_room_version(event: "EventBase") -> None:
     """Ensure that the event complies with the limits, and has the right signatures
 
@@ -112,47 +125,52 @@ def validate_event_for_room_version(event: "EventBase") -> None:
             raise AuthError(403, "Event not signed by authorising server")
 
 
-def check_auth_rules_for_event(
+async def check_state_independent_auth_rules(
+    store: _EventSourceStore,
     event: "EventBase",
-    auth_events: Iterable["EventBase"],
 ) -> None:
-    """Check that an event complies with the auth rules
+    """Check that an event complies with auth rules that are independent of room state
 
-    Checks whether an event passes the auth rules with a given set of state events
-
-    Assumes that we have already checked that the event is the right shape (it has
-    enough signatures, has a room ID, etc). In other words:
-
-     - it's fine for use in state resolution, when we have already decided whether to
-       accept the event or not, and are now trying to decide whether it should make it
-       into the room state
-
-     - when we're doing the initial event auth, it is only suitable in combination with
-       a bunch of other tests.
+    Runs through the first few auth rules, which are independent of room state. (Which
+    means that we only need to do them once for each received event)
 
     Args:
+        store: the datastore; used to fetch the auth events for validation
        event: the event being checked.
-        auth_events: the room state to check the events against.
 
     Raises:
        AuthError if the checks fail
    """
-    # We need to ensure that the auth events are actually for the same room, to
-    # stop people from using powers they've been granted in other rooms for
-    # example.
-    #
-    # Arguably we don't need to do this when we're just doing state res, as presumably
-    # the state res algorithm isn't silly enough to give us events from different rooms.
-    # Still, it's easier to do it anyway.
+    # Check the auth events.
+    auth_events = await store.get_events(
+        event.auth_event_ids(),
+        redact_behaviour=EventRedactBehaviour.as_is,
+        allow_rejected=True,
+    )
     room_id = event.room_id
-    for auth_event in auth_events:
+    auth_dict: MutableStateMap[str] = {}
+    for auth_event_id in event.auth_event_ids():
+        auth_event = auth_events.get(auth_event_id)
+
+        # we should have all the auth events by now, so if we do not, that suggests
+        # a synapse programming error
+        if auth_event is None:
+            raise RuntimeError(
+                f"Event {event.event_id} has unknown auth event {auth_event_id}"
+            )
+
+        # We need to ensure that the auth events are actually for the same room, to
+        # stop people from using powers they've been granted in other rooms for
+        # example.
         if auth_event.room_id != room_id:
             raise AuthError(
                 403,
                 "During auth for event %s in room %s, found event %s in the state "
                 "which is in room %s"
-                % (event.event_id, room_id, auth_event.event_id, auth_event.room_id),
+                % (event.event_id, room_id, auth_event_id, auth_event.room_id),
             )
+
+        # We also need to check that the auth event itself is not rejected.
         if auth_event.rejected_reason:
             raise AuthError(
                 403,
@@ -160,6 +178,8 @@ def check_auth_rules_for_event(
                 % (event.event_id, auth_event.event_id),
             )
 
+        auth_dict[(auth_event.type, auth_event.state_key)] = auth_event_id
+
     # Implementation of https://matrix.org/docs/spec/rooms/v1#authorization-rules
     #
     # 1. If type is m.room.create:
@@ -181,16 +201,46 @@ def check_auth_rules_for_event(
                 "room appears to have unsupported version %s" % (room_version_prop,),
             )
 
-        logger.debug("Allowing! 
%s", event) return - auth_dict = {(e.type, e.state_key): e for e in auth_events} - # 3. If event does not have a m.room.create in its auth_events, reject. creation_event = auth_dict.get((EventTypes.Create, ""), None) if not creation_event: raise AuthError(403, "No create event in auth events") + +def check_state_dependent_auth_rules( + event: "EventBase", + auth_events: Iterable["EventBase"], +) -> None: + """Check that an event complies with auth rules that depend on room state + + Runs through the parts of the auth rules that check an event against bits of room + state. + + Note: + + - it's fine for use in state resolution, when we have already decided whether to + accept the event or not, and are now trying to decide whether it should make it + into the room state + + - when we're doing the initial event auth, it is only suitable in combination with + a bunch of other tests (including, but not limited to, check_state_independent_auth_rules). + + Args: + event: the event being checked. + auth_events: the room state to check the events against. + + Raises: + AuthError if the checks fail + """ + # there are no state-dependent auth rules for create events. + if event.type == EventTypes.Create: + logger.debug("Allowing! %s", event) + return + + auth_dict = {(e.type, e.state_key): e for e in auth_events} + # additional check for m.federate creating_domain = get_domain_from_id(event.room_id) originating_domain = get_domain_from_id(event.sender) diff --git a/synapse/handlers/event_auth.py b/synapse/handlers/event_auth.py index ed4149bd5..a2dd9c7ef 100644 --- a/synapse/handlers/event_auth.py +++ b/synapse/handlers/event_auth.py @@ -23,7 +23,10 @@ from synapse.api.constants import ( ) from synapse.api.errors import AuthError, Codes, SynapseError from synapse.api.room_versions import RoomVersion -from synapse.event_auth import check_auth_rules_for_event +from synapse.event_auth import ( + check_state_dependent_auth_rules, + check_state_independent_auth_rules, +) from synapse.events import EventBase from synapse.events.builder import EventBuilder from synapse.events.snapshot import EventContext @@ -52,9 +55,10 @@ class EventAuthHandler: context: EventContext, ) -> None: """Check an event passes the auth rules at its own auth events""" + await check_state_independent_auth_rules(self._store, event) auth_event_ids = event.auth_event_ids() auth_events_by_id = await self._store.get_events(auth_event_ids) - check_auth_rules_for_event(event, auth_events_by_id.values()) + check_state_dependent_auth_rules(event, auth_events_by_id.values()) def compute_auth_events( self, diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 6c9e6a00b..565ffd7cf 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -50,7 +50,8 @@ from synapse.api.errors import ( from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion, RoomVersions from synapse.event_auth import ( auth_types_for_event, - check_auth_rules_for_event, + check_state_dependent_auth_rules, + check_state_independent_auth_rules, validate_event_for_room_version, ) from synapse.events import EventBase @@ -1430,7 +1431,9 @@ class FederationEventHandler: allow_rejected=True, ) - def prep(event: EventBase) -> Optional[Tuple[EventBase, EventContext]]: + events_and_contexts_to_persist: List[Tuple[EventBase, EventContext]] = [] + + async def prep(event: EventBase) -> None: with nested_logging_context(suffix=event.event_id): auth = [] for auth_event_id in event.auth_event_ids(): 
@@ -1444,7 +1447,7 @@ class FederationEventHandler: event, auth_event_id, ) - return None + return auth.append(ae) # we're not bothering about room state, so flag the event as an outlier. @@ -1453,17 +1456,20 @@ class FederationEventHandler: context = EventContext.for_outlier(self._storage_controllers) try: validate_event_for_room_version(event) - check_auth_rules_for_event(event, auth) + await check_state_independent_auth_rules(self._store, event) + check_state_dependent_auth_rules(event, auth) except AuthError as e: logger.warning("Rejecting %r because %s", event, e) context.rejected = RejectedReason.AUTH_ERROR - return event, context + events_and_contexts_to_persist.append((event, context)) + + for event in fetched_events: + await prep(event) - events_to_persist = (x for x in (prep(event) for event in fetched_events) if x) await self.persist_events_and_notify( room_id, - tuple(events_to_persist), + events_and_contexts_to_persist, # Mark these events backfilled as they're historic events that will # eventually be backfilled. For example, missing events we fetch # during backfill should be marked as backfilled as well. @@ -1515,7 +1521,8 @@ class FederationEventHandler: # ... and check that the event passes auth at those auth events. try: - check_auth_rules_for_event(event, claimed_auth_events) + await check_state_independent_auth_rules(self._store, event) + check_state_dependent_auth_rules(event, claimed_auth_events) except AuthError as e: logger.warning( "While checking auth of %r against auth_events: %s", event, e @@ -1563,7 +1570,7 @@ class FederationEventHandler: auth_events_for_auth = calculated_auth_event_map try: - check_auth_rules_for_event(event, auth_events_for_auth.values()) + check_state_dependent_auth_rules(event, auth_events_for_auth.values()) except AuthError as e: logger.warning("Failed auth resolution for %r because %s", event, e) context.rejected = RejectedReason.AUTH_ERROR @@ -1663,7 +1670,7 @@ class FederationEventHandler: ) try: - check_auth_rules_for_event(event, current_auth_events) + check_state_dependent_auth_rules(event, current_auth_events) except AuthError as e: logger.warning( "Soft-failing %r (from %s) because %s", diff --git a/synapse/state/v1.py b/synapse/state/v1.py index 8bbb4ce41..500e38469 100644 --- a/synapse/state/v1.py +++ b/synapse/state/v1.py @@ -330,7 +330,7 @@ def _resolve_auth_events( auth_events[(prev_event.type, prev_event.state_key)] = prev_event try: # The signatures have already been checked at this point - event_auth.check_auth_rules_for_event( + event_auth.check_state_dependent_auth_rules( event, auth_events.values(), ) @@ -347,7 +347,7 @@ def _resolve_normal_events( for event in _ordered_events(events): try: # The signatures have already been checked at this point - event_auth.check_auth_rules_for_event( + event_auth.check_state_dependent_auth_rules( event, auth_events.values(), ) diff --git a/synapse/state/v2.py b/synapse/state/v2.py index 6a16f38a1..7db032203 100644 --- a/synapse/state/v2.py +++ b/synapse/state/v2.py @@ -573,7 +573,7 @@ async def _iterative_auth_checks( auth_events[key] = event_map[ev_id] try: - event_auth.check_auth_rules_for_event( + event_auth.check_state_dependent_auth_rules( event, auth_events.values(), ) diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py index 229ecd84a..e8e458cfd 100644 --- a/tests/test_event_auth.py +++ b/tests/test_event_auth.py @@ -1,4 +1,4 @@ -# Copyright 2018 New Vector Ltd +# Copyright 2018-2022 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,7 +13,7 @@ # limitations under the License. import unittest -from typing import Optional +from typing import Collection, Dict, Iterable, List, Optional from parameterized import parameterized @@ -22,8 +22,41 @@ from synapse.api.constants import EventContentFields from synapse.api.errors import AuthError from synapse.api.room_versions import EventFormatVersions, RoomVersion, RoomVersions from synapse.events import EventBase, make_event_from_dict +from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.types import JsonDict, get_domain_from_id +from tests.test_utils import get_awaitable_result + + +class _StubEventSourceStore: + """A stub implementation of the EventSourceStore""" + + def __init__(self): + self._store: Dict[str, EventBase] = {} + + def add_event(self, event: EventBase): + self._store[event.event_id] = event + + def add_events(self, events: Iterable[EventBase]): + for event in events: + self._store[event.event_id] = event + + async def get_events( + self, + event_ids: Collection[str], + redact_behaviour: EventRedactBehaviour, + get_prev_content: bool = False, + allow_rejected: bool = False, + ) -> Dict[str, EventBase]: + assert allow_rejected + assert not get_prev_content + assert redact_behaviour == EventRedactBehaviour.as_is + results = {} + for e in event_ids: + if e in self._store: + results[e] = self._store[e] + return results + class EventAuthTestCase(unittest.TestCase): def test_rejected_auth_events(self): @@ -36,11 +69,15 @@ class EventAuthTestCase(unittest.TestCase): _join_event(RoomVersions.V9, creator), ] + event_store = _StubEventSourceStore() + event_store.add_events(auth_events) + # creator should be able to send state - event_auth.check_auth_rules_for_event( - _random_state_event(RoomVersions.V9, creator), - auth_events, + event = _random_state_event(RoomVersions.V9, creator, auth_events) + get_awaitable_result( + event_auth.check_state_independent_auth_rules(event_store, event) ) + event_auth.check_state_dependent_auth_rules(event, auth_events) # ... but a rejected join_rules event should cause it to be rejected rejected_join_rules = _join_rules_event( @@ -50,23 +87,27 @@ class EventAuthTestCase(unittest.TestCase): ) rejected_join_rules.rejected_reason = "stinky" auth_events.append(rejected_join_rules) + event_store.add_event(rejected_join_rules) - self.assertRaises( - AuthError, - event_auth.check_auth_rules_for_event, - _random_state_event(RoomVersions.V9, creator), - auth_events, - ) + with self.assertRaises(AuthError): + get_awaitable_result( + event_auth.check_state_independent_auth_rules( + event_store, + _random_state_event(RoomVersions.V9, creator), + ) + ) # ... 
even if there is *also* a good join rules auth_events.append(_join_rules_event(RoomVersions.V9, creator, "public")) + event_store.add_event(rejected_join_rules) - self.assertRaises( - AuthError, - event_auth.check_auth_rules_for_event, - _random_state_event(RoomVersions.V9, creator), - auth_events, - ) + with self.assertRaises(AuthError): + get_awaitable_result( + event_auth.check_state_independent_auth_rules( + event_store, + _random_state_event(RoomVersions.V9, creator), + ) + ) def test_random_users_cannot_send_state_before_first_pl(self): """ @@ -82,7 +123,7 @@ class EventAuthTestCase(unittest.TestCase): ] # creator should be able to send state - event_auth.check_auth_rules_for_event( + event_auth.check_state_dependent_auth_rules( _random_state_event(RoomVersions.V1, creator), auth_events, ) @@ -90,7 +131,7 @@ class EventAuthTestCase(unittest.TestCase): # joiner should not be able to send state self.assertRaises( AuthError, - event_auth.check_auth_rules_for_event, + event_auth.check_state_dependent_auth_rules, _random_state_event(RoomVersions.V1, joiner), auth_events, ) @@ -119,13 +160,13 @@ class EventAuthTestCase(unittest.TestCase): # pleb should not be able to send state self.assertRaises( AuthError, - event_auth.check_auth_rules_for_event, + event_auth.check_state_dependent_auth_rules, _random_state_event(RoomVersions.V1, pleb), auth_events, ), # king should be able to send state - event_auth.check_auth_rules_for_event( + event_auth.check_state_dependent_auth_rules( _random_state_event(RoomVersions.V1, king), auth_events, ) @@ -140,27 +181,27 @@ class EventAuthTestCase(unittest.TestCase): ] # creator should be able to send aliases - event_auth.check_auth_rules_for_event( + event_auth.check_state_dependent_auth_rules( _alias_event(RoomVersions.V1, creator), auth_events, ) # Reject an event with no state key. with self.assertRaises(AuthError): - event_auth.check_auth_rules_for_event( + event_auth.check_state_dependent_auth_rules( _alias_event(RoomVersions.V1, creator, state_key=""), auth_events, ) # If the domain of the sender does not match the state key, reject. with self.assertRaises(AuthError): - event_auth.check_auth_rules_for_event( + event_auth.check_state_dependent_auth_rules( _alias_event(RoomVersions.V1, creator, state_key="test.com"), auth_events, ) # Note that the member does *not* need to be in the room. - event_auth.check_auth_rules_for_event( + event_auth.check_state_dependent_auth_rules( _alias_event(RoomVersions.V1, other), auth_events, ) @@ -175,24 +216,24 @@ class EventAuthTestCase(unittest.TestCase): ] # creator should be able to send aliases - event_auth.check_auth_rules_for_event( + event_auth.check_state_dependent_auth_rules( _alias_event(RoomVersions.V6, creator), auth_events, ) # No particular checks are done on the state key. - event_auth.check_auth_rules_for_event( + event_auth.check_state_dependent_auth_rules( _alias_event(RoomVersions.V6, creator, state_key=""), auth_events, ) - event_auth.check_auth_rules_for_event( + event_auth.check_state_dependent_auth_rules( _alias_event(RoomVersions.V6, creator, state_key="test.com"), auth_events, ) # Per standard auth rules, the member must be in the room. with self.assertRaises(AuthError): - event_auth.check_auth_rules_for_event( + event_auth.check_state_dependent_auth_rules( _alias_event(RoomVersions.V6, other), auth_events, ) @@ -220,12 +261,12 @@ class EventAuthTestCase(unittest.TestCase): # on room V1, pleb should be able to modify the notifications power level. 
if allow_modification: - event_auth.check_auth_rules_for_event(pl_event, auth_events) + event_auth.check_state_dependent_auth_rules(pl_event, auth_events) else: # But an MSC2209 room rejects this change. with self.assertRaises(AuthError): - event_auth.check_auth_rules_for_event(pl_event, auth_events) + event_auth.check_state_dependent_auth_rules(pl_event, auth_events) def test_join_rules_public(self): """ @@ -243,14 +284,14 @@ class EventAuthTestCase(unittest.TestCase): } # Check join. - event_auth.check_auth_rules_for_event( + event_auth.check_state_dependent_auth_rules( _join_event(RoomVersions.V6, pleb), auth_events.values(), ) # A user cannot be force-joined to a room. with self.assertRaises(AuthError): - event_auth.check_auth_rules_for_event( + event_auth.check_state_dependent_auth_rules( _member_event(RoomVersions.V6, pleb, "join", sender=creator), auth_events.values(), ) @@ -260,7 +301,7 @@ class EventAuthTestCase(unittest.TestCase): RoomVersions.V6, pleb, "ban" ) with self.assertRaises(AuthError): - event_auth.check_auth_rules_for_event( + event_auth.check_state_dependent_auth_rules( _join_event(RoomVersions.V6, pleb), auth_events.values(), ) @@ -269,7 +310,7 @@ class EventAuthTestCase(unittest.TestCase): auth_events[("m.room.member", pleb)] = _member_event( RoomVersions.V6, pleb, "leave" ) - event_auth.check_auth_rules_for_event( + event_auth.check_state_dependent_auth_rules( _join_event(RoomVersions.V6, pleb), auth_events.values(), ) @@ -278,7 +319,7 @@ class EventAuthTestCase(unittest.TestCase): auth_events[("m.room.member", pleb)] = _member_event( RoomVersions.V6, pleb, "join" ) - event_auth.check_auth_rules_for_event( + event_auth.check_state_dependent_auth_rules( _join_event(RoomVersions.V6, pleb), auth_events.values(), ) @@ -287,7 +328,7 @@ class EventAuthTestCase(unittest.TestCase): auth_events[("m.room.member", pleb)] = _member_event( RoomVersions.V6, pleb, "invite", sender=creator ) - event_auth.check_auth_rules_for_event( + event_auth.check_state_dependent_auth_rules( _join_event(RoomVersions.V6, pleb), auth_events.values(), ) @@ -309,14 +350,14 @@ class EventAuthTestCase(unittest.TestCase): # A join without an invite is rejected. with self.assertRaises(AuthError): - event_auth.check_auth_rules_for_event( + event_auth.check_state_dependent_auth_rules( _join_event(RoomVersions.V6, pleb), auth_events.values(), ) # A user cannot be force-joined to a room. 
with self.assertRaises(AuthError): - event_auth.check_auth_rules_for_event( + event_auth.check_state_dependent_auth_rules( _member_event(RoomVersions.V6, pleb, "join", sender=creator), auth_events.values(), ) @@ -326,7 +367,7 @@ class EventAuthTestCase(unittest.TestCase): RoomVersions.V6, pleb, "ban" ) with self.assertRaises(AuthError): - event_auth.check_auth_rules_for_event( + event_auth.check_state_dependent_auth_rules( _join_event(RoomVersions.V6, pleb), auth_events.values(), ) @@ -336,7 +377,7 @@ class EventAuthTestCase(unittest.TestCase): RoomVersions.V6, pleb, "leave" ) with self.assertRaises(AuthError): - event_auth.check_auth_rules_for_event( + event_auth.check_state_dependent_auth_rules( _join_event(RoomVersions.V6, pleb), auth_events.values(), ) @@ -345,7 +386,7 @@ class EventAuthTestCase(unittest.TestCase): auth_events[("m.room.member", pleb)] = _member_event( RoomVersions.V6, pleb, "join" ) - event_auth.check_auth_rules_for_event( + event_auth.check_state_dependent_auth_rules( _join_event(RoomVersions.V6, pleb), auth_events.values(), ) @@ -354,7 +395,7 @@ class EventAuthTestCase(unittest.TestCase): auth_events[("m.room.member", pleb)] = _member_event( RoomVersions.V6, pleb, "invite", sender=creator ) - event_auth.check_auth_rules_for_event( + event_auth.check_state_dependent_auth_rules( _join_event(RoomVersions.V6, pleb), auth_events.values(), ) @@ -376,7 +417,7 @@ class EventAuthTestCase(unittest.TestCase): } with self.assertRaises(AuthError): - event_auth.check_auth_rules_for_event( + event_auth.check_state_dependent_auth_rules( _join_event(RoomVersions.V6, pleb), auth_events.values(), ) @@ -413,7 +454,7 @@ class EventAuthTestCase(unittest.TestCase): EventContentFields.AUTHORISING_USER: "@creator:example.com" }, ) - event_auth.check_auth_rules_for_event( + event_auth.check_state_dependent_auth_rules( authorised_join_event, auth_events.values(), ) @@ -429,7 +470,7 @@ class EventAuthTestCase(unittest.TestCase): pl_auth_events[("m.room.member", "@inviter:foo.test")] = _join_event( RoomVersions.V8, "@inviter:foo.test" ) - event_auth.check_auth_rules_for_event( + event_auth.check_state_dependent_auth_rules( _join_event( RoomVersions.V8, pleb, @@ -442,7 +483,7 @@ class EventAuthTestCase(unittest.TestCase): # A join which is missing an authorised server is rejected. with self.assertRaises(AuthError): - event_auth.check_auth_rules_for_event( + event_auth.check_state_dependent_auth_rules( _join_event(RoomVersions.V8, pleb), auth_events.values(), ) @@ -455,7 +496,7 @@ class EventAuthTestCase(unittest.TestCase): {"invite": 100, "users": {"@other:example.com": 150}}, ) with self.assertRaises(AuthError): - event_auth.check_auth_rules_for_event( + event_auth.check_state_dependent_auth_rules( _join_event( RoomVersions.V8, pleb, @@ -469,7 +510,7 @@ class EventAuthTestCase(unittest.TestCase): # A user cannot be force-joined to a room. (This uses an event which # *would* be valid, but is sent be a different user.) 
with self.assertRaises(AuthError): - event_auth.check_auth_rules_for_event( + event_auth.check_state_dependent_auth_rules( _member_event( RoomVersions.V8, pleb, @@ -487,7 +528,7 @@ class EventAuthTestCase(unittest.TestCase): RoomVersions.V8, pleb, "ban" ) with self.assertRaises(AuthError): - event_auth.check_auth_rules_for_event( + event_auth.check_state_dependent_auth_rules( authorised_join_event, auth_events.values(), ) @@ -496,7 +537,7 @@ class EventAuthTestCase(unittest.TestCase): auth_events[("m.room.member", pleb)] = _member_event( RoomVersions.V8, pleb, "leave" ) - event_auth.check_auth_rules_for_event( + event_auth.check_state_dependent_auth_rules( authorised_join_event, auth_events.values(), ) @@ -506,7 +547,7 @@ class EventAuthTestCase(unittest.TestCase): auth_events[("m.room.member", pleb)] = _member_event( RoomVersions.V8, pleb, "join" ) - event_auth.check_auth_rules_for_event( + event_auth.check_state_dependent_auth_rules( _join_event(RoomVersions.V8, pleb), auth_events.values(), ) @@ -516,7 +557,7 @@ class EventAuthTestCase(unittest.TestCase): auth_events[("m.room.member", pleb)] = _member_event( RoomVersions.V8, pleb, "invite", sender=creator ) - event_auth.check_auth_rules_for_event( + event_auth.check_state_dependent_auth_rules( _join_event(RoomVersions.V8, pleb), auth_events.values(), ) @@ -539,6 +580,7 @@ def _create_event( "state_key": "", "sender": user_id, "content": {"creator": user_id}, + "auth_events": [], }, room_version=room_version, ) @@ -559,6 +601,7 @@ def _member_event( "sender": sender or user_id, "state_key": user_id, "content": {"membership": membership, **(additional_content or {})}, + "auth_events": [], "prev_events": [], }, room_version=room_version, @@ -609,7 +652,22 @@ def _alias_event(room_version: RoomVersion, sender: str, **kwargs) -> EventBase: return make_event_from_dict(data, room_version=room_version) -def _random_state_event(room_version: RoomVersion, sender: str) -> EventBase: +def _build_auth_dict_for_room_version( + room_version: RoomVersion, auth_events: Iterable[EventBase] +) -> List: + if room_version.event_format == EventFormatVersions.V1: + return [(e.event_id, "not_used") for e in auth_events] + else: + return [e.event_id for e in auth_events] + + +def _random_state_event( + room_version: RoomVersion, + sender: str, + auth_events: Optional[Iterable[EventBase]] = None, +) -> EventBase: + if auth_events is None: + auth_events = [] return make_event_from_dict( { "room_id": TEST_ROOM_ID, @@ -618,6 +676,7 @@ def _random_state_event(room_version: RoomVersion, sender: str) -> EventBase: "sender": sender, "state_key": "", "content": {"membership": "join"}, + "auth_events": _build_auth_dict_for_room_version(room_version, auth_events), }, room_version=room_version, ) From ffe2464836dec7bbce2659b2b4e62eb956bf2a90 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Thu, 16 Jun 2022 10:31:10 +0100 Subject: [PATCH 57/85] Add instructions for running Complement with `gotestfmt`-formatted output locally. (#13073) --- changelog.d/13073.doc | 1 + docs/development/contributing_guide.md | 14 ++++++++++++++ 2 files changed, 15 insertions(+) create mode 100644 changelog.d/13073.doc diff --git a/changelog.d/13073.doc b/changelog.d/13073.doc new file mode 100644 index 000000000..e162a8404 --- /dev/null +++ b/changelog.d/13073.doc @@ -0,0 +1 @@ +Add instructions for running Complement with `gotestfmt`-formatted output locally. 
\ No newline at end of file
diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md
index c2f04a390..4738f8a6b 100644
--- a/docs/development/contributing_guide.md
+++ b/docs/development/contributing_guide.md
@@ -310,6 +310,20 @@ The above will run a monolithic (single-process) Synapse with SQLite as the data
 - Passing `WORKERS=1` as an environment variable to use a workerised setup
   instead. This option implies the use of Postgres.
 
+### Prettier formatting with `gotestfmt`
+
+If you want to format the output of the tests the same way as it looks in CI,
+install [gotestfmt](https://github.com/haveyoudebuggedit/gotestfmt).
+
+You can then use this incantation to format the tests appropriately:
+
+```sh
+COMPLEMENT_DIR=../complement ./scripts-dev/complement.sh -json | gotestfmt -hide successful-tests
+```
+
+(Remove `-hide successful-tests` if you don't want to hide successful tests.)
+
+
 ### Access database for homeserver after Complement test runs.
 If you're curious what the database looks like after you run some tests, here are some steps to get you going in Synapse:

From 0ef1307619799bec2bbb96ce6516f307b0f8f217 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jacek=20Ku=C5=9Bnierz?=
Date: Thu, 16 Jun 2022 12:48:18 +0200
Subject: [PATCH 58/85] Add custom well-known (#13035)

Co-authored-by: David Robertson
---
 changelog.d/13035.feature | 1 +
 .../configuration/config_documentation.md | 17 ++++++++++++++
 synapse/config/server.py | 20 +++++++++++++++++
 synapse/rest/well_known.py | 9 +++++++-
 tests/rest/test_well_known.py | 22 +++++++++++++++++++
 5 files changed, 68 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/13035.feature

diff --git a/changelog.d/13035.feature b/changelog.d/13035.feature
new file mode 100644
index 000000000..cfca3ab4b
--- /dev/null
+++ b/changelog.d/13035.feature
@@ -0,0 +1 @@
+Allow server admins to customise the response of the `/.well-known/matrix/client` endpoint.
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md
index 7c9860c3e..58a74ace4 100644
--- a/docs/usage/configuration/config_documentation.md
+++ b/docs/usage/configuration/config_documentation.md
@@ -230,6 +230,22 @@ Example configuration:
 serve_server_wellknown: true
 ```
 ---
+### `extra_well_known_client_content`
+
+This option allows server runners to add arbitrary key-value pairs to the [client-facing `.well-known` response](https://spec.matrix.org/latest/client-server-api/#well-known-uri).
+Note that the `public_baseurl` config option must be provided for Synapse to serve a response to `/.well-known/matrix/client` at all.
+
+If this option is provided, the given YAML is parsed to JSON and
+served on the `/.well-known/matrix/client` endpoint
+alongside the standard properties.
+
+Example configuration:
+```yaml
+extra_well_known_client_content:
+  option1: value1
+  option2: value2
+```
+---
 ### `soft_file_limit`
 
 Set the soft limit on the number of file descriptors synapse can use.
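The merge semantics described above — extra keys are added only when they do not clash with the standard properties — can be sketched as follows. The function name and signature here are illustrative, not Synapse's actual `WellKnownBuilder` API (which appears in the `synapse/rest/well_known.py` hunk later in this patch):

```python
from typing import Any, Dict, Optional


def build_client_well_known(
    public_baseurl: str,
    default_identity_server: Optional[str],
    extra_content: Dict[str, Any],
) -> Dict[str, Any]:
    """Hypothetical helper: merge extra content under the standard properties."""
    result: Dict[str, Any] = {"m.homeserver": {"base_url": public_baseurl}}
    if default_identity_server:
        result["m.identity_server"] = {"base_url": default_identity_server}

    # Extra content may add new keys but never overwrites a standard one.
    for key, value in extra_content.items():
        if key not in result:
            result[key] = value
    return result


assert build_client_well_known(
    "https://tesths/",
    "https://testis",
    {"custom": False, "m.homeserver": "ignored"},
) == {
    "m.homeserver": {"base_url": "https://tesths/"},
    "m.identity_server": {"base_url": "https://testis"},
    "custom": False,
}
```

(In the patch itself, the reserved `m.homeserver` and `m.identity_server` keys are additionally rejected at config-parse time, which gives admins an earlier, clearer error.)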
@@ -3580,3 +3596,4 @@ background_updates: min_batch_size: 10 default_batch_size: 50 ``` + diff --git a/synapse/config/server.py b/synapse/config/server.py index 828938e5e..085fe22c5 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -301,6 +301,26 @@ class ServerConfig(Config): "public_baseurl cannot contain query parameters or a #-fragment" ) + self.extra_well_known_client_content = config.get( + "extra_well_known_client_content", {} + ) + + if not isinstance(self.extra_well_known_client_content, dict): + raise ConfigError( + "extra_well_known_content must be a dictionary of key-value pairs" + ) + + if "m.homeserver" in self.extra_well_known_client_content: + raise ConfigError( + "m.homeserver is not supported in extra_well_known_content, " + "use public_baseurl in base config instead." + ) + if "m.identity_server" in self.extra_well_known_client_content: + raise ConfigError( + "m.identity_server is not supported in extra_well_known_content, " + "use default_identity_server in base config instead." + ) + # Whether to enable user presence. presence_config = config.get("presence") or {} self.use_presence = presence_config.get("enabled") diff --git a/synapse/rest/well_known.py b/synapse/rest/well_known.py index 04b035a1b..6f7ac54c6 100644 --- a/synapse/rest/well_known.py +++ b/synapse/rest/well_known.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - import logging from typing import TYPE_CHECKING, Optional @@ -44,6 +43,14 @@ class WellKnownBuilder: "base_url": self._config.registration.default_identity_server } + if self._config.server.extra_well_known_client_content: + for ( + key, + value, + ) in self._config.server.extra_well_known_client_content.items(): + if key not in result: + result[key] = value + return result diff --git a/tests/rest/test_well_known.py b/tests/rest/test_well_known.py index 11f78f52b..d8faafec7 100644 --- a/tests/rest/test_well_known.py +++ b/tests/rest/test_well_known.py @@ -59,6 +59,28 @@ class WellKnownTests(unittest.HomeserverTestCase): self.assertEqual(channel.code, HTTPStatus.NOT_FOUND) + @unittest.override_config( + { + "public_baseurl": "https://tesths", + "default_identity_server": "https://testis", + "extra_well_known_client_content": {"custom": False}, + } + ) + def test_client_well_known_custom(self) -> None: + channel = self.make_request( + "GET", "/.well-known/matrix/client", shorthand=False + ) + + self.assertEqual(channel.code, HTTPStatus.OK) + self.assertEqual( + channel.json_body, + { + "m.homeserver": {"base_url": "https://tesths/"}, + "m.identity_server": {"base_url": "https://testis"}, + "custom": False, + }, + ) + @unittest.override_config({"serve_server_wellknown": True}) def test_server_well_known(self) -> None: channel = self.make_request( From 1e0044e8f9f20ee0e8be9ad40c48be3a67e0f54e Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 16 Jun 2022 12:12:26 +0100 Subject: [PATCH 59/85] Complement: use SQLite by default (#13075) If no database is configured explicitly, use sqlite. This means that you don't have to pass any variables into the image. 
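The defaulting behaviour this commit describes — an unset *or empty* `SYNAPSE_COMPLEMENT_DATABASE` falls back to SQLite — mirrors the `sqlite|"")` shell case added below. A rough Python equivalent, purely for illustration:

```python
import os


def chosen_database() -> str:
    """Treat an unset or empty SYNAPSE_COMPLEMENT_DATABASE as "sqlite"."""
    value = os.environ.get("SYNAPSE_COMPLEMENT_DATABASE", "")
    if value in ("", "sqlite"):
        return "sqlite"
    if value == "postgres":
        return "postgres"
    raise ValueError(f"Unknown SYNAPSE_COMPLEMENT_DATABASE: {value!r}")


if __name__ == "__main__":
    print(chosen_database())  # prints "sqlite" when no variable is passed in
```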
--- changelog.d/13075.misc | 1 + docker/complement/README.md | 2 +- docker/complement/conf/start_for_complement.sh | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/13075.misc diff --git a/changelog.d/13075.misc b/changelog.d/13075.misc new file mode 100644 index 000000000..2311629f7 --- /dev/null +++ b/changelog.d/13075.misc @@ -0,0 +1 @@ +Merge the Complement testing Docker images into a single, multi-purpose image. diff --git a/docker/complement/README.md b/docker/complement/README.md index 37c39e2df..62682219e 100644 --- a/docker/complement/README.md +++ b/docker/complement/README.md @@ -7,7 +7,7 @@ so **please don't use this image for a production server**. This multi-purpose image is built on top of `Dockerfile-workers` in the parent directory and can be switched using environment variables between the following configurations: -- Monolithic Synapse with SQLite (`SYNAPSE_COMPLEMENT_DATABASE=sqlite`) +- Monolithic Synapse with SQLite (default, or `SYNAPSE_COMPLEMENT_DATABASE=sqlite`) - Monolithic Synapse with Postgres (`SYNAPSE_COMPLEMENT_DATABASE=postgres`) - Workerised Synapse with Postgres (`SYNAPSE_COMPLEMENT_DATABASE=postgres` and `SYNAPSE_COMPLEMENT_USE_WORKERS=true`) diff --git a/docker/complement/conf/start_for_complement.sh b/docker/complement/conf/start_for_complement.sh index b9c97ab68..65da99b8d 100755 --- a/docker/complement/conf/start_for_complement.sh +++ b/docker/complement/conf/start_for_complement.sh @@ -31,7 +31,7 @@ case "$SYNAPSE_COMPLEMENT_DATABASE" in export START_POSTGRES=true ;; - sqlite) + sqlite|"") # Configure supervisord not to start Postgres, as we don't need it export START_POSTGRES=false ;; From 755261524728c4a8c3f48c91590bdaef0731651e Mon Sep 17 00:00:00 2001 From: reivilibre Date: Thu, 16 Jun 2022 12:40:29 +0100 Subject: [PATCH 60/85] Reduce the duplication of code that invokes the rate limiter. (#13070) --- changelog.d/13070.misc | 1 + synapse/handlers/room_member.py | 30 +++--------------------------- 2 files changed, 4 insertions(+), 27 deletions(-) create mode 100644 changelog.d/13070.misc diff --git a/changelog.d/13070.misc b/changelog.d/13070.misc new file mode 100644 index 000000000..ce1f14342 --- /dev/null +++ b/changelog.d/13070.misc @@ -0,0 +1 @@ +Reduce the duplication of code that invokes the rate limiter. \ No newline at end of file diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index e89b7441a..bf6bae123 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -26,13 +26,7 @@ from synapse.api.constants import ( GuestAccess, Membership, ) -from synapse.api.errors import ( - AuthError, - Codes, - LimitExceededError, - ShadowBanError, - SynapseError, -) +from synapse.api.errors import AuthError, Codes, ShadowBanError, SynapseError from synapse.api.ratelimiting import Ratelimiter from synapse.event_auth import get_named_level, get_power_level_event from synapse.events import EventBase @@ -380,16 +374,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): # Only rate-limit if the user actually joined the room, otherwise we'll end # up blocking profile updates. 
if newly_joined and ratelimit: - time_now_s = self.clock.time() - ( - allowed, - time_allowed, - ) = await self._join_rate_limiter_local.can_do_action(requester) - - if not allowed: - raise LimitExceededError( - retry_after_ms=int(1000 * (time_allowed - time_now_s)) - ) + await self._join_rate_limiter_local.ratelimit(requester) result_event = await self.event_creation_handler.handle_new_client_event( requester, @@ -835,19 +820,10 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): ) if remote_join: if ratelimit: - time_now_s = self.clock.time() - ( - allowed, - time_allowed, - ) = await self._join_rate_limiter_remote.can_do_action( + await self._join_rate_limiter_remote.ratelimit( requester, ) - if not allowed: - raise LimitExceededError( - retry_after_ms=int(1000 * (time_allowed - time_now_s)) - ) - inviter = await self._get_inviter(target.to_string(), room_id) if inviter and not self.hs.is_mine(inviter): remote_room_hosts.append(inviter.domain) From 0fcc0ae37c959116c910f349a8025bd6921fdfc8 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 16 Jun 2022 07:41:57 -0400 Subject: [PATCH 61/85] Improve URL previews for sites with only Twitter card information. (#13056) Pull out `twitter:` meta tags when generating a preview and use it to augment any `og:` meta tags. Prefers Open Graph information over Twitter card information. --- changelog.d/13056.feature | 1 + synapse/rest/media/v1/preview_html.py | 112 +++++++++++++++++++---- tests/rest/media/v1/test_html_preview.py | 41 +++++++++ 3 files changed, 137 insertions(+), 17 deletions(-) create mode 100644 changelog.d/13056.feature diff --git a/changelog.d/13056.feature b/changelog.d/13056.feature new file mode 100644 index 000000000..219e2f6c1 --- /dev/null +++ b/changelog.d/13056.feature @@ -0,0 +1 @@ +Improve URL previews for sites which only provide Twitter Card metadata, e.g. LWN.net. diff --git a/synapse/rest/media/v1/preview_html.py b/synapse/rest/media/v1/preview_html.py index ed8f21a48..c826a1309 100644 --- a/synapse/rest/media/v1/preview_html.py +++ b/synapse/rest/media/v1/preview_html.py @@ -15,7 +15,16 @@ import codecs import itertools import logging import re -from typing import TYPE_CHECKING, Dict, Generator, Iterable, Optional, Set, Union +from typing import ( + TYPE_CHECKING, + Callable, + Dict, + Generator, + Iterable, + Optional, + Set, + Union, +) if TYPE_CHECKING: from lxml import etree @@ -146,6 +155,70 @@ def decode_body( return etree.fromstring(body, parser) +def _get_meta_tags( + tree: "etree.Element", + property: str, + prefix: str, + property_mapper: Optional[Callable[[str], Optional[str]]] = None, +) -> Dict[str, Optional[str]]: + """ + Search for meta tags prefixed with a particular string. + + Args: + tree: The parsed HTML document. + property: The name of the property which contains the tag name, e.g. + "property" for Open Graph. + prefix: The prefix on the property to search for, e.g. "og" for Open Graph. + property_mapper: An optional callable to map the property to the Open Graph + form. Can return None for a key to ignore that key. + + Returns: + A map of tag name to value. 
+ """ + results: Dict[str, Optional[str]] = {} + for tag in tree.xpath( + f"//*/meta[starts-with(@{property}, '{prefix}:')][@content][not(@content='')]" + ): + # if we've got more than 50 tags, someone is taking the piss + if len(results) >= 50: + logger.warning( + "Skipping parsing of Open Graph for page with too many '%s:' tags", + prefix, + ) + return {} + + key = tag.attrib[property] + if property_mapper: + key = property_mapper(key) + # None is a special value used to ignore a value. + if key is None: + continue + + results[key] = tag.attrib["content"] + + return results + + +def _map_twitter_to_open_graph(key: str) -> Optional[str]: + """ + Map a Twitter card property to the analogous Open Graph property. + + Args: + key: The Twitter card property (starts with "twitter:"). + + Returns: + The Open Graph property (starts with "og:") or None to have this property + be ignored. + """ + # Twitter card properties with no analogous Open Graph property. + if key == "twitter:card" or key == "twitter:creator": + return None + if key == "twitter:site": + return "og:site_name" + # Otherwise, swap twitter to og. + return "og" + key[7:] + + def parse_html_to_open_graph(tree: "etree.Element") -> Dict[str, Optional[str]]: """ Parse the HTML document into an Open Graph response. @@ -160,10 +233,8 @@ def parse_html_to_open_graph(tree: "etree.Element") -> Dict[str, Optional[str]]: The Open Graph response as a dictionary. """ - # if we see any image URLs in the OG response, then spider them - # (although the client could choose to do this by asking for previews of those - # URLs to avoid DoSing the server) - + # Search for Open Graph (og:) meta tags, e.g.: + # # "og:type" : "video", # "og:url" : "https://www.youtube.com/watch?v=LXDBoHyjmtw", # "og:site_name" : "YouTube", @@ -176,19 +247,11 @@ def parse_html_to_open_graph(tree: "etree.Element") -> Dict[str, Optional[str]]: # "og:video:height" : "720", # "og:video:secure_url": "https://www.youtube.com/v/LXDBoHyjmtw?version=3", - og: Dict[str, Optional[str]] = {} - for tag in tree.xpath( - "//*/meta[starts-with(@property, 'og:')][@content][not(@content='')]" - ): - # if we've got more than 50 tags, someone is taking the piss - if len(og) >= 50: - logger.warning("Skipping OG for page with too many 'og:' tags") - return {} - - og[tag.attrib["property"]] = tag.attrib["content"] - - # TODO: grab article: meta tags too, e.g.: + og = _get_meta_tags(tree, "property", "og") + # TODO: Search for properties specific to the different Open Graph types, + # such as article: meta tags, e.g.: + # # "article:publisher" : "https://www.facebook.com/thethudonline" /> # "article:author" content="https://www.facebook.com/thethudonline" /> # "article:tag" content="baby" /> @@ -196,6 +259,21 @@ def parse_html_to_open_graph(tree: "etree.Element") -> Dict[str, Optional[str]]: # "article:published_time" content="2016-03-31T19:58:24+00:00" /> # "article:modified_time" content="2016-04-01T18:31:53+00:00" /> + # Search for Twitter Card (twitter:) meta tags, e.g.: + # + # "twitter:site" : "@matrixdotorg" + # "twitter:creator" : "@matrixdotorg" + # + # Twitter cards tags also duplicate Open Graph tags. + # + # See https://developer.twitter.com/en/docs/twitter-for-websites/cards/guides/getting-started + twitter = _get_meta_tags(tree, "name", "twitter", _map_twitter_to_open_graph) + # Merge the Twitter values with the Open Graph values, but do not overwrite + # information from Open Graph tags. 
+ for key, value in twitter.items(): + if key not in og: + og[key] = value + if "og:title" not in og: # Attempt to find a title from the title tag, or the biggest header on the page. title = tree.xpath("((//title)[1] | (//h1)[1] | (//h2)[1] | (//h3)[1])/text()") diff --git a/tests/rest/media/v1/test_html_preview.py b/tests/rest/media/v1/test_html_preview.py index ea9e5889b..cbdf210ae 100644 --- a/tests/rest/media/v1/test_html_preview.py +++ b/tests/rest/media/v1/test_html_preview.py @@ -370,6 +370,47 @@ class OpenGraphFromHtmlTestCase(unittest.TestCase): og = parse_html_to_open_graph(tree) self.assertEqual(og, {"og:title": "ó", "og:description": "Some text."}) + def test_twitter_tag(self) -> None: + """Twitter card tags should be used if nothing else is available.""" + html = b""" + + + + + + """ + tree = decode_body(html, "http://example.com/test.html") + og = parse_html_to_open_graph(tree) + self.assertEqual( + og, + { + "og:title": None, + "og:description": "Description", + "og:site_name": "@matrixdotorg", + }, + ) + + # But they shouldn't override Open Graph values. + html = b""" + + + + + + + + """ + tree = decode_body(html, "http://example.com/test.html") + og = parse_html_to_open_graph(tree) + self.assertEqual( + og, + { + "og:title": None, + "og:description": "Real Description", + "og:site_name": "matrix.org", + }, + ) + class MediaEncodingTestCase(unittest.TestCase): def test_meta_charset(self) -> None: From 90cadcd403a5652a3f789ccfa8b608c639c0cc6d Mon Sep 17 00:00:00 2001 From: reivilibre Date: Thu, 16 Jun 2022 12:43:21 +0100 Subject: [PATCH 62/85] Add a Subject Alternative Name to the certificate generated for Complement tests. (#13071) --- changelog.d/13071.misc | 1 + .../complement/conf/start_for_complement.sh | 22 ++++++++++++++++--- 2 files changed, 20 insertions(+), 3 deletions(-) create mode 100644 changelog.d/13071.misc diff --git a/changelog.d/13071.misc b/changelog.d/13071.misc new file mode 100644 index 000000000..a6e1e6b3a --- /dev/null +++ b/changelog.d/13071.misc @@ -0,0 +1 @@ +Add a Subject Alternative Name to the certificate generated for Complement tests. \ No newline at end of file diff --git a/docker/complement/conf/start_for_complement.sh b/docker/complement/conf/start_for_complement.sh index 65da99b8d..773c7db22 100755 --- a/docker/complement/conf/start_for_complement.sh +++ b/docker/complement/conf/start_for_complement.sh @@ -73,14 +73,30 @@ fi # Generate a TLS key, then generate a certificate by having Complement's CA sign it # Note that both the key and certificate are in PEM format (not DER). + +# First generate a configuration file to set up a Subject Alternative Name. +cat > /conf/server.tls.conf < Date: Thu, 16 Jun 2022 18:50:46 +0100 Subject: [PATCH 63/85] Add desc to `get_earliest_token_for_stats` (#13085) --- changelog.d/13085.misc | 1 + synapse/storage/databases/main/stats.py | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/13085.misc diff --git a/changelog.d/13085.misc b/changelog.d/13085.misc new file mode 100644 index 000000000..2401d4f38 --- /dev/null +++ b/changelog.d/13085.misc @@ -0,0 +1 @@ +Correctly report prometheus DB stats for `get_earliest_token_for_stats`. 
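For reference, the Subject Alternative Name added via the openssl configuration in the previous patch can also be attached programmatically. This sketch uses the third-party `cryptography` package and self-signs purely for brevity — the Complement script instead has Complement's CA sign the key — so none of this is Synapse code:

```python
import datetime

from cryptography import x509
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import NameOID

server_name = "hs1"

key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
name = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, server_name)])

cert = (
    x509.CertificateBuilder()
    .subject_name(name)
    .issuer_name(name)  # self-signed here; the real script uses Complement's CA
    .public_key(key.public_key())
    .serial_number(x509.random_serial_number())
    .not_valid_before(datetime.datetime.utcnow())
    .not_valid_after(datetime.datetime.utcnow() + datetime.timedelta(days=1))
    # The point of the patch: without a SAN, modern TLS clients refuse to
    # match the certificate against the server name at all.
    .add_extension(
        x509.SubjectAlternativeName([x509.DNSName(server_name)]), critical=False
    )
    .sign(key, hashes.SHA256())
)

print(cert.public_bytes(serialization.Encoding.PEM).decode())
```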
diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py index 538451b05..82851ffa9 100644 --- a/synapse/storage/databases/main/stats.py +++ b/synapse/storage/databases/main/stats.py @@ -295,6 +295,7 @@ class StatsStore(StateDeltasStore): keyvalues={id_col: id}, retcol="completed_delta_stream_id", allow_none=True, + desc="get_earliest_token_for_stats", ) async def bulk_update_stats_delta( From 9372f6f842e3f8c0166d68a3a49ccc73a76954ea Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Fri, 17 Jun 2022 10:22:50 +0100 Subject: [PATCH 64/85] Fix logging context misuse when we fail to persist a federation event (#13089) When we fail to persist a federation event, we kick off a task to remove its push actions in the background, using the current logging context. Since we don't `await` that task, we may finish our logging context before the task finishes. There's no reason to not `await` the task, so let's do that. Signed-off-by: Sean Quah --- changelog.d/13089.misc | 1 + synapse/handlers/federation_event.py | 6 ++---- 2 files changed, 3 insertions(+), 4 deletions(-) create mode 100644 changelog.d/13089.misc diff --git a/changelog.d/13089.misc b/changelog.d/13089.misc new file mode 100644 index 000000000..5868507cb --- /dev/null +++ b/changelog.d/13089.misc @@ -0,0 +1 @@ +Fix a long-standing bug where a finished logging context would be re-started when Synapse failed to persist an event from federation. diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 565ffd7cf..b7c54e642 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -57,7 +57,7 @@ from synapse.event_auth import ( from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.federation.federation_client import InvalidResponseError -from synapse.logging.context import nested_logging_context, run_in_background +from synapse.logging.context import nested_logging_context from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.http.devices import ReplicationUserDevicesResyncRestServlet from synapse.replication.http.federation import ( @@ -1964,9 +1964,7 @@ class FederationEventHandler: event.room_id, [(event, context)], backfilled=backfilled ) except Exception: - run_in_background( - self._store.remove_push_actions_from_staging, event.event_id - ) + await self._store.remove_push_actions_from_staging(event.event_id) raise async def persist_events_and_notify( From c6d617641186221829c644204f24654430858826 Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Fri, 17 Jun 2022 12:39:26 +0200 Subject: [PATCH 65/85] Allow MSC3030 'timestamp_to_event' calls from anyone on world-readable rooms. (#13062) Signed-off-by: Quentin Gliech --- changelog.d/13062.misc | 1 + synapse/rest/client/room.py | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelog.d/13062.misc diff --git a/changelog.d/13062.misc b/changelog.d/13062.misc new file mode 100644 index 000000000..d425e9a9a --- /dev/null +++ b/changelog.d/13062.misc @@ -0,0 +1 @@ +Allow MSC3030 'timestamp_to_event' calls from anyone on world-readable rooms. 
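The logging-context hazard described in the patch above generalises to any fire-and-forget task that captures per-request state. Here is a self-contained analogy using `contextvars` — Synapse's Twisted `LoggingContext` machinery differs in detail, but the failure mode (a background task outliving the context its creator has already finished) is the same:

```python
import asyncio
import contextvars

request_context = contextvars.ContextVar("request_context", default="<finished>")


async def remove_push_actions(event_id: str) -> None:
    await asyncio.sleep(0)  # stands in for the database call
    print(f"cleanup for {event_id} ran under {request_context.get()!r}")


async def persist_event(event_id: str) -> None:
    token = request_context.set(f"persist-{event_id}")
    try:
        raise RuntimeError("simulated persistence failure")
    except RuntimeError:
        # The fix: await the cleanup while our context is still live. The
        # buggy shape scheduled it in the background (cf. run_in_background)
        # and then tore the context down in the finally block below.
        await remove_push_actions(event_id)
        raise
    finally:
        request_context.reset(token)


try:
    asyncio.run(persist_event("$event"))
except RuntimeError:
    pass
```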
diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index a26e97649..2f513164c 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -1177,7 +1177,9 @@ class TimestampLookupRestServlet(RestServlet): self, request: SynapseRequest, room_id: str ) -> Tuple[int, JsonDict]: requester = await self._auth.get_user_by_req(request) - await self._auth.check_user_in_room(room_id, requester.user.to_string()) + await self._auth.check_user_in_room_or_world_readable( + room_id, requester.user.to_string() + ) timestamp = parse_integer(request, "ts", required=True) direction = parse_string(request, "dir", default="f", allowed_values=["f", "b"]) From 5099b5ecc735b98ac9d559ef6191554bafff964b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 17 Jun 2022 11:42:03 +0100 Subject: [PATCH 66/85] Use new `device_list_changes_in_room` table when getting device list changes (#13045) --- changelog.d/13045.feature | 1 + synapse/handlers/device.py | 69 +++++++++++++++++------ synapse/handlers/sync.py | 19 ++----- synapse/storage/databases/main/devices.py | 59 +++++++++++++++++++ 4 files changed, 117 insertions(+), 31 deletions(-) create mode 100644 changelog.d/13045.feature diff --git a/changelog.d/13045.feature b/changelog.d/13045.feature new file mode 100644 index 000000000..7b0667ba9 --- /dev/null +++ b/changelog.d/13045.feature @@ -0,0 +1 @@ +Speed up fetching of device list changes in `/sync` and `/keys/changes`. diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index b79c55170..c05a170c5 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -123,6 +123,43 @@ class DeviceWorkerHandler: return device + async def get_device_changes_in_shared_rooms( + self, user_id: str, room_ids: Collection[str], from_token: StreamToken + ) -> Collection[str]: + """Get the set of users whose devices have changed who share a room with + the given user. + """ + changed_users = await self.store.get_device_list_changes_in_rooms( + room_ids, from_token.device_list_key + ) + + if changed_users is not None: + # We also check if the given user has changed their device. If + # they're in no rooms then the above query won't include them. + changed = await self.store.get_users_whose_devices_changed( + from_token.device_list_key, [user_id] + ) + changed_users.update(changed) + return changed_users + + # If the DB returned None then the `from_token` is too old, so we fall + # back on looking for device updates for all users. + + users_who_share_room = await self.store.get_users_who_share_room_with_user( + user_id + ) + + tracked_users = set(users_who_share_room) + + # Always tell the user about their own devices + tracked_users.add(user_id) + + changed = await self.store.get_users_whose_devices_changed( + from_token.device_list_key, tracked_users + ) + + return changed + @trace @measure_func("device.get_user_ids_changed") async def get_user_ids_changed( @@ -138,19 +175,8 @@ class DeviceWorkerHandler: room_ids = await self.store.get_rooms_for_user(user_id) - # First we check if any devices have changed for users that we share - # rooms with. 
- users_who_share_room = await self.store.get_users_who_share_room_with_user( - user_id - ) - - tracked_users = set(users_who_share_room) - - # Always tell the user about their own devices - tracked_users.add(user_id) - - changed = await self.store.get_users_whose_devices_changed( - from_token.device_list_key, tracked_users + changed = await self.get_device_changes_in_shared_rooms( + user_id, room_ids, from_token ) # Then work out if any users have since joined @@ -237,10 +263,19 @@ class DeviceWorkerHandler: break if possibly_changed or possibly_left: - # Take the intersection of the users whose devices may have changed - # and those that actually still share a room with the user - possibly_joined = possibly_changed & users_who_share_room - possibly_left = (possibly_changed | possibly_left) - users_who_share_room + possibly_joined = possibly_changed + possibly_left = possibly_changed | possibly_left + + # Double check if we still share rooms with the given user. + users_rooms = await self.store.get_rooms_for_users_with_stream_ordering( + possibly_left + ) + for changed_user_id, entries in users_rooms.items(): + if any(e.room_id in room_ids for e in entries): + possibly_left.discard(changed_user_id) + else: + possibly_joined.discard(changed_user_id) + else: possibly_joined = set() possibly_left = set() diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 6ad053f67..d42a414c9 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -240,6 +240,7 @@ class SyncHandler: self.auth_blocking = hs.get_auth_blocking() self._storage_controllers = hs.get_storage_controllers() self._state_storage_controller = self._storage_controllers.state + self._device_handler = hs.get_device_handler() # TODO: flush cache entries on subsequent sync request. # Once we get the next /sync request (ie, one with the same access token @@ -1268,21 +1269,11 @@ class SyncHandler: ): users_that_have_changed.add(changed_user_id) else: - users_who_share_room = ( - await self.store.get_users_who_share_room_with_user(user_id) - ) - - # Always tell the user about their own devices. We check as the user - # ID is almost certainly already included (unless they're not in any - # rooms) and taking a copy of the set is relatively expensive. 
- if user_id not in users_who_share_room: - users_who_share_room = set(users_who_share_room) - users_who_share_room.add(user_id) - - tracked_users = users_who_share_room users_that_have_changed = ( - await self.store.get_users_whose_devices_changed( - since_token.device_list_key, tracked_users + await self._device_handler.get_device_changes_in_shared_rooms( + user_id, + sync_result_builder.joined_room_ids, + from_token=since_token, ) ) diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 03d1334e0..93d980786 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -1208,6 +1208,65 @@ class DeviceWorkerStore(EndToEndKeyWorkerStore): return devices + @cached() + async def _get_min_device_lists_changes_in_room(self) -> int: + """Returns the minimum stream ID that we have entries for + `device_lists_changes_in_room` + """ + + return await self.db_pool.simple_select_one_onecol( + table="device_lists_changes_in_room", + keyvalues={}, + retcol="COALESCE(MIN(stream_id), 0)", + desc="get_min_device_lists_changes_in_room", + ) + + async def get_device_list_changes_in_rooms( + self, room_ids: Collection[str], from_id: int + ) -> Optional[Set[str]]: + """Return the set of users whose devices have changed in the given rooms + since the given stream ID. + + Returns None if the given stream ID is too old. + """ + + if not room_ids: + return set() + + min_stream_id = await self._get_min_device_lists_changes_in_room() + + if min_stream_id > from_id: + return None + + sql = """ + SELECT DISTINCT user_id FROM device_lists_changes_in_room + WHERE {clause} AND stream_id >= ? + """ + + def _get_device_list_changes_in_rooms_txn( + txn: LoggingTransaction, + clause, + args, + ) -> Set[str]: + txn.execute(sql.format(clause=clause), args) + return {user_id for user_id, in txn} + + changes = set() + for chunk in batch_iter(room_ids, 1000): + clause, args = make_in_list_sql_clause( + self.database_engine, "room_id", chunk + ) + args.append(from_id) + + changes |= await self.db_pool.runInteraction( + "get_device_list_changes_in_rooms", + _get_device_list_changes_in_rooms_txn, + clause, + args, + ) + + return changes + class DeviceBackgroundUpdateStore(SQLBaseStore): def __init__( From 5ef05c70c30ec06376c48f443c5722fbf5dd2aa0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 17 Jun 2022 11:58:00 +0100 Subject: [PATCH 67/85] Rotate notifications more frequently (#13096) --- changelog.d/13096.misc | 1 + synapse/storage/databases/main/event_push_actions.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/13096.misc diff --git a/changelog.d/13096.misc b/changelog.d/13096.misc new file mode 100644 index 000000000..3bb51962e --- /dev/null +++ b/changelog.d/13096.misc @@ -0,0 +1 @@ +Reduce DB usage of `/sync` when a large number of unread messages have recently been sent in a room. 
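The chunking idiom used by `get_device_list_changes_in_rooms` above — splitting a large `IN (...)` list into fixed-size batches so no single query carries thousands of bind parameters — looks roughly like this in isolation. `batch_iter` below is modelled on `synapse.util.iterutils.batch_iter`, and the SQL string is illustrative only:

```python
from itertools import islice
from typing import Iterable, Iterator, List, Tuple


def batch_iter(iterable: Iterable[str], size: int) -> Iterator[Tuple[str, ...]]:
    """Yield fixed-size tuples from an iterable (modelled on Synapse's batch_iter)."""
    it = iter(iterable)
    return iter(lambda: tuple(islice(it, size)), ())


def queries_for(room_ids: Iterable[str], from_id: int) -> Iterator[Tuple[str, List]]:
    # One query per chunk keeps the number of bind parameters bounded.
    for chunk in batch_iter(room_ids, 1000):
        placeholders = ", ".join("?" for _ in chunk)
        sql = (
            "SELECT DISTINCT user_id FROM device_lists_changes_in_room "
            f"WHERE room_id IN ({placeholders}) AND stream_id >= ?"
        )
        yield sql, [*chunk, from_id]


for sql, args in queries_for([f"!r{i}:example.com" for i in range(2500)], from_id=42):
    print(f"{len(args)} bind parameters")  # 1001, 1001, 501
```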
diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index ae705889a..10a796238 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -148,7 +148,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, EventsWorkerStore, SQLBas self._doing_notif_rotation = False if hs.config.worker.run_background_tasks: self._rotate_notif_loop = self._clock.looping_call( - self._rotate_notifs, 30 * 60 * 1000 + self._rotate_notifs, 30 * 1000 ) self.db_pool.updates.register_background_index_update( From 73af10f419346a5f2d70131ac1ed8e69942edca0 Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Fri, 17 Jun 2022 13:19:22 +0200 Subject: [PATCH 68/85] Simplify the alias deletion logic as an application service. (#13093) --- changelog.d/13093.misc | 1 + synapse/rest/client/directory.py | 35 +++++++++++------------------ tests/rest/client/test_directory.py | 34 ++++++++++++++++++++++++++++ 3 files changed, 48 insertions(+), 22 deletions(-) create mode 100644 changelog.d/13093.misc diff --git a/changelog.d/13093.misc b/changelog.d/13093.misc new file mode 100644 index 000000000..2547c87fa --- /dev/null +++ b/changelog.d/13093.misc @@ -0,0 +1 @@ +Simplify the alias deletion logic as an application service. diff --git a/synapse/rest/client/directory.py b/synapse/rest/client/directory.py index e181a0dde..9639d4fe2 100644 --- a/synapse/rest/client/directory.py +++ b/synapse/rest/client/directory.py @@ -17,13 +17,7 @@ from typing import TYPE_CHECKING, Tuple from twisted.web.server import Request -from synapse.api.errors import ( - AuthError, - Codes, - InvalidClientCredentialsError, - NotFoundError, - SynapseError, -) +from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError from synapse.http.server import HttpServer from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.http.site import SynapseRequest @@ -96,30 +90,27 @@ class ClientDirectoryServer(RestServlet): self, request: SynapseRequest, room_alias: str ) -> Tuple[int, JsonDict]: room_alias_obj = RoomAlias.from_string(room_alias) + requester = await self.auth.get_user_by_req(request) - try: - service = self.auth.get_appservice_by_req(request) + if requester.app_service: await self.directory_handler.delete_appservice_association( - service, room_alias_obj + requester.app_service, room_alias_obj ) + logger.info( "Application service at %s deleted alias %s", - service.url, + requester.app_service.url, room_alias_obj.to_string(), ) - return 200, {} - except InvalidClientCredentialsError: - # fallback to default user behaviour if they aren't an AS - pass - requester = await self.auth.get_user_by_req(request) - user = requester.user + else: + await self.directory_handler.delete_association(requester, room_alias_obj) - await self.directory_handler.delete_association(requester, room_alias_obj) - - logger.info( - "User %s deleted alias %s", user.to_string(), room_alias_obj.to_string() - ) + logger.info( + "User %s deleted alias %s", + requester.user.to_string(), + room_alias_obj.to_string(), + ) return 200, {} diff --git a/tests/rest/client/test_directory.py b/tests/rest/client/test_directory.py index aca03afd0..67473a68d 100644 --- a/tests/rest/client/test_directory.py +++ b/tests/rest/client/test_directory.py @@ -16,6 +16,7 @@ from http import HTTPStatus from twisted.test.proto_helpers import MemoryReactor +from synapse.appservice import ApplicationService from 
synapse.rest import admin
from synapse.rest.client import directory, login, room
from synapse.server import HomeServer
@@ -129,6 +130,39 @@ class DirectoryTestCase(unittest.HomeserverTestCase):
         )
         self.assertEqual(channel.code, HTTPStatus.OK, channel.result)
 
+    def test_deleting_alias_via_directory_appservice(self) -> None:
+        user_id = "@as:test"
+        as_token = "i_am_an_app_service"
+
+        appservice = ApplicationService(
+            as_token,
+            id="1234",
+            namespaces={"aliases": [{"regex": "#asns-*", "exclusive": True}]},
+            sender=user_id,
+        )
+        self.hs.get_datastores().main.services_cache.append(appservice)
+
+        # Add an alias for the room, as the appservice
+        alias = RoomAlias(f"asns-{random_string(5)}", self.hs.hostname).to_string()
+        data = {"room_id": self.room_id}
+        request_data = json.dumps(data)
+
+        channel = self.make_request(
+            "PUT",
+            f"/_matrix/client/r0/directory/room/{alias}",
+            request_data,
+            access_token=as_token,
+        )
+        self.assertEqual(channel.code, HTTPStatus.OK, channel.result)
+
+        # Then try to remove the alias, as the appservice
+        channel = self.make_request(
+            "DELETE",
+            f"/_matrix/client/r0/directory/room/{alias}",
+            access_token=as_token,
+        )
+        self.assertEqual(channel.code, HTTPStatus.OK, channel.result)
+
     def test_deleting_nonexistant_alias(self) -> None:
         # Check that no alias exists
         alias = "#potato:test"

From 5d6f55959e8dfdfa194fd1ea955ef714114e5a71 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Fri, 17 Jun 2022 12:47:22 +0100
Subject: [PATCH 69/85] Update info on downstream debs (#13095)

---
 changelog.d/13095.doc      |  1 +
 docs/setup/installation.md | 17 ++++++++---------
 2 files changed, 9 insertions(+), 9 deletions(-)
 create mode 100644 changelog.d/13095.doc

diff --git a/changelog.d/13095.doc b/changelog.d/13095.doc
new file mode 100644
index 000000000..4651f25e1
--- /dev/null
+++ b/changelog.d/13095.doc
@@ -0,0 +1 @@
+Update information on downstream Debian packages.
diff --git a/docs/setup/installation.md b/docs/setup/installation.md
index 69ade036c..5bdefe2bc 100644
--- a/docs/setup/installation.md
+++ b/docs/setup/installation.md
@@ -84,20 +84,19 @@ file when you upgrade the Debian package to a later version.
 
 ##### Downstream Debian packages
 
-We do not recommend using the packages from the default Debian `buster`
-repository at this time, as they are old and suffer from known security
-vulnerabilities. You can install the latest version of Synapse from
-[our repository](#matrixorg-packages) or from `buster-backports`. Please
-see the [Debian documentation](https://backports.debian.org/Instructions/)
-for information on how to use backports.
-
-If you are using Debian `sid` or testing, Synapse is available in the default
-repositories and it should be possible to install it simply with:
+Andrej Shadura maintains a `matrix-synapse` package in the Debian repositories.
+For `bookworm` and `sid`, it can be installed simply with:
 
 ```sh
 sudo apt install matrix-synapse
 ```
 
+Synapse is also available in `bullseye-backports`. Please
+see the [Debian documentation](https://backports.debian.org/Instructions/)
+for information on how to use backports.
+
+`matrix-synapse` is no longer maintained for `buster` and older.
+ ##### Downstream Ubuntu packages We do not recommend using the packages in the default Ubuntu repository From b26cbe3d4573c22b8a1743ae65db4f61770e69e9 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Fri, 17 Jun 2022 13:05:27 +0100 Subject: [PATCH 70/85] Fix type error that made its way onto develop (#13098) * Fix type error introduced accidentally by #13045 * Newsfile Signed-off-by: Olivier Wilkinson (reivilibre) --- changelog.d/13098.feature | 1 + synapse/storage/databases/main/devices.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/13098.feature diff --git a/changelog.d/13098.feature b/changelog.d/13098.feature new file mode 100644 index 000000000..7b0667ba9 --- /dev/null +++ b/changelog.d/13098.feature @@ -0,0 +1 @@ +Speed up fetching of device list changes in `/sync` and `/keys/changes`. diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 93d980786..adde5d097 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -1245,8 +1245,8 @@ class DeviceWorkerStore(EndToEndKeyWorkerStore): def _get_device_list_changes_in_rooms_txn( txn: LoggingTransaction, - clause, - args, + clause: str, + args: List[Any], ) -> Set[str]: txn.execute(sql.format(clause=clause), args) return {user_id for user_id, in txn} From d3d84685ce1acc05cbec00d2934548473850f9d0 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 17 Jun 2022 08:38:13 -0400 Subject: [PATCH 71/85] Add type hints to event push actions tests. (#13099) --- changelog.d/12985.misc | 2 +- changelog.d/13099.misc | 1 + tests/storage/test_event_push_actions.py | 28 ++++++++++++++---------- 3 files changed, 19 insertions(+), 12 deletions(-) create mode 100644 changelog.d/13099.misc diff --git a/changelog.d/12985.misc b/changelog.d/12985.misc index d5ab9eede..7f6492d58 100644 --- a/changelog.d/12985.misc +++ b/changelog.d/12985.misc @@ -1 +1 @@ -Add type annotations to `tests.state.test_v2`. +Add type hints to tests. diff --git a/changelog.d/13099.misc b/changelog.d/13099.misc new file mode 100644 index 000000000..7f6492d58 --- /dev/null +++ b/changelog.d/13099.misc @@ -0,0 +1 @@ +Add type hints to tests. 
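A small detail worth noting from the type fix above: the `{user_id for user_id, in txn}` comprehension works because database cursors yield each row as a tuple, so a single-column result is a 1-tuple which the trailing comma unpacks. A tiny illustration with invented row values:

```python
from typing import Iterable, Set, Tuple

# A single-column query result is a sequence of 1-tuples. The trailing comma
# in `for user_id,` destructures each row; without it, `user_id` would be the
# whole tuple.
rows: Iterable[Tuple[str]] = [("@alice:test",), ("@bob:test",), ("@alice:test",)]
users: Set[str] = {user_id for user_id, in rows}
assert users == {"@alice:test", "@bob:test"}
```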
diff --git a/tests/storage/test_event_push_actions.py b/tests/storage/test_event_push_actions.py
index 4273524c4..2ac5f6db5 100644
--- a/tests/storage/test_event_push_actions.py
+++ b/tests/storage/test_event_push_actions.py
@@ -14,7 +14,11 @@
 from unittest.mock import Mock
 
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.server import HomeServer
 from synapse.storage.databases.main.event_push_actions import NotifCounts
+from synapse.util import Clock
 
 from tests.unittest import HomeserverTestCase
 
@@ -29,31 +33,33 @@ HIGHLIGHT = [
 
 
 class EventPushActionsStoreTestCase(HomeserverTestCase):
-    def prepare(self, reactor, clock, hs):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.store = hs.get_datastores().main
-        self.persist_events_store = hs.get_datastores().persist_events
+        persist_events_store = hs.get_datastores().persist_events
+        assert persist_events_store is not None
+        self.persist_events_store = persist_events_store
 
-    def test_get_unread_push_actions_for_user_in_range_for_http(self):
+    def test_get_unread_push_actions_for_user_in_range_for_http(self) -> None:
         self.get_success(
             self.store.get_unread_push_actions_for_user_in_range_for_http(
                 USER_ID, 0, 1000, 20
             )
         )
 
-    def test_get_unread_push_actions_for_user_in_range_for_email(self):
+    def test_get_unread_push_actions_for_user_in_range_for_email(self) -> None:
         self.get_success(
             self.store.get_unread_push_actions_for_user_in_range_for_email(
                 USER_ID, 0, 1000, 20
             )
         )
 
-    def test_count_aggregation(self):
+    def test_count_aggregation(self) -> None:
         room_id = "!foo:example.com"
         user_id = "@user1235:example.com"
 
         last_read_stream_ordering = [0]
 
-        def _assert_counts(noitf_count, highlight_count):
+        def _assert_counts(noitf_count: int, highlight_count: int) -> None:
             counts = self.get_success(
                 self.store.db_pool.runInteraction(
                     "",
@@ -72,7 +78,7 @@ class EventPushActionsStoreTestCase(HomeserverTestCase):
                 ),
             )
 
-        def _inject_actions(stream, action):
+        def _inject_actions(stream: int, action: list) -> None:
             event = Mock()
             event.room_id = room_id
             event.event_id = "$test:example.com"
@@ -96,14 +102,14 @@ class EventPushActionsStoreTestCase(HomeserverTestCase):
                 )
             )
 
-        def _rotate(stream):
+        def _rotate(stream: int) -> None:
             self.get_success(
                 self.store.db_pool.runInteraction(
                     "", self.store._rotate_notifs_before_txn, stream
                 )
             )
 
-        def _mark_read(stream, depth):
+        def _mark_read(stream: int, depth: int) -> None:
             last_read_stream_ordering[0] = stream
             self.get_success(
                 self.store.db_pool.runInteraction(
@@ -165,8 +171,8 @@ class EventPushActionsStoreTestCase(HomeserverTestCase):
         _mark_read(10, 10)
         _assert_counts(0, 0)
 
-    def test_find_first_stream_ordering_after_ts(self):
-        def add_event(so, ts):
+    def test_find_first_stream_ordering_after_ts(self) -> None:
+        def add_event(so: int, ts: int) -> None:
             self.get_success(
                 self.store.db_pool.simple_insert(
                     "events",

From e16ea87d0f8c4c30cad36f85488eb1f647e640b0 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Fri, 17 Jun 2022 14:56:46 +0100
Subject: [PATCH 72/85] Fix inconsistencies in event validation for
 `m.room.create` events (#13087)

* Extend the auth rule checks for `m.room.create` events

... and move them up to the top of the function. Since no auth_events are
allowed for m.room.create events, we may as well get the m.room.create event
checks out of the way first.
* Add a test for create events with prev_events --- changelog.d/13087.bugfix | 1 + synapse/event_auth.py | 67 ++++++++++++++++++++++++++-------------- tests/test_event_auth.py | 45 +++++++++++++++++++++++++-- 3 files changed, 88 insertions(+), 25 deletions(-) create mode 100644 changelog.d/13087.bugfix diff --git a/changelog.d/13087.bugfix b/changelog.d/13087.bugfix new file mode 100644 index 000000000..7c69801af --- /dev/null +++ b/changelog.d/13087.bugfix @@ -0,0 +1 @@ +Fix some inconsistencies in the event authentication code. diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 360a50cc7..440b1ae41 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -141,6 +141,15 @@ async def check_state_independent_auth_rules( Raises: AuthError if the checks fail """ + # Implementation of https://spec.matrix.org/v1.2/rooms/v9/#authorization-rules + + # 1. If type is m.room.create: + if event.type == EventTypes.Create: + _check_create(event) + + # 1.5 Otherwise, allow + return + # Check the auth events. auth_events = await store.get_events( event.auth_event_ids(), @@ -180,29 +189,6 @@ async def check_state_independent_auth_rules( auth_dict[(auth_event.type, auth_event.state_key)] = auth_event_id - # Implementation of https://matrix.org/docs/spec/rooms/v1#authorization-rules - # - # 1. If type is m.room.create: - if event.type == EventTypes.Create: - # 1b. If the domain of the room_id does not match the domain of the sender, - # reject. - sender_domain = get_domain_from_id(event.sender) - room_id_domain = get_domain_from_id(event.room_id) - if room_id_domain != sender_domain: - raise AuthError( - 403, "Creation event's room_id domain does not match sender's" - ) - - # 1c. If content.room_version is present and is not a recognised version, reject - room_version_prop = event.content.get("room_version", "1") - if room_version_prop not in KNOWN_ROOM_VERSIONS: - raise AuthError( - 403, - "room appears to have unsupported version %s" % (room_version_prop,), - ) - - return - # 3. If event does not have a m.room.create in its auth_events, reject. creation_event = auth_dict.get((EventTypes.Create, ""), None) if not creation_event: @@ -324,6 +310,41 @@ def _check_size_limits(event: "EventBase") -> None: raise EventSizeError("event too large") +def _check_create(event: "EventBase") -> None: + """Implementation of the auth rules for m.room.create events + + Args: + event: The `m.room.create` event to be checked + + Raises: + AuthError if the event does not pass the auth rules + """ + assert event.type == EventTypes.Create + + # 1.1 If it has any previous events, reject. + if event.prev_event_ids(): + raise AuthError(403, "Create event has prev events") + + # 1.2 If the domain of the room_id does not match the domain of the sender, + # reject. + sender_domain = get_domain_from_id(event.sender) + room_id_domain = get_domain_from_id(event.room_id) + if room_id_domain != sender_domain: + raise AuthError(403, "Creation event's room_id domain does not match sender's") + + # 1.3 If content.room_version is present and is not a recognised version, reject + room_version_prop = event.content.get("room_version", "1") + if room_version_prop not in KNOWN_ROOM_VERSIONS: + raise AuthError( + 403, + "room appears to have unsupported version %s" % (room_version_prop,), + ) + + # 1.4 If content has no creator field, reject. 
+ if EventContentFields.ROOM_CREATOR not in event.content: + raise AuthError(403, "Create event lacks a 'creator' property") + + def _can_federate(event: "EventBase", auth_events: StateMap["EventBase"]) -> bool: creation_event = auth_events.get((EventTypes.Create, "")) # There should always be a creation event, but if not don't federate. diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py index e8e458cfd..ed7a3cbce 100644 --- a/tests/test_event_auth.py +++ b/tests/test_event_auth.py @@ -109,6 +109,47 @@ class EventAuthTestCase(unittest.TestCase): ) ) + def test_create_event_with_prev_events(self): + """A create event with prev_events should be rejected + + https://spec.matrix.org/v1.3/rooms/v9/#authorization-rules + 1: If type is m.room.create: + 1. If it has any previous events, reject. + """ + creator = f"@creator:{TEST_DOMAIN}" + + # we make both a good event and a bad event, to check that we are rejecting + # the bad event for the reason we think we are. + good_event = make_event_from_dict( + { + "room_id": TEST_ROOM_ID, + "type": "m.room.create", + "state_key": "", + "sender": creator, + "content": { + "creator": creator, + "room_version": RoomVersions.V9.identifier, + }, + "auth_events": [], + "prev_events": [], + }, + room_version=RoomVersions.V9, + ) + bad_event = make_event_from_dict( + {**good_event.get_dict(), "prev_events": ["$fakeevent"]}, + room_version=RoomVersions.V9, + ) + + event_store = _StubEventSourceStore() + + get_awaitable_result( + event_auth.check_state_independent_auth_rules(event_store, good_event) + ) + with self.assertRaises(AuthError): + get_awaitable_result( + event_auth.check_state_independent_auth_rules(event_store, bad_event) + ) + def test_random_users_cannot_send_state_before_first_pl(self): """ Check that, before the first PL lands, the creator is the only user @@ -564,8 +605,8 @@ class EventAuthTestCase(unittest.TestCase): # helpers for making events - -TEST_ROOM_ID = "!test:room" +TEST_DOMAIN = "example.com" +TEST_ROOM_ID = f"!test_room:{TEST_DOMAIN}" def _create_event( From d4b1c0d800eaa83c4d56a9cf17881ad362b9194b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 17 Jun 2022 16:30:59 +0100 Subject: [PATCH 73/85] Fix inconsistencies in event validation (#13088) --- changelog.d/13088.bugfix | 1 + synapse/event_auth.py | 23 ++++++- tests/handlers/test_federation.py | 14 ++-- tests/handlers/test_federation_event.py | 1 - tests/test_event_auth.py | 86 +++++++++++++++++++++++++ 5 files changed, 118 insertions(+), 7 deletions(-) create mode 100644 changelog.d/13088.bugfix diff --git a/changelog.d/13088.bugfix b/changelog.d/13088.bugfix new file mode 100644 index 000000000..7c69801af --- /dev/null +++ b/changelog.d/13088.bugfix @@ -0,0 +1 @@ +Fix some inconsistencies in the event authentication code. diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 440b1ae41..0fc2c4b27 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -150,7 +150,7 @@ async def check_state_independent_auth_rules( # 1.5 Otherwise, allow return - # Check the auth events. + # 2. Reject if event has auth_events that: ... 
auth_events = await store.get_events( event.auth_event_ids(), redact_behaviour=EventRedactBehaviour.as_is, @@ -158,6 +158,7 @@ async def check_state_independent_auth_rules( ) room_id = event.room_id auth_dict: MutableStateMap[str] = {} + expected_auth_types = auth_types_for_event(event.room_version, event) for auth_event_id in event.auth_event_ids(): auth_event = auth_events.get(auth_event_id) @@ -179,6 +180,24 @@ async def check_state_independent_auth_rules( % (event.event_id, room_id, auth_event_id, auth_event.room_id), ) + k = (auth_event.type, auth_event.state_key) + + # 2.1 ... have duplicate entries for a given type and state_key pair + if k in auth_dict: + raise AuthError( + 403, + f"Event {event.event_id} has duplicate auth_events for {k}: {auth_dict[k]} and {auth_event_id}", + ) + + # 2.2 ... have entries whose type and state_key don’t match those specified by + # the auth events selection algorithm described in the server + # specification. + if k not in expected_auth_types: + raise AuthError( + 403, + f"Event {event.event_id} has unexpected auth_event for {k}: {auth_event_id}", + ) + # We also need to check that the auth event itself is not rejected. if auth_event.rejected_reason: raise AuthError( @@ -187,7 +206,7 @@ async def check_state_independent_auth_rules( % (event.event_id, auth_event.event_id), ) - auth_dict[(auth_event.type, auth_event.state_key)] = auth_event_id + auth_dict[k] = auth_event_id # 3. If event does not have a m.room.create in its auth_events, reject. creation_event = auth_dict.get((EventTypes.Create, ""), None) diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index 9afba7b0e..9b9c11fab 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -225,9 +225,10 @@ class FederationTestCase(unittest.FederatingHomeserverTestCase): # we need a user on the remote server to be a member, so that we can send # extremity-causing events. 
+ remote_server_user_id = f"@user:{self.OTHER_SERVER_NAME}" self.get_success( event_injection.inject_member_event( - self.hs, room_id, f"@user:{self.OTHER_SERVER_NAME}", "join" + self.hs, room_id, remote_server_user_id, "join" ) ) @@ -247,6 +248,12 @@ class FederationTestCase(unittest.FederatingHomeserverTestCase): # create more than is 5 which corresponds to the number of backward # extremities we slice off in `_maybe_backfill_inner` federation_event_handler = self.hs.get_federation_event_handler() + auth_events = [ + ev + for ev in current_state + if (ev.type, ev.state_key) + in {("m.room.create", ""), ("m.room.member", remote_server_user_id)} + ] for _ in range(0, 8): event = make_event_from_dict( self.add_hashes_and_signatures( @@ -258,15 +265,14 @@ class FederationTestCase(unittest.FederatingHomeserverTestCase): "body": "message connected to fake event", }, "room_id": room_id, - "sender": f"@user:{self.OTHER_SERVER_NAME}", + "sender": remote_server_user_id, "prev_events": [ ev1.event_id, # We're creating an backward extremity each time thanks # to this fake event generate_fake_event_id(), ], - # lazy: *everything* is an auth event - "auth_events": [ev.event_id for ev in current_state], + "auth_events": [ev.event_id for ev in auth_events], "depth": ev1.depth + 1, }, room_version, diff --git a/tests/handlers/test_federation_event.py b/tests/handlers/test_federation_event.py index 1a36c25c4..4b1a8f04d 100644 --- a/tests/handlers/test_federation_event.py +++ b/tests/handlers/test_federation_event.py @@ -98,7 +98,6 @@ class FederationEventHandlerTests(unittest.FederatingHomeserverTestCase): auth_event_ids = [ initial_state_map[("m.room.create", "")], initial_state_map[("m.room.power_levels", "")], - initial_state_map[("m.room.join_rules", "")], member_event.event_id, ] diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py index ed7a3cbce..371cd201a 100644 --- a/tests/test_event_auth.py +++ b/tests/test_event_auth.py @@ -150,6 +150,92 @@ class EventAuthTestCase(unittest.TestCase): event_auth.check_state_independent_auth_rules(event_store, bad_event) ) + def test_duplicate_auth_events(self): + """Events with duplicate auth_events should be rejected + + https://spec.matrix.org/v1.3/rooms/v9/#authorization-rules + 2. Reject if event has auth_events that: + 1. 
have duplicate entries for a given type and state_key pair
+        """
+        creator = "@creator:example.com"
+
+        create_event = _create_event(RoomVersions.V9, creator)
+        join_event1 = _join_event(RoomVersions.V9, creator)
+        pl_event = _power_levels_event(
+            RoomVersions.V9,
+            creator,
+            {"state_default": 30, "users": {"creator": 100}},
+        )
+
+        # create a second join event, so that we can make a duplicate
+        join_event2 = _join_event(RoomVersions.V9, creator)
+
+        event_store = _StubEventSourceStore()
+        event_store.add_events([create_event, join_event1, join_event2, pl_event])
+
+        good_event = _random_state_event(
+            RoomVersions.V9, creator, [create_event, join_event2, pl_event]
+        )
+        bad_event = _random_state_event(
+            RoomVersions.V9, creator, [create_event, join_event1, join_event2, pl_event]
+        )
+        # a variation: two instances of the *same* event
+        bad_event2 = _random_state_event(
+            RoomVersions.V9, creator, [create_event, join_event2, join_event2, pl_event]
+        )
+
+        get_awaitable_result(
+            event_auth.check_state_independent_auth_rules(event_store, good_event)
+        )
+        with self.assertRaises(AuthError):
+            get_awaitable_result(
+                event_auth.check_state_independent_auth_rules(event_store, bad_event)
+            )
+        with self.assertRaises(AuthError):
+            get_awaitable_result(
+                event_auth.check_state_independent_auth_rules(event_store, bad_event2)
+            )
+
+    def test_unexpected_auth_events(self):
+        """Events with excess auth_events should be rejected
+
+        https://spec.matrix.org/v1.3/rooms/v9/#authorization-rules
+        2. Reject if event has auth_events that:
+           2. have entries whose type and state_key don’t match those specified by the
+              auth events selection algorithm described in the server specification.
+        """
+        creator = "@creator:example.com"
+
+        create_event = _create_event(RoomVersions.V9, creator)
+        join_event = _join_event(RoomVersions.V9, creator)
+        pl_event = _power_levels_event(
+            RoomVersions.V9,
+            creator,
+            {"state_default": 30, "users": {"creator": 100}},
+        )
+        join_rules_event = _join_rules_event(RoomVersions.V9, creator, "public")
+
+        event_store = _StubEventSourceStore()
+        event_store.add_events([create_event, join_event, pl_event, join_rules_event])
+
+        good_event = _random_state_event(
+            RoomVersions.V9, creator, [create_event, join_event, pl_event]
+        )
+        # join rules should *not* be included in the auth events.
+        bad_event = _random_state_event(
+            RoomVersions.V9,
+            creator,
+            [create_event, join_event, pl_event, join_rules_event],
+        )
+
+        get_awaitable_result(
+            event_auth.check_state_independent_auth_rules(event_store, good_event)
+        )
+        with self.assertRaises(AuthError):
+            get_awaitable_result(
+                event_auth.check_state_independent_auth_rules(event_store, bad_event)
+            )
+
     def test_random_users_cannot_send_state_before_first_pl(self):
         """
         Check that, before the first PL lands, the creator is the only user

From 3d94d07db39bf29f9742c95e19b52b8ffcf6baa7 Mon Sep 17 00:00:00 2001
From: Shay
Date: Fri, 17 Jun 2022 10:47:38 -0700
Subject: [PATCH 74/85] Update opentracing docs to reference the configuration
 manual rather than the configuration file. (#13076)

---
 changelog.d/13076.doc | 1 +
 docs/opentracing.md   | 5 +++--
 2 files changed, 4 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/13076.doc

diff --git a/changelog.d/13076.doc b/changelog.d/13076.doc
new file mode 100644
index 000000000..75dc4630e
--- /dev/null
+++ b/changelog.d/13076.doc
@@ -0,0 +1 @@
+Update OpenTracing docs to reference the configuration manual rather than the configuration file.
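Stepping back to the two event-validation patches above: both new checks reduce to bookkeeping over `(type, state_key)` pairs. A simplified, self-contained model of rules 2.1 and 2.2 — the function name, the `(event_id, key)` shape, and `expected_auth_types` are illustrative assumptions rather than Synapse's real types:

```python
from typing import Dict, List, Set, Tuple

StateKey = Tuple[str, str]  # (event type, state_key)


class AuthError(Exception):
    pass


def check_auth_entries(
    auth_entries: List[Tuple[str, StateKey]],  # (event_id, (type, state_key))
    expected_auth_types: Set[StateKey],
) -> Dict[StateKey, str]:
    """Reject duplicate (rule 2.1) or unexpected (rule 2.2) auth_events."""
    auth_dict: Dict[StateKey, str] = {}
    for event_id, key in auth_entries:
        if key in auth_dict:
            # rule 2.1: duplicate entry for this (type, state_key) pair
            raise AuthError(f"duplicate auth_events for {key}")
        if key not in expected_auth_types:
            # rule 2.2: not selected by the auth events selection algorithm
            raise AuthError(f"unexpected auth_event for {key}")
        auth_dict[key] = event_id
    return auth_dict


expected = {("m.room.create", ""), ("m.room.member", "@creator:example.com")}
# Passes: exactly the selected auth events, each appearing once.
check_auth_entries(
    [
        ("$create", ("m.room.create", "")),
        ("$join", ("m.room.member", "@creator:example.com")),
    ],
    expected,
)
```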
diff --git a/docs/opentracing.md b/docs/opentracing.md index f91362f11..abb94b565 100644 --- a/docs/opentracing.md +++ b/docs/opentracing.md @@ -57,8 +57,9 @@ https://www.jaegertracing.io/docs/latest/getting-started. ## Enable OpenTracing in Synapse OpenTracing is not enabled by default. It must be enabled in the -homeserver config by uncommenting the config options under `opentracing` -as shown in the [sample config](./sample_config.yaml). For example: +homeserver config by adding the `opentracing` option to your config file. You can find +documentation about how to do this in the [config manual under the header 'Opentracing'](usage/configuration/config_documentation.md#opentracing). +See below for an example Opentracing configuration: ```yaml opentracing: From f33356e8f86f5271376467febfad0936e4e8a72d Mon Sep 17 00:00:00 2001 From: David Robertson Date: Fri, 17 Jun 2022 19:07:04 +0100 Subject: [PATCH 75/85] Use caret (semver bounds) for matrix.org packages (#13082) --- .ci/scripts/test_old_deps.sh | 6 ++++-- changelog.d/13082.misc | 1 + poetry.lock | 2 +- pyproject.toml | 6 +++--- 4 files changed, 9 insertions(+), 6 deletions(-) create mode 100644 changelog.d/13082.misc diff --git a/.ci/scripts/test_old_deps.sh b/.ci/scripts/test_old_deps.sh index 769ca4517..7d0625fa8 100755 --- a/.ci/scripts/test_old_deps.sh +++ b/.ci/scripts/test_old_deps.sh @@ -27,9 +27,10 @@ export VIRTUALENV_NO_DOWNLOAD=1 # Patch the project definitions in-place: # - Replace all lower and tilde bounds with exact bounds -# - Make the pyopenssl 17.0, which is the oldest version that works with -# a `cryptography` compiled against OpenSSL 1.1. +# - Replace all caret bounds---but not the one that defines the supported Python version! # - Delete all lines referring to psycopg2 --- so no testing of postgres support. +# - Use pyopenssl 17.0, which is the oldest version that works with +# a `cryptography` compiled against OpenSSL 1.1. # - Omit systemd: we're not logging to journal here. # TODO: also replace caret bounds, see https://python-poetry.org/docs/dependency-specification/#version-constraints @@ -40,6 +41,7 @@ export VIRTUALENV_NO_DOWNLOAD=1 sed -i \ -e "s/[~>]=/==/g" \ + -e '/^python = "^/!s/\^/==/g' \ -e "/psycopg2/d" \ -e 's/pyOpenSSL = "==16.0.0"/pyOpenSSL = "==17.0.0"/' \ -e '/systemd/d' \ diff --git a/changelog.d/13082.misc b/changelog.d/13082.misc new file mode 100644 index 000000000..1aa386dbf --- /dev/null +++ b/changelog.d/13082.misc @@ -0,0 +1 @@ +Pin dependencies maintained by matrix.org to [semantic version](https://semver.org/) bounds. diff --git a/poetry.lock b/poetry.lock index 849e8a7a9..49fbaab57 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1563,7 +1563,7 @@ url_preview = ["lxml"] [metadata] lock-version = "1.1" python-versions = "^3.7.1" -content-hash = "73882e279e0379482f2fc7414cb71addfd408ca48ad508ff8a02b0cb544762af" +content-hash = "e96625923122e29b6ea5964379828e321b6cede2b020fc32c6f86c09d86d1ae8" [metadata.files] attrs = [ diff --git a/pyproject.toml b/pyproject.toml index 44aa775c3..3a56c42c0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -110,9 +110,9 @@ jsonschema = ">=3.0.0" frozendict = ">=1,!=2.1.2" # We require 2.1.0 or higher for type hints. Previous guard was >= 1.1.0 unpaddedbase64 = ">=2.1.0" -canonicaljson = ">=1.4.0" +canonicaljson = "^1.4.0" # we use the type definitions added in signedjson 1.1. -signedjson = ">=1.1.0" +signedjson = "^1.1.0" # validating SSL certs for IP addresses requires service_identity 18.1. 
service-identity = ">=18.1.0" # Twisted 18.9 introduces some logger improvements that the structured @@ -150,7 +150,7 @@ typing-extensions = ">=3.10.0.1" cryptography = ">=3.4.7" # ijson 3.1.4 fixes a bug with "." in property names ijson = ">=3.1.4" -matrix-common = "~=1.2.1" +matrix-common = "^1.2.1" # We need packaging.requirements.Requirement, added in 16.1. packaging = ">=16.1" # At the time of writing, we only use functions from the version `importlib.metadata` From d54909956ef616d976b3d9969be994df5b65030a Mon Sep 17 00:00:00 2001 From: santhoshivan23 <47689668+santhoshivan23@users.noreply.github.com> Date: Wed, 22 Jun 2022 20:02:18 +0530 Subject: [PATCH 76/85] validate room alias before interacting with the room directory (#13106) --- changelog.d/13106.bugfix | 1 + synapse/rest/client/directory.py | 6 ++++++ tests/rest/client/test_directory.py | 13 +++++++++++++ 3 files changed, 20 insertions(+) create mode 100644 changelog.d/13106.bugfix diff --git a/changelog.d/13106.bugfix b/changelog.d/13106.bugfix new file mode 100644 index 000000000..0dc16bad0 --- /dev/null +++ b/changelog.d/13106.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where room directory requests would cause an internal server error if given a malformed room alias. \ No newline at end of file diff --git a/synapse/rest/client/directory.py b/synapse/rest/client/directory.py index 9639d4fe2..d6c89cb16 100644 --- a/synapse/rest/client/directory.py +++ b/synapse/rest/client/directory.py @@ -46,6 +46,8 @@ class ClientDirectoryServer(RestServlet): self.auth = hs.get_auth() async def on_GET(self, request: Request, room_alias: str) -> Tuple[int, JsonDict]: + if not RoomAlias.is_valid(room_alias): + raise SynapseError(400, "Room alias invalid", errcode=Codes.INVALID_PARAM) room_alias_obj = RoomAlias.from_string(room_alias) res = await self.directory_handler.get_association(room_alias_obj) @@ -55,6 +57,8 @@ class ClientDirectoryServer(RestServlet): async def on_PUT( self, request: SynapseRequest, room_alias: str ) -> Tuple[int, JsonDict]: + if not RoomAlias.is_valid(room_alias): + raise SynapseError(400, "Room alias invalid", errcode=Codes.INVALID_PARAM) room_alias_obj = RoomAlias.from_string(room_alias) content = parse_json_object_from_request(request) @@ -89,6 +93,8 @@ class ClientDirectoryServer(RestServlet): async def on_DELETE( self, request: SynapseRequest, room_alias: str ) -> Tuple[int, JsonDict]: + if not RoomAlias.is_valid(room_alias): + raise SynapseError(400, "Room alias invalid", errcode=Codes.INVALID_PARAM) room_alias_obj = RoomAlias.from_string(room_alias) requester = await self.auth.get_user_by_req(request) diff --git a/tests/rest/client/test_directory.py b/tests/rest/client/test_directory.py index 67473a68d..16e7ef41b 100644 --- a/tests/rest/client/test_directory.py +++ b/tests/rest/client/test_directory.py @@ -215,6 +215,19 @@ class DirectoryTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.code, expected_code, channel.result) return alias + def test_invalid_alias(self) -> None: + alias = "#potato" + channel = self.make_request( + "GET", + f"/_matrix/client/r0/directory/room/{alias}", + access_token=self.user_tok, + ) + self.assertEqual(channel.code, HTTPStatus.BAD_REQUEST, channel.result) + self.assertIn("error", channel.json_body, channel.json_body) + self.assertEqual( + channel.json_body["errcode"], "M_INVALID_PARAM", channel.json_body + ) + def random_alias(self, length: int) -> str: return RoomAlias(random_string(length), self.hs.hostname).to_string() From 
3ceaf1462d90281c31dc64d79fb35b0def30150a Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Mon, 27 Jun 2022 10:15:25 +0000 Subject: [PATCH 77/85] Remove docs for Delete Group Admin API (#13112) This API no longer exists. Signed-off-by: Aaron Raimist --- changelog.d/13112.doc | 1 + docs/SUMMARY.md | 1 - docs/admin_api/delete_group.md | 14 -------------- 3 files changed, 1 insertion(+), 15 deletions(-) create mode 100644 changelog.d/13112.doc delete mode 100644 docs/admin_api/delete_group.md diff --git a/changelog.d/13112.doc b/changelog.d/13112.doc new file mode 100644 index 000000000..4b99951c7 --- /dev/null +++ b/changelog.d/13112.doc @@ -0,0 +1 @@ +Remove documentation for the Delete Group Admin API which no longer exists. \ No newline at end of file diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index d7cf2df11..b51c7a3cb 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -55,7 +55,6 @@ - [Admin API](usage/administration/admin_api/README.md) - [Account Validity](admin_api/account_validity.md) - [Background Updates](usage/administration/admin_api/background_updates.md) - - [Delete Group](admin_api/delete_group.md) - [Event Reports](admin_api/event_reports.md) - [Media](admin_api/media_admin_api.md) - [Purge History](admin_api/purge_history_api.md) diff --git a/docs/admin_api/delete_group.md b/docs/admin_api/delete_group.md deleted file mode 100644 index 73a96842a..000000000 --- a/docs/admin_api/delete_group.md +++ /dev/null @@ -1,14 +0,0 @@ -# Delete a local group - -This API lets a server admin delete a local group. Doing so will kick all -users out of the group so that their clients will correctly handle the group -being deleted. - -To use it, you will need to authenticate by providing an `access_token` -for a server admin: see [Admin API](../usage/administration/admin_api). - -The API is: - -``` -POST /_synapse/admin/v1/delete_group/ -``` From 3c5549e74ad37c07b8613729aa99117cbed81424 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Mon, 27 Jun 2022 11:43:20 +0100 Subject: [PATCH 78/85] Refactor the Dockerfile-workers configuration script to use Jinja2 templates in Synapse workers' Supervisord blocks. (#13054) Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- changelog.d/13054.misc | 1 + docker/conf-workers/supervisord.conf.j2 | 14 -------- .../conf-workers/synapse.supervisord.conf.j2 | 30 ++++++++++++++++ docker/configure_workers_and_start.py | 36 +++++++------------ 4 files changed, 43 insertions(+), 38 deletions(-) create mode 100644 changelog.d/13054.misc create mode 100644 docker/conf-workers/synapse.supervisord.conf.j2 diff --git a/changelog.d/13054.misc b/changelog.d/13054.misc new file mode 100644 index 000000000..088055373 --- /dev/null +++ b/changelog.d/13054.misc @@ -0,0 +1 @@ +Refactor the Dockerfile-workers configuration script to use Jinja2 templates in Synapse workers' Supervisord blocks. 
\ No newline at end of file diff --git a/docker/conf-workers/supervisord.conf.j2 b/docker/conf-workers/supervisord.conf.j2 index 7afab0513..086137494 100644 --- a/docker/conf-workers/supervisord.conf.j2 +++ b/docker/conf-workers/supervisord.conf.j2 @@ -31,17 +31,3 @@ autorestart=true # Redis can be disabled if the image is being used without workers autostart={{ enable_redis }} -[program:synapse_main] -command=/usr/local/bin/prefix-log /usr/local/bin/python -m synapse.app.homeserver --config-path="{{ main_config_path }}" --config-path=/conf/workers/shared.yaml -priority=10 -# Log startup failures to supervisord's stdout/err -# Regular synapse logs will still go in the configured data directory -stdout_logfile=/dev/stdout -stdout_logfile_maxbytes=0 -stderr_logfile=/dev/stderr -stderr_logfile_maxbytes=0 -autorestart=unexpected -exitcodes=0 - -# Additional process blocks -{{ worker_config }} diff --git a/docker/conf-workers/synapse.supervisord.conf.j2 b/docker/conf-workers/synapse.supervisord.conf.j2 new file mode 100644 index 000000000..644345049 --- /dev/null +++ b/docker/conf-workers/synapse.supervisord.conf.j2 @@ -0,0 +1,30 @@ +[program:synapse_main] +command=/usr/local/bin/prefix-log /usr/local/bin/python -m synapse.app.homeserver + --config-path="{{ main_config_path }}" + --config-path=/conf/workers/shared.yaml +priority=10 +# Log startup failures to supervisord's stdout/err +# Regular synapse logs will still go in the configured data directory +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stderr +stderr_logfile_maxbytes=0 +autorestart=unexpected +exitcodes=0 + + +{% for worker in workers %} +[program:synapse_{{ worker.name }}] +command=/usr/local/bin/prefix-log /usr/local/bin/python -m {{ worker.app }} + --config-path="{{ main_config_path }}" + --config-path=/conf/workers/shared.yaml + --config-path=/conf/workers/{{ worker.name }}.yaml +autorestart=unexpected +priority=500 +exitcodes=0 +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stderr +stderr_logfile_maxbytes=0 + +{% endfor %} diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py index 2a2c13f77..2134b648d 100755 --- a/docker/configure_workers_and_start.py +++ b/docker/configure_workers_and_start.py @@ -176,21 +176,6 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = { } # Templates for sections that may be inserted multiple times in config files -SUPERVISORD_PROCESS_CONFIG_BLOCK = """ -[program:synapse_{name}] -command=/usr/local/bin/prefix-log /usr/local/bin/python -m {app} \ - --config-path="{config_path}" \ - --config-path=/conf/workers/shared.yaml \ - --config-path=/conf/workers/{name}.yaml -autorestart=unexpected -priority=500 -exitcodes=0 -stdout_logfile=/dev/stdout -stdout_logfile_maxbytes=0 -stderr_logfile=/dev/stderr -stderr_logfile_maxbytes=0 -""" - NGINX_LOCATION_CONFIG_BLOCK = """ location ~* {endpoint} {{ proxy_pass {upstream}; @@ -353,13 +338,10 @@ def generate_worker_files( # This config file will be passed to all workers, included Synapse's main process. shared_config: Dict[str, Any] = {"listeners": listeners} - # The supervisord config. The contents of which will be inserted into the - # base supervisord jinja2 template. - # - # Supervisord will be in charge of running everything, from redis to nginx to Synapse - # and all of its worker processes. Load the config template, which defines a few - # services that are necessary to run. - supervisord_config = "" + # List of dicts that describe workers. 
+    # We pass this to the Supervisor template later to generate the appropriate
+    # program blocks.
+    worker_descriptors: List[Dict[str, Any]] = []
 
     # Upstreams for load-balancing purposes. This dict takes the form of a worker type to the
     # ports of each worker. For example:
@@ -437,7 +419,7 @@ def generate_worker_files(
         )
 
         # Enable the worker in supervisord
-        supervisord_config += SUPERVISORD_PROCESS_CONFIG_BLOCK.format_map(worker_config)
+        worker_descriptors.append(worker_config)
 
         # Add nginx location blocks for this worker's endpoints (if any are defined)
         for pattern in worker_config["endpoint_patterns"]:
@@ -535,10 +517,16 @@ def generate_worker_files(
         "/conf/supervisord.conf.j2",
         "/etc/supervisor/supervisord.conf",
         main_config_path=config_path,
-        worker_config=supervisord_config,
         enable_redis=workers_in_use,
     )
 
+    convert(
+        "/conf/synapse.supervisord.conf.j2",
+        "/etc/supervisor/conf.d/synapse.conf",
+        workers=worker_descriptors,
+        main_config_path=config_path,
+    )
+
     # healthcheck config
     convert(
         "/conf/healthcheck.sh.j2",

From 9b683ea80f94de4249264cbf375523b987900c89 Mon Sep 17 00:00:00 2001
From: Robert Long
Date: Mon, 27 Jun 2022 06:44:05 -0700
Subject: [PATCH 79/85] Add Cross-Origin-Resource-Policy header to thumbnail
 and download media endpoints (#12944)

---
 changelog.d/12944.misc                      |  1 +
 synapse/http/server.py                      | 11 +++++++++++
 synapse/rest/media/v1/download_resource.py  |  7 ++++++-
 synapse/rest/media/v1/thumbnail_resource.py |  7 ++++++-
 tests/rest/media/v1/test_media_storage.py   | 20 ++++++++++++++++++++
 5 files changed, 44 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/12944.misc

diff --git a/changelog.d/12944.misc b/changelog.d/12944.misc
new file mode 100644
index 000000000..bf27fe7e2
--- /dev/null
+++ b/changelog.d/12944.misc
@@ -0,0 +1 @@
+Add `Cross-Origin-Resource-Policy: cross-origin` header to content repository's thumbnail and download endpoints.
\ No newline at end of file
diff --git a/synapse/http/server.py b/synapse/http/server.py
index e3dcc3f3d..cf2d6f904 100644
--- a/synapse/http/server.py
+++ b/synapse/http/server.py
@@ -928,6 +928,17 @@ def set_cors_headers(request: Request) -> None:
     )
 
 
+def set_corp_headers(request: Request) -> None:
+    """Set the CORP headers so that javascript running in web browsers can
+    embed the resource returned from this request when their client requires
+    the `Cross-Origin-Embedder-Policy: require-corp` header.
+
+    Args:
+        request: The http request to add the CORP header to.
+    """
+    request.setHeader(b"Cross-Origin-Resource-Policy", b"cross-origin")
+
+
 def respond_with_html(request: Request, code: int, html: str) -> None:
     """
     Wraps `respond_with_html_bytes` by first encoding HTML from a str to UTF-8 bytes.
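For context on why the new header matters: a page served with `Cross-Origin-Embedder-Policy: require-corp` may only embed cross-origin subresources whose responses opt in via CORP, so without `Cross-Origin-Resource-Policy: cross-origin` such a client could not render media in an `<img>` tag. A minimal, hypothetical Twisted resource (not a real Synapse servlet) showing the headers the endpoints below send:

```python
from twisted.internet import reactor
from twisted.web import resource, server


class MediaStub(resource.Resource):
    """Hypothetical stand-in for a media download endpoint."""

    isLeaf = True

    def render_GET(self, request):
        # Mirrors set_cors_headers/set_corp_headers from the hunk above: the
        # CORP header lets cross-origin-isolated pages embed this response.
        request.setHeader(b"Access-Control-Allow-Origin", b"*")
        request.setHeader(b"Cross-Origin-Resource-Policy", b"cross-origin")
        return b"fake image bytes"


if __name__ == "__main__":
    reactor.listenTCP(8080, server.Site(MediaStub()))
    reactor.run()
```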
diff --git a/synapse/rest/media/v1/download_resource.py b/synapse/rest/media/v1/download_resource.py index 6180fa575..048a04269 100644 --- a/synapse/rest/media/v1/download_resource.py +++ b/synapse/rest/media/v1/download_resource.py @@ -15,7 +15,11 @@ import logging from typing import TYPE_CHECKING -from synapse.http.server import DirectServeJsonResource, set_cors_headers +from synapse.http.server import ( + DirectServeJsonResource, + set_corp_headers, + set_cors_headers, +) from synapse.http.servlet import parse_boolean from synapse.http.site import SynapseRequest @@ -38,6 +42,7 @@ class DownloadResource(DirectServeJsonResource): async def _async_render_GET(self, request: SynapseRequest) -> None: set_cors_headers(request) + set_corp_headers(request) request.setHeader( b"Content-Security-Policy", b"sandbox;" diff --git a/synapse/rest/media/v1/thumbnail_resource.py b/synapse/rest/media/v1/thumbnail_resource.py index 53b156524..2295adfaa 100644 --- a/synapse/rest/media/v1/thumbnail_resource.py +++ b/synapse/rest/media/v1/thumbnail_resource.py @@ -18,7 +18,11 @@ import logging from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple from synapse.api.errors import SynapseError -from synapse.http.server import DirectServeJsonResource, set_cors_headers +from synapse.http.server import ( + DirectServeJsonResource, + set_corp_headers, + set_cors_headers, +) from synapse.http.servlet import parse_integer, parse_string from synapse.http.site import SynapseRequest from synapse.rest.media.v1.media_storage import MediaStorage @@ -58,6 +62,7 @@ class ThumbnailResource(DirectServeJsonResource): async def _async_render_GET(self, request: SynapseRequest) -> None: set_cors_headers(request) + set_corp_headers(request) server_name, media_id, _ = parse_media_id(request) width = parse_integer(request, "width", required=True) height = parse_integer(request, "height", required=True) diff --git a/tests/rest/media/v1/test_media_storage.py b/tests/rest/media/v1/test_media_storage.py index 7204b2dfe..1c67e1ca9 100644 --- a/tests/rest/media/v1/test_media_storage.py +++ b/tests/rest/media/v1/test_media_storage.py @@ -481,6 +481,12 @@ class MediaRepoTests(unittest.HomeserverTestCase): if expected_found: self.assertEqual(channel.code, 200) + + self.assertEqual( + channel.headers.getRawHeaders(b"Cross-Origin-Resource-Policy"), + [b"cross-origin"], + ) + if expected_body is not None: self.assertEqual( channel.result["body"], expected_body, channel.result["body"] @@ -549,6 +555,20 @@ class MediaRepoTests(unittest.HomeserverTestCase): [b"noindex, nofollow, noarchive, noimageindex"], ) + def test_cross_origin_resource_policy_header(self) -> None: + """ + Test that the Cross-Origin-Resource-Policy header is set to "cross-origin" + allowing web clients to embed media from the downloads API. 
+ """ + channel = self._req(b"inline; filename=out" + self.test_image.extension) + + headers = channel.headers + + self.assertEqual( + headers.getRawHeaders(b"Cross-Origin-Resource-Policy"), + [b"cross-origin"], + ) + class TestSpamChecker: """A spam checker module that rejects all media that includes the bytes From 1017f09c18b2ae6e350df1e7755ae480fd180853 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=A0imon=20Brandner?= Date: Mon, 27 Jun 2022 21:28:34 +0200 Subject: [PATCH 80/85] Update MSC3786 implementation: Check the `state_key` (#12939) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Šimon Brandner --- changelog.d/12939.bugfix | 1 + synapse/push/baserules.py | 8 +++++++- 2 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 changelog.d/12939.bugfix diff --git a/changelog.d/12939.bugfix b/changelog.d/12939.bugfix new file mode 100644 index 000000000..d9061cf8e --- /dev/null +++ b/changelog.d/12939.bugfix @@ -0,0 +1 @@ +Update [MSC3786](https://github.com/matrix-org/matrix-spec-proposals/pull/3786) implementation to check `state_key`. diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py index 819bc9e9b..6c0cc5a6c 100644 --- a/synapse/push/baserules.py +++ b/synapse/push/baserules.py @@ -290,7 +290,13 @@ BASE_APPEND_OVERRIDE_RULES: List[Dict[str, Any]] = [ "key": "type", "pattern": "m.room.server_acl", "_cache_key": "_room_server_acl", - } + }, + { + "kind": "event_match", + "key": "state_key", + "pattern": "", + "_cache_key": "_room_server_acl_state_key", + }, ], "actions": [], }, From 6b99a66fe0260682fa95a0b19d3bee19c1e48876 Mon Sep 17 00:00:00 2001 From: santhoshivan23 <47689668+santhoshivan23@users.noreply.github.com> Date: Tue, 28 Jun 2022 16:52:59 +0530 Subject: [PATCH 81/85] Remove unspecced DELETE endpoint that modifies room visibility (#13123) --- changelog.d/13123.removal | 1 + synapse/rest/client/directory.py | 11 ----------- 2 files changed, 1 insertion(+), 11 deletions(-) create mode 100644 changelog.d/13123.removal diff --git a/changelog.d/13123.removal b/changelog.d/13123.removal new file mode 100644 index 000000000..f013f1616 --- /dev/null +++ b/changelog.d/13123.removal @@ -0,0 +1 @@ +Remove the unspecced `DELETE /directory/list/room/{roomId}` endpoint, which hid rooms from the [public room directory](https://spec.matrix.org/v1.3/client-server-api/#listing-rooms). Instead, `PUT` to the same URL with a visibility of `"private"`. 
\ No newline at end of file diff --git a/synapse/rest/client/directory.py b/synapse/rest/client/directory.py index d6c89cb16..bc1b18c92 100644 --- a/synapse/rest/client/directory.py +++ b/synapse/rest/client/directory.py @@ -151,17 +151,6 @@ class ClientDirectoryListServer(RestServlet): return 200, {} - async def on_DELETE( - self, request: SynapseRequest, room_id: str - ) -> Tuple[int, JsonDict]: - requester = await self.auth.get_user_by_req(request) - - await self.directory_handler.edit_published_room_list( - requester, room_id, "private" - ) - - return 200, {} - class ClientAppserviceDirectoryListServer(RestServlet): PATTERNS = client_patterns( From f1145563f662653e451525032b043d1a58998b6d Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 28 Jun 2022 14:12:17 +0200 Subject: [PATCH 82/85] Extra type annotations in `test_server` (#13124) --- changelog.d/13124.misc | 1 + mypy.ini | 3 ++ tests/test_server.py | 81 +++++++++++++++++++++++------------------- 3 files changed, 48 insertions(+), 37 deletions(-) create mode 100644 changelog.d/13124.misc diff --git a/changelog.d/13124.misc b/changelog.d/13124.misc new file mode 100644 index 000000000..513078f8d --- /dev/null +++ b/changelog.d/13124.misc @@ -0,0 +1 @@ +Add type annotations to `tests.test_server`. diff --git a/mypy.ini b/mypy.ini index c5130feae..4b08f45c6 100644 --- a/mypy.ini +++ b/mypy.ini @@ -113,6 +113,9 @@ disallow_untyped_defs = False [mypy-tests.handlers.test_user_directory] disallow_untyped_defs = True +[mypy-tests.test_server] +disallow_untyped_defs = True + [mypy-tests.state.test_profile] disallow_untyped_defs = True diff --git a/tests/test_server.py b/tests/test_server.py index 847432f79..fc4bce899 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -14,7 +14,7 @@ import re from http import HTTPStatus -from typing import Tuple +from typing import Awaitable, Callable, Dict, NoReturn, Optional, Tuple from twisted.internet.defer import Deferred from twisted.web.resource import Resource @@ -36,6 +36,7 @@ from synapse.util import Clock from tests import unittest from tests.http.server._base import test_disconnect from tests.server import ( + FakeChannel, FakeSite, ThreadedMemoryReactorClock, make_request, @@ -44,7 +45,7 @@ from tests.server import ( class JsonResourceTests(unittest.TestCase): - def setUp(self): + def setUp(self) -> None: self.reactor = ThreadedMemoryReactorClock() self.hs_clock = Clock(self.reactor) self.homeserver = setup_test_homeserver( @@ -54,7 +55,7 @@ class JsonResourceTests(unittest.TestCase): reactor=self.reactor, ) - def test_handler_for_request(self): + def test_handler_for_request(self) -> None: """ JsonResource.handler_for_request gives correctly decoded URL args to the callback, while Twisted will give the raw bytes of URL query @@ -62,7 +63,9 @@ class JsonResourceTests(unittest.TestCase): """ got_kwargs = {} - def _callback(request, **kwargs): + def _callback( + request: SynapseRequest, **kwargs: object + ) -> Tuple[int, Dict[str, object]]: got_kwargs.update(kwargs) return 200, kwargs @@ -83,13 +86,13 @@ class JsonResourceTests(unittest.TestCase): self.assertEqual(got_kwargs, {"room_id": "\N{SNOWMAN}"}) - def test_callback_direct_exception(self): + def test_callback_direct_exception(self) -> None: """ If the web callback raises an uncaught exception, it will be translated into a 500. 
""" - def _callback(request, **kwargs): + def _callback(request: SynapseRequest, **kwargs: object) -> NoReturn: raise Exception("boo") res = JsonResource(self.homeserver) @@ -103,17 +106,17 @@ class JsonResourceTests(unittest.TestCase): self.assertEqual(channel.result["code"], b"500") - def test_callback_indirect_exception(self): + def test_callback_indirect_exception(self) -> None: """ If the web callback raises an uncaught exception in a Deferred, it will be translated into a 500. """ - def _throw(*args): + def _throw(*args: object) -> NoReturn: raise Exception("boo") - def _callback(request, **kwargs): - d = Deferred() + def _callback(request: SynapseRequest, **kwargs: object) -> "Deferred[None]": + d: "Deferred[None]" = Deferred() d.addCallback(_throw) self.reactor.callLater(0.5, d.callback, True) return make_deferred_yieldable(d) @@ -129,13 +132,13 @@ class JsonResourceTests(unittest.TestCase): self.assertEqual(channel.result["code"], b"500") - def test_callback_synapseerror(self): + def test_callback_synapseerror(self) -> None: """ If the web callback raises a SynapseError, it returns the appropriate status code and message set in it. """ - def _callback(request, **kwargs): + def _callback(request: SynapseRequest, **kwargs: object) -> NoReturn: raise SynapseError(403, "Forbidden!!one!", Codes.FORBIDDEN) res = JsonResource(self.homeserver) @@ -151,12 +154,12 @@ class JsonResourceTests(unittest.TestCase): self.assertEqual(channel.json_body["error"], "Forbidden!!one!") self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") - def test_no_handler(self): + def test_no_handler(self) -> None: """ If there is no handler to process the request, Synapse will return 400. """ - def _callback(request, **kwargs): + def _callback(request: SynapseRequest, **kwargs: object) -> None: """ Not ever actually called! """ @@ -175,14 +178,16 @@ class JsonResourceTests(unittest.TestCase): self.assertEqual(channel.json_body["error"], "Unrecognized request") self.assertEqual(channel.json_body["errcode"], "M_UNRECOGNIZED") - def test_head_request(self): + def test_head_request(self) -> None: """ JsonResource.handler_for_request gives correctly decoded URL args to the callback, while Twisted will give the raw bytes of URL query arguments. """ - def _callback(request, **kwargs): + def _callback( + request: SynapseRequest, **kwargs: object + ) -> Tuple[int, Dict[str, object]]: return 200, {"result": True} res = JsonResource(self.homeserver) @@ -203,20 +208,21 @@ class JsonResourceTests(unittest.TestCase): class OptionsResourceTests(unittest.TestCase): - def setUp(self): + def setUp(self) -> None: self.reactor = ThreadedMemoryReactorClock() class DummyResource(Resource): isLeaf = True - def render(self, request): - return request.path + def render(self, request: SynapseRequest) -> bytes: + # Type-ignore: mypy thinks request.path is Optional[Any], not bytes. + return request.path # type: ignore[return-value] # Setup a resource with some children. self.resource = OptionsResource() self.resource.putChild(b"res", DummyResource()) - def _make_request(self, method, path): + def _make_request(self, method: bytes, path: bytes) -> FakeChannel: """Create a request from the method/path and return a channel with the response.""" # Create a site and query for the resource. 
site = SynapseSite( @@ -233,7 +239,7 @@ class OptionsResourceTests(unittest.TestCase): channel = make_request(self.reactor, site, method, path, shorthand=False) return channel - def test_unknown_options_request(self): + def test_unknown_options_request(self) -> None: """An OPTIONS requests to an unknown URL still returns 204 No Content.""" channel = self._make_request(b"OPTIONS", b"/foo/") self.assertEqual(channel.result["code"], b"204") @@ -253,7 +259,7 @@ class OptionsResourceTests(unittest.TestCase): "has CORS Headers header", ) - def test_known_options_request(self): + def test_known_options_request(self) -> None: """An OPTIONS requests to an known URL still returns 204 No Content.""" channel = self._make_request(b"OPTIONS", b"/res/") self.assertEqual(channel.result["code"], b"204") @@ -273,12 +279,12 @@ class OptionsResourceTests(unittest.TestCase): "has CORS Headers header", ) - def test_unknown_request(self): + def test_unknown_request(self) -> None: """A non-OPTIONS request to an unknown URL should 404.""" channel = self._make_request(b"GET", b"/foo/") self.assertEqual(channel.result["code"], b"404") - def test_known_request(self): + def test_known_request(self) -> None: """A non-OPTIONS request to an known URL should query the proper resource.""" channel = self._make_request(b"GET", b"/res/") self.assertEqual(channel.result["code"], b"200") @@ -287,16 +293,17 @@ class OptionsResourceTests(unittest.TestCase): class WrapHtmlRequestHandlerTests(unittest.TestCase): class TestResource(DirectServeHtmlResource): - callback = None + callback: Optional[Callable[..., Awaitable[None]]] - async def _async_render_GET(self, request): + async def _async_render_GET(self, request: SynapseRequest) -> None: + assert self.callback is not None await self.callback(request) - def setUp(self): + def setUp(self) -> None: self.reactor = ThreadedMemoryReactorClock() - def test_good_response(self): - async def callback(request): + def test_good_response(self) -> None: + async def callback(request: SynapseRequest) -> None: request.write(b"response") request.finish() @@ -311,13 +318,13 @@ class WrapHtmlRequestHandlerTests(unittest.TestCase): body = channel.result["body"] self.assertEqual(body, b"response") - def test_redirect_exception(self): + def test_redirect_exception(self) -> None: """ If the callback raises a RedirectException, it is turned into a 30x with the right location. 
""" - async def callback(request, **kwargs): + async def callback(request: SynapseRequest, **kwargs: object) -> None: raise RedirectException(b"/look/an/eagle", 301) res = WrapHtmlRequestHandlerTests.TestResource() @@ -332,13 +339,13 @@ class WrapHtmlRequestHandlerTests(unittest.TestCase): location_headers = [v for k, v in headers if k == b"Location"] self.assertEqual(location_headers, [b"/look/an/eagle"]) - def test_redirect_exception_with_cookie(self): + def test_redirect_exception_with_cookie(self) -> None: """ If the callback raises a RedirectException which sets a cookie, that is returned too """ - async def callback(request, **kwargs): + async def callback(request: SynapseRequest, **kwargs: object) -> NoReturn: e = RedirectException(b"/no/over/there", 304) e.cookies.append(b"session=yespls") raise e @@ -357,10 +364,10 @@ class WrapHtmlRequestHandlerTests(unittest.TestCase): cookies_headers = [v for k, v in headers if k == b"Set-Cookie"] self.assertEqual(cookies_headers, [b"session=yespls"]) - def test_head_request(self): + def test_head_request(self) -> None: """A head request should work by being turned into a GET request.""" - async def callback(request): + async def callback(request: SynapseRequest) -> None: request.write(b"response") request.finish() @@ -410,7 +417,7 @@ class CancellableDirectServeHtmlResource(DirectServeHtmlResource): class DirectServeJsonResourceCancellationTests(unittest.TestCase): """Tests for `DirectServeJsonResource` cancellation.""" - def setUp(self): + def setUp(self) -> None: self.reactor = ThreadedMemoryReactorClock() self.clock = Clock(self.reactor) self.resource = CancellableDirectServeJsonResource(self.clock) @@ -444,7 +451,7 @@ class DirectServeJsonResourceCancellationTests(unittest.TestCase): class DirectServeHtmlResourceCancellationTests(unittest.TestCase): """Tests for `DirectServeHtmlResource` cancellation.""" - def setUp(self): + def setUp(self) -> None: self.reactor = ThreadedMemoryReactorClock() self.clock = Clock(self.reactor) self.resource = CancellableDirectServeHtmlResource(self.clock) From 7469824d5838577f5a07aec6ab73b457459d8b4a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 28 Jun 2022 13:13:44 +0100 Subject: [PATCH 83/85] Fix serialization errors when rotating notifications (#13118) --- changelog.d/13118.misc | 1 + .../databases/main/event_push_actions.py | 201 ++++++++++++------ synapse/storage/databases/main/receipts.py | 13 +- .../delta/72/01event_push_summary_receipt.sql | 35 +++ tests/storage/test_event_push_actions.py | 35 ++- 5 files changed, 202 insertions(+), 83 deletions(-) create mode 100644 changelog.d/13118.misc create mode 100644 synapse/storage/schema/main/delta/72/01event_push_summary_receipt.sql diff --git a/changelog.d/13118.misc b/changelog.d/13118.misc new file mode 100644 index 000000000..3bb51962e --- /dev/null +++ b/changelog.d/13118.misc @@ -0,0 +1 @@ +Reduce DB usage of `/sync` when a large number of unread messages have recently been sent in a room. diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index 10a796238..80ca2fd0b 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -233,14 +233,30 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, EventsWorkerStore, SQLBas counts = NotifCounts() - # First we pull the counts from the summary table + # First we pull the counts from the summary table. 
+ # + # We check that `last_receipt_stream_ordering` matches the stream + # ordering given. If it doesn't match then a new read receipt has arrived and + # we haven't yet updated the counts in `event_push_summary` to reflect + # that; in that case we simply ignore `event_push_summary` counts + # and do a manual count of all of the rows in the `event_push_actions` table + # for this user/room. + # + # If `last_receipt_stream_ordering` is null then that means it's up to + # date (as the row was written by an older version of Synapse that + # updated `event_push_summary` synchronously when persisting a new read + # receipt). txn.execute( """ SELECT stream_ordering, notif_count, COALESCE(unread_count, 0) FROM event_push_summary - WHERE room_id = ? AND user_id = ? AND stream_ordering > ? + WHERE room_id = ? AND user_id = ? + AND ( + (last_receipt_stream_ordering IS NULL AND stream_ordering > ?) + OR last_receipt_stream_ordering = ? + ) """, - (room_id, user_id, stream_ordering), + (room_id, user_id, stream_ordering, stream_ordering), ) row = txn.fetchone() @@ -263,9 +279,9 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, EventsWorkerStore, SQLBas if row: counts.highlight_count += row[0] - # Finally we need to count push actions that haven't been summarized - # yet. - # We only want to pull out push actions that we haven't summarized yet. + # Finally we need to count push actions that aren't included in the + # summary returned above, e.g. recent events that haven't been + # summarized yet, or the summary is empty due to a recent read receipt. stream_ordering = max(stream_ordering, summary_stream_ordering) notify_count, unread_count = self._get_notif_unread_count_for_user_room( txn, room_id, user_id, stream_ordering @@ -800,6 +816,19 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, EventsWorkerStore, SQLBas self._doing_notif_rotation = True try: + # First we recalculate push summaries and delete stale push actions + # for rooms/users with new receipts. + while True: + logger.debug("Handling new receipts") + + caught_up = await self.db_pool.runInteraction( + "_handle_new_receipts_for_notifs_txn", + self._handle_new_receipts_for_notifs_txn, + ) + if caught_up: + break + + # Then we update the event push summaries for any new events while True: logger.info("Rotating notifications") @@ -810,10 +839,110 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, EventsWorkerStore, SQLBas break await self.hs.get_clock().sleep(self._rotate_delay) + # Finally we clear out old event push actions. await self._remove_old_push_actions_that_have_rotated() finally: self._doing_notif_rotation = False + def _handle_new_receipts_for_notifs_txn(self, txn: LoggingTransaction) -> bool: + """Check for new read receipts and delete from event push actions. + + Any push actions which predate the user's most recent read receipt are + now redundant, so we can remove them from `event_push_actions` and + update `event_push_summary`. + """ + + limit = 100 + + min_stream_id = self.db_pool.simple_select_one_onecol_txn( + txn, + table="event_push_summary_last_receipt_stream_id", + keyvalues={}, + retcol="stream_id", + ) + + sql = """ + SELECT r.stream_id, r.room_id, r.user_id, e.stream_ordering + FROM receipts_linearized AS r + INNER JOIN events AS e USING (event_id) + WHERE r.stream_id > ? AND user_id LIKE ? + ORDER BY r.stream_id ASC + LIMIT ? + """ + + # We only want local users, so we add a dodgy filter to the above query + # and recheck it below. 
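+        #
+        # For example, with a server name of "example.com" the pattern is
+        # "%:example.com", which matches local user IDs such as
+        # "@alice:example.com"; the `is_mine_id` check below remains the
+        # authoritative test.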
+ user_filter = "%:" + self.hs.hostname + + txn.execute( + sql, + ( + min_stream_id, + user_filter, + limit, + ), + ) + rows = txn.fetchall() + + # For each new read receipt we delete push actions from before it and + # recalculate the summary. + for _, room_id, user_id, stream_ordering in rows: + # Only handle our own read receipts. + if not self.hs.is_mine_id(user_id): + continue + + txn.execute( + """ + DELETE FROM event_push_actions + WHERE room_id = ? + AND user_id = ? + AND stream_ordering <= ? + AND highlight = 0 + """, + (room_id, user_id, stream_ordering), + ) + + old_rotate_stream_ordering = self.db_pool.simple_select_one_onecol_txn( + txn, + table="event_push_summary_stream_ordering", + keyvalues={}, + retcol="stream_ordering", + ) + + notif_count, unread_count = self._get_notif_unread_count_for_user_room( + txn, room_id, user_id, stream_ordering, old_rotate_stream_ordering + ) + + self.db_pool.simple_upsert_txn( + txn, + table="event_push_summary", + keyvalues={"room_id": room_id, "user_id": user_id}, + values={ + "notif_count": notif_count, + "unread_count": unread_count, + "stream_ordering": old_rotate_stream_ordering, + "last_receipt_stream_ordering": stream_ordering, + }, + ) + + # We always update `event_push_summary_last_receipt_stream_id` to + # ensure that we don't rescan the same receipts for remote users. + # + # This requires repeatable read to be safe, as we need the + # `MAX(stream_id)` to not include any new rows that have been committed + # since the start of the transaction (since those rows won't have been + # returned by the query above). Alternatively we could query the max + # stream ID at the start of the transaction and bound everything by + # that. + txn.execute( + """ + UPDATE event_push_summary_last_receipt_stream_id + SET stream_id = (SELECT COALESCE(MAX(stream_id), 0) FROM receipts_linearized) + """ + ) + + return len(rows) < limit + def _rotate_notifs_txn(self, txn: LoggingTransaction) -> bool: """Archives older notifications into event_push_summary. Returns whether the archiving process has caught up or not. @@ -1033,66 +1162,6 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, EventsWorkerStore, SQLBas if done: break - def _remove_old_push_actions_before_txn( - self, txn: LoggingTransaction, room_id: str, user_id: str, stream_ordering: int - ) -> None: - """ - Purges old push actions for a user and room before a given - stream_ordering. - - We however keep a months worth of highlighted notifications, so that - users can still get a list of recent highlights. - - Args: - txn: The transaction - room_id: Room ID to delete from - user_id: user ID to delete for - stream_ordering: The lowest stream ordering which will - not be deleted. - """ - txn.call_after( - self.get_unread_event_push_actions_by_room_for_user.invalidate, - (room_id, user_id), - ) - - # We need to join on the events table to get the received_ts for - # event_push_actions and sqlite won't let us use a join in a delete so - # we can't just delete where received_ts < x. Furthermore we can - # only identify event_push_actions by a tuple of room_id, event_id - # we we can't use a subquery. - # Instead, we look up the stream ordering for the last event in that - # room received before the threshold time and delete event_push_actions - # in the room with a stream_odering before that. - txn.execute( - "DELETE FROM event_push_actions " - " WHERE user_id = ? AND room_id = ? AND " - " stream_ordering <= ?" - " AND ((stream_ordering < ? 
AND highlight = 1) or highlight = 0)", - (user_id, room_id, stream_ordering, self.stream_ordering_month_ago), - ) - - old_rotate_stream_ordering = self.db_pool.simple_select_one_onecol_txn( - txn, - table="event_push_summary_stream_ordering", - keyvalues={}, - retcol="stream_ordering", - ) - - notif_count, unread_count = self._get_notif_unread_count_for_user_room( - txn, room_id, user_id, stream_ordering, old_rotate_stream_ordering - ) - - self.db_pool.simple_upsert_txn( - txn, - table="event_push_summary", - keyvalues={"room_id": room_id, "user_id": user_id}, - values={ - "notif_count": notif_count, - "unread_count": unread_count, - "stream_ordering": old_rotate_stream_ordering, - }, - ) - class EventPushActionsStore(EventPushActionsWorkerStore): EPA_HIGHLIGHT_INDEX = "epa_highlight_index" diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index bec6d6057..0090c9f22 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -26,7 +26,7 @@ from typing import ( cast, ) -from synapse.api.constants import EduTypes, ReceiptTypes +from synapse.api.constants import EduTypes from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker from synapse.replication.tcp.streams import ReceiptsStream from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause @@ -682,17 +682,6 @@ class ReceiptsWorkerStore(SQLBaseStore): lock=False, ) - # When updating a local users read receipt, remove any push actions - # which resulted from the receipt's event and all earlier events. - if ( - self.hs.is_mine_id(user_id) - and receipt_type in (ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE) - and stream_ordering is not None - ): - self._remove_old_push_actions_before_txn( # type: ignore[attr-defined] - txn, room_id=room_id, user_id=user_id, stream_ordering=stream_ordering - ) - return rx_ts def _graph_to_linear( diff --git a/synapse/storage/schema/main/delta/72/01event_push_summary_receipt.sql b/synapse/storage/schema/main/delta/72/01event_push_summary_receipt.sql new file mode 100644 index 000000000..e45db6152 --- /dev/null +++ b/synapse/storage/schema/main/delta/72/01event_push_summary_receipt.sql @@ -0,0 +1,35 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Add a column that records the position of the read receipt for the user at +-- the time we summarised the push actions. This is used to check if the counts +-- are up to date after a new read receipt has been sent. 
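+--
+-- For example: if the summary row was last updated when the user's read
+-- receipt pointed at the event with stream ordering 42, this column stores
+-- 42, and the cached counts are only trusted while that receipt is still
+-- the latest one.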
+-- +-- Null means that we can skip that check, as the row was written by an older +-- version of Synapse that updated `event_push_summary` synchronously when +-- persisting a new read receipt +ALTER TABLE event_push_summary ADD COLUMN last_receipt_stream_ordering BIGINT; + + +-- Tracks which new receipts we've handled +CREATE TABLE event_push_summary_last_receipt_stream_id ( + Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, -- Makes sure this table only has one row. + stream_id BIGINT NOT NULL, + CHECK (Lock='X') +); + +INSERT INTO event_push_summary_last_receipt_stream_id (stream_id) + SELECT COALESCE(MAX(stream_id), 0) + FROM receipts_linearized; diff --git a/tests/storage/test_event_push_actions.py b/tests/storage/test_event_push_actions.py index 2ac5f6db5..ef069a811 100644 --- a/tests/storage/test_event_push_actions.py +++ b/tests/storage/test_event_push_actions.py @@ -55,7 +55,7 @@ class EventPushActionsStoreTestCase(HomeserverTestCase): def test_count_aggregation(self) -> None: room_id = "!foo:example.com" - user_id = "@user1235:example.com" + user_id = "@user1235:test" last_read_stream_ordering = [0] @@ -81,11 +81,26 @@ class EventPushActionsStoreTestCase(HomeserverTestCase): def _inject_actions(stream: int, action: list) -> None: event = Mock() event.room_id = room_id - event.event_id = "$test:example.com" + event.event_id = f"$test{stream}:example.com" event.internal_metadata.stream_ordering = stream event.internal_metadata.is_outlier.return_value = False event.depth = stream + self.get_success( + self.store.db_pool.simple_insert( + table="events", + values={ + "stream_ordering": stream, + "topological_ordering": stream, + "type": "m.room.message", + "room_id": room_id, + "processed": True, + "outlier": False, + "event_id": event.event_id, + }, + ) + ) + self.get_success( self.store.add_push_actions_to_staging( event.event_id, @@ -105,18 +120,28 @@ class EventPushActionsStoreTestCase(HomeserverTestCase): def _rotate(stream: int) -> None: self.get_success( self.store.db_pool.runInteraction( - "", self.store._rotate_notifs_before_txn, stream + "rotate-receipts", self.store._handle_new_receipts_for_notifs_txn + ) + ) + + self.get_success( + self.store.db_pool.runInteraction( + "rotate-notifs", self.store._rotate_notifs_before_txn, stream ) ) def _mark_read(stream: int, depth: int) -> None: last_read_stream_ordering[0] = stream + self.get_success( self.store.db_pool.runInteraction( "", - self.store._remove_old_push_actions_before_txn, + self.store._insert_linearized_receipt_txn, room_id, + "m.read", user_id, + f"$test{stream}:example.com", + {}, stream, ) ) @@ -150,7 +175,7 @@ class EventPushActionsStoreTestCase(HomeserverTestCase): _assert_counts(1, 0) - _mark_read(7, 7) + _mark_read(6, 6) _assert_counts(0, 0) _inject_actions(8, HIGHLIGHT) From b210146fd97c58c29ee4dacab2f964e7b9b33c46 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 28 Jun 2022 16:36:08 +0100 Subject: [PATCH 84/85] 1.62.0rc1 --- CHANGES.md | 95 +++++++++++++++++++++++++++++++++++++++ changelog.d/12674.misc | 1 - changelog.d/12737.doc | 1 - changelog.d/12738.misc | 1 - changelog.d/12857.feature | 1 - changelog.d/12881.misc | 1 - changelog.d/12893.misc | 1 - changelog.d/12929.misc | 1 - changelog.d/12939.bugfix | 1 - changelog.d/12941.misc | 1 - changelog.d/12944.misc | 1 - changelog.d/12954.misc | 1 - changelog.d/12957.misc | 1 - changelog.d/12963.misc | 1 - changelog.d/12965.misc | 1 - changelog.d/12969.misc | 1 - changelog.d/12970.misc | 1 - changelog.d/12973.bugfix | 1 - changelog.d/12979.bugfix | 1 - 
changelog.d/12982.misc | 1 - changelog.d/12984.misc | 1 - changelog.d/12985.misc | 1 - changelog.d/12986.misc | 1 - changelog.d/12990.misc | 1 - changelog.d/12991.bugfix | 2 - changelog.d/13004.misc | 1 - changelog.d/13005.misc | 1 - changelog.d/13011.misc | 1 - changelog.d/13013.misc | 1 - changelog.d/13017.misc | 1 - changelog.d/13018.bugfix | 1 - changelog.d/13021.misc | 1 - changelog.d/13022.doc | 1 - changelog.d/13023.doc | 1 - changelog.d/13025.misc | 1 - changelog.d/13034.misc | 1 - changelog.d/13035.feature | 1 - changelog.d/13036.feature | 1 - changelog.d/13041.bugfix | 2 - changelog.d/13042.misc | 1 - changelog.d/13045.feature | 1 - changelog.d/13046.misc | 1 - changelog.d/13047.feature | 1 - changelog.d/13048.misc | 1 - changelog.d/13050.misc | 1 - changelog.d/13052.misc | 1 - changelog.d/13054.misc | 1 - changelog.d/13055.misc | 1 - changelog.d/13056.feature | 1 - changelog.d/13057.misc | 1 - changelog.d/13058.misc | 1 - changelog.d/13060.misc | 1 - changelog.d/13061.misc | 1 - changelog.d/13062.misc | 1 - changelog.d/13063.misc | 1 - changelog.d/13065.misc | 1 - changelog.d/13069.misc | 1 - changelog.d/13070.misc | 1 - changelog.d/13071.misc | 1 - changelog.d/13073.doc | 1 - changelog.d/13074.misc | 1 - changelog.d/13075.misc | 1 - changelog.d/13076.doc | 1 - changelog.d/13082.misc | 1 - changelog.d/13085.misc | 1 - changelog.d/13087.bugfix | 1 - changelog.d/13088.bugfix | 1 - changelog.d/13089.misc | 1 - changelog.d/13093.misc | 1 - changelog.d/13095.doc | 1 - changelog.d/13096.misc | 1 - changelog.d/13098.feature | 1 - changelog.d/13099.misc | 1 - changelog.d/13106.bugfix | 1 - changelog.d/13112.doc | 1 - changelog.d/13118.misc | 1 - changelog.d/13123.removal | 1 - changelog.d/13124.misc | 1 - debian/changelog | 6 +++ pyproject.toml | 2 +- 80 files changed, 102 insertions(+), 80 deletions(-) delete mode 100644 changelog.d/12674.misc delete mode 100644 changelog.d/12737.doc delete mode 100644 changelog.d/12738.misc delete mode 100644 changelog.d/12857.feature delete mode 100644 changelog.d/12881.misc delete mode 100644 changelog.d/12893.misc delete mode 100644 changelog.d/12929.misc delete mode 100644 changelog.d/12939.bugfix delete mode 100644 changelog.d/12941.misc delete mode 100644 changelog.d/12944.misc delete mode 100644 changelog.d/12954.misc delete mode 100644 changelog.d/12957.misc delete mode 100644 changelog.d/12963.misc delete mode 100644 changelog.d/12965.misc delete mode 100644 changelog.d/12969.misc delete mode 100644 changelog.d/12970.misc delete mode 100644 changelog.d/12973.bugfix delete mode 100644 changelog.d/12979.bugfix delete mode 100644 changelog.d/12982.misc delete mode 100644 changelog.d/12984.misc delete mode 100644 changelog.d/12985.misc delete mode 100644 changelog.d/12986.misc delete mode 100644 changelog.d/12990.misc delete mode 100644 changelog.d/12991.bugfix delete mode 100644 changelog.d/13004.misc delete mode 100644 changelog.d/13005.misc delete mode 100644 changelog.d/13011.misc delete mode 100644 changelog.d/13013.misc delete mode 100644 changelog.d/13017.misc delete mode 100644 changelog.d/13018.bugfix delete mode 100644 changelog.d/13021.misc delete mode 100644 changelog.d/13022.doc delete mode 100644 changelog.d/13023.doc delete mode 100644 changelog.d/13025.misc delete mode 100644 changelog.d/13034.misc delete mode 100644 changelog.d/13035.feature delete mode 100644 changelog.d/13036.feature delete mode 100644 changelog.d/13041.bugfix delete mode 100644 changelog.d/13042.misc delete mode 100644 changelog.d/13045.feature delete mode 
100644 changelog.d/13046.misc delete mode 100644 changelog.d/13047.feature delete mode 100644 changelog.d/13048.misc delete mode 100644 changelog.d/13050.misc delete mode 100644 changelog.d/13052.misc delete mode 100644 changelog.d/13054.misc delete mode 100644 changelog.d/13055.misc delete mode 100644 changelog.d/13056.feature delete mode 100644 changelog.d/13057.misc delete mode 100644 changelog.d/13058.misc delete mode 100644 changelog.d/13060.misc delete mode 100644 changelog.d/13061.misc delete mode 100644 changelog.d/13062.misc delete mode 100644 changelog.d/13063.misc delete mode 100644 changelog.d/13065.misc delete mode 100644 changelog.d/13069.misc delete mode 100644 changelog.d/13070.misc delete mode 100644 changelog.d/13071.misc delete mode 100644 changelog.d/13073.doc delete mode 100644 changelog.d/13074.misc delete mode 100644 changelog.d/13075.misc delete mode 100644 changelog.d/13076.doc delete mode 100644 changelog.d/13082.misc delete mode 100644 changelog.d/13085.misc delete mode 100644 changelog.d/13087.bugfix delete mode 100644 changelog.d/13088.bugfix delete mode 100644 changelog.d/13089.misc delete mode 100644 changelog.d/13093.misc delete mode 100644 changelog.d/13095.doc delete mode 100644 changelog.d/13096.misc delete mode 100644 changelog.d/13098.feature delete mode 100644 changelog.d/13099.misc delete mode 100644 changelog.d/13106.bugfix delete mode 100644 changelog.d/13112.doc delete mode 100644 changelog.d/13118.misc delete mode 100644 changelog.d/13123.removal delete mode 100644 changelog.d/13124.misc diff --git a/CHANGES.md b/CHANGES.md index 0db01d409..4c1decf8f 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,98 @@ +Synapse 1.62.0rc1 (2022-06-28) +============================== + +Features +-------- + +- Port the spam-checker API callbacks to a new, richer API. This is part of an ongoing change to let spam-checker modules inform users of the reason their event or operation is rejected. ([\#12857](https://github.com/matrix-org/synapse/issues/12857), [\#13047](https://github.com/matrix-org/synapse/issues/13047)) +- Allow server admins to customise the response of the `/.well-known/matrix/client` endpoint. ([\#13035](https://github.com/matrix-org/synapse/issues/13035)) +- Add metrics measuring the CPU and DB time spent in state resolution. ([\#13036](https://github.com/matrix-org/synapse/issues/13036)) +- Speed up fetching of device list changes in `/sync` and `/keys/changes`. ([\#13045](https://github.com/matrix-org/synapse/issues/13045), [\#13098](https://github.com/matrix-org/synapse/issues/13098)) +- Improve URL previews for sites which only provide Twitter Card metadata, e.g. LWN.net. ([\#13056](https://github.com/matrix-org/synapse/issues/13056)) + + +Bugfixes +-------- + +- Update [MSC3786](https://github.com/matrix-org/matrix-spec-proposals/pull/3786) implementation to check `state_key`. ([\#12939](https://github.com/matrix-org/synapse/issues/12939)) +- Fix a bug introduced in Synapse 1.58 where Synapse would not report full version information when installed from a git checkout. This is a best-effort affair and not guaranteed to be stable. ([\#12973](https://github.com/matrix-org/synapse/issues/12973)) +- Fix a bug introduced in Synapse 1.60 where Synapse would fail to start if the `sqlite3` module was not available. ([\#12979](https://github.com/matrix-org/synapse/issues/12979)) +- Fix a bug where non-standard information was required when requesting the `/hierarchy` API over federation. Introduced + in Synapse v1.41.0. 
([\#12991](https://github.com/matrix-org/synapse/issues/12991))
+- Fix a long-standing bug which meant that rate limiting was not restrictive enough in some cases. ([\#13018](https://github.com/matrix-org/synapse/issues/13018))
+- Fix a bug introduced in Synapse 1.58 where profile requests for a malformed user ID would cause an internal error. Synapse now returns 400 Bad Request in this situation. ([\#13041](https://github.com/matrix-org/synapse/issues/13041))
+- Fix some inconsistencies in the event authentication code. ([\#13087](https://github.com/matrix-org/synapse/issues/13087), [\#13088](https://github.com/matrix-org/synapse/issues/13088))
+- Fix a long-standing bug where room directory requests would cause an internal server error if given a malformed room alias. ([\#13106](https://github.com/matrix-org/synapse/issues/13106))
+
+
+Improved Documentation
+----------------------
+
+- Add documentation for how to configure Synapse with Workers using Docker Compose. Includes example worker config and docker-compose.yaml. Contributed by @Thumbscrew. ([\#12737](https://github.com/matrix-org/synapse/issues/12737))
+- Ensure the [Poetry cheat sheet](https://matrix-org.github.io/synapse/develop/development/dependencies.html) is available in the online documentation. ([\#13022](https://github.com/matrix-org/synapse/issues/13022))
+- Mention removed community/group worker endpoints in upgrade.md. Contributed by @olmari. ([\#13023](https://github.com/matrix-org/synapse/issues/13023))
+- Add instructions for running Complement with `gotestfmt`-formatted output locally. ([\#13073](https://github.com/matrix-org/synapse/issues/13073))
+- Update OpenTracing docs to reference the configuration manual rather than the configuration file. ([\#13076](https://github.com/matrix-org/synapse/issues/13076))
+- Update information on downstream Debian packages. ([\#13095](https://github.com/matrix-org/synapse/issues/13095))
+- Remove documentation for the Delete Group Admin API which no longer exists. ([\#13112](https://github.com/matrix-org/synapse/issues/13112))
+
+
+Deprecations and Removals
+-------------------------
+
+- Remove the unspecced `DELETE /directory/list/room/{roomId}` endpoint, which hid rooms from the [public room directory](https://spec.matrix.org/v1.3/client-server-api/#listing-rooms). Instead, `PUT` to the same URL with a visibility of `"private"`. ([\#13123](https://github.com/matrix-org/synapse/issues/13123))
+
+
+Internal Changes
+----------------
+
+- Add tests for cancellation of `GET /rooms/$room_id/members` and `GET /rooms/$room_id/state` requests. ([\#12674](https://github.com/matrix-org/synapse/issues/12674))
+- Report login failures due to unknown third party identifiers in the same way as failures due to invalid passwords. This prevents an attacker from using the error response to determine if the identifier exists. Contributed by Daniel Aloni. ([\#12738](https://github.com/matrix-org/synapse/issues/12738))
+- Merge the Complement testing Docker images into a single, multi-purpose image. ([\#12881](https://github.com/matrix-org/synapse/issues/12881), [\#13075](https://github.com/matrix-org/synapse/issues/13075))
+- Simplify the database schema for `event_edges`. ([\#12893](https://github.com/matrix-org/synapse/issues/12893))
+- Clean up the test code for client disconnection. ([\#12929](https://github.com/matrix-org/synapse/issues/12929))
+- Remove code generating comments in configuration. 
([\#12941](https://github.com/matrix-org/synapse/issues/12941)) +- Add `Cross-Origin-Resource-Policy: cross-origin` header to content repository's thumbnail and download endpoints. ([\#12944](https://github.com/matrix-org/synapse/issues/12944)) +- Replace noop background updates with `DELETE` delta. ([\#12954](https://github.com/matrix-org/synapse/issues/12954), [\#13050](https://github.com/matrix-org/synapse/issues/13050)) +- Use lower isolation level when inserting read receipts to avoid serialization errors. Contributed by Nick @ Beeper. ([\#12957](https://github.com/matrix-org/synapse/issues/12957)) +- Reduce the amount of state we pull from the DB. ([\#12963](https://github.com/matrix-org/synapse/issues/12963)) +- Enable testing against PostgreSQL databases in Complement CI. ([\#12965](https://github.com/matrix-org/synapse/issues/12965), [\#13034](https://github.com/matrix-org/synapse/issues/13034)) +- Fix an inaccurate comment. ([\#12969](https://github.com/matrix-org/synapse/issues/12969)) +- Remove the `delete_device` method and always call `delete_devices`. ([\#12970](https://github.com/matrix-org/synapse/issues/12970)) +- Use a GitHub form for issues rather than a hard-to-read, easy-to-ignore template. ([\#12982](https://github.com/matrix-org/synapse/issues/12982)) +- Move [MSC3715](https://github.com/matrix-org/matrix-spec-proposals/pull/3715) behind an experimental config flag. ([\#12984](https://github.com/matrix-org/synapse/issues/12984)) +- Add type hints to tests. ([\#12985](https://github.com/matrix-org/synapse/issues/12985), [\#13099](https://github.com/matrix-org/synapse/issues/13099)) +- Refactor macaroon tokens generation and move the unsubscribe link in notification emails to `/_synapse/client/unsubscribe`. ([\#12986](https://github.com/matrix-org/synapse/issues/12986)) +- Fix documentation for running complement tests. ([\#12990](https://github.com/matrix-org/synapse/issues/12990)) +- Faster joins: add issue links to the TODO comments in the code. ([\#13004](https://github.com/matrix-org/synapse/issues/13004)) +- Reduce DB usage of `/sync` when a large number of unread messages have recently been sent in a room. ([\#13005](https://github.com/matrix-org/synapse/issues/13005), [\#13096](https://github.com/matrix-org/synapse/issues/13096), [\#13118](https://github.com/matrix-org/synapse/issues/13118)) +- Replaced usage of PyJWT with methods from Authlib in `org.matrix.login.jwt`. Contributed by Hannes Lerchl. ([\#13011](https://github.com/matrix-org/synapse/issues/13011)) +- Modernize the `contrib/graph/` scripts. ([\#13013](https://github.com/matrix-org/synapse/issues/13013)) +- Remove redundant `room_version` parameters from event auth functions. ([\#13017](https://github.com/matrix-org/synapse/issues/13017)) +- Decouple `synapse.api.auth_blocking.AuthBlocking` from `synapse.api.auth.Auth`. ([\#13021](https://github.com/matrix-org/synapse/issues/13021)) +- Add type annotations to `synapse.storage.databases.main.devices`. ([\#13025](https://github.com/matrix-org/synapse/issues/13025)) +- Set default `sync_response_cache_duration` to two minutes. ([\#13042](https://github.com/matrix-org/synapse/issues/13042)) +- Rename CI test runs. ([\#13046](https://github.com/matrix-org/synapse/issues/13046)) +- Increase timeout of complement CI test runs. ([\#13048](https://github.com/matrix-org/synapse/issues/13048)) +- Refactor entry points so that they all have a `main` function. 
([\#13052](https://github.com/matrix-org/synapse/issues/13052)) +- Refactor the Dockerfile-workers configuration script to use Jinja2 templates in Synapse workers' Supervisord blocks. ([\#13054](https://github.com/matrix-org/synapse/issues/13054)) +- Add headers to individual options in config documentation to allow for linking. ([\#13055](https://github.com/matrix-org/synapse/issues/13055)) +- Make Complement CI logs easier to read. ([\#13057](https://github.com/matrix-org/synapse/issues/13057), [\#13058](https://github.com/matrix-org/synapse/issues/13058), [\#13069](https://github.com/matrix-org/synapse/issues/13069)) +- Don't instantiate modules with keyword arguments. ([\#13060](https://github.com/matrix-org/synapse/issues/13060)) +- Fix type checking errors against Twisted trunk. ([\#13061](https://github.com/matrix-org/synapse/issues/13061)) +- Allow MSC3030 `timestamp_to_event` calls from anyone on world-readable rooms. ([\#13062](https://github.com/matrix-org/synapse/issues/13062)) +- Add a CI job to check that schema deltas are in the correct folder. ([\#13063](https://github.com/matrix-org/synapse/issues/13063)) +- Avoid rechecking event auth rules which are independent of room state. ([\#13065](https://github.com/matrix-org/synapse/issues/13065)) +- Reduce the duplication of code that invokes the rate limiter. ([\#13070](https://github.com/matrix-org/synapse/issues/13070)) +- Add a Subject Alternative Name to the certificate generated for Complement tests. ([\#13071](https://github.com/matrix-org/synapse/issues/13071)) +- Add more tests for room upgrades. ([\#13074](https://github.com/matrix-org/synapse/issues/13074)) +- Pin dependencies maintained by matrix.org to [semantic version](https://semver.org/) bounds. ([\#13082](https://github.com/matrix-org/synapse/issues/13082)) +- Correctly report prometheus DB stats for `get_earliest_token_for_stats`. ([\#13085](https://github.com/matrix-org/synapse/issues/13085)) +- Fix a long-standing bug where a finished logging context would be re-started when Synapse failed to persist an event from federation. ([\#13089](https://github.com/matrix-org/synapse/issues/13089)) +- Simplify the alias deletion logic as an application service. ([\#13093](https://github.com/matrix-org/synapse/issues/13093)) +- Add type annotations to `tests.test_server`. ([\#13124](https://github.com/matrix-org/synapse/issues/13124)) + + Synapse 1.61.1 (2022-06-28) =========================== diff --git a/changelog.d/12674.misc b/changelog.d/12674.misc deleted file mode 100644 index c8a8f32f0..000000000 --- a/changelog.d/12674.misc +++ /dev/null @@ -1 +0,0 @@ -Add tests for cancellation of `GET /rooms/$room_id/members` and `GET /rooms/$room_id/state` requests. diff --git a/changelog.d/12737.doc b/changelog.d/12737.doc deleted file mode 100644 index ab2d1f2fd..000000000 --- a/changelog.d/12737.doc +++ /dev/null @@ -1 +0,0 @@ -Add documentation for how to configure Synapse with Workers using Docker Compose. Includes example worker config and docker-compose.yaml. Contributed by @Thumbscrew. \ No newline at end of file diff --git a/changelog.d/12738.misc b/changelog.d/12738.misc deleted file mode 100644 index 825222347..000000000 --- a/changelog.d/12738.misc +++ /dev/null @@ -1 +0,0 @@ -Report login failures due to unknown third party identifiers in the same way as failures due to invalid passwords. This prevents an attacker from using the error response to determine if the identifier exists. Contributed by Daniel Aloni. 
\ No newline at end of file diff --git a/changelog.d/12857.feature b/changelog.d/12857.feature deleted file mode 100644 index ddd1dbe68..000000000 --- a/changelog.d/12857.feature +++ /dev/null @@ -1 +0,0 @@ -Port spam-checker API callbacks to a new, richer API. This is part of an ongoing change to let spam-checker modules inform users of the reason their event or operation is rejected. diff --git a/changelog.d/12881.misc b/changelog.d/12881.misc deleted file mode 100644 index 8a83182bd..000000000 --- a/changelog.d/12881.misc +++ /dev/null @@ -1 +0,0 @@ -Merge the Complement testing Docker images into a single, multi-purpose image. \ No newline at end of file diff --git a/changelog.d/12893.misc b/changelog.d/12893.misc deleted file mode 100644 index 570521030..000000000 --- a/changelog.d/12893.misc +++ /dev/null @@ -1 +0,0 @@ -Simplify the database schema for `event_edges`. diff --git a/changelog.d/12929.misc b/changelog.d/12929.misc deleted file mode 100644 index 20718d258..000000000 --- a/changelog.d/12929.misc +++ /dev/null @@ -1 +0,0 @@ -Clean up the test code for client disconnection. diff --git a/changelog.d/12939.bugfix b/changelog.d/12939.bugfix deleted file mode 100644 index d9061cf8e..000000000 --- a/changelog.d/12939.bugfix +++ /dev/null @@ -1 +0,0 @@ -Update [MSC3786](https://github.com/matrix-org/matrix-spec-proposals/pull/3786) implementation to check `state_key`. diff --git a/changelog.d/12941.misc b/changelog.d/12941.misc deleted file mode 100644 index 6a74f255d..000000000 --- a/changelog.d/12941.misc +++ /dev/null @@ -1 +0,0 @@ -Remove code generating comments in configuration. diff --git a/changelog.d/12944.misc b/changelog.d/12944.misc deleted file mode 100644 index bf27fe7e2..000000000 --- a/changelog.d/12944.misc +++ /dev/null @@ -1 +0,0 @@ -Add `Cross-Origin-Resource-Policy: cross-origin` header to content repository's thumbnail and download endpoints. \ No newline at end of file diff --git a/changelog.d/12954.misc b/changelog.d/12954.misc deleted file mode 100644 index 20bf13673..000000000 --- a/changelog.d/12954.misc +++ /dev/null @@ -1 +0,0 @@ -Replace noop background updates with `DELETE` delta. diff --git a/changelog.d/12957.misc b/changelog.d/12957.misc deleted file mode 100644 index 0c075276e..000000000 --- a/changelog.d/12957.misc +++ /dev/null @@ -1 +0,0 @@ -Use lower isolation level when inserting read receipts to avoid serialization errors. Contributed by Nick @ Beeper. diff --git a/changelog.d/12963.misc b/changelog.d/12963.misc deleted file mode 100644 index d57e1aca6..000000000 --- a/changelog.d/12963.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce the amount of state we pull from the DB. diff --git a/changelog.d/12965.misc b/changelog.d/12965.misc deleted file mode 100644 index cc2823e12..000000000 --- a/changelog.d/12965.misc +++ /dev/null @@ -1 +0,0 @@ -Enable testing against PostgreSQL databases in Complement CI. \ No newline at end of file diff --git a/changelog.d/12969.misc b/changelog.d/12969.misc deleted file mode 100644 index 05de7ce83..000000000 --- a/changelog.d/12969.misc +++ /dev/null @@ -1 +0,0 @@ -Fix an inaccurate comment. diff --git a/changelog.d/12970.misc b/changelog.d/12970.misc deleted file mode 100644 index 8f874aa07..000000000 --- a/changelog.d/12970.misc +++ /dev/null @@ -1 +0,0 @@ -Remove the `delete_device` method and always call `delete_devices`. 
diff --git a/changelog.d/12973.bugfix b/changelog.d/12973.bugfix deleted file mode 100644 index 1bf45854f..000000000 --- a/changelog.d/12973.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.58 where Synapse would not report full version information when installed from a git checkout. This is a best-effort affair and not guaranteed to be stable. diff --git a/changelog.d/12979.bugfix b/changelog.d/12979.bugfix deleted file mode 100644 index 6b5440802..000000000 --- a/changelog.d/12979.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.60 where Synapse would fail to start if the `sqlite3` module was not available. diff --git a/changelog.d/12982.misc b/changelog.d/12982.misc deleted file mode 100644 index 036b69efe..000000000 --- a/changelog.d/12982.misc +++ /dev/null @@ -1 +0,0 @@ -Use a GitHub form for issues rather than a hard-to-read, easy-to-ignore template. \ No newline at end of file diff --git a/changelog.d/12984.misc b/changelog.d/12984.misc deleted file mode 100644 index a90201718..000000000 --- a/changelog.d/12984.misc +++ /dev/null @@ -1 +0,0 @@ -Move [MSC3715](https://github.com/matrix-org/matrix-spec-proposals/pull/3715) behind an experimental config flag. diff --git a/changelog.d/12985.misc b/changelog.d/12985.misc deleted file mode 100644 index 7f6492d58..000000000 --- a/changelog.d/12985.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to tests. diff --git a/changelog.d/12986.misc b/changelog.d/12986.misc deleted file mode 100644 index 937b88802..000000000 --- a/changelog.d/12986.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor macaroon tokens generation and move the unsubscribe link in notification emails to `/_synapse/client/unsubscribe`. diff --git a/changelog.d/12990.misc b/changelog.d/12990.misc deleted file mode 100644 index c68f6a731..000000000 --- a/changelog.d/12990.misc +++ /dev/null @@ -1 +0,0 @@ -Fix documentation for running complement tests. diff --git a/changelog.d/12991.bugfix b/changelog.d/12991.bugfix deleted file mode 100644 index c6e388d5b..000000000 --- a/changelog.d/12991.bugfix +++ /dev/null @@ -1,2 +0,0 @@ -Fix a bug where non-standard information was required when requesting the `/hierarchy` API over federation. Introduced -in Synapse v1.41.0. diff --git a/changelog.d/13004.misc b/changelog.d/13004.misc deleted file mode 100644 index d8e93d87a..000000000 --- a/changelog.d/13004.misc +++ /dev/null @@ -1 +0,0 @@ -Faster joins: add issue links to the TODO comments in the code. diff --git a/changelog.d/13005.misc b/changelog.d/13005.misc deleted file mode 100644 index 3bb51962e..000000000 --- a/changelog.d/13005.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce DB usage of `/sync` when a large number of unread messages have recently been sent in a room. diff --git a/changelog.d/13011.misc b/changelog.d/13011.misc deleted file mode 100644 index 4da223219..000000000 --- a/changelog.d/13011.misc +++ /dev/null @@ -1 +0,0 @@ -Replaced usage of PyJWT with methods from Authlib in `org.matrix.login.jwt`. Contributed by Hannes Lerchl. diff --git a/changelog.d/13013.misc b/changelog.d/13013.misc deleted file mode 100644 index 903c6a3c8..000000000 --- a/changelog.d/13013.misc +++ /dev/null @@ -1 +0,0 @@ -Modernize the `contrib/graph/` scripts. diff --git a/changelog.d/13017.misc b/changelog.d/13017.misc deleted file mode 100644 index b314687f9..000000000 --- a/changelog.d/13017.misc +++ /dev/null @@ -1 +0,0 @@ -Remove redundant `room_version` parameters from event auth functions. 
diff --git a/changelog.d/13018.bugfix b/changelog.d/13018.bugfix deleted file mode 100644 index a84657f04..000000000 --- a/changelog.d/13018.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug which meant that rate limiting was not restrictive enough in some cases. \ No newline at end of file diff --git a/changelog.d/13021.misc b/changelog.d/13021.misc deleted file mode 100644 index 84c41cdf5..000000000 --- a/changelog.d/13021.misc +++ /dev/null @@ -1 +0,0 @@ -Decouple `synapse.api.auth_blocking.AuthBlocking` from `synapse.api.auth.Auth`. diff --git a/changelog.d/13022.doc b/changelog.d/13022.doc deleted file mode 100644 index 4d6ac7ae9..000000000 --- a/changelog.d/13022.doc +++ /dev/null @@ -1 +0,0 @@ -Ensure the [Poetry cheat sheet](https://matrix-org.github.io/synapse/develop/development/dependencies.html) is available in the online documentation. diff --git a/changelog.d/13023.doc b/changelog.d/13023.doc deleted file mode 100644 index 5589c7492..000000000 --- a/changelog.d/13023.doc +++ /dev/null @@ -1 +0,0 @@ -Mention removed community/group worker endpoints in upgrade.md. Contributed by @olmari. \ No newline at end of file diff --git a/changelog.d/13025.misc b/changelog.d/13025.misc deleted file mode 100644 index 7cb0d174b..000000000 --- a/changelog.d/13025.misc +++ /dev/null @@ -1 +0,0 @@ -Add type annotations to `synapse.storage.databases.main.devices`. diff --git a/changelog.d/13034.misc b/changelog.d/13034.misc deleted file mode 100644 index cc2823e12..000000000 --- a/changelog.d/13034.misc +++ /dev/null @@ -1 +0,0 @@ -Enable testing against PostgreSQL databases in Complement CI. \ No newline at end of file diff --git a/changelog.d/13035.feature b/changelog.d/13035.feature deleted file mode 100644 index cfca3ab4b..000000000 --- a/changelog.d/13035.feature +++ /dev/null @@ -1 +0,0 @@ -Allow server admins to customise the response of the `/.well-known/matrix/client` endpoint. diff --git a/changelog.d/13036.feature b/changelog.d/13036.feature deleted file mode 100644 index 71e5a29fe..000000000 --- a/changelog.d/13036.feature +++ /dev/null @@ -1 +0,0 @@ -Add metrics measuring the CPU and DB time spent in state resolution. diff --git a/changelog.d/13041.bugfix b/changelog.d/13041.bugfix deleted file mode 100644 index edb1635eb..000000000 --- a/changelog.d/13041.bugfix +++ /dev/null @@ -1,2 +0,0 @@ -Fix a bug introduced in Synapse 1.58 where profile requests for a malformed user ID would ccause an internal error. Synapse now returns 400 Bad Request in this situation. - diff --git a/changelog.d/13042.misc b/changelog.d/13042.misc deleted file mode 100644 index 745d5fcf8..000000000 --- a/changelog.d/13042.misc +++ /dev/null @@ -1 +0,0 @@ -Set default `sync_response_cache_duration` to two minutes. diff --git a/changelog.d/13045.feature b/changelog.d/13045.feature deleted file mode 100644 index 7b0667ba9..000000000 --- a/changelog.d/13045.feature +++ /dev/null @@ -1 +0,0 @@ -Speed up fetching of device list changes in `/sync` and `/keys/changes`. diff --git a/changelog.d/13046.misc b/changelog.d/13046.misc deleted file mode 100644 index 1248c34d3..000000000 --- a/changelog.d/13046.misc +++ /dev/null @@ -1 +0,0 @@ -Rename CI test runs. diff --git a/changelog.d/13047.feature b/changelog.d/13047.feature deleted file mode 100644 index ddd1dbe68..000000000 --- a/changelog.d/13047.feature +++ /dev/null @@ -1 +0,0 @@ -Port spam-checker API callbacks to a new, richer API. 
This is part of an ongoing change to let spam-checker modules inform users of the reason their event or operation is rejected. diff --git a/changelog.d/13048.misc b/changelog.d/13048.misc deleted file mode 100644 index 073c8b1a9..000000000 --- a/changelog.d/13048.misc +++ /dev/null @@ -1 +0,0 @@ -Increase timeout of complement CI test runs. diff --git a/changelog.d/13050.misc b/changelog.d/13050.misc deleted file mode 100644 index 20bf13673..000000000 --- a/changelog.d/13050.misc +++ /dev/null @@ -1 +0,0 @@ -Replace noop background updates with `DELETE` delta. diff --git a/changelog.d/13052.misc b/changelog.d/13052.misc deleted file mode 100644 index 0d11dfb12..000000000 --- a/changelog.d/13052.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor entry points so that they all have a `main` function. \ No newline at end of file diff --git a/changelog.d/13054.misc b/changelog.d/13054.misc deleted file mode 100644 index 088055373..000000000 --- a/changelog.d/13054.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor the Dockerfile-workers configuration script to use Jinja2 templates in Synapse workers' Supervisord blocks. \ No newline at end of file diff --git a/changelog.d/13055.misc b/changelog.d/13055.misc deleted file mode 100644 index 92a02a608..000000000 --- a/changelog.d/13055.misc +++ /dev/null @@ -1 +0,0 @@ -Add headers to individual options in config documentation to allow for linking. diff --git a/changelog.d/13056.feature b/changelog.d/13056.feature deleted file mode 100644 index 219e2f6c1..000000000 --- a/changelog.d/13056.feature +++ /dev/null @@ -1 +0,0 @@ -Improve URL previews for sites which only provide Twitter Card metadata, e.g. LWN.net. diff --git a/changelog.d/13057.misc b/changelog.d/13057.misc deleted file mode 100644 index 4102bf96b..000000000 --- a/changelog.d/13057.misc +++ /dev/null @@ -1 +0,0 @@ -Make Complement CI logs easier to read. \ No newline at end of file diff --git a/changelog.d/13058.misc b/changelog.d/13058.misc deleted file mode 100644 index 4102bf96b..000000000 --- a/changelog.d/13058.misc +++ /dev/null @@ -1 +0,0 @@ -Make Complement CI logs easier to read. \ No newline at end of file diff --git a/changelog.d/13060.misc b/changelog.d/13060.misc deleted file mode 100644 index c2376701f..000000000 --- a/changelog.d/13060.misc +++ /dev/null @@ -1 +0,0 @@ -Don't instantiate modules with keyword arguments. diff --git a/changelog.d/13061.misc b/changelog.d/13061.misc deleted file mode 100644 index 4c55e2b4e..000000000 --- a/changelog.d/13061.misc +++ /dev/null @@ -1 +0,0 @@ -Fix type checking errors against Twisted trunk. diff --git a/changelog.d/13062.misc b/changelog.d/13062.misc deleted file mode 100644 index d425e9a9a..000000000 --- a/changelog.d/13062.misc +++ /dev/null @@ -1 +0,0 @@ -Allow MSC3030 'timestamp_to_event' calls from anyone on world-readable rooms. diff --git a/changelog.d/13063.misc b/changelog.d/13063.misc deleted file mode 100644 index 167d6d2cd..000000000 --- a/changelog.d/13063.misc +++ /dev/null @@ -1 +0,0 @@ -Add a CI job to check that schema deltas are in the correct folder. diff --git a/changelog.d/13065.misc b/changelog.d/13065.misc deleted file mode 100644 index e9e8a7659..000000000 --- a/changelog.d/13065.misc +++ /dev/null @@ -1 +0,0 @@ -Avoid rechecking event auth rules which are independent of room state. diff --git a/changelog.d/13069.misc b/changelog.d/13069.misc deleted file mode 100644 index 4102bf96b..000000000 --- a/changelog.d/13069.misc +++ /dev/null @@ -1 +0,0 @@ -Make Complement CI logs easier to read. 
\ No newline at end of file diff --git a/changelog.d/13070.misc b/changelog.d/13070.misc deleted file mode 100644 index ce1f14342..000000000 --- a/changelog.d/13070.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce the duplication of code that invokes the rate limiter. \ No newline at end of file diff --git a/changelog.d/13071.misc b/changelog.d/13071.misc deleted file mode 100644 index a6e1e6b3a..000000000 --- a/changelog.d/13071.misc +++ /dev/null @@ -1 +0,0 @@ -Add a Subject Alternative Name to the certificate generated for Complement tests. \ No newline at end of file diff --git a/changelog.d/13073.doc b/changelog.d/13073.doc deleted file mode 100644 index e162a8404..000000000 --- a/changelog.d/13073.doc +++ /dev/null @@ -1 +0,0 @@ -Add instructions for running Complement with `gotestfmt`-formatted output locally. \ No newline at end of file diff --git a/changelog.d/13074.misc b/changelog.d/13074.misc deleted file mode 100644 index a502e44d9..000000000 --- a/changelog.d/13074.misc +++ /dev/null @@ -1 +0,0 @@ -Add more tests for room upgrades. diff --git a/changelog.d/13075.misc b/changelog.d/13075.misc deleted file mode 100644 index 2311629f7..000000000 --- a/changelog.d/13075.misc +++ /dev/null @@ -1 +0,0 @@ -Merge the Complement testing Docker images into a single, multi-purpose image. diff --git a/changelog.d/13076.doc b/changelog.d/13076.doc deleted file mode 100644 index 75dc4630e..000000000 --- a/changelog.d/13076.doc +++ /dev/null @@ -1 +0,0 @@ -Update OpenTracing docs to reference the configuration manual rather than the configuration file. diff --git a/changelog.d/13082.misc b/changelog.d/13082.misc deleted file mode 100644 index 1aa386dbf..000000000 --- a/changelog.d/13082.misc +++ /dev/null @@ -1 +0,0 @@ -Pin dependencies maintained by matrix.org to [semantic version](https://semver.org/) bounds. diff --git a/changelog.d/13085.misc b/changelog.d/13085.misc deleted file mode 100644 index 2401d4f38..000000000 --- a/changelog.d/13085.misc +++ /dev/null @@ -1 +0,0 @@ -Correctly report prometheus DB stats for `get_earliest_token_for_stats`. diff --git a/changelog.d/13087.bugfix b/changelog.d/13087.bugfix deleted file mode 100644 index 7c69801af..000000000 --- a/changelog.d/13087.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix some inconsistencies in the event authentication code. diff --git a/changelog.d/13088.bugfix b/changelog.d/13088.bugfix deleted file mode 100644 index 7c69801af..000000000 --- a/changelog.d/13088.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix some inconsistencies in the event authentication code. diff --git a/changelog.d/13089.misc b/changelog.d/13089.misc deleted file mode 100644 index 5868507cb..000000000 --- a/changelog.d/13089.misc +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where a finished logging context would be re-started when Synapse failed to persist an event from federation. diff --git a/changelog.d/13093.misc b/changelog.d/13093.misc deleted file mode 100644 index 2547c87fa..000000000 --- a/changelog.d/13093.misc +++ /dev/null @@ -1 +0,0 @@ -Simplify the alias deletion logic as an application service. diff --git a/changelog.d/13095.doc b/changelog.d/13095.doc deleted file mode 100644 index 4651f25e1..000000000 --- a/changelog.d/13095.doc +++ /dev/null @@ -1 +0,0 @@ -Update information on downstream Debian packages. 
diff --git a/changelog.d/13096.misc b/changelog.d/13096.misc deleted file mode 100644 index 3bb51962e..000000000 --- a/changelog.d/13096.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce DB usage of `/sync` when a large number of unread messages have recently been sent in a room. diff --git a/changelog.d/13098.feature b/changelog.d/13098.feature deleted file mode 100644 index 7b0667ba9..000000000 --- a/changelog.d/13098.feature +++ /dev/null @@ -1 +0,0 @@ -Speed up fetching of device list changes in `/sync` and `/keys/changes`. diff --git a/changelog.d/13099.misc b/changelog.d/13099.misc deleted file mode 100644 index 7f6492d58..000000000 --- a/changelog.d/13099.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to tests. diff --git a/changelog.d/13106.bugfix b/changelog.d/13106.bugfix deleted file mode 100644 index 0dc16bad0..000000000 --- a/changelog.d/13106.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where room directory requests would cause an internal server error if given a malformed room alias. \ No newline at end of file diff --git a/changelog.d/13112.doc b/changelog.d/13112.doc deleted file mode 100644 index 4b99951c7..000000000 --- a/changelog.d/13112.doc +++ /dev/null @@ -1 +0,0 @@ -Remove documentation for the Delete Group Admin API which no longer exists. \ No newline at end of file diff --git a/changelog.d/13118.misc b/changelog.d/13118.misc deleted file mode 100644 index 3bb51962e..000000000 --- a/changelog.d/13118.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce DB usage of `/sync` when a large number of unread messages have recently been sent in a room. diff --git a/changelog.d/13123.removal b/changelog.d/13123.removal deleted file mode 100644 index f013f1616..000000000 --- a/changelog.d/13123.removal +++ /dev/null @@ -1 +0,0 @@ -Remove the unspecced `DELETE /directory/list/room/{roomId}` endpoint, which hid rooms from the [public room directory](https://spec.matrix.org/v1.3/client-server-api/#listing-rooms). Instead, `PUT` to the same URL with a visibility of `"private"`. \ No newline at end of file diff --git a/changelog.d/13124.misc b/changelog.d/13124.misc deleted file mode 100644 index 513078f8d..000000000 --- a/changelog.d/13124.misc +++ /dev/null @@ -1 +0,0 @@ -Add type annotations to `tests.test_server`. diff --git a/debian/changelog b/debian/changelog index 2ca565a15..7fbd9baef 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.62.0~rc1) stable; urgency=medium + + * New Synapse release 1.62.0rc1. + + -- Synapse Packaging team Tue, 28 Jun 2022 16:34:57 +0100 + matrix-synapse-py3 (1.61.1) stable; urgency=medium * New Synapse release 1.61.1. 
diff --git a/pyproject.toml b/pyproject.toml index df44ee314..8b66d3a9e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -54,7 +54,7 @@ skip_gitignore = true [tool.poetry] name = "matrix-synapse" -version = "1.61.1" +version = "1.62.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From bc9b0912cc147713c42e850fbfbb4ee396c8c839 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 28 Jun 2022 16:47:04 +0100 Subject: [PATCH 85/85] fix linting error from the 1.61.1 main -> develop merge --- synapse/rest/media/v1/preview_html.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/synapse/rest/media/v1/preview_html.py b/synapse/rest/media/v1/preview_html.py index afe4e2975..516d0434f 100644 --- a/synapse/rest/media/v1/preview_html.py +++ b/synapse/rest/media/v1/preview_html.py @@ -20,7 +20,8 @@ from typing import ( Dict, Generator, Iterable, - List, Optional, + List, + Optional, Set, Union, )