Mirror of https://mau.dev/maunium/synapse.git

Move experimental & retention config out of the server module. (#11070)

Commit: 5573133348 (parent: 6a67f3786a)
Author: Patrick Cloke, 2021-10-15 10:30:48 -04:00 (committed by GitHub)
10 changed files with 290 additions and 255 deletions

changelog.d/11070.misc (new file, +1)

@@ -0,0 +1 @@
Create a separate module for the retention configuration.

docs/sample_config.yaml

@@ -472,6 +472,48 @@ limit_remote_rooms:
#
#user_ips_max_age: 14d
+# Inhibits the /requestToken endpoints from returning an error that might leak
+# information about whether an e-mail address is in use or not on this
+# homeserver.
+# Note that for some endpoints the error situation is the e-mail already being
+# used, and for others the error is the entered e-mail being unused.
+# If this option is enabled, instead of returning an error, these endpoints will
+# act as if no error happened and return a fake session ID ('sid') to clients.
+#
+#request_token_inhibit_3pid_errors: true
+
+# A list of domains that the domain portion of 'next_link' parameters
+# must match.
+#
+# This parameter is optionally provided by clients while requesting
+# validation of an email or phone number, and maps to a link that
+# users will be automatically redirected to after validation
+# succeeds. Clients can make use of this parameter to aid the validation
+# process.
+#
+# The whitelist is applied whether the homeserver or an
+# identity server is handling validation.
+#
+# The default value is no whitelist functionality; all domains are
+# allowed. Setting this value to an empty list will instead disallow
+# all domains.
+#
+#next_link_domain_whitelist: ["matrix.org"]
+
+# Templates to use when generating email or HTML page contents.
+#
+templates:
+  # Directory in which Synapse will try to find template files to use to generate
+  # email or HTML page contents.
+  # If not set, or a file is not found within the template directory, a default
+  # template from within the Synapse package will be used.
+  #
+  # See https://matrix-org.github.io/synapse/latest/templates.html for more
+  # information about using custom templates.
+  #
+  #custom_template_directory: /path/to/custom/templates/
# Message retention policy at the server level.
#
# Room admins and mods can define a retention period for their rooms using the
@@ -541,47 +583,6 @@ retention:
  #  - shortest_max_lifetime: 3d
  #    interval: 1d
-# Inhibits the /requestToken endpoints from returning an error that might leak
-# information about whether an e-mail address is in use or not on this
-# homeserver.
-# Note that for some endpoints the error situation is the e-mail already being
-# used, and for others the error is the entered e-mail being unused.
-# If this option is enabled, instead of returning an error, these endpoints will
-# act as if no error happened and return a fake session ID ('sid') to clients.
-#
-#request_token_inhibit_3pid_errors: true
-
-# A list of domains that the domain portion of 'next_link' parameters
-# must match.
-#
-# This parameter is optionally provided by clients while requesting
-# validation of an email or phone number, and maps to a link that
-# users will be automatically redirected to after validation
-# succeeds. Clients can make use of this parameter to aid the validation
-# process.
-#
-# The whitelist is applied whether the homeserver or an
-# identity server is handling validation.
-#
-# The default value is no whitelist functionality; all domains are
-# allowed. Setting this value to an empty list will instead disallow
-# all domains.
-#
-#next_link_domain_whitelist: ["matrix.org"]
-
-# Templates to use when generating email or HTML page contents.
-#
-templates:
-  # Directory in which Synapse will try to find template files to use to generate
-  # email or HTML page contents.
-  # If not set, or a file is not found within the template directory, a default
-  # template from within the Synapse package will be used.
-  #
-  # See https://matrix-org.github.io/synapse/latest/templates.html for more
-  # information about using custom templates.
-  #
-  #custom_template_directory: /path/to/custom/templates/
## TLS ##
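
Taken together, the two hunks above simply move the retention block below the request-token, next-link, and template options in the generated sample config; nothing in the text changes. A minimal sketch of what the relocated options look like once uncommented, parsed with PyYAML (the values are illustrative, not Synapse defaults):

import yaml

# Illustrative homeserver.yaml fragment with the relocated options uncommented.
CONFIG_FRAGMENT = """
request_token_inhibit_3pid_errors: true
next_link_domain_whitelist: ["matrix.org"]
templates:
  custom_template_directory: /path/to/custom/templates/
retention:
  enabled: true
"""

config = yaml.safe_load(CONFIG_FRAGMENT)
assert config["request_token_inhibit_3pid_errors"] is True
assert config["retention"]["enabled"] is True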

synapse/config/_base.pyi

@@ -26,6 +26,7 @@ from synapse.config import (
    redis,
    registration,
    repository,
+    retention,
    room_directory,
    saml2,
    server,
@@ -91,6 +92,7 @@ class RootConfig:
    modules: modules.ModulesConfig
    caches: cache.CacheConfig
    federation: federation.FederationConfig
+    retention: retention.RetentionConfig

    config_classes: List = ...
    def __init__(self) -> None: ...

synapse/config/experimental.py

@@ -24,6 +24,9 @@ class ExperimentalConfig(Config):
    def read_config(self, config: JsonDict, **kwargs):
        experimental = config.get("experimental_features") or {}

+        # Whether to enable experimental MSC1849 (aka relations) support
+        self.msc1849_enabled = config.get("experimental_msc1849_support_enabled", True)
+
        # MSC3026 (busy presence state)
        self.msc3026_enabled: bool = experimental.get("msc3026_enabled", False)
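
Note the asymmetry the hunk above preserves: the MSC1849 toggle is still read from the top level of the config dict (its historical location in homeserver.yaml), while MSC3026 nests under experimental_features. A small sketch of the two lookup scopes, with a plain dict standing in for the parsed config:

# Sketch only: the dict stands in for a parsed homeserver.yaml.
config = {
    "experimental_msc1849_support_enabled": False,  # top-level key, defaults to True
    "experimental_features": {
        "msc3026_enabled": True,  # nested key, defaults to False
    },
}

experimental = config.get("experimental_features") or {}
msc1849_enabled = config.get("experimental_msc1849_support_enabled", True)
msc3026_enabled = experimental.get("msc3026_enabled", False)
assert (msc1849_enabled, msc3026_enabled) == (False, True)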

synapse/config/homeserver.py

@@ -38,6 +38,7 @@ from .ratelimiting import RatelimitConfig
from .redis import RedisConfig
from .registration import RegistrationConfig
from .repository import ContentRepositoryConfig
+from .retention import RetentionConfig
from .room import RoomConfig
from .room_directory import RoomDirectoryConfig
from .saml2 import SAML2Config
@@ -59,6 +60,7 @@ class HomeServerConfig(RootConfig):
    config_classes = [
        ModulesConfig,
        ServerConfig,
+        RetentionConfig,
        TlsConfig,
        FederationConfig,
        CacheConfig,

synapse/config/retention.py (new file, +226)

@@ -0,0 +1,226 @@
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import List, Optional

import attr

from synapse.config._base import Config, ConfigError

logger = logging.getLogger(__name__)


@attr.s(slots=True, frozen=True, auto_attribs=True)
class RetentionPurgeJob:
    """Object describing the configuration of a retention purge job"""

    interval: int
    shortest_max_lifetime: Optional[int]
    longest_max_lifetime: Optional[int]


class RetentionConfig(Config):
    section = "retention"

    def read_config(self, config, **kwargs):
        retention_config = config.get("retention")
        if retention_config is None:
            retention_config = {}

        self.retention_enabled = retention_config.get("enabled", False)

        retention_default_policy = retention_config.get("default_policy")

        if retention_default_policy is not None:
            self.retention_default_min_lifetime = retention_default_policy.get(
                "min_lifetime"
            )
            if self.retention_default_min_lifetime is not None:
                self.retention_default_min_lifetime = self.parse_duration(
                    self.retention_default_min_lifetime
                )

            self.retention_default_max_lifetime = retention_default_policy.get(
                "max_lifetime"
            )
            if self.retention_default_max_lifetime is not None:
                self.retention_default_max_lifetime = self.parse_duration(
                    self.retention_default_max_lifetime
                )

            if (
                self.retention_default_min_lifetime is not None
                and self.retention_default_max_lifetime is not None
                and (
                    self.retention_default_min_lifetime
                    > self.retention_default_max_lifetime
                )
            ):
                raise ConfigError(
                    "The default retention policy's 'min_lifetime' can not be greater"
                    " than its 'max_lifetime'"
                )
        else:
            self.retention_default_min_lifetime = None
            self.retention_default_max_lifetime = None

        if self.retention_enabled:
            logger.info(
                "Message retention policies support enabled with the following default"
                " policy: min_lifetime = %s ; max_lifetime = %s",
                self.retention_default_min_lifetime,
                self.retention_default_max_lifetime,
            )

        self.retention_allowed_lifetime_min = retention_config.get(
            "allowed_lifetime_min"
        )
        if self.retention_allowed_lifetime_min is not None:
            self.retention_allowed_lifetime_min = self.parse_duration(
                self.retention_allowed_lifetime_min
            )

        self.retention_allowed_lifetime_max = retention_config.get(
            "allowed_lifetime_max"
        )
        if self.retention_allowed_lifetime_max is not None:
            self.retention_allowed_lifetime_max = self.parse_duration(
                self.retention_allowed_lifetime_max
            )

        if (
            self.retention_allowed_lifetime_min is not None
            and self.retention_allowed_lifetime_max is not None
            and self.retention_allowed_lifetime_min
            > self.retention_allowed_lifetime_max
        ):
            raise ConfigError(
                "Invalid retention policy limits: 'allowed_lifetime_min' can not be"
                " greater than 'allowed_lifetime_max'"
            )

        self.retention_purge_jobs: List[RetentionPurgeJob] = []
        for purge_job_config in retention_config.get("purge_jobs", []):
            interval_config = purge_job_config.get("interval")
            if interval_config is None:
                raise ConfigError(
                    "A retention policy's purge jobs configuration must have the"
                    " 'interval' key set."
                )
            interval = self.parse_duration(interval_config)

            shortest_max_lifetime = purge_job_config.get("shortest_max_lifetime")
            if shortest_max_lifetime is not None:
                shortest_max_lifetime = self.parse_duration(shortest_max_lifetime)

            longest_max_lifetime = purge_job_config.get("longest_max_lifetime")
            if longest_max_lifetime is not None:
                longest_max_lifetime = self.parse_duration(longest_max_lifetime)

            if (
                shortest_max_lifetime is not None
                and longest_max_lifetime is not None
                and shortest_max_lifetime > longest_max_lifetime
            ):
                raise ConfigError(
                    "A retention policy's purge jobs configuration's"
                    " 'shortest_max_lifetime' value can not be greater than its"
                    " 'longest_max_lifetime' value."
                )

            self.retention_purge_jobs.append(
                RetentionPurgeJob(interval, shortest_max_lifetime, longest_max_lifetime)
            )

        if not self.retention_purge_jobs:
            self.retention_purge_jobs = [
                RetentionPurgeJob(self.parse_duration("1d"), None, None)
            ]

    def generate_config_section(self, config_dir_path, server_name, **kwargs):
        return """\
        # Message retention policy at the server level.
        #
        # Room admins and mods can define a retention period for their rooms using the
        # 'm.room.retention' state event, and server admins can cap this period by setting
        # the 'allowed_lifetime_min' and 'allowed_lifetime_max' config options.
        #
        # If this feature is enabled, Synapse will regularly look for and purge events
        # which are older than the room's maximum retention period. Synapse will also
        # filter events received over federation so that events that should have been
        # purged are ignored and not stored again.
        #
        retention:
          # The message retention policies feature is disabled by default. Uncomment the
          # following line to enable it.
          #
          #enabled: true

          # Default retention policy. If set, Synapse will apply it to rooms that lack the
          # 'm.room.retention' state event. Currently, the value of 'min_lifetime' doesn't
          # matter much because Synapse doesn't take it into account yet.
          #
          #default_policy:
          #  min_lifetime: 1d
          #  max_lifetime: 1y

          # Retention policy limits. If set, and the state of a room contains a
          # 'm.room.retention' event in its state which contains a 'min_lifetime' or a
          # 'max_lifetime' that's out of these bounds, Synapse will cap the room's policy
          # to these limits when running purge jobs.
          #
          #allowed_lifetime_min: 1d
          #allowed_lifetime_max: 1y

          # Server admins can define the settings of the background jobs purging the
          # events whose lifetime has expired under the 'purge_jobs' section.
          #
          # If no configuration is provided, a single job will be set up to delete expired
          # events in every room daily.
          #
          # Each job's configuration defines which range of message lifetimes the job
          # takes care of. For example, if 'shortest_max_lifetime' is '2d' and
          # 'longest_max_lifetime' is '3d', the job will handle purging expired events in
          # rooms whose state defines a 'max_lifetime' that's both higher than 2 days, and
          # lower than or equal to 3 days. Both the minimum and the maximum value of a
          # range are optional, e.g. a job with no 'shortest_max_lifetime' and a
          # 'longest_max_lifetime' of '3d' will handle every room with a retention policy
          # whose 'max_lifetime' is lower than or equal to three days.
          #
          # The rationale for this per-job configuration is that some rooms might have a
          # retention policy with a low 'max_lifetime', where history needs to be purged
          # of outdated messages on a more frequent basis than for the rest of the rooms
          # (e.g. every 12h), but not want that purge to be performed by a job that's
          # iterating over every room it knows, which could be heavy on the server.
          #
          # If any purge job is configured, it is strongly recommended to have at least
          # a single job with neither 'shortest_max_lifetime' nor 'longest_max_lifetime'
          # set, or one job without 'shortest_max_lifetime' and one job without
          # 'longest_max_lifetime' set. Otherwise some rooms might be ignored, even if
          # 'allowed_lifetime_min' and 'allowed_lifetime_max' are set, because capping a
          # room's policy to these values is done after the policies are retrieved from
          # Synapse's database (which is done using the range specified in a purge job's
          # configuration).
          #
          #purge_jobs:
          #  - longest_max_lifetime: 3d
          #    interval: 12h
          #  - shortest_max_lifetime: 3d
          #    interval: 1d
        """

synapse/config/server.py

@@ -225,15 +225,6 @@ class ManholeConfig:
    pub_key: Optional[Key]

-@attr.s(slots=True, frozen=True, auto_attribs=True)
-class RetentionConfig:
-    """Object describing the configuration of the manhole"""
-
-    interval: int
-    shortest_max_lifetime: Optional[int]
-    longest_max_lifetime: Optional[int]

@attr.s(frozen=True)
class LimitRemoteRoomsConfig:
    enabled: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)
@@ -376,11 +367,6 @@ class ServerConfig(Config):
        # (other than those sent by local server admins)
        self.block_non_admin_invites = config.get("block_non_admin_invites", False)

-        # Whether to enable experimental MSC1849 (aka relations) support
-        self.experimental_msc1849_support_enabled = config.get(
-            "experimental_msc1849_support_enabled", True
-        )

        # Options to control access by tracking MAU
        self.limit_usage_by_mau = config.get("limit_usage_by_mau", False)
        self.max_mau_value = 0
@@ -466,124 +452,6 @@
        # events with profile information that differ from the target's global profile.
        self.allow_per_room_profiles = config.get("allow_per_room_profiles", True)

-        retention_config = config.get("retention")
-        if retention_config is None:
-            retention_config = {}
-
-        self.retention_enabled = retention_config.get("enabled", False)
-
-        retention_default_policy = retention_config.get("default_policy")
-
-        if retention_default_policy is not None:
-            self.retention_default_min_lifetime = retention_default_policy.get(
-                "min_lifetime"
-            )
-            if self.retention_default_min_lifetime is not None:
-                self.retention_default_min_lifetime = self.parse_duration(
-                    self.retention_default_min_lifetime
-                )
-
-            self.retention_default_max_lifetime = retention_default_policy.get(
-                "max_lifetime"
-            )
-            if self.retention_default_max_lifetime is not None:
-                self.retention_default_max_lifetime = self.parse_duration(
-                    self.retention_default_max_lifetime
-                )
-
-            if (
-                self.retention_default_min_lifetime is not None
-                and self.retention_default_max_lifetime is not None
-                and (
-                    self.retention_default_min_lifetime
-                    > self.retention_default_max_lifetime
-                )
-            ):
-                raise ConfigError(
-                    "The default retention policy's 'min_lifetime' can not be greater"
-                    " than its 'max_lifetime'"
-                )
-        else:
-            self.retention_default_min_lifetime = None
-            self.retention_default_max_lifetime = None
-
-        if self.retention_enabled:
-            logger.info(
-                "Message retention policies support enabled with the following default"
-                " policy: min_lifetime = %s ; max_lifetime = %s",
-                self.retention_default_min_lifetime,
-                self.retention_default_max_lifetime,
-            )
-
-        self.retention_allowed_lifetime_min = retention_config.get(
-            "allowed_lifetime_min"
-        )
-        if self.retention_allowed_lifetime_min is not None:
-            self.retention_allowed_lifetime_min = self.parse_duration(
-                self.retention_allowed_lifetime_min
-            )
-
-        self.retention_allowed_lifetime_max = retention_config.get(
-            "allowed_lifetime_max"
-        )
-        if self.retention_allowed_lifetime_max is not None:
-            self.retention_allowed_lifetime_max = self.parse_duration(
-                self.retention_allowed_lifetime_max
-            )
-
-        if (
-            self.retention_allowed_lifetime_min is not None
-            and self.retention_allowed_lifetime_max is not None
-            and self.retention_allowed_lifetime_min
-            > self.retention_allowed_lifetime_max
-        ):
-            raise ConfigError(
-                "Invalid retention policy limits: 'allowed_lifetime_min' can not be"
-                " greater than 'allowed_lifetime_max'"
-            )
-
-        self.retention_purge_jobs: List[RetentionConfig] = []
-        for purge_job_config in retention_config.get("purge_jobs", []):
-            interval_config = purge_job_config.get("interval")
-            if interval_config is None:
-                raise ConfigError(
-                    "A retention policy's purge jobs configuration must have the"
-                    " 'interval' key set."
-                )
-            interval = self.parse_duration(interval_config)
-
-            shortest_max_lifetime = purge_job_config.get("shortest_max_lifetime")
-            if shortest_max_lifetime is not None:
-                shortest_max_lifetime = self.parse_duration(shortest_max_lifetime)
-
-            longest_max_lifetime = purge_job_config.get("longest_max_lifetime")
-            if longest_max_lifetime is not None:
-                longest_max_lifetime = self.parse_duration(longest_max_lifetime)
-
-            if (
-                shortest_max_lifetime is not None
-                and longest_max_lifetime is not None
-                and shortest_max_lifetime > longest_max_lifetime
-            ):
-                raise ConfigError(
-                    "A retention policy's purge jobs configuration's"
-                    " 'shortest_max_lifetime' value can not be greater than its"
-                    " 'longest_max_lifetime' value."
-                )
-
-            self.retention_purge_jobs.append(
-                RetentionConfig(interval, shortest_max_lifetime, longest_max_lifetime)
-            )
-
-        if not self.retention_purge_jobs:
-            self.retention_purge_jobs = [
-                RetentionConfig(self.parse_duration("1d"), None, None)
-            ]

        self.listeners = [parse_listener_def(x) for x in config.get("listeners", [])]

        # no_tls is not really supported any more, but let's grandfather it in
@@ -1255,75 +1123,6 @@
        #
        #user_ips_max_age: 14d

-        # Message retention policy at the server level.
-        #
-        # Room admins and mods can define a retention period for their rooms using the
-        # 'm.room.retention' state event, and server admins can cap this period by setting
-        # the 'allowed_lifetime_min' and 'allowed_lifetime_max' config options.
-        #
-        # If this feature is enabled, Synapse will regularly look for and purge events
-        # which are older than the room's maximum retention period. Synapse will also
-        # filter events received over federation so that events that should have been
-        # purged are ignored and not stored again.
-        #
-        retention:
-          # The message retention policies feature is disabled by default. Uncomment the
-          # following line to enable it.
-          #
-          #enabled: true
-
-          # Default retention policy. If set, Synapse will apply it to rooms that lack the
-          # 'm.room.retention' state event. Currently, the value of 'min_lifetime' doesn't
-          # matter much because Synapse doesn't take it into account yet.
-          #
-          #default_policy:
-          #  min_lifetime: 1d
-          #  max_lifetime: 1y
-
-          # Retention policy limits. If set, and the state of a room contains a
-          # 'm.room.retention' event in its state which contains a 'min_lifetime' or a
-          # 'max_lifetime' that's out of these bounds, Synapse will cap the room's policy
-          # to these limits when running purge jobs.
-          #
-          #allowed_lifetime_min: 1d
-          #allowed_lifetime_max: 1y
-
-          # Server admins can define the settings of the background jobs purging the
-          # events whose lifetime has expired under the 'purge_jobs' section.
-          #
-          # If no configuration is provided, a single job will be set up to delete expired
-          # events in every room daily.
-          #
-          # Each job's configuration defines which range of message lifetimes the job
-          # takes care of. For example, if 'shortest_max_lifetime' is '2d' and
-          # 'longest_max_lifetime' is '3d', the job will handle purging expired events in
-          # rooms whose state defines a 'max_lifetime' that's both higher than 2 days, and
-          # lower than or equal to 3 days. Both the minimum and the maximum value of a
-          # range are optional, e.g. a job with no 'shortest_max_lifetime' and a
-          # 'longest_max_lifetime' of '3d' will handle every room with a retention policy
-          # whose 'max_lifetime' is lower than or equal to three days.
-          #
-          # The rationale for this per-job configuration is that some rooms might have a
-          # retention policy with a low 'max_lifetime', where history needs to be purged
-          # of outdated messages on a more frequent basis than for the rest of the rooms
-          # (e.g. every 12h), but not want that purge to be performed by a job that's
-          # iterating over every room it knows, which could be heavy on the server.
-          #
-          # If any purge job is configured, it is strongly recommended to have at least
-          # a single job with neither 'shortest_max_lifetime' nor 'longest_max_lifetime'
-          # set, or one job without 'shortest_max_lifetime' and one job without
-          # 'longest_max_lifetime' set. Otherwise some rooms might be ignored, even if
-          # 'allowed_lifetime_min' and 'allowed_lifetime_max' are set, because capping a
-          # room's policy to these values is done after the policies are retrieved from
-          # Synapse's database (which is done using the range specified in a purge job's
-          # configuration).
-          #
-          #purge_jobs:
-          #  - longest_max_lifetime: 3d
-          #    interval: 12h
-          #  - shortest_max_lifetime: 3d
-          #    interval: 1d

        # Inhibits the /requestToken endpoints from returning an error that might leak
        # information about whether an e-mail address is in use or not on this
        # homeserver.
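
For anything outside this diff that reads these attributes (custom tooling, pluggable modules), the commit amounts to a namespace move. A hedged summary of the renames visible in the hunks above, with a trivial accessor showing the new path:

# Old attribute path                                     -> new attribute path
# hs.config.server.retention_enabled                     -> hs.config.retention.retention_enabled
# hs.config.server.retention_default_min_lifetime        -> hs.config.retention.retention_default_min_lifetime
# hs.config.server.retention_default_max_lifetime        -> hs.config.retention.retention_default_max_lifetime
# hs.config.server.retention_allowed_lifetime_min        -> hs.config.retention.retention_allowed_lifetime_min
# hs.config.server.retention_allowed_lifetime_max        -> hs.config.retention.retention_allowed_lifetime_max
# hs.config.server.retention_purge_jobs                  -> hs.config.retention.retention_purge_jobs
# hs.config.server.experimental_msc1849_support_enabled  -> hs.config.experimental.msc1849_enabled

def retention_enabled(hs) -> bool:
    # Reads the flag from its new home on the config root.
    return hs.config.retention.retention_enabled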

synapse/events/utils.py

@@ -385,9 +385,7 @@ class EventClientSerializer:
    def __init__(self, hs: "HomeServer"):
        self.store = hs.get_datastore()
-        self.experimental_msc1849_support_enabled = (
-            hs.config.server.experimental_msc1849_support_enabled
-        )
+        self._msc1849_enabled = hs.config.experimental.msc1849_enabled

    async def serialize_event(
        self,
@@ -418,7 +416,7 @@
        # we need to bundle in with the event.
        # Do not bundle relations if the event has been redacted
        if not event.internal_metadata.is_redacted() and (
-            self.experimental_msc1849_support_enabled and bundle_aggregations
+            self._msc1849_enabled and bundle_aggregations
        ):
            annotations = await self.store.get_aggregation_groups_for_event(event_id)
            references = await self.store.get_relations_for_event(

synapse/handlers/pagination.py

@@ -86,19 +86,22 @@ class PaginationHandler:
        self._event_serializer = hs.get_event_client_serializer()

        self._retention_default_max_lifetime = (
-            hs.config.server.retention_default_max_lifetime
+            hs.config.retention.retention_default_max_lifetime
        )

        self._retention_allowed_lifetime_min = (
-            hs.config.server.retention_allowed_lifetime_min
+            hs.config.retention.retention_allowed_lifetime_min
        )

        self._retention_allowed_lifetime_max = (
-            hs.config.server.retention_allowed_lifetime_max
+            hs.config.retention.retention_allowed_lifetime_max
        )

-        if hs.config.worker.run_background_tasks and hs.config.server.retention_enabled:
+        if (
+            hs.config.worker.run_background_tasks
+            and hs.config.retention.retention_enabled
+        ):
            # Run the purge jobs described in the configuration file.
-            for job in hs.config.server.retention_purge_jobs:
+            for job in hs.config.retention.retention_purge_jobs:
                logger.info("Setting up purge job with config: %s", job)

                self.clock.looping_call(

synapse/storage/databases/main/room.py

@@ -679,8 +679,8 @@ class RoomWorkerStore(SQLBaseStore):
        # policy.
        if not ret:
            return {
-                "min_lifetime": self.config.server.retention_default_min_lifetime,
-                "max_lifetime": self.config.server.retention_default_max_lifetime,
+                "min_lifetime": self.config.retention.retention_default_min_lifetime,
+                "max_lifetime": self.config.retention.retention_default_max_lifetime,
            }

        row = ret[0]
@@ -690,10 +690,10 @@
        # The default values will be None if no default policy has been defined, or if one
        # of the attributes is missing from the default policy.
        if row["min_lifetime"] is None:
-            row["min_lifetime"] = self.config.server.retention_default_min_lifetime
+            row["min_lifetime"] = self.config.retention.retention_default_min_lifetime

        if row["max_lifetime"] is None:
-            row["max_lifetime"] = self.config.server.retention_default_max_lifetime
+            row["max_lifetime"] = self.config.retention.retention_default_max_lifetime

        return row
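
The defaulting logic above is self-contained enough to sketch without the storage layer: a room row missing either lifetime inherits the server-wide default policy. A minimal stand-alone version (the helper name is hypothetical):

from typing import Optional

# Hypothetical helper mirroring the fallback logic in the hunk above.
def apply_default_policy(
    row: dict, default_min: Optional[int], default_max: Optional[int]
) -> dict:
    if row["min_lifetime"] is None:
        row["min_lifetime"] = default_min
    if row["max_lifetime"] is None:
        row["max_lifetime"] = default_max
    return row

# A room with no max_lifetime of its own inherits the default (1y in ms here).
policy = apply_default_policy(
    {"min_lifetime": None, "max_lifetime": None}, None, 31_536_000_000
)
assert policy["max_lifetime"] == 31_536_000_000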