Prevent expired events from being filtered out when retention is disabled (#12611)
Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Co-authored-by: Patrick Cloke <clokep@users.noreply.github.com>
This commit is contained in:
parent a608ac847b
commit 4cc4229cd7

7 changed files with 71 additions and 32 deletions
changelog.d/12611.bugfix (new file)

@@ -0,0 +1 @@
+Fix a bug introduced in Synapse 1.7.0 that would prevent events from being sent to clients if there's a retention policy in the room when the support for retention policies is disabled.
@@ -239,7 +239,7 @@ class PaginationHandler:
             # defined in the server's configuration, we can safely assume that's the
             # case and use it for this room.
             max_lifetime = (
-                retention_policy["max_lifetime"] or self._retention_default_max_lifetime
+                retention_policy.max_lifetime or self._retention_default_max_lifetime
             )

             # Cap the effective max_lifetime to be within the range allowed in the
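The only behavioural subtlety in the hunk above is the "or" fallback, which works the same on the new attribute as it did on the old dict key: a room policy whose max_lifetime is unset falls through to the server-wide default. A minimal sketch of just that fallback, not Synapse code, with a made-up default value:

# Minimal sketch, not Synapse code: the fallback relied on in the hunk above.
# SERVER_DEFAULT_MAX_LIFETIME_MS is a hypothetical stand-in for the configured default.
from typing import Optional

SERVER_DEFAULT_MAX_LIFETIME_MS = 365 * 24 * 60 * 60 * 1000  # hypothetical default


def effective_max_lifetime(room_max_lifetime: Optional[int]) -> Optional[int]:
    # None (no usable room policy) falls through to the server default,
    # exactly as the old dict lookup did.
    return room_max_lifetime or SERVER_DEFAULT_MAX_LIFETIME_MS


assert effective_max_lifetime(None) == SERVER_DEFAULT_MAX_LIFETIME_MS
assert effective_max_lifetime(3_600_000) == 3_600_000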
@@ -45,7 +45,7 @@ from synapse.storage.database import (
 from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
 from synapse.storage.types import Cursor
 from synapse.storage.util.id_generators import IdGenerator
-from synapse.types import JsonDict, ThirdPartyInstanceID
+from synapse.types import JsonDict, RetentionPolicy, ThirdPartyInstanceID
 from synapse.util import json_encoder
 from synapse.util.caches.descriptors import cached
 from synapse.util.stringutils import MXC_REGEX
@@ -699,7 +699,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
         await self.db_pool.runInteraction("delete_ratelimit", delete_ratelimit_txn)

     @cached()
-    async def get_retention_policy_for_room(self, room_id: str) -> Dict[str, int]:
+    async def get_retention_policy_for_room(self, room_id: str) -> RetentionPolicy:
         """Get the retention policy for a given room.

         If no retention policy has been found for this room, returns a policy defined
@@ -707,12 +707,20 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
         the 'max_lifetime' if no default policy has been defined in the server's
         configuration).

+        If support for retention policies is disabled, a policy with a 'min_lifetime' and
+        'max_lifetime' of None is returned.
+
         Args:
             room_id: The ID of the room to get the retention policy of.

         Returns:
             A dict containing "min_lifetime" and "max_lifetime" for this room.
         """
+        # If the room retention feature is disabled, return a policy with no minimum nor
+        # maximum. This prevents incorrectly filtering out events when sending to
+        # the client.
+        if not self.config.retention.retention_enabled:
+            return RetentionPolicy()

         def get_retention_policy_for_room_txn(
             txn: LoggingTransaction,
@@ -736,10 +744,10 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
         # If we don't know this room ID, ret will be None, in this case return the default
         # policy.
         if not ret:
-            return {
-                "min_lifetime": self.config.retention.retention_default_min_lifetime,
-                "max_lifetime": self.config.retention.retention_default_max_lifetime,
-            }
+            return RetentionPolicy(
+                min_lifetime=self.config.retention.retention_default_min_lifetime,
+                max_lifetime=self.config.retention.retention_default_max_lifetime,
+            )

         min_lifetime = ret[0]["min_lifetime"]
         max_lifetime = ret[0]["max_lifetime"]
@@ -754,10 +762,10 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
         if max_lifetime is None:
             max_lifetime = self.config.retention.retention_default_max_lifetime

-        return {
-            "min_lifetime": min_lifetime,
-            "max_lifetime": max_lifetime,
-        }
+        return RetentionPolicy(
+            min_lifetime=min_lifetime,
+            max_lifetime=max_lifetime,
+        )

     async def get_media_mxcs_in_room(self, room_id: str) -> Tuple[List[str], List[str]]:
         """Retrieves all the local and remote media MXC URIs in a given room
@@ -994,7 +1002,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):

     async def get_rooms_for_retention_period_in_range(
         self, min_ms: Optional[int], max_ms: Optional[int], include_null: bool = False
-    ) -> Dict[str, Dict[str, Optional[int]]]:
+    ) -> Dict[str, RetentionPolicy]:
         """Retrieves all of the rooms within the given retention range.

         Optionally includes the rooms which don't have a retention policy.
@@ -1016,7 +1024,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):

         def get_rooms_for_retention_period_in_range_txn(
             txn: LoggingTransaction,
-        ) -> Dict[str, Dict[str, Optional[int]]]:
+        ) -> Dict[str, RetentionPolicy]:
             range_conditions = []
             args = []

@@ -1047,10 +1055,10 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
             rooms_dict = {}

             for row in rows:
-                rooms_dict[row["room_id"]] = {
-                    "min_lifetime": row["min_lifetime"],
-                    "max_lifetime": row["max_lifetime"],
-                }
+                rooms_dict[row["room_id"]] = RetentionPolicy(
+                    min_lifetime=row["min_lifetime"],
+                    max_lifetime=row["max_lifetime"],
+                )

             if include_null:
                 # If required, do a second query that retrieves all of the rooms we know
@@ -1065,10 +1073,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
                 # policy in its state), add it with a null policy.
                 for row in rows:
                     if row["room_id"] not in rooms_dict:
-                        rooms_dict[row["room_id"]] = {
-                            "min_lifetime": None,
-                            "max_lifetime": None,
-                        }
+                        rooms_dict[row["room_id"]] = RetentionPolicy()

             return rooms_dict

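Taken together with the previous hunks, every value in the mapping returned by get_rooms_for_retention_period_in_range is now a RetentionPolicy, including rooms that have no retention state at all. A rough sketch of how a caller sees that, assuming a Synapse checkout that already contains the synapse/types.py addition shown in the next hunk; the room IDs are made up:

# Rough sketch, not taken from Synapse: what the reworked return value looks like to a
# caller. Requires a Synapse install that includes the RetentionPolicy type added below.
from synapse.types import RetentionPolicy

one_day_ms = 24 * 60 * 60 * 1000
rooms = {
    "!with_policy:example.org": RetentionPolicy(max_lifetime=one_day_ms),
    "!without_policy:example.org": RetentionPolicy(),  # previously a dict of Nones
}

for room_id, policy in rooms.items():
    # Uniform attribute access for every room; no dict keys to remember.
    print(room_id, policy.min_lifetime, policy.max_lifetime)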
@@ -932,3 +932,9 @@ class UserProfile(TypedDict):
     user_id: str
     display_name: Optional[str]
     avatar_url: Optional[str]
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class RetentionPolicy:
+    min_lifetime: Optional[int] = None
+    max_lifetime: Optional[int] = None
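The new type is a plain frozen attrs container: both fields default to None (read downstream as "no limit"), instances are immutable, and they compare by value. A quick behavioural sketch; the class body is copied from the hunk above, everything around it is illustrative:

# Behavioural sketch of the class added above. The class definition is copied from the
# hunk; the surrounding assertions are illustrative only.
from typing import Optional

import attr


@attr.s(auto_attribs=True, frozen=True, slots=True)
class RetentionPolicy:
    min_lifetime: Optional[int] = None
    max_lifetime: Optional[int] = None


one_day_ms = 24 * 60 * 60 * 1000

default = RetentionPolicy()                      # both lifetimes default to None
room = RetentionPolicy(max_lifetime=one_day_ms)

assert default.max_lifetime is None              # "no limit": nothing gets expired
assert room.max_lifetime == one_day_ms
assert default == RetentionPolicy()              # attrs generates value-based equality

try:
    room.max_lifetime = None                     # frozen=True forbids mutation
except attr.exceptions.FrozenInstanceError:
    pass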
@@ -22,7 +22,7 @@ from synapse.events import EventBase
 from synapse.events.utils import prune_event
 from synapse.storage import Storage
 from synapse.storage.state import StateFilter
-from synapse.types import StateMap, get_domain_from_id
+from synapse.types import RetentionPolicy, StateMap, get_domain_from_id

 logger = logging.getLogger(__name__)

@@ -94,7 +94,7 @@ async def filter_events_for_client(

     if filter_send_to_client:
         room_ids = {e.room_id for e in events}
-        retention_policies = {}
+        retention_policies: Dict[str, RetentionPolicy] = {}

         for room_id in room_ids:
             retention_policies[
@@ -137,7 +137,7 @@ async def filter_events_for_client(
             # events.
             if not event.is_state():
                 retention_policy = retention_policies[event.room_id]
-                max_lifetime = retention_policy.get("max_lifetime")
+                max_lifetime = retention_policy.max_lifetime

                 if max_lifetime is not None:
                     oldest_allowed_ts = storage.main.clock.time_msec() - max_lifetime
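This call site is where the bug actually bit: with retention support disabled, the room's policy now carries a max_lifetime of None, so no event can ever be treated as expired. A condensed sketch of the check this hunk feeds, not the real filter_events_for_client:

# Condensed sketch of the expiry check, not the real filter_events_for_client. With a
# max_lifetime of None (retention disabled, or no limit) nothing is ever expired.
import time
from typing import Optional


def is_expired(origin_server_ts: int, max_lifetime: Optional[int], now_ms: int) -> bool:
    if max_lifetime is None:
        return False
    oldest_allowed_ts = now_ms - max_lifetime
    return origin_server_ts < oldest_allowed_ts


one_day_ms = 24 * 60 * 60 * 1000
now_ms = int(time.time() * 1000)
two_days_old = now_ms - 2 * one_day_ms

assert is_expired(two_days_old, one_day_ms, now_ms)    # a one-day room policy applies
assert not is_expired(two_days_old, None, now_ms)      # retention disabled: never expired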
@@ -995,7 +995,7 @@ class BundledAggregationsTestCase(BaseRelationsTestCase):
                 bundled_aggregations,
             )

-        self._test_bundled_aggregations(RelationTypes.ANNOTATION, assert_annotations, 7)
+        self._test_bundled_aggregations(RelationTypes.ANNOTATION, assert_annotations, 6)

     def test_annotation_to_annotation(self) -> None:
         """Any relation to an annotation should be ignored."""
@@ -1031,7 +1031,7 @@ class BundledAggregationsTestCase(BaseRelationsTestCase):
                 bundled_aggregations,
             )

-        self._test_bundled_aggregations(RelationTypes.REFERENCE, assert_annotations, 7)
+        self._test_bundled_aggregations(RelationTypes.REFERENCE, assert_annotations, 6)

     def test_thread(self) -> None:
         """
@@ -1060,7 +1060,7 @@ class BundledAggregationsTestCase(BaseRelationsTestCase):
                 bundled_aggregations.get("latest_event"),
             )

-        self._test_bundled_aggregations(RelationTypes.THREAD, assert_thread, 10)
+        self._test_bundled_aggregations(RelationTypes.THREAD, assert_thread, 9)

     def test_thread_with_bundled_aggregations_for_latest(self) -> None:
         """
@@ -1106,7 +1106,7 @@ class BundledAggregationsTestCase(BaseRelationsTestCase):
                 bundled_aggregations["latest_event"].get("unsigned"),
             )

-        self._test_bundled_aggregations(RelationTypes.THREAD, assert_thread, 10)
+        self._test_bundled_aggregations(RelationTypes.THREAD, assert_thread, 9)

     def test_nested_thread(self) -> None:
         """
@@ -11,6 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from typing import Any, Dict
 from unittest.mock import Mock

 from twisted.test.proto_helpers import MemoryReactor
@@ -252,16 +253,24 @@ class RetentionNoDefaultPolicyTestCase(unittest.HomeserverTestCase):
         room.register_servlets,
     ]

-    def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
-        config = self.default_config()
-        config["retention"] = {
+    def default_config(self) -> Dict[str, Any]:
+        config = super().default_config()
+
+        retention_config = {
             "enabled": True,
         }

+        # Update this config with what's in the default config so that
+        # override_config works as expected.
+        retention_config.update(config.get("retention", {}))
+        config["retention"] = retention_config
+
+        return config
+
+    def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
         mock_federation_client = Mock(spec=["backfill"])

         self.hs = self.setup_test_homeserver(
-            config=config,
             federation_client=mock_federation_client,
         )
         return self.hs
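The reason the retention settings move into default_config is spelled out in the new comment: merging in whatever the base config already carries lets an override_config decorator (such as the {"enabled": False} used by the new test below) take precedence over the hard-coded "enabled": True. A stand-alone sketch of just that merge, using plain dicts in place of the homeserver config:

# Stand-alone sketch of the merge performed by the new default_config, with plain dicts.
# Retention settings already present in the base config win over the hard-coded default.
from typing import Any, Dict


def merged_retention(base_config: Dict[str, Any]) -> Dict[str, Any]:
    retention_config: Dict[str, Any] = {"enabled": True}
    retention_config.update(base_config.get("retention", {}))
    return retention_config


assert merged_retention({}) == {"enabled": True}
assert merged_retention({"retention": {"enabled": False}}) == {"enabled": False}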
@@ -295,6 +304,24 @@ class RetentionNoDefaultPolicyTestCase(unittest.HomeserverTestCase):

         self._test_retention(room_id, expected_code_for_first_event=404)

+    @unittest.override_config({"retention": {"enabled": False}})
+    def test_visibility_when_disabled(self) -> None:
+        """Retention policies should be ignored when the retention feature is disabled."""
+        room_id = self.helper.create_room_as(self.user_id, tok=self.token)
+
+        self.helper.send_state(
+            room_id=room_id,
+            event_type=EventTypes.Retention,
+            body={"max_lifetime": one_day_ms},
+            tok=self.token,
+        )
+
+        resp = self.helper.send(room_id=room_id, body="test", tok=self.token)
+
+        self.reactor.advance(one_day_ms * 2 / 1000)
+
+        self.get_event(room_id, resp["event_id"])
+
     def _test_retention(
         self, room_id: str, expected_code_for_first_event: int = 200
     ) -> None: