# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from unittest.mock import MagicMock, Mock, patch

from synapse.api.constants import EventTypes, JoinRules
from synapse.api.errors import Codes, ResourceLimitError
from synapse.api.filtering import Filtering
from synapse.api.room_versions import RoomVersions
from synapse.handlers.sync import SyncConfig, SyncResult
from synapse.rest import admin
from synapse.rest.client import knock, login, room
from synapse.server import HomeServer
from synapse.types import UserID, create_requester

import tests.unittest
import tests.utils
from tests.test_utils import make_awaitable


class SyncTestCase(tests.unittest.HomeserverTestCase):
    """Tests Sync Handler."""

    servlets = [
        admin.register_servlets,
        knock.register_servlets,
        login.register_servlets,
        room.register_servlets,
    ]

    def prepare(self, reactor, clock, hs: HomeServer):
        self.sync_handler = self.hs.get_sync_handler()
        self.store = self.hs.get_datastores().main

        # AuthBlocking reads from the hs' config on initialization. We need to
        # modify its config instead of the hs'
        self.auth_blocking = self.hs.get_auth()._auth_blocking

    def test_wait_for_sync_for_user_auth_blocking(self):
        user_id1 = "@user1:test"
        user_id2 = "@user2:test"
        sync_config = generate_sync_config(user_id1)
        requester = create_requester(user_id1)

        self.reactor.advance(100)  # So we get a non-zero clock time
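        # Enable MAU limiting, capped at a single monthly active user.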
        self.auth_blocking._limit_usage_by_mau = True
        self.auth_blocking._max_mau_value = 1

        # Check that the happy case does not throw errors
        self.get_success(self.store.upsert_monthly_active_user(user_id1))
        self.get_success(
            self.sync_handler.wait_for_sync_for_user(requester, sync_config)
        )

        # Test that global lock works
        self.auth_blocking._hs_disabled = True
        e = self.get_failure(
            self.sync_handler.wait_for_sync_for_user(requester, sync_config),
            ResourceLimitError,
        )
        self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)

        self.auth_blocking._hs_disabled = False
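
        # user2 would be the second monthly active user, so with the MAU cap at
        # one their sync should be rejected.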
        sync_config = generate_sync_config(user_id2)
        requester = create_requester(user_id2)

        e = self.get_failure(
            self.sync_handler.wait_for_sync_for_user(requester, sync_config),
            ResourceLimitError,
        )
        self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)

    def test_unknown_room_version(self):
        """
        A room with an unknown room version should not break sync (and should be excluded).
        """
        inviter = self.register_user("creator", "pass", admin=True)
        inviter_tok = self.login("@creator:test", "pass")

        user = self.register_user("user", "pass")
        tok = self.login("user", "pass")

        # Do an initial sync on a different device.
        requester = create_requester(user)
        initial_result = self.get_success(
            self.sync_handler.wait_for_sync_for_user(
                requester, sync_config=generate_sync_config(user, device_id="dev")
            )
        )

        # Create a room as the user.
        joined_room = self.helper.create_room_as(user, tok=tok)

        # Invite the user to the room as someone else.
        invite_room = self.helper.create_room_as(inviter, tok=inviter_tok)
        self.helper.invite(invite_room, targ=user, tok=inviter_tok)
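
        # Create a room the user can knock on: knocking needs a room version
        # that supports it (room version 7 is the first to do so), plus a
        # "knock" join rule.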
        knock_room = self.helper.create_room_as(
            inviter, room_version=RoomVersions.V7.identifier, tok=inviter_tok
        )
        self.helper.send_state(
            knock_room,
            EventTypes.JoinRules,
            {"join_rule": JoinRules.KNOCK},
            tok=inviter_tok,
        )
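        # The user knocks on the room over the client-server API.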
        channel = self.make_request(
            "POST",
            "/_matrix/client/r0/knock/%s" % (knock_room,),
            b"{}",
            tok,
        )
        self.assertEqual(200, channel.code, channel.result)

        # The rooms should appear in the sync response.
        result = self.get_success(
            self.sync_handler.wait_for_sync_for_user(
                requester, sync_config=generate_sync_config(user)
            )
        )
        self.assertIn(joined_room, [r.room_id for r in result.joined])
        self.assertIn(invite_room, [r.room_id for r in result.invited])
        self.assertIn(knock_room, [r.room_id for r in result.knocked])

        # Test an incremental sync (by providing a since_token).
        result = self.get_success(
            self.sync_handler.wait_for_sync_for_user(
                requester,
                sync_config=generate_sync_config(user, device_id="dev"),
                since_token=initial_result.next_batch,
            )
        )
        self.assertIn(joined_room, [r.room_id for r in result.joined])
        self.assertIn(invite_room, [r.room_id for r in result.invited])
        self.assertIn(knock_room, [r.room_id for r in result.knocked])

        # Poke the database and update the room version to an unknown one.
        for room_id in (joined_room, invite_room, knock_room):
            self.get_success(
                self.hs.get_datastores().main.db_pool.simple_update(
                    "rooms",
                    keyvalues={"room_id": room_id},
                    updatevalues={"room_version": "unknown-room-version"},
                    desc="updated-room-version",
                )
            )

        # Blow away caches (supported room versions can only change due to a restart).
        self.store.get_rooms_for_user_with_stream_ordering.invalidate_all()
        self.store._get_event_cache.clear()

        # The rooms should be excluded from the sync response.
        # Get a new request key.
        result = self.get_success(
            self.sync_handler.wait_for_sync_for_user(
                requester, sync_config=generate_sync_config(user)
            )
        )
        self.assertNotIn(joined_room, [r.room_id for r in result.joined])
        self.assertNotIn(invite_room, [r.room_id for r in result.invited])
        self.assertNotIn(knock_room, [r.room_id for r in result.knocked])

        # The rooms should also not be in an incremental sync.
        result = self.get_success(
            self.sync_handler.wait_for_sync_for_user(
                requester,
                sync_config=generate_sync_config(user, device_id="dev"),
                since_token=initial_result.next_batch,
            )
        )
        self.assertNotIn(joined_room, [r.room_id for r in result.joined])
        self.assertNotIn(invite_room, [r.room_id for r in result.invited])
        self.assertNotIn(knock_room, [r.room_id for r in result.knocked])

    def test_ban_wins_race_with_join(self):
        """Rooms shouldn't appear under "joined" if a join loses a race to a ban.

        A complicated edge case. Imagine the following scenario:

        * you attempt to join a room
        * racing with that is a ban which comes in over federation, which ends up with
          an earlier stream_ordering than the join.
        * you get a sync response with a sync token which is _after_ the ban, but before
          the join
        * now your join lands; it is a valid event because its `prev_event`s predate the
          ban, but will not make it into current_state_events (because bans win over
          joins in state res, essentially).
        * when we do an incremental sync from that sync token, the only event in the
          timeline is your join ... and yet you aren't joined.

        The ban coming in over federation isn't crucial for this behaviour; the key
        requirements are:
        1. the homeserver generates a join event with prev_events that precede the ban
           (so that it passes the "are you banned" test)
        2. the join event has a stream_ordering after that of the ban.

        We use monkeypatching to artificially trigger condition (1).
        """
        # A local user Alice creates a room.
        owner = self.register_user("alice", "password")
        owner_tok = self.login(owner, "password")
        room_id = self.helper.create_room_as(owner, is_public=True, tok=owner_tok)

        # Do a sync as Alice to get the latest event in the room.
        alice_sync_result: SyncResult = self.get_success(
            self.sync_handler.wait_for_sync_for_user(
                create_requester(owner), generate_sync_config(owner)
            )
        )
        self.assertEqual(len(alice_sync_result.joined), 1)
        self.assertEqual(alice_sync_result.joined[0].room_id, room_id)
        last_room_creation_event_id = (
            alice_sync_result.joined[0].timeline.events[-1].event_id
        )

        # Eve, a ne'er-do-well, registers.
        eve = self.register_user("eve", "password")
        eve_token = self.login(eve, "password")

        # Alice preemptively bans Eve.
        self.helper.ban(room_id, owner, eve, tok=owner_tok)

        # Eve syncs.
        eve_requester = create_requester(eve)
        eve_sync_config = generate_sync_config(eve)
        eve_sync_after_ban: SyncResult = self.get_success(
            self.sync_handler.wait_for_sync_for_user(eve_requester, eve_sync_config)
        )

        # Sanity check this sync result. We shouldn't be joined to the room.
        self.assertEqual(eve_sync_after_ban.joined, [])

        # Eve tries to join the room. We monkey patch the internal logic which selects
        # the prev_events used when creating the join event, such that the ban does not
        # precede the join.
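        # (The real get_prev_events_for_room is async, so the mock's return
        # value is wrapped with make_awaitable.)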
        mocked_get_prev_events = patch.object(
            self.hs.get_datastores().main,
            "get_prev_events_for_room",
            new_callable=MagicMock,
            return_value=make_awaitable([last_room_creation_event_id]),
        )
        with mocked_get_prev_events:
            self.helper.join(room_id, eve, tok=eve_token)

        # Eve makes a second, incremental sync.
        eve_incremental_sync_after_join: SyncResult = self.get_success(
            self.sync_handler.wait_for_sync_for_user(
                eve_requester,
                eve_sync_config,
                since_token=eve_sync_after_ban.next_batch,
            )
        )
        # Eve should not see herself as joined to the room.
        self.assertEqual(eve_incremental_sync_after_join.joined, [])

        # If we did a third initial sync, we should _still_ see eve is not joined to the room.
        eve_initial_sync_after_join: SyncResult = self.get_success(
            self.sync_handler.wait_for_sync_for_user(
                eve_requester,
                eve_sync_config,
                since_token=None,
            )
        )
        self.assertEqual(eve_initial_sync_after_join.joined, [])
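

# Sync responses are cached per request key, so each sync config generated for
# a test needs a unique request key; otherwise a repeated sync could be served
# the previous sync's cached response.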
_request_key = 0


def generate_sync_config(
    user_id: str, device_id: Optional[str] = "device_id"
) -> SyncConfig:
    """Generate a sync config (with a unique request key)."""
    global _request_key
    _request_key += 1
    return SyncConfig(
        user=UserID.from_string(user_id),
        filter_collection=Filtering(Mock()).DEFAULT_FILTER_COLLECTION,
        is_guest=False,
        request_key=("request_key", _request_key),
        device_id=device_id,
    )