2019-03-25 17:38:05 +01:00
|
|
|
# Copyright 2019 New Vector Ltd
|
|
|
|
#
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
# You may obtain a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
2020-04-29 13:30:36 +02:00
|
|
|
import logging
|
2022-05-19 17:29:08 +02:00
|
|
|
from collections import defaultdict
|
|
|
|
from typing import Any, Dict, List, Optional, Set, Tuple
|
2020-04-28 18:42:03 +02:00
|
|
|
|
2022-02-08 17:03:08 +01:00
|
|
|
from twisted.internet.address import IPv4Address
|
2023-02-06 15:55:00 +01:00
|
|
|
from twisted.internet.protocol import Protocol, connectionDone
|
|
|
|
from twisted.python.failure import Failure
|
|
|
|
from twisted.test.proto_helpers import MemoryReactor
|
2020-12-02 16:26:25 +01:00
|
|
|
from twisted.web.resource import Resource
|
2020-04-28 18:42:03 +02:00
|
|
|
|
2021-04-14 18:06:06 +02:00
|
|
|
from synapse.app.generic_worker import GenericWorkerServer
|
2020-10-02 10:57:12 +02:00
|
|
|
from synapse.http.site import SynapseRequest, SynapseSite
|
2020-12-02 16:26:25 +01:00
|
|
|
from synapse.replication.http import ReplicationRestResource
|
2021-04-14 18:06:06 +02:00
|
|
|
from synapse.replication.tcp.client import ReplicationDataHandler
|
2020-04-06 10:58:42 +02:00
|
|
|
from synapse.replication.tcp.handler import ReplicationCommandHandler
|
2022-08-19 14:25:24 +02:00
|
|
|
from synapse.replication.tcp.protocol import (
|
|
|
|
ClientReplicationStreamProtocol,
|
2021-03-09 13:41:32 +01:00
|
|
|
ServerReplicationStreamProtocol,
|
|
|
|
)
|
2022-08-19 14:25:24 +02:00
|
|
|
from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
|
2020-05-01 16:21:35 +02:00
|
|
|
from synapse.server import HomeServer
|
2023-02-06 15:55:00 +01:00
|
|
|
from synapse.util import Clock
|
2019-03-25 17:38:05 +01:00
|
|
|
|
|
|
|
from tests import unittest
|
2020-11-15 23:49:21 +01:00
|
|
|
from tests.server import FakeTransport
|
2022-05-19 17:29:08 +02:00
|
|
|
from tests.utils import USE_POSTGRES_FOR_TESTS
|
2019-03-25 17:38:05 +01:00
|
|
|
|
2020-10-29 12:17:35 +01:00
|
|
|
try:
|
|
|
|
import hiredis
|
|
|
|
except ImportError:
|
2021-03-29 15:42:38 +02:00
|
|
|
hiredis = None # type: ignore
|
2020-10-29 12:17:35 +01:00
|
|
|
|
2020-04-28 18:42:03 +02:00
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
2019-03-25 17:38:05 +01:00
|
|
|
|
|
|
|
class BaseStreamTestCase(unittest.HomeserverTestCase):
    """Base class for tests of the replication streams"""

    # hiredis is an optional dependency so we don't want to require it for running
    # the tests.
    if not hiredis:
        skip = "Requires hiredis"

    if not USE_POSTGRES_FOR_TESTS:
        # Redis replication only takes place on Postgres
        skip = "Requires Postgres"

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        """Set up a replication server for the master HS, plus a worker HS with
        a replication client, leaving them disconnected (call `reconnect` to
        wire them together via fake transports).
        """
        # build a replication server
        server_factory = ReplicationStreamProtocolFactory(hs)
        self.streamer = hs.get_replication_streamer()
        self.server: ServerReplicationStreamProtocol = server_factory.buildProtocol(
            IPv4Address("TCP", "127.0.0.1", 0)
        )

        # Make a new HomeServer object for the worker
        self.reactor.lookups["testserv"] = "1.2.3.4"
        self.worker_hs = self.setup_test_homeserver(
            federation_http_client=None,
            homeserver_to_use=GenericWorkerServer,
            config=self._get_worker_hs_config(),
            reactor=self.reactor,
        )

        # Since we use sqlite in memory databases we need to make sure the
        # databases objects are the same.
        self.worker_hs.get_datastores().main.db_pool = hs.get_datastores().main.db_pool

        # Normally we'd pass in the handler to `setup_test_homeserver`, which would
        # eventually hit "Install @cache_in_self attributes" in tests/utils.py.
        # Unfortunately our handler wants a reference to the homeserver. That leaves
        # us with a chicken-and-egg problem.
        # We can workaround this: create the homeserver first, create the handler
        # and bodge it in after the fact. The bodging requires us to know the
        # dirty details of how `cache_in_self` works. We politely ask mypy to
        # ignore our dirty dealings.
        self.test_handler = self._build_replication_data_handler()
        self.worker_hs._replication_data_handler = self.test_handler  # type: ignore[attr-defined]

        repl_handler = ReplicationCommandHandler(self.worker_hs)
        self.client = ClientReplicationStreamProtocol(
            self.worker_hs,
            "client",
            "test",
            clock,
            repl_handler,
        )

        # Fake transports linking client and server; None while disconnected.
        self._client_transport: Optional[FakeTransport] = None
        self._server_transport: Optional[FakeTransport] = None

    def create_resource_dict(self) -> Dict[str, Resource]:
        """Add the replication REST resource to the default resource tree."""
        d = super().create_resource_dict()
        d["/_synapse/replication"] = ReplicationRestResource(self.hs)
        return d

    def _get_worker_hs_config(self) -> dict:
        """Return the config dict used for the worker homeserver."""
        config = self.default_config()
        config["worker_app"] = "synapse.app.generic_worker"
        config["instance_map"] = {"main": {"host": "testserv", "port": 8765}}
        return config

    def _build_replication_data_handler(self) -> "TestReplicationDataHandler":
        """Build the replication data handler to bodge into the worker HS.

        Subclasses may override this to supply a customised handler.
        """
        return TestReplicationDataHandler(self.worker_hs)

    def reconnect(self) -> None:
        """(Re)connect the client and server protocols over fresh fake transports,
        closing any existing connections first.
        """
        if self._client_transport:
            self.client.close()

        if self._server_transport:
            self.server.close()

        self._client_transport = FakeTransport(self.server, self.reactor)
        self.client.makeConnection(self._client_transport)

        self._server_transport = FakeTransport(self.client, self.reactor)
        self.server.makeConnection(self._server_transport)

    def disconnect(self) -> None:
        """Tear down the client/server connection, if connected."""
        if self._client_transport:
            self._client_transport = None
            self.client.close()

        if self._server_transport:
            self._server_transport = None
            self.server.close()

    def replicate(self) -> None:
        """Tell the master side of replication that something has happened, and then
        wait for the replication to occur.
        """
        self.streamer.on_notifier_poke()
        self.pump(0.1)

    def handle_http_replication_attempt(self) -> SynapseRequest:
        """Asserts that a connection attempt was made to the master HS on the
        HTTP replication port, then proxies it to the master HS object to be
        handled.

        Returns:
            The request object received by master HS.
        """

        # We should have an outbound connection attempt.
        clients = self.reactor.tcpClients
        self.assertEqual(len(clients), 1)
        (host, port, client_factory, _timeout, _bindAddress) = clients.pop(0)
        self.assertEqual(host, "1.2.3.4")
        self.assertEqual(port, 8765)

        # Set up client side protocol
        client_address = IPv4Address("TCP", "127.0.0.1", 1234)
        client_protocol = client_factory.buildProtocol(("127.0.0.1", 1234))

        # Set up the server side protocol
        server_address = IPv4Address("TCP", host, port)
        channel = self.site.buildProtocol((host, port))

        # hook into the channel's request factory so that we can keep a record
        # of the requests
        requests: List[SynapseRequest] = []
        real_request_factory = channel.requestFactory

        def request_factory(*args: Any, **kwargs: Any) -> SynapseRequest:
            # Delegate to the real factory but remember every request built.
            request = real_request_factory(*args, **kwargs)
            requests.append(request)
            return request

        channel.requestFactory = request_factory

        # Connect client to server and vice versa.
        client_to_server_transport = FakeTransport(
            channel, self.reactor, client_protocol, server_address, client_address
        )
        client_protocol.makeConnection(client_to_server_transport)

        server_to_client_transport = FakeTransport(
            client_protocol, self.reactor, channel, client_address, server_address
        )
        channel.makeConnection(server_to_client_transport)

        # The request will now be processed by `self.site` and the response
        # streamed back.
        self.reactor.advance(0)

        # We tear down the connection so it doesn't get reused without our
        # knowledge.
        server_to_client_transport.loseConnection()
        client_to_server_transport.loseConnection()

        # there should have been exactly one request
        self.assertEqual(len(requests), 1)

        return requests[0]

    def assert_request_is_get_repl_stream_updates(
        self, request: SynapseRequest, stream_name: str
    ) -> None:
        """Asserts that the given request is a HTTP replication request for
        fetching updates for given stream.
        """

        path: bytes = request.path  # type: ignore
        self.assertRegex(
            path,
            rb"^/_synapse/replication/get_repl_stream_updates/%s/[^/]+$"
            % (stream_name.encode("ascii"),),
        )

        self.assertEqual(request.method, b"GET")
|
|
|
|
|
|
|
|
|
2020-07-15 16:27:35 +02:00
|
|
|
class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase):
    """Base class for tests running multiple workers.

    Enables Redis, providing a fake Redis server.

    Automatically handle HTTP replication requests from workers to master,
    unlike `BaseStreamTestCase`.
    """

    # hiredis is an optional dependency, and is required to talk to the fake
    # Redis server.
    if not hiredis:
        skip = "Requires hiredis"

    if not USE_POSTGRES_FOR_TESTS:
        # Redis replication only takes place on Postgres
        skip = "Requires Postgres"

    def default_config(self) -> Dict[str, Any]:
        """
        Overrides the default config to enable Redis.
        Even if the test only uses make_worker_hs, the main process needs Redis
        enabled otherwise it won't create a Fake Redis server to listen on the
        Redis port and accept fake TCP connections.
        """
        base = super().default_config()
        base["redis"] = {"enabled": True}
        base["instance_map"] = {"main": {"host": "testserv", "port": 8765}}
        return base

    def setUp(self) -> None:
        """Start the fake Redis server and register callbacks so that
        replication connections from workers are wired up automatically.
        """
        super().setUp()

        # build a replication server
        self.streamer = self.hs.get_replication_streamer()

        # Fake in memory Redis server that servers can connect to.
        self._redis_server = FakeRedisPubSubServer()

        # We may have an attempt to connect to redis for the external cache already.
        self.connect_any_redis_attempts()

        store = self.hs.get_datastores().main
        self.database_pool = store.db_pool

        self.reactor.lookups["testserv"] = "1.2.3.4"
        self.reactor.lookups["localhost"] = "127.0.0.1"

        # A map from a HS instance to the associated HTTP Site to use for
        # handling inbound HTTP requests to that instance.
        self._hs_to_site = {self.hs: self.site}

        # Handle attempts to connect to fake redis server.
        self.reactor.add_tcp_client_callback(
            "localhost",
            6379,
            self.connect_any_redis_attempts,
        )

        self.hs.get_replication_command_handler().start_replication(self.hs)

        # When we see a connection attempt to the master replication listener we
        # automatically set up the connection. This is so that tests don't
        # manually have to go and explicitly set it up each time (plus sometimes
        # it is impossible to write the handling explicitly in the tests).
        #
        # Register the master replication listener:
        self.reactor.add_tcp_client_callback(
            "1.2.3.4",
            8765,
            lambda: self._handle_http_replication_attempt(self.hs, 8765),
        )

    def create_test_resource(self) -> ReplicationRestResource:
        """Overrides `HomeserverTestCase.create_test_resource`."""
        # We override this so that it automatically registers all the HTTP
        # replication servlets, without having to explicitly do that in all
        # subclasses.

        resource = ReplicationRestResource(self.hs)

        for servlet in self.servlets:
            servlet(self.hs, resource)

        return resource

    def make_worker_hs(
        self, worker_app: str, extra_config: Optional[dict] = None, **kwargs: Any
    ) -> HomeServer:
        """Make a new worker HS instance, correctly connecting replication
        stream to the master HS.

        Args:
            worker_app: Type of worker, e.g. `synapse.app.generic_worker`.
            extra_config: Any extra config to use for this instances.
            **kwargs: Options that get passed to `self.setup_test_homeserver`,
                useful to e.g. pass some mocks for things like `federation_http_client`

        Returns:
            The new worker HomeServer instance.
        """

        config = self._get_worker_hs_config()
        config["worker_app"] = worker_app
        config.update(extra_config or {})

        worker_hs = self.setup_test_homeserver(
            homeserver_to_use=GenericWorkerServer,
            config=config,
            reactor=self.reactor,
            **kwargs,
        )

        # If the instance is in the `instance_map` config then workers may try
        # and send HTTP requests to it, so we register it with
        # `_handle_http_replication_attempt` like we do with the master HS.
        instance_name = worker_hs.get_instance_name()
        instance_loc = worker_hs.config.worker.instance_map.get(instance_name)
        if instance_loc:
            # Ensure the host is one that has a fake DNS entry.
            if instance_loc.host not in self.reactor.lookups:
                raise Exception(
                    "Host does not have an IP for instance_map[%r].host = %r"
                    % (
                        instance_name,
                        instance_loc.host,
                    )
                )

            # Copy the port into a new, non-Optional variable so mypy knows we're
            # not going to reset `instance_loc` to `None` under its feet. See
            # https://mypy.readthedocs.io/en/latest/common_issues.html#narrowing-and-inner-functions
            port = instance_loc.port

            self.reactor.add_tcp_client_callback(
                self.reactor.lookups[instance_loc.host],
                instance_loc.port,
                lambda: self._handle_http_replication_attempt(worker_hs, port),
            )

        # Share the master's DB connection pool so all instances see the same data.
        store = worker_hs.get_datastores().main
        store.db_pool._db_pool = self.database_pool._db_pool

        # Set up a resource for the worker
        resource = ReplicationRestResource(worker_hs)

        for servlet in self.servlets:
            servlet(worker_hs, resource)

        self._hs_to_site[worker_hs] = SynapseSite(
            logger_name="synapse.access.http.fake",
            site_tag="{}-{}".format(
                worker_hs.config.server.server_name, worker_hs.get_instance_name()
            ),
            config=worker_hs.config.server.listeners[0],
            resource=resource,
            server_version_string="1",
            max_request_body_size=8192,
            reactor=self.reactor,
        )

        worker_hs.get_replication_command_handler().start_replication(worker_hs)

        return worker_hs

    def _get_worker_hs_config(self) -> dict:
        """Return the base config dict for a worker homeserver."""
        config = self.default_config()
        return config

    def replicate(self) -> None:
        """Tell the master side of replication that something has happened, and then
        wait for the replication to occur.
        """
        self.streamer.on_notifier_poke()
        self.pump()

    def _handle_http_replication_attempt(self, hs: HomeServer, repl_port: int) -> None:
        """Handles a connection attempt to the given HS replication HTTP
        listener on the given port.
        """

        # We should have at least one outbound connection attempt, where the
        # last is one to the HTTP replication IP/port.
        clients = self.reactor.tcpClients
        self.assertGreaterEqual(len(clients), 1)
        (host, port, client_factory, _timeout, _bindAddress) = clients.pop()
        self.assertEqual(host, "1.2.3.4")
        self.assertEqual(port, repl_port)

        # Set up client side protocol
        client_address = IPv4Address("TCP", "127.0.0.1", 1234)
        client_protocol = client_factory.buildProtocol(("127.0.0.1", 1234))

        # Set up the server side protocol
        server_address = IPv4Address("TCP", host, port)
        channel = self._hs_to_site[hs].buildProtocol((host, port))

        # Connect client to server and vice versa.
        client_to_server_transport = FakeTransport(
            channel, self.reactor, client_protocol, server_address, client_address
        )
        client_protocol.makeConnection(client_to_server_transport)

        server_to_client_transport = FakeTransport(
            client_protocol, self.reactor, channel, client_address, server_address
        )
        channel.makeConnection(server_to_client_transport)

        # Note: at this point we've wired everything up, but we need to return
        # before the data starts flowing over the connections as this is called
        # inside `connectTCP` before the connection has been passed back to the
        # code that requested the TCP connection.

    def connect_any_redis_attempts(self) -> None:
        """If redis is enabled we need to deal with workers connecting to a
        redis server. We don't want to use a real Redis server so we use a
        fake one.
        """
        clients = self.reactor.tcpClients
        while clients:
            (host, port, client_factory, _timeout, _bindAddress) = clients.pop(0)
            self.assertEqual(host, "localhost")
            self.assertEqual(port, 6379)

            client_address = IPv4Address("TCP", "127.0.0.1", 6379)
            client_protocol = client_factory.buildProtocol(client_address)

            server_address = IPv4Address("TCP", host, port)
            server_protocol = self._redis_server.buildProtocol(server_address)

            client_to_server_transport = FakeTransport(
                server_protocol, self.reactor, client_protocol
            )
            client_protocol.makeConnection(client_to_server_transport)

            server_to_client_transport = FakeTransport(
                client_protocol, self.reactor, server_protocol
            )
            server_protocol.makeConnection(server_to_client_transport)
|
2020-10-02 10:57:12 +02:00
|
|
|
|
2020-07-15 16:27:35 +02:00
|
|
|
|
2021-04-14 18:06:06 +02:00
|
|
|
class TestReplicationDataHandler(ReplicationDataHandler):
    """Drop-in replacement for ReplicationDataHandler that records every
    RDATA row it receives so tests can inspect them afterwards.
    """

    def __init__(self, hs: HomeServer):
        super().__init__(hs)

        # Every (stream_name, token, row) tuple received via RDATA, in order.
        self.received_rdata_rows: List[Tuple[str, int, Any]] = []

    async def on_rdata(
        self, stream_name: str, instance_name: str, token: int, rows: list
    ) -> None:
        # Let the real handler process the rows first, then record them.
        await super().on_rdata(stream_name, instance_name, token, rows)
        self.received_rdata_rows.extend((stream_name, token, row) for row in rows)
|
|
|
|
|
2020-04-28 18:42:03 +02:00
|
|
|
|
2020-10-02 10:57:12 +02:00
|
|
|
class FakeRedisPubSubServer:
    """A fake Redis server for pub/sub."""

    def __init__(self) -> None:
        # Map from channel name to the set of connections subscribed to it.
        self._subscribers_by_channel: Dict[
            bytes, Set["FakeRedisPubSubProtocol"]
        ] = defaultdict(set)

    def add_subscriber(self, conn: "FakeRedisPubSubProtocol", channel: bytes) -> None:
        """A connection has called SUBSCRIBE"""
        self._subscribers_by_channel[channel].add(conn)

    def remove_subscriber(self, conn: "FakeRedisPubSubProtocol") -> None:
        """A connection has lost connection"""
        # The connection may be subscribed to multiple channels, so remove it
        # from all of them.
        for subscribers in self._subscribers_by_channel.values():
            subscribers.discard(conn)

    def publish(
        self, conn: "FakeRedisPubSubProtocol", channel: bytes, msg: object
    ) -> int:
        """A connection wants to publish a message to subscribers.

        Returns:
            The number of subscribers the message was delivered to. This
            matches the real Redis PUBLISH command, which replies with the
            number of clients that received the message. (Previously this
            returned the total number of *channels*, which was wrong.)
        """
        subscribers = self._subscribers_by_channel[channel]
        for sub in subscribers:
            sub.send(["message", channel, msg])

        return len(subscribers)

    def buildProtocol(self, addr: IPv4Address) -> "FakeRedisPubSubProtocol":
        """Twisted factory hook: create a protocol for a new inbound connection."""
        return FakeRedisPubSubProtocol(self)
|
|
|
|
|
|
|
|
|
|
|
|
class FakeRedisPubSubProtocol(Protocol):
    """A connection from a client talking to the fake Redis server."""

    # Set by Twisted when the connection is made; None beforehand.
    transport: Optional[FakeTransport] = None

    def __init__(self, server: FakeRedisPubSubServer):
        self._server = server
        self._reader = hiredis.Reader()

    def dataReceived(self, data: bytes) -> None:
        """Feed raw bytes to the Redis parser and dispatch each complete command."""
        self._reader.feed(data)

        # A single packet may carry several commands; drain the parser until
        # it reports there is nothing left.
        parsed = self._reader.gets()
        while parsed is not False:
            if not isinstance(parsed, list):
                # Inbound commands should always be a list
                raise Exception("Expected redis list")

            self.handle_command(parsed[0], *parsed[1:])
            parsed = self._reader.gets()

    def handle_command(self, command: bytes, *args: bytes) -> None:
        """Received a Redis command from the client."""

        # We currently only support pub/sub.
        if command == b"PUBLISH":
            channel, message = args
            # Reply with the subscriber count, as real Redis does.
            self.send(self._server.publish(self, channel, message))
        elif command == b"SUBSCRIBE":
            for num_channels, channel in enumerate(args, start=1):
                self._server.add_subscriber(self, channel)
                self.send(["subscribe", channel, num_channels])

        # Since we use SET/GET to cache things we can safely no-op them.
        elif command == b"SET":
            self.send("OK")
        elif command == b"GET":
            self.send(None)

        # Connection keep-alives.
        elif command == b"PING":
            self.send("PONG")

        else:
            raise Exception(f"Unknown command: {command!r}")

    def send(self, msg: object) -> None:
        """Send a message back to the client."""
        assert self.transport is not None

        self.transport.write(self.encode(msg).encode("utf-8"))
        self.transport.flush()

    def encode(self, obj: object) -> str:
        """Encode an object to its Redis format.

        Supports: strings/bytes, integers and list/tuples.
        """

        # Bytes are assumed to be UTF-8 encoded text.
        if isinstance(obj, bytes):
            obj = obj.decode("utf-8")

        if obj is None:
            return "$-1\r\n"
        if isinstance(obj, str):
            return f"${len(obj)}\r\n{obj}\r\n"
        if isinstance(obj, int):
            return f":{obj}\r\n"
        if isinstance(obj, (list, tuple)):
            encoded_items = "".join(self.encode(item) for item in obj)
            return f"*{len(obj)}\r\n{encoded_items}"

        raise Exception("Unrecognized type for encoding redis: %r: %r", type(obj), obj)

    def connectionLost(self, reason: Failure = connectionDone) -> None:
        """Drop all of this connection's subscriptions when it goes away."""
        self._server.remove_subscriber(self)
|