
Merge remote-tracking branch 'upstream/release-v1.61'

Tulir Asokan 2022-06-14 13:54:26 +03:00
commit 1543a3643e
235 changed files with 5079 additions and 14896 deletions

@@ -0,0 +1,25 @@
#!/bin/bash
#
# Fetches a version of complement which best matches the current build.
#
# The tarball is unpacked into `./complement`.
set -e
mkdir -p complement
# Pick an appropriate version of complement. Depending on whether this is a PR or release,
# etc. we need to use different fallbacks:
#
# 1. First check if there's a similarly named branch (GITHUB_HEAD_REF
# for pull requests, otherwise GITHUB_REF).
# 2. Attempt to use the base branch, e.g. when merging into release-vX.Y
# (GITHUB_BASE_REF for pull requests).
# 3. Use the default complement branch ("HEAD").
for BRANCH_NAME in "$GITHUB_HEAD_REF" "$GITHUB_BASE_REF" "${GITHUB_REF#refs/heads/}" "HEAD"; do
# Skip empty branch names and merge commits.
if [[ -z "$BRANCH_NAME" || $BRANCH_NAME =~ ^refs/pull/.* ]]; then
continue
fi
(wget -O - "https://github.com/matrix-org/complement/archive/$BRANCH_NAME.tar.gz" | tar -xz --strip-components=1 -C complement) && break
done

@@ -306,7 +306,7 @@ jobs:
- run: .ci/scripts/test_synapse_port_db.sh
complement:
-    if: ${{ !failure() && !cancelled() }}
+    if: "${{ !failure() && !cancelled() }}"
needs: linting-done
runs-on: ubuntu-latest
@@ -333,26 +333,7 @@ jobs:
# Attempt to check out the same branch of Complement as the PR. If it
# doesn't exist, fallback to HEAD.
- name: Checkout complement
-        shell: bash
-        run: |
-          mkdir -p complement
-          # Attempt to use the version of complement which best matches the current
-          # build. Depending on whether this is a PR or release, etc. we need to
-          # use different fallbacks.
-          #
-          #   1. First check if there's a similarly named branch (GITHUB_HEAD_REF
-          #      for pull requests, otherwise GITHUB_REF).
-          #   2. Attempt to use the base branch, e.g. when merging into release-vX.Y
-          #      (GITHUB_BASE_REF for pull requests).
-          #   3. Use the default complement branch ("HEAD").
-          for BRANCH_NAME in "$GITHUB_HEAD_REF" "$GITHUB_BASE_REF" "${GITHUB_REF#refs/heads/}" "HEAD"; do
-            # Skip empty branch names and merge commits.
-            if [[ -z "$BRANCH_NAME" || $BRANCH_NAME =~ ^refs/pull/.* ]]; then
-              continue
-            fi
-            (wget -O - "https://github.com/matrix-org/complement/archive/$BRANCH_NAME.tar.gz" | tar -xz --strip-components=1 -C complement) && break
-          done
+        run: synapse/.ci/scripts/checkout_complement.sh
- run: |
set -o pipefail
@@ -360,6 +341,45 @@ jobs:
shell: bash
name: Run Complement Tests
# We only run the workers tests on `develop` for now, because they're too slow to wait for on PRs.
# Sadly, you can't have an `if` condition on the value of a matrix, so this is a temporary, separate job for now.
# GitHub Actions doesn't support YAML anchors, so it's full-on duplication for now.
complement-developonly:
if: "${{ !failure() && !cancelled() && (github.ref == 'refs/heads/develop') }}"
needs: linting-done
runs-on: ubuntu-latest
steps:
# The path is set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on the path to run Complement.
# See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path
- name: "Set Go Version"
run: |
# Add Go 1.17 to the PATH: see https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md#environment-variables-2
echo "$GOROOT_1_17_X64/bin" >> $GITHUB_PATH
# Add the Go path to the PATH: We need this so we can call gotestfmt
echo "~/go/bin" >> $GITHUB_PATH
- name: "Install Complement Dependencies"
run: |
sudo apt-get update && sudo apt-get install -y libolm3 libolm-dev
go get -v github.com/haveyoudebuggedit/gotestfmt/v2/cmd/gotestfmt@latest
- name: Run actions/checkout@v2 for synapse
uses: actions/checkout@v2
with:
path: synapse
# Attempt to check out the same branch of Complement as the PR. If it
# doesn't exist, fallback to HEAD.
- name: Checkout complement
run: synapse/.ci/scripts/checkout_complement.sh
- run: |
set -o pipefail
WORKERS=1 COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
shell: bash
name: Run Complement Tests
# a job which marks all the other jobs as complete, thus allowing PRs to be merged.
tests-done:
if: ${{ always() }}

@@ -1,3 +1,103 @@
Synapse 1.61.0 (2022-06-14)
===========================
This release removes support for the non-standard feature known both as 'groups' and as 'communities', which have been superseded by *Spaces*.
See [the upgrade notes](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1610)
for more details.
Improved Documentation
----------------------
- Mention removed community/group worker endpoints in [upgrade.md](https://github.com/matrix-org/synapse/blob/develop/docs/upgrade.md#upgrading-to-v1610). Contributed by @olmari. ([\#13023](https://github.com/matrix-org/synapse/issues/13023))
Synapse 1.61.0rc1 (2022-06-07)
==============================
Features
--------
- Add new `media_retention` options to the homeserver config for routinely cleaning up non-recently accessed media. ([\#12732](https://github.com/matrix-org/synapse/issues/12732), [\#12972](https://github.com/matrix-org/synapse/issues/12972), [\#12977](https://github.com/matrix-org/synapse/issues/12977))
- Experimental support for [MSC3772](https://github.com/matrix-org/matrix-spec-proposals/pull/3772): Push rule for mutually related events. ([\#12740](https://github.com/matrix-org/synapse/issues/12740), [\#12859](https://github.com/matrix-org/synapse/issues/12859))
- Update to the `check_event_for_spam` module callback: Deprecate the current callback signature, replace it with a new signature that is both less ambiguous (replacing booleans with explicit allow/block) and more powerful (ability to return explicit error codes); a sketch of the new shape follows this list. ([\#12808](https://github.com/matrix-org/synapse/issues/12808))
- Add storage and module API methods to get monthly active users (and their corresponding appservices) within an optionally specified time range. ([\#12838](https://github.com/matrix-org/synapse/issues/12838), [\#12917](https://github.com/matrix-org/synapse/issues/12917))
- Support the new error code `ORG.MATRIX.MSC3823.USER_ACCOUNT_SUSPENDED` from [MSC3823](https://github.com/matrix-org/matrix-spec-proposals/pull/3823). ([\#12845](https://github.com/matrix-org/synapse/issues/12845), [\#12923](https://github.com/matrix-org/synapse/issues/12923))
- Add a configurable background job to delete stale devices. ([\#12855](https://github.com/matrix-org/synapse/issues/12855))
- Improve URL previews for pages with empty elements. ([\#12951](https://github.com/matrix-org/synapse/issues/12951))
- Allow updating a user's password using the admin API without logging out their devices; an example follows this list. Contributed by @jcgruenhage. ([\#12952](https://github.com/matrix-org/synapse/issues/12952))
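
The `check_event_for_spam` entry above changes the callback's contract from ambiguous booleans to an explicit allow value or error code. A minimal sketch of a module written against the new shape, assuming the `NOT_SPAM` and `Codes` names exposed by `synapse.module_api` (see the module API documentation for the authoritative signature):

```python
from typing import Union

from synapse.module_api import NOT_SPAM, ModuleApi
from synapse.module_api.errors import Codes


class ExampleSpamChecker:
    def __init__(self, config: dict, api: ModuleApi):
        # Register the callback under the new, explicit allow/block contract.
        api.register_spam_checker_callbacks(
            check_event_for_spam=self.check_event_for_spam
        )

    async def check_event_for_spam(self, event) -> Union[str, Codes]:
        # Returning a Codes value blocks the event with an explicit error
        # code; returning NOT_SPAM allows it. The legacy True/False returns
        # are deprecated but still accepted.
        if "spam" in event.content.get("body", ""):
            return Codes.FORBIDDEN
        return NOT_SPAM
```

The password entry above uses the admin API's user update endpoint. A hedged example of rotating a password while keeping the user's sessions alive, with a placeholder homeserver URL and token (`PUT /_synapse/admin/v2/users/<user_id>` with `logout_devices: false`, per the admin API docs):

```python
import requests

resp = requests.put(
    "https://homeserver.example/_synapse/admin/v2/users/@alice:example.com",
    headers={"Authorization": "Bearer ADMIN_ACCESS_TOKEN"},
    # logout_devices=False keeps existing devices logged in while the
    # password changes; omitting it falls back to logging everything out.
    json={"password": "correct-horse-battery-staple", "logout_devices": False},
)
resp.raise_for_status()
```
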
Bugfixes
--------
- Always send an `access_token` in `/thirdparty/` requests to appservices, as required by the [Application Service API specification](https://spec.matrix.org/v1.1/application-service-api/#third-party-networks). ([\#12746](https://github.com/matrix-org/synapse/issues/12746))
- Implement [MSC3816](https://github.com/matrix-org/matrix-spec-proposals/pull/3816): sending the root event in a thread should count as having 'participated' in it. ([\#12766](https://github.com/matrix-org/synapse/issues/12766))
- Delete events from the `federation_inbound_events_staging` table when a room is purged through the admin API. ([\#12784](https://github.com/matrix-org/synapse/issues/12784))
- Fix a bug where we did not correctly handle invalid device list updates over federation. Contributed by Carl Bordum Hansen. ([\#12829](https://github.com/matrix-org/synapse/issues/12829))
- Fix a bug which allowed multiple async operations to access database locks concurrently. Contributed by @sumnerevans @ Beeper. ([\#12832](https://github.com/matrix-org/synapse/issues/12832))
- Fix an issue introduced in Synapse 0.34 where the `/notifications` endpoint would only return notifications if a user registered at least one pusher. Contributed by Famedly. ([\#12840](https://github.com/matrix-org/synapse/issues/12840))
- Fix a bug where servers using a Postgres database would fail to backfill from an insertion event when MSC2716 is enabled (`experimental_features.msc2716_enabled`). ([\#12843](https://github.com/matrix-org/synapse/issues/12843))
- Fix [MSC3787](https://github.com/matrix-org/matrix-spec-proposals/pull/3787) rooms being omitted from room directory, room summary and space hierarchy responses. ([\#12858](https://github.com/matrix-org/synapse/issues/12858))
- Fix a bug introduced in Synapse 1.54.0 which could sometimes cause exceptions when handling federated traffic. ([\#12877](https://github.com/matrix-org/synapse/issues/12877))
- Fix a bug introduced in Synapse 1.59.0 which caused room deletion to fail with a foreign key violation error. ([\#12889](https://github.com/matrix-org/synapse/issues/12889))
- Fix a long-standing bug which caused the `/messages` endpoint to return an incorrect `end` attribute when there were no more events. Contributed by @Vetchu. ([\#12903](https://github.com/matrix-org/synapse/issues/12903))
- Fix a bug introduced in Synapse 1.58.0 where `/sync` would fail if the most recent event in a room was a redaction of an event that has since been purged. ([\#12905](https://github.com/matrix-org/synapse/issues/12905))
- Fix a potential memory leak when generating thumbnails. ([\#12932](https://github.com/matrix-org/synapse/issues/12932))
- Fix a long-standing bug where a URL preview would break if the image failed to download. ([\#12950](https://github.com/matrix-org/synapse/issues/12950))
Improved Documentation
----------------------
- Fix typographical errors in documentation. ([\#12863](https://github.com/matrix-org/synapse/issues/12863))
- Fix documentation incorrectly stating the `sendToDevice` endpoint can be directed at generic workers. Contributed by Nick @ Beeper. ([\#12867](https://github.com/matrix-org/synapse/issues/12867))
Deprecations and Removals
-------------------------
- Remove support for the non-standard groups/communities feature from Synapse. ([\#12553](https://github.com/matrix-org/synapse/issues/12553), [\#12558](https://github.com/matrix-org/synapse/issues/12558), [\#12563](https://github.com/matrix-org/synapse/issues/12563), [\#12895](https://github.com/matrix-org/synapse/issues/12895), [\#12897](https://github.com/matrix-org/synapse/issues/12897), [\#12899](https://github.com/matrix-org/synapse/issues/12899), [\#12900](https://github.com/matrix-org/synapse/issues/12900), [\#12936](https://github.com/matrix-org/synapse/issues/12936), [\#12966](https://github.com/matrix-org/synapse/issues/12966))
- Remove contributed `kick_users.py` script. This is broken under Python 3, and is not added to the environment when `pip install`ing Synapse. ([\#12908](https://github.com/matrix-org/synapse/issues/12908))
- Remove `contrib/jitsimeetbridge`. This was an unused experiment that hasn't been meaningfully changed since 2014. ([\#12909](https://github.com/matrix-org/synapse/issues/12909))
- Remove unused `contrib/experiements/cursesio.py` script, which fails to run under Python 3. ([\#12910](https://github.com/matrix-org/synapse/issues/12910))
- Remove unused `contrib/experiements/test_messaging.py` script. This fails to run on Python 3. ([\#12911](https://github.com/matrix-org/synapse/issues/12911))
Internal Changes
----------------
- Test Synapse against Complement with workers. ([\#12810](https://github.com/matrix-org/synapse/issues/12810), [\#12933](https://github.com/matrix-org/synapse/issues/12933))
- Reduce the amount of state we pull from the DB. ([\#12811](https://github.com/matrix-org/synapse/issues/12811), [\#12964](https://github.com/matrix-org/synapse/issues/12964))
- Try other homeservers when re-syncing state for rooms with partial state. ([\#12812](https://github.com/matrix-org/synapse/issues/12812))
- Resume state re-syncing for rooms with partial state after a Synapse restart. ([\#12813](https://github.com/matrix-org/synapse/issues/12813))
- Remove Mutual Rooms' ([MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666)) endpoint dependency on the User Directory. ([\#12836](https://github.com/matrix-org/synapse/issues/12836))
- Experimental: expand `check_event_for_spam` with ability to return additional fields. This enables spam-checker implementations to experiment with mechanisms to give users more information about why they are blocked and whether any action is needed from them to be unblocked. ([\#12846](https://github.com/matrix-org/synapse/issues/12846))
- Remove `dont_notify` from the `.m.rule.room.server_acl` rule. ([\#12849](https://github.com/matrix-org/synapse/issues/12849))
- Remove the unstable `/hierarchy` endpoint from [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946). ([\#12851](https://github.com/matrix-org/synapse/issues/12851))
- Pull out less state when handling gaps in room DAG. ([\#12852](https://github.com/matrix-org/synapse/issues/12852), [\#12904](https://github.com/matrix-org/synapse/issues/12904))
- Clean-up the push rules datastore. ([\#12856](https://github.com/matrix-org/synapse/issues/12856))
- Correct a type annotation in the URL preview source code. ([\#12860](https://github.com/matrix-org/synapse/issues/12860))
- Update `pyjwt` dependency to [2.4.0](https://github.com/jpadilla/pyjwt/releases/tag/2.4.0). ([\#12865](https://github.com/matrix-org/synapse/issues/12865))
- Enable the `/account/whoami` endpoint on synapse worker processes. Contributed by Nick @ Beeper. ([\#12866](https://github.com/matrix-org/synapse/issues/12866))
- Enable the `batch_send` endpoint on synapse worker processes. Contributed by Nick @ Beeper. ([\#12868](https://github.com/matrix-org/synapse/issues/12868))
- Don't generate empty AS transactions when the AS is flagged as down. Contributed by Nick @ Beeper. ([\#12869](https://github.com/matrix-org/synapse/issues/12869))
- Fix up the variable `state_store` naming. ([\#12871](https://github.com/matrix-org/synapse/issues/12871))
- Faster room joins: when querying the current state of the room, wait for state to be populated. ([\#12872](https://github.com/matrix-org/synapse/issues/12872))
- Avoid running queries which will never result in deletions. ([\#12879](https://github.com/matrix-org/synapse/issues/12879))
- Use constants for EDU types. ([\#12884](https://github.com/matrix-org/synapse/issues/12884))
- Reduce database load of `/sync` when presence is enabled. ([\#12885](https://github.com/matrix-org/synapse/issues/12885))
- Refactor `have_seen_events` to reduce memory consumed when processing federation traffic. ([\#12886](https://github.com/matrix-org/synapse/issues/12886))
- Refactor receipt linearization code. ([\#12888](https://github.com/matrix-org/synapse/issues/12888))
- Add type annotations to `synapse.logging.opentracing`. ([\#12894](https://github.com/matrix-org/synapse/issues/12894))
- Remove PyNaCl occurrences directly used in Synapse code. ([\#12902](https://github.com/matrix-org/synapse/issues/12902))
- Bump types-jsonschema from 4.4.1 to 4.4.6. ([\#12912](https://github.com/matrix-org/synapse/issues/12912))
- Rename storage classes. ([\#12913](https://github.com/matrix-org/synapse/issues/12913))
- Preparation for database schema simplifications: stop reading from `event_edges.room_id`. ([\#12914](https://github.com/matrix-org/synapse/issues/12914))
- Check if we are in a virtual environment before overriding the `PYTHONPATH` environment variable in the demo script. ([\#12916](https://github.com/matrix-org/synapse/issues/12916))
- Improve the logging when signature checks on events fail. ([\#12925](https://github.com/matrix-org/synapse/issues/12925))
Synapse 1.60.0 (2022-05-31)
===========================
@@ -69,6 +169,7 @@ Bugfixes
- Fix a bug introduced in Synapse 1.30.0 where empty rooms could be automatically created if a monthly active users limit is set. ([\#12713](https://github.com/matrix-org/synapse/issues/12713))
- Fix push to dismiss notifications when read on another client. Contributed by @SpiritCroc @ Beeper. ([\#12721](https://github.com/matrix-org/synapse/issues/12721))
- Fix poor database performance when reading the cache invalidation stream for large servers with lots of workers. ([\#12747](https://github.com/matrix-org/synapse/issues/12747))
+- Fix a long-standing bug where the user directory background process would fail to make forward progress if a user included a null codepoint in their display name or avatar. ([\#12762](https://github.com/matrix-org/synapse/issues/12762))
- Delete events from the `federation_inbound_events_staging` table when a room is purged through the admin API. ([\#12770](https://github.com/matrix-org/synapse/issues/12770))
- Give a meaningful error message when a client tries to create a room with an invalid alias localpart. ([\#12779](https://github.com/matrix-org/synapse/issues/12779))
- Fix a bug introduced in 1.43.0 where a file (`providers.json`) was never closed. Contributed by @arkamar. ([\#12794](https://github.com/matrix-org/synapse/issues/12794))
@@ -124,7 +225,6 @@ Internal Changes
- Drop the logging level of status messages for the URL preview cache expiry job from INFO to DEBUG. ([\#12720](https://github.com/matrix-org/synapse/issues/12720))
- Downgrade some OIDC errors to warnings in the logs, to reduce the noise of Sentry reports. ([\#12723](https://github.com/matrix-org/synapse/issues/12723))
- Update configs used by Complement to allow more invites/3PID validations during tests. ([\#12731](https://github.com/matrix-org/synapse/issues/12731))
-- Fix a long-standing bug where the user directory background process would fail to make forward progress if a user included a null codepoint in their display name or avatar. ([\#12762](https://github.com/matrix-org/synapse/issues/12762))
- Tweak the mypy plugin so that `@cached` can accept `on_invalidate=None`. ([\#12769](https://github.com/matrix-org/synapse/issues/12769))
- Move methods that call `add_push_rule` to the `PushRuleStore` class. ([\#12772](https://github.com/matrix-org/synapse/issues/12772))
- Make handling of federation Authorization header (more) compliant with RFC7230. ([\#12774](https://github.com/matrix-org/synapse/issues/12774))
@@ -231,7 +331,7 @@ Deprecations and Removals
-------------------------
- Remove unstable identifiers from [MSC3069](https://github.com/matrix-org/matrix-doc/pull/3069). ([\#12596](https://github.com/matrix-org/synapse/issues/12596))
- Remove the unspecified `m.login.jwt` login type and the unstable `uk.half-shot.msc2778.login.application_service` from
[MSC2778](https://github.com/matrix-org/matrix-doc/pull/2778). ([\#12597](https://github.com/matrix-org/synapse/issues/12597))
- Synapse now requires at least Python 3.7.1 (up from 3.7.0), for compatibility with the latest Twisted trunk. ([\#12613](https://github.com/matrix-org/synapse/issues/12613))

@@ -16,6 +16,7 @@
""" Starts a synapse client console. """
import argparse
import binascii
import cmd
import getpass
import json
@@ -26,9 +27,8 @@ import urllib
from http import TwistedHttpClient
from typing import Optional
-import nacl.encoding
-import nacl.signing
import urlparse
+from signedjson.key import NACL_ED25519, decode_verify_key_bytes
from signedjson.sign import SignatureVerifyException, verify_signed_json
from twisted.internet import defer, reactor, threads
@@ -41,7 +41,6 @@ TRUSTED_ID_SERVERS = ["localhost:8001"]
class SynapseCmd(cmd.Cmd):
"""Basic synapse command-line processor.
This processes commands from the user and calls the relevant HTTP methods.
@@ -420,8 +419,8 @@ class SynapseCmd(cmd.Cmd):
pubKey = None
pubKeyObj = yield self.http_client.do_request("GET", url)
if "public_key" in pubKeyObj:
-            pubKey = nacl.signing.VerifyKey(
-                pubKeyObj["public_key"], encoder=nacl.encoding.HexEncoder
+            pubKey = decode_verify_key_bytes(
+                NACL_ED25519, binascii.unhexlify(pubKeyObj["public_key"])
)
else:
print("No public key found in pubkey response!")

@@ -1,165 +0,0 @@
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import curses
import curses.wrapper
from curses.ascii import isprint
from twisted.internet import reactor
class CursesStdIO:
def __init__(self, stdscr, callback=None):
self.statusText = "Synapse test app -"
self.searchText = ""
self.stdscr = stdscr
self.logLine = ""
self.callback = callback
self._setup()
def _setup(self):
self.stdscr.nodelay(1) # Make non blocking
self.rows, self.cols = self.stdscr.getmaxyx()
self.lines = []
curses.use_default_colors()
self.paintStatus(self.statusText)
self.stdscr.refresh()
def set_callback(self, callback):
self.callback = callback
def fileno(self):
"""We want to select on FD 0"""
return 0
def connectionLost(self, reason):
self.close()
def print_line(self, text):
"""add a line to the internal list of lines"""
self.lines.append(text)
self.redraw()
def print_log(self, text):
self.logLine = text
self.redraw()
def redraw(self):
"""method for redisplaying lines based on internal list of lines"""
self.stdscr.clear()
self.paintStatus(self.statusText)
i = 0
index = len(self.lines) - 1
while i < (self.rows - 3) and index >= 0:
self.stdscr.addstr(self.rows - 3 - i, 0, self.lines[index], curses.A_NORMAL)
i = i + 1
index = index - 1
self.printLogLine(self.logLine)
self.stdscr.refresh()
def paintStatus(self, text):
if len(text) > self.cols:
raise RuntimeError("TextTooLongError")
self.stdscr.addstr(
self.rows - 2, 0, text + " " * (self.cols - len(text)), curses.A_STANDOUT
)
def printLogLine(self, text):
self.stdscr.addstr(
0, 0, text + " " * (self.cols - len(text)), curses.A_STANDOUT
)
def doRead(self):
"""Input is ready!"""
curses.noecho()
c = self.stdscr.getch() # read a character
if c == curses.KEY_BACKSPACE:
self.searchText = self.searchText[:-1]
elif c == curses.KEY_ENTER or c == 10:
text = self.searchText
self.searchText = ""
self.print_line(">> %s" % text)
try:
if self.callback:
self.callback.on_line(text)
except Exception as e:
self.print_line(str(e))
self.stdscr.refresh()
elif isprint(c):
if len(self.searchText) == self.cols - 2:
return
self.searchText = self.searchText + chr(c)
self.stdscr.addstr(
self.rows - 1,
0,
self.searchText + (" " * (self.cols - len(self.searchText) - 2)),
)
self.paintStatus(self.statusText + " %d" % len(self.searchText))
self.stdscr.move(self.rows - 1, len(self.searchText))
self.stdscr.refresh()
def logPrefix(self):
return "CursesStdIO"
def close(self):
"""clean up"""
curses.nocbreak()
self.stdscr.keypad(0)
curses.echo()
curses.endwin()
class Callback:
def __init__(self, stdio):
self.stdio = stdio
def on_line(self, text):
self.stdio.print_line(text)
def main(stdscr):
screen = CursesStdIO(stdscr) # create Screen object
callback = Callback(screen)
screen.set_callback(callback)
stdscr.refresh()
reactor.addReader(screen)
reactor.run()
screen.close()
if __name__ == "__main__":
curses.wrapper(main)

@@ -1,367 +0,0 @@
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" This is an example of using the server to server implementation to do a
basic chat style thing. It accepts commands from stdin and outputs to stdout.
It assumes that ucids are of the form <user>@<domain>, and uses <domain> as
the address of the remote home server to hit.
Usage:
python test_messaging.py <port>
Currently assumes the local address is localhost:<port>
"""
import argparse
import curses.wrapper
import json
import logging
import os
import re
import cursesio
from twisted.internet import defer, reactor
from twisted.python import log
from synapse.app.homeserver import SynapseHomeServer
from synapse.federation import ReplicationHandler
from synapse.federation.units import Pdu
from synapse.util import origin_from_ucid
# from synapse.logging.utils import log_function
logger = logging.getLogger("example")
def excpetion_errback(failure):
logging.exception(failure)
class InputOutput:
"""This is responsible for basic I/O so that a user can interact with
the example app.
"""
def __init__(self, screen, user):
self.screen = screen
self.user = user
def set_home_server(self, server):
self.server = server
def on_line(self, line):
"""This is where we process commands."""
try:
m = re.match(r"^join (\S+)$", line)
if m:
# The `sender` wants to join a room.
(room_name,) = m.groups()
self.print_line("%s joining %s" % (self.user, room_name))
self.server.join_room(room_name, self.user, self.user)
# self.print_line("OK.")
return
m = re.match(r"^invite (\S+) (\S+)$", line)
if m:
# `sender` wants to invite someone to a room
room_name, invitee = m.groups()
self.print_line("%s invited to %s" % (invitee, room_name))
self.server.invite_to_room(room_name, self.user, invitee)
# self.print_line("OK.")
return
m = re.match(r"^send (\S+) (.*)$", line)
if m:
# `sender` wants to message a room
room_name, body = m.groups()
self.print_line("%s send to %s" % (self.user, room_name))
self.server.send_message(room_name, self.user, body)
# self.print_line("OK.")
return
m = re.match(r"^backfill (\S+)$", line)
if m:
# we want to backfill a room
(room_name,) = m.groups()
self.print_line("backfill %s" % room_name)
self.server.backfill(room_name)
return
self.print_line("Unrecognized command")
except Exception as e:
logger.exception(e)
def print_line(self, text):
self.screen.print_line(text)
def print_log(self, text):
self.screen.print_log(text)
class IOLoggerHandler(logging.Handler):
def __init__(self, io):
logging.Handler.__init__(self)
self.io = io
def emit(self, record):
if record.levelno < logging.WARN:
return
msg = self.format(record)
self.io.print_log(msg)
class Room:
"""Used to store (in memory) the current membership state of a room, and
which home servers we should send PDUs associated with the room to.
"""
def __init__(self, room_name):
self.room_name = room_name
self.invited = set()
self.participants = set()
self.servers = set()
self.oldest_server = None
self.have_got_metadata = False
def add_participant(self, participant):
"""Someone has joined the room"""
self.participants.add(participant)
self.invited.discard(participant)
server = origin_from_ucid(participant)
self.servers.add(server)
if not self.oldest_server:
self.oldest_server = server
def add_invited(self, invitee):
"""Someone has been invited to the room"""
self.invited.add(invitee)
self.servers.add(origin_from_ucid(invitee))
class HomeServer(ReplicationHandler):
"""A very basic home server implentation that allows people to join a
room and then invite other people.
"""
def __init__(self, server_name, replication_layer, output):
self.server_name = server_name
self.replication_layer = replication_layer
self.replication_layer.set_handler(self)
self.joined_rooms = {}
self.output = output
def on_receive_pdu(self, pdu):
"""We just received a PDU"""
pdu_type = pdu.pdu_type
if pdu_type == "sy.room.message":
self._on_message(pdu)
elif pdu_type == "sy.room.member" and "membership" in pdu.content:
if pdu.content["membership"] == "join":
self._on_join(pdu.context, pdu.state_key)
elif pdu.content["membership"] == "invite":
self._on_invite(pdu.origin, pdu.context, pdu.state_key)
else:
self.output.print_line(
"#%s (unrec) %s = %s"
% (pdu.context, pdu.pdu_type, json.dumps(pdu.content))
)
def _on_message(self, pdu):
"""We received a message"""
self.output.print_line(
"#%s %s %s" % (pdu.context, pdu.content["sender"], pdu.content["body"])
)
def _on_join(self, context, joinee):
"""Someone has joined a room, either a remote user or a local user"""
room = self._get_or_create_room(context)
room.add_participant(joinee)
self.output.print_line("#%s %s %s" % (context, joinee, "*** JOINED"))
def _on_invite(self, origin, context, invitee):
"""Someone has been invited"""
room = self._get_or_create_room(context)
room.add_invited(invitee)
self.output.print_line("#%s %s %s" % (context, invitee, "*** INVITED"))
if not room.have_got_metadata and origin is not self.server_name:
logger.debug("Get room state")
self.replication_layer.get_state_for_context(origin, context)
room.have_got_metadata = True
@defer.inlineCallbacks
def send_message(self, room_name, sender, body):
"""Send a message to a room!"""
destinations = yield self.get_servers_for_context(room_name)
try:
yield self.replication_layer.send_pdu(
Pdu.create_new(
context=room_name,
pdu_type="sy.room.message",
content={"sender": sender, "body": body},
origin=self.server_name,
destinations=destinations,
)
)
except Exception as e:
logger.exception(e)
@defer.inlineCallbacks
def join_room(self, room_name, sender, joinee):
"""Join a room!"""
self._on_join(room_name, joinee)
destinations = yield self.get_servers_for_context(room_name)
try:
pdu = Pdu.create_new(
context=room_name,
pdu_type="sy.room.member",
is_state=True,
state_key=joinee,
content={"membership": "join"},
origin=self.server_name,
destinations=destinations,
)
yield self.replication_layer.send_pdu(pdu)
except Exception as e:
logger.exception(e)
@defer.inlineCallbacks
def invite_to_room(self, room_name, sender, invitee):
"""Invite someone to a room!"""
self._on_invite(self.server_name, room_name, invitee)
destinations = yield self.get_servers_for_context(room_name)
try:
yield self.replication_layer.send_pdu(
Pdu.create_new(
context=room_name,
is_state=True,
pdu_type="sy.room.member",
state_key=invitee,
content={"membership": "invite"},
origin=self.server_name,
destinations=destinations,
)
)
except Exception as e:
logger.exception(e)
def backfill(self, room_name, limit=5):
room = self.joined_rooms.get(room_name)
if not room:
return
dest = room.oldest_server
return self.replication_layer.backfill(dest, room_name, limit)
def _get_room_remote_servers(self, room_name):
return list(self.joined_rooms.setdefault(room_name).servers)
def _get_or_create_room(self, room_name):
return self.joined_rooms.setdefault(room_name, Room(room_name))
def get_servers_for_context(self, context):
return defer.succeed(
self.joined_rooms.setdefault(context, Room(context)).servers
)
def main(stdscr):
parser = argparse.ArgumentParser()
parser.add_argument("user", type=str)
parser.add_argument("-v", "--verbose", action="count")
args = parser.parse_args()
user = args.user
server_name = origin_from_ucid(user)
# Set up logging
root_logger = logging.getLogger()
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s"
)
if not os.path.exists("logs"):
os.makedirs("logs")
fh = logging.FileHandler("logs/%s" % user)
fh.setFormatter(formatter)
root_logger.addHandler(fh)
root_logger.setLevel(logging.DEBUG)
# Hack: The only way to get it to stop logging to sys.stderr :(
log.theLogPublisher.observers = []
observer = log.PythonLoggingObserver()
observer.start()
# Set up synapse server
curses_stdio = cursesio.CursesStdIO(stdscr)
input_output = InputOutput(curses_stdio, user)
curses_stdio.set_callback(input_output)
app_hs = SynapseHomeServer(server_name, db_name="dbs/%s" % user)
replication = app_hs.get_replication_layer()
hs = HomeServer(server_name, replication, curses_stdio)
input_output.set_home_server(hs)
# Add input_output logger
io_logger = IOLoggerHandler(input_output)
io_logger.setFormatter(formatter)
root_logger.addHandler(io_logger)
# Start!
try:
port = int(server_name.split(":")[1])
except Exception:
port = 12345
app_hs.get_http_server().start_listening(port)
reactor.addReader(curses_stdio)
reactor.run()
if __name__ == "__main__":
curses.wrapper(main)

@@ -1,298 +0,0 @@
#!/usr/bin/env python
"""
This is an attempt at bridging matrix clients into a Jitsi Meet room via Matrix
video call. It uses hard-coded xml strings over XMPP BOSH. It can display one
of the streams from the Jitsi bridge until the second lot of SDP comes down and
we set the remote SDP at which point the stream ends. Our video never gets to
the bridge.
Requires:
npm install jquery jsdom
"""
import json
import subprocess
import time
import gevent
import grequests
from BeautifulSoup import BeautifulSoup
ACCESS_TOKEN = ""
MATRIXBASE = "https://matrix.org/_matrix/client/api/v1/"
MYUSERNAME = "@davetest:matrix.org"
HTTPBIND = "https://meet.jit.si/http-bind"
# HTTPBIND = 'https://jitsi.vuc.me/http-bind'
# ROOMNAME = "matrix"
ROOMNAME = "pibble"
HOST = "guest.jit.si"
# HOST="jitsi.vuc.me"
TURNSERVER = "turn.guest.jit.si"
# TURNSERVER="turn.jitsi.vuc.me"
ROOMDOMAIN = "meet.jit.si"
# ROOMDOMAIN="conference.jitsi.vuc.me"
class TrivialMatrixClient:
def __init__(self, access_token):
self.token = None
self.access_token = access_token
def getEvent(self):
while True:
url = (
MATRIXBASE
+ "events?access_token="
+ self.access_token
+ "&timeout=60000"
)
if self.token:
url += "&from=" + self.token
req = grequests.get(url)
resps = grequests.map([req])
obj = json.loads(resps[0].content)
print("incoming from matrix", obj)
if "end" not in obj:
continue
self.token = obj["end"]
if len(obj["chunk"]):
return obj["chunk"][0]
def joinRoom(self, roomId):
url = MATRIXBASE + "rooms/" + roomId + "/join?access_token=" + self.access_token
print(url)
headers = {"Content-Type": "application/json"}
req = grequests.post(url, headers=headers, data="{}")
resps = grequests.map([req])
obj = json.loads(resps[0].content)
print("response: ", obj)
def sendEvent(self, roomId, evType, event):
url = (
MATRIXBASE
+ "rooms/"
+ roomId
+ "/send/"
+ evType
+ "?access_token="
+ self.access_token
)
print(url)
print(json.dumps(event))
headers = {"Content-Type": "application/json"}
req = grequests.post(url, headers=headers, data=json.dumps(event))
resps = grequests.map([req])
obj = json.loads(resps[0].content)
print("response: ", obj)
xmppClients = {}
def matrixLoop():
while True:
ev = matrixCli.getEvent()
print(ev)
if ev["type"] == "m.room.member":
print("membership event")
if ev["membership"] == "invite" and ev["state_key"] == MYUSERNAME:
roomId = ev["room_id"]
print("joining room %s" % (roomId))
matrixCli.joinRoom(roomId)
elif ev["type"] == "m.room.message":
if ev["room_id"] in xmppClients:
print("already have a bridge for that user, ignoring")
continue
print("got message, connecting")
xmppClients[ev["room_id"]] = TrivialXmppClient(ev["room_id"], ev["user_id"])
gevent.spawn(xmppClients[ev["room_id"]].xmppLoop)
elif ev["type"] == "m.call.invite":
print("Incoming call")
# sdp = ev['content']['offer']['sdp']
# print "sdp: %s" % (sdp)
# xmppClients[ev['room_id']] = TrivialXmppClient(ev['room_id'], ev['user_id'])
# gevent.spawn(xmppClients[ev['room_id']].xmppLoop)
elif ev["type"] == "m.call.answer":
print("Call answered")
sdp = ev["content"]["answer"]["sdp"]
if ev["room_id"] not in xmppClients:
print("We didn't have a call for that room")
continue
# should probably check call ID too
xmppCli = xmppClients[ev["room_id"]]
xmppCli.sendAnswer(sdp)
elif ev["type"] == "m.call.hangup":
if ev["room_id"] in xmppClients:
xmppClients[ev["room_id"]].stop()
del xmppClients[ev["room_id"]]
class TrivialXmppClient:
def __init__(self, matrixRoom, userId):
self.rid = 0
self.matrixRoom = matrixRoom
self.userId = userId
self.running = True
def stop(self):
self.running = False
def nextRid(self):
self.rid += 1
return "%d" % (self.rid)
def sendIq(self, xml):
fullXml = (
"<body rid='%s' xmlns='http://jabber.org/protocol/httpbind' sid='%s'>%s</body>"
% (self.nextRid(), self.sid, xml)
)
# print "\t>>>%s" % (fullXml)
return self.xmppPoke(fullXml)
def xmppPoke(self, xml):
headers = {"Content-Type": "application/xml"}
req = grequests.post(HTTPBIND, verify=False, headers=headers, data=xml)
resps = grequests.map([req])
obj = BeautifulSoup(resps[0].content)
return obj
def sendAnswer(self, answer):
print("sdp from matrix client", answer)
p = subprocess.Popen(
["node", "unjingle/unjingle.js", "--sdp"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
)
jingle, out_err = p.communicate(answer)
jingle = jingle % {
"tojid": self.callfrom,
"action": "session-accept",
"initiator": self.callfrom,
"responder": self.jid,
"sid": self.callsid,
}
print("answer jingle from sdp", jingle)
res = self.sendIq(jingle)
print("reply from answer: ", res)
self.ssrcs = {}
jingleSoup = BeautifulSoup(jingle)
for cont in jingleSoup.iq.jingle.findAll("content"):
if cont.description:
self.ssrcs[cont["name"]] = cont.description["ssrc"]
print("my ssrcs:", self.ssrcs)
gevent.joinall([gevent.spawn(self.advertiseSsrcs)])
def advertiseSsrcs(self):
time.sleep(7)
print("SSRC spammer started")
while self.running:
ssrcMsg = (
"<presence to='%(tojid)s' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%(nick)s</nick><stats xmlns='http://jitsi.org/jitmeet/stats'><stat name='bitrate_download' value='175'/><stat name='bitrate_upload' value='176'/><stat name='packetLoss_total' value='0'/><stat name='packetLoss_download' value='0'/><stat name='packetLoss_upload' value='0'/></stats><media xmlns='http://estos.de/ns/mjs'><source type='audio' ssrc='%(assrc)s' direction='sendre'/><source type='video' ssrc='%(vssrc)s' direction='sendre'/></media></presence>"
% {
"tojid": "%s@%s/%s" % (ROOMNAME, ROOMDOMAIN, self.shortJid),
"nick": self.userId,
"assrc": self.ssrcs["audio"],
"vssrc": self.ssrcs["video"],
}
)
res = self.sendIq(ssrcMsg)
print("reply from ssrc announce: ", res)
time.sleep(10)
def xmppLoop(self):
self.matrixCallId = time.time()
res = self.xmppPoke(
"<body rid='%s' xmlns='http://jabber.org/protocol/httpbind' to='%s' xml:lang='en' wait='60' hold='1' content='text/xml; charset=utf-8' ver='1.6' xmpp:version='1.0' xmlns:xmpp='urn:xmpp:xbosh'/>"
% (self.nextRid(), HOST)
)
print(res)
self.sid = res.body["sid"]
print("sid %s" % (self.sid))
res = self.sendIq(
"<auth xmlns='urn:ietf:params:xml:ns:xmpp-sasl' mechanism='ANONYMOUS'/>"
)
res = self.xmppPoke(
"<body rid='%s' xmlns='http://jabber.org/protocol/httpbind' sid='%s' to='%s' xml:lang='en' xmpp:restart='true' xmlns:xmpp='urn:xmpp:xbosh'/>"
% (self.nextRid(), self.sid, HOST)
)
res = self.sendIq(
"<iq type='set' id='_bind_auth_2' xmlns='jabber:client'><bind xmlns='urn:ietf:params:xml:ns:xmpp-bind'/></iq>"
)
print(res)
self.jid = res.body.iq.bind.jid.string
print("jid: %s" % (self.jid))
self.shortJid = self.jid.split("-")[0]
res = self.sendIq(
"<iq type='set' id='_session_auth_2' xmlns='jabber:client'><session xmlns='urn:ietf:params:xml:ns:xmpp-session'/></iq>"
)
# randomthing = res.body.iq['to']
# whatsitpart = randomthing.split('-')[0]
# print "other random bind thing: %s" % (randomthing)
# advertise presence to the jitsi room, with our nick
res = self.sendIq(
"<iq type='get' to='%s' xmlns='jabber:client' id='1:sendIQ'><services xmlns='urn:xmpp:extdisco:1'><service host='%s'/></services></iq><presence to='%s@%s/d98f6c40' xmlns='jabber:client'><x xmlns='http://jabber.org/protocol/muc'/><c xmlns='http://jabber.org/protocol/caps' hash='sha-1' node='http://jitsi.org/jitsimeet' ver='0WkSdhFnAUxrz4ImQQLdB80GFlE='/><nick xmlns='http://jabber.org/protocol/nick'>%s</nick></presence>"
% (HOST, TURNSERVER, ROOMNAME, ROOMDOMAIN, self.userId)
)
self.muc = {"users": []}
for p in res.body.findAll("presence"):
u = {}
u["shortJid"] = p["from"].split("/")[1]
if p.c and p.c.nick:
u["nick"] = p.c.nick.string
self.muc["users"].append(u)
print("muc: ", self.muc)
# wait for stuff
while True:
print("waiting...")
res = self.sendIq("")
print("got from stream: ", res)
if res.body.iq:
jingles = res.body.iq.findAll("jingle")
if len(jingles):
self.callfrom = res.body.iq["from"]
self.handleInvite(jingles[0])
elif "type" in res.body and res.body["type"] == "terminate":
self.running = False
del xmppClients[self.matrixRoom]
return
def handleInvite(self, jingle):
self.initiator = jingle["initiator"]
self.callsid = jingle["sid"]
p = subprocess.Popen(
["node", "unjingle/unjingle.js", "--jingle"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
)
print("raw jingle invite", str(jingle))
sdp, out_err = p.communicate(str(jingle))
print("transformed remote offer sdp", sdp)
inviteEvent = {
"offer": {"type": "offer", "sdp": sdp},
"call_id": self.matrixCallId,
"version": 0,
"lifetime": 30000,
}
matrixCli.sendEvent(self.matrixRoom, "m.call.invite", inviteEvent)
matrixCli = TrivialMatrixClient(ACCESS_TOKEN) # Undefined name
gevent.joinall([gevent.spawn(matrixLoop)])

@@ -1,188 +0,0 @@
diff --git a/syweb/webclient/app/components/matrix/matrix-call.js b/syweb/webclient/app/components/matrix/matrix-call.js
index 9fbfff0..dc68077 100644
--- a/syweb/webclient/app/components/matrix/matrix-call.js
+++ b/syweb/webclient/app/components/matrix/matrix-call.js
@@ -16,6 +16,45 @@ limitations under the License.
'use strict';
+
+function sendKeyframe(pc) {
+ console.log('sendkeyframe', pc.iceConnectionState);
+ if (pc.iceConnectionState !== 'connected') return; // safe...
+ pc.setRemoteDescription(
+ pc.remoteDescription,
+ function () {
+ pc.createAnswer(
+ function (modifiedAnswer) {
+ pc.setLocalDescription(
+ modifiedAnswer,
+ function () {
+ // noop
+ },
+ function (error) {
+ console.log('triggerKeyframe setLocalDescription failed', error);
+ messageHandler.showError();
+ }
+ );
+ },
+ function (error) {
+ console.log('triggerKeyframe createAnswer failed', error);
+ messageHandler.showError();
+ }
+ );
+ },
+ function (error) {
+ console.log('triggerKeyframe setRemoteDescription failed', error);
+ messageHandler.showError();
+ }
+ );
+}
+
+
+
+
+
+
+
var forAllVideoTracksOnStream = function(s, f) {
var tracks = s.getVideoTracks();
for (var i = 0; i < tracks.length; i++) {
@@ -83,7 +122,7 @@ angular.module('MatrixCall', [])
}
// FIXME: we should prevent any calls from being placed or accepted before this has finished
- MatrixCall.getTurnServer();
+ //MatrixCall.getTurnServer();
MatrixCall.CALL_TIMEOUT = 60000;
MatrixCall.FALLBACK_STUN_SERVER = 'stun:stun.l.google.com:19302';
@@ -132,6 +171,22 @@ angular.module('MatrixCall', [])
pc.onsignalingstatechange = function() { self.onSignallingStateChanged(); };
pc.onicecandidate = function(c) { self.gotLocalIceCandidate(c); };
pc.onaddstream = function(s) { self.onAddStream(s); };
+
+ var datachan = pc.createDataChannel('RTCDataChannel', {
+ reliable: false
+ });
+ console.log("data chan: "+datachan);
+ datachan.onopen = function() {
+ console.log("data channel open");
+ };
+ datachan.onmessage = function() {
+ console.log("data channel message");
+ };
+ pc.ondatachannel = function(event) {
+ console.log("have data channel");
+ event.channel.binaryType = 'blob';
+ };
+
return pc;
}
@@ -200,6 +255,12 @@ angular.module('MatrixCall', [])
}, this.msg.lifetime - event.age);
};
+ MatrixCall.prototype.receivedInvite = function(event) {
+ console.log("Got second invite for call "+this.call_id);
+ this.peerConn.setRemoteDescription(new RTCSessionDescription(this.msg.offer), this.onSetRemoteDescriptionSuccess, this.onSetRemoteDescriptionError);
+ };
+
+
// perverse as it may seem, sometimes we want to instantiate a call with a hangup message
// (because when getting the state of the room on load, events come in reverse order and
// we want to remember that a call has been hung up)
@@ -349,7 +410,7 @@ angular.module('MatrixCall', [])
'mandatory': {
'OfferToReceiveAudio': true,
'OfferToReceiveVideo': this.type == 'video'
- },
+ }
};
this.peerConn.createAnswer(function(d) { self.createdAnswer(d); }, function(e) {}, constraints);
// This can't be in an apply() because it's called by a predecessor call under glare conditions :(
@@ -359,8 +420,20 @@ angular.module('MatrixCall', [])
MatrixCall.prototype.gotLocalIceCandidate = function(event) {
if (event.candidate) {
console.log("Got local ICE "+event.candidate.sdpMid+" candidate: "+event.candidate.candidate);
- this.sendCandidate(event.candidate);
- }
+ //this.sendCandidate(event.candidate);
+ } else {
+ console.log("have all candidates, sending answer");
+ var content = {
+ version: 0,
+ call_id: this.call_id,
+ answer: this.peerConn.localDescription
+ };
+ this.sendEventWithRetry('m.call.answer', content);
+ var self = this;
+ $rootScope.$apply(function() {
+ self.state = 'connecting';
+ });
+ }
}
MatrixCall.prototype.gotRemoteIceCandidate = function(cand) {
@@ -418,15 +491,6 @@ angular.module('MatrixCall', [])
console.log("Created answer: "+description);
var self = this;
this.peerConn.setLocalDescription(description, function() {
- var content = {
- version: 0,
- call_id: self.call_id,
- answer: self.peerConn.localDescription
- };
- self.sendEventWithRetry('m.call.answer', content);
- $rootScope.$apply(function() {
- self.state = 'connecting';
- });
}, function() { console.log("Error setting local description!"); } );
};
@@ -448,6 +512,9 @@ angular.module('MatrixCall', [])
$rootScope.$apply(function() {
self.state = 'connected';
self.didConnect = true;
+ /*$timeout(function() {
+ sendKeyframe(self.peerConn);
+ }, 1000);*/
});
} else if (this.peerConn.iceConnectionState == 'failed') {
this.hangup('ice_failed');
@@ -518,6 +585,7 @@ angular.module('MatrixCall', [])
MatrixCall.prototype.onRemoteStreamEnded = function(event) {
console.log("Remote stream ended");
+ return;
var self = this;
$rootScope.$apply(function() {
self.state = 'ended';
diff --git a/syweb/webclient/app/components/matrix/matrix-phone-service.js b/syweb/webclient/app/components/matrix/matrix-phone-service.js
index 55dbbf5..272fa27 100644
--- a/syweb/webclient/app/components/matrix/matrix-phone-service.js
+++ b/syweb/webclient/app/components/matrix/matrix-phone-service.js
@@ -48,6 +48,13 @@ angular.module('matrixPhoneService', [])
return;
}
+ // do we already have an entry for this call ID?
+ var existingEntry = matrixPhoneService.allCalls[msg.call_id];
+ if (existingEntry) {
+ existingEntry.receivedInvite(msg);
+ return;
+ }
+
var call = undefined;
if (!isLive) {
// if this event wasn't live then this call may already be over
@@ -108,7 +115,7 @@ angular.module('matrixPhoneService', [])
call.hangup();
}
} else {
- $rootScope.$broadcast(matrixPhoneService.INCOMING_CALL_EVENT, call);
+ $rootScope.$broadcast(matrixPhoneService.INCOMING_CALL_EVENT, call);
}
} else if (event.type == 'm.call.answer') {
var call = matrixPhoneService.allCalls[msg.call_id];

@@ -1,712 +0,0 @@
/* jshint -W117 */
// SDP STUFF
function SDP(sdp) {
this.media = sdp.split('\r\nm=');
for (var i = 1; i < this.media.length; i++) {
this.media[i] = 'm=' + this.media[i];
if (i != this.media.length - 1) {
this.media[i] += '\r\n';
}
}
this.session = this.media.shift() + '\r\n';
this.raw = this.session + this.media.join('');
}
exports.SDP = SDP;
var jsdom = require("jsdom");
var window = jsdom.jsdom().parentWindow;
var $ = require('jquery')(window);
var SDPUtil = require('./strophe.jingle.sdp.util.js').SDPUtil;
/**
* Returns map of MediaChannel mapped per channel idx.
*/
SDP.prototype.getMediaSsrcMap = function() {
var self = this;
var media_ssrcs = {};
for (channelNum = 0; channelNum < self.media.length; channelNum++) {
modified = true;
tmp = SDPUtil.find_lines(self.media[channelNum], 'a=ssrc:');
var type = SDPUtil.parse_mid(SDPUtil.find_line(self.media[channelNum], 'a=mid:'));
var channel = new MediaChannel(channelNum, type);
media_ssrcs[channelNum] = channel;
tmp.forEach(function (line) {
var linessrc = line.substring(7).split(' ')[0];
// allocate new ChannelSsrc
if(!channel.ssrcs[linessrc]) {
channel.ssrcs[linessrc] = new ChannelSsrc(linessrc, type);
}
channel.ssrcs[linessrc].lines.push(line);
});
tmp = SDPUtil.find_lines(self.media[channelNum], 'a=ssrc-group:');
tmp.forEach(function(line){
var semantics = line.substr(0, idx).substr(13);
var ssrcs = line.substr(14 + semantics.length).split(' ');
if (ssrcs.length != 0) {
var ssrcGroup = new ChannelSsrcGroup(semantics, ssrcs);
channel.ssrcGroups.push(ssrcGroup);
}
});
}
return media_ssrcs;
};
/**
* Returns <tt>true</tt> if this SDP contains given SSRC.
* @param ssrc the ssrc to check.
* @returns {boolean} <tt>true</tt> if this SDP contains given SSRC.
*/
SDP.prototype.containsSSRC = function(ssrc) {
var channels = this.getMediaSsrcMap();
var contains = false;
Object.keys(channels).forEach(function(chNumber){
var channel = channels[chNumber];
//console.log("Check", channel, ssrc);
if(Object.keys(channel.ssrcs).indexOf(ssrc) != -1){
contains = true;
}
});
return contains;
};
/**
* Returns map of MediaChannel that contains only media not contained in <tt>otherSdp</tt>. Mapped by channel idx.
* @param otherSdp the other SDP to check ssrc with.
*/
SDP.prototype.getNewMedia = function(otherSdp) {
// this could be useful in Array.prototype.
function arrayEquals(array) {
// if the other array is a falsy value, return
if (!array)
return false;
// compare lengths - can save a lot of time
if (this.length != array.length)
return false;
for (var i = 0, l=this.length; i < l; i++) {
// Check if we have nested arrays
if (this[i] instanceof Array && array[i] instanceof Array) {
// recurse into the nested arrays
if (!this[i].equals(array[i]))
return false;
}
else if (this[i] != array[i]) {
// Warning - two different object instances will never be equal: {x:20} != {x:20}
return false;
}
}
return true;
}
var myMedia = this.getMediaSsrcMap();
var othersMedia = otherSdp.getMediaSsrcMap();
var newMedia = {};
Object.keys(othersMedia).forEach(function(channelNum) {
var myChannel = myMedia[channelNum];
var othersChannel = othersMedia[channelNum];
if(!myChannel && othersChannel) {
// Add whole channel
newMedia[channelNum] = othersChannel;
return;
}
// Look for new ssrcs across the channel
Object.keys(othersChannel.ssrcs).forEach(function(ssrc) {
if(Object.keys(myChannel.ssrcs).indexOf(ssrc) === -1) {
// Allocate channel if we've found ssrc that doesn't exist in our channel
if(!newMedia[channelNum]){
newMedia[channelNum] = new MediaChannel(othersChannel.chNumber, othersChannel.mediaType);
}
newMedia[channelNum].ssrcs[ssrc] = othersChannel.ssrcs[ssrc];
}
});
// Look for new ssrc groups across the channels
othersChannel.ssrcGroups.forEach(function(otherSsrcGroup){
// try to match the other ssrc-group with an ssrc-group of ours
var matched = false;
for (var i = 0; i < myChannel.ssrcGroups.length; i++) {
var mySsrcGroup = myChannel.ssrcGroups[i];
if (otherSsrcGroup.semantics == mySsrcGroup.semantics
&& arrayEquals.apply(otherSsrcGroup.ssrcs, [mySsrcGroup.ssrcs])) {
matched = true;
break;
}
}
if (!matched) {
// Allocate channel if we've found an ssrc-group that doesn't
// exist in our channel
if(!newMedia[channelNum]){
newMedia[channelNum] = new MediaChannel(othersChannel.chNumber, othersChannel.mediaType);
}
newMedia[channelNum].ssrcGroups.push(otherSsrcGroup);
}
});
});
return newMedia;
};
// remove iSAC and CN from SDP
SDP.prototype.mangle = function () {
var i, j, mline, lines, rtpmap, newdesc;
for (i = 0; i < this.media.length; i++) {
lines = this.media[i].split('\r\n');
lines.pop(); // remove empty last element
mline = SDPUtil.parse_mline(lines.shift());
if (mline.media != 'audio')
continue;
newdesc = '';
mline.fmt.length = 0;
for (j = 0; j < lines.length; j++) {
if (lines[j].substr(0, 9) == 'a=rtpmap:') {
rtpmap = SDPUtil.parse_rtpmap(lines[j]);
if (rtpmap.name == 'CN' || rtpmap.name == 'ISAC')
continue;
mline.fmt.push(rtpmap.id);
newdesc += lines[j] + '\r\n';
} else {
newdesc += lines[j] + '\r\n';
}
}
this.media[i] = SDPUtil.build_mline(mline) + '\r\n';
this.media[i] += newdesc;
}
this.raw = this.session + this.media.join('');
};
// remove lines matching prefix from session section
SDP.prototype.removeSessionLines = function(prefix) {
var self = this;
var lines = SDPUtil.find_lines(this.session, prefix);
lines.forEach(function(line) {
self.session = self.session.replace(line + '\r\n', '');
});
this.raw = this.session + this.media.join('');
return lines;
}
// remove lines matching prefix from a media section specified by mediaindex
// TODO: non-numeric mediaindex could match mid
SDP.prototype.removeMediaLines = function(mediaindex, prefix) {
var self = this;
var lines = SDPUtil.find_lines(this.media[mediaindex], prefix);
lines.forEach(function(line) {
self.media[mediaindex] = self.media[mediaindex].replace(line + '\r\n', '');
});
this.raw = this.session + this.media.join('');
return lines;
}
// add content's to a jingle element
SDP.prototype.toJingle = function (elem, thecreator) {
var i, j, k, mline, ssrc, rtpmap, tmp, line, lines;
var self = this;
// new bundle plan
if (SDPUtil.find_line(this.session, 'a=group:')) {
lines = SDPUtil.find_lines(this.session, 'a=group:');
for (i = 0; i < lines.length; i++) {
tmp = lines[i].split(' ');
var semantics = tmp.shift().substr(8);
elem.c('group', {xmlns: 'urn:xmpp:jingle:apps:grouping:0', semantics:semantics});
for (j = 0; j < tmp.length; j++) {
elem.c('content', {name: tmp[j]}).up();
}
elem.up();
}
}
// old bundle plan, to be removed
var bundle = [];
if (SDPUtil.find_line(this.session, 'a=group:BUNDLE')) {
bundle = SDPUtil.find_line(this.session, 'a=group:BUNDLE ').split(' ');
bundle.shift();
}
for (i = 0; i < this.media.length; i++) {
mline = SDPUtil.parse_mline(this.media[i].split('\r\n')[0]);
if (!(mline.media === 'audio' ||
mline.media === 'video' ||
mline.media === 'application'))
{
continue;
}
if (SDPUtil.find_line(this.media[i], 'a=ssrc:')) {
ssrc = SDPUtil.find_line(this.media[i], 'a=ssrc:').substring(7).split(' ')[0]; // take the first
} else {
ssrc = false;
}
elem.c('content', {creator: thecreator, name: mline.media});
if (SDPUtil.find_line(this.media[i], 'a=mid:')) {
// prefer identifier from a=mid if present
var mid = SDPUtil.parse_mid(SDPUtil.find_line(this.media[i], 'a=mid:'));
elem.attrs({ name: mid });
// old BUNDLE plan, to be removed
if (bundle.indexOf(mid) !== -1) {
elem.c('bundle', {xmlns: 'http://estos.de/ns/bundle'}).up();
bundle.splice(bundle.indexOf(mid), 1);
}
}
if (SDPUtil.find_line(this.media[i], 'a=rtpmap:').length)
{
elem.c('description',
{xmlns: 'urn:xmpp:jingle:apps:rtp:1',
media: mline.media });
if (ssrc) {
elem.attrs({ssrc: ssrc});
}
for (j = 0; j < mline.fmt.length; j++) {
rtpmap = SDPUtil.find_line(this.media[i], 'a=rtpmap:' + mline.fmt[j]);
elem.c('payload-type', SDPUtil.parse_rtpmap(rtpmap));
// put any 'a=fmtp:' + mline.fmt[j] lines into <param name=foo value=bar/>
if (SDPUtil.find_line(this.media[i], 'a=fmtp:' + mline.fmt[j])) {
tmp = SDPUtil.parse_fmtp(SDPUtil.find_line(this.media[i], 'a=fmtp:' + mline.fmt[j]));
for (k = 0; k < tmp.length; k++) {
elem.c('parameter', tmp[k]).up();
}
}
this.RtcpFbToJingle(i, elem, mline.fmt[j]); // XEP-0293 -- map a=rtcp-fb
elem.up();
}
if (SDPUtil.find_line(this.media[i], 'a=crypto:', this.session)) {
elem.c('encryption', {required: 1});
var crypto = SDPUtil.find_lines(this.media[i], 'a=crypto:', this.session);
crypto.forEach(function(line) {
elem.c('crypto', SDPUtil.parse_crypto(line)).up();
});
elem.up(); // end of encryption
}
if (ssrc) {
// new style mapping
elem.c('source', { ssrc: ssrc, xmlns: 'urn:xmpp:jingle:apps:rtp:ssma:0' });
// FIXME: group by ssrc and support multiple different ssrcs
var ssrclines = SDPUtil.find_lines(this.media[i], 'a=ssrc:');
ssrclines.forEach(function(line) {
idx = line.indexOf(' ');
var linessrc = line.substr(0, idx).substr(7);
if (linessrc != ssrc) {
elem.up();
ssrc = linessrc;
elem.c('source', { ssrc: ssrc, xmlns: 'urn:xmpp:jingle:apps:rtp:ssma:0' });
}
var kv = line.substr(idx + 1);
elem.c('parameter');
if (kv.indexOf(':') == -1) {
elem.attrs({ name: kv });
} else {
elem.attrs({ name: kv.split(':', 2)[0] });
elem.attrs({ value: kv.split(':', 2)[1] });
}
elem.up();
});
elem.up();
// old proprietary mapping, to be removed at some point
tmp = SDPUtil.parse_ssrc(this.media[i]);
tmp.xmlns = 'http://estos.de/ns/ssrc';
tmp.ssrc = ssrc;
elem.c('ssrc', tmp).up(); // ssrc is part of description
// XEP-0339 handle ssrc-group attributes
var ssrc_group_lines = SDPUtil.find_lines(this.media[i], 'a=ssrc-group:');
ssrc_group_lines.forEach(function(line) {
var idx = line.indexOf(' ');
var semantics = line.substr(0, idx).substr(13);
var ssrcs = line.substr(14 + semantics.length).split(' ');
if (ssrcs.length != 0) {
elem.c('ssrc-group', { semantics: semantics, xmlns: 'urn:xmpp:jingle:apps:rtp:ssma:0' });
ssrcs.forEach(function(ssrc) {
elem.c('source', { ssrc: ssrc })
.up();
});
elem.up();
}
});
}
if (SDPUtil.find_line(this.media[i], 'a=rtcp-mux')) {
elem.c('rtcp-mux').up();
}
// XEP-0293 -- map a=rtcp-fb:*
this.RtcpFbToJingle(i, elem, '*');
// XEP-0294
if (SDPUtil.find_line(this.media[i], 'a=extmap:')) {
lines = SDPUtil.find_lines(this.media[i], 'a=extmap:');
for (j = 0; j < lines.length; j++) {
tmp = SDPUtil.parse_extmap(lines[j]);
elem.c('rtp-hdrext', { xmlns: 'urn:xmpp:jingle:apps:rtp:rtp-hdrext:0',
uri: tmp.uri,
id: tmp.value });
if (tmp.hasOwnProperty('direction')) {
switch (tmp.direction) {
case 'sendonly':
elem.attrs({senders: 'responder'});
break;
case 'recvonly':
elem.attrs({senders: 'initiator'});
break;
case 'sendrecv':
elem.attrs({senders: 'both'});
break;
case 'inactive':
elem.attrs({senders: 'none'});
break;
}
}
// TODO: handle params
elem.up();
}
}
elem.up(); // end of description
}
// map ice-ufrag/pwd, dtls fingerprint, candidates
this.TransportToJingle(i, elem);
if (SDPUtil.find_line(this.media[i], 'a=sendrecv', this.session)) {
elem.attrs({senders: 'both'});
} else if (SDPUtil.find_line(this.media[i], 'a=sendonly', this.session)) {
elem.attrs({senders: 'initiator'});
} else if (SDPUtil.find_line(this.media[i], 'a=recvonly', this.session)) {
elem.attrs({senders: 'responder'});
} else if (SDPUtil.find_line(this.media[i], 'a=inactive', this.session)) {
elem.attrs({senders: 'none'});
}
if (mline.port == '0') {
// estos hack to reject an m-line
elem.attrs({senders: 'rejected'});
}
elem.up(); // end of content
}
elem.up();
return elem;
};
SDP.prototype.TransportToJingle = function (mediaindex, elem) {
var i = mediaindex;
var tmp;
var self = this;
elem.c('transport');
// XEP-0343 DTLS/SCTP
if (SDPUtil.find_line(this.media[mediaindex], 'a=sctpmap:'))
{
var sctpmap = SDPUtil.find_line(
this.media[i], 'a=sctpmap:', self.session);
if (sctpmap)
{
var sctpAttrs = SDPUtil.parse_sctpmap(sctpmap);
elem.c('sctpmap',
{
xmlns: 'urn:xmpp:jingle:transports:dtls-sctp:1',
number: sctpAttrs[0], /* SCTP port */
protocol: sctpAttrs[1], /* protocol */
});
// Optional stream count attribute
if (sctpAttrs.length > 2)
elem.attrs({ streams: sctpAttrs[2]});
elem.up();
}
}
// XEP-0320
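// e.g. (illustrative) "a=fingerprint:sha-256 AB:CD:..." becomes
// <fingerprint xmlns="urn:xmpp:jingle:apps:dtls:0" hash="sha-256">AB:CD:...</fingerprint>,
// with a setup attribute taken from any "a=setup:" line.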
var fingerprints = SDPUtil.find_lines(this.media[mediaindex], 'a=fingerprint:', this.session);
fingerprints.forEach(function(line) {
tmp = SDPUtil.parse_fingerprint(line);
tmp.xmlns = 'urn:xmpp:jingle:apps:dtls:0';
elem.c('fingerprint').t(tmp.fingerprint);
delete tmp.fingerprint;
line = SDPUtil.find_line(self.media[mediaindex], 'a=setup:', self.session);
if (line) {
tmp.setup = line.substr(8);
}
elem.attrs(tmp);
elem.up(); // end of fingerprint
});
tmp = SDPUtil.iceparams(this.media[mediaindex], this.session);
if (tmp) {
tmp.xmlns = 'urn:xmpp:jingle:transports:ice-udp:1';
elem.attrs(tmp);
// XEP-0176
if (SDPUtil.find_line(this.media[mediaindex], 'a=candidate:', this.session)) { // add any a=candidate lines
var lines = SDPUtil.find_lines(this.media[mediaindex], 'a=candidate:', this.session);
lines.forEach(function (line) {
elem.c('candidate', SDPUtil.candidateToJingle(line)).up();
});
}
}
elem.up(); // end of transport
};
SDP.prototype.RtcpFbToJingle = function (mediaindex, elem, payloadtype) { // XEP-0293
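// e.g. (illustrative) "a=rtcp-fb:96 nack pli" becomes
// <rtcp-fb xmlns="urn:xmpp:jingle:apps:rtp:rtcp-fb:0" type="nack" subtype="pli"/>,
// while "trr-int" entries map to <rtcp-fb-trr-int/> elements.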
var lines = SDPUtil.find_lines(this.media[mediaindex], 'a=rtcp-fb:' + payloadtype);
lines.forEach(function (line) {
var tmp = SDPUtil.parse_rtcpfb(line);
if (tmp.type == 'trr-int') {
elem.c('rtcp-fb-trr-int', {xmlns: 'urn:xmpp:jingle:apps:rtp:rtcp-fb:0', value: tmp.params[0]});
elem.up();
} else {
elem.c('rtcp-fb', {xmlns: 'urn:xmpp:jingle:apps:rtp:rtcp-fb:0', type: tmp.type});
if (tmp.params.length > 0) {
elem.attrs({'subtype': tmp.params[0]});
}
elem.up();
}
});
};
SDP.prototype.RtcpFbFromJingle = function (elem, payloadtype) { // XEP-0293
var media = '';
var tmp = elem.find('>rtcp-fb-trr-int[xmlns="urn:xmpp:jingle:apps:rtp:rtcp-fb:0"]');
if (tmp.length) {
media += 'a=rtcp-fb:' + '*' + ' ' + 'trr-int' + ' ';
if (tmp.attr('value')) {
media += tmp.attr('value');
} else {
media += '0';
}
media += '\r\n';
}
tmp = elem.find('>rtcp-fb[xmlns="urn:xmpp:jingle:apps:rtp:rtcp-fb:0"]');
tmp.each(function () {
media += 'a=rtcp-fb:' + payloadtype + ' ' + $(this).attr('type');
if ($(this).attr('subtype')) {
media += ' ' + $(this).attr('subtype');
}
media += '\r\n';
});
return media;
};
// construct an SDP from a jingle stanza
SDP.prototype.fromJingle = function (jingle) {
var self = this;
this.raw = 'v=0\r\n' +
'o=- ' + '1923518516' + ' 2 IN IP4 0.0.0.0\r\n' + // FIXME
's=-\r\n' +
't=0 0\r\n';
// http://tools.ietf.org/html/draft-ietf-mmusic-sdp-bundle-negotiation-04#section-8
if ($(jingle).find('>group[xmlns="urn:xmpp:jingle:apps:grouping:0"]').length) {
$(jingle).find('>group[xmlns="urn:xmpp:jingle:apps:grouping:0"]').each(function (idx, group) {
var contents = $(group).find('>content').map(function (idx, content) {
return content.getAttribute('name');
}).get();
if (contents.length > 0) {
self.raw += 'a=group:' + (group.getAttribute('semantics') || group.getAttribute('type')) + ' ' + contents.join(' ') + '\r\n';
}
});
} else if ($(jingle).find('>group[xmlns="urn:ietf:rfc:5888"]').length) {
// temporary namespace, not to be used; to be removed soon.
$(jingle).find('>group[xmlns="urn:ietf:rfc:5888"]').each(function (idx, group) {
var contents = $(group).find('>content').map(function (idx, content) {
return content.getAttribute('name');
}).get();
if (group.getAttribute('type') !== null && contents.length > 0) {
self.raw += 'a=group:' + group.getAttribute('type') + ' ' + contents.join(' ') + '\r\n';
}
});
} else {
// for backward compatibility, to be removed soon
// assume all contents are in the same bundle group, can be improved upon later
var bundle = $(jingle).find('>content').filter(function (idx, content) {
//elem.c('bundle', {xmlns:'http://estos.de/ns/bundle'});
return $(content).find('>bundle').length > 0;
}).map(function (idx, content) {
return content.getAttribute('name');
}).get();
if (bundle.length) {
this.raw += 'a=group:BUNDLE ' + bundle.join(' ') + '\r\n';
}
}
this.session = this.raw;
jingle.find('>content').each(function () {
var m = self.jingle2media($(this));
self.media.push(m);
});
// reconstruct msid-semantic -- apparently not necessary
/*
var msid = SDPUtil.parse_ssrc(this.raw);
if (msid.hasOwnProperty('mslabel')) {
this.session += "a=msid-semantic: WMS " + msid.mslabel + "\r\n";
}
*/
this.raw = this.session + this.media.join('');
};
// translate a jingle content element into an SDP media part
SDP.prototype.jingle2media = function (content) {
var media = '',
desc = content.find('description'),
ssrc = desc.attr('ssrc'),
self = this,
tmp;
var sctp = content.find(
'>transport>sctpmap[xmlns="urn:xmpp:jingle:transports:dtls-sctp:1"]');
tmp = { media: desc.attr('media') };
tmp.port = '1';
if (content.attr('senders') == 'rejected') {
// estos hack to reject an m-line.
tmp.port = '0';
}
if (content.find('>transport>fingerprint').length || desc.find('encryption').length) {
if (sctp.length)
tmp.proto = 'DTLS/SCTP';
else
tmp.proto = 'RTP/SAVPF';
} else {
tmp.proto = 'RTP/AVPF';
}
if (!sctp.length)
{
tmp.fmt = desc.find('payload-type').map(
function () { return this.getAttribute('id'); }).get();
media += SDPUtil.build_mline(tmp) + '\r\n';
}
else
{
media += 'm=application 1 DTLS/SCTP ' + sctp.attr('number') + '\r\n';
media += 'a=sctpmap:' + sctp.attr('number') +
' ' + sctp.attr('protocol');
var streamCount = sctp.attr('streams');
if (streamCount)
media += ' ' + streamCount + '\r\n';
else
media += '\r\n';
}
media += 'c=IN IP4 0.0.0.0\r\n';
if (!sctp.length)
media += 'a=rtcp:1 IN IP4 0.0.0.0\r\n';
//tmp = content.find('>transport[xmlns="urn:xmpp:jingle:transports:ice-udp:1"]');
tmp = content.find('>bundle>transport[xmlns="urn:xmpp:jingle:transports:ice-udp:1"]');
//console.log('transports: '+content.find('>transport[xmlns="urn:xmpp:jingle:transports:ice-udp:1"]').length);
//console.log('bundle.transports: '+content.find('>bundle>transport[xmlns="urn:xmpp:jingle:transports:ice-udp:1"]').length);
//console.log("tmp fingerprint: "+tmp.find('>fingerprint').innerHTML);
if (tmp.length) {
if (tmp.attr('ufrag')) {
media += SDPUtil.build_iceufrag(tmp.attr('ufrag')) + '\r\n';
}
if (tmp.attr('pwd')) {
media += SDPUtil.build_icepwd(tmp.attr('pwd')) + '\r\n';
}
tmp.find('>fingerprint').each(function () {
// FIXME: check namespace at some point
media += 'a=fingerprint:' + this.getAttribute('hash');
media += ' ' + $(this).text();
media += '\r\n';
//console.log("mline "+media);
if (this.getAttribute('setup')) {
media += 'a=setup:' + this.getAttribute('setup') + '\r\n';
}
});
}
switch (content.attr('senders')) {
case 'initiator':
media += 'a=sendonly\r\n';
break;
case 'responder':
media += 'a=recvonly\r\n';
break;
case 'none':
media += 'a=inactive\r\n';
break;
case 'both':
media += 'a=sendrecv\r\n';
break;
}
media += 'a=mid:' + content.attr('name') + '\r\n';
/*if (content.attr('name') == 'video') {
media += 'a=x-google-flag:conference' + '\r\n';
}*/
// <description><rtcp-mux/></description>
// see http://code.google.com/p/libjingle/issues/detail?id=309 -- no spec though
// and http://mail.jabber.org/pipermail/jingle/2011-December/001761.html
if (desc.find('rtcp-mux').length) {
media += 'a=rtcp-mux\r\n';
}
if (desc.find('encryption').length) {
desc.find('encryption>crypto').each(function () {
media += 'a=crypto:' + this.getAttribute('tag');
media += ' ' + this.getAttribute('crypto-suite');
media += ' ' + this.getAttribute('key-params');
if (this.getAttribute('session-params')) {
media += ' ' + this.getAttribute('session-params');
}
media += '\r\n';
});
}
desc.find('payload-type').each(function () {
media += SDPUtil.build_rtpmap(this) + '\r\n';
if ($(this).find('>parameter').length) {
media += 'a=fmtp:' + this.getAttribute('id') + ' ';
media += $(this).find('parameter').map(function () { return (this.getAttribute('name') ? (this.getAttribute('name') + '=') : '') + this.getAttribute('value'); }).get().join('; ');
media += '\r\n';
}
// xep-0293
media += self.RtcpFbFromJingle($(this), this.getAttribute('id'));
});
// xep-0293
media += self.RtcpFbFromJingle(desc, '*');
// xep-0294
tmp = desc.find('>rtp-hdrext[xmlns="urn:xmpp:jingle:apps:rtp:rtp-hdrext:0"]');
tmp.each(function () {
media += 'a=extmap:' + this.getAttribute('id') + ' ' + this.getAttribute('uri') + '\r\n';
});
content.find('>bundle>transport[xmlns="urn:xmpp:jingle:transports:ice-udp:1"]>candidate').each(function () {
media += SDPUtil.candidateFromJingle(this);
});
// XEP-0339 handle ssrc-group attributes
tmp = content.find('description>ssrc-group[xmlns="urn:xmpp:jingle:apps:rtp:ssma:0"]').each(function() {
var semantics = this.getAttribute('semantics');
var ssrcs = $(this).find('>source').map(function() {
return this.getAttribute('ssrc');
}).get();
if (ssrcs.length != 0) {
media += 'a=ssrc-group:' + semantics + ' ' + ssrcs.join(' ') + '\r\n';
}
});
tmp = content.find('description>source[xmlns="urn:xmpp:jingle:apps:rtp:ssma:0"]');
tmp.each(function () {
var ssrc = this.getAttribute('ssrc');
$(this).find('>parameter').each(function () {
media += 'a=ssrc:' + ssrc + ' ' + this.getAttribute('name');
if (this.getAttribute('value') && this.getAttribute('value').length)
media += ':' + this.getAttribute('value');
media += '\r\n';
});
});
if (tmp.length === 0) {
// fallback to proprietary mapping of a=ssrc lines
tmp = content.find('description>ssrc[xmlns="http://estos.de/ns/ssrc"]');
if (tmp.length) {
media += 'a=ssrc:' + ssrc + ' cname:' + tmp.attr('cname') + '\r\n';
media += 'a=ssrc:' + ssrc + ' msid:' + tmp.attr('msid') + '\r\n';
media += 'a=ssrc:' + ssrc + ' mslabel:' + tmp.attr('mslabel') + '\r\n';
media += 'a=ssrc:' + ssrc + ' label:' + tmp.attr('label') + '\r\n';
}
}
return media;
};

View file

@ -1,408 +0,0 @@
/**
* Contains utility classes used in SDP class.
*
*/
/**
* Class holds a=ssrc lines and media type a=mid
* @param ssrc synchronization source identifier number (a=ssrc lines from SDP)
* @param type media type, e.g. "audio" or "video" (a=mid from SDP)
* @constructor
*/
function ChannelSsrc(ssrc, type) {
this.ssrc = ssrc;
this.type = type;
this.lines = [];
}
/**
* Class holds a=ssrc-group: lines
* @param semantics
* @param ssrcs
* @constructor
*/
function ChannelSsrcGroup(semantics, ssrcs, line) {
this.semantics = semantics;
this.ssrcs = ssrcs;
}
/**
* Helper class representing a media channel; a container for ChannelSsrc objects that holds the channel index and media type.
* @param channelNumber channel idx in SDP media array.
* @param mediaType media type(a=mid)
* @constructor
*/
function MediaChannel(channelNumber, mediaType) {
/**
* SDP channel number
* @type {*}
*/
this.chNumber = channelNumber;
/**
* Channel media type(a=mid)
* @type {*}
*/
this.mediaType = mediaType;
/**
* The maps of ssrc numbers to ChannelSsrc objects.
*/
this.ssrcs = {};
/**
* The array of ChannelSsrcGroup objects.
* @type {Array}
*/
this.ssrcGroups = [];
}
SDPUtil = {
iceparams: function (mediadesc, sessiondesc) {
var data = null;
if (SDPUtil.find_line(mediadesc, 'a=ice-ufrag:', sessiondesc) &&
SDPUtil.find_line(mediadesc, 'a=ice-pwd:', sessiondesc)) {
data = {
ufrag: SDPUtil.parse_iceufrag(SDPUtil.find_line(mediadesc, 'a=ice-ufrag:', sessiondesc)),
pwd: SDPUtil.parse_icepwd(SDPUtil.find_line(mediadesc, 'a=ice-pwd:', sessiondesc))
};
}
return data;
},
parse_iceufrag: function (line) {
return line.substring(12);
},
build_iceufrag: function (frag) {
return 'a=ice-ufrag:' + frag;
},
parse_icepwd: function (line) {
return line.substring(10);
},
build_icepwd: function (pwd) {
return 'a=ice-pwd:' + pwd;
},
parse_mid: function (line) {
return line.substring(6);
},
parse_mline: function (line) {
var parts = line.substring(2).split(' '),
data = {};
data.media = parts.shift();
data.port = parts.shift();
data.proto = parts.shift();
if (parts[parts.length - 1] === '') { // trailing whitespace
parts.pop();
}
data.fmt = parts;
return data;
},
build_mline: function (mline) {
return 'm=' + mline.media + ' ' + mline.port + ' ' + mline.proto + ' ' + mline.fmt.join(' ');
},
parse_rtpmap: function (line) {
var parts = line.substring(9).split(' '),
data = {};
data.id = parts.shift();
parts = parts[0].split('/');
data.name = parts.shift();
data.clockrate = parts.shift();
data.channels = parts.length ? parts.shift() : '1';
return data;
},
/**
* Parses SDP line "a=sctpmap:..." and extracts SCTP port from it.
* @param line eg. "a=sctpmap:5000 webrtc-datachannel"
* @returns [SCTP port number, protocol, streams]
*/
parse_sctpmap: function (line)
{
var parts = line.substring(10).split(' ');
var sctpPort = parts[0];
var protocol = parts[1];
// Stream count is optional
var streamCount = parts.length > 2 ? parts[2] : null;
return [sctpPort, protocol, streamCount];// SCTP port
},
build_rtpmap: function (el) {
var line = 'a=rtpmap:' + el.getAttribute('id') + ' ' + el.getAttribute('name') + '/' + el.getAttribute('clockrate');
if (el.getAttribute('channels') && el.getAttribute('channels') != '1') {
line += '/' + el.getAttribute('channels');
}
return line;
},
parse_crypto: function (line) {
var parts = line.substring(9).split(' '),
data = {};
data.tag = parts.shift();
data['crypto-suite'] = parts.shift();
data['key-params'] = parts.shift();
if (parts.length) {
data['session-params'] = parts.join(' ');
}
return data;
},
parse_fingerprint: function (line) { // RFC 4572
var parts = line.substring(14).split(' '),
data = {};
data.hash = parts.shift();
data.fingerprint = parts.shift();
// TODO assert that fingerprint satisfies 2UHEX *(":" 2UHEX) ?
return data;
},
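/**
 * Parses an "a=fmtp:" line into name/value pairs, e.g. (illustrative)
 * "a=fmtp:111 minptime=10; useinbandfec=1" yields
 * [{name: 'minptime', value: '10'}, {name: 'useinbandfec', value: '1'}].
 */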
parse_fmtp: function (line) {
var parts = line.split(' '),
i, key, value,
data = [];
parts.shift();
parts = parts.join(' ').split(';');
for (i = 0; i < parts.length; i++) {
key = parts[i].split('=')[0];
while (key.length && key[0] == ' ') {
key = key.substring(1);
}
value = parts[i].split('=')[1];
if (key && value) {
data.push({name: key, value: value});
} else if (key) {
// rfc 4733 (DTMF) style stuff
data.push({name: '', value: key});
}
}
return data;
},
parse_icecandidate: function (line) {
var candidate = {},
elems = line.split(' ');
candidate.foundation = elems[0].substring(12);
candidate.component = elems[1];
candidate.protocol = elems[2].toLowerCase();
candidate.priority = elems[3];
candidate.ip = elems[4];
candidate.port = elems[5];
// elems[6] => "typ"
candidate.type = elems[7];
candidate.generation = 0; // default value, may be overwritten below
for (var i = 8; i < elems.length; i += 2) {
switch (elems[i]) {
case 'raddr':
candidate['rel-addr'] = elems[i + 1];
break;
case 'rport':
candidate['rel-port'] = elems[i + 1];
break;
case 'generation':
candidate.generation = elems[i + 1];
break;
case 'tcptype':
candidate.tcptype = elems[i + 1];
break;
default: // TODO
console.log('parse_icecandidate not translating "' + elems[i] + '" = "' + elems[i + 1] + '"');
}
}
candidate.network = '1';
candidate.id = Math.random().toString(36).substr(2, 10); // not applicable to SDP -- FIXME: should be unique, not just random
return candidate;
},
build_icecandidate: function (cand) {
var line = ['a=candidate:' + cand.foundation, cand.component, cand.protocol, cand.priority, cand.ip, cand.port, 'typ', cand.type].join(' ');
line += ' ';
switch (cand.type) {
case 'srflx':
case 'prflx':
case 'relay':
if (cand.hasOwnProperty('rel-addr') && cand.hasOwnProperty('rel-port')) {
line += 'raddr';
line += ' ';
line += cand['rel-addr'];
line += ' ';
line += 'rport';
line += ' ';
line += cand['rel-port'];
line += ' ';
}
break;
}
if (cand.hasOwnProperty('tcptype')) {
line += 'tcptype';
line += ' ';
line += cand.tcptype;
line += ' ';
}
line += 'generation';
line += ' ';
line += cand.hasOwnProperty('generation') ? cand.generation : '0';
return line;
},
parse_ssrc: function (desc) {
// proprietary mapping of a=ssrc lines
// TODO: see "Jingle RTP Source Description" by Juberti and P. Thatcher on google docs
// and parse according to that
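// e.g. (illustrative) a media part containing "a=ssrc:1234 cname:abcd"
// yields {cname: 'abcd'}; later values overwrite earlier ones per attribute name.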
var lines = desc.split('\r\n'),
data = {};
for (var i = 0; i < lines.length; i++) {
if (lines[i].substring(0, 7) == 'a=ssrc:') {
var idx = lines[i].indexOf(' ');
data[lines[i].substr(idx + 1).split(':', 2)[0]] = lines[i].substr(idx + 1).split(':', 2)[1];
}
}
return data;
},
parse_rtcpfb: function (line) {
var parts = line.substr(10).split(' ');
var data = {};
data.pt = parts.shift();
data.type = parts.shift();
data.params = parts;
return data;
},
parse_extmap: function (line) {
var parts = line.substr(9).split(' ');
var data = {};
data.value = parts.shift();
if (data.value.indexOf('/') != -1) {
data.direction = data.value.substr(data.value.indexOf('/') + 1);
data.value = data.value.substr(0, data.value.indexOf('/'));
} else {
data.direction = 'both';
}
data.uri = parts.shift();
data.params = parts;
return data;
},
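/**
 * Returns the first line in 'haystack' that starts with 'needle'. If nothing
 * matches and a session part is given, the session part is searched as a
 * fallback; returns false when there is no match at all.
 */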
find_line: function (haystack, needle, sessionpart) {
var lines = haystack.split('\r\n');
for (var i = 0; i < lines.length; i++) {
if (lines[i].substring(0, needle.length) == needle) {
return lines[i];
}
}
if (!sessionpart) {
return false;
}
// search session part
lines = sessionpart.split('\r\n');
for (var j = 0; j < lines.length; j++) {
if (lines[j].substring(0, needle.length) == needle) {
return lines[j];
}
}
return false;
},
find_lines: function (haystack, needle, sessionpart) {
var lines = haystack.split('\r\n'),
needles = [];
for (var i = 0; i < lines.length; i++) {
if (lines[i].substring(0, needle.length) == needle)
needles.push(lines[i]);
}
if (needles.length || !sessionpart) {
return needles;
}
// search session part
lines = sessionpart.split('\r\n');
for (var j = 0; j < lines.length; j++) {
if (lines[j].substring(0, needle.length) == needle) {
needles.push(lines[j]);
}
}
return needles;
},
candidateToJingle: function (line) {
// a=candidate:2979166662 1 udp 2113937151 192.168.2.100 57698 typ host generation 0
// <candidate component=... foundation=... generation=... id=... ip=... network=... port=... priority=... protocol=... type=.../>
if (line.indexOf('candidate:') === 0) {
line = 'a=' + line;
} else if (line.substring(0, 12) != 'a=candidate:') {
console.log('parseCandidate called with a line that is not a candidate line');
console.log(line);
return null;
}
if (line.substring(line.length - 2) == '\r\n') // chomp it
line = line.substring(0, line.length - 2);
var candidate = {},
elems = line.split(' '),
i;
if (elems[6] != 'typ') {
console.log('did not find typ in the right place');
console.log(line);
return null;
}
candidate.foundation = elems[0].substring(12);
candidate.component = elems[1];
candidate.protocol = elems[2].toLowerCase();
candidate.priority = elems[3];
candidate.ip = elems[4];
candidate.port = elems[5];
// elems[6] => "typ"
candidate.type = elems[7];
candidate.generation = '0'; // default, may be overwritten below
for (i = 8; i < elems.length; i += 2) {
switch (elems[i]) {
case 'raddr':
candidate['rel-addr'] = elems[i + 1];
break;
case 'rport':
candidate['rel-port'] = elems[i + 1];
break;
case 'generation':
candidate.generation = elems[i + 1];
break;
case 'tcptype':
candidate.tcptype = elems[i + 1];
break;
default: // TODO
console.log('not translating "' + elems[i] + '" = "' + elems[i + 1] + '"');
}
}
candidate.network = '1';
candidate.id = Math.random().toString(36).substr(2, 10); // not applicable to SDP -- FIXME: should be unique, not just random
return candidate;
},
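// Inverse of candidateToJingle: builds an "a=candidate:..." SDP line from a
// <candidate/> element (see the example mapping above candidateToJingle).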
candidateFromJingle: function (cand) {
var line = 'a=candidate:';
line += cand.getAttribute('foundation');
line += ' ';
line += cand.getAttribute('component');
line += ' ';
line += cand.getAttribute('protocol'); //.toUpperCase(); // chrome M23 doesn't like this
line += ' ';
line += cand.getAttribute('priority');
line += ' ';
line += cand.getAttribute('ip');
line += ' ';
line += cand.getAttribute('port');
line += ' ';
line += 'typ';
line += ' ' + cand.getAttribute('type');
line += ' ';
switch (cand.getAttribute('type')) {
case 'srflx':
case 'prflx':
case 'relay':
if (cand.getAttribute('rel-addr') && cand.getAttribute('rel-port')) {
line += 'raddr';
line += ' ';
line += cand.getAttribute('rel-addr');
line += ' ';
line += 'rport';
line += ' ';
line += cand.getAttribute('rel-port');
line += ' ';
}
break;
}
if (cand.getAttribute('protocol').toLowerCase() == 'tcp') {
line += 'tcptype';
line += ' ';
line += cand.getAttribute('tcptype');
line += ' ';
}
line += 'generation';
line += ' ';
line += cand.getAttribute('generation') || '0';
return line + '\r\n';
}
};
exports.SDPUtil = SDPUtil;

View file

@ -1,254 +0,0 @@
/**
* Wrapper for built-in http.js to emulate the browser XMLHttpRequest object.
*
* This can be used with JS designed for browsers to improve reuse of code and
* allow the use of existing libraries.
*
* Usage: include("XMLHttpRequest.js") and use XMLHttpRequest per W3C specs.
*
* @todo SSL Support
* @author Dan DeFelippi <dan@driverdan.com>
* @license MIT
*/
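/*
 * Usage sketch (illustrative only, assuming Node.js):
 *
 *   var XMLHttpRequest = require("./XMLHttpRequest").XMLHttpRequest;
 *   var xhr = new XMLHttpRequest();
 *   xhr.onreadystatechange = function () {
 *       if (xhr.readyState === xhr.DONE) {
 *           console.log(xhr.status, xhr.responseText);
 *       }
 *   };
 *   xhr.open("GET", "http://localhost:8080/", true);
 *   xhr.send();
 */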
var Url = require("url");
exports.XMLHttpRequest = function() {
/**
* Private variables
*/
var self = this;
var http = require('http');
var https = require('https');
// Holds http.js objects
var client;
var request;
var response;
// Request settings
var settings = {};
// Set some default headers
var defaultHeaders = {
"User-Agent": "node.js",
"Accept": "*/*",
};
var headers = defaultHeaders;
/**
* Constants
*/
this.UNSENT = 0;
this.OPENED = 1;
this.HEADERS_RECEIVED = 2;
this.LOADING = 3;
this.DONE = 4;
/**
* Public vars
*/
// Current state
this.readyState = this.UNSENT;
// default ready state change handler in case one is not set or is set late
this.onreadystatechange = function() {};
// Result & response
this.responseText = "";
this.responseXML = "";
this.status = null;
this.statusText = null;
/**
* Open the connection. Currently supports local server requests.
*
* @param string method Connection method (eg GET, POST)
* @param string url URL for the connection.
* @param boolean async Asynchronous connection. Default is true.
* @param string user Username for basic authentication (optional)
* @param string password Password for basic authentication (optional)
*/
this.open = function(method, url, async, user, password) {
settings = {
"method": method,
"url": url,
"async": async || null,
"user": user || null,
"password": password || null
};
this.abort();
setState(this.OPENED);
};
/**
* Sets a header for the request.
*
* @param string header Header name
* @param string value Header value
*/
this.setRequestHeader = function(header, value) {
headers[header] = value;
};
/**
* Gets a header from the server response.
*
* @param string header Name of header to get.
* @return string Text of the header or null if it doesn't exist.
*/
this.getResponseHeader = function(header) {
if (this.readyState > this.OPENED && response.headers[header]) {
return header + ": " + response.headers[header];
}
return null;
};
/**
* Gets all the response headers.
*
* @return string
*/
this.getAllResponseHeaders = function() {
if (this.readyState < this.HEADERS_RECEIVED) {
throw "INVALID_STATE_ERR: Headers have not been received.";
}
var result = "";
for (var i in response.headers) {
result += i + ": " + response.headers[i] + "\r\n";
}
return result.substr(0, result.length - 2);
};
/**
* Sends the request to the server.
*
* @param string data Optional data to send as request body.
*/
this.send = function(data) {
if (this.readyState != this.OPENED) {
throw "INVALID_STATE_ERR: connection must be opened before send() is called";
}
var ssl = false;
var url = Url.parse(settings.url);
// Determine the server
switch (url.protocol) {
case 'https:':
ssl = true;
// SSL & non-SSL both need host, no break here.
case 'http:':
var host = url.hostname;
break;
case undefined:
case '':
var host = "localhost";
break;
default:
throw "Protocol not supported.";
}
// Default to port 80. If accessing localhost on another port be sure
// to use http://localhost:port/path
var port = url.port || (ssl ? 443 : 80);
// Add query string if one is used
var uri = url.pathname + (url.search ? url.search : '');
// Set the Host header or the server may reject the request
this.setRequestHeader("Host", host);
// Set content length header
if (settings.method == "GET" || settings.method == "HEAD") {
data = null;
} else if (data) {
this.setRequestHeader("Content-Length", Buffer.byteLength(data));
if (!headers["Content-Type"]) {
this.setRequestHeader("Content-Type", "text/plain;charset=UTF-8");
}
}
// Use the proper protocol
var doRequest = ssl ? https.request : http.request;
var options = {
host: host,
port: port,
path: uri,
method: settings.method,
headers: headers,
agent: false
};
var req = doRequest(options, function(res) {
response = res;
response.setEncoding("utf8");
setState(self.HEADERS_RECEIVED);
self.status = response.statusCode;
response.on('data', function(chunk) {
// Make sure there's some data
if (chunk) {
self.responseText += chunk;
}
setState(self.LOADING);
});
response.on('end', function() {
setState(self.DONE);
});
response.on('error', function(error) {
self.handleError(error);
});
}).on('error', function(error) {
self.handleError(error);
});
req.setHeader("Connection", "Close");
// Node 0.4 and later won't accept empty data. Make sure it's needed.
if (data) {
req.write(data);
}
req.end();
};
this.handleError = function(error) {
this.status = 503;
this.statusText = error;
this.responseText = error.stack;
setState(this.DONE);
};
/**
* Aborts a request.
*/
this.abort = function() {
headers = defaultHeaders;
this.readyState = this.UNSENT;
this.responseText = "";
this.responseXML = "";
};
/**
* Changes readyState and calls onreadystatechange.
*
* @param int state New state
*/
var setState = function(state) {
self.readyState = state;
self.onreadystatechange();
};
};

View file

@ -1,83 +0,0 @@
// This code was written by Tyler Akins and has been placed in the
// public domain. It would be nice if you left this header intact.
// Base64 code from Tyler Akins -- http://rumkin.com
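// Usage sketch (illustrative): Base64.encode("abc") returns "YWJj" and
// Base64.decode("YWJj") returns "abc".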
var Base64 = (function () {
var keyStr = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";
var obj = {
/**
* Encodes a string in base64
* @param {String} input The string to encode in base64.
*/
encode: function (input) {
var output = "";
var chr1, chr2, chr3;
var enc1, enc2, enc3, enc4;
var i = 0;
do {
chr1 = input.charCodeAt(i++);
chr2 = input.charCodeAt(i++);
chr3 = input.charCodeAt(i++);
enc1 = chr1 >> 2;
enc2 = ((chr1 & 3) << 4) | (chr2 >> 4);
enc3 = ((chr2 & 15) << 2) | (chr3 >> 6);
enc4 = chr3 & 63;
if (isNaN(chr2)) {
enc3 = enc4 = 64;
} else if (isNaN(chr3)) {
enc4 = 64;
}
output = output + keyStr.charAt(enc1) + keyStr.charAt(enc2) +
keyStr.charAt(enc3) + keyStr.charAt(enc4);
} while (i < input.length);
return output;
},
/**
* Decodes a base64 string.
* @param {String} input The string to decode.
*/
decode: function (input) {
var output = "";
var chr1, chr2, chr3;
var enc1, enc2, enc3, enc4;
var i = 0;
// remove all characters that are not A-Z, a-z, 0-9, +, /, or =
input = input.replace(/[^A-Za-z0-9\+\/\=]/g, '');
do {
enc1 = keyStr.indexOf(input.charAt(i++));
enc2 = keyStr.indexOf(input.charAt(i++));
enc3 = keyStr.indexOf(input.charAt(i++));
enc4 = keyStr.indexOf(input.charAt(i++));
chr1 = (enc1 << 2) | (enc2 >> 4);
chr2 = ((enc2 & 15) << 4) | (enc3 >> 2);
chr3 = ((enc3 & 3) << 6) | enc4;
output = output + String.fromCharCode(chr1);
if (enc3 != 64) {
output = output + String.fromCharCode(chr2);
}
if (enc4 != 64) {
output = output + String.fromCharCode(chr3);
}
} while (i < input.length);
return output;
}
};
return obj;
})();
// Nodify
exports.Base64 = Base64;

View file

@ -1,279 +0,0 @@
/*
* A JavaScript implementation of the RSA Data Security, Inc. MD5 Message
* Digest Algorithm, as defined in RFC 1321.
* Version 2.1 Copyright (C) Paul Johnston 1999 - 2002.
* Other contributors: Greg Holt, Andrew Kepert, Ydnar, Lostinet
* Distributed under the BSD License
* See http://pajhome.org.uk/crypt/md5 for more info.
*/
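// Usage sketch (illustrative): MD5.hexdigest("abc") returns
// "900150983cd24fb0d6963f7d28e17f72" -- see the self-test at the bottom.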
var MD5 = (function () {
/*
* Configurable variables. You may need to tweak these to be compatible with
* the server-side, but the defaults work in most cases.
*/
var hexcase = 0; /* hex output format. 0 - lowercase; 1 - uppercase */
var b64pad = ""; /* base-64 pad character. "=" for strict RFC compliance */
var chrsz = 8; /* bits per input character. 8 - ASCII; 16 - Unicode */
/*
* Add integers, wrapping at 2^32. This uses 16-bit operations internally
* to work around bugs in some JS interpreters.
*/
var safe_add = function (x, y) {
var lsw = (x & 0xFFFF) + (y & 0xFFFF);
var msw = (x >> 16) + (y >> 16) + (lsw >> 16);
return (msw << 16) | (lsw & 0xFFFF);
};
/*
* Bitwise rotate a 32-bit number to the left.
*/
var bit_rol = function (num, cnt) {
return (num << cnt) | (num >>> (32 - cnt));
};
/*
* Convert a string to an array of little-endian words
* If chrsz is ASCII, characters >255 have their hi-byte silently ignored.
*/
var str2binl = function (str) {
var bin = [];
var mask = (1 << chrsz) - 1;
for(var i = 0; i < str.length * chrsz; i += chrsz)
{
bin[i>>5] |= (str.charCodeAt(i / chrsz) & mask) << (i%32);
}
return bin;
};
/*
* Convert an array of little-endian words to a string
*/
var binl2str = function (bin) {
var str = "";
var mask = (1 << chrsz) - 1;
for(var i = 0; i < bin.length * 32; i += chrsz)
{
str += String.fromCharCode((bin[i>>5] >>> (i % 32)) & mask);
}
return str;
};
/*
* Convert an array of little-endian words to a hex string.
*/
var binl2hex = function (binarray) {
var hex_tab = hexcase ? "0123456789ABCDEF" : "0123456789abcdef";
var str = "";
for(var i = 0; i < binarray.length * 4; i++)
{
str += hex_tab.charAt((binarray[i>>2] >> ((i%4)*8+4)) & 0xF) +
hex_tab.charAt((binarray[i>>2] >> ((i%4)*8 )) & 0xF);
}
return str;
};
/*
* Convert an array of little-endian words to a base-64 string
*/
var binl2b64 = function (binarray) {
var tab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
var str = "";
var triplet, j;
for(var i = 0; i < binarray.length * 4; i += 3)
{
triplet = (((binarray[i >> 2] >> 8 * ( i %4)) & 0xFF) << 16) |
(((binarray[i+1 >> 2] >> 8 * ((i+1)%4)) & 0xFF) << 8 ) |
((binarray[i+2 >> 2] >> 8 * ((i+2)%4)) & 0xFF);
for(j = 0; j < 4; j++)
{
if(i * 8 + j * 6 > binarray.length * 32) { str += b64pad; }
else { str += tab.charAt((triplet >> 6*(3-j)) & 0x3F); }
}
}
return str;
};
/*
* These functions implement the four basic operations the algorithm uses.
*/
var md5_cmn = function (q, a, b, x, s, t) {
return safe_add(bit_rol(safe_add(safe_add(a, q),safe_add(x, t)), s),b);
};
var md5_ff = function (a, b, c, d, x, s, t) {
return md5_cmn((b & c) | ((~b) & d), a, b, x, s, t);
};
var md5_gg = function (a, b, c, d, x, s, t) {
return md5_cmn((b & d) | (c & (~d)), a, b, x, s, t);
};
var md5_hh = function (a, b, c, d, x, s, t) {
return md5_cmn(b ^ c ^ d, a, b, x, s, t);
};
var md5_ii = function (a, b, c, d, x, s, t) {
return md5_cmn(c ^ (b | (~d)), a, b, x, s, t);
};
/*
* Calculate the MD5 of an array of little-endian words, and a bit length
*/
var core_md5 = function (x, len) {
/* append padding */
x[len >> 5] |= 0x80 << ((len) % 32);
x[(((len + 64) >>> 9) << 4) + 14] = len;
var a = 1732584193;
var b = -271733879;
var c = -1732584194;
var d = 271733878;
var olda, oldb, oldc, oldd;
for (var i = 0; i < x.length; i += 16)
{
olda = a;
oldb = b;
oldc = c;
oldd = d;
a = md5_ff(a, b, c, d, x[i+ 0], 7 , -680876936);
d = md5_ff(d, a, b, c, x[i+ 1], 12, -389564586);
c = md5_ff(c, d, a, b, x[i+ 2], 17, 606105819);
b = md5_ff(b, c, d, a, x[i+ 3], 22, -1044525330);
a = md5_ff(a, b, c, d, x[i+ 4], 7 , -176418897);
d = md5_ff(d, a, b, c, x[i+ 5], 12, 1200080426);
c = md5_ff(c, d, a, b, x[i+ 6], 17, -1473231341);
b = md5_ff(b, c, d, a, x[i+ 7], 22, -45705983);
a = md5_ff(a, b, c, d, x[i+ 8], 7 , 1770035416);
d = md5_ff(d, a, b, c, x[i+ 9], 12, -1958414417);
c = md5_ff(c, d, a, b, x[i+10], 17, -42063);
b = md5_ff(b, c, d, a, x[i+11], 22, -1990404162);
a = md5_ff(a, b, c, d, x[i+12], 7 , 1804603682);
d = md5_ff(d, a, b, c, x[i+13], 12, -40341101);
c = md5_ff(c, d, a, b, x[i+14], 17, -1502002290);
b = md5_ff(b, c, d, a, x[i+15], 22, 1236535329);
a = md5_gg(a, b, c, d, x[i+ 1], 5 , -165796510);
d = md5_gg(d, a, b, c, x[i+ 6], 9 , -1069501632);
c = md5_gg(c, d, a, b, x[i+11], 14, 643717713);
b = md5_gg(b, c, d, a, x[i+ 0], 20, -373897302);
a = md5_gg(a, b, c, d, x[i+ 5], 5 , -701558691);
d = md5_gg(d, a, b, c, x[i+10], 9 , 38016083);
c = md5_gg(c, d, a, b, x[i+15], 14, -660478335);
b = md5_gg(b, c, d, a, x[i+ 4], 20, -405537848);
a = md5_gg(a, b, c, d, x[i+ 9], 5 , 568446438);
d = md5_gg(d, a, b, c, x[i+14], 9 , -1019803690);
c = md5_gg(c, d, a, b, x[i+ 3], 14, -187363961);
b = md5_gg(b, c, d, a, x[i+ 8], 20, 1163531501);
a = md5_gg(a, b, c, d, x[i+13], 5 , -1444681467);
d = md5_gg(d, a, b, c, x[i+ 2], 9 , -51403784);
c = md5_gg(c, d, a, b, x[i+ 7], 14, 1735328473);
b = md5_gg(b, c, d, a, x[i+12], 20, -1926607734);
a = md5_hh(a, b, c, d, x[i+ 5], 4 , -378558);
d = md5_hh(d, a, b, c, x[i+ 8], 11, -2022574463);
c = md5_hh(c, d, a, b, x[i+11], 16, 1839030562);
b = md5_hh(b, c, d, a, x[i+14], 23, -35309556);
a = md5_hh(a, b, c, d, x[i+ 1], 4 , -1530992060);
d = md5_hh(d, a, b, c, x[i+ 4], 11, 1272893353);
c = md5_hh(c, d, a, b, x[i+ 7], 16, -155497632);
b = md5_hh(b, c, d, a, x[i+10], 23, -1094730640);
a = md5_hh(a, b, c, d, x[i+13], 4 , 681279174);
d = md5_hh(d, a, b, c, x[i+ 0], 11, -358537222);
c = md5_hh(c, d, a, b, x[i+ 3], 16, -722521979);
b = md5_hh(b, c, d, a, x[i+ 6], 23, 76029189);
a = md5_hh(a, b, c, d, x[i+ 9], 4 , -640364487);
d = md5_hh(d, a, b, c, x[i+12], 11, -421815835);
c = md5_hh(c, d, a, b, x[i+15], 16, 530742520);
b = md5_hh(b, c, d, a, x[i+ 2], 23, -995338651);
a = md5_ii(a, b, c, d, x[i+ 0], 6 , -198630844);
d = md5_ii(d, a, b, c, x[i+ 7], 10, 1126891415);
c = md5_ii(c, d, a, b, x[i+14], 15, -1416354905);
b = md5_ii(b, c, d, a, x[i+ 5], 21, -57434055);
a = md5_ii(a, b, c, d, x[i+12], 6 , 1700485571);
d = md5_ii(d, a, b, c, x[i+ 3], 10, -1894986606);
c = md5_ii(c, d, a, b, x[i+10], 15, -1051523);
b = md5_ii(b, c, d, a, x[i+ 1], 21, -2054922799);
a = md5_ii(a, b, c, d, x[i+ 8], 6 , 1873313359);
d = md5_ii(d, a, b, c, x[i+15], 10, -30611744);
c = md5_ii(c, d, a, b, x[i+ 6], 15, -1560198380);
b = md5_ii(b, c, d, a, x[i+13], 21, 1309151649);
a = md5_ii(a, b, c, d, x[i+ 4], 6 , -145523070);
d = md5_ii(d, a, b, c, x[i+11], 10, -1120210379);
c = md5_ii(c, d, a, b, x[i+ 2], 15, 718787259);
b = md5_ii(b, c, d, a, x[i+ 9], 21, -343485551);
a = safe_add(a, olda);
b = safe_add(b, oldb);
c = safe_add(c, oldc);
d = safe_add(d, oldd);
}
return [a, b, c, d];
};
/*
* Calculate the HMAC-MD5, of a key and some data
*/
var core_hmac_md5 = function (key, data) {
var bkey = str2binl(key);
if(bkey.length > 16) { bkey = core_md5(bkey, key.length * chrsz); }
var ipad = new Array(16), opad = new Array(16);
for(var i = 0; i < 16; i++)
{
ipad[i] = bkey[i] ^ 0x36363636;
opad[i] = bkey[i] ^ 0x5C5C5C5C;
}
var hash = core_md5(ipad.concat(str2binl(data)), 512 + data.length * chrsz);
return core_md5(opad.concat(hash), 512 + 128);
};
var obj = {
/*
* These are the functions you'll usually want to call.
* They take string arguments and return either hex or base-64 encoded
* strings.
*/
hexdigest: function (s) {
return binl2hex(core_md5(str2binl(s), s.length * chrsz));
},
b64digest: function (s) {
return binl2b64(core_md5(str2binl(s), s.length * chrsz));
},
hash: function (s) {
return binl2str(core_md5(str2binl(s), s.length * chrsz));
},
hmac_hexdigest: function (key, data) {
return binl2hex(core_hmac_md5(key, data));
},
hmac_b64digest: function (key, data) {
return binl2b64(core_hmac_md5(key, data));
},
hmac_hash: function (key, data) {
return binl2str(core_hmac_md5(key, data));
},
/*
* Perform a simple self-test to see if the VM is working
*/
test: function () {
return MD5.hexdigest("abc") === "900150983cd24fb0d6963f7d28e17f72";
}
};
return obj;
})();
// Nodify
exports.MD5 = MD5;

File diff suppressed because it is too large

View file

@ -1,48 +0,0 @@
var strophe = require("./strophe/strophe.js").Strophe;
var Strophe = strophe.Strophe;
var $iq = strophe.$iq;
var $msg = strophe.$msg;
var $build = strophe.$build;
var $pres = strophe.$pres;
var jsdom = require("jsdom");
var window = jsdom.jsdom().parentWindow;
var $ = require('jquery')(window);
var stropheJingle = require("./strophe.jingle.sdp.js");
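// Usage sketch (illustrative; the script filename is a placeholder):
//   node unjingle.js --jingle < stanza.xml   # Jingle stanza -> SDP
//   node unjingle.js --sdp < offer.sdp       # SDP -> Jingle IQ template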
var input = '';
process.stdin.on('readable', function() {
var chunk = process.stdin.read();
if (chunk !== null) {
input += chunk;
}
});
process.stdin.on('end', function() {
if (process.argv[2] == '--jingle') {
var elem = $(input);
// app does:
// sess.setRemoteDescription($(iq).find('>jingle'), 'offer');
//console.log(elem.find('>content'));
var sdp = new stropheJingle.SDP('');
sdp.fromJingle(elem);
console.log(sdp.raw);
} else if (process.argv[2] == '--sdp') {
var sdp = new stropheJingle.SDP(input);
var accept = $iq({to: '%(tojid)s',
type: 'set'})
.c('jingle', {xmlns: 'urn:xmpp:jingle:1',
//action: 'session-accept',
action: '%(action)s',
initiator: '%(initiator)s',
responder: '%(responder)s',
sid: '%(sid)s' });
sdp.toJingle(accept, 'responder');
console.log(Strophe.serialize(accept));
}
});

View file

@ -1,88 +0,0 @@
#!/usr/bin/env python
import json
import sys
import urllib.parse
from argparse import ArgumentParser
import requests
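# Example invocation (illustrative; the script name and all values are placeholders):
#   python kick_users_by_prefix.py -s https://matrix.example.com \
#       -r '!someroom:example.com' -u '@irc_' -t <admin_access_token> \
#       -w "IRC bridge shutdown"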
def _mkurl(template, kws):
for key in kws:
template = template.replace(key, kws[key])
return template
def main(hs, room_id, access_token, user_id_prefix, why):
if not why:
why = "Automated kick."
print(
"Kicking members on %s in room %s matching %s" % (hs, room_id, user_id_prefix)
)
room_state_url = _mkurl(
"$HS/_matrix/client/api/v1/rooms/$ROOM/state?access_token=$TOKEN",
{"$HS": hs, "$ROOM": room_id, "$TOKEN": access_token},
)
print("Getting room state => %s" % room_state_url)
res = requests.get(room_state_url)
print("HTTP %s" % res.status_code)
state_events = res.json()
if "error" in state_events:
print("FATAL")
print(state_events)
return
kick_list = []
room_name = room_id
for event in state_events:
if not event["type"] == "m.room.member":
if event["type"] == "m.room.name":
room_name = event["content"].get("name")
continue
if not event["content"].get("membership") == "join":
continue
if event["state_key"].startswith(user_id_prefix):
kick_list.append(event["state_key"])
if len(kick_list) == 0:
print("No user IDs match the prefix '%s'" % user_id_prefix)
return
print("The following user IDs will be kicked from %s" % room_name)
for uid in kick_list:
print(uid)
doit = input("Continue? [Y]es\n")
if len(doit) > 0 and doit.lower() == "y":
print("Kicking members...")
# encode them all
kick_list = [urllib.parse.quote(uid) for uid in kick_list]
for uid in kick_list:
kick_url = _mkurl(
"$HS/_matrix/client/api/v1/rooms/$ROOM/state/m.room.member/$UID?access_token=$TOKEN",
{"$HS": hs, "$UID": uid, "$ROOM": room_id, "$TOKEN": access_token},
)
kick_body = {"membership": "leave", "reason": why}
print("Kicking %s" % uid)
res = requests.put(kick_url, data=json.dumps(kick_body))
if res.status_code != 200:
print("ERROR: HTTP %s" % res.status_code)
if res.json().get("error"):
print("ERROR: JSON %s" % res.json())
if __name__ == "__main__":
parser = ArgumentParser("Kick members in a room matching a certain user ID prefix.")
parser.add_argument("-u", "--user-id", help="The user ID prefix e.g. '@irc_'")
parser.add_argument("-t", "--token", help="Your access_token")
parser.add_argument("-r", "--room", help="The room ID to kick members in")
parser.add_argument(
"-s", "--homeserver", help="The base HS url e.g. http://matrix.org"
)
parser.add_argument("-w", "--why", help="Reason for the kick. Optional.")
args = parser.parse_args()
if not args.room or not args.token or not args.user_id or not args.homeserver:
parser.print_help()
sys.exit(1)
else:
main(args.homeserver, args.room, args.token, args.user_id, args.why)

debian/changelog vendored
View file

@ -1,3 +1,16 @@
matrix-synapse-py3 (1.61.0) stable; urgency=medium
* New Synapse release 1.61.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 14 Jun 2022 11:44:19 +0100
matrix-synapse-py3 (1.61.0~rc1) stable; urgency=medium
* Remove unused `jitsimeetbridge` experiment from `contrib` directory.
* New Synapse release 1.61.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 07 Jun 2022 12:42:31 +0100
matrix-synapse-py3 (1.60.0) stable; urgency=medium
* New Synapse release 1.60.0.

debian/copyright vendored
View file

@ -22,29 +22,6 @@ Files: synapse/config/repository.py
Copyright: 2014-2015, matrix.org
License: Apache-2.0
Files: contrib/jitsimeetbridge/unjingle/strophe/base64.js
Copyright: Public Domain (Tyler Akins http://rumkin.com)
License: public-domain
This code was written by Tyler Akins and has been placed in the
public domain. It would be nice if you left this header intact.
Base64 code from Tyler Akins -- http://rumkin.com
Files: contrib/jitsimeetbridge/unjingle/strophe/md5.js
Copyright: 1999-2002, Paul Johnston & Contributors
License: BSD-3-clause
Files: contrib/jitsimeetbridge/unjingle/strophe/strophe.js
Copyright: 2006-2008, OGG, LLC
License: Expat
Files: contrib/jitsimeetbridge/unjingle/strophe/XMLHttpRequest.js
Copyright: 2010 passive.ly LLC
License: Expat
Files: contrib/jitsimeetbridge/unjingle/*.js
Copyright: 2014 Jitsi
License: Apache-2.0
Files: debian/*
Copyright: 2016-2017, Erik Johnston <erik@matrix.org>
2017, Rahul De <rahulde@swecha.net>

View file

@ -6,11 +6,12 @@ CWD=$(pwd)
cd "$DIR/.." || exit
PYTHONPATH=$(readlink -f "$(pwd)")
export PYTHONPATH
echo "$PYTHONPATH"
# Do not override PYTHONPATH if we are in a virtual env
if [ "$VIRTUAL_ENV" = "" ]; then
PYTHONPATH=$(readlink -f "$(pwd)")
export PYTHONPATH
echo "$PYTHONPATH"
fi
# Create servers which listen on HTTP at 808x and HTTPS at 848x.
for port in 8080 8081 8082; do

View file

@ -158,6 +158,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
"^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$",
"^/_matrix/client/(api/v1|r0|v3|unstable)/join/",
"^/_matrix/client/(api/v1|r0|v3|unstable)/profile/",
"^/_matrix/client/(v1|unstable/org.matrix.msc2716)/rooms/.*/batch_send",
],
"shared_extra_conf": {},
"worker_extra_conf": "",

View file

@ -115,7 +115,9 @@ URL parameters:
Body parameters:
- `password` - string, optional. If provided, the user's password is updated and all
devices are logged out.
devices are logged out, unless `logout_devices` is set to `false`.
- `logout_devices` - bool, optional, defaults to `true`. If set to false, devices aren't
logged out even when `password` is provided.
- `displayname` - string, optional, defaults to the value of `user_id`.
- `threepids` - array, optional, allows setting the third-party IDs (email, msisdn)
- `medium` - string. Kind of third-party ID, either `email` or `msisdn`.

View file

@ -117,7 +117,7 @@ In this example, we define three jobs:
Note that this example is tailored to show different configurations and
features slightly more jobs than are probably necessary (in practice, a
server admin would probably consider it better to replace the last two
jobs with one that runs once a day and handles rooms which which
jobs with one that runs once a day and handles rooms which
policy's `max_lifetime` is greater than 3 days).
Keep in mind, when configuring these jobs, that a purge job can become

View file

@ -2523,16 +2523,6 @@ push:
# "events_default": 1
# Uncomment to allow non-server-admin users to create groups on this server
#
#enable_group_creation: true
# If enabled, non server admins can only create groups with local parts
# starting with this prefix
#
#group_creation_prefix: "unofficial_"
# User Directory configuration
#

View file

@ -43,7 +43,7 @@ loggers:
The above logging config will set Synapse as 'INFO' logging level by default,
with the SQL layer at 'WARNING', and will log to a file, stored as JSON.
It is also possible to figure Synapse to log to a remote endpoint by using the
It is also possible to configure Synapse to log to a remote endpoint by using the
`synapse.logging.RemoteHandler` class included with Synapse. It takes the
following arguments:

View file

@ -89,6 +89,24 @@ process, for example:
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```
# Upgrading to v1.61.0
## Removal of deprecated community/groups
This release of Synapse removes the deprecated community/groups code from the codebase.
### Worker endpoints
For those who have deployed workers, the following worker endpoints no longer exist
and should be removed from reverse proxy configuration:
- `^/_matrix/federation/v1/get_groups_publicised$`
- `^/_matrix/client/(r0|v3|unstable)/joined_groups$`
- `^/_matrix/client/(r0|v3|unstable)/publicised_groups$`
- `^/_matrix/client/(r0|v3|unstable)/publicised_groups/`
- `^/_matrix/federation/v1/groups/`
- `^/_matrix/client/(r0|v3|unstable)/groups/`
# Upgrading to v1.60.0
## Adding a new unique index to `state_group_edges` could fail if your database is corrupted

View file

@ -575,6 +575,18 @@ Example configuration:
dummy_events_threshold: 5
```
---
Config option: `delete_stale_devices_after`
An optional duration. If set, Synapse will run a daily background task to log out and
delete any device that hasn't been accessed for more than the specified amount of time.
Defaults to no duration, which means devices are never pruned.
Example configuration:
```yaml
delete_stale_devices_after: 1y
```
## Homeserver blocking ##
Useful options for Synapse admins.
@ -1447,7 +1459,7 @@ federation_rr_transactions_per_room_per_second: 40
```
---
## Media Store ##
Config options relating to Synapse media store.
Config options related to Synapse's media store.
---
Config option: `enable_media_repo`
@ -1551,6 +1563,39 @@ thumbnail_sizes:
height: 600
method: scale
```
---
Config option: `media_retention`
Controls whether local media and entries in the remote media cache
(media that is downloaded from other homeservers) should be removed
under certain conditions, typically for the purpose of saving space.
Purging media files will be carried out by the media worker
(that is, the worker that has the `enable_media_repo` homeserver config
option set to 'true'). This may be the main process.
The `media_retention.local_media_lifetime` and
`media_retention.remote_media_lifetime` config options control whether
media will be purged if it has not been accessed in a given amount of
time. Note that media is 'accessed' when loaded in a room in a client, or
otherwise downloaded by a local or remote user. If the media has never
been accessed, the media's creation time is used instead. Both thumbnails
and the original media will be removed. If either of these options is unset,
then media of that type will not be purged.
Local or cached remote media that has been
[quarantined](../../admin_api/media_admin_api.md#quarantining-media-in-a-room)
will not be deleted. Similarly, local media that has been marked as
[protected from quarantine](../../admin_api/media_admin_api.md#protecting-media-from-being-quarantined)
will not be deleted.
Example configuration:
```yaml
media_retention:
local_media_lifetime: 90d
remote_media_lifetime: 14d
```
---
Config option: `url_preview_enabled`
This setting determines whether the preview URL API is enabled.
@ -3148,25 +3193,6 @@ Example configuration:
encryption_enabled_by_default_for_room_type: invite
```
---
Config option: `enable_group_creation`
Set to true to allow non-server-admin users to create groups on this server
Example configuration:
```yaml
enable_group_creation: true
```
---
Config option: `group_creation_prefix`
If enabled/present, non-server admins can only create groups with local parts
starting with this prefix.
Example configuration:
```yaml
group_creation_prefix: "unofficial_"
```
---
Config option: `user_directory`
This setting defines options related to the user directory.

View file

@ -1,6 +1,6 @@
# Scaling synapse via workers
For small instances it recommended to run Synapse in the default monolith mode.
For small instances it is recommended to run Synapse in the default monolith mode.
For larger instances where performance is a concern it can be helpful to split
out functionality into multiple separate python processes. These processes are
called 'workers', and are (eventually) intended to scale horizontally
@ -191,9 +191,8 @@ information.
^/_matrix/federation/v1/event_auth/
^/_matrix/federation/v1/exchange_third_party_invite/
^/_matrix/federation/v1/user/devices/
^/_matrix/federation/v1/get_groups_publicised$
^/_matrix/key/v2/query
^/_matrix/federation/(v1|unstable/org.matrix.msc2946)/hierarchy/
^/_matrix/federation/v1/hierarchy/
# Inbound federation transaction request
^/_matrix/federation/v1/send/
@ -205,15 +204,14 @@ information.
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/context/.*$
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/members$
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state$
^/_matrix/client/(v1|unstable/org.matrix.msc2946)/rooms/.*/hierarchy$
^/_matrix/client/v1/rooms/.*/hierarchy$
^/_matrix/client/unstable/org.matrix.msc2716/rooms/.*/batch_send$
^/_matrix/client/unstable/im.nheko.summary/rooms/.*/summary$
^/_matrix/client/(r0|v3|unstable)/account/3pid$
^/_matrix/client/(r0|v3|unstable)/account/whoami$
^/_matrix/client/(r0|v3|unstable)/devices$
^/_matrix/client/versions$
^/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$
^/_matrix/client/(r0|v3|unstable)/joined_groups$
^/_matrix/client/(r0|v3|unstable)/publicised_groups$
^/_matrix/client/(r0|v3|unstable)/publicised_groups/
^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event/
^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms$
^/_matrix/client/(api/v1|r0|v3|unstable)/search$
@ -237,9 +235,6 @@ information.
^/_matrix/client/(api/v1|r0|v3|unstable)/join/
^/_matrix/client/(api/v1|r0|v3|unstable)/profile/
# Device requests
^/_matrix/client/(r0|v3|unstable)/sendToDevice/
# Account data requests
^/_matrix/client/(r0|v3|unstable)/.*/tags
^/_matrix/client/(r0|v3|unstable)/.*/account_data
@ -256,9 +251,7 @@ information.
Additionally, the following REST endpoints can be handled for GET requests:
^/_matrix/federation/v1/groups/
^/_matrix/client/(api/v1|r0|v3|unstable)/pushrules/
^/_matrix/client/(r0|v3|unstable)/groups/
Pagination requests can also be handled, but all requests for a given
room must be routed to the same instance. Additionally, care must be taken to

poetry.lock generated

File diff suppressed because it is too large

View file

@ -54,7 +54,7 @@ skip_gitignore = true
[tool.poetry]
name = "matrix-synapse"
version = "1.60.0"
version = "1.61.0"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"
@ -113,7 +113,6 @@ unpaddedbase64 = ">=2.1.0"
canonicaljson = ">=1.4.0"
# we use the type definitions added in signedjson 1.1.
signedjson = ">=1.1.0"
PyNaCl = ">=1.2.1"
# validating SSL certs for IP addresses requires service_identity 18.1.
service-identity = ">=18.1.0"
# Twisted 18.9 introduces some logger improvements that the structured

View file

@ -4,24 +4,23 @@ attrs==21.4.0 \
automat==20.2.0 ; python_full_version >= "3.6.7" \
--hash=sha256:b6feb6455337df834f6c9962d6ccf771515b7d939bca142b29c20c2376bc6111 \
--hash=sha256:7979803c74610e11ef0c0d68a2942b152df52da55336e0c9d58daf1831cbdf33
bcrypt==3.2.2 \
--hash=sha256:7180d98a96f00b1050e93f5b0f556e658605dd9f524d0b0e68ae7944673f525e \
--hash=sha256:61bae49580dce88095d669226d5076d0b9d927754cedbdf76c6c9f5099ad6f26 \
--hash=sha256:88273d806ab3a50d06bc6a2fc7c87d737dd669b76ad955f449c43095389bc8fb \
--hash=sha256:6d2cb9d969bfca5bc08e45864137276e4c3d3d7de2b162171def3d188bf9d34a \
--hash=sha256:2b02d6bfc6336d1094276f3f588aa1225a598e27f8e3388f4db9948cb707b521 \
--hash=sha256:a2c46100e315c3a5b90fdc53e429c006c5f962529bc27e1dfd656292c20ccc40 \
--hash=sha256:7d9ba2e41e330d2af4af6b1b6ec9e6128e91343d0b4afb9282e54e5508f31baa \
--hash=sha256:cd43303d6b8a165c29ec6756afd169faba9396a9472cdff753fe9f19b96ce2fa \
--hash=sha256:4e029cef560967fb0cf4a802bcf4d562d3d6b4b1bf81de5ec1abbe0f1adb027e \
--hash=sha256:7ff2069240c6bbe49109fe84ca80508773a904f5a8cb960e02a977f7f519b129 \
--hash=sha256:433c410c2177057705da2a9f2cd01dd157493b2a7ac14c8593a16b3dab6b6bfb
bleach==5.0.0 \
--hash=sha256:08a1fe86d253b5c88c92cc3d810fd8048a16d15762e1e5b74d502256e5926aa1 \
--hash=sha256:c6d6cc054bdc9c83b48b8083e236e5f00f238428666d2ce2e083eaa5fd568565
canonicaljson==1.6.1 \
--hash=sha256:9ccf1079a928b520b9b7dd8567f842ca44d1877d8b6540b83254b8a6ac6cfc11 \
--hash=sha256:a93664f698556dbd4bab9c3fc4fb35834cf2535da1ea00b6b7758d8fe2bbb824
bcrypt==3.2.0 \
--hash=sha256:b589229207630484aefe5899122fb938a5b017b0f4349f769b8c13e78d99a8fd \
--hash=sha256:c95d4cbebffafcdd28bd28bb4e25b31c50f6da605c81ffd9ad8a3d1b2ab7b1b6 \
--hash=sha256:63d4e3ff96188e5898779b6057878fecf3f11cfe6ec3b313ea09955d587ec7a7 \
--hash=sha256:cd1ea2ff3038509ea95f687256c46b79f5fc382ad0aa3664d200047546d511d1 \
--hash=sha256:cdcdcb3972027f83fe24a48b1e90ea4b584d35f1cc279d76de6fc4b13376239d \
--hash=sha256:a0584a92329210fcd75eb8a3250c5a941633f8bfaf2a18f81009b097732839b7 \
--hash=sha256:56e5da069a76470679f312a7d3d23deb3ac4519991a0361abc11da837087b61d \
--hash=sha256:a67fb841b35c28a59cebed05fbd3e80eea26e6d75851f0574a9273c80f3e9b55 \
--hash=sha256:81fec756feff5b6818ea7ab031205e1d323d8943d237303baca2c5f9c7846f34 \
--hash=sha256:5b93c1726e50a93a033c36e5ca7fdcd29a5c7395af50a6892f5d9e7c6cfbfb29
bleach==4.1.0 \
--hash=sha256:4d2651ab93271d1129ac9cbc679f524565cc8a1b791909c4a51eac4446a15994 \
--hash=sha256:0900d8b37eba61a802ee40ac0061f8c2b5dee29c1927dd1d233e075ebf5a71da
canonicaljson==1.6.0 \
--hash=sha256:7230c2a2a3db07874f622af84effe41a655e07bf23734830e18a454e65d5b998 \
--hash=sha256:8739d5fd91aca7281d425660ae65af7663808c8177778965f67e90b16a2b2427
certifi==2021.10.8 ; python_full_version >= "3.6.0" \
--hash=sha256:d62a0163eb4c2344ac042ab2bdf75399a71a2d8c7d47eac2e2ee91b9d6339569 \
--hash=sha256:78884e7c1d4b00ce3cea67b44566851c4343c120abd683433ce934a68ea58872
@ -82,47 +81,45 @@ charset-normalizer==2.0.12 ; python_full_version >= "3.6.0" \
constantly==15.1.0 ; python_full_version >= "3.6.7" \
--hash=sha256:dd2fa9d6b1a51a83f0d7dd76293d734046aa176e384bf6e33b7e44880eb37c5d \
--hash=sha256:586372eb92059873e29eba4f9dec8381541b4d3834660707faf8ba59146dfc35
cryptography==37.0.2 \
--hash=sha256:ef15c2df7656763b4ff20a9bc4381d8352e6640cfeb95c2972c38ef508e75181 \
--hash=sha256:3c81599befb4d4f3d7648ed3217e00d21a9341a9a688ecdd615ff72ffbed7336 \
--hash=sha256:2bd1096476aaac820426239ab534b636c77d71af66c547b9ddcd76eb9c79e004 \
--hash=sha256:31fe38d14d2e5f787e0aecef831457da6cec68e0bb09a35835b0b44ae8b988fe \
--hash=sha256:093cb351031656d3ee2f4fa1be579a8c69c754cf874206be1d4cf3b542042804 \
--hash=sha256:59b281eab51e1b6b6afa525af2bd93c16d49358404f814fe2c2410058623928c \
--hash=sha256:0cc20f655157d4cfc7bada909dc5cc228211b075ba8407c46467f63597c78178 \
--hash=sha256:f8ec91983e638a9bcd75b39f1396e5c0dc2330cbd9ce4accefe68717e6779e0a \
--hash=sha256:46f4c544f6557a2fefa7ac8ac7d1b17bf9b647bd20b16decc8fbcab7117fbc15 \
--hash=sha256:731c8abd27693323b348518ed0e0705713a36d79fdbd969ad968fbef0979a7e0 \
--hash=sha256:471e0d70201c069f74c837983189949aa0d24bb2d751b57e26e3761f2f782b8d \
--hash=sha256:a68254dd88021f24a68b613d8c51d5c5e74d735878b9e32cc0adf19d1f10aaf9 \
--hash=sha256:a7d5137e556cc0ea418dca6186deabe9129cee318618eb1ffecbd35bee55ddc1 \
--hash=sha256:aeaba7b5e756ea52c8861c133c596afe93dd716cbcacae23b80bc238202dc023 \
--hash=sha256:95e590dd70642eb2079d280420a888190aa040ad20f19ec8c6e097e38aa29e06 \
--hash=sha256:1b9362d34363f2c71b7853f6251219298124aa4cc2075ae2932e64c91a3e2717 \
--hash=sha256:e53258e69874a306fcecb88b7534d61820db8a98655662a3dd2ec7f1afd9132f \
--hash=sha256:1f3bfbd611db5cb58ca82f3deb35e83af34bb8cf06043fa61500157d50a70982 \
--hash=sha256:419c57d7b63f5ec38b1199a9521d77d7d1754eb97827bbb773162073ccd8c8d4 \
--hash=sha256:dc26bb134452081859aa21d4990474ddb7e863aa39e60d1592800a8865a702de \
--hash=sha256:3b8398b3d0efc420e777c40c16764d6870bcef2eb383df9c6dbb9ffe12c64452 \
--hash=sha256:f224ad253cc9cea7568f49077007d2263efa57396a2f2f78114066fd54b5c68e
frozendict==2.3.2 \
--hash=sha256:4fb171d1e84d17335365877e19d17440373b47ca74a73c06f65ac0b16d01e87f \
--hash=sha256:c0a3640e9d7533d164160b758351aa49d9e85bbe0bd76d219d4021e90ffa6a52 \
--hash=sha256:87cfd00fafbc147d8cd2590d1109b7db8ac8d7d5bdaa708ba46caee132b55d4d \
--hash=sha256:fb09761e093cfabb2f179dbfdb2521e1ec5701df714d1eb5c51fa7849027be19 \
--hash=sha256:82176dc7adf01cf8f0193e909401939415a230a1853f4a672ec1629a06ceae18 \
--hash=sha256:c1c70826aa4a50fa283fe161834ac4a3ac7c753902c980bb8b595b0998a38ddb \
--hash=sha256:1db5035ddbed995badd1a62c4102b5e207b5aeb24472df2c60aba79639d7996b \
--hash=sha256:4246fc4cb1413645ba4d3513939b90d979a5bae724be605a10b2b26ee12f839c \
--hash=sha256:680cd42fb0a255da1ce45678ccbd7f69da750d5243809524ebe8f45b2eda6e6b \
--hash=sha256:6a7f3a181d6722c92a9fab12d0c5c2b006a18ca5666098531f316d1e1c8984e3 \
--hash=sha256:b1cb866eabb3c1384a7fe88e1e1033e2b6623073589012ab637c552bf03f6364 \
--hash=sha256:952c5e5e664578c5c2ce8489ee0ab6a1855da02b58ef593ee728fc10d672641a \
--hash=sha256:608b77904cd0117cd816df605a80d0043a5326ee62529327d2136c792165a823 \
--hash=sha256:0eed41fd326f0bcc779837d8d9e1374da1bc9857fe3b9f2910195bbd5fff3aeb \
--hash=sha256:bde28db6b5868dd3c45b3555f9d1dc5a1cca6d93591502fa5dcecce0dde6a335 \
--hash=sha256:6882a9bbe08ab9b5ff96ce11bdff3fe40b114b9813bc6801261e2a7b45e20012 \
--hash=sha256:7fac4542f0a13fbe704db4942f41ba3abffec5af8b100025973e59dff6a09d0d
cryptography==36.0.1 \
--hash=sha256:73bc2d3f2444bcfeac67dd130ff2ea598ea5f20b40e36d19821b4df8c9c5037b \
--hash=sha256:2d87cdcb378d3cfed944dac30596da1968f88fb96d7fc34fdae30a99054b2e31 \
--hash=sha256:74d6c7e80609c0f4c2434b97b80c7f8fdfaa072ca4baab7e239a15d6d70ed73a \
--hash=sha256:6c0c021f35b421ebf5976abf2daacc47e235f8b6082d3396a2fe3ccd537ab173 \
--hash=sha256:5d59a9d55027a8b88fd9fd2826c4392bd487d74bf628bb9d39beecc62a644c12 \
--hash=sha256:0a817b961b46894c5ca8a66b599c745b9a3d9f822725221f0e0fe49dc043a3a3 \
--hash=sha256:94ae132f0e40fe48f310bba63f477f14a43116f05ddb69d6fa31e93f05848ae2 \
--hash=sha256:7be0eec337359c155df191d6ae00a5e8bbb63933883f4f5dffc439dac5348c3f \
--hash=sha256:e0344c14c9cb89e76eb6a060e67980c9e35b3f36691e15e1b7a9e58a0a6c6dc3 \
--hash=sha256:4caa4b893d8fad33cf1964d3e51842cd78ba87401ab1d2e44556826df849a8ca \
--hash=sha256:391432971a66cfaf94b21c24ab465a4cc3e8bf4a939c1ca5c3e3a6e0abebdbcf \
--hash=sha256:bb5829d027ff82aa872d76158919045a7c1e91fbf241aec32cb07956e9ebd3c9 \
--hash=sha256:ebc15b1c22e55c4d5566e3ca4db8689470a0ca2babef8e3a9ee057a8b82ce4b1 \
--hash=sha256:596f3cd67e1b950bc372c33f1a28a0692080625592ea6392987dba7f09f17a94 \
--hash=sha256:30ee1eb3ebe1644d1c3f183d115a8c04e4e603ed6ce8e394ed39eea4a98469ac \
--hash=sha256:ec63da4e7e4a5f924b90af42eddf20b698a70e58d86a72d943857c4c6045b3ee \
--hash=sha256:ca238ceb7ba0bdf6ce88c1b74a87bffcee5afbfa1e41e173b1ceb095b39add46 \
--hash=sha256:ca28641954f767f9822c24e927ad894d45d5a1e501767599647259cbf030b903 \
--hash=sha256:39bdf8e70eee6b1c7b289ec6e5d84d49a6bfa11f8b8646b5b3dfe41219153316 \
--hash=sha256:53e5c1dc3d7a953de055d77bef2ff607ceef7a2aac0353b5d630ab67f7423638
frozendict==2.3.0 \
--hash=sha256:e18e2abd144a9433b0a8334582843b2aa0d3b9ac8b209aaa912ad365115fe2e1 \
--hash=sha256:96dc7a02e78da5725e5e642269bb7ae792e0c9f13f10f2e02689175ebbfedb35 \
--hash=sha256:752a6dcfaf9bb20a7ecab24980e4dbe041f154509c989207caf185522ef85461 \
--hash=sha256:5346d9fc1c936c76d33975a9a9f1a067342963105d9a403a99e787c939cc2bb2 \
--hash=sha256:60dd2253f1bacb63a7c486ec541a968af4f985ffb06602ee8954a3d39ec6bd2e \
--hash=sha256:b2e044602ce17e5cd86724add46660fb9d80169545164e763300a3b839cb1b79 \
--hash=sha256:a27a69b1ac3591e4258325108aee62b53c0eeb6ad0a993ae68d3c7eaea980420 \
--hash=sha256:4f45ef5f6b184d84744fff97b61f6b9a855e24d36b713ea2352fc723a047afa5 \
--hash=sha256:2d3f5016650c0e9a192f5024e68fb4d63f670d0ee58b099ed3f5b4c62ea30ecb \
--hash=sha256:6cf605916f50aabaaba5624c81eb270200f6c2c466c46960237a125ec8fe3ae0 \
--hash=sha256:f6da06e44904beae4412199d7e49be4f85c6cc168ab06b77c735ea7da5ce3454 \
--hash=sha256:1f34793fb409c4fa70ffd25bea87b01f3bd305fb1c6b09e7dff085b126302206 \
--hash=sha256:fd72494a559bdcd28aa71f4aa81860269cd0b7c45fff3e2614a0a053ecfd2a13 \
--hash=sha256:00ea9166aa68cc5feed05986206fdbf35e838a09cb3feef998cf35978ff8a803 \
--hash=sha256:9ffaf440648b44e0bc694c1a4701801941378ba3ba6541e17750ae4b4aeeb116 \
--hash=sha256:8578fe06815fcdcc672bd5603eebc98361a5317c1c3a13b28c6c810f6ea3b323 \
--hash=sha256:da4231adefc5928e7810da2732269d3ad7b5616295b3e693746392a8205ea0b5
hiredis==2.0.0 \
--hash=sha256:b4c8b0bc5841e578d5fb32a16e0c305359b987b850a06964bd5a62739d688048 \
--hash=sha256:0adea425b764a08270820531ec2218d0508f8ae15a448568109ffcae050fee26 \
@ -237,18 +234,18 @@ ijson==3.1.4 \
importlib-metadata==4.2.0 ; python_version < "3.8" and python_version >= "3.7" or python_version >= "3.6" and python_version < "3.8" or python_version < "3.8" \
--hash=sha256:057e92c15bc8d9e8109738a48db0ccb31b4d9d5cfbee5a8670879a30be66304b \
--hash=sha256:b7e52a1f8dec14a75ea73e0891f3060099ca1d8e6a462a4dff11c3e119ea1b31
importlib-resources==5.7.1 ; python_version < "3.9" and python_version >= "3.7" \
--hash=sha256:e447dc01619b1e951286f3929be820029d48c75eb25d265c28b92a16548212b8 \
--hash=sha256:b6062987dfc51f0fcb809187cffbd60f35df7acb4589091f154214af6d0d49d3
importlib-resources==5.4.0 ; python_version < "3.9" and python_version >= "3.7" \
--hash=sha256:33a95faed5fc19b4bc16b29a6eeae248a3fe69dd55d4d229d2b480e23eeaad45 \
--hash=sha256:d756e2f85dd4de2ba89be0b21dba2a3bbec2e871a42a3a16719258a11f87506b
incremental==21.3.0 ; python_version >= "3.6" \
--hash=sha256:92014aebc6a20b78a8084cdd5645eeaa7f74b8933f70fa3ada2cfbd1e3b54321 \
--hash=sha256:02f5de5aff48f6b9f665d99d48bfc7ec03b6e3943210de7cfc88856d755d6f57
jinja2==3.1.2 \
--hash=sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61 \
--hash=sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852
jsonschema==4.5.1 \
--hash=sha256:71b5e39324422543546572954ce71c67728922c104902cb7ce252e522235b33f \
--hash=sha256:7c6d882619340c3347a1bf7315e147e6d3dae439033ae6383d6acb908c101dfc
jinja2==3.0.3 \
--hash=sha256:077ce6014f7b40d03b47d1f1ca4b0fc8328a692bd284016f806ed0eaca390ad8 \
--hash=sha256:611bb273cd68f3b993fabdc4064fc858c5b47a973cb5aa7999ec1ba405c87cd7
jsonschema==4.4.0 \
--hash=sha256:77281a1f71684953ee8b3d488371b162419767973789272434bbc3f29d9c8823 \
--hash=sha256:636694eb41b3535ed608fe04129f26542b59ed99808b4f688aa32dcf55317a83
lxml==4.8.0 \
--hash=sha256:e1ab2fac607842ac36864e358c42feb0960ae62c34aa4caaf12ada0a1fb5d99b \
--hash=sha256:28d1af847786f68bec57961f31221125c29d6f52d9187c01cd34dc14e2b29430 \
@ -311,47 +308,47 @@ lxml==4.8.0 \
--hash=sha256:730766072fd5dcb219dd2b95c4c49752a54f00157f322bc6d71f7d2a31fecd79 \
--hash=sha256:8b99ec73073b37f9ebe8caf399001848fced9c08064effdbfc4da2b5a8d07b93 \
--hash=sha256:f63f62fc60e6228a4ca9abae28228f35e1bd3ce675013d1dfb828688d50c6e23
markupsafe==2.1.1 ; python_version >= "3.7" \
--hash=sha256:86b1f75c4e7c2ac2ccdaec2b9022845dbb81880ca318bb7a0a01fbf7813e3812 \
--hash=sha256:f121a1420d4e173a5d96e47e9a0c0dcff965afdf1626d28de1460815f7c4ee7a \
--hash=sha256:a49907dd8420c5685cfa064a1335b6754b74541bbb3706c259c02ed65b644b3e \
--hash=sha256:10c1bfff05d95783da83491be968e8fe789263689c02724e0c691933c52994f5 \
--hash=sha256:b7bd98b796e2b6553da7225aeb61f447f80a1ca64f41d83612e6139ca5213aa4 \
--hash=sha256:b09bf97215625a311f669476f44b8b318b075847b49316d3e28c08e41a7a573f \
--hash=sha256:694deca8d702d5db21ec83983ce0bb4b26a578e71fbdbd4fdcd387daa90e4d5e \
--hash=sha256:efc1913fd2ca4f334418481c7e595c00aad186563bbc1ec76067848c7ca0a933 \
--hash=sha256:4a33dea2b688b3190ee12bd7cfa29d39c9ed176bda40bfa11099a3ce5d3a7ac6 \
--hash=sha256:dda30ba7e87fbbb7eab1ec9f58678558fd9a6b8b853530e176eabd064da81417 \
--hash=sha256:671cd1187ed5e62818414afe79ed29da836dde67166a9fac6d435873c44fdd02 \
--hash=sha256:3799351e2336dc91ea70b034983ee71cf2f9533cdff7c14c90ea126bfd95d65a \
--hash=sha256:e72591e9ecd94d7feb70c1cbd7be7b3ebea3f548870aa91e2732960fa4d57a37 \
--hash=sha256:6fbf47b5d3728c6aea2abb0589b5d30459e369baa772e0f37a0320185e87c980 \
--hash=sha256:d5ee4f386140395a2c818d149221149c54849dfcfcb9f1debfe07a8b8bd63f9a \
--hash=sha256:bcb3ed405ed3222f9904899563d6fc492ff75cce56cba05e32eff40e6acbeaa3 \
--hash=sha256:e1c0b87e09fa55a220f058d1d49d3fb8df88fbfab58558f1198e08c1e1de842a \
--hash=sha256:8dc1c72a69aa7e082593c4a203dcf94ddb74bb5c8a731e4e1eb68d031e8498ff \
--hash=sha256:97a68e6ada378df82bc9f16b800ab77cbf4b2fada0081794318520138c088e4a \
--hash=sha256:e8c843bbcda3a2f1e3c2ab25913c80a3c5376cd00c6e8c4a86a89a28c8dc5452 \
--hash=sha256:0212a68688482dc52b2d45013df70d169f542b7394fc744c02a57374a4207003 \
--hash=sha256:8e576a51ad59e4bfaac456023a78f6b5e6e7651dcd383bcc3e18d06f9b55d6d1 \
--hash=sha256:4b9fe39a2ccc108a4accc2676e77da025ce383c108593d65cc909add5c3bd601 \
--hash=sha256:96e37a3dc86e80bf81758c152fe66dbf60ed5eca3d26305edf01892257049925 \
--hash=sha256:6d0072fea50feec76a4c418096652f2c3238eaa014b2f94aeb1d56a66b41403f \
--hash=sha256:089cf3dbf0cd6c100f02945abeb18484bd1ee57a079aefd52cffd17fba910b88 \
--hash=sha256:6a074d34ee7a5ce3effbc526b7083ec9731bb3cbf921bbe1d3005d4d2bdb3a63 \
--hash=sha256:421be9fbf0ffe9ffd7a378aafebbf6f4602d564d34be190fc19a193232fd12b1 \
--hash=sha256:fc7b548b17d238737688817ab67deebb30e8073c95749d55538ed473130ec0c7 \
--hash=sha256:e04e26803c9c3851c931eac40c695602c6295b8d432cbe78609649ad9bd2da8a \
--hash=sha256:b87db4360013327109564f0e591bd2a3b318547bcef31b468a92ee504d07ae4f \
--hash=sha256:99a2a507ed3ac881b975a2976d59f38c19386d128e7a9a18b7df6fff1fd4c1d6 \
--hash=sha256:56442863ed2b06d19c37f94d999035e15ee982988920e12a5b4ba29b62ad1f77 \
--hash=sha256:3ce11ee3f23f79dbd06fb3d63e2f6af7b12db1d46932fe7bd8afa259a5996603 \
--hash=sha256:33b74d289bd2f5e527beadcaa3f401e0df0a89927c1559c8566c066fa4248ab7 \
--hash=sha256:43093fb83d8343aac0b1baa75516da6092f58f41200907ef92448ecab8825135 \
--hash=sha256:8e3dcf21f367459434c18e71b2a9532d96547aef8a871872a5bd69a715c15f96 \
--hash=sha256:d4306c36ca495956b6d568d276ac11fdd9c30a36f1b6eb928070dc5360b22e1c \
--hash=sha256:46d00d6cfecdde84d40e572d63735ef81423ad31184100411e6e3388d405e247 \
--hash=sha256:7f91197cc9e48f989d12e4e6fbc46495c446636dfc81b9ccf50bb0ec74b91d4b
markupsafe==2.1.0 ; python_version >= "3.7" \
--hash=sha256:3028252424c72b2602a323f70fbf50aa80a5d3aa616ea6add4ba21ae9cc9da4c \
--hash=sha256:290b02bab3c9e216da57c1d11d2ba73a9f73a614bbdcc027d299a60cdfabb11a \
--hash=sha256:6e104c0c2b4cd765b4e83909cde7ec61a1e313f8a75775897db321450e928cce \
--hash=sha256:24c3be29abb6b34052fd26fc7a8e0a49b1ee9d282e3665e8ad09a0a68faee5b3 \
--hash=sha256:204730fd5fe2fe3b1e9ccadb2bd18ba8712b111dcabce185af0b3b5285a7c989 \
--hash=sha256:d3b64c65328cb4cd252c94f83e66e3d7acf8891e60ebf588d7b493a55a1dbf26 \
--hash=sha256:96de1932237abe0a13ba68b63e94113678c379dca45afa040a17b6e1ad7ed076 \
--hash=sha256:75bb36f134883fdbe13d8e63b8675f5f12b80bb6627f7714c7d6c5becf22719f \
--hash=sha256:4056f752015dfa9828dce3140dbadd543b555afb3252507348c493def166d454 \
--hash=sha256:d4e702eea4a2903441f2735799d217f4ac1b55f7d8ad96ab7d4e25417cb0827c \
--hash=sha256:f0eddfcabd6936558ec020130f932d479930581171368fd728efcfb6ef0dd357 \
--hash=sha256:5ddea4c352a488b5e1069069f2f501006b1a4362cb906bee9a193ef1245a7a61 \
--hash=sha256:09c86c9643cceb1d87ca08cdc30160d1b7ab49a8a21564868921959bd16441b8 \
--hash=sha256:a0a0abef2ca47b33fb615b491ce31b055ef2430de52c5b3fb19a4042dbc5cadb \
--hash=sha256:736895a020e31b428b3382a7887bfea96102c529530299f426bf2e636aacec9e \
--hash=sha256:679cbb78914ab212c49c67ba2c7396dc599a8479de51b9a87b174700abd9ea49 \
--hash=sha256:84ad5e29bf8bab3ad70fd707d3c05524862bddc54dc040982b0dbcff36481de7 \
--hash=sha256:8da5924cb1f9064589767b0f3fc39d03e3d0fb5aa29e0cb21d43106519bd624a \
--hash=sha256:454ffc1cbb75227d15667c09f164a0099159da0c1f3d2636aa648f12675491ad \
--hash=sha256:142119fb14a1ef6d758912b25c4e803c3ff66920635c44078666fe7cc3f8f759 \
--hash=sha256:b2a5a856019d2833c56a3dcac1b80fe795c95f401818ea963594b345929dffa7 \
--hash=sha256:1d1fb9b2eec3c9714dd936860850300b51dbaa37404209c8d4cb66547884b7ed \
--hash=sha256:62c0285e91414f5c8f621a17b69fc0088394ccdaa961ef469e833dbff64bd5ea \
--hash=sha256:fc3150f85e2dbcf99e65238c842d1cfe69d3e7649b19864c1cc043213d9cd730 \
--hash=sha256:f02cf7221d5cd915d7fa58ab64f7ee6dd0f6cddbb48683debf5d04ae9b1c2cc1 \
--hash=sha256:d5653619b3eb5cbd35bfba3c12d575db2a74d15e0e1c08bf1db788069d410ce8 \
--hash=sha256:7d2f5d97fcbd004c03df8d8fe2b973fe2b14e7bfeb2cfa012eaa8759ce9a762f \
--hash=sha256:3cace1837bc84e63b3fd2dfce37f08f8c18aeb81ef5cf6bb9b51f625cb4e6cd8 \
--hash=sha256:fabbe18087c3d33c5824cb145ffca52eccd053061df1d79d4b66dafa5ad2a5ea \
--hash=sha256:023af8c54fe63530545f70dd2a2a7eed18d07a9a77b94e8bf1e2ff7f252db9a3 \
--hash=sha256:d66624f04de4af8bbf1c7f21cc06649c1c69a7f84109179add573ce35e46d448 \
--hash=sha256:c532d5ab79be0199fa2658e24a02fce8542df196e60665dd322409a03db6a52c \
--hash=sha256:e67ec74fada3841b8c5f4c4f197bea916025cb9aa3fe5abf7d52b655d042f956 \
--hash=sha256:30c653fde75a6e5eb814d2a0a89378f83d1d3f502ab710904ee585c38888816c \
--hash=sha256:961eb86e5be7d0973789f30ebcf6caab60b844203f4396ece27310295a6082c7 \
--hash=sha256:598b65d74615c021423bd45c2bc5e9b59539c875a9bdb7e5f2a6b92dfcfc268d \
--hash=sha256:599941da468f2cf22bf90a84f6e2a65524e87be2fce844f96f2dd9a6c9d1e635 \
--hash=sha256:e6f7f3f41faffaea6596da86ecc2389672fa949bd035251eab26dc6697451d05 \
--hash=sha256:b8811d48078d1cf2a6863dafb896e68406c5f513048451cd2ded0473133473c7 \
--hash=sha256:80beaf63ddfbc64a0452b841d8036ca0611e049650e20afcb882f5d3c266d65f
matrix-common==1.1.0 \
--hash=sha256:5d6dfd777503b2f3a031b566e6af25b6e95f9c0818ef57d954c3190fce5eb407 \
--hash=sha256:a8238748afc2b37079818367fed5156f355771b07c8ff0a175934f47e0ff3276
@ -399,51 +396,48 @@ packaging==21.3 \
parameterized==0.8.1 \
--hash=sha256:9cbb0b69a03e8695d68b3399a8a5825200976536fe1cb79db60ed6a4c8c9efe9 \
--hash=sha256:41bbff37d6186430f77f900d777e5bb6a24928a1c46fb1de692f8b52b8833b5c
phonenumbers==8.12.48 \
--hash=sha256:429273b98966475d0c18ee293096eaf81c6b5727d0d55c7ba5ce9c60ec8c59ef \
--hash=sha256:af0681fbfe9fa0721376ad9b729e772e5d20bf2cf50d9dd8ca2f0bdd78e9f0ce
pillow==9.1.0 \
--hash=sha256:af79d3fde1fc2e33561166d62e3b63f0cc3e47b5a3a2e5fea40d4917754734ea \
--hash=sha256:55dd1cf09a1fd7c7b78425967aacae9b0d70125f7d3ab973fadc7b5abc3de652 \
--hash=sha256:66822d01e82506a19407d1afc104c3fcea3b81d5eb11485e593ad6b8492f995a \
--hash=sha256:a5eaf3b42df2bcda61c53a742ee2c6e63f777d0e085bbc6b2ab7ed57deb13db7 \
--hash=sha256:01ce45deec9df310cbbee11104bae1a2a43308dd9c317f99235b6d3080ddd66e \
--hash=sha256:aea7ce61328e15943d7b9eaca87e81f7c62ff90f669116f857262e9da4057ba3 \
--hash=sha256:7a053bd4d65a3294b153bdd7724dce864a1d548416a5ef61f6d03bf149205160 \
--hash=sha256:97bda660702a856c2c9e12ec26fc6d187631ddfd896ff685814ab21ef0597033 \
--hash=sha256:21dee8466b42912335151d24c1665fcf44dc2ee47e021d233a40c3ca5adae59c \
--hash=sha256:6b6d4050b208c8ff886fd3db6690bf04f9a48749d78b41b7a5bf24c236ab0165 \
--hash=sha256:5cfca31ab4c13552a0f354c87fbd7f162a4fafd25e6b521bba93a57fe6a3700a \
--hash=sha256:ed742214068efa95e9844c2d9129e209ed63f61baa4d54dbf4cf8b5e2d30ccf2 \
--hash=sha256:c9efef876c21788366ea1f50ecb39d5d6f65febe25ad1d4c0b8dff98843ac244 \
--hash=sha256:de344bcf6e2463bb25179d74d6e7989e375f906bcec8cb86edb8b12acbc7dfef \
--hash=sha256:17869489de2fce6c36690a0c721bd3db176194af5f39249c1ac56d0bb0fcc512 \
--hash=sha256:25023a6209a4d7c42154073144608c9a71d3512b648a2f5d4465182cb93d3477 \
--hash=sha256:8782189c796eff29dbb37dd87afa4ad4d40fc90b2742704f94812851b725964b \
--hash=sha256:463acf531f5d0925ca55904fa668bb3461c3ef6bc779e1d6d8a488092bdee378 \
--hash=sha256:3f42364485bfdab19c1373b5cd62f7c5ab7cc052e19644862ec8f15bb8af289e \
--hash=sha256:3fddcdb619ba04491e8f771636583a7cc5a5051cd193ff1aa1ee8616d2a692c5 \
--hash=sha256:4fe29a070de394e449fd88ebe1624d1e2d7ddeed4c12e0b31624561b58948d9a \
--hash=sha256:c24f718f9dd73bb2b31a6201e6db5ea4a61fdd1d1c200f43ee585fc6dcd21b34 \
--hash=sha256:fb89397013cf302f282f0fc998bb7abf11d49dcff72c8ecb320f76ea6e2c5717 \
--hash=sha256:c870193cce4b76713a2b29be5d8327c8ccbe0d4a49bc22968aa1e680930f5581 \
--hash=sha256:69e5ddc609230d4408277af135c5b5c8fe7a54b2bdb8ad7c5100b86b3aab04c6 \
--hash=sha256:35be4a9f65441d9982240e6966c1eaa1c654c4e5e931eaf580130409e31804d4 \
--hash=sha256:82283af99c1c3a5ba1da44c67296d5aad19f11c535b551a5ae55328a317ce331 \
--hash=sha256:a325ac71914c5c043fa50441b36606e64a10cd262de12f7a179620f579752ff8 \
--hash=sha256:a598d8830f6ef5501002ae85c7dbfcd9c27cc4efc02a1989369303ba85573e58 \
--hash=sha256:0c51cb9edac8a5abd069fd0758ac0a8bfe52c261ee0e330f363548aca6893595 \
--hash=sha256:a336a4f74baf67e26f3acc4d61c913e378e931817cd1e2ef4dfb79d3e051b481 \
--hash=sha256:eb1b89b11256b5b6cad5e7593f9061ac4624f7651f7a8eb4dfa37caa1dfaa4d0 \
--hash=sha256:255c9d69754a4c90b0ee484967fc8818c7ff8311c6dddcc43a4340e10cd1636a \
--hash=sha256:5a3ecc026ea0e14d0ad7cd990ea7f48bfcb3eb4271034657dc9d06933c6629a7 \
--hash=sha256:c5b0ff59785d93b3437c3703e3c64c178aabada51dea2a7f2c5eccf1bcf565a3 \
--hash=sha256:c7110ec1701b0bf8df569a7592a196c9d07c764a0a74f65471ea56816f10e2c8 \
--hash=sha256:8d79c6f468215d1a8415aa53d9868a6b40c4682165b8cb62a221b1baa47db458 \
--hash=sha256:f401ed2bbb155e1ade150ccc63db1a4f6c1909d3d378f7d1235a44e90d75fb97
prometheus-client==0.14.1 \
--hash=sha256:522fded625282822a89e2773452f42df14b5a8e84a86433e3f8a189c1d54dc01 \
--hash=sha256:5459c427624961076277fdc6dc50540e2bacb98eebde99886e59ec55ed92093a
phonenumbers==8.12.44 \
--hash=sha256:cc1299cf37b309ecab6214297663ab86cb3d64ae37fd5b88e904fe7983a874a6 \
--hash=sha256:26cfd0257d1704fe2f88caff2caabb70d16a877b1e65b6aae51f9fbbe10aa8ce
pillow==9.0.1 \
--hash=sha256:a5d24e1d674dd9d72c66ad3ea9131322819ff86250b30dc5821cbafcfa0b96b4 \
--hash=sha256:2632d0f846b7c7600edf53c48f8f9f1e13e62f66a6dbc15191029d950bfed976 \
--hash=sha256:b9618823bd237c0d2575283f2939655f54d51b4527ec3972907a927acbcc5bfc \
--hash=sha256:9bfdb82cdfeccec50aad441afc332faf8606dfa5e8efd18a6692b5d6e79f00fd \
--hash=sha256:5100b45a4638e3c00e4d2320d3193bdabb2d75e79793af7c3eb139e4f569f16f \
--hash=sha256:528a2a692c65dd5cafc130de286030af251d2ee0483a5bf50c9348aefe834e8a \
--hash=sha256:0f29d831e2151e0b7b39981756d201f7108d3d215896212ffe2e992d06bfe049 \
--hash=sha256:855c583f268edde09474b081e3ddcd5cf3b20c12f26e0d434e1386cc5d318e7a \
--hash=sha256:d9d7942b624b04b895cb95af03a23407f17646815495ce4547f0e60e0b06f58e \
--hash=sha256:81c4b81611e3a3cb30e59b0cf05b888c675f97e3adb2c8672c3154047980726b \
--hash=sha256:413ce0bbf9fc6278b2d63309dfeefe452835e1c78398efb431bab0672fe9274e \
--hash=sha256:80fe64a6deb6fcfdf7b8386f2cf216d329be6f2781f7d90304351811fb591360 \
--hash=sha256:cef9c85ccbe9bee00909758936ea841ef12035296c748aaceee535969e27d31b \
--hash=sha256:1d19397351f73a88904ad1aee421e800fe4bbcd1aeee6435fb62d0a05ccd1030 \
--hash=sha256:d21237d0cd37acded35154e29aec853e945950321dd2ffd1a7d86fe686814669 \
--hash=sha256:ede5af4a2702444a832a800b8eb7f0a7a1c0eed55b644642e049c98d589e5092 \
--hash=sha256:b5b3f092fe345c03bca1e0b687dfbb39364b21ebb8ba90e3fa707374b7915204 \
--hash=sha256:335ace1a22325395c4ea88e00ba3dc89ca029bd66bd5a3c382d53e44f0ccd77e \
--hash=sha256:db6d9fac65bd08cea7f3540b899977c6dee9edad959fa4eaf305940d9cbd861c \
--hash=sha256:f154d173286a5d1863637a7dcd8c3437bb557520b01bddb0be0258dcb72696b5 \
--hash=sha256:14d4b1341ac07ae07eb2cc682f459bec932a380c3b122f5540432d8977e64eae \
--hash=sha256:effb7749713d5317478bb3acb3f81d9d7c7f86726d41c1facca068a04cf5bb4c \
--hash=sha256:7f7609a718b177bf171ac93cea9fd2ddc0e03e84d8fa4e887bdfc39671d46b00 \
--hash=sha256:80ca33961ced9c63358056bd08403ff866512038883e74f3a4bf88ad3eb66838 \
--hash=sha256:1c3c33ac69cf059bbb9d1a71eeaba76781b450bc307e2291f8a4764d779a6b28 \
--hash=sha256:12875d118f21cf35604176872447cdb57b07126750a33748bac15e77f90f1f9c \
--hash=sha256:514ceac913076feefbeaf89771fd6febde78b0c4c1b23aaeab082c41c694e81b \
--hash=sha256:d3c5c79ab7dfce6d88f1ba639b77e77a17ea33a01b07b99840d6ed08031cb2a7 \
--hash=sha256:718856856ba31f14f13ba885ff13874be7fefc53984d2832458f12c38205f7f7 \
--hash=sha256:f25ed6e28ddf50de7e7ea99d7a976d6a9c415f03adcaac9c41ff6ff41b6d86ac \
--hash=sha256:011233e0c42a4a7836498e98c1acf5e744c96a67dd5032a6f666cc1fb97eab97 \
--hash=sha256:253e8a302a96df6927310a9d44e6103055e8fb96a6822f8b7f514bb7ef77de56 \
--hash=sha256:6295f6763749b89c994fcb6d8a7f7ce03c3992e695f89f00b741b4580b199b7e \
--hash=sha256:a9f44cd7e162ac6191491d7249cceb02b8116b0f7e847ee33f739d7cb1ea1f70 \
--hash=sha256:6c8bc8238a7dfdaf7a75f5ec5a663f4173f8c367e5a39f87e720495e1eed75fa
prometheus-client==0.14.0 \
--hash=sha256:f4aba3fdd1735852049f537c1f0ab177159b7ab76f271ecc4d2f45aa2a1d01f2 \
--hash=sha256:8f7a922dd5455ad524b6ba212ce8eb2b4b05e073f4ec7218287f88b1cac34750
psycopg2==2.9.3 ; python_version >= "3.6" and platform_python_implementation == "PyPy" or platform_python_implementation != "PyPy" \
--hash=sha256:083707a696e5e1c330af2508d8fab36f9700b26621ccbcb538abe22e15485362 \
--hash=sha256:d3ca6421b942f60c008f81a3541e8faf6865a28d5a9b48544b0ee4f40cac7fca \
@ -491,13 +485,13 @@ pyasn1==0.4.8 \
pycparser==2.21 ; python_full_version >= "3.6.7" and platform_python_implementation == "PyPy" \
--hash=sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9 \
--hash=sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206
pyjwt==2.3.0 \
--hash=sha256:e0c4bb8d9f0af0c7f5b1ec4c5036309617d03d56932877f2f7a0beeb5318322f \
--hash=sha256:b888b4d56f06f6dcd777210c334e69c737be74755d3e5e9ee3fe67dc18a0ee41
pyjwt==2.4.0 \
--hash=sha256:72d1d253f32dbd4f5c88eaf1fdc62f3a19f676ccbadb9dbc5d07e951b2b26daf \
--hash=sha256:d42908208c699b3b973cbeb01a969ba6a96c821eefb1c5bfe4c390c01d67abba
pymacaroons==0.13.0 \
--hash=sha256:3e14dff6a262fdbf1a15e769ce635a8aea72e6f8f91e408f9a97166c53b91907 \
--hash=sha256:1e6bba42a5f66c245adf38a5a4006a99dcc06a0703786ea636098667d42903b8
pynacl==1.5.0 \
pynacl==1.5.0 ; python_version >= "3.6" \
--hash=sha256:401002a4aaa07c9414132aaed7f6836ff98f59277a234704ff66878c2ee4a0d1 \
--hash=sha256:52cb72a79269189d4e0dc537556f4740f7f0a9ec41c1322598799b0bdad4ef92 \
--hash=sha256:a36d4a9dda1f19ce6e03c9a784a2921a4b726b02e1c736600ca9c22029474394 \
@ -511,9 +505,9 @@ pynacl==1.5.0 \
pyopenssl==22.0.0 \
--hash=sha256:ea252b38c87425b64116f808355e8da644ef9b07e429398bfece610f893ee2e0 \
--hash=sha256:660b1b1425aac4a1bea1d94168a85d99f0b3144c869dd4390d27629d0087f1bf
pyparsing==3.0.8 ; python_full_version >= "3.6.8" \
--hash=sha256:ef7b523f6356f763771559412c0d7134753f037822dad1b16945b7b846f7ad06 \
--hash=sha256:7bf433498c016c4314268d95df76c81b842a4cb2b276fa3312cfb1e1d85f6954
pyparsing==3.0.7 ; python_version >= "3.6" \
--hash=sha256:a6c06a88f252e6c322f65faf8f418b16213b51bdfaece0524c1c1bc30c63c484 \
--hash=sha256:18ee9022775d270c55187733956460083db60b37d0d0fb357445f3094eed3eea
pyrsistent==0.18.1 ; python_version >= "3.7" \
--hash=sha256:df46c854f490f81210870e509818b729db4488e1f30f2a1ce1698b2295a878d1 \
--hash=sha256:5d45866ececf4a5fff8742c25722da6d4c9e180daa7b405dc0a2a2790d668c26 \
@ -576,9 +570,6 @@ requests==2.27.1 ; python_full_version >= "3.6.0" \
service-identity==21.1.0 \
--hash=sha256:6e6c6086ca271dc11b033d17c3a8bea9f24ebff920c587da090afc9519419d34 \
--hash=sha256:f0b0caac3d40627c3c04d7a51b6e06721857a0e10a8775f2d1d7e72901b3a7db
setuptools==62.1.0 ; python_version >= "3.7" \
--hash=sha256:26ead7d1f93efc0f8c804d9fafafbe4a44b179580a7105754b245155f9af05a8 \
--hash=sha256:47c7b0c0f8fc10eec4cf1e71c6fdadf8decaa74ffa087e68cd1c20db7ad6a592
signedjson==1.1.4 \
--hash=sha256:45569ec54241c65d2403fe3faf7169be5322547706a231e884ca2b427f23d228 \
--hash=sha256:cd91c56af53f169ef032c62e9c4a3292dc158866933318d0592e3462db3d6492
@ -672,21 +663,21 @@ twisted==22.4.0 \
txredisapi==1.4.7 \
--hash=sha256:34c9eba8d34f452d30661f073b67b8cd42b695e3d31678ec1bbf628a65a0f059 \
--hash=sha256:e6cc43f51e35d608abdca8f8c7d20e148fe1d82679f6e584baea613ebec812bb
typing-extensions==4.2.0 \
--hash=sha256:6657594ee297170d19f67d55c05852a874e7eb634f4f753dbd667855e07c1708 \
--hash=sha256:f1c24655a0da0d1b67f07e17a5e6b2a105894e6824b92096378bb3668ef02376
typing-extensions==4.1.1 \
--hash=sha256:21c85e0fe4b9a155d0799430b0ad741cdce7e359660ccbd8b530613e8df88ce2 \
--hash=sha256:1a9462dcc3347a79b1f1c0271fbe79e844580bb598bafa1ed208b94da3cdcd42
unpaddedbase64==2.1.0 \
--hash=sha256:485eff129c30175d2cd6f0cd8d2310dff51e666f7f36175f738d75dfdbd0b1c6 \
--hash=sha256:7273c60c089de39d90f5d6d4a7883a79e319dc9d9b1c8924a7fab96178a5f005
urllib3==1.26.9 ; python_full_version >= "3.6.0" and python_version < "4" \
--hash=sha256:44ece4d53fb1706f667c9bd1c648f5469a2ec925fcf3a776667042d645472c14 \
--hash=sha256:aabaf16477806a5e1dd19aa41f8c2b7950dd3c746362d7e3223dbe6de6ac448e
webencodings==0.5.1 ; python_version >= "3.7" \
urllib3==1.26.8 ; python_full_version >= "3.6.0" and python_version < "4" \
--hash=sha256:000ca7f471a233c2251c6c7023ee85305721bfdf18621ebff4fd17a8653427ed \
--hash=sha256:0e7c33d9a63e7ddfcb86780aac87befc2fbddf46c58dbb487e0855f7ceec283c
webencodings==0.5.1 ; python_version >= "3.6" \
--hash=sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78 \
--hash=sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923
zipp==3.8.0 ; python_version >= "3.7" and python_version < "3.8" or python_version < "3.9" and python_version >= "3.7" \
--hash=sha256:c4f6e5bbf48e74f7a38e7cc5b0480ff42b0ae5178957d564d18932525d5cf099 \
--hash=sha256:56bf8aadb83c24db6c4b577e13de374ccfb67da2078beba1d037c17980bf43ad
zipp==3.7.0 ; python_version >= "3.7" and python_version < "3.8" or python_version < "3.9" and python_version >= "3.7" \
--hash=sha256:b47250dd24f92b7dd6a0a8fc5244da14608f3ca90a5efcd37a3b1642fac9a375 \
--hash=sha256:9f50f446828eb9d45b267433fd3e9da8d801f614129124863f9c51ebceafb87d
zope.interface==5.4.0 ; python_full_version >= "3.6.7" \
--hash=sha256:7df1e1c05304f26faa49fa752a8c690126cf98b40b91d54e6e9cc3b7d6ffe8b7 \
--hash=sha256:2c98384b254b37ce50eddd55db8d381a5c53b4c10ee66e1e7fe749824f894021 \

View file

@ -45,7 +45,7 @@ docker build -t matrixdotorg/synapse -f "docker/Dockerfile" .
extra_test_args=()
test_tags="synapse_blacklist,msc2716,msc3030"
test_tags="synapse_blacklist,msc2716,msc3030,msc3787"
# If we're using workers, modify the docker files slightly.
if [[ -n "$WORKERS" ]]; then

View file

@ -62,7 +62,7 @@ from synapse.storage.databases.main.end_to_end_keys import EndToEndKeyBackground
from synapse.storage.databases.main.events_bg_updates import (
EventsBackgroundUpdatesStore,
)
from synapse.storage.databases.main.group_server import GroupServerWorkerStore
from synapse.storage.databases.main.group_server import GroupServerStore
from synapse.storage.databases.main.media_repository import (
MediaRepositoryBackgroundUpdateStore,
)
@ -102,14 +102,6 @@ BOOLEAN_COLUMNS = {
"devices": ["hidden"],
"device_lists_outbound_pokes": ["sent"],
"users_who_share_rooms": ["share_private"],
"groups": ["is_public"],
"group_rooms": ["is_public"],
"group_users": ["is_public", "is_admin"],
"group_summary_rooms": ["is_public"],
"group_room_categories": ["is_public"],
"group_summary_users": ["is_public"],
"group_roles": ["is_public"],
"local_group_membership": ["is_publicised", "is_admin"],
"e2e_room_keys": ["is_verified"],
"account_validity": ["email_sent"],
"redactions": ["have_censored"],
@ -175,6 +167,22 @@ IGNORED_TABLES = {
"ui_auth_sessions",
"ui_auth_sessions_credentials",
"ui_auth_sessions_ips",
# Groups/communities are no longer supported.
"group_attestations_remote",
"group_attestations_renewals",
"group_invites",
"group_roles",
"group_room_categories",
"group_rooms",
"group_summary_roles",
"group_summary_room_categories",
"group_summary_rooms",
"group_summary_users",
"group_users",
"groups",
"local_group_membership",
"local_group_updates",
"remote_profile_cache",
}
@ -211,7 +219,7 @@ class Store(
PushRuleStore,
PusherWorkerStore,
PresenceBackgroundUpdateStore,
GroupServerWorkerStore,
GroupServerStore,
):
def execute(self, f: Callable[..., R], *args: Any, **kwargs: Any) -> Awaitable[R]:
return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs)
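
The two tables of constants touched above drive the SQLite-to-Postgres port: BOOLEAN_COLUMNS names columns that must be coerced from SQLite's 0/1 integers into real booleans, while IGNORED_TABLES (now covering every groups/communities table) names tables that are skipped outright. A minimal sketch of that flow; only the dict excerpts come from the diff, and the porting helper itself is hypothetical:

    # Excerpts of the real constants above; port_rows itself is a hypothetical
    # illustration, not the actual synapse_port_db code.
    BOOLEAN_COLUMNS = {"e2e_room_keys": ["is_verified"], "redactions": ["have_censored"]}
    IGNORED_TABLES = {"groups", "group_rooms", "local_group_membership"}

    def port_rows(table: str, rows: list) -> list:
        if table in IGNORED_TABLES:
            return []  # dropped tables are never copied to Postgres
        for row in rows:
            for col in BOOLEAN_COLUMNS.get(table, []):
                row[col] = bool(row[col])  # SQLite 0/1 -> boolean
        return rows

    print(port_rows("groups", [{"is_public": 1}]))           # []
    print(port_rows("e2e_room_keys", [{"is_verified": 1}]))  # [{'is_verified': True}]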

View file

@ -29,12 +29,11 @@ from synapse.api.errors import (
MissingClientTokenError,
)
from synapse.appservice import ApplicationService
from synapse.events import EventBase
from synapse.http import get_request_user_agent
from synapse.http.site import SynapseRequest
from synapse.logging.opentracing import active_span, force_tracing, start_active_span
from synapse.storage.databases.main.registration import TokenLookupResult
from synapse.types import Requester, StateMap, UserID, create_requester
from synapse.types import Requester, UserID, create_requester
from synapse.util.caches.lrucache import LruCache
from synapse.util.macaroons import get_value_from_macaroon, satisfy_expiry
@ -61,8 +60,8 @@ class Auth:
self.hs = hs
self.clock = hs.get_clock()
self.store = hs.get_datastores().main
self.state = hs.get_state_handler()
self._account_validity_handler = hs.get_account_validity_handler()
self._storage_controllers = hs.get_storage_controllers()
self.token_cache: LruCache[str, Tuple[str, bool]] = LruCache(
10000, "token_cache"
@ -79,9 +78,8 @@ class Auth:
self,
room_id: str,
user_id: str,
current_state: Optional[StateMap[EventBase]] = None,
allow_departed_users: bool = False,
) -> EventBase:
) -> Tuple[str, Optional[str]]:
"""Check if the user is in the room, or was at some point.
Args:
room_id: The room to check.
@ -99,29 +97,28 @@ class Auth:
Raises:
AuthError if the user is/was not in the room.
Returns:
Membership event for the user if the user was in the
room. This will be the join event if they are currently joined to
the room. This will be the leave event if they have left the room.
The current membership of the user in the room and the
membership event ID of the user.
"""
if current_state:
member = current_state.get((EventTypes.Member, user_id), None)
else:
member = await self.state.get_current_state(
room_id=room_id, event_type=EventTypes.Member, state_key=user_id
)
if member:
membership = member.membership
(
membership,
member_event_id,
) = await self.store.get_local_current_membership_for_user_in_room(
user_id=user_id,
room_id=room_id,
)
if membership:
if membership == Membership.JOIN:
return member
return membership, member_event_id
# XXX this looks totally bogus. Why do we not allow users who have been banned,
# or those who were members previously and have been re-invited?
if allow_departed_users and membership == Membership.LEAVE:
forgot = await self.store.did_forget(user_id, room_id)
if not forgot:
return member
return membership, member_event_id
raise AuthError(403, "User %s not in room %s" % (user_id, room_id))
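
With the new signature above, callers get a (membership, member_event_id) tuple looked up from the local membership table instead of a full membership event. A hedged sketch of the new calling convention; the wrapper function is invented, and only check_user_in_room and Membership come from the codebase:

    from typing import Optional

    from synapse.api.constants import Membership

    async def require_joined(auth, room_id: str, user_id: str) -> Optional[str]:
        # Raises AuthError (as above) if the user is/was not in the room; with
        # allow_departed_users=False only a current join gets through.
        membership, member_event_id = await auth.check_user_in_room(
            room_id, user_id, allow_departed_users=False
        )
        assert membership == Membership.JOIN
        return member_event_id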
@ -603,8 +600,11 @@ class Auth:
# We currently require the user is a "moderator" in the room. We do this
# by checking if they would (theoretically) be able to change the
# m.room.canonical_alias events
power_level_event = await self.state.get_current_state(
room_id, EventTypes.PowerLevels, ""
power_level_event = (
await self._storage_controllers.state.get_current_state_event(
room_id, EventTypes.PowerLevels, ""
)
)
auth_events = {}
@ -694,12 +694,11 @@ class Auth:
# * The user is a non-guest user, and was ever in the room
# * The user is a guest user, and has joined the room
# else it will throw.
member_event = await self.check_user_in_room(
return await self.check_user_in_room(
room_id, user_id, allow_departed_users=allow_departed_users
)
return member_event.membership, member_event.event_id
except AuthError:
visibility = await self.state.get_current_state(
visibility = await self._storage_controllers.state.get_current_state_event(
room_id, EventTypes.RoomHistoryVisibility, ""
)
if (

View file

@ -31,11 +31,6 @@ MAX_ALIAS_LENGTH = 255
# the maximum length for a user id is 255 characters
MAX_USERID_LENGTH = 255
# The maximum length for a group id is 255 characters
MAX_GROUPID_LENGTH = 255
MAX_GROUP_CATEGORYID_LENGTH = 255
MAX_GROUP_ROLEID_LENGTH = 255
class Membership:
@ -100,7 +95,6 @@ class EventTypes:
Aliases: Final = "m.room.aliases"
Redaction: Final = "m.room.redaction"
ThirdPartyInvite: Final = "m.room.third_party_invite"
RelatedGroups: Final = "m.room.related_groups"
RoomHistoryVisibility: Final = "m.room.history_visibility"
CanonicalAlias: Final = "m.room.canonical_alias"
@ -142,7 +136,13 @@ class DeviceKeyAlgorithms:
class EduTypes:
Presence: Final = "m.presence"
PRESENCE: Final = "m.presence"
TYPING: Final = "m.typing"
RECEIPT: Final = "m.receipt"
DEVICE_LIST_UPDATE: Final = "m.device_list_update"
SIGNING_KEY_UPDATE: Final = "m.signing_key_update"
UNSTABLE_SIGNING_KEY_UPDATE: Final = "org.matrix.signing_key_update"
DIRECT_TO_DEVICE: Final = "m.direct_to_device"
class RejectedReason:

View file

@ -79,6 +79,13 @@ class Codes(str, Enum):
WEAK_PASSWORD = "M_WEAK_PASSWORD"
INVALID_SIGNATURE = "M_INVALID_SIGNATURE"
USER_DEACTIVATED = "M_USER_DEACTIVATED"
# The account has been suspended on the server.
# Unlike `USER_DEACTIVATED`, this is a reversible measure
# that may be appealed and reverted.
# Part of MSC3823.
USER_ACCOUNT_SUSPENDED = "ORG.MATRIX.MSC3823.USER_ACCOUNT_SUSPENDED"
BAD_ALIAS = "M_BAD_ALIAS"
# For restricted join rules.
UNABLE_AUTHORISE_JOIN = "M_UNABLE_TO_AUTHORISE_JOIN"
@ -139,7 +146,13 @@ class SynapseError(CodeMessageException):
errcode: Matrix error code e.g 'M_FORBIDDEN'
"""
def __init__(self, code: int, msg: str, errcode: str = Codes.UNKNOWN):
def __init__(
self,
code: int,
msg: str,
errcode: str = Codes.UNKNOWN,
additional_fields: Optional[Dict] = None,
):
"""Constructs a synapse error.
Args:
@ -149,9 +162,13 @@ class SynapseError(CodeMessageException):
"""
super().__init__(code, msg)
self.errcode = errcode
if additional_fields is None:
self._additional_fields: Dict = {}
else:
self._additional_fields = dict(additional_fields)
def error_dict(self) -> "JsonDict":
return cs_error(self.msg, self.errcode)
return cs_error(self.msg, self.errcode, **self._additional_fields)
class InvalidAPICallError(SynapseError):
@ -176,14 +193,7 @@ class ProxiedRequestError(SynapseError):
errcode: str = Codes.UNKNOWN,
additional_fields: Optional[Dict] = None,
):
super().__init__(code, msg, errcode)
if additional_fields is None:
self._additional_fields: Dict = {}
else:
self._additional_fields = dict(additional_fields)
def error_dict(self) -> "JsonDict":
return cs_error(self.msg, self.errcode, **self._additional_fields)
super().__init__(code, msg, errcode, additional_fields)
class ConsentNotGivenError(SynapseError):
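
The net effect of the two hunks above: additional_fields moves up from ProxiedRequestError into SynapseError itself, so any Synapse error can carry extra top-level keys in its error body. A small sketch; the errcode is the real MSC3823 code introduced above, while the "href" field is an invented example:

    from synapse.api.errors import Codes, SynapseError

    err = SynapseError(
        403,
        "This account has been suspended",
        errcode=Codes.USER_ACCOUNT_SUSPENDED,
        additional_fields={"href": "https://example.org/appeal"},  # invented field
    )
    # error_dict() merges the extra keys into the standard error body:
    # {"error": "This account has been suspended",
    #  "errcode": "ORG.MATRIX.MSC3823.USER_ACCOUNT_SUSPENDED",
    #  "href": "https://example.org/appeal"}
    print(err.error_dict())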

View file

@ -33,7 +33,7 @@ from typing import (
import jsonschema
from jsonschema import FormatChecker
from synapse.api.constants import EventContentFields
from synapse.api.constants import EduTypes, EventContentFields
from synapse.api.errors import SynapseError
from synapse.api.presence import UserPresenceState
from synapse.events import EventBase
@ -347,7 +347,7 @@ class Filter:
user_id = event.user_id
field_matchers = {
"senders": lambda v: user_id == v,
"types": lambda v: "m.presence" == v,
"types": lambda v: EduTypes.PRESENCE == v,
}
return self._check_fields(field_matchers)
else:

View file

@ -37,7 +37,6 @@ from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.filtering import SlavedFilteringStore
from synapse.replication.slave.storage.groups import SlavedGroupServerStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
@ -55,7 +54,6 @@ class AdminCmdSlavedStore(
SlavedApplicationServiceStore,
SlavedRegistrationStore,
SlavedFilteringStore,
SlavedGroupServerStore,
SlavedDeviceInboxStore,
SlavedDeviceStore,
SlavedPushRuleStore,

View file

@ -58,7 +58,6 @@ from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.directory import DirectoryStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.filtering import SlavedFilteringStore
from synapse.replication.slave.storage.groups import SlavedGroupServerStore
from synapse.replication.slave.storage.keys import SlavedKeyStore
from synapse.replication.slave.storage.profile import SlavedProfileStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
@ -69,7 +68,6 @@ from synapse.rest.admin import register_servlets_for_media_repo
from synapse.rest.client import (
account_data,
events,
groups,
initial_sync,
login,
presence,
@ -78,6 +76,7 @@ from synapse.rest.client import (
read_marker,
receipts,
room,
room_batch,
room_keys,
sendtodevice,
sync,
@ -87,7 +86,7 @@ from synapse.rest.client import (
voip,
)
from synapse.rest.client._base import client_patterns
from synapse.rest.client.account import ThreepidRestServlet
from synapse.rest.client.account import ThreepidRestServlet, WhoamiRestServlet
from synapse.rest.client.devices import DevicesRestServlet
from synapse.rest.client.keys import (
KeyChangesServlet,
@ -233,7 +232,6 @@ class GenericWorkerSlavedStore(
SlavedDeviceStore,
SlavedReceiptsStore,
SlavedPushRuleStore,
SlavedGroupServerStore,
SlavedAccountDataStore,
SlavedPusherStore,
CensorEventsStore,
@ -289,6 +287,7 @@ class GenericWorkerServer(HomeServer):
RegistrationTokenValidityRestServlet(self).register(resource)
login.register_servlets(self, resource)
ThreepidRestServlet(self).register(resource)
WhoamiRestServlet(self).register(resource)
DevicesRestServlet(self).register(resource)
# Read-only
@ -308,6 +307,7 @@ class GenericWorkerServer(HomeServer):
room.register_servlets(self, resource, is_worker=True)
room.register_deprecated_servlets(self, resource)
initial_sync.register_servlets(self, resource)
room_batch.register_servlets(self, resource)
room_keys.register_servlets(self, resource)
tags.register_servlets(self, resource)
account_data.register_servlets(self, resource)
@ -320,9 +320,6 @@ class GenericWorkerServer(HomeServer):
presence.register_servlets(self, resource)
if self.config.experimental.groups_enabled:
groups.register_servlets(self, resource)
resources.update({CLIENT_API_PREFIX: resource})
resources.update(build_synapse_client_resource_tree(self))

View file

@ -23,13 +23,7 @@ from netaddr import IPSet
from synapse.api.constants import EventTypes
from synapse.events import EventBase
from synapse.types import (
DeviceListUpdates,
GroupID,
JsonDict,
UserID,
get_domain_from_id,
)
from synapse.types import DeviceListUpdates, JsonDict, UserID
from synapse.util.caches.descriptors import _CacheContext, cached
if TYPE_CHECKING:
@ -55,7 +49,6 @@ class ApplicationServiceState(Enum):
@attr.s(slots=True, frozen=True, auto_attribs=True)
class Namespace:
exclusive: bool
group_id: Optional[str]
regex: Pattern[str]
@ -77,7 +70,6 @@ class ApplicationService:
def __init__(
self,
token: str,
hostname: str,
id: str,
sender: str,
url: Optional[str] = None,
@ -95,7 +87,6 @@ class ApplicationService:
) # url must not end with a slash
self.hs_token = hs_token
self.sender = sender
self.server_name = hostname
self.namespaces = self._check_namespaces(namespaces)
self.id = id
self.ip_range_whitelist = ip_range_whitelist
@ -141,30 +132,13 @@ class ApplicationService:
exclusive = regex_obj.get("exclusive")
if not isinstance(exclusive, bool):
raise ValueError("Expected bool for 'exclusive' in ns '%s'" % ns)
group_id = regex_obj.get("group_id")
if group_id:
if not isinstance(group_id, str):
raise ValueError(
"Expected string for 'group_id' in ns '%s'" % ns
)
try:
GroupID.from_string(group_id)
except Exception:
raise ValueError(
"Expected valid group ID for 'group_id' in ns '%s'" % ns
)
if get_domain_from_id(group_id) != self.server_name:
raise ValueError(
"Expected 'group_id' to be this host in ns '%s'" % ns
)
regex = regex_obj.get("regex")
if not isinstance(regex, str):
raise ValueError("Expected string for 'regex' in ns '%s'" % ns)
# Pre-compile regex.
result[ns].append(Namespace(exclusive, group_id, re.compile(regex)))
result[ns].append(Namespace(exclusive, re.compile(regex)))
return result
@ -369,21 +343,6 @@ class ApplicationService:
if namespace.exclusive
]
def get_groups_for_user(self, user_id: str) -> Iterable[str]:
"""Get the groups that this user is associated with by this AS
Args:
user_id: The ID of the user.
Returns:
An iterable that yields group_id strings.
"""
return (
namespace.group_id
for namespace in self.namespaces[ApplicationService.NS_USERS]
if namespace.group_id and namespace.regex.match(user_id)
)
def is_rate_limited(self) -> bool:
return self.rate_limited
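
After the removal above, an appservice registration namespace carries only "exclusive" and "regex"; there is no "group_id" key left to validate. A hedged sketch of the shape _check_namespaces now accepts, with a made-up bridge regex:

    import re

    namespaces = {
        "users": [{"exclusive": True, "regex": r"@_bridge_.*:example\.org"}],
        "aliases": [],
        "rooms": [],
    }
    # Each entry is pre-compiled into a Namespace(exclusive, regex) pair.
    compiled = [
        (ns["exclusive"], re.compile(ns["regex"])) for ns in namespaces["users"]
    ]
    assert compiled[0][1].match("@_bridge_alice:example.org")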

View file

@ -14,7 +14,7 @@
# limitations under the License.
import logging
import urllib.parse
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Mapping, Optional, Tuple
from prometheus_client import Counter
from typing_extensions import TypeGuard
@ -155,6 +155,9 @@ class ApplicationServiceApi(SimpleHttpClient):
if service.url is None:
return []
# This is required by the configuration.
assert service.hs_token is not None
uri = "%s%s/thirdparty/%s/%s" % (
service.url,
APP_SERVICE_PREFIX,
@ -162,7 +165,11 @@ class ApplicationServiceApi(SimpleHttpClient):
urllib.parse.quote(protocol),
)
try:
response = await self.get_json(uri, fields)
args: Mapping[Any, Any] = {
**fields,
b"access_token": service.hs_token,
}
response = await self.get_json(uri, args=args)
if not isinstance(response, list):
logger.warning(
"query_3pe to %s returned an invalid response %r", uri, response
@ -190,13 +197,15 @@ class ApplicationServiceApi(SimpleHttpClient):
return {}
async def _get() -> Optional[JsonDict]:
# This is required by the configuration.
assert service.hs_token is not None
uri = "%s%s/thirdparty/protocol/%s" % (
service.url,
APP_SERVICE_PREFIX,
urllib.parse.quote(protocol),
)
try:
info = await self.get_json(uri)
info = await self.get_json(uri, {"access_token": service.hs_token})
if not _is_valid_3pe_metadata(info):
logger.warning(

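Both request paths above now authenticate to the appservice with its hs_token: query_3pe merges it into the query fields under a bytes key, and the protocol lookup passes it as access_token. A minimal sketch of the merge, with stand-in values rather than real credentials:

    from typing import Any, Mapping

    def with_access_token(fields: Mapping[Any, Any], hs_token: str) -> Mapping[Any, Any]:
        # Mirrors the args construction in query_3pe above.
        return {**fields, b"access_token": hs_token}

    print(with_access_token({b"mxid": b"@alice:example.org"}, "hs_token_123"))
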
View file

@ -384,6 +384,11 @@ class _TransactionController:
device_list_summary: The device list summary to include in the transaction.
"""
try:
service_is_up = await self._is_service_up(service)
# Don't create empty txns when in recovery mode (ephemeral events are dropped)
if not service_is_up and not events:
return
txn = await self.store.create_appservice_txn(
service=service,
events=events,
@ -393,7 +398,6 @@ class _TransactionController:
unused_fallback_keys=unused_fallback_keys or {},
device_list_summary=device_list_summary or DeviceListUpdates(),
)
service_is_up = await self._is_service_up(service)
if service_is_up:
sent = await txn.send(self.as_api)
if sent:

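The reorder above probes the appservice before the transaction is created, so that while the service is down (recovery mode) no transaction is created for an ephemeral-only payload; those events would be dropped during recovery anyway. A condensed, paraphrased sketch of the resulting control flow, not the exact source:

    async def _send(self, service, events, ephemeral) -> None:
        service_is_up = await self._is_service_up(service)
        if not service_is_up and not events:
            # An events-less txn would be empty once the AS recovers: skip it.
            return
        txn = await self.store.create_appservice_txn(service=service, events=events)
        if service_is_up:
            sent = await txn.send(self.as_api)
            if sent:
                await txn.complete(self.store)
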
View file

@ -32,7 +32,6 @@ from synapse.config import (
emailconfig,
experimental,
federation,
groups,
jwt,
key,
logger,
@ -109,7 +108,6 @@ class RootConfig:
push: push.PushConfig
spamchecker: spam_checker.SpamCheckerConfig
room: room.RoomConfig
groups: groups.GroupsConfig
userdirectory: user_directory.UserDirectoryConfig
consent: consent.ConsentConfig
stats: stats.StatsConfig

View file

@ -179,7 +179,6 @@ def _load_appservice(
return ApplicationService(
token=as_info["as_token"],
hostname=hostname,
url=as_info["url"],
namespaces=as_info["namespaces"],
hs_token=as_info["hs_token"],

View file

@ -73,9 +73,6 @@ class ExperimentalConfig(Config):
# MSC3720 (Account status endpoint)
self.msc3720_enabled: bool = experimental.get("msc3720_enabled", False)
# The deprecated groups feature.
self.groups_enabled: bool = experimental.get("groups_enabled", False)
# MSC2654: Unread counts
self.msc2654_enabled: bool = experimental.get("msc2654_enabled", False)
@ -84,3 +81,6 @@ class ExperimentalConfig(Config):
# MSC3786 (Add a default push rule to ignore m.room.server_acl events)
self.msc3786_enabled: bool = experimental.get("msc3786_enabled", False)
# MSC3772: A push rule for mutual relations.
self.msc3772_enabled: bool = experimental.get("msc3772_enabled", False)

View file

@ -1,39 +0,0 @@
# Copyright 2017 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
from synapse.types import JsonDict
from ._base import Config
class GroupsConfig(Config):
section = "groups"
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
self.enable_group_creation = config.get("enable_group_creation", False)
self.group_creation_prefix = config.get("group_creation_prefix", "")
def generate_config_section(self, **kwargs: Any) -> str:
return """\
# Uncomment to allow non-server-admin users to create groups on this server
#
#enable_group_creation: true
# If enabled, non server admins can only create groups with local parts
# starting with this prefix
#
#group_creation_prefix: "unofficial_"
"""

View file

@ -26,7 +26,6 @@ from .database import DatabaseConfig
from .emailconfig import EmailConfig
from .experimental import ExperimentalConfig
from .federation import FederationConfig
from .groups import GroupsConfig
from .jwt import JWTConfig
from .key import KeyConfig
from .logger import LoggingConfig
@ -91,7 +90,6 @@ class HomeServerConfig(RootConfig):
PushConfig,
SpamCheckerConfig,
RoomConfig,
GroupsConfig,
UserDirectoryConfig,
ConsentConfig,
StatsConfig,

View file

@ -224,6 +224,22 @@ class ContentRepositoryConfig(Config):
"url_preview_accept_language"
) or ["en"]
media_retention = config.get("media_retention") or {}
self.media_retention_local_media_lifetime_ms = None
local_media_lifetime = media_retention.get("local_media_lifetime")
if local_media_lifetime is not None:
self.media_retention_local_media_lifetime_ms = self.parse_duration(
local_media_lifetime
)
self.media_retention_remote_media_lifetime_ms = None
remote_media_lifetime = media_retention.get("remote_media_lifetime")
if remote_media_lifetime is not None:
self.media_retention_remote_media_lifetime_ms = self.parse_duration(
remote_media_lifetime
)
def generate_config_section(self, data_dir_path: str, **kwargs: Any) -> str:
assert data_dir_path is not None
media_store = os.path.join(data_dir_path, "media_store")
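
The block added above reads an optional media_retention section and converts each lifetime through parse_duration, leaving None when a key is absent so the corresponding purge stays disabled. A self-contained sketch of that parsing with a simplified stand-in for parse_duration (the real method also accepts plain integers and more unit suffixes):

    def parse_duration(value: str) -> int:
        units = {"s": 1_000, "m": 60_000, "h": 3_600_000, "d": 86_400_000}
        return int(value[:-1]) * units[value[-1]]

    config = {"media_retention": {"local_media_lifetime": "90d"}}  # example input
    media_retention = config.get("media_retention") or {}

    local = media_retention.get("local_media_lifetime")
    local_ms = parse_duration(local) if local is not None else None
    remote = media_retention.get("remote_media_lifetime")
    remote_ms = parse_duration(remote) if remote is not None else None

    assert local_ms == 90 * 24 * 3_600 * 1_000 and remote_ms is None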

View file

@ -679,6 +679,17 @@ class ServerConfig(Config):
config.get("exclude_rooms_from_sync") or []
)
delete_stale_devices_after: Optional[str] = (
config.get("delete_stale_devices_after") or None
)
if delete_stale_devices_after is not None:
self.delete_stale_devices_after: Optional[int] = self.parse_duration(
delete_stale_devices_after
)
else:
self.delete_stale_devices_after = None
def has_tls_listener(self) -> bool:
return any(listener.tls for listener in self.listeners)

View file

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Set
from typing import Any, List, Set
from synapse.types import JsonDict
from synapse.util.check_dependencies import DependencyException, check_requirements
@ -49,7 +49,9 @@ class TracerConfig(Config):
# The tracer is enabled so sanitize the config
self.opentracer_whitelist = opentracing_config.get("homeserver_whitelist", [])
self.opentracer_whitelist: List[str] = opentracing_config.get(
"homeserver_whitelist", []
)
if not isinstance(self.opentracer_whitelist, list):
raise ConfigError("Tracer homeserver_whitelist config is malformed")

View file

@ -22,7 +22,7 @@ from synapse.events import EventBase
from synapse.types import JsonDict, StateMap
if TYPE_CHECKING:
from synapse.storage import Storage
from synapse.storage.controllers import StorageControllers
from synapse.storage.databases.main import DataStore
from synapse.storage.state import StateFilter
@ -84,7 +84,7 @@ class EventContext:
incomplete state.
"""
_storage: "Storage"
_storage: "StorageControllers"
rejected: Union[Literal[False], str] = False
_state_group: Optional[int] = None
state_group_before_event: Optional[int] = None
@ -97,7 +97,7 @@ class EventContext:
@staticmethod
def with_state(
storage: "Storage",
storage: "StorageControllers",
state_group: Optional[int],
state_group_before_event: Optional[int],
state_delta_due_to_event: Optional[StateMap[str]],
@ -117,7 +117,7 @@ class EventContext:
@staticmethod
def for_outlier(
storage: "Storage",
storage: "StorageControllers",
) -> "EventContext":
"""Return an EventContext instance suitable for persisting an outlier event"""
return EventContext(storage=storage)
@ -147,7 +147,7 @@ class EventContext:
}
@staticmethod
def deserialize(storage: "Storage", input: JsonDict) -> "EventContext":
def deserialize(storage: "StorageControllers", input: JsonDict) -> "EventContext":
"""Converts a dict that was produced by `serialize` back into a
EventContext.

View file

@ -21,6 +21,7 @@ from typing import (
Awaitable,
Callable,
Collection,
Dict,
List,
Optional,
Tuple,
@ -41,12 +42,17 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
CHECK_EVENT_FOR_SPAM_CALLBACK = Callable[
["synapse.events.EventBase"],
Awaitable[
Union[
str,
Codes,
# Highly experimental, not officially part of the spamchecker API, may
# disappear without warning depending on the results of ongoing
# experiments.
# Use this to return additional information as part of an error.
Tuple[Codes, Dict],
# Deprecated
bool,
]
@ -267,7 +273,9 @@ class SpamChecker:
if check_media_file_for_spam is not None:
self._check_media_file_for_spam_callbacks.append(check_media_file_for_spam)
async def check_event_for_spam(self, event: "synapse.events.EventBase") -> str:
async def check_event_for_spam(
self, event: "synapse.events.EventBase"
) -> Union[Tuple[Codes, Dict], str]:
"""Checks if a given event is considered "spammy" by this server.
If the server considers an event spammy, then it will be rejected if
@ -303,7 +311,7 @@ class SpamChecker:
# mypy complains that we can't reach this code because of the
# return type in CHECK_EVENT_FOR_SPAM_CALLBACK, but we don't know
# for sure that the module actually returns it.
logger.warning( # type: ignore[unreachable]
logger.warning(
"Module returned invalid value, rejecting message as spam"
)
res = "This message has been rejected as probable spam"
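
A hedged sketch of a module callback using the widened contract above: a (Codes, dict) pair attaches extra fields to the resulting error, the plain string form sets the rejection message, and the deprecated boolean form still works. The heuristic and the extra field name are invented:

    from typing import Dict, Tuple, Union

    from synapse.api.errors import Codes

    async def check_event_for_spam(event) -> Union[Tuple[Codes, Dict], str, bool]:
        body = event.content.get("body", "")
        if "cheap watches" in body:
            # Experimental form: error code plus additional fields.
            return Codes.FORBIDDEN, {"org.example.reason": "spam heuristic"}
        if len(body) > 50_000:
            return "Message too large to scan"  # string form: rejection message
        return False  # deprecated boolean form: not spam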

View file

@ -152,6 +152,7 @@ class ThirdPartyEventRules:
self.third_party_rules = None
self.store = hs.get_datastores().main
self._storage_controllers = hs.get_storage_controllers()
self._check_event_allowed_callbacks: List[CHECK_EVENT_ALLOWED_CALLBACK] = []
self._on_create_room_callbacks: List[ON_CREATE_ROOM_CALLBACK] = []
@ -463,7 +464,7 @@ class ThirdPartyEventRules:
Returns:
A dict mapping (event type, state key) to state event.
"""
state_ids = await self.store.get_filtered_current_state_ids(room_id)
state_ids = await self._storage_controllers.state.get_current_state_ids(room_id)
room_state_events = await self.store.get_events(state_ids.values())
state_events = {}

View file

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import collections.abc
from typing import Iterable, Type, Union
from typing import Iterable, Type, Union, cast
import jsonschema
@ -109,7 +109,12 @@ class EventValidator:
except jsonschema.ValidationError as e:
if e.path:
# example: "users_default": '0' is not of type 'integer'
message = '"' + e.path[-1] + '": ' + e.message # noqa: B306
# cast safety: path entries can be integers, if we fail to validate
# items in an array. However the POWER_LEVELS_SCHEMA doesn't expect
# to see any arrays.
message = (
'"' + cast(str, e.path[-1]) + '": ' + e.message # noqa: B306
)
# jsonschema.ValidationError.message is a valid attribute
else:
# example: '0' is not of type 'integer'
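
The cast above is safe because jsonschema only produces integer path entries when validation fails inside an array, and the power-levels schema contains none. A quick demonstration of where integer path items come from, using the real jsonschema dependency:

    import jsonschema

    schema = {"type": "array", "items": {"type": "integer"}}
    try:
        jsonschema.validate(["not-an-int"], schema)
    except jsonschema.ValidationError as e:
        # Failing an item inside an array yields an integer path entry.
        assert list(e.path) == [0]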

View file

@ -32,6 +32,18 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
class InvalidEventSignatureError(RuntimeError):
"""Raised when the signature on an event is invalid.
The stringification of this exception is just the error message without reference
to the event id. The event id is available as a property.
"""
def __init__(self, message: str, event_id: str):
super().__init__(message)
self.event_id = event_id
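
As the docstring above notes, str() on the exception yields only the message, with the event id kept on an attribute; callers (see the federation client hunks further down) rebuild the "event id ...:" prefix only when they surface the error. A tiny usage sketch of the class defined above:

    e = InvalidEventSignatureError("unable to verify signature", "$abc123")
    assert str(e) == "unable to verify signature"
    assert e.event_id == "$abc123"
    errmsg = f"event id {e.event_id}: {e}"  # the wrap pattern used by callers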
class FederationBase:
def __init__(self, hs: "HomeServer"):
self.hs = hs
@ -41,6 +53,7 @@ class FederationBase:
self.spam_checker = hs.get_spam_checker()
self.store = hs.get_datastores().main
self._clock = hs.get_clock()
self._storage_controllers = hs.get_storage_controllers()
async def _check_sigs_and_hash(
self, room_version: RoomVersion, pdu: EventBase
@ -59,20 +72,13 @@ class FederationBase:
Returns:
* the original event if the checks pass
* a redacted version of the event (if the signature
matched but the hash did not)
matched but the hash did not). In this case a warning will be logged.
Raises:
SynapseError if the signature check failed.
InvalidEventSignatureError if the signature check failed. Nothing
will be logged in this case.
"""
try:
await _check_sigs_on_pdu(self.keyring, room_version, pdu)
except SynapseError as e:
logger.warning(
"Signature check failed for %s: %s",
pdu.event_id,
e,
)
raise
await _check_sigs_on_pdu(self.keyring, room_version, pdu)
if not check_event_content_hash(pdu):
# let's try to distinguish between failures because the event was
@ -87,7 +93,7 @@ class FederationBase:
if set(redacted_event.keys()) == set(pdu.keys()) and set(
redacted_event.content.keys()
) == set(pdu.content.keys()):
logger.info(
logger.debug(
"Event %s seems to have been redacted; using our redacted copy",
pdu.event_id,
)
@ -116,12 +122,13 @@ async def _check_sigs_on_pdu(
) -> None:
"""Check that the given events are correctly signed
Raise a SynapseError if the event wasn't correctly signed.
Args:
keyring: keyring object to do the checks
room_version: the room version of the PDUs
pdus: the events to be checked
Raises:
InvalidEventSignatureError if the event wasn't correctly signed.
"""
# we want to check that the event is signed by:
@ -147,44 +154,38 @@ async def _check_sigs_on_pdu(
# First we check that the sender event is signed by the sender's domain
# (except if its a 3pid invite, in which case it may be sent by any server)
sender_domain = get_domain_from_id(pdu.sender)
if not _is_invite_via_3pid(pdu):
try:
await keyring.verify_event_for_server(
get_domain_from_id(pdu.sender),
sender_domain,
pdu,
pdu.origin_server_ts if room_version.enforce_key_validity else 0,
)
except Exception as e:
errmsg = "event id %s: unable to verify signature for sender %s: %s" % (
raise InvalidEventSignatureError(
f"unable to verify signature for sender domain {sender_domain}: {e}",
pdu.event_id,
get_domain_from_id(pdu.sender),
e,
)
raise SynapseError(403, errmsg, Codes.FORBIDDEN)
) from None
# now let's look for events where the sender's domain is different to the
# event id's domain (normally only the case for joins/leaves), and add additional
# checks. Only do this if the room version has a concept of event ID domain
# (ie, the room version uses old-style non-hash event IDs).
if room_version.event_format == EventFormatVersions.V1 and get_domain_from_id(
pdu.event_id
) != get_domain_from_id(pdu.sender):
try:
await keyring.verify_event_for_server(
get_domain_from_id(pdu.event_id),
pdu,
pdu.origin_server_ts if room_version.enforce_key_validity else 0,
)
except Exception as e:
errmsg = (
"event id %s: unable to verify signature for event id domain %s: %s"
% (
pdu.event_id,
get_domain_from_id(pdu.event_id),
e,
if room_version.event_format == EventFormatVersions.V1:
event_domain = get_domain_from_id(pdu.event_id)
if event_domain != sender_domain:
try:
await keyring.verify_event_for_server(
event_domain,
pdu,
pdu.origin_server_ts if room_version.enforce_key_validity else 0,
)
)
raise SynapseError(403, errmsg, Codes.FORBIDDEN)
except Exception as e:
raise InvalidEventSignatureError(
f"unable to verify signature for event domain {event_domain}: {e}",
pdu.event_id,
) from None
# If this is a join event for a restricted room it may have been authorised
# via a different server from the sending server. Check those signatures.
@ -204,15 +205,10 @@ async def _check_sigs_on_pdu(
pdu.origin_server_ts if room_version.enforce_key_validity else 0,
)
except Exception as e:
errmsg = (
"event id %s: unable to verify signature for authorising server %s: %s"
% (
pdu.event_id,
authorising_server,
e,
)
)
raise SynapseError(403, errmsg, Codes.FORBIDDEN)
raise InvalidEventSignatureError(
f"unable to verify signature for authorising serve {authorising_server}: {e}",
pdu.event_id,
) from None
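# In each of the blocks above, "raise ... from None" suppresses implicit
# exception chaining: the original failure is already stringified into the
# InvalidEventSignatureError message, so a chained traceback would only add
# noise.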
def _is_invite_via_3pid(event: EventBase) -> bool:

View file

@ -54,7 +54,11 @@ from synapse.api.room_versions import (
RoomVersions,
)
from synapse.events import EventBase, builder
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
from synapse.federation.federation_base import (
FederationBase,
InvalidEventSignatureError,
event_from_pdu_json,
)
from synapse.federation.transport.client import SendJoinResponse
from synapse.http.types import QueryParams
from synapse.types import JsonDict, UserID, get_domain_from_id
@ -319,7 +323,13 @@ class FederationClient(FederationBase):
pdu = pdu_list[0]
# Check signatures are correct.
signed_pdu = await self._check_sigs_and_hash(room_version, pdu)
try:
signed_pdu = await self._check_sigs_and_hash(room_version, pdu)
except InvalidEventSignatureError as e:
errmsg = f"event id {pdu.event_id}: {e}"
logger.warning("%s", errmsg)
raise SynapseError(403, errmsg, Codes.FORBIDDEN)
return signed_pdu
return None
@ -405,6 +415,9 @@ class FederationClient(FederationBase):
Returns:
a tuple of (state event_ids, auth event_ids)
Raises:
InvalidResponseError: if fields in the response have the wrong type.
"""
result = await self.transport_layer.get_room_state_ids(
destination, room_id, event_id=event_id
@ -416,7 +429,7 @@ class FederationClient(FederationBase):
if not isinstance(state_event_ids, list) or not isinstance(
auth_event_ids, list
):
raise Exception("invalid response from /state_ids")
raise InvalidResponseError("invalid response from /state_ids")
return state_event_ids, auth_event_ids
@ -552,20 +565,24 @@ class FederationClient(FederationBase):
Returns:
The PDU (possibly redacted) if it has valid signatures and hashes.
None if no valid copy could be found.
"""
res = None
try:
res = await self._check_sigs_and_hash(room_version, pdu)
except SynapseError:
pass
if not res:
# Check local db.
res = await self.store.get_event(
pdu.event_id, allow_rejected=True, allow_none=True
)
try:
return await self._check_sigs_and_hash(room_version, pdu)
except InvalidEventSignatureError as e:
logger.warning(
"Signature on retrieved event %s was invalid (%s). "
"Checking local store/origin server",
pdu.event_id,
e,
)
# Check local db.
res = await self.store.get_event(
pdu.event_id, allow_rejected=True, allow_none=True
)
pdu_origin = get_domain_from_id(pdu.sender)
if not res and pdu_origin != origin:
try:
@ -1040,9 +1057,14 @@ class FederationClient(FederationBase):
pdu = event_from_pdu_json(pdu_dict, room_version)
# Check signatures are correct.
pdu = await self._check_sigs_and_hash(room_version, pdu)
try:
pdu = await self._check_sigs_and_hash(room_version, pdu)
except InvalidEventSignatureError as e:
errmsg = f"event id {pdu.event_id}: {e}"
logger.warning("%s", errmsg)
raise SynapseError(403, errmsg, Codes.FORBIDDEN)
# FIXME: We should handle signature failures more gracefully.
return pdu

View file

@ -48,7 +48,11 @@ from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
from synapse.crypto.event_signing import compute_event_signature
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
from synapse.federation.federation_base import (
FederationBase,
InvalidEventSignatureError,
event_from_pdu_json,
)
from synapse.federation.persistence import TransactionActions
from synapse.federation.units import Edu, Transaction
from synapse.http.servlet import assert_params_in_dict
@ -109,12 +113,13 @@ class FederationServer(FederationBase):
super().__init__(hs)
self.handler = hs.get_federation_handler()
self.storage = hs.get_storage()
self._spam_checker = hs.get_spam_checker()
self._federation_event_handler = hs.get_federation_event_handler()
self.state = hs.get_state_handler()
self._event_auth_handler = hs.get_event_auth_handler()
self._state_storage_controller = hs.get_storage_controllers().state
self.device_handler = hs.get_device_handler()
# Ensure the following handlers are loaded since they register callbacks
@ -632,7 +637,12 @@ class FederationServer(FederationBase):
pdu = event_from_pdu_json(content, room_version)
origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, pdu.room_id)
pdu = await self._check_sigs_and_hash(room_version, pdu)
try:
pdu = await self._check_sigs_and_hash(room_version, pdu)
except InvalidEventSignatureError as e:
errmsg = f"event id {pdu.event_id}: {e}"
logger.warning("%s", errmsg)
raise SynapseError(403, errmsg, Codes.FORBIDDEN)
ret_pdu = await self.handler.on_invite_request(origin, pdu, room_version)
time_now = self._clock.time_msec()
return {"event": ret_pdu.get_pdu_json(time_now)}
@ -865,7 +875,12 @@ class FederationServer(FederationBase):
)
)
event = await self._check_sigs_and_hash(room_version, event)
try:
event = await self._check_sigs_and_hash(room_version, event)
except InvalidEventSignatureError as e:
errmsg = f"event id {event.event_id}: {e}"
logger.warning("%s", errmsg)
raise SynapseError(403, errmsg, Codes.FORBIDDEN)
return await self._federation_event_handler.on_send_membership_event(
origin, event
@ -1017,8 +1032,9 @@ class FederationServer(FederationBase):
# Check signature.
try:
pdu = await self._check_sigs_and_hash(room_version, pdu)
except SynapseError as e:
raise FederationError("ERROR", e.code, e.msg, affected=pdu.event_id)
except InvalidEventSignatureError as e:
logger.warning("event id %s: %s", pdu.event_id, e)
raise FederationError("ERROR", 403, str(e), affected=pdu.event_id)
if await self._spam_checker.should_drop_federated_event(pdu):
logger.warning(
@ -1207,14 +1223,10 @@ class FederationServer(FederationBase):
Raises:
AuthError if the server does not match the ACL
"""
state_ids = await self.store.get_current_state_ids(room_id)
acl_event_id = state_ids.get((EventTypes.ServerACL, ""))
if not acl_event_id:
return
acl_event = await self.store.get_event(acl_event_id)
if server_matches_acl_event(server_name, acl_event):
acl_event = await self._storage_controllers.state.get_current_state_event(
room_id, EventTypes.ServerACL, ""
)
if not acl_event or server_matches_acl_event(server_name, acl_event):
return
raise AuthError(code=403, msg="Server is banned from room")
@ -1353,7 +1365,7 @@ class FederationHandlerRegistry:
self._edu_type_to_instance[edu_type] = instance_names
async def on_edu(self, edu_type: str, origin: str, content: dict) -> None:
if not self.config.server.use_presence and edu_type == EduTypes.Presence:
if not self.config.server.use_presence and edu_type == EduTypes.PRESENCE:
return
# Check if we have a handler on this instance

View file

@ -245,6 +245,8 @@ class FederationSender(AbstractFederationSender):
self.store = hs.get_datastores().main
self.state = hs.get_state_handler()
self._storage_controllers = hs.get_storage_controllers()
self.clock = hs.get_clock()
self.is_mine_id = hs.is_mine_id
@ -602,7 +604,9 @@ class FederationSender(AbstractFederationSender):
room_id = receipt.room_id
# Work out which remote servers should be poked and poke them.
domains_set = await self.state.get_current_hosts_in_room(room_id)
domains_set = await self._storage_controllers.state.get_current_hosts_in_room(
room_id
)
domains = [
d
for d in domains_set

View file

@ -21,6 +21,7 @@ from typing import TYPE_CHECKING, Dict, Hashable, Iterable, List, Optional, Tupl
import attr
from prometheus_client import Counter
from synapse.api.constants import EduTypes
from synapse.api.errors import (
FederationDeniedError,
HttpResponseException,
@ -223,7 +224,7 @@ class PerDestinationQueue:
"""Marks that the destination has new data to send, without starting a
new transaction.
If a transaction loop is already in progress then a new transcation will
If a transaction loop is already in progress then a new transaction will
be attempted when the current one finishes.
"""
@ -542,7 +543,7 @@ class PerDestinationQueue:
edu = Edu(
origin=self._server_name,
destination=self._destination,
edu_type="m.receipt",
edu_type=EduTypes.RECEIPT,
content=self._pending_rrs,
)
self._pending_rrs = {}
@ -592,7 +593,7 @@ class PerDestinationQueue:
Edu(
origin=self._server_name,
destination=self._destination,
edu_type="m.direct_to_device",
edu_type=EduTypes.DIRECT_TO_DEVICE,
content=content,
)
for content in contents
@ -670,7 +671,7 @@ class _TransactionQueueManager:
Edu(
origin=self.queue._server_name,
destination=self.queue._destination,
edu_type="m.presence",
edu_type=EduTypes.PRESENCE,
content={
"push": [
format_user_presence_state(

View file

@ -16,6 +16,7 @@ from typing import TYPE_CHECKING, List
from prometheus_client import Gauge
from synapse.api.constants import EduTypes
from synapse.api.errors import HttpResponseException
from synapse.events import EventBase
from synapse.federation.persistence import TransactionActions
@ -126,7 +127,10 @@ class TransactionManager:
len(edus),
)
if issue_8631_logger.isEnabledFor(logging.DEBUG):
DEVICE_UPDATE_EDUS = {"m.device_list_update", "m.signing_key_update"}
DEVICE_UPDATE_EDUS = {
EduTypes.DEVICE_LIST_UPDATE,
EduTypes.SIGNING_KEY_UPDATE,
}
device_list_updates = [
edu.content for edu in edus if edu.edu_type in DEVICE_UPDATE_EDUS
]
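# For reference, the EduTypes constants substituted for string literals in
# this changeset map one-to-one onto the old values (shape inferred from the
# substitutions; the actual definition lives in synapse.api.constants):
#
#     class EduTypes:
#         PRESENCE = "m.presence"
#         RECEIPT = "m.receipt"
#         DIRECT_TO_DEVICE = "m.direct_to_device"
#         DEVICE_LIST_UPDATE = "m.device_list_update"
#         SIGNING_KEY_UPDATE = "m.signing_key_update"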

View file

@ -17,7 +17,6 @@ import logging
import urllib
from typing import (
Any,
Awaitable,
Callable,
Collection,
Dict,
@ -49,11 +48,6 @@ from synapse.types import JsonDict
logger = logging.getLogger(__name__)
# Send join responses can be huge, so we set a separate limit here. The response
# is parsed in a streaming manner, which helps alleviate the issue of memory
# usage a bit.
MAX_RESPONSE_SIZE_SEND_JOIN = 500 * 1024 * 1024
class TransportLayerClient:
"""Sends federation HTTP requests to other servers"""
@ -349,7 +343,6 @@ class TransportLayerClient:
path=path,
data=content,
parser=SendJoinParser(room_version, v1_api=True),
max_response_size=MAX_RESPONSE_SIZE_SEND_JOIN,
)
async def send_join_v2(
@ -372,7 +365,6 @@ class TransportLayerClient:
args=query_params,
data=content,
parser=SendJoinParser(room_version, v1_api=False),
max_response_size=MAX_RESPONSE_SIZE_SEND_JOIN,
)
async def send_leave_v1(
@ -688,488 +680,6 @@ class TransportLayerClient:
timeout=timeout,
)
async def get_group_profile(
self, destination: str, group_id: str, requester_user_id: str
) -> JsonDict:
"""Get a group profile"""
path = _create_v1_path("/groups/%s/profile", group_id)
return await self.client.get_json(
destination=destination,
path=path,
args={"requester_user_id": requester_user_id},
ignore_backoff=True,
)
async def update_group_profile(
self, destination: str, group_id: str, requester_user_id: str, content: JsonDict
) -> JsonDict:
"""Update a remote group profile
Args:
destination
group_id
requester_user_id
content: The new profile of the group
"""
path = _create_v1_path("/groups/%s/profile", group_id)
return await self.client.post_json(
destination=destination,
path=path,
args={"requester_user_id": requester_user_id},
data=content,
ignore_backoff=True,
)
async def get_group_summary(
self, destination: str, group_id: str, requester_user_id: str
) -> JsonDict:
"""Get a group summary"""
path = _create_v1_path("/groups/%s/summary", group_id)
return await self.client.get_json(
destination=destination,
path=path,
args={"requester_user_id": requester_user_id},
ignore_backoff=True,
)
async def get_rooms_in_group(
self, destination: str, group_id: str, requester_user_id: str
) -> JsonDict:
"""Get all rooms in a group"""
path = _create_v1_path("/groups/%s/rooms", group_id)
return await self.client.get_json(
destination=destination,
path=path,
args={"requester_user_id": requester_user_id},
ignore_backoff=True,
)
async def add_room_to_group(
self,
destination: str,
group_id: str,
requester_user_id: str,
room_id: str,
content: JsonDict,
) -> JsonDict:
"""Add a room to a group"""
path = _create_v1_path("/groups/%s/room/%s", group_id, room_id)
return await self.client.post_json(
destination=destination,
path=path,
args={"requester_user_id": requester_user_id},
data=content,
ignore_backoff=True,
)
async def update_room_in_group(
self,
destination: str,
group_id: str,
requester_user_id: str,
room_id: str,
config_key: str,
content: JsonDict,
) -> JsonDict:
"""Update room in group"""
path = _create_v1_path(
"/groups/%s/room/%s/config/%s", group_id, room_id, config_key
)
return await self.client.post_json(
destination=destination,
path=path,
args={"requester_user_id": requester_user_id},
data=content,
ignore_backoff=True,
)
async def remove_room_from_group(
self, destination: str, group_id: str, requester_user_id: str, room_id: str
) -> JsonDict:
"""Remove a room from a group"""
path = _create_v1_path("/groups/%s/room/%s", group_id, room_id)
return await self.client.delete_json(
destination=destination,
path=path,
args={"requester_user_id": requester_user_id},
ignore_backoff=True,
)
async def get_users_in_group(
self, destination: str, group_id: str, requester_user_id: str
) -> JsonDict:
"""Get users in a group"""
path = _create_v1_path("/groups/%s/users", group_id)
return await self.client.get_json(
destination=destination,
path=path,
args={"requester_user_id": requester_user_id},
ignore_backoff=True,
)
async def get_invited_users_in_group(
self, destination: str, group_id: str, requester_user_id: str
) -> JsonDict:
"""Get users that have been invited to a group"""
path = _create_v1_path("/groups/%s/invited_users", group_id)
return await self.client.get_json(
destination=destination,
path=path,
args={"requester_user_id": requester_user_id},
ignore_backoff=True,
)
async def accept_group_invite(
self, destination: str, group_id: str, user_id: str, content: JsonDict
) -> JsonDict:
"""Accept a group invite"""
path = _create_v1_path("/groups/%s/users/%s/accept_invite", group_id, user_id)
return await self.client.post_json(
destination=destination, path=path, data=content, ignore_backoff=True
)
def join_group(
self, destination: str, group_id: str, user_id: str, content: JsonDict
) -> Awaitable[JsonDict]:
"""Attempts to join a group"""
path = _create_v1_path("/groups/%s/users/%s/join", group_id, user_id)
return self.client.post_json(
destination=destination, path=path, data=content, ignore_backoff=True
)
async def invite_to_group(
self,
destination: str,
group_id: str,
user_id: str,
requester_user_id: str,
content: JsonDict,
) -> JsonDict:
"""Invite a user to a group"""
path = _create_v1_path("/groups/%s/users/%s/invite", group_id, user_id)
return await self.client.post_json(
destination=destination,
path=path,
args={"requester_user_id": requester_user_id},
data=content,
ignore_backoff=True,
)
async def invite_to_group_notification(
self, destination: str, group_id: str, user_id: str, content: JsonDict
) -> JsonDict:
"""Sent by group server to inform a user's server that they have been
invited.
"""
path = _create_v1_path("/groups/local/%s/users/%s/invite", group_id, user_id)
return await self.client.post_json(
destination=destination, path=path, data=content, ignore_backoff=True
)
async def remove_user_from_group(
self,
destination: str,
group_id: str,
requester_user_id: str,
user_id: str,
content: JsonDict,
) -> JsonDict:
"""Remove a user from a group"""
path = _create_v1_path("/groups/%s/users/%s/remove", group_id, user_id)
return await self.client.post_json(
destination=destination,
path=path,
args={"requester_user_id": requester_user_id},
data=content,
ignore_backoff=True,
)
async def remove_user_from_group_notification(
self, destination: str, group_id: str, user_id: str, content: JsonDict
) -> JsonDict:
"""Sent by group server to inform a user's server that they have been
kicked from the group.
"""
path = _create_v1_path("/groups/local/%s/users/%s/remove", group_id, user_id)
return await self.client.post_json(
destination=destination, path=path, data=content, ignore_backoff=True
)
async def renew_group_attestation(
self, destination: str, group_id: str, user_id: str, content: JsonDict
) -> JsonDict:
"""Sent by either a group server or a user's server to periodically update
the attestations
"""
path = _create_v1_path("/groups/%s/renew_attestation/%s", group_id, user_id)
return await self.client.post_json(
destination=destination, path=path, data=content, ignore_backoff=True
)
async def update_group_summary_room(
self,
destination: str,
group_id: str,
user_id: str,
room_id: str,
category_id: str,
content: JsonDict,
) -> JsonDict:
"""Update a room entry in a group summary"""
if category_id:
path = _create_v1_path(
"/groups/%s/summary/categories/%s/rooms/%s",
group_id,
category_id,
room_id,
)
else:
path = _create_v1_path("/groups/%s/summary/rooms/%s", group_id, room_id)
return await self.client.post_json(
destination=destination,
path=path,
args={"requester_user_id": user_id},
data=content,
ignore_backoff=True,
)
async def delete_group_summary_room(
self,
destination: str,
group_id: str,
user_id: str,
room_id: str,
category_id: str,
) -> JsonDict:
"""Delete a room entry in a group summary"""
if category_id:
path = _create_v1_path(
"/groups/%s/summary/categories/%s/rooms/%s",
group_id,
category_id,
room_id,
)
else:
path = _create_v1_path("/groups/%s/summary/rooms/%s", group_id, room_id)
return await self.client.delete_json(
destination=destination,
path=path,
args={"requester_user_id": user_id},
ignore_backoff=True,
)
async def get_group_categories(
self, destination: str, group_id: str, requester_user_id: str
) -> JsonDict:
"""Get all categories in a group"""
path = _create_v1_path("/groups/%s/categories", group_id)
return await self.client.get_json(
destination=destination,
path=path,
args={"requester_user_id": requester_user_id},
ignore_backoff=True,
)
async def get_group_category(
self, destination: str, group_id: str, requester_user_id: str, category_id: str
) -> JsonDict:
"""Get category info in a group"""
path = _create_v1_path("/groups/%s/categories/%s", group_id, category_id)
return await self.client.get_json(
destination=destination,
path=path,
args={"requester_user_id": requester_user_id},
ignore_backoff=True,
)
async def update_group_category(
self,
destination: str,
group_id: str,
requester_user_id: str,
category_id: str,
content: JsonDict,
) -> JsonDict:
"""Update a category in a group"""
path = _create_v1_path("/groups/%s/categories/%s", group_id, category_id)
return await self.client.post_json(
destination=destination,
path=path,
args={"requester_user_id": requester_user_id},
data=content,
ignore_backoff=True,
)
async def delete_group_category(
self, destination: str, group_id: str, requester_user_id: str, category_id: str
) -> JsonDict:
"""Delete a category in a group"""
path = _create_v1_path("/groups/%s/categories/%s", group_id, category_id)
return await self.client.delete_json(
destination=destination,
path=path,
args={"requester_user_id": requester_user_id},
ignore_backoff=True,
)
async def get_group_roles(
self, destination: str, group_id: str, requester_user_id: str
) -> JsonDict:
"""Get all roles in a group"""
path = _create_v1_path("/groups/%s/roles", group_id)
return await self.client.get_json(
destination=destination,
path=path,
args={"requester_user_id": requester_user_id},
ignore_backoff=True,
)
async def get_group_role(
self, destination: str, group_id: str, requester_user_id: str, role_id: str
) -> JsonDict:
"""Get a roles info"""
path = _create_v1_path("/groups/%s/roles/%s", group_id, role_id)
return await self.client.get_json(
destination=destination,
path=path,
args={"requester_user_id": requester_user_id},
ignore_backoff=True,
)
async def update_group_role(
self,
destination: str,
group_id: str,
requester_user_id: str,
role_id: str,
content: JsonDict,
) -> JsonDict:
"""Update a role in a group"""
path = _create_v1_path("/groups/%s/roles/%s", group_id, role_id)
return await self.client.post_json(
destination=destination,
path=path,
args={"requester_user_id": requester_user_id},
data=content,
ignore_backoff=True,
)
async def delete_group_role(
self, destination: str, group_id: str, requester_user_id: str, role_id: str
) -> JsonDict:
"""Delete a role in a group"""
path = _create_v1_path("/groups/%s/roles/%s", group_id, role_id)
return await self.client.delete_json(
destination=destination,
path=path,
args={"requester_user_id": requester_user_id},
ignore_backoff=True,
)
async def update_group_summary_user(
self,
destination: str,
group_id: str,
requester_user_id: str,
user_id: str,
role_id: str,
content: JsonDict,
) -> JsonDict:
"""Update a users entry in a group"""
if role_id:
path = _create_v1_path(
"/groups/%s/summary/roles/%s/users/%s", group_id, role_id, user_id
)
else:
path = _create_v1_path("/groups/%s/summary/users/%s", group_id, user_id)
return await self.client.post_json(
destination=destination,
path=path,
args={"requester_user_id": requester_user_id},
data=content,
ignore_backoff=True,
)
async def set_group_join_policy(
self, destination: str, group_id: str, requester_user_id: str, content: JsonDict
) -> JsonDict:
"""Sets the join policy for a group"""
path = _create_v1_path("/groups/%s/settings/m.join_policy", group_id)
return await self.client.put_json(
destination=destination,
path=path,
args={"requester_user_id": requester_user_id},
data=content,
ignore_backoff=True,
)
async def delete_group_summary_user(
self,
destination: str,
group_id: str,
requester_user_id: str,
user_id: str,
role_id: str,
) -> JsonDict:
"""Delete a users entry in a group"""
if role_id:
path = _create_v1_path(
"/groups/%s/summary/roles/%s/users/%s", group_id, role_id, user_id
)
else:
path = _create_v1_path("/groups/%s/summary/users/%s", group_id, user_id)
return await self.client.delete_json(
destination=destination,
path=path,
args={"requester_user_id": requester_user_id},
ignore_backoff=True,
)
async def bulk_get_publicised_groups(
self, destination: str, user_ids: Iterable[str]
) -> JsonDict:
"""Get the groups a list of users are publicising"""
path = _create_v1_path("/get_groups_publicised")
content = {"user_ids": user_ids}
return await self.client.post_json(
destination=destination, path=path, data=content, ignore_backoff=True
)
async def get_room_complexity(self, destination: str, room_id: str) -> JsonDict:
"""
Args:
@ -1360,6 +870,11 @@ class SendJoinParser(ByteParser[SendJoinResponse]):
CONTENT_TYPE = "application/json"
# /send_join responses can be huge, so we override the size limit here. The response
# is parsed in a streaming manner, which helps alleviate the issue of memory
# usage a bit.
MAX_RESPONSE_SIZE = 500 * 1024 * 1024
def __init__(self, room_version: RoomVersion, v1_api: bool):
self._response = SendJoinResponse([], [], event_dict={})
self._room_version = room_version
@ -1430,6 +945,9 @@ class _StateParser(ByteParser[StateRequestResponse]):
CONTENT_TYPE = "application/json"
# As with /send_join, /state responses can be huge.
MAX_RESPONSE_SIZE = 500 * 1024 * 1024
def __init__(self, room_version: RoomVersion):
self._response = StateRequestResponse([], [])
self._room_version = room_version

View file

@ -27,10 +27,6 @@ from synapse.federation.transport.server.federation import (
FederationAccountStatusServlet,
FederationTimestampLookupServlet,
)
from synapse.federation.transport.server.groups_local import GROUP_LOCAL_SERVLET_CLASSES
from synapse.federation.transport.server.groups_server import (
GROUP_SERVER_SERVLET_CLASSES,
)
from synapse.http.server import HttpServer, JsonResource
from synapse.http.servlet import (
parse_boolean_from_args,
@ -199,38 +195,6 @@ class PublicRoomList(BaseFederationServlet):
return 200, data
class FederationGroupsRenewAttestaionServlet(BaseFederationServlet):
"""A group or user's server renews their attestation"""
PATH = "/groups/(?P<group_id>[^/]*)/renew_attestation/(?P<user_id>[^/]*)"
def __init__(
self,
hs: "HomeServer",
authenticator: Authenticator,
ratelimiter: FederationRateLimiter,
server_name: str,
):
super().__init__(hs, authenticator, ratelimiter, server_name)
self.handler = hs.get_groups_attestation_renewer()
async def on_POST(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
group_id: str,
user_id: str,
) -> Tuple[int, JsonDict]:
# We don't need to check auth here as we check the attestation signatures
new_content = await self.handler.on_renew_attestation(
group_id, user_id, content
)
return 200, new_content
class OpenIdUserInfo(BaseFederationServlet):
"""
Exchange a bearer token for information about a user.
@ -292,16 +256,9 @@ class OpenIdUserInfo(BaseFederationServlet):
SERVLET_GROUPS: Dict[str, Iterable[Type[BaseFederationServlet]]] = {
"federation": FEDERATION_SERVLET_CLASSES,
"room_list": (PublicRoomList,),
"group_server": GROUP_SERVER_SERVLET_CLASSES,
"group_local": GROUP_LOCAL_SERVLET_CLASSES,
"group_attestation": (FederationGroupsRenewAttestaionServlet,),
"openid": (OpenIdUserInfo,),
}
DEFAULT_SERVLET_GROUPS = ("federation", "room_list", "openid")
GROUP_SERVLET_GROUPS = ("group_server", "group_local", "group_attestation")
def register_servlets(
hs: "HomeServer",
@ -324,10 +281,7 @@ def register_servlets(
Defaults to ``DEFAULT_SERVLET_GROUPS``.
"""
if not servlet_groups:
servlet_groups = DEFAULT_SERVLET_GROUPS
# Only allow the groups servlets if the deprecated groups feature is enabled.
if hs.config.experimental.groups_enabled:
servlet_groups = servlet_groups + GROUP_SERVLET_GROUPS
servlet_groups = SERVLET_GROUPS.keys()
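# With the group servlet groups removed, defaulting to SERVLET_GROUPS.keys()
# yields exactly the set the old DEFAULT_SERVLET_GROUPS tuple named:
# "federation", "room_list" and "openid".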
for servlet_group in servlet_groups:
# Skip unknown servlet groups.

View file

@ -27,6 +27,7 @@ from typing import (
from matrix_common.versionstring import get_distribution_version_string
from typing_extensions import Literal
from synapse.api.constants import EduTypes
from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import RoomVersions
from synapse.api.urls import FEDERATION_UNSTABLE_PREFIX, FEDERATION_V2_PREFIX
@ -108,7 +109,10 @@ class FederationSendServlet(BaseFederationServerServlet):
)
if issue_8631_logger.isEnabledFor(logging.DEBUG):
DEVICE_UPDATE_EDUS = ["m.device_list_update", "m.signing_key_update"]
DEVICE_UPDATE_EDUS = [
EduTypes.DEVICE_LIST_UPDATE,
EduTypes.SIGNING_KEY_UPDATE,
]
device_list_updates = [
edu.get("content", {})
for edu in transaction_data.get("edus", [])
@ -650,10 +654,6 @@ class FederationRoomHierarchyServlet(BaseFederationServlet):
)
class FederationRoomHierarchyUnstableServlet(FederationRoomHierarchyServlet):
PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc2946"
class RoomComplexityServlet(BaseFederationServlet):
"""
Indicates to other servers how complex (and therefore likely
@ -752,7 +752,6 @@ FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
FederationVersionServlet,
RoomComplexityServlet,
FederationRoomHierarchyServlet,
FederationRoomHierarchyUnstableServlet,
FederationV1SendKnockServlet,
FederationMakeKnockServlet,
FederationAccountStatusServlet,

View file

@ -1,115 +0,0 @@
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Dict, List, Tuple, Type
from synapse.api.errors import SynapseError
from synapse.federation.transport.server._base import (
Authenticator,
BaseFederationServlet,
)
from synapse.handlers.groups_local import GroupsLocalHandler
from synapse.types import JsonDict, get_domain_from_id
from synapse.util.ratelimitutils import FederationRateLimiter
if TYPE_CHECKING:
from synapse.server import HomeServer
class BaseGroupsLocalServlet(BaseFederationServlet):
"""Abstract base class for federation servlet classes which provides a groups local handler.
See BaseFederationServlet for more information.
"""
def __init__(
self,
hs: "HomeServer",
authenticator: Authenticator,
ratelimiter: FederationRateLimiter,
server_name: str,
):
super().__init__(hs, authenticator, ratelimiter, server_name)
self.handler = hs.get_groups_local_handler()
class FederationGroupsLocalInviteServlet(BaseGroupsLocalServlet):
"""A group server has invited a local user"""
PATH = "/groups/local/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/invite"
async def on_POST(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
group_id: str,
user_id: str,
) -> Tuple[int, JsonDict]:
if get_domain_from_id(group_id) != origin:
raise SynapseError(403, "group_id doesn't match origin")
assert isinstance(
self.handler, GroupsLocalHandler
), "Workers cannot handle group invites."
new_content = await self.handler.on_invite(group_id, user_id, content)
return 200, new_content
class FederationGroupsRemoveLocalUserServlet(BaseGroupsLocalServlet):
"""A group server has removed a local user"""
PATH = "/groups/local/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/remove"
async def on_POST(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
group_id: str,
user_id: str,
) -> Tuple[int, None]:
if get_domain_from_id(group_id) != origin:
raise SynapseError(403, "user_id doesn't match origin")
assert isinstance(
self.handler, GroupsLocalHandler
), "Workers cannot handle group removals."
await self.handler.user_removed_from_group(group_id, user_id, content)
return 200, None
class FederationGroupsBulkPublicisedServlet(BaseGroupsLocalServlet):
"""Get roles in a group"""
PATH = "/get_groups_publicised"
async def on_POST(
self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]]
) -> Tuple[int, JsonDict]:
resp = await self.handler.bulk_get_publicised_groups(
content["user_ids"], proxy=False
)
return 200, resp
GROUP_LOCAL_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
FederationGroupsLocalInviteServlet,
FederationGroupsRemoveLocalUserServlet,
FederationGroupsBulkPublicisedServlet,
)

View file

@ -1,755 +0,0 @@
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Dict, List, Tuple, Type
from typing_extensions import Literal
from synapse.api.constants import MAX_GROUP_CATEGORYID_LENGTH, MAX_GROUP_ROLEID_LENGTH
from synapse.api.errors import Codes, SynapseError
from synapse.federation.transport.server._base import (
Authenticator,
BaseFederationServlet,
)
from synapse.http.servlet import parse_string_from_args
from synapse.types import JsonDict, get_domain_from_id
from synapse.util.ratelimitutils import FederationRateLimiter
if TYPE_CHECKING:
from synapse.server import HomeServer
class BaseGroupsServerServlet(BaseFederationServlet):
"""Abstract base class for federation servlet classes which provides a groups server handler.
See BaseFederationServlet for more information.
"""
def __init__(
self,
hs: "HomeServer",
authenticator: Authenticator,
ratelimiter: FederationRateLimiter,
server_name: str,
):
super().__init__(hs, authenticator, ratelimiter, server_name)
self.handler = hs.get_groups_server_handler()
class FederationGroupsProfileServlet(BaseGroupsServerServlet):
"""Get/set the basic profile of a group on behalf of a user"""
PATH = "/groups/(?P<group_id>[^/]*)/profile"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
group_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
new_content = await self.handler.get_group_profile(group_id, requester_user_id)
return 200, new_content
async def on_POST(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
group_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
new_content = await self.handler.update_group_profile(
group_id, requester_user_id, content
)
return 200, new_content
class FederationGroupsSummaryServlet(BaseGroupsServerServlet):
PATH = "/groups/(?P<group_id>[^/]*)/summary"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
group_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
new_content = await self.handler.get_group_summary(group_id, requester_user_id)
return 200, new_content
class FederationGroupsRoomsServlet(BaseGroupsServerServlet):
"""Get the rooms in a group on behalf of a user"""
PATH = "/groups/(?P<group_id>[^/]*)/rooms"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
group_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
new_content = await self.handler.get_rooms_in_group(group_id, requester_user_id)
return 200, new_content
class FederationGroupsAddRoomsServlet(BaseGroupsServerServlet):
"""Add/remove room from group"""
PATH = "/groups/(?P<group_id>[^/]*)/room/(?P<room_id>[^/]*)"
async def on_POST(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
group_id: str,
room_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
new_content = await self.handler.add_room_to_group(
group_id, requester_user_id, room_id, content
)
return 200, new_content
async def on_DELETE(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
group_id: str,
room_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
new_content = await self.handler.remove_room_from_group(
group_id, requester_user_id, room_id
)
return 200, new_content
class FederationGroupsAddRoomsConfigServlet(BaseGroupsServerServlet):
"""Update room config in group"""
PATH = (
"/groups/(?P<group_id>[^/]*)/room/(?P<room_id>[^/]*)"
"/config/(?P<config_key>[^/]*)"
)
async def on_POST(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
group_id: str,
room_id: str,
config_key: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
result = await self.handler.update_room_in_group(
group_id, requester_user_id, room_id, config_key, content
)
return 200, result
class FederationGroupsUsersServlet(BaseGroupsServerServlet):
"""Get the users in a group on behalf of a user"""
PATH = "/groups/(?P<group_id>[^/]*)/users"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
group_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
new_content = await self.handler.get_users_in_group(group_id, requester_user_id)
return 200, new_content
class FederationGroupsInvitedUsersServlet(BaseGroupsServerServlet):
"""Get the users that have been invited to a group"""
PATH = "/groups/(?P<group_id>[^/]*)/invited_users"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
group_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
new_content = await self.handler.get_invited_users_in_group(
group_id, requester_user_id
)
return 200, new_content
class FederationGroupsInviteServlet(BaseGroupsServerServlet):
"""Ask a group server to invite someone to the group"""
PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/invite"
async def on_POST(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
group_id: str,
user_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
new_content = await self.handler.invite_to_group(
group_id, user_id, requester_user_id, content
)
return 200, new_content
class FederationGroupsAcceptInviteServlet(BaseGroupsServerServlet):
"""Accept an invitation from the group server"""
PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/accept_invite"
async def on_POST(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
group_id: str,
user_id: str,
) -> Tuple[int, JsonDict]:
if get_domain_from_id(user_id) != origin:
raise SynapseError(403, "user_id doesn't match origin")
new_content = await self.handler.accept_invite(group_id, user_id, content)
return 200, new_content
class FederationGroupsJoinServlet(BaseGroupsServerServlet):
"""Attempt to join a group"""
PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/join"
async def on_POST(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
group_id: str,
user_id: str,
) -> Tuple[int, JsonDict]:
if get_domain_from_id(user_id) != origin:
raise SynapseError(403, "user_id doesn't match origin")
new_content = await self.handler.join_group(group_id, user_id, content)
return 200, new_content
class FederationGroupsRemoveUserServlet(BaseGroupsServerServlet):
"""Leave or kick a user from the group"""
PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/remove"
async def on_POST(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
group_id: str,
user_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
new_content = await self.handler.remove_user_from_group(
group_id, user_id, requester_user_id, content
)
return 200, new_content
class FederationGroupsSummaryRoomsServlet(BaseGroupsServerServlet):
"""Add/remove a room from the group summary, with optional category.
Matches both:
- /groups/:group/summary/rooms/:room_id
- /groups/:group/summary/categories/:category/rooms/:room_id
"""
PATH = (
"/groups/(?P<group_id>[^/]*)/summary"
"(/categories/(?P<category_id>[^/]+))?"
"/rooms/(?P<room_id>[^/]*)"
)
async def on_POST(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
group_id: str,
category_id: str,
room_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
if category_id == "":
raise SynapseError(
400, "category_id cannot be empty string", Codes.INVALID_PARAM
)
if len(category_id) > MAX_GROUP_CATEGORYID_LENGTH:
raise SynapseError(
400,
"category_id may not be longer than %s characters"
% (MAX_GROUP_CATEGORYID_LENGTH,),
Codes.INVALID_PARAM,
)
resp = await self.handler.update_group_summary_room(
group_id,
requester_user_id,
room_id=room_id,
category_id=category_id,
content=content,
)
return 200, resp
async def on_DELETE(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
group_id: str,
category_id: str,
room_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
if category_id == "":
raise SynapseError(400, "category_id cannot be empty string")
resp = await self.handler.delete_group_summary_room(
group_id, requester_user_id, room_id=room_id, category_id=category_id
)
return 200, resp
class FederationGroupsCategoriesServlet(BaseGroupsServerServlet):
"""Get all categories for a group"""
PATH = "/groups/(?P<group_id>[^/]*)/categories/?"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
group_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
resp = await self.handler.get_group_categories(group_id, requester_user_id)
return 200, resp
class FederationGroupsCategoryServlet(BaseGroupsServerServlet):
"""Add/remove/get a category in a group"""
PATH = "/groups/(?P<group_id>[^/]*)/categories/(?P<category_id>[^/]+)"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
group_id: str,
category_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
resp = await self.handler.get_group_category(
group_id, requester_user_id, category_id
)
return 200, resp
async def on_POST(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
group_id: str,
category_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
if category_id == "":
raise SynapseError(400, "category_id cannot be empty string")
if len(category_id) > MAX_GROUP_CATEGORYID_LENGTH:
raise SynapseError(
400,
"category_id may not be longer than %s characters"
% (MAX_GROUP_CATEGORYID_LENGTH,),
Codes.INVALID_PARAM,
)
resp = await self.handler.upsert_group_category(
group_id, requester_user_id, category_id, content
)
return 200, resp
async def on_DELETE(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
group_id: str,
category_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
if category_id == "":
raise SynapseError(400, "category_id cannot be empty string")
resp = await self.handler.delete_group_category(
group_id, requester_user_id, category_id
)
return 200, resp
class FederationGroupsRolesServlet(BaseGroupsServerServlet):
"""Get roles in a group"""
PATH = "/groups/(?P<group_id>[^/]*)/roles/?"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
group_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
resp = await self.handler.get_group_roles(group_id, requester_user_id)
return 200, resp
class FederationGroupsRoleServlet(BaseGroupsServerServlet):
"""Add/remove/get a role in a group"""
PATH = "/groups/(?P<group_id>[^/]*)/roles/(?P<role_id>[^/]+)"
async def on_GET(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
group_id: str,
role_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
resp = await self.handler.get_group_role(group_id, requester_user_id, role_id)
return 200, resp
async def on_POST(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
group_id: str,
role_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
if role_id == "":
raise SynapseError(
400, "role_id cannot be empty string", Codes.INVALID_PARAM
)
if len(role_id) > MAX_GROUP_ROLEID_LENGTH:
raise SynapseError(
400,
"role_id may not be longer than %s characters"
% (MAX_GROUP_ROLEID_LENGTH,),
Codes.INVALID_PARAM,
)
resp = await self.handler.update_group_role(
group_id, requester_user_id, role_id, content
)
return 200, resp
async def on_DELETE(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
group_id: str,
role_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
if role_id == "":
raise SynapseError(400, "role_id cannot be empty string")
resp = await self.handler.delete_group_role(
group_id, requester_user_id, role_id
)
return 200, resp
class FederationGroupsSummaryUsersServlet(BaseGroupsServerServlet):
"""Add/remove a user from the group summary, with optional role.
Matches both:
- /groups/:group/summary/users/:user_id
- /groups/:group/summary/roles/:role/users/:user_id
"""
PATH = (
"/groups/(?P<group_id>[^/]*)/summary"
"(/roles/(?P<role_id>[^/]+))?"
"/users/(?P<user_id>[^/]*)"
)
async def on_POST(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
group_id: str,
role_id: str,
user_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
if role_id == "":
raise SynapseError(400, "role_id cannot be empty string")
if len(role_id) > MAX_GROUP_ROLEID_LENGTH:
raise SynapseError(
400,
"role_id may not be longer than %s characters"
% (MAX_GROUP_ROLEID_LENGTH,),
Codes.INVALID_PARAM,
)
resp = await self.handler.update_group_summary_user(
group_id,
requester_user_id,
user_id=user_id,
role_id=role_id,
content=content,
)
return 200, resp
async def on_DELETE(
self,
origin: str,
content: Literal[None],
query: Dict[bytes, List[bytes]],
group_id: str,
role_id: str,
user_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
if role_id == "":
raise SynapseError(400, "role_id cannot be empty string")
resp = await self.handler.delete_group_summary_user(
group_id, requester_user_id, user_id=user_id, role_id=role_id
)
return 200, resp
class FederationGroupsSettingJoinPolicyServlet(BaseGroupsServerServlet):
"""Sets whether a group is joinable without an invite or knock"""
PATH = "/groups/(?P<group_id>[^/]*)/settings/m.join_policy"
async def on_PUT(
self,
origin: str,
content: JsonDict,
query: Dict[bytes, List[bytes]],
group_id: str,
) -> Tuple[int, JsonDict]:
requester_user_id = parse_string_from_args(
query, "requester_user_id", required=True
)
if get_domain_from_id(requester_user_id) != origin:
raise SynapseError(403, "requester_user_id doesn't match origin")
new_content = await self.handler.set_group_join_policy(
group_id, requester_user_id, content
)
return 200, new_content
GROUP_SERVER_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
FederationGroupsProfileServlet,
FederationGroupsSummaryServlet,
FederationGroupsRoomsServlet,
FederationGroupsUsersServlet,
FederationGroupsInvitedUsersServlet,
FederationGroupsInviteServlet,
FederationGroupsAcceptInviteServlet,
FederationGroupsJoinServlet,
FederationGroupsRemoveUserServlet,
FederationGroupsSummaryRoomsServlet,
FederationGroupsCategoriesServlet,
FederationGroupsCategoryServlet,
FederationGroupsRolesServlet,
FederationGroupsRoleServlet,
FederationGroupsSummaryUsersServlet,
FederationGroupsAddRoomsServlet,
FederationGroupsAddRoomsConfigServlet,
FederationGroupsSettingJoinPolicyServlet,
)

View file

@ -1,218 +0,0 @@
# Copyright 2017 Vector Creations Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Attestations ensure that users and groups can't lie about their memberships.
When a user joins a group, the HS and GS swap attestations, which allow them
both to independently prove their membership to third parties. These
attestations have a validity period, so they need to be periodically renewed.
If a user leaves (or gets kicked out of) a group, either side can still use
their attestation to "prove" their membership, until the attestation expires.
Therefore attestations shouldn't be relied on to prove membership in important
cases, but can be for less important situations, e.g. showing a user's
membership of groups on their profile, showing flairs, etc.
An attestation is a signed blob of json that looks like:
{
"user_id": "@foo:a.example.com",
"group_id": "+bar:b.example.com",
"valid_until_ms": 1507994728530,
"signatures":{"matrix.org":{"ed25519:auto":"..."}}
}
"""
import logging
import random
from typing import TYPE_CHECKING, Optional, Tuple
from signedjson.sign import sign_json
from twisted.internet.defer import Deferred
from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import JsonDict, get_domain_from_id
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
# Default validity duration for new attestations we create
DEFAULT_ATTESTATION_LENGTH_MS = 3 * 24 * 60 * 60 * 1000
# We add some jitter to the validity duration of attestations so that if we
# add lots of users at once we don't need to renew them all at once.
# The jitter is a multiplier picked randomly between the first and second number
DEFAULT_ATTESTATION_JITTER = (0.9, 1.3)
# Start trying to update our attestations when they come this close to expiring
UPDATE_ATTESTATION_TIME_MS = 1 * 24 * 60 * 60 * 1000
class GroupAttestationSigning:
"""Creates and verifies group attestations."""
def __init__(self, hs: "HomeServer"):
self.keyring = hs.get_keyring()
self.clock = hs.get_clock()
self.server_name = hs.hostname
self.signing_key = hs.signing_key
async def verify_attestation(
self,
attestation: JsonDict,
group_id: str,
user_id: str,
server_name: Optional[str] = None,
) -> None:
"""Verifies that the given attestation matches the given parameters.
An optional server_name can be supplied to explicitly set which server's
signature is expected. Otherwise it assumes that either the group_id or
user_id is local and uses the other's server as the one to check.
"""
if not server_name:
if get_domain_from_id(group_id) == self.server_name:
server_name = get_domain_from_id(user_id)
elif get_domain_from_id(user_id) == self.server_name:
server_name = get_domain_from_id(group_id)
else:
raise Exception("Expected either group_id or user_id to be local")
if user_id != attestation["user_id"]:
raise SynapseError(400, "Attestation has incorrect user_id")
if group_id != attestation["group_id"]:
raise SynapseError(400, "Attestation has incorrect group_id")
valid_until_ms = attestation["valid_until_ms"]
# TODO: We also want to check that *new* attestations that people give
# us to store are valid for at least a little while.
now = self.clock.time_msec()
if valid_until_ms < now:
raise SynapseError(400, "Attestation expired")
assert server_name is not None
await self.keyring.verify_json_for_server(
server_name,
attestation,
now,
)
def create_attestation(self, group_id: str, user_id: str) -> JsonDict:
"""Create an attestation for the group_id and user_id with default
validity length.
"""
validity_period = DEFAULT_ATTESTATION_LENGTH_MS * random.uniform(
*DEFAULT_ATTESTATION_JITTER
)
valid_until_ms = int(self.clock.time_msec() + validity_period)
return sign_json(
{
"group_id": group_id,
"user_id": user_id,
"valid_until_ms": valid_until_ms,
},
self.server_name,
self.signing_key,
)
class GroupAttestionRenewer:
"""Responsible for sending and receiving attestation updates."""
def __init__(self, hs: "HomeServer"):
self.clock = hs.get_clock()
self.store = hs.get_datastores().main
self.assestations = hs.get_groups_attestation_signing()
self.transport_client = hs.get_federation_transport_client()
self.is_mine_id = hs.is_mine_id
self.attestations = hs.get_groups_attestation_signing()
if not hs.config.worker.worker_app:
self._renew_attestations_loop = self.clock.looping_call(
self._start_renew_attestations, 30 * 60 * 1000
)
async def on_renew_attestation(
self, group_id: str, user_id: str, content: JsonDict
) -> JsonDict:
"""When a remote updates an attestation"""
attestation = content["attestation"]
if not self.is_mine_id(group_id) and not self.is_mine_id(user_id):
raise SynapseError(400, "Neither user not group are on this server")
await self.attestations.verify_attestation(
attestation, user_id=user_id, group_id=group_id
)
await self.store.update_remote_attestion(group_id, user_id, attestation)
return {}
def _start_renew_attestations(self) -> "Deferred[None]":
return run_as_background_process("renew_attestations", self._renew_attestations)
async def _renew_attestations(self) -> None:
"""Called periodically to check if we need to update any of our attestations"""
now = self.clock.time_msec()
rows = await self.store.get_attestations_need_renewals(
now + UPDATE_ATTESTATION_TIME_MS
)
async def _renew_attestation(group_user: Tuple[str, str]) -> None:
group_id, user_id = group_user
try:
if not self.is_mine_id(group_id):
destination = get_domain_from_id(group_id)
elif not self.is_mine_id(user_id):
destination = get_domain_from_id(user_id)
else:
logger.warning(
"Incorrectly trying to do attestations for user: %r in %r",
user_id,
group_id,
)
await self.store.remove_attestation_renewal(group_id, user_id)
return
attestation = self.attestations.create_attestation(group_id, user_id)
await self.transport_client.renew_group_attestation(
destination, group_id, user_id, content={"attestation": attestation}
)
await self.store.update_attestation_renewal(
group_id, user_id, attestation
)
except (RequestSendFailed, HttpResponseException) as e:
logger.warning(
"Failed to renew attestation of %r in %r: %s", user_id, group_id, e
)
except Exception:
logger.exception(
"Error renewing attestation of %r in %r", user_id, group_id
)
for row in rows:
await _renew_attestation((row["group_id"], row["user_id"]))

File diff suppressed because it is too large

View file

@ -30,8 +30,8 @@ logger = logging.getLogger(__name__)
class AdminHandler:
def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastores().main
self.storage = hs.get_storage()
self.state_store = self.storage.state
self._storage_controllers = hs.get_storage_controllers()
self._state_storage_controller = self._storage_controllers.state
async def get_whois(self, user: UserID) -> JsonDict:
connections = []
@ -197,7 +197,9 @@ class AdminHandler:
from_key = events[-1].internal_metadata.after
events = await filter_events_for_client(self.storage, user_id, events)
events = await filter_events_for_client(
self._storage_controllers, user_id, events
)
writer.write_events(room_id, events)
@ -233,7 +235,9 @@ class AdminHandler:
for event_id in extremities:
if not event_to_unseen_prevs[event_id]:
continue
state = await self.state_store.get_state_for_event(event_id)
state = await self._state_storage_controller.get_state_for_event(
event_id
)
writer.write_state(room_id, event_id, state)
return writer.finished()

View file

@ -19,7 +19,7 @@ from prometheus_client import Counter
from twisted.internet import defer
import synapse
from synapse.api.constants import EventTypes
from synapse.api.constants import EduTypes, EventTypes
from synapse.appservice import ApplicationService
from synapse.events import EventBase
from synapse.handlers.presence import format_user_presence_state
@ -503,7 +503,7 @@ class ApplicationServicesHandler:
time_now = self.clock.time_msec()
events.extend(
{
"type": "m.presence",
"type": EduTypes.PRESENCE,
"sender": event.user_id,
"content": format_user_presence_state(
event, time_now, include_user_id=False

View file

@ -28,7 +28,7 @@ from typing import (
)
from synapse.api import errors
from synapse.api.constants import EventTypes
from synapse.api.constants import EduTypes, EventTypes
from synapse.api.errors import (
Codes,
FederationDeniedError,
@ -61,6 +61,7 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
MAX_DEVICE_DISPLAY_NAME_LEN = 100
DELETE_STALE_DEVICES_INTERVAL_MS = 24 * 60 * 60 * 1000
class DeviceWorkerHandler:
@ -70,7 +71,7 @@ class DeviceWorkerHandler:
self.store = hs.get_datastores().main
self.notifier = hs.get_notifier()
self.state = hs.get_state_handler()
self.state_store = hs.get_storage().state
self._state_storage = hs.get_storage_controllers().state
self._auth_handler = hs.get_auth_handler()
self.server_name = hs.hostname
@ -165,7 +166,7 @@ class DeviceWorkerHandler:
possibly_changed = set(changed)
possibly_left = set()
for room_id in rooms_changed:
current_state_ids = await self.store.get_current_state_ids(room_id)
current_state_ids = await self._state_storage.get_current_state_ids(room_id)
# The user may have left the room
# TODO: Check if they actually did or if we were just invited.
@ -203,7 +204,9 @@ class DeviceWorkerHandler:
continue
# mapping from event_id -> state_dict
prev_state_ids = await self.state_store.get_state_ids_for_events(event_ids)
prev_state_ids = await self._state_storage.get_state_ids_for_events(
event_ids
)
# Check if we've joined the room? If so we just blindly add all the users to
# the "possibly changed" users.
@ -277,7 +280,8 @@ class DeviceHandler(DeviceWorkerHandler):
federation_registry = hs.get_federation_registry()
federation_registry.register_edu_handler(
"m.device_list_update", self.device_list_updater.incoming_device_list_update
EduTypes.DEVICE_LIST_UPDATE,
self.device_list_updater.incoming_device_list_update,
)
hs.get_distributor().observe("user_left_room", self.user_left_room)
@ -292,6 +296,19 @@ class DeviceHandler(DeviceWorkerHandler):
# On start up check if there are any updates pending.
hs.get_reactor().callWhenRunning(self._handle_new_device_update_async)
self._delete_stale_devices_after = hs.config.server.delete_stale_devices_after
# Ideally we would run this on a worker and condition this on the
# "run_background_tasks_on" setting, but this would mean making the notification
# of device list changes over federation work on workers, which is nontrivial.
if self._delete_stale_devices_after is not None:
self.clock.looping_call(
run_as_background_process,
DELETE_STALE_DEVICES_INTERVAL_MS,
"delete_stale_devices",
self._delete_stale_devices,
)
def _check_device_name_length(self, name: Optional[str]) -> None:
"""
Checks whether a device name is longer than the maximum allowed length.
@ -367,6 +384,19 @@ class DeviceHandler(DeviceWorkerHandler):
raise errors.StoreError(500, "Couldn't generate a device ID.")
async def _delete_stale_devices(self) -> None:
"""Background task that deletes devices which haven't been accessed for more than
a configured time period.
"""
# We should only be running this job if the config option is defined.
assert self._delete_stale_devices_after is not None
now_ms = self.clock.time_msec()
since_ms = now_ms - self._delete_stale_devices_after
devices = await self.store.get_local_devices_not_accessed_since(since_ms)
for user_id, user_devices in devices.items():
await self.delete_devices(user_id, user_devices)
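To make the cutoff arithmetic concrete: with the daily interval constant above and a hypothetical retention setting of 30 days, each run deletes devices unseen for the past 30 days:

DELETE_STALE_DEVICES_INTERVAL_MS = 24 * 60 * 60 * 1000  # run once a day
delete_stale_devices_after = 30 * 24 * 60 * 60 * 1000  # hypothetical config: 30 days

now_ms = 1_655_000_000_000  # illustrative clock reading
since_ms = now_ms - delete_stale_devices_after
# any device not accessed since `since_ms` is deleted on this run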
@trace
async def delete_device(self, user_id: str, device_id: str) -> None:
"""Delete the given device
@ -689,7 +719,8 @@ class DeviceHandler(DeviceWorkerHandler):
)
# TODO: when called, this isn't in a logging context.
# This leads to log spam, sentry event spam, and massive
# memory usage. See #12552.
# memory usage.
# See https://github.com/matrix-org/synapse/issues/12552.
# log_kv(
# {"message": "sent device update to host", "host": host}
# )
@ -763,6 +794,10 @@ class DeviceListUpdater:
device_id = edu_content.pop("device_id")
stream_id = str(edu_content.pop("stream_id")) # They may come as ints
prev_ids = edu_content.pop("prev_id", [])
if not isinstance(prev_ids, list):
raise SynapseError(
400, "Device list update had an invalid 'prev_ids' field"
)
prev_ids = [str(p) for p in prev_ids] # They may come as ints
if get_domain_from_id(user_id) != origin:

View file

@ -15,7 +15,7 @@
import logging
from typing import TYPE_CHECKING, Any, Dict
from synapse.api.constants import ToDeviceEventTypes
from synapse.api.constants import EduTypes, ToDeviceEventTypes
from synapse.api.errors import SynapseError
from synapse.api.ratelimiting import Ratelimiter
from synapse.logging.context import run_in_background
@ -59,11 +59,11 @@ class DeviceMessageHandler:
# to the appropriate worker.
if hs.get_instance_name() in hs.config.worker.writers.to_device:
hs.get_federation_registry().register_edu_handler(
"m.direct_to_device", self.on_direct_to_device_edu
EduTypes.DIRECT_TO_DEVICE, self.on_direct_to_device_edu
)
else:
hs.get_federation_registry().register_instances_for_edu(
"m.direct_to_device",
EduTypes.DIRECT_TO_DEVICE,
hs.config.worker.writers.to_device,
)

View file

@ -45,6 +45,7 @@ class DirectoryHandler:
self.appservice_handler = hs.get_application_service_handler()
self.event_creation_handler = hs.get_event_creation_handler()
self.store = hs.get_datastores().main
self._storage_controllers = hs.get_storage_controllers()
self.config = hs.config
self.enable_room_list_search = hs.config.roomdirectory.enable_room_list_search
self.require_membership = hs.config.server.require_membership_for_aliases
@ -321,7 +322,7 @@ class DirectoryHandler:
Raises:
ShadowBanError if the requester has been shadow-banned.
"""
alias_event = await self.state.get_current_state(
alias_event = await self._storage_controllers.state.get_current_state_event(
room_id, EventTypes.CanonicalAlias, ""
)
@ -465,7 +466,11 @@ class DirectoryHandler:
making_public = visibility == "public"
if making_public:
room_aliases = await self.store.get_aliases_for_room(room_id)
canonical_alias = await self.store.get_canonical_alias_for_room(room_id)
canonical_alias = (
await self._storage_controllers.state.get_canonical_alias_for_room(
room_id
)
)
if canonical_alias:
room_aliases.append(canonical_alias)

View file

@ -25,6 +25,7 @@ from unpaddedbase64 import decode_base64
from twisted.internet import defer
from synapse.api.constants import EduTypes
from synapse.api.errors import CodeMessageException, Codes, NotFoundError, SynapseError
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.logging.opentracing import log_kv, set_tag, tag_args, trace
@ -66,13 +67,13 @@ class E2eKeysHandler:
# Only register this edu handler on master as it requires writing
# device updates to the db
federation_registry.register_edu_handler(
"m.signing_key_update",
EduTypes.SIGNING_KEY_UPDATE,
self._edu_updater.incoming_signing_key_update,
)
# also handle the unstable version
# FIXME: remove this when enough servers have upgraded
federation_registry.register_edu_handler(
"org.matrix.signing_key_update",
EduTypes.UNSTABLE_SIGNING_KEY_UPDATE,
self._edu_updater.incoming_signing_key_update,
)

View file

@ -113,7 +113,7 @@ class EventStreamHandler:
states = await presence_handler.get_states(users)
to_add.extend(
{
"type": EduTypes.Presence,
"type": EduTypes.PRESENCE,
"content": format_user_presence_state(state, time_now),
}
for state in states
@ -139,7 +139,7 @@ class EventStreamHandler:
class EventHandler:
def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastores().main
self.storage = hs.get_storage()
self._storage_controllers = hs.get_storage_controllers()
async def get_event(
self,
@ -177,7 +177,7 @@ class EventHandler:
is_peeking = user.to_string() not in users
filtered = await filter_events_for_client(
self.storage, user.to_string(), [event], is_peeking=is_peeking
self._storage_controllers, user.to_string(), [event], is_peeking=is_peeking
)
if not filtered:

View file

@ -20,7 +20,16 @@ import itertools
import logging
from enum import Enum
from http import HTTPStatus
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple, Union
from typing import (
TYPE_CHECKING,
Collection,
Dict,
Iterable,
List,
Optional,
Tuple,
Union,
)
import attr
from signedjson.key import decode_verify_key_bytes
@ -34,6 +43,7 @@ from synapse.api.errors import (
CodeMessageException,
Codes,
FederationDeniedError,
FederationError,
HttpResponseException,
NotFoundError,
RequestSendFailed,
@ -125,8 +135,8 @@ class FederationHandler:
self.hs = hs
self.store = hs.get_datastores().main
self.storage = hs.get_storage()
self.state_store = self.storage.state
self._storage_controllers = hs.get_storage_controllers()
self._state_storage_controller = self._storage_controllers.state
self.federation_client = hs.get_federation_client()
self.state_handler = hs.get_state_handler()
self.server_name = hs.hostname
@ -159,6 +169,14 @@ class FederationHandler:
self.third_party_event_rules = hs.get_third_party_event_rules()
# if this is the main process, fire off a background process to resume
# any partial-state-resync operations which were in flight when we
# were shut down.
if not hs.config.worker.worker_app:
run_as_background_process(
"resume_sync_partial_state_room", self._resume_sync_partial_state_room
)
async def maybe_backfill(
self, room_id: str, current_depth: int, limit: int
) -> bool:
@ -324,7 +342,7 @@ class FederationHandler:
# We set `check_history_visibility_only` as we might otherwise get false
# positives from users having been erased.
filtered_extremities = await filter_events_for_server(
self.storage,
self._storage_controllers,
self.server_name,
events_to_check,
redact=False,
@ -353,7 +371,7 @@ class FederationHandler:
# First we try hosts that are already in the room
# TODO: HEURISTIC ALERT.
curr_state = await self.state_handler.get_current_state(room_id)
curr_state = await self._storage_controllers.state.get_current_state(room_id)
curr_domains = get_domains_from_state(curr_state)
@ -460,6 +478,8 @@ class FederationHandler:
"""
# TODO: We should be able to call this on workers, but the upgrading of
# room stuff after join currently doesn't work on workers.
# TODO: Before we relax this condition, we need to allow re-syncing of
# partial room state to happen on workers.
assert self.config.worker.worker_app is None
logger.debug("Joining %s to %s", joinee, room_id)
@ -540,12 +560,11 @@ class FederationHandler:
if ret.partial_state:
# Kick off the process of asynchronously fetching the state for this
# room.
#
# TODO(faster_joins): pick this up again on restart
run_as_background_process(
desc="sync_partial_state_room",
func=self._sync_partial_state_room,
destination=origin,
initial_destination=origin,
other_destinations=ret.servers_in_room,
room_id=room_id,
)
@ -660,7 +679,7 @@ class FederationHandler:
# in the invitee's sync stream. It is stripped out for all other local users.
event.unsigned["knock_room_state"] = stripped_room_state["knock_state_events"]
context = EventContext.for_outlier(self.storage)
context = EventContext.for_outlier(self._storage_controllers)
stream_id = await self._federation_event_handler.persist_events_and_notify(
event.room_id, [(event, context)]
)
@ -731,7 +750,9 @@ class FederationHandler:
# Note that this requires the /send_join request to come back to the
# same server.
if room_version.msc3083_join_rules:
state_ids = await self.store.get_current_state_ids(room_id)
state_ids = await self._state_storage_controller.get_current_state_ids(
room_id
)
if await self._event_auth_handler.has_restricted_join_rules(
state_ids, room_version
):
@ -849,7 +870,7 @@ class FederationHandler:
)
)
context = EventContext.for_outlier(self.storage)
context = EventContext.for_outlier(self._storage_controllers)
await self._federation_event_handler.persist_events_and_notify(
event.room_id, [(event, context)]
)
@ -878,7 +899,7 @@ class FederationHandler:
await self.federation_client.send_leave(host_list, event)
context = EventContext.for_outlier(self.storage)
context = EventContext.for_outlier(self._storage_controllers)
stream_id = await self._federation_event_handler.persist_events_and_notify(
event.room_id, [(event, context)]
)
@ -1027,7 +1048,9 @@ class FederationHandler:
if event.internal_metadata.outlier:
raise NotFoundError("State not known at event %s" % (event_id,))
state_groups = await self.state_store.get_state_groups_ids(room_id, [event_id])
state_groups = await self._state_storage_controller.get_state_groups_ids(
room_id, [event_id]
)
# get_state_groups_ids should return exactly one result
assert len(state_groups) == 1
@ -1076,7 +1099,9 @@ class FederationHandler:
],
)
events = await filter_events_for_server(self.storage, origin, events)
events = await filter_events_for_server(
self._storage_controllers, origin, events
)
return events
@ -1107,7 +1132,9 @@ class FederationHandler:
if not in_room:
raise AuthError(403, "Host not in room.")
events = await filter_events_for_server(self.storage, origin, [event])
events = await filter_events_for_server(
self._storage_controllers, origin, [event]
)
event = events[0]
return event
else:
@ -1136,7 +1163,7 @@ class FederationHandler:
)
missing_events = await filter_events_for_server(
self.storage, origin, missing_events
self._storage_controllers, origin, missing_events
)
return missing_events
@ -1446,17 +1473,35 @@ class FederationHandler:
# well.
return None
async def _resume_sync_partial_state_room(self) -> None:
"""Resumes resyncing of all partial-state rooms after a restart."""
assert not self.config.worker.worker_app
partial_state_rooms = await self.store.get_partial_state_rooms_and_servers()
for room_id, servers_in_room in partial_state_rooms.items():
run_as_background_process(
desc="sync_partial_state_room",
func=self._sync_partial_state_room,
initial_destination=None,
other_destinations=servers_in_room,
room_id=room_id,
)
async def _sync_partial_state_room(
self,
destination: str,
initial_destination: Optional[str],
other_destinations: Collection[str],
room_id: str,
) -> None:
"""Background process to resync the state of a partial-state room
Args:
destination: homeserver to pull the state from
initial_destination: the initial homeserver to pull the state from
other_destinations: other homeservers to try to pull the state from, if
`initial_destination` is unavailable
room_id: room to be resynced
"""
assert not self.config.worker.worker_app
# TODO(faster_joins): do we need to lock to avoid races? What happens if other
# worker processes kick off a resync in parallel? Perhaps we should just elect
@ -1466,8 +1511,29 @@ class FederationHandler:
# really leave, that might mean we have difficulty getting the room state over
# federation.
#
# TODO(faster_joins): try other destinations if the one we have fails
# TODO(faster_joins): we need some way of prioritising which homeservers in
# `other_destinations` to try first, otherwise we'll spend ages trying dead
# homeservers for large rooms.
if initial_destination is None and len(other_destinations) == 0:
raise ValueError(
f"Cannot resync state of {room_id}: no destinations provided"
)
# Make an infinite iterator of destinations to try. Once we find a working
# destination, we'll stick with it until it flakes.
if initial_destination is not None:
# Move `initial_destination` to the front of the list.
destinations = list(other_destinations)
if initial_destination in destinations:
destinations.remove(initial_destination)
destinations = [initial_destination] + destinations
destination_iter = itertools.cycle(destinations)
else:
# Define `destinations` in this branch too: the retry loop below
# uses len(destinations) to decide when to give up.
destinations = list(other_destinations)
destination_iter = itertools.cycle(destinations)
# `destination` is the current remote homeserver we're pulling from.
destination = next(destination_iter)
logger.info("Syncing state for room %s via %s", room_id, destination)
# we work through the queue in order of increasing stream ordering.
@ -1478,14 +1544,19 @@ class FederationHandler:
# clear the lazy-loading flag.
logger.info("Updating current state for %s", room_id)
assert (
self.storage.persistence is not None
self._storage_controllers.persistence is not None
), "TODO(faster_joins): support for workers"
await self.storage.persistence.update_current_state(room_id)
await self._storage_controllers.persistence.update_current_state(
room_id
)
logger.info("Clearing partial-state flag for %s", room_id)
success = await self.store.clear_partial_state_room(room_id)
if success:
logger.info("State resync complete for %s", room_id)
self._storage_controllers.state.notify_room_un_partial_stated(
room_id
)
# TODO(faster_joins) update room stats and user directory?
return
@ -1503,6 +1574,41 @@ class FederationHandler:
allow_rejected=True,
)
for event in events:
await self._federation_event_handler.update_state_for_partial_state_event(
destination, event
)
for attempt in itertools.count():
try:
await self._federation_event_handler.update_state_for_partial_state_event(
destination, event
)
break
except FederationError as e:
if attempt == len(destinations) - 1:
# We have tried every remote server for this event. Give up.
# TODO(faster_joins) giving up isn't the right thing to do
# if there's a temporary network outage. retrying
# indefinitely is also not the right thing to do if we can
# reach all homeservers and they all claim they don't have
# the state we want.
logger.error(
"Failed to get state for %s at %s from %s because %s, "
"giving up!",
room_id,
event,
destination,
e,
)
raise
# Try the next remote server.
logger.info(
"Failed to get state for %s at %s from %s because %s",
room_id,
event,
destination,
e,
)
destination = next(destination_iter)
logger.info(
"Syncing state for room %s via %s instead",
room_id,
destination,
)
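Distilled from the two loops above, a framework-free sketch of the failover pattern: cycle through the destinations, stick with one until it fails, and give up once every server has been tried for the operation (the helper name and exception type are placeholders):

import itertools

def fetch_with_failover(destinations, fetch):
    destination_iter = itertools.cycle(destinations)
    destination = next(destination_iter)
    for attempt in itertools.count():
        try:
            return fetch(destination)
        except RuntimeError:  # stand-in for FederationError
            if attempt == len(destinations) - 1:
                raise  # every server has been tried once; give up
            destination = next(destination_iter)  # move on to the next remote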

View file

@ -98,8 +98,8 @@ class FederationEventHandler:
def __init__(self, hs: "HomeServer"):
self._store = hs.get_datastores().main
self._storage = hs.get_storage()
self._state_store = self._storage.state
self._storage_controllers = hs.get_storage_controllers()
self._state_storage_controller = self._storage_controllers.state
self._state_handler = hs.get_state_handler()
self._event_creation_handler = hs.get_event_creation_handler()
@ -274,7 +274,7 @@ class FederationEventHandler:
affected=pdu.event_id,
)
await self._process_received_pdu(origin, pdu, state=None)
await self._process_received_pdu(origin, pdu, state_ids=None)
async def on_send_membership_event(
self, origin: str, event: EventBase
@ -463,7 +463,9 @@ class FederationEventHandler:
with nested_logging_context(suffix=event.event_id):
context = await self._state_handler.compute_event_context(
event,
old_state=state,
state_ids_before_event={
(e.type, e.state_key): e.event_id for e in state
},
partial_state=partial_state,
)
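The comprehension above builds a StateMap[str], i.e. a mapping from (event type, state key) to event id; an invented example of the shape:

state_ids_before_event = {
    ("m.room.create", ""): "$create_event_id",
    ("m.room.member", "@alice:example.com"): "$alice_join_event_id",
    ("m.room.power_levels", ""): "$power_levels_event_id",
}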
@ -503,6 +505,9 @@ class FederationEventHandler:
Args:
destination: server to request full state from
event: partial-state event to be de-partial-stated
Raises:
FederationError if we fail to request state from the remote server.
"""
logger.info("Updating state for %s", event.event_id)
with nested_logging_context(suffix=event.event_id):
@ -512,12 +517,12 @@ class FederationEventHandler:
#
# This is the same operation as we do when we receive a regular event
# over federation.
state = await self._resolve_state_at_missing_prevs(destination, event)
state_ids = await self._resolve_state_at_missing_prevs(destination, event)
# build a new state group for it if need be
context = await self._state_handler.compute_event_context(
event,
old_state=state,
state_ids_before_event=state_ids,
)
if context.partial_state:
# this can happen if some or all of the event's prev_events still have
@ -533,7 +538,9 @@ class FederationEventHandler:
)
return
await self._store.update_state_for_partial_state_event(event, context)
self._state_store.notify_event_un_partial_stated(event.event_id)
self._state_storage_controller.notify_event_un_partial_stated(
event.event_id
)
async def backfill(
self, dest: str, room_id: str, limit: int, extremities: Collection[str]
@ -767,11 +774,12 @@ class FederationEventHandler:
return
try:
state = await self._resolve_state_at_missing_prevs(origin, event)
state_ids = await self._resolve_state_at_missing_prevs(origin, event)
# TODO(faster_joins): make sure that _resolve_state_at_missing_prevs does
# not return partial state
await self._process_received_pdu(
origin, event, state=state, backfilled=backfilled
origin, event, state_ids=state_ids, backfilled=backfilled
)
except FederationError as e:
if e.code == 403:
@ -781,7 +789,7 @@ class FederationEventHandler:
async def _resolve_state_at_missing_prevs(
self, dest: str, event: EventBase
) -> Optional[Iterable[EventBase]]:
) -> Optional[StateMap[str]]:
"""Calculate the state at an event with missing prev_events.
This is used when we have pulled a batch of events from a remote server, and
@ -808,8 +816,12 @@ class FederationEventHandler:
event: an event to check for missing prevs.
Returns:
if we already had all the prev events, `None`. Otherwise, returns a list of
the events in the state at `event`.
if we already had all the prev events, `None`. Otherwise, returns
the event ids of the state at `event`.
Raises:
FederationError if we fail to get the state from the remote server after any
missing `prev_event`s.
"""
room_id = event.room_id
event_id = event.event_id
@ -829,10 +841,12 @@ class FederationEventHandler:
)
# Calculate the state after each of the previous events, and
# resolve them to find the correct state at the current event.
event_map = {event_id: event}
try:
# Get the state of the events we know about
ours = await self._state_store.get_state_groups_ids(room_id, seen)
ours = await self._state_storage_controller.get_state_groups_ids(
room_id, seen
)
# state_maps is a list of mappings from (type, state_key) to event_id
state_maps: List[StateMap[str]] = list(ours.values())
@ -849,40 +863,23 @@ class FederationEventHandler:
# note that if any of the missing prevs share missing state or
# auth events, the requests to fetch those events are deduped
# by the get_pdu_cache in federation_client.
remote_state = await self._get_state_after_missing_prev_event(
dest, room_id, p
remote_state_map = (
await self._get_state_ids_after_missing_prev_event(
dest, room_id, p
)
)
remote_state_map = {
(x.type, x.state_key): x.event_id for x in remote_state
}
state_maps.append(remote_state_map)
for x in remote_state:
event_map[x.event_id] = x
room_version = await self._store.get_room_version_id(room_id)
state_map = await self._state_resolution_handler.resolve_events_with_store(
room_id,
room_version,
state_maps,
event_map,
event_map={event_id: event},
state_res_store=StateResolutionStore(self._store),
)
# We need to give _process_received_pdu the actual state events
# rather than event ids, so generate that now.
# First though we need to fetch all the events that are in
# state_map, so we can build up the state below.
evs = await self._store.get_events(
list(state_map.values()),
get_prev_content=False,
redact_behaviour=EventRedactBehaviour.as_is,
)
event_map.update(evs)
state = [event_map[e] for e in state_map.values()]
except Exception:
logger.warning(
"Error attempting to resolve state at missing prev_events",
@ -894,14 +891,14 @@ class FederationEventHandler:
"We can't get valid state history.",
affected=event_id,
)
return state
return state_map
async def _get_state_after_missing_prev_event(
async def _get_state_ids_after_missing_prev_event(
self,
destination: str,
room_id: str,
event_id: str,
) -> List[EventBase]:
) -> StateMap[str]:
"""Requests all of the room state at a given event from a remote homeserver.
Args:
@ -910,7 +907,11 @@ class FederationEventHandler:
event_id: The id of the event we want the state at.
Returns:
A list of events in the state, including the event itself
The event ids of the state *after* the given event.
Raises:
InvalidResponseError: if the remote homeserver's response contains fields
of the wrong type.
"""
(
state_event_ids,
@ -925,19 +926,17 @@ class FederationEventHandler:
len(auth_event_ids),
)
# start by just trying to fetch the events from the store
# Start by checking events we already have in the DB
desired_events = set(state_event_ids)
desired_events.add(event_id)
logger.debug("Fetching %i events from cache/store", len(desired_events))
fetched_events = await self._store.get_events(
desired_events, allow_rejected=True
)
have_events = await self._store.have_seen_events(room_id, desired_events)
missing_desired_events = desired_events - fetched_events.keys()
missing_desired_events = desired_events - have_events
logger.debug(
"We are missing %i events (got %i)",
len(missing_desired_events),
len(fetched_events),
len(have_events),
)
# We probably won't need most of the auth events, so let's just check which
@ -948,7 +947,7 @@ class FederationEventHandler:
# already have a bunch of the state events. It would be nice if the
# federation api gave us a way of finding out which we actually need.
missing_auth_events = set(auth_event_ids) - fetched_events.keys()
missing_auth_events = set(auth_event_ids) - have_events
missing_auth_events.difference_update(
await self._store.have_seen_events(room_id, missing_auth_events)
)
@ -974,47 +973,51 @@ class FederationEventHandler:
destination=destination, room_id=room_id, event_ids=missing_events
)
# we need to make sure we re-load from the database to get the rejected
# state correct.
fetched_events.update(
await self._store.get_events(missing_desired_events, allow_rejected=True)
)
# We now need to fill out the state map, which involves fetching the
# type and state key for each event ID in the state.
state_map = {}
# check for events which were in the wrong room.
#
# this can happen if a remote server claims that the state or
# auth_events at an event in room A are actually events in room B
event_metadata = await self._store.get_metadata_for_events(state_event_ids)
for state_event_id, metadata in event_metadata.items():
if metadata.room_id != room_id:
# This is a bogus situation, but since we may only discover it a long time
# after it happened, we try our best to carry on, by just omitting the
# bad events from the returned state set.
#
# This can happen if a remote server claims that the state or
# auth_events at an event in room A are actually events in room B
logger.warning(
"Remote server %s claims event %s in room %s is an auth/state "
"event in room %s",
destination,
state_event_id,
metadata.room_id,
room_id,
)
continue
bad_events = [
(event_id, event.room_id)
for event_id, event in fetched_events.items()
if event.room_id != room_id
]
if metadata.state_key is None:
logger.warning(
"Remote server gave us non-state event in state: %s", state_event_id
)
continue
for bad_event_id, bad_room_id in bad_events:
# This is a bogus situation, but since we may only discover it a long time
# after it happened, we try our best to carry on, by just omitting the
# bad events from the returned state set.
logger.warning(
"Remote server %s claims event %s in room %s is an auth/state "
"event in room %s",
destination,
bad_event_id,
bad_room_id,
room_id,
)
del fetched_events[bad_event_id]
state_map[(metadata.event_type, metadata.state_key)] = state_event_id
# if we couldn't get the prev event in question, that's a problem.
remote_event = fetched_events.get(event_id)
remote_event = await self._store.get_event(
event_id,
allow_none=True,
allow_rejected=True,
redact_behaviour=EventRedactBehaviour.as_is,
)
if not remote_event:
raise Exception("Unable to get missing prev_event %s" % (event_id,))
# missing state at that event is a warning, not a blocker
# XXX: this doesn't sound right? it means that we'll end up with incomplete
# state.
failed_to_fetch = desired_events - fetched_events.keys()
failed_to_fetch = desired_events - event_metadata.keys()
if failed_to_fetch:
logger.warning(
"Failed to fetch missing state events for %s %s",
@ -1022,14 +1025,12 @@ class FederationEventHandler:
failed_to_fetch,
)
remote_state = [
fetched_events[e_id] for e_id in state_event_ids if e_id in fetched_events
]
if remote_event.is_state() and remote_event.rejected_reason is None:
remote_state.append(remote_event)
state_map[
(remote_event.type, remote_event.state_key)
] = remote_event.event_id
return remote_state
return state_map
async def _get_state_and_persist(
self, destination: str, room_id: str, event_id: str
@ -1056,7 +1057,7 @@ class FederationEventHandler:
self,
origin: str,
event: EventBase,
state: Optional[Iterable[EventBase]],
state_ids: Optional[StateMap[str]],
backfilled: bool = False,
) -> None:
"""Called when we have a new non-outlier event.
@ -1078,7 +1079,7 @@ class FederationEventHandler:
event: event to be persisted
state: Normally None, but if we are handling a gap in the graph
state_ids: Normally None, but if we are handling a gap in the graph
(ie, we are missing one or more prev_events), the resolved state at the
event
@ -1090,7 +1091,8 @@ class FederationEventHandler:
try:
context = await self._state_handler.compute_event_context(
event, old_state=state
event,
state_ids_before_event=state_ids,
)
context = await self._check_event_auth(
origin,
@ -1107,7 +1109,7 @@ class FederationEventHandler:
# For new (non-backfilled and non-outlier) events we check if the event
# passes auth based on the current state. If it doesn't then we
# "soft-fail" the event.
await self._check_for_soft_fail(event, state, origin=origin)
await self._check_for_soft_fail(event, state_ids, origin=origin)
await self._run_push_actions_and_persist_event(event, context, backfilled)
@ -1449,7 +1451,7 @@ class FederationEventHandler:
# we're not bothering about room state, so flag the event as an outlier.
event.internal_metadata.outlier = True
context = EventContext.for_outlier(self._storage)
context = EventContext.for_outlier(self._storage_controllers)
try:
validate_event_for_room_version(room_version_obj, event)
check_auth_rules_for_event(room_version_obj, event, auth)
@ -1582,14 +1584,16 @@ class FederationEventHandler:
if guest_access == GuestAccess.CAN_JOIN:
return
current_state_map = await self._state_handler.get_current_state(event.room_id)
current_state = list(current_state_map.values())
await self._get_room_member_handler().kick_guest_users(current_state)
current_state = await self._storage_controllers.state.get_current_state(
event.room_id
)
current_state_list = list(current_state.values())
await self._get_room_member_handler().kick_guest_users(current_state_list)
async def _check_for_soft_fail(
self,
event: EventBase,
state: Optional[Iterable[EventBase]],
state_ids: Optional[StateMap[str]],
origin: str,
) -> None:
"""Checks if we should soft fail the event; if so, marks the event as
@ -1597,7 +1601,7 @@ class FederationEventHandler:
Args:
event
state: The state at the event if we don't have all the event's prev events
state_ids: The state at the event if we don't have all the event's prev events
origin: The host the event originates from.
"""
extrem_ids_list = await self._store.get_latest_event_ids_in_room(event.room_id)
@ -1612,8 +1616,11 @@ class FederationEventHandler:
room_version = await self._store.get_room_version_id(event.room_id)
room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
# The event types we want to pull from the "current" state.
auth_types = auth_types_for_event(room_version_obj, event)
# Calculate the "current state".
if state is not None:
if state_ids is not None:
# If we're explicitly given the state then we won't have all the
# prev events, and so we have a gap in the graph. In this case
# we want to be a little careful as we might have been down for
@ -1626,20 +1633,25 @@ class FederationEventHandler:
# given state at the event. This should correctly handle cases
# like bans, especially with state res v2.
state_sets_d = await self._state_store.get_state_groups(
state_sets_d = await self._state_storage_controller.get_state_groups_ids(
event.room_id, extrem_ids
)
state_sets: List[Iterable[EventBase]] = list(state_sets_d.values())
state_sets.append(state)
current_states = await self._state_handler.resolve_events(
room_version, state_sets, event
state_sets: List[StateMap[str]] = list(state_sets_d.values())
state_sets.append(state_ids)
current_state_ids = (
await self._state_resolution_handler.resolve_events_with_store(
event.room_id,
room_version,
state_sets,
event_map=None,
state_res_store=StateResolutionStore(self._store),
)
)
current_state_ids: StateMap[str] = {
k: e.event_id for k, e in current_states.items()
}
else:
current_state_ids = await self._state_handler.get_current_state_ids(
event.room_id, latest_event_ids=extrem_ids
current_state_ids = (
await self._state_storage_controller.get_current_state_ids(
event.room_id, StateFilter.from_types(auth_types)
)
)
logger.debug(
@ -1649,7 +1661,6 @@ class FederationEventHandler:
)
# Now check if event pass auth against said current state
auth_types = auth_types_for_event(room_version_obj, event)
current_state_ids_list = [
e for k, e in current_state_ids.items() if k in auth_types
]
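To make the final filter concrete, an invented current-state map reduced to the event's auth types:

auth_types = {("m.room.power_levels", ""), ("m.room.member", "@alice:example.com")}
current_state_ids = {
    ("m.room.power_levels", ""): "$power_levels",
    ("m.room.topic", ""): "$topic",
    ("m.room.member", "@alice:example.com"): "$alice_membership",
}
current_state_ids_list = [
    e for k, e in current_state_ids.items() if k in auth_types
]
# -> ["$power_levels", "$alice_membership"]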
@ -1895,7 +1906,7 @@ class FederationEventHandler:
# create a new state group as a delta from the existing one.
prev_group = context.state_group
state_group = await self._state_store.store_state_group(
state_group = await self._state_storage_controller.store_state_group(
event.event_id,
event.room_id,
prev_group=prev_group,
@ -1904,7 +1915,7 @@ class FederationEventHandler:
)
return EventContext.with_state(
storage=self._storage,
storage=self._storage_controllers,
state_group=state_group,
state_group_before_event=context.state_group_before_event,
state_delta_due_to_event=state_updates,
@ -1994,11 +2005,14 @@ class FederationEventHandler:
)
return result["max_stream_id"]
else:
assert self._storage.persistence
assert self._storage_controllers.persistence
# Note that this returns the events that were persisted, which may not be
# the same as were passed in if some were deduplicated due to transaction IDs.
events, max_stream_token = await self._storage.persistence.persist_events(
(
events,
max_stream_token,
) = await self._storage_controllers.persistence.persist_events(
event_and_contexts, backfilled=backfilled
)

View file

@ -1,503 +0,0 @@
# Copyright 2017 Vector Creations Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, Iterable, List, Set
from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError
from synapse.types import GroupID, JsonDict, get_domain_from_id
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
def _create_rerouter(func_name: str) -> Callable[..., Awaitable[JsonDict]]:
"""Returns an async function that looks at the group id and calls the function
on federation or the local group server if the group is local
"""
async def f(
self: "GroupsLocalWorkerHandler", group_id: str, *args: Any, **kwargs: Any
) -> JsonDict:
if not GroupID.is_valid(group_id):
raise SynapseError(400, "%s is not a legal group ID" % (group_id,))
if self.is_mine_id(group_id):
return await getattr(self.groups_server_handler, func_name)(
group_id, *args, **kwargs
)
else:
destination = get_domain_from_id(group_id)
try:
return await getattr(self.transport_client, func_name)(
destination, group_id, *args, **kwargs
)
except HttpResponseException as e:
# Capture errors returned by the remote homeserver and
# re-throw specific errors as SynapseErrors. This is so
# when the remote end responds with things like 403 Not
# In Group, we can communicate that to the client instead
# of a 500.
raise e.to_synapse_error()
except RequestSendFailed:
raise SynapseError(502, "Failed to contact group server")
return f
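A toy usage of the (now-removed) rerouter, with stub local and federation backends, both invented, to show how the generated coroutine dispatches on the group's domain:

class _StubGroupsServer:
    async def get_group_profile(self, group_id, requester_user_id):
        return {"name": "a local group"}

class _StubTransport:
    async def get_group_profile(self, destination, group_id, requester_user_id):
        return {"name": "a remote group via %s" % destination}

class _StubHandler:
    groups_server_handler = _StubGroupsServer()
    transport_client = _StubTransport()

    @staticmethod
    def is_mine_id(group_id):
        return group_id.endswith(":local.example.com")

    get_group_profile = _create_rerouter("get_group_profile")

# await _StubHandler().get_group_profile("+g:remote.example.com", "@a:local.example.com")
# -> {"name": "a remote group via remote.example.com"}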
class GroupsLocalWorkerHandler:
def __init__(self, hs: "HomeServer"):
self.hs = hs
self.store = hs.get_datastores().main
self.room_list_handler = hs.get_room_list_handler()
self.groups_server_handler = hs.get_groups_server_handler()
self.transport_client = hs.get_federation_transport_client()
self.auth = hs.get_auth()
self.clock = hs.get_clock()
self.keyring = hs.get_keyring()
self.is_mine_id = hs.is_mine_id
self.signing_key = hs.signing_key
self.server_name = hs.hostname
self.notifier = hs.get_notifier()
self.attestations = hs.get_groups_attestation_signing()
self.profile_handler = hs.get_profile_handler()
# The following functions merely route the query to the local groups server
# or federation depending on if the group is local or remote
get_group_profile = _create_rerouter("get_group_profile")
get_rooms_in_group = _create_rerouter("get_rooms_in_group")
get_invited_users_in_group = _create_rerouter("get_invited_users_in_group")
get_group_category = _create_rerouter("get_group_category")
get_group_categories = _create_rerouter("get_group_categories")
get_group_role = _create_rerouter("get_group_role")
get_group_roles = _create_rerouter("get_group_roles")
async def get_group_summary(
self, group_id: str, requester_user_id: str
) -> JsonDict:
"""Get the group summary for a group.
If the group is remote we check that the users have valid attestations.
"""
if self.is_mine_id(group_id):
res = await self.groups_server_handler.get_group_summary(
group_id, requester_user_id
)
else:
try:
res = await self.transport_client.get_group_summary(
get_domain_from_id(group_id), group_id, requester_user_id
)
except HttpResponseException as e:
raise e.to_synapse_error()
except RequestSendFailed:
raise SynapseError(502, "Failed to contact group server")
group_server_name = get_domain_from_id(group_id)
# Loop through the users and validate the attestations.
chunk = res["users_section"]["users"]
valid_users = []
for entry in chunk:
g_user_id = entry["user_id"]
attestation = entry.pop("attestation", {})
try:
if get_domain_from_id(g_user_id) != group_server_name:
await self.attestations.verify_attestation(
attestation,
group_id=group_id,
user_id=g_user_id,
server_name=get_domain_from_id(g_user_id),
)
valid_users.append(entry)
except Exception as e:
logger.info("Failed to verify user is in group: %s", e)
res["users_section"]["users"] = valid_users
res["users_section"]["users"].sort(key=lambda e: e.get("order", 0))
res["rooms_section"]["rooms"].sort(key=lambda e: e.get("order", 0))
# Add `is_publicised` flag to indicate whether the user has publicised their
# membership of the group on their profile
result = await self.store.get_publicised_groups_for_user(requester_user_id)
is_publicised = group_id in result
res.setdefault("user", {})["is_publicised"] = is_publicised
return res
async def get_users_in_group(
self, group_id: str, requester_user_id: str
) -> JsonDict:
"""Get users in a group"""
if self.is_mine_id(group_id):
return await self.groups_server_handler.get_users_in_group(
group_id, requester_user_id
)
group_server_name = get_domain_from_id(group_id)
try:
res = await self.transport_client.get_users_in_group(
get_domain_from_id(group_id), group_id, requester_user_id
)
except HttpResponseException as e:
raise e.to_synapse_error()
except RequestSendFailed:
raise SynapseError(502, "Failed to contact group server")
chunk = res["chunk"]
valid_entries = []
for entry in chunk:
g_user_id = entry["user_id"]
attestation = entry.pop("attestation", {})
try:
if get_domain_from_id(g_user_id) != group_server_name:
await self.attestations.verify_attestation(
attestation,
group_id=group_id,
user_id=g_user_id,
server_name=get_domain_from_id(g_user_id),
)
valid_entries.append(entry)
except Exception as e:
logger.info("Failed to verify user is in group: %s", e)
res["chunk"] = valid_entries
return res
async def get_joined_groups(self, user_id: str) -> JsonDict:
group_ids = await self.store.get_joined_groups(user_id)
return {"groups": group_ids}
async def get_publicised_groups_for_user(self, user_id: str) -> JsonDict:
if self.hs.is_mine_id(user_id):
result = await self.store.get_publicised_groups_for_user(user_id)
# Check AS associated groups for this user - this depends on the
# RegExps in the AS registration file (under `users`)
for app_service in self.store.get_app_services():
result.extend(app_service.get_groups_for_user(user_id))
return {"groups": result}
else:
try:
bulk_result = await self.transport_client.bulk_get_publicised_groups(
get_domain_from_id(user_id), [user_id]
)
except HttpResponseException as e:
raise e.to_synapse_error()
except RequestSendFailed:
raise SynapseError(502, "Failed to contact group server")
result = bulk_result.get("users", {}).get(user_id)
# TODO: Verify attestations
return {"groups": result}
async def bulk_get_publicised_groups(
self, user_ids: Iterable[str], proxy: bool = True
) -> JsonDict:
destinations: Dict[str, Set[str]] = {}
local_users = set()
for user_id in user_ids:
if self.hs.is_mine_id(user_id):
local_users.add(user_id)
else:
destinations.setdefault(get_domain_from_id(user_id), set()).add(user_id)
if not proxy and destinations:
raise SynapseError(400, "Some user_ids are not local")
results = {}
failed_results: List[str] = []
for destination, dest_user_ids in destinations.items():
try:
r = await self.transport_client.bulk_get_publicised_groups(
destination, list(dest_user_ids)
)
results.update(r["users"])
except Exception:
failed_results.extend(dest_user_ids)
for uid in local_users:
results[uid] = await self.store.get_publicised_groups_for_user(uid)
# Check AS associated groups for this user - this depends on the
# RegExps in the AS registration file (under `users`)
for app_service in self.store.get_app_services():
results[uid].extend(app_service.get_groups_for_user(uid))
return {"users": results}
class GroupsLocalHandler(GroupsLocalWorkerHandler):
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
# Ensure attestations get renewed
hs.get_groups_attestation_renewer()
# The following functions merely route the query to the local groups server
# or federation depending on if the group is local or remote
update_group_profile = _create_rerouter("update_group_profile")
add_room_to_group = _create_rerouter("add_room_to_group")
update_room_in_group = _create_rerouter("update_room_in_group")
remove_room_from_group = _create_rerouter("remove_room_from_group")
update_group_summary_room = _create_rerouter("update_group_summary_room")
delete_group_summary_room = _create_rerouter("delete_group_summary_room")
update_group_category = _create_rerouter("update_group_category")
delete_group_category = _create_rerouter("delete_group_category")
update_group_summary_user = _create_rerouter("update_group_summary_user")
delete_group_summary_user = _create_rerouter("delete_group_summary_user")
update_group_role = _create_rerouter("update_group_role")
delete_group_role = _create_rerouter("delete_group_role")
set_group_join_policy = _create_rerouter("set_group_join_policy")
async def create_group(
self, group_id: str, user_id: str, content: JsonDict
) -> JsonDict:
"""Create a group"""
logger.info("Asking to create group with ID: %r", group_id)
if self.is_mine_id(group_id):
res = await self.groups_server_handler.create_group(
group_id, user_id, content
)
local_attestation = None
remote_attestation = None
else:
raise SynapseError(400, "Unable to create remote groups")
is_publicised = content.get("publicise", False)
token = await self.store.register_user_group_membership(
group_id,
user_id,
membership="join",
is_admin=True,
local_attestation=local_attestation,
remote_attestation=remote_attestation,
is_publicised=is_publicised,
)
self.notifier.on_new_event("groups_key", token, users=[user_id])
return res
async def join_group(
self, group_id: str, user_id: str, content: JsonDict
) -> JsonDict:
"""Request to join a group"""
if self.is_mine_id(group_id):
await self.groups_server_handler.join_group(group_id, user_id, content)
local_attestation = None
remote_attestation = None
else:
local_attestation = self.attestations.create_attestation(group_id, user_id)
content["attestation"] = local_attestation
try:
res = await self.transport_client.join_group(
get_domain_from_id(group_id), group_id, user_id, content
)
except HttpResponseException as e:
raise e.to_synapse_error()
except RequestSendFailed:
raise SynapseError(502, "Failed to contact group server")
remote_attestation = res["attestation"]
await self.attestations.verify_attestation(
remote_attestation,
group_id=group_id,
user_id=user_id,
server_name=get_domain_from_id(group_id),
)
# TODO: Check that the group is public and we're being added publicly
is_publicised = content.get("publicise", False)
token = await self.store.register_user_group_membership(
group_id,
user_id,
membership="join",
is_admin=False,
local_attestation=local_attestation,
remote_attestation=remote_attestation,
is_publicised=is_publicised,
)
self.notifier.on_new_event("groups_key", token, users=[user_id])
return {}
async def accept_invite(
self, group_id: str, user_id: str, content: JsonDict
) -> JsonDict:
"""Accept an invite to a group"""
if self.is_mine_id(group_id):
await self.groups_server_handler.accept_invite(group_id, user_id, content)
local_attestation = None
remote_attestation = None
else:
local_attestation = self.attestations.create_attestation(group_id, user_id)
content["attestation"] = local_attestation
try:
res = await self.transport_client.accept_group_invite(
get_domain_from_id(group_id), group_id, user_id, content
)
except HttpResponseException as e:
raise e.to_synapse_error()
except RequestSendFailed:
raise SynapseError(502, "Failed to contact group server")
remote_attestation = res["attestation"]
await self.attestations.verify_attestation(
remote_attestation,
group_id=group_id,
user_id=user_id,
server_name=get_domain_from_id(group_id),
)
# TODO: Check that the group is public and we're being added publicly
is_publicised = content.get("publicise", False)
token = await self.store.register_user_group_membership(
group_id,
user_id,
membership="join",
is_admin=False,
local_attestation=local_attestation,
remote_attestation=remote_attestation,
is_publicised=is_publicised,
)
self.notifier.on_new_event("groups_key", token, users=[user_id])
return {}
async def invite(
self, group_id: str, user_id: str, requester_user_id: str, config: JsonDict
) -> JsonDict:
"""Invite a user to a group"""
content = {"requester_user_id": requester_user_id, "config": config}
if self.is_mine_id(group_id):
res = await self.groups_server_handler.invite_to_group(
group_id, user_id, requester_user_id, content
)
else:
try:
res = await self.transport_client.invite_to_group(
get_domain_from_id(group_id),
group_id,
user_id,
requester_user_id,
content,
)
except HttpResponseException as e:
raise e.to_synapse_error()
except RequestSendFailed:
raise SynapseError(502, "Failed to contact group server")
return res
async def on_invite(
self, group_id: str, user_id: str, content: JsonDict
) -> JsonDict:
"""One of our users were invited to a group"""
# TODO: Support auto join and rejection
if not self.is_mine_id(user_id):
raise SynapseError(400, "User not on this server")
local_profile = {}
if "profile" in content:
if "name" in content["profile"]:
local_profile["name"] = content["profile"]["name"]
if "avatar_url" in content["profile"]:
local_profile["avatar_url"] = content["profile"]["avatar_url"]
token = await self.store.register_user_group_membership(
group_id,
user_id,
membership="invite",
content={"profile": local_profile, "inviter": content["inviter"]},
)
self.notifier.on_new_event("groups_key", token, users=[user_id])
try:
user_profile = await self.profile_handler.get_profile(user_id)
except Exception as e:
logger.warning("No profile for user %s: %s", user_id, e)
user_profile = {}
return {"state": "invite", "user_profile": user_profile}
async def remove_user_from_group(
self, group_id: str, user_id: str, requester_user_id: str, content: JsonDict
) -> JsonDict:
"""Remove a user from a group"""
if user_id == requester_user_id:
token = await self.store.register_user_group_membership(
group_id, user_id, membership="leave"
)
self.notifier.on_new_event("groups_key", token, users=[user_id])
# TODO: Should probably remember that we tried to leave so that we can
# retry if the group server is currently down.
if self.is_mine_id(group_id):
res = await self.groups_server_handler.remove_user_from_group(
group_id, user_id, requester_user_id, content
)
else:
content["requester_user_id"] = requester_user_id
try:
res = await self.transport_client.remove_user_from_group(
get_domain_from_id(group_id),
group_id,
requester_user_id,
user_id,
content,
)
except HttpResponseException as e:
raise e.to_synapse_error()
except RequestSendFailed:
raise SynapseError(502, "Failed to contact group server")
return res
async def user_removed_from_group(
self, group_id: str, user_id: str, content: JsonDict
) -> None:
"""One of our users was removed/kicked from a group"""
# TODO: Check if user in group
token = await self.store.register_user_group_membership(
group_id, user_id, membership="leave"
)
self.notifier.on_new_event("groups_key", token, users=[user_id])

View file

@ -67,8 +67,8 @@ class InitialSyncHandler:
]
] = ResponseCache(hs.get_clock(), "initial_sync_cache")
self._event_serializer = hs.get_event_client_serializer()
self.storage = hs.get_storage()
self.state_store = self.storage.state
self._storage_controllers = hs.get_storage_controllers()
self._state_storage_controller = self._storage_controllers.state
async def snapshot_all_rooms(
self,
@ -190,7 +190,7 @@ class InitialSyncHandler:
if event.membership == Membership.JOIN:
room_end_token = now_token.room_key
deferred_room_state = run_in_background(
self.state_handler.get_current_state, event.room_id
self._state_storage_controller.get_current_state, event.room_id
)
elif event.membership == Membership.LEAVE:
room_end_token = RoomStreamToken(
@ -198,7 +198,8 @@ class InitialSyncHandler:
event.stream_ordering,
)
deferred_room_state = run_in_background(
self.state_store.get_state_for_events, [event.event_id]
self._state_storage_controller.get_state_for_events,
[event.event_id],
).addCallback(
lambda states: cast(StateMap[EventBase], states[event.event_id])
)
@ -218,7 +219,7 @@ class InitialSyncHandler:
).addErrback(unwrapFirstError)
messages = await filter_events_for_client(
self.storage, user_id, messages
self._storage_controllers, user_id, messages
)
start_token = now_token.copy_and_replace(StreamKeyType.ROOM, token)
@ -274,7 +275,7 @@ class InitialSyncHandler:
"rooms": rooms_ret,
"presence": [
{
"type": "m.presence",
"type": EduTypes.PRESENCE,
"content": format_user_presence_state(event, now),
}
for event in presence
@ -355,7 +356,9 @@ class InitialSyncHandler:
member_event_id: str,
is_peeking: bool,
) -> JsonDict:
room_state = await self.state_store.get_state_for_event(member_event_id)
room_state = await self._state_storage_controller.get_state_for_event(
member_event_id
)
limit = pagin_config.limit if pagin_config else None
if limit is None:
@ -369,7 +372,7 @@ class InitialSyncHandler:
)
messages = await filter_events_for_client(
self.storage, user_id, messages, is_peeking=is_peeking
self._storage_controllers, user_id, messages, is_peeking=is_peeking
)
start_token = StreamToken.START.copy_and_replace(StreamKeyType.ROOM, token)
@ -404,7 +407,9 @@ class InitialSyncHandler:
membership: str,
is_peeking: bool,
) -> JsonDict:
current_state = await self.state.get_current_state(room_id=room_id)
current_state = await self._storage_controllers.state.get_current_state(
room_id=room_id
)
# TODO: These concurrently
time_now = self.clock.time_msec()
@ -439,7 +444,7 @@ class InitialSyncHandler:
return [
{
"type": EduTypes.Presence,
"type": EduTypes.PRESENCE,
"content": format_user_presence_state(s, time_now),
}
for s in states
@ -474,7 +479,7 @@ class InitialSyncHandler:
)
messages = await filter_events_for_client(
self.storage, user_id, messages, is_peeking=is_peeking
self._storage_controllers, user_id, messages, is_peeking=is_peeking
)
start_token = now_token.copy_and_replace(StreamKeyType.ROOM, token)

View file

@ -28,6 +28,7 @@ from synapse.api.constants import (
EventContentFields,
EventTypes,
GuestAccess,
HistoryVisibility,
Membership,
RelationTypes,
UserTypes,
@ -54,12 +55,19 @@ from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.replication.http.send_event import ReplicationSendEventRestServlet
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.storage.state import StateFilter
from synapse.types import Requester, RoomAlias, StreamToken, UserID, create_requester
from synapse.types import (
MutableStateMap,
Requester,
RoomAlias,
StreamToken,
UserID,
create_requester,
)
from synapse.util import json_decoder, json_encoder, log_failure, unwrapFirstError
from synapse.util.async_helpers import Linearizer, gather_results
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.metrics import measure_func
from synapse.visibility import filter_events_for_client
from synapse.visibility import get_effective_room_visibility_from_state
if TYPE_CHECKING:
from synapse.events.third_party_rules import ThirdPartyEventRules
@ -76,8 +84,8 @@ class MessageHandler:
self.clock = hs.get_clock()
self.state = hs.get_state_handler()
self.store = hs.get_datastores().main
self.storage = hs.get_storage()
self.state_store = self.storage.state
self._storage_controllers = hs.get_storage_controllers()
self._state_storage_controller = self._storage_controllers.state
self._event_serializer = hs.get_event_client_serializer()
self._ephemeral_events_enabled = hs.config.server.enable_ephemeral_messages
@ -117,14 +125,16 @@ class MessageHandler:
)
if membership == Membership.JOIN:
data = await self.state.get_current_state(room_id, event_type, state_key)
data = await self._storage_controllers.state.get_current_state_event(
room_id, event_type, state_key
)
elif membership == Membership.LEAVE:
key = (event_type, state_key)
# If the membership is not JOIN, then the event ID should exist.
assert (
membership_event_id is not None
), "check_user_in_room_or_world_readable returned invalid data"
room_state = await self.state_store.get_state_for_events(
room_state = await self._state_storage_controller.get_state_for_events(
[membership_event_id], StateFilter.from_types([key])
)
data = room_state[membership_event_id].get(key)
@ -175,49 +185,31 @@ class MessageHandler:
state_filter = state_filter or StateFilter.all()
if at_token:
last_event = await self.store.get_last_event_in_room_before_stream_ordering(
room_id,
end_token=at_token.room_key,
last_event_id = (
await self.store.get_last_event_in_room_before_stream_ordering(
room_id,
end_token=at_token.room_key,
)
)
if not last_event:
if not last_event_id:
raise NotFoundError("Can't find event for token %s" % (at_token,))
# check whether the user is in the room at that time to determine
# whether they should be treated as peeking.
state_map = await self.state_store.get_state_for_event(
last_event.event_id,
StateFilter.from_types([(EventTypes.Member, user_id)]),
)
joined = False
membership_event = state_map.get((EventTypes.Member, user_id))
if membership_event:
joined = membership_event.membership == Membership.JOIN
is_peeking = not joined
visible_events = await filter_events_for_client(
self.storage,
user_id,
[last_event],
filter_send_to_client=False,
is_peeking=is_peeking,
)
if visible_events:
room_state_events = await self.state_store.get_state_for_events(
[last_event.event_id], state_filter=state_filter
)
room_state: Mapping[Any, EventBase] = room_state_events[
last_event.event_id
]
else:
if not await self._user_can_see_state_at_event(
user_id, room_id, last_event_id
):
raise AuthError(
403,
"User %s not allowed to view events in room %s at token %s"
% (user_id, room_id, at_token),
)
room_state_events = (
await self._state_storage_controller.get_state_for_events(
[last_event_id], state_filter=state_filter
)
)
room_state: Mapping[Any, EventBase] = room_state_events[last_event_id]
else:
(
membership,
@ -227,7 +219,7 @@ class MessageHandler:
)
if membership == Membership.JOIN:
state_ids = await self.store.get_filtered_current_state_ids(
state_ids = await self._state_storage_controller.get_current_state_ids(
room_id, state_filter=state_filter
)
room_state = await self.store.get_events(state_ids.values())
@ -236,8 +228,10 @@ class MessageHandler:
assert (
membership_event_id is not None
), "check_user_in_room_or_world_readable returned invalid data"
room_state_events = await self.state_store.get_state_for_events(
[membership_event_id], state_filter=state_filter
room_state_events = (
await self._state_storage_controller.get_state_for_events(
[membership_event_id], state_filter=state_filter
)
)
room_state = room_state_events[membership_event_id]
@ -245,6 +239,65 @@ class MessageHandler:
events = self._event_serializer.serialize_events(room_state.values(), now)
return events
async def _user_can_see_state_at_event(
self, user_id: str, room_id: str, event_id: str
) -> bool:
# check whether the user was in the room, and the history visibility,
# at that time.
state_map = await self._state_storage_controller.get_state_for_event(
event_id,
StateFilter.from_types(
[
(EventTypes.Member, user_id),
(EventTypes.RoomHistoryVisibility, ""),
]
),
)
membership = None
membership_event = state_map.get((EventTypes.Member, user_id))
if membership_event:
membership = membership_event.membership
# if the user was a member of the room at the time of the event,
# they can see it.
if membership == Membership.JOIN:
return True
# otherwise, it depends on the history visibility.
visibility = get_effective_room_visibility_from_state(state_map)
if visibility == HistoryVisibility.JOINED:
# we weren't a member at the time of the event, so we can't see this event.
return False
# otherwise *invited* is good enough
if membership == Membership.INVITE:
return True
if visibility == HistoryVisibility.INVITED:
# we weren't invited, so we can't see this event.
return False
if visibility == HistoryVisibility.WORLD_READABLE:
return True
# So it's SHARED, and the user was not a member at the time. The user cannot
# see history, unless they have *subsequently* joined the room.
#
# XXX: if the user has subsequently joined and then left again,
# ideally we would share history up to the point they left. But
# we don't know when they left. We just treat it as though they
# never joined, and restrict access.
(
current_membership,
_,
) = await self.store.get_local_current_membership_for_user_in_room(
user_id, event_id
)
return current_membership == Membership.JOIN
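Taken together, the branches above form a small decision table. As a standalone illustration (a sketch using plain strings rather than the handler's constants):

def can_see_state_at_event(membership, visibility, joined_later):
    """Sketch of the visibility rules above, with plain-string inputs."""
    if membership == "join":
        return True  # members at the time of the event always see it
    if visibility == "joined":
        return False  # only members could see it, and we weren't one
    if membership == "invite":
        return True  # an invite suffices for every weaker setting
    if visibility == "invited":
        return False
    if visibility == "world_readable":
        return True
    # "shared": visible only if the user has joined at some later point
    return joined_later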
async def get_joined_members(self, requester: Requester, room_id: str) -> dict:
"""Get all the joined members in the room and their profile information.
@ -394,7 +447,7 @@ class EventCreationHandler:
self.auth = hs.get_auth()
self._event_auth_handler = hs.get_event_auth_handler()
self.store = hs.get_datastores().main
self.storage = hs.get_storage()
self._storage_controllers = hs.get_storage_controllers()
self.state = hs.get_state_handler()
self.clock = hs.get_clock()
self.validator = EventValidator()
@ -889,6 +942,22 @@ class EventCreationHandler:
spam_check_result = await self.spam_checker.check_event_for_spam(event)
if spam_check_result != self.spam_checker.NOT_SPAM:
if isinstance(spam_check_result, tuple):
try:
[code, dict] = spam_check_result
raise SynapseError(
403,
"This message had been rejected as probable spam",
code,
dict,
)
except ValueError:
logger.error(
"Spam-check module returned invalid error value. Expecting [code, dict], got %s",
spam_check_result,
)
spam_check_result = Codes.FORBIDDEN
if isinstance(spam_check_result, Codes):
raise SynapseError(
403,
@ -1023,7 +1092,7 @@ class EventCreationHandler:
# after it is created
if builder.internal_metadata.outlier:
event.internal_metadata.outlier = True
context = EventContext.for_outlier(self.storage)
context = EventContext.for_outlier(self._storage_controllers)
elif (
event.type == EventTypes.MSC2716_INSERTION
and state_event_ids
@ -1035,8 +1104,35 @@ class EventCreationHandler:
#
# TODO(faster_joins): figure out how this works, and make sure that the
# old state is complete.
old_state = await self.store.get_events_as_list(state_event_ids)
context = await self.state.compute_event_context(event, old_state=old_state)
metadata = await self.store.get_metadata_for_events(state_event_ids)
state_map_for_event: MutableStateMap[str] = {}
for state_id in state_event_ids:
data = metadata.get(state_id)
if data is None:
# We're trying to persist a new historical batch of events
# with the given state, e.g. via
# `RoomBatchSendEventRestServlet`. The state can be inferred
# by Synapse or set directly by the client.
#
# Either way, we should have persisted all the state before
# getting here.
raise Exception(
f"State event {state_id} not found in DB,"
" Synapse should have persisted it before using it."
)
if data.state_key is None:
raise Exception(
f"Trying to set non-state event {state_id} as state"
)
state_map_for_event[(data.event_type, data.state_key)] = state_id
context = await self.state.compute_event_context(
event,
state_ids_before_event=state_map_for_event,
)
else:
context = await self.state.compute_event_context(event)
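For reference, the `state_ids_before_event` map built above keys each event ID by its `(event_type, state_key)` pair; a hypothetical example of the shape (IDs invented for illustration):

state_map_for_event = {
    ("m.room.create", ""): "$create_event_id",
    ("m.room.member", "@alice:example.com"): "$alice_join_event_id",
    ("m.room.power_levels", ""): "$power_levels_event_id",
}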
@ -1416,7 +1512,7 @@ class EventCreationHandler:
"""
extra_users = extra_users or []
assert self.storage.persistence is not None
assert self._storage_controllers.persistence is not None
assert self._events_shard_config.should_handle(
self._instance_name, event.room_id
)
@ -1653,7 +1749,7 @@ class EventCreationHandler:
event,
event_pos,
max_stream_token,
) = await self.storage.persistence.persist_event(
) = await self._storage_controllers.persistence.persist_event(
event, context=context, backfilled=backfilled
)

View file

@ -129,8 +129,8 @@ class PaginationHandler:
self.hs = hs
self.auth = hs.get_auth()
self.store = hs.get_datastores().main
self.storage = hs.get_storage()
self.state_store = self.storage.state
self._storage_controllers = hs.get_storage_controllers()
self._state_storage_controller = self._storage_controllers.state
self.clock = hs.get_clock()
self._server_name = hs.hostname
self._room_shutdown_handler = hs.get_room_shutdown_handler()
@ -352,7 +352,7 @@ class PaginationHandler:
self._purges_in_progress_by_room.add(room_id)
try:
async with self.pagination_lock.write(room_id):
await self.storage.purge_events.purge_history(
await self._storage_controllers.purge_events.purge_history(
room_id, token, delete_local_events
)
logger.info("[purge] complete")
@ -414,7 +414,7 @@ class PaginationHandler:
if joined:
raise SynapseError(400, "Users are still joined to this room")
await self.storage.purge_events.purge_room(room_id)
await self._storage_controllers.purge_events.purge_room(room_id)
async def get_messages(
self,
@ -515,14 +515,28 @@ class PaginationHandler:
next_token = from_token.copy_and_replace(StreamKeyType.ROOM, next_key)
if events:
if event_filter:
events = await event_filter.filter(events)
# If no events are returned from pagination, that implies
# we have reached the end of the available events.
# In that case we do not return an "end" token, to tell the
# client there is no need for further queries.
if not events:
return {
"chunk": [],
"start": await from_token.to_string(self.store),
}
events = await filter_events_for_client(
self.storage, user_id, events, is_peeking=(member_event_id is None)
)
if event_filter:
events = await event_filter.filter(events)
events = await filter_events_for_client(
self._storage_controllers,
user_id,
events,
is_peeking=(member_event_id is None),
)
# If, after the filter is applied, there are no more events,
# return immediately - but there might be more in the next_token batch.
if not events:
return {
"chunk": [],
@ -539,7 +553,7 @@ class PaginationHandler:
(EventTypes.Member, event.sender) for event in events
)
state_ids = await self.state_store.get_state_ids_for_event(
state_ids = await self._state_storage_controller.get_state_ids_for_event(
events[0].event_id, state_filter=state_filter
)
@ -653,7 +667,7 @@ class PaginationHandler:
400, "Users are still joined to this room"
)
await self.storage.purge_events.purge_room(room_id)
await self._storage_controllers.purge_events.purge_room(room_id)
logger.info("complete")
self._delete_by_id[delete_id].status = DeleteStatus.STATUS_COMPLETE

View file

@ -49,7 +49,7 @@ from prometheus_client import Counter
from typing_extensions import ContextManager
import synapse.metrics
from synapse.api.constants import EventTypes, Membership, PresenceState
from synapse.api.constants import EduTypes, EventTypes, Membership, PresenceState
from synapse.api.errors import SynapseError
from synapse.api.presence import UserPresenceState
from synapse.appservice import ApplicationService
@ -134,6 +134,7 @@ class BasePresenceHandler(abc.ABC):
def __init__(self, hs: "HomeServer"):
self.clock = hs.get_clock()
self.store = hs.get_datastores().main
self._storage_controllers = hs.get_storage_controllers()
self.presence_router = hs.get_presence_router()
self.state = hs.get_state_handler()
self.is_mine_id = hs.is_mine_id
@ -394,7 +395,7 @@ class WorkerPresenceHandler(BasePresenceHandler):
# Route presence EDUs to the right worker
hs.get_federation_registry().register_instances_for_edu(
"m.presence",
EduTypes.PRESENCE,
hs.config.worker.writers.presence,
)
@ -649,7 +650,9 @@ class PresenceHandler(BasePresenceHandler):
federation_registry = hs.get_federation_registry()
federation_registry.register_edu_handler("m.presence", self.incoming_presence)
federation_registry.register_edu_handler(
EduTypes.PRESENCE, self.incoming_presence
)
LaterGauge(
"synapse_handlers_presence_user_to_current_state_size",
@ -1346,7 +1349,10 @@ class PresenceHandler(BasePresenceHandler):
self._event_pos,
room_max_stream_ordering,
)
max_pos, deltas = await self.store.get_current_state_deltas(
(
max_pos,
deltas,
) = await self._storage_controllers.state.get_current_state_deltas(
self._event_pos, room_max_stream_ordering
)
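Each consumer of `get_current_state_deltas` follows the same cursor pattern; a minimal sketch, assuming each delta is a dict carrying at least `room_id`, `type` and `state_key` (see the storage docstring for the exact shape):

async def process_state_deltas(state_controller, pos, room_max_stream_ordering):
    # Fetch state changes since our last position, then advance the cursor.
    max_pos, deltas = await state_controller.get_current_state_deltas(
        pos, room_max_stream_ordering
    )
    for delta in deltas:
        if delta["type"] == "m.room.member":
            print("membership changed:", delta["room_id"], delta["state_key"])
    return max_pos  # the caller persists this as the new position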

View file

@ -23,14 +23,7 @@ from synapse.api.errors import (
StoreError,
SynapseError,
)
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.types import (
JsonDict,
Requester,
UserID,
create_requester,
get_domain_from_id,
)
from synapse.types import JsonDict, Requester, UserID, create_requester
from synapse.util.caches.descriptors import cached
from synapse.util.stringutils import parse_and_validate_mxc_uri
@ -50,9 +43,6 @@ class ProfileHandler:
delegate to master when necessary.
"""
PROFILE_UPDATE_MS = 60 * 1000
PROFILE_UPDATE_EVERY_MS = 24 * 60 * 60 * 1000
def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastores().main
self.clock = hs.get_clock()
@ -73,11 +63,6 @@ class ProfileHandler:
self._third_party_rules = hs.get_third_party_event_rules()
if hs.config.worker.run_background_tasks:
self.clock.looping_call(
self._update_remote_profile_cache, self.PROFILE_UPDATE_MS
)
async def get_profile(self, user_id: str) -> JsonDict:
target_user = UserID.from_string(user_id)
@ -116,30 +101,6 @@ class ProfileHandler:
raise SynapseError(502, "Failed to fetch profile")
raise e.to_synapse_error()
async def get_profile_from_cache(self, user_id: str) -> JsonDict:
"""Get the profile information from our local cache. If the user is
ours then the profile information will always be correct. Otherwise,
it may be out of date/missing.
"""
target_user = UserID.from_string(user_id)
if self.hs.is_mine(target_user):
try:
displayname = await self.store.get_profile_displayname(
target_user.localpart
)
avatar_url = await self.store.get_profile_avatar_url(
target_user.localpart
)
except StoreError as e:
if e.code == 404:
raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND)
raise
return {"displayname": displayname, "avatar_url": avatar_url}
else:
profile = await self.store.get_from_remote_profile_cache(user_id)
return profile or {}
async def get_displayname(self, target_user: UserID) -> Optional[str]:
if self.hs.is_mine(target_user):
try:
@ -509,45 +470,3 @@ class ProfileHandler:
# so we act as if we couldn't find the profile.
raise SynapseError(403, "Profile isn't available", Codes.FORBIDDEN)
raise
@wrap_as_background_process("Update remote profile")
async def _update_remote_profile_cache(self) -> None:
"""Called periodically to check profiles of remote users we haven't
checked in a while.
"""
entries = await self.store.get_remote_profile_cache_entries_that_expire(
last_checked=self.clock.time_msec() - self.PROFILE_UPDATE_EVERY_MS
)
for user_id, displayname, avatar_url in entries:
is_subscribed = await self.store.is_subscribed_remote_profile_for_user(
user_id
)
if not is_subscribed:
await self.store.maybe_delete_remote_profile_cache(user_id)
continue
try:
profile = await self.federation.make_query(
destination=get_domain_from_id(user_id),
query_type="profile",
args={"user_id": user_id},
ignore_backoff=True,
)
except Exception:
logger.exception("Failed to get avatar_url")
await self.store.update_remote_profile_cache(
user_id, displayname, avatar_url
)
continue
new_name = profile.get("displayname")
if not isinstance(new_name, str):
new_name = None
new_avatar = profile.get("avatar_url")
if not isinstance(new_avatar, str):
new_avatar = None
# We always hit update to update the last_check timestamp
await self.store.update_remote_profile_cache(user_id, new_name, new_avatar)

View file

@ -14,7 +14,7 @@
import logging
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple
from synapse.api.constants import ReceiptTypes
from synapse.api.constants import EduTypes, ReceiptTypes
from synapse.appservice import ApplicationService
from synapse.streams import EventSource
from synapse.types import (
@ -52,11 +52,11 @@ class ReceiptsHandler:
# to the appropriate worker.
if hs.get_instance_name() in hs.config.worker.writers.receipts:
hs.get_federation_registry().register_edu_handler(
"m.receipt", self._received_remote_receipt
EduTypes.RECEIPT, self._received_remote_receipt
)
else:
hs.get_federation_registry().register_instances_for_edu(
"m.receipt",
EduTypes.RECEIPT,
hs.config.worker.writers.receipts,
)
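These registrations replace hard-coded strings with `EduTypes` constants; a sketch of the relevant subset, with values mirroring the Matrix EDU type names:

class EduTypes:
    PRESENCE = "m.presence"
    TYPING = "m.typing"
    RECEIPT = "m.receipt"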

View file

@ -87,6 +87,7 @@ class LoginDict(TypedDict):
class RegistrationHandler:
def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastores().main
self._storage_controllers = hs.get_storage_controllers()
self.clock = hs.get_clock()
self.hs = hs
self.auth = hs.get_auth()
@ -535,7 +536,7 @@ class RegistrationHandler:
if requires_invite:
# If the server is in the room, check if the room is public.
state = await self.store.get_filtered_current_state_ids(
state = await self._storage_controllers.state.get_current_state_ids(
room_id, StateFilter.from_types([(EventTypes.JoinRules, "")])
)

View file

@ -12,16 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import (
TYPE_CHECKING,
Collection,
Dict,
FrozenSet,
Iterable,
List,
Optional,
Tuple,
)
from typing import TYPE_CHECKING, Dict, FrozenSet, Iterable, List, Optional, Tuple
import attr
@ -69,7 +60,7 @@ class BundledAggregations:
class RelationsHandler:
def __init__(self, hs: "HomeServer"):
self._main_store = hs.get_datastores().main
self._storage = hs.get_storage()
self._storage_controllers = hs.get_storage_controllers()
self._auth = hs.get_auth()
self._clock = hs.get_clock()
self._event_handler = hs.get_event_handler()
@ -143,7 +134,10 @@ class RelationsHandler:
)
events = await filter_events_for_client(
self._storage, user_id, events, is_peeking=(member_event_id is None)
self._storage_controllers,
user_id,
events,
is_peeking=(member_event_id is None),
)
now = self._clock.time_msec()
@ -253,13 +247,19 @@ class RelationsHandler:
return filtered_results
async def get_threads_for_events(
self, event_ids: Collection[str], user_id: str, ignored_users: FrozenSet[str]
async def _get_threads_for_events(
self,
events_by_id: Dict[str, EventBase],
relations_by_id: Dict[str, str],
user_id: str,
ignored_users: FrozenSet[str],
) -> Dict[str, _ThreadAggregation]:
"""Get the bundled aggregations for threads for the requested events.
Args:
event_ids: Events to get aggregations for threads.
events_by_id: A map of event_id to events to get aggregations for threads.
relations_by_id: A map of event_id to the relation type, if one exists
for that event.
user_id: The user requesting the bundled aggregations.
ignored_users: The users ignored by the requesting user.
@ -270,16 +270,34 @@ class RelationsHandler:
"""
user = UserID.from_string(user_id)
# It is not valid to start a thread on an event which itself relates to another event.
event_ids = [eid for eid in events_by_id.keys() if eid not in relations_by_id]
# Fetch thread summaries.
summaries = await self._main_store.get_thread_summaries(event_ids)
# Only fetch participated for a limited selection based on what had
# summaries.
# Limit fetching whether the requester has participated in a thread to
# events which are thread roots.
thread_event_ids = [
event_id for event_id, summary in summaries.items() if summary
]
participated = await self._main_store.get_threads_participated(
thread_event_ids, user_id
# Pre-seed thread participation with whether the requester sent the event.
participated = {
event_id: events_by_id[event_id].sender == user_id
for event_id in thread_event_ids
}
# For events the requester did not send, check the database for whether
# the requester sent a threaded reply.
participated.update(
await self._main_store.get_threads_participated(
[
event_id
for event_id in thread_event_ids
if not participated[event_id]
],
user_id,
)
)
# Then subtract off the results for any ignored users.
@ -340,7 +358,8 @@ class RelationsHandler:
count=thread_count,
# If there's a thread summary it must also exist in the
# participated dictionary.
current_user_participated=participated[event_id],
current_user_participated=events_by_id[event_id].sender == user_id
or participated[event_id],
)
return results
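The pre-seeding above saves a database round trip for thread roots the requester sent themselves; a simplified sketch of the same pattern, assuming the `get_threads_participated` store method used above:

async def participation_for_threads(store, events_by_id, thread_ids, user_id):
    # Sending the thread root counts as participating, with no DB hit.
    participated = {
        event_id: events_by_id[event_id].sender == user_id
        for event_id in thread_ids
    }
    # Only the remaining roots need the database lookup.
    unknown = [event_id for event_id, seen in participated.items() if not seen]
    participated.update(await store.get_threads_participated(unknown, user_id))
    return participated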
@ -398,9 +417,9 @@ class RelationsHandler:
# events to be fetched. Thus, we check those first!
# Fetch thread summaries (but only for the directly requested events).
threads = await self.get_threads_for_events(
# It is not valid to start a thread on an event which itself relates to another event.
[eid for eid in events_by_id.keys() if eid not in relations_by_id],
threads = await self._get_threads_for_events(
events_by_id,
relations_by_id,
user_id,
ignored_users,
)

View file

@ -107,6 +107,7 @@ class EventContext:
class RoomCreationHandler:
def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastores().main
self._storage_controllers = hs.get_storage_controllers()
self.auth = hs.get_auth()
self.clock = hs.get_clock()
self.hs = hs
@ -468,7 +469,6 @@ class RoomCreationHandler:
(EventTypes.RoomAvatar, ""),
(EventTypes.RoomEncryption, ""),
(EventTypes.ServerACL, ""),
(EventTypes.RelatedGroups, ""),
(EventTypes.PowerLevels, ""),
]
@ -481,8 +481,10 @@ class RoomCreationHandler:
if room_type == RoomTypes.SPACE:
types_to_copy.append((EventTypes.SpaceChild, None))
old_room_state_ids = await self.store.get_filtered_current_state_ids(
old_room_id, StateFilter.from_types(types_to_copy)
old_room_state_ids = (
await self._storage_controllers.state.get_current_state_ids(
old_room_id, StateFilter.from_types(types_to_copy)
)
)
# map from event_id to BaseEvent
old_room_state_events = await self.store.get_events(old_room_state_ids.values())
@ -559,8 +561,10 @@ class RoomCreationHandler:
)
# Transfer membership events
old_room_member_state_ids = await self.store.get_filtered_current_state_ids(
old_room_id, StateFilter.from_types([(EventTypes.Member, None)])
old_room_member_state_ids = (
await self._storage_controllers.state.get_current_state_ids(
old_room_id, StateFilter.from_types([(EventTypes.Member, None)])
)
)
# map from event_id to BaseEvent
@ -1204,8 +1208,8 @@ class RoomContextHandler:
self.hs = hs
self.auth = hs.get_auth()
self.store = hs.get_datastores().main
self.storage = hs.get_storage()
self.state_store = self.storage.state
self._storage_controllers = hs.get_storage_controllers()
self._state_storage_controller = self._storage_controllers.state
self._relations_handler = hs.get_relations_handler()
async def get_event_context(
@ -1248,7 +1252,10 @@ class RoomContextHandler:
if use_admin_priviledge:
return events
return await filter_events_for_client(
self.storage, user.to_string(), events, is_peeking=is_peeking
self._storage_controllers,
user.to_string(),
events,
is_peeking=is_peeking,
)
event = await self.store.get_event(
@ -1305,7 +1312,7 @@ class RoomContextHandler:
# first? Shouldn't we be consistent with /sync?
# https://github.com/matrix-org/matrix-doc/issues/687
state = await self.state_store.get_state_for_events(
state = await self._state_storage_controller.get_state_for_events(
[last_event_id], state_filter=state_filter
)
@ -1338,6 +1345,7 @@ class TimestampLookupHandler:
self.store = hs.get_datastores().main
self.state_handler = hs.get_state_handler()
self.federation_client = hs.get_federation_client()
self._storage_controllers = hs.get_storage_controllers()
async def get_event_for_timestamp(
self,
@ -1411,7 +1419,9 @@ class TimestampLookupHandler:
)
# Find other homeservers from the given state in the room
curr_state = await self.state_handler.get_current_state(room_id)
curr_state = await self._storage_controllers.state.get_current_state(
room_id
)
curr_domains = get_domains_from_state(curr_state)
likely_domains = [
domain for domain, depth in curr_domains if domain != self.server_name

View file

@ -17,7 +17,7 @@ class RoomBatchHandler:
def __init__(self, hs: "HomeServer"):
self.hs = hs
self.store = hs.get_datastores().main
self.state_store = hs.get_storage().state
self._state_storage_controller = hs.get_storage_controllers().state
self.event_creation_handler = hs.get_event_creation_handler()
self.room_member_handler = hs.get_room_member_handler()
self.auth = hs.get_auth()
@ -143,7 +143,7 @@ class RoomBatchHandler:
) = await self.store.get_max_depth_of(event_ids)
# mapping from (type, state_key) -> state_event_id
assert most_recent_event_id is not None
prev_state_map = await self.state_store.get_state_ids_for_event(
prev_state_map = await self._state_storage_controller.get_state_ids_for_event(
most_recent_event_id
)
# List of state event ID's

View file

@ -50,6 +50,7 @@ EMPTY_THIRD_PARTY_ID = ThirdPartyInstanceID(None, None)
class RoomListHandler:
def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastores().main
self._storage_controllers = hs.get_storage_controllers()
self.hs = hs
self.enable_room_list_search = hs.config.roomdirectory.enable_room_list_search
self.response_cache: ResponseCache[
@ -274,7 +275,7 @@ class RoomListHandler:
if aliases:
result["aliases"] = aliases
current_state_ids = await self.store.get_current_state_ids(
current_state_ids = await self._storage_controllers.state.get_current_state_ids(
room_id, on_invalidate=cache_context.invalidate
)

View file

@ -68,6 +68,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
def __init__(self, hs: "HomeServer"):
self.hs = hs
self.store = hs.get_datastores().main
self._storage_controllers = hs.get_storage_controllers()
self.auth = hs.get_auth()
self.state_handler = hs.get_state_handler()
self.config = hs.config
@ -994,7 +995,9 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
# If the host is in the room, but not one of the authorised hosts
# for restricted join rules, a remote join must be used.
room_version = await self.store.get_room_version(room_id)
current_state_ids = await self.store.get_current_state_ids(room_id)
current_state_ids = await self._storage_controllers.state.get_current_state_ids(
room_id
)
# If restricted join rules are not being used, a local join can always
# be used.
@ -1081,17 +1084,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
# Transfer alias mappings in the room directory
await self.store.update_aliases_for_room(old_room_id, room_id)
# Check if any groups we own contain the predecessor room
local_group_ids = await self.store.get_local_groups_for_room(old_room_id)
for group_id in local_group_ids:
# Add the new room to those groups
await self.store.add_room_to_group(
group_id, room_id, old_room is not None and old_room["is_public"]
)
# Remove the old room from those groups
await self.store.remove_room_from_group(group_id, old_room_id)
async def copy_user_state_on_room_upgrade(
self, old_room_id: str, new_room_id: str, user_ids: Iterable[str]
) -> None:
@ -1409,7 +1401,19 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
txn_id: Optional[str],
id_access_token: Optional[str] = None,
) -> int:
room_state = await self.state_handler.get_current_state(room_id)
room_state = await self._storage_controllers.state.get_current_state(
room_id,
StateFilter.from_types(
[
(EventTypes.Member, user.to_string()),
(EventTypes.CanonicalAlias, ""),
(EventTypes.Name, ""),
(EventTypes.Create, ""),
(EventTypes.JoinRules, ""),
(EventTypes.RoomAvatar, ""),
]
),
)
inviter_display_name = ""
inviter_avatar_url = ""
@ -1805,7 +1809,7 @@ class RoomMemberMasterHandler(RoomMemberHandler):
async def forget(self, user: UserID, room_id: str) -> None:
user_id = user.to_string()
member = await self.state_handler.get_current_state(
member = await self._storage_controllers.state.get_current_state_event(
room_id=room_id, event_type=EventTypes.Member, state_key=user_id
)
membership = member.membership if member else None

View file

@ -90,6 +90,7 @@ class RoomSummaryHandler:
def __init__(self, hs: "HomeServer"):
self._event_auth_handler = hs.get_event_auth_handler()
self._store = hs.get_datastores().main
self._storage_controllers = hs.get_storage_controllers()
self._event_serializer = hs.get_event_client_serializer()
self._server_name = hs.hostname
self._federation_client = hs.get_federation_client()
@ -537,7 +538,7 @@ class RoomSummaryHandler:
Returns:
True if the room is accessible to the requesting user or server.
"""
state_ids = await self._store.get_current_state_ids(room_id)
state_ids = await self._storage_controllers.state.get_current_state_ids(room_id)
# If there's no state for the room, it isn't known.
if not state_ids:
@ -662,7 +663,8 @@ class RoomSummaryHandler:
# The API doesn't return the room version so assume that a
# join rule of knock is valid.
if (
room.get("join_rules") in (JoinRules.PUBLIC, JoinRules.KNOCK)
room.get("join_rule")
in (JoinRules.PUBLIC, JoinRules.KNOCK, JoinRules.KNOCK_RESTRICTED)
or room.get("world_readable") is True
):
return True
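A hypothetical federation summary row that passes this check:

room = {
    "room_id": "!space:example.com",
    "join_rule": "knock_restricted",  # or "public" / "knock"
    "world_readable": False,
}
# Accessible: the join rule alone satisfies the condition above.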
@ -701,7 +703,9 @@ class RoomSummaryHandler:
# there should always be an entry
assert stats is not None, "unable to retrieve stats for %s" % (room_id,)
current_state_ids = await self._store.get_current_state_ids(room_id)
current_state_ids = await self._storage_controllers.state.get_current_state_ids(
room_id
)
create_event = await self._store.get_event(
current_state_ids[(EventTypes.Create, "")]
)
@ -713,9 +717,6 @@ class RoomSummaryHandler:
"canonical_alias": stats["canonical_alias"],
"num_joined_members": stats["joined_members"],
"avatar_url": stats["avatar"],
# plural join_rules is a documentation error but kept for historical
# purposes. Should match /publicRooms.
"join_rules": stats["join_rules"],
"join_rule": stats["join_rules"],
"world_readable": (
stats["history_visibility"] == HistoryVisibility.WORLD_READABLE
@ -762,7 +763,9 @@ class RoomSummaryHandler:
"""
# look for child rooms/spaces.
current_state_ids = await self._store.get_current_state_ids(room_id)
current_state_ids = await self._storage_controllers.state.get_current_state_ids(
room_id
)
events = await self._store.get_events_as_list(
[

View file

@ -55,8 +55,8 @@ class SearchHandler:
self.hs = hs
self._event_serializer = hs.get_event_client_serializer()
self._relations_handler = hs.get_relations_handler()
self.storage = hs.get_storage()
self.state_store = self.storage.state
self._storage_controllers = hs.get_storage_controllers()
self._state_storage_controller = self._storage_controllers.state
self.auth = hs.get_auth()
async def get_old_rooms_from_upgraded_room(self, room_id: str) -> Iterable[str]:
@ -348,7 +348,7 @@ class SearchHandler:
state_results = {}
if include_state:
for room_id in {e.room_id for e in search_result.allowed_events}:
state = await self.state_handler.get_current_state(room_id)
state = await self._storage_controllers.state.get_current_state(room_id)
state_results[room_id] = list(state.values())
aggregations = await self._relations_handler.get_bundled_aggregations(
@ -460,7 +460,7 @@ class SearchHandler:
filtered_events = await search_filter.filter([r["event"] for r in results])
events = await filter_events_for_client(
self.storage, user.to_string(), filtered_events
self._storage_controllers, user.to_string(), filtered_events
)
events.sort(key=lambda e: -rank_map[e.event_id])
@ -559,7 +559,7 @@ class SearchHandler:
filtered_events = await search_filter.filter([r["event"] for r in results])
events = await filter_events_for_client(
self.storage, user.to_string(), filtered_events
self._storage_controllers, user.to_string(), filtered_events
)
room_events.extend(events)
@ -644,11 +644,11 @@ class SearchHandler:
)
events_before = await filter_events_for_client(
self.storage, user.to_string(), res.events_before
self._storage_controllers, user.to_string(), res.events_before
)
events_after = await filter_events_for_client(
self.storage, user.to_string(), res.events_after
self._storage_controllers, user.to_string(), res.events_after
)
context: JsonDict = {
@ -677,7 +677,7 @@ class SearchHandler:
[(EventTypes.Member, sender) for sender in senders]
)
state = await self.state_store.get_state_for_event(
state = await self._state_storage_controller.get_state_for_event(
last_event_id, state_filter
)

View file

@ -40,6 +40,7 @@ class StatsHandler:
def __init__(self, hs: "HomeServer"):
self.hs = hs
self.store = hs.get_datastores().main
self._storage_controllers = hs.get_storage_controllers()
self.state = hs.get_state_handler()
self.server_name = hs.hostname
self.clock = hs.get_clock()
@ -105,7 +106,10 @@ class StatsHandler:
logger.debug(
"Processing room stats %s->%s", self.pos, room_max_stream_ordering
)
max_pos, deltas = await self.store.get_current_state_deltas(
(
max_pos,
deltas,
) = await self._storage_controllers.state.get_current_state_deltas(
self.pos, room_max_stream_ordering
)

View file

@ -166,16 +166,6 @@ class KnockedSyncResult:
return True
@attr.s(slots=True, frozen=True, auto_attribs=True)
class GroupsSyncResult:
join: JsonDict
invite: JsonDict
leave: JsonDict
def __bool__(self) -> bool:
return bool(self.join or self.invite or self.leave)
@attr.s(slots=True, auto_attribs=True)
class _RoomChanges:
"""The set of room entries to include in the sync, plus the set of joined
@ -206,7 +196,6 @@ class SyncResult:
for this device
device_unused_fallback_key_types: List of key types that have an unused fallback
key
groups: Group updates, if any
"""
next_batch: StreamToken
@ -220,7 +209,6 @@ class SyncResult:
device_lists: DeviceListUpdates
device_one_time_keys_count: JsonDict
device_unused_fallback_key_types: List[str]
groups: Optional[GroupsSyncResult]
def __bool__(self) -> bool:
"""Make the result appear empty if there are no updates. This is used
@ -236,7 +224,6 @@ class SyncResult:
or self.account_data
or self.to_device
or self.device_lists
or self.groups
)
@ -251,8 +238,8 @@ class SyncHandler:
self.clock = hs.get_clock()
self.state = hs.get_state_handler()
self.auth = hs.get_auth()
self.storage = hs.get_storage()
self.state_store = self.storage.state
self._storage_controllers = hs.get_storage_controllers()
self._state_storage_controller = self._storage_controllers.state
# TODO: flush cache entries on subsequent sync request.
# Once we get the next /sync request (ie, one with the same access token
@ -519,13 +506,15 @@ class SyncHandler:
# ensure that we always include current state in the timeline
current_state_ids: FrozenSet[str] = frozenset()
if any(e.is_state() for e in recents):
current_state_ids_map = await self.store.get_current_state_ids(
room_id
current_state_ids_map = (
await self._state_storage_controller.get_current_state_ids(
room_id
)
)
current_state_ids = frozenset(current_state_ids_map.values())
recents = await filter_events_for_client(
self.storage,
self._storage_controllers,
sync_config.user.to_string(),
recents,
always_include_ids=current_state_ids,
@ -587,13 +576,16 @@ class SyncHandler:
# ensure that we always include current state in the timeline
current_state_ids = frozenset()
if any(e.is_state() for e in loaded_recents):
current_state_ids_map = await self.store.get_current_state_ids(
room_id
# FIXME(faster_joins): We use the partial state here as
# we don't want to block `/sync` on finishing a lazy join.
# Is this the correct way of doing it?
current_state_ids_map = (
await self.store.get_partial_current_state_ids(room_id)
)
current_state_ids = frozenset(current_state_ids_map.values())
loaded_recents = await filter_events_for_client(
self.storage,
self._storage_controllers,
sync_config.user.to_string(),
loaded_recents,
always_include_ids=current_state_ids,
@ -634,21 +626,32 @@ class SyncHandler:
)
async def get_state_after_event(
self, event: EventBase, state_filter: Optional[StateFilter] = None
self, event_id: str, state_filter: Optional[StateFilter] = None
) -> StateMap[str]:
"""
Get the room state after the given event
Args:
event: event of interest
event_id: event of interest
state_filter: The state filter used to fetch state from the database.
"""
state_ids = await self.state_store.get_state_ids_for_event(
event.event_id, state_filter=state_filter or StateFilter.all()
state_ids = await self._state_storage_controller.get_state_ids_for_event(
event_id, state_filter=state_filter or StateFilter.all()
)
if event.is_state():
# using get_metadata_for_events here (instead of get_event) sidesteps an issue
# with redactions: if `event_id` is a redaction event, and we don't have the
# original (possibly because it got purged), get_event will refuse to return
# the redaction event, which isn't terribly helpful here.
#
# (To be fair, in that case we could assume it's *not* a state event, and
# therefore we don't need to worry about it. But still, it seems cleaner just
# to pull the metadata.)
m = (await self.store.get_metadata_for_events([event_id]))[event_id]
if m.state_key is not None and m.rejection_reason is None:
state_ids = dict(state_ids)
state_ids[(event.type, event.state_key)] = event.event_id
state_ids[(m.event_type, m.state_key)] = event_id
return state_ids
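The net effect is that the state *after* an accepted state event includes the event itself; a toy illustration of the final adjustment (hypothetical event IDs):

state_ids = {("m.room.name", ""): "$old_name_event"}
# Suppose event_id refers to an accepted m.room.name event:
state_ids = dict(state_ids)
state_ids[("m.room.name", "")] = "$new_name_event"
assert state_ids[("m.room.name", "")] == "$new_name_event"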
async def get_state_at(
@ -667,14 +670,14 @@ class SyncHandler:
# FIXME: This gets the state at the latest event before the stream ordering,
# which might not be the same as the "current state" of the room at the time
# of the stream token if there were multiple forward extremities at the time.
last_event = await self.store.get_last_event_in_room_before_stream_ordering(
last_event_id = await self.store.get_last_event_in_room_before_stream_ordering(
room_id,
end_token=stream_position.room_key,
)
if last_event:
if last_event_id:
state = await self.get_state_after_event(
last_event, state_filter=state_filter or StateFilter.all()
last_event_id, state_filter=state_filter or StateFilter.all()
)
else:
@ -723,7 +726,7 @@ class SyncHandler:
return None
last_event = last_events[-1]
state_ids = await self.state_store.get_state_ids_for_event(
state_ids = await self._state_storage_controller.get_state_ids_for_event(
last_event.event_id,
state_filter=StateFilter.from_types(
[(EventTypes.Name, ""), (EventTypes.CanonicalAlias, "")]
@ -901,12 +904,16 @@ class SyncHandler:
if full_state:
if batch:
current_state_ids = await self.state_store.get_state_ids_for_event(
batch.events[-1].event_id, state_filter=state_filter
current_state_ids = (
await self._state_storage_controller.get_state_ids_for_event(
batch.events[-1].event_id, state_filter=state_filter
)
)
state_ids = await self.state_store.get_state_ids_for_event(
batch.events[0].event_id, state_filter=state_filter
state_ids = (
await self._state_storage_controller.get_state_ids_for_event(
batch.events[0].event_id, state_filter=state_filter
)
)
else:
@ -926,7 +933,7 @@ class SyncHandler:
elif batch.limited:
if batch:
state_at_timeline_start = (
await self.state_store.get_state_ids_for_event(
await self._state_storage_controller.get_state_ids_for_event(
batch.events[0].event_id, state_filter=state_filter
)
)
@ -960,8 +967,10 @@ class SyncHandler:
)
if batch:
current_state_ids = await self.state_store.get_state_ids_for_event(
batch.events[-1].event_id, state_filter=state_filter
current_state_ids = (
await self._state_storage_controller.get_state_ids_for_event(
batch.events[-1].event_id, state_filter=state_filter
)
)
else:
# Its not clear how we get here, but empirically we do
@ -991,7 +1000,7 @@ class SyncHandler:
# So we fish out all the member events corresponding to the
# timeline here, and then dedupe any redundant ones below.
state_ids = await self.state_store.get_state_ids_for_event(
state_ids = await self._state_storage_controller.get_state_ids_for_event(
batch.events[0].event_id,
# we only want members!
state_filter=StateFilter.from_types(
@ -1156,10 +1165,6 @@ class SyncHandler:
await self.store.get_e2e_unused_fallback_key_types(user_id, device_id)
)
if self.hs_config.experimental.groups_enabled:
logger.debug("Fetching group data")
await self._generate_sync_entry_for_groups(sync_result_builder)
num_events = 0
# debug for https://github.com/matrix-org/synapse/issues/9424
@ -1183,57 +1188,11 @@ class SyncHandler:
archived=sync_result_builder.archived,
to_device=sync_result_builder.to_device,
device_lists=device_lists,
groups=sync_result_builder.groups,
device_one_time_keys_count=one_time_key_counts,
device_unused_fallback_key_types=unused_fallback_key_types,
next_batch=sync_result_builder.now_token,
)
@measure_func("_generate_sync_entry_for_groups")
async def _generate_sync_entry_for_groups(
self, sync_result_builder: "SyncResultBuilder"
) -> None:
user_id = sync_result_builder.sync_config.user.to_string()
since_token = sync_result_builder.since_token
now_token = sync_result_builder.now_token
if since_token and since_token.groups_key:
results = await self.store.get_groups_changes_for_user(
user_id, since_token.groups_key, now_token.groups_key
)
else:
results = await self.store.get_all_groups_for_user(
user_id, now_token.groups_key
)
invited = {}
joined = {}
left = {}
for result in results:
membership = result["membership"]
group_id = result["group_id"]
gtype = result["type"]
content = result["content"]
if membership == "join":
if gtype == "membership":
# TODO: Add profile
content.pop("membership", None)
joined[group_id] = content["content"]
else:
joined.setdefault(group_id, {})[gtype] = content
elif membership == "invite":
if gtype == "membership":
content.pop("membership", None)
invited[group_id] = content["content"]
else:
if gtype == "membership":
left[group_id] = content["content"]
sync_result_builder.groups = GroupsSyncResult(
join=joined, invite=invited, leave=left
)
@measure_func("_generate_sync_entry_for_device_list")
async def _generate_sync_entry_for_device_list(
self,
@ -2332,7 +2291,6 @@ class SyncResultBuilder:
invited
knocked
archived
groups
to_device
"""
@ -2348,7 +2306,6 @@ class SyncResultBuilder:
invited: List[InvitedSyncResult] = attr.Factory(list)
knocked: List[KnockedSyncResult] = attr.Factory(list)
archived: List[ArchivedSyncResult] = attr.Factory(list)
groups: Optional[GroupsSyncResult] = None
to_device: List[JsonDict] = attr.Factory(list)
def calculate_user_changes(self) -> Tuple[Set[str], Set[str]]:

View file

@ -17,6 +17,7 @@ from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Set, Tuple
import attr
from synapse.api.constants import EduTypes
from synapse.api.errors import AuthError, ShadowBanError, SynapseError
from synapse.appservice import ApplicationService
from synapse.metrics.background_process_metrics import (
@ -58,6 +59,7 @@ class FollowerTypingHandler:
def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastores().main
self._storage_controllers = hs.get_storage_controllers()
self.server_name = hs.config.server.server_name
self.clock = hs.get_clock()
self.is_mine_id = hs.is_mine_id
@ -68,7 +70,7 @@ class FollowerTypingHandler:
if hs.get_instance_name() not in hs.config.worker.writers.typing:
hs.get_federation_registry().register_instances_for_edu(
"m.typing",
EduTypes.TYPING,
hs.config.worker.writers.typing,
)
@ -130,7 +132,6 @@ class FollowerTypingHandler:
return
try:
users = await self.store.get_users_in_room(member.room_id)
self._member_last_federation_poke[member] = self.clock.time_msec()
now = self.clock.time_msec()
@ -138,12 +139,15 @@ class FollowerTypingHandler:
now=now, obj=member, then=now + FEDERATION_PING_INTERVAL
)
for domain in {get_domain_from_id(u) for u in users}:
hosts = await self._storage_controllers.state.get_current_hosts_in_room(
member.room_id
)
for domain in hosts:
if domain != self.server_name:
logger.debug("sending typing update to %s", domain)
self.federation.build_and_send_edu(
destination=domain,
edu_type="m.typing",
edu_type=EduTypes.TYPING,
content={
"room_id": member.room_id,
"user_id": member.user_id,
@ -218,7 +222,9 @@ class TypingWriterHandler(FollowerTypingHandler):
self.hs = hs
hs.get_federation_registry().register_edu_handler("m.typing", self._recv_edu)
hs.get_federation_registry().register_edu_handler(
EduTypes.TYPING, self._recv_edu
)
hs.get_distributor().observe("user_left_room", self.user_left_room)
@ -458,7 +464,7 @@ class TypingNotificationEventSource(EventSource[int, JsonDict]):
def _make_event_for(self, room_id: str) -> JsonDict:
typing = self.get_typing_handler()._room_typing[room_id]
return {
"type": "m.typing",
"type": EduTypes.TYPING,
"room_id": room_id,
"content": {"user_ids": list(typing)},
}
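Concretely, the event source emits dictionaries of this shape (hypothetical room and users):

typing_event = {
    "type": "m.typing",
    "room_id": "!room:example.com",
    "content": {"user_ids": ["@alice:example.com", "@bob:example.com"]},
}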

View file

@ -56,6 +56,7 @@ class UserDirectoryHandler(StateDeltasHandler):
super().__init__(hs)
self.store = hs.get_datastores().main
self._storage_controllers = hs.get_storage_controllers()
self.server_name = hs.hostname
self.clock = hs.get_clock()
self.notifier = hs.get_notifier()
@ -174,7 +175,10 @@ class UserDirectoryHandler(StateDeltasHandler):
logger.debug(
"Processing user stats %s->%s", self.pos, room_max_stream_ordering
)
max_pos, deltas = await self.store.get_current_state_deltas(
(
max_pos,
deltas,
) = await self._storage_controllers.state.get_current_state_deltas(
self.pos, room_max_stream_ordering
)

View file

@ -92,9 +92,6 @@ incoming_responses_counter = Counter(
"synapse_http_matrixfederationclient_responses", "", ["method", "code"]
)
# a federation response can be rather large (eg a big state_ids is 50M or so), so we
# need a generous limit here.
MAX_RESPONSE_SIZE = 100 * 1024 * 1024
MAX_LONG_RETRIES = 10
MAX_SHORT_RETRIES = 3
@ -116,6 +113,11 @@ class ByteParser(ByteWriteable, Generic[T], abc.ABC):
the content type doesn't match we fail the request.
"""
# a federation response can be rather large (eg a big state_ids is 50M or so), so we
# need a generous limit here.
MAX_RESPONSE_SIZE: int = 100 * 1024 * 1024
"""The largest response this parser will accept."""
@abc.abstractmethod
def finish(self) -> T:
"""Called when response has finished streaming and the parser should
@ -203,7 +205,6 @@ async def _handle_response(
response: IResponse,
start_ms: int,
parser: ByteParser[T],
max_response_size: Optional[int] = None,
) -> T:
"""
Reads the body of a response with a timeout and sends it to a parser
@ -215,15 +216,12 @@ async def _handle_response(
response: response to the request
start_ms: Timestamp when request was made
parser: The parser for the response
max_response_size: The maximum size to read from the response, if None
uses the default.
Returns:
The parsed response
"""
if max_response_size is None:
max_response_size = MAX_RESPONSE_SIZE
max_response_size = parser.MAX_RESPONSE_SIZE
finished = False
try:
@ -242,7 +240,7 @@ async def _handle_response(
"{%s} [%s] JSON response exceeded max size %i - %s %s",
request.txn_id,
request.destination,
MAX_RESPONSE_SIZE,
max_response_size,
request.method,
request.uri.decode("ascii"),
)
@ -783,7 +781,6 @@ class MatrixFederationHttpClient:
backoff_on_404: bool = False,
try_trailing_slash_on_400: bool = False,
parser: Literal[None] = None,
max_response_size: Optional[int] = None,
) -> Union[JsonDict, list]:
...
@ -801,7 +798,6 @@ class MatrixFederationHttpClient:
backoff_on_404: bool = False,
try_trailing_slash_on_400: bool = False,
parser: Optional[ByteParser[T]] = None,
max_response_size: Optional[int] = None,
) -> T:
...
@ -818,7 +814,6 @@ class MatrixFederationHttpClient:
backoff_on_404: bool = False,
try_trailing_slash_on_400: bool = False,
parser: Optional[ByteParser] = None,
max_response_size: Optional[int] = None,
):
"""Sends the specified json data using PUT
@ -854,8 +849,6 @@ class MatrixFederationHttpClient:
enabled.
parser: The parser to use to decode the response. Defaults to
parsing as JSON.
max_response_size: The maximum size to read from the response, if None
uses the default.
Returns:
Succeeds when we get a 2xx HTTP response. The
@ -906,7 +899,6 @@ class MatrixFederationHttpClient:
response,
start_ms,
parser=parser,
max_response_size=max_response_size,
)
return body
@ -995,7 +987,6 @@ class MatrixFederationHttpClient:
ignore_backoff: bool = False,
try_trailing_slash_on_400: bool = False,
parser: Literal[None] = None,
max_response_size: Optional[int] = None,
) -> Union[JsonDict, list]:
...
@ -1010,7 +1001,6 @@ class MatrixFederationHttpClient:
ignore_backoff: bool = ...,
try_trailing_slash_on_400: bool = ...,
parser: ByteParser[T] = ...,
max_response_size: Optional[int] = ...,
) -> T:
...
@ -1024,7 +1014,6 @@ class MatrixFederationHttpClient:
ignore_backoff: bool = False,
try_trailing_slash_on_400: bool = False,
parser: Optional[ByteParser] = None,
max_response_size: Optional[int] = None,
):
"""GETs some json from the given host homeserver and path
@ -1054,9 +1043,6 @@ class MatrixFederationHttpClient:
parser: The parser to use to decode the response. Defaults to
parsing as JSON.
max_response_size: The maximum size to read from the response. If None,
uses the default.
Returns:
Succeeds when we get a 2xx HTTP response. The
result will be the decoded JSON body.
@ -1101,7 +1087,6 @@ class MatrixFederationHttpClient:
response,
start_ms,
parser=parser,
max_response_size=max_response_size,
)
return body

View file

@ -168,9 +168,24 @@ import inspect
import logging
import re
from functools import wraps
from typing import TYPE_CHECKING, Collection, Dict, List, Optional, Pattern, Type
from typing import (
TYPE_CHECKING,
Any,
Callable,
Collection,
Dict,
Generator,
Iterable,
List,
Optional,
Pattern,
Type,
TypeVar,
Union,
)
import attr
from typing_extensions import ParamSpec
from twisted.internet import defer
from twisted.web.http import Request
@ -256,7 +271,7 @@ try:
def set_process(self, *args, **kwargs):
return self._reporter.set_process(*args, **kwargs)
def report_span(self, span):
def report_span(self, span: "opentracing.Span") -> None:
try:
return self._reporter.report_span(span)
except Exception:
@ -307,15 +322,19 @@ _homeserver_whitelist: Optional[Pattern[str]] = None
Sentinel = object()
def only_if_tracing(func):
P = ParamSpec("P")
R = TypeVar("R")
def only_if_tracing(func: Callable[P, R]) -> Callable[P, Optional[R]]:
"""Executes the function only if we're tracing. Otherwise returns None."""
@wraps(func)
def _only_if_tracing_inner(*args, **kwargs):
def _only_if_tracing_inner(*args: P.args, **kwargs: P.kwargs) -> Optional[R]:
if opentracing:
return func(*args, **kwargs)
else:
return
return None
return _only_if_tracing_inner
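With `ParamSpec`/`TypeVar`, the decorator now preserves the wrapped signature for type checkers; a small usage sketch (hypothetical function):

@only_if_tracing
def annotate(request_id: str) -> str:
    # Runs only when opentracing is active; otherwise the wrapper
    # returns None without calling this function.
    return "traced " + request_id

result = annotate("abc123")  # typed as Optional[str]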
@ -356,17 +375,10 @@ def ensure_active_span(message, ret=None):
return ensure_active_span_inner_1
@contextlib.contextmanager
def noop_context_manager(*args, **kwargs):
"""Does exactly what it says on the tin"""
# TODO: replace with contextlib.nullcontext once we drop support for Python 3.6
yield
# Setup
def init_tracer(hs: "HomeServer"):
def init_tracer(hs: "HomeServer") -> None:
"""Set the whitelists and initialise the JaegerClient tracer"""
global opentracing
if not hs.config.tracing.opentracer_enabled:
@ -408,11 +420,11 @@ def init_tracer(hs: "HomeServer"):
@only_if_tracing
def set_homeserver_whitelist(homeserver_whitelist):
def set_homeserver_whitelist(homeserver_whitelist: Iterable[str]) -> None:
"""Sets the homeserver whitelist
Args:
homeserver_whitelist (Iterable[str]): regex of whitelisted homeservers
homeserver_whitelist: regexes specifying whitelisted homeservers
"""
global _homeserver_whitelist
if homeserver_whitelist:
@ -423,15 +435,15 @@ def set_homeserver_whitelist(homeserver_whitelist):
@only_if_tracing
def whitelisted_homeserver(destination):
def whitelisted_homeserver(destination: str) -> bool:
"""Checks if a destination matches the whitelist
Args:
destination (str)
destination
"""
if _homeserver_whitelist:
return _homeserver_whitelist.match(destination)
return _homeserver_whitelist.match(destination) is not None
return False
@ -457,11 +469,11 @@ def start_active_span(
Args:
See opentracing.tracer
Returns:
scope (Scope) or noop_context_manager
scope (Scope) or contextlib.nullcontext
"""
if opentracing is None:
return noop_context_manager() # type: ignore[unreachable]
return contextlib.nullcontext() # type: ignore[unreachable]
if tracer is None:
# use the global tracer by default
@ -505,7 +517,7 @@ def start_active_span_follows_from(
tracer: override the opentracing tracer. By default the global tracer is used.
"""
if opentracing is None:
return noop_context_manager() # type: ignore[unreachable]
return contextlib.nullcontext() # type: ignore[unreachable]
references = [opentracing.follows_from(context) for context in contexts]
scope = start_active_span(
@ -525,19 +537,19 @@ def start_active_span_follows_from(
def start_active_span_from_edu(
edu_content,
operation_name,
references: Optional[list] = None,
tags=None,
start_time=None,
ignore_active_span=False,
finish_on_close=True,
):
edu_content: Dict[str, Any],
operation_name: str,
references: Optional[List["opentracing.Reference"]] = None,
tags: Optional[Dict] = None,
start_time: Optional[float] = None,
ignore_active_span: bool = False,
finish_on_close: bool = True,
) -> "opentracing.Scope":
"""
Extracts a span context from an edu and uses it to start a new active span
Args:
edu_content (dict): and edu_content with a `context` field whose value is
edu_content: an edu_content with a `context` field whose value is
canonical json for a dict which contains opentracing information.
For the other args see opentracing.tracer
@ -545,7 +557,7 @@ def start_active_span_from_edu(
references = references or []
if opentracing is None:
return noop_context_manager() # type: ignore[unreachable]
return contextlib.nullcontext() # type: ignore[unreachable]
carrier = json_decoder.decode(edu_content.get("context", "{}")).get(
"opentracing", {}
@ -578,27 +590,27 @@ def start_active_span_from_edu(
# Opentracing setters for tags, logs, etc
@only_if_tracing
def active_span():
def active_span() -> Optional["opentracing.Span"]:
"""Get the currently active span, if any"""
return opentracing.tracer.active_span
@ensure_active_span("set a tag")
def set_tag(key, value):
def set_tag(key: str, value: Union[str, bool, int, float]) -> None:
"""Sets a tag on the active span"""
assert opentracing.tracer.active_span is not None
opentracing.tracer.active_span.set_tag(key, value)
@ensure_active_span("log")
def log_kv(key_values, timestamp=None):
def log_kv(key_values: Dict[str, Any], timestamp: Optional[float] = None) -> None:
"""Log to the active span"""
assert opentracing.tracer.active_span is not None
opentracing.tracer.active_span.log_kv(key_values, timestamp)
@ensure_active_span("set the traces operation name")
def set_operation_name(operation_name):
def set_operation_name(operation_name: str) -> None:
"""Sets the operation name of the active span"""
assert opentracing.tracer.active_span is not None
opentracing.tracer.active_span.set_operation_name(operation_name)
@ -624,7 +636,9 @@ def force_tracing(span=Sentinel) -> None:
span.set_baggage_item(SynapseBaggage.FORCE_TRACING, "1")
def is_context_forced_tracing(span_context) -> bool:
def is_context_forced_tracing(
span_context: Optional["opentracing.SpanContext"],
) -> bool:
"""Check if sampling has been force for the given span context."""
if span_context is None:
return False
@ -696,13 +710,13 @@ def inject_response_headers(response_headers: Headers) -> None:
@ensure_active_span("get the active span context as a dict", ret={})
def get_active_span_text_map(destination=None):
def get_active_span_text_map(destination: Optional[str] = None) -> Dict[str, str]:
"""
Gets a span context as a dict. This can be used instead of manually
injecting a span into an empty carrier.
Args:
destination (str): the name of the remote server.
destination: the name of the remote server.
Returns:
dict: the active span's context if opentracing is enabled, otherwise empty.
@ -721,7 +735,7 @@ def get_active_span_text_map(destination=None):
@ensure_active_span("get the span context as a string.", ret={})
def active_span_context_as_string():
def active_span_context_as_string() -> str:
"""
Returns:
The active span context encoded as a string.
@ -750,21 +764,21 @@ def span_context_from_request(request: Request) -> "Optional[opentracing.SpanCon
@only_if_tracing
def span_context_from_string(carrier):
def span_context_from_string(carrier: str) -> Optional["opentracing.SpanContext"]:
"""
Returns:
The active span context decoded from a string.
"""
carrier = json_decoder.decode(carrier)
return opentracing.tracer.extract(opentracing.Format.TEXT_MAP, carrier)
payload: Dict[str, str] = json_decoder.decode(carrier)
return opentracing.tracer.extract(opentracing.Format.TEXT_MAP, payload)
@only_if_tracing
def extract_text_map(carrier):
def extract_text_map(carrier: Dict[str, str]) -> Optional["opentracing.SpanContext"]:
"""
Wrapper method for opentracing's tracer.extract for TEXT_MAP.
Args:
carrier (dict): a dict possibly containing a span context.
carrier: a dict possibly containing a span context.
Returns:
The active span context extracted from carrier.
@ -843,7 +857,7 @@ def trace(func=None, opname=None):
return decorator
def tag_args(func):
def tag_args(func: Callable[P, R]) -> Callable[P, R]:
"""
Tags all of the args to the active span.
"""
@ -852,11 +866,11 @@ def tag_args(func):
return func
@wraps(func)
def _tag_args_inner(*args, **kwargs):
def _tag_args_inner(*args: P.args, **kwargs: P.kwargs) -> R:
argspec = inspect.getfullargspec(func)
for i, arg in enumerate(argspec.args[1:]):
set_tag("ARG_" + arg, args[i])
set_tag("args", args[len(argspec.args) :])
set_tag("ARG_" + arg, args[i]) # type: ignore[index]
set_tag("args", args[len(argspec.args) :]) # type: ignore[index]
set_tag("kwargs", kwargs)
return func(*args, **kwargs)
@ -864,7 +878,9 @@ def tag_args(func):
@contextlib.contextmanager
def trace_servlet(request: "SynapseRequest", extract_context: bool = False):
def trace_servlet(
request: "SynapseRequest", extract_context: bool = False
) -> Generator[None, None, None]:
"""Returns a context manager which traces a request. It starts a span
with some servlet specific tags such as the request metrics name and
request information.

View file

@ -14,6 +14,7 @@
import logging
import threading
from contextlib import nullcontext
from functools import wraps
from types import TracebackType
from typing import (
@ -41,11 +42,7 @@ from synapse.logging.context import (
LoggingContext,
PreserveLoggingContext,
)
from synapse.logging.opentracing import (
SynapseTags,
noop_context_manager,
start_active_span,
)
from synapse.logging.opentracing import SynapseTags, start_active_span
from synapse.metrics._types import Collector
if TYPE_CHECKING:
@ -238,7 +235,7 @@ def run_as_background_process(
f"bgproc.{desc}", tags={SynapseTags.REQUEST_ID: str(context)}
)
else:
ctx = noop_context_manager()
ctx = nullcontext()
with ctx:
return await func(*args, **kwargs)
except Exception:

View file

@ -194,6 +194,7 @@ class ModuleApi:
self._store: Union[
DataStore, "GenericWorkerSlavedStore"
] = hs.get_datastores().main
self._storage_controllers = hs.get_storage_controllers()
self._auth = hs.get_auth()
self._auth_handler = auth_handler
self._server_name = hs.hostname
@ -911,7 +912,7 @@ class ModuleApi:
The filtered state events in the room.
"""
state_ids = yield defer.ensureDeferred(
self._store.get_filtered_current_state_ids(
self._storage_controllers.state.get_current_state_ids(
room_id=room_id, state_filter=StateFilter.from_types(types)
)
)
@ -1147,7 +1148,10 @@ class ModuleApi:
)
async def sleep(self, seconds: float) -> None:
"""Sleeps for the given number of seconds."""
"""Sleeps for the given number of seconds.
Added in Synapse v1.49.0.
"""
await self._clock.sleep(seconds)
@ -1286,20 +1290,16 @@ class ModuleApi:
# regardless of their state key
]
"""
state_filter = None
if event_filter:
# If a filter was provided, turn it into a StateFilter and retrieve a filtered
# view of the state.
state_filter = StateFilter.from_types(event_filter)
state_ids = await self._store.get_filtered_current_state_ids(
room_id,
state_filter,
)
else:
# If no filter was provided, get the whole state. We could also reuse the call
# to get_filtered_current_state_ids above, with `state_filter = StateFilter.all()`,
# but get_filtered_current_state_ids isn't cached and `get_current_state_ids`
# is, so using the latter when we can is better for perf.
state_ids = await self._store.get_current_state_ids(room_id)
state_ids = await self._storage_controllers.state.get_current_state_ids(
room_id,
state_filter,
)
state_events = await self._store.get_events(state_ids.values())
@ -1427,6 +1427,28 @@ class ModuleApi:
user_id, spec, {"actions": actions}
)
async def get_monthly_active_users_by_service(
self, start_timestamp: Optional[int] = None, end_timestamp: Optional[int] = None
) -> List[Tuple[str, str]]:
"""Generates list of monthly active users and their services.
Please see corresponding storage docstring for more details.
Added in Synapse v1.61.0.
Arguments:
start_timestamp: If specified, only include users that were first active
at or after this point
end_timestamp: If specified, only include users that were first active
at or before this point
Returns:
A list of tuples (appservice_id, user_id)
"""
return await self._store.get_monthly_active_users_by_service(
start_timestamp, end_timestamp
)
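A module could build simple per-service MAU counts on top of this; a minimal sketch, assuming `api` is the `ModuleApi` instance handed to the module:

from collections import Counter

async def mau_by_service(api, since_ms=None):
    # Returns {appservice_id: number of monthly active users}.
    rows = await api.get_monthly_active_users_by_service(start_timestamp=since_ms)
    return Counter(appservice_id for appservice_id, _user_id in rows)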
class PublicRoomListManager:
"""Contains methods for adding to, removing from and querying whether a room

View file

@ -33,7 +33,7 @@ from prometheus_client import Counter
from twisted.internet import defer
from synapse.api.constants import EventTypes, HistoryVisibility, Membership
from synapse.api.constants import EduTypes, EventTypes, HistoryVisibility, Membership
from synapse.api.errors import AuthError
from synapse.events import EventBase
from synapse.handlers.presence import format_user_presence_state
@ -221,7 +221,7 @@ class Notifier:
self.room_to_user_streams: Dict[str, Set[_NotifierUserStream]] = {}
self.hs = hs
self.storage = hs.get_storage()
self._storage_controllers = hs.get_storage_controllers()
self.event_sources = hs.get_event_sources()
self.store = hs.get_datastores().main
self.pending_new_room_events: List[_PendingRoomEventEntry] = []
@ -623,7 +623,7 @@ class Notifier:
if name == "room":
new_events = await filter_events_for_client(
self.storage,
self._storage_controllers,
user.to_string(),
new_events,
is_peeking=is_peeking,
@ -632,7 +632,7 @@ class Notifier:
now = self.clock.time_msec()
new_events[:] = [
{
"type": "m.presence",
"type": EduTypes.PRESENCE,
"content": format_user_presence_state(event, now),
}
for event in new_events
@ -681,7 +681,7 @@ class Notifier:
return joined_room_ids, True
async def _is_world_readable(self, room_id: str) -> bool:
state = await self.state_handler.get_current_state(
state = await self._storage_controllers.state.get_current_state_event(
room_id, EventTypes.RoomHistoryVisibility, ""
)
if state and "history_visibility" in state.content:

Some files were not shown because too many files have changed in this diff.