Merge remote-tracking branch 'origin/develop' into matrix-org-hotfixes

Erik Johnston 2021-07-16 11:25:22 +01:00
commit cc07548d71
251 changed files with 2470 additions and 1712 deletions

.github/workflows/release-artifacts.yml vendored Normal file

@ -0,0 +1,95 @@
# GitHub actions workflow which builds the release artifacts.
name: Build release artifacts
on:
# we build on PRs and develop to (hopefully) get early warning
# of things breaking (but only build one set of debs)
pull_request:
push:
branches: ["develop"]
# we do the full build on tags.
tags: ["v*"]
permissions:
contents: write
jobs:
get-distros:
name: "Calculate list of debian distros"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
- id: set-distros
run: |
# if we're running from a tag, get the full list of distros; otherwise just use debian:sid
dists='["debian:sid"]'
if [[ $GITHUB_REF == refs/tags/* ]]; then
dists=$(scripts-dev/build_debian_packages --show-dists-json)
fi
echo "::set-output name=distros::$dists"
# map the step outputs to job outputs
outputs:
distros: ${{ steps.set-distros.outputs.distros }}
# now build the packages with a matrix build.
build-debs:
needs: get-distros
name: "Build .deb packages"
runs-on: ubuntu-latest
strategy:
matrix:
distro: ${{ fromJson(needs.get-distros.outputs.distros) }}
steps:
- uses: actions/checkout@v2
with:
path: src
- uses: actions/setup-python@v2
- run: ./src/scripts-dev/build_debian_packages "${{ matrix.distro }}"
- uses: actions/upload-artifact@v2
with:
name: debs
path: debs/*
build-sdist:
name: "Build pypi distribution files"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
- run: pip install wheel
- run: |
python setup.py sdist bdist_wheel
- uses: actions/upload-artifact@v2
with:
name: python-dist
path: dist/*
# if it's a tag, create a release and attach the artifacts to it
attach-assets:
name: "Attach assets to release"
if: ${{ !failure() && !cancelled() && startsWith(github.ref, 'refs/tags/') }}
needs:
- build-debs
- build-sdist
runs-on: ubuntu-latest
steps:
- name: Download all workflow run artifacts
uses: actions/download-artifact@v2
- name: Build a tarball for the debs
run: tar -cvJf debs.tar.xz debs
- name: Attach to release
uses: softprops/action-gh-release@a929a66f232c1b11af63782948aa2210f981808a # PR#109
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
files: |
python-dist/*
debs.tar.xz
# if it's not already published, keep the release as a draft.
draft: true
# mark it as a prerelease if the tag contains 'rc'.
prerelease: ${{ contains(github.ref, 'rc') }}


@ -65,14 +65,14 @@ jobs:
# Dummy step to gate other tests on without repeating the whole list
linting-done:
if: ${{ always() }} # Run this even if prior jobs were skipped
if: ${{ !cancelled() }} # Run this even if prior jobs were skipped
needs: [lint, lint-crlf, lint-newsfile, lint-sdist]
runs-on: ubuntu-latest
steps:
- run: "true"
trial:
if: ${{ !failure() }} # Allow previous steps to be skipped, but not fail
if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
needs: linting-done
runs-on: ubuntu-latest
strategy:
@ -131,7 +131,7 @@ jobs:
|| true
trial-olddeps:
if: ${{ !failure() }} # Allow previous steps to be skipped, but not fail
if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
needs: linting-done
runs-on: ubuntu-latest
steps:
@ -156,7 +156,7 @@ jobs:
trial-pypy:
# Very slow; only run if the branch name includes 'pypy'
if: ${{ contains(github.ref, 'pypy') && !failure() }}
if: ${{ contains(github.ref, 'pypy') && !failure() && !cancelled() }}
needs: linting-done
runs-on: ubuntu-latest
strategy:
@ -185,7 +185,7 @@ jobs:
|| true
sytest:
if: ${{ !failure() }}
if: ${{ !failure() && !cancelled() }}
needs: linting-done
runs-on: ubuntu-latest
container:
@ -245,7 +245,7 @@ jobs:
/logs/**/*.log*
portdb:
if: ${{ !failure() }} # Allow previous steps to be skipped, but not fail
if: ${{ !failure() && !cancelled() }} # Allow previous steps to be skipped, but not fail
needs: linting-done
runs-on: ubuntu-latest
strategy:
@ -286,7 +286,7 @@ jobs:
- run: .buildkite/scripts/test_synapse_port_db.sh
complement:
if: ${{ !failure() }}
if: ${{ !failure() && !cancelled() }}
needs: linting-done
runs-on: ubuntu-latest
container:
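
Taken together, the hunks above converge on a single gating pattern. A condensed sketch, using job names from the workflow itself:

```yaml
jobs:
  linting-done:
    # `!cancelled()` (rather than `always()`) still runs this gate when the
    # lint jobs were skipped, but not when the whole run has been cancelled.
    if: ${{ !cancelled() }}
    needs: [lint, lint-crlf, lint-newsfile, lint-sdist]
    runs-on: ubuntu-latest
    steps:
      - run: "true"
  trial:
    # Downstream jobs tolerate skipped (but not failed) predecessors, and
    # also stop if the run was cancelled.
    if: ${{ !cancelled() && !failure() }}
    needs: linting-done
    runs-on: ubuntu-latest
    steps:
      - run: echo "gated test job"
```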


@ -1,8 +1,39 @@
Synapse 1.38.0rc1 (2021-07-06)
==============================
Synapse 1.38.0 (2021-07-13)
===========================
This release includes a database schema update which could result in elevated disk usage. See the [upgrade notes](https://matrix-org.github.io/synapse/develop/upgrade#upgrading-to-v1380) for more information.
No significant changes since 1.38.0rc3.
Synapse 1.38.0rc3 (2021-07-13)
==============================
Internal Changes
----------------
- Build the Debian packages in CI. ([\#10247](https://github.com/matrix-org/synapse/issues/10247), [\#10379](https://github.com/matrix-org/synapse/issues/10379))
Synapse 1.38.0rc2 (2021-07-09)
==============================
Bugfixes
--------
- Fix bug where inbound federation in a room could be delayed due to not correctly dropping a lock. Introduced in v1.37.1. ([\#10336](https://github.com/matrix-org/synapse/issues/10336))
Improved Documentation
----------------------
- Update links to documentation in the sample config. Contributed by @dklimpel. ([\#10287](https://github.com/matrix-org/synapse/issues/10287))
- Fix broken links in [INSTALL.md](INSTALL.md). Contributed by @dklimpel. ([\#10331](https://github.com/matrix-org/synapse/issues/10331))
Synapse 1.38.0rc1 (2021-07-06)
==============================
Features
--------


@ -335,8 +335,8 @@ access the API as a Matrix client would. It is able to run Synapse directly from
the source tree, so installation of the server is not required.
Testing with SyTest is recommended for verifying that changes related to the
Client-Server API are functioning correctly. See the `installation instructions
<https://github.com/matrix-org/sytest#installing>`_ for details.
Client-Server API are functioning correctly. See the `SyTest installation
instructions <https://github.com/matrix-org/sytest#installing>`_ for details.
Platform dependencies

changelog.d/10250.bugfix Normal file

@ -0,0 +1 @@
Add base starting insertion event when no chunk ID is specified in the historical batch send API.

changelog.d/10276.bugfix Normal file

@ -0,0 +1 @@
Fix historical batch send endpoint (MSC2716) rejecting batches with messages from multiple senders.


@ -1 +0,0 @@
Update links to documentation in sample config. Contributed by @dklimpel.

changelog.d/10289.misc Normal file

@ -0,0 +1 @@
Convert `room_depth.min_depth` column to a `BIGINT`.


@ -0,0 +1 @@
The spaces summary API now returns any joinable rooms, not only rooms which are world-readable.


@ -0,0 +1 @@
The spaces summary API now returns any joinable rooms, not only rooms which are world-readable.

changelog.d/10313.doc Normal file

@ -0,0 +1 @@
Simplify structure of room admin API.

changelog.d/10315.misc Normal file

@ -0,0 +1 @@
Add tests to characterise the current behaviour of R30 phone-home metrics.

changelog.d/10316.misc Normal file

@ -0,0 +1 @@
Rebuild event context and auth when processing specific results from `ThirdPartyEventRules` modules.

changelog.d/10317.bugfix Normal file

@ -0,0 +1 @@
Fix purging rooms that other homeservers are still sending events for. Contributed by @ilmari.

changelog.d/10322.doc Normal file

@ -0,0 +1 @@
Fix a broken link in the admin api docs.

changelog.d/10324.misc Normal file

@ -0,0 +1 @@
Minor change to the code that populates `user_daily_visits`.


@ -1 +0,0 @@
Fix broken links in INSTALL.md. Contributed by @dklimpel.


@ -1 +0,0 @@
Fix bug where inbound federation in a room could be delayed due to not correctly dropping a lock. Introduced in v1.37.1.

changelog.d/10337.doc Normal file

@ -0,0 +1 @@
Fix formatting in the logcontext documentation.

changelog.d/10343.bugfix Normal file

@ -0,0 +1 @@
Fix errors during backfill caused by previously purged redaction events. Contributed by Andreas Rammhold (@andir).

changelog.d/10344.bugfix Normal file

@ -0,0 +1 @@
Fix the user directory becoming broken (and noisy errors being logged) when knocking and room statistics are in use.

changelog.d/10345.misc Normal file

@ -0,0 +1 @@
Re-enable Sytests that were disabled for the 1.37.1 release.

changelog.d/10347.misc Normal file

@ -0,0 +1 @@
Run `pyupgrade` on the codebase.

changelog.d/10349.misc Normal file

@ -0,0 +1 @@
Switch `application_services_txns.txn_id` database column to `BIGINT`.

changelog.d/10350.misc Normal file

@ -0,0 +1 @@
Convert internal type variable syntax to reflect wider ecosystem use.

changelog.d/10353.doc Normal file

@ -0,0 +1 @@
Refresh the logcontext dev documentation.

changelog.d/10355.bugfix Normal file

@ -0,0 +1 @@
Fix newly added `synapse_federation_server_oldest_inbound_pdu_in_staging` prometheus metric to measure age rather than timestamp.

changelog.d/10357.misc Normal file

@ -0,0 +1 @@
Re-enable Sytests that were disabled for the 1.37.1 release.

changelog.d/10359.bugfix Normal file

@ -0,0 +1 @@
Fix PostgreSQL sometimes using table scans for queries against `state_groups_state` table, taking a long time and a large amount of IO.


@ -0,0 +1 @@
Allow providing credentials to `http_proxy`.

changelog.d/10367.bugfix Normal file

@ -0,0 +1 @@
Bugfix `make_room_admin` fails for users that have left a private room.

changelog.d/10368.doc Normal file

@ -0,0 +1 @@
Add delegation example for caddy in the reverse proxy documentation. Contributed by @moritzdietz.

changelog.d/10370.doc Normal file

@ -0,0 +1 @@
Fix some links in `docs` and `contrib`.

changelog.d/10380.misc Normal file

@ -0,0 +1 @@
Convert internal type variable syntax to reflect wider ecosystem use.

changelog.d/10381.misc Normal file

@ -0,0 +1 @@
Convert internal type variable syntax to reflect wider ecosystem use.

changelog.d/10383.misc Normal file

@ -0,0 +1 @@
Make the Github Actions workflow configuration more efficient.

changelog.d/10385.misc Normal file

@ -0,0 +1 @@
Add type hints to `get_{domain,localpart}_from_id`.

changelog.d/10391.misc Normal file

@ -0,0 +1 @@
When building Debian packages for prerelease versions, set the Section accordingly.

changelog.d/10393.misc Normal file

@ -0,0 +1 @@
Add type hints and comments to event auth code.

changelog.d/10395.doc Normal file

@ -0,0 +1 @@
Make deprecation notice of the spam checker doc more obvious.

changelog.d/10396.doc Normal file

@ -0,0 +1 @@
Add instructions on installing Debian packages for release candidates.

changelog.d/10398.misc Normal file

@ -0,0 +1 @@
Stagger sending of presence update to remote servers, reducing CPU spikes caused by starting many connections to remote servers at once.

changelog.d/10399.doc Normal file

@ -0,0 +1 @@
Rewrite the text of links to be clearer in the documentation.

changelog.d/10400.bugfix Normal file

@ -0,0 +1 @@
Fix a number of logged errors caused by remote servers being down.

changelog.d/9721.removal Normal file

@ -0,0 +1 @@
Remove functionality associated with the unused `room_stats_historical` and `user_stats_historical` tables. Contributed by @xmunoz.

changelog.d/9971.doc Normal file

@ -0,0 +1 @@
Updated installation dependencies for newer macOS versions and ARM Macs. Contributed by Luke Walsh.


@ -56,7 +56,7 @@ services:
- POSTGRES_USER=synapse
- POSTGRES_PASSWORD=changeme
# ensure the database gets created correctly
# https://github.com/matrix-org/synapse/blob/master/docs/postgres.md#set-up-database
# https://matrix-org.github.io/synapse/latest/postgres.html#set-up-database
- POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=C --lc-ctype=C
volumes:
# You may store the database tables in a local folder..


@ -1,6 +1,6 @@
# Using the Synapse Grafana dashboard
0. Set up Prometheus and Grafana. Out of scope for this readme. Useful documentation about using Grafana with Prometheus: http://docs.grafana.org/features/datasources/prometheus/
1. Have your Prometheus scrape your Synapse. https://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.md
1. Have your Prometheus scrape your Synapse. https://matrix-org.github.io/synapse/latest/metrics-howto.html
2. Import dashboard into Grafana. Download `synapse.json`. Import it to Grafana and select the correct Prometheus datasource. http://docs.grafana.org/reference/export_import/
3. Set up required recording rules. https://github.com/matrix-org/synapse/tree/master/contrib/prometheus
3. Set up required recording rules. [contrib/prometheus](../prometheus)
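
For step 1, a minimal Prometheus scrape configuration sketch (this assumes you have enabled a metrics listener on port 9000 as described in the metrics how-to; adjust the target to your setup):

```yaml
scrape_configs:
  - job_name: "synapse"
    # Synapse exposes its metrics under this path on the metrics listener.
    metrics_path: "/_synapse/metrics"
    static_configs:
      - targets: ["localhost:9000"]
```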


@ -34,7 +34,7 @@ Add a new job to the main prometheus.yml file:
```
An example of a Prometheus configuration with workers can be found in
[metrics-howto.md](https://github.com/matrix-org/synapse/blob/master/docs/metrics-howto.md).
[metrics-howto.md](https://matrix-org.github.io/synapse/latest/metrics-howto.html).
To use `synapse.rules` add


@ -3,8 +3,9 @@ Purge history API examples
# `purge_history.sh`
A bash file, that uses the [purge history API](/docs/admin_api/purge_history_api.rst) to
purge all messages in a list of rooms up to a certain event. You can select a
A bash script that uses the
[purge history API](https://matrix-org.github.io/synapse/latest/admin_api/purge_history_api.html)
to purge all messages in a list of rooms up to a certain event. You can select a
timeframe or a number of messages that you want to keep in the room.
Just configure the variables DOMAIN, ADMIN, ROOMS_ARRAY and TIME at the top of
@ -12,5 +13,6 @@ the script.
# `purge_remote_media.sh`
A bash file, that uses the [purge history API](/docs/admin_api/purge_history_api.rst) to
purge all old cached remote media.
A bash script that uses the
[purge history API](https://matrix-org.github.io/synapse/latest/admin_api/purge_history_api.html)
to purge all old cached remote media.


@ -1,7 +1,7 @@
#!/usr/bin/env bash
# this script will use the api:
# https://github.com/matrix-org/synapse/blob/master/docs/admin_api/purge_history_api.rst
# https://matrix-org.github.io/synapse/latest/admin_api/purge_history_api.html
#
# It will purge all messages in a list of rooms up to a certain event


@ -1,2 +1,3 @@
The documentation for using systemd to manage synapse workers is now part of
the main synapse distribution. See [docs/systemd-with-workers](../../docs/systemd-with-workers).
the main synapse distribution. See
[docs/systemd-with-workers](https://matrix-org.github.io/synapse/latest/systemd-with-workers/index.html).

debian/changelog vendored

@ -1,8 +1,18 @@
matrix-synapse-py3 (1.37.1ubuntu1) UNRELEASED; urgency=medium
matrix-synapse-py3 (1.38.0) stable; urgency=medium
* New synapse release 1.38.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 13 Jul 2021 13:20:56 +0100
matrix-synapse-py3 (1.38.0rc3) prerelease; urgency=medium
[ Erik Johnston ]
* Add synapse_review_recent_signups script
-- Erik Johnston <erikj@matrix.org> Thu, 01 Jul 2021 15:55:03 +0100
[ Synapse Packaging team ]
* New synapse release 1.38.0rc3.
-- Synapse Packaging team <packages@matrix.org> Tue, 13 Jul 2021 11:53:56 +0100
matrix-synapse-py3 (1.37.1) stable; urgency=medium


@ -15,6 +15,20 @@ cd /synapse/build
dch -M -l "+$DIST" "build for $DIST"
dch -M -r "" --force-distribution --distribution "$DIST"
# if this is a prerelease, set the Section accordingly.
#
# When the package is later added to the package repo, reprepro will use the
# Section to determine which "component" it should go into (see
# https://manpages.debian.org/stretch/reprepro/reprepro.1.en.html#GUESSING)
DEB_VERSION=`dpkg-parsechangelog -SVersion`
case $DEB_VERSION in
*rc*|*a*|*b*|*c*)
sed -ie '/^Section:/c\Section: prerelease' debian/control
;;
esac
dpkg-buildpackage -us -uc
ls -l ..


@ -132,7 +132,7 @@ your domain, you can simply route all traffic through the reverse proxy by
updating the SRV record appropriately (or removing it, if the proxy listens on
8448).
See [reverse_proxy.md](reverse_proxy.md) for information on setting up a
See [the reverse proxy documentation](reverse_proxy.md) for information on setting up a
reverse proxy.
#### Option 3: add a .well-known file to delegate your matrix traffic
@ -303,7 +303,7 @@ We no longer actively recommend against using a reverse proxy. Many admins will
find it easier to direct federation traffic to a reverse proxy and manage their
own TLS certificates, and this is a supported configuration.
See [reverse_proxy.md](reverse_proxy.md) for information on setting up a
See [the reverse proxy documentation](reverse_proxy.md) for information on setting up a
reverse proxy.
### Do I still need to give my TLS certificates to Synapse if I am using a reverse proxy?


@ -47,7 +47,7 @@ The API returns a JSON body like the following:
## List all media uploaded by a user
Listing all media that has been uploaded by a local user can be achieved through
the use of the [List media of a user](user_admin_api.rst#list-media-of-a-user)
the use of the [List media of a user](user_admin_api.md#list-media-of-a-user)
Admin API.
# Quarantine media
@ -257,7 +257,7 @@ URL Parameters
* `server_name`: string - The name of your local server (e.g `matrix.org`).
* `before_ts`: string representing a positive integer - Unix timestamp in ms.
Files that were last used before this timestamp will be deleted. It is the timestamp of
last access and not the timestamp creation.
last access, not the timestamp of creation.
* `size_gt`: Optional - string representing a positive integer - Size of the media in bytes.
Files that are larger will be deleted. Defaults to `0`.
* `keep_profiles`: Optional - string representing a boolean - Switch to also delete files


@ -1,13 +1,9 @@
# Contents
- [List Room API](#list-room-api)
* [Parameters](#parameters)
* [Usage](#usage)
- [Room Details API](#room-details-api)
- [Room Members API](#room-members-api)
- [Room State API](#room-state-api)
- [Delete Room API](#delete-room-api)
* [Parameters](#parameters-1)
* [Response](#response)
* [Undoing room shutdowns](#undoing-room-shutdowns)
- [Make Room Admin API](#make-room-admin-api)
- [Forward Extremities Admin API](#forward-extremities-admin-api)
@ -19,7 +15,7 @@ The List Room admin API allows server admins to get a list of rooms on their
server. There are various parameters available that allow for filtering and
sorting the returned list. This API supports pagination.
## Parameters
**Parameters**
The following query parameters are available:
@ -46,6 +42,8 @@ The following query parameters are available:
* `search_term` - Filter rooms by their room name. Search term can be contained in any
part of the room name. Defaults to no filtering.
**Response**
The following fields are possible in the JSON response body:
* `rooms` - An array of objects, each containing information about a room.
@ -79,17 +77,15 @@ The following fields are possible in the JSON response body:
Use `prev_batch` for the `from` value in the next request to
get the "previous page" of results.
## Usage
The API is:
A standard request with no filtering:
```
GET /_synapse/admin/v1/rooms
{}
```
Response:
A response body like the following is returned:
```jsonc
{
@ -137,11 +133,9 @@ Filtering by room name:
```
GET /_synapse/admin/v1/rooms?search_term=TWIM
{}
```
Response:
A response body like the following is returned:
```json
{
@ -172,11 +166,9 @@ Paginating through a list of rooms:
```
GET /_synapse/admin/v1/rooms?order_by=size
{}
```
Response:
A response body like the following is returned:
```jsonc
{
@ -228,11 +220,9 @@ parameter to the value of `next_token`.
```
GET /_synapse/admin/v1/rooms?order_by=size&from=100
{}
```
Response:
A response body like the following is returned:
```jsonc
{
@ -304,17 +294,13 @@ The following fields are possible in the JSON response body:
* `history_visibility` - Who can see the room history. One of: ["invited", "joined", "shared", "world_readable"].
* `state_events` - Total number of state_events of a room. Complexity of the room.
## Usage
A standard request:
The API is:
```
GET /_synapse/admin/v1/rooms/<room_id>
{}
```
Response:
A response body like the following is returned:
```json
{
@ -347,17 +333,13 @@ The response includes the following fields:
* `members` - A list of all the members that are present in the room, represented by their ids.
* `total` - Total number of members in the room.
## Usage
A standard request:
The API is:
```
GET /_synapse/admin/v1/rooms/<room_id>/members
{}
```
Response:
A response body like the following is returned:
```json
{
@ -378,17 +360,13 @@ The response includes the following fields:
* `state` - The current state of the room at the time of request.
## Usage
A standard request:
The API is:
```
GET /_synapse/admin/v1/rooms/<room_id>/state
{}
```
Response:
A response body like the following is returned:
```json
{
@ -432,6 +410,7 @@ DELETE /_synapse/admin/v1/rooms/<room_id>
```
with a body of:
```json
{
"new_room_user_id": "@someuser:example.com",
@ -461,7 +440,7 @@ A response body like the following is returned:
}
```
## Parameters
**Parameters**
The following parameters should be set in the URL:
@ -491,7 +470,7 @@ The following JSON body parameters are available:
The JSON body must not be empty. The body must be at least `{}`.
## Response
**Response**
The following fields are returned in the JSON response body:
@ -548,10 +527,10 @@ By default the server admin (the caller) is granted power, but another user can
optionally be specified, e.g.:
```
POST /_synapse/admin/v1/rooms/<room_id_or_alias>/make_room_admin
{
"user_id": "@foo:example.com"
}
POST /_synapse/admin/v1/rooms/<room_id_or_alias>/make_room_admin
{
"user_id": "@foo:example.com"
}
```
# Forward Extremities Admin API
@ -565,7 +544,7 @@ extremities accumulate in a room, performance can become degraded. For details,
To check the status of forward extremities for a room:
```
GET /_synapse/admin/v1/rooms/<room_id_or_alias>/forward_extremities
GET /_synapse/admin/v1/rooms/<room_id_or_alias>/forward_extremities
```
A response as follows will be returned:
@ -581,7 +560,7 @@ A response as follows will be returned:
"received_ts": 1611263016761
}
]
}
}
```
## Deleting forward extremities
@ -594,7 +573,7 @@ If a room has lots of forward extremities, the extra can be
deleted as follows:
```
DELETE /_synapse/admin/v1/rooms/<room_id_or_alias>/forward_extremities
DELETE /_synapse/admin/v1/rooms/<room_id_or_alias>/forward_extremities
```
A response as follows will be returned, indicating the amount of forward extremities


@ -45,4 +45,4 @@ Once the notice has been sent, the API will return the following response:
```
Note that server notices must be enabled in `homeserver.yaml` before this API
can be used. See [server_notices.md](../server_notices.md) for more information.
can be used. See [the server notices documentation](../server_notices.md) for more information.


@ -152,7 +152,7 @@ version of the policy. To do so:
* ensure that the consent resource is configured, as in the previous section
* ensure that server notices are configured, as in [server_notices.md](server_notices.md).
* ensure that server notices are configured, as in [the server notice documentation](server_notices.md).
* Add `server_notice_content` under `user_consent` in `homeserver.yaml`. For
example:


@ -74,7 +74,7 @@ We no longer actively recommend against using a reverse proxy. Many admins will
find it easier to direct federation traffic to a reverse proxy and manage their
own TLS certificates, and this is a supported configuration.
See [reverse_proxy.md](reverse_proxy.md) for information on setting up a
See [the reverse proxy documentation](reverse_proxy.md) for information on setting up a
reverse proxy.
### Do I still need to give my TLS certificates to Synapse if I am using a reverse proxy?


@ -14,7 +14,7 @@ you set the `server_name` to match your machine's public DNS hostname.
For this default configuration to work, you will need to listen for TLS
connections on port 8448. The preferred way to do that is by using a
reverse proxy: see [reverse_proxy.md](reverse_proxy.md) for instructions
reverse proxy: see [the reverse proxy documentation](reverse_proxy.md) for instructions
on how to correctly set one up.
In some cases you might not want to run Synapse on the machine that has
@ -23,7 +23,7 @@ traffic to use a different port than 8448. For example, you might want to
have your user names look like `@user:example.com`, but you want to run
Synapse on `synapse.example.com` on port 443. This can be done using
delegation, which allows an admin to control where federation traffic should
be sent. See [delegate.md](delegate.md) for instructions on how to set this up.
be sent. See [the delegation documentation](delegate.md) for instructions on how to set this up.
Once federation has been configured, you should be able to join a room over
federation. A good place to start is `#synapse:matrix.org` - a room for
@ -44,8 +44,8 @@ a complicated dance which requires connections in both directions).
Another common problem is that people on other servers can't join rooms that
you invite them to. This can be caused by an incorrectly-configured reverse
proxy: see [reverse_proxy.md](reverse_proxy.md) for instructions on how to correctly
configure a reverse proxy.
proxy: see [the reverse proxy documentation](reverse_proxy.md) for instructions on how
to correctly configure a reverse proxy.
### Known issues


@ -14,12 +14,16 @@ The `synapse.logging.context` module provides facilities for managing
the current log context (as well as providing the `LoggingContextFilter`
class).
Deferreds make the whole thing complicated, so this document describes
Asynchronous functions make the whole thing complicated, so this document describes
how it all works, and how to write code which follows the rules.
##Logcontexts without Deferreds
In this document, "awaitable" refers to any object which can be `await`ed. In the context of
Synapse, that normally means either a coroutine or a Twisted
[`Deferred`](https://twistedmatrix.com/documents/current/api/twisted.internet.defer.Deferred.html).
In the absence of any Deferred voodoo, things are simple enough. As with
## Logcontexts without asynchronous code
In the absence of any asynchronous voodoo, things are simple enough. As with
any code of this nature, the rule is that our function should leave
things as it found them:
@ -55,126 +59,109 @@ def do_request_handling():
logger.debug("phew")
```
## Using logcontexts with Deferreds
## Using logcontexts with awaitables
Deferreds --- and in particular, `defer.inlineCallbacks` --- break the
linear flow of code so that there is no longer a single entry point
where we should set the logcontext and a single exit point where we
should remove it.
Awaitables break the linear flow of code so that there is no longer a single entry point
where we should set the logcontext and a single exit point where we should remove it.
Consider the example above, where `do_request_handling` needs to do some
blocking operation, and returns a deferred:
blocking operation, and returns an awaitable:
```python
@defer.inlineCallbacks
def handle_request(request_id):
async def handle_request(request_id):
with context.LoggingContext() as request_context:
request_context.request = request_id
yield do_request_handling()
await do_request_handling()
logger.debug("finished")
```
In the above flow:
- The logcontext is set
- `do_request_handling` is called, and returns a deferred
- `handle_request` yields the deferred
- The `inlineCallbacks` wrapper of `handle_request` returns a deferred
- `do_request_handling` is called, and returns an awaitable
- `handle_request` awaits the awaitable
- Execution of `handle_request` is suspended
So we have stopped processing the request (and will probably go on to
start processing the next), without clearing the logcontext.
To circumvent this problem, synapse code assumes that, wherever you have
a deferred, you will want to yield on it. To that end, wherever
functions return a deferred, we adopt the following conventions:
an awaitable, you will want to `await` it. To that end, wherever
functions return awaitables, we adopt the following conventions:
**Rules for functions returning deferreds:**
**Rules for functions returning awaitables:**
> - If the deferred is already complete, the function returns with the
> - If the awaitable is already complete, the function returns with the
> same logcontext it started with.
> - If the deferred is incomplete, the function clears the logcontext
> before returning; when the deferred completes, it restores the
> - If the awaitable is incomplete, the function clears the logcontext
> before returning; when the awaitable completes, it restores the
> logcontext before running any callbacks.
That sounds complicated, but actually it means a lot of code (including
the example above) "just works". There are two cases:
- If `do_request_handling` returns a completed deferred, then the
- If `do_request_handling` returns a completed awaitable, then the
logcontext will still be in place. In this case, execution will
continue immediately after the `yield`; the "finished" line will
continue immediately after the `await`; the "finished" line will
be logged against the right context, and the `with` block restores
the original context before we return to the caller.
- If the returned deferred is incomplete, `do_request_handling` clears
- If the returned awaitable is incomplete, `do_request_handling` clears
the logcontext before returning. The logcontext is therefore clear
when `handle_request` yields the deferred. At that point, the
`inlineCallbacks` wrapper adds a callback to the deferred, and
returns another (incomplete) deferred to the caller, and it is safe
to begin processing the next request.
when `handle_request` `await`s the awaitable.
Once `do_request_handling`'s deferred completes, it will reinstate
the logcontext, before running the callback added by the
`inlineCallbacks` wrapper. That callback runs the second half of
`handle_request`, so again the "finished" line will be logged
against the right context, and the `with` block restores the
original context.
Once `do_request_handling`'s awaitable completes, it will reinstate
the logcontext, before running the second half of `handle_request`,
so again the "finished" line will be logged against the right context,
and the `with` block restores the original context.
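To make these rules concrete, here is a minimal sketch of a rule-following function. The helpers `lookup_cache` and `fetch_from_database` are illustrative, not Synapse's actual API:

```python
from synapse.logging.context import make_deferred_yieldable

async def get_thing(key):
    cached = lookup_cache(key)  # illustrative synchronous helper
    if cached is not None:
        # Already complete: we return with the caller's logcontext unchanged.
        return cached
    # Incomplete: make_deferred_yieldable resets the logcontext before we
    # yield to the reactor, and restores it when the deferred completes.
    return await make_deferred_yieldable(fetch_from_database(key))
```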
As an aside, it's worth noting that `handle_request` follows our rules
-though that only matters if the caller has its own logcontext which it
- though that only matters if the caller has its own logcontext which it
cares about.
The following sections describe pitfalls and helpful patterns when
implementing these rules.
Always yield your deferreds
---------------------------
Always await your awaitables
----------------------------
Whenever you get a deferred back from a function, you should `yield` on
it as soon as possible. (Returning it directly to your caller is ok too,
if you're not doing `inlineCallbacks`.) Do not pass go; do not do any
logging; do not call any other functions.
Whenever you get an awaitable back from a function, you should `await` on
it as soon as possible. Do not pass go; do not do any logging; do not
call any other functions.
```python
@defer.inlineCallbacks
def fun():
async def fun():
logger.debug("starting")
yield do_some_stuff() # just like this
await do_some_stuff() # just like this
d = more_stuff()
result = yield d # also fine, of course
coro = more_stuff()
result = await coro # also fine, of course
return result
def nonInlineCallbacksFun():
logger.debug("just a wrapper really")
return do_some_stuff() # this is ok too - the caller will yield on
# it anyway.
```
Provided this pattern is followed all the way back up to the callchain
to where the logcontext was set, this will make things work out ok:
provided `do_some_stuff` and `more_stuff` follow the rules above, then
so will `fun` (as wrapped by `inlineCallbacks`) and
`nonInlineCallbacksFun`.
so will `fun`.
It's all too easy to forget to `yield`: for instance if we forgot that
`do_some_stuff` returned a deferred, we might plough on regardless. This
It's all too easy to forget to `await`: for instance if we forgot that
`do_some_stuff` returned an awaitable, we might plough on regardless. This
leads to a mess; it will probably work itself out eventually, but not
before a load of stuff has been logged against the wrong context.
(Normally, other things will break, more obviously, if you forget to
`yield`, so this tends not to be a major problem in practice.)
`await`, so this tends not to be a major problem in practice.)
Of course sometimes you need to do something a bit fancier with your
Deferreds - not all code follows the linear A-then-B-then-C pattern.
awaitable - not all code follows the linear A-then-B-then-C pattern.
Notes on implementing more complex patterns are in later sections.
## Where you create a new Deferred, make it follow the rules
## Where you create a new awaitable, make it follow the rules
Most of the time, a Deferred comes from another synapse function.
Sometimes, though, we need to make up a new Deferred, or we get a
Deferred back from external code. We need to make it follow our rules.
Most of the time, an awaitable comes from another synapse function.
Sometimes, though, we need to make up a new awaitable, or we get an awaitable
back from external code. We need to make it follow our rules.
The easy way to do it is with a combination of `defer.inlineCallbacks`,
and `context.PreserveLoggingContext`. Suppose we want to implement
The easy way to do it is by using `context.make_deferred_yieldable`. Suppose we want to implement
`sleep`, which returns a deferred which will run its callbacks after a
given number of seconds. That might look like:
@ -186,25 +173,12 @@ def get_sleep_deferred(seconds):
return d
```
That doesn't follow the rules, but we can fix it by wrapping it with
`PreserveLoggingContext` and `yield` ing on it:
That doesn't follow the rules, but we can fix it by calling it through
`context.make_deferred_yieldable`:
```python
@defer.inlineCallbacks
def sleep(seconds):
with PreserveLoggingContext():
yield get_sleep_deferred(seconds)
```
This technique works equally for external functions which return
deferreds, or deferreds we have made ourselves.
You can also use `context.make_deferred_yieldable`, which just does the
boilerplate for you, so the above could be written:
```python
def sleep(seconds):
return context.make_deferred_yieldable(get_sleep_deferred(seconds))
async def sleep(seconds):
return await context.make_deferred_yieldable(get_sleep_deferred(seconds))
```
## Fire-and-forget
@ -213,20 +187,18 @@ Sometimes you want to fire off a chain of execution, but not wait for
its result. That might look a bit like this:
```python
@defer.inlineCallbacks
def do_request_handling():
yield foreground_operation()
async def do_request_handling():
await foreground_operation()
# *don't* do this
background_operation()
logger.debug("Request handling complete")
@defer.inlineCallbacks
def background_operation():
yield first_background_step()
async def background_operation():
await first_background_step()
logger.debug("Completed first step")
yield second_background_step()
await second_background_step()
logger.debug("Completed second step")
```
@ -235,13 +207,13 @@ The above code does a couple of steps in the background after
against the `request_context` logcontext, which may or may not be
desirable. There are two big problems with the above, however. The first
problem is that, if `background_operation` returns an incomplete
Deferred, it will expect its caller to `yield` immediately, so will have
awaitable, it will expect its caller to `await` immediately, so will have
cleared the logcontext. In this example, that means that 'Request
handling complete' will be logged without any context.
The second problem, which is potentially even worse, is that when the
Deferred returned by `background_operation` completes, it will restore
the original logcontext. There is nothing waiting on that Deferred, so
awaitable returned by `background_operation` completes, it will restore
the original logcontext. There is nothing waiting on that awaitable, so
the logcontext will leak into the reactor and possibly get attached to
some arbitrary future operation.
@ -254,9 +226,8 @@ deferred completes will be the empty logcontext), and will restore the
current logcontext before continuing the foreground process:
```python
@defer.inlineCallbacks
def do_request_handling():
yield foreground_operation()
async def do_request_handling():
await foreground_operation()
# start background_operation off in the empty logcontext, to
# avoid leaking the current context into the reactor.
@ -274,16 +245,15 @@ Obviously that option means that the operations done in
The second option is to use `context.run_in_background`, which wraps a
function so that it doesn't reset the logcontext even when it returns
an incomplete deferred, and adds a callback to the returned deferred to
an incomplete awaitable, and adds a callback to the returned awaitable to
reset the logcontext. In other words, it turns a function that follows
the Synapse rules about logcontexts and Deferreds into one which behaves
the Synapse rules about logcontexts and awaitables into one which behaves
more like an external function --- the opposite operation to that
described in the previous section. It can be used like this:
```python
@defer.inlineCallbacks
def do_request_handling():
yield foreground_operation()
async def do_request_handling():
await foreground_operation()
context.run_in_background(background_operation)
@ -294,152 +264,53 @@ def do_request_handling():
## Passing synapse deferreds into third-party functions
A typical example of this is where we want to collect together two or
more deferred via `defer.gatherResults`:
more awaitables via `defer.gatherResults`:
```python
d1 = operation1()
d2 = operation2()
d3 = defer.gatherResults([d1, d2])
a1 = operation1()
a2 = operation2()
a3 = defer.gatherResults([a1, a2])
```
This is really a variation of the fire-and-forget problem above, in that
we are firing off `d1` and `d2` without yielding on them. The difference
we are firing off `a1` and `a2` without awaiting on them. The difference
is that we now have third-party code attached to their callbacks. Anyway,
either technique given in the [Fire-and-forget](#fire-and-forget)
section will work.
Of course, the new Deferred returned by `gatherResults` needs to be
Of course, the new awaitable returned by `gatherResults` needs to be
wrapped in order to make it follow the logcontext rules before we can
yield it, as described in [Where you create a new Deferred, make it
yield it, as described in [Where you create a new awaitable, make it
follow the
rules](#where-you-create-a-new-deferred-make-it-follow-the-rules).
rules](#where-you-create-a-new-awaitable-make-it-follow-the-rules).
So, option one: reset the logcontext before starting the operations to
be gathered:
```python
@defer.inlineCallbacks
def do_request_handling():
async def do_request_handling():
with PreserveLoggingContext():
d1 = operation1()
d2 = operation2()
result = yield defer.gatherResults([d1, d2])
a1 = operation1()
a2 = operation2()
result = await defer.gatherResults([a1, a2])
```
In this case particularly, though, option two, of using
`context.preserve_fn` almost certainly makes more sense, so that
`context.run_in_background` almost certainly makes more sense, so that
`operation1` and `operation2` are both logged against the original
logcontext. This looks like:
```python
@defer.inlineCallbacks
def do_request_handling():
d1 = context.preserve_fn(operation1)()
d2 = context.preserve_fn(operation2)()
async def do_request_handling():
a1 = context.run_in_background(operation1)
a2 = context.run_in_background(operation2)
with PreserveLoggingContext():
result = yield defer.gatherResults([d1, d2])
result = await make_deferred_yieldable(defer.gatherResults([a1, a2]))
```
## Was all this really necessary?
## A note on garbage-collection of awaitable chains
The conventions used work fine for a linear flow where everything
happens in series via `defer.inlineCallbacks` and `yield`, but are
certainly tricky to follow for any more exotic flows. It's hard not to
wonder if we could have done something else.
We're not going to rewrite Synapse now, so the following is entirely of
academic interest, but I'd like to record some thoughts on an
alternative approach.
I briefly prototyped some code following an alternative set of rules. I
think it would work, but I certainly didn't get as far as thinking how
it would interact with concepts as complicated as the cache descriptors.
My alternative rules were:
- functions always preserve the logcontext of their caller, whether or
not they are returning a Deferred.
- Deferreds returned by synapse functions run their callbacks in the
same context as the function was originally called in.
The main point of this scheme is that everywhere that sets the
logcontext is responsible for clearing it before returning control to
the reactor.
So, for example, if you were the function which started a
`with LoggingContext` block, you wouldn't `yield` within it --- instead
you'd start off the background process, and then leave the `with` block
to wait for it:
```python
def handle_request(request_id):
with context.LoggingContext() as request_context:
request_context.request = request_id
d = do_request_handling()
def cb(r):
logger.debug("finished")
d.addCallback(cb)
return d
```
(in general, mixing `with LoggingContext` blocks and
`defer.inlineCallbacks` in the same function leads to slightly
counter-intuitive code, under this scheme).
Because we leave the original `with` block as soon as the Deferred is
returned (as opposed to waiting for it to be resolved, as we do today),
the logcontext is cleared before control passes back to the reactor; so
if there is some code within `do_request_handling` which needs to wait
for a Deferred to complete, there is no need for it to worry about
clearing the logcontext before doing so:
```python
def handle_request():
r = do_some_stuff()
r.addCallback(do_some_more_stuff)
return r
```
--- and provided `do_some_stuff` follows the rules of returning a
Deferred which runs its callbacks in the original logcontext, all is
happy.
The business of a Deferred which runs its callbacks in the original
logcontext isn't hard to achieve --- we have it today, in the shape of
`context._PreservingContextDeferred`:
```python
def do_some_stuff():
deferred = do_some_io()
pcd = _PreservingContextDeferred(LoggingContext.current_context())
deferred.chainDeferred(pcd)
return pcd
```
It turns out that, thanks to the way that Deferreds chain together, we
automatically get the property of a context-preserving deferred with
`defer.inlineCallbacks`, provided the final Deferred the function
`yields` on has that property. So we can just write:
```python
@defer.inlineCallbacks
def handle_request():
yield do_some_stuff()
yield do_some_more_stuff()
```
To conclude: I think this scheme would have worked equally well, with
less danger of messing it up, and probably made some more esoteric code
easier to write. But again --- changing the conventions of the entire
Synapse codebase is not a sensible option for the marginal improvement
offered.
## A note on garbage-collection of Deferred chains
It turns out that our logcontext rules do not play nicely with Deferred
It turns out that our logcontext rules do not play nicely with awaitable
chains which get orphaned and garbage-collected.
Imagine we have some code that looks like this:
@ -451,13 +322,12 @@ def on_something_interesting():
for d in listener_queue:
d.callback("foo")
@defer.inlineCallbacks
def await_something_interesting():
new_deferred = defer.Deferred()
listener_queue.append(new_deferred)
async def await_something_interesting():
new_awaitable = defer.Deferred()
listener_queue.append(new_awaitable)
with PreserveLoggingContext():
yield new_deferred
await new_awaitable
```
Obviously, the idea here is that we have a bunch of things which are
@ -476,18 +346,19 @@ def reset_listener_queue():
listener_queue.clear()
```
So, both ends of the deferred chain have now dropped their references,
and the deferred chain is now orphaned, and will be garbage-collected at
some point. Note that `await_something_interesting` is a generator
function, and when Python garbage-collects generator functions, it gives
them a chance to clean up by making the `yield` raise a `GeneratorExit`
So, both ends of the awaitable chain have now dropped their references,
and the awaitable chain is now orphaned, and will be garbage-collected at
some point. Note that `await_something_interesting` is a coroutine,
which Python implements as a generator function. When Python
garbage-collects generator functions, it gives them a chance to
clean up by making the `await` (or `yield`) raise a `GeneratorExit`
exception. In our case, that means that the `__exit__` handler of
`PreserveLoggingContext` will carefully restore the request context, but
there is now nothing waiting for its return, so the request context is
never cleared.
To reiterate, this problem only arises when *both* ends of a deferred
chain are dropped. Dropping the the reference to a deferred you're
supposed to be calling is probably bad practice, so this doesn't
To reiterate, this problem only arises when *both* ends of an awaitable
chain are dropped. Dropping the reference to an awaitable you're
supposed to be awaiting is bad practice, so this doesn't
actually happen too much. Unfortunately, when it does happen, it will
lead to leaked logcontexts which are incredibly hard to track down.


@ -28,7 +28,7 @@ minimal.
### The Replication Protocol
See [tcp_replication.md](tcp_replication.md)
See [the TCP replication documentation](tcp_replication.md).
### The Slaved DataStore


@ -21,7 +21,7 @@ port 8448. Where these are different, we refer to the 'client port' and the
'federation port'. See [the Matrix
specification](https://matrix.org/docs/spec/server_server/latest#resolving-server-names)
for more details of the algorithm used for federation connections, and
[delegate.md](delegate.md) for instructions on setting up delegation.
[Delegation](delegate.md) for instructions on setting up delegation.
**NOTE**: Your reverse proxy must not `canonicalise` or `normalise`
the requested URI in any way (for example, by decoding `%xx` escapes).
@ -98,6 +98,33 @@ example.com:8448 {
reverse_proxy http://localhost:8008
}
```
[Delegation](delegate.md) example:
```
(matrix-well-known-header) {
# Headers
header Access-Control-Allow-Origin "*"
header Access-Control-Allow-Methods "GET, POST, PUT, DELETE, OPTIONS"
header Access-Control-Allow-Headers "Origin, X-Requested-With, Content-Type, Accept, Authorization"
header Content-Type "application/json"
}
example.com {
handle /.well-known/matrix/server {
import matrix-well-known-header
respond `{"m.server":"matrix.example.com:443"}`
}
handle /.well-known/matrix/client {
import matrix-well-known-header
respond `{"m.homeserver":{"base_url":"https://matrix.example.com"},"m.identity_server":{"base_url":"https://identity.example.com"}}`
}
}
matrix.example.com {
reverse_proxy /_matrix/* http://localhost:8008
reverse_proxy /_synapse/client/* http://localhost:8008
}
```
### Apache


@ -1,9 +1,9 @@
Room and User Statistics
========================
Synapse maintains room and user statistics (as well as a cache of room state),
in various tables. These can be used for administrative purposes but are also
used when generating the public room directory.
Synapse maintains room and user statistics in various tables. These can be used
for administrative purposes but are also used when generating the public room
directory.
# Synapse Developer Documentation
@ -15,48 +15,8 @@ used when generating the public room directory.
* **subject**: Something we are tracking stats about currently a room or user.
* **current row**: An entry for a subject in the appropriate current statistics
table. Each subject can have only one.
* **historical row**: An entry for a subject in the appropriate historical
statistics table. Each subject can have any number of these.
### Overview
Stats are maintained as time series. There are two kinds of column:
* absolute columns where the value is correct for the time given by `end_ts`
in the stats row. (Imagine a line graph for these values)
* They can also be thought of as 'gauges' in Prometheus, if you are familiar.
* per-slice columns where the value corresponds to how many of the occurrences
occurred within the time slice given by `(end_ts - bucket_size)…end_ts`
or `start_ts…end_ts`. (Imagine a histogram for these values)
Stats are maintained in two tables (for each type): current and historical.
Current stats correspond to the present values. Each subject can only have one
entry.
Historical stats correspond to values in the past. Subjects may have multiple
entries.
## Concepts around the management of stats
### Current rows
Current rows contain the most up-to-date statistics for a room.
They only contain absolute columns
### Historical rows
Historical rows can always be considered to be valid for the time slice and
end time specified.
* historical rows will not exist for every time slice; they will be omitted
if there were no changes. In this case, the following assumptions can be
made to interpolate/recreate missing rows:
- absolute fields have the same values as in the preceding row
- per-slice fields are zero (`0`)
* historical rows will not be retained forever; rows older than a configurable
time will be purged.
#### Purge
The purging of historical rows is not yet implemented.
Stats correspond to the present values. Current rows contain the most up-to-date
statistics for a room. Each subject can only have one entry.


@ -2653,11 +2653,6 @@ stats:
#
#enabled: false
# The size of each timeslice in the room_stats_historical and
# user_stats_historical tables, as a time period. Defaults to "1d".
#
#bucket_size: 1h
# Server Notices room configuration
#


@ -3,8 +3,8 @@
'Server Notices' are a new feature introduced in Synapse 0.30. They provide a
channel whereby server administrators can send messages to users on the server.
They are used as part of communication of the server policies (see
[consent_tracking.md](consent_tracking.md)), however the intention is that
They are used as part of communication of the server policies (see
[Consent Tracking](consent_tracking.md)), however the intention is that
they may also find a use for features such as "Message of the day".
This is a feature specific to Synapse, but it uses standard Matrix


@ -166,13 +166,16 @@ sudo dnf groupinstall "Development Tools"
Installing prerequisites on macOS:
You may need to install the latest Xcode developer tools:
```sh
xcode-select --install
sudo easy_install pip
sudo pip install virtualenv
brew install pkg-config libffi
```
On ARM-based Macs you may need to explicitly install libjpeg, which is a Pillow dependency. You can use Homebrew (https://brew.sh):
```sh
brew install jpeg
```
On macOS Catalina (10.15) you may need to explicitly install OpenSSL
via brew and inform `pip` about it so that `psycopg2` builds:
@ -268,9 +271,8 @@ For more details, see
##### Matrix.org packages
Matrix.org provides Debian/Ubuntu packages of the latest stable version of
Synapse via <https://packages.matrix.org/debian/>. They are available for Debian
9 (Stretch), Ubuntu 16.04 (Xenial), and later. To use them:
Matrix.org provides Debian/Ubuntu packages of Synapse via
<https://packages.matrix.org/debian/>. To install the latest release:
```sh
sudo apt install -y lsb-release wget apt-transport-https
@ -281,12 +283,16 @@ sudo apt update
sudo apt install matrix-synapse-py3
```
**Note**: if you followed a previous version of these instructions which
recommended using `apt-key add` to add an old key from
`https://matrix.org/packages/debian/`, you should note that this key has been
revoked. You should remove the old key with `sudo apt-key remove
C35EB17E1EAE708E6603A9B3AD0592FE47F0DF61`, and follow the above instructions to
update your configuration.
Packages are also published for release candidates. To enable the prerelease
channel, add `prerelease` to the `sources.list` line. For example:
```sh
sudo wget -O /usr/share/keyrings/matrix-org-archive-keyring.gpg https://packages.matrix.org/debian/matrix-org-archive-keyring.gpg
echo "deb [signed-by=/usr/share/keyrings/matrix-org-archive-keyring.gpg] https://packages.matrix.org/debian/ $(lsb_release -cs) main prerelease" |
sudo tee /etc/apt/sources.list.d/matrix-org.list
sudo apt update
sudo apt install matrix-synapse-py3
```
The fingerprint of the repository signing key (as shown by `gpg
/usr/share/keyrings/matrix-org-archive-keyring.gpg`) is
@ -409,7 +415,7 @@ instead. Advantages include:
- allowing the DB to be run on separate hardware
For information on how to install and use PostgreSQL in Synapse, please see
[docs/postgres.md](../postgres.md)
[Using Postgres](../postgres.md)
SQLite is only acceptable for testing purposes. SQLite should not be used in
a production server. Synapse will perform poorly when using
@ -424,7 +430,7 @@ over HTTPS.
The recommended way to do so is to set up a reverse proxy on port
`8448`. You can find documentation on doing so in
[docs/reverse_proxy.md](../reverse_proxy.md).
[the reverse proxy documentation](../reverse_proxy.md).
Alternatively, you can configure Synapse to expose an HTTPS port. To do
so, you will need to edit `homeserver.yaml`, as follows:
@ -451,7 +457,7 @@ so, you will need to edit `homeserver.yaml`, as follows:
`cert.pem`).
For a more detailed guide to configuring your server for federation, see
[federate.md](../federate.md).
[Federation](../federate.md).
### Client Well-Known URI
@ -563,9 +569,7 @@ on your server even if `enable_registration` is `false`.
### Setting up a TURN server
For reliable VoIP calls to be routed via this homeserver, you MUST configure
a TURN server. See
[docs/turn-howto.md](../turn-howto.md)
for details.
a TURN server. See [TURN setup](../turn-howto.md) for details.
### URL previews


@ -1,6 +1,8 @@
**Note: this page of the Synapse documentation is now deprecated. For up to date
<h2 style="color:red">
This page of the Synapse documentation is now deprecated. For up to date
documentation on setting up or writing a spam checker module, please see
[this page](https://matrix-org.github.io/synapse/develop/modules.html).**
<a href="modules.md">this page</a>.
</h2>
# Handling spam in Synapse


@ -14,10 +14,12 @@ contains an example configuration for the `federation_reader` worker.
## Synapse configuration files
See [workers.md](../workers.md) for information on how to set up the
configuration files and reverse-proxy correctly. You can find an example worker
config in the [workers](https://github.com/matrix-org/synapse/tree/develop/docs/systemd-with-workers/workers/)
folder.
See [the worker documentation](../workers.md) for information on how to set up the
configuration files and reverse-proxy correctly.
Below is a sample `federation_reader` worker configuration file.
```yaml
{{#include workers/federation_reader.yaml}}
```
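
The `{{#include}}` above isn't rendered in this diff. As a rough sketch, a `federation_reader` worker configuration of this era looks something like the following (ports and paths are illustrative):

```yaml
worker_app: synapse.app.federation_reader
worker_name: federation_reader1

# Connection back to the main process for replication.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

worker_listeners:
  - type: http
    port: 8011
    resources:
      - names: [federation]

worker_log_config: /etc/matrix-synapse/federation-reader-log.yaml
```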
Systemd manages daemonization itself, so ensure that none of the configuration
files set either `daemonize` or `worker_daemonize`.
@ -72,12 +74,12 @@ systemctl restart matrix-synapse.target
**Optional:** If further hardening is desired, the file
`override-hardened.conf` may be copied from
`contrib/systemd/override-hardened.conf` in this repository to the location
[contrib/systemd/override-hardened.conf](https://github.com/matrix-org/synapse/tree/develop/contrib/systemd/)
in this repository to the location
`/etc/systemd/system/matrix-synapse.service.d/override-hardened.conf` (the
directory may have to be created). It enables certain sandboxing features in
systemd to further secure the synapse service. You may read the comments to
understand what the override file is doing. The same file will need to be copied
to
understand what the override file is doing. The same file will need to be copied to
`/etc/systemd/system/matrix-synapse-worker@.service.d/override-hardened-worker.conf`
(this directory may also have to be created) in order to apply the same
hardening options to any worker processes.
@ -11,4 +11,4 @@ a fresh config using Synapse by following the instructions in
```yaml
{{#include ../../sample_log_config.yaml}}
``__`
```
@ -73,7 +73,7 @@ https://hub.docker.com/r/matrixdotorg/synapse/.
To make effective use of the workers, you will need to configure an HTTP
reverse-proxy such as nginx or haproxy, which will direct incoming requests to
the correct worker, or to the main synapse instance. See
[reverse_proxy.md](reverse_proxy.md) for information on setting up a reverse
[the reverse proxy documentation](reverse_proxy.md) for information on setting up a reverse
proxy.
When using workers, each worker process has its own configuration file which
@ -170,8 +170,8 @@ Finally, you need to start your worker processes. This can be done with either
`synctl` or your distribution's preferred service manager such as `systemd`. We
recommend the use of `systemd` where available: for information on setting up
`systemd` to start synapse workers, see
[systemd-with-workers](systemd-with-workers). To use `synctl`, see
[synctl_workers.md](synctl_workers.md).
[Systemd with Workers](systemd-with-workers). To use `synctl`, see
[Using synctl with Workers](synctl_workers.md).
## Available worker applications
@ -83,6 +83,7 @@ files =
synapse/util/stringutils.py,
synapse/visibility.py,
tests/replication,
tests/test_event_auth.py,
tests/test_utils,
tests/handlers/test_password_providers.py,
tests/rest/client/v1/test_login.py,
@ -10,6 +10,7 @@
# can be passed on the commandline for debugging.
import argparse
import json
import os
import signal
import subprocess
@ -34,6 +35,8 @@ By default, builds for all known distributions, but a list of distributions
can be passed on the commandline for debugging.
"""
projdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
class Builder(object):
def __init__(self, redirect_stdout=False):
@ -57,9 +60,6 @@ class Builder(object):
raise
def _inner_build(self, dist, skip_tests=False):
projdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
os.chdir(projdir)
tag = dist.split(":", 1)[1]
# Make the dir where the debs will live.
@ -93,6 +93,7 @@ class Builder(object):
],
stdout=stdout,
stderr=subprocess.STDOUT,
cwd=projdir,
)
container_name = "synapse_build_" + tag
@ -179,6 +180,11 @@ if __name__ == "__main__":
action="store_true",
help="skip running tests after building",
)
parser.add_argument(
"--show-dists-json",
action="store_true",
help="instead of building the packages, just list the dists to build for, as a json array",
)
parser.add_argument(
"dist",
nargs="*",
@ -186,4 +192,7 @@ if __name__ == "__main__":
help="a list of distributions to build for. Default: %(default)s",
)
args = parser.parse_args()
run_builds(dists=args.dist, jobs=args.jobs, skip_tests=args.no_check)
if args.show_dists_json:
print(json.dumps(DISTS))
else:
run_builds(dists=args.dist, jobs=args.jobs, skip_tests=args.no_check)
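
With this change, `projdir` is computed once at module scope and passed to the container build via `cwd=`, rather than `os.chdir()`-ing inside each build, and the new `--show-dists-json` flag prints the distribution list instead of building anything. A condensed, hypothetical repro of the flag's behaviour (`DISTS` here is a stand-in for the script's real distribution list):

```python
import argparse
import json

DISTS = ["debian:sid", "ubuntu:focal"]  # stand-in for the real list

parser = argparse.ArgumentParser()
parser.add_argument(
    "--show-dists-json",
    action="store_true",
    help="list the dists to build for, as a json array",
)
args = parser.parse_args(["--show-dists-json"])

if args.show_dists_json:
    # Emits a machine-readable JSON array, e.g. ["debian:sid", "ubuntu:focal"],
    # which a caller (such as CI) can parse to build a job matrix.
    print(json.dumps(DISTS))
```
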
@ -47,7 +47,7 @@ try:
except ImportError:
pass
__version__ = "1.38.0rc1"
__version__ = "1.38.0"
if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
# We import here so that we don't have to install a bunch of deps when
@ -63,9 +63,9 @@ class Auth:
self.store = hs.get_datastore()
self.state = hs.get_state_handler()
self.token_cache = LruCache(
self.token_cache: LruCache[str, Tuple[str, bool]] = LruCache(
10000, "token_cache"
) # type: LruCache[str, Tuple[str, bool]]
)
self._auth_blocking = AuthBlocking(self.hs)
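
This is the first of many hunks in this commit with the same mechanical shape: a trailing `# type:` comment becomes an inline (PEP 526) variable annotation. A generic before/after sketch of the pattern (illustrative names, not from the codebase):

```python
from typing import Dict, Tuple

# Before: the type lives in a trailing comment, which is awkward to place
# when the assignment has to be wrapped across several lines.
cache_old = {}  # type: Dict[str, Tuple[str, bool]]

# After: the same type as an inline annotation; mypy checks it identically,
# and it also becomes visible at runtime via __annotations__.
cache_new: Dict[str, Tuple[str, bool]] = {}
```
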
@ -240,6 +240,37 @@ class Auth:
except KeyError:
raise MissingClientTokenError()
async def validate_appservice_can_control_user_id(
self, app_service: ApplicationService, user_id: str
):
"""Validates that the app service is allowed to control
the given user.
Args:
app_service: The app service that controls the user
user_id: The author MXID that the app service is controlling
Raises:
AuthError: If the application service is not allowed to control the user
(user namespace regex does not match, wrong homeserver, etc)
or if the user has not been registered yet.
"""
# It's ok if the app service is trying to use the sender from their registration
if app_service.sender == user_id:
pass
# Check to make sure the app service is allowed to control the user
elif not app_service.is_interested_in_user(user_id):
raise AuthError(
403,
"Application service cannot masquerade as this user (%s)." % user_id,
)
# Check to make sure the user is already registered on the homeserver
elif not (await self.store.get_user_by_id(user_id)):
raise AuthError(
403, "Application service has not registered this user (%s)" % user_id
)
async def _get_appservice_user_id(
self, request: Request
) -> Tuple[Optional[str], Optional[ApplicationService]]:
@ -261,13 +292,11 @@ class Auth:
return app_service.sender, app_service
user_id = request.args[b"user_id"][0].decode("utf8")
await self.validate_appservice_can_control_user_id(app_service, user_id)
if app_service.sender == user_id:
return app_service.sender, app_service
if not app_service.is_interested_in_user(user_id):
raise AuthError(403, "Application service cannot masquerade as this user.")
if not (await self.store.get_user_by_id(user_id)):
raise AuthError(403, "Application service has not registered this user")
return user_id, app_service
async def get_user_by_access_token(
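
Reduced to its decision logic, the extracted check reads roughly as follows (a sketch using a free function and duck-typed `app_service`/`store` arguments, not Synapse's real `Auth` plumbing):

```python
class AuthError(Exception):
    """Stub with the same shape as Synapse's AuthError, for this sketch."""

    def __init__(self, code: int, msg: str):
        super().__init__(msg)
        self.code = code


async def validate_appservice_can_control_user_id(app_service, store, user_id: str) -> None:
    # An app service may always act as its own registered sender.
    if app_service.sender == user_id:
        return
    # Otherwise the user must fall within one of its user namespaces...
    if not app_service.is_interested_in_user(user_id):
        raise AuthError(403, "Application service cannot masquerade as this user (%s)." % user_id)
    # ...and must already be registered on this homeserver.
    if not await store.get_user_by_id(user_id):
        raise AuthError(403, "Application service has not registered this user (%s)" % user_id)
```

Factoring the checks out of `_get_appservice_user_id` means any other caller that lets an app service supply a `user_id` can apply the same rules with a single call.
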
@ -118,7 +118,7 @@ class RedirectException(CodeMessageException):
super().__init__(code=http_code, msg=msg)
self.location = location
self.cookies = [] # type: List[bytes]
self.cookies: List[bytes] = []
class SynapseError(CodeMessageException):
@ -160,7 +160,7 @@ class ProxiedRequestError(SynapseError):
):
super().__init__(code, msg, errcode)
if additional_fields is None:
self._additional_fields = {} # type: Dict
self._additional_fields: Dict = {}
else:
self._additional_fields = dict(additional_fields)
@ -289,7 +289,7 @@ class Filter:
room_id = None
ev_type = "m.presence"
contains_url = False
labels = [] # type: List[str]
labels: List[str] = []
else:
sender = event.get("sender", None)
if not sender:
@ -46,9 +46,7 @@ class Ratelimiter:
# * How many times an action has occurred since a point in time
# * The point in time
# * The rate_hz of this particular entry. This can vary per request
self.actions = (
OrderedDict()
) # type: OrderedDict[Hashable, Tuple[float, int, float]]
self.actions: OrderedDict[Hashable, Tuple[float, int, float]] = OrderedDict()
async def can_do_action(
self,
@ -195,7 +195,7 @@ class RoomVersions:
)
KNOWN_ROOM_VERSIONS = {
KNOWN_ROOM_VERSIONS: Dict[str, RoomVersion] = {
v.identifier: v
for v in (
RoomVersions.V1,
@ -209,4 +209,4 @@ KNOWN_ROOM_VERSIONS = {
RoomVersions.V7,
)
# Note that we do not include MSC2043 here unless it is enabled in the config.
} # type: Dict[str, RoomVersion]
}
@ -270,7 +270,7 @@ class GenericWorkerServer(HomeServer):
site_tag = port
# We always include a health resource.
resources = {"/health": HealthResource()} # type: Dict[str, IResource]
resources: Dict[str, IResource] = {"/health": HealthResource()}
for res in listener_config.http_options.resources:
for name in res.names:
@ -88,9 +88,9 @@ class ApplicationServiceApi(SimpleHttpClient):
super().__init__(hs)
self.clock = hs.get_clock()
self.protocol_meta_cache = ResponseCache(
self.protocol_meta_cache: ResponseCache[Tuple[str, str]] = ResponseCache(
hs.get_clock(), "as_protocol_meta", timeout_ms=HOUR_IN_MS
) # type: ResponseCache[Tuple[str, str]]
)
async def query_user(self, service, user_id):
if service.url is None:
@ -57,8 +57,8 @@ def load_appservices(hostname, config_files):
return []
# Dicts of value -> filename
seen_as_tokens = {} # type: Dict[str, str]
seen_ids = {} # type: Dict[str, str]
seen_as_tokens: Dict[str, str] = {}
seen_ids: Dict[str, str] = {}
appservices = []
@ -25,7 +25,7 @@ from ._base import Config, ConfigError
_CACHE_PREFIX = "SYNAPSE_CACHE_FACTOR"
# Map from canonicalised cache name to cache.
_CACHES = {} # type: Dict[str, Callable[[float], None]]
_CACHES: Dict[str, Callable[[float], None]] = {}
# a lock on the contents of _CACHES
_CACHES_LOCK = threading.Lock()
@ -157,7 +157,7 @@ class CacheConfig(Config):
self.event_cache_size = self.parse_size(
config.get("event_cache_size", _DEFAULT_EVENT_CACHE_SIZE)
)
self.cache_factors = {} # type: Dict[str, float]
self.cache_factors: Dict[str, float] = {}
cache_config = config.get("caches") or {}
self.global_factor = cache_config.get(
@ -134,9 +134,9 @@ class EmailConfig(Config):
# trusted_third_party_id_servers does not contain a scheme whereas
# account_threepid_delegate_email is expected to. Presume https
self.account_threepid_delegate_email = (
self.account_threepid_delegate_email: Optional[str] = (
"https://" + first_trusted_identity_server
) # type: Optional[str]
)
self.using_identity_server_from_trusted_list = True
else:
raise ConfigError(
@ -25,10 +25,10 @@ class ExperimentalConfig(Config):
experimental = config.get("experimental_features") or {}
# MSC2858 (multiple SSO identity providers)
self.msc2858_enabled = experimental.get("msc2858_enabled", False) # type: bool
self.msc2858_enabled: bool = experimental.get("msc2858_enabled", False)
# MSC3026 (busy presence state)
self.msc3026_enabled = experimental.get("msc3026_enabled", False) # type: bool
self.msc3026_enabled: bool = experimental.get("msc3026_enabled", False)
# MSC2716 (backfill existing history)
self.msc2716_enabled = experimental.get("msc2716_enabled", False) # type: bool
self.msc2716_enabled: bool = experimental.get("msc2716_enabled", False)
@ -22,7 +22,7 @@ class FederationConfig(Config):
def read_config(self, config, **kwargs):
# FIXME: federation_domain_whitelist needs sytests
self.federation_domain_whitelist = None # type: Optional[dict]
self.federation_domain_whitelist: Optional[dict] = None
federation_domain_whitelist = config.get("federation_domain_whitelist", None)
if federation_domain_whitelist is not None:
@ -460,7 +460,7 @@ def _parse_oidc_config_dict(
) from e
client_secret_jwt_key_config = oidc_config.get("client_secret_jwt_key")
client_secret_jwt_key = None # type: Optional[OidcProviderClientSecretJwtKey]
client_secret_jwt_key: Optional[OidcProviderClientSecretJwtKey] = None
if client_secret_jwt_key_config is not None:
keyfile = client_secret_jwt_key_config.get("key_file")
if keyfile:
@ -25,7 +25,7 @@ class PasswordAuthProviderConfig(Config):
section = "authproviders"
def read_config(self, config, **kwargs):
self.password_providers = [] # type: List[Any]
self.password_providers: List[Any] = []
providers = []
# We want to be backwards compatible with the old `ldap_config`
@ -62,7 +62,7 @@ def parse_thumbnail_requirements(thumbnail_sizes):
Dictionary mapping from media type string to list of
ThumbnailRequirement tuples.
"""
requirements = {} # type: Dict[str, List]
requirements: Dict[str, List] = {}
for size in thumbnail_sizes:
width = size["width"]
height = size["height"]
@ -141,7 +141,7 @@ class ContentRepositoryConfig(Config):
#
# We don't create the storage providers here as not all workers need
# them to be started.
self.media_storage_providers = [] # type: List[tuple]
self.media_storage_providers: List[tuple] = []
for i, provider_config in enumerate(storage_providers):
# We special case the module "file_system" so as not to need to
@ -505,7 +505,7 @@ class ServerConfig(Config):
" greater than 'allowed_lifetime_max'"
)
self.retention_purge_jobs = [] # type: List[Dict[str, Optional[int]]]
self.retention_purge_jobs: List[Dict[str, Optional[int]]] = []
for purge_job_config in retention_config.get("purge_jobs", []):
interval_config = purge_job_config.get("interval")
@ -688,23 +688,21 @@ class ServerConfig(Config):
# not included in the sample configuration file on purpose as it's a temporary
# hack, so that some users can trial the new defaults without impacting every
# user on the homeserver.
users_new_default_push_rules = (
users_new_default_push_rules: list = (
config.get("users_new_default_push_rules") or []
) # type: list
)
if not isinstance(users_new_default_push_rules, list):
raise ConfigError("'users_new_default_push_rules' must be a list")
# Turn the list into a set to improve lookup speed.
self.users_new_default_push_rules = set(
users_new_default_push_rules
) # type: set
self.users_new_default_push_rules: set = set(users_new_default_push_rules)
# Whitelist of domain names that given next_link parameters must have
next_link_domain_whitelist = config.get(
next_link_domain_whitelist: Optional[List[str]] = config.get(
"next_link_domain_whitelist"
) # type: Optional[List[str]]
)
self.next_link_domain_whitelist = None # type: Optional[Set[str]]
self.next_link_domain_whitelist: Optional[Set[str]] = None
if next_link_domain_whitelist is not None:
if not isinstance(next_link_domain_whitelist, list):
raise ConfigError("'next_link_domain_whitelist' must be a list")
@ -34,7 +34,7 @@ class SpamCheckerConfig(Config):
section = "spamchecker"
def read_config(self, config, **kwargs):
self.spam_checkers = [] # type: List[Tuple[Any, Dict]]
self.spam_checkers: List[Tuple[Any, Dict]] = []
spam_checkers = config.get("spam_checker") or []
if isinstance(spam_checkers, dict):
@ -39,7 +39,7 @@ class SSOConfig(Config):
section = "sso"
def read_config(self, config, **kwargs):
sso_config = config.get("sso") or {} # type: Dict[str, Any]
sso_config: Dict[str, Any] = config.get("sso") or {}
# The sso-specific template_dir
self.sso_template_dir = sso_config.get("template_dir")
@ -38,13 +38,9 @@ class StatsConfig(Config):
def read_config(self, config, **kwargs):
self.stats_enabled = True
self.stats_bucket_size = 86400 * 1000
stats_config = config.get("stats", None)
if stats_config:
self.stats_enabled = stats_config.get("enabled", self.stats_enabled)
self.stats_bucket_size = self.parse_duration(
stats_config.get("bucket_size", "1d")
)
if not self.stats_enabled:
logger.warning(ROOM_STATS_DISABLED_WARN)
@ -59,9 +55,4 @@ class StatsConfig(Config):
# correctly.
#
#enabled: false
# The size of each timeslice in the room_stats_historical and
# user_stats_historical tables, as a time period. Defaults to "1d".
#
#bucket_size: 1h
"""
@ -80,7 +80,7 @@ class TlsConfig(Config):
fed_whitelist_entries = []
# Support globs (*) in whitelist values
self.federation_certificate_verification_whitelist = [] # type: List[Pattern]
self.federation_certificate_verification_whitelist: List[Pattern] = []
for entry in fed_whitelist_entries:
try:
entry_regex = glob_to_regex(entry.encode("ascii").decode("ascii"))
@ -132,8 +132,8 @@ class TlsConfig(Config):
"use_insecure_ssl_client_just_for_testing_do_not_use"
)
self.tls_certificate = None # type: Optional[crypto.X509]
self.tls_private_key = None # type: Optional[crypto.PKey]
self.tls_certificate: Optional[crypto.X509] = None
self.tls_private_key: Optional[crypto.PKey] = None
def is_disk_cert_valid(self, allow_self_signed=True):
"""
@ -170,11 +170,13 @@ class Keyring:
)
self._key_fetchers = key_fetchers
self._server_queue = BatchingQueue(
self._server_queue: BatchingQueue[
_FetchKeyRequest, Dict[str, Dict[str, FetchKeyResult]]
] = BatchingQueue(
"keyring_server",
clock=hs.get_clock(),
process_batch_callback=self._inner_fetch_key_requests,
) # type: BatchingQueue[_FetchKeyRequest, Dict[str, Dict[str, FetchKeyResult]]]
)
async def verify_json_for_server(
self,
@ -330,7 +332,7 @@ class Keyring:
# First we need to deduplicate requests for the same key. We do this by
# taking the *maximum* requested `minimum_valid_until_ts` for each pair
# of server name/key ID.
server_to_key_to_ts = {} # type: Dict[str, Dict[str, int]]
server_to_key_to_ts: Dict[str, Dict[str, int]] = {}
for request in requests:
by_server = server_to_key_to_ts.setdefault(request.server_name, {})
for key_id in request.key_ids:
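
Isolated and completed, the deduplication step described in that comment looks roughly like this (plain dicts stand in for the real `_FetchKeyRequest` objects):

```python
from typing import Dict, List


def dedupe_key_requests(requests: List[dict]) -> Dict[str, Dict[str, int]]:
    # Collapse duplicate (server_name, key_id) pairs, keeping the *maximum*
    # requested minimum_valid_until_ts so every caller's freshness bound holds.
    server_to_key_to_ts: Dict[str, Dict[str, int]] = {}
    for request in requests:
        by_server = server_to_key_to_ts.setdefault(request["server_name"], {})
        for key_id in request["key_ids"]:
            existing = by_server.get(key_id, 0)
            by_server[key_id] = max(existing, request["minimum_valid_until_ts"])
    return server_to_key_to_ts
```
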
@ -355,7 +357,7 @@ class Keyring:
# We now convert the returned list of results into a map from server
# name to key ID to FetchKeyResult, to return.
to_return = {} # type: Dict[str, Dict[str, FetchKeyResult]]
to_return: Dict[str, Dict[str, FetchKeyResult]] = {}
for (request, results) in zip(deduped_requests, results_per_request):
to_return_by_server = to_return.setdefault(request.server_name, {})
for key_id, key_result in results.items():
@ -455,7 +457,7 @@ class StoreKeyFetcher(KeyFetcher):
)
res = await self.store.get_server_verify_keys(key_ids_to_fetch)
keys = {} # type: Dict[str, Dict[str, FetchKeyResult]]
keys: Dict[str, Dict[str, FetchKeyResult]] = {}
for (server_name, key_id), key in res.items():
keys.setdefault(server_name, {})[key_id] = key
return keys
@ -603,7 +605,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
).addErrback(unwrapFirstError)
)
union_of_keys = {} # type: Dict[str, Dict[str, FetchKeyResult]]
union_of_keys: Dict[str, Dict[str, FetchKeyResult]] = {}
for result in results:
for server_name, keys in result.items():
union_of_keys.setdefault(server_name, {}).update(keys)
@ -656,8 +658,8 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):
except HttpResponseException as e:
raise KeyLookupError("Remote server returned an error: %s" % (e,))
keys = {} # type: Dict[str, Dict[str, FetchKeyResult]]
added_keys = [] # type: List[Tuple[str, str, FetchKeyResult]]
keys: Dict[str, Dict[str, FetchKeyResult]] = {}
added_keys: List[Tuple[str, str, FetchKeyResult]] = []
time_now_ms = self.clock.time_msec()
@ -805,7 +807,7 @@ class ServerKeyFetcher(BaseV2KeyFetcher):
Raises:
KeyLookupError if there was a problem making the lookup
"""
keys = {} # type: Dict[str, FetchKeyResult]
keys: Dict[str, FetchKeyResult] = {}
for requested_key_id in key_ids:
# we may have found this key as a side-effect of asking for another.
@ -48,6 +48,9 @@ def check(
room_version_obj: the version of the room
event: the event being checked.
auth_events: the existing room state.
do_sig_check: True if it should be verified that the sending server
signed the event.
do_size_check: True if the size of the event fields should be verified.
Raises:
AuthError if the checks fail
@ -528,7 +531,7 @@ def _check_power_levels(
user_level = get_user_power_level(event.user_id, auth_events)
# Check other levels:
levels_to_check = [
levels_to_check: List[Tuple[str, Optional[str]]] = [
("users_default", None),
("events_default", None),
("state_default", None),
@ -536,7 +539,7 @@ def _check_power_levels(
("redact", None),
("kick", None),
("invite", None),
] # type: List[Tuple[str, Optional[str]]]
]
old_list = current_state.content.get("users", {})
for user in set(list(old_list) + list(user_list)):
@ -566,12 +569,12 @@ def _check_power_levels(
new_loc = new_loc.get(dir, {})
if level_to_check in old_loc:
old_level = int(old_loc[level_to_check]) # type: Optional[int]
old_level: Optional[int] = int(old_loc[level_to_check])
else:
old_level = None
if level_to_check in new_loc:
new_level = int(new_loc[level_to_check]) # type: Optional[int]
new_level: Optional[int] = int(new_loc[level_to_check])
else:
new_level = None
@ -105,28 +105,28 @@ class _EventInternalMetadata:
self._dict = dict(internal_metadata_dict)
# the stream ordering of this event. None, until it has been persisted.
self.stream_ordering = None # type: Optional[int]
self.stream_ordering: Optional[int] = None
# whether this event is an outlier (ie, whether we have the state at that point
# in the DAG)
self.outlier = False
out_of_band_membership = DictProperty("out_of_band_membership") # type: bool
send_on_behalf_of = DictProperty("send_on_behalf_of") # type: str
recheck_redaction = DictProperty("recheck_redaction") # type: bool
soft_failed = DictProperty("soft_failed") # type: bool
proactively_send = DictProperty("proactively_send") # type: bool
redacted = DictProperty("redacted") # type: bool
txn_id = DictProperty("txn_id") # type: str
token_id = DictProperty("token_id") # type: int
historical = DictProperty("historical") # type: bool
out_of_band_membership: bool = DictProperty("out_of_band_membership")
send_on_behalf_of: str = DictProperty("send_on_behalf_of")
recheck_redaction: bool = DictProperty("recheck_redaction")
soft_failed: bool = DictProperty("soft_failed")
proactively_send: bool = DictProperty("proactively_send")
redacted: bool = DictProperty("redacted")
txn_id: str = DictProperty("txn_id")
token_id: int = DictProperty("token_id")
historical: bool = DictProperty("historical")
# XXX: These are set by StreamWorkerStore._set_before_and_after.
# I'm pretty sure that these are never persisted to the database, so shouldn't
# be here
before = DictProperty("before") # type: RoomStreamToken
after = DictProperty("after") # type: RoomStreamToken
order = DictProperty("order") # type: Tuple[int, int]
before: RoomStreamToken = DictProperty("before")
after: RoomStreamToken = DictProperty("after")
order: Tuple[int, int] = DictProperty("order")
def get_dict(self) -> JsonDict:
return dict(self._dict)
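
The `DictProperty` attributes above behave like descriptors that proxy reads and writes through to the event's backing `_dict`; under that reading, the new inline annotations describe the type of the stored value rather than the descriptor object itself. A simplified sketch of such a descriptor (an assumption about the general technique, not Synapse's exact implementation):

```python
class DictProperty:
    """Expose one key of obj._dict as an instance attribute."""

    def __init__(self, key: str):
        self.key = key

    def __get__(self, obj, owner=None):
        if obj is None:
            return self  # accessed on the class, not an instance
        try:
            return obj._dict[self.key]
        except KeyError:
            raise AttributeError(self.key) from None

    def __set__(self, obj, value):
        obj._dict[self.key] = value


# Minimal usage mirroring the annotated style in the hunk above:
class Example:
    flag: bool = DictProperty("flag")

    def __init__(self):
        self._dict = {}
```
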
@ -132,12 +132,12 @@ class EventBuilder:
format_version = self.room_version.event_format
if format_version == EventFormatVersions.V1:
# The types of auth/prev events changes between event versions.
auth_events = await self._store.add_event_hashes(
auth_event_ids
) # type: Union[List[str], List[Tuple[str, Dict[str, str]]]]
prev_events = await self._store.add_event_hashes(
prev_event_ids
) # type: Union[List[str], List[Tuple[str, Dict[str, str]]]]
auth_events: Union[
List[str], List[Tuple[str, Dict[str, str]]]
] = await self._store.add_event_hashes(auth_event_ids)
prev_events: Union[
List[str], List[Tuple[str, Dict[str, str]]]
] = await self._store.add_event_hashes(prev_event_ids)
else:
auth_events = auth_event_ids
prev_events = prev_event_ids
@ -156,7 +156,7 @@ class EventBuilder:
# the db)
depth = min(depth, MAX_DEPTH)
event_dict = {
event_dict: Dict[str, Any] = {
"auth_events": auth_events,
"prev_events": prev_events,
"type": self.type,
@ -166,7 +166,7 @@ class EventBuilder:
"unsigned": self.unsigned,
"depth": depth,
"prev_state": [],
} # type: Dict[str, Any]
}
if self.is_state():
event_dict["state_key"] = self._state_key
@ -76,7 +76,7 @@ def load_legacy_spam_checkers(hs: "synapse.server.HomeServer"):
"""Wrapper that loads spam checkers configured using the old configuration, and
registers the spam checker hooks they implement.
"""
spam_checkers = [] # type: List[Any]
spam_checkers: List[Any] = []
api = hs.get_module_api()
for module, config in hs.config.spam_checkers:
# Older spam checkers don't accept the `api` argument, so we
@ -239,7 +239,7 @@ class SpamChecker:
will be used as the error message returned to the user.
"""
for callback in self._check_event_for_spam_callbacks:
res = await callback(event) # type: Union[bool, str]
res: Union[bool, str] = await callback(event)
if res:
return res
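
The loop treats each callback's return value as authoritative: a falsy result means "keep checking", while a truthy one, typically a string, rejects the event and becomes the user-facing error message. A hypothetical callback honouring that contract (a plain dict stands in for the real event object):

```python
from typing import Union


async def check_event_for_spam(event: dict) -> Union[bool, str]:
    # Reject events whose body trips a crude keyword filter; the returned
    # string is surfaced to the sender as the rejection reason.
    body = event.get("content", {}).get("body", "")
    if "buy cheap" in body.lower():
        return "This message has been identified as spam"
    return False
```
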