
Merge remote-tracking branch 'upstream/release-v1.69'

Tulir Asokan, 2022-10-04 15:27:39 +03:00
Commit 4b94513ae4
191 changed files with 10356 additions and 2903 deletions

.github/dependabot.yml vendored Normal file (17 changed lines)

@ -0,0 +1,17 @@
version: 2
updates:
- # "pip" is the correct setting for poetry, per https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#package-ecosystem
package-ecosystem: "pip"
directory: "/"
schedule:
interval: "weekly"
- package-ecosystem: "docker"
directory: "/docker"
schedule:
interval: "weekly"
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"


@ -0,0 +1,46 @@
name: Write changelog for dependabot PR
on:
pull_request:
types:
- opened
- reopened # For debugging!
permissions:
# Needed to be able to push the commit. See
# https://docs.github.com/en/code-security/dependabot/working-with-dependabot/automating-dependabot-with-github-actions#enable-auto-merge-on-a-pull-request
# for a similar example
contents: write
jobs:
add-changelog:
runs-on: 'ubuntu-latest'
if: ${{ github.actor == 'dependabot[bot]' }}
steps:
- uses: actions/checkout@v3
with:
ref: ${{ github.event.pull_request.head.ref }}
- name: Write, commit and push changelog
run: |
echo "${{ github.event.pull_request.title }}." > "changelog.d/${{ github.event.pull_request.number }}".misc
git add changelog.d
git config user.email "github-actions[bot]@users.noreply.github.com"
git config user.name "GitHub Actions"
git commit -m "Changelog"
git push
shell: bash
# The `git push` above does not trigger CI on the dependabot PR.
#
# By default, workflows can't trigger other workflows when they're just using the
# default `GITHUB_TOKEN` access token. (This is intended to stop you from writing
# recursive workflow loops by accident, because that'll get very expensive very
# quickly.) Instead, you have to manually call out to another workflow, or else
# make your changes (i.e. the `git push` above) using a personal access token.
# See
# https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#triggering-a-workflow-from-a-workflow
#
# I have tried and failed to find a way to trigger CI on the "merge ref" of the PR.
# See git commit history for previous attempts. If anyone desperately wants to try
# again in the future, make a matrix-bot account and use its access token to git push.
# THIS WORKFLOW HAS WRITE PERMISSIONS---do not add other jobs here unless they
# are sufficiently locked down to dependabot only as above.


@ -17,19 +17,19 @@ jobs:
steps:
- name: Set up QEMU
id: qemu
uses: docker/setup-qemu-action@v1
uses: docker/setup-qemu-action@v2
with:
platforms: arm64
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v1
uses: docker/setup-buildx-action@v2
- name: Inspect builder
run: docker buildx inspect
- name: Log in to DockerHub
uses: docker/login-action@v1
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
@ -48,7 +48,7 @@ jobs:
type=pep440,pattern={{raw}}
- name: Build and push all platforms
uses: docker/build-push-action@v2
uses: docker/build-push-action@v3
with:
push: true
labels: "gitsha1=${{ github.sha }}"


@ -17,7 +17,7 @@ jobs:
name: GitHub Pages
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Setup mdbook
uses: peaceiris/actions-mdbook@4b5ef36b314c2599664ca107bb8c02412548d79d # v1.1.14


@ -25,7 +25,7 @@ jobs:
mypy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
@ -59,7 +59,7 @@ jobs:
postgres-version: "14"
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Install Rust
uses: actions-rs/toolchain@v1
@ -133,7 +133,7 @@ jobs:
BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Install Rust
uses: actions-rs/toolchain@v1
@ -155,7 +155,7 @@ jobs:
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
@ -182,8 +182,8 @@ jobs:
database: Postgres
steps:
- name: Run actions/checkout@v2 for synapse
uses: actions/checkout@v2
- name: Run actions/checkout@v3 for synapse
uses: actions/checkout@v3
with:
path: synapse
@ -201,15 +201,16 @@ jobs:
open-issue:
if: "failure() && github.event_name != 'push' && github.event_name != 'pull_request'"
needs:
# TODO: should mypy be included here? It feels more brittle than the other two.
# TODO: should mypy be included here? It feels more brittle than the others.
- mypy
- trial
- sytest
- complement
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}


@ -11,6 +11,7 @@ on:
# we do the full build on tags.
tags: ["v*"]
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
@ -24,7 +25,7 @@ jobs:
name: "Calculate list of debian distros"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- uses: actions/setup-python@v2
- id: set-distros
run: |
@ -49,18 +50,18 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v3
with:
path: src
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v1
uses: docker/setup-buildx-action@v2
with:
install: true
- name: Set up docker layer caching
uses: actions/cache@v2
uses: actions/cache@v3
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
@ -84,7 +85,7 @@ jobs:
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
- name: Upload debs as artifacts
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: debs
path: debs/*
@ -145,7 +146,7 @@ jobs:
- name: Build sdist
run: python -m build --sdist
- uses: actions/upload-artifact@v2
- uses: actions/upload-artifact@v3
with:
name: Sdist
path: dist/*.tar.gz
@ -162,7 +163,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Download all workflow run artifacts
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
- name: Build a tarball for the debs
run: tar -cvJf debs.tar.xz debs
- name: Attach to release


@ -4,6 +4,7 @@ on:
push:
branches: ["develop", "release-*"]
pull_request:
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
@ -30,7 +31,7 @@ jobs:
check-sampleconfig:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- uses: actions/setup-python@v2
- uses: matrix-org/setup-python-poetry@v1
with:
@ -41,7 +42,7 @@ jobs:
check-schema-delta:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- uses: actions/setup-python@v2
- run: "pip install 'click==8.1.1' 'GitPython>=3.1.20'"
- run: scripts-dev/check_schema_delta.py --force-colors
@ -54,15 +55,15 @@ jobs:
lint-crlf:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Check line endings
run: scripts-dev/check_line_terminators.sh
lint-newsfile:
if: ${{ github.base_ref == 'develop' || contains(github.base_ref, 'release-') }}
if: ${{ (github.base_ref == 'develop' || contains(github.base_ref, 'release-')) && github.actor != 'dependabot[bot]' }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 0
@ -75,7 +76,7 @@ jobs:
lint-pydantic:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
ref: ${{ github.event.pull_request.head.sha }}
- uses: matrix-org/setup-python-poetry@v1
@ -89,7 +90,7 @@ jobs:
if: ${{ needs.changes.outputs.rust == 'true' }}
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Install Rust
uses: actions-rs/toolchain@v1
@ -107,7 +108,7 @@ jobs:
if: ${{ needs.changes.outputs.rust == 'true' }}
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Install Rust
uses: actions-rs/toolchain@v1
@ -140,7 +141,7 @@ jobs:
needs: linting-done
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- uses: actions/setup-python@v2
- id: get-matrix
run: .ci/scripts/calculate_jobs.py
@ -157,7 +158,7 @@ jobs:
job: ${{ fromJson(needs.calculate-test-jobs.outputs.trial_test_matrix) }}
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- run: sudo apt-get -qq install xmlsec1
- name: Set up PostgreSQL ${{ matrix.job.postgres-version }}
if: ${{ matrix.job.postgres-version }}
@ -199,7 +200,7 @@ jobs:
needs: linting-done
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Install Rust
uses: actions-rs/toolchain@v1
@ -270,7 +271,7 @@ jobs:
extras: ["all"]
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
# Install libs necessary for PyPy to build binary wheels for dependencies
- run: sudo apt-get -qq install xmlsec1 libxml2-dev libxslt-dev
- uses: matrix-org/setup-python-poetry@v1
@ -313,7 +314,7 @@ jobs:
job: ${{ fromJson(needs.calculate-test-jobs.outputs.sytest_test_matrix) }}
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Prepare test blacklist
run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers
@ -331,7 +332,7 @@ jobs:
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.job.*, ', ') }})
@ -361,7 +362,7 @@ jobs:
--health-retries 5
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- run: sudo apt-get -qq install xmlsec1 postgresql-client
- uses: matrix-org/setup-python-poetry@v1
with:
@ -402,7 +403,7 @@ jobs:
--health-retries 5
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- run: sudo apt-get -qq install xmlsec1 postgresql-client
- uses: matrix-org/setup-python-poetry@v1
with:
@ -444,8 +445,8 @@ jobs:
database: Postgres
steps:
- name: Run actions/checkout@v2 for synapse
uses: actions/checkout@v2
- name: Run actions/checkout@v3 for synapse
uses: actions/checkout@v3
with:
path: synapse
@ -473,7 +474,7 @@ jobs:
- changes
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Install Rust
uses: actions-rs/toolchain@v1


@ -15,7 +15,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Install Rust
uses: actions-rs/toolchain@v1
@ -40,7 +40,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- run: sudo apt-get -qq install xmlsec1
- name: Install Rust
@ -81,7 +81,7 @@ jobs:
- ${{ github.workspace }}:/src
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Install Rust
uses: actions-rs/toolchain@v1
@ -112,7 +112,7 @@ jobs:
if: ${{ always() }}
run: /sytest/scripts/tap_to_gha.pl /logs/results.tap
- name: Upload SyTest logs
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
@ -138,8 +138,8 @@ jobs:
database: Postgres
steps:
- name: Run actions/checkout@v2 for synapse
uses: actions/checkout@v2
- name: Run actions/checkout@v3 for synapse
uses: actions/checkout@v3
with:
path: synapse
@ -177,7 +177,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- uses: JasonEtco/create-an-issue@5d9504915f79f9cc6d791934b8ef34f2353dd74d # v2.5.0, 2020-12-06
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

.rustfmt.toml Normal file (1 changed line)

@ -0,0 +1 @@
group_imports = "StdExternalCrate"


@ -1,3 +1,108 @@
Synapse 1.69.0rc1 (2022-10-04)
==============================
Please note that legacy Prometheus metric names are now deprecated and will be removed in Synapse 1.73.0.
Server administrators should update their dashboards and alerting rules to avoid using the deprecated metric names.
See the [upgrade notes](https://matrix-org.github.io/synapse/v1.69/upgrade.html#upgrading-to-v1690) for more details.
Features
--------
- Allow application services to set the `origin_server_ts` of a state event by providing the query parameter `ts` in [`PUT /_matrix/client/r0/rooms/{roomId}/state/{eventType}/{stateKey}`](https://spec.matrix.org/v1.4/client-server-api/#put_matrixclientv3roomsroomidstateeventtypestatekey), per [MSC3316](https://github.com/matrix-org/matrix-doc/pull/3316). Contributed by @lukasdenk. ([\#11866](https://github.com/matrix-org/synapse/issues/11866))
- Allow server admins to require a manual approval process before new accounts can be used (using [MSC3866](https://github.com/matrix-org/matrix-spec-proposals/pull/3866)). ([\#13556](https://github.com/matrix-org/synapse/issues/13556))
- Exponentially backoff from backfilling the same event over and over. ([\#13635](https://github.com/matrix-org/synapse/issues/13635), [\#13936](https://github.com/matrix-org/synapse/issues/13936))
- Add cache invalidation across workers to module API. ([\#13667](https://github.com/matrix-org/synapse/issues/13667), [\#13947](https://github.com/matrix-org/synapse/issues/13947))
- Experimental implementation of [MSC3882](https://github.com/matrix-org/matrix-spec-proposals/pull/3882) to allow an existing device/session to generate a login token for use on a new device/session. ([\#13722](https://github.com/matrix-org/synapse/issues/13722), [\#13868](https://github.com/matrix-org/synapse/issues/13868))
- Experimental support for thread-specific receipts ([MSC3771](https://github.com/matrix-org/matrix-spec-proposals/pull/3771)). ([\#13782](https://github.com/matrix-org/synapse/issues/13782), [\#13893](https://github.com/matrix-org/synapse/issues/13893), [\#13932](https://github.com/matrix-org/synapse/issues/13932), [\#13937](https://github.com/matrix-org/synapse/issues/13937), [\#13939](https://github.com/matrix-org/synapse/issues/13939))
- Add experimental support for [MSC3881: Remotely toggle push notifications for another client](https://github.com/matrix-org/matrix-spec-proposals/pull/3881). ([\#13799](https://github.com/matrix-org/synapse/issues/13799), [\#13831](https://github.com/matrix-org/synapse/issues/13831), [\#13860](https://github.com/matrix-org/synapse/issues/13860))
- Keep track when an event pulled over federation fails its signature check so we can intelligently back-off in the future. ([\#13815](https://github.com/matrix-org/synapse/issues/13815))
- Improve validation for the unspecced, internal-only `_matrix/client/unstable/add_threepid/msisdn/submit_token` endpoint. ([\#13832](https://github.com/matrix-org/synapse/issues/13832))
- Faster remote room joins: record _when_ we first partial-join to a room. ([\#13892](https://github.com/matrix-org/synapse/issues/13892))
- Support a `dir` parameter on the `/relations` endpoint per [MSC3715](https://github.com/matrix-org/matrix-doc/pull/3715). ([\#13920](https://github.com/matrix-org/synapse/issues/13920))
- Ask mail servers receiving emails from Synapse to not send automatic replies (e.g. out-of-office responses). ([\#13957](https://github.com/matrix-org/synapse/issues/13957))
Bugfixes
--------
- Send push notifications for invites received over federation. ([\#13719](https://github.com/matrix-org/synapse/issues/13719), [\#14014](https://github.com/matrix-org/synapse/issues/14014))
- Fix a long-standing bug where typing events would be accepted from remote servers not present in a room. Also fix a bug where incoming typing events would cause other incoming events to get stuck during a fast join. ([\#13830](https://github.com/matrix-org/synapse/issues/13830))
- Fix a bug introduced in Synapse v1.53.0 where the experimental implementation of [MSC3715](https://github.com/matrix-org/matrix-spec-proposals/pull/3715) would give incorrect results when paginating forward. ([\#13840](https://github.com/matrix-org/synapse/issues/13840))
- Fix access token leak to logs from proxy agent. ([\#13855](https://github.com/matrix-org/synapse/issues/13855))
- Fix `have_seen_event` cache not being invalidated after we persist an event which causes inefficiency effects like extra `/state` federation calls. ([\#13863](https://github.com/matrix-org/synapse/issues/13863))
- Faster room joins: Fix a bug introduced in 1.66.0 where an error would be logged when syncing after joining a room. ([\#13872](https://github.com/matrix-org/synapse/issues/13872))
- Fix a bug introduced in 1.66.0 where some required fields in the pushrules sent to clients were not present anymore. Contributed by Nico. ([\#13904](https://github.com/matrix-org/synapse/issues/13904))
- Fix packaging to include `Cargo.lock` in `sdist`. ([\#13909](https://github.com/matrix-org/synapse/issues/13909))
- Fix a long-standing bug where device updates could cause delays sending out to-device messages over federation. ([\#13922](https://github.com/matrix-org/synapse/issues/13922))
- Fix a bug introduced in v1.68.0 where Synapse would require `setuptools_rust` at runtime, even though the package is only required at build time. ([\#13952](https://github.com/matrix-org/synapse/issues/13952))
- Fix a long-standing bug where `POST /_matrix/client/v3/keys/query` requests could result in excessively large SQL queries. ([\#13956](https://github.com/matrix-org/synapse/issues/13956))
- Fix a performance regression in the `get_users_in_room` database query. Introduced in v1.67.0. ([\#13972](https://github.com/matrix-org/synapse/issues/13972))
- Fix a bug introduced in v1.68.0 where the Rust extension wasn't built in `release` mode when using `poetry install`. ([\#14009](https://github.com/matrix-org/synapse/issues/14009))
- Do not return an unspecified `original_event` field when using the stable `/relations` endpoint. Introduced in Synapse v1.57.0. ([\#14025](https://github.com/matrix-org/synapse/issues/14025))
- Correctly handle a race with device lists when a remote user leaves during a partial join. ([\#13885](https://github.com/matrix-org/synapse/issues/13885))
- Correctly handle sending local device list updates to remote servers during a partial join. ([\#13934](https://github.com/matrix-org/synapse/issues/13934))
Improved Documentation
----------------------
- Add `worker_main_http_uri` for the worker generator bash script. ([\#13772](https://github.com/matrix-org/synapse/issues/13772))
- Update URL for the NixOS module for Synapse. ([\#13818](https://github.com/matrix-org/synapse/issues/13818))
- Fix a mistake in sso_mapping_providers.md: `map_user_attributes` is expected to return `display_name`, not `displayname`. ([\#13836](https://github.com/matrix-org/synapse/issues/13836))
- Fix a cross-link from the registration admin API to the `registration_shared_secret` configuration documentation. ([\#13870](https://github.com/matrix-org/synapse/issues/13870))
- Update the man page for the `hash_password` script to correct the default number of bcrypt rounds performed. ([\#13911](https://github.com/matrix-org/synapse/issues/13911), [\#13930](https://github.com/matrix-org/synapse/issues/13930))
- Emphasize the right reasons to use `(room_id, event_id)` in a database schema. ([\#13915](https://github.com/matrix-org/synapse/issues/13915))
- Add instruction to contributing guide for running unit tests in parallel. Contributed by @ashfame. ([\#13928](https://github.com/matrix-org/synapse/issues/13928))
- Clarify that the `auto_join_rooms` config option can also be used with Space aliases. ([\#13931](https://github.com/matrix-org/synapse/issues/13931))
- Add some cross references to worker documentation. ([\#13974](https://github.com/matrix-org/synapse/issues/13974))
- Linkify urls in config documentation. ([\#14003](https://github.com/matrix-org/synapse/issues/14003))
Deprecations and Removals
-------------------------
- Remove the `complete_sso_login` method from the Module API which was deprecated in Synapse 1.13.0. ([\#13843](https://github.com/matrix-org/synapse/issues/13843))
- Announce that legacy metric names are deprecated, will be turned off by default in Synapse v1.71.0 and removed altogether in Synapse v1.73.0. See the upgrade notes for more information. ([\#14024](https://github.com/matrix-org/synapse/issues/14024))
Internal Changes
----------------
- Speed up creation of DM rooms. ([\#13487](https://github.com/matrix-org/synapse/issues/13487), [\#13800](https://github.com/matrix-org/synapse/issues/13800))
- Port push rules to using Rust. ([\#13768](https://github.com/matrix-org/synapse/issues/13768), [\#13838](https://github.com/matrix-org/synapse/issues/13838), [\#13889](https://github.com/matrix-org/synapse/issues/13889))
- Optimise get rooms for user calls. Contributed by Nick @ Beeper (@fizzadar). ([\#13787](https://github.com/matrix-org/synapse/issues/13787))
- Update the script which makes full schema dumps. ([\#13792](https://github.com/matrix-org/synapse/issues/13792))
- Use shared methods for cache invalidation when persisting events, remove duplicate codepaths. Contributed by Nick @ Beeper (@fizzadar). ([\#13796](https://github.com/matrix-org/synapse/issues/13796))
- Improve the `synapse.api.auth.Auth` mock used in unit tests. ([\#13809](https://github.com/matrix-org/synapse/issues/13809))
- Faster Remote Room Joins: tell remote homeservers that we are unable to authorise them if they query a room which has partial state on our server. ([\#13823](https://github.com/matrix-org/synapse/issues/13823))
- Carry IdP Session IDs through user-mapping sessions. ([\#13839](https://github.com/matrix-org/synapse/issues/13839))
- Fix the release script not publishing binary wheels. ([\#13850](https://github.com/matrix-org/synapse/issues/13850))
- Raise issue if complement fails with latest deps. ([\#13859](https://github.com/matrix-org/synapse/issues/13859))
- Correct the comments in the complement dockerfile. ([\#13867](https://github.com/matrix-org/synapse/issues/13867))
- Create a new snapshot of the database schema. ([\#13873](https://github.com/matrix-org/synapse/issues/13873))
- Faster room joins: Send device list updates to most servers in rooms with partial state. ([\#13874](https://github.com/matrix-org/synapse/issues/13874), [\#14013](https://github.com/matrix-org/synapse/issues/14013))
- Add comments to the Prometheus recording rules to make it clear which set of rules you need for Grafana or Prometheus Console. ([\#13876](https://github.com/matrix-org/synapse/issues/13876))
- Only pull relevant backfill points from the database based on the current depth and limit (instead of all) every time we want to `/backfill`. ([\#13879](https://github.com/matrix-org/synapse/issues/13879))
- Faster room joins: Avoid waiting for full state when processing `/keys/changes` requests. ([\#13888](https://github.com/matrix-org/synapse/issues/13888))
- Improve backfill robustness by trying more servers when we get a `4xx` error back. ([\#13890](https://github.com/matrix-org/synapse/issues/13890))
- Fix mypy errors with canonicaljson 1.6.3. ([\#13905](https://github.com/matrix-org/synapse/issues/13905))
- Faster remote room joins: correctly handle remote device list updates during a partial join. ([\#13913](https://github.com/matrix-org/synapse/issues/13913))
- Complement image: propagate SIGTERM to all workers. ([\#13914](https://github.com/matrix-org/synapse/issues/13914))
- Update an inaccurate comment in Synapse's upsert database helper. ([\#13924](https://github.com/matrix-org/synapse/issues/13924))
- Update mypy (0.950 -> 0.981) and mypy-zope (0.3.7 -> 0.3.11). ([\#13925](https://github.com/matrix-org/synapse/issues/13925), [\#13993](https://github.com/matrix-org/synapse/issues/13993))
- Use dedicated `get_local_users_in_room(room_id)` function to find local users when calculating users to copy over during a room upgrade. ([\#13960](https://github.com/matrix-org/synapse/issues/13960))
- Refactor language in user directory `_track_user_joined_room` code to make it more clear that we use both local and remote users. ([\#13966](https://github.com/matrix-org/synapse/issues/13966))
- Revert catch-all exceptions being recorded as event pull attempt failures (only handle what we know about). ([\#13969](https://github.com/matrix-org/synapse/issues/13969))
- Speed up calculating push actions in large rooms. ([\#13973](https://github.com/matrix-org/synapse/issues/13973), [\#13992](https://github.com/matrix-org/synapse/issues/13992))
- Enable update notifications from GitHub's dependabot. ([\#13976](https://github.com/matrix-org/synapse/issues/13976))
- Prototype a workflow to automatically add changelogs to dependabot PRs. ([\#13998](https://github.com/matrix-org/synapse/issues/13998), [\#14011](https://github.com/matrix-org/synapse/issues/14011), [\#14017](https://github.com/matrix-org/synapse/issues/14017), [\#14021](https://github.com/matrix-org/synapse/issues/14021), [\#14027](https://github.com/matrix-org/synapse/issues/14027))
- Fix type annotations to be compatible with new annotations in development versions of twisted. ([\#14012](https://github.com/matrix-org/synapse/issues/14012))
- Clear out stale entries in `event_push_actions_staging` table. ([\#14020](https://github.com/matrix-org/synapse/issues/14020))
- Bump versions of GitHub actions. ([\#13978](https://github.com/matrix-org/synapse/issues/13978), [\#13979](https://github.com/matrix-org/synapse/issues/13979), [\#13980](https://github.com/matrix-org/synapse/issues/13980), [\#13982](https://github.com/matrix-org/synapse/issues/13982), [\#14015](https://github.com/matrix-org/synapse/issues/14015), [\#14019](https://github.com/matrix-org/synapse/issues/14019), [\#14022](https://github.com/matrix-org/synapse/issues/14022), [\#14023](https://github.com/matrix-org/synapse/issues/14023))
Synapse 1.68.0 (2022-09-27)
===========================

Cargo.lock generated (164 changed lines)

@ -2,6 +2,27 @@
# It is not intended for manual editing.
version = 3
[[package]]
name = "aho-corasick"
version = "0.7.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4f55bd91a0978cbfd91c457a164bab8b4001c833b7f323132c0a4e1922dd44e"
dependencies = [
"memchr",
]
[[package]]
name = "anyhow"
version = "1.0.65"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "98161a4e3e2184da77bb14f02184cdd111e83bbbcc9979dfee3c44b9a85f5602"
[[package]]
name = "arc-swap"
version = "1.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "983cd8b9d4b02a6dc6ffa557262eb5858a27a0038ffffe21a0f133eaa819a164"
[[package]]
name = "autocfg"
version = "1.1.0"
@ -81,6 +102,18 @@ version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "adab1eaa3408fb7f0c777a73e7465fd5656136fc93b670eb6df3c88c2c1344e3"
[[package]]
name = "itoa"
version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c8af84674fe1f223a982c933a0ee1086ac4d4052aa0fb8060c12c6ad838e754"
[[package]]
name = "lazy_static"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]]
name = "libc"
version = "0.2.132"
@ -97,6 +130,30 @@ dependencies = [
"scopeguard",
]
[[package]]
name = "log"
version = "0.4.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
dependencies = [
"cfg-if",
]
[[package]]
name = "memchr"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
[[package]]
name = "memoffset"
version = "0.6.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce"
dependencies = [
"autocfg",
]
[[package]]
name = "once_cell"
version = "1.13.1"
@ -137,13 +194,15 @@ dependencies = [
[[package]]
name = "pyo3"
version = "0.16.6"
version = "0.17.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0220c44442c9b239dd4357aa856ac468a4f5e1f0df19ddb89b2522952eb4c6ca"
checksum = "12f72538a0230791398a0986a6518ebd88abc3fded89007b506ed072acc831e1"
dependencies = [
"anyhow",
"cfg-if",
"indoc",
"libc",
"memoffset",
"parking_lot",
"pyo3-build-config",
"pyo3-ffi",
@ -153,9 +212,9 @@ dependencies = [
[[package]]
name = "pyo3-build-config"
version = "0.16.6"
version = "0.17.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c819d397859445928609d0ec5afc2da5204e0d0f73d6bf9e153b04e83c9cdc2"
checksum = "fc4cf18c20f4f09995f3554e6bcf9b09bd5e4d6b67c562fdfaafa644526ba479"
dependencies = [
"once_cell",
"target-lexicon",
@ -163,19 +222,30 @@ dependencies = [
[[package]]
name = "pyo3-ffi"
version = "0.16.6"
version = "0.17.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ca882703ab55f54702d7bfe1189b41b0af10272389f04cae38fe4cd56c65f75f"
checksum = "a41877f28d8ebd600b6aa21a17b40c3b0fc4dfe73a27b6e81ab3d895e401b0e9"
dependencies = [
"libc",
"pyo3-build-config",
]
[[package]]
name = "pyo3-macros"
version = "0.16.6"
name = "pyo3-log"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "568749402955ad7be7bad9a09b8593851cd36e549ac90bfd44079cea500f3f21"
checksum = "e5695ccff5060c13ca1751cf8c857a12da9b0bf0378cb071c5e0326f7c7e4c1b"
dependencies = [
"arc-swap",
"log",
"pyo3",
]
[[package]]
name = "pyo3-macros"
version = "0.17.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2e81c8d4bcc2f216dc1b665412df35e46d12ee8d3d046b381aad05f1fcf30547"
dependencies = [
"proc-macro2",
"pyo3-macros-backend",
@ -185,15 +255,25 @@ dependencies = [
[[package]]
name = "pyo3-macros-backend"
version = "0.16.6"
version = "0.17.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "611f64e82d98f447787e82b8e7b0ebc681e1eb78fc1252668b2c605ffb4e1eb8"
checksum = "85752a767ee19399a78272cc2ab625cd7d373b2e112b4b13db28de71fa892784"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "pythonize"
version = "0.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0f7f0c136f5fbc01868185eef462800e49659eb23acca83b9e884367a006acb6"
dependencies = [
"pyo3",
"serde",
]
[[package]]
name = "quote"
version = "1.0.21"
@ -212,12 +292,66 @@ dependencies = [
"bitflags",
]
[[package]]
name = "regex"
version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b"
dependencies = [
"aho-corasick",
"memchr",
"regex-syntax",
]
[[package]]
name = "regex-syntax"
version = "0.6.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244"
[[package]]
name = "ryu"
version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09"
[[package]]
name = "scopeguard"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
name = "serde"
version = "1.0.145"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "728eb6351430bccb993660dfffc5a72f91ccc1295abaa8ce19b27ebe4f75568b"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.145"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "81fa1584d3d1bcacd84c277a0dfe21f5b0f6accf4a23d04d4c6d61f1af522b4c"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "serde_json"
version = "1.0.85"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e55a28e3aaef9d5ce0506d0a14dbba8054ddc7e499ef522dd8b26859ec9d4a44"
dependencies = [
"itoa",
"ryu",
"serde",
]
[[package]]
name = "smallvec"
version = "1.9.0"
@ -245,9 +379,17 @@ dependencies = [
name = "synapse"
version = "0.1.0"
dependencies = [
"anyhow",
"blake2",
"hex",
"lazy_static",
"log",
"pyo3",
"pyo3-log",
"pythonize",
"regex",
"serde",
"serde_json",
]
[[package]]


@ -15,6 +15,9 @@ def build(setup_kwargs: Dict[str, Any]) -> None:
path=cargo_toml_path,
binding=Binding.PyO3,
py_limited_api=True,
# We force always building in release mode, as we can't tell the
# difference between using `poetry` in development vs production.
debug=False,
)
setup_kwargs.setdefault("rust_extensions", []).append(extension)
setup_kwargs["zip_safe"] = False


@ -1,7 +1,12 @@
groups:
- name: synapse
rules:
# These 3 rules are used in the included Prometheus console
###
### Prometheus Console Only
### The following rules are only needed if you use the Prometheus Console
### in contrib/prometheus/consoles/synapse.html
###
- record: 'synapse_federation_client_sent'
labels:
type: "EDU"
@ -15,7 +20,6 @@ groups:
type: "Query"
expr: 'sum(synapse_federation_client_sent_queries) by (job)'
# These 3 rules are used in the included Prometheus console
- record: 'synapse_federation_server_received'
labels:
type: "EDU"
@ -29,7 +33,6 @@ groups:
type: "Query"
expr: 'sum(synapse_federation_server_received_queries) by (job)'
# These 2 rules are used in the included Prometheus console
- record: 'synapse_federation_transaction_queue_pending'
labels:
type: "EDU"
@ -38,8 +41,16 @@ groups:
labels:
type: "PDU"
expr: 'synapse_federation_transaction_queue_pending_pdus + 0'
###
### End of 'Prometheus Console Only' rules block
###
# These 3 rules are used in the included Grafana dashboard
###
### Grafana Only
### The following rules are only needed if you use the Grafana dashboard
### in contrib/grafana/synapse.json
###
- record: synapse_storage_events_persisted_by_source_type
expr: sum without(type, origin_type, origin_entity) (synapse_storage_events_persisted_events_sep_total{origin_type="remote"})
labels:
@ -53,11 +64,11 @@ groups:
labels:
type: bridges
# This rule is used in the included Grafana dashboard
- record: synapse_storage_events_persisted_by_event_type
expr: sum without(origin_entity, origin_type) (synapse_storage_events_persisted_events_sep_total)
# This rule is used in the included Grafana dashboard
- record: synapse_storage_events_persisted_by_origin
expr: sum without(type) (synapse_storage_events_persisted_events_sep_total)
###
### End of 'Grafana Only' rules block
###


@ -7,7 +7,7 @@ You can alternatively create multiple worker configuration files with a simple `
#!/bin/bash
for i in {1..5}
do
cat << EOF >> generic_worker$i.yaml
cat << EOF > generic_worker$i.yaml
worker_app: synapse.app.generic_worker
worker_name: generic_worker$i
@ -15,6 +15,8 @@ worker_name: generic_worker$i
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_main_http_uri: http://localhost:8008/
worker_listeners:
- type: http
port: 808$i

debian/changelog vendored (8 changed lines)

@ -1,3 +1,11 @@
matrix-synapse-py3 (1.69.0~rc1) stable; urgency=medium
* The man page for the hash_password script has been updated to reflect
the correct default value of 'bcrypt_rounds'.
* New Synapse release 1.69.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 04 Oct 2022 11:17:16 +0100
matrix-synapse-py3 (1.68.0) stable; urgency=medium
* New Synapse release 1.68.0.


@ -10,7 +10,7 @@
.P
\fBhash_password\fR takes a password as an parameter either on the command line or the \fBSTDIN\fR if not supplied\.
.P
It accepts an YAML file which can be used to specify parameters like the number of rounds for bcrypt and password_config section having the pepper value used for the hashing\. By default \fBbcrypt_rounds\fR is set to \fB10\fR\.
It accepts an YAML file which can be used to specify parameters like the number of rounds for bcrypt and password_config section having the pepper value used for the hashing\. By default \fBbcrypt_rounds\fR is set to \fB12\fR\.
.P
The hashed password is written on the \fBSTDOUT\fR\.
.SH "FILES"


@ -14,7 +14,7 @@ or the `STDIN` if not supplied.
It accepts an YAML file which can be used to specify parameters like the
number of rounds for bcrypt and password_config section having the pepper
value used for the hashing. By default `bcrypt_rounds` is set to **10**.
value used for the hashing. By default `bcrypt_rounds` is set to **12**.
The hashed password is written on the `STDOUT`.
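As a rough sketch, the YAML file described above might look like the following; the pepper value is a placeholder and both keys simply mirror what the man page documents:

```yaml
# Illustrative config for hash_password; adjust the values for your deployment.
bcrypt_rounds: 12
password_config:
  pepper: "my-secret-pepper"
```

The script would then be pointed at this file via its config-file option (e.g. `hash_password -c config.yaml`).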


@ -8,19 +8,15 @@
ARG SYNAPSE_VERSION=latest
# first of all, we create a base image with a postgres server and database,
# which we can copy into the target image. For repeated rebuilds, this is
# much faster than apt installing postgres each time.
#
# This trick only works because (a) the Synapse image happens to have all the
# shared libraries that postgres wants, (b) we use a postgres image based on
# the same debian version as Synapse's docker image (so the versions of the
# shared libraries match).
# now build the final image, based on the Synapse image.
FROM matrixdotorg/synapse-workers:$SYNAPSE_VERSION
# copy the postgres installation over from the image we built above
# First of all, we copy postgres server from the official postgres image,
# since for repeated rebuilds, this is much faster than apt installing
# postgres each time.
# This trick only works because (a) the Synapse image happens to have all the
# shared libraries that postgres wants, (b) we use a postgres image based on
# the same debian version as Synapse's docker image (so the versions of the
# shared libraries match).
RUN adduser --system --uid 999 postgres --home /var/lib/postgresql
COPY --from=postgres:13-bullseye /usr/lib/postgresql /usr/lib/postgresql
COPY --from=postgres:13-bullseye /usr/share/postgresql /usr/share/postgresql
@ -28,7 +24,7 @@ FROM matrixdotorg/synapse-workers:$SYNAPSE_VERSION
ENV PATH="${PATH}:/usr/lib/postgresql/13/bin"
ENV PGDATA=/var/lib/postgresql/data
# initialise the database cluster in /var/lib/postgresql
# We also initialize the database at build time, rather than runtime, so that it's faster to spin up the image.
RUN gosu postgres initdb --locale=C --encoding=UTF-8 --auth-host password
# Configure a password and create a database for Synapse


@ -5,7 +5,7 @@ non-interactive way. This is generally used for bootstrapping a Synapse
instance with administrator accounts.
To authenticate yourself to the server, you will need both the shared secret
([`registration_shared_secret`](../configuration/config_documentation.md#registration_shared_secret)
([`registration_shared_secret`](../usage/configuration/config_documentation.md#registration_shared_secret)
in the homeserver configuration), and a one-time nonce. If the registration
shared secret is not configured, this API is not enabled.
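For completeness, a minimal sketch of the corresponding homeserver configuration (the secret itself is a placeholder you should generate yourself):

```yaml
# Enables the shared-secret registration API described above.
registration_shared_secret: "<a long, random, secret string>"
```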


@ -167,6 +167,12 @@ was broken. They are slower than the linters but will typically catch more error
poetry run trial tests
```
You can run unit tests in parallel by specifying the `-jX` argument to `trial`, where `X` is the number of parallel runners you want. To use 4 CPU cores, you would run them like:
```sh
poetry run trial -j4 tests
```
If you wish to only run *some* unit tests, you may specify
another module instead of `tests` - or a test class or a method:


@ -195,23 +195,24 @@ There are three separate aspects to this:
## `event_id` global uniqueness
In room versions `1` and `2` it's possible to end up with two events with the
same `event_id` (in the same or different rooms). After room version `3`, that
can only happen with a hash collision, which we basically hope will never
happen.
There are several places in Synapse and even Matrix APIs like [`GET
`event_id`'s can be considered globally unique although there has been a lot of
debate on this topic in places like
[MSC2779](https://github.com/matrix-org/matrix-spec-proposals/issues/2779) and
[MSC2848](https://github.com/matrix-org/matrix-spec-proposals/pull/2848) which
has no resolution yet (as of 2022-09-01). There are several places in Synapse
and even in the Matrix APIs like [`GET
/_matrix/federation/v1/event/{eventId}`](https://spec.matrix.org/v1.1/server-server-api/#get_matrixfederationv1eventeventid)
where we assume that event IDs are globally unique.
But hash collisions are still possible, and by treating event IDs as room
scoped, we can reduce the possibility of a hash collision. When scoping
`event_id` in the database schema, it should be also accompanied by `room_id`
(`PRIMARY KEY (room_id, event_id)`) and lookups should be done through the pair
`(room_id, event_id)`.
When scoping `event_id` in a database schema, it is often nice to accompany it
with `room_id` (`PRIMARY KEY (room_id, event_id)` and a `FOREIGN KEY(room_id)
REFERENCES rooms(room_id)`) which makes flexible lookups easy. For example it
makes it very easy to find and clean up everything in a room when it needs to be
purged (no need to use sub-`select` query or join from the `events` table).
A note on collisions: In room versions `1` and `2` it's possible to end up with
two events with the same `event_id` (in the same or different rooms). After room
version `3`, that can only happen with a hash collision, which we basically hope
will never happen (SHA256 has a massive key space).
There has been a lot of debate on this in places like
https://github.com/matrix-org/matrix-spec-proposals/issues/2779 and
[MSC2848](https://github.com/matrix-org/matrix-spec-proposals/pull/2848) which
has no resolution yet (as of 2022-09-01).


@ -135,6 +135,8 @@ Synapse 1.2 updates the Prometheus metrics to match the naming
convention of the upstream `prometheus_client`. The old names are
considered deprecated and will be removed in a future version of
Synapse.
**The old names will be disabled by default in Synapse v1.71.0 and removed
altogether in Synapse v1.73.0.**
| New Name | Old Name |
| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------- |
@ -146,6 +148,13 @@ Synapse.
| synapse_federation_client_events_processed_total | synapse_federation_client_events_processed |
| synapse_event_processing_loop_count_total | synapse_event_processing_loop_count |
| synapse_event_processing_loop_room_count_total | synapse_event_processing_loop_room_count |
| synapse_util_caches_cache_hits | synapse_util_caches_cache:hits |
| synapse_util_caches_cache_size | synapse_util_caches_cache:size |
| synapse_util_caches_cache_evicted_size | synapse_util_caches_cache:evicted_size |
| synapse_util_caches_cache | synapse_util_caches_cache:total |
| synapse_util_caches_response_cache_size | synapse_util_caches_response_cache:size |
| synapse_util_caches_response_cache_hits | synapse_util_caches_response_cache:hits |
| synapse_util_caches_response_cache_evicted_size | synapse_util_caches_response_cache:evicted_size |
| synapse_util_metrics_block_count_total | synapse_util_metrics_block_count |
| synapse_util_metrics_block_time_seconds_total | synapse_util_metrics_block_time_seconds |
| synapse_util_metrics_block_ru_utime_seconds_total | synapse_util_metrics_block_ru_utime_seconds |
@ -261,7 +270,7 @@ Standard Metric Names
As of synapse version 0.18.2, the format of the process-wide metrics has
been changed to fit prometheus standard naming conventions. Additionally
the units have been changed to seconds, from miliseconds.
the units have been changed to seconds, from milliseconds.
| New name | Old name |
| ---------------------------------------- | --------------------------------- |


@ -181,7 +181,7 @@ doas pkg_add synapse
#### NixOS
Robin Lambertz has packaged Synapse for NixOS at:
<https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/misc/matrix-synapse.nix>
<https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/matrix/synapse.nix>
### Installing as a Python module from PyPI


@ -73,8 +73,8 @@ A custom mapping provider must specify the following methods:
* `async def map_user_attributes(self, userinfo, token, failures)`
- This method must be async.
- Arguments:
- `userinfo` - A `authlib.oidc.core.claims.UserInfo` object to extract user
information from.
- `userinfo` - An [`authlib.oidc.core.claims.UserInfo`](https://docs.authlib.org/en/latest/specs/oidc.html#authlib.oidc.core.UserInfo)
object to extract user information from.
- `token` - A dictionary which includes information necessary to make
further requests to the OpenID provider.
- `failures` - An `int` that represents the amount of times the returned
@ -91,7 +91,13 @@ A custom mapping provider must specify the following methods:
`None`, the user is prompted to pick their own username. This is only used
during a user's first login. Once a localpart has been associated with a
remote user ID (see `get_remote_user_id`) it cannot be updated.
- `displayname`: An optional string, the display name for the user.
- `confirm_localpart`: A boolean. If set to `True`, when a `localpart`
string is returned from this method, Synapse will prompt the user to
either accept this localpart or pick their own username. Otherwise this
option has no effect. If omitted, defaults to `False`.
- `display_name`: An optional string, the display name for the user.
- `emails`: A list of strings, the email address(es) to associate with
this user. If omitted, defaults to an empty list.
* `async def get_extra_attributes(self, userinfo, token)`
- This method must be async.
- Arguments:


@ -0,0 +1,14 @@
worker_app: synapse.app.media_repository
worker_name: media_worker
# The replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_listeners:
- type: http
port: 8085
resources:
- names: [media]
worker_log_config: /etc/matrix-synapse/media-worker-log.yaml


@ -88,13 +88,70 @@ process, for example:
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```
# Upgrading to v1.69.0
## Changes to the receipts replication streams
Synapse now includes information indicating if a receipt applies to a thread when
replicating it to other workers. This is a forwards- and backwards-incompatible
change: v1.68 and earlier workers cannot process receipts replicated by v1.69 workers, and
vice versa.
Once all workers are upgraded to v1.69 (or downgraded to v1.68), receipts
replication will resume as normal.
## Deprecation of legacy Prometheus metric names
In current versions of Synapse, some Prometheus metrics are emitted under two different names,
with one of the names being older but non-compliant with OpenMetrics and Prometheus conventions
and one of the names being newer but compliant.
Synapse v1.71.0 will turn the old metric names off *by default*.
For administrators who still rely on them and have not had a chance to update their
uses of the metrics, it's possible to specify `enable_legacy_metrics: true` in
the configuration to re-enable them temporarily.
Synapse v1.73.0 will **remove legacy metric names altogether** and it will no longer
be possible to re-enable them.
The Grafana dashboard, Prometheus recording rules and Prometheus Consoles included
in the `contrib` directory in the Synapse repository have been updated to no longer
rely on the legacy names. These can be used on a current version of Synapse
because current versions of Synapse emit both old and new names.
You may need to update your alerting rules or any other rules that depend on
the names of Prometheus metrics.
If you want to test your changes before legacy names are disabled by default,
you may specify `enable_legacy_metrics: false` in your homeserver configuration.
A list of affected metrics is available on the [Metrics How-to page](https://matrix-org.github.io/synapse/v1.69/metrics-howto.html?highlight=metrics%20deprecated#renaming-of-metrics--deprecation-of-old-names-in-12).
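As a sketch of the temporary opt-out described above, re-enabling the legacy names once they are off by default is a single homeserver option:

```yaml
# Keep publishing the deprecated metric names while dashboards and alerting
# rules are migrated. This escape hatch is removed entirely in Synapse v1.73.0.
enable_legacy_metrics: true
```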
# Upgrading to v1.68.0
As announced in the upgrade notes for v1.67.0, Synapse now requires a SQLite
version of 3.27.0 or higher if SQLite is in use and source checkouts of Synapse
now require a recent Rust compiler.
Two changes announced in the upgrade notes for v1.67.0 have now landed in v1.68.0.
Installations using
## SQLite version requirement
Synapse now requires a SQLite version of 3.27.0 or higher if SQLite is configured as
Synapse's database.
Installations using
- Docker images [from `matrixdotorg`](https://hub.docker.com/r/matrixdotorg/synapse),
- Debian packages [from Matrix.org](https://packages.matrix.org/), or
- a PostgreSQL database
are not affected.
## Rust requirement when building from source.
Building from a source checkout of Synapse now requires a recent Rust compiler
(currently Rust 1.58.1, but see also the
[Platform Dependency Policy](https://matrix-org.github.io/synapse/latest/deprecation_policy.html)).
Installations using
- Docker images [from `matrixdotorg`](https://hub.docker.com/r/matrixdotorg/synapse),
- Debian packages [from Matrix.org](https://packages.matrix.org/), or
@ -134,12 +191,12 @@ The simplest way of installing Rust is via [rustup.rs](https://rustup.rs/)
## SQLite version requirement in the next release
From the next major release (v1.68.0) Synapse will require SQLite 3.27.0 or
From the next major release (v1.68.0) Synapse will require SQLite 3.27.0 or
higher. Synapse v1.67.0 will be the last major release supporting SQLite
versions 3.22 to 3.26.
Those using Docker images or Debian packages from Matrix.org will not be
affected. If you have installed from source, you should check the version of
affected. If you have installed from source, you should check the version of
SQLite used by Python with:
```shell


@ -179,7 +179,7 @@ This will tell other servers to send traffic to port 443 instead.
This option currently defaults to false.
See https://matrix-org.github.io/synapse/latest/delegate.html for more
See [Delegation of incoming federation traffic](../../delegate.md) for more
information.
Example configuration:
@ -2229,6 +2229,9 @@ homeserver. If the room already exists, make certain it is a publicly joinable
room, i.e. the join rule of the room must be set to 'public'. You can find more options
relating to auto-joining rooms below.
As Spaces are just rooms under the hood, Space aliases may also be
used.
Example configuration:
```yaml
auto_join_rooms:
@ -2240,7 +2243,7 @@ auto_join_rooms:
Where `auto_join_rooms` are specified, setting this flag ensures that
the rooms exist by creating them when the first user on the
homeserver registers.
homeserver registers. This option will not create Spaces.
By default the auto-created rooms are publicly joinable from any federated
server. Use the `autocreate_auto_join_rooms_federated` and
@ -2258,7 +2261,7 @@ autocreate_auto_join_rooms: false
---
### `autocreate_auto_join_rooms_federated`
Whether the rooms listen in `auto_join_rooms` that are auto-created are available
Whether the rooms listed in `auto_join_rooms` that are auto-created are available
via federation. Only has an effect if `autocreate_auto_join_rooms` is true.
Note that whether a room is federated cannot be modified after
@ -2433,6 +2436,31 @@ Example configuration:
enable_metrics: true
```
---
### `enable_legacy_metrics`
Set to `true` to publish both legacy and non-legacy Prometheus metric names,
or to `false` to only publish non-legacy Prometheus metric names.
Defaults to `true`. Has no effect if `enable_metrics` is `false`.
**In Synapse v1.71.0, this will default to `false` before being removed in Synapse v1.73.0.**
Legacy metric names include:
- metrics containing colons in the name, such as `synapse_util_caches_response_cache:hits`, because colons are supposed to be reserved for user-defined recording rules;
- counters that don't end with the `_total` suffix, such as `synapse_federation_client_sent_edus`, therefore not adhering to the OpenMetrics standard.
These legacy metric names are unconventional and not compliant with OpenMetrics standards.
They are included for backwards compatibility.
Example configuration:
```yaml
enable_legacy_metrics: false
```
See https://github.com/matrix-org/synapse/issues/11106 for context.
*Since v1.67.0.*
**Will be removed in v1.73.0.**
---
### `sentry`
Use this option to enable sentry integration. Provide the DSN assigned to you by sentry
@ -2949,7 +2977,7 @@ Options for each entry include:
* `module`: The class name of a custom mapping module. Default is
`synapse.handlers.oidc.JinjaOidcMappingProvider`.
See https://matrix-org.github.io/synapse/latest/sso_mapping_providers.html#openid-mapping-providers
See [OpenID Mapping Providers](../../sso_mapping_providers.md#openid-mapping-providers)
for information on implementing a custom mapping provider.
* `config`: Configuration for the mapping provider module. This section will
@ -3390,13 +3418,15 @@ This option has the following sub-options:
the user directory. If false, search results will only contain users
visible in public rooms and users sharing a room with the requester.
Defaults to false.
NB. If you set this to true, and the last time the user_directory search
indexes were (re)built was before Synapse 1.44, you'll have to
rebuild the indexes in order to search through all known users.
These indexes are built the first time Synapse starts; admins can
manually trigger a rebuild via API following the instructions at
https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/background_updates.html#run
Set to true to return search results containing all known users, even if that
manually trigger a rebuild via the API following the instructions
[for running background updates](../administration/admin_api/background_updates.md#run),
set to true to return search results containing all known users, even if that
user does not share a room with the requester.
* `prefer_local_users`: Defines whether to prefer local users in search query results.
If set to true, local users are more likely to appear above remote users when searching the


@ -93,7 +93,6 @@ listener" for the main process; and secondly, you need to enable redis-based
replication. Optionally, a shared secret can be used to authenticate HTTP
traffic between workers. For example:
```yaml
# extend the existing `listeners` section. This defines the ports that the
# main process will listen on.
@ -129,7 +128,8 @@ In the config file for each worker, you must specify:
* The HTTP replication endpoint that it should talk to on the main synapse process
(`worker_replication_host` and `worker_replication_http_port`)
* If handling HTTP requests, a `worker_listeners` option with an `http`
listener, in the same way as the `listeners` option in the shared config.
listener, in the same way as the [`listeners`](usage/configuration/config_documentation.md#listeners)
option in the shared config.
* If handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for
the main process (`worker_main_http_uri`).
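Taken together, a worker configuration covering the options above might look like the following sketch; the worker name, port and log path are illustrative assumptions rather than prescribed values:

```yaml
worker_app: synapse.app.generic_worker
worker_name: worker1

# The replication listener on the main Synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

# Only needed if this worker handles `^/_matrix/client/v3/keys/upload`.
worker_main_http_uri: http://localhost:8008/

worker_listeners:
  - type: http
    port: 8083
    resources:
      - names: [client, federation]

worker_log_config: /etc/matrix-synapse/worker1-log.yaml
```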
@ -285,8 +285,9 @@ For multiple workers not handling the SSO endpoints properly, see
[#7530](https://github.com/matrix-org/synapse/issues/7530) and
[#9427](https://github.com/matrix-org/synapse/issues/9427).
Note that a HTTP listener with `client` and `federation` resources must be
configured in the `worker_listeners` option in the worker config.
Note that a [HTTP listener](usage/configuration/config_documentation.md#listeners)
with `client` and `federation` `resources` must be configured in the `worker_listeners`
option in the worker config.
#### Load balancing
@ -326,7 +327,8 @@ effects of bursts of events from that bridge on events sent by normal users.
Additionally, the writing of specific streams (such as events) can be moved off
of the main process to a particular worker.
To enable this, the worker must have a HTTP replication listener configured,
To enable this, the worker must have a
[HTTP `replication` listener](usage/configuration/config_documentation.md#listeners) configured,
have a `worker_name` and be listed in the `instance_map` config. The same worker
can handle multiple streams, but unless otherwise documented, each stream can only
have a single writer.
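As an illustration of the shared-configuration pieces mentioned above (the worker name and port here are assumptions for the sketch, not required values), routing the `events` stream to a dedicated writer could look like:

```yaml
stream_writers:
  events: event_persister1

instance_map:
  event_persister1:
    host: localhost
    port: 8034
```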
@ -410,7 +412,7 @@ the stream writer for the `presence` stream:
There is also support for moving background tasks to a separate
worker. Background tasks are run periodically or started via replication. Exactly
which tasks are configured to run depends on your Synapse configuration (e.g. if
stats is enabled).
stats is enabled). This worker doesn't handle any REST endpoints itself.
To enable this, the worker must have a `worker_name` and can be configured to run
background tasks. For example, to move background tasks to a dedicated worker,
@ -457,8 +459,8 @@ worker application type.
#### Notifying Application Services
You can designate one generic worker to send output traffic to Application Services.
Specify its name in the shared configuration as follows:
This worker doesn't handle any REST endpoints itself, but you should specify
its name in the shared configuration as follows:
```yaml
notify_appservices_from_worker: worker_name
@ -536,16 +538,12 @@ file to stop the main synapse running background jobs related to managing the
media repository. Note that doing so will prevent the main process from being
able to handle the above endpoints.
In the `media_repository` worker configuration file, configure the http listener to
In the `media_repository` worker configuration file, configure the
[HTTP listener](usage/configuration/config_documentation.md#listeners) to
expose the `media` resource. For example:
```yaml
worker_listeners:
- type: http
port: 8085
resources:
- names:
- media
{{#include systemd-with-workers/workers/media_worker.yaml}}
```
Note that if running multiple media repositories they must be on the same server

68
poetry.lock generated
View file

@ -95,14 +95,15 @@ webencodings = "*"
[[package]]
name = "canonicaljson"
version = "1.6.0"
version = "1.6.3"
description = "Canonical JSON"
category = "main"
optional = false
python-versions = "~=3.7"
python-versions = ">=3.7"
[package.dependencies]
simplejson = ">=3.14.0"
typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.8\""}
[package.extras]
frozendict = ["frozendict (>=1.0)"]
@ -572,11 +573,11 @@ python-versions = "*"
[[package]]
name = "mypy"
version = "0.950"
version = "0.981"
description = "Optional static typing for Python"
category = "dev"
optional = false
python-versions = ">=3.6"
python-versions = ">=3.7"
[package.dependencies]
mypy-extensions = ">=0.4.3"
@ -599,14 +600,14 @@ python-versions = "*"
[[package]]
name = "mypy-zope"
version = "0.3.7"
version = "0.3.11"
description = "Plugin for mypy to support zope interfaces"
category = "dev"
optional = false
python-versions = "*"
[package.dependencies]
mypy = "0.950"
mypy = "0.981"
"zope.interface" = "*"
"zope.schema" = "*"
@ -1682,8 +1683,8 @@ bleach = [
{file = "bleach-4.1.0.tar.gz", hash = "sha256:0900d8b37eba61a802ee40ac0061f8c2b5dee29c1927dd1d233e075ebf5a71da"},
]
canonicaljson = [
{file = "canonicaljson-1.6.0-py3-none-any.whl", hash = "sha256:7230c2a2a3db07874f622af84effe41a655e07bf23734830e18a454e65d5b998"},
{file = "canonicaljson-1.6.0.tar.gz", hash = "sha256:8739d5fd91aca7281d425660ae65af7663808c8177778965f67e90b16a2b2427"},
{file = "canonicaljson-1.6.3-py3-none-any.whl", hash = "sha256:6ba3cf1702fa3d209b3e915a4e9a3e4ef194f1e8fca189c1f0b7a2a7686a27e6"},
{file = "canonicaljson-1.6.3.tar.gz", hash = "sha256:ca59760bc274a899a0da75809d6909ae43e5123381fd6ef040a44d1952c0b448"},
]
certifi = [
{file = "certifi-2021.10.8-py2.py3-none-any.whl", hash = "sha256:d62a0163eb4c2344ac042ab2bdf75399a71a2d8c7d47eac2e2ee91b9d6339569"},
@ -2161,37 +2162,38 @@ msgpack = [
{file = "msgpack-1.0.3.tar.gz", hash = "sha256:51fdc7fb93615286428ee7758cecc2f374d5ff363bdd884c7ea622a7a327a81e"},
]
mypy = [
{file = "mypy-0.950-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cf9c261958a769a3bd38c3e133801ebcd284ffb734ea12d01457cb09eacf7d7b"},
{file = "mypy-0.950-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5b5bd0ffb11b4aba2bb6d31b8643902c48f990cc92fda4e21afac658044f0c0"},
{file = "mypy-0.950-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5e7647df0f8fc947388e6251d728189cfadb3b1e558407f93254e35abc026e22"},
{file = "mypy-0.950-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:eaff8156016487c1af5ffa5304c3e3fd183edcb412f3e9c72db349faf3f6e0eb"},
{file = "mypy-0.950-cp310-cp310-win_amd64.whl", hash = "sha256:563514c7dc504698fb66bb1cf897657a173a496406f1866afae73ab5b3cdb334"},
{file = "mypy-0.950-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:dd4d670eee9610bf61c25c940e9ade2d0ed05eb44227275cce88701fee014b1f"},
{file = "mypy-0.950-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ca75ecf2783395ca3016a5e455cb322ba26b6d33b4b413fcdedfc632e67941dc"},
{file = "mypy-0.950-cp36-cp36m-win_amd64.whl", hash = "sha256:6003de687c13196e8a1243a5e4bcce617d79b88f83ee6625437e335d89dfebe2"},
{file = "mypy-0.950-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4c653e4846f287051599ed8f4b3c044b80e540e88feec76b11044ddc5612ffed"},
{file = "mypy-0.950-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:e19736af56947addedce4674c0971e5dceef1b5ec7d667fe86bcd2b07f8f9075"},
{file = "mypy-0.950-cp37-cp37m-win_amd64.whl", hash = "sha256:ef7beb2a3582eb7a9f37beaf38a28acfd801988cde688760aea9e6cc4832b10b"},
{file = "mypy-0.950-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0112752a6ff07230f9ec2f71b0d3d4e088a910fdce454fdb6553e83ed0eced7d"},
{file = "mypy-0.950-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ee0a36edd332ed2c5208565ae6e3a7afc0eabb53f5327e281f2ef03a6bc7687a"},
{file = "mypy-0.950-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:77423570c04aca807508a492037abbd72b12a1fb25a385847d191cd50b2c9605"},
{file = "mypy-0.950-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5ce6a09042b6da16d773d2110e44f169683d8cc8687e79ec6d1181a72cb028d2"},
{file = "mypy-0.950-cp38-cp38-win_amd64.whl", hash = "sha256:5b231afd6a6e951381b9ef09a1223b1feabe13625388db48a8690f8daa9b71ff"},
{file = "mypy-0.950-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:0384d9f3af49837baa92f559d3fa673e6d2652a16550a9ee07fc08c736f5e6f8"},
{file = "mypy-0.950-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1fdeb0a0f64f2a874a4c1f5271f06e40e1e9779bf55f9567f149466fc7a55038"},
{file = "mypy-0.950-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:61504b9a5ae166ba5ecfed9e93357fd51aa693d3d434b582a925338a2ff57fd2"},
{file = "mypy-0.950-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a952b8bc0ae278fc6316e6384f67bb9a396eb30aced6ad034d3a76120ebcc519"},
{file = "mypy-0.950-cp39-cp39-win_amd64.whl", hash = "sha256:eaea21d150fb26d7b4856766e7addcf929119dd19fc832b22e71d942835201ef"},
{file = "mypy-0.950-py3-none-any.whl", hash = "sha256:a4d9898f46446bfb6405383b57b96737dcfd0a7f25b748e78ef3e8c576bba3cb"},
{file = "mypy-0.950.tar.gz", hash = "sha256:1b333cfbca1762ff15808a0ef4f71b5d3eed8528b23ea1c3fb50543c867d68de"},
{file = "mypy-0.981-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4bc460e43b7785f78862dab78674e62ec3cd523485baecfdf81a555ed29ecfa0"},
{file = "mypy-0.981-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:756fad8b263b3ba39e4e204ee53042671b660c36c9017412b43af210ddee7b08"},
{file = "mypy-0.981-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a16a0145d6d7d00fbede2da3a3096dcc9ecea091adfa8da48fa6a7b75d35562d"},
{file = "mypy-0.981-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce65f70b14a21fdac84c294cde75e6dbdabbcff22975335e20827b3b94bdbf49"},
{file = "mypy-0.981-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6e35d764784b42c3e256848fb8ed1d4292c9fc0098413adb28d84974c095b279"},
{file = "mypy-0.981-cp310-cp310-win_amd64.whl", hash = "sha256:e53773073c864d5f5cec7f3fc72fbbcef65410cde8cc18d4f7242dea60dac52e"},
{file = "mypy-0.981-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:6ee196b1d10b8b215e835f438e06965d7a480f6fe016eddbc285f13955cca659"},
{file = "mypy-0.981-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ad21d4c9d3673726cf986ea1d0c9fb66905258709550ddf7944c8f885f208be"},
{file = "mypy-0.981-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d1debb09043e1f5ee845fa1e96d180e89115b30e47c5d3ce53bc967bab53f62d"},
{file = "mypy-0.981-cp37-cp37m-win_amd64.whl", hash = "sha256:9f362470a3480165c4c6151786b5379351b790d56952005be18bdbdd4c7ce0ae"},
{file = "mypy-0.981-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c9e0efb95ed6ca1654951bd5ec2f3fa91b295d78bf6527e026529d4aaa1e0c30"},
{file = "mypy-0.981-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e178eaffc3c5cd211a87965c8c0df6da91ed7d258b5fc72b8e047c3771317ddb"},
{file = "mypy-0.981-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:06e1eac8d99bd404ed8dd34ca29673c4346e76dd8e612ea507763dccd7e13c7a"},
{file = "mypy-0.981-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa38f82f53e1e7beb45557ff167c177802ba7b387ad017eab1663d567017c8ee"},
{file = "mypy-0.981-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:64e1f6af81c003f85f0dfed52db632817dabb51b65c0318ffbf5ff51995bbb08"},
{file = "mypy-0.981-cp38-cp38-win_amd64.whl", hash = "sha256:e1acf62a8c4f7c092462c738aa2c2489e275ed386320c10b2e9bff31f6f7e8d6"},
{file = "mypy-0.981-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b6ede64e52257931315826fdbfc6ea878d89a965580d1a65638ef77cb551f56d"},
{file = "mypy-0.981-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:eb3978b191b9fa0488524bb4ffedf2c573340e8c2b4206fc191d44c7093abfb7"},
{file = "mypy-0.981-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:77f8fcf7b4b3cc0c74fb33ae54a4cd00bb854d65645c48beccf65fa10b17882c"},
{file = "mypy-0.981-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f64d2ce043a209a297df322eb4054dfbaa9de9e8738291706eaafda81ab2b362"},
{file = "mypy-0.981-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2ee3dbc53d4df7e6e3b1c68ac6a971d3a4fb2852bf10a05fda228721dd44fae1"},
{file = "mypy-0.981-cp39-cp39-win_amd64.whl", hash = "sha256:8e8e49aa9cc23aa4c926dc200ce32959d3501c4905147a66ce032f05cb5ecb92"},
{file = "mypy-0.981-py3-none-any.whl", hash = "sha256:794f385653e2b749387a42afb1e14c2135e18daeb027e0d97162e4b7031210f8"},
{file = "mypy-0.981.tar.gz", hash = "sha256:ad77c13037d3402fbeffda07d51e3f228ba078d1c7096a73759c9419ea031bf4"},
]
mypy-extensions = [
{file = "mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"},
{file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"},
]
mypy-zope = [
{file = "mypy-zope-0.3.7.tar.gz", hash = "sha256:9da171e78e8ef7ac8922c86af1a62f1b7f3244f121020bd94a2246bc3f33c605"},
{file = "mypy_zope-0.3.7-py3-none-any.whl", hash = "sha256:9c7637d066e4d1bafa0651abc091c752009769098043b236446e6725be2bc9c2"},
{file = "mypy-zope-0.3.11.tar.gz", hash = "sha256:d4255f9f04d48c79083bbd4e2fea06513a6ac7b8de06f8c4ce563fd85142ca05"},
{file = "mypy_zope-0.3.11-py3-none-any.whl", hash = "sha256:ec080a6508d1f7805c8d2054f9fdd13c849742ce96803519e1fdfa3d3cab7140"},
]
netaddr = [
{file = "netaddr-0.8.0-py2.py3-none-any.whl", hash = "sha256:9666d0232c32d2656e5e5f8d735f58fd6c7457ce52fc21c98d45f2af78f990ac"},

View file

@ -57,7 +57,7 @@ manifest-path = "rust/Cargo.toml"
[tool.poetry]
name = "matrix-synapse"
version = "1.68.0"
version = "1.69.0rc1"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"

View file

@ -21,9 +21,9 @@ bcrypt==3.2.0 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0"
bleach==4.1.0 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \
--hash=sha256:0900d8b37eba61a802ee40ac0061f8c2b5dee29c1927dd1d233e075ebf5a71da \
--hash=sha256:4d2651ab93271d1129ac9cbc679f524565cc8a1b791909c4a51eac4446a15994
canonicaljson==1.6.0 ; python_full_version >= "3.7.1" and python_version < "4.0" \
--hash=sha256:7230c2a2a3db07874f622af84effe41a655e07bf23734830e18a454e65d5b998 \
--hash=sha256:8739d5fd91aca7281d425660ae65af7663808c8177778965f67e90b16a2b2427
canonicaljson==1.6.3 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \
--hash=sha256:6ba3cf1702fa3d209b3e915a4e9a3e4ef194f1e8fca189c1f0b7a2a7686a27e6 \
--hash=sha256:ca59760bc274a899a0da75809d6909ae43e5123381fd6ef040a44d1952c0b448
certifi==2021.10.8 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \
--hash=sha256:78884e7c1d4b00ce3cea67b44566851c4343c120abd683433ce934a68ea58872 \
--hash=sha256:d62a0163eb4c2344ac042ab2bdf75399a71a2d8c7d47eac2e2ee91b9d6339569
@ -627,7 +627,7 @@ setuptools==65.3.0 ; python_full_version >= "3.7.1" and python_full_version < "4
signedjson==1.1.4 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \
--hash=sha256:45569ec54241c65d2403fe3faf7169be5322547706a231e884ca2b427f23d228 \
--hash=sha256:cd91c56af53f169ef032c62e9c4a3292dc158866933318d0592e3462db3d6492
simplejson==3.17.6 ; python_full_version >= "3.7.1" and python_version < "4.0" \
simplejson==3.17.6 ; python_full_version >= "3.7.1" and python_full_version < "4.0.0" \
--hash=sha256:04e31fa6ac8e326480703fb6ded1488bfa6f1d3f760d32e29dbf66d0838982ce \
--hash=sha256:068670af975247acbb9fc3d5393293368cda17026db467bf7a51548ee8f17ee1 \
--hash=sha256:07ecaafc1b1501f275bf5acdee34a4ad33c7c24ede287183ea77a02dc071e0c0 \

View file

@ -11,14 +11,24 @@ rust-version = "1.58.1"
[lib]
name = "synapse"
crate-type = ["cdylib"]
# We generate a `cdylib` for Python and a standard `lib` for running
# tests/benchmarks.
crate-type = ["lib", "cdylib"]
[package.metadata.maturin]
# This is where we tell maturin where to place the built library.
name = "synapse.synapse_rust"
[dependencies]
pyo3 = { version = "0.16.5", features = ["extension-module", "macros", "abi3", "abi3-py37"] }
anyhow = "1.0.63"
lazy_static = "1.4.0"
log = "0.4.17"
pyo3 = { version = "0.17.1", features = ["extension-module", "macros", "anyhow", "abi3", "abi3-py37"] }
pyo3-log = "0.7.0"
pythonize = "0.17.0"
regex = "1.6.0"
serde = { version = "1.0.144", features = ["derive"] }
serde_json = "1.0.85"
[build-dependencies]
blake2 = "0.10.4"

149
rust/benches/evaluator.rs Normal file
View file

@ -0,0 +1,149 @@
// Copyright 2022 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![feature(test)]
use synapse::push::{
evaluator::PushRuleEvaluator, Condition, EventMatchCondition, FilteredPushRules, PushRules,
};
use test::Bencher;
extern crate test;
#[bench]
fn bench_match_exact(b: &mut Bencher) {
let flattened_keys = [
("type".to_string(), "m.text".to_string()),
("room_id".to_string(), "!room:server".to_string()),
("content.body".to_string(), "test message".to_string()),
]
.into_iter()
.collect();
let eval = PushRuleEvaluator::py_new(
flattened_keys,
10,
0,
Default::default(),
Default::default(),
true,
)
.unwrap();
let condition = Condition::Known(synapse::push::KnownCondition::EventMatch(
EventMatchCondition {
key: "room_id".into(),
pattern: Some("!room:server".into()),
pattern_type: None,
},
));
let matched = eval.match_condition(&condition, None, None).unwrap();
assert!(matched, "Didn't match");
b.iter(|| eval.match_condition(&condition, None, None).unwrap());
}
#[bench]
fn bench_match_word(b: &mut Bencher) {
let flattened_keys = [
("type".to_string(), "m.text".to_string()),
("room_id".to_string(), "!room:server".to_string()),
("content.body".to_string(), "test message".to_string()),
]
.into_iter()
.collect();
let eval = PushRuleEvaluator::py_new(
flattened_keys,
10,
0,
Default::default(),
Default::default(),
true,
)
.unwrap();
let condition = Condition::Known(synapse::push::KnownCondition::EventMatch(
EventMatchCondition {
key: "content.body".into(),
pattern: Some("test".into()),
pattern_type: None,
},
));
let matched = eval.match_condition(&condition, None, None).unwrap();
assert!(matched, "Didn't match");
b.iter(|| eval.match_condition(&condition, None, None).unwrap());
}
#[bench]
fn bench_match_word_miss(b: &mut Bencher) {
let flattened_keys = [
("type".to_string(), "m.text".to_string()),
("room_id".to_string(), "!room:server".to_string()),
("content.body".to_string(), "test message".to_string()),
]
.into_iter()
.collect();
let eval = PushRuleEvaluator::py_new(
flattened_keys,
10,
0,
Default::default(),
Default::default(),
true,
)
.unwrap();
let condition = Condition::Known(synapse::push::KnownCondition::EventMatch(
EventMatchCondition {
key: "content.body".into(),
pattern: Some("foobar".into()),
pattern_type: None,
},
));
let matched = eval.match_condition(&condition, None, None).unwrap();
assert!(!matched, "Matched unexpectedly");
b.iter(|| eval.match_condition(&condition, None, None).unwrap());
}
#[bench]
fn bench_eval_message(b: &mut Bencher) {
let flattened_keys = [
("type".to_string(), "m.text".to_string()),
("room_id".to_string(), "!room:server".to_string()),
("content.body".to_string(), "test message".to_string()),
]
.into_iter()
.collect();
let eval = PushRuleEvaluator::py_new(
flattened_keys,
10,
0,
Default::default(),
Default::default(),
true,
)
.unwrap();
let rules =
FilteredPushRules::py_new(PushRules::new(Vec::new()), Default::default(), false, false);
b.iter(|| eval.run(&rules, Some("bob"), Some("person")));
}

40
rust/benches/glob.rs Normal file
View file

@ -0,0 +1,40 @@
// Copyright 2022 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![feature(test)]
use synapse::push::utils::{glob_to_regex, GlobMatchType};
use test::Bencher;
extern crate test;
#[bench]
fn bench_whole(b: &mut Bencher) {
b.iter(|| glob_to_regex("test", GlobMatchType::Whole));
}
#[bench]
fn bench_word(b: &mut Bencher) {
b.iter(|| glob_to_regex("test", GlobMatchType::Word));
}
#[bench]
fn bench_whole_wildcard_run(b: &mut Bencher) {
b.iter(|| glob_to_regex("test***??*?*?foo", GlobMatchType::Whole));
}
#[bench]
fn bench_word_wildcard_run(b: &mut Bencher) {
b.iter(|| glob_to_regex("test***??*?*?foo", GlobMatchType::Whole));
}
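The benchmarks above feed a long wildcard run into `glob_to_regex`. A minimal sketch of what that run collapses to, not part of this commit and assuming it sits beside the existing tests in `rust/src/push/utils.rs` (the test name is hypothetical):

```rust
#[test]
fn wildcard_run_collapse_sketch() -> Result<(), Error> {
    // `***??*?*?` contains four `?`s and at least one `*`, so the whole run
    // is collapsed into a single `.{4,}` when converted to a regex.
    assert_eq!(
        glob_to_regex("test***??*?*?foo", GlobMatchType::Whole)?.as_str(),
        r"\Atest.{4,}foo\z"
    );
    Ok(())
}
```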

View file

@ -22,7 +22,7 @@ fn main() -> Result<(), std::io::Error> {
for entry in entries {
if entry.is_dir() {
dirs.push(entry)
dirs.push(entry);
} else {
paths.push(entry.to_str().expect("valid rust paths").to_string());
}

View file

@ -1,5 +1,7 @@
use pyo3::prelude::*;
pub mod push;
/// Returns the hash of all the rust source files at the time it was compiled.
///
/// Used by python to detect if the rust library is outdated.
@ -17,8 +19,13 @@ fn sum_as_string(a: usize, b: usize) -> PyResult<String> {
/// The entry point for defining the Python module.
#[pymodule]
fn synapse_rust(_py: Python<'_>, m: &PyModule) -> PyResult<()> {
fn synapse_rust(py: Python<'_>, m: &PyModule) -> PyResult<()> {
pyo3_log::init();
m.add_function(wrap_pyfunction!(sum_as_string, m)?)?;
m.add_function(wrap_pyfunction!(get_rust_file_digest, m)?)?;
push::register_module(py, m)?;
Ok(())
}

336
rust/src/push/base_rules.rs Normal file
View file

@ -0,0 +1,336 @@
// Copyright 2022 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Contains the definitions of the "base" push rules.
use std::borrow::Cow;
use std::collections::HashMap;
use lazy_static::lazy_static;
use serde_json::Value;
use super::KnownCondition;
use crate::push::Action;
use crate::push::Condition;
use crate::push::EventMatchCondition;
use crate::push::PushRule;
use crate::push::SetTweak;
use crate::push::TweakValue;
const HIGHLIGHT_ACTION: Action = Action::SetTweak(SetTweak {
set_tweak: Cow::Borrowed("highlight"),
value: None,
other_keys: Value::Null,
});
const HIGHLIGHT_FALSE_ACTION: Action = Action::SetTweak(SetTweak {
set_tweak: Cow::Borrowed("highlight"),
value: Some(TweakValue::Other(Value::Bool(false))),
other_keys: Value::Null,
});
const SOUND_ACTION: Action = Action::SetTweak(SetTweak {
set_tweak: Cow::Borrowed("sound"),
value: Some(TweakValue::String(Cow::Borrowed("default"))),
other_keys: Value::Null,
});
const RING_ACTION: Action = Action::SetTweak(SetTweak {
set_tweak: Cow::Borrowed("sound"),
value: Some(TweakValue::String(Cow::Borrowed("ring"))),
other_keys: Value::Null,
});
pub const BASE_PREPEND_OVERRIDE_RULES: &[PushRule] = &[PushRule {
rule_id: Cow::Borrowed("global/override/.m.rule.master"),
priority_class: 5,
conditions: Cow::Borrowed(&[]),
actions: Cow::Borrowed(&[Action::DontNotify]),
default: true,
default_enabled: false,
}];
pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
PushRule {
rule_id: Cow::Borrowed("global/override/.m.rule.suppress_notices"),
priority_class: 5,
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
EventMatchCondition {
key: Cow::Borrowed("content.msgtype"),
pattern: Some(Cow::Borrowed("m.notice")),
pattern_type: None,
},
))]),
actions: Cow::Borrowed(&[Action::DontNotify]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/override/.m.rule.invite_for_me"),
priority_class: 5,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("m.room.member")),
pattern_type: None,
})),
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("content.membership"),
pattern: Some(Cow::Borrowed("invite")),
pattern_type: None,
})),
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("state_key"),
pattern: None,
pattern_type: Some(Cow::Borrowed("user_id")),
})),
]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION, SOUND_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/override/.m.rule.member_event"),
priority_class: 5,
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("m.room.member")),
pattern_type: None,
},
))]),
actions: Cow::Borrowed(&[Action::DontNotify]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/override/.m.rule.contains_display_name"),
priority_class: 5,
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::ContainsDisplayName)]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION, SOUND_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/override/.m.rule.roomnotif"),
priority_class: 5,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::SenderNotificationPermission {
key: Cow::Borrowed("room"),
}),
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("content.body"),
pattern: Some(Cow::Borrowed("@room")),
pattern_type: None,
})),
]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/override/.m.rule.tombstone"),
priority_class: 5,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("m.room.tombstone")),
pattern_type: None,
})),
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("state_key"),
pattern: Some(Cow::Borrowed("")),
pattern_type: None,
})),
]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/override/.m.rule.reaction"),
priority_class: 5,
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("m.reaction")),
pattern_type: None,
},
))]),
actions: Cow::Borrowed(&[Action::DontNotify]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/override/.org.matrix.msc3786.rule.room.server_acl"),
priority_class: 5,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("m.room.server_acl")),
pattern_type: None,
})),
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("state_key"),
pattern: Some(Cow::Borrowed("")),
pattern_type: None,
})),
]),
actions: Cow::Borrowed(&[]),
default: true,
default_enabled: true,
},
];
pub const BASE_APPEND_CONTENT_RULES: &[PushRule] = &[PushRule {
rule_id: Cow::Borrowed("global/content/.m.rule.contains_user_name"),
priority_class: 4,
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
EventMatchCondition {
key: Cow::Borrowed("content.body"),
pattern: None,
pattern_type: Some(Cow::Borrowed("user_localpart")),
},
))]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION, SOUND_ACTION]),
default: true,
default_enabled: true,
}];
pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[
PushRule {
rule_id: Cow::Borrowed("global/underride/.m.rule.call"),
priority_class: 1,
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("m.call.invite")),
pattern_type: None,
},
))]),
actions: Cow::Borrowed(&[Action::Notify, RING_ACTION, HIGHLIGHT_FALSE_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/underride/.m.rule.room_one_to_one"),
priority_class: 1,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("m.room.message")),
pattern_type: None,
})),
Condition::Known(KnownCondition::RoomMemberCount {
is: Some(Cow::Borrowed("2")),
}),
]),
actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/underride/.m.rule.encrypted_room_one_to_one"),
priority_class: 1,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("m.room.encrypted")),
pattern_type: None,
})),
Condition::Known(KnownCondition::RoomMemberCount {
is: Some(Cow::Borrowed("2")),
}),
]),
actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/underride/.org.matrix.msc3772.thread_reply"),
priority_class: 1,
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::RelationMatch {
rel_type: Cow::Borrowed("m.thread"),
event_type_pattern: None,
sender: None,
sender_type: Some(Cow::Borrowed("user_id")),
})]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/underride/.m.rule.message"),
priority_class: 1,
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("m.room.message")),
pattern_type: None,
},
))]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/underride/.m.rule.encrypted"),
priority_class: 1,
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("m.room.encrypted")),
pattern_type: None,
},
))]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
default: true,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/underride/.im.vector.jitsi"),
priority_class: 1,
conditions: Cow::Borrowed(&[
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("type"),
pattern: Some(Cow::Borrowed("im.vector.modular.widgets")),
pattern_type: None,
})),
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("content.type"),
pattern: Some(Cow::Borrowed("jitsi")),
pattern_type: None,
})),
Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: Cow::Borrowed("state_key"),
pattern: Some(Cow::Borrowed("*")),
pattern_type: None,
})),
]),
actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
default: true,
default_enabled: true,
},
];
lazy_static! {
pub static ref BASE_RULES_BY_ID: HashMap<&'static str, &'static PushRule> =
BASE_PREPEND_OVERRIDE_RULES
.iter()
.chain(BASE_APPEND_OVERRIDE_RULES.iter())
.chain(BASE_APPEND_CONTENT_RULES.iter())
.chain(BASE_APPEND_UNDERRIDE_RULES.iter())
.map(|rule| { (&*rule.rule_id, rule) })
.collect();
}
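These constants are consumed via the `BASE_RULES_BY_ID` map defined above. A hedged sketch of a lookup, not part of this commit and assuming it lives in a test module next to `base_rules.rs` (the test name is hypothetical):

```rust
#[test]
fn base_rule_lookup_sketch() {
    // The master rule is prepended ahead of every other override rule and
    // ships disabled by default.
    let rule = BASE_RULES_BY_ID
        .get("global/override/.m.rule.master")
        .expect("the master rule is always defined");
    assert_eq!(rule.priority_class, 5);
    assert!(rule.default);
    assert!(!rule.default_enabled);
}
```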

374
rust/src/push/evaluator.rs Normal file
View file

@ -0,0 +1,374 @@
// Copyright 2022 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::{
borrow::Cow,
collections::{BTreeMap, BTreeSet},
};
use anyhow::{Context, Error};
use lazy_static::lazy_static;
use log::warn;
use pyo3::prelude::*;
use regex::Regex;
use super::{
utils::{get_glob_matcher, get_localpart_from_id, GlobMatchType},
Action, Condition, EventMatchCondition, FilteredPushRules, KnownCondition,
};
lazy_static! {
/// Used to parse the `is` clause in the room member count condition.
static ref INEQUALITY_EXPR: Regex = Regex::new(r"^([=<>]*)([0-9]+)$").expect("valid regex");
}
/// Allows running a set of push rules against a particular event.
#[pyclass]
pub struct PushRuleEvaluator {
/// A mapping of "flattened" keys to string values in the event, e.g.
/// includes things like "type" and "content.msgtype".
flattened_keys: BTreeMap<String, String>,
/// The "content.body", if any.
body: String,
/// The number of users in the room.
room_member_count: u64,
/// The `notifications` section of the current power levels in the room.
notification_power_levels: BTreeMap<String, i64>,
/// The relations related to the event as a mapping from relation type to
/// set of sender/event type 2-tuples.
relations: BTreeMap<String, BTreeSet<(String, String)>>,
/// Is running "relation" conditions enabled?
relation_match_enabled: bool,
/// The power level of the sender of the event, or None if event is an
/// outlier.
sender_power_level: Option<i64>,
}
#[pymethods]
impl PushRuleEvaluator {
/// Create a new `PushRuleEvaluator`. See struct docstring for details.
#[new]
pub fn py_new(
flattened_keys: BTreeMap<String, String>,
room_member_count: u64,
sender_power_level: Option<i64>,
notification_power_levels: BTreeMap<String, i64>,
relations: BTreeMap<String, BTreeSet<(String, String)>>,
relation_match_enabled: bool,
) -> Result<Self, Error> {
let body = flattened_keys
.get("content.body")
.cloned()
.unwrap_or_default();
Ok(PushRuleEvaluator {
flattened_keys,
body,
room_member_count,
notification_power_levels,
relations,
relation_match_enabled,
sender_power_level,
})
}
/// Run the evaluator with the given push rules, for the given user ID and
/// display name of the user.
///
/// Passing in None will skip evaluating rules matching user ID and display
/// name.
///
/// Returns the set of actions, if any, that match (filtering out any
/// `dont_notify` actions).
pub fn run(
&self,
push_rules: &FilteredPushRules,
user_id: Option<&str>,
display_name: Option<&str>,
) -> Vec<Action> {
'outer: for (push_rule, enabled) in push_rules.iter() {
if !enabled {
continue;
}
for condition in push_rule.conditions.iter() {
match self.match_condition(condition, user_id, display_name) {
Ok(true) => {}
Ok(false) => continue 'outer,
Err(err) => {
warn!("Condition match failed {err}");
continue 'outer;
}
}
}
let actions = push_rule
.actions
.iter()
// Filter out "dont_notify" actions, as we don't store them.
.filter(|a| **a != Action::DontNotify)
.cloned()
.collect();
return actions;
}
Vec::new()
}
/// Check if the given condition matches.
fn matches(
&self,
condition: Condition,
user_id: Option<&str>,
display_name: Option<&str>,
) -> bool {
match self.match_condition(&condition, user_id, display_name) {
Ok(true) => true,
Ok(false) => false,
Err(err) => {
warn!("Condition match failed {err}");
false
}
}
}
}
impl PushRuleEvaluator {
/// Match a given `Condition` for a push rule.
pub fn match_condition(
&self,
condition: &Condition,
user_id: Option<&str>,
display_name: Option<&str>,
) -> Result<bool, Error> {
let known_condition = match condition {
Condition::Known(known) => known,
Condition::Unknown(_) => {
return Ok(false);
}
};
let result = match known_condition {
KnownCondition::EventMatch(event_match) => {
self.match_event_match(event_match, user_id)?
}
KnownCondition::ContainsDisplayName => {
if let Some(dn) = display_name {
if !dn.is_empty() {
get_glob_matcher(dn, GlobMatchType::Word)?.is_match(&self.body)?
} else {
// We specifically ignore empty display names, as otherwise
// they would always match.
false
}
} else {
false
}
}
KnownCondition::RoomMemberCount { is } => {
if let Some(is) = is {
self.match_member_count(is)?
} else {
false
}
}
KnownCondition::SenderNotificationPermission { key } => {
if let Some(sender_power_level) = &self.sender_power_level {
let required_level = self
.notification_power_levels
.get(key.as_ref())
.copied()
.unwrap_or(50);
*sender_power_level >= required_level
} else {
false
}
}
KnownCondition::RelationMatch {
rel_type,
event_type_pattern,
sender,
sender_type,
} => {
self.match_relations(rel_type, sender, sender_type, user_id, event_type_pattern)?
}
};
Ok(result)
}
/// Evaluates a relation condition.
fn match_relations(
&self,
rel_type: &str,
sender: &Option<Cow<str>>,
sender_type: &Option<Cow<str>>,
user_id: Option<&str>,
event_type_pattern: &Option<Cow<str>>,
) -> Result<bool, Error> {
// First check if relation matching is enabled...
if !self.relation_match_enabled {
return Ok(false);
}
// ... and if there are any relations to match against.
let relations = if let Some(relations) = self.relations.get(rel_type) {
relations
} else {
return Ok(false);
};
// Extract the sender pattern from the condition
let sender_pattern = if let Some(sender) = sender {
Some(sender.as_ref())
} else if let Some(sender_type) = sender_type {
if sender_type == "user_id" {
if let Some(user_id) = user_id {
Some(user_id)
} else {
return Ok(false);
}
} else {
warn!("Unrecognized sender_type: {sender_type}");
return Ok(false);
}
} else {
None
};
let mut sender_compiled_pattern = if let Some(pattern) = sender_pattern {
Some(get_glob_matcher(pattern, GlobMatchType::Whole)?)
} else {
None
};
let mut type_compiled_pattern = if let Some(pattern) = event_type_pattern {
Some(get_glob_matcher(pattern, GlobMatchType::Whole)?)
} else {
None
};
for (relation_sender, event_type) in relations {
if let Some(pattern) = &mut sender_compiled_pattern {
if !pattern.is_match(relation_sender)? {
continue;
}
}
if let Some(pattern) = &mut type_compiled_pattern {
if !pattern.is_match(event_type)? {
continue;
}
}
return Ok(true);
}
Ok(false)
}
/// Evaluates a `event_match` condition.
fn match_event_match(
&self,
event_match: &EventMatchCondition,
user_id: Option<&str>,
) -> Result<bool, Error> {
let pattern = if let Some(pattern) = &event_match.pattern {
pattern
} else if let Some(pattern_type) = &event_match.pattern_type {
// The `pattern_type` can either be "user_id" or "user_localpart",
// either way if we don't have a `user_id` then the condition can't
// match.
let user_id = if let Some(user_id) = user_id {
user_id
} else {
return Ok(false);
};
match &**pattern_type {
"user_id" => user_id,
"user_localpart" => get_localpart_from_id(user_id)?,
_ => return Ok(false),
}
} else {
return Ok(false);
};
let haystack = if let Some(haystack) = self.flattened_keys.get(&*event_match.key) {
haystack
} else {
return Ok(false);
};
// For the content.body we match against "words", but for everything
// else we match against the entire value.
let match_type = if event_match.key == "content.body" {
GlobMatchType::Word
} else {
GlobMatchType::Whole
};
let mut compiled_pattern = get_glob_matcher(pattern, match_type)?;
compiled_pattern.is_match(haystack)
}
/// Match the member count against an 'is' condition
/// The `is` condition can be things like '>2', '==3' or even just '4'.
fn match_member_count(&self, is: &str) -> Result<bool, Error> {
let captures = INEQUALITY_EXPR.captures(is).context("bad 'is' clause")?;
let ineq = captures.get(1).map_or("==", |m| m.as_str());
let rhs: u64 = captures
.get(2)
.context("missing number")?
.as_str()
.parse()?;
let matches = match ineq {
"" | "==" => self.room_member_count == rhs,
"<" => self.room_member_count < rhs,
">" => self.room_member_count > rhs,
">=" => self.room_member_count >= rhs,
"<=" => self.room_member_count <= rhs,
_ => false,
};
Ok(matches)
}
}
#[test]
fn push_rule_evaluator() {
let mut flattened_keys = BTreeMap::new();
flattened_keys.insert("content.body".to_string(), "foo bar bob hello".to_string());
let evaluator = PushRuleEvaluator::py_new(
flattened_keys,
10,
Some(0),
BTreeMap::new(),
BTreeMap::new(),
true,
)
.unwrap();
let result = evaluator.run(&FilteredPushRules::default(), None, Some("bob"));
assert_eq!(result.len(), 3);
}
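The `match_member_count` helper above parses the `is` clause with `INEQUALITY_EXPR` and compares it against `room_member_count`. A hedged sketch of that behaviour, not part of this commit and assuming it sits next to the existing `push_rule_evaluator` test (the test name is hypothetical):

```rust
#[test]
fn member_count_condition_sketch() {
    let evaluator = PushRuleEvaluator::py_new(
        BTreeMap::new(), // no flattened keys are needed for this condition
        10,              // room_member_count
        Some(0),
        BTreeMap::new(),
        BTreeMap::new(),
        true,
    )
    .unwrap();

    // With ten members in the room, "<=10" holds and ">10" does not.
    let le = Condition::Known(KnownCondition::RoomMemberCount {
        is: Some("<=10".into()),
    });
    assert!(evaluator.match_condition(&le, None, None).unwrap());

    let gt = Condition::Known(KnownCondition::RoomMemberCount {
        is: Some(">10".into()),
    });
    assert!(!evaluator.match_condition(&gt, None, None).unwrap());
}
```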

514
rust/src/push/mod.rs Normal file
View file

@ -0,0 +1,514 @@
// Copyright 2022 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! An implementation of Matrix push rules.
//!
//! The `Cow<_>` type is used extensively within this module to allow creating
//! the base rules as constants (in Rust constants can't require explicit
//! allocation atm).
//!
//! ---
//!
//! Push rules is the system used to determine which events trigger a push (and a
//! bump in notification counts).
//!
//! This consists of a list of "push rules" for each user, where a push rule is a
//! pair of "conditions" and "actions". When a user receives an event Synapse
//! iterates over the list of push rules until it finds one where all the conditions
//! match the event, at which point "actions" describe the outcome (e.g. notify,
//! highlight, etc).
//!
//! Push rules are split up into 5 different "kinds" (aka "priority classes"), which
//! are run in order:
//! 1. Override — highest priority rules, e.g. always ignore notices
//! 2. Content — content specific rules, e.g. @ notifications
//! 3. Room — per room rules, e.g. enable/disable notifications for all messages
//! in a room
//! 4. Sender — per sender rules, e.g. never notify for messages from a given
//! user
//! 5. Underride — the lowest priority "default" rules, e.g. notify for every
//! message.
//!
//! The set of "base rules" are the list of rules that every user has by default. A
//! user can modify their copy of the push rules in one of three ways:
//! 1. Adding a new push rule of a certain kind
//! 2. Changing the actions of a base rule
//! 3. Enabling/disabling a base rule.
//!
//! The base rules are split into whether they come before or after a particular
//! kind, so the order of push rule evaluation would be: base rules for before
//! "override" kind, user defined "override" rules, base rules after "override"
//! kind, etc, etc.
use std::borrow::Cow;
use std::collections::{BTreeMap, HashMap, HashSet};
use anyhow::{Context, Error};
use log::warn;
use pyo3::prelude::*;
use pythonize::{depythonize, pythonize};
use serde::de::Error as _;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use self::evaluator::PushRuleEvaluator;
mod base_rules;
pub mod evaluator;
pub mod utils;
/// Called when registering modules with python.
pub fn register_module(py: Python<'_>, m: &PyModule) -> PyResult<()> {
let child_module = PyModule::new(py, "push")?;
child_module.add_class::<PushRule>()?;
child_module.add_class::<PushRules>()?;
child_module.add_class::<FilteredPushRules>()?;
child_module.add_class::<PushRuleEvaluator>()?;
child_module.add_function(wrap_pyfunction!(get_base_rule_ids, m)?)?;
m.add_submodule(child_module)?;
// We need to manually add the module to sys.modules to make `from
// synapse.synapse_rust import push` work.
py.import("sys")?
.getattr("modules")?
.set_item("synapse.synapse_rust.push", child_module)?;
Ok(())
}
#[pyfunction]
fn get_base_rule_ids() -> HashSet<&'static str> {
base_rules::BASE_RULES_BY_ID.keys().copied().collect()
}
/// A single push rule for a user.
#[derive(Debug, Clone)]
#[pyclass(frozen)]
pub struct PushRule {
/// A unique ID for this rule
pub rule_id: Cow<'static, str>,
/// The "kind" of push rule this is (see `PRIORITY_CLASS_MAP` in Python)
#[pyo3(get)]
pub priority_class: i32,
/// The conditions that must all match for actions to be applied
pub conditions: Cow<'static, [Condition]>,
/// The actions to apply if all conditions are met
pub actions: Cow<'static, [Action]>,
/// Whether this is a base rule
#[pyo3(get)]
pub default: bool,
/// Whether this is enabled by default
#[pyo3(get)]
pub default_enabled: bool,
}
#[pymethods]
impl PushRule {
#[staticmethod]
pub fn from_db(
rule_id: String,
priority_class: i32,
conditions: &str,
actions: &str,
) -> Result<PushRule, Error> {
let conditions = serde_json::from_str(conditions).context("parsing conditions")?;
let actions = serde_json::from_str(actions).context("parsing actions")?;
Ok(PushRule {
rule_id: Cow::Owned(rule_id),
priority_class,
conditions,
actions,
default: false,
default_enabled: true,
})
}
#[getter]
fn rule_id(&self) -> &str {
&self.rule_id
}
#[getter]
fn actions(&self) -> Vec<Action> {
self.actions.clone().into_owned()
}
#[getter]
fn conditions(&self) -> Vec<Condition> {
self.conditions.clone().into_owned()
}
fn __repr__(&self) -> String {
format!(
"<PushRule rule_id={}, conditions={:?}, actions={:?}>",
self.rule_id, self.conditions, self.actions
)
}
}
/// The "action" Synapse should perform for a matching push rule.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Action {
DontNotify,
Notify,
Coalesce,
SetTweak(SetTweak),
// An unrecognized custom action.
Unknown(Value),
}
impl IntoPy<PyObject> for Action {
fn into_py(self, py: Python<'_>) -> PyObject {
// When we pass the `Action` struct to Python we want it to be converted
// to a dict. We use `pythonize`, which converts the struct using the
// `serde` serialization.
pythonize(py, &self).expect("valid action")
}
}
/// The body of a `SetTweak` push action.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
pub struct SetTweak {
set_tweak: Cow<'static, str>,
#[serde(skip_serializing_if = "Option::is_none")]
value: Option<TweakValue>,
// This picks up any other fields that may have been added by clients.
// These get added when we convert the `Action` to a python object.
#[serde(flatten)]
other_keys: Value,
}
/// The value of a `set_tweak`.
///
/// We need this (rather than using `TweakValue` directly) so that we can use
/// `&'static str` in the value when defining the constant base rules.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
#[serde(untagged)]
pub enum TweakValue {
String(Cow<'static, str>),
Other(Value),
}
impl Serialize for Action {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
match self {
Action::DontNotify => serializer.serialize_str("dont_notify"),
Action::Notify => serializer.serialize_str("notify"),
Action::Coalesce => serializer.serialize_str("coalesce"),
Action::SetTweak(tweak) => tweak.serialize(serializer),
Action::Unknown(value) => value.serialize(serializer),
}
}
}
/// Simple helper class for deserializing Action from JSON.
#[derive(Deserialize)]
#[serde(untagged)]
enum ActionDeserializeHelper {
Str(String),
SetTweak(SetTweak),
Unknown(Value),
}
impl<'de> Deserialize<'de> for Action {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let helper: ActionDeserializeHelper = Deserialize::deserialize(deserializer)?;
match helper {
ActionDeserializeHelper::Str(s) => match &*s {
"dont_notify" => Ok(Action::DontNotify),
"notify" => Ok(Action::Notify),
"coalesce" => Ok(Action::Coalesce),
_ => Err(D::Error::custom("unrecognized action")),
},
ActionDeserializeHelper::SetTweak(set_tweak) => Ok(Action::SetTweak(set_tweak)),
ActionDeserializeHelper::Unknown(value) => Ok(Action::Unknown(value)),
}
}
}
/// A condition used in push rules to match against an event.
///
/// We need this split as `serde` doesn't give us the ability to have a
/// "catchall" variant in tagged enums.
#[derive(Serialize, Deserialize, Debug, Clone)]
#[serde(untagged)]
pub enum Condition {
/// A recognized condition that we can match against
Known(KnownCondition),
/// An unrecognized condition that we ignore.
Unknown(Value),
}
/// The set of "known" conditions that we can handle.
#[derive(Serialize, Deserialize, Debug, Clone)]
#[serde(rename_all = "snake_case")]
#[serde(tag = "kind")]
pub enum KnownCondition {
EventMatch(EventMatchCondition),
ContainsDisplayName,
RoomMemberCount {
#[serde(skip_serializing_if = "Option::is_none")]
is: Option<Cow<'static, str>>,
},
SenderNotificationPermission {
key: Cow<'static, str>,
},
#[serde(rename = "org.matrix.msc3772.relation_match")]
RelationMatch {
rel_type: Cow<'static, str>,
#[serde(skip_serializing_if = "Option::is_none", rename = "type")]
event_type_pattern: Option<Cow<'static, str>>,
#[serde(skip_serializing_if = "Option::is_none")]
sender: Option<Cow<'static, str>>,
#[serde(skip_serializing_if = "Option::is_none")]
sender_type: Option<Cow<'static, str>>,
},
}
impl IntoPy<PyObject> for Condition {
fn into_py(self, py: Python<'_>) -> PyObject {
pythonize(py, &self).expect("valid condition")
}
}
impl<'source> FromPyObject<'source> for Condition {
fn extract(ob: &'source PyAny) -> PyResult<Self> {
Ok(depythonize(ob)?)
}
}
/// The body of a [`Condition::EventMatch`]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct EventMatchCondition {
pub key: Cow<'static, str>,
#[serde(skip_serializing_if = "Option::is_none")]
pub pattern: Option<Cow<'static, str>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub pattern_type: Option<Cow<'static, str>>,
}
/// The collection of push rules for a user.
#[derive(Debug, Clone, Default)]
#[pyclass(frozen)]
pub struct PushRules {
/// Custom push rules that override a base rule.
overridden_base_rules: HashMap<Cow<'static, str>, PushRule>,
/// Custom rules that come between the prepend/append override base rules.
override_rules: Vec<PushRule>,
/// Custom rules that come before the base content rules.
content: Vec<PushRule>,
/// Custom rules that come before the base room rules.
room: Vec<PushRule>,
/// Custom rules that come before the base sender rules.
sender: Vec<PushRule>,
/// Custom rules that come before the base underride rules.
underride: Vec<PushRule>,
}
#[pymethods]
impl PushRules {
#[new]
pub fn new(rules: Vec<PushRule>) -> PushRules {
let mut push_rules: PushRules = Default::default();
for rule in rules {
if let Some(&o) = base_rules::BASE_RULES_BY_ID.get(&*rule.rule_id) {
push_rules.overridden_base_rules.insert(
rule.rule_id.clone(),
PushRule {
actions: rule.actions.clone(),
..o.clone()
},
);
continue;
}
match rule.priority_class {
5 => push_rules.override_rules.push(rule),
4 => push_rules.content.push(rule),
3 => push_rules.room.push(rule),
2 => push_rules.sender.push(rule),
1 => push_rules.underride.push(rule),
_ => {
warn!(
"Unrecognized priority class for rule {}: {}",
rule.rule_id, rule.priority_class
);
}
}
}
push_rules
}
/// Returns the list of all rules, including base rules, in the order they
/// should be executed in.
fn rules(&self) -> Vec<PushRule> {
self.iter().cloned().collect()
}
}
impl PushRules {
/// Iterates over all the rules, including base rules, in the order they
/// should be executed in.
pub fn iter(&self) -> impl Iterator<Item = &PushRule> {
base_rules::BASE_PREPEND_OVERRIDE_RULES
.iter()
.chain(self.override_rules.iter())
.chain(base_rules::BASE_APPEND_OVERRIDE_RULES.iter())
.chain(self.content.iter())
.chain(base_rules::BASE_APPEND_CONTENT_RULES.iter())
.chain(self.room.iter())
.chain(self.sender.iter())
.chain(self.underride.iter())
.chain(base_rules::BASE_APPEND_UNDERRIDE_RULES.iter())
.map(|rule| {
self.overridden_base_rules
.get(&*rule.rule_id)
.unwrap_or(rule)
})
}
}
/// A wrapper around `PushRules` that checks the enabled state of rules and
/// filters out disabled experimental rules.
#[derive(Debug, Clone, Default)]
#[pyclass(frozen)]
pub struct FilteredPushRules {
push_rules: PushRules,
enabled_map: BTreeMap<String, bool>,
msc3786_enabled: bool,
msc3772_enabled: bool,
}
#[pymethods]
impl FilteredPushRules {
#[new]
pub fn py_new(
push_rules: PushRules,
enabled_map: BTreeMap<String, bool>,
msc3786_enabled: bool,
msc3772_enabled: bool,
) -> Self {
Self {
push_rules,
enabled_map,
msc3786_enabled,
msc3772_enabled,
}
}
/// Returns the list of all rules and their enabled state, including base
/// rules, in the order they should be executed in.
fn rules(&self) -> Vec<(PushRule, bool)> {
self.iter().map(|(r, e)| (r.clone(), e)).collect()
}
}
impl FilteredPushRules {
/// Iterates over all the rules and their enabled state, including base
/// rules, in the order they should be executed in.
fn iter(&self) -> impl Iterator<Item = (&PushRule, bool)> {
self.push_rules
.iter()
.filter(|rule| {
// Ignore disabled experimental push rules
if !self.msc3786_enabled
&& rule.rule_id == "global/override/.org.matrix.msc3786.rule.room.server_acl"
{
return false;
}
if !self.msc3772_enabled
&& rule.rule_id == "global/underride/.org.matrix.msc3772.thread_reply"
{
return false;
}
true
})
.map(|r| {
let enabled = *self
.enabled_map
.get(&*r.rule_id)
.unwrap_or(&r.default_enabled);
(r, enabled)
})
}
}
#[test]
fn test_serialize_condition() {
let condition = Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
key: "content.body".into(),
pattern: Some("coffee".into()),
pattern_type: None,
}));
let json = serde_json::to_string(&condition).unwrap();
assert_eq!(
json,
r#"{"kind":"event_match","key":"content.body","pattern":"coffee"}"#
)
}
#[test]
fn test_deserialize_condition() {
let json = r#"{"kind":"event_match","key":"content.body","pattern":"coffee"}"#;
let _: Condition = serde_json::from_str(json).unwrap();
}
#[test]
fn test_deserialize_custom_condition() {
let json = r#"{"kind":"custom_tag"}"#;
let condition: Condition = serde_json::from_str(json).unwrap();
assert!(matches!(condition, Condition::Unknown(_)));
let new_json = serde_json::to_string(&condition).unwrap();
assert_eq!(json, new_json);
}
#[test]
fn test_deserialize_action() {
let _: Action = serde_json::from_str(r#""notify""#).unwrap();
let _: Action = serde_json::from_str(r#""dont_notify""#).unwrap();
let _: Action = serde_json::from_str(r#""coalesce""#).unwrap();
let _: Action = serde_json::from_str(r#"{"set_tweak": "highlight"}"#).unwrap();
}
#[test]
fn test_custom_action() {
let json = r#"{"some_custom":"action_fields"}"#;
let action: Action = serde_json::from_str(json).unwrap();
assert!(matches!(action, Action::Unknown(_)));
let new_json = serde_json::to_string(&action).unwrap();
assert_eq!(json, new_json);
}
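To connect the serde round-trip tests above with the module documentation, here is a hedged sketch of loading a user-defined underride rule from its database representation and folding it into `PushRules`; it is not part of this commit, the rule ID is made up, and it assumes test scope inside this module:

```rust
#[test]
fn custom_rule_from_db_sketch() {
    // Priority class 1 is the "underride" kind described in the module docs,
    // so this rule runs just before the base underride rules.
    let rule = PushRule::from_db(
        "global/underride/com.example.custom".to_string(),
        1,
        r#"[{"kind":"event_match","key":"content.body","pattern":"coffee"}]"#,
        r#"["notify"]"#,
    )
    .unwrap();
    assert!(!rule.default);

    let rules = PushRules::new(vec![rule]);
    assert!(rules
        .iter()
        .any(|r| r.rule_id == "global/underride/com.example.custom"));
}
```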

215
rust/src/push/utils.rs Normal file
View file

@ -0,0 +1,215 @@
// Copyright 2022 The Matrix.org Foundation C.I.C.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use anyhow::bail;
use anyhow::Context;
use anyhow::Error;
use lazy_static::lazy_static;
use regex;
use regex::Regex;
use regex::RegexBuilder;
lazy_static! {
/// Matches runs of non-wildcard characters followed by wildcard characters.
static ref WILDCARD_RUN: Regex = Regex::new(r"([^\?\*]*)([\?\*]*)").expect("valid regex");
}
/// Extract the localpart from a Matrix style ID
pub(crate) fn get_localpart_from_id(id: &str) -> Result<&str, Error> {
let (localpart, _) = id
.split_once(':')
.with_context(|| format!("ID does not contain colon: {id}"))?;
// We need to strip off the first character, which is the ID type.
if localpart.is_empty() {
bail!("Invalid ID {id}");
}
Ok(&localpart[1..])
}
/// Used by `glob_to_regex` to specify what to match the regex against.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum GlobMatchType {
/// The generated regex will match against the entire input.
Whole,
/// The generated regex will match against words.
Word,
}
/// Convert a "glob" style expression to a regex, anchoring either to the entire
/// input or to individual words.
pub fn glob_to_regex(glob: &str, match_type: GlobMatchType) -> Result<Regex, Error> {
let mut chunks = Vec::new();
// Patterns with wildcards must be simplified to avoid performance cliffs
// - The glob `?**?**?` is equivalent to the glob `???*`
// - The glob `???*` is equivalent to the regex `.{3,}`
for captures in WILDCARD_RUN.captures_iter(glob) {
if let Some(chunk) = captures.get(1) {
chunks.push(regex::escape(chunk.as_str()));
}
if let Some(wildcards) = captures.get(2) {
if wildcards.as_str() == "" {
continue;
}
let question_marks = wildcards.as_str().chars().filter(|c| *c == '?').count();
if wildcards.as_str().contains('*') {
chunks.push(format!(".{{{question_marks},}}"));
} else {
chunks.push(format!(".{{{question_marks}}}"));
}
}
}
let joined = chunks.join("");
let regex_str = match match_type {
GlobMatchType::Whole => format!(r"\A{joined}\z"),
// `^|\W` and `\W|$` handle the case where `pattern` starts or ends with a non-word
// character.
GlobMatchType::Word => format!(r"(?:^|\b|\W){joined}(?:\b|\W|$)"),
};
Ok(RegexBuilder::new(&regex_str)
.case_insensitive(true)
.build()?)
}
/// Compiles the glob into a `Matcher`.
pub fn get_glob_matcher(glob: &str, match_type: GlobMatchType) -> Result<Matcher, Error> {
// There are a number of shortcuts we can make if the glob doesn't contain a
// wild card.
let matcher = if glob.contains(['*', '?']) {
let regex = glob_to_regex(glob, match_type)?;
Matcher::Regex(regex)
} else if match_type == GlobMatchType::Whole {
// If there aren't any wildcards and we're matching the whole thing,
// then we simply can do a case-insensitive string match.
Matcher::Whole(glob.to_lowercase())
} else {
// Otherwise, if we're matching against words then we can first check
// if the haystack contains the glob at all.
Matcher::Word {
word: glob.to_lowercase(),
regex: None,
}
};
Ok(matcher)
}
/// Matches against a glob
pub enum Matcher {
/// Plain regex matching.
Regex(Regex),
/// Case-insensitive equality.
Whole(String),
/// Word matching. `regex` is a cache of calling [`glob_to_regex`] on word.
Word { word: String, regex: Option<Regex> },
}
impl Matcher {
/// Checks if the glob matches the given haystack.
pub fn is_match(&mut self, haystack: &str) -> Result<bool, Error> {
        // We want to do case-insensitive matching, so we convert to
// lowercase first.
let haystack = haystack.to_lowercase();
match self {
Matcher::Regex(regex) => Ok(regex.is_match(&haystack)),
Matcher::Whole(whole) => Ok(whole == &haystack),
Matcher::Word { word, regex } => {
// If we're looking for a literal word, then we first check if
// the haystack contains the word as a substring.
if !haystack.contains(&*word) {
return Ok(false);
}
// If it does contain the word as a substring, then we need to
// check if it is an actual word by testing it against the regex.
let regex = if let Some(regex) = regex {
regex
} else {
let compiled_regex = glob_to_regex(word, GlobMatchType::Word)?;
regex.insert(compiled_regex)
};
Ok(regex.is_match(&haystack))
}
}
}
}
#[test]
fn test_get_domain_from_id() {
get_localpart_from_id("").unwrap_err();
get_localpart_from_id(":").unwrap_err();
get_localpart_from_id(":asd").unwrap_err();
get_localpart_from_id("::as::asad").unwrap_err();
assert_eq!(get_localpart_from_id("@test:foo").unwrap(), "test");
assert_eq!(get_localpart_from_id("@:").unwrap(), "");
assert_eq!(get_localpart_from_id("@test:foo:907").unwrap(), "test");
}
#[test]
fn test_glob() -> Result<(), Error> {
assert_eq!(
glob_to_regex("simple", GlobMatchType::Whole)?.as_str(),
r"\Asimple\z"
);
assert_eq!(
glob_to_regex("simple*", GlobMatchType::Whole)?.as_str(),
r"\Asimple.{0,}\z"
);
assert_eq!(
glob_to_regex("simple?", GlobMatchType::Whole)?.as_str(),
r"\Asimple.{1}\z"
);
assert_eq!(
glob_to_regex("simple?*?*", GlobMatchType::Whole)?.as_str(),
r"\Asimple.{2,}\z"
);
assert_eq!(
glob_to_regex("simple???", GlobMatchType::Whole)?.as_str(),
r"\Asimple.{3}\z"
);
assert_eq!(
glob_to_regex("escape.", GlobMatchType::Whole)?.as_str(),
r"\Aescape\.\z"
);
assert!(glob_to_regex("simple", GlobMatchType::Whole)?.is_match("simple"));
assert!(!glob_to_regex("simple", GlobMatchType::Whole)?.is_match("simples"));
assert!(glob_to_regex("simple*", GlobMatchType::Whole)?.is_match("simples"));
assert!(glob_to_regex("simple?", GlobMatchType::Whole)?.is_match("simples"));
assert!(glob_to_regex("simple*", GlobMatchType::Whole)?.is_match("simple"));
assert!(glob_to_regex("simple", GlobMatchType::Word)?.is_match("some simple."));
assert!(glob_to_regex("simple", GlobMatchType::Word)?.is_match("simple"));
assert!(!glob_to_regex("simple", GlobMatchType::Word)?.is_match("simples"));
assert!(glob_to_regex("@user:foo", GlobMatchType::Word)?.is_match("Some @user:foo test"));
assert!(glob_to_regex("@user:foo", GlobMatchType::Word)?.is_match("@user:foo"));
Ok(())
}
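The comments above describe how runs of wildcards are simplified (`?**?**?` behaves like `???*`, which compiles to `.{3,}`). As an illustration only, here is a minimal Python sketch of the same transformation; Synapse's actual implementation is the Rust `glob_to_regex` above.

```python
import re

# Same idea as WILDCARD_RUN above: alternating runs of literal text and wildcards.
WILDCARD_RUN = re.compile(r"([^?*]*)([?*]*)")

def glob_to_regex_str(glob: str, whole: bool = True) -> str:
    """Sketch of the glob -> regex conversion, mirroring the Rust code above."""
    chunks = []
    for literal, wildcards in WILDCARD_RUN.findall(glob):
        if literal:
            chunks.append(re.escape(literal))
        if not wildcards:
            continue
        question_marks = wildcards.count("?")
        if "*" in wildcards:
            # At least `question_marks` characters, any number more.
            chunks.append(".{%d,}" % question_marks)
        else:
            # Exactly `question_marks` characters.
            chunks.append(".{%d}" % question_marks)
    joined = "".join(chunks)
    if whole:
        return r"\A" + joined + r"\z"
    # Word matching: allow a word boundary or non-word character on either side.
    return r"(?:^|\b|\W)" + joined + r"(?:\b|\W|$)"

# Mirrors the expectations asserted in the Rust tests above.
assert glob_to_regex_str("simple?*?*") == r"\Asimple.{2,}\z"
assert glob_to_regex_str("simple???") == r"\Asimple.{3}\z"
assert glob_to_regex_str("escape.") == r"\Aescape\.\z"
```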

View file

@ -88,10 +88,9 @@ def make_wrapper(factory: Callable[P, R]) -> Callable[P, R]:
@functools.wraps(factory)
def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
# type-ignore: should be redundant once we can use https://github.com/python/mypy/pull/12668
if "strict" not in kwargs: # type: ignore[attr-defined]
if "strict" not in kwargs:
raise MissingStrictInConstrainedTypeException(factory.__name__)
if not kwargs["strict"]: # type: ignore[index]
if not kwargs["strict"]:
raise MissingStrictInConstrainedTypeException(factory.__name__)
return factory(*args, **kwargs)

View file

@ -2,23 +2,16 @@
#
# This script generates SQL files for creating a brand new Synapse DB with the latest
# schema, on both SQLite3 and Postgres.
#
# It does so by having Synapse generate an up-to-date SQLite DB, then running
# synapse_port_db to convert it to Postgres. It then dumps the contents of both.
export PGHOST="localhost"
POSTGRES_DB_NAME="synapse_full_schema.$$"
SQLITE_SCHEMA_FILE="schema.sql.sqlite"
SQLITE_ROWS_FILE="rows.sql.sqlite"
POSTGRES_SCHEMA_FILE="full.sql.postgres"
POSTGRES_ROWS_FILE="rows.sql.postgres"
POSTGRES_MAIN_DB_NAME="synapse_full_schema_main.$$"
POSTGRES_COMMON_DB_NAME="synapse_full_schema_common.$$"
POSTGRES_STATE_DB_NAME="synapse_full_schema_state.$$"
REQUIRED_DEPS=("matrix-synapse" "psycopg2")
usage() {
echo
echo "Usage: $0 -p <postgres_username> -o <path> [-c] [-n] [-h]"
echo "Usage: $0 -p <postgres_username> -o <path> [-c] [-n <schema number>] [-h]"
echo
echo "-p <postgres_username>"
echo " Username to connect to local postgres instance. The password will be requested"
@ -27,11 +20,19 @@ usage() {
echo " CI mode. Prints every command that the script runs."
echo "-o <path>"
echo " Directory to output full schema files to."
echo "-n <schema number>"
echo " Schema number for the new snapshot. Used to set the location of files within "
echo " the output directory, mimicking that of synapse/storage/schemas."
echo " Defaults to 9999."
echo "-h"
echo " Display this help text."
echo ""
echo " NB: make sure to run this against the *oldest* supported version of postgres,"
echo " or else pg_dump might output non-backwards-compatible syntax."
}
while getopts "p:co:h" opt; do
SCHEMA_NUMBER="9999"
while getopts "p:co:hn:" opt; do
case $opt in
p)
export PGUSER=$OPTARG
@ -48,6 +49,9 @@ while getopts "p:co:h" opt; do
usage
exit
;;
n)
SCHEMA_NUMBER="$OPTARG"
;;
\?)
echo "ERROR: Invalid option: -$OPTARG" >&2
usage
@ -95,12 +99,21 @@ cd "$(dirname "$0")/.."
TMPDIR=$(mktemp -d)
KEY_FILE=$TMPDIR/test.signing.key # default Synapse signing key path
SQLITE_CONFIG=$TMPDIR/sqlite.conf
SQLITE_DB=$TMPDIR/homeserver.db
SQLITE_MAIN_DB=$TMPDIR/main.db
SQLITE_STATE_DB=$TMPDIR/state.db
SQLITE_COMMON_DB=$TMPDIR/common.db
POSTGRES_CONFIG=$TMPDIR/postgres.conf
# Ensure these files are deleted on script exit
# TODO: the trap should also drop the temp postgres DB
trap 'rm -rf $TMPDIR' EXIT
cleanup() {
echo "Cleaning up temporary sqlite database and config files..."
rm -r "$TMPDIR"
echo "Cleaning up temporary Postgres database..."
dropdb --if-exists "$POSTGRES_COMMON_DB_NAME"
dropdb --if-exists "$POSTGRES_MAIN_DB_NAME"
dropdb --if-exists "$POSTGRES_STATE_DB_NAME"
}
trap 'cleanup' EXIT
cat > "$SQLITE_CONFIG" <<EOF
server_name: "test"
@ -110,10 +123,22 @@ macaroon_secret_key: "abcde"
report_stats: false
database:
name: "sqlite3"
args:
database: "$SQLITE_DB"
databases:
common:
name: "sqlite3"
data_stores: []
args:
database: "$SQLITE_COMMON_DB"
main:
name: "sqlite3"
data_stores: ["main"]
args:
database: "$SQLITE_MAIN_DB"
state:
name: "sqlite3"
data_stores: ["state"]
args:
database: "$SQLITE_STATE_DB"
# Suppress the key server warning.
trusted_key_servers: []
@ -127,13 +152,32 @@ macaroon_secret_key: "abcde"
report_stats: false
database:
name: "psycopg2"
args:
user: "$PGUSER"
host: "$PGHOST"
password: "$PGPASSWORD"
database: "$POSTGRES_DB_NAME"
databases:
common:
name: "psycopg2"
data_stores: []
args:
user: "$PGUSER"
host: "$PGHOST"
password: "$PGPASSWORD"
database: "$POSTGRES_COMMON_DB_NAME"
main:
name: "psycopg2"
data_stores: ["main"]
args:
user: "$PGUSER"
host: "$PGHOST"
password: "$PGPASSWORD"
database: "$POSTGRES_MAIN_DB_NAME"
state:
name: "psycopg2"
data_stores: ["state"]
args:
user: "$PGUSER"
host: "$PGHOST"
password: "$PGPASSWORD"
database: "$POSTGRES_STATE_DB_NAME"
# Suppress the key server warning.
trusted_key_servers: []
@ -148,33 +192,105 @@ echo "Running db background jobs..."
synapse/_scripts/update_synapse_database.py --database-config "$SQLITE_CONFIG" --run-background-updates
# Create the PostgreSQL database.
echo "Creating postgres database..."
createdb --lc-collate=C --lc-ctype=C --template=template0 "$POSTGRES_DB_NAME"
echo "Creating postgres databases..."
createdb --lc-collate=C --lc-ctype=C --template=template0 "$POSTGRES_COMMON_DB_NAME"
createdb --lc-collate=C --lc-ctype=C --template=template0 "$POSTGRES_MAIN_DB_NAME"
createdb --lc-collate=C --lc-ctype=C --template=template0 "$POSTGRES_STATE_DB_NAME"
echo "Running db background jobs..."
synapse/_scripts/update_synapse_database.py --database-config "$POSTGRES_CONFIG" --run-background-updates
# Delete schema_version, applied_schema_deltas and applied_module_schemas tables
# Also delete any shadow tables from fts4
echo "Dropping unwanted db tables..."
SQL="
# Some common tables are created and updated by Synapse itself and do not belong in the
# schema.
DROP_APP_MANAGED_TABLES="
DROP TABLE schema_version;
DROP TABLE schema_compat_version;
DROP TABLE applied_schema_deltas;
DROP TABLE applied_module_schemas;
"
sqlite3 "$SQLITE_DB" <<< "$SQL"
psql "$POSTGRES_DB_NAME" -w <<< "$SQL"
# Other common tables are not created by Synapse and do belong in the schema.
# TODO: we could derive DROP_COMMON_TABLES from the dump of the common-only DB. But
# since there's only one table there, I haven't bothered to do so.
DROP_COMMON_TABLES="$DROP_APP_MANAGED_TABLES
DROP TABLE background_updates;
"
echo "Dumping SQLite3 schema to '$OUTPUT_DIR/$SQLITE_SCHEMA_FILE' and '$OUTPUT_DIR/$SQLITE_ROWS_FILE'..."
sqlite3 "$SQLITE_DB" ".schema --indent" > "$OUTPUT_DIR/$SQLITE_SCHEMA_FILE"
sqlite3 "$SQLITE_DB" ".dump --data-only --nosys" > "$OUTPUT_DIR/$SQLITE_ROWS_FILE"
sqlite3 "$SQLITE_COMMON_DB" <<< "$DROP_APP_MANAGED_TABLES"
sqlite3 "$SQLITE_MAIN_DB" <<< "$DROP_COMMON_TABLES"
sqlite3 "$SQLITE_STATE_DB" <<< "$DROP_COMMON_TABLES"
psql "$POSTGRES_COMMON_DB_NAME" -w <<< "$DROP_APP_MANAGED_TABLES"
psql "$POSTGRES_MAIN_DB_NAME" -w <<< "$DROP_COMMON_TABLES"
psql "$POSTGRES_STATE_DB_NAME" -w <<< "$DROP_COMMON_TABLES"
echo "Dumping Postgres schema to '$OUTPUT_DIR/$POSTGRES_SCHEMA_FILE' and '$OUTPUT_DIR/$POSTGRES_ROWS_FILE'..."
pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner "$POSTGRES_DB_NAME" | sed -e '/^$/d' -e '/^--/d' -e 's/public\.//g' -e '/^SET /d' -e '/^SELECT /d' > "$OUTPUT_DIR/$POSTGRES_SCHEMA_FILE"
pg_dump --format=plain --data-only --inserts --no-tablespaces --no-acl --no-owner "$POSTGRES_DB_NAME" | sed -e '/^$/d' -e '/^--/d' -e 's/public\.//g' -e '/^SET /d' -e '/^SELECT /d' > "$OUTPUT_DIR/$POSTGRES_ROWS_FILE"
# For Reasons(TM), SQLite's `.schema` also dumps out "shadow tables", the implementation
# details behind full text search tables. Omit these from the dumps.
echo "Cleaning up temporary Postgres database..."
dropdb $POSTGRES_DB_NAME
sqlite3 "$SQLITE_MAIN_DB" <<< "
DROP TABLE event_search_content;
DROP TABLE event_search_segments;
DROP TABLE event_search_segdir;
DROP TABLE event_search_docsize;
DROP TABLE event_search_stat;
DROP TABLE user_directory_search_content;
DROP TABLE user_directory_search_segments;
DROP TABLE user_directory_search_segdir;
DROP TABLE user_directory_search_docsize;
DROP TABLE user_directory_search_stat;
"
echo "Dumping SQLite3 schema..."
mkdir -p "$OUTPUT_DIR/"{common,main,state}"/full_schemas/$SCHEMA_NUMBER"
sqlite3 "$SQLITE_COMMON_DB" ".schema" > "$OUTPUT_DIR/common/full_schemas/$SCHEMA_NUMBER/full.sql.sqlite"
sqlite3 "$SQLITE_COMMON_DB" ".dump --data-only --nosys" >> "$OUTPUT_DIR/common/full_schemas/$SCHEMA_NUMBER/full.sql.sqlite"
sqlite3 "$SQLITE_MAIN_DB" ".schema" > "$OUTPUT_DIR/main/full_schemas/$SCHEMA_NUMBER/full.sql.sqlite"
sqlite3 "$SQLITE_MAIN_DB" ".dump --data-only --nosys" >> "$OUTPUT_DIR/main/full_schemas/$SCHEMA_NUMBER/full.sql.sqlite"
sqlite3 "$SQLITE_STATE_DB" ".schema" > "$OUTPUT_DIR/state/full_schemas/$SCHEMA_NUMBER/full.sql.sqlite"
sqlite3 "$SQLITE_STATE_DB" ".dump --data-only --nosys" >> "$OUTPUT_DIR/state/full_schemas/$SCHEMA_NUMBER/full.sql.sqlite"
cleanup_pg_schema() {
# Cleanup as follows:
# - Remove empty lines. pg_dump likes to output a lot of these.
# - Remove comment-only lines. pg_dump also likes to output a lot of these to visually
# separate tables etc.
# - Remove "public." prefix --- the schema name.
# - Remove "SET" commands. Last time I ran this, the output commands were
# SET statement_timeout = 0;
# SET lock_timeout = 0;
# SET idle_in_transaction_session_timeout = 0;
# SET client_encoding = 'UTF8';
# SET standard_conforming_strings = on;
# SET check_function_bodies = false;
# SET xmloption = content;
# SET client_min_messages = warning;
# SET row_security = off;
# SET default_table_access_method = heap;
# - Very carefully remove specific SELECT statements. We CANNOT blanket remove all
# SELECT statements because some of those have side-effects which we do want in the
# schema. Last time I ran this, the only SELECTS were
# SELECT pg_catalog.set_config('search_path', '', false);
# and
# SELECT pg_catalog.setval(text, bigint, bool);
# We do want to remove the former, but the latter is important. If the last argument
    # is `true` or omitted, this marks the given integer as having been consumed, so it
    # will NOT appear as the nextval.
sed -e '/^$/d' \
-e '/^--/d' \
-e 's/public\.//g' \
-e '/^SET /d' \
-e '/^SELECT pg_catalog.set_config/d'
}
echo "Dumping Postgres schema..."
pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner "$POSTGRES_COMMON_DB_NAME" | cleanup_pg_schema > "$OUTPUT_DIR/common/full_schemas/$SCHEMA_NUMBER/full.sql.postgres"
pg_dump --format=plain --data-only --inserts --no-tablespaces --no-acl --no-owner "$POSTGRES_COMMON_DB_NAME" | cleanup_pg_schema >> "$OUTPUT_DIR/common/full_schemas/$SCHEMA_NUMBER/full.sql.postgres"
pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner "$POSTGRES_MAIN_DB_NAME" | cleanup_pg_schema > "$OUTPUT_DIR/main/full_schemas/$SCHEMA_NUMBER/full.sql.postgres"
pg_dump --format=plain --data-only --inserts --no-tablespaces --no-acl --no-owner "$POSTGRES_MAIN_DB_NAME" | cleanup_pg_schema >> "$OUTPUT_DIR/main/full_schemas/$SCHEMA_NUMBER/full.sql.postgres"
pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner "$POSTGRES_STATE_DB_NAME" | cleanup_pg_schema > "$OUTPUT_DIR/state/full_schemas/$SCHEMA_NUMBER/full.sql.postgres"
pg_dump --format=plain --data-only --inserts --no-tablespaces --no-acl --no-owner "$POSTGRES_STATE_DB_NAME" | cleanup_pg_schema >> "$OUTPUT_DIR/state/full_schemas/$SCHEMA_NUMBER/full.sql.postgres"
echo "Done! Files dumped to: $OUTPUT_DIR"

View file

@ -29,7 +29,7 @@ class SynapsePlugin(Plugin):
self, fullname: str
) -> Optional[Callable[[MethodSigContext], CallableType]]:
if fullname.startswith(
"synapse.util.caches.descriptors._CachedFunction.__call__"
"synapse.util.caches.descriptors.CachedFunction.__call__"
) or fullname.startswith(
"synapse.util.caches.descriptors._LruCachedFunction.__call__"
):
@ -38,7 +38,7 @@ class SynapsePlugin(Plugin):
def cached_function_method_signature(ctx: MethodSigContext) -> CallableType:
"""Fixes the `_CachedFunction.__call__` signature to be correct.
"""Fixes the `CachedFunction.__call__` signature to be correct.
It already has *almost* the correct signature, except:

View file

@ -0,0 +1,54 @@
from typing import Any, Collection, Dict, Mapping, Optional, Sequence, Set, Tuple, Union
from synapse.types import JsonDict
class PushRule:
@property
def rule_id(self) -> str: ...
@property
def priority_class(self) -> int: ...
@property
def conditions(self) -> Sequence[Mapping[str, str]]: ...
@property
def actions(self) -> Sequence[Union[Mapping[str, Any], str]]: ...
@property
def default(self) -> bool: ...
@property
def default_enabled(self) -> bool: ...
@staticmethod
def from_db(
rule_id: str, priority_class: int, conditions: str, actions: str
) -> "PushRule": ...
class PushRules:
def __init__(self, rules: Collection[PushRule]): ...
def rules(self) -> Collection[PushRule]: ...
class FilteredPushRules:
def __init__(
self,
push_rules: PushRules,
enabled_map: Dict[str, bool],
msc3786_enabled: bool,
msc3772_enabled: bool,
): ...
def rules(self) -> Collection[Tuple[PushRule, bool]]: ...
def get_base_rule_ids() -> Collection[str]: ...
class PushRuleEvaluator:
def __init__(
self,
flattened_keys: Mapping[str, str],
room_member_count: int,
sender_power_level: Optional[int],
notification_power_levels: Mapping[str, int],
relations: Mapping[str, Set[Tuple[str, str]]],
relation_match_enabled: bool,
): ...
def run(
self,
push_rules: FilteredPushRules,
user_id: Optional[str],
display_name: Optional[str],
) -> Collection[dict]: ...

View file

@ -107,10 +107,11 @@ BOOLEAN_COLUMNS = {
"redactions": ["have_censored"],
"room_stats_state": ["is_federatable"],
"local_media_repository": ["safe_from_quarantine"],
"users": ["shadow_banned"],
"users": ["shadow_banned", "approved"],
"e2e_fallback_keys_json": ["used"],
"access_tokens": ["used"],
"device_lists_changes_in_room": ["converted_to_destinations"],
"pushers": ["enabled"],
}

View file

@ -48,10 +48,13 @@ class MockHomeserver(HomeServer):
def run_background_updates(hs: HomeServer) -> None:
store = hs.get_datastores().main
main = hs.get_datastores().main
state = hs.get_datastores().state
async def run_background_updates() -> None:
await store.db_pool.updates.run_background_updates(sleep=False)
await main.db_pool.updates.run_background_updates(sleep=False)
if state:
await state.db_pool.updates.run_background_updates(sleep=False)
# Stop the reactor to exit the script once every background update is run.
reactor.stop()
@ -97,8 +100,11 @@ def main() -> None:
# Load, process and sanity-check the config.
hs_config = yaml.safe_load(args.database_config)
if "database" not in hs_config:
sys.stderr.write("The configuration file must have a 'database' section.\n")
if "database" not in hs_config and "databases" not in hs_config:
sys.stderr.write(
"The configuration file must have a 'database' or 'databases' section. "
"See https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#database"
)
sys.exit(4)
config = HomeServerConfig()

View file

@ -269,3 +269,14 @@ class PublicRoomsFilterFields:
GENERIC_SEARCH_TERM: Final = "generic_search_term"
ROOM_TYPES: Final = "room_types"
class ApprovalNoticeMedium:
"""Identifier for the medium this server will use to serve notice of approval for a
specific user's registration.
As defined in https://github.com/matrix-org/matrix-spec-proposals/blob/babolivier/m_not_approved/proposals/3866-user-not-approved-error.md
"""
NONE = "org.matrix.msc3866.none"
EMAIL = "org.matrix.msc3866.email"

View file

@ -100,6 +100,14 @@ class Codes(str, Enum):
UNREDACTED_CONTENT_DELETED = "FI.MAU.MSC2815_UNREDACTED_CONTENT_DELETED"
# Returned for federation requests where we can't process a request as we
# can't ensure the sending server is in a room which is partial-stated on
# our side.
# Part of MSC3895.
UNABLE_DUE_TO_PARTIAL_STATE = "ORG.MATRIX.MSC3895_UNABLE_DUE_TO_PARTIAL_STATE"
USER_AWAITING_APPROVAL = "ORG.MATRIX.MSC3866_USER_AWAITING_APPROVAL"
class CodeMessageException(RuntimeError):
"""An exception with integer code and message string attributes.
@ -560,6 +568,20 @@ class UnredactedContentDeletedError(SynapseError):
return cs_error(self.msg, self.errcode, **extra)
class NotApprovedError(SynapseError):
def __init__(
self,
msg: str,
approval_notice_medium: str,
):
super().__init__(
code=403,
msg=msg,
errcode=Codes.USER_AWAITING_APPROVAL,
additional_fields={"approval_notice_medium": approval_notice_medium},
)
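A hypothetical call site (not from this commit) showing how `NotApprovedError` and the `ApprovalNoticeMedium` constants fit together; the import paths, the `store` argument and the helper name are assumptions based on the files touched above.

```python
from synapse.api.constants import ApprovalNoticeMedium
from synapse.api.errors import NotApprovedError

async def ensure_user_is_approved(store, user_id: str) -> None:
    """Hypothetical helper: reject a not-yet-approved user with the MSC3866
    error code, telling the client how approval will be communicated."""
    if not await store.is_user_approved(user_id):
        raise NotApprovedError(
            msg="This account is pending approval by a server administrator.",
            approval_notice_medium=ApprovalNoticeMedium.EMAIL,
        )
```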
def cs_error(msg: str, code: str = Codes.UNKNOWN, **kwargs: Any) -> "JsonDict":
"""Utility method for constructing an error response for client-server
interactions.

View file

@ -98,9 +98,7 @@ def register_sighup(func: Callable[P, None], *args: P.args, **kwargs: P.kwargs)
func: Function to be called when sent a SIGHUP signal.
*args, **kwargs: args and kwargs to be passed to the target function.
"""
# This type-ignore should be redundant once we use a mypy release with
# https://github.com/python/mypy/pull/12668.
_sighup_callbacks.append((func, args, kwargs)) # type: ignore[arg-type]
_sighup_callbacks.append((func, args, kwargs))
def start_worker_reactor(

View file

@ -53,9 +53,9 @@ logger = logging.getLogger("synapse.app.admin_cmd")
class AdminCmdSlavedStore(
SlavedFilteringStore,
SlavedDeviceStore,
SlavedPushRuleStore,
SlavedEventStore,
SlavedDeviceStore,
TagsWorkerStore,
DeviceInboxWorkerStore,
AccountDataWorkerStore,

View file

@ -51,11 +51,18 @@ import argparse
import importlib
import itertools
import multiprocessing
import os
import signal
import sys
from typing import Any, Callable, List
from types import FrameType
from typing import Any, Callable, List, Optional
from twisted.internet.main import installReactor
# a list of the original signal handlers, before we installed our custom ones.
# We restore these in our child processes.
_original_signal_handlers: dict[int, Any] = {}
class ProxiedReactor:
"""
@ -105,6 +112,11 @@ def _worker_entrypoint(
sys.argv = args
# reset the custom signal handlers that we installed, so that the children start
# from a clean slate.
for sig, handler in _original_signal_handlers.items():
signal.signal(sig, handler)
from twisted.internet.epollreactor import EPollReactor
proxy_reactor._install_real_reactor(EPollReactor())
@ -167,13 +179,29 @@ def main() -> None:
update_proc.join()
print("===== PREPARED DATABASE =====", file=sys.stderr)
processes: List[multiprocessing.Process] = []
# Install signal handlers to propagate signals to all our children, so that they
# shut down cleanly. This also inhibits our own exit, but that's good: we want to
# wait until the children have exited.
def handle_signal(signum: int, frame: Optional[FrameType]) -> None:
print(
f"complement_fork_starter: Caught signal {signum}. Stopping children.",
file=sys.stderr,
)
for p in processes:
if p.pid:
os.kill(p.pid, signum)
for sig in (signal.SIGINT, signal.SIGTERM):
_original_signal_handlers[sig] = signal.signal(sig, handle_signal)
# At this point, we've imported all the main entrypoints for all the workers.
# Now we basically just fork() out to create the workers we need.
# Because we're using fork(), all the workers get a clone of this launcher's
# memory space and don't need to repeat the work of loading the code!
# Instead of using fork() directly, we use the multiprocessing library,
# which uses fork() on Unix platforms.
processes = []
for (func, worker_args) in zip(worker_functions, args_by_worker):
process = multiprocessing.Process(
target=_worker_entrypoint, args=(func, proxy_reactor, worker_args)

View file

@ -14,10 +14,25 @@
from typing import Any
import attr
from synapse.config._base import Config
from synapse.types import JsonDict
@attr.s(auto_attribs=True, frozen=True, slots=True)
class MSC3866Config:
"""Configuration for MSC3866 (mandating approval for new users)"""
# Whether the base support for the approval process is enabled. This includes the
# ability for administrators to check and update the approval of users, even if no
# approval is currently required.
enabled: bool = False
# Whether to require that new users are approved by an admin before their account
# can be used. Note that this setting is ignored if 'enabled' is false.
require_approval_for_new_accounts: bool = False
class ExperimentalConfig(Config):
"""Config section for enabling experimental features"""
@ -63,7 +78,8 @@ class ExperimentalConfig(Config):
# MSC3706 (server-side support for partial state in /send_join responses)
self.msc3706_enabled: bool = experimental.get("msc3706_enabled", False)
# experimental support for faster joins over federation (msc2775, msc3706)
# experimental support for faster joins over federation
# (MSC2775, MSC3706, MSC3895)
# requires a target server with msc3706_enabled enabled.
self.faster_joins_enabled: bool = experimental.get("faster_joins", False)
@ -82,6 +98,8 @@ class ExperimentalConfig(Config):
# MSC3786 (Add a default push rule to ignore m.room.server_acl events)
self.msc3786_enabled: bool = experimental.get("msc3786_enabled", False)
# MSC3771: Thread read receipts
self.msc3771_enabled: bool = experimental.get("msc3771_enabled", False)
# MSC3772: A push rule for mutual relations.
self.msc3772_enabled: bool = experimental.get("msc3772_enabled", False)
@ -93,3 +111,17 @@ class ExperimentalConfig(Config):
# MSC3852: Expose last seen user agent field on /_matrix/client/v3/devices.
self.msc3852_enabled: bool = experimental.get("msc3852_enabled", False)
# MSC3866: M_USER_AWAITING_APPROVAL error code
raw_msc3866_config = experimental.get("msc3866", {})
self.msc3866 = MSC3866Config(**raw_msc3866_config)
# MSC3881: Remotely toggle push notifications for another client
self.msc3881_enabled: bool = experimental.get("msc3881_enabled", False)
# MSC3882: Allow an existing session to sign in a new session
self.msc3882_enabled: bool = experimental.get("msc3882_enabled", False)
self.msc3882_ui_auth: bool = experimental.get("msc3882_ui_auth", True)
self.msc3882_token_timeout = self.parse_duration(
experimental.get("msc3882_token_timeout", "5m")
)
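A rough sketch of how the `msc3866` block of the experimental config maps onto `MSC3866Config`; the class is re-declared here so the snippet is self-contained, and the values shown are illustrative rather than defaults.

```python
import attr

@attr.s(auto_attribs=True, frozen=True, slots=True)
class MSC3866Config:
    # Re-declared for a self-contained sketch; see the class definition above.
    enabled: bool = False
    require_approval_for_new_accounts: bool = False

# The "msc3866" block of the experimental config is splatted into the attrs
# class, so unknown keys fail loudly and missing keys fall back to defaults.
experimental = {
    "msc3866": {
        "enabled": True,
        "require_approval_for_new_accounts": True,
    }
}
msc3866 = MSC3866Config(**experimental.get("msc3866", {}))
assert msc3866.enabled and msc3866.require_approval_for_new_accounts
```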

View file

@ -43,32 +43,6 @@ class MetricsConfig(Config):
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
self.enable_metrics = config.get("enable_metrics", False)
"""
### `enable_legacy_metrics` (experimental)
**Experimental: this option may be removed or have its behaviour
changed at any time, with no notice.**
Set to `true` to publish both legacy and non-legacy Prometheus metric names,
or to `false` to only publish non-legacy Prometheus metric names.
Defaults to `true`. Has no effect if `enable_metrics` is `false`.
Legacy metric names include:
- metrics containing colons in the name, such as `synapse_util_caches_response_cache:hits`, because colons are supposed to be reserved for user-defined recording rules;
- counters that don't end with the `_total` suffix, such as `synapse_federation_client_sent_edus`, therefore not adhering to the OpenMetrics standard.
These legacy metric names are unconventional and not compliant with OpenMetrics standards.
They are included for backwards compatibility.
Example configuration:
```yaml
enable_legacy_metrics: false
```
See https://github.com/matrix-org/synapse/issues/11106 for context.
*Since v1.67.0.*
"""
self.enable_legacy_metrics = config.get("enable_legacy_metrics", True)
self.report_stats = config.get("report_stats", None)

View file

@ -289,6 +289,10 @@ class _EventInternalMetadata:
"""
return self._dict.get("historical", False)
def is_notifiable(self) -> bool:
"""Whether this event can trigger a push notification"""
return not self.is_outlier() or self.is_out_of_band_membership()
class EventBase(metaclass=abc.ABCMeta):
@property

View file

@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING
from typing import TYPE_CHECKING, Awaitable, Callable, Optional
from synapse.api.constants import MAX_DEPTH, EventContentFields, EventTypes, Membership
from synapse.api.errors import Codes, SynapseError
@ -58,7 +58,12 @@ class FederationBase:
@trace
async def _check_sigs_and_hash(
self, room_version: RoomVersion, pdu: EventBase
self,
room_version: RoomVersion,
pdu: EventBase,
record_failure_callback: Optional[
Callable[[EventBase, str], Awaitable[None]]
] = None,
) -> EventBase:
"""Checks that event is correctly signed by the sending server.
@ -70,6 +75,11 @@ class FederationBase:
Args:
room_version: The room version of the PDU
pdu: the event to be checked
record_failure_callback: A callback to run whenever the given event
fails signature or hash checks. This includes exceptions
                that would normally be thrown/raised but also things like
checking for event tampering where we just return the redacted
event.
Returns:
* the original event if the checks pass
@ -80,7 +90,12 @@ class FederationBase:
InvalidEventSignatureError if the signature check failed. Nothing
will be logged in this case.
"""
await _check_sigs_on_pdu(self.keyring, room_version, pdu)
try:
await _check_sigs_on_pdu(self.keyring, room_version, pdu)
except InvalidEventSignatureError as exc:
if record_failure_callback:
await record_failure_callback(pdu, str(exc))
raise exc
if not check_event_content_hash(pdu):
# let's try to distinguish between failures because the event was
@ -116,6 +131,10 @@ class FederationBase:
"event_id": pdu.event_id,
}
)
if record_failure_callback:
await record_failure_callback(
pdu, "Event content has been tampered with"
)
return redacted_event
spam_check = await self.spam_checker.check_event_for_spam(pdu)

View file

@ -278,7 +278,7 @@ class FederationClient(FederationBase):
pdus = [event_from_pdu_json(p, room_version) for p in transaction_data_pdus]
# Check signatures and hash of pdus, removing any from the list that fail checks
pdus[:] = await self._check_sigs_and_hash_and_fetch(
pdus[:] = await self._check_sigs_and_hash_for_pulled_events_and_fetch(
dest, pdus, room_version=room_version
)
@ -328,7 +328,17 @@ class FederationClient(FederationBase):
# Check signatures are correct.
try:
signed_pdu = await self._check_sigs_and_hash(room_version, pdu)
async def _record_failure_callback(
event: EventBase, cause: str
) -> None:
await self.store.record_event_failed_pull_attempt(
event.room_id, event.event_id, cause
)
signed_pdu = await self._check_sigs_and_hash(
room_version, pdu, _record_failure_callback
)
except InvalidEventSignatureError as e:
errmsg = f"event id {pdu.event_id}: {e}"
logger.warning("%s", errmsg)
@ -547,24 +557,28 @@ class FederationClient(FederationBase):
len(auth_event_map),
)
valid_auth_events = await self._check_sigs_and_hash_and_fetch(
valid_auth_events = await self._check_sigs_and_hash_for_pulled_events_and_fetch(
destination, auth_event_map.values(), room_version
)
valid_state_events = await self._check_sigs_and_hash_and_fetch(
destination, state_event_map.values(), room_version
valid_state_events = (
await self._check_sigs_and_hash_for_pulled_events_and_fetch(
destination, state_event_map.values(), room_version
)
)
return valid_state_events, valid_auth_events
@trace
async def _check_sigs_and_hash_and_fetch(
async def _check_sigs_and_hash_for_pulled_events_and_fetch(
self,
origin: str,
pdus: Collection[EventBase],
room_version: RoomVersion,
) -> List[EventBase]:
"""Checks the signatures and hashes of a list of events.
"""
Checks the signatures and hashes of a list of pulled events we got from
federation and records any signature failures as failed pull attempts.
If a PDU fails its signature check then we check if we have it in
the database, and if not then request it from the sender's server (if that
@ -597,11 +611,17 @@ class FederationClient(FederationBase):
valid_pdus: List[EventBase] = []
async def _record_failure_callback(event: EventBase, cause: str) -> None:
await self.store.record_event_failed_pull_attempt(
event.room_id, event.event_id, cause
)
async def _execute(pdu: EventBase) -> None:
valid_pdu = await self._check_sigs_and_hash_and_fetch_one(
pdu=pdu,
origin=origin,
room_version=room_version,
record_failure_callback=_record_failure_callback,
)
if valid_pdu:
@ -618,6 +638,9 @@ class FederationClient(FederationBase):
pdu: EventBase,
origin: str,
room_version: RoomVersion,
record_failure_callback: Optional[
Callable[[EventBase, str], Awaitable[None]]
] = None,
) -> Optional[EventBase]:
"""Takes a PDU and checks its signatures and hashes.
@ -634,6 +657,11 @@ class FederationClient(FederationBase):
origin
pdu
room_version
record_failure_callback: A callback to run whenever the given event
fails signature or hash checks. This includes exceptions
                that would normally be thrown/raised but also things like
checking for event tampering where we just return the redacted
event.
Returns:
The PDU (possibly redacted) if it has valid signatures and hashes.
@ -641,7 +669,9 @@ class FederationClient(FederationBase):
"""
try:
return await self._check_sigs_and_hash(room_version, pdu)
return await self._check_sigs_and_hash(
room_version, pdu, record_failure_callback
)
except InvalidEventSignatureError as e:
logger.warning(
"Signature on retrieved event %s was invalid (%s). "
@ -694,7 +724,7 @@ class FederationClient(FederationBase):
auth_chain = [event_from_pdu_json(p, room_version) for p in res["auth_chain"]]
signed_auth = await self._check_sigs_and_hash_and_fetch(
signed_auth = await self._check_sigs_and_hash_for_pulled_events_and_fetch(
destination, auth_chain, room_version=room_version
)
@ -1401,7 +1431,7 @@ class FederationClient(FederationBase):
event_from_pdu_json(e, room_version) for e in content.get("events", [])
]
signed_events = await self._check_sigs_and_hash_and_fetch(
signed_events = await self._check_sigs_and_hash_for_pulled_events_and_fetch(
destination, events, room_version=room_version
)
except HttpResponseException as e:

View file

@ -530,13 +530,10 @@ class FederationServer(FederationBase):
async def on_room_state_request(
self, origin: str, room_id: str, event_id: str
) -> Tuple[int, JsonDict]:
await self._event_auth_handler.assert_host_in_room(room_id, origin)
origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, room_id)
in_room = await self._event_auth_handler.check_host_in_room(room_id, origin)
if not in_room:
raise AuthError(403, "Host not in room.")
# we grab the linearizer to protect ourselves from servers which hammer
# us. In theory we might already have the response to this query
# in the cache so we could return it without waiting for the linearizer
@ -560,13 +557,10 @@ class FederationServer(FederationBase):
if not event_id:
raise NotImplementedError("Specify an event")
await self._event_auth_handler.assert_host_in_room(room_id, origin)
origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, room_id)
in_room = await self._event_auth_handler.check_host_in_room(room_id, origin)
if not in_room:
raise AuthError(403, "Host not in room.")
resp = await self._state_ids_resp_cache.wrap(
(room_id, event_id),
self._on_state_ids_request_compute,
@ -955,6 +949,7 @@ class FederationServer(FederationBase):
self, origin: str, room_id: str, event_id: str
) -> Tuple[int, Dict[str, Any]]:
async with self._server_linearizer.queue((origin, room_id)):
await self._event_auth_handler.assert_host_in_room(room_id, origin)
origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, room_id)

View file

@ -646,10 +646,25 @@ class _TransactionQueueManager:
# We start by fetching device related EDUs, i.e device updates and to
# device messages. We have to keep 2 free slots for presence and rr_edus.
limit = MAX_EDUS_PER_TRANSACTION - 2
device_edu_limit = MAX_EDUS_PER_TRANSACTION - 2
# We prioritize to-device messages so that existing encryption channels
# work. We also keep a few slots spare (by reducing the limit) so that
# we can still trickle out some device list updates.
(
to_device_edus,
device_stream_id,
) = await self.queue._get_to_device_message_edus(device_edu_limit - 10)
if to_device_edus:
self._device_stream_id = device_stream_id
else:
self.queue._last_device_stream_id = device_stream_id
device_edu_limit -= len(to_device_edus)
device_update_edus, dev_list_id = await self.queue._get_device_update_edus(
limit
device_edu_limit
)
if device_update_edus:
@ -657,18 +672,6 @@ class _TransactionQueueManager:
else:
self.queue._last_device_list_stream_id = dev_list_id
limit -= len(device_update_edus)
(
to_device_edus,
device_stream_id,
) = await self.queue._get_to_device_message_edus(limit)
if to_device_edus:
self._device_stream_id = device_stream_id
else:
self.queue._last_device_stream_id = device_stream_id
pending_edus = device_update_edus + to_device_edus
# Now add the read receipt EDU.
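A worked sketch of the slot accounting described above. The `MAX_EDUS_PER_TRANSACTION` value and the helper are illustrative, not taken from this commit; only the `- 2` and `- 10` reservations mirror the code.

```python
MAX_EDUS_PER_TRANSACTION = 100  # illustrative value

def edu_budget(num_to_device: int, num_device_updates: int) -> dict:
    """Sketch: reserve 2 slots for presence/read receipts, let to-device
    messages take most of the rest while keeping 10 slots spare so device
    list updates can still trickle out, then hand the remainder to device
    list updates."""
    device_edu_limit = MAX_EDUS_PER_TRANSACTION - 2
    to_device = min(num_to_device, device_edu_limit - 10)
    device_edu_limit -= to_device
    device_updates = min(num_device_updates, device_edu_limit)
    return {"to_device": to_device, "device_updates": device_updates}

# With a large backlog of both kinds, to-device messages get most of the slots
# but at least 10 remain for device list updates:
assert edu_budget(500, 500) == {"to_device": 88, "device_updates": 10}
```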

View file

@ -32,6 +32,7 @@ class AdminHandler:
self.store = hs.get_datastores().main
self._storage_controllers = hs.get_storage_controllers()
self._state_storage_controller = self._storage_controllers.state
self._msc3866_enabled = hs.config.experimental.msc3866.enabled
async def get_whois(self, user: UserID) -> JsonDict:
connections = []
@ -75,6 +76,10 @@ class AdminHandler:
"is_guest",
}
if self._msc3866_enabled:
# Only include the approved flag if support for MSC3866 is enabled.
user_info_to_return.add("approved")
# Restrict returned keys to a known set.
user_info_dict = {
key: value

View file

@ -63,7 +63,6 @@ from synapse.http.server import finish_request, respond_with_html
from synapse.http.site import SynapseRequest
from synapse.logging.context import defer_to_thread
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.roommember import ProfileInfo
from synapse.types import JsonDict, Requester, UserID
from synapse.util import stringutils as stringutils
from synapse.util.async_helpers import delay_cancellation, maybe_awaitable
@ -1010,6 +1009,17 @@ class AuthHandler:
return res[0]
return None
async def is_user_approved(self, user_id: str) -> bool:
"""Checks if a user is approved and therefore can be allowed to log in.
Args:
user_id: the user to check the approval status of.
Returns:
A boolean that is True if the user is approved, False otherwise.
"""
return await self.store.is_user_approved(user_id)
async def _find_user_id_and_pwd_hash(
self, user_id: str
) -> Optional[Tuple[str, str]]:
@ -1687,41 +1697,10 @@ class AuthHandler:
respond_with_html(request, 403, self._sso_account_deactivated_template)
return
profile = await self.store.get_profileinfo(
user_profile_data = await self.store.get_profileinfo(
UserID.from_string(registered_user_id).localpart
)
self._complete_sso_login(
registered_user_id,
auth_provider_id,
request,
client_redirect_url,
extra_attributes,
new_user=new_user,
user_profile_data=profile,
auth_provider_session_id=auth_provider_session_id,
)
def _complete_sso_login(
self,
registered_user_id: str,
auth_provider_id: str,
request: Request,
client_redirect_url: str,
extra_attributes: Optional[JsonDict] = None,
new_user: bool = False,
user_profile_data: Optional[ProfileInfo] = None,
auth_provider_session_id: Optional[str] = None,
) -> None:
"""
The synchronous portion of complete_sso_login.
This exists purely for backwards compatibility of synapse.module_api.ModuleApi.
"""
if user_profile_data is None:
user_profile_data = ProfileInfo(None, None)
# Store any extra attributes which will be passed in the login response.
# Note that this is per-user so it may overwrite a previous value, this
# is considered OK since the newest SSO attributes should be most valid.

View file

@ -130,6 +130,9 @@ class CasHandler:
except PartialDownloadError as pde:
# Twisted raises this error if the connection is closed,
# even if that's being used old-http style to signal end-of-data
# Assertion is for mypy's benefit. Error.response is Optional[bytes],
# but a PartialDownloadError should always have a non-None response.
assert pde.response is not None
body = pde.response
except HttpResponseException as e:
description = (

View file

@ -195,7 +195,9 @@ class DeviceWorkerHandler:
possibly_changed = set(changed)
possibly_left = set()
for room_id in rooms_changed:
current_state_ids = await self._state_storage.get_current_state_ids(room_id)
current_state_ids = await self._state_storage.get_current_state_ids(
room_id, await_full_state=False
)
# The user may have left the room
# TODO: Check if they actually did or if we were just invited.
@ -234,7 +236,8 @@ class DeviceWorkerHandler:
# mapping from event_id -> state_dict
prev_state_ids = await self._state_storage.get_state_ids_for_events(
event_ids
event_ids,
await_full_state=False,
)
# Check if we've joined the room? If so we just blindly add all the users to
@ -270,11 +273,9 @@ class DeviceWorkerHandler:
possibly_left = possibly_changed | possibly_left
# Double check if we still share rooms with the given user.
users_rooms = await self.store.get_rooms_for_users_with_stream_ordering(
possibly_left
)
users_rooms = await self.store.get_rooms_for_users(possibly_left)
for changed_user_id, entries in users_rooms.items():
if any(e.room_id in room_ids for e in entries):
if any(rid in room_ids for rid in entries):
possibly_left.discard(changed_user_id)
else:
possibly_joined.discard(changed_user_id)
@ -306,6 +307,17 @@ class DeviceWorkerHandler:
"self_signing_key": self_signing_key,
}
async def handle_room_un_partial_stated(self, room_id: str) -> None:
"""Handles sending appropriate device list updates in a room that has
gone from partial to full state.
"""
# TODO(faster_joins): worker mode support
# https://github.com/matrix-org/synapse/issues/12994
logger.error(
"Trying handling device list state for partial join: not supported on workers."
)
class DeviceHandler(DeviceWorkerHandler):
def __init__(self, hs: "HomeServer"):
@ -688,11 +700,15 @@ class DeviceHandler(DeviceWorkerHandler):
# Ignore any users that aren't ours
if self.hs.is_mine_id(user_id):
hosts = set(
await self._storage_controllers.state.get_current_hosts_in_room(
await self._storage_controllers.state.get_current_hosts_in_room_or_partial_state_approximation(
room_id
)
)
hosts.discard(self.server_name)
# For rooms with partial state, `hosts` is merely an
# approximation. When we transition to a full state room, we
# will have to send out device list updates to any servers we
# missed.
# Check if we've already sent this update to some hosts
if current_stream_id == stream_id:
@ -739,6 +755,95 @@ class DeviceHandler(DeviceWorkerHandler):
finally:
self._handle_new_device_update_is_processing = False
async def handle_room_un_partial_stated(self, room_id: str) -> None:
"""Handles sending appropriate device list updates in a room that has
gone from partial to full state.
"""
# We defer to the device list updater to handle pending remote device
# list updates.
await self.device_list_updater.handle_room_un_partial_stated(room_id)
# Replay local updates.
(
join_event_id,
device_lists_stream_id,
) = await self.store.get_join_event_id_and_device_lists_stream_id_for_partial_state(
room_id
)
# Get the local device list changes that have happened in the room since
# we started joining. If there are no updates there's nothing left to do.
changes = await self.store.get_device_list_changes_in_room(
room_id, device_lists_stream_id
)
local_changes = {(u, d) for u, d in changes if self.hs.is_mine_id(u)}
if not local_changes:
return
# Note: We have persisted the full state at this point, we just haven't
# cleared the `partial_room` flag.
join_state_ids = await self._state_storage.get_state_ids_for_event(
join_event_id, await_full_state=False
)
current_state_ids = await self.store.get_partial_current_state_ids(room_id)
# Now we need to work out all servers that might have been in the room
# at any point during our join.
# First we look for any membership states that have changed between the
# initial join and now...
all_keys = set(join_state_ids)
all_keys.update(current_state_ids)
potentially_changed_hosts = set()
for etype, state_key in all_keys:
if etype != EventTypes.Member:
continue
prev = join_state_ids.get((etype, state_key))
current = current_state_ids.get((etype, state_key))
if prev != current:
potentially_changed_hosts.add(get_domain_from_id(state_key))
# ... then we add all the hosts that are currently joined to the room...
current_hosts_in_room = await self.store.get_current_hosts_in_room(room_id)
potentially_changed_hosts.update(current_hosts_in_room)
# ... and finally we remove any hosts that we were told about, as we
# will have sent device list updates to those hosts when they happened.
known_hosts_at_join = await self.store.get_partial_state_servers_at_join(
room_id
)
potentially_changed_hosts.difference_update(known_hosts_at_join)
potentially_changed_hosts.discard(self.server_name)
if not potentially_changed_hosts:
# Nothing to do.
return
logger.info(
"Found %d changed hosts to send device list updates to",
len(potentially_changed_hosts),
)
for user_id, device_id in local_changes:
await self.store.add_device_list_outbound_pokes(
user_id=user_id,
device_id=device_id,
room_id=room_id,
stream_id=None,
hosts=potentially_changed_hosts,
context=None,
)
# Notify things that device lists need to be sent out.
self.notifier.notify_replication()
for host in potentially_changed_hosts:
self.federation_sender.send_device_messages(host, immediate=False)
def _update_device_from_client_ips(
device: JsonDict, client_ips: Mapping[Tuple[str, str], Mapping[str, Any]]
@ -829,6 +934,16 @@ class DeviceListUpdater:
)
return
# Check if we are partially joining any rooms. If so we need to store
# all device list updates so that we can handle them correctly once we
# know who is in the room.
partial_rooms = await self.store.get_partial_state_rooms_and_servers()
if partial_rooms:
await self.store.add_remote_device_list_to_pending(
user_id,
device_id,
)
room_ids = await self.store.get_rooms_for_user(user_id)
if not room_ids:
# We don't share any rooms with this user. Ignore update, as we
@ -1168,3 +1283,35 @@ class DeviceListUpdater:
device_ids.append(verify_key.version)
return device_ids
async def handle_room_un_partial_stated(self, room_id: str) -> None:
"""Handles sending appropriate device list updates in a room that has
gone from partial to full state.
"""
pending_updates = (
await self.store.get_pending_remote_device_list_updates_for_room(room_id)
)
for user_id, device_id in pending_updates:
logger.info(
"Got pending device list update in room %s: %s / %s",
room_id,
user_id,
device_id,
)
position = await self.store.add_device_change_to_streams(
user_id,
[device_id],
room_ids=[room_id],
)
if not position:
# This should only happen if there are no updates, which
# shouldn't happen when we've passed in a non-empty set of
# device IDs.
continue
self.device_handler.notifier.on_new_event(
StreamKeyType.DEVICE_LIST, position, rooms=[room_id]
)

View file

@ -31,7 +31,6 @@ from synapse.events import EventBase
from synapse.events.builder import EventBuilder
from synapse.events.snapshot import EventContext
from synapse.types import StateMap, get_domain_from_id
from synapse.util.metrics import Measure
if TYPE_CHECKING:
from synapse.server import HomeServer
@ -156,9 +155,33 @@ class EventAuthHandler:
Codes.UNABLE_TO_GRANT_JOIN,
)
async def check_host_in_room(self, room_id: str, host: str) -> bool:
with Measure(self._clock, "check_host_in_room"):
return await self._store.is_host_joined(room_id, host)
async def is_host_in_room(self, room_id: str, host: str) -> bool:
return await self._store.is_host_joined(room_id, host)
async def assert_host_in_room(
self, room_id: str, host: str, allow_partial_state_rooms: bool = False
) -> None:
"""
Asserts that the host is in the room, or raises an AuthError.
If the room is partial-stated, we raise an AuthError with the
UNABLE_DUE_TO_PARTIAL_STATE error code, unless `allow_partial_state_rooms` is true.
If allow_partial_state_rooms is True and the room is partial-stated,
this function may return an incorrect result as we are not able to fully
track server membership in a room without full state.
"""
if not allow_partial_state_rooms and await self._store.is_partial_state_room(
room_id
):
raise AuthError(
403,
"Unable to authorise you right now; room is partial-stated here.",
errcode=Codes.UNABLE_DUE_TO_PARTIAL_STATE,
)
if not await self.is_host_in_room(room_id, host):
raise AuthError(403, "Host not in room.")
async def check_restricted_join_rules(
self,

View file

@ -38,7 +38,7 @@ from signedjson.sign import verify_signed_json
from unpaddedbase64 import decode_base64
from synapse import event_auth
from synapse.api.constants import EventContentFields, EventTypes, Membership
from synapse.api.constants import MAX_DEPTH, EventContentFields, EventTypes, Membership
from synapse.api.errors import (
AuthError,
CodeMessageException,
@ -149,6 +149,8 @@ class FederationHandler:
self.http_client = hs.get_proxied_blacklisted_http_client()
self._replication = hs.get_replication_data_handler()
self._federation_event_handler = hs.get_federation_event_handler()
self._device_handler = hs.get_device_handler()
self._bulk_push_rule_evaluator = hs.get_bulk_push_rule_evaluator()
self._clean_room_for_join_client = ReplicationCleanRoomRestServlet.make_client(
hs
@ -209,7 +211,7 @@ class FederationHandler:
current_depth: int,
limit: int,
*,
processing_start_time: int,
processing_start_time: Optional[int],
) -> bool:
"""
Checks whether the `current_depth` is at or approaching any backfill
@ -221,13 +223,22 @@ class FederationHandler:
room_id: The room to backfill in.
current_depth: The depth to check at for any upcoming backfill points.
limit: The max number of events to request from the remote federated server.
processing_start_time: The time when `maybe_backfill` started
processing. Only used for timing.
processing_start_time: The time when `maybe_backfill` started processing.
Only used for timing. If `None`, no timing observation will be made.
"""
backwards_extremities = [
_BackfillPoint(event_id, depth, _BackfillPointType.BACKWARDS_EXTREMITY)
for event_id, depth in await self.store.get_oldest_event_ids_with_depth_in_room(
room_id
for event_id, depth in await self.store.get_backfill_points_in_room(
room_id=room_id,
current_depth=current_depth,
# We only need to end up with 5 extremities combined with the
# insertion event extremities to make the `/backfill` request
# but fetch an order of magnitude more to make sure there is
# enough even after we filter them by whether visible in the
# history. This isn't fool-proof as all backfill points within
# our limit could be filtered out but seems like a good amount
# to try with at least.
limit=50,
)
]
@ -236,7 +247,12 @@ class FederationHandler:
insertion_events_to_be_backfilled = [
_BackfillPoint(event_id, depth, _BackfillPointType.INSERTION_PONT)
for event_id, depth in await self.store.get_insertion_event_backward_extremities_in_room(
room_id
room_id=room_id,
current_depth=current_depth,
# We only need to end up with 5 extremities combined with
# the backfill points to make the `/backfill` request ...
# (see the other comment above for more context).
limit=50,
)
]
logger.debug(
@ -245,10 +261,6 @@ class FederationHandler:
insertion_events_to_be_backfilled,
)
if not backwards_extremities and not insertion_events_to_be_backfilled:
logger.debug("Not backfilling as no extremeties found.")
return False
# we now have a list of potential places to backpaginate from. We prefer to
# start with the most recent (ie, max depth), so let's sort the list.
sorted_backfill_points: List[_BackfillPoint] = sorted(
@ -269,6 +281,33 @@ class FederationHandler:
sorted_backfill_points,
)
# If we have no backfill points lower than the `current_depth` then
# either we can a) bail or b) still attempt to backfill. We opt to try
# backfilling anyway just in case we do get relevant events.
if not sorted_backfill_points and current_depth != MAX_DEPTH:
logger.debug(
"_maybe_backfill_inner: all backfill points are *after* current depth. Trying again with later backfill points."
)
return await self._maybe_backfill_inner(
room_id=room_id,
# We use `MAX_DEPTH` so that we find all backfill points next
# time (all events are below the `MAX_DEPTH`)
current_depth=MAX_DEPTH,
limit=limit,
# We don't want to start another timing observation from this
# nested recursive call. The top-most call can record the time
# overall otherwise the smaller one will throw off the results.
processing_start_time=None,
)
# Even after recursing with `MAX_DEPTH`, we didn't find any
# backward extremities to backfill from.
if not sorted_backfill_points:
logger.debug(
"_maybe_backfill_inner: Not backfilling as no backward extremeties found."
)
return False
# If we're approaching an extremity we trigger a backfill, otherwise we
# no-op.
#
@ -278,47 +317,16 @@ class FederationHandler:
        # chose a multiple of the limit greater than one in case of failure, but choosing a
# much larger factor will result in triggering a backfill request much
# earlier than necessary.
#
# XXX: shouldn't we do this *after* the filter by depth below? Again, we don't
# care about events that have happened after our current position.
#
max_depth = sorted_backfill_points[0].depth
if current_depth - 2 * limit > max_depth:
max_depth_of_backfill_points = sorted_backfill_points[0].depth
if current_depth - 2 * limit > max_depth_of_backfill_points:
logger.debug(
"Not backfilling as we don't need to. %d < %d - 2 * %d",
max_depth,
max_depth_of_backfill_points,
current_depth,
limit,
)
return False
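To make the threshold concrete, a small worked example with illustrative numbers:

```python
# Worked example of the "do we need to backfill yet?" check above.
current_depth = 1000
limit = 100  # max events we would request from the remote server
max_depth_of_backfill_points = 750

# Backfill is skipped only while the deepest backfill point is more than
# 2 * limit below our current position.
skip_backfill = current_depth - 2 * limit > max_depth_of_backfill_points
assert skip_backfill  # 1000 - 200 = 800 > 750, so no backfill is needed yet
```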
# We ignore extremities that have a greater depth than our current depth
# as:
# 1. we don't really care about getting events that have happened
# after our current position; and
# 2. we have likely previously tried and failed to backfill from that
# extremity, so to avoid getting "stuck" requesting the same
# backfill repeatedly we drop those extremities.
#
# However, we need to check that the filtered extremities are non-empty.
# If they are empty then either we can a) bail or b) still attempt to
# backfill. We opt to try backfilling anyway just in case we do get
# relevant events.
#
filtered_sorted_backfill_points = [
t for t in sorted_backfill_points if t.depth <= current_depth
]
if filtered_sorted_backfill_points:
logger.debug(
"_maybe_backfill_inner: backfill points before current depth: %s",
filtered_sorted_backfill_points,
)
sorted_backfill_points = filtered_sorted_backfill_points
else:
logger.debug(
"_maybe_backfill_inner: all backfill points are *after* current depth. Backfilling anyway."
)
# For performance's sake, we only want to paginate from a particular extremity
# if we can actually see the events we'll get. Otherwise, we'd just spend a lot
# of resources to get redacted events. We check each extremity in turn and
@ -404,11 +412,22 @@ class FederationHandler:
# First we try hosts that are already in the room.
# TODO: HEURISTIC ALERT.
likely_domains = (
await self._storage_controllers.state.get_current_hosts_in_room(room_id)
await self._storage_controllers.state.get_current_hosts_in_room_ordered(
room_id
)
)
async def try_backfill(domains: Collection[str]) -> bool:
# TODO: Should we try multiple of these at a time?
# Number of contacted remote homeservers that have denied our backfill
# request with a 4xx code.
denied_count = 0
# Maximum number of contacted remote homeservers that can deny our
# backfill request with 4xx codes before we give up.
max_denied_count = 5
for dom in domains:
# We don't want to ask our own server for information we don't have
if dom == self.server_name:
@ -427,13 +446,33 @@ class FederationHandler:
continue
except HttpResponseException as e:
if 400 <= e.code < 500:
raise e.to_synapse_error()
logger.warning(
"Backfill denied from %s because %s [%d/%d]",
dom,
e,
denied_count,
max_denied_count,
)
denied_count += 1
if denied_count >= max_denied_count:
return False
continue
logger.info("Failed to backfill from %s because %s", dom, e)
continue
except CodeMessageException as e:
if 400 <= e.code < 500:
raise
logger.warning(
"Backfill denied from %s because %s [%d/%d]",
dom,
e,
denied_count,
max_denied_count,
)
denied_count += 1
if denied_count >= max_denied_count:
return False
continue
logger.info("Failed to backfill from %s because %s", dom, e)
continue
@ -452,10 +491,15 @@ class FederationHandler:
return False
processing_end_time = self.clock.time_msec()
backfill_processing_before_timer.observe(
(processing_end_time - processing_start_time) / 1000
)
# If we have the `processing_start_time`, then we can make an
# observation. We wouldn't have the `processing_start_time` in the case
# where `_maybe_backfill_inner` is recursively called to find any
# backfill points regardless of `current_depth`.
if processing_start_time is not None:
processing_end_time = self.clock.time_msec()
backfill_processing_before_timer.observe(
(processing_end_time - processing_start_time) / 1000
)
success = await try_backfill(likely_domains)
if success:
@ -583,7 +627,11 @@ class FederationHandler:
# Mark the room as having partial state.
# The background process is responsible for unmarking this flag,
# even if the join fails.
await self.store.store_partial_state_room(room_id, ret.servers_in_room)
await self.store.store_partial_state_room(
room_id=room_id,
servers=ret.servers_in_room,
device_lists_stream_id=self.store.get_device_stream_token(),
)
try:
max_stream_id = (
@ -608,6 +656,14 @@ class FederationHandler:
room_id,
)
raise LimitExceededError(msg=e.msg, errcode=e.errcode, retry_after_ms=0)
else:
# Record the join event id for future use (when we finish the full
# join). We have to do this after persisting the event to keep foreign
# key constraints intact.
if ret.partial_state:
await self.store.write_partial_state_rooms_join_event_id(
room_id, event.event_id
)
finally:
# Always kick off the background process that asynchronously fetches
# state for the room.
@ -804,7 +860,7 @@ class FederationHandler:
)
# now check that we are *still* in the room
is_in_room = await self._event_auth_handler.check_host_in_room(
is_in_room = await self._event_auth_handler.is_host_in_room(
room_id, self.server_name
)
if not is_in_room:
@ -946,9 +1002,15 @@ class FederationHandler:
)
context = EventContext.for_outlier(self._storage_controllers)
await self._federation_event_handler.persist_events_and_notify(
event.room_id, [(event, context)]
)
await self._bulk_push_rule_evaluator.action_for_event_by_user(event, context)
try:
await self._federation_event_handler.persist_events_and_notify(
event.room_id, [(event, context)]
)
except Exception:
await self.store.remove_push_actions_from_staging(event.event_id)
raise
return event
@ -1150,9 +1212,7 @@ class FederationHandler:
async def on_backfill_request(
self, origin: str, room_id: str, pdu_list: List[str], limit: int
) -> List[EventBase]:
in_room = await self._event_auth_handler.check_host_in_room(room_id, origin)
if not in_room:
raise AuthError(403, "Host not in room.")
await self._event_auth_handler.assert_host_in_room(room_id, origin)
# Synapse asks for 100 events per backfill request. Do not allow more.
limit = min(limit, 100)
@ -1198,21 +1258,17 @@ class FederationHandler:
event_id, allow_none=True, allow_rejected=True
)
if event:
in_room = await self._event_auth_handler.check_host_in_room(
event.room_id, origin
)
if not in_room:
raise AuthError(403, "Host not in room.")
events = await filter_events_for_server(
self._storage_controllers, origin, [event]
)
event = events[0]
return event
else:
if not event:
return None
await self._event_auth_handler.assert_host_in_room(event.room_id, origin)
events = await filter_events_for_server(
self._storage_controllers, origin, [event]
)
event = events[0]
return event
async def on_get_missing_events(
self,
origin: str,
@ -1221,9 +1277,7 @@ class FederationHandler:
latest_events: List[str],
limit: int,
) -> List[EventBase]:
in_room = await self._event_auth_handler.check_host_in_room(room_id, origin)
if not in_room:
raise AuthError(403, "Host not in room.")
await self._event_auth_handler.assert_host_in_room(room_id, origin)
# Only allow up to 20 events to be retrieved per request.
limit = min(limit, 20)
@ -1257,7 +1311,7 @@ class FederationHandler:
"state_key": target_user_id,
}
if await self._event_auth_handler.check_host_in_room(room_id, self.hs.hostname):
if await self._event_auth_handler.is_host_in_room(room_id, self.hs.hostname):
room_version_obj = await self.store.get_room_version(room_id)
builder = self.event_builder_factory.for_room_version(
room_version_obj, event_dict
@ -1622,6 +1676,9 @@ class FederationHandler:
# https://github.com/matrix-org/synapse/issues/12994
await self.state_handler.update_current_state(room_id)
logger.info("Handling any pending device list updates")
await self._device_handler.handle_room_un_partial_stated(room_id)
logger.info("Clearing partial-state flag for %s", room_id)
success = await self.store.clear_partial_state_room(room_id)
if success:

View file

@ -238,7 +238,7 @@ class FederationEventHandler:
#
# Note that if we were never in the room then we would have already
# dropped the event, since we wouldn't know the room version.
is_in_room = await self._event_auth_handler.check_host_in_room(
is_in_room = await self._event_auth_handler.is_host_in_room(
room_id, self._server_name
)
if not is_in_room:
@ -866,11 +866,6 @@ class FederationEventHandler:
event.room_id, event_id, str(err)
)
return
except Exception as exc:
await self._store.record_event_failed_pull_attempt(
event.room_id, event_id, str(exc)
)
raise exc
try:
try:
@ -913,11 +908,6 @@ class FederationEventHandler:
logger.warning("Pulled event %s failed history check.", event_id)
else:
raise
except Exception as exc:
await self._store.record_event_failed_pull_attempt(
event.room_id, event_id, str(exc)
)
raise exc
@trace
async def _compute_event_context_with_maybe_missing_prevs(
@ -2170,6 +2160,7 @@ class FederationEventHandler:
if instance != self._instance_name:
# Limit the number of events sent over replication. We choose 200
# here as that is what we default to in `max_request_body_size(..)`
result = {}
try:
for batch in batch_iter(event_and_contexts, 200):
result = await self._send_events(

File diff suppressed because it is too large

View file

@ -16,14 +16,17 @@ from typing import TYPE_CHECKING, List, Optional, Union
import attr
from synapse.api.errors import SynapseError, UnrecognizedRequestError
from synapse.push.baserules import BASE_RULE_IDS
from synapse.storage.push_rule import RuleNotFoundException
from synapse.synapse_rust.push import get_base_rule_ids
from synapse.types import JsonDict
if TYPE_CHECKING:
from synapse.server import HomeServer
BASE_RULE_IDS = get_base_rule_ids()
@attr.s(slots=True, frozen=True, auto_attribs=True)
class RuleSpec:
scope: str

View file

@ -63,6 +63,8 @@ class ReceiptsHandler:
self.clock = self.hs.get_clock()
self.state = hs.get_state_handler()
self._msc3771_enabled = hs.config.experimental.msc3771_enabled
async def _received_remote_receipt(self, origin: str, content: JsonDict) -> None:
"""Called when we receive an EDU of type m.receipt from a remote HS."""
receipts = []
@ -70,7 +72,7 @@ class ReceiptsHandler:
# If we're not in the room just ditch the event entirely. This is
# probably an old server that has come back and thinks we're still in
# the room (or we've been rejoined to the room by a state reset).
is_in_room = await self.event_auth_handler.check_host_in_room(
is_in_room = await self.event_auth_handler.is_host_in_room(
room_id, self.server_name
)
if not is_in_room:
@ -91,13 +93,23 @@ class ReceiptsHandler:
)
continue
# Check if these receipts apply to a thread.
thread_id = None
data = user_values.get("data", {})
if self._msc3771_enabled and isinstance(data, dict):
thread_id = data.get("thread_id")
# If the thread ID is invalid, consider it missing.
if not isinstance(thread_id, str):
thread_id = None
receipts.append(
ReadReceipt(
room_id=room_id,
receipt_type=receipt_type,
user_id=user_id,
event_ids=user_values["event_ids"],
data=user_values.get("data", {}),
thread_id=thread_id,
data=data,
)
)
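For orientation, the federation payload this loop walks over has roughly the following shape (the standard m.receipt EDU content, with the `thread_id` field added by MSC3771; all IDs below are illustrative, not taken from the change itself):
# Hypothetical m.receipt EDU content as handled above; IDs are made up.
content = {
    "!room:example.org": {
        "m.read": {
            "@alice:remote.example": {
                "event_ids": ["$event_a"],
                # MSC3771: a receipt may be scoped to a thread. A missing or
                # non-string thread_id is treated as None by the code above.
                "data": {"ts": 1666000000000, "thread_id": "$thread_root"},
            }
        }
    }
}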
@ -114,6 +126,7 @@ class ReceiptsHandler:
receipt.receipt_type,
receipt.user_id,
receipt.event_ids,
receipt.thread_id,
receipt.data,
)
@ -146,7 +159,12 @@ class ReceiptsHandler:
return True
async def received_client_receipt(
self, room_id: str, receipt_type: str, user_id: str, event_id: str,
self,
room_id: str,
receipt_type: str,
user_id: str,
event_id: str,
thread_id: Optional[str],
extra_content: Optional[JsonDict] = None,
) -> None:
"""Called when a client tells us a local user has read up to the given
@ -157,6 +175,7 @@ class ReceiptsHandler:
receipt_type=receipt_type,
user_id=user_id,
event_ids=[event_id],
thread_id=thread_id,
data={"ts": int(self.clock.time_msec()), **(extra_content or {})},
)

View file

@ -225,6 +225,7 @@ class RegistrationHandler:
by_admin: bool = False,
user_agent_ips: Optional[List[Tuple[str, str]]] = None,
auth_provider_id: Optional[str] = None,
approved: bool = False,
) -> str:
"""Registers a new client on the server.
@ -251,6 +252,8 @@ class RegistrationHandler:
user_agent_ips: Tuples of user-agents and IP addresses used
during the registration process.
auth_provider_id: The SSO IdP the user used, if any.
approved: True if the new user should be considered already
approved by an administrator.
Returns:
The registered user_id.
Raises:
@ -314,6 +317,7 @@ class RegistrationHandler:
user_type=user_type,
address=address,
shadow_banned=shadow_banned,
approved=approved,
)
profile = await self.store.get_profileinfo(localpart)
@ -702,6 +706,7 @@ class RegistrationHandler:
user_type: Optional[str] = None,
address: Optional[str] = None,
shadow_banned: bool = False,
approved: bool = False,
) -> None:
"""Register user in the datastore.
@ -720,6 +725,7 @@ class RegistrationHandler:
api.constants.UserTypes, or None for a normal user.
address: the IP address used to perform the registration.
shadow_banned: Whether to shadow-ban the user
approved: Whether to mark the user as approved by an administrator
"""
if self.hs.config.worker.worker_app:
await self._register_client(
@ -733,6 +739,7 @@ class RegistrationHandler:
user_type=user_type,
address=address,
shadow_banned=shadow_banned,
approved=approved,
)
else:
await self.store.register_user(
@ -745,6 +752,7 @@ class RegistrationHandler:
admin=admin,
user_type=user_type,
shadow_banned=shadow_banned,
approved=approved,
)
# Only call the account validity module(s) on the main process, to avoid
@ -1004,7 +1012,7 @@ class RegistrationHandler:
assert user_tuple
token_id = user_tuple.token_id
await self.pusher_pool.add_pusher(
await self.pusher_pool.add_or_update_pusher(
user_id=user_id,
access_token=token_id,
kind="email",
@ -1012,7 +1020,7 @@ class RegistrationHandler:
app_display_name="Email Notifications",
device_display_name=threepid["address"],
pushkey=threepid["address"],
lang=None, # We don't know a user's language here
lang=None,
data={},
)

View file

@ -78,6 +78,7 @@ class RelationsHandler:
direction: str = "b",
from_token: Optional[StreamToken] = None,
to_token: Optional[StreamToken] = None,
include_original_event: bool = False,
) -> JsonDict:
"""Get related events of a event, ordered by topological ordering.
@ -94,6 +95,7 @@ class RelationsHandler:
oldest first (`"f"`).
from_token: Fetch rows from the given token, or from the start if None.
to_token: Fetch rows up to the given token, or up to the end if None.
include_original_event: Whether to include the parent event.
Returns:
The pagination chunk.
@ -138,25 +140,24 @@ class RelationsHandler:
is_peeking=(member_event_id is None),
)
now = self._clock.time_msec()
# Do not bundle aggregations when retrieving the original event because
# we want the content before relations are applied to it.
original_event = self._event_serializer.serialize_event(
event, now, bundle_aggregations=None
)
# The relations returned for the requested event do include their
# bundled aggregations.
aggregations = await self.get_bundled_aggregations(
events, requester.user.to_string()
)
serialized_events = self._event_serializer.serialize_events(
events, now, bundle_aggregations=aggregations
)
return_value = {
"chunk": serialized_events,
"original_event": original_event,
now = self._clock.time_msec()
return_value: JsonDict = {
"chunk": self._event_serializer.serialize_events(
events, now, bundle_aggregations=aggregations
),
}
if include_original_event:
# Do not bundle aggregations when retrieving the original event because
# we want the content before relations are applied to it.
return_value["original_event"] = self._event_serializer.serialize_event(
event, now, bundle_aggregations=None
)
if next_token:
return_value["next_batch"] = await next_token.to_string(self._main_store)

View file

@ -301,8 +301,7 @@ class RoomCreationHandler:
# now send the tombstone
await self.event_creation_handler.handle_new_client_event(
requester=requester,
event=tombstone_event,
context=tombstone_context,
events_and_context=[(tombstone_event, tombstone_context)],
)
state_filter = StateFilter.from_types(
@ -716,7 +715,7 @@ class RoomCreationHandler:
if (
self._server_notices_mxid is not None
and requester.user.to_string() == self._server_notices_mxid
and user_id == self._server_notices_mxid
):
# allow the server notices mxid to create rooms
is_requester_admin = True
@ -1054,7 +1053,9 @@ class RoomCreationHandler:
creator_join_profile: Optional[JsonDict] = None,
ratelimit: bool = True,
) -> Tuple[int, str, int]:
"""Sends the initial events into a new room.
"""Sends the initial events into a new room. Sends the room creation, membership,
and power level events into the room sequentially, then creates and batches up the
rest of the events to persist as a batch to the DB.
`power_level_content_override` doesn't apply when initial state has
power level state event content.
@ -1065,13 +1066,23 @@ class RoomCreationHandler:
"""
creator_id = creator.user.to_string()
event_keys = {"room_id": room_id, "sender": creator_id, "state_key": ""}
depth = 1
# the last event sent/persisted to the db
last_sent_event_id: Optional[str] = None
def create(etype: str, content: JsonDict, **kwargs: Any) -> JsonDict:
# the most recently created event
prev_event: List[str] = []
# a map of (event type, state key) -> event_id. We collect these mappings as events
# are created (but not persisted to the db) to determine the state for future
# created events (as this info can't be pulled from the db)
state_map: MutableStateMap[str] = {}
# current_state_group of last event created. Used for computing event context of
# events to be batched
current_state_group = None
def create_event_dict(etype: str, content: JsonDict, **kwargs: Any) -> JsonDict:
e = {"type": etype, "content": content}
e.update(event_keys)
@ -1079,32 +1090,51 @@ class RoomCreationHandler:
return e
async def send(etype: str, content: JsonDict, **kwargs: Any) -> int:
nonlocal last_sent_event_id
async def create_event(
etype: str,
content: JsonDict,
for_batch: bool,
**kwargs: Any,
) -> Tuple[EventBase, synapse.events.snapshot.EventContext]:
nonlocal depth
nonlocal prev_event
event = create(etype, content, **kwargs)
logger.debug("Sending %s in new room", etype)
# Allow these events to be sent even if the user is shadow-banned to
# allow the room creation to complete.
(
sent_event,
last_stream_id,
) = await self.event_creation_handler.create_and_send_nonmember_event(
event_dict = create_event_dict(etype, content, **kwargs)
new_event, new_context = await self.event_creation_handler.create_event(
creator,
event,
event_dict,
prev_event_ids=prev_event,
depth=depth,
state_map=state_map,
for_batch=for_batch,
current_state_group=current_state_group,
)
depth += 1
prev_event = [new_event.event_id]
state_map[(new_event.type, new_event.state_key)] = new_event.event_id
return new_event, new_context
async def send(
event: EventBase,
context: synapse.events.snapshot.EventContext,
creator: Requester,
) -> int:
nonlocal last_sent_event_id
ev = await self.event_creation_handler.handle_new_client_event(
requester=creator,
events_and_context=[(event, context)],
ratelimit=False,
ignore_shadow_ban=True,
# Note: we don't pass state_event_ids here because this triggers
# an additional query per event to look them up from the events table.
prev_event_ids=[last_sent_event_id] if last_sent_event_id else [],
depth=depth,
)
last_sent_event_id = sent_event.event_id
depth += 1
last_sent_event_id = ev.event_id
return last_stream_id
# we know it was persisted, so must have a stream ordering
assert ev.internal_metadata.stream_ordering
return ev.internal_metadata.stream_ordering
try:
config = self._presets_dict[preset_config]
@ -1114,9 +1144,13 @@ class RoomCreationHandler:
)
creation_content.update({"creator": creator_id})
await send(etype=EventTypes.Create, content=creation_content)
creation_event, creation_context = await create_event(
EventTypes.Create, creation_content, False
)
logger.debug("Sending %s in new room", EventTypes.Member)
await send(creation_event, creation_context, creator)
# Room create event must exist at this point
assert last_sent_event_id is not None
member_event_id, _ = await self.room_member_handler.update_membership(
@ -1130,15 +1164,22 @@ class RoomCreationHandler:
prev_event_ids=[last_sent_event_id],
depth=depth,
)
last_sent_event_id = member_event_id
prev_event = [member_event_id]
# update the depth and state map here as the membership event has been created
# through a different code path
depth += 1
state_map[(EventTypes.Member, creator.user.to_string())] = member_event_id
# We treat the power levels override specially as this needs to be one
# of the first events that get sent into a room.
pl_content = initial_state.pop((EventTypes.PowerLevels, ""), None)
if pl_content is not None:
last_sent_stream_id = await send(
etype=EventTypes.PowerLevels, content=pl_content
power_event, power_context = await create_event(
EventTypes.PowerLevels, pl_content, False
)
current_state_group = power_context._state_group
await send(power_event, power_context, creator)
else:
power_level_content: JsonDict = {
"users": {creator_id: 9001},
@ -1181,48 +1222,71 @@ class RoomCreationHandler:
# apply those.
if power_level_content_override:
power_level_content.update(power_level_content_override)
last_sent_stream_id = await send(
etype=EventTypes.PowerLevels, content=power_level_content
pl_event, pl_context = await create_event(
EventTypes.PowerLevels,
power_level_content,
False,
)
current_state_group = pl_context._state_group
await send(pl_event, pl_context, creator)
events_to_send = []
if room_alias and (EventTypes.CanonicalAlias, "") not in initial_state:
last_sent_stream_id = await send(
etype=EventTypes.CanonicalAlias,
content={"alias": room_alias.to_string()},
room_alias_event, room_alias_context = await create_event(
EventTypes.CanonicalAlias, {"alias": room_alias.to_string()}, True
)
current_state_group = room_alias_context._state_group
events_to_send.append((room_alias_event, room_alias_context))
if (EventTypes.JoinRules, "") not in initial_state:
last_sent_stream_id = await send(
etype=EventTypes.JoinRules, content={"join_rule": config["join_rules"]}
join_rules_event, join_rules_context = await create_event(
EventTypes.JoinRules,
{"join_rule": config["join_rules"]},
True,
)
current_state_group = join_rules_context._state_group
events_to_send.append((join_rules_event, join_rules_context))
if (EventTypes.RoomHistoryVisibility, "") not in initial_state:
last_sent_stream_id = await send(
etype=EventTypes.RoomHistoryVisibility,
content={"history_visibility": config["history_visibility"]},
visibility_event, visibility_context = await create_event(
EventTypes.RoomHistoryVisibility,
{"history_visibility": config["history_visibility"]},
True,
)
current_state_group = visibility_context._state_group
events_to_send.append((visibility_event, visibility_context))
if config["guest_can_join"]:
if (EventTypes.GuestAccess, "") not in initial_state:
last_sent_stream_id = await send(
etype=EventTypes.GuestAccess,
content={EventContentFields.GUEST_ACCESS: GuestAccess.CAN_JOIN},
guest_access_event, guest_access_context = await create_event(
EventTypes.GuestAccess,
{EventContentFields.GUEST_ACCESS: GuestAccess.CAN_JOIN},
True,
)
current_state_group = guest_access_context._state_group
events_to_send.append((guest_access_event, guest_access_context))
for (etype, state_key), content in initial_state.items():
last_sent_stream_id = await send(
etype=etype, state_key=state_key, content=content
event, context = await create_event(
etype, content, True, state_key=state_key
)
current_state_group = context._state_group
events_to_send.append((event, context))
if config["encrypted"]:
last_sent_stream_id = await send(
etype=EventTypes.RoomEncryption,
encryption_event, encryption_context = await create_event(
EventTypes.RoomEncryption,
{"algorithm": RoomEncryptionAlgorithms.DEFAULT},
True,
state_key="",
content={"algorithm": RoomEncryptionAlgorithms.DEFAULT},
)
events_to_send.append((encryption_event, encryption_context))
return last_sent_stream_id, last_sent_event_id, depth
last_event = await self.event_creation_handler.handle_new_client_event(
creator, events_to_send, ignore_shadow_ban=True
)
assert last_event.internal_metadata.stream_ordering is not None
return last_event.internal_metadata.stream_ordering, last_event.event_id, depth
def _generate_room_id(self) -> str:
"""Generates a random room ID.
@ -1488,7 +1552,9 @@ class TimestampLookupHandler:
)
likely_domains = (
await self._storage_controllers.state.get_current_hosts_in_room(room_id)
await self._storage_controllers.state.get_current_hosts_in_room_ordered(
room_id
)
)
# Loop through each homeserver candidate until we get a successful response

View file

@ -398,8 +398,7 @@ class RoomBatchHandler:
await self.create_requester_for_user_id_from_app_service(
event.sender, app_service_requester.app_service
),
event=event,
context=context,
events_and_context=[(event, context)],
)
return event_ids

View file

@ -322,6 +322,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
require_consent: bool = True,
outlier: bool = False,
historical: bool = False,
origin_server_ts: Optional[int] = None,
) -> Tuple[str, int]:
"""
Internal membership update function to get an existing event or create
@ -361,6 +362,8 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
historical: Indicates whether the message is being inserted
back in time around some existing events. This is used to skip
a few checks and mark the event as backfilled.
origin_server_ts: The origin_server_ts to use if a new event is created. Uses
the current timestamp if set to None.
Returns:
Tuple of event ID and stream ordering position
@ -399,6 +402,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
"state_key": user_id,
# For backwards compatibility:
"membership": membership,
"origin_server_ts": origin_server_ts,
},
txn_id=txn_id,
allow_no_prev_events=allow_no_prev_events,
@ -432,8 +436,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
with opentracing.start_active_span("handle_new_client_event"):
result_event = await self.event_creation_handler.handle_new_client_event(
requester,
event,
context,
events_and_context=[(event, context)],
extra_users=[target],
ratelimit=ratelimit,
)
@ -505,6 +508,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
prev_event_ids: Optional[List[str]] = None,
state_event_ids: Optional[List[str]] = None,
depth: Optional[int] = None,
origin_server_ts: Optional[int] = None,
) -> Tuple[str, int]:
"""Update a user's membership in a room.
@ -543,6 +547,8 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
depth: Override the depth used to order the event in the DAG.
Should normally be set to None, which will cause the depth to be calculated
based on the prev_events.
origin_server_ts: The origin_server_ts to use if a new event is created. Uses
the current timestamp if set to None.
Returns:
A tuple of the new event ID and stream ID.
@ -584,6 +590,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
prev_event_ids=prev_event_ids,
state_event_ids=state_event_ids,
depth=depth,
origin_server_ts=origin_server_ts,
)
return result
@ -607,6 +614,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
prev_event_ids: Optional[List[str]] = None,
state_event_ids: Optional[List[str]] = None,
depth: Optional[int] = None,
origin_server_ts: Optional[int] = None,
) -> Tuple[str, int]:
"""Helper for update_membership.
@ -647,6 +655,8 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
depth: Override the depth used to order the event in the DAG.
Should normally be set to None, which will cause the depth to be calculated
based on the prev_events.
origin_server_ts: The origin_server_ts to use if a new event is created. Uses
the current timestamp if set to None.
Returns:
A tuple of the new event ID and stream ID.
@ -766,6 +776,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
require_consent=require_consent,
outlier=outlier,
historical=historical,
origin_server_ts=origin_server_ts,
)
latest_event_ids = await self.store.get_prev_events_for_room(room_id)
@ -1011,6 +1022,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
content=content,
require_consent=require_consent,
outlier=outlier,
origin_server_ts=origin_server_ts,
)
async def _should_perform_remote_join(
@ -1131,8 +1143,8 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
logger.info("Transferring room state from %s to %s", old_room_id, room_id)
# Find all local users that were in the old room and copy over each user's state
users = await self.store.get_users_in_room(old_room_id)
await self.copy_user_state_on_room_upgrade(old_room_id, room_id, users)
local_users = await self.store.get_local_users_in_room(old_room_id)
await self.copy_user_state_on_room_upgrade(old_room_id, room_id, local_users)
# Add new room to the room directory if the old room was there
# Remove old room from the room directory
@ -1232,7 +1244,10 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
raise SynapseError(403, "This room has been blocked on this server")
event = await self.event_creation_handler.handle_new_client_event(
requester, event, context, extra_users=[target_user], ratelimit=ratelimit
requester,
events_and_context=[(event, context)],
extra_users=[target_user],
ratelimit=ratelimit,
)
prev_member_event_id = prev_state_ids.get(
@ -1840,8 +1855,7 @@ class RoomMemberMasterHandler(RoomMemberHandler):
result_event = await self.event_creation_handler.handle_new_client_event(
requester,
event,
context,
events_and_context=[(event, context)],
extra_users=[UserID.from_string(target_user)],
)
# we know it was persisted, so must have a stream ordering

View file

@ -609,7 +609,7 @@ class RoomSummaryHandler:
# If this is a request over federation, check if the host is in the room or
# has a user who could join the room.
elif origin:
if await self._event_auth_handler.check_host_in_room(
if await self._event_auth_handler.is_host_in_room(
room_id, origin
) or await self._store.is_host_invited(room_id, origin):
return True
@ -624,9 +624,7 @@ class RoomSummaryHandler:
await self._event_auth_handler.get_rooms_that_allow_join(state_ids)
)
for space_id in allowed_rooms:
if await self._event_auth_handler.check_host_in_room(
space_id, origin
):
if await self._event_auth_handler.is_host_in_room(space_id, origin):
return True
logger.info(

View file

@ -187,6 +187,19 @@ class SendEmailHandler:
multipart_msg["To"] = email_address
multipart_msg["Date"] = email.utils.formatdate()
multipart_msg["Message-ID"] = email.utils.make_msgid()
# Discourage automatic responses to Synapse's emails.
# Per RFC 3834, automatic responses should not be sent if the "Auto-Submitted"
# header is present with any value other than "no". See
# https://www.rfc-editor.org/rfc/rfc3834.html#section-5.1
multipart_msg["Auto-Submitted"] = "auto-generated"
# Also include a Microsoft-Exchange specific header:
# https://learn.microsoft.com/en-us/openspecs/exchange_server_protocols/ms-oxcmail/ced68690-498a-4567-9d14-5c01f974d8b1
# which suggests it can take the value "All" to "suppress all auto-replies",
# or a comma separated list of auto-reply classes to suppress.
# The following stack overflow question has a little more context:
# https://stackoverflow.com/a/25324691/5252017
# https://stackoverflow.com/a/61646381/5252017
multipart_msg["X-Auto-Response-Suppress"] = "All"
multipart_msg.attach(text_part)
multipart_msg.attach(html_part)
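A self-contained sketch of the same header trick applied to any outgoing notification mail (the addresses and body are placeholders; the header values mirror the comments above):
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import email.utils

msg = MIMEMultipart("alternative")
msg["From"] = "noreply@example.org"
msg["To"] = "user@example.org"
msg["Date"] = email.utils.formatdate()
msg["Message-ID"] = email.utils.make_msgid()
# RFC 3834: mark the mail as machine-generated so well-behaved systems
# do not send automatic responses to it.
msg["Auto-Submitted"] = "auto-generated"
# Exchange-specific equivalent: suppress all classes of auto-reply.
msg["X-Auto-Response-Suppress"] = "All"
msg.attach(MIMEText("You have new messages.", "plain"))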

View file

@ -128,6 +128,9 @@ class SsoIdentityProvider(Protocol):
@attr.s(auto_attribs=True)
class UserAttributes:
# NB: This struct is documented in docs/sso_mapping_providers.md so that users can
# populate it with data from their own mapping providers.
# the localpart of the mxid that the mapper has assigned to the user.
# if `None`, the mapper has not picked a userid, and the user should be prompted to
# enter one.
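As context for how this struct gets populated, a hedged sketch of constructing it with plausible values; the fields beyond `localpart` (display_name, emails) are assumed from docs/sso_mapping_providers.md rather than shown in this hunk:
# Hedged sketch only; field names other than `localpart` are assumptions.
from synapse.handlers.sso import UserAttributes

attrs = UserAttributes(
    localpart=None,           # None => the user will be prompted to pick a username
    display_name="Alice",
    emails=["alice@example.org"],
)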
@ -144,6 +147,9 @@ class UsernameMappingSession:
# A unique identifier for this SSO provider, e.g. "oidc" or "saml".
auth_provider_id: str
# An optional session ID from the IdP.
auth_provider_session_id: Optional[str]
# user ID on the IdP server
remote_user_id: str
@ -461,6 +467,7 @@ class SsoHandler:
client_redirect_url,
next_step_url,
extra_login_attributes,
auth_provider_session_id,
)
user_id = await self._register_mapped_user(
@ -582,6 +589,7 @@ class SsoHandler:
client_redirect_url: str,
next_step_url: bytes,
extra_login_attributes: Optional[JsonDict],
auth_provider_session_id: Optional[str],
) -> NoReturn:
"""Creates a UsernameMappingSession and redirects the browser
@ -604,6 +612,8 @@ class SsoHandler:
extra_login_attributes: An optional dictionary of extra
attributes to be provided to the client in the login response.
auth_provider_session_id: An optional session ID from the IdP.
Raises:
RedirectException
"""
@ -612,6 +622,7 @@ class SsoHandler:
now = self._clock.time_msec()
session = UsernameMappingSession(
auth_provider_id=auth_provider_id,
auth_provider_session_id=auth_provider_session_id,
remote_user_id=remote_user_id,
display_name=attributes.display_name,
emails=attributes.emails,
@ -965,6 +976,7 @@ class SsoHandler:
session.client_redirect_url,
session.extra_login_attributes,
new_user=True,
auth_provider_session_id=session.auth_provider_session_id,
)
def _expire_old_sessions(self) -> None:

View file

@ -1190,7 +1190,9 @@ class SyncHandler:
room_id: The partial state room to find the remaining memberships for.
members_to_fetch: The memberships to find.
events_with_membership_auth: A mapping from user IDs to events whose auth
events are known to contain their membership.
events would contain their prior membership, if one exists.
Note that join events will not cite a prior membership if the user has
never been in the room before.
found_state_ids: A dict from (type, state_key) -> state_event_id, containing
memberships that have been previously found. Entries in
`members_to_fetch` that have a membership in `found_state_ids` are
@ -1200,6 +1202,10 @@ class SyncHandler:
A dict from ("m.room.member", state_key) -> state_event_id, containing the
memberships missing from `found_state_ids`.
When `events_with_membership_auth` contains a join event for a given user
which does not cite a prior membership, no membership is returned for that
user.
Raises:
KeyError: if `events_with_membership_auth` does not have an entry for a
missing membership. Memberships in `found_state_ids` do not need an
@ -1217,8 +1223,18 @@ class SyncHandler:
if (EventTypes.Member, member) in found_state_ids:
continue
missing_members.add(member)
event_with_membership_auth = events_with_membership_auth[member]
is_join = (
event_with_membership_auth.is_state()
and event_with_membership_auth.type == EventTypes.Member
and event_with_membership_auth.state_key == member
and event_with_membership_auth.content.get("membership")
== Membership.JOIN
)
if not is_join:
# The event must include the desired membership as an auth event, unless
# it's the first join event for a given user.
missing_members.add(member)
auth_event_ids.update(event_with_membership_auth.auth_event_ids())
auth_events = await self.store.get_events(auth_event_ids)
@ -1242,7 +1258,7 @@ class SyncHandler:
auth_event.type == EventTypes.Member
and auth_event.state_key == member
):
missing_members.remove(member)
missing_members.discard(member)
additional_state_ids[
(EventTypes.Member, member)
] = auth_event.event_id
@ -1473,16 +1489,14 @@ class SyncHandler:
since_token.device_list_key
)
if changed_users is not None:
result = await self.store.get_rooms_for_users_with_stream_ordering(
changed_users
)
result = await self.store.get_rooms_for_users(changed_users)
for changed_user_id, entries in result.items():
# Check if the changed user shares any rooms with the user,
# or if the changed user is the syncing user (as we always
# want to include device list updates of their own devices).
if user_id == changed_user_id or any(
e.room_id in joined_rooms for e in entries
rid in joined_rooms for rid in entries
):
users_that_have_changed.add(changed_user_id)
else:
@ -1516,13 +1530,9 @@ class SyncHandler:
newly_left_users.update(left_users)
# Remove any users that we still share a room with.
left_users_rooms = (
await self.store.get_rooms_for_users_with_stream_ordering(
newly_left_users
)
)
left_users_rooms = await self.store.get_rooms_for_users(newly_left_users)
for user_id, entries in left_users_rooms.items():
if any(e.room_id in joined_rooms for e in entries):
if any(rid in joined_rooms for rid in entries):
newly_left_users.discard(user_id)
return DeviceListUpdates(

View file

@ -340,7 +340,7 @@ class TypingWriterHandler(FollowerTypingHandler):
# If we're not in the room just ditch the event entirely. This is
# probably an old server that has come back and thinks we're still in
# the room (or we've been rejoined to the room by a state reset).
is_in_room = await self.event_auth_handler.check_host_in_room(
is_in_room = await self.event_auth_handler.is_host_in_room(
room_id, self.server_name
)
if not is_in_room:
@ -362,11 +362,14 @@ class TypingWriterHandler(FollowerTypingHandler):
)
return
domains = await self._storage_controllers.state.get_current_hosts_in_room(
# Let's check that the origin server is in the room before accepting the typing
# event. We don't want to block waiting on a partial state so take an
# approximation if needed.
domains = await self._storage_controllers.state.get_current_hosts_in_room_or_partial_state_approximation(
room_id
)
if self.server_name in domains:
if user.domain in domains:
logger.info("Got typing update from %s: %r", user_id, content)
now = self.clock.time_msec()
self._member_typing_until[member] = now + FEDERATION_TIMEOUT
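For reference, the EDU being handled here has roughly this shape (the standard m.typing federation EDU; values are illustrative), and the check above only accepts it if the typing user's server is, at least approximately, in the room:
# Illustrative m.typing EDU content as received over federation.
content = {
    "room_id": "!room:example.org",
    "user_id": "@alice:remote.example",
    "typing": True,
}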

View file

@ -119,6 +119,9 @@ class RecaptchaAuthChecker(UserInteractiveAuthChecker):
except PartialDownloadError as pde:
# Twisted is silly
data = pde.response
# For mypy's benefit. A general Error.response is Optional[bytes], but
# a PartialDownloadError.response should be bytes AFAICS.
assert data is not None
resp_body = json_decoder.decode(data.decode("utf-8"))
if "success" in resp_body:

View file

@ -13,7 +13,7 @@
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Tuple
import synapse.metrics
from synapse.api.constants import EventTypes, HistoryVisibility, JoinRules, Membership
@ -379,7 +379,7 @@ class UserDirectoryHandler(StateDeltasHandler):
user_id, event.content.get("displayname"), event.content.get("avatar_url")
)
async def _track_user_joined_room(self, room_id: str, user_id: str) -> None:
async def _track_user_joined_room(self, room_id: str, joining_user_id: str) -> None:
"""Someone's just joined a room. Update `users_in_public_rooms` or
`users_who_share_private_rooms` as appropriate.
@ -390,32 +390,44 @@ class UserDirectoryHandler(StateDeltasHandler):
room_id
)
if is_public:
await self.store.add_users_in_public_rooms(room_id, (user_id,))
await self.store.add_users_in_public_rooms(room_id, (joining_user_id,))
else:
users_in_room = await self.store.get_users_in_room(room_id)
other_users_in_room = [
other
for other in users_in_room
if other != user_id
if other != joining_user_id
and (
# We can't apply any special rules to remote users so
# they're always included
not self.is_mine_id(other)
# Check the special rules whether the local user should be
# included in the user directory
or await self.store.should_include_local_user_in_dir(other)
)
]
to_insert = set()
updates_to_users_who_share_rooms: Set[Tuple[str, str]] = set()
# First, if they're our user then we need to update for every user
if self.is_mine_id(user_id):
# First, if the joining user is our local user then we need an
# update for every other user in the room.
if self.is_mine_id(joining_user_id):
for other_user_id in other_users_in_room:
to_insert.add((user_id, other_user_id))
updates_to_users_who_share_rooms.add(
(joining_user_id, other_user_id)
)
# Next we need to update for every local user in the room
# Next, we need an update for every other local user in the room
# that they now share a room with the joining user.
for other_user_id in other_users_in_room:
if self.is_mine_id(other_user_id):
to_insert.add((other_user_id, user_id))
updates_to_users_who_share_rooms.add(
(other_user_id, joining_user_id)
)
if to_insert:
await self.store.add_users_who_share_private_room(room_id, to_insert)
if updates_to_users_who_share_rooms:
await self.store.add_users_who_share_private_room(
room_id, updates_to_users_who_share_rooms
)
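To make the bookkeeping concrete, a small worked example under the assumption that @a:local and @b:local are local users included in the directory and @c:remote is remote: if @a:local joins a private room already containing the other two, the pairs inserted would be:
# Illustrative contents of updates_to_users_who_share_rooms for that join.
updates_to_users_who_share_rooms = {
    ("@a:local", "@b:local"),   # the joiner paired with each other user
    ("@a:local", "@c:remote"),
    ("@b:local", "@a:local"),   # each other *local* user paired with the joiner
}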
async def _handle_remove_user(self, room_id: str, user_id: str) -> None:
"""Called when when someone leaves a room. The user may be local or remote.

View file

@ -36,6 +36,7 @@ from twisted.web.error import SchemeNotSupported
from twisted.web.http_headers import Headers
from twisted.web.iweb import IAgent, IBodyProducer, IPolicyForHTTPS
from synapse.http import redact_uri
from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint, ProxyCredentials
from synapse.types import ISynapseReactor
@ -220,7 +221,11 @@ class ProxyAgent(_AgentBase):
self._reactor, parsed_uri.host, parsed_uri.port, **self._endpoint_kwargs
)
logger.debug("Requesting %s via %s", uri, endpoint)
logger.debug(
"Requesting %s via %s",
redact_uri(uri.decode("ascii", errors="replace")),
endpoint,
)
if parsed_uri.scheme == b"https":
tls_connection_creator = self._policy_for_https.creatorForNetloc(

View file

@ -705,7 +705,7 @@ class _ByteProducer:
self._request = None
def _encode_json_bytes(json_object: Any) -> bytes:
def _encode_json_bytes(json_object: object) -> bytes:
"""
Encode an object into JSON. Returns an iterator of bytes.
"""
@ -746,7 +746,7 @@ def respond_with_json(
return None
if canonical_json:
encoder = encode_canonical_json
encoder: Callable[[object], bytes] = encode_canonical_json
else:
encoder = _encode_json_bytes

View file

@ -586,7 +586,7 @@ class LoggingContextFilter(logging.Filter):
True to include the record in the log output.
"""
context = current_context()
record.request = self._default_request # type: ignore
record.request = self._default_request
# context should never be None, but if it somehow ends up being, then
# we end up in a death spiral of infinite loops, so let's check, for
@ -594,21 +594,21 @@ class LoggingContextFilter(logging.Filter):
if context is not None:
# Logging is interested in the request ID. Note that for backwards
# compatibility this is stored as the "request" on the record.
record.request = str(context) # type: ignore
record.request = str(context)
# Add some data from the HTTP request.
request = context.request
if request is None:
return True
record.ip_address = request.ip_address # type: ignore
record.site_tag = request.site_tag # type: ignore
record.requester = request.requester # type: ignore
record.authenticated_entity = request.authenticated_entity # type: ignore
record.method = request.method # type: ignore
record.url = request.url # type: ignore
record.protocol = request.protocol # type: ignore
record.user_agent = request.user_agent # type: ignore
record.ip_address = request.ip_address
record.site_tag = request.site_tag
record.requester = request.requester
record.authenticated_entity = request.authenticated_entity
record.method = request.method
record.url = request.url
record.protocol = request.protocol
record.user_agent = request.user_agent
return True

View file

@ -992,9 +992,9 @@ def tag_args(func: Callable[P, R]) -> Callable[P, R]:
# FIXME: We could update this to handle any type of function by ignoring the
# first argument only if it's named `self` or `cls`. This isn't fool-proof
# but handles the idiomatic cases.
for i, arg in enumerate(args[1:], start=1): # type: ignore[index]
for i, arg in enumerate(args[1:], start=1):
set_tag(SynapseTags.FUNC_ARG_PREFIX + argspec.args[i], str(arg))
set_tag(SynapseTags.FUNC_ARGS, str(args[len(argspec.args) :])) # type: ignore[index]
set_tag(SynapseTags.FUNC_ARGS, str(args[len(argspec.args) :]))
set_tag(SynapseTags.FUNC_KWARGS, str(kwargs))
yield

View file

@ -125,7 +125,7 @@ from synapse.types import (
)
from synapse.util import Clock
from synapse.util.async_helpers import maybe_awaitable
from synapse.util.caches.descriptors import cached
from synapse.util.caches.descriptors import CachedFunction, cached
from synapse.util.frozenutils import freeze
if TYPE_CHECKING:
@ -836,29 +836,39 @@ class ModuleApi:
self._store.db_pool.runInteraction(desc, func, *args, **kwargs) # type: ignore[arg-type]
)
def complete_sso_login(
self, registered_user_id: str, request: SynapseRequest, client_redirect_url: str
) -> None:
"""Complete a SSO login by redirecting the user to a page to confirm whether they
want their access token sent to `client_redirect_url`, or redirect them to that
URL with a token directly if the URL matches with one of the whitelisted clients.
def register_cached_function(self, cached_func: CachedFunction) -> None:
"""Register a cached function that should be invalidated across workers.
Invalidation local to a worker can be done directly using `cached_func.invalidate`;
invalidation that needs to reach other workers must go through `invalidate_cache`
on the module API instead.
This is deprecated in favor of complete_sso_login_async.
Added in Synapse v1.11.1.
Added in Synapse v1.69.0.
Args:
registered_user_id: The MXID that has been registered as a previous step of
of this SSO login.
request: The request to respond to.
client_redirect_url: The URL to which to offer to redirect the user (or to
redirect them directly if whitelisted).
cached_func: The cached function that will be registered to receive invalidation
locally and from other workers.
"""
self._auth_handler._complete_sso_login(
registered_user_id,
"<unknown>",
request,
client_redirect_url,
self._store.register_external_cached_function(
f"{cached_func.__module__}.{cached_func.__name__}", cached_func
)
async def invalidate_cache(
self, cached_func: CachedFunction, keys: Tuple[Any, ...]
) -> None:
"""Invalidate a cache entry of a cached function across workers. The cached function
needs to be registered on all workers first with `register_cached_function`.
Added in Synapse v1.69.0.
Args:
cached_func: The cached function that needs an invalidation
keys: keys of the entry to invalidate, usually matching the arguments of the
cached function.
"""
cached_func.invalidate(keys)
await self._store.send_invalidation_to_replication(
f"{cached_func.__module__}.{cached_func.__name__}",
keys,
)
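A rough usage sketch from a module's point of view, assuming a method wrapped with Synapse's internal `cached` decorator (imported here from synapse.util.caches.descriptors, as this diff itself does); the module class, hook name and cache key are illustrative:
from synapse.module_api import ModuleApi
from synapse.util.caches.descriptors import cached

class MyModule:
    def __init__(self, config: dict, api: ModuleApi):
        self._api = api
        # Register so that invalidations sent by other workers reach this cache too.
        self._api.register_cached_function(self.get_widget)

    @cached()
    async def get_widget(self, widget_id: str) -> dict:
        return {"id": widget_id}  # stand-in for an expensive lookup

    async def on_widget_changed(self, widget_id: str) -> None:
        # Drop the entry locally and tell the other workers to do the same.
        await self._api.invalidate_cache(self.get_widget, (widget_id,))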
async def complete_sso_login_async(

View file

@ -116,6 +116,8 @@ class PusherConfig:
last_stream_ordering: int
last_success: Optional[int]
failing_since: Optional[int]
enabled: bool
device_id: Optional[str]
def as_dict(self) -> Dict[str, Any]:
"""Information that can be retrieved about a pusher after creation."""
@ -128,6 +130,8 @@ class PusherConfig:
"lang": self.lang,
"profile_tag": self.profile_tag,
"pushkey": self.pushkey,
"enabled": self.enabled,
"device_id": self.device_id,
}

View file

@ -1,583 +0,0 @@
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2017 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Push rules is the system used to determine which events trigger a push (and a
bump in notification counts).
This consists of a list of "push rules" for each user, where a push rule is a
pair of "conditions" and "actions". When a user receives an event Synapse
iterates over the list of push rules until it finds one where all the conditions
match the event, at which point "actions" describe the outcome (e.g. notify,
highlight, etc).
Push rules are split up into 5 different "kinds" (aka "priority classes"), which
are run in order:
1. Override - highest priority rules, e.g. always ignore notices
2. Content - content specific rules, e.g. @ notifications
3. Room - per room rules, e.g. enable/disable notifications for all messages
in a room
4. Sender - per sender rules, e.g. never notify for messages from a given
user
5. Underride - the lowest priority "default" rules, e.g. notify for every
message.
The set of "base rules" are the list of rules that every user has by default. A
user can modify their copy of the push rules in one of three ways:
1. Adding a new push rule of a certain kind
2. Changing the actions of a base rule
3. Enabling/disabling a base rule.
The base rules are split into whether they come before or after a particular
kind, so the order of push rule evaluation would be: base rules for before
"override" kind, user defined "override" rules, base rules after "override"
kind, etc, etc.
"""
import itertools
import logging
from typing import Dict, Iterator, List, Mapping, Sequence, Tuple, Union
import attr
from synapse.config.experimental import ExperimentalConfig
from synapse.push.rulekinds import PRIORITY_CLASS_MAP
logger = logging.getLogger(__name__)
@attr.s(auto_attribs=True, slots=True, frozen=True)
class PushRule:
"""A push rule
Attributes:
rule_id: a unique ID for this rule
priority_class: what "kind" of push rule this is (see
`PRIORITY_CLASS_MAP` for mapping between int and kind)
conditions: the sequence of conditions that all need to match
actions: the actions to apply if all conditions are met
default: is this a base rule?
default_enabled: is this enabled by default?
"""
rule_id: str
priority_class: int
conditions: Sequence[Mapping[str, str]]
actions: Sequence[Union[str, Mapping]]
default: bool = False
default_enabled: bool = True
@attr.s(auto_attribs=True, slots=True, frozen=True, weakref_slot=False)
class PushRules:
"""A collection of push rules for an account.
Can be iterated over, producing push rules in priority order.
"""
# A mapping from rule ID to push rule that overrides a base rule. These will
# be returned instead of the base rule.
overriden_base_rules: Dict[str, PushRule] = attr.Factory(dict)
# The following stores the custom push rules at each priority class.
#
# We keep these separate (rather than combining into one big list) to avoid
# copying the base rules around all the time.
override: List[PushRule] = attr.Factory(list)
content: List[PushRule] = attr.Factory(list)
room: List[PushRule] = attr.Factory(list)
sender: List[PushRule] = attr.Factory(list)
underride: List[PushRule] = attr.Factory(list)
def __iter__(self) -> Iterator[PushRule]:
# When iterating over the push rules we need to return the base rules
# interspersed at the correct spots.
for rule in itertools.chain(
BASE_PREPEND_OVERRIDE_RULES,
self.override,
BASE_APPEND_OVERRIDE_RULES,
self.content,
BASE_APPEND_CONTENT_RULES,
self.room,
self.sender,
self.underride,
BASE_APPEND_UNDERRIDE_RULES,
):
# Check if a base rule has been overridden by a custom rule. If so
# return that instead.
override_rule = self.overriden_base_rules.get(rule.rule_id)
if override_rule:
yield override_rule
else:
yield rule
def __len__(self) -> int:
# The length is mostly used by caches to get a sense of "size" / amount
# of memory this object is using, so we only count the number of custom
# rules.
return (
len(self.overriden_base_rules)
+ len(self.override)
+ len(self.content)
+ len(self.room)
+ len(self.sender)
+ len(self.underride)
)
@attr.s(auto_attribs=True, slots=True, frozen=True, weakref_slot=False)
class FilteredPushRules:
"""A wrapper around `PushRules` that filters out disabled experimental push
rules, and includes the "enabled" state for each rule when iterated over.
"""
push_rules: PushRules
enabled_map: Dict[str, bool]
experimental_config: ExperimentalConfig
def __iter__(self) -> Iterator[Tuple[PushRule, bool]]:
for rule in self.push_rules:
if not _is_experimental_rule_enabled(
rule.rule_id, self.experimental_config
):
continue
enabled = self.enabled_map.get(rule.rule_id, rule.default_enabled)
yield rule, enabled
def __len__(self) -> int:
return len(self.push_rules)
DEFAULT_EMPTY_PUSH_RULES = PushRules()
def compile_push_rules(rawrules: List[PushRule]) -> PushRules:
"""Given a set of custom push rules return a `PushRules` instance (which
includes the base rules).
"""
if not rawrules:
# Fast path to avoid allocating empty lists when there are no custom
# rules for the user.
return DEFAULT_EMPTY_PUSH_RULES
rules = PushRules()
for rule in rawrules:
# We need to decide which bucket each custom push rule goes into.
# If it has the same ID as a base rule then it overrides that...
overriden_base_rule = BASE_RULES_BY_ID.get(rule.rule_id)
if overriden_base_rule:
rules.overriden_base_rules[rule.rule_id] = attr.evolve(
overriden_base_rule, actions=rule.actions
)
continue
# ... otherwise it gets added to the appropriate priority class bucket
collection: List[PushRule]
if rule.priority_class == 5:
collection = rules.override
elif rule.priority_class == 4:
collection = rules.content
elif rule.priority_class == 3:
collection = rules.room
elif rule.priority_class == 2:
collection = rules.sender
elif rule.priority_class == 1:
collection = rules.underride
elif rule.priority_class <= 0:
logger.info(
"Got rule with priority class less than zero, but doesn't override a base rule: %s",
rule,
)
continue
else:
# We log and continue here so as not to break event sending
logger.error("Unknown priority class: %", rule.priority_class)
continue
collection.append(rule)
return rules
def _is_experimental_rule_enabled(
rule_id: str, experimental_config: ExperimentalConfig
) -> bool:
"""Used by `FilteredPushRules` to filter out experimental rules when they
have not been enabled.
"""
if (
rule_id == "global/override/.org.matrix.msc3786.rule.room.server_acl"
and not experimental_config.msc3786_enabled
):
return False
if (
rule_id == "global/underride/.org.matrix.msc3772.thread_reply"
and not experimental_config.msc3772_enabled
):
return False
return True
BASE_APPEND_CONTENT_RULES = [
PushRule(
default=True,
priority_class=PRIORITY_CLASS_MAP["content"],
rule_id="global/content/.m.rule.contains_user_name",
conditions=[
{
"kind": "event_match",
"key": "content.body",
# Match the localpart of the requester's MXID.
"pattern_type": "user_localpart",
}
],
actions=[
"notify",
{"set_tweak": "sound", "value": "default"},
{"set_tweak": "highlight"},
],
)
]
BASE_PREPEND_OVERRIDE_RULES = [
PushRule(
default=True,
priority_class=PRIORITY_CLASS_MAP["override"],
rule_id="global/override/.m.rule.master",
default_enabled=False,
conditions=[],
actions=["dont_notify"],
)
]
BASE_APPEND_OVERRIDE_RULES = [
PushRule(
default=True,
priority_class=PRIORITY_CLASS_MAP["override"],
rule_id="global/override/.m.rule.suppress_notices",
conditions=[
{
"kind": "event_match",
"key": "content.msgtype",
"pattern": "m.notice",
"_cache_key": "_suppress_notices",
}
],
actions=["dont_notify"],
),
# NB. .m.rule.invite_for_me must be higher prio than .m.rule.member_event
# otherwise invites will be matched by .m.rule.member_event
PushRule(
default=True,
priority_class=PRIORITY_CLASS_MAP["override"],
rule_id="global/override/.m.rule.invite_for_me",
conditions=[
{
"kind": "event_match",
"key": "type",
"pattern": "m.room.member",
"_cache_key": "_member",
},
{
"kind": "event_match",
"key": "content.membership",
"pattern": "invite",
"_cache_key": "_invite_member",
},
# Match the requester's MXID.
{"kind": "event_match", "key": "state_key", "pattern_type": "user_id"},
],
actions=[
"notify",
{"set_tweak": "sound", "value": "default"},
{"set_tweak": "highlight", "value": False},
],
),
# Will we sometimes want to know about people joining and leaving?
# Perhaps: if so, this could be expanded upon. Seems the most usual case
# is that we don't though. We add this override rule so that even if
# the room rule is set to notify, we don't get notifications about
# join/leave/avatar/displayname events.
# See also: https://matrix.org/jira/browse/SYN-607
PushRule(
default=True,
priority_class=PRIORITY_CLASS_MAP["override"],
rule_id="global/override/.m.rule.member_event",
conditions=[
{
"kind": "event_match",
"key": "type",
"pattern": "m.room.member",
"_cache_key": "_member",
}
],
actions=["dont_notify"],
),
# This was changed from underride to override so it's closer in priority
# to the content rules where the user name highlight rule lives. This
# way a room rule is lower priority than both but a custom override rule
# is higher priority than both.
PushRule(
default=True,
priority_class=PRIORITY_CLASS_MAP["override"],
rule_id="global/override/.m.rule.contains_display_name",
conditions=[{"kind": "contains_display_name"}],
actions=[
"notify",
{"set_tweak": "sound", "value": "default"},
{"set_tweak": "highlight"},
],
),
PushRule(
default=True,
priority_class=PRIORITY_CLASS_MAP["override"],
rule_id="global/override/.m.rule.roomnotif",
conditions=[
{
"kind": "event_match",
"key": "content.body",
"pattern": "@room",
"_cache_key": "_roomnotif_content",
},
{
"kind": "sender_notification_permission",
"key": "room",
"_cache_key": "_roomnotif_pl",
},
],
actions=["notify", {"set_tweak": "highlight", "value": True}],
),
PushRule(
default=True,
priority_class=PRIORITY_CLASS_MAP["override"],
rule_id="global/override/.m.rule.tombstone",
conditions=[
{
"kind": "event_match",
"key": "type",
"pattern": "m.room.tombstone",
"_cache_key": "_tombstone",
},
{
"kind": "event_match",
"key": "state_key",
"pattern": "",
"_cache_key": "_tombstone_statekey",
},
],
actions=["notify", {"set_tweak": "highlight", "value": True}],
),
PushRule(
default=True,
priority_class=PRIORITY_CLASS_MAP["override"],
rule_id="global/override/.m.rule.reaction",
conditions=[
{
"kind": "event_match",
"key": "type",
"pattern": "m.reaction",
"_cache_key": "_reaction",
}
],
actions=["dont_notify"],
),
# XXX: This is an experimental rule that is only enabled if msc3786_enabled
# is enabled, if it is not the rule gets filtered out in _load_rules() in
# PushRulesWorkerStore
PushRule(
default=True,
priority_class=PRIORITY_CLASS_MAP["override"],
rule_id="global/override/.org.matrix.msc3786.rule.room.server_acl",
conditions=[
{
"kind": "event_match",
"key": "type",
"pattern": "m.room.server_acl",
"_cache_key": "_room_server_acl",
},
{
"kind": "event_match",
"key": "state_key",
"pattern": "",
"_cache_key": "_room_server_acl_state_key",
},
],
actions=[],
),
]
BASE_APPEND_UNDERRIDE_RULES = [
PushRule(
default=True,
priority_class=PRIORITY_CLASS_MAP["underride"],
rule_id="global/underride/.m.rule.call",
conditions=[
{
"kind": "event_match",
"key": "type",
"pattern": "m.call.invite",
"_cache_key": "_call",
}
],
actions=[
"notify",
{"set_tweak": "sound", "value": "ring"},
{"set_tweak": "highlight", "value": False},
],
),
# XXX: once m.direct is standardised everywhere, we should use it to detect
# a DM from the user's perspective rather than this heuristic.
PushRule(
default=True,
priority_class=PRIORITY_CLASS_MAP["underride"],
rule_id="global/underride/.m.rule.room_one_to_one",
conditions=[
{"kind": "room_member_count", "is": "2", "_cache_key": "member_count"},
{
"kind": "event_match",
"key": "type",
"pattern": "m.room.message",
"_cache_key": "_message",
},
],
actions=[
"notify",
{"set_tweak": "sound", "value": "default"},
{"set_tweak": "highlight", "value": False},
],
),
# XXX: this is going to fire for events which aren't m.room.messages
# but are encrypted (e.g. m.call.*)...
PushRule(
default=True,
priority_class=PRIORITY_CLASS_MAP["underride"],
rule_id="global/underride/.m.rule.encrypted_room_one_to_one",
conditions=[
{"kind": "room_member_count", "is": "2", "_cache_key": "member_count"},
{
"kind": "event_match",
"key": "type",
"pattern": "m.room.encrypted",
"_cache_key": "_encrypted",
},
],
actions=[
"notify",
{"set_tweak": "sound", "value": "default"},
{"set_tweak": "highlight", "value": False},
],
),
PushRule(
default=True,
priority_class=PRIORITY_CLASS_MAP["underride"],
rule_id="global/underride/.org.matrix.msc3772.thread_reply",
conditions=[
{
"kind": "org.matrix.msc3772.relation_match",
"rel_type": "m.thread",
# Match the requester's MXID.
"sender_type": "user_id",
}
],
actions=["notify", {"set_tweak": "highlight", "value": False}],
),
PushRule(
default=True,
priority_class=PRIORITY_CLASS_MAP["underride"],
rule_id="global/underride/.m.rule.message",
conditions=[
{
"kind": "event_match",
"key": "type",
"pattern": "m.room.message",
"_cache_key": "_message",
}
],
actions=["notify", {"set_tweak": "highlight", "value": False}],
),
# XXX: this is going to fire for events which aren't m.room.messages
# but are encrypted (e.g. m.call.*)...
PushRule(
default=True,
priority_class=PRIORITY_CLASS_MAP["underride"],
rule_id="global/underride/.m.rule.encrypted",
conditions=[
{
"kind": "event_match",
"key": "type",
"pattern": "m.room.encrypted",
"_cache_key": "_encrypted",
}
],
actions=["notify", {"set_tweak": "highlight", "value": False}],
),
PushRule(
default=True,
priority_class=PRIORITY_CLASS_MAP["underride"],
rule_id="global/underride/.im.vector.jitsi",
conditions=[
{
"kind": "event_match",
"key": "type",
"pattern": "im.vector.modular.widgets",
"_cache_key": "_type_modular_widgets",
},
{
"kind": "event_match",
"key": "content.type",
"pattern": "jitsi",
"_cache_key": "_content_type_jitsi",
},
{
"kind": "event_match",
"key": "state_key",
"pattern": "*",
"_cache_key": "_is_state_event",
},
],
actions=["notify", {"set_tweak": "highlight", "value": False}],
),
]
BASE_RULE_IDS = set()
BASE_RULES_BY_ID: Dict[str, PushRule] = {}
for r in BASE_APPEND_CONTENT_RULES:
BASE_RULE_IDS.add(r.rule_id)
BASE_RULES_BY_ID[r.rule_id] = r
for r in BASE_PREPEND_OVERRIDE_RULES:
BASE_RULE_IDS.add(r.rule_id)
BASE_RULES_BY_ID[r.rule_id] = r
for r in BASE_APPEND_OVERRIDE_RULES:
BASE_RULE_IDS.add(r.rule_id)
BASE_RULES_BY_ID[r.rule_id] = r
for r in BASE_APPEND_UNDERRIDE_RULES:
BASE_RULE_IDS.add(r.rule_id)
BASE_RULES_BY_ID[r.rule_id] = r

View file

@ -17,6 +17,7 @@ import itertools
import logging
from typing import (
TYPE_CHECKING,
Any,
Collection,
Dict,
Iterable,
@ -37,13 +38,11 @@ from synapse.events.snapshot import EventContext
from synapse.state import POWER_KEY
from synapse.storage.databases.main.roommember import EventIdMembership
from synapse.storage.state import StateFilter
from synapse.synapse_rust.push import FilteredPushRules, PushRule, PushRuleEvaluator
from synapse.util.caches import register_cache
from synapse.util.metrics import measure_func
from synapse.visibility import filter_event_for_clients_with_state
from .baserules import FilteredPushRules, PushRule
from .push_rule_evaluator import PushRuleEvaluatorForEvent
if TYPE_CHECKING:
from synapse.server import HomeServer
@ -173,7 +172,11 @@ class BulkPushRuleEvaluator:
async def _get_power_levels_and_sender_level(
self, event: EventBase, context: EventContext
) -> Tuple[dict, int]:
) -> Tuple[dict, Optional[int]]:
# There are no power levels or sender level to be derived from an outlier event
if event.internal_metadata.is_outlier():
return {}, None
event_types = auth_types_for_event(event.room_version, event)
prev_state_ids = await context.get_prev_state_ids(
StateFilter.from_types(event_types)
@ -250,8 +253,8 @@ class BulkPushRuleEvaluator:
should increment the unread count, and insert the results into the
event_push_actions_staging table.
"""
if event.internal_metadata.is_outlier():
# This can happen due to out of band memberships
if not event.internal_metadata.is_notifiable():
# Push rules can't be processed for events that aren't notifiable
return
# Disable counting as unread unless the experimental configuration is
@ -280,16 +283,17 @@ class BulkPushRuleEvaluator:
thread_id = "main"
if relation:
relations = await self._get_mutual_relations(
relation.parent_id, itertools.chain(*rules_by_user.values())
relation.parent_id,
itertools.chain(*(r.rules() for r in rules_by_user.values())),
)
if relation.rel_type == RelationTypes.THREAD:
thread_id = relation.parent_id
evaluator = PushRuleEvaluatorForEvent(
event,
evaluator = PushRuleEvaluator(
_flatten_dict(event),
room_member_count,
sender_power_level,
power_levels,
power_levels.get("notifications", {}),
relations,
self._relations_match_enabled,
)
@ -299,20 +303,10 @@ class BulkPushRuleEvaluator:
event.room_id, users
)
# This is a check for the case where a user joins a room without being
# allowed to see history, and then the server receives a delayed event
# from before the user joined, which they should not be pushed for
uids_with_visibility = await filter_event_for_clients_with_state(
self.store, users, event, context
)
for uid, rules in rules_by_user.items():
if event.sender == uid:
continue
if uid not in uids_with_visibility:
continue
display_name = None
profile = profiles.get(uid)
if profile:
@ -333,17 +327,30 @@ class BulkPushRuleEvaluator:
# current user, it'll be added to the dict later.
actions_by_user[uid] = []
for rule, enabled in rules:
if not enabled:
continue
actions = evaluator.run(rules, uid, display_name)
if "notify" in actions:
# Push rules say we should notify the user of this event
actions_by_user[uid] = actions
matches = evaluator.check_conditions(rule.conditions, uid, display_name)
if matches:
actions = [x for x in rule.actions if x != "dont_notify"]
if actions and "notify" in actions:
# Push rules say we should notify the user of this event
actions_by_user[uid] = actions
break
# If there aren't any actions then we can skip the rest of the
# processing.
if not actions_by_user:
return
# This is a check for the case where a user joins a room without being
# allowed to see history, and then the server receives a delayed event
# from before the user joined, which they should not be pushed for
#
# We do this *after* calculating the push actions as a) it's unlikely
# that we'll filter anyone out and b) for large rooms it's likely that
# most users will have push disabled and so the set of users to check is
# much smaller.
uids_with_visibility = await filter_event_for_clients_with_state(
self.store, actions_by_user.keys(), event, context
)
for user_id in set(actions_by_user).difference(uids_with_visibility):
actions_by_user.pop(user_id, None)
# Mark in the DB staging area the push actions for users who should be
# notified for this event. (This will then get handled when we persist
@ -360,3 +367,21 @@ MemberMap = Dict[str, Optional[EventIdMembership]]
Rule = Dict[str, dict]
RulesByUser = Dict[str, List[Rule]]
StateGroup = Union[object, int]
def _flatten_dict(
d: Union[EventBase, Mapping[str, Any]],
prefix: Optional[List[str]] = None,
result: Optional[Dict[str, str]] = None,
) -> Dict[str, str]:
if prefix is None:
prefix = []
if result is None:
result = {}
for key, value in d.items():
if isinstance(value, str):
result[".".join(prefix + [key])] = value.lower()
elif isinstance(value, Mapping):
_flatten_dict(value, prefix=(prefix + [key]), result=result)
return result
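For illustration only, a minimal sketch (assuming a plain mapping rather than an EventBase is passed) of what _flatten_dict produces: nested keys are joined with "." and string values are lowercased.
# Illustrative sketch, not part of the diff.
flattened = _flatten_dict(
    {"type": "m.room.message", "content": {"msgtype": "m.text", "body": "Hello"}}
)
# flattened == {"type": "m.room.message", "content.msgtype": "m.text", "content.body": "hello"}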

View file

@ -16,10 +16,9 @@ import copy
from typing import Any, Dict, List, Optional
from synapse.push.rulekinds import PRIORITY_CLASS_INVERSE_MAP, PRIORITY_CLASS_MAP
from synapse.synapse_rust.push import FilteredPushRules, PushRule
from synapse.types import UserID
from .baserules import FilteredPushRules, PushRule
def format_push_rules_for_user(
user: UserID, ruleslist: FilteredPushRules
@ -34,7 +33,7 @@ def format_push_rules_for_user(
rules["global"] = _add_empty_priority_class_arrays(rules["global"])
for r, enabled in ruleslist:
for r, enabled in ruleslist.rules():
template_name = _priority_class_to_template_name(r.priority_class)
rulearray = rules["global"][template_name]
@ -103,10 +102,8 @@ def _rule_to_template(rule: PushRule) -> Optional[Dict[str, Any]]:
# with PRIORITY_CLASS_INVERSE_MAP.
raise ValueError("Unexpected template_name: %s" % (template_name,))
if unscoped_rule_id:
templaterule["rule_id"] = unscoped_rule_id
if rule.default:
templaterule["default"] = True
templaterule["rule_id"] = unscoped_rule_id
templaterule["default"] = rule.default
return templaterule

View file

@ -14,7 +14,7 @@
# limitations under the License.
import logging
import urllib.parse
from typing import TYPE_CHECKING, Any, Dict, Iterable, Optional, Union
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Union
from prometheus_client import Counter
@ -28,7 +28,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.push import Pusher, PusherConfig, PusherConfigException
from synapse.storage.databases.main.event_push_actions import HttpPushAction
from . import push_rule_evaluator, push_tools
from . import push_tools
if TYPE_CHECKING:
from synapse.server import HomeServer
@ -56,6 +56,39 @@ http_badges_failed_counter = Counter(
)
def tweaks_for_actions(actions: List[Union[str, Dict]]) -> Dict[str, Any]:
"""
Converts a list of actions into a `tweaks` dict (which can then be passed to
the push gateway).
This function ignores all actions other than `set_tweak` actions, and treats
absent `value`s as `True`, which agrees with the only spec-defined treatment
of absent `value`s (namely, for `highlight` tweaks).
Args:
actions: list of actions
e.g. [
{"set_tweak": "a", "value": "AAA"},
{"set_tweak": "b", "value": "BBB"},
{"set_tweak": "highlight"},
"notify"
]
Returns:
dictionary of tweaks for those actions
e.g. {"a": "AAA", "b": "BBB", "highlight": True}
"""
tweaks = {}
for a in actions:
if not isinstance(a, dict):
continue
if "set_tweak" in a:
# The value is allowed to be absent, in which case it is assumed
# to be True.
tweaks[a["set_tweak"]] = a.get("value", True)
return tweaks
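A brief usage sketch of the helper above (illustrative only): non-dict actions are ignored, and an absent "value" defaults to True.
tweaks = tweaks_for_actions(
    ["notify", {"set_tweak": "sound", "value": "default"}, {"set_tweak": "highlight"}]
)
# tweaks == {"sound": "default", "highlight": True}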
class HttpPusher(Pusher):
INITIAL_BACKOFF_SEC = 1 # in seconds because that's what Twisted takes
MAX_BACKOFF_SEC = 60 * 60
@ -274,7 +307,7 @@ class HttpPusher(Pusher):
if "notify" not in push_action.actions:
return True
tweaks = push_rule_evaluator.tweaks_for_actions(push_action.actions)
tweaks = tweaks_for_actions(push_action.actions)
badge = await push_tools.get_badge_count(
self.hs.get_datastores().main,
self.user_id,

View file

@ -1,361 +0,0 @@
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2017 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
from typing import (
Any,
Dict,
List,
Mapping,
Optional,
Pattern,
Sequence,
Set,
Tuple,
Union,
)
from matrix_common.regex import glob_to_regex, to_word_pattern
from synapse.events import EventBase
from synapse.types import UserID
from synapse.util.caches.lrucache import LruCache
logger = logging.getLogger(__name__)
GLOB_REGEX = re.compile(r"\\\[(\\\!|)(.*)\\\]")
IS_GLOB = re.compile(r"[\?\*\[\]]")
INEQUALITY_EXPR = re.compile("^([=<>]*)([0-9]*)$")
def _room_member_count(
ev: EventBase, condition: Mapping[str, Any], room_member_count: int
) -> bool:
return _test_ineq_condition(condition, room_member_count)
def _sender_notification_permission(
ev: EventBase,
condition: Mapping[str, Any],
sender_power_level: int,
power_levels: Dict[str, Union[int, Dict[str, int]]],
) -> bool:
notif_level_key = condition.get("key")
if notif_level_key is None:
return False
notif_levels = power_levels.get("notifications", {})
assert isinstance(notif_levels, dict)
room_notif_level = notif_levels.get(notif_level_key, 50)
return sender_power_level >= room_notif_level
def _test_ineq_condition(condition: Mapping[str, Any], number: int) -> bool:
if "is" not in condition:
return False
m = INEQUALITY_EXPR.match(condition["is"])
if not m:
return False
ineq = m.group(1)
rhs = m.group(2)
if not rhs.isdigit():
return False
rhs_int = int(rhs)
if ineq == "" or ineq == "==":
return number == rhs_int
elif ineq == "<":
return number < rhs_int
elif ineq == ">":
return number > rhs_int
elif ineq == ">=":
return number >= rhs_int
elif ineq == "<=":
return number <= rhs_int
else:
return False
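For reference, an illustrative sketch of how this (now removed) helper evaluated "room_member_count"-style conditions such as the {"is": "2"} used in the base rules.
assert _test_ineq_condition({"is": "2"}, 2)       # exactly two members
assert _test_ineq_condition({"is": ">=2"}, 5)     # two or more
assert not _test_ineq_condition({"is": "<2"}, 3)  # fewer than two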
def tweaks_for_actions(actions: List[Union[str, Dict]]) -> Dict[str, Any]:
"""
Converts a list of actions into a `tweaks` dict (which can then be passed to
the push gateway).
This function ignores all actions other than `set_tweak` actions, and treats
absent `value`s as `True`, which agrees with the only spec-defined treatment
of absent `value`s (namely, for `highlight` tweaks).
Args:
actions: list of actions
e.g. [
{"set_tweak": "a", "value": "AAA"},
{"set_tweak": "b", "value": "BBB"},
{"set_tweak": "highlight"},
"notify"
]
Returns:
dictionary of tweaks for those actions
e.g. {"a": "AAA", "b": "BBB", "highlight": True}
"""
tweaks = {}
for a in actions:
if not isinstance(a, dict):
continue
if "set_tweak" in a:
# The value is allowed to be absent, in which case it is assumed
# to be True.
tweaks[a["set_tweak"]] = a.get("value", True)
return tweaks
class PushRuleEvaluatorForEvent:
def __init__(
self,
event: EventBase,
room_member_count: int,
sender_power_level: int,
power_levels: Dict[str, Union[int, Dict[str, int]]],
relations: Dict[str, Set[Tuple[str, str]]],
relations_match_enabled: bool,
):
self._event = event
self._room_member_count = room_member_count
self._sender_power_level = sender_power_level
self._power_levels = power_levels
self._relations = relations
self._relations_match_enabled = relations_match_enabled
# Maps strings of e.g. 'content.body' -> event["content"]["body"]
self._value_cache = _flatten_dict(event)
# Maps cache keys to final values.
self._condition_cache: Dict[str, bool] = {}
def check_conditions(
self, conditions: Sequence[Mapping], uid: str, display_name: Optional[str]
) -> bool:
"""
Returns true if a user's conditions/user ID/display name match the event.
Args:
conditions: The user's conditions to match.
uid: The user's MXID.
display_name: The display name.
Returns:
True if all conditions match the event, False otherwise.
"""
for cond in conditions:
_cache_key = cond.get("_cache_key", None)
if _cache_key:
res = self._condition_cache.get(_cache_key, None)
if res is False:
return False
elif res is True:
continue
res = self.matches(cond, uid, display_name)
if _cache_key:
self._condition_cache[_cache_key] = bool(res)
if not res:
return False
return True
def matches(
self, condition: Mapping[str, Any], user_id: str, display_name: Optional[str]
) -> bool:
"""
Returns true if a user's condition/user ID/display name match the event.
Args:
condition: The user's condition to match.
user_id: The user's MXID.
display_name: The display name, or None if there is not one.
Returns:
True if the condition matches the event, False otherwise.
"""
if condition["kind"] == "event_match":
return self._event_match(condition, user_id)
elif condition["kind"] == "contains_display_name":
return self._contains_display_name(display_name)
elif condition["kind"] == "room_member_count":
return _room_member_count(self._event, condition, self._room_member_count)
elif condition["kind"] == "sender_notification_permission":
return _sender_notification_permission(
self._event, condition, self._sender_power_level, self._power_levels
)
elif (
condition["kind"] == "org.matrix.msc3772.relation_match"
and self._relations_match_enabled
):
return self._relation_match(condition, user_id)
else:
# XXX This looks incorrect -- we have reached an unknown condition
# kind and are unconditionally returning that it matches. Note
# that it seems possible to provide a condition to the /pushrules
# endpoint with an unknown kind, see _rule_tuple_from_request_object.
return True
def _event_match(self, condition: Mapping, user_id: str) -> bool:
"""
Check an "event_match" push rule condition.
Args:
condition: The "event_match" push rule condition to match.
user_id: The user's MXID.
Returns:
True if the condition matches the event, False otherwise.
"""
pattern = condition.get("pattern", None)
if not pattern:
pattern_type = condition.get("pattern_type", None)
if pattern_type == "user_id":
pattern = user_id
elif pattern_type == "user_localpart":
pattern = UserID.from_string(user_id).localpart
if not pattern:
logger.warning("event_match condition with no pattern")
return False
# XXX: optimisation: cache our pattern regexps
if condition["key"] == "content.body":
body = self._event.content.get("body", None)
if not body or not isinstance(body, str):
return False
return _glob_matches(pattern, body, word_boundary=True)
else:
haystack = self._value_cache.get(condition["key"], None)
if haystack is None:
return False
return _glob_matches(pattern, haystack)
def _contains_display_name(self, display_name: Optional[str]) -> bool:
"""
Check an "event_match" push rule condition.
Args:
display_name: The display name, or None if there is not one.
Returns:
True if the display name is found in the event body, False otherwise.
"""
if not display_name:
return False
body = self._event.content.get("body", None)
if not body or not isinstance(body, str):
return False
# Similar to _glob_matches, but do not treat display_name as a glob.
r = regex_cache.get((display_name, False, True), None)
if not r:
r1 = re.escape(display_name)
r1 = to_word_pattern(r1)
r = re.compile(r1, flags=re.IGNORECASE)
regex_cache[(display_name, False, True)] = r
return bool(r.search(body))
def _relation_match(self, condition: Mapping, user_id: str) -> bool:
"""
Check an "relation_match" push rule condition.
Args:
condition: The "event_match" push rule condition to match.
user_id: The user's MXID.
Returns:
True if the condition matches the event, False otherwise.
"""
rel_type = condition.get("rel_type")
if not rel_type:
logger.warning("relation_match condition missing rel_type")
return False
sender_pattern = condition.get("sender")
if sender_pattern is None:
sender_type = condition.get("sender_type")
if sender_type == "user_id":
sender_pattern = user_id
type_pattern = condition.get("type")
# If any of the relations match, return True.
for sender, event_type in self._relations.get(rel_type, ()):
if sender_pattern and not _glob_matches(sender_pattern, sender):
continue
if type_pattern and not _glob_matches(type_pattern, event_type):
continue
# All values must have matched.
return True
# No relations matched.
return False
# Caches (string, is_glob, word_boundary) -> regex for push. See _glob_matches
regex_cache: LruCache[Tuple[str, bool, bool], Pattern] = LruCache(
50000, "regex_push_cache"
)
def _glob_matches(glob: str, value: str, word_boundary: bool = False) -> bool:
"""Tests if value matches glob.
Args:
glob
value: String to test against glob.
word_boundary: Whether to match against word boundaries or entire
string. Defaults to False.
"""
try:
r = regex_cache.get((glob, True, word_boundary), None)
if not r:
r = glob_to_regex(glob, word_boundary=word_boundary)
regex_cache[(glob, True, word_boundary)] = r
return bool(r.search(value))
except re.error:
logger.warning("Failed to parse glob to regex: %r", glob)
return False
def _flatten_dict(
d: Union[EventBase, Mapping[str, Any]],
prefix: Optional[List[str]] = None,
result: Optional[Dict[str, str]] = None,
) -> Dict[str, str]:
if prefix is None:
prefix = []
if result is None:
result = {}
for key, value in d.items():
if isinstance(value, str):
result[".".join(prefix + [key])] = value.lower()
elif isinstance(value, Mapping):
_flatten_dict(value, prefix=(prefix + [key]), result=result)
return result

View file

@ -94,7 +94,7 @@ class PusherPool:
return
run_as_background_process("start_pushers", self._start_pushers)
async def add_pusher(
async def add_or_update_pusher(
self,
user_id: str,
access_token: Optional[int],
@ -106,6 +106,8 @@ class PusherPool:
lang: Optional[str],
data: JsonDict,
profile_tag: str = "",
enabled: bool = True,
device_id: Optional[str] = None,
) -> Optional[Pusher]:
"""Creates a new pusher and adds it to the pool
@ -147,9 +149,22 @@ class PusherPool:
last_stream_ordering=last_stream_ordering,
last_success=None,
failing_since=None,
enabled=enabled,
device_id=device_id,
)
)
# Before we actually persist the pusher, we check if the user already has one for
# this app ID and pushkey. If so, we want to keep the access token and device ID
# in place, since this could be one device modifying (e.g. enabling/disabling)
# another device's pusher.
existing_config = await self._get_pusher_config_for_user_by_app_id_and_pushkey(
user_id, app_id, pushkey
)
if existing_config:
access_token = existing_config.access_token
device_id = existing_config.device_id
await self.store.add_pusher(
user_id=user_id,
access_token=access_token,
@ -163,8 +178,10 @@ class PusherPool:
data=data,
last_stream_ordering=last_stream_ordering,
profile_tag=profile_tag,
enabled=enabled,
device_id=device_id,
)
pusher = await self.start_pusher_by_id(app_id, pushkey, user_id)
pusher = await self.process_pusher_change_by_id(app_id, pushkey, user_id)
return pusher
@ -276,10 +293,25 @@ class PusherPool:
except Exception:
logger.exception("Exception in pusher on_new_receipts")
async def start_pusher_by_id(
async def _get_pusher_config_for_user_by_app_id_and_pushkey(
self, user_id: str, app_id: str, pushkey: str
) -> Optional[PusherConfig]:
resultlist = await self.store.get_pushers_by_app_id_and_pushkey(app_id, pushkey)
pusher_config = None
for r in resultlist:
if r.user_name == user_id:
pusher_config = r
return pusher_config
async def process_pusher_change_by_id(
self, app_id: str, pushkey: str, user_id: str
) -> Optional[Pusher]:
"""Look up the details for the given pusher, and start it
"""Look up the details for the given pusher, and either start it if its
"enabled" flag is True, or try to stop it otherwise.
If the pusher is new and its "enabled" flag is False, the stop is a noop.
Returns:
The pusher started, if any
@ -290,12 +322,13 @@ class PusherPool:
if not self._pusher_shard_config.should_handle(self._instance_name, user_id):
return None
resultlist = await self.store.get_pushers_by_app_id_and_pushkey(app_id, pushkey)
pusher_config = await self._get_pusher_config_for_user_by_app_id_and_pushkey(
user_id, app_id, pushkey
)
pusher_config = None
for r in resultlist:
if r.user_name == user_id:
pusher_config = r
if pusher_config and not pusher_config.enabled:
self.maybe_stop_pusher(app_id, pushkey, user_id)
return None
pusher = None
if pusher_config:
@ -305,7 +338,7 @@ class PusherPool:
async def _start_pushers(self) -> None:
"""Start all the pushers"""
pushers = await self.store.get_all_pushers()
pushers = await self.store.get_enabled_pushers()
# Stagger starting up the pushers so we don't completely drown the
# process on start up.
@ -363,6 +396,8 @@ class PusherPool:
synapse_pushers.labels(type(pusher).__name__, pusher.app_id).inc()
logger.info("Starting pusher %s / %s", pusher.user_id, appid_pushkey)
# Check if there *may* be push to process. We do this as this check is a
# lot cheaper to do than actually fetching the exact rows we need to
# push.
@ -382,16 +417,7 @@ class PusherPool:
return pusher
async def remove_pusher(self, app_id: str, pushkey: str, user_id: str) -> None:
appid_pushkey = "%s:%s" % (app_id, pushkey)
byuser = self.pushers.get(user_id, {})
if appid_pushkey in byuser:
logger.info("Stopping pusher %s / %s", user_id, appid_pushkey)
pusher = byuser.pop(appid_pushkey)
pusher.on_stop()
synapse_pushers.labels(type(pusher).__name__, pusher.app_id).dec()
self.maybe_stop_pusher(app_id, pushkey, user_id)
# We can only delete pushers on master.
if self._remove_pusher_client:
@ -402,3 +428,22 @@ class PusherPool:
await self.store.delete_pusher_by_app_id_pushkey_user_id(
app_id, pushkey, user_id
)
def maybe_stop_pusher(self, app_id: str, pushkey: str, user_id: str) -> None:
"""Stops a pusher with the given app ID and push key if one is running.
Args:
app_id: the pusher's app ID.
pushkey: the pusher's push key.
user_id: the user the pusher belongs to. Only used for logging.
"""
appid_pushkey = "%s:%s" % (app_id, pushkey)
byuser = self.pushers.get(user_id, {})
if appid_pushkey in byuser:
logger.info("Stopping pusher %s / %s", user_id, appid_pushkey)
pusher = byuser.pop(appid_pushkey)
pusher.on_stop()
synapse_pushers.labels(type(pusher).__name__, pusher.app_id).dec()

View file

@ -25,6 +25,7 @@ from synapse.replication.http import (
push,
register,
send_event,
send_events,
state,
streams,
)
@ -43,6 +44,7 @@ class ReplicationRestResource(JsonResource):
def register_servlets(self, hs: "HomeServer") -> None:
send_event.register_servlets(hs, self)
send_events.register_servlets(hs, self)
federation.register_servlets(hs, self)
presence.register_servlets(hs, self)
membership.register_servlets(hs, self)

View file

@ -51,6 +51,7 @@ class ReplicationRegisterServlet(ReplicationEndpoint):
user_type: Optional[str],
address: Optional[str],
shadow_banned: bool,
approved: bool,
) -> JsonDict:
"""
Args:
@ -68,6 +69,8 @@ class ReplicationRegisterServlet(ReplicationEndpoint):
or None for a normal user.
address: the IP address used to perform the registration.
shadow_banned: Whether to shadow-ban the user
approved: Whether the user should be considered already approved by an
administrator.
"""
return {
"password_hash": password_hash,
@ -79,6 +82,7 @@ class ReplicationRegisterServlet(ReplicationEndpoint):
"user_type": user_type,
"address": address,
"shadow_banned": shadow_banned,
"approved": approved,
}
async def _handle_request( # type: ignore[override]
@ -99,6 +103,7 @@ class ReplicationRegisterServlet(ReplicationEndpoint):
user_type=content["user_type"],
address=content["address"],
shadow_banned=content["shadow_banned"],
approved=content["approved"],
)
return 200, {}

View file

@ -145,8 +145,8 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):
"Got event to send with ID: %s into room: %s", event.event_id, event.room_id
)
event = await self.event_creation_handler.persist_and_notify_client_event(
requester, event, context, ratelimit=ratelimit, extra_users=extra_users, dont_notify=dont_notify,
event = await self.event_creation_handler.persist_and_notify_client_events(
requester, [(event, context)], ratelimit=ratelimit, extra_users=extra_users, dont_notify=dont_notify,
)
return (

View file

@ -0,0 +1,171 @@
# Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, List, Tuple
from twisted.web.server import Request
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.events import EventBase, make_event_from_dict
from synapse.events.snapshot import EventContext
from synapse.http.server import HttpServer
from synapse.http.servlet import parse_json_object_from_request
from synapse.replication.http._base import ReplicationEndpoint
from synapse.types import JsonDict, Requester, UserID
from synapse.util.metrics import Measure
if TYPE_CHECKING:
from synapse.server import HomeServer
from synapse.storage.databases.main import DataStore
logger = logging.getLogger(__name__)
class ReplicationSendEventsRestServlet(ReplicationEndpoint):
"""Handles batches of newly created events on workers, including persisting and
notifying.
The API looks like:
POST /_synapse/replication/send_events/:txn_id
{
"events": [{
"event": { .. serialized event .. },
"room_version": .., // "1", "2", "3", etc: the version of the room
// containing the event
"event_format_version": .., // 1,2,3 etc: the event format version
"internal_metadata": { .. serialized internal_metadata .. },
"outlier": true|false,
"rejected_reason": .., // The event.rejected_reason field
"context": { .. serialized event context .. },
"requester": { .. serialized requester .. },
"ratelimit": true,
}]
}
200 OK
{ "stream_id": 12345, "event_id": "$abcdef..." }
Responds with a 409 when a `PartialStateConflictError` is raised due to an event
context that needs to be recomputed due to the un-partial stating of a room.
"""
NAME = "send_events"
PATH_ARGS = ()
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.event_creation_handler = hs.get_event_creation_handler()
self.store = hs.get_datastores().main
self._storage_controllers = hs.get_storage_controllers()
self.clock = hs.get_clock()
@staticmethod
async def _serialize_payload( # type: ignore[override]
events_and_context: List[Tuple[EventBase, EventContext]],
store: "DataStore",
requester: Requester,
ratelimit: bool,
extra_users: List[UserID],
) -> JsonDict:
"""
Args:
store
requester
events_and_context
ratelimit
extra_users
"""
serialized_events = []
for event, context in events_and_context:
serialized_context = await context.serialize(event, store)
serialized_event = {
"event": event.get_pdu_json(),
"room_version": event.room_version.identifier,
"event_format_version": event.format_version,
"internal_metadata": event.internal_metadata.get_dict(),
"outlier": event.internal_metadata.is_outlier(),
"rejected_reason": event.rejected_reason,
"context": serialized_context,
"requester": requester.serialize(),
"ratelimit": ratelimit,
"extra_users": [u.to_string() for u in extra_users],
}
serialized_events.append(serialized_event)
payload = {"events": serialized_events}
return payload
async def _handle_request( # type: ignore[override]
self, request: Request
) -> Tuple[int, JsonDict]:
with Measure(self.clock, "repl_send_events_parse"):
payload = parse_json_object_from_request(request)
events_and_context = []
events = payload["events"]
for event_payload in events:
event_dict = event_payload["event"]
room_ver = KNOWN_ROOM_VERSIONS[event_payload["room_version"]]
internal_metadata = event_payload["internal_metadata"]
rejected_reason = event_payload["rejected_reason"]
event = make_event_from_dict(
event_dict, room_ver, internal_metadata, rejected_reason
)
event.internal_metadata.outlier = event_payload["outlier"]
requester = Requester.deserialize(
self.store, event_payload["requester"]
)
context = EventContext.deserialize(
self._storage_controllers, event_payload["context"]
)
ratelimit = event_payload["ratelimit"]
events_and_context.append((event, context))
extra_users = [
UserID.from_string(u) for u in event_payload["extra_users"]
]
logger.info(
"Got batch of events to send, last ID of batch is: %s, sending into room: %s",
event.event_id,
event.room_id,
)
last_event = (
await self.event_creation_handler.persist_and_notify_client_events(
requester, events_and_context, ratelimit, extra_users
)
)
return (
200,
{
"stream_id": last_event.internal_metadata.stream_ordering,
"event_id": last_event.event_id,
},
)
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
ReplicationSendEventsRestServlet(hs).register(http_server)

View file

@ -189,7 +189,9 @@ class ReplicationDataHandler:
if row.deleted:
self.stop_pusher(row.user_id, row.app_id, row.pushkey)
else:
await self.start_pusher(row.user_id, row.app_id, row.pushkey)
await self.process_pusher_change(
row.user_id, row.app_id, row.pushkey
)
elif stream_name == EventsStream.NAME:
# We shouldn't get multiple rows per token for events stream, so
# we don't need to optimise this for multiple rows.
@ -334,13 +336,15 @@ class ReplicationDataHandler:
logger.info("Stopping pusher %r / %r", user_id, key)
pusher.on_stop()
async def start_pusher(self, user_id: str, app_id: str, pushkey: str) -> None:
async def process_pusher_change(
self, user_id: str, app_id: str, pushkey: str
) -> None:
if not self._notify_pushers:
return
key = "%s:%s" % (app_id, pushkey)
logger.info("Starting pusher %r / %r", user_id, key)
await self._pusher_pool.start_pusher_by_id(app_id, pushkey, user_id)
await self._pusher_pool.process_pusher_change_by_id(app_id, pushkey, user_id)
class FederationSenderHandler:
@ -423,7 +427,8 @@ class FederationSenderHandler:
receipt.receipt_type,
receipt.user_id,
[receipt.event_id],
receipt.data,
thread_id=receipt.thread_id,
data=receipt.data,
)
await self.federation_sender.send_read_receipt(receipt_info)

View file

@ -361,6 +361,7 @@ class ReceiptsStream(Stream):
receipt_type: str
user_id: str
event_id: str
thread_id: Optional[str]
data: dict
NAME = "receipts"

View file

@ -30,6 +30,7 @@ from synapse.rest.client import (
keys,
knock,
login as v1_login,
login_token_request,
logout,
mutual_rooms,
notifications,
@ -130,3 +131,4 @@ class ClientRestResource(JsonResource):
# unstable
mutual_rooms.register_servlets(hs, client_resource)
login_token_request.register_servlets(hs, client_resource)

Some files were not shown because too many files have changed in this diff.