
Merge remote-tracking branch 'upstream/release-v1.67'

Commit ca9515d2c7 by Tulir Asokan, 2022-09-06 08:22:38 -04:00
133 changed files with 3557 additions and 2185 deletions

@@ -27,10 +27,10 @@ which is under the Unlicense licence.
{{- . -}}{{- "\n" -}}
{{- end -}}
{{- with .TestCases -}}
{{- /* Failing tests are first */ -}}
{{- /* Passing tests are first */ -}}
{{- range . -}}
{{- if and (ne .Result "PASS") (ne .Result "SKIP") -}}
::group::{{ "\033" }}[0;31m❌{{ " " }}{{- .Name -}}
{{- if eq .Result "PASS" -}}
::group::{{ "\033" }}[0;32m✅{{ " " }}{{- .Name -}}
{{- "\033" -}}[0;37m ({{if $settings.ShowTestStatus}}{{.Result}}; {{end}}{{ .Duration -}}
{{- with .Coverage -}}
, coverage: {{ . }}%
@@ -47,7 +47,6 @@ which is under the Unlicense licence.
{{- end -}}
{{- end -}}
{{- /* Then skipped tests are second */ -}}
{{- range . -}}
{{- if eq .Result "SKIP" -}}
@@ -68,11 +67,10 @@ which is under the Unlicense licence.
{{- end -}}
{{- end -}}
{{- /* Then passing tests are last */ -}}
{{- /* and failing tests are last */ -}}
{{- range . -}}
{{- if eq .Result "PASS" -}}
::group::{{ "\033" }}[0;32m✅{{ " " }}{{- .Name -}}
{{- if and (ne .Result "PASS") (ne .Result "SKIP") -}}
::group::{{ "\033" }}[0;31m❌{{ " " }}{{- .Name -}}
{{- "\033" -}}[0;37m ({{if $settings.ShowTestStatus}}{{.Result}}; {{end}}{{ .Duration -}}
{{- with .Coverage -}}
, coverage: {{ . }}%

.ci/scripts/calculate_jobs.py (new executable file, 128 lines)

@@ -0,0 +1,128 @@
#!/usr/bin/env python
# Copyright 2022 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Calculate the trial jobs to run based on if we're in a PR or not.
import json
import os
IS_PR = os.environ["GITHUB_REF"].startswith("refs/pull/")
# First calculate the various trial jobs.
#
# For each type of test we only run on Py3.7 on PRs
trial_sqlite_tests = [
{
"python-version": "3.7",
"database": "sqlite",
"extras": "all",
}
]
if not IS_PR:
trial_sqlite_tests.extend(
{
"python-version": version,
"database": "sqlite",
"extras": "all",
}
for version in ("3.8", "3.9", "3.10")
)
trial_postgres_tests = [
{
"python-version": "3.7",
"database": "postgres",
"postgres-version": "10",
"extras": "all",
}
]
if not IS_PR:
trial_postgres_tests.append(
{
"python-version": "3.10",
"database": "postgres",
"postgres-version": "14",
"extras": "all",
}
)
trial_no_extra_tests = [
{
"python-version": "3.7",
"database": "sqlite",
"extras": "",
}
]
print("::group::Calculated trial jobs")
print(
json.dumps(
trial_sqlite_tests + trial_postgres_tests + trial_no_extra_tests, indent=4
)
)
print("::endgroup::")
test_matrix = json.dumps(
trial_sqlite_tests + trial_postgres_tests + trial_no_extra_tests
)
print(f"::set-output name=trial_test_matrix::{test_matrix}")
# First calculate the various sytest jobs.
#
# For each type of test we only run on focal on PRs
sytest_tests = [
{
"sytest-tag": "focal",
},
{
"sytest-tag": "focal",
"postgres": "postgres",
},
{
"sytest-tag": "focal",
"postgres": "multi-postgres",
"workers": "workers",
},
]
if not IS_PR:
sytest_tests.extend(
[
{
"sytest-tag": "testing",
"postgres": "postgres",
},
{
"sytest-tag": "buster",
"postgres": "multi-postgres",
"workers": "workers",
},
]
)
print("::group::Calculated sytest jobs")
print(json.dumps(sytest_tests, indent=4))
print("::endgroup::")
test_matrix = json.dumps(sytest_tests)
print(f"::set-output name=sytest_test_matrix::{test_matrix}")

.ci/scripts/gotestfmt (new executable file, 21 lines)

@@ -0,0 +1,21 @@
#!/bin/bash
#
# wraps `gotestfmt`, hiding output from successful packages unless
# all tests passed.
set -o pipefail
set -e
# tee the test results to a log, whilst also piping them into gotestfmt,
# telling it to hide successful results, so that we can clearly see
# unsuccessful results.
tee complement.log | gotestfmt -hide successful-packages
# gotestfmt will exit non-zero if there were any failures, so if we got to this
# point, we must have had a successful result.
echo "All tests successful; showing all test results"
# Pipe the test results back through gotestfmt, showing all results.
# The log file consists of JSON lines giving the test results, interspersed
# with regular stdout lines (including reports of downloaded packages).
grep '^{"Time":' complement.log | gotestfmt

@@ -163,7 +163,7 @@ jobs:
- run: |
set -o pipefail
TEST_ONLY_IGNORE_POETRY_LOCKFILE=1 POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
TEST_ONLY_IGNORE_POETRY_LOCKFILE=1 POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | synapse/.ci/scripts/gotestfmt
shell: bash
name: Run Complement Tests

@@ -73,53 +73,48 @@ jobs:
steps:
- run: "true"
trial:
calculate-test-jobs:
if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
needs: linting-done
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
- id: get-matrix
run: .ci/scripts/calculate_jobs.py
outputs:
trial_test_matrix: ${{ steps.get-matrix.outputs.trial_test_matrix }}
sytest_test_matrix: ${{ steps.get-matrix.outputs.sytest_test_matrix }}
trial:
if: ${{ !cancelled() && !failure() }} # Allow previous steps to be skipped, but not fail
needs: calculate-test-jobs
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["3.7", "3.8", "3.9", "3.10"]
database: ["sqlite"]
extras: ["all"]
include:
# Newest Python without optional deps
- python-version: "3.10"
extras: ""
# Oldest Python with PostgreSQL
- python-version: "3.7"
database: "postgres"
postgres-version: "10"
extras: "all"
# Newest Python with newest PostgreSQL
- python-version: "3.10"
database: "postgres"
postgres-version: "14"
extras: "all"
job: ${{ fromJson(needs.calculate-test-jobs.outputs.trial_test_matrix) }}
steps:
- uses: actions/checkout@v2
- run: sudo apt-get -qq install xmlsec1
- name: Set up PostgreSQL ${{ matrix.postgres-version }}
if: ${{ matrix.postgres-version }}
- name: Set up PostgreSQL ${{ matrix.job.postgres-version }}
if: ${{ matrix.job.postgres-version }}
run: |
docker run -d -p 5432:5432 \
-e POSTGRES_PASSWORD=postgres \
-e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \
postgres:${{ matrix.postgres-version }}
postgres:${{ matrix.job.postgres-version }}
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: ${{ matrix.python-version }}
extras: ${{ matrix.extras }}
python-version: ${{ matrix.job.python-version }}
extras: ${{ matrix.job.extras }}
- name: Await PostgreSQL
if: ${{ matrix.postgres-version }}
if: ${{ matrix.job.postgres-version }}
timeout-minutes: 2
run: until pg_isready -h localhost; do sleep 1; done
- run: poetry run trial --jobs=2 tests
env:
SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }}
SYNAPSE_POSTGRES: ${{ matrix.job.database == 'postgres' || '' }}
SYNAPSE_POSTGRES_HOST: localhost
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
@@ -198,45 +193,24 @@ jobs:
sytest:
if: ${{ !failure() && !cancelled() }}
needs: linting-done
needs: calculate-test-jobs
runs-on: ubuntu-latest
container:
image: matrixdotorg/sytest-synapse:${{ matrix.sytest-tag }}
image: matrixdotorg/sytest-synapse:${{ matrix.job.sytest-tag }}
volumes:
- ${{ github.workspace }}:/src
env:
SYTEST_BRANCH: ${{ github.head_ref }}
POSTGRES: ${{ matrix.postgres && 1}}
MULTI_POSTGRES: ${{ (matrix.postgres == 'multi-postgres') && 1}}
WORKERS: ${{ matrix.workers && 1 }}
REDIS: ${{ matrix.redis && 1 }}
BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }}
POSTGRES: ${{ matrix.job.postgres && 1}}
MULTI_POSTGRES: ${{ (matrix.job.postgres == 'multi-postgres') && 1}}
WORKERS: ${{ matrix.job.workers && 1 }}
BLACKLIST: ${{ matrix.job.workers && 'synapse-blacklist-with-workers' }}
TOP: ${{ github.workspace }}
strategy:
fail-fast: false
matrix:
include:
- sytest-tag: focal
- sytest-tag: focal
postgres: postgres
- sytest-tag: testing
postgres: postgres
- sytest-tag: focal
postgres: multi-postgres
workers: workers
- sytest-tag: buster
postgres: multi-postgres
workers: workers
- sytest-tag: buster
postgres: postgres
workers: workers
redis: redis
job: ${{ fromJson(needs.calculate-test-jobs.outputs.sytest_test_matrix) }}
steps:
- uses: actions/checkout@v2
@@ -252,7 +226,7 @@ jobs:
uses: actions/upload-artifact@v2
if: ${{ always() }}
with:
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }})
name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.job.*, ', ') }})
path: |
/logs/results.tap
/logs/**/*.log*
@@ -283,7 +257,6 @@ jobs:
- run: sudo apt-get -qq install xmlsec1
- uses: matrix-org/setup-python-poetry@v1
with:
python-version: ${{ matrix.python-version }}
extras: "postgres"
- run: .ci/scripts/test_export_data_command.sh
@@ -354,7 +327,7 @@ jobs:
- run: |
set -o pipefail
POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | synapse/.ci/scripts/gotestfmt
shell: bash
name: Run Complement Tests

.github/workflows/triage-incoming.yml (new file, 28 lines)

@@ -0,0 +1,28 @@
name: Move new issues into the issue triage board
on:
issues:
types: [ opened ]
jobs:
add_new_issues:
name: Add new issues to the triage board
runs-on: ubuntu-latest
steps:
- uses: octokit/graphql-action@v2.x
id: add_to_project
with:
headers: '{"GraphQL-Features": "projects_next_graphql"}'
query: |
mutation add_to_project($projectid:ID!,$contentid:ID!) {
addProjectV2ItemById(input: {projectId: $projectid contentId: $contentid}) {
item {
id
}
}
}
projectid: ${{ env.PROJECT_ID }}
contentid: ${{ github.event.issue.node_id }}
env:
PROJECT_ID: "PVT_kwDOAIB0Bs4AFDdZ"
GITHUB_TOKEN: ${{ secrets.ELEMENT_BOT_TOKEN }}

.github/workflows/triage_labelled.yml (new file, 44 lines)

@@ -0,0 +1,44 @@
name: Move labelled issues to correct projects
on:
issues:
types: [ labeled ]
jobs:
move_needs_info:
name: Move X-Needs-Info on the triage board
runs-on: ubuntu-latest
if: >
contains(github.event.issue.labels.*.name, 'X-Needs-Info')
steps:
- uses: octokit/graphql-action@v2.x
id: add_to_project
with:
headers: '{"GraphQL-Features": "projects_next_graphql"}'
query: |
mutation {
updateProjectV2ItemFieldValue(
input: {
projectId: $projectid
itemId: $contentid
fieldId: $fieldid
value: {
singleSelectOptionId: "Todo"
}
}
) {
projectV2Item {
id
}
}
}
projectid: ${{ env.PROJECT_ID }}
contentid: ${{ github.event.issue.node_id }}
fieldid: ${{ env.FIELD_ID }}
optionid: ${{ env.OPTION_ID }}
env:
PROJECT_ID: "PVT_kwDOAIB0Bs4AFDdZ"
GITHUB_TOKEN: ${{ secrets.ELEMENT_BOT_TOKEN }}
FIELD_ID: "PVTSSF_lADOAIB0Bs4AFDdZzgC6ZA4"
OPTION_ID: "ba22e43c"

@@ -137,7 +137,7 @@ jobs:
- run: |
set -o pipefail
TEST_ONLY_SKIP_DEP_HASH_VERIFICATION=1 POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | gotestfmt
TEST_ONLY_SKIP_DEP_HASH_VERIFICATION=1 POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | synapse/.ci/scripts/gotestfmt
shell: bash
name: Run Complement Tests

@@ -1,3 +1,100 @@
Synapse 1.67.0rc1 (2022-09-06)
==============================
This release removes using the deprecated direct TCP replication configuration
for workers. Server admins should use Redis instead. See the [upgrade
notes](https://matrix-org.github.io/synapse/v1.67/upgrade.html#upgrading-to-v1670).
The minimum version of `poetry` supported for managing source checkouts is now
1.2.0.
Notice: from the next major release (v1.68.0) installing Synapse from a source
checkout will require a recent Rust compiler. Those using packages or
`pip install matrix-synapse` will not be affected. See the [upgrade
notes](https://matrix-org.github.io/synapse/v1.67/upgrade.html#upgrading-to-v1670).
Features
--------
- Support setting the registration shared secret in a file, via a new `registration_shared_secret_path` configuration option. ([\#13614](https://github.com/matrix-org/synapse/issues/13614))
- Change the default startup behaviour so that any missing "additional" configuration files (signing key, etc) are generated automatically. ([\#13615](https://github.com/matrix-org/synapse/issues/13615))
- Improve performance of sending messages in rooms with thousands of local users. ([\#13634](https://github.com/matrix-org/synapse/issues/13634))
Bugfixes
--------
- Fix a bug introduced in Synapse 1.13 where the [List Rooms admin API](https://matrix-org.github.io/synapse/develop/admin_api/rooms.html#list-room-api) would return integers instead of booleans for the `federatable` and `public` fields when using a Sqlite database. ([\#13509](https://github.com/matrix-org/synapse/issues/13509))
- Fix a bug where users could not `/forget` rooms after the last member had left the room. ([\#13546](https://github.com/matrix-org/synapse/issues/13546))
- Faster Room Joins: fix `/make_knock` blocking indefinitely when the room in question is a partial-stated room. ([\#13583](https://github.com/matrix-org/synapse/issues/13583))
- Fix loading the current stream position behind the actual position. ([\#13585](https://github.com/matrix-org/synapse/issues/13585))
- Fix a longstanding bug in `register_new_matrix_user` which meant it was always necessary to explicitly give a server URL. ([\#13616](https://github.com/matrix-org/synapse/issues/13616))
- Fix the running of [MSC1763](https://github.com/matrix-org/matrix-spec-proposals/pull/1763) retention purge_jobs in deployments with background jobs running on a worker by forcing them back onto the main worker. Contributed by Brad @ Beeper. ([\#13632](https://github.com/matrix-org/synapse/issues/13632))
- Fix a long-standing bug that downloaded media for URL previews was not deleted while database background updates were running. ([\#13657](https://github.com/matrix-org/synapse/issues/13657))
- Fix [MSC3030](https://github.com/matrix-org/matrix-spec-proposals/pull/3030) `/timestamp_to_event` endpoint to return the correct next event when the events have the same timestamp. ([\#13658](https://github.com/matrix-org/synapse/issues/13658))
- Fix bug where we wedge media plugins if clients disconnect early. Introduced in v1.22.0. ([\#13660](https://github.com/matrix-org/synapse/issues/13660))
- Fix a long-standing bug which meant that keys for unwhitelisted servers were not returned by `/_matrix/key/v2/query`. ([\#13683](https://github.com/matrix-org/synapse/issues/13683))
- Fix a bug introduced in Synapse v1.20.0 that would cause the unstable unread counts from [MSC2654](https://github.com/matrix-org/matrix-spec-proposals/pull/2654) to be calculated even if the feature is disabled. ([\#13694](https://github.com/matrix-org/synapse/issues/13694))
Updates to the Docker image
---------------------------
- Update docker image to use a stable version of poetry. ([\#13688](https://github.com/matrix-org/synapse/issues/13688))
Improved Documentation
----------------------
- Improve the description of the ["chain cover index"](https://matrix-org.github.io/synapse/latest/auth_chain_difference_algorithm.html) used internally by Synapse. ([\#13602](https://github.com/matrix-org/synapse/issues/13602))
- Document how ["monthly active users"](https://matrix-org.github.io/synapse/latest/usage/administration/monthly_active_users.html) is calculated and used. ([\#13617](https://github.com/matrix-org/synapse/issues/13617))
- Improve documentation around user registration. ([\#13640](https://github.com/matrix-org/synapse/issues/13640))
- Remove documentation of legacy `frontend_proxy` worker app. ([\#13645](https://github.com/matrix-org/synapse/issues/13645))
- Clarify documentation that HTTP replication traffic can be protected with a shared secret. ([\#13656](https://github.com/matrix-org/synapse/issues/13656))
- Remove unintentional colons from [config manual](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html) headers. ([\#13665](https://github.com/matrix-org/synapse/issues/13665))
- Update docs to make enabling metrics more clear. ([\#13678](https://github.com/matrix-org/synapse/issues/13678))
- Clarify `(room_id, event_id)` global uniqueness and how we should scope our database schemas. ([\#13701](https://github.com/matrix-org/synapse/issues/13701))
Deprecations and Removals
-------------------------
- Drop support for calling `/_matrix/client/v3/rooms/{roomId}/invite` without an `id_access_token`, which was not permitted by the spec. Contributed by @Vetchu. ([\#13241](https://github.com/matrix-org/synapse/issues/13241))
- Remove redundant `_get_joined_users_from_context` cache. Contributed by Nick @ Beeper (@fizzadar). ([\#13569](https://github.com/matrix-org/synapse/issues/13569))
- Remove the ability to use direct TCP replication with workers. Direct TCP replication was deprecated in Synapse v1.18.0. Workers now require using Redis. ([\#13647](https://github.com/matrix-org/synapse/issues/13647))
- Remove support for unstable [private read receipts](https://github.com/matrix-org/matrix-spec-proposals/pull/2285). ([\#13653](https://github.com/matrix-org/synapse/issues/13653), [\#13692](https://github.com/matrix-org/synapse/issues/13692))
Internal Changes
----------------
- Extend the release script to wait for GitHub Actions to finish and to be usable as a guide for the whole process. ([\#13483](https://github.com/matrix-org/synapse/issues/13483))
- Add experimental configuration option to allow disabling legacy Prometheus metric names. ([\#13540](https://github.com/matrix-org/synapse/issues/13540))
- Cache user IDs instead of profiles to reduce cache memory usage. Contributed by Nick @ Beeper (@fizzadar). ([\#13573](https://github.com/matrix-org/synapse/issues/13573), [\#13600](https://github.com/matrix-org/synapse/issues/13600))
- Optimize how Synapse calculates domains to fetch from during backfill. ([\#13575](https://github.com/matrix-org/synapse/issues/13575))
- Comment about a better future where we can get the state diff between two events. ([\#13586](https://github.com/matrix-org/synapse/issues/13586))
- Instrument `_check_sigs_and_hash_and_fetch` to trace time spent in child concurrent calls for understandable traces in Jaeger. ([\#13588](https://github.com/matrix-org/synapse/issues/13588))
- Improve performance of `@cachedList`. ([\#13591](https://github.com/matrix-org/synapse/issues/13591))
- Minor speed up of fetching large numbers of push rules. ([\#13592](https://github.com/matrix-org/synapse/issues/13592))
- Optimise push action fetching queries. Contributed by Nick @ Beeper (@fizzadar). ([\#13597](https://github.com/matrix-org/synapse/issues/13597))
- Rename `event_map` to `unpersisted_events` when computing the auth differences. ([\#13603](https://github.com/matrix-org/synapse/issues/13603))
- Refactor `get_users_in_room(room_id)` mis-use with dedicated `get_current_hosts_in_room(room_id)` function. ([\#13605](https://github.com/matrix-org/synapse/issues/13605))
- Use dedicated `get_local_users_in_room(room_id)` function to find local users when calculating `join_authorised_via_users_server` of a `/make_join` request. ([\#13606](https://github.com/matrix-org/synapse/issues/13606))
- Refactor `get_users_in_room(room_id)` mis-use to lookup single local user with dedicated `check_local_user_in_room(...)` function. ([\#13608](https://github.com/matrix-org/synapse/issues/13608))
- Drop unused column `application_services_state.last_txn`. ([\#13627](https://github.com/matrix-org/synapse/issues/13627))
- Improve readability of Complement CI logs by printing failure results last. ([\#13639](https://github.com/matrix-org/synapse/issues/13639))
- Generalise the `@cancellable` annotation so it can be used on functions other than just servlet methods. ([\#13662](https://github.com/matrix-org/synapse/issues/13662))
- Introduce a `CommonUsageMetrics` class to share some usage metrics between the Prometheus exporter and the phone home stats. ([\#13671](https://github.com/matrix-org/synapse/issues/13671))
- Add some logging to help track down #13444. ([\#13679](https://github.com/matrix-org/synapse/issues/13679))
- Update poetry lock file for v1.2.0. ([\#13689](https://github.com/matrix-org/synapse/issues/13689))
- Add cache to `is_partial_state_room`. ([\#13693](https://github.com/matrix-org/synapse/issues/13693))
- Update the Grafana dashboard that is included with Synapse in the `contrib` directory. ([\#13697](https://github.com/matrix-org/synapse/issues/13697))
- Only run trial CI on all python versions on non-PRs. ([\#13698](https://github.com/matrix-org/synapse/issues/13698))
- Fix typechecking with latest types-jsonschema. ([\#13712](https://github.com/matrix-org/synapse/issues/13712))
- Reduce number of CI checks we run for PRs. ([\#13713](https://github.com/matrix-org/synapse/issues/13713))
Synapse 1.66.0 (2022-08-31)
===========================

@@ -3,7 +3,7 @@ Synapse |support| |development| |documentation| |license| |pypi| |python|
=========================================================================
Synapse is an open-source `Matrix <https://matrix.org/>`_ homeserver written and
maintained by the Matrix.org Foundation. We began rapid development began in 2014,
maintained by the Matrix.org Foundation. We began rapid development in 2014,
reaching v1.0.0 in 2019. Development on Synapse and the Matrix protocol itself continues
in earnest today.

@@ -3244,6 +3244,104 @@
"yaxis": {
"align": false
}
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"description": "Average number of hosts being rate limited across each worker type.",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green"
}
]
},
"unit": "none"
},
"overrides": []
},
"gridPos": {
"h": 9,
"w": 12,
"x": 12,
"y": 53
},
"id": 225,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom"
},
"tooltip": {
"mode": "single",
"sort": "desc"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"editorMode": "builder",
"expr": "avg by(job, rate_limiter_name) (synapse_rate_limit_sleep_affected_hosts{instance=\"$instance\", job=~\"$job\", index=~\"$index\"})",
"hide": false,
"legendFormat": "Slept by {{job}}:{{rate_limiter_name}}",
"range": true,
"refId": "B"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"editorMode": "builder",
"expr": "avg by(job, rate_limiter_name) (synapse_rate_limit_reject_affected_hosts{instance=\"$instance\", job=~\"$job\", index=~\"$index\"})",
"legendFormat": "Rejected by {{job}}:{{rate_limiter_name}}",
"range": true,
"refId": "A"
}
],
"title": "Hosts being rate limited",
"type": "timeseries"
}
],
"targets": [
@@ -6404,7 +6502,7 @@
"h": 13,
"w": 12,
"x": 0,
"y": 10
"y": 35
},
"hiddenSeries": false,
"id": 12,
@@ -6502,7 +6600,7 @@
"h": 13,
"w": 12,
"x": 12,
"y": 10
"y": 35
},
"hiddenSeries": false,
"id": 26,
@@ -6601,7 +6699,7 @@
"h": 13,
"w": 12,
"x": 0,
"y": 23
"y": 48
},
"hiddenSeries": false,
"id": 13,
@@ -6705,7 +6803,7 @@
"h": 13,
"w": 12,
"x": 12,
"y": 23
"y": 48
},
"hiddenSeries": false,
"id": 27,
@@ -6803,7 +6901,7 @@
"h": 13,
"w": 12,
"x": 0,
"y": 36
"y": 61
},
"hiddenSeries": false,
"id": 28,
@@ -6900,7 +6998,7 @@
"h": 13,
"w": 12,
"x": 12,
"y": 36
"y": 61
},
"hiddenSeries": false,
"id": 25,
@@ -6935,7 +7033,7 @@
"datasource": {
"uid": "$datasource"
},
"expr": "rate(synapse_util_metrics_block_time_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_count[$bucket_size])",
"expr": "rate(synapse_util_metrics_block_time_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])",
"format": "time_series",
"interval": "",
"intervalFactor": 2,
@@ -6960,11 +7058,13 @@
},
"yaxes": [
{
"format": "ms",
"$$hashKey": "object:180",
"format": "s",
"logBase": 1,
"show": true
},
{
"$$hashKey": "object:181",
"format": "short",
"logBase": 1,
"show": true
@@ -6988,7 +7088,7 @@
"h": 15,
"w": 12,
"x": 0,
"y": 49
"y": 74
},
"hiddenSeries": false,
"id": 154,
@@ -7009,7 +7109,7 @@
"alertThreshold": true
},
"percentage": false,
"pluginVersion": "8.4.3",
"pluginVersion": "9.0.4",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -7109,7 +7209,7 @@
"h": 10,
"w": 12,
"x": 0,
"y": 36
"y": 69
},
"hiddenSeries": false,
"id": 1,
@@ -7211,7 +7311,7 @@
"h": 10,
"w": 12,
"x": 12,
"y": 36
"y": 69
},
"hiddenSeries": false,
"id": 8,
@@ -7311,7 +7411,7 @@
"h": 10,
"w": 12,
"x": 0,
"y": 46
"y": 79
},
"hiddenSeries": false,
"id": 38,
@@ -7407,7 +7507,7 @@
"h": 10,
"w": 12,
"x": 12,
"y": 46
"y": 79
},
"hiddenSeries": false,
"id": 39,
@@ -7415,11 +7515,16 @@
"alignAsTable": true,
"avg": false,
"current": false,
"max": false,
"hideEmpty": false,
"hideZero": false,
"max": true,
"min": false,
"rightSide": false,
"show": true,
"sort": "max",
"sortDesc": true,
"total": false,
"values": false
"values": true
},
"lines": true,
"linewidth": 1,
@@ -7467,11 +7572,13 @@
},
"yaxes": [
{
"$$hashKey": "object:101",
"format": "rps",
"logBase": 1,
"show": true
},
{
"$$hashKey": "object:102",
"format": "short",
"logBase": 1,
"show": true
@@ -7501,7 +7608,7 @@
"h": 9,
"w": 12,
"x": 0,
"y": 56
"y": 89
},
"hiddenSeries": false,
"id": 65,
@@ -11757,8 +11864,8 @@
]
},
"time": {
"from": "2022-07-22T04:08:13.716Z",
"to": "2022-07-22T18:44:27.863Z"
"from": "now-3h",
"to": "now"
},
"timepicker": {
"now": true,
@@ -11789,6 +11896,6 @@
"timezone": "",
"title": "Synapse",
"uid": "000000012",
"version": 124,
"version": 132,
"weekStart": ""
}

@@ -36,7 +36,7 @@ TEMP_VENV="$(mktemp -d)"
python3 -m venv "$TEMP_VENV"
source "$TEMP_VENV/bin/activate"
pip install -U pip
pip install poetry==1.2.0b1
pip install poetry==1.2.0
poetry export \
--extras all \
--extras test \

debian/changelog (19 lines changed)

@@ -1,9 +1,28 @@
matrix-synapse-py3 (1.67.0~rc1) stable; urgency=medium
[ Erik Johnston ]
* Use stable poetry 1.2.0 version, rather than a prerelease.
[ Synapse Packaging team ]
* New Synapse release 1.67.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 06 Sep 2022 09:01:06 +0100
matrix-synapse-py3 (1.66.0) stable; urgency=medium
* New Synapse release 1.66.0.
-- Synapse Packaging team <packages@matrix.org> Wed, 31 Aug 2022 11:20:17 +0100
matrix-synapse-py3 (1.66.0~rc2+nmu1) UNRELEASED; urgency=medium
* Update debhelper to compatibility level 12.
* Drop the preinst script stopping synapse.
* Allocate a group for the system user.
* Change dpkg-statoverride to --force-statoverride-add.
-- Jörg Behrmann <behrmann@physik.fu-berlin.de> Tue, 23 Aug 2022 17:17:00 +0100
matrix-synapse-py3 (1.66.0~rc2) stable; urgency=medium
* New Synapse release 1.66.0rc2.

debian/compat (deleted)

@@ -1 +0,0 @@
10

debian/control (2 lines changed)

@@ -4,7 +4,7 @@ Priority: extra
Maintainer: Synapse Packaging team <packages@matrix.org>
# keep this list in sync with the build dependencies in docker/Dockerfile-dhvirtualenv.
Build-Depends:
debhelper (>= 10),
debhelper-compat (= 12),
dh-virtualenv (>= 1.1),
libsystemd-dev,
libpq-dev,

@@ -40,12 +40,12 @@ EOF
/opt/venvs/matrix-synapse/lib/manage_debconf.pl update
if ! getent passwd $USER >/dev/null; then
adduser --quiet --system --no-create-home --home /var/lib/matrix-synapse $USER
adduser --quiet --system --group --no-create-home --home /var/lib/matrix-synapse $USER
fi
for DIR in /var/lib/matrix-synapse /var/log/matrix-synapse /etc/matrix-synapse; do
if ! dpkg-statoverride --list --quiet $DIR >/dev/null; then
dpkg-statoverride --force --quiet --update --add $USER nogroup 0755 $DIR
dpkg-statoverride --force-statoverride-add --quiet --update --add $USER "$(id -gn $USER)" 0755 $DIR
fi
done

@@ -1,31 +0,0 @@
#!/bin/sh -e
# Attempt to undo some of the braindamage caused by
# https://github.com/matrix-org/package-synapse-debian/issues/18.
#
# Due to reasons [1], the old python2 matrix-synapse package will not stop the
# service when the package is uninstalled. Our maintainer scripts will do the
# right thing in terms of ensuring the service is enabled and unmasked, but
# then do a `systemctl start matrix-synapse`, which of course does nothing -
# leaving the old (py2) service running.
#
# There should normally be no reason for the service to be running during our
# preinst, so we assume that if it *is* running, it's due to that situation,
# and stop it.
#
# [1] dh_systemd_start doesn't do anything because it sees that there is an
# init.d script with the same name, so leaves it to dh_installinit.
#
# dh_installinit doesn't do anything because somebody gave it a --no-start
# for unknown reasons.
if [ -x /bin/systemctl ]; then
if /bin/systemctl --quiet is-active -- matrix-synapse; then
echo >&2 "stopping existing matrix-synapse service"
/bin/systemctl stop matrix-synapse || true
fi
fi
#DEBHELPER#
exit 0

@@ -1,2 +0,0 @@
# Specify environment variables used when running Synapse
# SYNAPSE_CACHE_FACTOR=0.5 (default)

@@ -5,7 +5,6 @@ Description=Synapse Matrix homeserver
Type=notify
User=matrix-synapse
WorkingDirectory=/var/lib/matrix-synapse
EnvironmentFile=-/etc/default/matrix-synapse
ExecStartPre=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/ --generate-keys
ExecStart=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --config-path=/etc/matrix-synapse/homeserver.yaml --config-path=/etc/matrix-synapse/conf.d/
ExecReload=/bin/kill -HUP $MAINPID
@@ -13,5 +12,10 @@ Restart=always
RestartSec=3
SyslogIdentifier=matrix-synapse
# The environment file is not shipped by default anymore and the below directive
# is for backwards compatibility only. Please use your homeserver.yaml if
# possible.
EnvironmentFile=-/etc/default/matrix-synapse
[Install]
WantedBy=multi-user.target

debian/rules (12 lines changed)

@@ -6,15 +6,17 @@
# assume we only have one package
PACKAGE_NAME:=`dh_listpackages`
override_dh_systemd_enable:
dh_systemd_enable --name=matrix-synapse
override_dh_installinit:
dh_installinit --name=matrix-synapse
override_dh_installsystemd:
dh_installsystemd --name=matrix-synapse
# we don't really want to strip the symbols from our object files.
override_dh_strip:
# many libraries pulled from PyPI have allocatable sections after
# non-allocatable ones on which dwz errors out. For those without the issue the
# gains are only marginal
override_dh_dwz:
# dh_shlibdeps calls dpkg-shlibdeps, which finds all the binary files
# (executables and shared libs) in the package, and looks for the shared
# libraries that they depend on. It then adds a dependency on the package that

@@ -46,17 +46,8 @@ RUN \
# We install poetry in its own build stage to avoid its dependencies conflicting with
# synapse's dependencies.
# We use a specific commit from poetry's master branch instead of our usual 1.1.14,
# to incorporate fixes to some bugs in `poetry export`. This commit corresponds to
# https://github.com/python-poetry/poetry/pull/5156 and
# https://github.com/python-poetry/poetry/issues/5141 ;
# without it, we generate a requirements.txt with incorrect environment markers,
# which causes necessary packages to be omitted when we `pip install`.
#
# NB: In poetry 1.2 `poetry export` will be moved into a plugin; we'll need to also
# pip install poetry-plugin-export (https://github.com/python-poetry/poetry-plugin-export).
RUN --mount=type=cache,target=/root/.cache/pip \
pip install --user "poetry-core==1.1.0a7" "git+https://github.com/python-poetry/poetry.git@fb13b3a676f476177f7937ffa480ee5cff9a90a5"
pip install --user "poetry==1.2.0"
WORKDIR /synapse

@@ -69,6 +69,7 @@
- [Manhole](manhole.md)
- [Monitoring](metrics-howto.md)
- [Reporting Homeserver Usage Statistics](usage/administration/monitoring/reporting_homeserver_usage_statistics.md)
- [Monthly Active Users](usage/administration/monthly_active_users.md)
- [Understanding Synapse Through Grafana Graphs](usage/administration/understanding_synapse_through_grafana_graphs.md)
- [Useful SQL for Admins](usage/administration/useful_sql_for_admins.md)
- [Database Maintenance Tools](usage/administration/database_maintenance_tools.md)

@@ -5,9 +5,9 @@ non-interactive way. This is generally used for bootstrapping a Synapse
instance with administrator accounts.
To authenticate yourself to the server, you will need both the shared secret
(`registration_shared_secret` in the homeserver configuration), and a
one-time nonce. If the registration shared secret is not configured, this API
is not enabled.
([`registration_shared_secret`](../configuration/config_documentation.md#registration_shared_secret)
in the homeserver configuration), and a one-time nonce. If the registration
shared secret is not configured, this API is not enabled.
To fetch the nonce, you need to request one from the API:
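
The nonce comes from a `GET` to the registration endpoint. As a hedged, end-to-end Python sketch of the flow this page describes (the endpoint path and the HMAC-SHA1 construction over NUL-separated fields follow this documentation; the helper below is an illustration, not a normative client):

```python
import hashlib
import hmac

import requests

SERVER = "http://localhost:8008"  # assumption: a local homeserver
SHARED_SECRET = "<registration_shared_secret from homeserver.yaml>"

# 1. Fetch a one-time nonce.
nonce = requests.get(f"{SERVER}/_synapse/admin/v1/register").json()["nonce"]

# 2. The MAC is an HMAC-SHA1 over nonce, username, password and the admin
#    flag, joined by NUL bytes and keyed with the shared secret.
user, password, admin = "alice", "correct horse", False
mac = hmac.new(
    SHARED_SECRET.encode("utf8"),
    b"\x00".join(
        [
            nonce.encode("utf8"),
            user.encode("utf8"),
            password.encode("utf8"),
            b"admin" if admin else b"notadmin",
        ]
    ),
    hashlib.sha1,
).hexdigest()

# 3. Register the account.
resp = requests.post(
    f"{SERVER}/_synapse/admin/v1/register",
    json={
        "nonce": nonce,
        "username": user,
        "password": password,
        "admin": admin,
        "mac": mac,
    },
)
print(resp.json())
```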

@@ -34,13 +34,45 @@ the process of indexing it).
## Chain Cover Index
Synapse computes auth chain differences by pre-computing a "chain cover" index
for the auth chain in a room, allowing efficient reachability queries like "is
event A in the auth chain of event B". This is done by assigning every event a
*chain ID* and *sequence number* (e.g. `(5,3)`), and having a map of *links*
between chains (e.g. `(5,3) -> (2,4)`) such that A is reachable by B (i.e. `A`
is in the auth chain of `B`) if and only if either:
for the auth chain in a room, allowing us to efficiently make reachability queries
like "is event `A` in the auth chain of event `B`?". We could do this with an index
that tracks all pairs `(A, B)` such that `A` is in the auth chain of `B`. However, this
would be prohibitively large, scaling poorly as the room accumulates more state
events.
1. A and B have the same chain ID and `A`'s sequence number is less than `B`'s
Instead, we break down the graph into *chains*. A chain is a subset of a DAG
with the following property: for any pair of events `E` and `F` in the chain,
the chain contains a path `E -> F` or a path `F -> E`. This forces a chain to be
linear (without forks), e.g. `E -> F -> G -> ... -> H`. Each event in the chain
is given a *sequence number* local to that chain. The oldest event `E` in the
chain has sequence number 1. If `E` has a child `F` in the chain, then `F` has
sequence number 2. If `E` has a grandchild `G` in the chain, then `G` has
sequence number 3; and so on.
Synapse ensures that each persisted event belongs to exactly one chain, and
tracks how the chains are connected to one another. This allows us to
efficiently answer reachability queries. Doing so uses less storage than
tracking reachability on an event-by-event basis, particularly when we have
fewer and longer chains. See
> Jagadish, H. (1990). [A compression technique to materialize transitive closure](https://doi.org/10.1145/99935.99944).
> *ACM Transactions on Database Systems (TODS)*, 15*(4)*, 558-598.
for the original idea or
> Y. Chen, Y. Chen, [An efficient algorithm for answering graph
> reachability queries](https://doi.org/10.1109/ICDE.2008.4497498),
> in: 2008 IEEE 24th International Conference on Data Engineering, April 2008,
> pp. 893-902. (PDF available via [Google Scholar](https://scholar.google.com/scholar?q=Y.%20Chen,%20Y.%20Chen,%20An%20efficient%20algorithm%20for%20answering%20graph%20reachability%20queries,%20in:%202008%20IEEE%2024th%20International%20Conference%20on%20Data%20Engineering,%20April%202008,%20pp.%20893902.).)
for a more modern take.
In practical terms, the chain cover assigns every event a
*chain ID* and *sequence number* (e.g. `(5,3)`), and maintains a map of *links*
between events in chains (e.g. `(5,3) -> (2,4)`) such that `A` is reachable by `B`
(i.e. `A` is in the auth chain of `B`) if and only if either:
1. `A` and `B` have the same chain ID and `A`'s sequence number is less than `B`'s
sequence number; or
2. there is a link `L` between `B`'s chain ID and `A`'s chain ID such that
`L.start_seq_no` <= `B.seq_no` and `A.seq_no` <= `L.end_seq_no`.
@@ -49,8 +81,9 @@ There are actually two potential implementations, one where we store links from
each chain to every other reachable chain (the transitive closure of the links
graph), and one where we remove redundant links (the transitive reduction of the
links graph) e.g. if we have chains `C3 -> C2 -> C1` then the link `C3 -> C1`
would not be stored. Synapse uses the former implementations so that it doesn't
need to recurse to test reachability between chains.
would not be stored. Synapse uses the former implementation so that it doesn't
need to recurse to test reachability between chains. This trades-off extra storage
in order to save CPU cycles and DB queries.
### Example

@@ -62,6 +62,8 @@ pipx install poetry
but see poetry's [installation instructions](https://python-poetry.org/docs/#installation)
for other installation methods.
Synapse requires Poetry version 1.2.0 or later.
Next, open a terminal and install dependencies as follows:
```sh

@@ -191,3 +191,27 @@ There are three separate aspects to this:
flavour will be accepted by SQLite 3.22, but will give a column whose
default value is the **string** `"FALSE"` - which, when cast back to a boolean
in Python, evaluates to `True`.
## `event_id` global uniqueness
In room versions `1` and `2` it's possible to end up with two events with the
same `event_id` (in the same or different rooms). After room version `3`, that
can only happen with a hash collision, which we basically hope will never
happen.
There are several places in Synapse and even Matrix APIs like [`GET
/_matrix/federation/v1/event/{eventId}`](https://spec.matrix.org/v1.1/server-server-api/#get_matrixfederationv1eventeventid)
where we assume that event IDs are globally unique.
But hash collisions are still possible, and by treating event IDs as room
scoped, we can reduce the possibility of a hash collision. When scoping
`event_id` in the database schema, it should be also accompanied by `room_id`
(`PRIMARY KEY (room_id, event_id)`) and lookups should be done through the pair
`(room_id, event_id)`.
There has been a lot of debate on this in places like
https://github.com/matrix-org/matrix-spec-proposals/issues/2779 and
[MSC2848](https://github.com/matrix-org/matrix-spec-proposals/pull/2848) which
has no resolution yet (as of 2022-09-01).
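
As a sketch of the recommended scoping (an illustrative schema only, not Synapse's real `events` table), using SQLite via Python's standard library:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
# Scope events by (room_id, event_id), as recommended above.
conn.execute(
    """
    CREATE TABLE events (
        room_id  TEXT NOT NULL,
        event_id TEXT NOT NULL,
        json     TEXT NOT NULL,
        PRIMARY KEY (room_id, event_id)
    )
    """
)
conn.execute(
    "INSERT INTO events VALUES (?, ?, ?)",
    ("!room:example.org", "$event1", "{}"),
)
# Lookups always go through the (room_id, event_id) pair.
row = conn.execute(
    "SELECT json FROM events WHERE room_id = ? AND event_id = ?",
    ("!room:example.org", "$event1"),
).fetchone()
print(row)
```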

@@ -243,14 +243,11 @@ doesn't require poetry. (It's what we use in CI too). However, you could try
## Check the version of poetry with `poetry --version`.
At the time of writing, the 1.2 series is beta only. We have seen some examples
where the lockfiles generated by 1.2 prereleases aren't interpreted correctly
by poetry 1.1.x. For now, use poetry 1.1.14, which includes a critical
[change](https://github.com/python-poetry/poetry/pull/5973) needed to remain
[compatible with PyPI](https://github.com/pypi/warehouse/pull/11775).
The minimum version of poetry supported by Synapse is 1.2.
It can also be useful to check the version of `poetry-core` in use. If you've
installed `poetry` with `pipx`, try `pipx runpip poetry list | grep poetry-core`.
installed `poetry` with `pipx`, try `pipx runpip poetry list | grep
poetry-core`.
## Clear caches: `poetry cache clear --all pypi`.

@@ -7,7 +7,13 @@
1. Enable Synapse metrics:
There are two methods of enabling metrics in Synapse.
In `homeserver.yaml`, make sure `enable_metrics` is
set to `True`.
1. Enable the `/_synapse/metrics` Synapse endpoint that Prometheus uses to
collect data:
There are two methods of enabling the metrics endpoint in Synapse.
The first serves the metrics as a part of the usual web server and
can be enabled by adding the \"metrics\" resource to the existing
@@ -41,9 +47,6 @@
- '0.0.0.0'
```
For both options, you will need to ensure that `enable_metrics` is
set to `True`.
1. Restart Synapse.
1. Add a Prometheus target for Synapse.

@@ -174,7 +174,9 @@ oidc_providers:
1. Create a regular web application for Synapse
2. Set the Allowed Callback URLs to `[synapse public baseurl]/_synapse/client/oidc/callback`
3. Add a rule to add the `preferred_username` claim.
3. Add a rule with any name to add the `preferred_username` claim.
(See https://auth0.com/docs/customize/rules/create-rules for more information on how to create rules.)
<details>
<summary>Code sample</summary>

@@ -506,9 +506,13 @@ email will be disabled.
### Registering a user
The easiest way to create a new user is to do so from a client like [Element](https://element.io/).
One way to create a new user is to do so from a client like
[Element](https://element.io/). This requires registration to be enabled via
the
[`enable_registration`](../usage/configuration/config_documentation.md#enable_registration)
setting.
Alternatively, you can do so from the command line. This can be done as follows:
Alternatively, you can create new users from the command line. This can be done as follows:
1. If synapse was installed via pip, activate the virtualenv as follows (if Synapse was
installed via a prebuilt package, `register_new_matrix_user` should already be
@@ -520,7 +524,7 @@ Alternatively, you can do so from the command line. This can be done as follows:
```
2. Run the following command:
```sh
register_new_matrix_user -c homeserver.yaml http://localhost:8008
register_new_matrix_user -c homeserver.yaml
```
This will prompt you to add details for the new user, and will then connect to
@@ -533,12 +537,13 @@ Make admin [no]:
Success!
```
This process uses a setting `registration_shared_secret` in
`homeserver.yaml`, which is shared between Synapse itself and the
`register_new_matrix_user` script. It doesn't matter what it is (a random
value is generated by `--generate-config`), but it should be kept secret, as
anyone with knowledge of it can register users, including admin accounts,
on your server even if `enable_registration` is `false`.
This process uses a setting
[`registration_shared_secret`](../usage/configuration/config_documentation.md#registration_shared_secret),
which is shared between Synapse itself and the `register_new_matrix_user`
script. It doesn't matter what it is (a random value is generated by
`--generate-config`), but it should be kept secret, as anyone with knowledge of
it can register users, including admin accounts, on your server even if
`enable_registration` is `false`.
### Setting up a TURN server

@@ -5,6 +5,8 @@ worker_name: generic_worker1
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093
worker_main_http_uri: http://localhost:8008/
worker_listeners:
- type: http
port: 8083

@@ -89,6 +89,37 @@ process, for example:
dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
```
# Upgrading to v1.67.0
## Direct TCP replication is no longer supported: migrate to Redis
Redis support was added in v1.13.0 and became the recommended method in
v1.18.0, replacing the old direct TCP connections to the main process
(deprecated as of v1.18.0). With Redis, rather than all the workers connecting
to the main process, all the workers and the main process connect to Redis,
which relays replication commands between processes. This can give a significant
CPU saving on the main process and is a prerequisite for upcoming
performance improvements.
To migrate to Redis add the [`redis` config](./workers.md#shared-configuration),
and remove the TCP `replication` listener from the main process's config and
`worker_replication_port` from the worker config. Note that an HTTP listener
with a `replication` resource is still required.
## Minimum version of Poetry is now v1.2.0
The minimum supported version of poetry is now 1.2. This should only affect
those installing from a source checkout.
## Rust requirement in the next release
From the next major release (v1.68.0) installing Synapse from a source checkout
will require a recent Rust compiler. Those using packages or
`pip install matrix-synapse` will not be affected.
The simplest way of installing Rust is via [rustup.rs](https://rustup.rs/).
# Upgrading to v1.66.0
## Delegation of email validation no longer supported

@@ -0,0 +1,84 @@
# Monthly Active Users
Synapse can be configured to record the number of monthly active users (also referred to as MAU) on a given homeserver.
For clarity's sake, MAU only tracks local users.
Please note that the metrics recorded by the [Homeserver Usage Stats](../../usage/administration/monitoring/reporting_homeserver_usage_statistics.md)
are calculated differently. The `monthly_active_users` from the usage stats does not take into account any
of the rules below, and counts any users who have made a request to the homeserver in the last 30 days.
See the [configuration manual](../../usage/configuration/config_documentation.md#limit_usage_by_mau) for details on how to configure MAU.
## Calculating active users
Individual user activity is measured in active days. If a user performs an action, the exact time of that action is then recorded. When
calculating the MAU figure, any users with a recorded action in the last 30 days are considered part of the cohort. Days are measured
as a rolling window from the current system time to 30 days ago.
So for example, if Synapse were to calculate the active users on the 15th July at 13:25, it would include any activity from 15th June 13:25 onwards.
A user is **never** considered active if they are either:
- Part of the trial day cohort (described below)
- Owned by an application service.
- Note: This **only** covers users that are part of an application service `namespaces.users` registration. The namespace
must also be marked as `exclusive`.
Otherwise, any request to Synapse will mark the user as active. Please note that registration will not mark a user as active *unless*
they register with a 3pid that is included in the config field `mau_limits_reserved_threepids`.
The Prometheus metric for MAU is refreshed every 5 minutes.
Once an hour, Synapse checks for users who have become inactive, i.e. whose most recent activity timestamp is more than 30 days old. These users
are removed from the active users cohort. If they then become active, they are immediately restored to the cohort.
It is important to note that **deactivated** users are not immediately removed from the pool of active users, but as these users won't
perform actions they will eventually be removed from the cohort.
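
A small Python sketch of the rolling 30-day window described above (the names and data structures are illustrative assumptions, not Synapse internals):

```python
import time

THIRTY_DAYS_SECS = 30 * 24 * 60 * 60

def monthly_active_users(last_active_ts, now=None):
    """Count users active in the rolling 30-day window.

    `last_active_ts` maps user_id -> most recent activity time
    (unix seconds); users owned by application services or still in
    their trial period would be filtered out before this call.
    """
    now = time.time() if now is None else now
    cutoff = now - THIRTY_DAYS_SECS
    return sum(1 for ts in last_active_ts.values() if ts >= cutoff)
```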
### Trial days
If the config option `mau_trial_days` is set, a user must have been active this many days **after** registration to be active. A user is in the
trial period if their registration timestamp (also known as the `creation_ts`) is less than `mau_trial_days` old.
As an example, if `mau_trial_days` is set to `3` and a user is active **after** 3 days (72 hours from registration time) then they will be counted as active.
The `mau_appservice_trial_days` config further extends this rule by applying different durations depending on the `appservice_id` of the user.
Users registered by an application service will be recorded with an `appservice_id` matching the `id` key in the registration file for that service.
## Limiting usage of the homeserver when the maximum MAU is reached
If both config options `limit_usage_by_mau` and `max_mau_value` are set, and the current MAU value exceeds the maximum value, the
homeserver will begin to block some actions.
Individual users matching **any** of the below criteria never have their actions blocked:
- Considered part of the cohort of MAU users.
- Considered part of the trial period.
- Registered as a `support` user.
- Application service users if `track_appservice_user_ips` is NOT set.
Please note that server admins are **not** exempt from blocking.
The following actions are blocked when the MAU limit is exceeded:
- Logging in
- Sending events
- Creating rooms
- Syncing
Registration is also blocked for all new signups *unless* the user is registering with a threepid included in the `mau_limits_reserved_threepids`
config value.
When a request is blocked, the response will have the `errcode` `M_RESOURCE_LIMIT_EXCEEDED`.
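
Putting the rules together, a hedged sketch of the blocking decision (all names are illustrative; this is not Synapse's internal API):

```python
class ResourceLimitError(Exception):
    def __init__(self, errcode: str):
        super().__init__(errcode)
        self.errcode = errcode

def is_exempt(user, track_appservice_user_ips: bool) -> bool:
    # Users matching any of the criteria above are never blocked.
    return (
        user.in_mau_cohort                 # already counted as active
        or user.in_trial_period            # within the trial day cohort
        or user.is_support_user            # registered as a `support` user
        or (user.is_appservice_user and not track_appservice_user_ips)
    )

def check_action_allowed(user, current_mau: int, config) -> None:
    # A raised error surfaces to clients as errcode M_RESOURCE_LIMIT_EXCEEDED.
    if (
        config.limit_usage_by_mau
        and current_mau > config.max_mau_value
        and not is_exempt(user, config.track_appservice_user_ips)
    ):
        raise ResourceLimitError("M_RESOURCE_LIMIT_EXCEEDED")
```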
## Metrics
Synapse records several different prometheus metrics for MAU.
`synapse_admin_mau:current` records the current MAU figure for native (non-application-service) users.
`synapse_admin_mau:max` records the maximum MAU as dictated by the `max_mau_value` config value.
`synapse_admin_mau_current_mau_by_service` records the current MAU including application service users. The label `app_service` can be used
to filter by a specific service ID. This *also* includes non-application-service users under `app_service=native`.
`synapse_admin_mau:registered_reserved_users` records the number of users specified in `mau_limits_reserved_threepids` which have
registered accounts on the homeserver.

@@ -431,8 +431,6 @@ Sub-options for each listener include:
* `metrics`: (see the docs [here](../../metrics-howto.md)),
* `replication`: (deprecated as of Synapse 1.18, see the docs [here](../../workers.md)).
* `tls`: set to true to enable TLS for this listener. Will use the TLS key/cert specified in tls_private_key_path / tls_certificate_path.
* `x_forwarded`: Only valid for an 'http' listener. Set to true to use the X-Forwarded-For header as the client IP. Useful when Synapse is
@@ -595,6 +593,8 @@ server owner wants to limit to the number of monthly active users. When enabled
reached the server returns a `ResourceLimitError` with error type `Codes.RESOURCE_LIMIT_EXCEEDED`.
Defaults to false. If this is enabled, a value for `max_mau_value` must also be set.
See [Monthly Active Users](../administration/monthly_active_users.md) for details on how to configure MAU.
Example configuration:
```yaml
limit_usage_by_mau: true
@@ -1873,8 +1873,8 @@ See [here](../../CAPTCHA_SETUP.md) for full details on setting up captcha.
---
### `recaptcha_public_key`
This homeserver's ReCAPTCHA public key. Must be specified if `enable_registration_captcha` is
enabled.
This homeserver's ReCAPTCHA public key. Must be specified if
[`enable_registration_captcha`](#enable_registration_captcha) is enabled.
Example configuration:
```yaml
@@ -1883,7 +1883,8 @@ recaptcha_public_key: "YOUR_PUBLIC_KEY"
---
### `recaptcha_private_key`
This homeserver's ReCAPTCHA private key. Must be specified if `enable_registration_captcha` is
This homeserver's ReCAPTCHA private key. Must be specified if
[`enable_registration_captcha`](#enable_registration_captcha) is
enabled.
Example configuration:
@@ -1893,9 +1894,11 @@ recaptcha_private_key: "YOUR_PRIVATE_KEY"
---
### `enable_registration_captcha`
Set to true to enable ReCaptcha checks when registering, preventing signup
unless a captcha is answered. Requires a valid ReCaptcha public/private key.
Defaults to false.
Set to `true` to require users to complete a CAPTCHA test when registering an account.
Requires a valid ReCaptcha public/private key.
Defaults to `false`.
Note that [`enable_registration`](#enable_registration) must also be set to allow account registration.
Example configuration:
```yaml
@@ -1971,9 +1974,21 @@ Registration can be rate-limited using the parameters in the [Ratelimiting](#rat
---
### `enable_registration`
Enable registration for new users. Defaults to false. It is highly recommended that if you enable registration,
you use either captcha, email, or token-based verification to verify that new users are not bots. In order to enable registration
without any verification, you must also set `enable_registration_without_verification` to true.
Enable registration for new users. Defaults to `false`.
It is highly recommended that if you enable registration, you set one or more
of the following options, to avoid abuse of your server by "bots":
* [`enable_registration_captcha`](#enable_registration_captcha)
* [`registrations_require_3pid`](#registrations_require_3pid)
* [`registration_requires_token`](#registration_requires_token)
(In order to enable registration without any verification, you must also set
[`enable_registration_without_verification`](#enable_registration_without_verification).)
Note that even if this setting is disabled, new accounts can still be created
via the admin API if
[`registration_shared_secret`](#registration_shared_secret) is set.
Example configuration:
```yaml
@@ -1981,88 +1996,21 @@ enable_registration: true
```
---
### `enable_registration_without_verification`
Enable registration without email or captcha verification. Note: this option is *not* recommended,
as registration without verification is a known vector for spam and abuse. Defaults to false. Has no effect
unless `enable_registration` is also enabled.
as registration without verification is a known vector for spam and abuse. Defaults to `false`. Has no effect
unless [`enable_registration`](#enable_registration) is also enabled.
Example configuration:
```yaml
enable_registration_without_verification: true
```
---
### `session_lifetime`
Time that a user's session remains valid for, after they log in.
Note that this is not currently compatible with guest logins.
Note also that this is calculated at login time: changes are not applied retrospectively to users who have already
logged in.
By default, this is infinite.
Example configuration:
```yaml
session_lifetime: 24h
```
----
### `refreshable_access_token_lifetime`
Time that an access token remains valid for, if the session is using refresh tokens.
For more information about refresh tokens, please see the [manual](user_authentication/refresh_tokens.md).
Note that this only applies to clients which advertise support for refresh tokens.
Note also that this is calculated at login time and refresh time: changes are not applied to
existing sessions until they are refreshed.
By default, this is 5 minutes.
Example configuration:
```yaml
refreshable_access_token_lifetime: 10m
```
---
### `refresh_token_lifetime`
Time that a refresh token remains valid for (provided that it is not
exchanged for another one first).
This option can be used to automatically log-out inactive sessions.
Please see the manual for more information.
Note also that this is calculated at login time and refresh time:
changes are not applied to existing sessions until they are refreshed.
By default, this is infinite.
Example configuration:
```yaml
refresh_token_lifetime: 24h
```
---
### `nonrefreshable_access_token_lifetime`
Time that an access token remains valid for, if the session is NOT
using refresh tokens.
Please note that not all clients support refresh tokens, so setting
this to a short value may be inconvenient for some users who will
then be logged out frequently.
Note also that this is calculated at login time: changes are not applied
retrospectively to existing sessions for users that have already logged in.
By default, this is infinite.
Example configuration:
```yaml
nonrefreshable_access_token_lifetime: 24h
```
---
### `registrations_require_3pid`
If this is set, the user must provide all of the specified types of 3PID when registering.
If this is set, users must provide all of the specified types of 3PID when registering an account.
Note that [`enable_registration`](#enable_registration) must also be set to allow account registration.
Example configuration:
```yaml
@ -2110,9 +2058,11 @@ enable_3pid_lookup: false
Require users to submit a token during registration.
Tokens can be managed using the admin [API](../administration/admin_api/registration_tokens.md).
Note that `enable_registration` must be set to true.
Disabling this option will not delete any tokens previously generated.
Defaults to false. Set to true to enable.
Defaults to `false`. Set to `true` to enable.
Note that [`enable_registration`](#enable_registration) must also be set to allow account registration.
Example configuration:
```yaml
@ -2121,13 +2071,39 @@ registration_requires_token: true
---
### `registration_shared_secret`
If set, allows registration of standard or admin accounts by anyone who
has the shared secret, even if registration is otherwise disabled.
If set, allows registration of standard or admin accounts by anyone who has the
shared secret, even if [`enable_registration`](#enable_registration) is not
set.
This is primarily intended for use with the `register_new_matrix_user` script
(see [Registering a user](../../setup/installation.md#registering-a-user));
however, the interface is [documented](../admin_api/register_api.html).
See also [`registration_shared_secret_path`](#registration_shared_secret_path).
Example configuration:
```yaml
registration_shared_secret: <PRIVATE STRING>
```
---
### `registration_shared_secret_path`
An alternative to [`registration_shared_secret`](#registration_shared_secret):
allows the shared secret to be specified in an external file.
The file should be a plain text file, containing only the shared secret.
If this file does not exist, Synapse will create a new shared
secret on startup and store it in this file.
Example configuration:
```yaml
registration_shared_secret_path: /path/to/secrets/file
```
_Added in Synapse 1.67.0._
---
### `bcrypt_rounds`
@ -2358,6 +2334,79 @@ Example configuration:
```yaml
inhibit_user_in_use_error: true
```
---
## User session management
---
### `session_lifetime`
Time that a user's session remains valid for, after they log in.
Note that this is not currently compatible with guest logins.
Note also that this is calculated at login time: changes are not applied retrospectively to users who have already
logged in.
By default, this is infinite.
Example configuration:
```yaml
session_lifetime: 24h
```
---
### `refreshable_access_token_lifetime`
Time that an access token remains valid for, if the session is using refresh tokens.
For more information about refresh tokens, please see the [manual](user_authentication/refresh_tokens.md).
Note that this only applies to clients which advertise support for refresh tokens.
Note also that this is calculated at login time and refresh time: changes are not applied to
existing sessions until they are refreshed.
By default, this is 5 minutes.
Example configuration:
```yaml
refreshable_access_token_lifetime: 10m
```
---
### `refresh_token_lifetime`
Time that a refresh token remains valid for (provided that it is not
exchanged for another one first).
This option can be used to automatically log-out inactive sessions.
Please see the manual for more information.
Note also that this is calculated at login time and refresh time:
changes are not applied to existing sessions until they are refreshed.
By default, this is infinite.
Example configuration:
```yaml
refresh_token_lifetime: 24h
```
---
### `nonrefreshable_access_token_lifetime`
Time that an access token remains valid for, if the session is NOT
using refresh tokens.
Please note that not all clients support refresh tokens, so setting
this to a short value may be inconvenient for some users who will
then be logged out frequently.
Note also that this is calculated at login time: changes are not applied
retrospectively to existing sessions for users that have already logged in.
By default, this is infinite.
Example configuration:
```yaml
nonrefreshable_access_token_lifetime: 24h
```
---
## Metrics
Config options related to metrics.
@ -2435,7 +2484,7 @@ report_stats_endpoint: https://example.com/report-usage-stats/push
Config settings related to the client/server API
---
### `room_prejoin_state:`
### `room_prejoin_state`
Controls for the state that is shared with users who receive an invite
to a room. By default, the following state event types are shared with users who
@ -2537,7 +2586,10 @@ Config options relating to signing keys
---
### `signing_key_path`
Path to the signing key to sign messages with.
Path to the signing key to sign events and federation requests with.
*New in Synapse 1.67*: If this file does not exist, Synapse will create a new signing
key on startup and store it in this file.
Example configuration:
```yaml
@ -2572,7 +2624,7 @@ Example configuration:
key_refresh_interval: 2d
```
---
### `trusted_key_servers:`
### `trusted_key_servers`
The trusted servers to download signing keys from.
@ -2642,13 +2694,10 @@ key_server_signing_keys_path: "key_server_signing_keys.key"
The following settings can be used to make Synapse use a single sign-on
provider for authentication, instead of its internal password database.
You will probably also want to set the following options to false to
You will probably also want to set the following options to `false` to
disable the regular login/registration flows:
* `enable_registration`
* `password_config.enabled`
You will also want to investigate the settings under the "sso" configuration
section below.
* [`enable_registration`](#enable_registration)
* [`password_config.enabled`](#password_config)
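For example, a minimal sketch of disabling both in the shared configuration
(using exactly the two option names listed above):
```yaml
enable_registration: false
password_config:
  enabled: false
```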
---
### `saml2_config`

View file

@ -32,13 +32,8 @@ stream between all configured Synapse processes. Additionally, processes may
make HTTP requests to each other, primarily for operations which need to wait
for a reply ─ such as sending an event.
Redis support was added in v1.13.0 and became the recommended method in
v1.18.0. It replaced the old direct TCP connections (which are deprecated as of
v1.18.0) to the main process. With Redis, rather than all the workers connecting
to the main process, all the workers and the main process connect to Redis,
which relays replication commands between processes. This can give a significant
cpu saving on the main process and will be a prerequisite for upcoming
performance improvements.
All the workers and the main process connect to Redis, which relays replication
commands between processes.
If Redis support is enabled, Synapse will use it as a shared cache, as well as a
pub/sub mechanism.
@ -117,23 +112,26 @@ redis:
enabled: true
```
See the sample config for the full documentation of each option.
See the [configuration manual](usage/configuration/config_documentation.html) for the full documentation of each option.
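For instance, a short sketch of the `redis` block with the connection details
spelled out explicitly (the host and port values here are illustrative; see the
manual for the full option list):
```yaml
redis:
  enabled: true
  host: localhost
  port: 6379
```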
Under **no circumstances** should the replication listener be exposed to the
public internet; it has no authentication and is unencrypted.
public internet; replication traffic is:
* always unencrypted
* unauthenticated, unless `worker_replication_secret` is configured
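For instance, a sketch of setting such a secret in the shared configuration
(`<PRIVATE STRING>` is a placeholder for a strong random value):
```yaml
worker_replication_secret: "<PRIVATE STRING>"
```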
### Worker configuration
In the config file for each worker, you must specify the type of worker
application (`worker_app`), and you should specify a unique name for the worker
(`worker_name`). The currently available worker applications are listed below.
You must also specify the HTTP replication endpoint that it should talk to on
the main synapse process. `worker_replication_host` should specify the host of
the main synapse and `worker_replication_http_port` should point to the HTTP
replication port. If the worker will handle HTTP requests then the
`worker_listeners` option should be set with a `http` listener, in the same way
as the `listeners` option in the shared config.
In the config file for each worker, you must specify:
* The type of worker (`worker_app`). The currently available worker applications are listed below.
* A unique name for the worker (`worker_name`).
* The HTTP replication endpoint that it should talk to on the main synapse process
(`worker_replication_host` and `worker_replication_http_port`)
* If handling HTTP requests, a `worker_listeners` option with an `http`
listener, in the same way as the `listeners` option in the shared config.
* If handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for
the main process (`worker_main_http_uri`).
For example:
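A minimal sketch of such a file for a generic worker, assuming the main
process exposes its HTTP replication listener on port 9093 (the worker name
and ports here are illustrative):
```yaml
worker_app: synapse.app.generic_worker
worker_name: generic_worker1

# The HTTP replication listener on the main synapse process.
worker_replication_host: 127.0.0.1
worker_replication_http_port: 9093

worker_listeners:
  - type: http
    port: 8083
    resources:
      - names: [client, federation]
```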
@ -217,10 +215,12 @@ information.
^/_matrix/client/(api/v1|r0|v3|unstable)/search$
# Encryption requests
# Note that ^/_matrix/client/(r0|v3|unstable)/keys/upload/ requires `worker_main_http_uri`
^/_matrix/client/(r0|v3|unstable)/keys/query$
^/_matrix/client/(r0|v3|unstable)/keys/changes$
^/_matrix/client/(r0|v3|unstable)/keys/claim$
^/_matrix/client/(r0|v3|unstable)/room_keys/
^/_matrix/client/(r0|v3|unstable)/keys/upload/
# Registration/login requests
^/_matrix/client/(api/v1|r0|v3|unstable)/login$
@ -325,7 +325,6 @@ effects of bursts of events from that bridge on events sent by normal users.
Additionally, the writing of specific streams (such as events) can be moved off
of the main process to a particular worker.
(This is only supported with Redis-based replication.)
To enable this, the worker must have a HTTP replication listener configured,
have a `worker_name` and be listed in the `instance_map` config. The same worker
@ -581,52 +580,23 @@ handle it, and are online.
If `update_user_directory` is set to `false`, and this worker is not running,
the above endpoint may give outdated results.
### `synapse.app.frontend_proxy`
Proxies some frequently-requested client endpoints to add caching and remove
load from the main synapse. It can handle REST endpoints matching the following
regular expressions:
^/_matrix/client/(r0|v3|unstable)/keys/upload
If `use_presence` is False in the homeserver config, it can also handle REST
endpoints matching the following regular expressions:
^/_matrix/client/(api/v1|r0|v3|unstable)/presence/[^/]+/status
This "stub" presence handler will pass through `GET` request but make the
`PUT` effectively a no-op.
It will proxy any requests it cannot handle to the main synapse instance. It
must therefore be configured with the location of the main instance, via
the `worker_main_http_uri` setting in the `frontend_proxy` worker configuration
file. For example:
```yaml
worker_main_http_uri: http://127.0.0.1:8008
```
### Historical apps
*Note:* Historically there used to be more apps; however, they have been
amalgamated into a single `synapse.app.generic_worker` app. The remaining apps
are ones that do specific processing unrelated to requests, e.g. the `pusher`
that handles sending out push notifications for new events. The intention is for
all these to be folded into the `generic_worker` app and to use config to define
which processes handle the various processing, such as push notifications.
The following used to be separate worker application types, but are now
equivalent to `synapse.app.generic_worker`:
* `synapse.app.client_reader`
* `synapse.app.event_creator`
* `synapse.app.federation_reader`
* `synapse.app.frontend_proxy`
* `synapse.app.synchrotron`
## Migration from old config
There are two main independent changes that have been made: introducing Redis
support and merging apps into `synapse.app.generic_worker`. Both these changes
are backwards compatible and so no changes to the config are required; however,
server admins are encouraged to plan to migrate to Redis as the old style direct
TCP replication config is deprecated.
To migrate to Redis add the `redis` config as above, and optionally remove the
TCP `replication` listener from master and `worker_replication_port` from worker
config.
A main change that has occurred is the merging of worker apps into
`synapse.app.generic_worker`. This change is backwards compatible and so no
changes to the config are required.
To migrate apps to use `synapse.app.generic_worker` simply update the
`worker_app` option in the worker configs, and where workers are started (e.g.

146
poetry.lock generated
View file

@ -7,10 +7,10 @@ optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
[package.extras]
dev = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "zope.interface", "furo", "sphinx", "sphinx-notfound-page", "pre-commit", "cloudpickle"]
docs = ["furo", "sphinx", "zope.interface", "sphinx-notfound-page"]
tests = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "zope.interface", "cloudpickle"]
tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "cloudpickle"]
dev = ["cloudpickle", "coverage[toml] (>=5.0.2)", "furo", "hypothesis", "mypy", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "six", "sphinx", "sphinx-notfound-page", "zope.interface"]
docs = ["furo", "sphinx", "sphinx-notfound-page", "zope.interface"]
tests = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "six", "zope.interface"]
tests_no_zope = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "six"]
[[package]]
name = "authlib"
@ -39,7 +39,7 @@ attrs = ">=19.2.0"
six = "*"
[package.extras]
visualize = ["graphviz (>0.5.1)", "Twisted (>=16.1.1)"]
visualize = ["Twisted (>=16.1.1)", "graphviz (>0.5.1)"]
[[package]]
name = "bcrypt"
@ -177,7 +177,7 @@ optional = false
python-versions = "*"
[package.extras]
test = ["hypothesis (==3.55.3)", "flake8 (==3.7.8)"]
test = ["flake8 (==3.7.8)", "hypothesis (==3.55.3)"]
[[package]]
name = "constantly"
@ -199,12 +199,12 @@ python-versions = ">=3.6"
cffi = ">=1.12"
[package.extras]
docs = ["sphinx (>=1.6.5,!=1.8.0,!=3.1.0,!=3.1.1)", "sphinx-rtd-theme"]
docstest = ["pyenchant (>=1.6.11)", "twine (>=1.12.0)", "sphinxcontrib-spelling (>=4.0.1)"]
docs = ["sphinx (>=1.6.5,!=1.8.0,!=3.1.0,!=3.1.1)", "sphinx_rtd_theme"]
docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"]
pep8test = ["black", "flake8", "flake8-import-order", "pep8-naming"]
sdist = ["setuptools_rust (>=0.11.4)"]
ssh = ["bcrypt (>=3.1.5)"]
test = ["pytest (>=6.2.0)", "pytest-cov", "pytest-subtests", "pytest-xdist", "pretend", "iso8601", "pytz", "hypothesis (>=1.11.4,!=3.79.2)"]
test = ["hypothesis (>=1.11.4,!=3.79.2)", "iso8601", "pretend", "pytest (>=6.2.0)", "pytest-cov", "pytest-subtests", "pytest-xdist", "pytz"]
[[package]]
name = "defusedxml"
@ -226,7 +226,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
wrapt = ">=1.10,<2"
[package.extras]
dev = ["tox", "bump2version (<1)", "sphinx (<2)", "importlib-metadata (<3)", "importlib-resources (<4)", "configparser (<5)", "sphinxcontrib-websupport (<2)", "zipp (<2)", "PyTest (<5)", "PyTest-Cov (<2.6)", "pytest", "pytest-cov"]
dev = ["PyTest", "PyTest (<5)", "PyTest-Cov", "PyTest-Cov (<2.6)", "bump2version (<1)", "configparser (<5)", "importlib-metadata (<3)", "importlib-resources (<4)", "sphinx (<2)", "sphinxcontrib-websupport (<2)", "tox", "zipp (<2)"]
[[package]]
name = "docutils"
@ -245,7 +245,7 @@ optional = true
python-versions = ">=3.7"
[package.extras]
dev = ["tox", "coverage", "lxml", "xmlschema (>=1.8.0)", "sphinx", "memory-profiler", "flake8", "mypy (==0.910)"]
dev = ["Sphinx", "coverage", "flake8", "lxml", "memory-profiler", "mypy (==0.910)", "tox", "xmlschema (>=1.8.0)"]
[[package]]
name = "flake8"
@ -274,7 +274,7 @@ attrs = ">=19.2.0"
flake8 = ">=3.0.0"
[package.extras]
dev = ["coverage", "black", "hypothesis", "hypothesmith"]
dev = ["black", "coverage", "hypothesis", "hypothesmith"]
[[package]]
name = "flake8-comprehensions"
@ -367,8 +367,8 @@ typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""}
zipp = ">=0.5"
[package.extras]
docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"]
testing = ["pytest (>=4.6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "packaging", "pep517", "pyfakefs", "flufl.flake8", "pytest-black (>=0.3.7)", "pytest-mypy", "importlib-resources (>=1.3)"]
docs = ["jaraco.packaging (>=8.2)", "rst.linker (>=1.9)", "sphinx"]
testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pep517", "pyfakefs", "pytest (>=4.6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-flake8", "pytest-mypy"]
[[package]]
name = "importlib-resources"
@ -382,8 +382,8 @@ python-versions = ">=3.6"
zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""}
[package.extras]
docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"]
testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-black (>=0.3.7)", "pytest-mypy"]
docs = ["jaraco.packaging (>=8.2)", "rst.linker (>=1.9)", "sphinx"]
testing = ["pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-flake8", "pytest-mypy"]
[[package]]
name = "incremental"
@ -405,9 +405,9 @@ optional = false
python-versions = ">=3.6,<4.0"
[package.extras]
pipfile_deprecated_finder = ["pipreqs", "requirementslib"]
requirements_deprecated_finder = ["pipreqs", "pip-api"]
colors = ["colorama (>=0.4.3,<0.5.0)"]
pipfile_deprecated_finder = ["pipreqs", "requirementslib"]
requirements_deprecated_finder = ["pip-api", "pipreqs"]
[[package]]
name = "jaeger-client"
@ -424,7 +424,7 @@ thrift = "*"
tornado = ">=4.3"
[package.extras]
tests = ["mock", "pycurl", "pytest", "pytest-cov", "coverage", "pytest-timeout", "pytest-tornado", "pytest-benchmark", "pytest-localserver", "flake8", "flake8-quotes", "flake8-typing-imports", "codecov", "tchannel (==2.1.0)", "opentracing_instrumentation (>=3,<4)", "prometheus_client (==0.11.0)", "mypy"]
tests = ["codecov", "coverage", "flake8", "flake8-quotes", "flake8-typing-imports", "mock", "mypy", "opentracing_instrumentation (>=3,<4)", "prometheus_client (==0.11.0)", "pycurl", "pytest", "pytest-benchmark[histogram]", "pytest-cov", "pytest-localserver", "pytest-timeout", "pytest-tornado", "tchannel (==2.1.0)"]
[[package]]
name = "jeepney"
@ -435,8 +435,8 @@ optional = false
python-versions = ">=3.6"
[package.extras]
trio = ["async-generator", "trio"]
test = ["async-timeout", "trio", "testpath", "pytest-asyncio", "pytest-trio", "pytest"]
test = ["async-timeout", "pytest", "pytest-asyncio", "pytest-trio", "testpath", "trio"]
trio = ["async_generator", "trio"]
[[package]]
name = "jinja2"
@ -486,8 +486,8 @@ pywin32-ctypes = {version = "<0.1.0 || >0.1.0,<0.1.1 || >0.1.1", markers = "sys_
SecretStorage = {version = ">=3.2", markers = "sys_platform == \"linux\""}
[package.extras]
docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)", "jaraco.tidelift (>=1.4)"]
testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-black (>=0.3.7)", "pytest-mypy"]
docs = ["jaraco.packaging (>=8.2)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx"]
testing = ["pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-flake8", "pytest-mypy"]
[[package]]
name = "ldap3"
@ -511,7 +511,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, != 3.4.*"
[package.extras]
cssselect = ["cssselect (>=0.7)"]
html5 = ["html5lib"]
htmlsoup = ["beautifulsoup4"]
htmlsoup = ["BeautifulSoup4"]
source = ["Cython (>=0.29.7)"]
[[package]]
@ -535,8 +535,8 @@ attrs = "*"
importlib-metadata = {version = ">=1.4", markers = "python_version < \"3.8\""}
[package.extras]
test = ["aiounittest", "twisted", "tox"]
dev = ["twine (==4.0.1)", "build (==0.8.0)", "isort (==5.9.3)", "flake8 (==4.0.1)", "black (==22.3.0)", "mypy (==0.910)", "aiounittest", "twisted", "tox"]
dev = ["aiounittest", "black (==22.3.0)", "build (==0.8.0)", "flake8 (==4.0.1)", "isort (==5.9.3)", "mypy (==0.910)", "tox", "twine (==4.0.1)", "twisted"]
test = ["aiounittest", "tox", "twisted"]
[[package]]
name = "matrix-synapse-ldap3"
@ -552,7 +552,7 @@ service-identity = "*"
Twisted = ">=15.1.0"
[package.extras]
dev = ["isort (==5.9.3)", "flake8 (==4.0.1)", "black (==22.3.0)", "types-setuptools", "mypy (==0.910)", "ldaptor", "tox", "matrix-synapse"]
dev = ["black (==22.3.0)", "flake8 (==4.0.1)", "isort (==5.9.3)", "ldaptor", "matrix-synapse", "mypy (==0.910)", "tox", "types-setuptools"]
[[package]]
name = "mccabe"
@ -611,7 +611,7 @@ mypy = "0.950"
"zope.schema" = "*"
[package.extras]
test = ["pytest (>=4.6)", "pytest-cov", "lxml"]
test = ["lxml", "pytest (>=4.6)", "pytest-cov"]
[[package]]
name = "netaddr"
@ -630,7 +630,7 @@ optional = true
python-versions = "*"
[package.extras]
tests = ["doubles", "flake8", "flake8-quotes", "mock", "pytest", "pytest-cov", "pytest-mock", "sphinx", "sphinx-rtd-theme", "six (>=1.10.0,<2.0)", "gevent", "tornado"]
tests = ["Sphinx", "doubles", "flake8", "flake8-quotes", "gevent", "mock", "pytest", "pytest-cov", "pytest-mock", "six (>=1.10.0,<2.0)", "sphinx_rtd_theme", "tornado"]
[[package]]
name = "packaging"
@ -835,10 +835,10 @@ optional = false
python-versions = ">=3.6"
[package.extras]
tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"]
docs = ["zope.interface", "sphinx-rtd-theme", "sphinx"]
dev = ["pre-commit", "mypy", "coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)", "cryptography (>=3.3.1)", "zope.interface", "sphinx-rtd-theme", "sphinx"]
crypto = ["cryptography (>=3.3.1)"]
dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.3.1)", "mypy", "pre-commit", "pytest (>=6.0.0,<7.0.0)", "sphinx", "sphinx-rtd-theme", "zope.interface"]
docs = ["sphinx", "sphinx-rtd-theme", "zope.interface"]
tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"]
[[package]]
name = "pymacaroons"
@ -872,8 +872,8 @@ python-versions = ">=3.6"
cffi = ">=1.4.1"
[package.extras]
docs = ["sphinx (>=1.6.5)", "sphinx-rtd-theme"]
tests = ["pytest (>=3.2.1,!=3.3.0)", "hypothesis (>=3.27.0)"]
docs = ["sphinx (>=1.6.5)", "sphinx_rtd_theme"]
tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"]
[[package]]
name = "pyopenssl"
@ -925,11 +925,12 @@ pyOpenSSL = "*"
python-dateutil = "*"
pytz = "*"
requests = ">=1.0.0"
setuptools = "*"
six = "*"
xmlschema = ">=1.2.1"
[package.extras]
s2repoze = ["paste", "zope.interface", "repoze.who"]
s2repoze = ["paste", "repoze.who", "zope.interface"]
[[package]]
name = "python-dateutil"
@ -1054,11 +1055,11 @@ celery = ["celery (>=3)"]
chalice = ["chalice (>=1.16.0)"]
django = ["django (>=1.8)"]
falcon = ["falcon (>=1.4)"]
flask = ["flask (>=0.11)", "blinker (>=1.1)"]
flask = ["blinker (>=1.1)", "flask (>=0.11)"]
httpx = ["httpx (>=0.16.0)"]
pure_eval = ["pure-eval", "executing", "asttokens"]
pure_eval = ["asttokens", "executing", "pure-eval"]
pyspark = ["pyspark (>=2.4.4)"]
quart = ["quart (>=0.16.1)", "blinker (>=1.1)"]
quart = ["blinker (>=1.1)", "quart (>=0.16.1)"]
rq = ["rq (>=0.6)"]
sanic = ["sanic (>=0.8)"]
sqlalchemy = ["sqlalchemy (>=1.2)"]
@ -1080,11 +1081,24 @@ pyasn1-modules = "*"
six = "*"
[package.extras]
dev = ["coverage[toml] (>=5.0.2)", "pytest", "sphinx", "furo", "idna", "pyopenssl"]
docs = ["sphinx", "furo"]
dev = ["coverage[toml] (>=5.0.2)", "furo", "idna", "pyOpenSSL", "pytest", "sphinx"]
docs = ["furo", "sphinx"]
idna = ["idna"]
tests = ["coverage[toml] (>=5.0.2)", "pytest"]
[[package]]
name = "setuptools"
version = "65.3.0"
description = "Easily download, build, install, upgrade, and uninstall Python packages"
category = "main"
optional = false
python-versions = ">=3.7"
[package.extras]
docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"]
testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8 (<5)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "mock", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"]
testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"]
[[package]]
name = "signedjson"
version = "1.1.4"
@ -1199,6 +1213,7 @@ click = "*"
click-default-group = "*"
incremental = "*"
jinja2 = "*"
setuptools = "*"
tomli = {version = "*", markers = "python_version >= \"3.6\""}
[package.extras]
@ -1236,7 +1251,7 @@ requests = ">=2.1.0"
Twisted = {version = ">=18.7.0", extras = ["tls"]}
[package.extras]
dev = ["pep8", "pyflakes", "httpbin (==0.5.0)"]
dev = ["httpbin (==0.5.0)", "pep8", "pyflakes"]
docs = ["sphinx (>=1.4.8)"]
[[package]]
@ -1281,20 +1296,20 @@ typing-extensions = ">=3.6.5"
"zope.interface" = ">=4.4.2"
[package.extras]
all_non_platform = ["cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "contextvars (>=2.4,<3)"]
conch = ["pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)"]
conch_nacl = ["pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pynacl"]
all_non_platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyopenssl (>=16.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
conch = ["appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "cryptography (>=2.6)", "pyasn1"]
conch_nacl = ["PyNaCl", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "cryptography (>=2.6)", "pyasn1"]
contextvars = ["contextvars (>=2.4,<3)"]
dev = ["towncrier (>=19.2,<20.0)", "sphinx-rtd-theme (>=0.5,<1.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=4.1.2,<6)", "pyflakes (>=2.2,<3.0)", "twistedchecker (>=0.7,<1.0)", "coverage (>=6b1,<7)", "python-subunit (>=1.4,<2.0)", "pydoctor (>=21.9.0,<21.10.0)"]
dev_release = ["towncrier (>=19.2,<20.0)", "sphinx-rtd-theme (>=0.5,<1.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=4.1.2,<6)", "pydoctor (>=21.9.0,<21.10.0)"]
dev = ["coverage (>=6b1,<7)", "pydoctor (>=21.9.0,<21.10.0)", "pyflakes (>=2.2,<3.0)", "python-subunit (>=1.4,<2.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=4.1.2,<6)", "sphinx-rtd-theme (>=0.5,<1.0)", "towncrier (>=19.2,<20.0)", "twistedchecker (>=0.7,<1.0)"]
dev_release = ["pydoctor (>=21.9.0,<21.10.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=4.1.2,<6)", "sphinx-rtd-theme (>=0.5,<1.0)", "towncrier (>=19.2,<20.0)"]
http2 = ["h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)"]
macos_platform = ["pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "contextvars (>=2.4,<3)"]
mypy = ["mypy (==0.930)", "mypy-zope (==0.3.4)", "types-setuptools", "types-pyopenssl", "towncrier (>=19.2,<20.0)", "sphinx-rtd-theme (>=0.5,<1.0)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "sphinx (>=4.1.2,<6)", "pyflakes (>=2.2,<3.0)", "twistedchecker (>=0.7,<1.0)", "coverage (>=6b1,<7)", "cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)", "pynacl", "pywin32 (!=226)", "python-subunit (>=1.4,<2.0)", "contextvars (>=2.4,<3)", "pydoctor (>=21.9.0,<21.10.0)"]
osx_platform = ["pyobjc-core", "pyobjc-framework-cfnetwork", "pyobjc-framework-cocoa", "cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "contextvars (>=2.4,<3)"]
macos_platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyobjc-core", "pyobjc-framework-CFNetwork", "pyobjc-framework-Cocoa", "pyopenssl (>=16.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
mypy = ["PyHamcrest (>=1.9.0)", "PyNaCl", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "coverage (>=6b1,<7)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "idna (>=2.4)", "mypy (==0.930)", "mypy-zope (==0.3.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pydoctor (>=21.9.0,<21.10.0)", "pyflakes (>=2.2,<3.0)", "pyopenssl (>=16.0.0)", "pyserial (>=3.0)", "python-subunit (>=1.4,<2.0)", "pywin32 (!=226)", "readthedocs-sphinx-ext (>=2.1,<3.0)", "service-identity (>=18.1.0)", "sphinx (>=4.1.2,<6)", "sphinx-rtd-theme (>=0.5,<1.0)", "towncrier (>=19.2,<20.0)", "twistedchecker (>=0.7,<1.0)", "types-pyOpenSSL", "types-setuptools"]
osx_platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyobjc-core", "pyobjc-framework-CFNetwork", "pyobjc-framework-Cocoa", "pyopenssl (>=16.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
serial = ["pyserial (>=3.0)", "pywin32 (!=226)"]
test = ["cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)"]
tls = ["pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)"]
windows_platform = ["pywin32 (!=226)", "cython-test-exception-raiser (>=1.0.2,<2)", "PyHamcrest (>=1.9.0)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)", "idna (>=2.4)", "pyasn1", "cryptography (>=2.6)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "pyserial (>=3.0)", "h2 (>=3.0,<5.0)", "priority (>=1.1.0,<2.0)", "pywin32 (!=226)", "contextvars (>=2.4,<3)"]
test = ["PyHamcrest (>=1.9.0)", "cython-test-exception-raiser (>=1.0.2,<2)"]
tls = ["idna (>=2.4)", "pyopenssl (>=16.0.0)", "service-identity (>=18.1.0)"]
windows_platform = ["PyHamcrest (>=1.9.0)", "appdirs (>=1.4.0)", "bcrypt (>=3.0.0)", "contextvars (>=2.4,<3)", "cryptography (>=2.6)", "cython-test-exception-raiser (>=1.0.2,<2)", "h2 (>=3.0,<5.0)", "idna (>=2.4)", "priority (>=1.1.0,<2.0)", "pyasn1", "pyopenssl (>=16.0.0)", "pyserial (>=3.0)", "pywin32 (!=226)", "pywin32 (!=226)", "service-identity (>=18.1.0)"]
[[package]]
name = "twisted-iocpsupport"
@ -1472,7 +1487,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4"
[package.extras]
brotli = ["brotlipy (>=0.6.0)"]
secure = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "certifi", "ipaddress"]
secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)"]
socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
[[package]]
@ -1504,8 +1519,8 @@ elementpath = ">=2.5.0,<3.0.0"
[package.extras]
codegen = ["elementpath (>=2.5.0,<3.0.0)", "jinja2"]
dev = ["tox", "coverage", "lxml", "elementpath (>=2.5.0,<3.0.0)", "memory-profiler", "sphinx", "sphinx-rtd-theme", "jinja2", "flake8", "mypy", "lxml-stubs"]
docs = ["elementpath (>=2.5.0,<3.0.0)", "sphinx", "sphinx-rtd-theme", "jinja2"]
dev = ["Sphinx", "coverage", "elementpath (>=2.5.0,<3.0.0)", "flake8", "jinja2", "lxml", "lxml-stubs", "memory-profiler", "mypy", "sphinx-rtd-theme", "tox"]
docs = ["Sphinx", "elementpath (>=2.5.0,<3.0.0)", "jinja2", "sphinx-rtd-theme"]
[[package]]
name = "zipp"
@ -1516,8 +1531,8 @@ optional = false
python-versions = ">=3.7"
[package.extras]
docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"]
testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "jaraco.itertools", "func-timeout", "pytest-black (>=0.3.7)", "pytest-mypy"]
docs = ["jaraco.packaging (>=8.2)", "rst.linker (>=1.9)", "sphinx"]
testing = ["func-timeout", "jaraco.itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-flake8", "pytest-mypy"]
[[package]]
name = "zope.event"
@ -1527,8 +1542,11 @@ category = "dev"
optional = false
python-versions = "*"
[package.dependencies]
setuptools = "*"
[package.extras]
docs = ["sphinx"]
docs = ["Sphinx"]
test = ["zope.testrunner"]
[[package]]
@ -1539,8 +1557,11 @@ category = "main"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
[package.dependencies]
setuptools = "*"
[package.extras]
docs = ["sphinx", "repoze.sphinx.autointerface"]
docs = ["Sphinx", "repoze.sphinx.autointerface"]
test = ["coverage (>=5.0.3)", "zope.event", "zope.testing"]
testing = ["coverage (>=5.0.3)", "zope.event", "zope.testing"]
@ -1553,11 +1574,12 @@ optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
[package.dependencies]
setuptools = "*"
"zope.event" = "*"
"zope.interface" = ">=5.0.0"
[package.extras]
docs = ["sphinx", "repoze.sphinx.autointerface"]
docs = ["Sphinx", "repoze.sphinx.autointerface"]
test = ["zope.i18nmessageid", "zope.testing", "zope.testrunner"]
[extras]
@ -2458,6 +2480,10 @@ service-identity = [
{file = "service-identity-21.1.0.tar.gz", hash = "sha256:6e6c6086ca271dc11b033d17c3a8bea9f24ebff920c587da090afc9519419d34"},
{file = "service_identity-21.1.0-py2.py3-none-any.whl", hash = "sha256:f0b0caac3d40627c3c04d7a51b6e06721857a0e10a8775f2d1d7e72901b3a7db"},
]
setuptools = [
{file = "setuptools-65.3.0-py3-none-any.whl", hash = "sha256:2e24e0bec025f035a2e72cdd1961119f557d78ad331bb00ff82efb2ab8da8e82"},
{file = "setuptools-65.3.0.tar.gz", hash = "sha256:7732871f4f7fa58fb6bdcaeadb0161b2bd046c85905dbaa066bdcbcc81953b57"},
]
signedjson = [
{file = "signedjson-1.1.4-py3-none-any.whl", hash = "sha256:45569ec54241c65d2403fe3faf7169be5322547706a231e884ca2b427f23d228"},
{file = "signedjson-1.1.4.tar.gz", hash = "sha256:cd91c56af53f169ef032c62e9c4a3292dc158866933318d0592e3462db3d6492"},

View file

@ -54,7 +54,7 @@ skip_gitignore = true
[tool.poetry]
name = "matrix-synapse"
version = "1.66.0"
version = "1.67.0rc1"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"

File diff suppressed because it is too large

View file

@ -18,10 +18,12 @@
"""
import glob
import json
import os
import re
import subprocess
import sys
import time
import urllib.request
from os import path
from tempfile import TemporaryDirectory
@ -71,18 +73,21 @@ def cli() -> None:
./scripts-dev/release.py tag
# ... wait for assets to build ...
# wait for assets to build, either manually or with:
./scripts-dev/release.py wait-for-actions
./scripts-dev/release.py publish
./scripts-dev/release.py upload
# Optional: generate some nice links for the announcement
./scripts-dev/release.py merge-back
# Optional: generate some nice links for the announcement
./scripts-dev/release.py announce
Alternatively, `./scripts-dev/release.py full` will do all the above
as well as guiding you through the manual steps.
If the env var GH_TOKEN (or GITHUB_TOKEN) is set, or passed into the
`tag`/`publish` command, then a new draft release will be created/published.
"""
@ -90,6 +95,10 @@ def cli() -> None:
@cli.command()
def prepare() -> None:
_prepare()
def _prepare() -> None:
"""Do the initial stages of creating a release, including creating release
branch, updating changelog and pushing to GitHub.
"""
@ -284,6 +293,10 @@ def prepare() -> None:
@cli.command()
@click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"])
def tag(gh_token: Optional[str]) -> None:
_tag(gh_token)
def _tag(gh_token: Optional[str]) -> None:
"""Tags the release and generates a draft GitHub release"""
# Make sure we're in a git repo.
@ -374,6 +387,10 @@ def tag(gh_token: Optional[str]) -> None:
@cli.command()
@click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"], required=True)
def publish(gh_token: str) -> None:
_publish(gh_token)
def _publish(gh_token: str) -> None:
"""Publish release on GitHub."""
# Make sure we're in a git repo.
@ -411,6 +428,10 @@ def publish(gh_token: str) -> None:
@cli.command()
def upload() -> None:
_upload()
def _upload() -> None:
"""Upload release to pypi."""
current_version = get_package_version()
@ -479,8 +500,75 @@ def _merge_into(repo: Repo, source: str, target: str) -> None:
repo.remote().push()
@cli.command()
@click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"], required=False)
def wait_for_actions(gh_token: Optional[str]) -> None:
_wait_for_actions(gh_token)
def _wait_for_actions(gh_token: Optional[str]) -> None:
# Find out the version and tag name.
current_version = get_package_version()
tag_name = f"v{current_version}"
# Authentication is optional on this endpoint,
# but use a token if we have one to reduce the chance of being rate-limited.
url = f"https://api.github.com/repos/matrix-org/synapse/actions/runs?branch={tag_name}"
headers = {"Accept": "application/vnd.github+json"}
if gh_token is not None:
headers["authorization"] = f"token {gh_token}"
req = urllib.request.Request(url, headers=headers)
time.sleep(10 * 60)
while True:
time.sleep(5 * 60)
response = urllib.request.urlopen(req)
resp = json.loads(response.read())
if len(resp["workflow_runs"]) == 0:
continue
if all(
workflow["status"] != "in_progress" for workflow in resp["workflow_runs"]
):
# The release is only good if every completed run succeeded.
success = all(
workflow["conclusion"] == "success" for workflow in resp["workflow_runs"]
)
if success:
_notify("Workflows successful. You can now continue the release.")
else:
_notify("Workflows failed.")
click.confirm("Continue anyway?", abort=True)
break
def _notify(message: str) -> None:
# Send a bell character. Most terminals will play a sound or show a notification
# for this.
click.echo(f"\a{message}")
# Try and run notify-send, but don't raise an Exception if this fails
# (This is best-effort)
# TODO Support other platforms?
subprocess.run(
[
"notify-send",
"--app-name",
"Synapse Release Script",
"--expire-time",
"3600000",
message,
]
)
@cli.command()
def merge_back() -> None:
_merge_back()
def _merge_back() -> None:
"""Merge the release branch back into the appropriate branches.
All branches will be automatically pulled from the remote and the results
will be pushed to the remote."""
@ -519,6 +607,10 @@ def merge_back() -> None:
@cli.command()
def announce() -> None:
_announce()
def _announce() -> None:
"""Generate markdown to announce the release."""
current_version = get_package_version()
@ -548,10 +640,56 @@ Announce the release in
- #homeowners:matrix.org (Synapse Announcements), bumping the version in the topic
- #synapse:matrix.org (Synapse Admins), bumping the version in the topic
- #synapse-dev:matrix.org
- #synapse-package-maintainers:matrix.org"""
- #synapse-package-maintainers:matrix.org
Ask the designated people to do the blog and tweets."""
)
@cli.command()
@click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"], required=True)
def full(gh_token: str) -> None:
click.echo("1. If this is a security release, read the security wiki page.")
click.echo("2. Check for any release blockers before proceeding.")
click.echo(" https://github.com/matrix-org/synapse/labels/X-Release-Blocker")
click.confirm("Ready?", abort=True)
click.echo("\n*** prepare ***")
_prepare()
click.echo("Deploy to matrix.org and ensure that it hasn't fallen over.")
click.echo("Remember to silence the alerts to prevent alert spam.")
click.confirm("Deployed?", abort=True)
click.echo("\n*** tag ***")
_tag(gh_token)
click.echo("\n*** wait for actions ***")
_wait_for_actions(gh_token)
click.echo("\n*** publish ***")
_publish(gh_token)
click.echo("\n*** upload ***")
_upload()
click.echo("\n*** merge back ***")
_merge_back()
click.echo("\nUpdate the Debian repository")
click.confirm("Started updating Debian repository?", abort=True)
click.echo("\nWait for all release methods to be ready.")
# Docker should be ready because it was done by the workflows earlier
# PyPI should be ready because we just ran upload().
# TODO Automatically poll until the Debs have made it to packages.matrix.org
click.confirm("Debs ready?", abort=True)
click.echo("\n*** announce ***")
_announce()
def get_package_version() -> version.Version:
version_string = subprocess.check_output(["poetry", "version", "--short"]).decode(
"utf-8"

View file

@ -1,6 +1,6 @@
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2018 New Vector
# Copyright 2021 The Matrix.org Foundation C.I.C.
# Copyright 2021-22 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -20,11 +20,22 @@ import hashlib
import hmac
import logging
import sys
from typing import Callable, Optional
from typing import Any, Callable, Dict, Optional
import requests
import yaml
_CONFLICTING_SHARED_SECRET_OPTS_ERROR = """\
Conflicting options 'registration_shared_secret' and 'registration_shared_secret_path'
are both defined in config file.
"""
_NO_SHARED_SECRET_OPTS_ERROR = """\
No 'registration_shared_secret' or 'registration_shared_secret_path' defined in config.
"""
_DEFAULT_SERVER_URL = "http://localhost:8008"
def request_registration(
user: str,
@ -203,31 +214,104 @@ def main() -> None:
parser.add_argument(
"server_url",
default="https://localhost:8448",
nargs="?",
help="URL to use to talk to the homeserver. Defaults to "
" 'https://localhost:8448'.",
help="URL to use to talk to the homeserver. By default, tries to find a "
"suitable URL from the configuration file. Otherwise, defaults to "
f"'{_DEFAULT_SERVER_URL}'.",
)
args = parser.parse_args()
if "config" in args and args.config:
config = yaml.safe_load(args.config)
secret = config.get("registration_shared_secret", None)
if not secret:
print("No 'registration_shared_secret' defined in config.")
sys.exit(1)
else:
if args.shared_secret:
secret = args.shared_secret
else:
# argparse should check that we have either config or shared secret
assert config
secret = config.get("registration_shared_secret")
secret_file = config.get("registration_shared_secret_path")
if secret_file:
if secret:
print(_CONFLICTING_SHARED_SECRET_OPTS_ERROR, file=sys.stderr)
sys.exit(1)
secret = _read_file(secret_file, "registration_shared_secret_path").strip()
if not secret:
print(_NO_SHARED_SECRET_OPTS_ERROR, file=sys.stderr)
sys.exit(1)
if args.server_url:
server_url = args.server_url
elif config:
server_url = _find_client_listener(config)
if not server_url:
server_url = _DEFAULT_SERVER_URL
print(
"Unable to find a suitable HTTP listener in the configuration file. "
f"Trying {server_url} as a last resort.",
file=sys.stderr,
)
else:
server_url = _DEFAULT_SERVER_URL
print(
f"No server url or configuration file given. Defaulting to {server_url}.",
file=sys.stderr,
)
admin = None
if args.admin or args.no_admin:
admin = args.admin
register_new_user(
args.user, args.password, args.server_url, secret, admin, args.user_type
args.user, args.password, server_url, secret, admin, args.user_type
)
def _read_file(file_path: Any, config_path: str) -> str:
"""Check the given file exists, and read it into a string
If it does not, exit with an error indicating the problem
Args:
file_path: the file to be read
config_path: where in the configuration file_path came from, so that a useful
error can be emitted if it does not exist.
Returns:
content of the file.
"""
if not isinstance(file_path, str):
print(f"{config_path} setting is not a string", file=sys.stderr)
sys.exit(1)
try:
with open(file_path) as file_stream:
return file_stream.read()
except OSError as e:
print(f"Error accessing file {file_path}: {e}", file=sys.stderr)
sys.exit(1)
def _find_client_listener(config: Dict[str, Any]) -> Optional[str]:
# Try to find a suitable client listener in the config. Returns a URL
# such as "http://localhost:8008", or None if no suitable listener is found.
for listener in config.get("listeners", []):
if listener.get("type") != "http" or listener.get("tls", False):
continue
if not any(
name == "client"
for resource in listener.get("resources", [])
for name in resource.get("names", [])
):
continue
# TODO: consider bind_addresses
return f"http://localhost:{listener['port']}"
# no suitable listeners?
return None
if __name__ == "__main__":
main()

View file

@ -258,7 +258,6 @@ class GuestAccess:
class ReceiptTypes:
READ: Final = "m.read"
READ_PRIVATE: Final = "m.read.private"
UNSTABLE_READ_PRIVATE: Final = "org.matrix.msc2285.read.private"
FULLY_READ: Final = "m.fully_read"

View file

@ -140,13 +140,13 @@ USER_FILTER_SCHEMA = {
@FormatChecker.cls_checks("matrix_room_id")
def matrix_room_id_validator(room_id_str: str) -> RoomID:
return RoomID.from_string(room_id_str)
def matrix_room_id_validator(room_id_str: str) -> bool:
return RoomID.is_valid(room_id_str)
@FormatChecker.cls_checks("matrix_user_id")
def matrix_user_id_validator(user_id_str: str) -> UserID:
return UserID.from_string(user_id_str)
def matrix_user_id_validator(user_id_str: str) -> bool:
return UserID.is_valid(user_id_str)
class Filtering:

View file

@ -266,15 +266,48 @@ def register_start(
reactor.callWhenRunning(lambda: defer.ensureDeferred(wrapper()))
def listen_metrics(bind_addresses: Iterable[str], port: int) -> None:
def listen_metrics(
bind_addresses: Iterable[str], port: int, enable_legacy_metric_names: bool
) -> None:
"""
Start Prometheus metrics server.
"""
from synapse.metrics import RegistryProxy, start_http_server
from prometheus_client import start_http_server as start_http_server_prometheus
from synapse.metrics import (
RegistryProxy,
start_http_server as start_http_server_legacy,
)
for host in bind_addresses:
logger.info("Starting metrics listener on %s:%d", host, port)
start_http_server(port, addr=host, registry=RegistryProxy)
if enable_legacy_metric_names:
start_http_server_legacy(port, addr=host, registry=RegistryProxy)
else:
_set_prometheus_client_use_created_metrics(False)
start_http_server_prometheus(port, addr=host, registry=RegistryProxy)
def _set_prometheus_client_use_created_metrics(new_value: bool) -> None:
"""
Sets whether prometheus_client should expose `_created`-suffixed metrics for
all gauges, histograms and summaries.
There is no programmatic way to disable this without poking at internals;
the proper way is to use an environment variable which prometheus_client
loads at import time.
The motivation for disabling these `_created` metrics is that they're
not useful, yet they take up space in Prometheus.
"""
import prometheus_client.metrics
if hasattr(prometheus_client.metrics, "_use_created"):
prometheus_client.metrics._use_created = new_value
else:
logger.error(
"Can't disable `_created` metrics in prometheus_client (brittle hack broken?)"
)
def listen_manhole(
@ -478,9 +511,10 @@ async def start(hs: "HomeServer") -> None:
setup_sentry(hs)
setup_sdnotify(hs)
# If background tasks are running on the main process, start collecting the
# phone home stats.
# If background tasks are running on the main process or this is the worker in
# charge of them, start collecting the phone home stats and shared usage metrics.
if hs.config.worker.run_background_tasks:
await hs.get_common_usage_metrics_manager().setup()
start_phone_stats_home(hs)
# We now freeze all allocated objects in the hopes that (almost)

View file

@ -412,7 +412,11 @@ class GenericWorkerServer(HomeServer):
"enable_metrics is not True!"
)
else:
_base.listen_metrics(listener.bind_addresses, listener.port)
_base.listen_metrics(
listener.bind_addresses,
listener.port,
enable_legacy_metric_names=self.config.metrics.enable_legacy_metrics,
)
else:
logger.warning("Unsupported listener type: %s", listener.type)

View file

@ -57,7 +57,6 @@ from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
from synapse.rest import ClientRestResource
from synapse.rest.admin import AdminRestResource
from synapse.rest.health import HealthResource
@ -290,16 +289,6 @@ class SynapseHomeServer(HomeServer):
manhole_settings=self.config.server.manhole_settings,
manhole_globals={"hs": self},
)
elif listener.type == "replication":
services = listen_tcp(
listener.bind_addresses,
listener.port,
ReplicationStreamProtocolFactory(self),
)
for s in services:
self.get_reactor().addSystemEventTrigger(
"before", "shutdown", s.stopListening
)
elif listener.type == "metrics":
if not self.config.metrics.enable_metrics:
logger.warning(
@ -307,7 +296,11 @@ class SynapseHomeServer(HomeServer):
"enable_metrics is not True!"
)
else:
_base.listen_metrics(listener.bind_addresses, listener.port)
_base.listen_metrics(
listener.bind_addresses,
listener.port,
enable_legacy_metric_names=self.config.metrics.enable_legacy_metrics,
)
else:
# this shouldn't happen, as the listener type should have been checked
# during parsing

View file

@ -51,6 +51,16 @@ async def phone_stats_home(
stats: JsonDict,
stats_process: List[Tuple[int, "resource.struct_rusage"]] = _stats_process,
) -> None:
"""Collect usage statistics and send them to the configured endpoint.
Args:
hs: the HomeServer object to use for gathering usage data.
stats: the dict in which to store the statistics sent to the configured
endpoint. Mostly used in tests to figure out the data that is supposed to
be sent.
stats_process: statistics about resource usage of the process.
"""
logger.info("Gathering stats for reporting")
now = int(hs.get_clock().time())
# Ensure the homeserver has started.
@ -83,6 +93,7 @@ async def phone_stats_home(
#
store = hs.get_datastores().main
common_metrics = await hs.get_common_usage_metrics_manager().get_metrics()
stats["homeserver"] = hs.config.server.server_name
stats["server_context"] = hs.config.server.server_context
@ -104,7 +115,7 @@ async def phone_stats_home(
room_count = await store.get_room_count()
stats["total_room_count"] = room_count
stats["daily_active_users"] = await store.count_daily_users()
stats["daily_active_users"] = common_metrics.daily_active_users
stats["monthly_active_users"] = await store.count_monthly_users()
daily_active_e2ee_rooms = await store.count_daily_active_e2ee_rooms()
stats["daily_active_e2ee_rooms"] = daily_active_e2ee_rooms

View file

@ -20,6 +20,7 @@ import logging
import os
import re
from collections import OrderedDict
from enum import Enum, auto
from hashlib import sha256
from textwrap import dedent
from typing import (
@ -603,18 +604,44 @@ class RootConfig:
" may specify directories containing *.yaml files.",
)
generate_group = parser.add_argument_group("Config generation")
generate_group.add_argument(
"--generate-config",
action="store_true",
help="Generate a config file, then exit.",
# we nest the mutually-exclusive group inside another group so that the help
# text shows them in their own group.
generate_mode_group = parser.add_argument_group(
"Config generation mode",
)
generate_group.add_argument(
generate_mode_exclusive = generate_mode_group.add_mutually_exclusive_group()
generate_mode_exclusive.add_argument(
# hidden option to make the type and default work
"--generate-mode",
help=argparse.SUPPRESS,
type=_ConfigGenerateMode,
default=_ConfigGenerateMode.GENERATE_MISSING_AND_RUN,
)
generate_mode_exclusive.add_argument(
"--generate-config",
help="Generate a config file, then exit.",
action="store_const",
const=_ConfigGenerateMode.GENERATE_EVERYTHING_AND_EXIT,
dest="generate_mode",
)
generate_mode_exclusive.add_argument(
"--generate-missing-configs",
"--generate-keys",
action="store_true",
help="Generate any missing additional config files, then exit.",
action="store_const",
const=_ConfigGenerateMode.GENERATE_MISSING_AND_EXIT,
dest="generate_mode",
)
generate_mode_exclusive.add_argument(
"--generate-missing-and-run",
help="Generate any missing additional config files, then run. This is the "
"default behaviour.",
action="store_const",
const=_ConfigGenerateMode.GENERATE_MISSING_AND_RUN,
dest="generate_mode",
)
generate_group = parser.add_argument_group("Details for --generate-config")
generate_group.add_argument(
"-H", "--server-name", help="The server name to generate a config file for."
)
@ -670,11 +697,12 @@ class RootConfig:
config_dir_path = os.path.abspath(config_dir_path)
data_dir_path = os.getcwd()
generate_missing_configs = config_args.generate_missing_configs
obj = cls(config_files)
if config_args.generate_config:
if (
config_args.generate_mode
== _ConfigGenerateMode.GENERATE_EVERYTHING_AND_EXIT
):
if config_args.report_stats is None:
parser.error(
"Please specify either --report-stats=yes or --report-stats=no\n\n"
@ -732,11 +760,14 @@ class RootConfig:
)
% (config_path,)
)
generate_missing_configs = True
config_dict = read_config_files(config_files)
if generate_missing_configs:
obj.generate_missing_files(config_dict, config_dir_path)
obj.generate_missing_files(config_dict, config_dir_path)
if config_args.generate_mode in (
_ConfigGenerateMode.GENERATE_EVERYTHING_AND_EXIT,
_ConfigGenerateMode.GENERATE_MISSING_AND_EXIT,
):
return None
obj.parse_config_dict(
@ -965,6 +996,12 @@ def read_file(file_path: Any, config_path: Iterable[str]) -> str:
raise ConfigError("Error accessing file %r" % (file_path,), config_path) from e
class _ConfigGenerateMode(Enum):
GENERATE_MISSING_AND_RUN = auto()
GENERATE_MISSING_AND_EXIT = auto()
GENERATE_EVERYTHING_AND_EXIT = auto()
__all__ = [
"Config",
"RootConfig",

View file

@ -32,9 +32,6 @@ class ExperimentalConfig(Config):
# MSC2716 (importing historical messages)
self.msc2716_enabled: bool = experimental.get("msc2716_enabled", False)
# MSC2285 (unstable private read receipts)
self.msc2285_enabled: bool = experimental.get("msc2285_enabled", False)
# MSC3244 (room version capabilities)
self.msc3244_enabled: bool = experimental.get("msc3244_enabled", True)
@ -74,6 +71,9 @@ class ExperimentalConfig(Config):
self.msc3720_enabled: bool = experimental.get("msc3720_enabled", False)
# MSC2654: Unread counts
#
# Note that enabling this will result in an incorrect unread count for
# previously calculated push actions.
self.msc2654_enabled: bool = experimental.get("msc2654_enabled", False)
# MSC2815 (allow room moderators to view redacted event content)

View file

@ -42,6 +42,35 @@ class MetricsConfig(Config):
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
self.enable_metrics = config.get("enable_metrics", False)
"""
### `enable_legacy_metrics` (experimental)
**Experimental: this option may be removed or have its behaviour
changed at any time, with no notice.**
Set to `true` to publish both legacy and non-legacy Prometheus metric names,
or to `false` to only publish non-legacy Prometheus metric names.
Defaults to `true`. Has no effect if `enable_metrics` is `false`.
Legacy metric names include:
- metrics containing colons in the name, such as `synapse_util_caches_response_cache:hits`, because colons are supposed to be reserved for user-defined recording rules;
- counters that don't end with the `_total` suffix, such as `synapse_federation_client_sent_edus`, therefore not adhering to the OpenMetrics standard.
These legacy metric names are unconventional and not compliant with OpenMetrics standards.
They are included for backwards compatibility.
Example configuration:
```yaml
enable_legacy_metrics: false
```
See https://github.com/matrix-org/synapse/issues/11106 for context.
*Since v1.67.0.*
"""
self.enable_legacy_metrics = config.get("enable_legacy_metrics", True)
self.report_stats = config.get("report_stats", None)
self.report_stats_endpoint = config.get(
"report_stats_endpoint", "https://matrix.org/report-usage-stats/push"

View file

@ -13,10 +13,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from typing import Any, Optional
from typing import Any, Dict, Optional
from synapse.api.constants import RoomCreationPreset
from synapse.config._base import Config, ConfigError
from synapse.config._base import Config, ConfigError, read_file
from synapse.types import JsonDict, RoomAlias, UserID
from synapse.util.stringutils import random_string_with_symbols, strtobool
@ -27,6 +27,11 @@ password resets, configure Synapse with an SMTP server via the `email` setting,
remove `account_threepid_delegates.email`.
"""
CONFLICTING_SHARED_SECRET_OPTS_ERROR = """\
You have configured both `registration_shared_secret` and
`registration_shared_secret_path`. These are mutually incompatible.
"""
class RegistrationConfig(Config):
section = "registration"
@ -53,7 +58,16 @@ class RegistrationConfig(Config):
self.enable_registration_token_3pid_bypass = config.get(
"enable_registration_token_3pid_bypass", False
)
# read the shared secret, either inline or from an external file
self.registration_shared_secret = config.get("registration_shared_secret")
registration_shared_secret_path = config.get("registration_shared_secret_path")
if registration_shared_secret_path:
if self.registration_shared_secret:
raise ConfigError(CONFLICTING_SHARED_SECRET_OPTS_ERROR)
self.registration_shared_secret = read_file(
registration_shared_secret_path, ("registration_shared_secret_path",)
).strip()
self.bcrypt_rounds = config.get("bcrypt_rounds", 12)
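To illustrate the mutual exclusion above, a minimal sketch (hypothetical values) of the two ways the secret can now be supplied:
# Editor's sketch, hypothetical values: either option alone is fine...
ok_inline = {"registration_shared_secret": "inline-secret"}
ok_path = {"registration_shared_secret_path": "/etc/synapse/registration.secret"}
# ...but both together raise ConfigError(CONFLICTING_SHARED_SECRET_OPTS_ERROR).
bad = {**ok_inline, **ok_path}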
@ -218,6 +232,21 @@ class RegistrationConfig(Config):
else:
return ""
def generate_files(self, config: Dict[str, Any], config_dir_path: str) -> None:
# if 'registration_shared_secret_path' is specified, and the target file
# does not exist, generate it.
registration_shared_secret_path = config.get("registration_shared_secret_path")
if registration_shared_secret_path and not self.path_exists(
registration_shared_secret_path
):
print(
"Generating registration shared secret file "
+ registration_shared_secret_path
)
secret = random_string_with_symbols(50)
with open(registration_shared_secret_path, "w") as f:
f.write(f"{secret}\n")
@staticmethod
def add_arguments(parser: argparse.ArgumentParser) -> None:
reg_group = parser.add_argument_group("registration")

View file

@ -36,6 +36,12 @@ from ._util import validate_config
logger = logging.Logger(__name__)
DIRECT_TCP_ERROR = """
Using direct TCP replication for workers is no longer supported.
Please see https://matrix-org.github.io/synapse/latest/upgrade.html#direct-tcp-replication-is-no-longer-supported-migrate-to-redis
"""
# by default, we attempt to listen on both '::' *and* '0.0.0.0' because some OSes
# (Windows, macOS, other BSD/Linux where net.ipv6.bindv6only is set) will only listen
# on IPv6 when '::' is set.
@ -165,7 +171,6 @@ KNOWN_LISTENER_TYPES = {
"http",
"metrics",
"manhole",
"replication",
}
KNOWN_RESOURCES = {
@ -515,7 +520,9 @@ class ServerConfig(Config):
):
raise ConfigError("allowed_avatar_mimetypes must be a list")
self.listeners = [parse_listener_def(x) for x in config.get("listeners", [])]
self.listeners = [
parse_listener_def(i, x) for i, x in enumerate(config.get("listeners", []))
]
# no_tls is not really supported any more, but let's grandfather it in
# here.
@ -880,9 +887,12 @@ def read_gc_thresholds(
)
def parse_listener_def(listener: Any) -> ListenerConfig:
def parse_listener_def(num: int, listener: Any) -> ListenerConfig:
"""parse a listener config from the config file"""
listener_type = listener["type"]
# Raise a helpful error if direct TCP replication is still configured.
if listener_type == "replication":
raise ConfigError(DIRECT_TCP_ERROR, ("listeners", str(num), "type"))
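For example (editor's sketch, hypothetical values), a listener entry of this shape is now rejected, with the error pointing at its position in the config:
# A first (index 0) listener still using direct TCP replication:
listener = {"type": "replication", "port": 9092, "bind_addresses": ["127.0.0.1"]}
# parse_listener_def(0, listener) raises
# ConfigError(DIRECT_TCP_ERROR, ("listeners", "0", "type"))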
port = listener.get("port")
if not isinstance(port, int):

View file

@ -27,7 +27,7 @@ from ._base import (
RoutableShardedWorkerHandlingConfig,
ShardedWorkerHandlingConfig,
)
from .server import ListenerConfig, parse_listener_def
from .server import DIRECT_TCP_ERROR, ListenerConfig, parse_listener_def
_FEDERATION_SENDER_WITH_SEND_FEDERATION_ENABLED_ERROR = """
The send_federation config option must be disabled in the main
@ -128,7 +128,8 @@ class WorkerConfig(Config):
self.worker_app = None
self.worker_listeners = [
parse_listener_def(x) for x in config.get("worker_listeners", [])
parse_listener_def(i, x)
for i, x in enumerate(config.get("worker_listeners", []))
]
self.worker_daemonize = bool(config.get("worker_daemonize"))
self.worker_pid_file = config.get("worker_pid_file")
@ -142,7 +143,8 @@ class WorkerConfig(Config):
self.worker_replication_host = config.get("worker_replication_host", None)
# The port on the main synapse for TCP replication
self.worker_replication_port = config.get("worker_replication_port", None)
if "worker_replication_port" in config:
raise ConfigError(DIRECT_TCP_ERROR, ("worker_replication_port",))
# The port on the main synapse for HTTP replication endpoint
self.worker_replication_http_port = config.get("worker_replication_http_port")

View file

@ -28,6 +28,7 @@ from synapse.api.errors import Codes, SynapseError
from synapse.api.room_versions import RoomVersion
from synapse.events import EventBase
from synapse.events.utils import prune_event, prune_event_dict
from synapse.logging.opentracing import trace
from synapse.types import JsonDict
logger = logging.getLogger(__name__)
@ -35,6 +36,7 @@ logger = logging.getLogger(__name__)
Hasher = Callable[[bytes], "hashlib._Hash"]
@trace
def check_event_content_hash(
event: EventBase, hash_algorithm: Hasher = hashlib.sha256
) -> bool:

View file

@ -32,6 +32,7 @@ from typing_extensions import Literal
import synapse
from synapse.api.errors import Codes
from synapse.logging.opentracing import trace
from synapse.rest.media.v1._base import FileInfo
from synapse.rest.media.v1.media_storage import ReadableFileWrapper
from synapse.spam_checker_api import RegistrationBehaviour
@ -378,6 +379,7 @@ class SpamChecker:
if check_media_file_for_spam is not None:
self._check_media_file_for_spam_callbacks.append(check_media_file_for_spam)
@trace
async def check_event_for_spam(
self, event: "synapse.events.EventBase"
) -> Union[Tuple[Codes, JsonDict], str]:

View file

@ -23,6 +23,7 @@ from synapse.crypto.keyring import Keyring
from synapse.events import EventBase, make_event_from_dict
from synapse.events.utils import prune_event, validate_canonicaljson
from synapse.http.servlet import assert_params_in_dict
from synapse.logging.opentracing import log_kv, trace
from synapse.types import JsonDict, get_domain_from_id
if TYPE_CHECKING:
@ -55,6 +56,7 @@ class FederationBase:
self._clock = hs.get_clock()
self._storage_controllers = hs.get_storage_controllers()
@trace
async def _check_sigs_and_hash(
self, room_version: RoomVersion, pdu: EventBase
) -> EventBase:
@ -97,17 +99,36 @@ class FederationBase:
"Event %s seems to have been redacted; using our redacted copy",
pdu.event_id,
)
log_kv(
{
"message": "Event seems to have been redacted; using our redacted copy",
"event_id": pdu.event_id,
}
)
else:
logger.warning(
"Event %s content has been tampered, redacting",
pdu.event_id,
)
log_kv(
{
"message": "Event content has been tampered, redacting",
"event_id": pdu.event_id,
}
)
return redacted_event
spam_check = await self.spam_checker.check_event_for_spam(pdu)
if spam_check != self.spam_checker.NOT_SPAM:
logger.warning("Event contains spam, soft-failing %s", pdu.event_id)
log_kv(
{
"message": "Event contains spam, redacting (to save disk space) "
"as well as soft-failing (to stop using the event in prev_events)",
"event_id": pdu.event_id,
}
)
# we redact (to save disk space) as well as soft-failing (to stop
# using the event in prev_events).
redacted_event = prune_event(pdu)
@ -117,6 +138,7 @@ class FederationBase:
return pdu
@trace
async def _check_sigs_on_pdu(
keyring: Keyring, room_version: RoomVersion, pdu: EventBase
) -> None:

View file

@ -61,7 +61,7 @@ from synapse.federation.federation_base import (
)
from synapse.federation.transport.client import SendJoinResponse
from synapse.http.types import QueryParams
from synapse.logging.opentracing import SynapseTags, set_tag, tag_args, trace
from synapse.logging.opentracing import SynapseTags, log_kv, set_tag, tag_args, trace
from synapse.types import JsonDict, UserID, get_domain_from_id
from synapse.util.async_helpers import concurrently_execute
from synapse.util.caches.expiringcache import ExpiringCache
@ -587,11 +587,15 @@ class FederationClient(FederationBase):
Returns:
A list of PDUs that have valid signatures and hashes.
"""
set_tag(
SynapseTags.RESULT_PREFIX + "pdus.length",
str(len(pdus)),
)
# We limit how many PDUs we check at once, as if we try to do hundreds
# of thousands of PDUs at once we see large memory spikes.
valid_pdus = []
valid_pdus: List[EventBase] = []
async def _execute(pdu: EventBase) -> None:
valid_pdu = await self._check_sigs_and_hash_and_fetch_one(
@ -607,6 +611,8 @@ class FederationClient(FederationBase):
return valid_pdus
@trace
@tag_args
async def _check_sigs_and_hash_and_fetch_one(
self,
pdu: EventBase,
@ -639,16 +645,27 @@ class FederationClient(FederationBase):
except InvalidEventSignatureError as e:
logger.warning(
"Signature on retrieved event %s was invalid (%s). "
"Checking local store/orgin server",
"Checking local store/origin server",
pdu.event_id,
e,
)
log_kv(
{
"message": "Signature on retrieved event was invalid. "
"Checking local store/origin server",
"event_id": pdu.event_id,
"InvalidEventSignatureError": e,
}
)
# Check local db.
res = await self.store.get_event(
pdu.event_id, allow_rejected=True, allow_none=True
)
# If the PDU fails its signature check and we don't have it in our
# database, we then request it from sender's server (if that is not the
# same as `origin`).
pdu_origin = get_domain_from_id(pdu.sender)
if not res and pdu_origin != origin:
try:

View file

@ -763,6 +763,17 @@ class FederationServer(FederationBase):
The partial knock event.
"""
origin_host, _ = parse_server_name(origin)
if await self.store.is_partial_state_room(room_id):
# Before we do anything: check if the room is partial-stated.
# Note that at the time this check was added, `on_make_knock_request` would
# block due to https://github.com/matrix-org/synapse/issues/12997.
raise SynapseError(
404,
"Unable to handle /make_knock right now; this server is not fully joined.",
errcode=Codes.NOT_FOUND,
)
await self.check_server_matches_acl(origin_host, room_id)
room_version = await self.store.get_room_version(room_id)

View file

@ -441,6 +441,19 @@ class FederationSender(AbstractFederationSender):
destinations = await self._external_cache.get(
"get_joined_hosts", str(sg)
)
if destinations is None:
# Add logging to help track down #13444
logger.info(
"Unexpectedly did not have cached destinations for %s / %s",
sg,
event.event_id,
)
else:
# Add logging to help track down #13444
logger.info(
"Unexpectedly did not have cached prev group for %s",
event.event_id,
)
if destinations is None:
try:

View file

@ -21,7 +21,7 @@ from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, Optional, Tupl
from synapse.api.errors import Codes, FederationDeniedError, SynapseError
from synapse.api.urls import FEDERATION_V1_PREFIX
from synapse.http.server import HttpServer, ServletCallback, is_method_cancellable
from synapse.http.server import HttpServer, ServletCallback
from synapse.http.servlet import parse_json_object_from_request
from synapse.http.site import SynapseRequest
from synapse.logging.context import run_in_background
@ -34,6 +34,7 @@ from synapse.logging.opentracing import (
whitelisted_homeserver,
)
from synapse.types import JsonDict
from synapse.util.cancellation import is_function_cancellable
from synapse.util.ratelimitutils import FederationRateLimiter
from synapse.util.stringutils import parse_and_validate_server_name
@ -375,7 +376,7 @@ class BaseFederationServlet:
if code is None:
continue
if is_method_cancellable(code):
if is_function_cancellable(code):
# The wrapper added by `self._wrap` will inherit the cancellable flag,
# but the wrapper itself does not support cancellation yet.
# Once resolved, the cancellation tests in

View file

@ -310,6 +310,7 @@ class DeviceHandler(DeviceWorkerHandler):
super().__init__(hs)
self.federation_sender = hs.get_federation_sender()
self._storage_controllers = hs.get_storage_controllers()
self.device_list_updater = DeviceListUpdater(hs, self)
@ -694,8 +695,11 @@ class DeviceHandler(DeviceWorkerHandler):
# Ignore any users that aren't ours
if self.hs.is_mine_id(user_id):
joined_user_ids = await self.store.get_users_in_room(room_id)
hosts = {get_domain_from_id(u) for u in joined_user_ids}
hosts = set(
await self._storage_controllers.state.get_current_hosts_in_room(
room_id
)
)
hosts.discard(self.server_name)
# Check if we've already sent this update to some hosts

View file

@ -30,7 +30,7 @@ from synapse.api.errors import (
from synapse.appservice import ApplicationService
from synapse.module_api import NOT_SPAM
from synapse.storage.databases.main.directory import RoomAliasMapping
from synapse.types import JsonDict, Requester, RoomAlias, get_domain_from_id
from synapse.types import JsonDict, Requester, RoomAlias
if TYPE_CHECKING:
from synapse.server import HomeServer
@ -85,8 +85,9 @@ class DirectoryHandler:
# TODO(erikj): Add transactions.
# TODO(erikj): Check if there is a current association.
if not servers:
users = await self.store.get_users_in_room(room_id)
servers = {get_domain_from_id(u) for u in users}
servers = await self._storage_controllers.state.get_current_hosts_in_room(
room_id
)
if not servers:
raise SynapseError(400, "Failed to get server list")
@ -292,8 +293,9 @@ class DirectoryHandler:
Codes.NOT_FOUND,
)
users = await self.store.get_users_in_room(room_id)
extra_servers = {get_domain_from_id(u) for u in users}
extra_servers = await self._storage_controllers.state.get_current_hosts_in_room(
room_id
)
servers_set = set(extra_servers) | set(servers)
# If this server is in the list of servers, return it first.

View file

@ -129,12 +129,9 @@ class EventAuthHandler:
else:
users = {}
# Find the user with the highest power level.
users_in_room = await self._store.get_users_in_room(room_id)
# Only interested in local users.
local_users_in_room = [
u for u in users_in_room if get_domain_from_id(u) == self._server_name
]
# Find the user with the highest power level (only interested in local
# users).
local_users_in_room = await self._store.get_local_users_in_room(room_id)
chosen_user = max(
local_users_in_room,
key=lambda user: users.get(user, users_default_level),

View file

@ -151,7 +151,7 @@ class EventHandler:
"""Retrieve a single specified event.
Args:
user: The user requesting the event
user: The local user requesting the event
room_id: The expected room id. We'll return None if the
event's room does not match.
event_id: The event ID to obtain.
@ -173,8 +173,11 @@ class EventHandler:
if not event:
return None
users = await self.store.get_users_in_room(event.room_id)
is_peeking = user.to_string() not in users
is_user_in_room = await self.store.check_local_user_in_room(
user_id=user.to_string(), room_id=event.room_id
)
# The user is peeking if they aren't in the room already
is_peeking = not is_user_in_room
filtered = await filter_events_for_client(
self._storage_controllers, user.to_string(), [event], is_peeking=is_peeking

View file

@ -70,7 +70,7 @@ from synapse.replication.http.federation import (
from synapse.storage.databases.main.events import PartialStateConflictError
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.storage.state import StateFilter
from synapse.types import JsonDict, StateMap, get_domain_from_id
from synapse.types import JsonDict, get_domain_from_id
from synapse.util.async_helpers import Linearizer
from synapse.util.retryutils import NotRetryingDestination
from synapse.visibility import filter_events_for_server
@ -104,37 +104,6 @@ backfill_processing_before_timer = Histogram(
)
def get_domains_from_state(state: StateMap[EventBase]) -> List[Tuple[str, int]]:
"""Get joined domains from state
Args:
state: State map from type/state key to event.
Returns:
Returns a list of servers with the lowest depth of their joins.
Sorted by lowest depth first.
"""
joined_users = [
(state_key, int(event.depth))
for (e_type, state_key), event in state.items()
if e_type == EventTypes.Member and event.membership == Membership.JOIN
]
joined_domains: Dict[str, int] = {}
for u, d in joined_users:
try:
dom = get_domain_from_id(u)
old_d = joined_domains.get(dom)
if old_d:
joined_domains[dom] = min(d, old_d)
else:
joined_domains[dom] = d
except Exception:
pass
return sorted(joined_domains.items(), key=lambda d: d[1])
class _BackfillPointType(Enum):
# a regular backwards extremity (ie, an event which we don't yet have, but which
# is referred to by other events in the DAG)
@ -432,21 +401,19 @@ class FederationHandler:
)
# Now we need to decide which hosts to hit first.
# First we try hosts that are already in the room
# First we try hosts that are already in the room.
# TODO: HEURISTIC ALERT.
likely_domains = (
await self._storage_controllers.state.get_current_hosts_in_room(room_id)
)
curr_state = await self._storage_controllers.state.get_current_state(room_id)
curr_domains = get_domains_from_state(curr_state)
likely_domains = [
domain for domain, depth in curr_domains if domain != self.server_name
]
async def try_backfill(domains: List[str]) -> bool:
async def try_backfill(domains: Collection[str]) -> bool:
# TODO: Should we try multiple of these at a time?
for dom in domains:
# We don't want to ask our own server for information we don't have
if dom == self.server_name:
continue
try:
await self._federation_event_handler.backfill(
dom, room_id, limit=100, extremities=extremities_to_request

View file

@ -1041,6 +1041,14 @@ class FederationEventHandler:
InvalidResponseError: if the remote homeserver's response contains fields
of the wrong type.
"""
# It would be better if we could query the difference from our known
# state to the given `event_id` so the sending server doesn't have to
# send as much and we don't have to process as many events. For example
# in a room like #matrix:matrix.org, we get 200k events (77k state_events, 122k
# auth_events) from this call.
#
# Tracked by https://github.com/matrix-org/synapse/issues/13618
(
state_event_ids,
auth_event_ids,

View file

@ -538,11 +538,7 @@ class IdentityHandler:
raise SynapseError(400, "Error contacting the identity server")
async def lookup_3pid(
self,
id_server: str,
medium: str,
address: str,
id_access_token: Optional[str] = None,
self, id_server: str, medium: str, address: str, id_access_token: str
) -> Optional[str]:
"""Looks up a 3pid in the passed identity server.
@ -557,60 +553,15 @@ class IdentityHandler:
Returns:
the matrix ID of the 3pid, or None if it is not recognized.
"""
if id_access_token is not None:
try:
results = await self._lookup_3pid_v2(
id_server, id_access_token, medium, address
)
return results
except Exception as e:
# Catch HttpResponseExcept for a non-200 response code
# Check if this identity server does not know about v2 lookups
if isinstance(e, HttpResponseException) and e.code == 404:
# This is an old identity server that does not yet support v2 lookups
logger.warning(
"Attempted v2 lookup on v1 identity server %s. Falling "
"back to v1",
id_server,
)
else:
logger.warning("Error when looking up hashing details: %s", e)
return None
return await self._lookup_3pid_v1(id_server, medium, address)
async def _lookup_3pid_v1(
self, id_server: str, medium: str, address: str
) -> Optional[str]:
"""Looks up a 3pid in the passed identity server using v1 lookup.
Args:
id_server: The server name (including port, if required)
of the identity server to use.
medium: The type of the third party identifier (e.g. "email").
address: The third party identifier (e.g. "foo@example.com").
Returns:
the matrix ID of the 3pid, or None if it is not recognized.
"""
try:
data = await self.blacklisting_http_client.get_json(
"%s%s/_matrix/identity/api/v1/lookup" % (id_server_scheme, id_server),
{"medium": medium, "address": address},
results = await self._lookup_3pid_v2(
id_server, id_access_token, medium, address
)
if "mxid" in data:
# note: we used to verify the identity server's signature here, but no longer
# require or validate it. See the following for context:
# https://github.com/matrix-org/synapse/issues/5253#issuecomment-666246950
return data["mxid"]
except RequestTimedOutError:
raise SynapseError(500, "Timed out contacting identity server")
except OSError as e:
logger.warning("Error from v1 identity server lookup: %s" % (e,))
return None
return results
except Exception as e:
logger.warning("Error when looking up hashing details: %s", e)
return None
async def _lookup_3pid_v2(
self, id_server: str, id_access_token: str, medium: str, address: str
@ -739,7 +690,7 @@ class IdentityHandler:
room_type: Optional[str],
inviter_display_name: str,
inviter_avatar_url: str,
id_access_token: Optional[str] = None,
id_access_token: str,
) -> Tuple[str, List[Dict[str, str]], Dict[str, str], str]:
"""
Asks an identity server for a third party invite.
@ -760,7 +711,7 @@ class IdentityHandler:
inviter_display_name: The current display name of the
inviter.
inviter_avatar_url: The URL of the inviter's avatar.
id_access_token (str|None): The access token to authenticate to the identity
id_access_token (str): The access token to authenticate to the identity
server with
Returns:
@ -792,71 +743,24 @@ class IdentityHandler:
invite_config["org.matrix.web_client_location"] = self._web_client_location
# Add the identity service access token to the JSON body and use the v2
# Identity Service endpoints if id_access_token is present
# Identity Service endpoints
data = None
base_url = "%s%s/_matrix/identity" % (id_server_scheme, id_server)
if id_access_token:
key_validity_url = "%s%s/_matrix/identity/v2/pubkey/isvalid" % (
id_server_scheme,
id_server,
key_validity_url = "%s%s/_matrix/identity/v2/pubkey/isvalid" % (
id_server_scheme,
id_server,
)
url = "%s%s/_matrix/identity/v2/store-invite" % (id_server_scheme, id_server)
try:
data = await self.blacklisting_http_client.post_json_get_json(
url,
invite_config,
{"Authorization": create_id_access_token_header(id_access_token)},
)
except RequestTimedOutError:
raise SynapseError(500, "Timed out contacting identity server")
# Attempt a v2 lookup
url = base_url + "/v2/store-invite"
try:
data = await self.blacklisting_http_client.post_json_get_json(
url,
invite_config,
{"Authorization": create_id_access_token_header(id_access_token)},
)
except RequestTimedOutError:
raise SynapseError(500, "Timed out contacting identity server")
except HttpResponseException as e:
if e.code != 404:
logger.info("Failed to POST %s with JSON: %s", url, e)
raise e
if data is None:
key_validity_url = "%s%s/_matrix/identity/api/v1/pubkey/isvalid" % (
id_server_scheme,
id_server,
)
url = base_url + "/api/v1/store-invite"
try:
data = await self.blacklisting_http_client.post_json_get_json(
url, invite_config
)
except RequestTimedOutError:
raise SynapseError(500, "Timed out contacting identity server")
except HttpResponseException as e:
logger.warning(
"Error trying to call /store-invite on %s%s: %s",
id_server_scheme,
id_server,
e,
)
if data is None:
# Some identity servers may only support application/x-www-form-urlencoded
# types. This is especially true with old instances of Sydent, see
# https://github.com/matrix-org/sydent/pull/170
try:
data = await self.blacklisting_http_client.post_urlencoded_get_json(
url, invite_config
)
except HttpResponseException as e:
logger.warning(
"Error calling /store-invite on %s%s with fallback "
"encoding: %s",
id_server_scheme,
id_server,
e,
)
raise e
# TODO: Check for success
token = data["token"]
public_keys = data.get("public_keys", [])
if "public_key" in data:

View file

@ -763,8 +763,10 @@ class EventCreationHandler:
async def _is_server_notices_room(self, room_id: str) -> bool:
if self.config.servernotices.server_notices_mxid is None:
return False
user_ids = await self.store.get_users_in_room(room_id)
return self.config.servernotices.server_notices_mxid in user_ids
is_server_notices_room = await self.store.check_local_user_in_room(
user_id=self.config.servernotices.server_notices_mxid, room_id=room_id
)
return is_server_notices_room
async def assert_accepted_privacy_policy(self, requester: Requester) -> None:
"""Check if a user has accepted the privacy policy

View file

@ -159,11 +159,9 @@ class PaginationHandler:
self._retention_allowed_lifetime_max = (
hs.config.retention.retention_allowed_lifetime_max
)
self._is_master = hs.config.worker.worker_app is None
if (
hs.config.worker.run_background_tasks
and hs.config.retention.retention_enabled
):
if hs.config.retention.retention_enabled and self._is_master:
# Run the purge jobs described in the configuration file.
for job in hs.config.retention.retention_purge_jobs:
logger.info("Setting up purge job with config: %s", job)

View file

@ -2051,8 +2051,7 @@ async def get_interested_remotes(
)
for room_id, states in room_ids_to_states.items():
user_ids = await store.get_users_in_room(room_id)
hosts = {get_domain_from_id(user_id) for user_id in user_ids}
hosts = await store.get_current_hosts_in_room(room_id)
for host in hosts:
hosts_and_states.setdefault(host, set()).update(states)

View file

@ -164,10 +164,7 @@ class ReceiptsHandler:
if not is_new:
return
if self.federation_sender and receipt_type not in (
ReceiptTypes.READ_PRIVATE,
ReceiptTypes.UNSTABLE_READ_PRIVATE,
):
if self.federation_sender and receipt_type != ReceiptTypes.READ_PRIVATE:
await self.federation_sender.send_read_receipt(receipt)
@ -207,38 +204,24 @@ class ReceiptEventSource(EventSource[int, JsonDict]):
for event_id, orig_event_content in room.get("content", {}).items():
event_content = orig_event_content
# If there are private read receipts, additional logic is necessary.
if (
ReceiptTypes.READ_PRIVATE in event_content
or ReceiptTypes.UNSTABLE_READ_PRIVATE in event_content
):
if ReceiptTypes.READ_PRIVATE in event_content:
# Make a copy without private read receipts to avoid leaking
# other users' private read receipts.
event_content = {
receipt_type: receipt_value
for receipt_type, receipt_value in event_content.items()
if receipt_type
not in (
ReceiptTypes.READ_PRIVATE,
ReceiptTypes.UNSTABLE_READ_PRIVATE,
)
if receipt_type != ReceiptTypes.READ_PRIVATE
}
# Copy the current user's private read receipt from the
# original content, if it exists.
user_private_read_receipt = orig_event_content.get(
ReceiptTypes.READ_PRIVATE, {}
).get(user_id, None)
user_private_read_receipt = orig_event_content[
ReceiptTypes.READ_PRIVATE
].get(user_id, None)
if user_private_read_receipt:
event_content[ReceiptTypes.READ_PRIVATE] = {
user_id: user_private_read_receipt
}
user_unstable_private_read_receipt = orig_event_content.get(
ReceiptTypes.UNSTABLE_READ_PRIVATE, {}
).get(user_id, None)
if user_unstable_private_read_receipt:
event_content[ReceiptTypes.UNSTABLE_READ_PRIVATE] = {
user_id: user_unstable_private_read_receipt
}
# Include the event if there is at least one non-private read
# receipt or the current user has a private read receipt.

View file

@ -19,6 +19,7 @@ import math
import random
import string
from collections import OrderedDict
from http import HTTPStatus
from typing import (
TYPE_CHECKING,
Any,
@ -60,7 +61,6 @@ from synapse.event_auth import validate_event_for_room_version
from synapse.events import EventBase
from synapse.events.utils import copy_and_fixup_power_levels_contents
from synapse.federation.federation_client import InvalidResponseError
from synapse.handlers.federation import get_domains_from_state
from synapse.handlers.relations import BundledAggregations
from synapse.module_api import NOT_SPAM
from synapse.rest.admin._base import assert_user_is_admin
@ -705,8 +705,8 @@ class RoomCreationHandler:
was, requested, `room_alias`. Secondly, the stream_id of the
last persisted event.
Raises:
SynapseError if the room ID couldn't be stored, or something went
horribly wrong.
SynapseError if the room ID couldn't be stored, 3pid invitation config
validation failed, or something went horribly wrong.
ResourceLimitError if the server is blocked due to some resource limit
being exceeded
"""
@ -732,6 +732,19 @@ class RoomCreationHandler:
invite_3pid_list = config.get("invite_3pid", [])
invite_list = config.get("invite", [])
# validate each entry for correctness
for invite_3pid in invite_3pid_list:
if not all(
key in invite_3pid
for key in ("medium", "address", "id_server", "id_access_token")
):
raise SynapseError(
HTTPStatus.BAD_REQUEST,
"all of `medium`, `address`, `id_server` and `id_access_token` "
"are required when making a 3pid invite",
Codes.MISSING_PARAM,
)
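A well-formed entry must therefore carry all four keys checked above; the values here are hypothetical:
# Editor's sketch: an invite_3pid entry that passes the validation above.
invite_3pid = {
    "medium": "email",
    "address": "alice@example.com",
    "id_server": "id.example.com",
    "id_access_token": "syt_hypothetical_token",
}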
if not is_requester_admin:
spam_check = await self.spam_checker.user_may_create_room(user_id)
if spam_check != NOT_SPAM:
@ -991,7 +1004,7 @@ class RoomCreationHandler:
for invite_3pid in invite_3pid_list:
id_server = invite_3pid["id_server"]
id_access_token = invite_3pid.get("id_access_token") # optional
id_access_token = invite_3pid["id_access_token"]
address = invite_3pid["address"]
medium = invite_3pid["medium"]
# Note that do_3pid_invite can raise a ShadowBanError, but this was
@ -1296,8 +1309,11 @@ class RoomContextHandler:
before_limit = math.floor(limit / 2.0)
after_limit = limit - before_limit
users = await self.store.get_users_in_room(room_id)
is_peeking = user.to_string() not in users
is_user_in_room = await self.store.check_local_user_in_room(
user_id=user.to_string(), room_id=room_id
)
# The user is peeking if they aren't in the room already
is_peeking = not is_user_in_room
async def filter_evts(events: List[EventBase]) -> List[EventBase]:
if use_admin_priviledge:
@ -1471,17 +1487,16 @@ class TimestampLookupHandler:
timestamp,
)
# Find other homeservers from the given state in the room
curr_state = await self._storage_controllers.state.get_current_state(
room_id
likely_domains = (
await self._storage_controllers.state.get_current_hosts_in_room(room_id)
)
curr_domains = get_domains_from_state(curr_state)
likely_domains = [
domain for domain, depth in curr_domains if domain != self.server_name
]
# Loop through each homeserver candidate until we get a successful response
for domain in likely_domains:
# We don't want to ask our own server for information we don't have
if domain == self.server_name:
continue
try:
remote_response = await self.federation_client.timestamp_to_event(
domain, room_id, timestamp, direction

View file

@ -1382,7 +1382,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
id_server: str,
requester: Requester,
txn_id: Optional[str],
id_access_token: Optional[str] = None,
id_access_token: str,
prev_event_ids: Optional[List[str]] = None,
depth: Optional[int] = None,
) -> Tuple[str, int]:
@ -1397,7 +1397,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
requester: The user making the request.
txn_id: The transaction ID this is part of, or None if this is not
part of a transaction.
id_access_token: The optional identity server access token.
id_access_token: Identity server access token.
depth: Override the depth used to order the event in the DAG.
prev_event_ids: The event IDs to use as the prev events
Should normally be set to None, which will cause the depth to be calculated
@ -1494,7 +1494,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
room_id: str,
user: UserID,
txn_id: Optional[str],
id_access_token: Optional[str] = None,
id_access_token: str,
prev_event_ids: Optional[List[str]] = None,
depth: Optional[int] = None,
) -> Tuple[EventBase, int]:
@ -1620,8 +1620,10 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
async def _is_server_notice_room(self, room_id: str) -> bool:
if self._server_notices_mxid is None:
return False
user_ids = await self.store.get_users_in_room(room_id)
return self._server_notices_mxid in user_ids
is_server_notices_room = await self.store.check_local_user_in_room(
user_id=self._server_notices_mxid, room_id=room_id
)
return is_server_notices_room
class RoomMemberMasterHandler(RoomMemberHandler):
@ -1923,8 +1925,11 @@ class RoomMemberMasterHandler(RoomMemberHandler):
]:
raise SynapseError(400, "User %s in room %s" % (user_id, room_id))
if membership:
await self.store.forget(user_id, room_id)
# In the normal case this call is only required if `membership` is not `None`.
# However, after the last member has left the room, the background update
# `_background_remove_left_rooms` deletes the rows for this room from the table
# `current_state_events`, so `get_current_state_events` returns `None`.
await self.store.forget(user_id, room_id)
def get_users_which_can_issue_invite(auth_events: StateMap[EventBase]) -> List[str]:

View file

@ -2420,10 +2420,10 @@ class SyncHandler:
joined_room.room_id, joined_room.event_pos.stream
)
)
users_in_room = await self.state.get_current_users_in_room(
user_ids_in_room = await self.state.get_current_user_ids_in_room(
joined_room.room_id, extrems
)
if user_id in users_in_room:
if user_id in user_ids_in_room:
joined_room_ids.add(joined_room.room_id)
return frozenset(joined_room_ids)

View file

@ -26,7 +26,7 @@ from synapse.metrics.background_process_metrics import (
)
from synapse.replication.tcp.streams import TypingStream
from synapse.streams import EventSource
from synapse.types import JsonDict, Requester, StreamKeyType, UserID, get_domain_from_id
from synapse.types import JsonDict, Requester, StreamKeyType, UserID
from synapse.util.caches.stream_change_cache import StreamChangeCache
from synapse.util.metrics import Measure
from synapse.util.wheel_timer import WheelTimer
@ -362,8 +362,9 @@ class TypingWriterHandler(FollowerTypingHandler):
)
return
users = await self.store.get_users_in_room(room_id)
domains = {get_domain_from_id(u) for u in users}
domains = await self._storage_controllers.state.get_current_hosts_in_room(
room_id
)
if self.server_name in domains:
logger.info("Got typing update from %s: %r", user_id, content)

View file

@ -33,7 +33,6 @@ from typing import (
Optional,
Pattern,
Tuple,
TypeVar,
Union,
)
@ -64,6 +63,7 @@ from synapse.logging.context import defer_to_thread, preserve_fn, run_in_backgro
from synapse.logging.opentracing import active_span, start_active_span, trace_servlet
from synapse.util import json_encoder
from synapse.util.caches import intern_dict
from synapse.util.cancellation import is_function_cancellable
from synapse.util.iterutils import chunk_seq
if TYPE_CHECKING:
@ -94,68 +94,6 @@ HTML_ERROR_TEMPLATE = """<!DOCTYPE html>
HTTP_STATUS_REQUEST_CANCELLED = 499
F = TypeVar("F", bound=Callable[..., Any])
_cancellable_method_names = frozenset(
{
# `RestServlet`, `BaseFederationServlet` and `BaseFederationServerServlet`
# methods
"on_GET",
"on_PUT",
"on_POST",
"on_DELETE",
# `_AsyncResource`, `DirectServeHtmlResource` and `DirectServeJsonResource`
# methods
"_async_render_GET",
"_async_render_PUT",
"_async_render_POST",
"_async_render_DELETE",
"_async_render_OPTIONS",
# `ReplicationEndpoint` methods
"_handle_request",
}
)
def cancellable(method: F) -> F:
"""Marks a servlet method as cancellable.
Methods with this decorator will be cancelled if the client disconnects before we
finish processing the request.
During cancellation, `Deferred.cancel()` will be invoked on the `Deferred` wrapping
the method. The `cancel()` call will propagate down to the `Deferred` that is
currently being waited on. That `Deferred` will raise a `CancelledError`, which will
propagate up, as per normal exception handling.
Before applying this decorator to a new endpoint, you MUST recursively check
that all `await`s in the function are on `async` functions or `Deferred`s that
handle cancellation cleanly, otherwise a variety of bugs may occur, ranging from
premature logging context closure, to stuck requests, to database corruption.
Usage:
class SomeServlet(RestServlet):
@cancellable
async def on_GET(self, request: SynapseRequest) -> ...:
...
"""
if method.__name__ not in _cancellable_method_names and not any(
method.__name__.startswith(prefix) for prefix in _cancellable_method_names
):
raise ValueError(
"@cancellable decorator can only be applied to servlet methods."
)
method.cancellable = True # type: ignore[attr-defined]
return method
def is_method_cancellable(method: Callable[..., Any]) -> bool:
"""Checks whether a servlet method has the `@cancellable` flag."""
return getattr(method, "cancellable", False)
def return_json_error(
f: failure.Failure, request: SynapseRequest, config: Optional[HomeServerConfig]
) -> None:
@ -389,7 +327,7 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta):
method_handler = getattr(self, "_async_render_%s" % (request_method,), None)
if method_handler:
request.is_render_cancellable = is_method_cancellable(method_handler)
request.is_render_cancellable = is_function_cancellable(method_handler)
raw_callback_return = method_handler(request)
@ -551,7 +489,7 @@ class JsonResource(DirectServeJsonResource):
async def _async_render(self, request: SynapseRequest) -> Tuple[int, Any]:
callback, servlet_classname, group_dict = self._get_handler_for_request(request)
request.is_render_cancellable = is_method_cancellable(callback)
request.is_render_cancellable = is_function_cancellable(callback)
# Make sure we have an appropriate name for this handler in prometheus
# (rather than the default of JsonResource).

View file

@ -46,12 +46,12 @@ from twisted.python.threadpool import ThreadPool
# This module is imported for its side effects; flake8 needn't warn that it's unused.
import synapse.metrics._reactor_metrics # noqa: F401
from synapse.metrics._exposition import (
from synapse.metrics._gc import MIN_TIME_BETWEEN_GCS, install_gc_manager
from synapse.metrics._legacy_exposition import (
MetricsResource,
generate_latest,
start_http_server,
)
from synapse.metrics._gc import MIN_TIME_BETWEEN_GCS, install_gc_manager
from synapse.metrics._types import Collector
from synapse.util import SYNAPSE_VERSION

View file

@ -80,7 +80,27 @@ def sample_line(line: Sample, name: str) -> str:
return "{}{} {}{}\n".format(name, labelstr, floatToGoString(line.value), timestamp)
# Mapping from new metric names to legacy metric names.
# We translate these back to their old names when exposing them through our
# legacy vendored exporter.
# Only this legacy exposition module applies these name changes.
LEGACY_METRIC_NAMES = {
"synapse_util_caches_cache_hits": "synapse_util_caches_cache:hits",
"synapse_util_caches_cache_size": "synapse_util_caches_cache:size",
"synapse_util_caches_cache_evicted_size": "synapse_util_caches_cache:evicted_size",
"synapse_util_caches_cache_total": "synapse_util_caches_cache:total",
"synapse_util_caches_response_cache_size": "synapse_util_caches_response_cache:size",
"synapse_util_caches_response_cache_hits": "synapse_util_caches_response_cache:hits",
"synapse_util_caches_response_cache_evicted_size": "synapse_util_caches_response_cache:evicted_size",
"synapse_util_caches_response_cache_total": "synapse_util_caches_response_cache:total",
}
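For instance, the mapping sends a modern name back to its colon-style predecessor:
# Editor's sketch, illustrative only.
mname = LEGACY_METRIC_NAMES.get("synapse_util_caches_cache_hits")
assert mname == "synapse_util_caches_cache:hits"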
def generate_latest(registry: CollectorRegistry, emit_help: bool = False) -> bytes:
"""
Generate metrics in legacy format. Modern metrics are generated directly
by prometheus-client.
"""
# Trigger the cache metrics to be rescraped, which updates the common
# metrics but does not itself produce metrics
@ -94,7 +114,8 @@ def generate_latest(registry: CollectorRegistry, emit_help: bool = False) -> byt
# No samples, don't bother.
continue
mname = metric.name
# Translate to legacy metric name if it has one.
mname = LEGACY_METRIC_NAMES.get(metric.name, metric.name)
mnewname = metric.name
mtype = metric.type
@ -124,7 +145,7 @@ def generate_latest(registry: CollectorRegistry, emit_help: bool = False) -> byt
om_samples: Dict[str, List[str]] = {}
for s in metric.samples:
for suffix in ["_created", "_gsum", "_gcount"]:
if s.name == metric.name + suffix:
if s.name == mname + suffix:
# OpenMetrics specific sample, put in a gauge at the end.
# (these come from gaugehistograms which don't get renamed,
# so no need to faff with mnewname)
@ -140,12 +161,12 @@ def generate_latest(registry: CollectorRegistry, emit_help: bool = False) -> byt
if emit_help:
output.append(
"# HELP {}{} {}\n".format(
metric.name,
mname,
suffix,
metric.documentation.replace("\\", r"\\").replace("\n", r"\n"),
)
)
output.append(f"# TYPE {metric.name}{suffix} gauge\n")
output.append(f"# TYPE {mname}{suffix} gauge\n")
output.extend(lines)
# Get rid of the weird colon things while we're at it
@ -170,11 +191,12 @@ def generate_latest(registry: CollectorRegistry, emit_help: bool = False) -> byt
# Get rid of the OpenMetrics specific samples (we should already have
# dealt with them above anyway.)
for suffix in ["_created", "_gsum", "_gcount"]:
if s.name == metric.name + suffix:
if s.name == mname + suffix:
break
else:
sample_name = LEGACY_METRIC_NAMES.get(s.name, s.name)
output.append(
sample_line(s, s.name.replace(":total", "").replace(":", "_"))
sample_line(s, sample_name.replace(":total", "").replace(":", "_"))
)
return "".join(output).encode("utf-8")

View file

@ -0,0 +1,79 @@
# Copyright 2022 The Matrix.org Foundation C.I.C
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import attr
from synapse.metrics.background_process_metrics import run_as_background_process
if TYPE_CHECKING:
from synapse.server import HomeServer
from prometheus_client import Gauge
# Gauge to expose daily active users metrics
current_dau_gauge = Gauge(
"synapse_admin_daily_active_users",
"Current daily active users count",
)
@attr.s(auto_attribs=True)
class CommonUsageMetrics:
"""Usage metrics shared between the phone home stats and the prometheus exporter."""
daily_active_users: int
class CommonUsageMetricsManager:
"""Collects common usage metrics."""
def __init__(self, hs: "HomeServer") -> None:
self._store = hs.get_datastores().main
self._clock = hs.get_clock()
async def get_metrics(self) -> CommonUsageMetrics:
"""Get the CommonUsageMetrics object. If no collection has happened yet, do it
before returning the metrics.
Returns:
The CommonUsageMetrics object to read common metrics from.
"""
return await self._collect()
async def setup(self) -> None:
"""Keep the gauges for common usage metrics up to date."""
await self._update_gauges()
self._clock.looping_call(
run_as_background_process,
5 * 60 * 1000,
desc="common_usage_metrics_update_gauges",
func=self._update_gauges,
)
async def _collect(self) -> CommonUsageMetrics:
"""Collect the common metrics and either create the CommonUsageMetrics object to
use if it doesn't exist yet, or update it.
"""
dau_count = await self._store.count_daily_users()
return CommonUsageMetrics(
daily_active_users=dau_count,
)
async def _update_gauges(self) -> None:
"""Update the Prometheus gauges."""
metrics = await self._collect()
current_dau_gauge.set(float(metrics.daily_active_users))

View file

@ -262,7 +262,12 @@ class BulkPushRuleEvaluator:
# This can happen due to out of band memberships
return
count_as_unread = _should_count_as_unread(event, context)
# Disable counting as unread unless the experimental configuration is
# enabled, as it can cause additional (unwanted) rows to be added to the
# event_push_actions table.
count_as_unread = False
if self.hs.config.experimental.msc2654_enabled:
count_as_unread = _should_count_as_unread(event, context)
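A minimal sketch of opting back in, assuming the `experimental_features` section of the homeserver config (shown here as the parsed dict):
# Editor's sketch: re-enable MSC2654 unread counts. Note the caveat above
# about previously calculated push actions.
config = {"experimental_features": {"msc2654_enabled": True}}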
rules_by_user = await self._get_rules_for_event(event)
actions_by_user: Dict[str, Collection[Union[Mapping, str]]] = {}

View file

@ -26,12 +26,13 @@ from twisted.web.server import Request
from synapse.api.errors import HttpResponseException, SynapseError
from synapse.http import RequestTimedOutError
from synapse.http.server import HttpServer, is_method_cancellable
from synapse.http.server import HttpServer
from synapse.http.site import SynapseRequest
from synapse.logging import opentracing
from synapse.logging.opentracing import trace_with_opname
from synapse.types import JsonDict
from synapse.util.caches.response_cache import ResponseCache
from synapse.util.cancellation import is_function_cancellable
from synapse.util.stringutils import random_string
if TYPE_CHECKING:
@ -311,7 +312,7 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
url_args = list(self.PATH_ARGS)
method = self.METHOD
if self.CACHE and is_method_cancellable(self._handle_request):
if self.CACHE and is_function_cancellable(self._handle_request):
raise Exception(
f"{self.__class__.__name__} has been marked as cancellable, but CACHE "
"is set. The cancellable flag would have no effect."
@ -359,6 +360,6 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta):
# The `@cancellable` decorator may be applied to `_handle_request`. But we
# told `HttpServer.register_paths` that our handler is `_check_auth_and_handle`,
# so we have to set up the cancellable flag ourselves.
request.is_render_cancellable = is_method_cancellable(self._handle_request)
request.is_render_cancellable = is_function_cancellable(self._handle_request)
return await self._handle_request(request, **kwargs)

View file

@ -31,6 +31,5 @@ class SlavedPushRuleStore(SlavedEventStore, PushRulesWorkerStore):
self._push_rules_stream_id_gen.advance(instance_name, token)
for row in rows:
self.get_push_rules_for_user.invalidate((row.user_id,))
self.get_push_rules_enabled_for_user.invalidate((row.user_id,))
self.push_rules_stream_cache.entity_has_changed(row.user_id, token)
return super().process_replication_rows(stream_name, instance_name, token, rows)

View file

@ -416,10 +416,7 @@ class FederationSenderHandler:
if not self._is_mine_id(receipt.user_id):
continue
# Private read receipts never get sent over federation.
if receipt.receipt_type in (
ReceiptTypes.READ_PRIVATE,
ReceiptTypes.UNSTABLE_READ_PRIVATE,
):
if receipt.receipt_type == ReceiptTypes.READ_PRIVATE:
continue
receipt_info = ReadReceipt(
receipt.room_id,

View file

@ -35,7 +35,6 @@ from twisted.internet.protocol import ReconnectingClientFactory
from synapse.metrics import LaterGauge
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.replication.tcp.client import DirectTcpReplicationClientFactory
from synapse.replication.tcp.commands import (
ClearUserSyncsCommand,
Command,
@ -332,46 +331,31 @@ class ReplicationCommandHandler:
def start_replication(self, hs: "HomeServer") -> None:
"""Helper method to start replication."""
if hs.config.redis.redis_enabled:
from synapse.replication.tcp.redis import (
RedisDirectTcpReplicationClientFactory,
)
from synapse.replication.tcp.redis import RedisDirectTcpReplicationClientFactory
# First let's ensure that we have a ReplicationStreamer started.
hs.get_replication_streamer()
# First let's ensure that we have a ReplicationStreamer started.
hs.get_replication_streamer()
# We need two connections to redis, one for the subscription stream and
# one to send commands to (as you can't send further redis commands to a
# connection after SUBSCRIBE is called).
# We need two connections to redis, one for the subscription stream and
# one to send commands to (as you can't send further redis commands to a
# connection after SUBSCRIBE is called).
# First create the connection for sending commands.
outbound_redis_connection = hs.get_outbound_redis_connection()
# First create the connection for sending commands.
outbound_redis_connection = hs.get_outbound_redis_connection()
# Now create the factory/connection for the subscription stream.
self._factory = RedisDirectTcpReplicationClientFactory(
hs,
outbound_redis_connection,
channel_names=self._channels_to_subscribe_to,
)
hs.get_reactor().connectTCP(
hs.config.redis.redis_host,
hs.config.redis.redis_port,
self._factory,
timeout=30,
bindAddress=None,
)
else:
client_name = hs.get_instance_name()
self._factory = DirectTcpReplicationClientFactory(hs, client_name, self)
host = hs.config.worker.worker_replication_host
port = hs.config.worker.worker_replication_port
hs.get_reactor().connectTCP(
host,
port,
self._factory,
timeout=30,
bindAddress=None,
)
# Now create the factory/connection for the subscription stream.
self._factory = RedisDirectTcpReplicationClientFactory(
hs,
outbound_redis_connection,
channel_names=self._channels_to_subscribe_to,
)
hs.get_reactor().connectTCP(
hs.config.redis.redis_host,
hs.config.redis.redis_port,
self._factory,
timeout=30,
bindAddress=None,
)
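As an aside, a minimal sketch of the SUBSCRIBE limitation that forces two Redis connections, using the synchronous redis-py client purely for illustration (Synapse itself speaks to Redis through Twisted, not redis-py):
import redis

# One connection for ordinary commands...
commands = redis.Redis()
# ...and a dedicated one for the subscription stream: once SUBSCRIBE has been
# issued, that connection can no longer run ordinary commands.
listener = redis.Redis().pubsub()
listener.subscribe("replication")
commands.publish("replication", "hello")  # must use the non-subscribed connection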
def get_streams(self) -> Dict[str, Stream]:
"""Get a map from stream name to all streams."""

View file

@ -62,7 +62,6 @@ class NotificationsServlet(RestServlet):
[
ReceiptTypes.READ,
ReceiptTypes.READ_PRIVATE,
ReceiptTypes.UNSTABLE_READ_PRIVATE,
],
)

View file

@ -45,8 +45,6 @@ class ReadMarkerRestServlet(RestServlet):
ReceiptTypes.FULLY_READ,
ReceiptTypes.READ_PRIVATE,
}
if hs.config.experimental.msc2285_enabled:
self._known_receipt_types.add(ReceiptTypes.UNSTABLE_READ_PRIVATE)
async def on_POST(
self, request: SynapseRequest, room_id: str

View file

@ -49,8 +49,6 @@ class ReceiptRestServlet(RestServlet):
ReceiptTypes.READ_PRIVATE,
ReceiptTypes.FULLY_READ,
}
if hs.config.experimental.msc2285_enabled:
self._known_receipt_types.add(ReceiptTypes.UNSTABLE_READ_PRIVATE)
async def on_POST(
self, request: SynapseRequest, room_id: str, receipt_type: str, event_id: str

View file

@ -17,6 +17,7 @@
import logging
import re
from enum import Enum
from http import HTTPStatus
from typing import TYPE_CHECKING, Awaitable, Dict, List, Optional, Tuple
from urllib import parse as urlparse
@ -37,7 +38,7 @@ from synapse.api.errors import (
)
from synapse.api.filtering import Filter
from synapse.events.utils import format_event_for_client_v2
from synapse.http.server import HttpServer, cancellable
from synapse.http.server import HttpServer
from synapse.http.servlet import (
ResolveRoomIdMixin,
RestServlet,
@ -57,6 +58,7 @@ from synapse.storage.state import StateFilter
from synapse.streams.config import PaginationConfig
from synapse.types import JsonDict, StreamToken, ThirdPartyInstanceID, UserID
from synapse.util import json_decoder
from synapse.util.cancellation import cancellable
from synapse.util.stringutils import parse_and_validate_server_name, random_string
if TYPE_CHECKING:
@ -950,7 +952,16 @@ class RoomMembershipRestServlet(TransactionRestServlet):
# cheekily send invalid bodies.
content = {}
if membership_action == "invite" and self._has_3pid_invite_keys(content):
if membership_action == "invite" and all(
key in content for key in ("medium", "address")
):
if not all(key in content for key in ("id_server", "id_access_token")):
raise SynapseError(
HTTPStatus.BAD_REQUEST,
"`id_server` and `id_access_token` are required when doing 3pid invite",
Codes.MISSING_PARAM,
)
try:
await self.room_member_handler.do_3pid_invite(
room_id,
@ -960,7 +971,7 @@ class RoomMembershipRestServlet(TransactionRestServlet):
content["id_server"],
requester,
txn_id,
content.get("id_access_token"),
content["id_access_token"],
)
except ShadowBanError:
# Pretend the request succeeded.
@ -997,12 +1008,6 @@ class RoomMembershipRestServlet(TransactionRestServlet):
return 200, return_value
def _has_3pid_invite_keys(self, content: JsonDict) -> bool:
for key in {"id_server", "medium", "address"}:
if key not in content:
return False
return True
def on_PUT(
self, request: SynapseRequest, room_id: str, membership_action: str, txn_id: str
) -> Awaitable[Tuple[int, JsonDict]]:

View file

@ -95,7 +95,6 @@ class VersionsRestServlet(RestServlet):
"org.matrix.msc3026.busy_presence": self.config.experimental.msc3026_enabled,
# Supports receiving private read receipts as per MSC2285
"org.matrix.msc2285.stable": True, # TODO: Remove when MSC2285 becomes a part of the spec
"org.matrix.msc2285": self.config.experimental.msc2285_enabled,
# Supports filtering of /publicRooms by room type as per MSC3827
"org.matrix.msc3827.stable": True,
# Adds support for importing historical messages as per MSC2716

View file

@ -135,13 +135,6 @@ class RemoteKey(DirectServeJsonResource):
store_queries = []
for server_name, key_ids in query.items():
if (
self.federation_domain_whitelist is not None
and server_name not in self.federation_domain_whitelist
):
logger.debug("Federation denied with %s", server_name)
continue
if not key_ids:
key_ids = (None,)
for key_id in key_ids:
@ -153,21 +146,28 @@ class RemoteKey(DirectServeJsonResource):
time_now_ms = self.clock.time_msec()
# Note that the value is unused.
# Map server_name->key_id->int. Note that the value of the int is unused.
# XXX: why don't we just use a set?
cache_misses: Dict[str, Dict[str, int]] = {}
for (server_name, key_id, _), key_results in cached.items():
results = [(result["ts_added_ms"], result) for result in key_results]
if not results and key_id is not None:
cache_misses.setdefault(server_name, {})[key_id] = 0
if key_id is None:
# all keys were requested. Just return what we have without worrying
# about validity
for _, result in results:
# Cast to bytes since postgresql returns a memoryview.
json_results.add(bytes(result["key_json"]))
continue
if key_id is not None:
miss = False
if not results:
miss = True
else:
ts_added_ms, most_recent_result = max(results)
ts_valid_until_ms = most_recent_result["ts_valid_until_ms"]
req_key = query.get(server_name, {}).get(key_id, {})
req_valid_until = req_key.get("minimum_valid_until_ts")
miss = False
if req_valid_until is not None:
if ts_valid_until_ms < req_valid_until:
logger.debug(
@ -211,19 +211,20 @@ class RemoteKey(DirectServeJsonResource):
ts_valid_until_ms,
time_now_ms,
)
if miss:
cache_misses.setdefault(server_name, {})[key_id] = 0
# Cast to bytes since postgresql returns a memoryview.
json_results.add(bytes(most_recent_result["key_json"]))
else:
for _, result in results:
# Cast to bytes since postgresql returns a memoryview.
json_results.add(bytes(result["key_json"]))
if miss and query_remote_on_cache_miss:
# only bother attempting to fetch keys from servers on our whitelist
if (
self.federation_domain_whitelist is None
or server_name in self.federation_domain_whitelist
):
cache_misses.setdefault(server_name, {})[key_id] = 0
# If there is a cache miss, request the missing keys, then recurse (and
# ensure the result is sent).
if cache_misses and query_remote_on_cache_miss:
if cache_misses:
await yieldable_gather_results(
lambda t: self.fetcher.get_keys(*t),
(

View file

@ -254,30 +254,32 @@ async def respond_with_responder(
file_size: Size in bytes of the media. If not known it should be None
upload_name: The name of the requested file, if any.
"""
if request._disconnected:
logger.warning(
"Not sending response to request %s, already disconnected.", request
)
return
if not responder:
respond_404(request)
return
logger.debug("Responding to media request with responder %s", responder)
add_file_headers(request, media_type, file_size, upload_name)
try:
with responder:
await responder.write_to_consumer(request)
except Exception as e:
# The majority of the time this will be due to the client having gone
# away. Unfortunately, Twisted simply throws a generic exception at us
# in that case.
logger.warning("Failed to write to consumer: %s %s", type(e), e)
# If we have a responder we *must* use it as a context manager.
with responder:
if request._disconnected:
logger.warning(
"Not sending response to request %s, already disconnected.", request
)
return
# Unregister the producer, if it has one, so Twisted doesn't complain
if request.producer:
request.unregisterProducer()
logger.debug("Responding to media request with responder %s", responder)
add_file_headers(request, media_type, file_size, upload_name)
try:
await responder.write_to_consumer(request)
except Exception as e:
# The majority of the time this will be due to the client having gone
# away. Unfortunately, Twisted simply throws a generic exception at us
# in that case.
logger.warning("Failed to write to consumer: %s %s", type(e), e)
# Unregister the producer, if it has one, so Twisted doesn't complain
if request.producer:
request.unregisterProducer()
finish_request(request)

View file

@ -64,7 +64,6 @@ if TYPE_CHECKING:
logger = logging.getLogger(__name__)
# How often to run the background job to update the "recently accessed"
# attribute of local and remote media.
UPDATE_RECENTLY_ACCESSED_TS = 60 * 1000 # 1 minute

View file

@ -732,10 +732,6 @@ class PreviewUrlResource(DirectServeJsonResource):
logger.debug("Running url preview cache expiry")
if not (await self.store.db_pool.updates.has_completed_background_updates()):
logger.debug("Still running DB updates; skipping url preview cache expiry")
return
def try_remove_parent_dirs(dirs: Iterable[str]) -> None:
"""Attempt to remove the given chain of parent directories

View file

@ -105,6 +105,7 @@ from synapse.handlers.typing import FollowerTypingHandler, TypingWriterHandler
from synapse.handlers.user_directory import UserDirectoryHandler
from synapse.http.client import InsecureInterceptableContextFactory, SimpleHttpClient
from synapse.http.matrixfederationclient import MatrixFederationHttpClient
from synapse.metrics.common_usage_metrics import CommonUsageMetricsManager
from synapse.module_api import ModuleApi
from synapse.notifier import Notifier
from synapse.push.bulk_push_rule_evaluator import BulkPushRuleEvaluator
@ -829,3 +830,8 @@ class HomeServer(metaclass=abc.ABCMeta):
self.config.ratelimiting.rc_message,
self.config.ratelimiting.rc_admin_redaction,
)
@cache_in_self
def get_common_usage_metrics_manager(self) -> CommonUsageMetricsManager:
"""Usage metrics shared between phone home stats and the prometheus exporter."""
return CommonUsageMetricsManager(self)
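`@cache_in_self` memoises the getter on the HomeServer instance, so `CommonUsageMetricsManager` is built once and shared between the phone-home stats and the Prometheus exporter. An illustrative reimplementation of the idea (Synapse's real decorator in synapse/server.py also handles re-entrancy and errors; this shows only the core pattern):

import functools
from typing import Any, Callable, TypeVar

T = TypeVar("T")


def cache_in_self(getter: Callable[[Any], T]) -> Callable[[Any], T]:
    attr = "_cached_" + getter.__name__

    @functools.wraps(getter)
    def wrapper(self: Any) -> T:
        value = getattr(self, attr, None)
        if value is None:
            # First call: build the dependency and stash it on self.
            value = getter(self)
            setattr(self, attr, value)
        return value

    return wrapper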


@ -102,6 +102,10 @@ class ServerNoticesManager:
Returns:
The room's ID, or None if no room could be found.
"""
# If there is no server notices MXID, then there is no server notices room
if self.server_notices_mxid is None:
return None
rooms = await self._store.get_rooms_for_local_user_where_membership_is(
user_id, [Membership.INVITE, Membership.JOIN]
)
@ -111,8 +115,10 @@ class ServerNoticesManager:
# be joined. This is kinda deliberate, in that if somebody somehow
# manages to invite the system user to a room, that doesn't make it
# the server notices room.
user_ids = await self._store.get_users_in_room(room.room_id)
if len(user_ids) <= 2 and self.server_notices_mxid in user_ids:
is_server_notices_room = await self._store.check_local_user_in_room(
user_id=self.server_notices_mxid, room_id=room.room_id
)
if is_server_notices_room:
# we found a room which our user shares with the system notice
# user
return room.room_id
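The new `check_local_user_in_room` call replaces materialising the whole membership list with a single targeted lookup. A rough sketch of what such a check boils down to, using sqlite3 and the local_current_membership table as an assumed schema (treat the table and column names as illustrative):

import sqlite3


def check_local_user_in_room(
    conn: sqlite3.Connection, user_id: str, room_id: str
) -> bool:
    # One indexed point lookup instead of fetching every member of the
    # room, which is what makes this cheaper than get_users_in_room.
    row = conn.execute(
        """
        SELECT 1 FROM local_current_membership
        WHERE user_id = ? AND room_id = ? AND membership = 'join'
        """,
        (user_id, room_id),
    ).fetchone()
    return row is not None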


@ -44,7 +44,6 @@ from synapse.logging.context import ContextResourceUsage
from synapse.replication.http.state import ReplicationUpdateCurrentStateRestServlet
from synapse.state import v1, v2
from synapse.storage.databases.main.events_worker import EventRedactBehaviour
from synapse.storage.roommember import ProfileInfo
from synapse.storage.state import StateFilter
from synapse.types import StateMap
from synapse.util.async_helpers import Linearizer
@ -210,11 +209,11 @@ class StateHandler:
ret = await self.resolve_state_groups_for_events(room_id, event_ids)
return await ret.get_state(self._state_storage_controller, state_filter)
async def get_current_users_in_room(
async def get_current_user_ids_in_room(
self, room_id: str, latest_event_ids: List[str]
) -> Dict[str, ProfileInfo]:
) -> Set[str]:
"""
Get the users who are currently in a room.
Get the IDs of the users who are currently in a room.
Note: This is much slower than using the equivalent method
`DataStore.get_users_in_room` or `DataStore.get_users_in_room_with_profiles`,
@ -225,15 +224,15 @@ class StateHandler:
room_id: The ID of the room.
latest_event_ids: Precomputed list of latest event IDs. Will be computed if None.
Returns:
Dictionary of user IDs to their profileinfo.
Set of user IDs in the room.
"""
assert latest_event_ids is not None
logger.debug("calling resolve_state_groups from get_current_users_in_room")
logger.debug("calling resolve_state_groups from get_current_user_ids_in_room")
entry = await self.resolve_state_groups_for_events(room_id, latest_event_ids)
state = await entry.get_state(self._state_storage_controller, StateFilter.all())
return await self.store.get_joined_users_from_state(room_id, state, entry)
return await self.store.get_joined_user_ids_from_state(room_id, state)
async def get_hosts_in_room_at_events(
self, room_id: str, event_ids: Collection[str]
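Since the renamed method now returns a plain Set[str] instead of a Dict[str, ProfileInfo], callers that need profiles must fetch them separately. A hypothetical caller, updated for the new signature (the state_handler argument and the ":example.com" suffix are stand-ins, not Synapse code):

from typing import Any, List, Set


async def count_local_users(
    state_handler: Any, room_id: str, latest_event_ids: List[str]
) -> int:
    # Only user IDs come back now; no ProfileInfo values to unpack.
    user_ids: Set[str] = await state_handler.get_current_user_ids_in_room(
        room_id, latest_event_ids
    )
    # ":example.com" stands in for the local server's name.
    return sum(1 for u in user_ids if u.endswith(":example.com"))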


@ -271,40 +271,41 @@ async def _get_power_level_for_sender(
async def _get_auth_chain_difference(
room_id: str,
state_sets: Sequence[Mapping[Any, str]],
event_map: Dict[str, EventBase],
unpersisted_events: Dict[str, EventBase],
state_res_store: StateResolutionStore,
) -> Set[str]:
"""Compare the auth chains of each state set and return the set of events
that only appear in some but not all of the auth chains.
that only appear in some, but not all, of the auth chains.
Args:
state_sets
event_map
state_res_store
state_sets: The input state sets we are trying to resolve across.
unpersisted_events: A map from event ID to EventBase containing all unpersisted
events involved in this resolution.
state_res_store:
Returns:
Set of event IDs
The auth difference of the given state sets, as a set of event IDs.
"""
# The `StateResolutionStore.get_auth_chain_difference` function assumes that
# all events passed to it (and their auth chains) have been persisted
# previously. This is not the case for any events in the `event_map`, and so
# we need to manually handle those events.
# previously. We need to manually handle any other events that are yet to be
# persisted.
#
# We do this by:
# 1. calculating the auth chain difference for the state sets based on the
# events in `event_map` alone
# 2. replacing any events in the state_sets that are also in `event_map`
# with their auth events (recursively), and then calling
# `store.get_auth_chain_difference` as normal
# 3. adding the results of 1 and 2 together.
# We do this in three steps:
# 1. Compute the set of unpersisted events belonging to the auth difference.
# 2. Replace any unpersisted events in the state_sets with their auth events,
# recursively, until the state_sets contain only persisted events.
# Then we call `store.get_auth_chain_difference` as normal, which computes
# the set of persisted events belonging to the auth difference.
# 3. Add the results of 1 and 2 together.
# Map from event ID in `event_map` to their auth event IDs, and their auth
# event IDs if they appear in the `event_map`. This is the intersection of
# the event's auth chain with the events in the `event_map` *plus* their
# Map from event ID in `unpersisted_events` to their auth event IDs, and their auth
# event IDs if they appear in the `unpersisted_events`. This is the intersection of
# the event's auth chain with the events in `unpersisted_events` *plus* their
# auth event IDs.
events_to_auth_chain: Dict[str, Set[str]] = {}
for event in event_map.values():
for event in unpersisted_events.values():
chain = {event.event_id}
events_to_auth_chain[event.event_id] = chain
@ -312,16 +313,16 @@ async def _get_auth_chain_difference(
while to_search:
for auth_id in to_search.pop().auth_event_ids():
chain.add(auth_id)
auth_event = event_map.get(auth_id)
auth_event = unpersisted_events.get(auth_id)
if auth_event:
to_search.append(auth_event)
# We now a) calculate the auth chain difference for the unpersisted events
# and b) work out the state sets to pass to the store.
# We now 1) calculate the auth chain difference for the unpersisted events
# and 2) work out the state sets to pass to the store.
#
# Note: If the `event_map` is empty (which is the common case), we can do a
# Note: If there are no `unpersisted_events` (which is the common case), we can do a
# much simpler calculation.
if event_map:
if unpersisted_events:
# The list of state sets to pass to the store, where each state set is a set
# of the event ids making up the state. This is similar to `state_sets`,
# except that (a) we only have event ids, not the complete
@ -344,14 +345,18 @@ async def _get_auth_chain_difference(
for event_id in state_set.values():
event_chain = events_to_auth_chain.get(event_id)
if event_chain is not None:
# We have an event in `event_map`. We add all the auth
# events that it references (that aren't also in `event_map`).
set_ids.update(e for e in event_chain if e not in event_map)
# We have an unpersisted event. We add all the auth
# events that it references which are also unpersisted.
set_ids.update(
e for e in event_chain if e not in unpersisted_events
)
# We also add the full chain of unpersisted event IDs
# referenced by this state set, so that we can work out the
# auth chain difference of the unpersisted events.
unpersisted_ids.update(e for e in event_chain if e in event_map)
unpersisted_ids.update(
e for e in event_chain if e in unpersisted_events
)
else:
set_ids.add(event_id)
@ -361,15 +366,15 @@ async def _get_auth_chain_difference(
union = unpersisted_set_ids[0].union(*unpersisted_set_ids[1:])
intersection = unpersisted_set_ids[0].intersection(*unpersisted_set_ids[1:])
difference_from_event_map: Collection[str] = union - intersection
auth_difference_unpersisted_part: Collection[str] = union - intersection
else:
difference_from_event_map = ()
auth_difference_unpersisted_part = ()
state_sets_ids = [set(state_set.values()) for state_set in state_sets]
difference = await state_res_store.get_auth_chain_difference(
room_id, state_sets_ids
)
difference.update(difference_from_event_map)
difference.update(auth_difference_unpersisted_part)
return difference
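At its core, the unpersisted part of the auth difference is plain set algebra: events that appear in the union of the per-state-set chains but not in their intersection. A self-contained illustration of that step:

from typing import List, Set


def auth_difference_unpersisted(unpersisted_set_ids: List[Set[str]]) -> Set[str]:
    if not unpersisted_set_ids:
        return set()
    union = unpersisted_set_ids[0].union(*unpersisted_set_ids[1:])
    intersection = unpersisted_set_ids[0].intersection(*unpersisted_set_ids[1:])
    # Events in some, but not all, of the sets form the difference.
    return union - intersection


# "$b" appears in only one of the two sets, so it is in the difference.
assert auth_difference_unpersisted([{"$a", "$b"}, {"$a"}]) == {"$b"}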


@ -23,7 +23,6 @@ from typing import (
List,
Mapping,
Optional,
Set,
Tuple,
)
@ -520,7 +519,7 @@ class StateStorageController:
)
return state_map.get(key)
async def get_current_hosts_in_room(self, room_id: str) -> Set[str]:
async def get_current_hosts_in_room(self, room_id: str) -> List[str]:
"""Get current hosts in room based on current state."""
await self._partial_state_room_tracker.await_full_state(room_id)

View file

@ -650,9 +650,6 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
txn, self.get_account_data_for_room, (user_id,)
)
self._invalidate_cache_and_stream(txn, self.get_push_rules_for_user, (user_id,))
self._invalidate_cache_and_stream(
txn, self.get_push_rules_enabled_for_user, (user_id,)
)
# This user might be contained in the ignored_by cache for other users,
# so we have to invalidate it all.
self._invalidate_all_cache_and_stream(txn, self.ignored_by)


@ -274,7 +274,6 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
receipt_types=(
ReceiptTypes.READ,
ReceiptTypes.READ_PRIVATE,
ReceiptTypes.UNSTABLE_READ_PRIVATE,
),
)
@ -459,6 +458,31 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
return await self.db_pool.runInteraction("get_push_action_users_in_range", f)
def _get_receipts_by_room_txn(
self, txn: LoggingTransaction, user_id: str
) -> List[Tuple[str, int]]:
receipt_types_clause, args = make_in_list_sql_clause(
self.database_engine,
"receipt_type",
(
ReceiptTypes.READ,
ReceiptTypes.READ_PRIVATE,
),
)
sql = f"""
SELECT room_id, MAX(stream_ordering)
FROM receipts_linearized
INNER JOIN events USING (room_id, event_id)
WHERE {receipt_types_clause}
AND user_id = ?
GROUP BY room_id
"""
args.extend((user_id,))
txn.execute(sql, args)
return cast(List[Tuple[str, int]], txn.fetchall())
async def get_unread_push_actions_for_user_in_range_for_http(
self,
user_id: str,
@ -482,106 +506,45 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
The list will have between 0 and `limit` entries.
"""
# find rooms that have a read receipt in them and return the next
# push actions
def get_after_receipt(
txn: LoggingTransaction,
) -> List[Tuple[str, str, int, str, bool]]:
# find rooms that have a read receipt in them and return the next
# push actions
receipt_types_clause, args = make_in_list_sql_clause(
self.database_engine,
"receipt_type",
(
ReceiptTypes.READ,
ReceiptTypes.READ_PRIVATE,
ReceiptTypes.UNSTABLE_READ_PRIVATE,
),
)
sql = f"""
SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions,
ep.highlight
FROM (
SELECT room_id,
MAX(stream_ordering) as stream_ordering
FROM events
INNER JOIN receipts_linearized USING (room_id, event_id)
WHERE {receipt_types_clause} AND user_id = ?
GROUP BY room_id
) AS rl,
event_push_actions AS ep
WHERE
ep.room_id = rl.room_id
AND ep.stream_ordering > rl.stream_ordering
AND ep.user_id = ?
AND ep.stream_ordering > ?
AND ep.stream_ordering <= ?
AND ep.notif = 1
ORDER BY ep.stream_ordering ASC LIMIT ?
"""
args.extend(
(user_id, user_id, min_stream_ordering, max_stream_ordering, limit)
)
txn.execute(sql, args)
return cast(List[Tuple[str, str, int, str, bool]], txn.fetchall())
after_read_receipt = await self.db_pool.runInteraction(
"get_unread_push_actions_for_user_in_range_http_arr", get_after_receipt
receipts_by_room = dict(
await self.db_pool.runInteraction(
"get_unread_push_actions_for_user_in_range_http_receipts",
self._get_receipts_by_room_txn,
user_id=user_id,
),
)
# There are rooms with push actions in them but you don't have a read receipt in
# them e.g. rooms you've been invited to, so get push actions for rooms which do
# not have read receipts in them too.
def get_no_receipt(
def get_push_actions_txn(
txn: LoggingTransaction,
) -> List[Tuple[str, str, int, str, bool]]:
receipt_types_clause, args = make_in_list_sql_clause(
self.database_engine,
"receipt_type",
(
ReceiptTypes.READ,
ReceiptTypes.READ_PRIVATE,
ReceiptTypes.UNSTABLE_READ_PRIVATE,
),
)
sql = f"""
SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions,
ep.highlight
sql = """
SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions, ep.highlight
FROM event_push_actions AS ep
INNER JOIN events AS e USING (room_id, event_id)
WHERE
ep.room_id NOT IN (
SELECT room_id FROM receipts_linearized
WHERE {receipt_types_clause} AND user_id = ?
GROUP BY room_id
)
AND ep.user_id = ?
ep.user_id = ?
AND ep.stream_ordering > ?
AND ep.stream_ordering <= ?
AND ep.notif = 1
ORDER BY ep.stream_ordering ASC LIMIT ?
"""
args.extend(
(user_id, user_id, min_stream_ordering, max_stream_ordering, limit)
)
txn.execute(sql, args)
txn.execute(sql, (user_id, min_stream_ordering, max_stream_ordering, limit))
return cast(List[Tuple[str, str, int, str, bool]], txn.fetchall())
no_read_receipt = await self.db_pool.runInteraction(
"get_unread_push_actions_for_user_in_range_http_nrr", get_no_receipt
push_actions = await self.db_pool.runInteraction(
"get_unread_push_actions_for_user_in_range_http", get_push_actions_txn
)
notifs = [
HttpPushAction(
event_id=row[0],
room_id=row[1],
stream_ordering=row[2],
actions=_deserialize_action(row[3], row[4]),
event_id=event_id,
room_id=room_id,
stream_ordering=stream_ordering,
actions=_deserialize_action(actions, highlight),
)
for row in after_read_receipt + no_read_receipt
for event_id, room_id, stream_ordering, actions, highlight in push_actions
# Only include push actions with a stream ordering after any receipt, or without any
# receipt present (invited to but never read rooms).
if stream_ordering > receipts_by_room.get(room_id, 0)
]
# Now sort it so it's ordered correctly, since currently it will
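The refactor replaces two receipt-aware SQL queries with one plain query plus a Python-side filter against `receipts_by_room`. The filter itself is simple enough to show in isolation (made-up rows; only the event_id, room_id, and stream_ordering columns matter here):

from typing import Dict, List, Tuple

Row = Tuple[str, str, int]  # (event_id, room_id, stream_ordering)


def filter_after_receipts(
    push_actions: List[Row], receipts_by_room: Dict[str, int]
) -> List[Row]:
    # Keep actions newer than the user's latest receipt in that room.
    # Rooms with no receipt at all (e.g. pending invites) default to 0,
    # so every action in them is kept.
    return [
        row for row in push_actions if row[2] > receipts_by_room.get(row[1], 0)
    ]


# Room "!a" was read up to stream ordering 10; room "!b" was never read.
actions = [("$e1", "!a", 9), ("$e2", "!a", 11), ("$e3", "!b", 3)]
assert filter_after_receipts(actions, {"!a": 10}) == [
    ("$e2", "!a", 11),
    ("$e3", "!b", 3),
]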
@ -617,106 +580,49 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
The list will have between 0 and `limit` entries.
"""
# find rooms that have a read receipt in them and return the most recent
# push actions
def get_after_receipt(
txn: LoggingTransaction,
) -> List[Tuple[str, str, int, str, bool, int]]:
receipt_types_clause, args = make_in_list_sql_clause(
self.database_engine,
"receipt_type",
(
ReceiptTypes.READ,
ReceiptTypes.READ_PRIVATE,
ReceiptTypes.UNSTABLE_READ_PRIVATE,
),
)
sql = f"""
SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions,
ep.highlight, e.received_ts
FROM (
SELECT room_id,
MAX(stream_ordering) as stream_ordering
FROM events
INNER JOIN receipts_linearized USING (room_id, event_id)
WHERE {receipt_types_clause} AND user_id = ?
GROUP BY room_id
) AS rl,
event_push_actions AS ep
INNER JOIN events AS e USING (room_id, event_id)
WHERE
ep.room_id = rl.room_id
AND ep.stream_ordering > rl.stream_ordering
AND ep.user_id = ?
AND ep.stream_ordering > ?
AND ep.stream_ordering <= ?
AND ep.notif = 1
ORDER BY ep.stream_ordering DESC LIMIT ?
"""
args.extend(
(user_id, user_id, min_stream_ordering, max_stream_ordering, limit)
)
txn.execute(sql, args)
return cast(List[Tuple[str, str, int, str, bool, int]], txn.fetchall())
after_read_receipt = await self.db_pool.runInteraction(
"get_unread_push_actions_for_user_in_range_email_arr", get_after_receipt
receipts_by_room = dict(
await self.db_pool.runInteraction(
"get_unread_push_actions_for_user_in_range_email_receipts",
self._get_receipts_by_room_txn,
user_id=user_id,
),
)
# There are rooms with push actions in them but you don't have a read receipt in
# them e.g. rooms you've been invited to, so get push actions for rooms which do
# not have read receipts in them too.
def get_no_receipt(
def get_push_actions_txn(
txn: LoggingTransaction,
) -> List[Tuple[str, str, int, str, bool, int]]:
receipt_types_clause, args = make_in_list_sql_clause(
self.database_engine,
"receipt_type",
(
ReceiptTypes.READ,
ReceiptTypes.READ_PRIVATE,
ReceiptTypes.UNSTABLE_READ_PRIVATE,
),
)
sql = f"""
sql = """
SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions,
ep.highlight, e.received_ts
FROM event_push_actions AS ep
INNER JOIN events AS e USING (room_id, event_id)
WHERE
ep.room_id NOT IN (
SELECT room_id FROM receipts_linearized
WHERE {receipt_types_clause} AND user_id = ?
GROUP BY room_id
)
AND ep.user_id = ?
ep.user_id = ?
AND ep.stream_ordering > ?
AND ep.stream_ordering <= ?
AND ep.notif = 1
ORDER BY ep.stream_ordering DESC LIMIT ?
"""
args.extend(
(user_id, user_id, min_stream_ordering, max_stream_ordering, limit)
)
txn.execute(sql, args)
txn.execute(sql, (user_id, min_stream_ordering, max_stream_ordering, limit))
return cast(List[Tuple[str, str, int, str, bool, int]], txn.fetchall())
no_read_receipt = await self.db_pool.runInteraction(
"get_unread_push_actions_for_user_in_range_email_nrr", get_no_receipt
push_actions = await self.db_pool.runInteraction(
"get_unread_push_actions_for_user_in_range_email", get_push_actions_txn
)
# Make a list of dicts from the two sets of results.
notifs = [
EmailPushAction(
event_id=row[0],
room_id=row[1],
stream_ordering=row[2],
actions=_deserialize_action(row[3], row[4]),
received_ts=row[5],
event_id=event_id,
room_id=room_id,
stream_ordering=stream_ordering,
actions=_deserialize_action(actions, highlight),
received_ts=received_ts,
)
for row in after_read_receipt + no_read_receipt
for event_id, room_id, stream_ordering, actions, highlight, received_ts in push_actions
# Only include push actions with a stream ordering after any receipt, or without any
# receipt present (invited to but never read rooms).
if stream_ordering > receipts_by_room.get(room_id, 0)
]
# Now sort it so it's ordered correctly, since currently it will
@ -792,26 +698,14 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
int(count_as_unread), # unread column
)
def _add_push_actions_to_staging_txn(txn: LoggingTransaction) -> None:
# We don't use simple_insert_many here to avoid the overhead
# of generating lists of dicts.
sql = """
INSERT INTO event_push_actions_staging
(event_id, user_id, actions, notif, highlight, unread)
VALUES (?, ?, ?, ?, ?, ?)
"""
txn.execute_batch(
sql,
(
_gen_entry(user_id, actions)
for user_id, actions in user_id_actions.items()
),
)
return await self.db_pool.runInteraction(
"add_push_actions_to_staging", _add_push_actions_to_staging_txn
await self.db_pool.simple_insert_many(
"event_push_actions_staging",
keys=("event_id", "user_id", "actions", "notif", "highlight", "unread"),
values=[
_gen_entry(user_id, actions)
for user_id, actions in user_id_actions.items()
],
desc="add_push_actions_to_staging",
)
async def remove_push_actions_from_staging(self, event_id: str) -> None:
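`simple_insert_many` takes parallel `keys` and `values` sequences, so no per-row dicts need to be built. A simplified stand-in showing the shape of such a helper (Synapse's real version lives on DatabasePool and runs inside its transaction machinery; this one just uses sqlite3 directly):

import sqlite3
from typing import Iterable, Sequence, Tuple


def simple_insert_many(
    conn: sqlite3.Connection,
    table: str,
    keys: Sequence[str],
    values: Iterable[Tuple],
) -> None:
    # One prepared INSERT, executed as a batch with one tuple per row.
    # Identifiers are interpolated only because this is a sketch; the
    # row values themselves are always bound as parameters.
    sql = "INSERT INTO %s (%s) VALUES (%s)" % (
        table,
        ", ".join(keys),
        ", ".join("?" for _ in keys),
    )
    conn.executemany(sql, values)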


@ -2111,7 +2111,14 @@ class EventsWorkerStore(SQLBaseStore):
AND room_id = ?
/* Make sure event is not rejected */
AND rejections.event_id IS NULL
ORDER BY origin_server_ts %s
/**
* First sort by the message timestamp. If the message timestamps are the
* same, we want the message that logically comes "next" (before/after
* the given timestamp) based on the DAG and its topological order (`depth`).
* Finally, we can tie-break based on when it was received on the server
* (`stream_ordering`).
*/
ORDER BY origin_server_ts %s, depth %s, stream_ordering %s
LIMIT 1;
"""
@ -2130,7 +2137,8 @@ class EventsWorkerStore(SQLBaseStore):
order = "ASC"
txn.execute(
sql_template % (comparison_operator, order), (timestamp, room_id)
sql_template % (comparison_operator, order, order, order),
(timestamp, room_id),
)
row = txn.fetchone()
if row:
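With the added tie-breakers, the query sorts by origin_server_ts, then depth, then stream_ordering, all in the same direction as the comparison. The same three-level ordering, expressed in Python for intuition (illustrative event dicts, not Synapse's types):

# Identical timestamps are disambiguated by DAG depth, then by when
# the server received the event (stream_ordering).
events = [
    {"event_id": "$a", "origin_server_ts": 100, "depth": 5, "stream_ordering": 2},
    {"event_id": "$b", "origin_server_ts": 100, "depth": 6, "stream_ordering": 1},
]

# Searching forwards from a timestamp corresponds to ascending order
# on all three keys, i.e. taking the minimum:
best = min(
    events,
    key=lambda e: (e["origin_server_ts"], e["depth"], e["stream_ordering"]),
)
assert best["event_id"] == "$a"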
