Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes
commit 5299707329
@@ -173,11 +173,13 @@ steps:
       queue: "medium"
     command:
       - "bash .buildkite/merge_base_branch.sh"
-      - "bash .buildkite/synapse_sytest.sh"
+      - "bash /synapse_sytest.sh"
     plugins:
       - docker#v3.0.1:
           image: "matrixdotorg/sytest-synapse:py35"
           propagate-environment: true
+          always-pull: true
+          workdir: "/src"
     retry:
       automatic:
         - exit_status: -1

@@ -192,11 +194,13 @@ steps:
       POSTGRES: "1"
     command:
       - "bash .buildkite/merge_base_branch.sh"
-      - "bash .buildkite/synapse_sytest.sh"
+      - "bash /synapse_sytest.sh"
     plugins:
       - docker#v3.0.1:
           image: "matrixdotorg/sytest-synapse:py35"
           propagate-environment: true
+          always-pull: true
+          workdir: "/src"
     retry:
       automatic:
         - exit_status: -1

@@ -212,11 +216,13 @@ steps:
       WORKERS: "1"
     command:
       - "bash .buildkite/merge_base_branch.sh"
-      - "bash .buildkite/synapse_sytest.sh"
+      - "bash /synapse_sytest.sh"
     plugins:
       - docker#v3.0.1:
           image: "matrixdotorg/sytest-synapse:py35"
           propagate-environment: true
+          always-pull: true
+          workdir: "/src"
     soft_fail: true
     retry:
       automatic:

@@ -1,145 +0,0 @@
-#!/bin/bash
-#
-# Fetch sytest, and then run the tests for synapse. The entrypoint for the
-# sytest-synapse docker images.
-
-set -ex
-
-if [ -n "$BUILDKITE" ]
-then
-    SYNAPSE_DIR=`pwd`
-else
-    SYNAPSE_DIR="/src"
-fi
-
-# Attempt to find a sytest to use.
-# If /sytest exists, it means that a SyTest checkout has been mounted into the Docker image.
-if [ -d "/sytest" ]; then
-    # If the user has mounted in a SyTest checkout, use that.
-    echo "Using local sytests..."
-
-    # create ourselves a working directory and dos2unix some scripts therein
-    mkdir -p /work/jenkins
-    for i in install-deps.pl run-tests.pl tap-to-junit-xml.pl jenkins/prep_sytest_for_postgres.sh; do
-        dos2unix -n "/sytest/$i" "/work/$i"
-    done
-    ln -sf /sytest/tests /work
-    ln -sf /sytest/keys /work
-    SYTEST_LIB="/sytest/lib"
-else
if [ -n "BUILDKITE_BRANCH" ]
|
||||
-    then
-        branch_name=$BUILDKITE_BRANCH
-    else
-        # Otherwise, try and find out what the branch that the Synapse checkout is using. Fall back to develop if it's not a branch.
-        branch_name="$(git --git-dir=/src/.git symbolic-ref HEAD 2>/dev/null)" || branch_name="develop"
-    fi
-
-    # Try and fetch the branch
-    echo "Trying to get same-named sytest branch..."
-    wget -q https://github.com/matrix-org/sytest/archive/$branch_name.tar.gz -O sytest.tar.gz || {
-        # Probably a 404, fall back to develop
-        echo "Using develop instead..."
-        wget -q https://github.com/matrix-org/sytest/archive/develop.tar.gz -O sytest.tar.gz
-    }
-
-    mkdir -p /work
-    tar -C /work --strip-components=1 -xf sytest.tar.gz
-    SYTEST_LIB="/work/lib"
-fi
-
-cd /work
-
-# PostgreSQL setup
-if [ -n "$POSTGRES" ]
-then
-    export PGUSER=postgres
-    export POSTGRES_DB_1=pg1
-    export POSTGRES_DB_2=pg2
-
-    # Start the database
-    su -c 'eatmydata /usr/lib/postgresql/9.6/bin/pg_ctl -w -D /var/lib/postgresql/data start' postgres
-
-    # Use the Jenkins script to write out the configuration for a PostgreSQL using Synapse
-    jenkins/prep_sytest_for_postgres.sh
-
-    # Make the test databases for the two Synapse servers that will be spun up
-    su -c 'psql -c "CREATE DATABASE pg1;"' postgres
-    su -c 'psql -c "CREATE DATABASE pg2;"' postgres
-
-fi
-
-if [ -n "$OFFLINE" ]; then
-    # if we're in offline mode, just put synapse into the virtualenv, and
-    # hope that the deps are up-to-date.
-    #
-    # (`pip install -e` likes to reinstall setuptools even if it's already installed,
-    # so we just run setup.py explicitly.)
-    #
-    (cd $SYNAPSE_DIR && /venv/bin/python setup.py -q develop)
-else
-    # We've already created the virtualenv, but lets double check we have all
-    # deps.
-    /venv/bin/pip install -q --upgrade --no-cache-dir -e $SYNAPSE_DIR
-    /venv/bin/pip install -q --upgrade --no-cache-dir \
-        lxml psycopg2 coverage codecov tap.py
-
-    # Make sure all Perl deps are installed -- this is done in the docker build
-    # so will only install packages added since the last Docker build
-    ./install-deps.pl
-fi
-
-
-# Run the tests
->&2 echo "+++ Running tests"
-
-RUN_TESTS=(
-    perl -I "$SYTEST_LIB" ./run-tests.pl --python=/venv/bin/python --synapse-directory=$SYNAPSE_DIR --coverage -O tap --all
-)
-
-TEST_STATUS=0
-
-if [ -n "$WORKERS" ]; then
-    RUN_TESTS+=(-I Synapse::ViaHaproxy --dendron-binary=/pydron.py)
-else
-    RUN_TESTS+=(-I Synapse)
-fi
-
-"${RUN_TESTS[@]}" "$@" > results.tap || TEST_STATUS=$?
-
-if [ $TEST_STATUS -ne 0 ]; then
-    >&2 echo -e "run-tests \e[31mFAILED\e[0m: exit code $TEST_STATUS"
-else
-    >&2 echo -e "run-tests \e[32mPASSED\e[0m"
-fi
-
->&2 echo "--- Copying assets"
-
-# Copy out the logs
-mkdir -p /logs
-cp results.tap /logs/results.tap
-rsync --ignore-missing-args --min-size=1B -av server-0 server-1 /logs --include "*/" --include="*.log.*" --include="*.log" --exclude="*"
-
-# Upload coverage to codecov and upload files, if running on Buildkite
-if [ -n "$BUILDKITE" ]
-then
-    /venv/bin/coverage combine || true
-    /venv/bin/coverage xml || true
-    /venv/bin/codecov -X gcov -f coverage.xml
-
-    wget -O buildkite.tar.gz https://github.com/buildkite/agent/releases/download/v3.13.0/buildkite-agent-linux-amd64-3.13.0.tar.gz
-    tar xvf buildkite.tar.gz
-    chmod +x ./buildkite-agent
-
-    # Upload the files
-    ./buildkite-agent artifact upload "/logs/**/*.log*"
-    ./buildkite-agent artifact upload "/logs/results.tap"
-
-    if [ $TEST_STATUS -ne 0 ]; then
-        # Annotate, if failure
-        /venv/bin/python $SYNAPSE_DIR/.buildkite/format_tap.py /logs/results.tap "$BUILDKITE_LABEL" | ./buildkite-agent annotate --style="error" --context="$BUILDKITE_LABEL"
-    fi
-fi
-
-
-exit $TEST_STATUS
CHANGES.md (30 changes)

@@ -1,3 +1,33 @@
+Synapse 1.1.0 (2019-07-04)
+==========================
+
+As of v1.1.0, Synapse no longer supports Python 2, nor Postgres version 9.4.
+See the [upgrade notes](UPGRADE.rst#upgrading-to-v110) for more details.
+
+This release also deprecates the use of environment variables to configure the
+docker image. See the [docker README](https://github.com/matrix-org/synapse/blob/release-v1.1.0/docker/README.md#legacy-dynamic-configuration-file-support)
+for more details.
+
+No changes since 1.1.0rc2.
+
+
+Synapse 1.1.0rc2 (2019-07-03)
+=============================
+
+Bugfixes
+--------
+
+- Fix regression in 1.1rc1 where OPTIONS requests to the media repo would fail. ([\#5593](https://github.com/matrix-org/synapse/issues/5593))
+- Removed the `SYNAPSE_SMTP_*` docker container environment variables. Using these environment variables prevented the docker container from starting in Synapse v1.0, even though they didn't actually allow any functionality anyway. ([\#5596](https://github.com/matrix-org/synapse/issues/5596))
+- Fix a number of "Starting txn from sentinel context" warnings. ([\#5605](https://github.com/matrix-org/synapse/issues/5605))
+
+
+Internal Changes
+----------------
+
+- Update github templates. ([\#5552](https://github.com/matrix-org/synapse/issues/5552))
+
+
 Synapse 1.1.0rc1 (2019-07-02)
 =============================

@@ -30,11 +30,10 @@ use github's pull request workflow to review the contribution, and either ask
 you to make any refinements needed or merge it and make them ourselves. The
 changes will then land on master when we next do a release.
 
-We use `CircleCI <https://circleci.com/gh/matrix-org>`_ and `Buildkite
-<https://buildkite.com/matrix-dot-org/synapse>`_ for continuous integration.
-Buildkite builds need to be authorised by a maintainer. If your change breaks
-the build, this will be shown in GitHub, so please keep an eye on the pull
-request for feedback.
+We use `Buildkite <https://buildkite.com/matrix-dot-org/synapse>`_ for
+continuous integration. Buildkite builds need to be authorised by a
+maintainer. If your change breaks the build, this will be shown in GitHub, so
+please keep an eye on the pull request for feedback.
 
 To run unit tests in a local development environment, you can use:
 
@@ -70,13 +69,21 @@ All changes, even minor ones, need a corresponding changelog / newsfragment
 entry. These are managed by Towncrier
 (https://github.com/hawkowl/towncrier).
 
-To create a changelog entry, make a new file in the ``changelog.d``
-file named in the format of ``PRnumber.type``. The type can be
-one of ``feature``, ``bugfix``, ``removal`` (also used for
-deprecations), or ``misc`` (for internal-only changes).
+To create a changelog entry, make a new file in the ``changelog.d`` directory named
+in the format of ``PRnumber.type``. The type can be one of the following:
 
-The content of the file is your changelog entry, which can contain Markdown
-formatting. The entry should end with a full stop ('.') for consistency.
+* ``feature``.
+* ``bugfix``.
+* ``docker`` (for updates to the Docker image).
+* ``doc`` (for updates to the documentation).
+* ``removal`` (also used for deprecations).
+* ``misc`` (for internal-only changes).
+
+The content of the file is your changelog entry, which should be a short
+description of your change in the same style as the rest of our `changelog
+<https://github.com/matrix-org/synapse/blob/master/CHANGES.md>`_. The file can
+contain Markdown formatting, and should end with a full stop ('.') for
+consistency.
 
 Adding credits to the changelog is encouraged, we value your
 contributions and would like to have you shouted out in the release notes!

@@ -7,6 +7,7 @@ include demo/README
 include demo/demo.tls.dh
 include demo/*.py
 include demo/*.sh
+include sytest-blacklist
 
 recursive-include synapse/storage/schema *.sql
 recursive-include synapse/storage/schema *.sql.postgres

@@ -272,7 +272,7 @@ to install using pip and a virtualenv::
 
    virtualenv -p python3 env
    source env/bin/activate
-   python -m pip install --no-pep-517 -e .[all]
+   python -m pip install --no-use-pep517 -e .[all]
 
 This will run a process of downloading and installing all the needed
 dependencies into a virtual env.

changelog.d/5397.doc (new file)
@@ -0,0 +1 @@
+Add information about nginx normalisation to reverse_proxy.rst. Contributed by @skalarproduktraum - thanks!

changelog.d/5544.misc (new file)
@@ -0,0 +1 @@
+Added opentracing and configuration options.

@@ -1 +0,0 @@
-Update github templates.

changelog.d/5589.feature (new file)
@@ -0,0 +1 @@
+Add ability to pull all locally stored events out of synapse that a particular user can see.

@@ -1 +0,0 @@
-Fix regression in 1.1rc1 where OPTIONS requests to the media repo would fail.

@@ -1 +0,0 @@
-Removed the `SYNAPSE_SMTP_*` docker container environment variables. Using these environment variables prevented the docker container from starting in Synapse v1.0, even though they didn't actually allow any functionality anyway. Users are advised to remove `SYNAPSE_SMTP_HOST`, `SYNAPSE_SMTP_PORT`, `SYNAPSE_SMTP_USER`, `SYNAPSE_SMTP_PASSWORD` and `SYNAPSE_SMTP_FROM` environment variables from their docker run commands.

changelog.d/5597.feature (new file)
@@ -0,0 +1 @@
+Add a basic admin command app to allow server operators to run Synapse admin commands separately from the main production instance.

@@ -1 +0,0 @@
-Fix a number of "Starting txn from sentinel context" warnings.

changelog.d/5606.misc (new file)
@@ -0,0 +1 @@
+Move logging code out of `synapse.util` and into `synapse.logging`.

changelog.d/5609.bugfix (new file)
@@ -0,0 +1 @@
+Fix 'utime went backwards' errors on daemonization.

changelog.d/5611.misc (new file)
@@ -0,0 +1 @@
+Add a blacklist file to the repo to blacklist certain sytests from failing CI.

changelog.d/5613.feature (new file)
@@ -0,0 +1 @@
+Add `sender` and `origin_server_ts` fields to `m.replace`.

changelog.d/5616.misc (new file)
@@ -0,0 +1 @@
+Make runtime errors surrounding password reset emails much clearer.

changelog.d/5617.misc (new file)
@@ -0,0 +1 @@
+Move logging code out of `synapse.util` and into `synapse.logging`.

changelog.d/5619.docker (new file)
@@ -0,0 +1 @@
+Base Docker image on a newer Alpine Linux version (3.8 -> 3.10).

changelog.d/5620.docker (new file)
@@ -0,0 +1 @@
+Add missing space in default logging file format generated by the Docker image.

changelog.d/5621.bugfix (new file)
@@ -0,0 +1 @@
+Various minor fixes to the federation request rate limiter.

changelog.d/5622.misc (new file)
@@ -0,0 +1 @@
+Remove dead code for persisting outgoing federation transactions.

changelog.d/5623.feature (new file)
@@ -0,0 +1 @@
+Add default push rule to ignore reactions.

changelog.d/5625.removal (new file)
@@ -0,0 +1 @@
+Remove support for the `invite_3pid_guest` configuration setting.

changelog.d/5626.feature (new file)
@@ -0,0 +1 @@
+Include the original event when asking for its relations.

changelog.d/5627.misc (new file)
@@ -0,0 +1 @@
+Add `lint.sh` to the scripts-dev folder which will run all linting steps required by CI.

changelog.d/5628.misc (new file)
@@ -0,0 +1 @@
+Move RegistrationHandler.get_or_create_user to test code.

changelog.d/5630.misc (new file)
@@ -0,0 +1 @@
+Add some more common python virtual-environment paths to the black exclusion list.

changelog.d/5637.misc (new file)
@@ -0,0 +1 @@
+Unblacklist some user_directory sytests.

changelog.d/5638.bugfix (new file)
@@ -0,0 +1 @@
+Fix requests to the `/store_invite` endpoint of identity servers being sent in the wrong format.

changelog.d/5639.misc (new file)
@@ -0,0 +1 @@
+Factor out some redundant code in the login implementation.

changelog.d/5640.misc (new file)
@@ -0,0 +1 @@
+Update ModuleApi to avoid register(generate_token=True).

changelog.d/5641.misc (new file)
@@ -0,0 +1 @@
+Remove access-token support from RegistrationHandler.register, and rename it.

changelog.d/5642.misc (new file)
@@ -0,0 +1 @@
+Remove access-token support from `RegistrationStore.register`, and rename it.

changelog.d/5643.misc (new file)
@@ -0,0 +1 @@
+Improve logging for auto-join when a new user is created.

changelog.d/5644.bugfix (new file)
@@ -0,0 +1 @@
+Fix newly-registered users not being able to lookup their own profile without joining a room.

changelog.d/5645.misc (new file)
@@ -0,0 +1 @@
+Remove unused and unnecessary check for FederationDeniedError in _exception_to_failure.

changelog.d/5651.doc (new file)
@@ -0,0 +1 @@
+--no-pep517 should be --no-use-pep517 in the documentation to setup the development environment.

changelog.d/5654.bugfix (new file)
@@ -0,0 +1 @@
+Fix bug in #5626 that prevented the original_event field from actually having the contents of the original event in a call to `/relations`.

changelog.d/5655.misc (new file)
@@ -0,0 +1 @@
+Fix a small typo in a code comment.

changelog.d/5656.misc (new file)
@@ -0,0 +1 @@
+Clean up exception handling around client access tokens.

changelog.d/5657.misc (new file)
@@ -0,0 +1 @@
+Add a mechanism for per-test homeserver configuration in the unit tests.

changelog.d/5658.bugfix (new file)
@@ -0,0 +1 @@
+Fix 3PID bind requests being sent to identity servers as `application/x-form-www-urlencoded` data, which is deprecated.

changelog.d/5659.misc (new file)
@@ -0,0 +1 @@
+Inline issue_access_token.

changelog.d/5660.feature (new file)
@@ -0,0 +1 @@
+Implement `session_lifetime` configuration option, after which access tokens will expire.

changelog.d/5661.doc (new file)
@@ -0,0 +1 @@
+Improvements to Postgres setup instructions. Contributed by @Lrizika - thanks!

changelog.d/5664.misc (new file)
@@ -0,0 +1 @@
+Update the sytest BuildKite configuration to checkout Synapse in `/src`.

changelog.d/5673.misc (new file)
@@ -0,0 +1 @@
+Add a `docker` type to the towncrier configuration.

changelog.d/5674.feature (new file)
@@ -0,0 +1 @@
+Return "This account has been deactivated" when a deactivated user tries to login.

@@ -1,7 +1,7 @@
-# Example log_config file for synapse. To enable, point `log_config` to it in 
+# Example log_config file for synapse. To enable, point `log_config` to it in
 # `homeserver.yaml`, and restart synapse.
 #
-# This configuration will produce similar results to the defaults within 
+# This configuration will produce similar results to the defaults within
 # synapse, but can be edited to give more flexibility.
 
 version: 1

@@ -12,7 +12,7 @@ formatters:
 
 filters:
   context:
-    (): synapse.util.logcontext.LoggingContextFilter
+    (): synapse.logging.context.LoggingContextFilter
     request: ""
 
 handlers:

@@ -35,7 +35,7 @@ handlers:
 root:
     level: INFO
     handlers: [console] # to use file handler instead, switch to [file]
 
 
 loggers:
     synapse:
         level: INFO

@@ -36,7 +36,7 @@ from synapse.util import origin_from_ucid
 
 from synapse.app.homeserver import SynapseHomeServer
 
-# from synapse.util.logutils import log_function
+# from synapse.logging.utils import log_function
 
 from twisted.internet import reactor, defer
 from twisted.python import log

@@ -8,7 +8,7 @@ formatters:
 
 filters:
   context:
-    (): synapse.util.logcontext.LoggingContextFilter
+    (): synapse.logging.context.LoggingContextFilter
     request: ""
 
 handlers:

debian/changelog (14 changes, vendored)

@@ -1,9 +1,19 @@
-matrix-synapse-py3 (1.0.0+nmu1) UNRELEASED; urgency=medium
+matrix-synapse-py3 (1.1.0-1) UNRELEASED; urgency=medium
+
+  [ Amber Brown ]
+  * Update logging config defaults to match API changes in Synapse.
+
+ -- Erik Johnston <erikj@rae>  Thu, 04 Jul 2019 13:59:02 +0100
+
+matrix-synapse-py3 (1.1.0) stable; urgency=medium
 
   [ Silke Hofstra ]
   * Include systemd-python to allow logging to the systemd journal.
 
- -- Silke Hofstra <silke@slxh.eu>  Wed, 29 May 2019 09:45:29 +0200
+  [ Synapse Packaging team ]
+  * New synapse release 1.1.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Thu, 04 Jul 2019 11:43:41 +0100
 
 matrix-synapse-py3 (1.0.0) stable; urgency=medium
 
debian/log.yaml (2 changes, vendored)

@@ -7,7 +7,7 @@ formatters:
 
 filters:
   context:
-    (): synapse.util.logcontext.LoggingContextFilter
+    (): synapse.logging.context.LoggingContextFilter
     request: ""
 
 handlers:

@@ -16,7 +16,7 @@ ARG PYTHON_VERSION=3.7
 ###
 ### Stage 0: builder
 ###
-FROM docker.io/python:${PYTHON_VERSION}-alpine3.8 as builder
+FROM docker.io/python:${PYTHON_VERSION}-alpine3.10 as builder
 
 # install the OS build deps
 
@@ -55,7 +55,7 @@ RUN pip install --prefix="/install" --no-warn-script-location \
 ### Stage 1: runtime
 ###
 
-FROM docker.io/python:${PYTHON_VERSION}-alpine3.8
+FROM docker.io/python:${PYTHON_VERSION}-alpine3.10
 
 # xmlsec is required for saml support
 RUN apk add --no-cache --virtual .runtime_deps \

@@ -2,11 +2,11 @@ version: 1
 
 formatters:
   precise:
-    format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s- %(message)s'
+    format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s'
 
 filters:
   context:
-    (): synapse.util.logcontext.LoggingContextFilter
+    (): synapse.logging.context.LoggingContextFilter
     request: ""
 
 handlers:

@@ -1,4 +1,4 @@
-Log contexts
+Log Contexts
 ============
 
 .. contents::

@@ -12,7 +12,7 @@ record.
 Logcontexts are also used for CPU and database accounting, so that we can track
 which requests were responsible for high CPU use or database activity.
 
-The ``synapse.util.logcontext`` module provides facilities for managing the
+The ``synapse.logging.context`` module provides facilities for managing the
 current log context (as well as providing the ``LoggingContextFilter`` class).
 
 Deferreds make the whole thing complicated, so this document describes how it

@@ -27,19 +27,19 @@ found them:
 
 .. code:: python
 
-    from synapse.util import logcontext         # omitted from future snippets
+    from synapse.logging import context         # omitted from future snippets
 
     def handle_request(request_id):
-        request_context = logcontext.LoggingContext()
+        request_context = context.LoggingContext()
 
-        calling_context = logcontext.LoggingContext.current_context()
-        logcontext.LoggingContext.set_current_context(request_context)
+        calling_context = context.LoggingContext.current_context()
+        context.LoggingContext.set_current_context(request_context)
         try:
             request_context.request = request_id
             do_request_handling()
             logger.debug("finished")
         finally:
-            logcontext.LoggingContext.set_current_context(calling_context)
+            context.LoggingContext.set_current_context(calling_context)
 
     def do_request_handling():
         logger.debug("phew")  # this will be logged against request_id

@@ -51,7 +51,7 @@ written much more succinctly as:
 .. code:: python
 
     def handle_request(request_id):
-        with logcontext.LoggingContext() as request_context:
+        with context.LoggingContext() as request_context:
             request_context.request = request_id
             do_request_handling()
             logger.debug("finished")

@@ -74,7 +74,7 @@ blocking operation, and returns a deferred:
 
     @defer.inlineCallbacks
     def handle_request(request_id):
-        with logcontext.LoggingContext() as request_context:
+        with context.LoggingContext() as request_context:
             request_context.request = request_id
             yield do_request_handling()
             logger.debug("finished")

@@ -179,7 +179,7 @@ though, we need to make up a new Deferred, or we get a Deferred back from
 external code. We need to make it follow our rules.
 
 The easy way to do it is with a combination of ``defer.inlineCallbacks``, and
-``logcontext.PreserveLoggingContext``. Suppose we want to implement ``sleep``,
+``context.PreserveLoggingContext``. Suppose we want to implement ``sleep``,
 which returns a deferred which will run its callbacks after a given number of
 seconds. That might look like:
 
@@ -204,13 +204,13 @@ That doesn't follow the rules, but we can fix it by wrapping it with
 This technique works equally for external functions which return deferreds,
 or deferreds we have made ourselves.
 
-You can also use ``logcontext.make_deferred_yieldable``, which just does the
+You can also use ``context.make_deferred_yieldable``, which just does the
 boilerplate for you, so the above could be written:
 
 .. code:: python
 
     def sleep(seconds):
-        return logcontext.make_deferred_yieldable(get_sleep_deferred(seconds))
+        return context.make_deferred_yieldable(get_sleep_deferred(seconds))
 
 
 Fire-and-forget

@@ -279,7 +279,7 @@ Obviously that option means that the operations done in
 that might be fixed by setting a different logcontext via a ``with
 LoggingContext(...)`` in ``background_operation``).
 
-The second option is to use ``logcontext.run_in_background``, which wraps a
+The second option is to use ``context.run_in_background``, which wraps a
 function so that it doesn't reset the logcontext even when it returns an
 incomplete deferred, and adds a callback to the returned deferred to reset the
 logcontext. In other words, it turns a function that follows the Synapse rules

@@ -293,7 +293,7 @@ It can be used like this:
     def do_request_handling():
         yield foreground_operation()
 
-        logcontext.run_in_background(background_operation)
+        context.run_in_background(background_operation)
 
         # this will now be logged against the request context
         logger.debug("Request handling complete")

@@ -332,7 +332,7 @@ gathered:
         result = yield defer.gatherResults([d1, d2])
 
 In this case particularly, though, option two, of using
-``logcontext.preserve_fn`` almost certainly makes more sense, so that
+``context.preserve_fn`` almost certainly makes more sense, so that
 ``operation1`` and ``operation2`` are both logged against the original
 logcontext. This looks like:
 
@@ -340,8 +340,8 @@ logcontext. This looks like:
 
     @defer.inlineCallbacks
     def do_request_handling():
-        d1 = logcontext.preserve_fn(operation1)()
-        d2 = logcontext.preserve_fn(operation2)()
+        d1 = context.preserve_fn(operation1)()
+        d2 = context.preserve_fn(operation2)()
 
         with PreserveLoggingContext():
             result = yield defer.gatherResults([d1, d2])

@@ -381,7 +381,7 @@ off the background process, and then leave the ``with`` block to wait for it:
 .. code:: python
 
     def handle_request(request_id):
-        with logcontext.LoggingContext() as request_context:
+        with context.LoggingContext() as request_context:
             request_context.request = request_id
             d = do_request_handling()
 
@@ -414,7 +414,7 @@ runs its callbacks in the original logcontext, all is happy.
 
 The business of a Deferred which runs its callbacks in the original logcontext
 isn't hard to achieve — we have it today, in the shape of
-``logcontext._PreservingContextDeferred``:
+``context._PreservingContextDeferred``:
 
 .. code:: python
 
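In short, the mechanical change running through this documentation diff is an import-path rename. A minimal sketch of the new form (same helpers, new module; grounded in the names appearing above):

    # After this change, logcontext helpers live under synapse.logging.context:
    from synapse.logging.context import LoggingContext, run_in_background
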
@@ -34,9 +34,14 @@ Assuming your PostgreSQL database user is called ``postgres``, create a user
    su - postgres
    createuser --pwprompt synapse_user
 
-The PostgreSQL database used *must* have the correct encoding set, otherwise it
-would not be able to store UTF8 strings. To create a database with the correct
-encoding use, e.g.::
+Before you can authenticate with the ``synapse_user``, you must create a
+database that it can access. To create a database, first connect to the database
+with your database user::
+
+   su - postgres
+   psql
+
+and then run::
 
    CREATE DATABASE synapse
     ENCODING 'UTF8'

@@ -46,7 +51,13 @@ encoding use, e.g.::
     OWNER synapse_user;
 
 This would create an appropriate database named ``synapse`` owned by the
-``synapse_user`` user (which must already exist).
+``synapse_user`` user (which must already have been created as above).
+
+Note that the PostgreSQL database *must* have the correct encoding set (as
+shown above), otherwise it will not be able to store UTF8 strings.
+
+You may need to enable password authentication so ``synapse_user`` can connect
+to the database. See https://www.postgresql.org/docs/11/auth-pg-hba-conf.html.
 
 Tuning Postgres
 ===============

@@ -48,6 +48,8 @@ Let's assume that we expect clients to connect to our server at
           proxy_set_header X-Forwarded-For $remote_addr;
       }
   }
 
+Do not add a `/` after the port in `proxy_pass`, otherwise nginx will canonicalise/normalise the URI.
+
 * Caddy::
 
@@ -786,6 +786,17 @@ uploads_path: "DATADIR/uploads"
 # renew_at: 1w
 # renew_email_subject: "Renew your %(app)s account"
 
+# Time that a user's session remains valid for, after they log in.
+#
+# Note that this is not currently compatible with guest logins.
+#
+# Note also that this is calculated at login time: changes are not applied
+# retrospectively to users who have already logged in.
+#
+# By default, this is infinite.
+#
+#session_lifetime: 24h
+
 # The user must provide all of the below types of 3PID when registering.
 #
 #registrations_require_3pid:

@@ -1395,3 +1406,20 @@ password_config:
 #   module: "my_custom_project.SuperRulesSet"
 #   config:
 #     example_option: 'things'
+
+
+## Opentracing ##
+# These settings enable opentracing, which implements distributed tracing.
+# This allows you to observe the causal chain of events across servers
+# including requests, key lookups etc., across any server running
+# synapse or any other services which support opentracing
+# (specifically those implemented with jaeger).
+
+#opentracing:
+#  # Enable / disable tracer
+#  tracer_enabled: false
+#  # The list of homeservers we wish to expose our current traces to.
+#  # The list is a list of regexes which are matched against the
+#  # servername of the homeserver
+#  homeserver_whitelist:
+#    - ".*"

@@ -14,6 +14,11 @@
     name = "Bugfixes"
     showcontent = true
 
+  [[tool.towncrier.type]]
+    directory = "docker"
+    name = "Updates to the Docker image"
+    showcontent = true
+
   [[tool.towncrier.type]]
     directory = "doc"
     name = "Improved Documentation"

@@ -39,6 +44,8 @@ exclude = '''
   | \.git          # root of the project
   | \.tox
   | \.venv
+  | \.env
+  | env
   | _build
   | _trial_temp.*
   | build

scripts-dev/lint.sh (new executable file, 12 lines)

@@ -0,0 +1,12 @@
+#!/bin/sh
+#
+# Runs linting scripts over the local Synapse checkout
+# isort - sorts import statements
+# flake8 - lints and finds mistakes
+# black - opinionated code formatter
+
+set -e
+
+isort -y -rc synapse tests scripts-dev scripts
+flake8 synapse tests
+python3 -m black synapse tests scripts-dev scripts

@@ -35,4 +35,4 @@ try:
 except ImportError:
     pass
 
-__version__ = "1.1.0rc1"
+__version__ = "1.1.0"

@@ -25,7 +25,13 @@ from twisted.internet import defer
 import synapse.types
 from synapse import event_auth
 from synapse.api.constants import EventTypes, JoinRules, Membership
-from synapse.api.errors import AuthError, Codes, ResourceLimitError
+from synapse.api.errors import (
+    AuthError,
+    Codes,
+    InvalidClientTokenError,
+    MissingClientTokenError,
+    ResourceLimitError,
+)
 from synapse.config.server import is_threepid_reserved
 from synapse.types import UserID
 from synapse.util.caches import CACHE_SIZE_FACTOR, register_cache

@@ -63,7 +69,6 @@ class Auth(object):
         self.clock = hs.get_clock()
         self.store = hs.get_datastore()
         self.state = hs.get_state_handler()
-        self.TOKEN_NOT_FOUND_HTTP_STATUS = 401
 
         self.token_cache = LruCache(CACHE_SIZE_FACTOR * 10000)
         register_cache("cache", "token_cache", self.token_cache)

@@ -189,18 +194,17 @@ class Auth(object):
         Returns:
             defer.Deferred: resolves to a ``synapse.types.Requester`` object
         Raises:
-            AuthError if no user by that token exists or the token is invalid.
+            InvalidClientCredentialsError if no user by that token exists or the token
+                is invalid.
+            AuthError if access is denied for the user in the access token
         """
         # Can optionally look elsewhere in the request (e.g. headers)
         try:
             ip_addr = self.hs.get_ip_from_request(request)
             user_agent = request.requestHeaders.getRawHeaders(
                 b"User-Agent", default=[b""]
             )[0].decode("ascii", "surrogateescape")
 
-            access_token = self.get_access_token_from_request(
-                request, self.TOKEN_NOT_FOUND_HTTP_STATUS
-            )
+            access_token = self.get_access_token_from_request(request)
 
             user_id, app_service = yield self._get_appservice_user_id(request)
             if user_id:

@@ -264,18 +268,12 @@ class Auth(object):
                 )
             )
         except KeyError:
-            raise AuthError(
-                self.TOKEN_NOT_FOUND_HTTP_STATUS,
-                "Missing access token.",
-                errcode=Codes.MISSING_TOKEN,
-            )
+            raise MissingClientTokenError()
 
     @defer.inlineCallbacks
     def _get_appservice_user_id(self, request):
         app_service = self.store.get_app_service_by_token(
-            self.get_access_token_from_request(
-                request, self.TOKEN_NOT_FOUND_HTTP_STATUS
-            )
+            self.get_access_token_from_request(request)
         )
         if app_service is None:
             defer.returnValue((None, None))

@@ -313,13 +311,25 @@ class Auth(object):
             `token_id` (int|None): access token id. May be None if guest
             `device_id` (str|None): device corresponding to access token
         Raises:
-            AuthError if no user by that token exists or the token is invalid.
+            InvalidClientCredentialsError if no user by that token exists or the token
+                is invalid.
         """
 
         if rights == "access":
             # first look in the database
             r = yield self._look_up_user_by_access_token(token)
             if r:
+                valid_until_ms = r["valid_until_ms"]
+                if (
+                    valid_until_ms is not None
+                    and valid_until_ms < self.clock.time_msec()
+                ):
+                    # there was a valid access token, but it has expired.
+                    # soft-logout the user.
+                    raise InvalidClientTokenError(
+                        msg="Access token has expired", soft_logout=True
+                    )
+
                 defer.returnValue(r)
 
         # otherwise it needs to be a valid macaroon

@@ -331,11 +341,7 @@ class Auth(object):
         if not guest:
             # non-guest access tokens must be in the database
             logger.warning("Unrecognised access token - not in store.")
-            raise AuthError(
-                self.TOKEN_NOT_FOUND_HTTP_STATUS,
-                "Unrecognised access token.",
-                errcode=Codes.UNKNOWN_TOKEN,
-            )
+            raise InvalidClientTokenError()
 
         # Guest access tokens are not stored in the database (there can
         # only be one access token per guest, anyway).

@@ -350,16 +356,10 @@ class Auth(object):
             # guest tokens.
             stored_user = yield self.store.get_user_by_id(user_id)
             if not stored_user:
-                raise AuthError(
-                    self.TOKEN_NOT_FOUND_HTTP_STATUS,
-                    "Unknown user_id %s" % user_id,
-                    errcode=Codes.UNKNOWN_TOKEN,
-                )
+                raise InvalidClientTokenError("Unknown user_id %s" % user_id)
             if not stored_user["is_guest"]:
-                raise AuthError(
-                    self.TOKEN_NOT_FOUND_HTTP_STATUS,
-                    "Guest access token used for regular user",
-                    errcode=Codes.UNKNOWN_TOKEN,
+                raise InvalidClientTokenError(
+                    "Guest access token used for regular user"
                 )
             ret = {
                 "user": user,

@@ -386,11 +386,7 @@ class Auth(object):
             ValueError,
         ) as e:
             logger.warning("Invalid macaroon in auth: %s %s", type(e), e)
-            raise AuthError(
-                self.TOKEN_NOT_FOUND_HTTP_STATUS,
-                "Invalid macaroon passed.",
-                errcode=Codes.UNKNOWN_TOKEN,
-            )
+            raise InvalidClientTokenError("Invalid macaroon passed.")
 
     def _parse_and_validate_macaroon(self, token, rights="access"):
         """Takes a macaroon and tries to parse and validate it. This is cached

@@ -430,11 +426,7 @@ class Auth(object):
                 macaroon, rights, self.hs.config.expire_access_token, user_id=user_id
             )
         except (pymacaroons.exceptions.MacaroonException, TypeError, ValueError):
-            raise AuthError(
-                self.TOKEN_NOT_FOUND_HTTP_STATUS,
-                "Invalid macaroon passed.",
-                errcode=Codes.UNKNOWN_TOKEN,
-            )
+            raise InvalidClientTokenError("Invalid macaroon passed.")
 
         if not has_expiry and rights == "access":
             self.token_cache[token] = (user_id, guest)

@@ -453,17 +445,14 @@ class Auth(object):
             (str) user id
 
         Raises:
-            AuthError if there is no user_id caveat in the macaroon
+            InvalidClientCredentialsError if there is no user_id caveat in the
+                macaroon
         """
         user_prefix = "user_id = "
         for caveat in macaroon.caveats:
             if caveat.caveat_id.startswith(user_prefix):
                 return caveat.caveat_id[len(user_prefix) :]
-        raise AuthError(
-            self.TOKEN_NOT_FOUND_HTTP_STATUS,
-            "No user caveat in macaroon",
-            errcode=Codes.UNKNOWN_TOKEN,
-        )
+        raise InvalidClientTokenError("No user caveat in macaroon")
 
     def validate_macaroon(self, macaroon, type_string, verify_expiry, user_id):
         """

@@ -527,26 +516,18 @@ class Auth(object):
             "token_id": ret.get("token_id", None),
             "is_guest": False,
             "device_id": ret.get("device_id"),
+            "valid_until_ms": ret.get("valid_until_ms"),
         }
         defer.returnValue(user_info)
 
     def get_appservice_by_req(self, request):
-        try:
-            token = self.get_access_token_from_request(
-                request, self.TOKEN_NOT_FOUND_HTTP_STATUS
-            )
-            service = self.store.get_app_service_by_token(token)
-            if not service:
-                logger.warn("Unrecognised appservice access token.")
-                raise AuthError(
-                    self.TOKEN_NOT_FOUND_HTTP_STATUS,
-                    "Unrecognised access token.",
-                    errcode=Codes.UNKNOWN_TOKEN,
-                )
-            request.authenticated_entity = service.sender
-            return defer.succeed(service)
-        except KeyError:
-            raise AuthError(self.TOKEN_NOT_FOUND_HTTP_STATUS, "Missing access token.")
+        token = self.get_access_token_from_request(request)
+        service = self.store.get_app_service_by_token(token)
+        if not service:
+            logger.warn("Unrecognised appservice access token.")
+            raise InvalidClientTokenError()
+        request.authenticated_entity = service.sender
+        return defer.succeed(service)
 
     def is_server_admin(self, user):
         """ Check if the given user is a local server admin.

@@ -692,20 +673,16 @@ class Auth(object):
         return bool(query_params) or bool(auth_headers)
 
     @staticmethod
-    def get_access_token_from_request(request, token_not_found_http_status=401):
+    def get_access_token_from_request(request):
        """Extracts the access_token from the request.
 
        Args:
            request: The http request.
-           token_not_found_http_status(int): The HTTP status code to set in the
-               AuthError if the token isn't found. This is used in some of the
-               legacy APIs to change the status code to 403 from the default of
-               401 since some of the old clients depended on auth errors returning
-               403.
        Returns:
            unicode: The access_token
        Raises:
-           AuthError: If there isn't an access_token in the request.
+           MissingClientTokenError: If there isn't a single access_token in the
+               request
        """
 
        auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")

@@ -714,34 +691,20 @@ class Auth(object):
            # Try to get the access_token from an "Authorization: Bearer"
            # header
            if query_params is not None:
-               raise AuthError(
-                   token_not_found_http_status,
-                   "Mixing Authorization headers and access_token query parameters.",
-                   errcode=Codes.MISSING_TOKEN,
+               raise MissingClientTokenError(
+                   "Mixing Authorization headers and access_token query parameters."
                )
            if len(auth_headers) > 1:
-               raise AuthError(
-                   token_not_found_http_status,
-                   "Too many Authorization headers.",
-                   errcode=Codes.MISSING_TOKEN,
-               )
+               raise MissingClientTokenError("Too many Authorization headers.")
            parts = auth_headers[0].split(b" ")
            if parts[0] == b"Bearer" and len(parts) == 2:
                return parts[1].decode("ascii")
            else:
-               raise AuthError(
-                   token_not_found_http_status,
-                   "Invalid Authorization header.",
-                   errcode=Codes.MISSING_TOKEN,
-               )
+               raise MissingClientTokenError("Invalid Authorization header.")
        else:
            # Try to get the access_token from the query params.
            if not query_params:
-               raise AuthError(
-                   token_not_found_http_status,
-                   "Missing access token.",
-                   errcode=Codes.MISSING_TOKEN,
-               )
+               raise MissingClientTokenError()
 
            return query_params[0].decode("ascii")
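To make the simplified API above concrete, here is a hedged sketch of how a caller might use the new one-argument extraction method; the servlet shape and `self.auth` attribute are hypothetical stand-ins, not code from this commit:

    from synapse.api.errors import MissingClientTokenError

    def handle(self, request):
        try:
            token = self.auth.get_access_token_from_request(request)
        except MissingClientTokenError as e:
            # e.code is now always 401; the old token_not_found_http_status
            # parameter (which let legacy APIs return 403) is gone.
            return e.code, e.error_dict()
        return 200, {"token_seen": bool(token)}
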
@@ -139,6 +139,22 @@ class ConsentNotGivenError(SynapseError):
         return cs_error(self.msg, self.errcode, consent_uri=self._consent_uri)
 
 
+class UserDeactivatedError(SynapseError):
+    """The error returned to the client when the user attempted to access an
+    authenticated endpoint, but the account has been deactivated.
+    """
+
+    def __init__(self, msg):
+        """Constructs a UserDeactivatedError
+
+        Args:
+            msg (str): The human-readable error message
+        """
+        super(UserDeactivatedError, self).__init__(
+            code=http_client.FORBIDDEN, msg=msg, errcode=Codes.UNKNOWN
+        )
+
+
 class RegistrationError(SynapseError):
     """An error raised when a registration event fails."""
 

@@ -210,7 +226,9 @@ class NotFoundError(SynapseError):
 
 
 class AuthError(SynapseError):
-    """An error raised when there was a problem authorising an event."""
+    """An error raised when there was a problem authorising an event, and at various
+    other poorly-defined times.
+    """
 
     def __init__(self, *args, **kwargs):
         if "errcode" not in kwargs:

@@ -218,6 +236,41 @@ class AuthError(SynapseError):
         super(AuthError, self).__init__(*args, **kwargs)
 
 
+class InvalidClientCredentialsError(SynapseError):
+    """An error raised when there was a problem with the authorisation credentials
+    in a client request.
+
+    https://matrix.org/docs/spec/client_server/r0.5.0#using-access-tokens:
+
+    When credentials are required but missing or invalid, the HTTP call will
+    return with a status of 401 and the error code, M_MISSING_TOKEN or
+    M_UNKNOWN_TOKEN respectively.
+    """
+
+    def __init__(self, msg, errcode):
+        super().__init__(code=401, msg=msg, errcode=errcode)
+
+
+class MissingClientTokenError(InvalidClientCredentialsError):
+    """Raised when we couldn't find the access token in a request"""
+
+    def __init__(self, msg="Missing access token"):
+        super().__init__(msg=msg, errcode="M_MISSING_TOKEN")
+
+
+class InvalidClientTokenError(InvalidClientCredentialsError):
+    """Raised when we didn't understand the access token in a request"""
+
+    def __init__(self, msg="Unrecognised access token", soft_logout=False):
+        super().__init__(msg=msg, errcode="M_UNKNOWN_TOKEN")
+        self._soft_logout = soft_logout
+
+    def error_dict(self):
+        d = super().error_dict()
+        d["soft_logout"] = self._soft_logout
+        return d
+
+
 class ResourceLimitError(SynapseError):
     """
     Any error raised when there is a problem with resource usage.
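A small hedged sketch of what the soft-logout flag above means on the wire, inferred only from the class definitions in this hunk (exact key ordering of the dict is illustrative):

    from synapse.api.errors import InvalidClientTokenError

    e = InvalidClientTokenError(msg="Access token has expired", soft_logout=True)
    print(e.code)          # 401
    print(e.error_dict())  # roughly: {'error': 'Access token has expired',
                           #           'errcode': 'M_UNKNOWN_TOKEN',
                           #           'soft_logout': True}
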
@@ -27,7 +27,7 @@ from twisted.protocols.tls import TLSMemoryBIOFactory
 import synapse
 from synapse.app import check_bind_error
 from synapse.crypto import context_factory
-from synapse.util import PreserveLoggingContext
+from synapse.logging.context import PreserveLoggingContext
 from synapse.util.async_helpers import Linearizer
 from synapse.util.rlimit import change_resource_limit
 from synapse.util.versionstring import get_version_string

@@ -48,7 +48,7 @@ def register_sighup(func):
     _sighup_callbacks.append(func)
 
 
-def start_worker_reactor(appname, config):
+def start_worker_reactor(appname, config, run_command=reactor.run):
     """ Run the reactor in the main process
 
     Daemonizes if necessary, and then configures some resources, before starting

@@ -57,6 +57,7 @@ def start_worker_reactor(appname, config):
     Args:
         appname (str): application name which will be sent to syslog
         config (synapse.config.Config): config object
+        run_command (Callable[]): callable that actually runs the reactor
     """
 
     logger = logging.getLogger(config.worker_app)

@@ -69,11 +70,19 @@ def start_worker_reactor(appname, config):
         daemonize=config.worker_daemonize,
         print_pidfile=config.print_pidfile,
         logger=logger,
+        run_command=run_command,
     )
 
 
 def start_reactor(
-    appname, soft_file_limit, gc_thresholds, pid_file, daemonize, print_pidfile, logger
+    appname,
+    soft_file_limit,
+    gc_thresholds,
+    pid_file,
+    daemonize,
+    print_pidfile,
+    logger,
+    run_command=reactor.run,
 ):
     """ Run the reactor in the main process
 

@@ -88,38 +97,42 @@ def start_reactor(
         daemonize (bool): true to run the reactor in a background process
         print_pidfile (bool): whether to print the pid file, if daemonize is True
         logger (logging.Logger): logger instance to pass to Daemonize
+        run_command (Callable[]): callable that actually runs the reactor
     """
 
     install_dns_limiter(reactor)
 
     def run():
-        # make sure that we run the reactor with the sentinel log context,
-        # otherwise other PreserveLoggingContext instances will get confused
-        # and complain when they see the logcontext arbitrarily swapping
-        # between the sentinel and `run` logcontexts.
-        with PreserveLoggingContext():
-            logger.info("Running")
-            change_resource_limit(soft_file_limit)
-            if gc_thresholds:
-                gc.set_threshold(*gc_thresholds)
-            reactor.run()
+        logger.info("Running")
+        change_resource_limit(soft_file_limit)
+        if gc_thresholds:
+            gc.set_threshold(*gc_thresholds)
+        run_command()
 
-    if daemonize:
-        if print_pidfile:
-            print(pid_file)
+    # make sure that we run the reactor with the sentinel log context,
+    # otherwise other PreserveLoggingContext instances will get confused
+    # and complain when they see the logcontext arbitrarily swapping
+    # between the sentinel and `run` logcontexts.
+    #
+    # We also need to drop the logcontext before forking if we're daemonizing,
+    # otherwise the cputime metrics get confused about the per-thread resource usage
+    # appearing to go backwards.
+    with PreserveLoggingContext():
+        if daemonize:
+            if print_pidfile:
+                print(pid_file)
 
-        daemon = Daemonize(
-            app=appname,
-            pid=pid_file,
-            action=run,
-            auto_close_fds=False,
-            verbose=True,
-            logger=logger,
-        )
-        daemon.start()
-    else:
-        run()
+            daemon = Daemonize(
+                app=appname,
+                pid=pid_file,
+                action=run,
+                auto_close_fds=False,
+                verbose=True,
+                logger=logger,
+            )
+            daemon.start()
+        else:
+            run()
 
 
 def quit_with_error(error_string):
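A hedged sketch of what the new `run_command` hook enables; only the keyword argument itself comes from this diff, while the worker name, `config`, and `main` function are illustrative stand-ins:

    from twisted.internet import task

    def main(reactor):
        # do some work and return a Deferred; task.react exits when it fires
        ...

    # Instead of blocking in reactor.run(), let task.react drive the reactor
    # and translate the Deferred's outcome into a process exit code.
    start_worker_reactor(
        "synapse.app.admin_cmd", config, run_command=lambda: task.react(main)
    )
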

@@ -240,6 +253,9 @@ def start(hs, listeners=None):
     # Load the certificate from disk.
     refresh_certificate(hs)
 
+    # Start the tracer
+    synapse.logging.opentracing.init_tracer(hs.config)
+
     # It is now safe to start your Synapse.
     hs.start_listening(listeners)
     hs.get_datastore().start_profiling()

synapse/app/admin_cmd.py (new file, 264 lines)

@@ -0,0 +1,264 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright 2019 Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import argparse
+import logging
+import os
+import sys
+import tempfile
+
+from canonicaljson import json
+
+from twisted.internet import defer, task
+
+import synapse
+from synapse.app import _base
+from synapse.config._base import ConfigError
+from synapse.config.homeserver import HomeServerConfig
+from synapse.config.logger import setup_logging
+from synapse.handlers.admin import ExfiltrationWriter
+from synapse.replication.slave.storage._base import BaseSlavedStore
+from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
+from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
+from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
+from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
+from synapse.replication.slave.storage.devices import SlavedDeviceStore
+from synapse.replication.slave.storage.events import SlavedEventStore
+from synapse.replication.slave.storage.filtering import SlavedFilteringStore
+from synapse.replication.slave.storage.groups import SlavedGroupServerStore
+from synapse.replication.slave.storage.presence import SlavedPresenceStore
+from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
+from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
+from synapse.replication.slave.storage.registration import SlavedRegistrationStore
+from synapse.replication.slave.storage.room import RoomStore
+from synapse.replication.tcp.client import ReplicationClientHandler
+from synapse.server import HomeServer
+from synapse.storage.engines import create_engine
+from synapse.util.logcontext import LoggingContext
+from synapse.util.versionstring import get_version_string
+
+logger = logging.getLogger("synapse.app.admin_cmd")
+
+
+class AdminCmdSlavedStore(
+    SlavedReceiptsStore,
+    SlavedAccountDataStore,
+    SlavedApplicationServiceStore,
+    SlavedRegistrationStore,
+    SlavedFilteringStore,
+    SlavedPresenceStore,
+    SlavedGroupServerStore,
+    SlavedDeviceInboxStore,
+    SlavedDeviceStore,
+    SlavedPushRuleStore,
+    SlavedEventStore,
+    SlavedClientIpStore,
+    RoomStore,
+    BaseSlavedStore,
+):
+    pass
+
+
+class AdminCmdServer(HomeServer):
+    DATASTORE_CLASS = AdminCmdSlavedStore
+
+    def _listen_http(self, listener_config):
+        pass
+
+    def start_listening(self, listeners):
+        pass
+
+    def build_tcp_replication(self):
+        return AdminCmdReplicationHandler(self)
+
+
+class AdminCmdReplicationHandler(ReplicationClientHandler):
+    @defer.inlineCallbacks
+    def on_rdata(self, stream_name, token, rows):
+        pass
+
+    def get_streams_to_replicate(self):
+        return {}
+
+
+@defer.inlineCallbacks
+def export_data_command(hs, args):
+    """Export data for a user.
+
+    Args:
+        hs (HomeServer)
+        args (argparse.Namespace)
+    """
+
+    user_id = args.user_id
+    directory = args.output_directory
+
+    res = yield hs.get_handlers().admin_handler.export_user_data(
+        user_id, FileExfiltrationWriter(user_id, directory=directory)
+    )
+    print(res)
+
+
+class FileExfiltrationWriter(ExfiltrationWriter):
+    """An ExfiltrationWriter that writes the user's data to a directory.
+    Returns the directory location on completion.
+
+    Note: This writes to disk on the main reactor thread.
+
+    Args:
+        user_id (str): The user whose data is being exfiltrated.
+        directory (str|None): The directory to write the data to, if None then
+            will write to a temporary directory.
+    """
+
+    def __init__(self, user_id, directory=None):
+        self.user_id = user_id
+
+        if directory:
+            self.base_directory = directory
+        else:
+            self.base_directory = tempfile.mkdtemp(
+                prefix="synapse-exfiltrate__%s__" % (user_id,)
+            )
+
+        os.makedirs(self.base_directory, exist_ok=True)
+        if list(os.listdir(self.base_directory)):
+            raise Exception("Directory must be empty")
+
+    def write_events(self, room_id, events):
+        room_directory = os.path.join(self.base_directory, "rooms", room_id)
+        os.makedirs(room_directory, exist_ok=True)
+        events_file = os.path.join(room_directory, "events")
+
+        with open(events_file, "a") as f:
+            for event in events:
+                print(json.dumps(event.get_pdu_json()), file=f)
+
+    def write_state(self, room_id, event_id, state):
+        room_directory = os.path.join(self.base_directory, "rooms", room_id)
+        state_directory = os.path.join(room_directory, "state")
+        os.makedirs(state_directory, exist_ok=True)
+
+        event_file = os.path.join(state_directory, event_id)
+
+        with open(event_file, "a") as f:
+            for event in state.values():
+                print(json.dumps(event.get_pdu_json()), file=f)
+
+    def write_invite(self, room_id, event, state):
+        self.write_events(room_id, [event])
+
+        # We write the invite state somewhere else as they aren't full events
+        # and are only a subset of the state at the event.
+        room_directory = os.path.join(self.base_directory, "rooms", room_id)
+        os.makedirs(room_directory, exist_ok=True)
+
+        invite_state = os.path.join(room_directory, "invite_state")
+
+        with open(invite_state, "a") as f:
+            for event in state.values():
+                print(json.dumps(event), file=f)
+
+    def finished(self):
+        return self.base_directory
||||
|
||||
def start(config_options):
|
||||
parser = argparse.ArgumentParser(description="Synapse Admin Command")
|
||||
HomeServerConfig.add_arguments_to_parser(parser)
|
||||
|
||||
subparser = parser.add_subparsers(
|
||||
title="Admin Commands",
|
||||
required=True,
|
||||
dest="command",
|
||||
metavar="<admin_command>",
|
||||
help="The admin command to perform.",
|
||||
)
|
||||
export_data_parser = subparser.add_parser(
|
||||
"export-data", help="Export all data for a user"
|
||||
)
|
||||
export_data_parser.add_argument("user_id", help="User to extra data from")
|
||||
export_data_parser.add_argument(
|
||||
"--output-directory",
|
||||
action="store",
|
||||
metavar="DIRECTORY",
|
||||
required=False,
|
||||
help="The directory to store the exported data in. Must be empty. Defaults"
|
||||
" to creating a temp directory.",
|
||||
)
|
||||
export_data_parser.set_defaults(func=export_data_command)
|
||||
|
||||
try:
|
||||
config, args = HomeServerConfig.load_config_with_parser(parser, config_options)
|
||||
except ConfigError as e:
|
||||
sys.stderr.write("\n" + str(e) + "\n")
|
||||
sys.exit(1)
|
||||
|
||||
if config.worker_app is not None:
|
||||
assert config.worker_app == "synapse.app.admin_cmd"
|
||||
|
||||
# Update the config with some basic overrides so that don't have to specify
|
||||
# a full worker config.
|
||||
config.worker_app = "synapse.app.admin_cmd"
|
||||
|
||||
if (
|
||||
not config.worker_daemonize
|
||||
and not config.worker_log_file
|
||||
and not config.worker_log_config
|
||||
):
|
||||
# Since we're meant to be run as a "command" let's not redirect stdio
|
||||
# unless we've actually set log config.
|
||||
config.no_redirect_stdio = True
|
||||
|
||||
# Explicitly disable background processes
|
||||
config.update_user_directory = False
|
||||
config.start_pushers = False
|
||||
config.send_federation = False
|
||||
|
||||
setup_logging(config, use_worker_options=True)
|
||||
|
||||
synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts
|
||||
|
||||
database_engine = create_engine(config.database_config)
|
||||
|
||||
ss = AdminCmdServer(
|
||||
config.server_name,
|
||||
db_config=config.database_config,
|
||||
config=config,
|
||||
version_string="Synapse/" + get_version_string(synapse),
|
||||
database_engine=database_engine,
|
||||
)
|
||||
|
||||
ss.setup()
|
||||
|
||||
# We use task.react as the basic run command as it correctly handles tearing
|
||||
# down the reactor when the deferreds resolve and setting the return value.
|
||||
# We also make sure that `_base.start` gets run before we actually run the
|
||||
# command.
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def run(_reactor):
|
||||
with LoggingContext("command"):
|
||||
yield _base.start(ss, [])
|
||||
yield args.func(ss, args)
|
||||
|
||||
_base.start_worker_reactor(
|
||||
"synapse-admin-cmd", config, run_command=lambda: task.react(run)
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
with LoggingContext("main"):
|
||||
start(sys.argv[1:])
|
|
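For orientation, a minimal usage sketch of the writer above, outside the full homeserver setup. The FakeEvent class and the example user/room IDs are invented for illustration; real callers receive Synapse event objects whose get_pdu_json() returns a JSON-serialisable dict, and the command itself is normally driven via the "export-data" subcommand defined in start().

# Illustrative sketch only: exercising FileExfiltrationWriter directly.
class FakeEvent:
    def get_pdu_json(self):
        return {"type": "m.room.message", "content": {"body": "hello"}}

writer = FileExfiltrationWriter("@user:example.com")  # a temp dir is created
writer.write_events("!room:example.com", [FakeEvent()])
print("data written under", writer.finished())
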
@ -26,6 +26,7 @@ from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext, run_in_background
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore

@ -36,7 +37,6 @@ from synapse.replication.tcp.client import ReplicationClientHandler
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, run_in_background
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string

@ -27,6 +27,7 @@ from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.server import JsonResource
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore

@ -64,7 +65,6 @@ from synapse.rest.client.versions import VersionsRestServlet
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string

@ -27,6 +27,7 @@ from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.server import JsonResource
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore

@ -59,7 +60,6 @@ from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.storage.user_directory import UserDirectoryStore
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string

@ -28,6 +28,7 @@ from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.federation.transport.server import TransportLayerServer
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore

@ -48,7 +49,6 @@ from synapse.rest.key.v2 import KeyApiV2Resource
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string

@ -27,6 +27,7 @@ from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.federation import send_queue
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext, run_in_background
from synapse.metrics import RegistryProxy
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource

@ -44,7 +45,6 @@ from synapse.storage.engines import create_engine
from synapse.types import ReadReceipt
from synapse.util.async_helpers import Linearizer
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, run_in_background
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string

@ -29,6 +29,7 @@ from synapse.config.logger import setup_logging
from synapse.http.server import JsonResource
from synapse.http.servlet import RestServlet, parse_json_object_from_request
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore

@ -41,7 +42,6 @@ from synapse.rest.client.v2_alpha._base import client_patterns
from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string

@ -54,6 +54,7 @@ from synapse.federation.transport.server import TransportLayerServer
from synapse.http.additional_resource import AdditionalResource
from synapse.http.server import RootRedirect
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext
from synapse.metrics import RegistryProxy
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource

@ -72,7 +73,6 @@ from synapse.storage.engines import IncorrectDatabaseSetup, create_engine
from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database
from synapse.util.caches import CACHE_SIZE_FACTOR
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.module_loader import load_module
from synapse.util.rlimit import change_resource_limit

@ -27,6 +27,7 @@ from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore

@ -40,7 +41,6 @@ from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.storage.media_repository import MediaRepositoryStore
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string

@ -26,6 +26,7 @@ from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext, run_in_background
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import __func__

@ -38,7 +39,6 @@ from synapse.server import HomeServer
from synapse.storage import DataStore
from synapse.storage.engines import create_engine
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, run_in_background
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string

@ -31,6 +31,7 @@ from synapse.config.logger import setup_logging
from synapse.handlers.presence import PresenceHandler, get_interested_parties
from synapse.http.server import JsonResource
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext, run_in_background
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore, __func__

@ -57,7 +58,6 @@ from synapse.server import HomeServer
from synapse.storage.engines import create_engine
from synapse.storage.presence import UserPresenceState
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, run_in_background
from synapse.util.manhole import manhole
from synapse.util.stringutils import random_string
from synapse.util.versionstring import get_version_string

@ -28,6 +28,7 @@ from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.server import JsonResource
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext, run_in_background
from synapse.metrics import RegistryProxy
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.replication.slave.storage._base import BaseSlavedStore

@ -46,7 +47,6 @@ from synapse.storage.engines import create_engine
from synapse.storage.user_directory import UserDirectoryStore
from synapse.util.caches.stream_change_cache import StreamChangeCache
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext, run_in_background
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string

@ -53,8 +53,8 @@ import logging

from twisted.internet import defer

from synapse.appservice import ApplicationServiceState
from synapse.logging.context import run_in_background
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.util.logcontext import run_in_background

logger = logging.getLogger(__name__)

@ -137,12 +137,42 @@ class Config(object):
        return file_stream.read()

    def invoke_all(self, name, *args, **kargs):
        """Invoke all instance methods with the given name and arguments in the
        class's MRO.

        Args:
            name (str): Name of function to invoke
            *args
            **kwargs

        Returns:
            list: The list of the return values from each method called
        """
        results = []
        for cls in type(self).mro():
            if name in cls.__dict__:
                results.append(getattr(cls, name)(self, *args, **kargs))
        return results

    @classmethod
    def invoke_all_static(cls, name, *args, **kargs):
        """Invoke all static methods with the given name and arguments in the
        class's MRO.

        Args:
            name (str): Name of function to invoke
            *args
            **kwargs

        Returns:
            list: The list of the return values from each method called
        """
        results = []
        for c in cls.mro():
            if name in c.__dict__:
                results.append(getattr(c, name)(*args, **kargs))
        return results

    def generate_config(
        self,
        config_dir_path,

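A minimal standalone sketch of how invoke_all_static walks the MRO, using toy classes rather than Synapse's real config mixins: each class in the MRO that defines the named method contributes exactly one result.

# Toy illustration of MRO-based dispatch, mirroring invoke_all_static above.
class Base:
    @staticmethod
    def add_arguments(parser):
        return "Base"

class DatabaseMixin(Base):
    @staticmethod
    def add_arguments(parser):
        return "DatabaseMixin"

class LoggingMixin(Base):
    pass  # defines no add_arguments of its own, so it is skipped

class AppConfig(DatabaseMixin, LoggingMixin):
    @classmethod
    def invoke_all_static(cls, name, *args, **kwargs):
        results = []
        for c in cls.mro():
            if name in c.__dict__:
                results.append(getattr(c, name)(*args, **kwargs))
        return results

print(AppConfig.invoke_all_static("add_arguments", None))
# ['DatabaseMixin', 'Base'] - each defining class in the MRO contributes once
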
@ -202,6 +232,23 @@ class Config(object):
        Returns: Config object.
        """
        config_parser = argparse.ArgumentParser(description=description)
        cls.add_arguments_to_parser(config_parser)
        obj, _ = cls.load_config_with_parser(config_parser, argv)

        return obj

    @classmethod
    def add_arguments_to_parser(cls, config_parser):
        """Adds all the config flags to an ArgumentParser.

        Doesn't support config-file-generation: used by the worker apps.

        Used for workers where we want to add extra flags/subcommands.

        Args:
            config_parser (ArgumentParser): App description
        """

        config_parser.add_argument(
            "-c",
            "--config-path",

@ -219,16 +266,34 @@ class Config(object):
            " Defaults to the directory containing the last config file",
        )

        cls.invoke_all_static("add_arguments", config_parser)

    @classmethod
    def load_config_with_parser(cls, parser, argv):
        """Parse the commandline and config files with the given parser

        Doesn't support config-file-generation: used by the worker apps.

        Used for workers where we want to add extra flags/subcommands.

        Args:
            parser (ArgumentParser)
            argv (list[str])

        Returns:
            tuple[HomeServerConfig, argparse.Namespace]: Returns the parsed
                config object and the parsed argparse.Namespace object from
                `parser.parse_args(..)`
        """

        obj = cls()

        obj.invoke_all("add_arguments", config_parser)

        config_args = config_parser.parse_args(argv)
        config_args = parser.parse_args(argv)

        config_files = find_config_files(search_paths=config_args.config_path)

        if not config_files:
            config_parser.error("Must supply a config file.")
            parser.error("Must supply a config file.")

        if config_args.keys_directory:
            config_dir_path = config_args.keys_directory

@ -244,7 +309,7 @@ class Config(object):

        obj.invoke_all("read_arguments", config_args)

        return obj
        return obj, config_args

    @classmethod
    def load_or_generate_config(cls, description, argv):

@ -401,7 +466,7 @@ class Config(object):
            formatter_class=argparse.RawDescriptionHelpFormatter,
        )

        obj.invoke_all("add_arguments", parser)
        obj.invoke_all_static("add_arguments", parser)
        args = parser.parse_args(remaining_args)

        config_dict = read_config_files(config_files)

@ -69,7 +69,8 @@ class DatabaseConfig(Config):
        if database_path is not None:
            self.database_config["args"]["database"] = database_path

    def add_arguments(self, parser):
    @staticmethod
    def add_arguments(parser):
        db_group = parser.add_argument_group("database")
        db_group.add_argument(
            "-d",

@ -112,13 +112,17 @@ class EmailConfig(Config):
            missing = []
            for k in required:
                if k not in email_config:
                    missing.append(k)
                    missing.append("email." + k)

            if config.get("public_baseurl") is None:
                missing.append("public_baseurl")

            if len(missing) > 0:
                raise RuntimeError(
                    "email.password_reset_behaviour is set to 'local' "
                    "but required keys are missing: %s"
                    % (", ".join(["email." + k for k in missing]),)
                    "Password reset emails are configured to be sent from "
                    "this homeserver due to a partial 'email' block. "
                    "However, the following required keys are missing: %s"
                    % (", ".join(missing),)
                )

            # Templates for password reset emails

@ -156,13 +160,6 @@ class EmailConfig(Config):
                filepath, "email.password_reset_template_success_html"
            )

            if config.get("public_baseurl") is None:
                raise RuntimeError(
                    "email.password_reset_behaviour is set to 'local' but no "
                    "public_baseurl is set. This is necessary to generate password "
                    "reset links"
                )

        if self.email_enable_notifs:
            required = [
                "smtp_host",

@ -40,6 +40,7 @@ from .spam_checker import SpamCheckerConfig
from .stats import StatsConfig
from .third_party_event_rules import ThirdPartyRulesConfig
from .tls import TlsConfig
from .tracer import TracerConfig
from .user_directory import UserDirectoryConfig
from .voip import VoipConfig
from .workers import WorkerConfig

@ -75,5 +76,6 @@ class HomeServerConfig(
    ServerNoticesConfig,
    RoomDirectoryConfig,
    ThirdPartyRulesConfig,
    TracerConfig,
):
    pass

@ -24,7 +24,7 @@ from twisted.logger import STDLibLogObserver, globalLogBeginner

import synapse
from synapse.app import _base as appbase
from synapse.util.logcontext import LoggingContextFilter
from synapse.logging.context import LoggingContextFilter
from synapse.util.versionstring import get_version_string

from ._base import Config

@ -40,7 +40,7 @@ formatters:

filters:
    context:
        (): synapse.util.logcontext.LoggingContextFilter
        (): synapse.logging.context.LoggingContextFilter
        request: ""

handlers:

@ -103,7 +103,8 @@ class LoggingConfig(Config):
        if args.log_file is not None:
            self.log_file = args.log_file

    def add_arguments(cls, parser):
    @staticmethod
    def add_arguments(parser):
        logging_group = parser.add_argument_group("logging")
        logging_group.add_argument(
            "-v",

@ -23,7 +23,7 @@ class RateLimitConfig(object):

class FederationRateLimitConfig(object):
    _items_and_default = {
        "window_size": 10000,
        "window_size": 1000,
        "sleep_limit": 10,
        "sleep_delay": 500,
        "reject_limit": 50,

@ -54,7 +54,7 @@ class RatelimitConfig(Config):

        # Load the new-style federation config, if it exists. Otherwise, fall
        # back to the old method.
        if "federation_rc" in config:
        if "rc_federation" in config:
            self.rc_federation = FederationRateLimitConfig(**config["rc_federation"])
        else:
            self.rc_federation = FederationRateLimitConfig(

@ -71,9 +71,8 @@ class RegistrationConfig(Config):
        self.default_identity_server = config.get("default_identity_server")
        self.allow_guest_access = config.get("allow_guest_access", False)

        self.invite_3pid_guest = self.allow_guest_access and config.get(
            "invite_3pid_guest", False
        )
        if config.get("invite_3pid_guest", False):
            raise ConfigError("invite_3pid_guest is no longer supported")

        self.auto_join_rooms = config.get("auto_join_rooms", [])
        for room_alias in self.auto_join_rooms:

@ -85,6 +84,11 @@ class RegistrationConfig(Config):
            "disable_msisdn_registration", False
        )

        session_lifetime = config.get("session_lifetime")
        if session_lifetime is not None:
            session_lifetime = self.parse_duration(session_lifetime)
        self.session_lifetime = session_lifetime

    def generate_config_section(self, generate_secrets=False, **kwargs):
        if generate_secrets:
            registration_shared_secret = 'registration_shared_secret: "%s"' % (

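parse_duration is defined on the base Config class (not shown in this diff); it accepts either an integer number of milliseconds or a string with a unit suffix. A rough sketch of the rule it applies, under that assumption:

# A rough sketch (not Synapse's exact implementation) of duration parsing:
# "24h" -> 86400000 ms; plain integers are taken as milliseconds already.
def parse_duration(value):
    if isinstance(value, int):
        return value
    units = {
        "s": 1000,
        "m": 60 * 1000,
        "h": 60 * 60 * 1000,
        "d": 24 * 60 * 60 * 1000,
        "w": 7 * 24 * 60 * 60 * 1000,
        "y": 365 * 24 * 60 * 60 * 1000,
    }
    suffix = value[-1]
    if suffix in units:
        return int(value[:-1]) * units[suffix]
    return int(value)

assert parse_duration("24h") == 24 * 60 * 60 * 1000
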
@ -142,6 +146,17 @@ class RegistrationConfig(Config):
        #   renew_at: 1w
        #   renew_email_subject: "Renew your %%(app)s account"

        # Time that a user's session remains valid for, after they log in.
        #
        # Note that this is not currently compatible with guest logins.
        #
        # Note also that this is calculated at login time: changes are not applied
        # retrospectively to users who have already logged in.
        #
        # By default, this is infinite.
        #
        #session_lifetime: 24h

        # The user must provide all of the below types of 3PID when registering.
        #
        #registrations_require_3pid:

@ -222,7 +237,8 @@ class RegistrationConfig(Config):
            % locals()
        )

    def add_arguments(self, parser):
    @staticmethod
    def add_arguments(parser):
        reg_group = parser.add_argument_group("registration")
        reg_group.add_argument(
            "--enable-registration",

@ -639,7 +639,8 @@ class ServerConfig(Config):
        if args.print_pidfile is not None:
            self.print_pidfile = args.print_pidfile

    def add_arguments(self, parser):
    @staticmethod
    def add_arguments(parser):
        server_group = parser.add_argument_group("server")
        server_group.add_argument(
            "-D",

synapse/config/tracer.py (Normal file, 50 lines)
@ -0,0 +1,50 @@
# -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config, ConfigError


class TracerConfig(Config):
    def read_config(self, config, **kwargs):
        self.tracer_config = config.get("opentracing", {"tracer_enabled": False})

        if self.tracer_config.get("tracer_enabled", False):
            # The tracer is enabled, so sanitize the config:
            # if no whitelist is given, default to an empty one.
            self.tracer_config.setdefault("homeserver_whitelist", [])

            if not isinstance(self.tracer_config.get("homeserver_whitelist"), list):
                raise ConfigError("Tracer homeserver_whitelist config is malformed")

    def generate_config_section(cls, **kwargs):
        return """\
        ## Opentracing ##
        # These settings enable opentracing, which implements distributed tracing.
        # This allows you to observe the causal chain of events across servers,
        # including requests, key lookups etc., across any server running
        # synapse or any other services which support opentracing
        # (specifically those implemented with Jaeger).

        #opentracing:
        #  # Enable / disable tracer
        #  tracer_enabled: false
        #  # The list of homeservers we wish to expose our current traces to.
        #  # The list is a list of regexes which are matched against the
        #  # servername of the homeserver.
        #  homeserver_whitelist:
        #    - ".*"
        """

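The homeserver_whitelist entries are described above as regexes matched against the server name of the homeserver. A small sketch of how such a check could look; the exact matching rule Synapse applies (match vs fullmatch) is an assumption here:

import re

# Sketch of whitelist matching as described in the comments above.
def whitelisted_homeserver(destination, homeserver_whitelist):
    for pattern in homeserver_whitelist:
        if re.match(pattern, destination):
            return True
    return False

print(whitelisted_homeserver("matrix.org", [".*"]))              # True
print(whitelisted_homeserver("evil.example", [r"matrix\.org"]))  # False
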
@ -44,15 +44,16 @@ from synapse.api.errors import (
    RequestSendFailed,
    SynapseError,
)
from synapse.storage.keys import FetchKeyResult
from synapse.util import logcontext, unwrapFirstError
from synapse.util.async_helpers import yieldable_gather_results
from synapse.util.logcontext import (
from synapse.logging.context import (
    LoggingContext,
    PreserveLoggingContext,
    make_deferred_yieldable,
    preserve_fn,
    run_in_background,
)
from synapse.storage.keys import FetchKeyResult
from synapse.util import unwrapFirstError
from synapse.util.async_helpers import yieldable_gather_results
from synapse.util.metrics import Measure
from synapse.util.retryutils import NotRetryingDestination

@ -140,7 +141,7 @@ class Keyring(object):
        """
        req = VerifyJsonRequest(server_name, json_object, validity_time, request_name)
        requests = (req,)
        return logcontext.make_deferred_yieldable(self._verify_objects(requests)[0])
        return make_deferred_yieldable(self._verify_objects(requests)[0])

    def verify_json_objects_for_server(self, server_and_json):
        """Bulk verifies signatures of json objects, bulk fetching keys as

@ -557,7 +558,7 @@ class BaseV2KeyFetcher(object):

        signed_key_json_bytes = encode_canonical_json(signed_key_json)

        yield logcontext.make_deferred_yieldable(
        yield make_deferred_yieldable(
            defer.gatherResults(
                [
                    run_in_background(

@ -612,7 +613,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher):

        defer.returnValue({})

        results = yield logcontext.make_deferred_yieldable(
        results = yield make_deferred_yieldable(
            defer.gatherResults(
                [run_in_background(get_key, server) for server in self.key_servers],
                consumeErrors=True,

@ -19,7 +19,7 @@ from frozendict import frozendict

from twisted.internet import defer

from synapse.util.logcontext import make_deferred_yieldable, run_in_background
from synapse.logging.context import make_deferred_yieldable, run_in_background


class EventContext(object):

@ -392,7 +392,11 @@ class EventClientSerializer(object):
                serialized_event["content"].pop("m.relates_to", None)

            r = serialized_event["unsigned"].setdefault("m.relations", {})
            r[RelationTypes.REPLACE] = {"event_id": edit.event_id}
            r[RelationTypes.REPLACE] = {
                "event_id": edit.event_id,
                "origin_server_ts": edit.origin_server_ts,
                "sender": edit.sender,
            }

        defer.returnValue(serialized_event)

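After this change, the unsigned section of a serialized edited event carries the edit's timestamp and sender alongside its event ID. Roughly, with illustrative values:

# What serialized_event["unsigned"]["m.relations"] looks like after an edit
# (event IDs, timestamp, and sender here are example values only):
serialized_event = {
    "event_id": "$original",
    "content": {"body": "original text"},
    "unsigned": {
        "m.relations": {
            "m.replace": {  # RelationTypes.REPLACE
                "event_id": "$edit",
                "origin_server_ts": 1560000000000,
                "sender": "@editor:example.com",
            }
        }
    },
}
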
@ -27,8 +27,14 @@ from synapse.crypto.event_signing import check_event_content_hash
from synapse.events import event_type_from_format_version
from synapse.events.utils import prune_event
from synapse.http.servlet import assert_params_in_dict
from synapse.logging.context import (
    LoggingContext,
    PreserveLoggingContext,
    make_deferred_yieldable,
    preserve_fn,
)
from synapse.types import get_domain_from_id
from synapse.util import logcontext, unwrapFirstError
from synapse.util import unwrapFirstError

logger = logging.getLogger(__name__)

@ -73,7 +79,7 @@ class FederationBase(object):
        @defer.inlineCallbacks
        def handle_check_result(pdu, deferred):
            try:
                res = yield logcontext.make_deferred_yieldable(deferred)
                res = yield make_deferred_yieldable(deferred)
            except SynapseError:
                res = None

@ -102,10 +108,10 @@ class FederationBase(object):

            defer.returnValue(res)

        handle = logcontext.preserve_fn(handle_check_result)
        handle = preserve_fn(handle_check_result)
        deferreds2 = [handle(pdu, deferred) for pdu, deferred in zip(pdus, deferreds)]

        valid_pdus = yield logcontext.make_deferred_yieldable(
        valid_pdus = yield make_deferred_yieldable(
            defer.gatherResults(deferreds2, consumeErrors=True)
        ).addErrback(unwrapFirstError)

@ -115,7 +121,7 @@ class FederationBase(object):
        defer.returnValue([p for p in valid_pdus if p])

    def _check_sigs_and_hash(self, room_version, pdu):
        return logcontext.make_deferred_yieldable(
        return make_deferred_yieldable(
            self._check_sigs_and_hashes(room_version, [pdu])[0]
        )

@ -133,14 +139,14 @@ class FederationBase(object):
            * returns a redacted version of the event (if the signature
              matched but the hash did not)
            * throws a SynapseError if the signature check failed.
        The deferreds run their callbacks in the sentinel logcontext.
        The deferreds run their callbacks in the sentinel logging context.
        """
        deferreds = _check_sigs_on_pdus(self.keyring, room_version, pdus)

        ctx = logcontext.LoggingContext.current_context()
        ctx = LoggingContext.current_context()

        def callback(_, pdu):
            with logcontext.PreserveLoggingContext(ctx):
            with PreserveLoggingContext(ctx):
                if not check_event_content_hash(pdu):
                    # let's try to distinguish between failures because the event was
                    # redacted (which are somewhat expected) vs actual ball-tampering

@ -178,7 +184,7 @@ class FederationBase(object):

        def errback(failure, pdu):
            failure.trap(SynapseError)
            with logcontext.PreserveLoggingContext(ctx):
            with PreserveLoggingContext(ctx):
                logger.warn(
                    "Signature check failed for %s: %s",
                    pdu.event_id,

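The callback/errback pattern above captures the current logging context and re-enters it when the deferred fires. A toy standalone sketch of the idea, with a plain dict standing in for Synapse's LoggingContext machinery (none of these names are Synapse's real API):

from twisted.internet import defer

# Toy stand-ins, not Synapse's LoggingContext/PreserveLoggingContext.
_current = {"ctx": "sentinel"}

class preserve_context:
    def __init__(self, ctx):
        self.ctx = ctx
    def __enter__(self):
        self.prev = _current["ctx"]
        _current["ctx"] = self.ctx
    def __exit__(self, *exc_info):
        _current["ctx"] = self.prev

def add_context_aware_callback(d):
    ctx = _current["ctx"]  # capture the context active when we attach

    def callback(result):
        # the deferred may fire in the sentinel context; restore ours first
        with preserve_context(ctx):
            print("processing %r in context %s" % (result, _current["ctx"]))
        return result

    return d.addCallback(callback)

_current["ctx"] = "request-1"
d = defer.Deferred()
add_context_aware_callback(d)
_current["ctx"] = "sentinel"  # the reactor fires callbacks from the sentinel
d.callback("pdu")             # prints: processing 'pdu' in context request-1
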
@ -39,10 +39,10 @@ from synapse.api.room_versions import (
)
from synapse.events import builder, room_version_to_event_format
from synapse.federation.federation_base import FederationBase, event_from_pdu_json
from synapse.util import logcontext, unwrapFirstError
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.logging.utils import log_function
from synapse.util import unwrapFirstError
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.logcontext import make_deferred_yieldable, run_in_background
from synapse.util.logutils import log_function
from synapse.util.retryutils import NotRetryingDestination

logger = logging.getLogger(__name__)

@ -207,7 +207,7 @@ class FederationClient(FederationBase):
        ]

        # FIXME: We should handle signature failures more gracefully.
        pdus[:] = yield logcontext.make_deferred_yieldable(
        pdus[:] = yield make_deferred_yieldable(
            defer.gatherResults(
                self._check_sigs_and_hashes(room_version, pdus), consumeErrors=True
            ).addErrback(unwrapFirstError)

@ -42,6 +42,8 @@ from synapse.federation.federation_base import FederationBase, event_from_pdu_json
from synapse.federation.persistence import TransactionActions
from synapse.federation.units import Edu, Transaction
from synapse.http.endpoint import parse_server_name
from synapse.logging.context import nested_logging_context
from synapse.logging.utils import log_function
from synapse.replication.http.federation import (
    ReplicationFederationSendEduRestServlet,
    ReplicationGetQueryRestServlet,

@ -50,8 +52,6 @@ from synapse.types import get_domain_from_id
from synapse.util import glob_to_regex
from synapse.util.async_helpers import Linearizer, concurrently_execute
from synapse.util.caches.response_cache import ResponseCache
from synapse.util.logcontext import nested_logging_context
from synapse.util.logutils import log_function

# when processing incoming transactions, we try to handle multiple rooms in
# parallel, up to this limit.

@ -21,9 +21,7 @@ These actions are mostly only used by the :py:mod:`.replication` module.

import logging

from twisted.internet import defer

from synapse.util.logutils import log_function
from synapse.logging.utils import log_function

logger = logging.getLogger(__name__)

@ -63,33 +61,3 @@ class TransactionActions(object):
        return self.store.set_received_txn_response(
            transaction.transaction_id, origin, code, response
        )

    @defer.inlineCallbacks
    @log_function
    def prepare_to_send(self, transaction):
        """ Persists the `Transaction` we are about to send and works out the
        correct value for the `prev_ids` key.

        Returns:
            Deferred
        """
        transaction.prev_ids = yield self.store.prep_send_transaction(
            transaction.transaction_id,
            transaction.destination,
            transaction.origin_server_ts,
        )

    @log_function
    def delivered(self, transaction, response_code, response_dict):
        """ Marks the given `Transaction` as having been successfully
        delivered to the remote homeserver, and what the response was.

        Returns:
            Deferred
        """
        return self.store.delivered_txn(
            transaction.transaction_id,
            transaction.destination,
            response_code,
            response_dict,
        )

@ -26,6 +26,11 @@ from synapse.federation.sender.per_destination_queue import PerDestinationQueue
from synapse.federation.sender.transaction_manager import TransactionManager
from synapse.federation.units import Edu
from synapse.handlers.presence import get_interested_remotes
from synapse.logging.context import (
    make_deferred_yieldable,
    preserve_fn,
    run_in_background,
)
from synapse.metrics import (
    LaterGauge,
    event_processing_loop_counter,

@ -33,7 +38,6 @@ from synapse.metrics import (
    events_processed_counter,
)
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.util import logcontext
from synapse.util.metrics import measure_func

logger = logging.getLogger(__name__)

@ -210,10 +214,10 @@ class FederationSender(object):
            for event in events:
                events_by_room.setdefault(event.room_id, []).append(event)

            yield logcontext.make_deferred_yieldable(
            yield make_deferred_yieldable(
                defer.gatherResults(
                    [
                        logcontext.run_in_background(handle_room_events, evs)
                        run_in_background(handle_room_events, evs)
                        for evs in itervalues(events_by_room)
                    ],
                    consumeErrors=True,

@ -360,7 +364,7 @@ class FederationSender(object):
        for queue in queues:
            queue.flush_read_receipts_for_room(room_id)

    @logcontext.preserve_fn  # the caller should not yield on this
    @preserve_fn  # the caller should not yield on this
    @defer.inlineCallbacks
    def send_presence(self, states):
        """Send the new presence states to the appropriate destinations.

@ -63,8 +63,6 @@ class TransactionManager(object):
            len(edus),
        )

        logger.debug("TX [%s] Persisting transaction...", destination)

        transaction = Transaction.create_new(
            origin_server_ts=int(self.clock.time_msec()),
            transaction_id=txn_id,

@ -76,9 +74,6 @@ class TransactionManager(object):

        self._next_txn_id += 1

        yield self._transaction_actions.prepare_to_send(transaction)

        logger.debug("TX [%s] Persisted transaction", destination)
        logger.info(
            "TX [%s] {%s} Sending transaction [%s]," " (PDUs: %d, EDUs: %d)",
            destination,

@ -118,10 +113,6 @@ class TransactionManager(object):

        logger.info("TX [%s] {%s} got %d response", destination, txn_id, code)

        yield self._transaction_actions.delivered(transaction, code, response)

        logger.debug("TX [%s] {%s} Marked as delivered", destination, txn_id)

        if code == 200:
            for e_id, r in response.get("pdus", {}).items():
                if "error" in r:

@ -22,7 +22,7 @@ from twisted.internet import defer

from synapse.api.constants import Membership
from synapse.api.urls import FEDERATION_V1_PREFIX, FEDERATION_V2_PREFIX
from synapse.util.logutils import log_function
from synapse.logging.utils import log_function

logger = logging.getLogger(__name__)

Some files were not shown because too many files have changed in this diff.