Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes

This commit is contained in:
Erik Johnston 2019-06-17 14:10:28 +01:00
commit c08e4dbadc
79 changed files with 1489 additions and 498 deletions

View file

@ -1,21 +0,0 @@
version: '3.1'
services:
postgres:
image: postgres:9.4
environment:
POSTGRES_PASSWORD: postgres
testenv:
image: python:2.7
depends_on:
- postgres
env_file: .env
environment:
SYNAPSE_POSTGRES_HOST: postgres
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
working_dir: /app
volumes:
- ..:/app

View file

@ -1,21 +0,0 @@
version: '3.1'
services:
postgres:
image: postgres:9.5
environment:
POSTGRES_PASSWORD: postgres
testenv:
image: python:2.7
depends_on:
- postgres
env_file: .env
environment:
SYNAPSE_POSTGRES_HOST: postgres
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
working_dir: /app
volumes:
- ..:/app

33
.buildkite/format_tap.py Normal file
View file

@ -0,0 +1,33 @@
import sys
from tap.parser import Parser
from tap.line import Result, Unknown, Diagnostic
out = ["### TAP Output for " + sys.argv[2]]
p = Parser()
in_error = False
for line in p.parse_file(sys.argv[1]):
if isinstance(line, Result):
if in_error:
out.append("")
out.append("</pre></code></details>")
out.append("")
out.append("----")
out.append("")
in_error = False
if not line.ok and not line.todo:
in_error = True
out.append("FAILURE Test #%d: ``%s``" % (line.number, line.description))
out.append("")
out.append("<details><summary>Show log</summary><code><pre>")
elif isinstance(line, Diagnostic) and in_error:
out.append(line.text)
if out:
for line in out[:-3]:
print(line)
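
For reference, a minimal sketch of the tap.py objects this script consumes, assuming tap.py's Parser.parse_line for parsing a single TAP line:

from tap.parser import Parser

p = Parser()
line = p.parse_line("not ok 12 - Remote user can join room")
# Result lines expose the fields the formatter above keys off:
# ok, todo, number and description.
print(line.category, line.ok, line.number)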

View file

@ -1,22 +1,21 @@
#!/usr/bin/env bash
set -e
set -ex
# CircleCI doesn't give CIRCLE_PR_NUMBER in the environment for non-forked PRs. Wonderful.
# In this case, we just need to do some ~shell magic~ to strip it out of the PULL_REQUEST URL.
echo 'export CIRCLE_PR_NUMBER="${CIRCLE_PR_NUMBER:-${CIRCLE_PULL_REQUEST##*/}}"' >> $BASH_ENV
source $BASH_ENV
if [[ "$BUILDKITE_BRANCH" =~ ^(develop|master|dinsic|shhs|release-.*)$ ]]; then
echo "Not merging forward, as this is a release branch"
exit 0
fi
if [[ -z "${CIRCLE_PR_NUMBER}" ]]
then
echo "Can't figure out what the PR number is! Assuming merge target is develop."
if [[ -z $BUILDKITE_PULL_REQUEST_BASE_BRANCH ]]; then
echo "Not a pull request, or hasn't had a PR opened yet..."
# It probably hasn't had a PR opened yet. Since all PRs land on develop, we
# can probably assume it's based on it and will be merged into it.
GITBASE="develop"
else
# Get the reference, using the GitHub API
GITBASE=`wget -O- https://api.github.com/repos/matrix-org/synapse/pulls/${CIRCLE_PR_NUMBER} | jq -r '.base.ref'`
GITBASE=$BUILDKITE_PULL_REQUEST_BASE_BRANCH
fi
# Show what we are before

View file

@ -2,6 +2,7 @@ env:
CODECOV_TOKEN: "2dd7eb9b-0eda-45fe-a47c-9b5ac040045f"
steps:
- command:
- "python -m pip install tox"
- "tox -e pep8"
@ -46,15 +47,16 @@ steps:
- wait
- command:
- "python -m pip install tox"
- "tox -e py27,codecov"
label: ":python: 2.7 / SQLite"
- "tox -e py35-old,codecov"
label: ":python: 3.5 / SQLite / Old Deps"
env:
TRIAL_FLAGS: "-j 2"
plugins:
- docker#v3.0.1:
image: "python:2.7"
image: "python:3.5"
propagate-environment: true
retry:
automatic:
@ -114,57 +116,6 @@ steps:
- exit_status: 2
limit: 2
- command:
- "python -m pip install tox"
- "tox -e py27-old,codecov"
label: ":python: 2.7 / SQLite / Old Deps"
env:
TRIAL_FLAGS: "-j 2"
plugins:
- docker#v3.0.1:
image: "python:2.7"
propagate-environment: true
retry:
automatic:
- exit_status: -1
limit: 2
- exit_status: 2
limit: 2
- label: ":python: 2.7 / :postgres: 9.4"
env:
TRIAL_FLAGS: "-j 4"
command:
- "bash -c 'python -m pip install tox && python -m tox -e py27-postgres,codecov'"
plugins:
- docker-compose#v2.1.0:
run: testenv
config:
- .buildkite/docker-compose.py27.pg94.yaml
retry:
automatic:
- exit_status: -1
limit: 2
- exit_status: 2
limit: 2
- label: ":python: 2.7 / :postgres: 9.5"
env:
TRIAL_FLAGS: "-j 4"
command:
- "bash -c 'python -m pip install tox && python -m tox -e py27-postgres,codecov'"
plugins:
- docker-compose#v2.1.0:
run: testenv
config:
- .buildkite/docker-compose.py27.pg95.yaml
retry:
automatic:
- exit_status: -1
limit: 2
- exit_status: 2
limit: 2
- label: ":python: 3.5 / :postgres: 9.4"
env:
TRIAL_FLAGS: "-j 4"
@ -232,3 +183,61 @@ steps:
limit: 2
- exit_status: 2
limit: 2
- label: "SyTest - :python: 3.5 / SQLite / Monolith"
agents:
queue: "medium"
command:
- "bash .buildkite/merge_base_branch.sh"
- "bash .buildkite/synapse_sytest.sh"
plugins:
- docker#v3.0.1:
image: "matrixdotorg/sytest-synapse:py35"
propagate-environment: true
retry:
automatic:
- exit_status: -1
limit: 2
- exit_status: 2
limit: 2
- label: "SyTest - :python: 3.5 / :postgres: 9.6 / Monolith"
agents:
queue: "medium"
env:
POSTGRES: "1"
command:
- "bash .buildkite/merge_base_branch.sh"
- "bash .buildkite/synapse_sytest.sh"
plugins:
- docker#v3.0.1:
image: "matrixdotorg/sytest-synapse:py35"
propagate-environment: true
retry:
automatic:
- exit_status: -1
limit: 2
- exit_status: 2
limit: 2
- label: "SyTest - :python: 3.5 / :postgres: 9.6 / Workers"
agents:
queue: "medium"
env:
POSTGRES: "1"
WORKERS: "1"
command:
- "bash .buildkite/merge_base_branch.sh"
- "bash .buildkite/synapse_sytest.sh"
plugins:
- docker#v3.0.1:
image: "matrixdotorg/sytest-synapse:py35"
propagate-environment: true
soft_fail: true
retry:
automatic:
- exit_status: -1
limit: 2
- exit_status: 2
limit: 2

View file

@ -0,0 +1,145 @@
#!/bin/bash
#
# Fetch sytest, and then run the tests for synapse. The entrypoint for the
# sytest-synapse docker images.
set -ex
if [ -n "$BUILDKITE" ]
then
SYNAPSE_DIR=`pwd`
else
SYNAPSE_DIR="/src"
fi
# Attempt to find a sytest to use.
# If /sytest exists, it means that a SyTest checkout has been mounted into the Docker image.
if [ -d "/sytest" ]; then
# If the user has mounted in a SyTest checkout, use that.
echo "Using local sytests..."
# create ourselves a working directory and dos2unix some scripts therein
mkdir -p /work/jenkins
for i in install-deps.pl run-tests.pl tap-to-junit-xml.pl jenkins/prep_sytest_for_postgres.sh; do
dos2unix -n "/sytest/$i" "/work/$i"
done
ln -sf /sytest/tests /work
ln -sf /sytest/keys /work
SYTEST_LIB="/sytest/lib"
else
if [ -n "BUILDKITE_BRANCH" ]
then
branch_name=$BUILDKITE_BRANCH
else
# Otherwise, try to find out what branch the Synapse checkout is using. Fall back to develop if it's not a branch.
branch_name="$(git --git-dir=/src/.git symbolic-ref HEAD 2>/dev/null)" || branch_name="develop"
fi
# Try and fetch the branch
echo "Trying to get same-named sytest branch..."
wget -q https://github.com/matrix-org/sytest/archive/$branch_name.tar.gz -O sytest.tar.gz || {
# Probably a 404, fall back to develop
echo "Using develop instead..."
wget -q https://github.com/matrix-org/sytest/archive/develop.tar.gz -O sytest.tar.gz
}
mkdir -p /work
tar -C /work --strip-components=1 -xf sytest.tar.gz
SYTEST_LIB="/work/lib"
fi
cd /work
# PostgreSQL setup
if [ -n "$POSTGRES" ]
then
export PGUSER=postgres
export POSTGRES_DB_1=pg1
export POSTGRES_DB_2=pg2
# Start the database
su -c 'eatmydata /usr/lib/postgresql/9.6/bin/pg_ctl -w -D /var/lib/postgresql/data start' postgres
# Use the Jenkins script to write out the configuration for Synapse using PostgreSQL
jenkins/prep_sytest_for_postgres.sh
# Make the test databases for the two Synapse servers that will be spun up
su -c 'psql -c "CREATE DATABASE pg1;"' postgres
su -c 'psql -c "CREATE DATABASE pg2;"' postgres
fi
if [ -n "$OFFLINE" ]; then
# if we're in offline mode, just put synapse into the virtualenv, and
# hope that the deps are up-to-date.
#
# (`pip install -e` likes to reinstall setuptools even if it's already installed,
# so we just run setup.py explicitly.)
#
(cd $SYNAPSE_DIR && /venv/bin/python setup.py -q develop)
else
# We've already created the virtualenv, but let's double check we have all
# deps.
/venv/bin/pip install -q --upgrade --no-cache-dir -e $SYNAPSE_DIR
/venv/bin/pip install -q --upgrade --no-cache-dir \
lxml psycopg2 coverage codecov tap.py
# Make sure all Perl deps are installed -- this is done in the docker build
# so will only install packages added since the last Docker build
./install-deps.pl
fi
# Run the tests
>&2 echo "+++ Running tests"
RUN_TESTS=(
perl -I "$SYTEST_LIB" ./run-tests.pl --python=/venv/bin/python --synapse-directory=$SYNAPSE_DIR --coverage -O tap --all
)
TEST_STATUS=0
if [ -n "$WORKERS" ]; then
RUN_TESTS+=(-I Synapse::ViaHaproxy --dendron-binary=/pydron.py)
else
RUN_TESTS+=(-I Synapse)
fi
"${RUN_TESTS[@]}" "$@" > results.tap || TEST_STATUS=$?
if [ $TEST_STATUS -ne 0 ]; then
>&2 echo -e "run-tests \e[31mFAILED\e[0m: exit code $TEST_STATUS"
else
>&2 echo -e "run-tests \e[32mPASSED\e[0m"
fi
>&2 echo "--- Copying assets"
# Copy out the logs
mkdir -p /logs
cp results.tap /logs/results.tap
rsync --ignore-missing-args --min-size=1B -av server-0 server-1 /logs --include "*/" --include="*.log.*" --include="*.log" --exclude="*"
# Upload coverage to codecov and upload files, if running on Buildkite
if [ -n "$BUILDKITE" ]
then
/venv/bin/coverage combine || true
/venv/bin/coverage xml || true
/venv/bin/codecov -X gcov -f coverage.xml
wget -O buildkite.tar.gz https://github.com/buildkite/agent/releases/download/v3.13.0/buildkite-agent-linux-amd64-3.13.0.tar.gz
tar xvf buildkite.tar.gz
chmod +x ./buildkite-agent
# Upload the files
./buildkite-agent artifact upload "/logs/**/*.log*"
./buildkite-agent artifact upload "/logs/results.tap"
if [ $TEST_STATUS -ne 0 ]; then
# Annotate, if failure
/venv/bin/python $SYNAPSE_DIR/.buildkite/format_tap.py /logs/results.tap "$BUILDKITE_LABEL" | ./buildkite-agent annotate --style="error" --context="$BUILDKITE_LABEL"
fi
fi
exit $TEST_STATUS

View file

@ -4,160 +4,23 @@ jobs:
machine: true
steps:
- checkout
- run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:${CIRCLE_TAG}-py2 .
- run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:${CIRCLE_TAG} -t matrixdotorg/synapse:${CIRCLE_TAG}-py3 --build-arg PYTHON_VERSION=3.6 .
- run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
- run: docker push matrixdotorg/synapse:${CIRCLE_TAG}
- run: docker push matrixdotorg/synapse:${CIRCLE_TAG}-py2
- run: docker push matrixdotorg/synapse:${CIRCLE_TAG}-py3
dockerhubuploadlatest:
machine: true
steps:
- checkout
- run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:latest-py2 .
- run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:latest -t matrixdotorg/synapse:latest-py3 --build-arg PYTHON_VERSION=3.6 .
- run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
- run: docker push matrixdotorg/synapse:latest
- run: docker push matrixdotorg/synapse:latest-py2
- run: docker push matrixdotorg/synapse:latest-py3
sytestpy2:
docker:
- image: matrixdotorg/sytest-synapsepy2
working_directory: /src
steps:
- checkout
- run: /synapse_sytest.sh
- store_artifacts:
path: /logs
destination: logs
- store_test_results:
path: /logs
sytestpy2postgres:
docker:
- image: matrixdotorg/sytest-synapsepy2
working_directory: /src
steps:
- checkout
- run: POSTGRES=1 /synapse_sytest.sh
- store_artifacts:
path: /logs
destination: logs
- store_test_results:
path: /logs
sytestpy2merged:
docker:
- image: matrixdotorg/sytest-synapsepy2
working_directory: /src
steps:
- checkout
- run: bash .circleci/merge_base_branch.sh
- run: /synapse_sytest.sh
- store_artifacts:
path: /logs
destination: logs
- store_test_results:
path: /logs
sytestpy2postgresmerged:
docker:
- image: matrixdotorg/sytest-synapsepy2
working_directory: /src
steps:
- checkout
- run: bash .circleci/merge_base_branch.sh
- run: POSTGRES=1 /synapse_sytest.sh
- store_artifacts:
path: /logs
destination: logs
- store_test_results:
path: /logs
sytestpy3:
docker:
- image: matrixdotorg/sytest-synapsepy3
working_directory: /src
steps:
- checkout
- run: /synapse_sytest.sh
- store_artifacts:
path: /logs
destination: logs
- store_test_results:
path: /logs
sytestpy3postgres:
docker:
- image: matrixdotorg/sytest-synapsepy3
working_directory: /src
steps:
- checkout
- run: POSTGRES=1 /synapse_sytest.sh
- store_artifacts:
path: /logs
destination: logs
- store_test_results:
path: /logs
sytestpy3merged:
docker:
- image: matrixdotorg/sytest-synapsepy3
working_directory: /src
steps:
- checkout
- run: bash .circleci/merge_base_branch.sh
- run: /synapse_sytest.sh
- store_artifacts:
path: /logs
destination: logs
- store_test_results:
path: /logs
sytestpy3postgresmerged:
docker:
- image: matrixdotorg/sytest-synapsepy3
working_directory: /src
steps:
- checkout
- run: bash .circleci/merge_base_branch.sh
- run: POSTGRES=1 /synapse_sytest.sh
- store_artifacts:
path: /logs
destination: logs
- store_test_results:
path: /logs
workflows:
version: 2
build:
jobs:
- sytestpy2:
filters:
branches:
only: /develop|master|release-.*/
- sytestpy2postgres:
filters:
branches:
only: /develop|master|release-.*/
- sytestpy3:
filters:
branches:
only: /develop|master|release-.*/
- sytestpy3postgres:
filters:
branches:
only: /develop|master|release-.*/
- sytestpy2merged:
filters:
branches:
ignore: /develop|master|release-.*/
- sytestpy2postgresmerged:
filters:
branches:
ignore: /develop|master|release-.*/
- sytestpy3merged:
filters:
branches:
ignore: /develop|master|release-.*/
- sytestpy3postgresmerged:
filters:
branches:
ignore: /develop|master|release-.*/
- dockerhubuploadrelease:
filters:
tags:

4
.github/FUNDING.yml vendored Normal file
View file

@ -0,0 +1,4 @@
# One username per supported platform and one custom link
patreon: matrixdotorg
liberapay: matrixdotorg
custom: https://paypal.me/matrixdotorg

1
changelog.d/5252.feature Normal file
View file

@ -0,0 +1 @@
Add monthly active users to phonehome stats.

1
changelog.d/5325.bugfix Normal file
View file

@ -0,0 +1 @@
Fix a bug where running synapse_port_db would cause the account validity feature to fail because it didn't set the type of the email_sent column to boolean.

1
changelog.d/5363.feature Normal file
View file

@ -0,0 +1 @@
Allow expired users to trigger renewal email sending manually.

1
changelog.d/5378.misc Normal file
View file

@ -0,0 +1 @@
Track deactivated accounts in the database.

1
changelog.d/5381.misc Normal file
View file

@ -0,0 +1 @@
Clean up code for sending federation EDUs.

1
changelog.d/5382.misc Normal file
View file

@ -0,0 +1 @@
Add a sponsor button to the repo.

1
changelog.d/5383.misc Normal file
View file

@ -0,0 +1 @@
Don't log non-200 responses from federation queries as exceptions.

1
changelog.d/5384.feature Normal file
View file

@ -0,0 +1 @@
Statistics on forward extremities per room are now exposed via Prometheus.

1
changelog.d/5386.misc Normal file
View file

@ -0,0 +1 @@
Add a sponsor button to the repo.

1
changelog.d/5387.bugfix Normal file
View file

@ -0,0 +1 @@
Warn about disabling email-based password resets when a reset occurs, and remove warning when someone attempts a phone-based reset.

1
changelog.d/5388.bugfix Normal file
View file

@ -0,0 +1 @@
Fix email notifications for unnamed rooms with multiple people.

1
changelog.d/5389.bugfix Normal file
View file

@ -0,0 +1 @@
Fix exceptions in the federation reader worker caused by attempting to renew attestations, which should only happen on the master worker.

1
changelog.d/5390.bugfix Normal file
View file

@ -0,0 +1 @@
Fix handling of failures when fetching remote content so that failures are not logged as exceptions.

1
changelog.d/5394.bugfix Normal file
View file

@ -0,0 +1 @@
Fix a bug where deactivated users could receive renewal emails if the account validity feature is on.

1
changelog.d/5412.feature Normal file
View file

@ -0,0 +1 @@
Add --no-daemonize option to run synapse in the foreground, per issue #4130. Contributed by Soham Gumaste.

1
changelog.d/5425.removal Normal file
View file

@ -0,0 +1 @@
Python 2.7 is no longer a supported platform. Synapse now requires Python 3.5+ to run.

1
changelog.d/5440.feature Normal file
View file

@ -0,0 +1 @@
Allow server admins to define implementations of extra rules for allowing or denying incoming events.

1
changelog.d/5447.misc Normal file
View file

@ -0,0 +1 @@
Update federation_client dev script to support `.well-known` and work with python3.

1
changelog.d/5458.feature Normal file
View file

@ -0,0 +1 @@
Statistics on forward extremities per room are now exposed via Prometheus.

1
changelog.d/5459.misc Normal file
View file

@ -0,0 +1 @@
SyTest has been moved to Buildkite.

1
changelog.d/5460.misc Normal file
View file

@ -0,0 +1 @@
Demo script now uses python3.

1
changelog.d/5461.feature Normal file
View file

@ -0,0 +1 @@
Statistics on forward extremities per room are now exposed via Prometheus.

1
changelog.d/5464.bugfix Normal file
View file

@ -0,0 +1 @@
Fix missing invite state after exchanging 3PID invites over federation.

2
changelog.d/5465.misc Normal file
View file

@ -0,0 +1,2 @@
Track deactivated accounts in the database.

View file

@ -21,7 +21,7 @@ for port in 8080 8081 8082; do
pushd demo/$port
#rm $DIR/etc/$port.config
python -m synapse.app.homeserver \
python3 -m synapse.app.homeserver \
--generate-config \
-H "localhost:$https_port" \
--config-path "$DIR/etc/$port.config" \
@ -55,7 +55,7 @@ for port in 8080 8081 8082; do
echo "report_stats: false" >> $DIR/etc/$port.config
fi
python -m synapse.app.homeserver \
python3 -m synapse.app.homeserver \
--config-path "$DIR/etc/$port.config" \
-D \
-vv \

View file

@ -14,7 +14,7 @@ This image is designed to run either with an automatically generated
configuration file or with a custom configuration that requires manual editing.
An easy way to make use of this image is via docker-compose. See the
[contrib/docker](../contrib/docker) section of the synapse project for
[contrib/docker](https://github.com/matrix-org/synapse/tree/master/contrib/docker) section of the synapse project for
examples.
### Without Compose (harder)

View file

@ -1351,3 +1351,16 @@ password_config:
# alias: "*"
# room_id: "*"
# action: allow
# Server admins can define a Python module that implements extra rules for
# allowing or denying incoming events. In order to work, this module needs to
# override the methods defined in synapse/events/third_party_rules.py.
#
# This feature is designed to be used in closed federations only, where each
# participating server enforces the same rules.
#
#third_party_event_rules:
# module: "my_custom_project.SuperRulesSet"
# config:
# example_option: 'things'

View file

@ -21,7 +21,8 @@ import argparse
import base64
import json
import sys
from urlparse import urlparse, urlunparse
from six.moves.urllib import parse as urlparse
import nacl.signing
import requests
@ -145,7 +146,7 @@ def request_json(method, origin_name, origin_key, destination, path, content):
for key, sig in signed_json["signatures"][origin_name].items():
header = "X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % (origin_name, key, sig)
authorization_headers.append(bytes(header))
authorization_headers.append(header.encode("ascii"))
print("Authorization: %s" % header, file=sys.stderr)
dest = "matrix://%s%s" % (destination, path)
@ -250,7 +251,7 @@ def read_args_from_config(args):
class MatrixConnectionAdapter(HTTPAdapter):
@staticmethod
def lookup(s):
def lookup(s, skip_well_known=False):
if s[-1] == ']':
# ipv6 literal (with no port)
return s, 8448
@ -263,19 +264,51 @@ class MatrixConnectionAdapter(HTTPAdapter):
raise ValueError("Invalid host:port '%s'" % s)
return out[0], port
# try a .well-known lookup
if not skip_well_known:
well_known = MatrixConnectionAdapter.get_well_known(s)
if well_known:
return MatrixConnectionAdapter.lookup(
well_known, skip_well_known=True
)
try:
srv = srvlookup.lookup("matrix", "tcp", s)[0]
return srv.host, srv.port
except Exception:
return s, 8448
@staticmethod
def get_well_known(server_name):
uri = "https://%s/.well-known/matrix/server" % (server_name, )
print("fetching %s" % (uri, ), file=sys.stderr)
try:
resp = requests.get(uri)
if resp.status_code != 200:
print("%s gave %i" % (uri, resp.status_code), file=sys.stderr)
return None
parsed_well_known = resp.json()
if not isinstance(parsed_well_known, dict):
raise Exception("not a dict")
if "m.server" not in parsed_well_known:
raise Exception("Missing key 'm.server'")
new_name = parsed_well_known['m.server']
print("well-known lookup gave %s" % (new_name, ), file=sys.stderr)
return new_name
except Exception as e:
print("Invalid response from %s: %s" % (uri, e, ), file=sys.stderr)
return None
def get_connection(self, url, proxies=None):
parsed = urlparse(url)
parsed = urlparse.urlparse(url)
(host, port) = self.lookup(parsed.netloc)
netloc = "%s:%d" % (host, port)
print("Connecting to %s" % (netloc,), file=sys.stderr)
url = urlunparse(
url = urlparse.urlunparse(
("https", netloc, parsed.path, parsed.params, parsed.query, parsed.fragment)
)
return super(MatrixConnectionAdapter, self).get_connection(url, proxies)
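
A hedged usage sketch (the mounting pattern matches how this dev script drives the adapter; the URL is illustrative): mounting the adapter on a requests Session routes matrix:// URLs through the .well-known/SRV lookup above before get_connection() rewrites them to https.

import requests

s = requests.Session()
s.mount("matrix://", MatrixConnectionAdapter())
# destination resolution and the https rewrite happen in get_connection()
resp = s.get("matrix://example.org/_matrix/federation/v1/version", verify=False)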

View file

@ -16,7 +16,7 @@
import argparse
import sys
from signedjson.key import write_signing_keys, generate_signing_key
from signedjson.key import generate_signing_key, write_signing_keys
from synapse.util.stringutils import random_string

View file

@ -54,6 +54,7 @@ BOOLEAN_COLUMNS = {
"group_roles": ["is_public"],
"local_group_membership": ["is_publicised", "is_admin"],
"e2e_room_keys": ["is_verified"],
"account_validity": ["email_sent"],
}

View file

@ -102,6 +102,16 @@ setup(
include_package_data=True,
zip_safe=False,
long_description=long_description,
python_requires='~=3.5',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Topic :: Communications :: Chat',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
scripts=["synctl"] + glob.glob("scripts/*"),
cmdclass={'test': TestCommand},
)

View file

@ -17,6 +17,13 @@
""" This is a reference implementation of a Matrix home server.
"""
import sys
# Check that we're not running on an unsupported Python version.
if sys.version_info < (3, 5):
print("Synapse requires Python 3.5 or above.")
sys.exit(1)
try:
from twisted.internet import protocol
from twisted.internet.protocol import Factory

View file

@ -184,11 +184,22 @@ class Auth(object):
return event_auth.get_public_keys(invite_event)
@defer.inlineCallbacks
def get_user_by_req(self, request, allow_guest=False, rights="access"):
def get_user_by_req(
self,
request,
allow_guest=False,
rights="access",
allow_expired=False,
):
""" Get a registered user's ID.
Args:
request - An HTTP request with an access_token query parameter.
allow_expired - Whether to allow the request through even if the account is
expired. If true, Synapse will still require an access token to be
provided but won't check if the account it belongs to has expired. This
works thanks to /login delivering access tokens regardless of accounts'
expiration.
Returns:
defer.Deferred: resolves to a ``synapse.types.Requester`` object
Raises:
@ -229,7 +240,7 @@ class Auth(object):
is_guest = user_info["is_guest"]
# Deny the request if the user account has expired.
if self._account_validity.enabled:
if self._account_validity.enabled and not allow_expired:
user_id = user.to_string()
expiration_ts = yield self.store.get_expiration_ts_for_user(user_id)
if expiration_ts is not None and self.clock.time_msec() >= expiration_ts:

View file

@ -540,6 +540,7 @@ def run(hs):
stats["total_room_count"] = room_count
stats["daily_active_users"] = yield hs.get_datastore().count_daily_users()
stats["monthly_active_users"] = yield hs.get_datastore().count_monthly_users()
stats["daily_active_rooms"] = yield hs.get_datastore().count_daily_active_rooms()
stats["daily_messages"] = yield hs.get_datastore().count_daily_messages()

View file

@ -19,15 +19,12 @@ from __future__ import print_function
# This file can't be called email.py because if it is, we cannot:
import email.utils
import logging
import os
import pkg_resources
from ._base import Config, ConfigError
logger = logging.getLogger(__name__)
class EmailConfig(Config):
def read_config(self, config):
@ -85,10 +82,12 @@ class EmailConfig(Config):
self.email_password_reset_behaviour = (
"remote" if email_trust_identity_server_for_password_resets else "local"
)
self.password_resets_were_disabled_due_to_email_config = False
if self.email_password_reset_behaviour == "local" and email_config == {}:
logger.warn(
"User password resets have been disabled due to lack of email config"
)
# We cannot warn the user this has happened here
# Instead do so when a user attempts to reset their password
self.password_resets_were_disabled_due_to_email_config = True
self.email_password_reset_behaviour = "off"
# Get lifetime of a validation token in milliseconds

View file

@ -38,6 +38,7 @@ from .server import ServerConfig
from .server_notices_config import ServerNoticesConfig
from .spam_checker import SpamCheckerConfig
from .stats import StatsConfig
from .third_party_event_rules import ThirdPartyRulesConfig
from .tls import TlsConfig
from .user_directory import UserDirectoryConfig
from .voip import VoipConfig
@ -73,5 +74,6 @@ class HomeServerConfig(
StatsConfig,
ServerNoticesConfig,
RoomDirectoryConfig,
ThirdPartyRulesConfig,
):
pass

View file

@ -0,0 +1,42 @@
# -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.util.module_loader import load_module
from ._base import Config
class ThirdPartyRulesConfig(Config):
def read_config(self, config):
self.third_party_event_rules = None
provider = config.get("third_party_event_rules", None)
if provider is not None:
self.third_party_event_rules = load_module(provider)
def default_config(self, **kwargs):
return """\
# Server admins can define a Python module that implements extra rules for
# allowing or denying incoming events. In order to work, this module needs to
# override the methods defined in synapse/events/third_party_rules.py.
#
# This feature is designed to be used in closed federations only, where each
# participating server enforces the same rules.
#
#third_party_event_rules:
# module: "my_custom_project.SuperRulesSet"
# config:
# example_option: 'things'
"""

View file

@ -0,0 +1,62 @@
# -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet import defer
class ThirdPartyEventRules(object):
"""Allows server admins to provide a Python module implementing an extra set of rules
to apply when processing events.
This is designed to help admins of closed federations with enforcing custom
behaviours.
"""
def __init__(self, hs):
self.third_party_rules = None
self.store = hs.get_datastore()
module = None
config = None
if hs.config.third_party_event_rules:
module, config = hs.config.third_party_event_rules
if module is not None:
self.third_party_rules = module(config=config)
@defer.inlineCallbacks
def check_event_allowed(self, event, context):
"""Check if a provided event should be allowed in the given context.
Args:
event (synapse.events.EventBase): The event to be checked.
context (synapse.events.snapshot.EventContext): The context of the event.
Returns:
defer.Deferred(bool), True if the event should be allowed, False if not.
"""
if self.third_party_rules is None:
defer.returnValue(True)
prev_state_ids = yield context.get_prev_state_ids(self.store)
# Retrieve the state events from the database.
state_events = {}
for key, event_id in prev_state_ids.items():
state_events[key] = yield self.store.get_event(event_id, allow_none=True)
ret = yield self.third_party_rules.check_event_allowed(event, state_events)
defer.returnValue(ret)
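
For illustration, a minimal sketch of a rules module this class could load. The class name and event type are invented, but the constructor and check_event_allowed signatures match the calls above (module(config=config), then check_event_allowed(event, state_events)):

class ExampleRulesSet(object):
    def __init__(self, config):
        # Receives the "config" mapping from the third_party_event_rules
        # section of the homeserver config.
        self.config = config

    def check_event_allowed(self, event, state_events):
        # state_events maps (event_type, state_key) to the room's current
        # state events; returning False makes Synapse reject the event
        # with 403 FORBIDDEN, as the handler changes in this diff show.
        return event.type != "com.example.forbidden"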

View file

@ -189,11 +189,21 @@ class PerDestinationQueue(object):
pending_pdus = []
while True:
device_message_edus, device_stream_id, dev_list_id = (
# We have to keep 2 free slots for presence and rr_edus
yield self._get_new_device_messages(MAX_EDUS_PER_TRANSACTION - 2)
# We have to keep 2 free slots for presence and rr_edus
limit = MAX_EDUS_PER_TRANSACTION - 2
device_update_edus, dev_list_id = (
yield self._get_device_update_edus(limit)
)
limit -= len(device_update_edus)
to_device_edus, device_stream_id = (
yield self._get_to_device_message_edus(limit)
)
pending_edus = device_update_edus + to_device_edus
# BEGIN CRITICAL SECTION
#
# In order to avoid a race condition, we need to make sure that
@ -208,10 +218,6 @@ class PerDestinationQueue(object):
# We can only include at most 50 PDUs per transaction
pending_pdus, self._pending_pdus = pending_pdus[:50], pending_pdus[50:]
pending_edus = []
# We can only include at most 100 EDUs per transaction
# rr_edus and pending_presence take at most one slot each
pending_edus.extend(self._get_rr_edus(force_flush=False))
pending_presence = self._pending_presence
self._pending_presence = {}
@ -232,7 +238,6 @@ class PerDestinationQueue(object):
)
)
pending_edus.extend(device_message_edus)
pending_edus.extend(
self._pop_pending_edus(MAX_EDUS_PER_TRANSACTION - len(pending_edus))
)
@ -272,10 +277,13 @@ class PerDestinationQueue(object):
sent_edus_by_type.labels(edu.edu_type).inc()
# Remove the acknowledged device messages from the database
# Only bother if we actually sent some device messages
if device_message_edus:
if to_device_edus:
yield self._store.delete_device_msgs_for_remote(
self._destination, device_stream_id
)
# also mark the device updates as sent
if device_update_edus:
logger.info(
"Marking as sent %r %r", self._destination, dev_list_id
)
@ -347,7 +355,7 @@ class PerDestinationQueue(object):
return pending_edus
@defer.inlineCallbacks
def _get_new_device_messages(self, limit):
def _get_device_update_edus(self, limit):
last_device_list = self._last_device_list_stream_id
# Retrieve list of new device updates to send to the destination
@ -366,15 +374,19 @@ class PerDestinationQueue(object):
assert len(edus) <= limit, "get_devices_by_remote returned too many EDUs"
defer.returnValue((edus, now_stream_id))
@defer.inlineCallbacks
def _get_to_device_message_edus(self, limit):
last_device_stream_id = self._last_device_stream_id
to_device_stream_id = self._store.get_to_device_stream_token()
contents, stream_id = yield self._store.get_new_device_msgs_for_remote(
self._destination,
last_device_stream_id,
to_device_stream_id,
limit - len(edus),
limit,
)
edus.extend(
edus = [
Edu(
origin=self._server_name,
destination=self._destination,
@ -382,6 +394,6 @@ class PerDestinationQueue(object):
content=content,
)
for content in contents
)
]
defer.returnValue((edus, stream_id, now_stream_id))
defer.returnValue((edus, stream_id))
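
The new flow spends the EDU budget on device-list updates first and hands the remainder to to-device messages. A toy illustration of the arithmetic, using the transaction limit of 100 from this file (the EDU lists are stand-ins):

MAX_EDUS_PER_TRANSACTION = 100

limit = MAX_EDUS_PER_TRANSACTION - 2         # two slots kept for presence + rr_edus
device_update_edus = ["device-update"] * 30  # pretend 30 device-list EDUs were pending
limit -= len(device_update_edus)             # 68 slots remain for to-device messages
to_device_edus = ["to-device"] * min(limit, 200)
assert len(device_update_edus) + len(to_device_edus) <= MAX_EDUS_PER_TRANSACTION - 2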

View file

@ -42,7 +42,7 @@ from signedjson.sign import sign_json
from twisted.internet import defer
from synapse.api.errors import RequestSendFailed, SynapseError
from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import get_domain_from_id
from synapse.util.logcontext import run_in_background
@ -132,9 +132,10 @@ class GroupAttestionRenewer(object):
self.is_mine_id = hs.is_mine_id
self.attestations = hs.get_groups_attestation_signing()
self._renew_attestations_loop = self.clock.looping_call(
self._start_renew_attestations, 30 * 60 * 1000,
)
if not hs.config.worker_app:
self._renew_attestations_loop = self.clock.looping_call(
self._start_renew_attestations, 30 * 60 * 1000,
)
@defer.inlineCallbacks
def on_renew_attestation(self, group_id, user_id, content):
@ -194,7 +195,7 @@ class GroupAttestionRenewer(object):
yield self.store.update_attestation_renewal(
group_id, user_id, attestation
)
except RequestSendFailed as e:
except (RequestSendFailed, HttpResponseException) as e:
logger.warning(
"Failed to renew attestation of %r in %r: %s",
user_id, group_id, e,

View file

@ -110,6 +110,9 @@ class AccountValidityHandler(object):
# Stop right here if the user doesn't have at least one email address.
# In this case, they will have to ask their server admin to renew their
# account manually.
# We don't need to do a specific check to make sure the account isn't
# deactivated, as a deactivated account isn't supposed to have any
# email address attached to it.
if not addresses:
return

View file

@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2017, 2018 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -42,6 +43,8 @@ class DeactivateAccountHandler(BaseHandler):
# it left off (if it has work left to do).
hs.get_reactor().callWhenRunning(self._start_user_parting)
self._account_validity_enabled = hs.config.account_validity.enabled
@defer.inlineCallbacks
def deactivate_account(self, user_id, erase_data, id_server=None):
"""Deactivate a user's account
@ -114,6 +117,13 @@ class DeactivateAccountHandler(BaseHandler):
# parts users from rooms (if it isn't already running)
self._start_user_parting()
# Remove all information on the user from the account_validity table.
if self._account_validity_enabled:
yield self.store.delete_account_validity_for_user(user_id)
# Mark the user as deactivated.
yield self.store.set_user_deactivated_status(user_id, True)
defer.returnValue(identity_server_supports_unbinding)
def _start_user_parting(self):

View file

@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018 New Vector Ltd
# Copyright 2017-2018 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -33,6 +34,7 @@ from synapse.api.constants import EventTypes, Membership, RejectedReason
from synapse.api.errors import (
AuthError,
CodeMessageException,
Codes,
FederationDeniedError,
FederationError,
RequestSendFailed,
@ -127,6 +129,8 @@ class FederationHandler(BaseHandler):
self.room_queues = {}
self._room_pdu_linearizer = Linearizer("fed_room_pdu")
self.third_party_event_rules = hs.get_third_party_event_rules()
@defer.inlineCallbacks
def on_receive_pdu(
self, origin, pdu, sent_to_us_directly=False,
@ -1258,6 +1262,15 @@ class FederationHandler(BaseHandler):
logger.warn("Failed to create join %r because %s", event, e)
raise e
event_allowed = yield self.third_party_event_rules.check_event_allowed(
event, context,
)
if not event_allowed:
logger.info("Creation of join %s forbidden by third-party rules", event)
raise SynapseError(
403, "This event is not allowed in this context", Codes.FORBIDDEN,
)
# The remote hasn't signed it yet, obviously. We'll do the full checks
# when we get the event back in `on_send_join_request`
yield self.auth.check_from_context(
@ -1300,6 +1313,15 @@ class FederationHandler(BaseHandler):
origin, event
)
event_allowed = yield self.third_party_event_rules.check_event_allowed(
event, context,
)
if not event_allowed:
logger.info("Sending of join %s forbidden by third-party rules", event)
raise SynapseError(
403, "This event is not allowed in this context", Codes.FORBIDDEN,
)
logger.debug(
"on_send_join_request: After _handle_new_event: %s, sigs: %s",
event.event_id,
@ -1458,6 +1480,15 @@ class FederationHandler(BaseHandler):
builder=builder,
)
event_allowed = yield self.third_party_event_rules.check_event_allowed(
event, context,
)
if not event_allowed:
logger.warning("Creation of leave %s forbidden by third-party rules", event)
raise SynapseError(
403, "This event is not allowed in this context", Codes.FORBIDDEN,
)
try:
# The remote hasn't signed it yet, obviously. We'll do the full checks
# when we get the event back in `on_send_leave_request`
@ -1484,10 +1515,19 @@ class FederationHandler(BaseHandler):
event.internal_metadata.outlier = False
yield self._handle_new_event(
context = yield self._handle_new_event(
origin, event
)
event_allowed = yield self.third_party_event_rules.check_event_allowed(
event, context,
)
if not event_allowed:
logger.info("Sending of leave %s forbidden by third-party rules", event)
raise SynapseError(
403, "This event is not allowed in this context", Codes.FORBIDDEN,
)
logger.debug(
"on_send_leave_request: After _handle_new_event: %s, sigs: %s",
event.event_id,
@ -2550,6 +2590,18 @@ class FederationHandler(BaseHandler):
builder=builder
)
event_allowed = yield self.third_party_event_rules.check_event_allowed(
event, context,
)
if not event_allowed:
logger.info(
"Creation of threepid invite %s forbidden by third-party rules",
event,
)
raise SynapseError(
403, "This event is not allowed in this context", Codes.FORBIDDEN,
)
event, context = yield self.add_display_name_to_third_party_invite(
room_version, event_dict, event, context
)
@ -2598,6 +2650,18 @@ class FederationHandler(BaseHandler):
builder=builder,
)
event_allowed = yield self.third_party_event_rules.check_event_allowed(
event, context,
)
if not event_allowed:
logger.warning(
"Exchange of threepid invite %s forbidden by third-party rules",
event,
)
raise SynapseError(
403, "This event is not allowed in this context", Codes.FORBIDDEN,
)
event, context = yield self.add_display_name_to_third_party_invite(
room_version, event_dict, event, context
)
@ -2613,12 +2677,6 @@ class FederationHandler(BaseHandler):
# though the sender isn't a local user.
event.internal_metadata.send_on_behalf_of = get_domain_from_id(event.sender)
# XXX we send the invite here, but send_membership_event also sends it,
# so we end up making two requests. I think this is redundant.
returned_invite = yield self.send_invite(origin, event)
# TODO: Make sure the signatures actually are correct.
event.signatures.update(returned_invite.signatures)
member_handler = self.hs.get_room_member_handler()
yield member_handler.send_membership_event(None, event, context)

View file

@ -49,9 +49,7 @@ def _create_rerouter(func_name):
def http_response_errback(failure):
failure.trap(HttpResponseException)
e = failure.value
if e.code == 403:
raise e.to_synapse_error()
return failure
raise e.to_synapse_error()
def request_failed_errback(failure):
failure.trap(RequestSendFailed)

View file

@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# Copyright 2014 - 2016 OpenMarket Ltd
# Copyright 2017 - 2018 New Vector Ltd
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2017-2018 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -248,6 +249,7 @@ class EventCreationHandler(object):
self.action_generator = hs.get_action_generator()
self.spam_checker = hs.get_spam_checker()
self.third_party_event_rules = hs.get_third_party_event_rules()
self._block_events_without_consent_error = (
self.config.block_events_without_consent_error
@ -658,6 +660,14 @@ class EventCreationHandler(object):
else:
room_version = yield self.store.get_room_version(event.room_id)
event_allowed = yield self.third_party_event_rules.check_event_allowed(
event, context,
)
if not event_allowed:
raise SynapseError(
403, "This event is not allowed in this context", Codes.FORBIDDEN,
)
try:
yield self.auth.check_from_context(room_version, event, context)
except AuthError as err:

View file

@ -15,12 +15,15 @@
import logging
from six import raise_from
from twisted.internet import defer
from synapse.api.errors import (
AuthError,
CodeMessageException,
Codes,
HttpResponseException,
RequestSendFailed,
StoreError,
SynapseError,
)
@ -85,10 +88,10 @@ class BaseProfileHandler(BaseHandler):
ignore_backoff=True,
)
defer.returnValue(result)
except CodeMessageException as e:
if e.code != 404:
logger.exception("Failed to get displayname")
raise
except RequestSendFailed as e:
raise_from(SynapseError(502, "Failed to fetch profile"), e)
except HttpResponseException as e:
raise e.to_synapse_error()
@defer.inlineCallbacks
def get_profile_from_cache(self, user_id):
@ -142,10 +145,10 @@ class BaseProfileHandler(BaseHandler):
},
ignore_backoff=True,
)
except CodeMessageException as e:
if e.code != 404:
logger.exception("Failed to get displayname")
raise
except RequestSendFailed as e:
raise_from(SynapseError(502, "Failed to fetch profile"), e)
except HttpResponseException as e:
raise e.to_synapse_error()
defer.returnValue(result["displayname"])
@ -208,10 +211,10 @@ class BaseProfileHandler(BaseHandler):
},
ignore_backoff=True,
)
except CodeMessageException as e:
if e.code != 404:
logger.exception("Failed to get avatar_url")
raise
except RequestSendFailed as e:
raise_from(SynapseError(502, "Failed to fetch profile"), e)
except HttpResponseException as e:
raise e.to_synapse_error()
defer.returnValue(result["avatar_url"])

View file

@ -17,7 +17,7 @@
import logging
from io import BytesIO
from six import text_type
from six import raise_from, text_type
from six.moves import urllib
import treq
@ -542,10 +542,15 @@ class SimpleHttpClient(object):
length = yield make_deferred_yieldable(
_readBodyToFile(response, output_stream, max_size)
)
except SynapseError:
# This can happen e.g. because the body is too large.
raise
except Exception as e:
logger.exception("Failed to download body")
raise SynapseError(
502, ("Failed to download remote body: %s" % e), Codes.UNKNOWN
raise_from(
SynapseError(
502, ("Failed to download remote body: %s" % e),
),
e
)
defer.returnValue(

View file

@ -25,7 +25,7 @@ import six
import attr
from prometheus_client import Counter, Gauge, Histogram
from prometheus_client.core import REGISTRY, GaugeMetricFamily
from prometheus_client.core import REGISTRY, GaugeMetricFamily, HistogramMetricFamily
from twisted.internet import reactor
@ -40,7 +40,6 @@ HAVE_PROC_SELF_STAT = os.path.exists("/proc/self/stat")
class RegistryProxy(object):
@staticmethod
def collect():
for metric in REGISTRY.collect():
@ -63,10 +62,7 @@ class LaterGauge(object):
try:
calls = self.caller()
except Exception:
logger.exception(
"Exception running callback for LaterGauge(%s)",
self.name,
)
logger.exception("Exception running callback for LaterGauge(%s)", self.name)
yield g
return
@ -116,9 +112,7 @@ class InFlightGauge(object):
# Create a class which have the sub_metrics values as attributes, which
# default to 0 on initialization. Used to pass to registered callbacks.
self._metrics_class = attr.make_class(
"_MetricsEntry",
attrs={x: attr.ib(0) for x in sub_metrics},
slots=True,
"_MetricsEntry", attrs={x: attr.ib(0) for x in sub_metrics}, slots=True
)
# Counts number of in flight blocks for a given set of label values
@ -157,7 +151,9 @@ class InFlightGauge(object):
Note: may be called by a separate thread.
"""
in_flight = GaugeMetricFamily(self.name + "_total", self.desc, labels=self.labels)
in_flight = GaugeMetricFamily(
self.name + "_total", self.desc, labels=self.labels
)
metrics_by_key = {}
@ -179,7 +175,9 @@ class InFlightGauge(object):
yield in_flight
for name in self.sub_metrics:
gauge = GaugeMetricFamily("_".join([self.name, name]), "", labels=self.labels)
gauge = GaugeMetricFamily(
"_".join([self.name, name]), "", labels=self.labels
)
for key, metrics in six.iteritems(metrics_by_key):
gauge.add_metric(key, getattr(metrics, name))
yield gauge
@ -193,12 +191,74 @@ class InFlightGauge(object):
all_gauges[self.name] = self
@attr.s(hash=True)
class BucketCollector(object):
"""
Like a Histogram, but the bucket counts are computed point-in-time by a
callback instead of being incremented over time.
Args:
name (str): Base name of metric to be exported to Prometheus.
data_collector (callable -> dict): A synchronous callable that
returns a dict mapping bucket to number of items in the
bucket. If these buckets are not the same as the buckets
given to this class, they will be remapped into them.
buckets (list[float]): List of floats/ints of the buckets to
give to Prometheus. +Inf is ignored, if given.
"""
name = attr.ib()
data_collector = attr.ib()
buckets = attr.ib()
def collect(self):
# Fetch the data -- this must be synchronous!
data = self.data_collector()
buckets = {}
res = []
for x in data.keys():
for i, bound in enumerate(self.buckets):
if x <= bound:
buckets[bound] = buckets.get(bound, 0) + data[x]
for i in self.buckets:
res.append([str(i), buckets.get(i, 0)])
res.append(["+Inf", sum(data.values())])
metric = HistogramMetricFamily(
self.name,
"",
buckets=res,
sum_value=sum([x * y for x, y in data.items()]),
)
yield metric
def __attrs_post_init__(self):
self.buckets = [float(x) for x in self.buckets if x != "+Inf"]
if self.buckets != sorted(self.buckets):
raise ValueError("Buckets not sorted")
self.buckets = tuple(self.buckets)
if self.name in all_gauges.keys():
logger.warning("%s already registered, reregistering" % (self.name,))
REGISTRY.unregister(all_gauges.pop(self.name))
REGISTRY.register(self)
all_gauges[self.name] = self
#
# Detailed CPU metrics
#
class CPUMetrics(object):
class CPUMetrics(object):
def __init__(self):
ticks_per_sec = 100
try:
@ -237,13 +297,28 @@ gc_time = Histogram(
"python_gc_time",
"Time taken to GC (sec)",
["gen"],
buckets=[0.0025, 0.005, 0.01, 0.025, 0.05, 0.10, 0.25, 0.50, 1.00, 2.50,
5.00, 7.50, 15.00, 30.00, 45.00, 60.00],
buckets=[
0.0025,
0.005,
0.01,
0.025,
0.05,
0.10,
0.25,
0.50,
1.00,
2.50,
5.00,
7.50,
15.00,
30.00,
45.00,
60.00,
],
)
class GCCounts(object):
def collect(self):
cm = GaugeMetricFamily("python_gc_counts", "GC object counts", labels=["gen"])
for n, m in enumerate(gc.get_count()):
@ -279,9 +354,7 @@ sent_transactions_counter = Counter("synapse_federation_client_sent_transactions
events_processed_counter = Counter("synapse_federation_client_events_processed", "")
event_processing_loop_counter = Counter(
"synapse_event_processing_loop_count",
"Event processing loop iterations",
["name"],
"synapse_event_processing_loop_count", "Event processing loop iterations", ["name"]
)
event_processing_loop_room_count = Counter(
@ -311,7 +384,6 @@ last_ticked = time.time()
class ReactorLastSeenMetric(object):
def collect(self):
cm = GaugeMetricFamily(
"python_twisted_reactor_last_seen",
@ -325,7 +397,6 @@ REGISTRY.register(ReactorLastSeenMetric())
def runUntilCurrentTimer(func):
@functools.wraps(func)
def f(*args, **kwargs):
now = reactor.seconds()
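
To show how the BucketCollector above behaves, a small sketch (metric name and values are illustrative): the data_collector callback must be synchronous and return a mapping of value to count, which collect() then remaps into the declared buckets.

from collections import Counter

extremities = Counter({1: 10, 4: 2})  # e.g. 10 rooms with 1 extremity, 2 rooms with 4
BucketCollector(
    "synapse_forward_extremities_example",  # illustrative metric name
    lambda: extremities,
    buckets=[1, 2, 3, 5, "+Inf"],           # the value 4 lands in the 5 bucket
)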

View file

@ -114,6 +114,21 @@ class EmailPusher(object):
run_as_background_process("emailpush.process", self._process)
def _pause_processing(self):
"""Used by tests to temporarily pause processing of events.
Asserts that it's not currently processing.
"""
assert not self._is_processing
self._is_processing = True
def _resume_processing(self):
"""Used by tests to resume processing of events after pausing.
"""
assert self._is_processing
self._is_processing = False
self._start_processing()
@defer.inlineCallbacks
def _process(self):
# we should never get here if we are already processing
@ -215,6 +230,10 @@ class EmailPusher(object):
@defer.inlineCallbacks
def save_last_stream_ordering_and_success(self, last_stream_ordering):
if last_stream_ordering is None:
# This happens if we haven't yet processed anything
return
self.last_stream_ordering = last_stream_ordering
yield self.store.update_pusher_last_stream_ordering_and_success(
self.app_id, self.email, self.user_id,

View file

@ -162,6 +162,17 @@ def calculate_room_name(store, room_state_ids, user_id, fallback_to_members=True
def descriptor_from_member_events(member_events):
"""Get a description of the room based on the member events.
Args:
member_events (Iterable[FrozenEvent])
Returns:
str
"""
member_events = list(member_events)
if len(member_events) == 0:
return "nobody"
elif len(member_events) == 1:

View file

@ -60,6 +60,11 @@ class PusherPool:
def add_pusher(self, user_id, access_token, kind, app_id,
app_display_name, device_display_name, pushkey, lang, data,
profile_tag=""):
"""Creates a new pusher and adds it to the pool
Returns:
Deferred[EmailPusher|HttpPusher]
"""
time_now_msec = self.clock.time_msec()
# we try to create the pusher just to validate the config: it
@ -103,7 +108,9 @@ class PusherPool:
last_stream_ordering=last_stream_ordering,
profile_tag=profile_tag,
)
yield self.start_pusher_by_id(app_id, pushkey, user_id)
pusher = yield self.start_pusher_by_id(app_id, pushkey, user_id)
defer.returnValue(pusher)
@defer.inlineCallbacks
def remove_pushers_by_app_id_and_pushkey_not_user(self, app_id, pushkey,
@ -184,7 +191,11 @@ class PusherPool:
@defer.inlineCallbacks
def start_pusher_by_id(self, app_id, pushkey, user_id):
"""Look up the details for the given pusher, and start it"""
"""Look up the details for the given pusher, and start it
Returns:
Deferred[EmailPusher|HttpPusher|None]: The pusher started, if any
"""
if not self._should_start_pushers:
return
@ -192,13 +203,16 @@ class PusherPool:
app_id, pushkey
)
p = None
pusher_dict = None
for r in resultlist:
if r['user_name'] == user_id:
p = r
pusher_dict = r
if p:
yield self._start_pusher(p)
pusher = None
if pusher_dict:
pusher = yield self._start_pusher(pusher_dict)
defer.returnValue(pusher)
@defer.inlineCallbacks
def _start_pushers(self):
@ -224,7 +238,7 @@ class PusherPool:
pusherdict (dict):
Returns:
None
Deferred[EmailPusher|HttpPusher]
"""
try:
p = self.pusher_factory.create_pusher(pusherdict)
@ -270,6 +284,8 @@ class PusherPool:
p.on_started(have_notifs)
defer.returnValue(p)
@defer.inlineCallbacks
def remove_pusher(self, app_id, pushkey, user_id):
appid_pushkey = "%s:%s" % (app_id, pushkey)

View file

@ -44,7 +44,7 @@ REQUIREMENTS = [
"canonicaljson>=1.1.3",
"signedjson>=1.0.0",
"pynacl>=1.2.1",
"idna>=2",
"idna>=2.5",
# validating SSL certs for IP addresses requires service_identity 18.1.
"service_identity>=18.1.0",
@ -65,7 +65,7 @@ REQUIREMENTS = [
"sortedcontainers>=1.4.4",
"psutil>=2.0.0",
"pymacaroons>=0.13.0",
"msgpack>=0.5.0",
"msgpack>=0.5.2",
"phonenumbers>=8.2.0",
"six>=1.10",
# prometheus_client 0.4.0 changed the format of counter metrics

View file

@ -17,11 +17,17 @@ import abc
import logging
import re
from six import raise_from
from six.moves import urllib
from twisted.internet import defer
from synapse.api.errors import CodeMessageException, HttpResponseException
from synapse.api.errors import (
CodeMessageException,
HttpResponseException,
RequestSendFailed,
SynapseError,
)
from synapse.util.caches.response_cache import ResponseCache
from synapse.util.stringutils import random_string
@ -175,6 +181,8 @@ class ReplicationEndpoint(object):
# on the master process that we should send to the client. (And
# importantly, not stack traces everywhere)
raise e.to_synapse_error()
except RequestSendFailed as e:
raise_from(SynapseError(502, "Failed to talk to master"), e)
defer.returnValue(result)

View file

@ -67,7 +67,13 @@ class EmailPasswordRequestTokenRestServlet(RestServlet):
@defer.inlineCallbacks
def on_POST(self, request):
if self.config.email_password_reset_behaviour == "off":
raise SynapseError(400, "Password resets have been disabled on this server")
if self.config.password_resets_were_disabled_due_to_email_config:
logger.warn(
"User password resets have been disabled due to lack of email config"
)
raise SynapseError(
400, "Email-based password resets have been disabled on this server",
)
body = parse_json_object_from_request(request)
@ -195,9 +201,6 @@ class MsisdnPasswordRequestTokenRestServlet(RestServlet):
@defer.inlineCallbacks
def on_POST(self, request):
if not self.config.email_password_reset_behaviour == "off":
raise SynapseError(400, "Password resets have been disabled on this server")
body = parse_json_object_from_request(request)
assert_params_in_dict(body, [
@ -252,6 +255,14 @@ class PasswordResetSubmitTokenServlet(RestServlet):
400,
"This medium is currently not supported for password resets",
)
if self.config.email_password_reset_behaviour == "off":
if self.config.password_resets_were_disabled_due_to_email_config:
logger.warn(
"User password resets have been disabled due to lack of email config"
)
raise SynapseError(
400, "Email-based password resets have been disabled on this server",
)
sid = parse_string(request, "sid")
client_secret = parse_string(request, "client_secret")

View file

@ -79,7 +79,7 @@ class AccountValiditySendMailServlet(RestServlet):
if not self.account_validity.renew_by_email_enabled:
raise AuthError(403, "Account renewal via email is disabled on this server.")
requester = yield self.auth.get_user_by_req(request)
requester = yield self.auth.get_user_by_req(request, allow_expired=True)
user_id = requester.user.to_string()
yield self.account_activity_handler.send_renewal_email_to_user(user_id)

View file

@ -386,8 +386,10 @@ class MediaRepository(object):
raise SynapseError(502, "Failed to fetch remote media")
except SynapseError:
logger.exception("Failed to fetch remote media %s/%s",
server_name, media_id)
logger.warn(
"Failed to fetch remote media %s/%s",
server_name, media_id,
)
raise
except NotRetryingDestination:
logger.warn("Not retrying destination %r", server_name)

View file

@ -1,5 +1,7 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2017-2018 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -35,6 +37,7 @@ from synapse.crypto import context_factory
from synapse.crypto.keyring import Keyring
from synapse.events.builder import EventBuilderFactory
from synapse.events.spamcheck import SpamChecker
from synapse.events.third_party_rules import ThirdPartyEventRules
from synapse.events.utils import EventClientSerializer
from synapse.federation.federation_client import FederationClient
from synapse.federation.federation_server import (
@ -178,6 +181,7 @@ class HomeServer(object):
'groups_attestation_renewer',
'secrets',
'spam_checker',
'third_party_event_rules',
'room_member_handler',
'federation_registry',
'server_notices_manager',
@ -483,6 +487,9 @@ class HomeServer(object):
def build_spam_checker(self):
return SpamChecker(self)
def build_third_party_event_rules(self):
return ThirdPartyEventRules(self)
def build_room_member_handler(self):
if self.config.worker_app:
return RoomMemberWorkerHandler(self)

View file

@ -279,23 +279,37 @@ class DataStore(
"""
Counts the number of users who used this homeserver in the last 24 hours.
"""
yesterday = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24)
return self.runInteraction("count_daily_users", self._count_users, yesterday,)
def _count_users(txn):
yesterday = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24)
def count_monthly_users(self):
"""
Counts the number of users who used this homeserver in the last 30 days.
Note this method is intended for phonehome metrics only and is different
from the mau figure in synapse.storage.monthly_active_users which,
amongst other things, includes a 3 day grace period before a user counts.
"""
thirty_days_ago = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24 * 30)
return self.runInteraction(
"count_monthly_users",
self._count_users,
thirty_days_ago,
)
sql = """
SELECT COALESCE(count(*), 0) FROM (
SELECT user_id FROM user_ips
WHERE last_seen > ?
GROUP BY user_id
) u
"""
txn.execute(sql, (yesterday,))
count, = txn.fetchone()
return count
return self.runInteraction("count_users", _count_users)
def _count_users(self, txn, time_from):
"""
Returns the number of users seen in the past time_from period.
"""
sql = """
SELECT COALESCE(count(*), 0) FROM (
SELECT user_id FROM user_ips
WHERE last_seen > ?
GROUP BY user_id
) u
"""
txn.execute(sql, (time_from,))
count, = txn.fetchone()
return count
def count_r30_users(self):
"""

View file

@ -299,12 +299,12 @@ class SQLBaseStore(object):
def select_users_with_no_expiration_date_txn(txn):
"""Retrieves the list of registered users with no expiration date from the
database.
database, filtering out deactivated users.
"""
sql = (
"SELECT users.name FROM users"
" LEFT JOIN account_validity ON (users.name = account_validity.user_id)"
" WHERE account_validity.user_id is NULL;"
" WHERE account_validity.user_id is NULL AND users.deactivated = 0;"
)
txn.execute(sql, [])

View file

@ -17,7 +17,7 @@
import itertools
import logging
from collections import OrderedDict, deque, namedtuple
from collections import Counter as c_counter, OrderedDict, deque, namedtuple
from functools import wraps
from six import iteritems, text_type
@ -33,6 +33,7 @@ from synapse.api.constants import EventTypes
from synapse.api.errors import SynapseError
from synapse.events import EventBase # noqa: F401
from synapse.events.snapshot import EventContext # noqa: F401
from synapse.metrics import BucketCollector
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.state import StateResolutionStore
from synapse.storage.background_updates import BackgroundUpdateStore
@ -220,13 +221,39 @@ class EventsStore(
EventsWorkerStore,
BackgroundUpdateStore,
):
def __init__(self, db_conn, hs):
super(EventsStore, self).__init__(db_conn, hs)
self._event_persist_queue = _EventPeristenceQueue()
self._state_resolution_handler = hs.get_state_resolution_handler()
# Collect metrics on the number of forward extremities that exist.
# Counter of number of extremities to count
self._current_forward_extremities_amount = c_counter()
BucketCollector(
"synapse_forward_extremities",
lambda: self._current_forward_extremities_amount,
buckets=[1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, "+Inf"]
)
# Read the extrems every 60 minutes
hs.get_clock().looping_call(self._read_forward_extremities, 60 * 60 * 1000)
@defer.inlineCallbacks
def _read_forward_extremities(self):
def fetch(txn):
txn.execute(
"""
select count(*) c from event_forward_extremities
group by room_id
"""
)
return txn.fetchall()
res = yield self.runInteraction("read_forward_extremities", fetch)
self._current_forward_extremities_amount = c_counter(list(x[0] for x in res))
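_read_forward_extremities turns one (count,) row per room into a Counter keyed by extremity count, which BucketCollector then folds into histogram buckets. A small illustration of that Counter shape (sample rows invented; they match the numbers used in the metrics test later in this commit):

from collections import Counter

# One row per room: how many forward extremities that room has
res = [(2,), (2,), (6,)]
current = Counter(x[0] for x in res)
print(current)  # Counter({2: 2, 6: 1}): two rooms with 2 extremities, one with 6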
@defer.inlineCallbacks
def persist_events(self, events_and_contexts, backfilled=False):
"""
@ -568,17 +595,11 @@ class EventsStore(
)
txn.execute(sql, batch)
results.extend(
r[0]
for r in txn
if not json.loads(r[1]).get("soft_failed")
)
results.extend(r[0] for r in txn if not json.loads(r[1]).get("soft_failed"))
for chunk in batch_iter(event_ids, 100):
yield self.runInteraction(
"_get_events_which_are_prevs",
_get_events_which_are_prevs_txn,
chunk,
"_get_events_which_are_prevs", _get_events_which_are_prevs_txn, chunk
)
defer.returnValue(results)
@ -640,9 +661,7 @@ class EventsStore(
for chunk in batch_iter(event_ids, 100):
yield self.runInteraction(
"_get_prevs_before_rejected",
_get_prevs_before_rejected_txn,
chunk,
"_get_prevs_before_rejected", _get_prevs_before_rejected_txn, chunk
)
defer.returnValue(existing_prevs)

View file

@ -15,6 +15,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
from six import iterkeys
@ -31,6 +32,8 @@ from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
THIRTY_MINUTES_IN_MS = 30 * 60 * 1000
logger = logging.getLogger(__name__)
class RegistrationWorkerStore(SQLBaseStore):
def __init__(self, db_conn, hs):
@ -248,6 +251,20 @@ class RegistrationWorkerStore(SQLBaseStore):
desc="set_renewal_mail_status",
)
@defer.inlineCallbacks
def delete_account_validity_for_user(self, user_id):
"""Deletes the entry for the given user in the account validity table, removing
their expiration date and renewal token.
Args:
user_id (str): ID of the user to remove from the account validity table.
"""
yield self._simple_delete_one(
table="account_validity",
keyvalues={"user_id": user_id},
desc="delete_account_validity_for_user",
)
@defer.inlineCallbacks
def is_server_admin(self, user):
res = yield self._simple_select_one_onecol(
@ -598,11 +615,77 @@ class RegistrationStore(
"user_threepids_grandfather", self._bg_user_threepids_grandfather,
)
self.register_background_update_handler(
"users_set_deactivated_flag", self._backgroud_update_set_deactivated_flag,
)
# Create a background job for culling expired 3PID validity tokens
hs.get_clock().looping_call(
self.cull_expired_threepid_validation_tokens, THIRTY_MINUTES_IN_MS,
)
@defer.inlineCallbacks
def _background_update_set_deactivated_flag(self, progress, batch_size):
"""Scans the users table in batches for accounts that look deactivated (no
password hash, no access tokens and no threepids, excluding guests and
appservice users) and sets their 'deactivated' flag to 1.
"""
last_user = progress.get("user_id", "")
def _background_update_set_deactivated_flag_txn(txn):
txn.execute(
"""
SELECT
users.name,
COUNT(access_tokens.token) AS count_tokens,
COUNT(user_threepids.address) AS count_threepids
FROM users
LEFT JOIN access_tokens ON (access_tokens.user_id = users.name)
LEFT JOIN user_threepids ON (user_threepids.user_id = users.name)
WHERE (users.password_hash IS NULL OR users.password_hash = '')
AND (users.appservice_id IS NULL OR users.appservice_id = '')
AND users.is_guest = 0
AND users.name > ?
GROUP BY users.name
ORDER BY users.name ASC
LIMIT ?;
""",
(last_user, batch_size),
)
rows = self.cursor_to_dict(txn)
if not rows:
return True
rows_processed_nb = 0
for user in rows:
if not user["count_tokens"] and not user["count_threepids"]:
self.set_user_deactivated_status_txn(txn, user["name"], True)
rows_processed_nb += 1
logger.info("Marked %d rows as deactivated", rows_processed_nb)
self._background_update_progress_txn(
txn, "users_set_deactivated_flag", {"user_id": rows[-1]["name"]}
)
return batch_size > len(rows)
end = yield self.runInteraction(
"users_set_deactivated_flag",
_background_update_set_deactivated_flag_txn,
)
if end:
yield self._end_background_update("users_set_deactivated_flag")
defer.returnValue(batch_size)
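This follows Synapse's background-update shape: each pass handles at most batch_size users, records the last processed user_id as the progress token, and signals completion by returning fewer rows than requested. A stripped-down, synchronous sketch of that batching loop, with an in-memory list standing in for the table:

USERS = ["@a:test", "@b:test", "@c:test", "@d:test", "@e:test"]

def run_batch(progress, batch_size):
    last = progress.get("user_id", "")
    rows = [u for u in USERS if u > last][:batch_size]
    if not rows:
        return True  # nothing left: the update is finished
    print("processed", rows)
    progress["user_id"] = rows[-1]  # resume point for the next pass
    return batch_size > len(rows)

progress = {}
while not run_batch(progress, batch_size=2):
    pass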
@defer.inlineCallbacks
def add_access_token_to_user(self, user_id, token, device_id=None):
"""Adds an access token for the given user.
@ -1268,3 +1351,50 @@ class RegistrationStore(
"delete_threepid_session",
delete_threepid_session_txn,
)
def set_user_deactivated_status_txn(self, txn, user_id, deactivated):
self._simple_update_one_txn(
txn=txn,
table="users",
keyvalues={"name": user_id},
updatevalues={"deactivated": 1 if deactivated else 0},
)
self._invalidate_cache_and_stream(
txn, self.get_user_deactivated_status, (user_id,),
)
@defer.inlineCallbacks
def set_user_deactivated_status(self, user_id, deactivated):
"""Set the `deactivated` property for the provided user to the provided value.
Args:
user_id (str): The ID of the user to set the status for.
deactivated (bool): The value to set for `deactivated`.
"""
yield self.runInteraction(
"set_user_deactivated_status",
self.set_user_deactivated_status_txn,
user_id, deactivated,
)
@cachedInlineCallbacks()
def get_user_deactivated_status(self, user_id):
"""Retrieve the value for the `deactivated` property for the provided user.
Args:
user_id (str): The ID of the user to retrieve the status for.
Returns:
defer.Deferred(bool): The requested value.
"""
res = yield self._simple_select_one_onecol(
table="users",
keyvalues={"name": user_id},
retcol="deactivated",
desc="get_user_deactivated_status",
)
# Convert the integer into a boolean.
defer.returnValue(res == 1)
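The write path pairs the UPDATE with _invalidate_cache_and_stream so the @cachedInlineCallbacks read above never serves a stale flag. A generic sketch of that invalidate-on-write memoisation pattern, not Synapse's actual cache machinery:

class Store(object):
    def __init__(self):
        self._deactivated = {}   # stands in for the users table
        self._cache = {}         # memoised reads

    def get_user_deactivated_status(self, user_id):
        if user_id not in self._cache:
            self._cache[user_id] = self._deactivated.get(user_id, 0) == 1
        return self._cache[user_id]

    def set_user_deactivated_status(self, user_id, deactivated):
        self._deactivated[user_id] = 1 if deactivated else 0
        # Invalidate on write so the next read recomputes
        self._cache.pop(user_id, None)

store = Store()
assert store.get_user_deactivated_status("@a:test") is False
store.set_user_deactivated_status("@a:test", True)
assert store.get_user_deactivated_status("@a:test") is True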

View file

@ -0,0 +1,19 @@
/* Copyright 2019 The Matrix.org Foundation C.I.C.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
ALTER TABLE users ADD deactivated SMALLINT DEFAULT 0 NOT NULL;
INSERT INTO background_updates (update_name, progress_json) VALUES
('users_set_deactivated_flag', '{}');
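The delta adds the column with a backfill-friendly default and queues the background update in the same migration. A quick sqlite3 check of the column's behaviour, with the schema trimmed to the relevant parts:

import sqlite3

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE users (name TEXT)")
con.execute("INSERT INTO users (name) VALUES ('@alice:test')")
# Existing rows pick up the default; the flag is flipped later by the
# users_set_deactivated_flag background update
con.execute("ALTER TABLE users ADD deactivated SMALLINT DEFAULT 0 NOT NULL")
print(con.execute("SELECT name, deactivated FROM users").fetchall())
# [('@alice:test', 0)]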

19
synctl
View file

@ -69,10 +69,14 @@ def abort(message, colour=RED, stream=sys.stderr):
sys.exit(1)
def start(configfile):
def start(configfile, daemonize=True):
write("Starting ...")
args = SYNAPSE
args.extend(["--daemonize", "-c", configfile])
if daemonize:
args.extend(["--daemonize", "-c", configfile])
else:
args.extend(["-c", configfile])
try:
subprocess.check_call(args)
@ -143,12 +147,21 @@ def main():
help="start or stop all the workers in the given directory"
" and the main synapse process",
)
parser.add_argument(
"--no-daemonize",
action="store_false",
help="Run synapse in the foreground for debugging. "
"Will work only if the daemonize option is not set in the config."
)
options = parser.parse_args()
if options.worker and options.all_processes:
write('Cannot use "--worker" with "--all-processes"', stream=sys.stderr)
sys.exit(1)
# --no-daemonize uses store_false, so the attribute is False when the flag was given
if not options.no_daemonize and options.all_processes:
write('Cannot use "--no-daemonize" with "--all-processes"', stream=sys.stderr)
sys.exit(1)
configfile = options.configfile
@ -276,7 +289,7 @@ def main():
# Check if synapse is already running
if os.path.exists(pidfile) and pid_running(int(open(pidfile).read())):
abort("synapse.app.homeserver already running")
start(configfile)
start(configfile, bool(options.no_daemonize))  # True unless --no-daemonize was passed
for worker in workers:
env = os.environ.copy()
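Note the argparse semantics this hunk depends on: action="store_false" makes options.no_daemonize default to True and flip to False when the flag is passed, so the attribute actually carries the daemonize value. A short demonstration:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--no-daemonize", action="store_false")
assert parser.parse_args([]).no_daemonize is True           # daemonize by default
assert parser.parse_args(["--no-daemonize"]).no_daemonize is False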

View file

@ -15,6 +15,7 @@
import os
import attr
import pkg_resources
from twisted.internet.defer import Deferred
@ -25,6 +26,13 @@ from synapse.rest.client.v1 import login, room
from tests.unittest import HomeserverTestCase
@attr.s
class _User(object):
"Helper wrapper for user ID and access token"
id = attr.ib()
token = attr.ib()
class EmailPusherTests(HomeserverTestCase):
servlets = [
@ -71,25 +79,32 @@ class EmailPusherTests(HomeserverTestCase):
return hs
def test_sends_email(self):
def prepare(self, reactor, clock, hs):
# Register the user who gets notified
user_id = self.register_user("user", "pass")
access_token = self.login("user", "pass")
self.user_id = self.register_user("user", "pass")
self.access_token = self.login("user", "pass")
# Register the user who sends the message
other_user_id = self.register_user("otheruser", "pass")
other_access_token = self.login("otheruser", "pass")
# Register other users
self.others = [
_User(
id=self.register_user("otheruser1", "pass"),
token=self.login("otheruser1", "pass"),
),
_User(
id=self.register_user("otheruser2", "pass"),
token=self.login("otheruser2", "pass"),
),
]
# Register the pusher
user_tuple = self.get_success(
self.hs.get_datastore().get_user_by_access_token(access_token)
self.hs.get_datastore().get_user_by_access_token(self.access_token)
)
token_id = user_tuple["token_id"]
self.get_success(
self.pusher = self.get_success(
self.hs.get_pusherpool().add_pusher(
user_id=user_id,
user_id=self.user_id,
access_token=token_id,
kind="email",
app_id="m.email",
@ -101,22 +116,54 @@ class EmailPusherTests(HomeserverTestCase):
)
)
# Create a room
room = self.helper.create_room_as(user_id, tok=access_token)
# Invite the other person
self.helper.invite(room=room, src=user_id, tok=access_token, targ=other_user_id)
# The other user joins
self.helper.join(room=room, user=other_user_id, tok=other_access_token)
def test_simple_sends_email(self):
# Create a simple room with two users
room = self.helper.create_room_as(self.user_id, tok=self.access_token)
self.helper.invite(
room=room, src=self.user_id, tok=self.access_token, targ=self.others[0].id,
)
self.helper.join(room=room, user=self.others[0].id, tok=self.others[0].token)
# The other user sends some messages
self.helper.send(room, body="Hi!", tok=other_access_token)
self.helper.send(room, body="There!", tok=other_access_token)
self.helper.send(room, body="Hi!", tok=self.others[0].token)
self.helper.send(room, body="There!", tok=self.others[0].token)
# We should get emailed about that message
self._check_for_mail()
def test_multiple_members_email(self):
# We want to test multiple notifications, so we pause processing of push
# while we send messages.
self.pusher._pause_processing()
# Create a simple room with multiple other users
room = self.helper.create_room_as(self.user_id, tok=self.access_token)
for other in self.others:
self.helper.invite(
room=room, src=self.user_id, tok=self.access_token, targ=other.id,
)
self.helper.join(room=room, user=other.id, tok=other.token)
# The other users send some messages
self.helper.send(room, body="Hi!", tok=self.others[0].token)
self.helper.send(room, body="There!", tok=self.others[1].token)
self.helper.send(room, body="There!", tok=self.others[1].token)
# Nothing should have happened yet, as we're paused.
assert not self.email_attempts
self.pusher._resume_processing()
# We should get emailed about those messages
self._check_for_mail()
def _check_for_mail(self):
"Check that the user receives an email notification"
# Get the stream ordering before it gets sent
pushers = self.get_success(
self.hs.get_datastore().get_pushers_by(dict(user_name=user_id))
self.hs.get_datastore().get_pushers_by(dict(user_name=self.user_id))
)
self.assertEqual(len(pushers), 1)
last_stream_ordering = pushers[0]["last_stream_ordering"]
@ -126,7 +173,7 @@ class EmailPusherTests(HomeserverTestCase):
# It hasn't succeeded yet, so the stream ordering shouldn't have moved
pushers = self.get_success(
self.hs.get_datastore().get_pushers_by(dict(user_name=user_id))
self.hs.get_datastore().get_pushers_by(dict(user_name=self.user_id))
)
self.assertEqual(len(pushers), 1)
self.assertEqual(last_stream_ordering, pushers[0]["last_stream_ordering"])
@ -143,7 +190,7 @@ class EmailPusherTests(HomeserverTestCase):
# The stream ordering has increased
pushers = self.get_success(
self.hs.get_datastore().get_pushers_by(dict(user_name=user_id))
self.hs.get_datastore().get_pushers_by(dict(user_name=self.user_id))
)
self.assertEqual(len(pushers), 1)
self.assertTrue(pushers[0]["last_stream_ordering"] > last_stream_ordering)
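test_multiple_members_email relies on pausing the pusher so several messages collapse into one notification batch. A toy version of that pause/flush pattern, illustrative only since the real pusher is asynchronous:

class ToyPusher(object):
    def __init__(self):
        self.paused = False
        self.queue = []
        self.sent = []

    def push(self, msg):
        self.queue.append(msg)
        if not self.paused:
            self._flush()

    def _flush(self):
        if self.queue:
            self.sent.append(list(self.queue))  # one email for the whole batch
            self.queue = []

    def resume(self):
        self.paused = False
        self._flush()

p = ToyPusher()
p.paused = True
p.push("Hi!"); p.push("There!"); p.push("There!")
assert p.sent == []  # nothing happens while paused
p.resume()
assert p.sent == [["Hi!", "There!", "There!"]]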

View file

@ -0,0 +1,79 @@
# -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.rest import admin
from synapse.rest.client.v1 import login, room
from tests import unittest
class ThirdPartyRulesTestModule(object):
def __init__(self, config):
pass
def check_event_allowed(self, event, context):
if event.type == "foo.bar.forbidden":
return False
else:
return True
@staticmethod
def parse_config(config):
return config
class ThirdPartyRulesTestCase(unittest.HomeserverTestCase):
servlets = [
admin.register_servlets,
login.register_servlets,
room.register_servlets,
]
def make_homeserver(self, reactor, clock):
config = self.default_config()
config["third_party_event_rules"] = {
"module": "tests.rest.client.third_party_rules.ThirdPartyRulesTestModule",
"config": {},
}
self.hs = self.setup_test_homeserver(config=config)
return self.hs
def test_third_party_rules(self):
"""Tests that a forbidden event is forbidden from being sent, but an allowed one
can be sent.
"""
user_id = self.register_user("kermit", "monkey")
tok = self.login("kermit", "monkey")
room_id = self.helper.create_room_as(user_id, tok=tok)
request, channel = self.make_request(
"PUT",
"/_matrix/client/r0/rooms/%s/send/foo.bar.allowed/1" % room_id,
{},
access_token=tok,
)
self.render(request)
self.assertEquals(channel.result["code"], b"200", channel.result)
request, channel = self.make_request(
"PUT",
"/_matrix/client/r0/rooms/%s/send/foo.bar.forbidden/1" % room_id,
{},
access_token=tok,
)
self.render(request)
self.assertEquals(channel.result["code"], b"403", channel.result)

View file

@ -15,6 +15,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import re
from email.parser import Parser
@ -239,3 +240,47 @@ class PasswordResetTestCase(unittest.HomeserverTestCase):
)
self.render(request)
self.assertEquals(expected_code, channel.code, channel.result)
class DeactivateTestCase(unittest.HomeserverTestCase):
servlets = [
synapse.rest.admin.register_servlets_for_client_rest_resource,
login.register_servlets,
account.register_servlets,
]
def make_homeserver(self, reactor, clock):
hs = self.setup_test_homeserver()
return hs
def test_deactivate_account(self):
user_id = self.register_user("kermit", "test")
tok = self.login("kermit", "test")
request_data = json.dumps({
"auth": {
"type": "m.login.password",
"user": user_id,
"password": "test",
},
"erase": False,
})
request, channel = self.make_request(
"POST",
"account/deactivate",
request_data,
access_token=tok,
)
self.render(request)
self.assertEqual(request.code, 200)
store = self.hs.get_datastore()
# Check that the user has been marked as deactivated.
self.assertTrue(self.get_success(store.get_user_deactivated_status(user_id)))
# Check that this access token has been invalidated.
request, channel = self.make_request("GET", "account/whoami")
self.render(request)
self.assertEqual(request.code, 401)

View file

@ -26,7 +26,7 @@ from synapse.api.constants import LoginType
from synapse.api.errors import Codes
from synapse.appservice import ApplicationService
from synapse.rest.client.v1 import login
from synapse.rest.client.v2_alpha import account_validity, register, sync
from synapse.rest.client.v2_alpha import account, account_validity, register, sync
from tests import unittest
@ -308,6 +308,7 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase):
login.register_servlets,
sync.register_servlets,
account_validity.register_servlets,
account.register_servlets,
]
def make_homeserver(self, reactor, clock):
@ -358,20 +359,7 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase):
def test_renewal_email(self):
self.email_attempts = []
user_id = self.register_user("kermit", "monkey")
tok = self.login("kermit", "monkey")
# We need to manually add an email address otherwise the handler will do
# nothing.
now = self.hs.clock.time_msec()
self.get_success(
self.store.user_add_threepid(
user_id=user_id,
medium="email",
address="kermit@example.com",
validated_at=now,
added_at=now,
)
)
(user_id, tok) = self.create_user()
# Move 6 days forward. This should trigger a renewal email to be sent.
self.reactor.advance(datetime.timedelta(days=6).total_seconds())
@ -396,6 +384,44 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase):
def test_manual_email_send(self):
self.email_attempts = []
(user_id, tok) = self.create_user()
request, channel = self.make_request(
b"POST",
"/_matrix/client/unstable/account_validity/send_mail",
access_token=tok,
)
self.render(request)
self.assertEquals(channel.result["code"], b"200", channel.result)
self.assertEqual(len(self.email_attempts), 1)
def test_deactivated_user(self):
self.email_attempts = []
(user_id, tok) = self.create_user()
request_data = json.dumps({
"auth": {
"type": "m.login.password",
"user": user_id,
"password": "monkey",
},
"erase": False,
})
request, channel = self.make_request(
"POST",
"account/deactivate",
request_data,
access_token=tok,
)
self.render(request)
self.assertEqual(request.code, 200)
self.reactor.advance(datetime.timedelta(days=8).total_seconds())
self.assertEqual(len(self.email_attempts), 0)
def create_user(self):
user_id = self.register_user("kermit", "monkey")
tok = self.login("kermit", "monkey")
# We need to manually add an email address otherwise the handler will do
@ -410,7 +436,33 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase):
added_at=now,
)
)
return (user_id, tok)
def test_manual_email_send_expired_account(self):
user_id = self.register_user("kermit", "monkey")
tok = self.login("kermit", "monkey")
# We need to manually add an email address otherwise the handler will do
# nothing.
now = self.hs.clock.time_msec()
self.get_success(
self.store.user_add_threepid(
user_id=user_id,
medium="email",
address="kermit@example.com",
validated_at=now,
added_at=now,
)
)
# Make the account expire.
self.reactor.advance(datetime.timedelta(days=8).total_seconds())
# Ignore all emails sent by the automatic background task and only focus on the
# ones sent manually.
self.email_attempts = []
# Test that we're still able to manually trigger a mail to be sent.
request, channel = self.make_request(
b"POST",
"/_matrix/client/unstable/account_validity/send_mail",

View file

@ -15,7 +15,6 @@
import os.path
from synapse.api.constants import EventTypes
from synapse.storage import prepare_database
from synapse.types import Requester, UserID
@ -23,17 +22,12 @@ from tests.unittest import HomeserverTestCase
class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase):
"""Test the background update to clean forward extremities table.
"""
def make_homeserver(self, reactor, clock):
# Hack until we understand why test_forked_graph_cleanup fails with v4
config = self.default_config()
config['default_room_version'] = '1'
return self.setup_test_homeserver(config=config)
Test the background update to clean forward extremities table.
"""
def prepare(self, reactor, clock, homeserver):
self.store = homeserver.get_datastore()
self.event_creator = homeserver.get_event_creation_handler()
self.room_creator = homeserver.get_room_creation_handler()
# Create a test user and room
@ -42,56 +36,6 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase):
info = self.get_success(self.room_creator.create_room(self.requester, {}))
self.room_id = info["room_id"]
def create_and_send_event(self, soft_failed=False, prev_event_ids=None):
"""Create and send an event.
Args:
soft_failed (bool): Whether to create a soft failed event or not
prev_event_ids (list[str]|None): Explicitly set the prev events,
or if None just use the default
Returns:
str: The new event's ID.
"""
prev_events_and_hashes = None
if prev_event_ids:
prev_events_and_hashes = [[p, {}, 0] for p in prev_event_ids]
event, context = self.get_success(
self.event_creator.create_event(
self.requester,
{
"type": EventTypes.Message,
"room_id": self.room_id,
"sender": self.user.to_string(),
"content": {"body": "", "msgtype": "m.text"},
},
prev_events_and_hashes=prev_events_and_hashes,
)
)
if soft_failed:
event.internal_metadata.soft_failed = True
self.get_success(
self.event_creator.send_nonmember_event(self.requester, event, context)
)
return event.event_id
def add_extremity(self, event_id):
"""Add the given event as an extremity to the room.
"""
self.get_success(
self.store._simple_insert(
table="event_forward_extremities",
values={"room_id": self.room_id, "event_id": event_id},
desc="test_add_extremity",
)
)
self.store.get_latest_event_ids_in_room.invalidate((self.room_id,))
def run_background_update(self):
"""Re run the background update to clean up the extremities.
"""
@ -131,10 +75,16 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase):
"""
# Create the room graph
event_id_1 = self.create_and_send_event()
event_id_2 = self.create_and_send_event(True, [event_id_1])
event_id_3 = self.create_and_send_event(True, [event_id_2])
event_id_4 = self.create_and_send_event(False, [event_id_3])
event_id_1 = self.create_and_send_event(self.room_id, self.user)
event_id_2 = self.create_and_send_event(
self.room_id, self.user, True, [event_id_1]
)
event_id_3 = self.create_and_send_event(
self.room_id, self.user, True, [event_id_2]
)
event_id_4 = self.create_and_send_event(
self.room_id, self.user, False, [event_id_3]
)
# Check the latest events are as expected
latest_event_ids = self.get_success(
@ -154,12 +104,16 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase):
Where SF* are soft failed, and with extremities of A and B
"""
# Create the room graph
event_id_a = self.create_and_send_event()
event_id_sf1 = self.create_and_send_event(True, [event_id_a])
event_id_b = self.create_and_send_event(False, [event_id_sf1])
event_id_a = self.create_and_send_event(self.room_id, self.user)
event_id_sf1 = self.create_and_send_event(
self.room_id, self.user, True, [event_id_a]
)
event_id_b = self.create_and_send_event(
self.room_id, self.user, False, [event_id_sf1]
)
# Add the new extremity and check the latest events are as expected
self.add_extremity(event_id_a)
self.add_extremity(self.room_id, event_id_a)
latest_event_ids = self.get_success(
self.store.get_latest_event_ids_in_room(self.room_id)
@ -185,13 +139,19 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase):
Where SF* are soft failed, and with extremities of A and B
"""
# Create the room graph
event_id_a = self.create_and_send_event()
event_id_sf1 = self.create_and_send_event(True, [event_id_a])
event_id_sf2 = self.create_and_send_event(True, [event_id_sf1])
event_id_b = self.create_and_send_event(False, [event_id_sf2])
event_id_a = self.create_and_send_event(self.room_id, self.user)
event_id_sf1 = self.create_and_send_event(
self.room_id, self.user, True, [event_id_a]
)
event_id_sf2 = self.create_and_send_event(
self.room_id, self.user, True, [event_id_sf1]
)
event_id_b = self.create_and_send_event(
self.room_id, self.user, False, [event_id_sf2]
)
# Add the new extremity and check the latest events are as expected
self.add_extremity(event_id_a)
self.add_extremity(self.room_id, event_id_a)
latest_event_ids = self.get_success(
self.store.get_latest_event_ids_in_room(self.room_id)
@ -227,16 +187,26 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase):
"""
# Create the room graph
event_id_a = self.create_and_send_event()
event_id_b = self.create_and_send_event()
event_id_sf1 = self.create_and_send_event(True, [event_id_a])
event_id_sf2 = self.create_and_send_event(True, [event_id_a, event_id_b])
event_id_sf3 = self.create_and_send_event(True, [event_id_sf1])
self.create_and_send_event(True, [event_id_sf2, event_id_sf3]) # SF4
event_id_c = self.create_and_send_event(False, [event_id_sf3])
event_id_a = self.create_and_send_event(self.room_id, self.user)
event_id_b = self.create_and_send_event(self.room_id, self.user)
event_id_sf1 = self.create_and_send_event(
self.room_id, self.user, True, [event_id_a]
)
event_id_sf2 = self.create_and_send_event(
self.room_id, self.user, True, [event_id_a, event_id_b]
)
event_id_sf3 = self.create_and_send_event(
self.room_id, self.user, True, [event_id_sf1]
)
self.create_and_send_event(
self.room_id, self.user, True, [event_id_sf2, event_id_sf3]
) # SF4
event_id_c = self.create_and_send_event(
self.room_id, self.user, False, [event_id_sf3]
)
# Add the new extremity and check the latest events are as expected
self.add_extremity(event_id_a)
self.add_extremity(self.room_id, event_id_a)
latest_event_ids = self.get_success(
self.store.get_latest_event_ids_in_room(self.room_id)

View file

@ -0,0 +1,82 @@
# -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from prometheus_client.exposition import generate_latest
from synapse.metrics import REGISTRY
from synapse.types import Requester, UserID
from tests.unittest import HomeserverTestCase
class ExtremStatisticsTestCase(HomeserverTestCase):
def test_exposed_to_prometheus(self):
"""
Forward extremity counts are exposed via Prometheus.
"""
room_creator = self.hs.get_room_creation_handler()
user = UserID("alice", "test")
requester = Requester(user, None, False, None, None)
# (chained events, forward extremities) to create per room
events = [(3, 2), (6, 2), (4, 6)]
for event_count, extrems in events:
info = self.get_success(room_creator.create_room(requester, {}))
room_id = info["room_id"]
last_event = None
# Make a real event chain
for i in range(event_count):
ev = self.create_and_send_event(room_id, user, False, last_event)
last_event = [ev]
# Sprinkle in some extremities
for i in range(extrems):
ev = self.create_and_send_event(room_id, user, False, last_event)
# Let it run for a while, then pull out the statistics from the
# Prometheus client registry
self.reactor.advance(60 * 60 * 1000)
self.pump(1)
items = set(
filter(
lambda x: b"synapse_forward_extremities_" in x,
generate_latest(REGISTRY).split(b"\n"),
)
)
expected = set([
b'synapse_forward_extremities_bucket{le="1.0"} 0.0',
b'synapse_forward_extremities_bucket{le="2.0"} 2.0',
b'synapse_forward_extremities_bucket{le="3.0"} 2.0',
b'synapse_forward_extremities_bucket{le="5.0"} 2.0',
b'synapse_forward_extremities_bucket{le="7.0"} 3.0',
b'synapse_forward_extremities_bucket{le="10.0"} 3.0',
b'synapse_forward_extremities_bucket{le="15.0"} 3.0',
b'synapse_forward_extremities_bucket{le="20.0"} 3.0',
b'synapse_forward_extremities_bucket{le="50.0"} 3.0',
b'synapse_forward_extremities_bucket{le="100.0"} 3.0',
b'synapse_forward_extremities_bucket{le="200.0"} 3.0',
b'synapse_forward_extremities_bucket{le="500.0"} 3.0',
b'synapse_forward_extremities_bucket{le="+Inf"} 3.0',
b'synapse_forward_extremities_count 3.0',
b'synapse_forward_extremities_sum 10.0',
])
self.assertEqual(items, expected)
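The expected bucket values follow directly from the per-room extremity counts the test creates (2, 2 and 6): each bucket counts the rooms at or below its upper bound, giving a count of 3 and a sum of 10. The arithmetic, spelled out:

from collections import Counter

extremities = Counter([2, 2, 6])  # one entry per room created above
buckets = [1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, float("inf")]
for le in buckets:
    rooms = sum(n for value, n in extremities.items() if value <= le)
    print("le=%s -> %s" % (le, rooms))  # 0, 2, 2, 2, 3, 3, ... as asserted
print("count", sum(extremities.values()))                  # 3.0 in the metric
print("sum", sum(v * n for v, n in extremities.items()))   # 10.0 in the metric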

View file

@ -27,11 +27,12 @@ import twisted.logger
from twisted.internet.defer import Deferred
from twisted.trial import unittest
from synapse.api.constants import EventTypes
from synapse.config.homeserver import HomeServerConfig
from synapse.http.server import JsonResource
from synapse.http.site import SynapseRequest
from synapse.server import HomeServer
from synapse.types import UserID, create_requester
from synapse.types import Requester, UserID, create_requester
from synapse.util.logcontext import LoggingContext
from tests.server import get_clock, make_request, render, setup_test_homeserver
@ -442,6 +443,64 @@ class HomeserverTestCase(TestCase):
access_token = channel.json_body["access_token"]
return access_token
def create_and_send_event(
self, room_id, user, soft_failed=False, prev_event_ids=None
):
"""
Create and send an event.
Args:
room_id (str): The ID of the room to send the event into.
user (UserID): The user sending the event.
soft_failed (bool): Whether to create a soft failed event or not
prev_event_ids (list[str]|None): Explicitly set the prev events,
or if None just use the default
Returns:
str: The new event's ID.
"""
event_creator = self.hs.get_event_creation_handler()
secrets = self.hs.get_secrets()
requester = Requester(user, None, False, None, None)
prev_events_and_hashes = None
if prev_event_ids:
prev_events_and_hashes = [[p, {}, 0] for p in prev_event_ids]
event, context = self.get_success(
event_creator.create_event(
requester,
{
"type": EventTypes.Message,
"room_id": room_id,
"sender": user.to_string(),
"content": {"body": secrets.token_hex(), "msgtype": "m.text"},
},
prev_events_and_hashes=prev_events_and_hashes,
)
)
if soft_failed:
event.internal_metadata.soft_failed = True
self.get_success(
event_creator.send_nonmember_event(requester, event, context)
)
return event.event_id
def add_extremity(self, room_id, event_id):
"""
Add the given event as an extremity to the room.
"""
self.get_success(
self.hs.get_datastore()._simple_insert(
table="event_forward_extremities",
values={"room_id": room_id, "event_id": event_id},
desc="test_add_extremity",
)
)
self.hs.get_datastore().get_latest_event_ids_in_room.invalidate((room_id,))
def attempt_wrong_password_login(self, username, password):
"""Attempts to login as the user with the given password, asserting
that the attempt *fails*.

View file

@ -1,5 +1,5 @@
[tox]
envlist = packaging, py27, py36, pep8, check_isort
envlist = packaging, py35, py36, py37, pep8, check_isort
[base]
deps =
@ -79,7 +79,7 @@ usedevelop=true
# A test suite for the oldest supported versions of Python libraries, to catch
# any uses of APIs not available in them.
[testenv:py27-old]
[testenv:py35-old]
skip_install=True
deps =
# Old automat version for Twisted