Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes

Erik Johnston 2019-02-22 15:29:41 +00:00
commit 0ea52872ab
28 changed files with 383 additions and 82 deletions


@ -78,11 +78,7 @@ matrix:
if: type = pull_request
name: "check-newsfragment"
python: 3.6
env: TOX_ENV=check-newsfragment
script:
- git remote set-branches --add origin develop
- git fetch origin develop
- tox -e $TOX_ENV
script: scripts-dev/check-newsfragment
install:
# this just logs the postgres version we will be testing against (if any)


@ -30,7 +30,7 @@ use github's pull request workflow to review the contribution, and either ask
you to make any refinements needed or merge it and make them ourselves. The
changes will then land on master when we next do a release.
We use `CircleCI <https://circleci.com/gh/matrix-org>`_ and `Travis CI
<https://travis-ci.org/matrix-org/synapse>`_ for continuous integration. All
pull requests to synapse get automatically tested by Travis and CircleCI.
If your change breaks the build, this will be shown in GitHub, so please
@ -74,16 +74,39 @@ entry. These are managed by Towncrier
To create a changelog entry, make a new file in the ``changelog.d``
directory named in the format of ``PRnumber.type``. The type can be
one of ``feature``, ``bugfix``, ``removal`` (also used for
deprecations), or ``misc`` (for internal-only changes). The content of
the file is your changelog entry, which can contain Markdown
formatting. Adding credits to the changelog is encouraged, we value
your contributions and would like to have you shouted out in the
release notes!
deprecations), or ``misc`` (for internal-only changes).
The content of the file is your changelog entry, which can contain Markdown
formatting. The entry should end with a full stop ('.') for consistency.
Adding credits to the changelog is encouraged; we value your
contributions and would like to have you shouted out in the release notes!
For example, a fix in PR #1234 would have its changelog entry in
``changelog.d/1234.bugfix``, and contain content like "The security levels of
Florbs are now validated when received over federation. Contributed by Jane
Matrix".
Matrix.".
Debian changelog
----------------
Changes which affect the debian packaging files (in ``debian``) are an
exception.
In this case, you will need to add an entry to the debian changelog for the
next release. For this, run the following command::
dch
This will make up a new version number (if there isn't already an unreleased
version in flight), and open an editor where you can add a new changelog entry.
(Our release process will ensure that the version number and maintainer name
are corrected for the release.)
If your change affects both the debian packaging *and* files outside the debian
directory, you will need both a regular newsfragment *and* an entry in the
debian changelog. (Though typically such changes should be submitted as two
separate pull requests.)
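As a rough sketch (the message is illustrative; ``dch`` comes from the
``devscripts`` package)::

    # Appends a bullet to the current UNRELEASED changelog entry, creating a
    # new version first if the previous entry has already been released.
    dch 'Fix overwriting of config settings on upgrade.'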
Attribution
~~~~~~~~~~~

changelog.d/4263.bugfix Normal file

@ -0,0 +1 @@
Prevent crash on pagination.

changelog.d/4541.feature Normal file

@ -0,0 +1 @@
Added an HAProxy example in the reverse proxy documentation. Contributed by Benoît S. (Benpro).

changelog.d/4680.bugfix Normal file

@ -0,0 +1,3 @@
Fix an issue in the database migration script where the
`e2e_room_keys.is_verified` column wasn't considered as
a boolean.

changelog.d/4689.misc Normal file

@ -0,0 +1 @@
Minor tweaks to acme docs.

changelog.d/4698.misc Normal file

@ -0,0 +1 @@
Better checks on newsfragments.

changelog.d/4706.misc Normal file

@ -0,0 +1 @@
Avoid some redundant work when processing read receipts.

changelog.d/4707.misc Normal file

@ -0,0 +1 @@
Run push_receipts_to_remotes as background job.

changelog.d/4709.misc Normal file

@ -0,0 +1 @@
Add prometheus metrics for number of badge update pushes.

changelog.d/4715.misc Normal file

@ -0,0 +1 @@
Improve replication performance by reducing cache invalidation traffic.

changelog.d/4716.misc Normal file

@ -0,0 +1 @@
Reduce pusher logging on startup.

debian/changelog vendored

@ -1,3 +1,9 @@
matrix-synapse-py3 (0.99.2) UNRELEASED; urgency=medium
* Fix overwriting of config settings on upgrade.
-- Synapse Packaging team <packages@matrix.org> Wed, 20 Feb 2019 17:11:25 +0000
matrix-synapse-py3 (0.99.1.1) stable; urgency=medium
* New synapse release 0.99.1.1

debian/install vendored

@ -1 +1,2 @@
debian/log.yaml etc/matrix-synapse
debian/manage_debconf.pl /opt/venvs/matrix-synapse/lib/

debian/manage_debconf.pl vendored Executable file

@ -0,0 +1,130 @@
#!/usr/bin/perl
#
# Interface between our config files and the debconf database.
#
# Usage:
#
# manage_debconf.pl <action>
#
# where <action> can be:
#
# read: read the configuration from the yaml into debconf
# update: update the yaml config according to the debconf database
use strict;
use warnings;
use Debconf::Client::ConfModule (qw/get set/);
# map from the name of a setting in our .yaml file to the relevant debconf
# setting.
my %MAPPINGS=(
server_name => 'matrix-synapse/server-name',
report_stats => 'matrix-synapse/report-stats',
);
# enable debug if dpkg --debug
my $DEBUG = $ENV{DPKG_MAINTSCRIPT_DEBUG};
sub read_config {
my @files = @_;
foreach my $file (@files) {
print STDERR "reading $file\n" if $DEBUG;
open my $FH, "<", $file or next;
# rudimentary parsing which (a) avoids having to depend on a yaml library,
# and (b) is tolerant of yaml errors
while($_ = <$FH>) {
while (my ($setting, $debconf) = each %MAPPINGS) {
$setting = quotemeta $setting;
if(/^${setting}\s*:(.*)$/) {
my $val = $1;
# remove leading/trailing whitespace
$val =~ s/^\s*//;
$val =~ s/\s*$//;
# remove surrounding quotes
if ($val =~ /^"(.*)"$/ || $val =~ /^'(.*)'$/) {
$val = $1;
}
print STDERR ">> $debconf = $val\n" if $DEBUG;
set($debconf, $val);
}
}
}
close $FH;
}
}
sub update_config {
my @files = @_;
my %substs = ();
while (my ($setting, $debconf) = each %MAPPINGS) {
my @res = get($debconf);
$substs{$setting} = $res[1] if $res[0] == 0;
}
foreach my $file (@files) {
print STDERR "checking $file\n" if $DEBUG;
open my $FH, "<", $file or next;
my $updated = 0;
# read the whole file into memory
my @lines = <$FH>;
while (my ($setting, $val) = each %substs) {
$setting = quotemeta $setting;
map {
if (/^${setting}\s*:\s*(.*)\s*$/) {
my $current = $1;
if ($val ne $current) {
$_ = "${setting}: $val\n";
$updated = 1;
}
}
} @lines;
}
close $FH;
next unless $updated;
print STDERR "updating $file\n" if $DEBUG;
open $FH, ">", $file or die "unable to update $file";
print $FH @lines;
close $FH;
}
}
my $cmd = $ARGV[0];
my $read = 0;
my $update = 0;
if (not $cmd) {
die "must specify a command to perform\n";
} elsif ($cmd eq 'read') {
$read = 1;
} elsif ($cmd eq 'update') {
$update = 1;
} else {
die "unknown command '$cmd'\n";
}
my @files = (
"/etc/matrix-synapse/homeserver.yaml",
glob("/etc/matrix-synapse/conf.d/*.yaml"),
);
if ($read) {
read_config(@files);
} elsif ($update) {
update_config(@files);
}
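The Debian maintainer scripts shown below drive this helper in both
directions; a quick sketch of the two modes (paths are those used by the
package):

```bash
# seed the debconf database from whatever is already in the yaml config
/opt/venvs/matrix-synapse/lib/manage_debconf.pl read

# rewrite the yaml config from the (possibly updated) debconf answers
/opt/venvs/matrix-synapse/lib/manage_debconf.pl update
```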


@ -4,6 +4,9 @@ set -e
. /usr/share/debconf/confmodule
# try to update the debconf db according to whatever is in the config files
/opt/venvs/matrix-synapse/lib/manage_debconf.pl read || true
db_input high matrix-synapse/server-name || true
db_input high matrix-synapse/report-stats || true
db_go


@ -8,19 +8,36 @@ USER="matrix-synapse"
case "$1" in
configure|reconfigure)
# Set server name in config file
# generate template config files if they don't exist
mkdir -p "/etc/matrix-synapse/conf.d/"
db_get matrix-synapse/server-name
if [ ! -e "$CONFIGFILE_SERVERNAME" ]; then
cat > "$CONFIGFILE_SERVERNAME" <<EOF
# This file is autogenerated, and will be recreated on upgrade if it is deleted.
# Any changes you make will be preserved.
if [ "$RET" ]; then
echo "server_name: $RET" > $CONFIGFILE_SERVERNAME
# The domain name of the server, with optional explicit port.
# This is used by remote servers to connect to this server,
# e.g. matrix.org, localhost:8080, etc.
# This is also the last part of your UserID.
#
server_name: ''
EOF
fi
db_get matrix-synapse/report-stats
if [ "$RET" ]; then
echo "report_stats: $RET" > $CONFIGFILE_REPORTSTATS
if [ ! -e "$CONFIGFILE_REPORTSTATS" ]; then
cat > "$CONFIGFILE_REPORTSTATS" <<EOF
# This file is autogenerated, and will be recreated on upgrade if it is deleted.
# Any changes you make will be preserved.
# Whether to report anonymized homeserver usage statistics.
report_stats: false
EOF
fi
# update the config files according to whatever is in the debconf database
/opt/venvs/matrix-synapse/lib/manage_debconf.pl update
if ! getent passwd $USER >/dev/null; then
adduser --quiet --system --no-create-home --home /var/lib/matrix-synapse $USER
fi


@ -10,13 +10,14 @@ through [Let's Encrypt](https://letsencrypt.org/) if you tell it to.
In the case that your `server_name` config variable is the same as
the hostname that the client connects to, then the same certificate can be
used between client and federation ports without issue.
For a sample configuration, please inspect the new ACME section in the example
generated config by running the `generate-config` executable. For example:
If your configuration file does not already have an `acme` section, you can
generate an example config by running the `generate_config` executable. For
example:
```
~/synapse/env3/bin/generate-config
~/synapse/env3/bin/generate_config
```
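If you only want to inspect the ACME defaults, one option is to search the
generated sample config (this assumes `generate_config` writes the sample to
stdout; check its `--help` on your install):

```
~/synapse/env3/bin/generate_config | grep -A5 '^acme:'
```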
You will need to provide Let's Encrypt (or another ACME provider) access to
@ -27,10 +28,9 @@ like `authbind` to allow Synapse to listen on port 80 without root access.
(Do not run Synapse with root permissions!) Detailed instructions are
available under "ACME setup" below.
If you are already using self-signed certificates, you will need to back up
or delete them (files `example.com.tls.crt` and `example.com.tls.key` in
Synapse's root directory), Synapse's ACME implementation will not overwrite
them.
If you already have certificates, you will need to back up or delete them
(files `example.com.tls.crt` and `example.com.tls.key` in Synapse's root
directory), as Synapse's ACME implementation will not overwrite them.
You may wish to use alternate methods such as Certbot to obtain a certificate
from Let's Encrypt, depending on your server configuration. Of course, if you
@ -87,7 +87,6 @@ acme:
port: 8009
```
#### Authbind
`authbind` allows a program which does not run as root to bind to
@ -127,4 +126,4 @@ acme:
Ensure that the certificate paths specified in `homeserver.yaml` (`tls_certificate_path` and `tls_private_key_path`) do not currently point to any files. Synapse will not provision certificates if files exist, as it does not want to overwrite existing certificates.
Finally, start/restart Synapse.


@ -85,6 +85,24 @@ Let's assume that we expect clients to connect to our server at
</Location>
</VirtualHost>
* HAProxy::
frontend https
bind 0.0.0.0:443 v4v6 ssl crt /etc/ssl/haproxy/ strict-sni alpn h2,http/1.1
bind :::443 ssl crt /etc/ssl/haproxy/ strict-sni alpn h2,http/1.1
# Matrix client traffic
acl matrix hdr(host) -i matrix.example.com
use_backend matrix if matrix
frontend matrix-federation
bind 0.0.0.0:8448 v4v6 ssl crt /etc/ssl/haproxy/synapse.pem alpn h2,http/1.1
bind :::8448 ssl crt /etc/ssl/haproxy/synapse.pem alpn h2,http/1.1
default_backend matrix
backend matrix
server matrix 127.0.0.1:8008
You will also want to set ``bind_addresses: ['127.0.0.1']`` and ``x_forwarded: true``
for port 8008 in ``homeserver.yaml`` to ensure that client IP addresses are
recorded correctly.
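A minimal sketch of such a listener block in ``homeserver.yaml`` (merge this
with your existing ``listeners`` section rather than adding a duplicate)::

    listeners:
      - port: 8008
        type: http
        tls: false
        x_forwarded: true
        bind_addresses: ['127.0.0.1']
        resources:
          - names: [client, federation]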

scripts-dev/check-newsfragment Executable file

@ -0,0 +1,36 @@
#!/bin/bash
#
# A script which checks that an appropriate news file has been added on this
# branch.
set -e
# make sure that origin/develop is up to date
git fetch origin develop
UPSTREAM=origin/develop
# if there are changes in the debian directory, check that the debian changelog
# has been updated
if ! git diff --quiet $UPSTREAM... -- debian; then
if git diff --quiet $UPSTREAM... -- debian/changelog; then
echo "Updates to debian directory, but no update to the changelog." >&2
exit 1
fi
fi
# if there are changes *outside* the debian directory, check that the
# newsfragments have been updated.
if git diff --name-only $UPSTREAM... | grep -qv '^debian/'; then
tox -e check-newsfragment
fi
# check that any new newsfiles on this branch end with a full stop.
for f in $(git diff --name-only $UPSTREAM... -- changelog.d); do
lastchar=$(tr -d '\n' < "$f" | tail -c 1)
if [ "$lastchar" != '.' ]; then
echo "Newsfragment $f does not end with a '.'" >&2
exit 1
fi
done
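To run the same check locally before opening a pull request (this assumes
``tox`` is installed and that ``origin`` points at the main repository, since
the script fetches ``origin/develop``):

```bash
./scripts-dev/check-newsfragment
```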


@ -53,6 +53,7 @@ BOOLEAN_COLUMNS = {
"group_summary_users": ["is_public"],
"group_roles": ["is_public"],
"local_group_membership": ["is_publicised", "is_admin"],
"e2e_room_keys": ["is_verified"],
}


@ -254,7 +254,7 @@ class PaginationHandler(object):
})
state = None
if event_filter and event_filter.lazy_load_members():
if event_filter and event_filter.lazy_load_members() and len(events) > 0:
# TODO: remove redundant members
# FIXME: we also care about invite targets etc.


@ -16,8 +16,8 @@ import logging
from twisted.internet import defer
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.types import get_domain_from_id
from synapse.util import logcontext
from ._base import BaseHandler
@ -59,7 +59,9 @@ class ReceiptsHandler(BaseHandler):
if is_new:
# fire off a process in the background to send the receipt to
# remote servers
self._push_remotes([receipt])
run_as_background_process(
'push_receipts_to_remotes', self._push_remotes, receipt
)
@defer.inlineCallbacks
def _received_remote_receipt(self, origin, content):
@ -125,44 +127,42 @@ class ReceiptsHandler(BaseHandler):
defer.returnValue(True)
@logcontext.preserve_fn # caller should not yield on this
@defer.inlineCallbacks
def _push_remotes(self, receipts):
"""Given a list of receipts, works out which remote servers should be
def _push_remotes(self, receipt):
"""Given a receipt, works out which remote servers should be
poked and pokes them.
"""
try:
# TODO: Some of this stuff should be coalesced.
for receipt in receipts:
room_id = receipt["room_id"]
receipt_type = receipt["receipt_type"]
user_id = receipt["user_id"]
event_ids = receipt["event_ids"]
data = receipt["data"]
# TODO: optimise this to move some of the work to the workers.
room_id = receipt["room_id"]
receipt_type = receipt["receipt_type"]
user_id = receipt["user_id"]
event_ids = receipt["event_ids"]
data = receipt["data"]
users = yield self.state.get_current_user_in_room(room_id)
remotedomains = set(get_domain_from_id(u) for u in users)
remotedomains = remotedomains.copy()
remotedomains.discard(self.server_name)
users = yield self.state.get_current_user_in_room(room_id)
remotedomains = set(get_domain_from_id(u) for u in users)
remotedomains = remotedomains.copy()
remotedomains.discard(self.server_name)
logger.debug("Sending receipt to: %r", remotedomains)
logger.debug("Sending receipt to: %r", remotedomains)
for domain in remotedomains:
self.federation.send_edu(
destination=domain,
edu_type="m.receipt",
content={
room_id: {
receipt_type: {
user_id: {
"event_ids": event_ids,
"data": data,
}
for domain in remotedomains:
self.federation.send_edu(
destination=domain,
edu_type="m.receipt",
content={
room_id: {
receipt_type: {
user_id: {
"event_ids": event_ids,
"data": data,
}
},
}
},
key=(room_id, receipt_type, user_id),
)
},
key=(room_id, receipt_type, user_id),
)
except Exception:
logger.exception("Error pushing receipts to remote servers")


@ -32,9 +32,25 @@ if six.PY3:
logger = logging.getLogger(__name__)
http_push_processed_counter = Counter("synapse_http_httppusher_http_pushes_processed", "")
http_push_processed_counter = Counter(
"synapse_http_httppusher_http_pushes_processed",
"Number of push notifications successfully sent",
)
http_push_failed_counter = Counter("synapse_http_httppusher_http_pushes_failed", "")
http_push_failed_counter = Counter(
"synapse_http_httppusher_http_pushes_failed",
"Number of push notifications which failed",
)
http_badges_processed_counter = Counter(
"synapse_http_httppusher_badge_updates_processed",
"Number of badge updates successfully sent",
)
http_badges_failed_counter = Counter(
"synapse_http_httppusher_badge_updates_failed",
"Number of badge updates which failed",
)
class HttpPusher(object):
@ -81,6 +97,11 @@ class HttpPusher(object):
pusherdict['pushkey'],
)
if self.data is None:
raise PusherConfigException(
"data can not be null for HTTP pusher"
)
if 'url' not in self.data:
raise PusherConfigException(
"'url' required in data for HTTP pusher"
@ -350,6 +371,10 @@ class HttpPusher(object):
@defer.inlineCallbacks
def _send_badge(self, badge):
"""
Args:
badge (int): number of unread messages
"""
logger.info("Sending updated badge count %d to %s", badge, self.name)
d = {
'notification': {
@ -370,14 +395,11 @@ class HttpPusher(object):
}
}
try:
resp = yield self.http_client.post_json_get_json(self.url, d)
yield self.http_client.post_json_get_json(self.url, d)
http_badges_processed_counter.inc()
except Exception as e:
logger.warning(
"Failed to send badge count to %s: %s %s",
self.name, type(e), e,
)
defer.returnValue(False)
rejected = []
if 'rejected' in resp:
rejected = resp['rejected']
defer.returnValue(rejected)
http_badges_failed_counter.inc()


@ -56,7 +56,7 @@ class PusherFactory(object):
f = self.pusher_types.get(kind, None)
if not f:
return None
logger.info("creating %s pusher for %r", kind, pusherdict)
logger.debug("creating %s pusher for %r", kind, pusherdict)
return f(self.hs, pusherdict)
def _create_email_pusher(self, _hs, pusherdict):


@ -19,6 +19,7 @@ import logging
from twisted.internet import defer
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.push import PusherConfigException
from synapse.push.pusher import PusherFactory
logger = logging.getLogger(__name__)
@ -140,6 +141,10 @@ class PusherPool:
@defer.inlineCallbacks
def on_new_notifications(self, min_stream_id, max_stream_id):
if not self.pushers:
# nothing to do here.
return
try:
users_affected = yield self.store.get_push_action_users_in_range(
min_stream_id, max_stream_id
@ -218,6 +223,15 @@ class PusherPool:
"""
try:
p = self.pusher_factory.create_pusher(pusherdict)
except PusherConfigException as e:
logger.warning(
"Pusher incorrectly configured user=%s, appid=%s, pushkey=%s: %s",
pusherdict.get('user_name'),
pusherdict.get('app_id'),
pusherdict.get('pushkey'),
e,
)
return
except Exception:
logger.exception("Couldn't start a pusher: caught Exception")
return


@ -59,12 +59,7 @@ class BaseSlavedStore(SQLBaseStore):
members_changed = set(row.keys[1:])
self._invalidate_state_caches(room_id, members_changed)
else:
try:
getattr(self, row.cache_func).invalidate(tuple(row.keys))
except AttributeError:
# We probably haven't pulled in the cache in this worker,
# which is fine.
pass
self._attempt_to_invalidate_cache(row.cache_func, tuple(row.keys))
def _invalidate_cache_and_stream(self, txn, cache_func, keys):
txn.call_after(cache_func.invalidate, keys)


@ -1342,15 +1342,43 @@ class SQLBaseStore(object):
changed
"""
for member in members_changed:
self.get_rooms_for_user_with_stream_ordering.invalidate((member,))
self._attempt_to_invalidate_cache(
"get_rooms_for_user_with_stream_ordering", (member,),
)
for host in set(get_domain_from_id(u) for u in members_changed):
self.is_host_joined.invalidate((room_id, host))
self.was_host_joined.invalidate((room_id, host))
self._attempt_to_invalidate_cache(
"is_host_joined", (room_id, host,),
)
self._attempt_to_invalidate_cache(
"was_host_joined", (room_id, host,),
)
self.get_users_in_room.invalidate((room_id,))
self.get_room_summary.invalidate((room_id,))
self.get_current_state_ids.invalidate((room_id,))
self._attempt_to_invalidate_cache(
"get_users_in_room", (room_id,),
)
self._attempt_to_invalidate_cache(
"get_room_summary", (room_id,),
)
self._attempt_to_invalidate_cache(
"get_current_state_ids", (room_id,),
)
def _attempt_to_invalidate_cache(self, cache_name, key):
"""Attempts to invalidate the cache of the given name, ignoring if the
cache doesn't exist. Mainly used for invalidating caches on workers,
where they may not have the cache.
Args:
cache_name (str)
key (tuple)
"""
try:
getattr(self, cache_name).invalidate(key)
except AttributeError:
# We probably haven't pulled in the cache in this worker,
# which is fine.
pass
def _send_invalidation_to_replication(self, txn, cache_name, keys):
"""Notifies replication that given cache has been invalidated.