Merge branch 'release-v1.2.0' into matrix-org-hotfixes
commit 4102cb220a

UPGRADE.rst
@@ -49,6 +49,13 @@ returned by the Client-Server API:
    # configured on port 443.
    curl -kv https://<host.name>/_matrix/client/versions 2>&1 | grep "Server:"
 
+Upgrading to v1.2.0
+===================
+
+Some counter metrics have been renamed, with the old names deprecated. See
+`the metrics documentation <docs/metrics-howto.rst#renaming-of-metrics--deprecation-of-old-names-in-12>`_
+for details.
+
 Upgrading to v1.1.0
 ===================
 
changelog.d/5544.feature (new file)
@@ -0,0 +1,2 @@
+Add support for opentracing.
+

@@ -1 +0,0 @@
-Added opentracing and configuration options.

changelog.d/5629.bugfix (new file)
@@ -0,0 +1 @@
+Forbid viewing relations on an event once it has been redacted.

changelog.d/5636.misc (new file)
@@ -0,0 +1 @@
+Some counter metrics exposed over Prometheus have been renamed, with the old names preserved for backwards compatibility and deprecated. See `docs/metrics-howto.rst` for details.

changelog.d/5675.doc (new file)
@@ -0,0 +1 @@
+Minor tweaks to postgres documentation.

changelog.d/5689.misc (new file)
@@ -0,0 +1 @@
+Convert `synapse.federation.transport.server` to `async`. Might improve some stack traces.

changelog.d/5699.bugfix (new file)
@@ -0,0 +1 @@
+Fix some problems with authenticating redactions in recent room versions.

changelog.d/5700.bugfix (new file)
@@ -0,0 +1,2 @@
+Fix some problems with authenticating redactions in recent room versions.
+

changelog.d/5701.bugfix (new file)
@@ -0,0 +1 @@
+Ignore redactions of m.room.create events.

changelog.d/5707.bugfix (new file)
@@ -0,0 +1 @@
+Fix some problems with authenticating redactions in recent room versions.

changelog.d/5712.feature (new file)
@@ -0,0 +1,2 @@
+Add support for opentracing.
+
debian/changelog (vendored)
@@ -3,6 +3,9 @@ matrix-synapse-py3 (1.1.0-1) UNRELEASED; urgency=medium
   [ Amber Brown ]
   * Update logging config defaults to match API changes in Synapse.
 
+  [ Richard van der Hoff ]
+  * Add Recommends and Depends for some libraries which you probably want.
+
  -- Erik Johnston <erikj@rae>  Thu, 04 Jul 2019 13:59:02 +0100
 
 matrix-synapse-py3 (1.1.0) stable; urgency=medium
debian/control (vendored)
@@ -2,16 +2,20 @@ Source: matrix-synapse-py3
 Section: contrib/python
 Priority: extra
 Maintainer: Synapse Packaging team <packages@matrix.org>
+# keep this list in sync with the build dependencies in docker/Dockerfile-dhvirtualenv.
 Build-Depends:
  debhelper (>= 9),
  dh-systemd,
  dh-virtualenv (>= 1.1),
+ libsystemd-dev,
+ libpq-dev,
  lsb-release,
  python3-dev,
  python3,
  python3-setuptools,
  python3-pip,
  python3-venv,
+ libsqlite3-dev,
  tar,
 Standards-Version: 3.9.8
 Homepage: https://github.com/matrix-org/synapse

@@ -28,9 +32,12 @@ Depends:
  debconf,
  python3-distutils|libpython3-stdlib (<< 3.6),
  ${misc:Depends},
+ ${shlibs:Depends},
  ${synapse:pydepends},
 # some of our scripts use perl, but none of them are important,
 # so we put perl:Depends in Suggests rather than Depends.
+Recommends:
+ ${shlibs1:Recommends},
 Suggests:
  sqlite3,
  ${perl:Depends},
debian/rules (vendored)
@@ -3,15 +3,29 @@
 # Build Debian package using https://github.com/spotify/dh-virtualenv
 #
 
+# assume we only have one package
+PACKAGE_NAME:=`dh_listpackages`
+
 override_dh_systemd_enable:
 	dh_systemd_enable --name=matrix-synapse
 
 override_dh_installinit:
 	dh_installinit --name=matrix-synapse
 
+# we don't really want to strip the symbols from our object files.
 override_dh_strip:
 
 override_dh_shlibdeps:
+	# make the postgres package's dependencies a recommendation
+	# rather than a hard dependency.
+	find debian/$(PACKAGE_NAME)/ -path '*/site-packages/psycopg2/*.so' | \
+		xargs dpkg-shlibdeps -Tdebian/$(PACKAGE_NAME).substvars \
+			-pshlibs1 -dRecommends
+
+	# all the other dependencies can be normal 'Depends' requirements,
+	# except for PIL's, which is self-contained and which confuses
+	# dpkg-shlibdeps.
+	dh_shlibdeps -X site-packages/PIL/.libs -X site-packages/psycopg2
+
 override_dh_virtualenv:
 	./debian/build_virtualenv
docker/Dockerfile-dhvirtualenv
@@ -43,6 +43,9 @@ RUN cd dh-virtualenv-1.1 && dpkg-buildpackage -us -uc -b
 FROM ${distro}
 
 # Install the build dependencies
+#
+# NB: keep this list in sync with the list of build-deps in debian/control
+# TODO: it would be nice to do that automatically.
 RUN apt-get update -qq -o Acquire::Languages=none \
     && env DEBIAN_FRONTEND=noninteractive apt-get install \
         -yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
docs/metrics-howto.rst
@@ -59,6 +59,108 @@ How to monitor Synapse metrics using Prometheus
    Restart Prometheus.
 
 
+Renaming of metrics & deprecation of old names in 1.2
+-----------------------------------------------------
+
+Synapse 1.2 updates the Prometheus metrics to match the naming convention of the
+upstream ``prometheus_client``. The old names are considered deprecated and will
+be removed in a future version of Synapse.
+
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| New Name                                                                     | Old Name                                                              |
++=============================================================================+=======================================================================+
+| python_gc_objects_collected_total                                           | python_gc_objects_collected                                           |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| python_gc_objects_uncollectable_total                                       | python_gc_objects_uncollectable                                       |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| python_gc_collections_total                                                 | python_gc_collections                                                 |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| process_cpu_seconds_total                                                   | process_cpu_seconds                                                   |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_federation_client_sent_transactions_total                           | synapse_federation_client_sent_transactions                           |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_federation_client_events_processed_total                            | synapse_federation_client_events_processed                            |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_event_processing_loop_count_total                                   | synapse_event_processing_loop_count                                   |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_event_processing_loop_room_count_total                              | synapse_event_processing_loop_room_count                              |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_util_metrics_block_count_total                                      | synapse_util_metrics_block_count                                      |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_util_metrics_block_time_seconds_total                               | synapse_util_metrics_block_time_seconds                               |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_util_metrics_block_ru_utime_seconds_total                           | synapse_util_metrics_block_ru_utime_seconds                           |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_util_metrics_block_ru_stime_seconds_total                           | synapse_util_metrics_block_ru_stime_seconds                           |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_util_metrics_block_db_txn_count_total                               | synapse_util_metrics_block_db_txn_count                               |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_util_metrics_block_db_txn_duration_seconds_total                    | synapse_util_metrics_block_db_txn_duration_seconds                    |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_util_metrics_block_db_sched_duration_seconds_total                  | synapse_util_metrics_block_db_sched_duration_seconds                  |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_background_process_start_count_total                                | synapse_background_process_start_count                                |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_background_process_ru_utime_seconds_total                           | synapse_background_process_ru_utime_seconds                           |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_background_process_ru_stime_seconds_total                           | synapse_background_process_ru_stime_seconds                           |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_background_process_db_txn_count_total                               | synapse_background_process_db_txn_count                               |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_background_process_db_txn_duration_seconds_total                    | synapse_background_process_db_txn_duration_seconds                    |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_background_process_db_sched_duration_seconds_total                  | synapse_background_process_db_sched_duration_seconds                  |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_storage_events_persisted_events_total                               | synapse_storage_events_persisted_events                               |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_storage_events_persisted_events_sep_total                           | synapse_storage_events_persisted_events_sep                           |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_storage_events_state_delta_total                                    | synapse_storage_events_state_delta                                    |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_storage_events_state_delta_single_event_total                       | synapse_storage_events_state_delta_single_event                       |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_storage_events_state_delta_reuse_delta_total                        | synapse_storage_events_state_delta_reuse_delta                        |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_federation_server_received_pdus_total                               | synapse_federation_server_received_pdus                               |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_federation_server_received_edus_total                               | synapse_federation_server_received_edus                               |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_handler_presence_notified_presence_total                            | synapse_handler_presence_notified_presence                            |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_handler_presence_federation_presence_out_total                      | synapse_handler_presence_federation_presence_out                      |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_handler_presence_presence_updates_total                             | synapse_handler_presence_presence_updates                             |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_handler_presence_timers_fired_total                                 | synapse_handler_presence_timers_fired                                 |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_handler_presence_federation_presence_total                          | synapse_handler_presence_federation_presence                          |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_handler_presence_bump_active_time_total                             | synapse_handler_presence_bump_active_time                             |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_federation_client_sent_edus_total                                   | synapse_federation_client_sent_edus                                   |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_federation_client_sent_pdu_destinations_count_total                 | synapse_federation_client_sent_pdu_destinations:count                 |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_federation_client_sent_pdu_destinations_total                       | synapse_federation_client_sent_pdu_destinations:total                 |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_handlers_appservice_events_processed_total                          | synapse_handlers_appservice_events_processed                          |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_notifier_notified_events_total                                      | synapse_notifier_notified_events                                      |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter_total | synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_push_bulk_push_rule_evaluator_push_rules_state_size_counter_total   | synapse_push_bulk_push_rule_evaluator_push_rules_state_size_counter   |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_http_httppusher_http_pushes_processed_total                         | synapse_http_httppusher_http_pushes_processed                         |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_http_httppusher_http_pushes_failed_total                            | synapse_http_httppusher_http_pushes_failed                            |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_http_httppusher_badge_updates_processed_total                       | synapse_http_httppusher_badge_updates_processed                       |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+| synapse_http_httppusher_badge_updates_failed_total                          | synapse_http_httppusher_badge_updates_failed                          |
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+
+
+
 Removal of deprecated metrics & time based counters becoming histograms in 0.31.0
 ---------------------------------------------------------------------------------
 
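The pattern behind nearly every row above is that ``prometheus_client`` 0.4.0+
appends a ``_total`` suffix to counter samples. A minimal sketch, using a
hypothetical metric name, showing the modern client emitting the suffixed name::

    from prometheus_client import CollectorRegistry, Counter, generate_latest

    registry = CollectorRegistry()
    # hypothetical counter, for illustration only
    events = Counter("demo_events_processed", "events handled", registry=registry)
    events.inc()

    # prints a sample named "demo_events_processed_total"
    print(generate_latest(registry).decode())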
docs/postgres.rst
@@ -11,7 +11,9 @@ a postgres database.
 
 * If you are using the `matrix.org debian/ubuntu
   packages <../INSTALL.md#matrixorg-packages>`_,
-  the necessary libraries will already be installed.
+  the necessary python library will already be installed, but you will need to
+  ensure the low-level postgres library is installed, which you can do with
+  ``apt install libpq5``.
 
 * For other pre-built packages, please consult the documentation from the
   relevant package.

@@ -34,7 +36,7 @@ Assuming your PostgreSQL database user is called ``postgres``, create a user
 
    su - postgres
    createuser --pwprompt synapse_user
 
 Before you can authenticate with the ``synapse_user``, you must create a
 database that it can access. To create a database, first connect to the database
 with your database user::

@@ -53,7 +55,7 @@ and then run::
 
 This would create an appropriate database named ``synapse`` owned by the
 ``synapse_user`` user (which must already have been created as above).
 
 Note that the PostgreSQL database *must* have the correct encoding set (as
 shown above), otherwise it will not be able to store UTF8 strings.
 
 You may need to enable password authentication so ``synapse_user`` can connect
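As a quick, illustrative sanity check (not part of the docs above; adjust the
host and password to your setup), the encoding can be confirmed from python
with ``psycopg2``, which needs the low-level ``libpq`` library at runtime::

    import psycopg2  # requires libpq (e.g. libpq5 on debian/ubuntu)

    conn = psycopg2.connect(
        host="localhost", user="synapse_user", password="...", dbname="synapse"
    )
    with conn.cursor() as cur:
        cur.execute("SHOW SERVER_ENCODING")
        print(cur.fetchone())  # should print ('UTF8',)
    conn.close()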
docs/sample_config.yaml
@@ -1409,17 +1409,34 @@ password_config:
 
 
 ## Opentracing ##
-# These settings enable opentracing which implements distributed tracing
-# This allows you to observe the causal chain of events across servers
-# including requests, key lookups etc. across any server running
-# synapse or any other other services which supports opentracing.
-# (specifically those implemented with jaeger)
-
-#opentracing:
-# # Enable / disable tracer
-# tracer_enabled: false
-# # The list of homeservers we wish to expose our current traces to.
-# # The list is a list of regexes which are matched against the
-# # servername of the homeserver
-# homeserver_whitelist:
-# - ".*"
+# These settings enable opentracing, which implements distributed tracing.
+# This allows you to observe the causal chains of events across servers
+# including requests, key lookups etc., across any server running
+# synapse or any other service which supports opentracing
+# (specifically those implemented with Jaeger).
+#
+opentracing:
+  # tracing is disabled by default. Uncomment the following line to enable it.
+  #
+  #enabled: true
+
+  # The list of homeservers we wish to exchange span contexts and span baggage with.
+  #
+  # Though it's mostly safe to send and receive span contexts to and from
+  # untrusted users, since span contexts are usually opaque IDs, it can lead to
+  # two problems, namely:
+  # - If the span context is marked as sampled by the sending homeserver, the
+  #   receiver will sample it. Therefore two homeservers with wildly divergent
+  #   sampling policies could incur higher sampling counts than intended.
+  # - Span baggage can be arbitrary data. For safety this has been disabled in
+  #   synapse, but that doesn't prevent another server sending you baggage which
+  #   will be logged to opentracing logs.
+  #
+  # This is a list of regexes which are matched against the server_name of the
+  # homeserver.
+  #
+  # By default, it is empty, so no servers are matched.
+  #
+  #homeserver_whitelist:
+  #  - ".*"
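To illustrate the whitelist semantics (a sketch, not Synapse's implementation):
each entry is a regex matched against a homeserver's ``server_name``, so tracing
can be limited to servers you trust::

    import re

    # hypothetical whitelist: trace only our own domains
    homeserver_whitelist = [r".*\.example\.com", "localhost"]
    pattern = re.compile("|".join("(?:%s)" % r for r in homeserver_whitelist))

    print(bool(pattern.match("hs1.example.com")))  # True
    print(bool(pattern.match("matrix.org")))       # False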
synapse/api/auth.py
@@ -606,21 +606,6 @@ class Auth(object):
 
         defer.returnValue(auth_ids)
 
-    def check_redaction(self, room_version, event, auth_events):
-        """Check whether the event sender is allowed to redact the target event.
-
-        Returns:
-            True if the the sender is allowed to redact the target event if the
-            target event was created by them.
-            False if the sender is allowed to redact the target event with no
-            further checks.
-
-        Raises:
-            AuthError if the event sender is definitely not allowed to redact
-            the target event.
-        """
-        return event_auth.check_redaction(room_version, event, auth_events)
-
     @defer.inlineCallbacks
     def check_can_change_room_list(self, room_id, user):
         """Check if the user is allowed to edit the room's entry in the
synapse/app/_base.py
@@ -149,8 +149,7 @@ def listen_metrics(bind_addresses, port):
     """
     Start Prometheus metrics server.
     """
-    from synapse.metrics import RegistryProxy
-    from prometheus_client import start_http_server
+    from synapse.metrics import RegistryProxy, start_http_server
 
     for host in bind_addresses:
         logger.info("Starting metrics listener on %s:%d", host, port)
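After this change both the registry proxy and the HTTP exposition helper come
from ``synapse.metrics``. The call inside the loop is not shown in this hunk;
assuming the vendored helper keeps ``prometheus_client``'s signature, usage
would look like this sketch::

    from synapse.metrics import RegistryProxy, start_http_server

    # assumed signature, mirroring prometheus_client.start_http_server
    start_http_server(9092, addr="127.0.0.1", registry=RegistryProxy)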
@@ -27,8 +27,7 @@ from synapse.config.homeserver import HomeServerConfig
 from synapse.config.logger import setup_logging
 from synapse.http.site import SynapseSite
 from synapse.logging.context import LoggingContext, run_in_background
-from synapse.metrics import RegistryProxy
-from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
+from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
 from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
 from synapse.replication.slave.storage.directory import DirectoryStore
 from synapse.replication.slave.storage.events import SlavedEventStore

@@ -28,8 +28,7 @@ from synapse.config.logger import setup_logging
 from synapse.http.server import JsonResource
 from synapse.http.site import SynapseSite
 from synapse.logging.context import LoggingContext
-from synapse.metrics import RegistryProxy
-from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
+from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
 from synapse.replication.slave.storage._base import BaseSlavedStore
 from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
 from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore

@@ -28,8 +28,7 @@ from synapse.config.logger import setup_logging
 from synapse.http.server import JsonResource
 from synapse.http.site import SynapseSite
 from synapse.logging.context import LoggingContext
-from synapse.metrics import RegistryProxy
-from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
+from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
 from synapse.replication.slave.storage._base import BaseSlavedStore
 from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
 from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore

@@ -29,8 +29,7 @@ from synapse.config.logger import setup_logging
 from synapse.federation.transport.server import TransportLayerServer
 from synapse.http.site import SynapseSite
 from synapse.logging.context import LoggingContext
-from synapse.metrics import RegistryProxy
-from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
+from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
 from synapse.replication.slave.storage._base import BaseSlavedStore
 from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
 from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore

@@ -28,9 +28,8 @@ from synapse.config.logger import setup_logging
 from synapse.federation import send_queue
 from synapse.http.site import SynapseSite
 from synapse.logging.context import LoggingContext, run_in_background
-from synapse.metrics import RegistryProxy
+from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
 from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
 from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
 from synapse.replication.slave.storage.devices import SlavedDeviceStore
 from synapse.replication.slave.storage.events import SlavedEventStore

@@ -30,8 +30,7 @@ from synapse.http.server import JsonResource
 from synapse.http.servlet import RestServlet, parse_json_object_from_request
 from synapse.http.site import SynapseSite
 from synapse.logging.context import LoggingContext
-from synapse.metrics import RegistryProxy
-from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
+from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
 from synapse.replication.slave.storage._base import BaseSlavedStore
 from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
 from synapse.replication.slave.storage.client_ips import SlavedClientIpStore

@@ -55,9 +55,8 @@ from synapse.http.additional_resource import AdditionalResource
 from synapse.http.server import RootRedirect
 from synapse.http.site import SynapseSite
 from synapse.logging.context import LoggingContext
-from synapse.metrics import RegistryProxy
+from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
 from synapse.metrics.background_process_metrics import run_as_background_process
-from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
 from synapse.module_api import ModuleApi
 from synapse.python_dependencies import check_requirements
 from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource

@@ -28,8 +28,7 @@ from synapse.config.homeserver import HomeServerConfig
 from synapse.config.logger import setup_logging
 from synapse.http.site import SynapseSite
 from synapse.logging.context import LoggingContext
-from synapse.metrics import RegistryProxy
-from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
+from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
 from synapse.replication.slave.storage._base import BaseSlavedStore
 from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
 from synapse.replication.slave.storage.client_ips import SlavedClientIpStore

@@ -27,8 +27,7 @@ from synapse.config.homeserver import HomeServerConfig
 from synapse.config.logger import setup_logging
 from synapse.http.site import SynapseSite
 from synapse.logging.context import LoggingContext, run_in_background
-from synapse.metrics import RegistryProxy
-from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
+from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
 from synapse.replication.slave.storage._base import __func__
 from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
 from synapse.replication.slave.storage.events import SlavedEventStore

@@ -32,8 +32,7 @@ from synapse.handlers.presence import PresenceHandler, get_interested_parties
 from synapse.http.server import JsonResource
 from synapse.http.site import SynapseSite
 from synapse.logging.context import LoggingContext, run_in_background
-from synapse.metrics import RegistryProxy
-from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
+from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
 from synapse.replication.slave.storage._base import BaseSlavedStore, __func__
 from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
 from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore

@@ -29,8 +29,7 @@ from synapse.config.logger import setup_logging
 from synapse.http.server import JsonResource
 from synapse.http.site import SynapseSite
 from synapse.logging.context import LoggingContext, run_in_background
-from synapse.metrics import RegistryProxy
-from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
+from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
 from synapse.replication.slave.storage._base import BaseSlavedStore
 from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
 from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
synapse/config/tracer.py
@@ -18,33 +18,52 @@ from ._base import Config, ConfigError
 
 class TracerConfig(Config):
     def read_config(self, config, **kwargs):
-        self.tracer_config = config.get("opentracing")
-
-        self.tracer_config = config.get("opentracing", {"tracer_enabled": False})
-
-        if self.tracer_config.get("tracer_enabled", False):
-            # The tracer is enabled so sanitize the config
-            # If no whitelists are given
-            self.tracer_config.setdefault("homeserver_whitelist", [])
-
-        if not isinstance(self.tracer_config.get("homeserver_whitelist"), list):
-            raise ConfigError("Tracer homesererver_whitelist config is malformed")
+        opentracing_config = config.get("opentracing")
+        if opentracing_config is None:
+            opentracing_config = {}
+
+        self.opentracer_enabled = opentracing_config.get("enabled", False)
+        if not self.opentracer_enabled:
+            return
+
+        # The tracer is enabled so sanitize the config
+        self.opentracer_whitelist = opentracing_config.get("homeserver_whitelist", [])
+        if not isinstance(self.opentracer_whitelist, list):
+            raise ConfigError("Tracer homeserver_whitelist config is malformed")
 
     def generate_config_section(cls, **kwargs):
         return """\
         ## Opentracing ##
-        # These settings enable opentracing which implements distributed tracing
-        # This allows you to observe the causal chain of events across servers
-        # including requests, key lookups etc. across any server running
-        # synapse or any other other services which supports opentracing.
-        # (specifically those implemented with jaeger)
-
-        #opentracing:
-        # # Enable / disable tracer
-        # tracer_enabled: false
-        # # The list of homeservers we wish to expose our current traces to.
-        # # The list is a list of regexes which are matched against the
-        # # servername of the homeserver
-        # homeserver_whitelist:
-        # - ".*"
+        # These settings enable opentracing, which implements distributed tracing.
+        # This allows you to observe the causal chains of events across servers
+        # including requests, key lookups etc., across any server running
+        # synapse or any other service which supports opentracing
+        # (specifically those implemented with Jaeger).
+        #
+        opentracing:
+          # tracing is disabled by default. Uncomment the following line to enable it.
+          #
+          #enabled: true
+
+          # The list of homeservers we wish to exchange span contexts and span baggage with.
+          #
+          # Though it's mostly safe to send and receive span contexts to and from
+          # untrusted users, since span contexts are usually opaque IDs, it can lead to
+          # two problems, namely:
+          # - If the span context is marked as sampled by the sending homeserver, the
+          #   receiver will sample it. Therefore two homeservers with wildly divergent
+          #   sampling policies could incur higher sampling counts than intended.
+          # - Span baggage can be arbitrary data. For safety this has been disabled in
+          #   synapse, but that doesn't prevent another server sending you baggage which
+          #   will be logged to opentracing logs.
+          #
+          # This is a list of regexes which are matched against the server_name of the
+          # homeserver.
+          #
+          # By default, it is empty, so no servers are matched.
+          #
+          #homeserver_whitelist:
+          #  - ".*"
         """
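A standalone sketch of the new parsing logic, for illustration only (the real
class inherits from ``Config`` and stores the results as attributes)::

    def parse_opentracing(config):
        """Illustrative mirror of TracerConfig.read_config above."""
        opentracing_config = config.get("opentracing") or {}
        if not opentracing_config.get("enabled", False):
            return False, []
        whitelist = opentracing_config.get("homeserver_whitelist", [])
        if not isinstance(whitelist, list):
            raise ValueError("Tracer homeserver_whitelist config is malformed")
        return True, whitelist

    assert parse_opentracing({}) == (False, [])
    assert parse_opentracing({"opentracing": {"enabled": True}}) == (True, [])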
synapse/events/__init__.py
@@ -104,6 +104,17 @@ class _EventInternalMetadata(object):
         """
         return getattr(self, "proactively_send", True)
 
+    def is_redacted(self):
+        """Whether the event has been redacted.
+
+        This is used for efficiently checking whether an event has been
+        marked as redacted without needing to make another database call.
+
+        Returns:
+            bool
+        """
+        return getattr(self, "redacted", False)
+
 
 def _event_dict_property(key):
     # We want to be able to use hasattr with the event dict properties.
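The flag is a plain attribute which may simply be absent; ``prune_event`` in the
next hunk is what sets it. A self-contained illustration of the ``getattr``
pattern used above::

    class _Metadata:
        def is_redacted(self):
            # the attribute is only ever set on events that have been redacted
            return getattr(self, "redacted", False)

    meta = _Metadata()
    assert meta.is_redacted() is False
    meta.redacted = True  # what prune_event does to the pruned copy
    assert meta.is_redacted() is True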
synapse/events/utils.py
@@ -52,10 +52,15 @@ def prune_event(event):
 
     from . import event_type_from_format_version
 
-    return event_type_from_format_version(event.format_version)(
+    pruned_event = event_type_from_format_version(event.format_version)(
         pruned_event_dict, event.internal_metadata.get_dict()
     )
 
+    # Mark the event as redacted
+    pruned_event.internal_metadata.redacted = True
+
+    return pruned_event
+
 
 def prune_event_dict(event_dict):
     """Redacts the event_dict in the same way as `prune_event`, except it

@@ -360,9 +365,12 @@ class EventClientSerializer(object):
         event_id = event.event_id
         serialized_event = serialize_event(event, time_now, **kwargs)
 
-        # If MSC1849 is enabled then we need to look if thre are any relations
-        # we need to bundle in with the event
-        if self.experimental_msc1849_support_enabled and bundle_aggregations:
+        # If MSC1849 is enabled then we need to look if there are any relations
+        # we need to bundle in with the event.
+        # Do not bundle relations if the event has been redacted
+        if not event.internal_metadata.is_redacted() and (
+            self.experimental_msc1849_support_enabled and bundle_aggregations
+        ):
             annotations = yield self.store.get_aggregation_groups_for_event(event_id)
             references = yield self.store.get_relations_for_event(
                 event_id, RelationTypes.REFERENCE, direction="f"
(file diff suppressed because it is too large)
synapse/handlers/message.py
@@ -23,6 +23,7 @@ from canonicaljson import encode_canonical_json, json
 from twisted.internet import defer
 from twisted.internet.defer import succeed
 
+from synapse import event_auth
 from synapse.api.constants import EventTypes, Membership, RelationTypes
 from synapse.api.errors import (
     AuthError,

@@ -784,6 +785,20 @@ class EventCreationHandler(object):
             event.signatures.update(returned_invite.signatures)
 
         if event.type == EventTypes.Redaction:
+            original_event = yield self.store.get_event(
+                event.redacts,
+                check_redacted=False,
+                get_prev_content=False,
+                allow_rejected=False,
+                allow_none=True,
+                check_room_id=event.room_id,
+            )
+
+            # we can make some additional checks now if we have the original event.
+            if original_event:
+                if original_event.type == EventTypes.Create:
+                    raise AuthError(403, "Redacting create events is not permitted")
+
             prev_state_ids = yield context.get_prev_state_ids(self.store)
             auth_events_ids = yield self.auth.compute_auth_events(
                 event, prev_state_ids, for_verification=True

@@ -791,18 +806,18 @@ class EventCreationHandler(object):
             auth_events = yield self.store.get_events(auth_events_ids)
             auth_events = {(e.type, e.state_key): e for e in auth_events.values()}
             room_version = yield self.store.get_room_version(event.room_id)
-            if self.auth.check_redaction(room_version, event, auth_events=auth_events):
-                original_event = yield self.store.get_event(
-                    event.redacts,
-                    check_redacted=False,
-                    get_prev_content=False,
-                    allow_rejected=False,
-                    allow_none=False,
-                )
+
+            if event_auth.check_redaction(room_version, event, auth_events=auth_events):
+                # this user doesn't have 'redact' rights, so we need to do some more
+                # checks on the original event. Let's start by checking the original
+                # event exists.
+                if not original_event:
+                    raise NotFoundError("Could not find event %s" % (event.redacts,))
+
                 if event.user_id != original_event.user_id:
                     raise AuthError(403, "You don't have permission to redact events")
 
-                # We've already checked.
+                # all the checks are done.
                 event.internal_metadata.recheck_redaction = False
 
         if event.type == EventTypes.Create:
synapse/logging/opentracing.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2019 The Matrix.org Foundation C.I.C.d
+# Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

@@ -24,6 +24,15 @@
 # this move the methods have work very similarly to opentracing's and it should only
 # be a matter of few regexes to move over to opentracing's access patterns proper.
 
+import contextlib
+import logging
+import re
+from functools import wraps
+
+from twisted.internet import defer
+
+from synapse.config import ConfigError
+
 try:
     import opentracing
 except ImportError:

@@ -35,12 +44,6 @@ except ImportError:
     JaegerConfig = None
     LogContextScopeManager = None
 
-import contextlib
-import logging
-import re
-from functools import wraps
-
-from twisted.internet import defer
-
 logger = logging.getLogger(__name__)

@@ -91,7 +94,8 @@ def only_if_tracing(func):
     return _only_if_tracing_inner
 
 
-# Block everything by default
+# A regex which matches the server_names to expose traces for.
+# None means 'block everything'.
 _homeserver_whitelist = None
 
 tags = _DumTagNames

@@ -101,31 +105,24 @@ def init_tracer(config):
     """Set the whitelists and initialise the JaegerClient tracer
 
     Args:
-        config (Config)
-            The config used by the homeserver. Here it's used to set the service
-            name to the homeserver's.
+        config (HomeserverConfig): The config used by the homeserver
     """
     global opentracing
-    if not config.tracer_config.get("tracer_enabled", False):
+    if not config.opentracer_enabled:
         # We don't have a tracer
         opentracing = None
         return
 
-    if not opentracing:
-        logger.error(
-            "The server has been configure to use opentracing but opentracing is not installed."
-        )
-        raise ModuleNotFoundError("opentracing")
-
-    if not JaegerConfig:
-        logger.error(
-            "The server has been configure to use opentracing but opentracing is not installed."
-        )
+    if not opentracing or not JaegerConfig:
+        raise ConfigError(
+            "The server has been configured to use opentracing but opentracing is not "
+            "installed."
         )
 
     # Include the worker name
     name = config.worker_name if config.worker_name else "master"
 
-    set_homeserver_whitelist(config.tracer_config["homeserver_whitelist"])
+    set_homeserver_whitelist(config.opentracer_whitelist)
     jaeger_config = JaegerConfig(
         config={"sampler": {"type": "const", "param": 1}, "logging": True},
         service_name="{} {}".format(config.server_name, name),

@@ -232,7 +229,6 @@ def whitelisted_homeserver(destination):
     """Checks if a destination matches the whitelist
     Args:
         destination (String)"""
-    global _homeserver_whitelist
     if _homeserver_whitelist:
         return _homeserver_whitelist.match(destination)
     return False
synapse/logging/scopecontextmanager.py
@@ -34,9 +34,7 @@ class LogContextScopeManager(ScopeManager):
     """
 
     def __init__(self, config):
-        # Set the whitelists
-        logger.info(config.tracer_config)
-        self._homeserver_whitelist = config.tracer_config["homeserver_whitelist"]
+        pass
 
     @property
     def active(self):
synapse/metrics/__init__.py
@@ -29,8 +29,16 @@ from prometheus_client.core import REGISTRY, GaugeMetricFamily, HistogramMetricF
 
 from twisted.internet import reactor
 
+from synapse.metrics._exposition import (
+    MetricsResource,
+    generate_latest,
+    start_http_server,
+)
+
 logger = logging.getLogger(__name__)
 
+METRICS_PREFIX = "/_synapse/metrics"
+
 running_on_pypy = platform.python_implementation() == "PyPy"
 all_metrics = []
 all_collectors = []

@@ -470,3 +478,12 @@ try:
         gc.disable()
 except AttributeError:
     pass
+
+__all__ = [
+    "MetricsResource",
+    "generate_latest",
+    "start_http_server",
+    "LaterGauge",
+    "InFlightGauge",
+    "BucketCollector",
+]
258 synapse/metrics/_exposition.py (new file)
@@ -0,0 +1,258 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015-2019 Prometheus Python Client Developers
+# Copyright 2019 Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+This code is based off `prometheus_client/exposition.py` from version 0.7.1.
+
+Due to the renaming of metrics in prometheus_client 0.4.0, this customised
+vendoring of the code will emit both the old versions that Synapse dashboards
+expect, and the newer "best practice" version of the up-to-date official client.
+"""
+
+import math
+import threading
+from collections import namedtuple
+from http.server import BaseHTTPRequestHandler, HTTPServer
+from socketserver import ThreadingMixIn
+from urllib.parse import parse_qs, urlparse
+
+from prometheus_client import REGISTRY
+
+from twisted.web.resource import Resource
+
+try:
+    from prometheus_client.samples import Sample
+except ImportError:
+    Sample = namedtuple("Sample", ["name", "labels", "value", "timestamp", "exemplar"])
+
+
+CONTENT_TYPE_LATEST = str("text/plain; version=0.0.4; charset=utf-8")
+
+
+INF = float("inf")
+MINUS_INF = float("-inf")
+
+
+def floatToGoString(d):
+    d = float(d)
+    if d == INF:
+        return "+Inf"
+    elif d == MINUS_INF:
+        return "-Inf"
+    elif math.isnan(d):
+        return "NaN"
+    else:
+        s = repr(d)
+        dot = s.find(".")
+        # Go switches to exponents sooner than Python.
+        # We only need to care about positive values for le/quantile.
+        if d > 0 and dot > 6:
+            mantissa = "{0}.{1}{2}".format(s[0], s[1:dot], s[dot + 1 :]).rstrip("0.")
+            return "{0}e+0{1}".format(mantissa, dot - 1)
+        return s
+
+
+def sample_line(line, name):
+    if line.labels:
+        labelstr = "{{{0}}}".format(
+            ",".join(
+                [
+                    '{0}="{1}"'.format(
+                        k,
+                        v.replace("\\", r"\\").replace("\n", r"\n").replace('"', r"\""),
+                    )
+                    for k, v in sorted(line.labels.items())
+                ]
+            )
+        )
+    else:
+        labelstr = ""
+    timestamp = ""
+    if line.timestamp is not None:
+        # Convert to milliseconds.
+        timestamp = " {0:d}".format(int(float(line.timestamp) * 1000))
+    return "{0}{1} {2}{3}\n".format(
+        name, labelstr, floatToGoString(line.value), timestamp
+    )
+
+
+def nameify_sample(sample):
+    """
+    If we get a prometheus_client<0.4.0 sample as a tuple, transform it into a
+    namedtuple which has the names we expect.
+    """
+    if not isinstance(sample, Sample):
+        sample = Sample(*sample, None, None)
+
+    return sample
+
+
+def generate_latest(registry, emit_help=False):
+    output = []
+
+    for metric in registry.collect():
+
+        if metric.name.startswith("__unused"):
+            continue
+
+        if not metric.samples:
+            # No samples, don't bother.
+            continue
+
+        mname = metric.name
+        mnewname = metric.name
+        mtype = metric.type
+
+        # OpenMetrics -> Prometheus
+        if mtype == "counter":
+            mnewname = mnewname + "_total"
+        elif mtype == "info":
+            mtype = "gauge"
+            mnewname = mnewname + "_info"
+        elif mtype == "stateset":
+            mtype = "gauge"
+        elif mtype == "gaugehistogram":
+            mtype = "histogram"
+        elif mtype == "unknown":
+            mtype = "untyped"
+
+        # Output in the old format for compatibility.
+        if emit_help:
+            output.append(
+                "# HELP {0} {1}\n".format(
+                    mname,
+                    metric.documentation.replace("\\", r"\\").replace("\n", r"\n"),
+                )
+            )
+        output.append("# TYPE {0} {1}\n".format(mname, mtype))
+        for sample in map(nameify_sample, metric.samples):
+            # Get rid of the OpenMetrics specific samples
+            for suffix in ["_created", "_gsum", "_gcount"]:
+                if sample.name.endswith(suffix):
+                    break
+            else:
+                newname = sample.name.replace(mnewname, mname)
+                if ":" in newname and newname.endswith("_total"):
+                    newname = newname[: -len("_total")]
+                output.append(sample_line(sample, newname))
+
+        # Get rid of the weird colon things while we're at it
+        if mtype == "counter":
+            mnewname = mnewname.replace(":total", "")
+        mnewname = mnewname.replace(":", "_")
+
+        if mname == mnewname:
+            continue
+
+        # Also output in the new format, if it's different.
+        if emit_help:
+            output.append(
+                "# HELP {0} {1}\n".format(
+                    mnewname,
+                    metric.documentation.replace("\\", r"\\").replace("\n", r"\n"),
+                )
+            )
+        output.append("# TYPE {0} {1}\n".format(mnewname, mtype))
+        for sample in map(nameify_sample, metric.samples):
+            # Get rid of the OpenMetrics specific samples
+            for suffix in ["_created", "_gsum", "_gcount"]:
+                if sample.name.endswith(suffix):
+                    break
+            else:
+                output.append(
+                    sample_line(
+                        sample, sample.name.replace(":total", "").replace(":", "_")
+                    )
+                )
+
+    return "".join(output).encode("utf-8")
+
+
+class MetricsHandler(BaseHTTPRequestHandler):
+    """HTTP handler that gives metrics from ``REGISTRY``."""
+
+    registry = REGISTRY
+
+    def do_GET(self):
+        registry = self.registry
+        params = parse_qs(urlparse(self.path).query)
+
+        if "help" in params:
+            emit_help = True
+        else:
+            emit_help = False
+
+        try:
+            output = generate_latest(registry, emit_help=emit_help)
+        except Exception:
+            self.send_error(500, "error generating metric output")
+            raise
+        self.send_response(200)
+        self.send_header("Content-Type", CONTENT_TYPE_LATEST)
+        self.end_headers()
+        self.wfile.write(output)
+
+    def log_message(self, format, *args):
+        """Log nothing."""
+
+    @classmethod
+    def factory(cls, registry):
+        """Returns a dynamic MetricsHandler class tied
+        to the passed registry.
+        """
+        # This implementation relies on MetricsHandler.registry
+        # (defined above and defaulted to REGISTRY).
+
+        # As we have unicode_literals, we need to create a str()
+        # object for type().
+        cls_name = str(cls.__name__)
+        MyMetricsHandler = type(cls_name, (cls, object), {"registry": registry})
+        return MyMetricsHandler
+
+
+class _ThreadingSimpleServer(ThreadingMixIn, HTTPServer):
+    """Thread per request HTTP server."""
+
+    # Make worker threads "fire and forget". Beginning with Python 3.7 this
+    # prevents a memory leak because ``ThreadingMixIn`` starts to gather all
+    # non-daemon threads in a list in order to join on them at server close.
+    # Enabling daemon threads virtually makes ``_ThreadingSimpleServer`` the
+    # same as Python 3.7's ``ThreadingHTTPServer``.
+    daemon_threads = True
+
+
+def start_http_server(port, addr="", registry=REGISTRY):
+    """Starts an HTTP server for prometheus metrics as a daemon thread"""
+    CustomMetricsHandler = MetricsHandler.factory(registry)
+    httpd = _ThreadingSimpleServer((addr, port), CustomMetricsHandler)
+    t = threading.Thread(target=httpd.serve_forever)
+    t.daemon = True
+    t.start()
+
+
+class MetricsResource(Resource):
+    """
+    Twisted ``Resource`` that serves prometheus metrics.
+    """
+
+    isLeaf = True
+
+    def __init__(self, registry=REGISTRY):
+        self.registry = registry
+
+    def render_GET(self, request):
+        request.setHeader(b"Content-Type", CONTENT_TYPE_LATEST.encode("ascii"))
+        return generate_latest(self.registry)
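To make the module docstring's dual emission concrete (the metric name below is invented for illustration): for a counter registered as `synapse_http_requests`, `generate_latest` writes both the pre-0.4.0 name that existing dashboards expect and the renamed `_total` form.

    from prometheus_client import REGISTRY

    from synapse.metrics._exposition import generate_latest

    text = generate_latest(REGISTRY).decode("utf-8")
    # the output contains both spellings of the same counter, e.g.:
    #   # TYPE synapse_http_requests counter         <- old format
    #   synapse_http_requests 12.0
    #   # TYPE synapse_http_requests_total counter   <- new "best practice" format
    #   synapse_http_requests_total 12.0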
@@ -1,20 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2015, 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from prometheus_client.twisted import MetricsResource
-
-METRICS_PREFIX = "/_synapse/metrics"
-
-__all__ = ["MetricsResource", "METRICS_PREFIX"]
@@ -65,9 +65,7 @@ REQUIREMENTS = [
     "msgpack>=0.5.2",
     "phonenumbers>=8.2.0",
     "six>=1.10",
-    # prometheus_client 0.4.0 changed the format of counter metrics
-    # (cf https://github.com/matrix-org/synapse/issues/4001)
-    "prometheus_client>=0.0.18,<0.4.0",
+    "prometheus_client>=0.0.18,<0.8.0",
    # we use attr.s(slots), which arrived in 16.0.0
    # Twisted 18.7.0 requires attrs>=17.4.0
    "attrs>=17.4.0",
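One line of context on the relaxed pin, since it interacts with the vendored module above: from prometheus_client 0.4.0 a counter registered as `foo` exposes its samples as `foo_total`, which `synapse.metrics._exposition.generate_latest` maps back to the old name, so any version in the widened `[0.0.18, 0.8.0)` range now yields the same exposition. A quick check (illustrative only):

    import prometheus_client

    # the renaming behaviour appeared in 0.4.0; both sides of that boundary
    # are now acceptable because the vendored exposition rewrites the names
    print(prometheus_client.__version__)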
@@ -34,6 +34,7 @@ from synapse.http.servlet import (
 from synapse.rest.client.transactions import HttpTransactionCache
 from synapse.storage.relations import (
     AggregationPaginationToken,
+    PaginationChunk,
     RelationPaginationToken,
 )

@@ -153,23 +154,28 @@ class RelationPaginationServlet(RestServlet):
         from_token = parse_string(request, "from")
         to_token = parse_string(request, "to")

-        if from_token:
-            from_token = RelationPaginationToken.from_string(from_token)
+        if event.internal_metadata.is_redacted():
+            # If the event is redacted, return an empty list of relations
+            pagination_chunk = PaginationChunk(chunk=[])
+        else:
+            # Return the relations
+            if from_token:
+                from_token = RelationPaginationToken.from_string(from_token)

-        if to_token:
-            to_token = RelationPaginationToken.from_string(to_token)
+            if to_token:
+                to_token = RelationPaginationToken.from_string(to_token)

-        result = yield self.store.get_relations_for_event(
-            event_id=parent_id,
-            relation_type=relation_type,
-            event_type=event_type,
-            limit=limit,
-            from_token=from_token,
-            to_token=to_token,
-        )
+            pagination_chunk = yield self.store.get_relations_for_event(
+                event_id=parent_id,
+                relation_type=relation_type,
+                event_type=event_type,
+                limit=limit,
+                from_token=from_token,
+                to_token=to_token,
+            )

         events = yield self.store.get_events_as_list(
-            [c["event_id"] for c in result.chunk]
+            [c["event_id"] for c in pagination_chunk.chunk]
         )

         now = self.clock.time_msec()

@@ -186,7 +192,7 @@ class RelationPaginationServlet(RestServlet):
             events, now, bundle_aggregations=False
         )

-        return_value = result.to_dict()
+        return_value = pagination_chunk.to_dict()
         return_value["chunk"] = events
         return_value["original_event"] = original_event
@@ -234,7 +240,7 @@ class RelationAggregationPaginationServlet(RestServlet):

         # This checks that a) the event exists and b) the user is allowed to
         # view it.
-        yield self.event_handler.get_event(requester.user, room_id, parent_id)
+        event = yield self.event_handler.get_event(requester.user, room_id, parent_id)

         if relation_type not in (RelationTypes.ANNOTATION, None):
             raise SynapseError(400, "Relation type must be 'annotation'")

@@ -243,21 +249,26 @@ class RelationAggregationPaginationServlet(RestServlet):
         from_token = parse_string(request, "from")
         to_token = parse_string(request, "to")

-        if from_token:
-            from_token = AggregationPaginationToken.from_string(from_token)
+        if event.internal_metadata.is_redacted():
+            # If the event is redacted, return an empty list of relations
+            pagination_chunk = PaginationChunk(chunk=[])
+        else:
+            # Return the relations
+            if from_token:
+                from_token = AggregationPaginationToken.from_string(from_token)

-        if to_token:
-            to_token = AggregationPaginationToken.from_string(to_token)
+            if to_token:
+                to_token = AggregationPaginationToken.from_string(to_token)

-        res = yield self.store.get_aggregation_groups_for_event(
-            event_id=parent_id,
-            event_type=event_type,
-            limit=limit,
-            from_token=from_token,
-            to_token=to_token,
-        )
+            pagination_chunk = yield self.store.get_aggregation_groups_for_event(
+                event_id=parent_id,
+                event_type=event_type,
+                limit=limit,
+                from_token=from_token,
+                to_token=to_token,
+            )

-        defer.returnValue((200, res.to_dict()))
+        defer.returnValue((200, pagination_chunk.to_dict()))


 class RelationAggregationGroupPaginationServlet(RestServlet):
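Sketching the net effect of the two servlet changes above (room and event ids invented): once the parent event is redacted, both pagination endpoints return an empty chunk instead of the stored relations.

    # GET /_matrix/client/unstable/rooms/{room_id}/relations/{parent_id}/m.replace/m.room.message
    #   before the parent is redacted -> {"chunk": [{"event_id": "$edit", ...}], ...}
    #   after the parent is redacted  -> {"chunk": []}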
@@ -37,6 +37,7 @@ from synapse.logging.context import (
 )
 from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.types import get_domain_from_id
+from synapse.util import batch_iter
 from synapse.util.metrics import Measure

 from ._base import SQLBaseStore

@@ -218,9 +219,108 @@ class EventsWorkerStore(SQLBaseStore):
         if not event_ids:
             defer.returnValue([])

-        event_id_list = event_ids
-        event_ids = set(event_ids)
+        # there may be duplicates so we cast the list to a set
+        event_entry_map = yield self._get_events_from_cache_or_db(
+            set(event_ids), allow_rejected=allow_rejected
+        )
+
+        events = []
+        for event_id in event_ids:
+            entry = event_entry_map.get(event_id, None)
+            if not entry:
+                continue
+
+            if not allow_rejected:
+                assert not entry.event.rejected_reason, (
+                    "rejected event returned from _get_events_from_cache_or_db despite "
+                    "allow_rejected=False"
+                )
+
+            # We may not have had the original event when we received a redaction, so
+            # we have to recheck auth now.
+
+            if not allow_rejected and entry.event.type == EventTypes.Redaction:
+                redacted_event_id = entry.event.redacts
+                event_map = yield self._get_events_from_cache_or_db([redacted_event_id])
+                original_event_entry = event_map.get(redacted_event_id)
+                if not original_event_entry:
+                    # we don't have the redacted event (or it was rejected).
+                    #
+                    # We assume that the redaction isn't authorized for now; if the
+                    # redacted event later turns up, the redaction will be re-checked,
+                    # and if it is found valid, the original will get redacted before it
+                    # is served to the client.
+                    logger.debug(
+                        "Withholding redaction event %s since we don't (yet) have the "
+                        "original %s",
+                        event_id,
+                        redacted_event_id,
+                    )
+                    continue
+
+                original_event = original_event_entry.event
+                if original_event.type == EventTypes.Create:
+                    # we never serve redactions of Creates to clients.
+                    logger.info(
+                        "Withholding redaction %s of create event %s",
+                        event_id,
+                        redacted_event_id,
+                    )
+                    continue
+
+                if entry.event.internal_metadata.need_to_check_redaction():
+                    original_domain = get_domain_from_id(original_event.sender)
+                    redaction_domain = get_domain_from_id(entry.event.sender)
+                    if original_domain != redaction_domain:
+                        # the senders don't match, so this is forbidden
+                        logger.info(
+                            "Withholding redaction %s whose sender domain %s doesn't "
+                            "match that of redacted event %s %s",
+                            event_id,
+                            redaction_domain,
+                            redacted_event_id,
+                            original_domain,
+                        )
+                        continue
+
+                    # Update the cache to save doing the checks again.
+                    entry.event.internal_metadata.recheck_redaction = False
+
+            if check_redacted and entry.redacted_event:
+                event = entry.redacted_event
+            else:
+                event = entry.event
+
+            events.append(event)
+
+            if get_prev_content:
+                if "replaces_state" in event.unsigned:
+                    prev = yield self.get_event(
+                        event.unsigned["replaces_state"],
+                        get_prev_content=False,
+                        allow_none=True,
+                    )
+                    if prev:
+                        event.unsigned = dict(event.unsigned)
+                        event.unsigned["prev_content"] = prev.content
+                        event.unsigned["prev_sender"] = prev.sender
+
+        defer.returnValue(events)
+
+    @defer.inlineCallbacks
+    def _get_events_from_cache_or_db(self, event_ids, allow_rejected=False):
+        """Fetch a bunch of events from the cache or the database.
+
+        If events are pulled from the database, they will be cached for future lookups.
+
+        Args:
+            event_ids (Iterable[str]): The event_ids of the events to fetch
+            allow_rejected (bool): Whether to include rejected events
+
+        Returns:
+            Deferred[Dict[str, _EventCacheEntry]]:
+                map from event id to result
+        """
         event_entry_map = self._get_events_from_cache(
             event_ids, allow_rejected=allow_rejected
         )
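A small sketch (event ids invented) of the ordering subtlety in the rewrite above: the cache/db helper is queried with a set, but the result list is assembled by iterating the caller's original list, so order and duplicates in the request are preserved.

    event_ids = ["$a", "$b", "$a"]   # the caller may pass duplicates
    unique = set(event_ids)          # {"$a", "$b"}: each id is fetched once
    # events are then appended by iterating event_ids, so the response
    # follows request order; ids we don't have are silently skipped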
@@ -243,81 +343,7 @@ class EventsWorkerStore(SQLBaseStore):

         event_entry_map.update(missing_events)

-        events = []
-        for event_id in event_id_list:
-            entry = event_entry_map.get(event_id, None)
-            if not entry:
-                continue
-
-            # Starting in room version v3, some redactions need to be rechecked if we
-            # didn't have the redacted event at the time, so we recheck on read
-            # instead.
-            if not allow_rejected and entry.event.type == EventTypes.Redaction:
-                if entry.event.internal_metadata.need_to_check_redaction():
-                    # XXX: we need to avoid calling get_event here.
-                    #
-                    # The problem is that we end up at this point when an event
-                    # which has been redacted is pulled out of the database by
-                    # _enqueue_events, because _enqueue_events needs to check
-                    # the redaction before it can cache the redacted event. So
-                    # obviously, calling get_event to get the redacted event out
-                    # of the database gives us an infinite loop.
-                    #
-                    # For now (quick hack to fix during 0.99 release cycle), we
-                    # just go and fetch the relevant row from the db, but it
-                    # would be nice to think about how we can cache this rather
-                    # than hit the db every time we access a redaction event.
-                    #
-                    # One thought on how to do this:
-                    #  1. split get_events_as_list up so that it is divided into
-                    #     (a) get the rawish event from the db/cache, (b) do the
-                    #     redaction/rejection filtering
-                    #  2. have _get_event_from_row just call the first half of
-                    #     that
-
-                    orig_sender = yield self._simple_select_one_onecol(
-                        table="events",
-                        keyvalues={"event_id": entry.event.redacts},
-                        retcol="sender",
-                        allow_none=True,
-                    )
-
-                    expected_domain = get_domain_from_id(entry.event.sender)
-                    if (
-                        orig_sender
-                        and get_domain_from_id(orig_sender) == expected_domain
-                    ):
-                        # This redaction event is allowed. Mark as not needing a
-                        # recheck.
-                        entry.event.internal_metadata.recheck_redaction = False
-                    else:
-                        # We don't have the event that is being redacted, so we
-                        # assume that the event isn't authorized for now. (If we
-                        # later receive the event, then we will always redact
-                        # it anyway, since we have this redaction)
-                        continue
-
-            if allow_rejected or not entry.event.rejected_reason:
-                if check_redacted and entry.redacted_event:
-                    event = entry.redacted_event
-                else:
-                    event = entry.event
-
-                events.append(event)
-
-                if get_prev_content:
-                    if "replaces_state" in event.unsigned:
-                        prev = yield self.get_event(
-                            event.unsigned["replaces_state"],
-                            get_prev_content=False,
-                            allow_none=True,
-                        )
-                        if prev:
-                            event.unsigned = dict(event.unsigned)
-                            event.unsigned["prev_content"] = prev.content
-                            event.unsigned["prev_sender"] = prev.sender
-
-        defer.returnValue(events)
+        return event_entry_map

     def _invalidate_get_event_cache(self, event_id):
         self._get_event_cache.invalidate((event_id,))
@@ -326,7 +352,7 @@ class EventsWorkerStore(SQLBaseStore):
         """Fetch events from the caches

         Args:
-            events (list(str)): list of event_ids to fetch
+            events (Iterable[str]): list of event_ids to fetch
             allow_rejected (bool): Whether to return events that were rejected
             update_metrics (bool): Whether to update the cache hit ratio metrics
@@ -384,19 +410,16 @@ class EventsWorkerStore(SQLBaseStore):
                 The fetch requests. Each entry consists of a list of event
                 ids to be fetched, and a deferred to be completed once the
                 events have been fetched.

         """
         with Measure(self._clock, "_fetch_event_list"):
             try:
                 event_id_lists = list(zip(*event_list))[0]
                 event_ids = [item for sublist in event_id_lists for item in sublist]

-                rows = self._new_transaction(
+                row_dict = self._new_transaction(
                     conn, "do_fetch", [], [], self._fetch_event_rows, event_ids
                 )

-                row_dict = {r["event_id"]: r for r in rows}
-
                 # We only want to resolve deferreds from the main thread
                 def fire(lst, res):
                     for ids, d in lst:
|
||||||
logger.debug("Loaded %d events (%d rows)", len(events), len(rows))
|
logger.debug("Loaded %d events (%d rows)", len(events), len(rows))
|
||||||
|
|
||||||
if not allow_rejected:
|
if not allow_rejected:
|
||||||
rows[:] = [r for r in rows if not r["rejects"]]
|
rows[:] = [r for r in rows if r["rejected_reason"] is None]
|
||||||
|
|
||||||
res = yield make_deferred_yieldable(
|
res = yield make_deferred_yieldable(
|
||||||
defer.gatherResults(
|
defer.gatherResults(
|
||||||
|
@ -463,8 +486,8 @@ class EventsWorkerStore(SQLBaseStore):
|
||||||
self._get_event_from_row,
|
self._get_event_from_row,
|
||||||
row["internal_metadata"],
|
row["internal_metadata"],
|
||||||
row["json"],
|
row["json"],
|
||||||
row["redacts"],
|
row["redactions"],
|
||||||
rejected_reason=row["rejects"],
|
rejected_reason=row["rejected_reason"],
|
||||||
format_version=row["format_version"],
|
format_version=row["format_version"],
|
||||||
)
|
)
|
||||||
for row in rows
|
for row in rows
|
||||||
|
@ -475,49 +498,98 @@ class EventsWorkerStore(SQLBaseStore):
|
||||||
|
|
||||||
defer.returnValue({e.event.event_id: e for e in res if e})
|
defer.returnValue({e.event.event_id: e for e in res if e})
|
||||||
|
|
||||||
def _fetch_event_rows(self, txn, events):
|
def _fetch_event_rows(self, txn, event_ids):
|
||||||
rows = []
|
"""Fetch event rows from the database
|
||||||
N = 200
|
|
||||||
for i in range(1 + len(events) // N):
|
|
||||||
evs = events[i * N : (i + 1) * N]
|
|
||||||
if not evs:
|
|
||||||
break
|
|
||||||
|
|
||||||
|
Events which are not found are omitted from the result.
|
||||||
|
|
||||||
|
The returned per-event dicts contain the following keys:
|
||||||
|
|
||||||
|
* event_id (str)
|
||||||
|
|
||||||
|
* json (str): json-encoded event structure
|
||||||
|
|
||||||
|
* internal_metadata (str): json-encoded internal metadata dict
|
||||||
|
|
||||||
|
* format_version (int|None): The format of the event. Hopefully one
|
||||||
|
of EventFormatVersions. 'None' means the event predates
|
||||||
|
EventFormatVersions (so the event is format V1).
|
||||||
|
|
||||||
|
* rejected_reason (str|None): if the event was rejected, the reason
|
||||||
|
why.
|
||||||
|
|
||||||
|
* redactions (List[str]): a list of event-ids which (claim to) redact
|
||||||
|
this event.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
txn (twisted.enterprise.adbapi.Connection):
|
||||||
|
event_ids (Iterable[str]): event IDs to fetch
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Dict[str, Dict]: a map from event id to event info.
|
||||||
|
"""
|
||||||
|
event_dict = {}
|
||||||
|
for evs in batch_iter(event_ids, 200):
|
||||||
sql = (
|
sql = (
|
||||||
"SELECT "
|
"SELECT "
|
||||||
" e.event_id as event_id, "
|
" e.event_id, "
|
||||||
" e.internal_metadata,"
|
" e.internal_metadata,"
|
||||||
" e.json,"
|
" e.json,"
|
||||||
" e.format_version, "
|
" e.format_version, "
|
||||||
" r.redacts as redacts,"
|
" rej.reason "
|
||||||
" rej.event_id as rejects "
|
|
||||||
" FROM event_json as e"
|
" FROM event_json as e"
|
||||||
" LEFT JOIN rejections as rej USING (event_id)"
|
" LEFT JOIN rejections as rej USING (event_id)"
|
||||||
" LEFT JOIN redactions as r ON e.event_id = r.redacts"
|
|
||||||
" WHERE e.event_id IN (%s)"
|
" WHERE e.event_id IN (%s)"
|
||||||
) % (",".join(["?"] * len(evs)),)
|
) % (",".join(["?"] * len(evs)),)
|
||||||
|
|
||||||
txn.execute(sql, evs)
|
txn.execute(sql, evs)
|
||||||
rows.extend(self.cursor_to_dict(txn))
|
|
||||||
|
|
||||||
return rows
|
for row in txn:
|
||||||
|
event_id = row[0]
|
||||||
|
event_dict[event_id] = {
|
||||||
|
"event_id": event_id,
|
||||||
|
"internal_metadata": row[1],
|
||||||
|
"json": row[2],
|
||||||
|
"format_version": row[3],
|
||||||
|
"rejected_reason": row[4],
|
||||||
|
"redactions": [],
|
||||||
|
}
|
||||||
|
|
||||||
|
# check for redactions
|
||||||
|
redactions_sql = (
|
||||||
|
"SELECT event_id, redacts FROM redactions WHERE redacts IN (%s)"
|
||||||
|
) % (",".join(["?"] * len(evs)),)
|
||||||
|
|
||||||
|
txn.execute(redactions_sql, evs)
|
||||||
|
|
||||||
|
for (redacter, redacted) in txn:
|
||||||
|
d = event_dict.get(redacted)
|
||||||
|
if d:
|
||||||
|
d["redactions"].append(redacter)
|
||||||
|
|
||||||
|
return event_dict
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def _get_event_from_row(
|
def _get_event_from_row(
|
||||||
self, internal_metadata, js, redacted, format_version, rejected_reason=None
|
self, internal_metadata, js, redactions, format_version, rejected_reason=None
|
||||||
):
|
):
|
||||||
|
"""Parse an event row which has been read from the database
|
||||||
|
|
||||||
|
Args:
|
||||||
|
internal_metadata (str): json-encoded internal_metadata column
|
||||||
|
js (str): json-encoded event body from event_json
|
||||||
|
redactions (list[str]): a list of the events which claim to have redacted
|
||||||
|
this event, from the redactions table
|
||||||
|
format_version: (str): the 'format_version' column
|
||||||
|
rejected_reason (str|None): the reason this event was rejected, if any
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
_EventCacheEntry
|
||||||
|
"""
|
||||||
with Measure(self._clock, "_get_event_from_row"):
|
with Measure(self._clock, "_get_event_from_row"):
|
||||||
d = json.loads(js)
|
d = json.loads(js)
|
||||||
internal_metadata = json.loads(internal_metadata)
|
internal_metadata = json.loads(internal_metadata)
|
||||||
|
|
||||||
if rejected_reason:
|
|
||||||
rejected_reason = yield self._simple_select_one_onecol(
|
|
||||||
table="rejections",
|
|
||||||
keyvalues={"event_id": rejected_reason},
|
|
||||||
retcol="reason",
|
|
||||||
desc="_get_event_from_row_rejected_reason",
|
|
||||||
)
|
|
||||||
|
|
||||||
if format_version is None:
|
if format_version is None:
|
||||||
# This means that we stored the event before we had the concept
|
# This means that we stored the event before we had the concept
|
||||||
# of a event format version, so it must be a V1 event.
|
# of a event format version, so it must be a V1 event.
|
||||||
|
@ -529,41 +601,7 @@ class EventsWorkerStore(SQLBaseStore):
|
||||||
rejected_reason=rejected_reason,
|
rejected_reason=rejected_reason,
|
||||||
)
|
)
|
||||||
|
|
||||||
redacted_event = None
|
redacted_event = yield self._maybe_redact_event_row(original_ev, redactions)
|
||||||
if redacted:
|
|
||||||
redacted_event = prune_event(original_ev)
|
|
||||||
|
|
||||||
redaction_id = yield self._simple_select_one_onecol(
|
|
||||||
table="redactions",
|
|
||||||
keyvalues={"redacts": redacted_event.event_id},
|
|
||||||
retcol="event_id",
|
|
||||||
desc="_get_event_from_row_redactions",
|
|
||||||
)
|
|
||||||
|
|
||||||
redacted_event.unsigned["redacted_by"] = redaction_id
|
|
||||||
# Get the redaction event.
|
|
||||||
|
|
||||||
because = yield self.get_event(
|
|
||||||
redaction_id, check_redacted=False, allow_none=True
|
|
||||||
)
|
|
||||||
|
|
||||||
if because:
|
|
||||||
# It's fine to do add the event directly, since get_pdu_json
|
|
||||||
# will serialise this field correctly
|
|
||||||
redacted_event.unsigned["redacted_because"] = because
|
|
||||||
|
|
||||||
# Starting in room version v3, some redactions need to be
|
|
||||||
# rechecked if we didn't have the redacted event at the
|
|
||||||
# time, so we recheck on read instead.
|
|
||||||
if because.internal_metadata.need_to_check_redaction():
|
|
||||||
expected_domain = get_domain_from_id(original_ev.sender)
|
|
||||||
if get_domain_from_id(because.sender) == expected_domain:
|
|
||||||
# This redaction event is allowed. Mark as not needing a
|
|
||||||
# recheck.
|
|
||||||
because.internal_metadata.recheck_redaction = False
|
|
||||||
else:
|
|
||||||
# Senders don't match, so the event isn't actually redacted
|
|
||||||
redacted_event = None
|
|
||||||
|
|
||||||
cache_entry = _EventCacheEntry(
|
cache_entry = _EventCacheEntry(
|
||||||
event=original_ev, redacted_event=redacted_event
|
event=original_ev, redacted_event=redacted_event
|
||||||
|
@ -573,6 +611,60 @@ class EventsWorkerStore(SQLBaseStore):
|
||||||
|
|
||||||
defer.returnValue(cache_entry)
|
defer.returnValue(cache_entry)
|
||||||
|
|
||||||
|
@defer.inlineCallbacks
|
||||||
|
def _maybe_redact_event_row(self, original_ev, redactions):
|
||||||
|
"""Given an event object and a list of possible redacting event ids,
|
||||||
|
determine whether to honour any of those redactions and if so return a redacted
|
||||||
|
event.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
original_ev (EventBase):
|
||||||
|
redactions (iterable[str]): list of event ids of potential redaction events
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Deferred[EventBase|None]: if the event should be redacted, a pruned
|
||||||
|
event object. Otherwise, None.
|
||||||
|
"""
|
||||||
|
if original_ev.type == "m.room.create":
|
||||||
|
# we choose to ignore redactions of m.room.create events.
|
||||||
|
return None
|
||||||
|
|
||||||
|
redaction_map = yield self._get_events_from_cache_or_db(redactions)
|
||||||
|
|
||||||
|
for redaction_id in redactions:
|
||||||
|
redaction_entry = redaction_map.get(redaction_id)
|
||||||
|
if not redaction_entry:
|
||||||
|
# we don't have the redaction event, or the redaction event was not
|
||||||
|
# authorized.
|
||||||
|
continue
|
||||||
|
|
||||||
|
redaction_event = redaction_entry.event
|
||||||
|
|
||||||
|
# Starting in room version v3, some redactions need to be
|
||||||
|
# rechecked if we didn't have the redacted event at the
|
||||||
|
# time, so we recheck on read instead.
|
||||||
|
if redaction_event.internal_metadata.need_to_check_redaction():
|
||||||
|
expected_domain = get_domain_from_id(original_ev.sender)
|
||||||
|
if get_domain_from_id(redaction_event.sender) == expected_domain:
|
||||||
|
# This redaction event is allowed. Mark as not needing a recheck.
|
||||||
|
redaction_event.internal_metadata.recheck_redaction = False
|
||||||
|
else:
|
||||||
|
# Senders don't match, so the event isn't actually redacted
|
||||||
|
continue
|
||||||
|
|
||||||
|
# we found a good redaction event. Redact!
|
||||||
|
redacted_event = prune_event(original_ev)
|
||||||
|
redacted_event.unsigned["redacted_by"] = redaction_id
|
||||||
|
|
||||||
|
# It's fine to add the event directly, since get_pdu_json
|
||||||
|
# will serialise this field correctly
|
||||||
|
redacted_event.unsigned["redacted_because"] = redaction_event
|
||||||
|
|
||||||
|
return redacted_event
|
||||||
|
|
||||||
|
# no valid redaction found for this event
|
||||||
|
return None
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
@defer.inlineCallbacks
|
||||||
def have_events_in_timeline(self, event_ids):
|
def have_events_in_timeline(self, event_ids):
|
||||||
"""Given a list of event ids, check if we have already processed and
|
"""Given a list of event ids, check if we have already processed and
|
||||||
|
|
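A minimal illustration (user ids invented) of the same-domain rule that `_maybe_redact_event_row` applies to redactions still flagged as needing a recheck:

    def same_domain(original_sender, redaction_sender):
        # mirrors get_domain_from_id: everything after the first colon
        domain = lambda user_id: user_id.split(":", 1)[1]
        return domain(original_sender) == domain(redaction_sender)

    assert same_domain("@alice:example.com", "@mod:example.com")         # honoured
    assert not same_domain("@alice:example.com", "@evil:elsewhere.org")  # withheld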
179 tests/rest/client/test_redactions.py (new file)
@@ -0,0 +1,179 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.rest import admin
+from synapse.rest.client.v1 import login, room
+from synapse.rest.client.v2_alpha import sync
+
+from tests.unittest import HomeserverTestCase
+
+
+class RedactionsTestCase(HomeserverTestCase):
+    """Tests that various redaction events are handled correctly"""
+
+    servlets = [
+        admin.register_servlets,
+        room.register_servlets,
+        login.register_servlets,
+        sync.register_servlets,
+    ]
+
+    def prepare(self, reactor, clock, hs):
+        # register a couple of users
+        self.mod_user_id = self.register_user("user1", "pass")
+        self.mod_access_token = self.login("user1", "pass")
+        self.other_user_id = self.register_user("otheruser", "pass")
+        self.other_access_token = self.login("otheruser", "pass")
+
+        # Create a room
+        self.room_id = self.helper.create_room_as(
+            self.mod_user_id, tok=self.mod_access_token
+        )
+
+        # Invite the other user
+        self.helper.invite(
+            room=self.room_id,
+            src=self.mod_user_id,
+            tok=self.mod_access_token,
+            targ=self.other_user_id,
+        )
+        # The other user joins
+        self.helper.join(
+            room=self.room_id, user=self.other_user_id, tok=self.other_access_token
+        )
+
+    def _redact_event(self, access_token, room_id, event_id, expect_code=200):
+        """Helper function to send a redaction event.
+
+        Returns the json body.
+        """
+        path = "/_matrix/client/r0/rooms/%s/redact/%s" % (room_id, event_id)
+
+        request, channel = self.make_request(
+            "POST", path, content={}, access_token=access_token
+        )
+        self.render(request)
+        self.assertEqual(int(channel.result["code"]), expect_code)
+        return channel.json_body
+
+    def _sync_room_timeline(self, access_token, room_id):
+        # use the caller's token, not the moderator's, so each user sees
+        # their own view of the room
+        request, channel = self.make_request(
+            "GET", "sync", access_token=access_token
+        )
+        self.render(request)
+        self.assertEqual(channel.result["code"], b"200")
+        room_sync = channel.json_body["rooms"]["join"][room_id]
+        return room_sync["timeline"]["events"]
+
+    def test_redact_event_as_moderator(self):
+        # as a regular user, send a message to redact
+        b = self.helper.send(room_id=self.room_id, tok=self.other_access_token)
+        msg_id = b["event_id"]
+
+        # as the moderator, send a redaction
+        b = self._redact_event(self.mod_access_token, self.room_id, msg_id)
+        redaction_id = b["event_id"]
+
+        # now sync
+        timeline = self._sync_room_timeline(self.mod_access_token, self.room_id)
+
+        # the last event should be the redaction
+        self.assertEqual(timeline[-1]["event_id"], redaction_id)
+        self.assertEqual(timeline[-1]["redacts"], msg_id)
+
+        # and the penultimate should be the redacted original
+        self.assertEqual(timeline[-2]["event_id"], msg_id)
+        self.assertEqual(timeline[-2]["unsigned"]["redacted_by"], redaction_id)
+        self.assertEqual(timeline[-2]["content"], {})
+
+    def test_redact_event_as_normal(self):
+        # as a regular user, send a message to redact
+        b = self.helper.send(room_id=self.room_id, tok=self.other_access_token)
+        normal_msg_id = b["event_id"]
+
+        # also send one as the admin
+        b = self.helper.send(room_id=self.room_id, tok=self.mod_access_token)
+        admin_msg_id = b["event_id"]
+
+        # as a normal, try to redact the admin's event
+        self._redact_event(
+            self.other_access_token, self.room_id, admin_msg_id, expect_code=403
+        )
+
+        # now try to redact our own event
+        b = self._redact_event(self.other_access_token, self.room_id, normal_msg_id)
+        redaction_id = b["event_id"]
+
+        # now sync
+        timeline = self._sync_room_timeline(self.other_access_token, self.room_id)
+
+        # the last event should be the redaction of the normal event
+        self.assertEqual(timeline[-1]["event_id"], redaction_id)
+        self.assertEqual(timeline[-1]["redacts"], normal_msg_id)
+
+        # the penultimate should be the unredacted one from the admin
+        self.assertEqual(timeline[-2]["event_id"], admin_msg_id)
+        self.assertNotIn("redacted_by", timeline[-2]["unsigned"])
+        self.assertTrue(timeline[-2]["content"]["body"], {})
+
+        # and the antepenultimate should be the redacted normal
+        self.assertEqual(timeline[-3]["event_id"], normal_msg_id)
+        self.assertEqual(timeline[-3]["unsigned"]["redacted_by"], redaction_id)
+        self.assertEqual(timeline[-3]["content"], {})
+
+    def test_redact_nonexistent_event(self):
+        # control case: an existing event
+        b = self.helper.send(room_id=self.room_id, tok=self.other_access_token)
+        msg_id = b["event_id"]
+        b = self._redact_event(self.other_access_token, self.room_id, msg_id)
+        redaction_id = b["event_id"]
+
+        # room moderators can send redactions for non-existent events
+        self._redact_event(self.mod_access_token, self.room_id, "$zzz")
+
+        # ... but normals cannot
+        self._redact_event(
+            self.other_access_token, self.room_id, "$zzz", expect_code=404
+        )
+
+        # when we sync, we should see only the valid redaction
+        timeline = self._sync_room_timeline(self.other_access_token, self.room_id)
+        self.assertEqual(timeline[-1]["event_id"], redaction_id)
+        self.assertEqual(timeline[-1]["redacts"], msg_id)
+
+        # and the penultimate should be the redacted original
+        self.assertEqual(timeline[-2]["event_id"], msg_id)
+        self.assertEqual(timeline[-2]["unsigned"]["redacted_by"], redaction_id)
+        self.assertEqual(timeline[-2]["content"], {})
+
+    def test_redact_create_event(self):
+        # control case: an existing event
+        b = self.helper.send(room_id=self.room_id, tok=self.mod_access_token)
+        msg_id = b["event_id"]
+        self._redact_event(self.mod_access_token, self.room_id, msg_id)
+
+        # sync the room, to get the id of the create event
+        timeline = self._sync_room_timeline(self.other_access_token, self.room_id)
+        create_event_id = timeline[0]["event_id"]
+
+        # room moderators cannot send redactions for create events
+        self._redact_event(
+            self.mod_access_token, self.room_id, create_event_id, expect_code=403
+        )
+
+        # and nor can normals
+        self._redact_event(
+            self.other_access_token, self.room_id, create_event_id, expect_code=403
+        )
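For reference, the event shape these tests assert on (ids invented): after redaction the original stays in the timeline with empty content plus an `unsigned.redacted_by` pointer.

    redacted_event = {
        "type": "m.room.message",
        "event_id": "$msg",
        "content": {},                               # content stripped by redaction
        "unsigned": {"redacted_by": "$redaction"},   # points at the redaction event
    }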
@@ -93,7 +93,7 @@ class RelationsTestCase(unittest.HomeserverTestCase):
     def test_deny_double_react(self):
         """Test that we deny relations on membership events
         """
-        channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a")
+        channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="a")
         self.assertEquals(200, channel.code, channel.json_body)

         channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a")
@@ -540,14 +540,122 @@ class RelationsTestCase(unittest.HomeserverTestCase):
             {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict
         )

+    def test_relations_redaction_redacts_edits(self):
+        """Test that edits of an event are redacted when the original event
+        is redacted.
+        """
+        # Send a new event
+        res = self.helper.send(self.room, body="Heyo!", tok=self.user_token)
+        original_event_id = res["event_id"]
+
+        # Add a relation
+        channel = self._send_relation(
+            RelationTypes.REPLACE,
+            "m.room.message",
+            parent_id=original_event_id,
+            content={
+                "msgtype": "m.text",
+                "body": "Wibble",
+                "m.new_content": {"msgtype": "m.text", "body": "First edit"},
+            },
+        )
+        self.assertEquals(200, channel.code, channel.json_body)
+
+        # Check the relation is returned
+        request, channel = self.make_request(
+            "GET",
+            "/_matrix/client/unstable/rooms/%s/relations/%s/m.replace/m.room.message"
+            % (self.room, original_event_id),
+            access_token=self.user_token,
+        )
+        self.render(request)
+        self.assertEquals(200, channel.code, channel.json_body)
+
+        self.assertIn("chunk", channel.json_body)
+        self.assertEquals(len(channel.json_body["chunk"]), 1)
+
+        # Redact the original event
+        request, channel = self.make_request(
+            "PUT",
+            "/rooms/%s/redact/%s/%s"
+            % (self.room, original_event_id, "test_relations_redaction_redacts_edits"),
+            access_token=self.user_token,
+            content="{}",
+        )
+        self.render(request)
+        self.assertEquals(200, channel.code, channel.json_body)
+
+        # Try to check for remaining m.replace relations
+        request, channel = self.make_request(
+            "GET",
+            "/_matrix/client/unstable/rooms/%s/relations/%s/m.replace/m.room.message"
+            % (self.room, original_event_id),
+            access_token=self.user_token,
+        )
+        self.render(request)
+        self.assertEquals(200, channel.code, channel.json_body)
+
+        # Check that no relations are returned
+        self.assertIn("chunk", channel.json_body)
+        self.assertEquals(channel.json_body["chunk"], [])
+
+    def test_aggregations_redaction_prevents_access_to_aggregations(self):
+        """Test that annotations of an event are redacted when the original event
+        is redacted.
+        """
+        # Send a new event
+        res = self.helper.send(self.room, body="Hello!", tok=self.user_token)
+        original_event_id = res["event_id"]
+
+        # Add a relation
+        channel = self._send_relation(
+            RelationTypes.ANNOTATION, "m.reaction", key="👍", parent_id=original_event_id
+        )
+        self.assertEquals(200, channel.code, channel.json_body)
+
+        # Redact the original
+        request, channel = self.make_request(
+            "PUT",
+            "/rooms/%s/redact/%s/%s"
+            % (
+                self.room,
+                original_event_id,
+                "test_aggregations_redaction_prevents_access_to_aggregations",
+            ),
+            access_token=self.user_token,
+            content="{}",
+        )
+        self.render(request)
+        self.assertEquals(200, channel.code, channel.json_body)
+
+        # Check that aggregations returns zero
+        request, channel = self.make_request(
+            "GET",
+            "/_matrix/client/unstable/rooms/%s/aggregations/%s/m.annotation/m.reaction"
+            % (self.room, original_event_id),
+            access_token=self.user_token,
+        )
+        self.render(request)
+        self.assertEquals(200, channel.code, channel.json_body)
+
+        self.assertIn("chunk", channel.json_body)
+        self.assertEquals(channel.json_body["chunk"], [])
+
     def _send_relation(
-        self, relation_type, event_type, key=None, content={}, access_token=None
+        self,
+        relation_type,
+        event_type,
+        key=None,
+        content={},
+        access_token=None,
+        parent_id=None,
     ):
         """Helper function to send a relation pointing at `self.parent_id`

         Args:
             relation_type (str): One of `RelationTypes`
             event_type (str): The type of the event to create
+            parent_id (str): The event_id this relation relates to. If None, then self.parent_id
             key (str|None): The aggregation key used for m.annotation relation
                 type.
             content(dict|None): The content of the created event.

@@ -564,10 +672,12 @@ class RelationsTestCase(unittest.HomeserverTestCase):
         if key:
             query = "?key=" + six.moves.urllib.parse.quote_plus(key.encode("utf-8"))

+        original_id = parent_id if parent_id else self.parent_id
+
         request, channel = self.make_request(
             "POST",
             "/_matrix/client/unstable/rooms/%s/send_relation/%s/%s/%s%s"
-            % (self.room, self.parent_id, relation_type, event_type, query),
+            % (self.room, original_id, relation_type, event_type, query),
             json.dumps(content).encode("utf-8"),
             access_token=access_token,
         )
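With the new `parent_id` parameter, a test can point a relation at an explicit event rather than `self.parent_id`; for example (event id invented):

    channel = self._send_relation(
        RelationTypes.ANNOTATION, "m.reaction", key="a", parent_id="$some_other_event"
    )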
@@ -13,9 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from prometheus_client.exposition import generate_latest
-
-from synapse.metrics import REGISTRY
+from synapse.metrics import REGISTRY, generate_latest
 from synapse.types import Requester, UserID

 from tests.unittest import HomeserverTestCase
@@ -447,6 +447,7 @@ class HomeserverTestCase(TestCase):
         # Create the user
         request, channel = self.make_request("GET", "/_matrix/client/r0/admin/register")
         self.render(request)
+        self.assertEqual(channel.code, 200)
         nonce = channel.json_body["nonce"]

         want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1)