Mirror of https://mau.dev/maunium/synapse.git
Synced 2024-12-17 21:53:51 +01:00

Commit 7a6df013cc: "merge develop"

242 changed files with 7391 additions and 3347 deletions.

Changed files at the top level: .dockerignore, .gitignore, .travis.yml,
AUTHORS.rst, CHANGES.rst, CONTRIBUTING.rst, Dockerfile, MANIFEST.in,
README.rst, UPGRADE.rst
Changed directories and files:

contrib/
docs/
    admin_api/
    consent_tracking.md, manhole.md, postgres.rst, privacy_policy_templates/en/,
    server_notices.md, workers.rst
jenkins/
scripts-dev/
scripts/
synapse/
    __init__.py
    api/
    app/
        appservice.py, client_reader.py, event_creator.py, federation_reader.py,
        federation_sender.py, frontend_proxy.py, homeserver.py,
        media_repository.py, pusher.py, synchrotron.py, synctl.py, user_dir.py
    appservice/
    config/
        __init__.py, _base.py, appservice.py, consent_config.py, homeserver.py,
        key.py, logger.py, registration.py, server_notices_config.py, tls.py
    crypto/
    events/
    federation/
        federation_base.py, federation_client.py, federation_server.py,
        send_queue.py, transaction_queue.py
        transport/
        units.py
    groups/
    handlers/
        __init__.py, appservice.py, deactivate_account.py, device.py,
        e2e_keys.py, events.py, federation.py, groups_local.py, identity.py,
        initial_sync.py, message.py, presence.py, receipts.py, register.py,
        room.py, room_list.py, room_member.py, sync.py, typing.py,
        user_directory.py
    http/
.dockerignore — 5 lines (new file)

@@ -0,0 +1,5 @@
Dockerfile
.travis.yml
.gitignore
demo/etc
tox.ini
.gitignore — 5 changes (vendored)

@@ -1,5 +1,6 @@
*.pyc
.*.swp
*~

.DS_Store
_trial_temp/

@@ -13,6 +14,7 @@ docs/build/
cmdclient_config.json
homeserver*.db
homeserver*.log
homeserver*.log.*
homeserver*.pid
homeserver*.yaml

@@ -32,6 +34,7 @@ demo/media_store.*
demo/etc

uploads
cache

.idea/
media_store/

@@ -39,6 +42,7 @@ media_store/
*.tac

build/
venv/

localhost-800*/
static/client/register/register_config.js

@@ -48,3 +52,4 @@ env/
*.config

.vscode/
.ropeproject/
.travis.yml — 18 changes

@@ -1,14 +1,22 @@
sudo: false
language: python
-python: 2.7

# tell travis to cache ~/.cache/pip
cache: pip

-env:
- - TOX_ENV=packaging
- - TOX_ENV=pep8
- - TOX_ENV=py27
+matrix:
+  include:
+  - python: 2.7
+    env: TOX_ENV=packaging
+
+  - python: 2.7
+    env: TOX_ENV=pep8
+
+  - python: 2.7
+    env: TOX_ENV=py27
+
+  - python: 3.6
+    env: TOX_ENV=py36

install:
- pip install tox
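The same environments can presumably be exercised locally with tox (a sketch;
the environment names are taken from the config above):

```
pip install tox
# Run a single environment, e.g. the style checks or the py27 test run
tox -e pep8
tox -e py27
```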
AUTHORS.rst

@@ -60,3 +60,6 @@ Niklas Riekenbrauck <nikriek at gmail dot.com>

Christoph Witzany <christoph at web.crofting.com>
 * Add LDAP support for authentication
+
+Pierre Jaury <pierre at jaury.eu>
+ * Docker packaging
CHANGES.rst — 372 changes

@@ -1,11 +1,375 @@
Unreleased
==========

synctl no longer starts the main synapse when using the ``-a`` option with
workers. A new worker file should be added with
``worker_app: synapse.app.homeserver``.

Changes in synapse v0.30.0 (2018-05-24)
=======================================

'Server Notices' are a new feature introduced in Synapse 0.30. They provide a
channel whereby server administrators can send messages to users on the server.

They are used as part of communication of the server policies (see
``docs/consent_tracking.md``); however, the intention is that they may also
find a use for features such as "Message of the day".

This feature is specific to Synapse, but uses standard Matrix communication
mechanisms, so should work with any Matrix client. For more details see
``docs/server_notices.md``.

Further Server Notices/Consent Tracking Support:

* Allow overriding the server_notices user's avatar (PR #3273)
* Use the localpart in the consent uri (PR #3272)
* Support for putting %(consent_uri)s in messages (PR #3271)
* Block attempts to send server notices to remote users (PR #3270)
* Docs on consent bits (PR #3268)


Changes in synapse v0.30.0-rc1 (2018-05-23)
===========================================

Server Notices/Consent Tracking Support:

* ConsentResource to gather policy consent from users (PR #3213)
* Move RoomCreationHandler out of synapse.handlers.Handlers (PR #3225)
* Infrastructure for a server notices room (PR #3232)
* Send users a server notice about consent (PR #3236)
* Reject attempts to send event before privacy consent is given (PR #3257)
* Add a 'has_consented' template var to consent forms (PR #3262)
* Fix dependency on jinja2 (PR #3263)

Features:

* Cohort analytics (PR #3163, #3241, #3251)
* Add lxml to docker image for web previews (PR #3239) Thanks to @ptman!
* Add in-flight request metrics (PR #3252)

Changes:

* Remove unused ``update_external_syncs`` (PR #3233)
* Use stream rather than depth ordering for push actions (PR #3212)
* Make purge_history operate on tokens (PR #3221)
* Don't support limitless pagination (PR #3265)

Bug Fixes:

* Fix logcontext resource usage tracking (PR #3258)
* Fix error in handling receipts (PR #3235)
* Stop the transaction cache caching failures (PR #3255)


Changes in synapse v0.29.1 (2018-05-17)
=======================================

Changes:

* Update docker documentation (PR #3222)


Changes in synapse v0.29.0 (2018-05-16)
=======================================

No changes since v0.29.0-rc1


Changes in synapse v0.29.0-rc1 (2018-05-14)
===========================================

Notable changes: a Dockerfile for running Synapse (thanks to @kaiyou!), a
closed spec bug in the Client-Server API, and further preparation for the
Python 3 migration.

Potentially breaking change:

* Make Client-Server API return 401 for invalid token (PR #3161).

  This changes the Client-Server spec to return a 401 error code instead of 403
  when the access token is unrecognised. This is the behaviour required by the
  specification, but some clients may be relying on the old, incorrect
  behaviour.

  Thanks to @NotAFile for fixing this.

Features:

* Add a Dockerfile for synapse (PR #2846) Thanks to @kaiyou!

Changes - General:

* nuke-room-from-db.sh: added postgresql option and help (PR #2337) Thanks to @rubo77!
* Part user from rooms on account deactivate (PR #3201)
* Make 'unexpected logging context' into warnings (PR #3007)
* Set Server header in SynapseRequest (PR #3208)
* remove duplicates from groups tables (PR #3129)
* Improve exception handling for background processes (PR #3138)
* Add missing consumeErrors to improve exception handling (PR #3139)
* reraise exceptions more carefully (PR #3142)
* Remove redundant call to preserve_fn (PR #3143)
* Trap exceptions thrown within run_in_background (PR #3144)

Changes - Refactors:

* Refactor /context to reuse pagination storage functions (PR #3193)
* Refactor recent events func to use pagination func (PR #3195)
* Refactor pagination DB API to return concrete type (PR #3196)
* Refactor get_recent_events_for_room return type (PR #3198)
* Refactor sync APIs to reuse pagination API (PR #3199)
* Remove unused code path from member change DB func (PR #3200)
* Refactor request handling wrappers (PR #3203)
* transaction_id, destination defined twice (PR #3209) Thanks to @damir-manapov!
* Refactor event storage to prepare for changes in state calculations (PR #3141)
* Set Server header in SynapseRequest (PR #3208)
* Use deferred.addTimeout instead of time_bound_deferred (PR #3127, #3178)
* Use run_in_background in preference to preserve_fn (PR #3140)

Changes - Python 3 migration:

* Construct HMAC as bytes on py3 (PR #3156) Thanks to @NotAFile!
* run config tests on py3 (PR #3159) Thanks to @NotAFile!
* Open certificate files as bytes (PR #3084) Thanks to @NotAFile!
* Open config file in non-bytes mode (PR #3085) Thanks to @NotAFile!
* Make event properties raise AttributeError instead (PR #3102) Thanks to @NotAFile!
* Use six.moves.urlparse (PR #3108) Thanks to @NotAFile!
* Add py3 tests to tox with folders that work (PR #3145) Thanks to @NotAFile!
* Don't yield in list comprehensions (PR #3150) Thanks to @NotAFile!
* Move more xrange to six (PR #3151) Thanks to @NotAFile!
* make imports local (PR #3152) Thanks to @NotAFile!
* move httplib import to six (PR #3153) Thanks to @NotAFile!
* Replace stringIO imports with six (PR #3154, #3168) Thanks to @NotAFile!
* more bytes strings (PR #3155) Thanks to @NotAFile!

Bug Fixes:

* synapse fails to start under Twisted >= 18.4 (PR #3157)
* Fix a class of logcontext leaks (PR #3170)
* Fix a couple of logcontext leaks in unit tests (PR #3172)
* Fix logcontext leak in media repo (PR #3174)
* Escape label values in prometheus metrics (PR #3175, #3186)
* Fix 'Unhandled Error' logs with Twisted 18.4 (PR #3182) Thanks to @Half-Shot!
* Fix logcontext leaks in rate limiter (PR #3183)
* notifications: Convert next_token to string according to the spec (PR #3190) Thanks to @mujx!
* nuke-room-from-db.sh: fix deletion from search table (PR #3194) Thanks to @rubo77!
* add guard for None on purge_history api (PR #3160) Thanks to @krombel!


Changes in synapse v0.28.1 (2018-05-01)
=======================================

SECURITY UPDATE

* Clamp the allowed values of event depth received over federation to be
  [0, 2^63 - 1]. This mitigates an attack where malicious events
  injected with depth = 2^63 - 1 render rooms unusable. Depth is used to
  determine the cosmetic ordering of events within a room, and so the ordering
  of events in such a room will default to using stream_ordering rather than
  depth (topological_ordering).

  This is a temporary solution to mitigate abuse in the wild, whilst a long
  term solution is being implemented to improve how the depth parameter is
  used.

  Full details at
  https://docs.google.com/document/d/1I3fi2S-XnpO45qrpCsowZv8P8dHcNZ4fsBsbOW7KABI

* Pin Twisted to <18.4 until we stop using the private _OpenSSLECCurve API.


Changes in synapse v0.28.0 (2018-04-26)
=======================================

Bug Fixes:

* Fix quarantine media admin API and search reindex (PR #3130)
* Fix media admin APIs (PR #3134)


Changes in synapse v0.28.0-rc1 (2018-04-24)
===========================================

Minor performance improvement to federation sending, plus bug fixes.

(Note: This release does not include the delta state resolution implementation
discussed in Matrix Live.)

Features:

* Add metrics for event processing lag (PR #3090)
* Add metrics for ResponseCache (PR #3092)

Changes:

* Synapse on PyPy (PR #2760) Thanks to @Valodim!
* move handling of auto_join_rooms to RegisterHandler (PR #2996) Thanks to @krombel!
* Improve handling of SRV records for federation connections (PR #3016) Thanks to @silkeh!
* Document the behaviour of ResponseCache (PR #3059)
* Preparation for py3 (PR #3061, #3073, #3074, #3075, #3103, #3104, #3106, #3107, #3109, #3110) Thanks to @NotAFile!
* update prometheus dashboard to use new metric names (PR #3069) Thanks to @krombel!
* use python3-compatible prints (PR #3074) Thanks to @NotAFile!
* Send federation events concurrently (PR #3078)
* Limit concurrent event sends for a room (PR #3079)
* Improve R30 stat definition (PR #3086)
* Send events to ASes concurrently (PR #3088)
* Refactor ResponseCache usage (PR #3093)
* Clarify that SRV may not point to a CNAME (PR #3100) Thanks to @silkeh!
* Use str(e) instead of e.message (PR #3103) Thanks to @NotAFile!
* Use six.itervalues in some places (PR #3106) Thanks to @NotAFile!
* Refactor store.have_events (PR #3117)

Bug Fixes:

* Return 401 for invalid access_token on logout (PR #2938) Thanks to @dklug!
* Return a 404 rather than a 500 on rejoining empty rooms (PR #3080)
* fix federation_domain_whitelist (PR #3099)
* Avoid creating events with huge numbers of prev_events (PR #3113)
* Reject events which have lots of prev_events (PR #3118)


Changes in synapse v0.27.4 (2018-04-13)
=======================================

Changes:

* Update canonicaljson dependency (#3095)


Changes in synapse v0.27.3 (2018-04-11)
=======================================

Bug fixes:

* URL quote path segments over federation (#3082)


Changes in synapse v0.27.3-rc2 (2018-04-09)
===========================================

v0.27.3-rc1 used a stale version of the develop branch, so the changelog
overstates the functionality. v0.27.3-rc2 is up to date; rc1 should be
ignored.


Changes in synapse v0.27.3-rc1 (2018-04-09)
===========================================

Notable changes include API support for joinability of groups, plus new
metrics and phone-home stats. Phone-home stats include better visibility of
system usage, so we can tweak synapse to work better for all users rather than
only our own experience with matrix.org. We also now record the 'r30' stat,
which is the measure we use to track overall growth of the Matrix ecosystem.
It is defined as:

Counts the number of native 30 day retained users, defined as:

* Users who have created their accounts more than 30 days ago
* Where last seen at most 30 days ago
* Where account creation and last_seen are > 30 days apart

Features:

* Add joinability for groups (PR #3045)
* Implement group join API (PR #3046)
* Add counter metrics for calculating state delta (PR #3033)
* R30 stats (PR #3041)
* Measure time it takes to calculate state group ID (PR #3043)
* Add basic performance statistics to phone home (PR #3044)
* Add response size metrics (PR #3071)
* phone home cache size configurations (PR #3063)

Changes:

* Add a blurb explaining the main synapse worker (PR #2886) Thanks to @turt2live!
* Replace old style error catching with 'as' keyword (PR #3000) Thanks to @NotAFile!
* Use .iter* to avoid copies in StateHandler (PR #3006)
* Linearize calls to _generate_user_id (PR #3029)
* Remove last usage of ujson (PR #3030)
* Use simplejson throughout (PR #3048)
* Use static JSONEncoders (PR #3049)
* Remove uses of events.content (PR #3060)
* Improve database cache performance (PR #3068)

Bug fixes:

* Add room_id to the response of `rooms/{roomId}/join` (PR #2986) Thanks to @jplatte!
* Fix replication after switch to simplejson (PR #3015)
* 404 correctly on missing paths via NoResource (PR #3022)
* Fix error when claiming e2e keys from offline servers (PR #3034)
* fix tests/storage/test_user_directory.py (PR #3042)
* use PUT instead of POST for federating groups/m.join_policy (PR #3070) Thanks to @krombel!
* postgres port script: fix state_groups_pkey error (PR #3072)


Changes in synapse v0.27.2 (2018-03-26)
=======================================

Bug fixes:

* Fix bug which broke TCP replication between workers (PR #3015)


Changes in synapse v0.27.1 (2018-03-26)
=======================================

Meta release, as v0.27.0 temporarily pointed to the wrong commit.


Changes in synapse v0.27.0 (2018-03-26)
=======================================

No changes since v0.27.0-rc2


Changes in synapse v0.27.0-rc2 (2018-03-19)
===========================================

Pulls in v0.26.1

Bug fixes:

* Fix bug introduced in v0.27.0-rc1 that causes much increased memory usage in state cache (PR #3005)


Changes in synapse v0.26.1 (2018-03-15)
=======================================

Bug fixes:

* Fix bug where an invalid event caused server to stop functioning correctly,
  due to parsing and serializing bugs in ujson library (PR #3008)


Changes in synapse v0.27.0-rc1 (2018-03-14)
===========================================

The common case for running Synapse is not to run separate workers, but for
those that do, be aware that synctl no longer starts the main synapse when
using the ``-a`` option with workers. A new worker file should be added with
``worker_app: synapse.app.homeserver``.

This release also begins the process of renaming a number of the metrics
reported to prometheus. See `docs/metrics-howto.rst <docs/metrics-howto.rst#block-and-response-metrics-renamed-for-0-27-0>`_.
Note that the v0.28.0 release will remove the deprecated metric names.

Features:

* Add ability for ASes to override message send time (PR #2754)
* Add support for custom storage providers for media repository (PR #2867, #2777, #2783, #2789, #2791, #2804, #2812, #2814, #2857, #2868, #2767)
* Add purge API features, see `docs/admin_api/purge_history_api.rst <docs/admin_api/purge_history_api.rst>`_ for full details (PR #2858, #2867, #2882, #2946, #2962, #2943)
* Add support for whitelisting 3PIDs that users can register. (PR #2813)
* Add ``/room/{id}/event/{id}`` API (PR #2766)
* Add an admin API to get all the media in a room (PR #2818) Thanks to @turt2live!
* Add ``federation_domain_whitelist`` option (PR #2820, #2821)

Changes:

* Continue to factor out processing from main process and into worker processes. See updated `docs/workers.rst <docs/workers.rst>`_ (PR #2892 - #2904, #2913, #2920 - #2926, #2947, #2847, #2854, #2872, #2873, #2874, #2928, #2929, #2934, #2856, #2976 - #2984, #2987 - #2989, #2991 - #2993, #2995, #2784)
* Ensure state cache is used when persisting events (PR #2864, #2871, #2802, #2835, #2836, #2841, #2842, #2849)
* Change the default config to bind on both IPv4 and IPv6 on all platforms (PR #2435) Thanks to @silkeh!
* No longer require a specific version of saml2 (PR #2695) Thanks to @okurz!
* Remove ``verbosity``/``log_file`` from generated config (PR #2755)
* Add and improve metrics and logging (PR #2770, #2778, #2785, #2786, #2787, #2793, #2794, #2795, #2809, #2810, #2833, #2834, #2844, #2965, #2927, #2975, #2790, #2796, #2838)
* When using synctl with workers, don't start the main synapse automatically (PR #2774)
* Minor performance improvements (PR #2773, #2792)
* Use a connection pool for non-federation outbound connections (PR #2817)
* Make it possible to run unit tests against postgres (PR #2829)
* Update pynacl dependency to 1.2.1 or higher (PR #2888) Thanks to @bachp!
* Remove ability for AS users to call /events and /sync (PR #2948)
* Use bcrypt.checkpw (PR #2949) Thanks to @krombel!

Bug fixes:

* Fix broken ``ldap_config`` config option (PR #2683) Thanks to @seckrv!
* Fix error message when user is not allowed to unban (PR #2761) Thanks to @turt2live!
* Fix publicised groups GET API (singular) over federation (PR #2772)
* Fix user directory when using ``user_directory_search_all_users`` config option (PR #2803, #2831)
* Fix error on ``/publicRooms`` when no rooms exist (PR #2827)
* Fix bug in quarantine_media (PR #2837)
* Fix url_previews when no Content-Type is returned from URL (PR #2845)
* Fix rare race in sync API when joining room (PR #2944)
* Fix slow event search, switch back from GIST to GIN indexes (PR #2769, #2848)


Changes in synapse v0.26.0 (2018-01-05)
CONTRIBUTING.rst

@@ -30,8 +30,12 @@ use github's pull request workflow to review the contribution, and either ask
you to make any refinements needed or merge it and make them ourselves. The
changes will then land on master when we next do a release.

-We use Jenkins for continuous integration (http://matrix.org/jenkins), and
-typically all pull requests get automatically tested by Jenkins: if your
-change breaks the build, Jenkins will yell about it in #matrix-dev:matrix.org
-so please lurk there and keep an eye open.
+We use `Jenkins <http://matrix.org/jenkins>`_ and
+`Travis <https://travis-ci.org/matrix-org/synapse>`_ for continuous
+integration. All pull requests to synapse get automatically tested by Travis;
+the Jenkins builds require an administrator to start them. If your change
+breaks the build, this will be shown in github, so please keep an eye on the
+pull request for feedback.

Code style
~~~~~~~~~~

@@ -115,4 +119,4 @@ can't be accepted. Git makes this trivial - just use the -s flag when you do

Conclusion
~~~~~~~~~~

That's it! Matrix is a very open and collaborative project as you might expect given our obsession with open communication. If we're going to successfully matrix together all the fragmented communication technologies out there we are reliant on contributions and collaboration from the community to do so. So please get involved - and we hope you have as much fun hacking on Matrix as we do!
Dockerfile — 19 lines (new file)

@@ -0,0 +1,19 @@
FROM docker.io/python:2-alpine3.7

RUN apk add --no-cache --virtual .nacl_deps su-exec build-base libffi-dev zlib-dev libressl-dev libjpeg-turbo-dev linux-headers postgresql-dev libxslt-dev

COPY . /synapse

# A wheel cache may be provided in ./cache for faster build
RUN cd /synapse \
    && pip install --upgrade pip setuptools psycopg2 lxml \
    && mkdir -p /synapse/cache \
    && pip install -f /synapse/cache --upgrade --process-dependency-links . \
    && mv /synapse/contrib/docker/start.py /synapse/contrib/docker/conf / \
    && rm -rf setup.py setup.cfg synapse

VOLUME ["/data"]

EXPOSE 8008/tcp 8448/tcp

ENTRYPOINT ["/start.py"]
MANIFEST.in

@@ -25,6 +25,8 @@ recursive-include synapse/static *.js
exclude jenkins.sh
exclude jenkins*.sh
exclude jenkins*
+exclude Dockerfile
+exclude .dockerignore
recursive-exclude jenkins *.sh

prune .github
README.rst — 23 changes

@@ -157,8 +157,9 @@ if you prefer.

In case of problems, please see the _`Troubleshooting` section below.

-Alternatively, Silvio Fricke has contributed a Dockerfile to automate the
-above in Docker at https://registry.hub.docker.com/u/silviof/docker-matrix/.
+There is an official synapse image available at
+https://hub.docker.com/r/matrixdotorg/synapse/tags/ which can be used with
+the docker-compose file available at `contrib/docker`. Further information on
+this, including configuration options, is available in `contrib/docker/README.md`.
+
+Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a
+Dockerfile to automate a synapse server in a single Docker image, at
+https://hub.docker.com/r/avhost/docker-matrix/tags/

Also, Martin Giess has created an auto-deployment process with vagrant/ansible,
tested with VirtualBox/AWS/DigitalOcean - see https://github.com/EMnify/matrix-synapse-auto-deploy

@@ -354,6 +355,10 @@ https://matrix.org/docs/projects/try-matrix-now.html (or build your own with one
Fedora
------

Synapse is in the Fedora repositories as ``matrix-synapse``::

    sudo dnf install matrix-synapse

Oleg Girko provides Fedora RPMs at
https://obs.infoserver.lv/project/monitor/matrix-synapse

@@ -610,6 +615,9 @@ should have the format ``_matrix._tcp.<yourdomain.com> <ttl> IN SRV 10 0 <port>

    $ dig -t srv _matrix._tcp.example.com
    _matrix._tcp.example.com. 3600 IN SRV 10 0 8448 synapse.example.com.

Note that the server hostname cannot be an alias (CNAME record): it has to point
directly to the server hosting the synapse instance.

You can then configure your homeserver to use ``<yourdomain.com>`` as the domain in
its user-ids, by setting ``server_name``::

@@ -890,6 +898,17 @@ This should end with a 'PASSED' result::

    PASSED (successes=143)

Running the Integration Tests
=============================

Synapse is accompanied by `SyTest <https://github.com/matrix-org/sytest>`_,
a Matrix homeserver integration testing suite, which uses HTTP requests to
access the API as a Matrix client would. It is able to run Synapse directly from
the source tree, so installation of the server is not required.

Testing with SyTest is recommended for verifying that changes related to the
Client-Server API are functioning correctly. See the `installation instructions
<https://github.com/matrix-org/sytest#installing>`_ for details.

Building Internal API Documentation
===================================
UPGRADE.rst — 12 changes

@@ -48,6 +48,18 @@ returned by the Client-Server API:
    # configured on port 443.
    curl -kv https://<host.name>/_matrix/client/versions 2>&1 | grep "Server:"

Upgrading to $NEXT_VERSION
==========================

This release expands the anonymous usage stats sent if the opt-in
``report_stats`` configuration is set to ``true``. We now capture RSS memory
and cpu use at a very coarse level. This requires administrators to install
the optional ``psutil`` python module.

We would appreciate it if you could assist by ensuring this module is available
and ``report_stats`` is enabled. This will let us see if performance changes to
synapse are having an impact on the general community.
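A minimal sketch of the step described above, assuming Synapse runs from a
virtualenv at an illustrative path:

```
# Install the optional dependency into the environment synapse runs from
# (the virtualenv path is an example)
~/.synapse/bin/pip install psutil

# Then ensure homeserver.yaml contains:
#   report_stats: true
# and restart synapse.
```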

Upgrading to v0.15.0
====================
contrib/README.rst — 10 lines (new file)

@@ -0,0 +1,10 @@
Community Contributions
=======================

Everything in this directory is a project submitted by the community that may
be useful to others. As such, the project maintainers cannot guarantee
support, stability or backwards compatibility of these projects.

Files in this directory should *not* be relied on directly, as they may not
continue to work or exist in the future. If you wish to use any of these
files, you should copy them to avoid them breaking from underneath you.
contrib/docker/README.md — 153 lines (new file)

@@ -0,0 +1,153 @@
# Synapse Docker

The `matrixdotorg/synapse` Docker image will run Synapse as a single process.
It does not provide a database server or a TURN server; you should run these
separately.

If you run a Postgres server, you should simply include it in the same Compose
project, or set the proper environment variables, and the image will
automatically use that server.

## Build

Build the docker image with the `docker build` command from the root of the synapse repository.

```
docker build -t docker.io/matrixdotorg/synapse .
```

The `-t` option sets the image tag. Official images are tagged `matrixdotorg/synapse:<version>` where `<version>` is the same as the release tag in the synapse git repository.

You may have a local Python wheel cache available, in which case copy the relevant packages into the ``cache/`` directory at the root of the project.
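A sketch of pre-populating that cache with pip (the ``cache/`` path comes from
the Dockerfile; the exact invocation is illustrative):

```
# Build wheels for synapse and its dependencies into ./cache, then build
mkdir -p cache
pip wheel -w cache .
docker build -t docker.io/matrixdotorg/synapse .
```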
## Run

This image is designed to run either with an automatically generated
configuration file or with a custom configuration that requires manual
editing.

### Automated configuration

It is recommended that you use Docker Compose to run your containers, including
this image and a Postgres server. A sample ``docker-compose.yml`` is provided,
including example labels for reverse proxying and other artifacts.

Read the section about environment variables and set at least the mandatory
variables, then run the server:

```
docker-compose up -d
```

If secrets are not specified in the environment variables, they will be generated
as part of the startup. Please ensure these secrets are kept between launches of the
Docker container, as their loss may require users to log in again.

### Manual configuration

A sample ``docker-compose.yml`` is provided, including example labels for
reverse proxying and other artifacts. The docker-compose file is an example;
please comment/uncomment sections that are not suitable for your use case.

Specify a ``SYNAPSE_CONFIG_PATH``, preferably to a persistent path,
to use manual configuration. To generate a fresh ``homeserver.yaml``, simply run:

```
docker-compose run --rm -e SYNAPSE_SERVER_NAME=my.matrix.host synapse generate
```

Then, customize your configuration and run the server:

```
docker-compose up -d
```

### Without Compose

If you do not wish to use Compose, you may still run this image using plain
Docker commands. Note that the following is just a guideline, and you may need
to add parameters to the docker run command to account for the network situation
with your postgres database.

```
docker run \
    -d \
    --name synapse \
    -v ${DATA_PATH}:/data \
    -e SYNAPSE_SERVER_NAME=my.matrix.host \
    -e SYNAPSE_REPORT_STATS=yes \
    docker.io/matrixdotorg/synapse:latest
```
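If the database runs in another container, you will presumably also need to
share a network and pass the `POSTGRES_*` variables described below. A
hypothetical example (the network and container names are illustrative):

```
# Assumes a postgres container named "db" already runs on the "matrix" network
docker run -d --name synapse \
    --network matrix \
    -v ${DATA_PATH}:/data \
    -e SYNAPSE_SERVER_NAME=my.matrix.host \
    -e SYNAPSE_REPORT_STATS=yes \
    -e POSTGRES_HOST=db \
    -e POSTGRES_PASSWORD=changeme \
    docker.io/matrixdotorg/synapse:latest
```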
## Volumes

The image expects a single volume, located at ``/data``, that will hold:

* temporary files during uploads;
* uploaded media and thumbnails;
* the SQLite database if you do not configure postgres;
* the appservices configuration.

You are free to use separate volumes depending on storage endpoints at your
disposal. For instance, ``/data/media`` could be stored on large but
low-performance hdd storage while other files could be stored on high
performance endpoints.

To set up an application service, simply create an ``appservices``
directory in the data volume and write the application service YAML
configuration file there. Multiple application services are supported.
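For example (the registration file name is illustrative; ``./files`` matches
the volume mapping in the sample compose file):

```
# Drop appservice registration files into the data volume;
# they are picked up on the next container start
mkdir -p ./files/appservices
cp my-bridge-registration.yaml ./files/appservices/
```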
## Environment

Unless you specify a custom path for the configuration file, a very generic
file will be generated, based on the following environment settings.
These are a good starting point for setting up your own deployment.

Global settings:

* ``UID``, the user id Synapse will run as [default 991]
* ``GID``, the group id Synapse will run as [default 991]
* ``SYNAPSE_CONFIG_PATH``, path to a custom config file

If ``SYNAPSE_CONFIG_PATH`` is set, you should generate a configuration file
and then customize it manually. No other environment variable is required.

Otherwise, a dynamic configuration file will be used. The following environment
variables are available for configuration:

* ``SYNAPSE_SERVER_NAME`` (mandatory), the server's public hostname.
* ``SYNAPSE_REPORT_STATS`` (mandatory, ``yes`` or ``no``), enable anonymous
  statistics reporting back to the Matrix project, which helps us to get funding.
* ``SYNAPSE_NO_TLS``, set this variable to disable TLS in Synapse (use this if
  you run your own TLS-capable reverse proxy).
* ``SYNAPSE_ENABLE_REGISTRATION``, set this variable to enable registration on
  the Synapse instance.
* ``SYNAPSE_ALLOW_GUEST``, set this variable to allow guests to join this server.
* ``SYNAPSE_EVENT_CACHE_SIZE``, the event cache size [default `10K`].
* ``SYNAPSE_CACHE_FACTOR``, the cache factor [default `0.5`].
* ``SYNAPSE_RECAPTCHA_PUBLIC_KEY``, set this variable to the recaptcha public
  key in order to enable recaptcha upon registration.
* ``SYNAPSE_RECAPTCHA_PRIVATE_KEY``, set this variable to the recaptcha private
  key in order to enable recaptcha upon registration.
* ``SYNAPSE_TURN_URIS``, set this variable to the comma-separated list of TURN
  uris to enable TURN for this homeserver.
* ``SYNAPSE_TURN_SECRET``, set this to the TURN shared secret if required.

Shared secrets, which will be initialized to random values if not set:

* ``SYNAPSE_REGISTRATION_SHARED_SECRET``, secret for registering users if
  registration is disabled.
* ``SYNAPSE_MACAROON_SECRET_KEY``, secret for signing access tokens
  to the server.

Database specific values (will use SQLite if not set):

* `POSTGRES_DB` - The database name for the synapse postgres database. [default: `synapse`]
* `POSTGRES_HOST` - The host of the postgres database if you wish to use postgresql instead of sqlite3. [default: `db`, which is useful when using a container on the same docker network in a compose file where the postgres service is called `db`]
* `POSTGRES_PASSWORD` - The password for the synapse postgres database. **If this is set then postgres will be used instead of sqlite3.** [default: none] **NOTE**: You are highly encouraged to use postgresql! Please use the compose file to make it easier to deploy.
* `POSTGRES_USER` - The user for the synapse postgres database. [default: `matrix`]

Mail server specific values (will not send emails if not set):

* ``SYNAPSE_SMTP_HOST``, hostname of the mail server.
* ``SYNAPSE_SMTP_PORT``, TCP port for accessing the mail server [default ``25``].
* ``SYNAPSE_SMTP_USER``, username for authenticating against the mail server, if any.
* ``SYNAPSE_SMTP_PASSWORD``, password for authenticating against the mail server, if any.
contrib/docker/conf/homeserver.yaml — 219 lines (new file)

@@ -0,0 +1,219 @@
# vim:ft=yaml

## TLS ##

tls_certificate_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.crt"
tls_private_key_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.key"
tls_dh_params_path: "/data/{{ SYNAPSE_SERVER_NAME }}.tls.dh"
no_tls: {{ "True" if SYNAPSE_NO_TLS else "False" }}
tls_fingerprints: []

## Server ##

server_name: "{{ SYNAPSE_SERVER_NAME }}"
pid_file: /homeserver.pid
web_client: False
soft_file_limit: 0

## Ports ##

listeners:
{% if not SYNAPSE_NO_TLS %}
  -
    port: 8448
    bind_addresses: ['0.0.0.0']
    type: http
    tls: true
    x_forwarded: false
    resources:
      - names: [client]
        compress: true
      - names: [federation]  # Federation APIs
        compress: false
{% endif %}

  - port: 8008
    tls: false
    bind_addresses: ['0.0.0.0']
    type: http
    x_forwarded: false

    resources:
      - names: [client]
        compress: true
      - names: [federation]
        compress: false

## Database ##

{% if POSTGRES_PASSWORD %}
database:
  name: "psycopg2"
  args:
    user: "{{ POSTGRES_USER or "synapse" }}"
    password: "{{ POSTGRES_PASSWORD }}"
    database: "{{ POSTGRES_DB or "synapse" }}"
    host: "{{ POSTGRES_HOST or "db" }}"
    port: "{{ POSTGRES_PORT or "5432" }}"
    cp_min: 5
    cp_max: 10
{% else %}
database:
  name: "sqlite3"
  args:
    database: "/data/homeserver.db"
{% endif %}

## Performance ##

event_cache_size: "{{ SYNAPSE_EVENT_CACHE_SIZE or "10K" }}"
verbose: 0
log_file: "/data/homeserver.log"
log_config: "/compiled/log.config"

## Ratelimiting ##

rc_messages_per_second: 0.2
rc_message_burst_count: 10.0
federation_rc_window_size: 1000
federation_rc_sleep_limit: 10
federation_rc_sleep_delay: 500
federation_rc_reject_limit: 50
federation_rc_concurrent: 3

## Files ##

media_store_path: "/data/media"
uploads_path: "/data/uploads"
max_upload_size: "10M"
max_image_pixels: "32M"
dynamic_thumbnails: false

# List of thumbnails to precalculate when an image is uploaded.
thumbnail_sizes:
- width: 32
  height: 32
  method: crop
- width: 96
  height: 96
  method: crop
- width: 320
  height: 240
  method: scale
- width: 640
  height: 480
  method: scale
- width: 800
  height: 600
  method: scale

url_preview_enabled: False
max_spider_size: "10M"

## Captcha ##

{% if SYNAPSE_RECAPTCHA_PUBLIC_KEY %}
recaptcha_public_key: "{{ SYNAPSE_RECAPTCHA_PUBLIC_KEY }}"
recaptcha_private_key: "{{ SYNAPSE_RECAPTCHA_PRIVATE_KEY }}"
enable_registration_captcha: True
recaptcha_siteverify_api: "https://www.google.com/recaptcha/api/siteverify"
{% else %}
recaptcha_public_key: "YOUR_PUBLIC_KEY"
recaptcha_private_key: "YOUR_PRIVATE_KEY"
enable_registration_captcha: False
recaptcha_siteverify_api: "https://www.google.com/recaptcha/api/siteverify"
{% endif %}

## Turn ##

{% if SYNAPSE_TURN_URIS %}
turn_uris:
{% for uri in SYNAPSE_TURN_URIS.split(',') %} - "{{ uri }}"
{% endfor %}
turn_shared_secret: "{{ SYNAPSE_TURN_SECRET }}"
turn_user_lifetime: "1h"
turn_allow_guests: True
{% else %}
turn_uris: []
turn_shared_secret: "YOUR_SHARED_SECRET"
turn_user_lifetime: "1h"
turn_allow_guests: True
{% endif %}

## Registration ##

enable_registration: {{ "True" if SYNAPSE_ENABLE_REGISTRATION else "False" }}
registration_shared_secret: "{{ SYNAPSE_REGISTRATION_SHARED_SECRET }}"
bcrypt_rounds: 12
allow_guest_access: {{ "True" if SYNAPSE_ALLOW_GUEST else "False" }}
enable_group_creation: true

# The list of identity servers trusted to verify third party
# identifiers by this server.
trusted_third_party_id_servers:
    - matrix.org
    - vector.im
    - riot.im

## Metrics ###

{% if SYNAPSE_REPORT_STATS.lower() == "yes" %}
enable_metrics: True
report_stats: True
{% else %}
enable_metrics: False
report_stats: False
{% endif %}

## API Configuration ##

room_invite_state_types:
    - "m.room.join_rules"
    - "m.room.canonical_alias"
    - "m.room.avatar"
    - "m.room.name"

{% if SYNAPSE_APPSERVICES %}
app_service_config_files:
{% for appservice in SYNAPSE_APPSERVICES %} - "{{ appservice }}"
{% endfor %}
{% else %}
app_service_config_files: []
{% endif %}

macaroon_secret_key: "{{ SYNAPSE_MACAROON_SECRET_KEY }}"
expire_access_token: False

## Signing Keys ##

signing_key_path: "/data/{{ SYNAPSE_SERVER_NAME }}.signing.key"
old_signing_keys: {}
key_refresh_interval: "1d"  # 1 Day.

# The trusted servers to download signing keys from.
perspectives:
  servers:
    "matrix.org":
      verify_keys:
        "ed25519:auto":
          key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"

password_config:
  enabled: true

{% if SYNAPSE_SMTP_HOST %}
email:
  enable_notifs: false
  smtp_host: "{{ SYNAPSE_SMTP_HOST }}"
  smtp_port: {{ SYNAPSE_SMTP_PORT or "25" }}
  smtp_user: "{{ SYNAPSE_SMTP_USER }}"
  smtp_pass: "{{ SYNAPSE_SMTP_PASSWORD }}"
  require_transport_security: False
  notif_from: "{{ SYNAPSE_SMTP_FROM or "hostmaster@" + SYNAPSE_SERVER_NAME }}"
  app_name: Matrix
  template_dir: res/templates
  notif_template_html: notif_mail.html
  notif_template_text: notif_mail.txt
  notif_for_new_users: True
  riot_base_url: "https://{{ SYNAPSE_SERVER_NAME }}"
{% endif %}
contrib/docker/conf/log.config — 29 lines (new file)

@@ -0,0 +1,29 @@
version: 1

formatters:
  precise:
    format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s- %(message)s'

filters:
  context:
    (): synapse.util.logcontext.LoggingContextFilter
    request: ""

handlers:
  console:
    class: logging.StreamHandler
    formatter: precise
    filters: [context]

loggers:
  synapse:
    level: {{ SYNAPSE_LOG_LEVEL or "WARNING" }}

  synapse.storage.SQL:
    # beware: increasing this to DEBUG will make synapse log sensitive
    # information such as access tokens.
    level: {{ SYNAPSE_LOG_LEVEL or "WARNING" }}

root:
  level: {{ SYNAPSE_LOG_LEVEL or "WARNING" }}
  handlers: [console]
contrib/docker/docker-compose.yml — 49 lines (new file)

@@ -0,0 +1,49 @@
# This compose file is compatible with Compose itself; it might need some
# adjustments to run properly with stack.

version: '3'

services:

  synapse:
    image: docker.io/matrixdotorg/synapse:latest
    # Since synapse does not retry to connect to the database, restart upon
    # failure
    restart: unless-stopped
    # See the readme for a full documentation of the environment settings
    environment:
      - SYNAPSE_SERVER_NAME=my.matrix.host
      - SYNAPSE_REPORT_STATS=no
      - SYNAPSE_ENABLE_REGISTRATION=yes
      - SYNAPSE_LOG_LEVEL=INFO
      - POSTGRES_PASSWORD=changeme
    volumes:
      # You may either store all the files in a local folder
      - ./files:/data
      # .. or you may split this between different storage points
      # - ./files:/data
      # - /path/to/ssd:/data/uploads
      # - /path/to/large_hdd:/data/media
    depends_on:
      - db
    # In order to expose Synapse, remove one of the following; you might for
    # instance expose the TLS port directly:
    ports:
      - 8448:8448/tcp
    # ... or use a reverse proxy, here is an example for traefik:
    labels:
      - traefik.enable=true
      - traefik.frontend.rule=Host:my.matrix.host
      - traefik.port=8448

  db:
    image: docker.io/postgres:10-alpine
    # Change that password, of course!
    environment:
      - POSTGRES_USER=synapse
      - POSTGRES_PASSWORD=changeme
    volumes:
      # You may store the database tables in a local folder..
      - ./schemas:/var/lib/postgresql/data
      # .. or store them on some high performance storage for better results
      # - /path/to/ssd/storage:/var/lib/postgresql/data
contrib/docker/start.py — 66 lines (new executable file)

@@ -0,0 +1,66 @@
#!/usr/local/bin/python

import jinja2
import os
import sys
import subprocess
import glob

# Utility functions
convert = lambda src, dst, environ: open(dst, "w").write(jinja2.Template(open(src).read()).render(**environ))

def check_arguments(environ, args):
    for argument in args:
        if argument not in environ:
            print("Environment variable %s is mandatory, exiting." % argument)
            sys.exit(2)

def generate_secrets(environ, secrets):
    for name, secret in secrets.items():
        if secret not in environ:
            filename = "/data/%s.%s.key" % (environ["SYNAPSE_SERVER_NAME"], name)
            if os.path.exists(filename):
                with open(filename) as handle: value = handle.read()
            else:
                print("Generating a random secret for {}".format(name))
                value = os.urandom(32).encode("hex")
                with open(filename, "w") as handle: handle.write(value)
            environ[secret] = value

# Prepare the configuration
mode = sys.argv[1] if len(sys.argv) > 1 else None
environ = os.environ.copy()
ownership = "{}:{}".format(environ.get("UID", 991), environ.get("GID", 991))
args = ["python", "-m", "synapse.app.homeserver"]

# In generate mode, generate a configuration and missing keys, then exit
if mode == "generate":
    check_arguments(environ, ("SYNAPSE_SERVER_NAME", "SYNAPSE_REPORT_STATS", "SYNAPSE_CONFIG_PATH"))
    args += [
        "--server-name", environ["SYNAPSE_SERVER_NAME"],
        "--report-stats", environ["SYNAPSE_REPORT_STATS"],
        "--config-path", environ["SYNAPSE_CONFIG_PATH"],
        "--generate-config"
    ]
    os.execv("/usr/local/bin/python", args)

# In normal mode, generate missing keys if any, then run synapse
else:
    # Parse the configuration file
    if "SYNAPSE_CONFIG_PATH" in environ:
        args += ["--config-path", environ["SYNAPSE_CONFIG_PATH"]]
    else:
        check_arguments(environ, ("SYNAPSE_SERVER_NAME", "SYNAPSE_REPORT_STATS"))
        generate_secrets(environ, {
            "registration": "SYNAPSE_REGISTRATION_SHARED_SECRET",
            "macaroon": "SYNAPSE_MACAROON_SECRET_KEY"
        })
        environ["SYNAPSE_APPSERVICES"] = glob.glob("/data/appservices/*.yaml")
        if not os.path.exists("/compiled"): os.mkdir("/compiled")
        convert("/conf/homeserver.yaml", "/compiled/homeserver.yaml", environ)
        convert("/conf/log.config", "/compiled/log.config", environ)
        subprocess.check_output(["chown", "-R", ownership, "/data"])
        args += ["--config-path", "/compiled/homeserver.yaml"]
    # Generate missing keys and start synapse
    subprocess.check_output(args + ["--generate-keys"])
    os.execv("/sbin/su-exec", ["su-exec", ownership] + args)
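For reference, a sketch of how the script's two modes map onto the image
(mirroring the commands in `contrib/docker/README.md`; the paths and values
are examples):

```
# "generate" mode: writes a fresh config to SYNAPSE_CONFIG_PATH, then exits
docker run --rm \
    -v /srv/synapse-data:/data \
    -e SYNAPSE_SERVER_NAME=my.matrix.host \
    -e SYNAPSE_REPORT_STATS=yes \
    -e SYNAPSE_CONFIG_PATH=/data/homeserver.yaml \
    docker.io/matrixdotorg/synapse:latest generate

# normal mode (no argument): renders /compiled/homeserver.yaml from the
# environment and starts synapse under su-exec
docker run -d -v /srv/synapse-data:/data \
    -e SYNAPSE_SERVER_NAME=my.matrix.host \
    -e SYNAPSE_REPORT_STATS=yes \
    docker.io/matrixdotorg/synapse:latest
```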
@@ -22,6 +22,8 @@ import argparse
from synapse.events import FrozenEvent
from synapse.util.frozenutils import unfreeze

+from six import string_types
+

def make_graph(file_name, room_id, file_prefix, limit):
    print "Reading lines"

@@ -58,7 +60,7 @@ def make_graph(file_name, room_id, file_prefix, limit):
    for key, value in unfreeze(event.get_dict()["content"]).items():
        if value is None:
            value = "<null>"
-        elif isinstance(value, basestring):
+        elif isinstance(value, string_types):
            pass
        else:
            value = json.dumps(value)
@@ -202,11 +202,11 @@ new PromConsole.Graph({
<h1>Requests</h1>

<h3>Requests by Servlet</h3>
-<div id="synapse_http_server_requests_servlet"></div>
+<div id="synapse_http_server_request_count_servlet"></div>
<script>
new PromConsole.Graph({
-  node: document.querySelector("#synapse_http_server_requests_servlet"),
-  expr: "rate(synapse_http_server_requests:servlet[2m])",
+  node: document.querySelector("#synapse_http_server_request_count_servlet"),
+  expr: "rate(synapse_http_server_request_count:servlet[2m])",
  name: "[[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,

@@ -215,11 +215,11 @@ new PromConsole.Graph({
})
</script>
<h4> (without <tt>EventStreamRestServlet</tt> or <tt>SyncRestServlet</tt>)</h4>
-<div id="synapse_http_server_requests_servlet_minus_events"></div>
+<div id="synapse_http_server_request_count_servlet_minus_events"></div>
<script>
new PromConsole.Graph({
-  node: document.querySelector("#synapse_http_server_requests_servlet_minus_events"),
-  expr: "rate(synapse_http_server_requests:servlet{servlet!=\"EventStreamRestServlet\", servlet!=\"SyncRestServlet\"}[2m])",
+  node: document.querySelector("#synapse_http_server_request_count_servlet_minus_events"),
+  expr: "rate(synapse_http_server_request_count:servlet{servlet!=\"EventStreamRestServlet\", servlet!=\"SyncRestServlet\"}[2m])",
  name: "[[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,

@@ -233,7 +233,7 @@ new PromConsole.Graph({
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_response_time_avg"),
-  expr: "rate(synapse_http_server_response_time:total[2m]) / rate(synapse_http_server_response_time:count[2m]) / 1000",
+  expr: "rate(synapse_http_server_response_time_seconds[2m]) / rate(synapse_http_server_response_count[2m]) / 1000",
  name: "[[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,

@@ -276,7 +276,7 @@ new PromConsole.Graph({
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_response_ru_utime"),
-  expr: "rate(synapse_http_server_response_ru_utime:total[2m])",
+  expr: "rate(synapse_http_server_response_ru_utime_seconds[2m])",
  name: "[[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,

@@ -291,7 +291,7 @@ new PromConsole.Graph({
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_response_db_txn_duration"),
-  expr: "rate(synapse_http_server_response_db_txn_duration:total[2m])",
+  expr: "rate(synapse_http_server_response_db_txn_duration_seconds[2m])",
  name: "[[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,

@@ -306,7 +306,7 @@ new PromConsole.Graph({
<script>
new PromConsole.Graph({
  node: document.querySelector("#synapse_http_server_send_time_avg"),
-  expr: "rate(synapse_http_server_response_time:total{servlet='RoomSendEventRestServlet'}[2m]) / rate(synapse_http_server_response_time:count{servlet='RoomSendEventRestServlet'}[2m]) / 1000",
+  expr: "rate(synapse_http_server_response_time_second{servlet='RoomSendEventRestServlet'}[2m]) / rate(synapse_http_server_response_count{servlet='RoomSendEventRestServlet'}[2m]) / 1000",
  name: "[[servlet]]",
  yAxisFormatter: PromConsole.NumberFormatter.humanize,
  yHoverFormatter: PromConsole.NumberFormatter.humanize,
@@ -1,10 +1,10 @@
synapse_federation_transaction_queue_pendingEdus:total = sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)
synapse_federation_transaction_queue_pendingPdus:total = sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)

-synapse_http_server_requests:method{servlet=""} = sum(synapse_http_server_requests) by (method)
-synapse_http_server_requests:servlet{method=""} = sum(synapse_http_server_requests) by (servlet)
+synapse_http_server_request_count:method{servlet=""} = sum(synapse_http_server_request_count) by (method)
+synapse_http_server_request_count:servlet{method=""} = sum(synapse_http_server_request_count) by (servlet)

-synapse_http_server_requests:total{servlet=""} = sum(synapse_http_server_requests:by_method) by (servlet)
+synapse_http_server_request_count:total{servlet=""} = sum(synapse_http_server_request_count:by_method) by (servlet)

synapse_cache:hit_ratio_5m = rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])
synapse_cache:hit_ratio_30s = rate(synapse_util_caches_cache:hits[30s]) / rate(synapse_util_caches_cache:total[30s])
@@ -5,19 +5,19 @@ groups:
      expr: "sum(synapse_federation_transaction_queue_pendingEdus or absent(synapse_federation_transaction_queue_pendingEdus)*0)"
    - record: "synapse_federation_transaction_queue_pendingPdus:total"
      expr: "sum(synapse_federation_transaction_queue_pendingPdus or absent(synapse_federation_transaction_queue_pendingPdus)*0)"
-    - record: 'synapse_http_server_requests:method'
+    - record: 'synapse_http_server_request_count:method'
      labels:
        servlet: ""
-      expr: "sum(synapse_http_server_requests) by (method)"
-    - record: 'synapse_http_server_requests:servlet'
+      expr: "sum(synapse_http_server_request_count) by (method)"
+    - record: 'synapse_http_server_request_count:servlet'
      labels:
        method: ""
-      expr: 'sum(synapse_http_server_requests) by (servlet)'
+      expr: 'sum(synapse_http_server_request_count) by (servlet)'

-    - record: 'synapse_http_server_requests:total'
+    - record: 'synapse_http_server_request_count:total'
      labels:
        servlet: ""
-      expr: 'sum(synapse_http_server_requests:by_method) by (servlet)'
+      expr: 'sum(synapse_http_server_request_count:by_method) by (servlet)'

    - record: 'synapse_cache:hit_ratio_5m'
      expr: 'rate(synapse_util_caches_cache:hits[5m]) / rate(synapse_util_caches_cache:total[5m])'
@@ -2,6 +2,9 @@
# (e.g. https://www.archlinux.org/packages/community/any/matrix-synapse/ for ArchLinux)
# rather than in a user home directory or similar under virtualenv.

+# **NOTE:** This is an example service file that may change in the future. If you
+# wish to use this please copy rather than symlink it.

[Unit]
Description=Synapse Matrix homeserver

@@ -12,6 +15,7 @@ Group=synapse
WorkingDirectory=/var/lib/synapse
ExecStart=/usr/bin/python2.7 -m synapse.app.homeserver --config-path=/etc/synapse/homeserver.yaml
ExecStop=/usr/bin/synctl stop /etc/synapse/homeserver.yaml
+# EnvironmentFile=-/etc/sysconfig/synapse # Can be used to e.g. set SYNAPSE_CACHE_FACTOR

[Install]
WantedBy=multi-user.target
@@ -16,9 +16,11 @@ including an ``access_token`` of a server admin.

 By default, events sent by local users are not deleted, as they may represent
 the only copies of this content in existence. (Events sent by remote users are
-deleted, and room state data before the cutoff is always removed).
+deleted.)

-To delete local events as well, set ``delete_local_events`` in the body:
+Room state data (such as joins, leaves, topic) is always preserved.
+
+To delete local message events as well, set ``delete_local_events`` in the body:

 .. code:: json
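For illustration, a minimal sketch of driving this API from Python with the `requests` library. Everything here is a placeholder: the server name, room id, event id and token, and the endpoint path is assumed from the v0.30-era admin API docs rather than shown in this diff.

```python
# Hedged sketch only; all names below are placeholders, and the endpoint path
# is an assumption based on docs/admin_api/purge_history_api.rst.
import requests

SERVER = "https://server.example.com"        # placeholder homeserver
ROOM_ID = "!someroom:server.example.com"     # placeholder room id
EVENT_ID = "$someevent:server.example.com"   # purge history before this event
TOKEN = "<admin access_token>"               # placeholder admin token

resp = requests.post(
    "%s/_matrix/client/r0/admin/purge_history/%s/%s" % (SERVER, ROOM_ID, EVENT_ID),
    params={"access_token": TOKEN},
    json={"delete_local_events": True},  # also delete locally-sent message events
)
resp.raise_for_status()
```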
160 docs/consent_tracking.md Normal file

@@ -0,0 +1,160 @@
Support in Synapse for tracking agreement to server terms and conditions
========================================================================

Synapse 0.30 introduces support for tracking whether users have agreed to the
terms and conditions set by the administrator of a server - and blocking access
to the server until they have.

There are several parts to this functionality; each requires some specific
configuration in `homeserver.yaml` to be enabled.

Note that various parts of the configuration and this document refer to the
"privacy policy": agreement with a privacy policy is one particular use of this
feature, but of course administrators can specify other terms and conditions
unrelated to "privacy" per se.
Collecting policy agreement from a user
---------------------------------------

Synapse can be configured to serve the user a simple policy form with an
"accept" button. Clicking "Accept" records the user's acceptance in the
database and shows a success page.

To enable this, first create templates for the policy and success pages.
These should be stored on the local filesystem.

These templates use the [Jinja2](http://jinja.pocoo.org) templating language,
and [docs/privacy_policy_templates](privacy_policy_templates) gives
examples of the sort of thing that can be done.

Note that the templates must be stored under a name giving the language of the
template - currently this must always be `en` (for "English");
internationalisation support is intended for the future.

The template for the policy itself should be versioned and named according to
the version: for example `1.0.html`. The version of the policy which the user
has agreed to is stored in the database.

Once the templates are in place, make the following changes to `homeserver.yaml`:

1. Add a `user_consent` section, which should look like:

   ```yaml
   user_consent:
     template_dir: privacy_policy_templates
     version: 1.0
   ```

   `template_dir` points to the directory containing the policy
   templates. `version` defines the version of the policy which will be served
   to the user. In the example above, Synapse will serve
   `privacy_policy_templates/en/1.0.html`.

2. Add a `form_secret` setting at the top level:

   ```yaml
   form_secret: "<unique secret>"
   ```

   This should be set to an arbitrary secret string (try `pwgen -y 30` to
   generate suitable secrets).

   More on what this is used for below.

3. Add `consent` wherever the `client` resource is currently enabled in the
   `listeners` configuration. For example:

   ```yaml
   listeners:
     - port: 8008
       resources:
         - names:
           - client
           - consent
   ```

Finally, ensure that `jinja2` is installed. If you are using a virtualenv, this
should be a matter of `pip install Jinja2`. On Debian, try `apt-get install
python-jinja2`.

Once this is complete, and the server has been restarted, try visiting
`https://<server>/_matrix/consent`. If correctly configured, this should give
an error "Missing string query parameter 'u'". It is now possible to manually
construct URIs where users can give their consent.

### Constructing the consent URI

It may be useful to manually construct the "consent URI" for a given user - for
instance, in order to send them an email asking them to consent. To do this,
take the base `https://<server>/_matrix/consent` URL and add the following
query parameters:

* `u`: the user id of the user. This can either be a full MXID
  (`@user:server.com`) or just the localpart (`user`).

* `h`: hex-encoded HMAC-SHA256 of `u` using the `form_secret` as a key. It is
  possible to calculate this on the commandline with something like:

  ```bash
  echo -n '<user>' | openssl sha256 -hmac '<form_secret>'
  ```

This should result in a URI which looks something like:
`https://<server>/_matrix/consent?u=<user>&h=68a152465a4d...`.
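The same calculation can be done in Python; a minimal sketch, where the secret and user id are placeholders standing in for your `form_secret` and the value of the `u` parameter:

```python
# Sketch of computing the `h` query parameter; the secret and user id below
# are placeholders.
import hmac
from hashlib import sha256

form_secret = "<form_secret>"   # must match form_secret in homeserver.yaml
user_id = "@user:server.com"    # the value passed as the `u` parameter

h = hmac.new(
    key=form_secret.encode("utf-8"),
    msg=user_id.encode("utf-8"),
    digestmod=sha256,
).hexdigest()
print(h)  # hex digest to use as the `h` query parameter
```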
Sending users a server notice asking them to agree to the policy
----------------------------------------------------------------

It is possible to configure Synapse to send a [server
notice](server_notices.md) to anybody who has not yet agreed to the current
version of the policy. To do so:

* ensure that the consent resource is configured, as in the previous section

* ensure that server notices are configured, as in [server_notices.md](server_notices.md).

* Add `server_notice_content` under `user_consent` in `homeserver.yaml`. For
  example:

  ```yaml
  user_consent:
    server_notice_content:
      msgtype: m.text
      body: >-
        Please give your consent to the privacy policy at %(consent_uri)s.
  ```

  Synapse automatically replaces the placeholder `%(consent_uri)s` with the
  consent uri for that user.

* ensure that `public_baseurl` is set in `homeserver.yaml`, and gives the base
  URI that clients use to connect to the server. (It is used to construct
  `consent_uri` in the server notice.)


Blocking users from using the server until they agree to the policy
-------------------------------------------------------------------

Synapse can be configured to block any attempts to join rooms or send messages
until the user has given their agreement to the policy. (Joining the server
notices room is exempted from this.)

To enable this, add `block_events_error` under `user_consent`. For example:

```yaml
user_consent:
  block_events_error: >-
    You can't send any messages until you consent to the privacy policy at
    %(consent_uri)s.
```

Synapse automatically replaces the placeholder `%(consent_uri)s` with the
consent uri for that user.

Ensure that `public_baseurl` is set in `homeserver.yaml`, and gives the base
URI that clients use to connect to the server. (It is used to construct
`consent_uri` in the error.)
43 docs/manhole.md Normal file

@@ -0,0 +1,43 @@
Using the synapse manhole
=========================

The "manhole" allows server administrators to access a Python shell on a running
Synapse installation. This is a very powerful mechanism for administration and
debugging.

To enable it, first uncomment the `manhole` listener configuration in
`homeserver.yaml`:

```yaml
listeners:
  - port: 9000
    bind_addresses: ['::1', '127.0.0.1']
    type: manhole
```

(`bind_addresses` in the above is important: it ensures that access to the
manhole is only possible for local users).

Note that this will give administrative access to synapse to **all users** with
shell access to the server. It should therefore **not** be enabled in
environments where untrusted users have shell access.

Then restart synapse, and point an ssh client at port 9000 on localhost, using
the username `matrix`:

```bash
ssh -p9000 matrix@localhost
```

The password is `rabbithole`.

This gives a Python REPL in which `hs` gives access to the
`synapse.server.HomeServer` object - which in turn gives access to many other
parts of the process.

As a simple example, retrieving an event from the database:

```
>>> hs.get_datastore().get_event('$1416420717069yeQaw:matrix.org')
<Deferred at 0x7ff253fc6998 current result: <FrozenEvent event_id='$1416420717069yeQaw:matrix.org', type='m.room.create', state_key=''>>
```
@@ -6,7 +6,13 @@ Postgres version 9.4 or later is known to work.
 Set up database
 ===============

-The PostgreSQL database used *must* have the correct encoding set, otherwise
+Assuming your PostgreSQL database user is called ``postgres``, create a user
+``synapse_user`` with::
+
+ su - postgres
+ createuser --pwprompt synapse_user
+
+The PostgreSQL database used *must* have the correct encoding set, otherwise it
 would not be able to store UTF8 strings. To create a database with the correct
 encoding use, e.g.::

@@ -46,8 +52,8 @@ As with Debian/Ubuntu, postgres support depends on the postgres python connector
 Synapse config
 ==============

-When you are ready to start using PostgreSQL, add the following line to your
-config file::
+When you are ready to start using PostgreSQL, edit the ``database`` section in
+your config file to match the following lines::

     database:
        name: psycopg2

@@ -96,9 +102,12 @@ complete, restart synapse. For instance::
     cp homeserver.db homeserver.db.snapshot
     ./synctl start

-Assuming your new config file (as described in the section *Synapse config*)
-is named ``homeserver-postgres.yaml`` and the SQLite snapshot is at
-``homeserver.db.snapshot`` then simply run::
+Copy the old config file into a new config file::
+
+    cp homeserver.yaml homeserver-postgres.yaml
+
+Edit the database section as described in the section *Synapse config* above
+and with the SQLite snapshot located at ``homeserver.db.snapshot`` simply run::

     synapse_port_db --sqlite-database homeserver.db.snapshot \
         --postgres-config homeserver-postgres.yaml

@@ -117,6 +126,11 @@ run::
         --postgres-config homeserver-postgres.yaml

 Once that has completed, change the synapse config to point at the PostgreSQL
-database configuration file ``homeserver-postgres.yaml`` (i.e. rename it to
-``homeserver.yaml``) and restart synapse. Synapse should now be running against
-PostgreSQL.
+database configuration file ``homeserver-postgres.yaml``:
+
+    ./synctl stop
+    mv homeserver.yaml homeserver-old-sqlite.yaml
+    mv homeserver-postgres.yaml homeserver.yaml
+    ./synctl start
+
+Synapse should now be running against PostgreSQL.
23 docs/privacy_policy_templates/en/1.0.html Normal file

@@ -0,0 +1,23 @@
<!doctype html>
<html lang="en">
  <head>
    <title>Matrix.org Privacy policy</title>
  </head>
  <body>
    {% if has_consented %}
    <p>
      Your base already belong to us.
    </p>
    {% else %}
    <p>
      All your base are belong to us.
    </p>
    <form method="post" action="consent">
      <input type="hidden" name="v" value="{{version}}"/>
      <input type="hidden" name="u" value="{{user}}"/>
      <input type="hidden" name="h" value="{{userhmac}}"/>
      <input type="submit" value="Sure thing!"/>
    </form>
    {% endif %}
  </body>
</html>

11 docs/privacy_policy_templates/en/success.html Normal file

@@ -0,0 +1,11 @@
<!doctype html>
<html lang="en">
  <head>
    <title>Matrix.org Privacy policy</title>
  </head>
  <body>
    <p>
      Sweet.
    </p>
  </body>
</html>
74 docs/server_notices.md Normal file

@@ -0,0 +1,74 @@
Server Notices
==============

'Server Notices' are a new feature introduced in Synapse 0.30. They provide a
channel whereby server administrators can send messages to users on the server.

They are used as part of communication of the server policies (see
[consent_tracking.md](consent_tracking.md)), however the intention is that
they may also find a use for features such as "Message of the day".

This is a feature specific to Synapse, but it uses standard Matrix
communication mechanisms, so should work with any Matrix client.

User experience
---------------

When the user is first sent a server notice, they will get an invitation to a
room (typically called 'Server Notices', though this is configurable in
`homeserver.yaml`). They will be **unable to reject** this invitation -
attempts to do so will receive an error.

Once they accept the invitation, they will see the notice message in the room
history; it will appear to have come from the 'server notices user' (see
below).

The user is prevented from sending any messages in this room by the power
levels.

Having joined the room, the user can leave the room if they want. Subsequent
server notices will then cause a new room to be created.

Synapse configuration
---------------------

Server notices come from a specific user id on the server. Server
administrators are free to choose the user id - something like `server` is
suggested, meaning the notices will come from
`@server:<your_server_name>`. Once the Server Notices user is configured, that
user id becomes a special, privileged user, so administrators should ensure
that **it is not already allocated**.

In order to support server notices, it is necessary to add some configuration
to the `homeserver.yaml` file. In particular, you should add a `server_notices`
section, which should look like this:

```yaml
server_notices:
   system_mxid_localpart: server
   system_mxid_display_name: "Server Notices"
   system_mxid_avatar_url: "mxc://server.com/oumMVlgDnLYFaPVkExemNVVZ"
   room_name: "Server Notices"
```

The only compulsory setting is `system_mxid_localpart`, which defines the user
id of the Server Notices user, as above. `room_name` defines the name of the
room which will be created.

`system_mxid_display_name` and `system_mxid_avatar_url` can be used to set the
displayname and avatar of the Server Notices user.

Sending notices
---------------

As of the current version of synapse, there is no convenient interface for
sending notices (other than the automated ones sent as part of consent
tracking).

In the meantime, it is possible to test this feature using the manhole. Having
gone into the manhole as described in [manhole.md](manhole.md), a notice can be
sent with something like:

```
>>> hs.get_server_notices_manager().send_notice('@user:server.com', {'msgtype':'m.text', 'body':'foo'})
```
@@ -55,7 +55,12 @@ synapse process.)

 You then create a set of configs for the various worker processes.  These
 should be worker configuration files, and should be stored in a dedicated
-subdirectory, to allow synctl to manipulate them.
+subdirectory, to allow synctl to manipulate them. An additional configuration
+for the master synapse process will need to be created because the process will
+not be started automatically. That configuration should look like this::
+
+    worker_app: synapse.app.homeserver
+    daemonize: true

 Each worker configuration file inherits the configuration of the main homeserver
 configuration file.  You can then override configuration specific to that worker,

@@ -230,9 +235,11 @@ file. For example::
 ``synapse.app.event_creator``
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-Handles non-state event creation. It can handle REST endpoints matching:
+Handles some event creation. It can handle REST endpoints matching::

     ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send
+    ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$
+    ^/_matrix/client/(api/v1|r0|unstable)/join/

 It will create events locally and then send them on to the main synapse
 instance to be persisted and handled.
@@ -1,5 +1,7 @@
 #! /bin/bash

+set -eux
+
 cd "`dirname $0`/.."

 TOX_DIR=$WORKSPACE/.tox

@@ -14,7 +16,20 @@ fi
 tox -e py27 --notest -v

 TOX_BIN=$TOX_DIR/py27/bin
-$TOX_BIN/pip install setuptools
+
+# cryptography 2.2 requires setuptools >= 18.5.
+#
+# older versions of virtualenv (?) give us a virtualenv with the same version
+# of setuptools as is installed on the system python (and tox runs virtualenv
+# under python3, so we get the version of setuptools that is installed on that).
+#
+# anyway, make sure that we have a recent enough setuptools.
+$TOX_BIN/pip install 'setuptools>=18.5'
+
+# we also need a semi-recent version of pip, because old ones fail to install
+# the "enum34" dependency of cryptography.
+$TOX_BIN/pip install 'pip>=10'

 { python synapse/python_dependencies.py
   echo lxml psycopg2
 } | xargs $TOX_BIN/pip install
@@ -6,9 +6,19 @@

 ## Do not run it lightly.

+set -e
+
+if [ "$1" == "-h" ] || [ "$1" == "" ]; then
+  echo "Call with ROOM_ID as first option and then pipe it into the database. So for instance you might run"
+  echo " nuke-room-from-db.sh <room_id> | sqlite3 homeserver.db"
+  echo "or"
+  echo " nuke-room-from-db.sh <room_id> | psql --dbname=synapse"
+  exit
+fi
+
 ROOMID="$1"

-sqlite3 homeserver.db <<EOF
+cat <<EOF
 DELETE FROM event_forward_extremities WHERE room_id = '$ROOMID';
 DELETE FROM event_backward_extremities WHERE room_id = '$ROOMID';
 DELETE FROM event_edges WHERE room_id = '$ROOMID';

@@ -29,7 +39,7 @@ DELETE FROM state_groups WHERE room_id = '$ROOMID';
 DELETE FROM state_groups_state WHERE room_id = '$ROOMID';
 DELETE FROM receipts_graph WHERE room_id = '$ROOMID';
 DELETE FROM receipts_linearized WHERE room_id = '$ROOMID';
-DELETE FROM event_search_content WHERE c1room_id = '$ROOMID';
+DELETE FROM event_search WHERE room_id = '$ROOMID';
 DELETE FROM guest_access WHERE room_id = '$ROOMID';
 DELETE FROM history_visibility WHERE room_id = '$ROOMID';
 DELETE FROM room_tags WHERE room_id = '$ROOMID';
@@ -1,6 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

@@ -29,6 +30,8 @@ import time
 import traceback
 import yaml

+from six import string_types
+
 logger = logging.getLogger("synapse_port_db")

@@ -250,6 +253,12 @@ class Porter(object):
     @defer.inlineCallbacks
     def handle_table(self, table, postgres_size, table_size, forward_chunk,
                      backward_chunk):
+        logger.info(
+            "Table %s: %i/%i (rows %i-%i) already ported",
+            table, postgres_size, table_size,
+            backward_chunk+1, forward_chunk-1,
+        )
+
         if not table_size:
             return

@@ -467,31 +476,10 @@ class Porter(object):
             self.progress.set_state("Preparing PostgreSQL")
             self.setup_db(postgres_config, postgres_engine)

-            # Step 2. Get tables.
-            self.progress.set_state("Fetching tables")
-            sqlite_tables = yield self.sqlite_store._simple_select_onecol(
-                table="sqlite_master",
-                keyvalues={
-                    "type": "table",
-                },
-                retcol="name",
-            )
-
-            postgres_tables = yield self.postgres_store._simple_select_onecol(
-                table="information_schema.tables",
-                keyvalues={},
-                retcol="distinct table_name",
-            )
-
-            tables = set(sqlite_tables) & set(postgres_tables)
-
-            self.progress.set_state("Creating tables")
-
-            logger.info("Found %d tables", len(tables))
-
+            self.progress.set_state("Creating port tables")
             def create_port_table(txn):
                 txn.execute(
-                    "CREATE TABLE port_from_sqlite3 ("
+                    "CREATE TABLE IF NOT EXISTS port_from_sqlite3 ("
                     " table_name varchar(100) NOT NULL UNIQUE,"
                     " forward_rowid bigint NOT NULL,"
                     " backward_rowid bigint NOT NULL"

@@ -517,18 +505,33 @@ class Porter(object):
                     "alter_table", alter_table
                 )
             except Exception as e:
-                logger.info("Failed to create port table: %s", e)
                 pass

-            try:
-                yield self.postgres_store.runInteraction(
-                    "create_port_table", create_port_table
-                )
-            except Exception as e:
-                logger.info("Failed to create port table: %s", e)
+            yield self.postgres_store.runInteraction(
+                "create_port_table", create_port_table
+            )

-            self.progress.set_state("Setting up")
+            # Step 2. Get tables.
+            self.progress.set_state("Fetching tables")
+            sqlite_tables = yield self.sqlite_store._simple_select_onecol(
+                table="sqlite_master",
+                keyvalues={
+                    "type": "table",
+                },
+                retcol="name",
+            )

-            # Set up tables.
+            postgres_tables = yield self.postgres_store._simple_select_onecol(
+                table="information_schema.tables",
+                keyvalues={},
+                retcol="distinct table_name",
+            )
+
+            tables = set(sqlite_tables) & set(postgres_tables)
+            logger.info("Found %d tables", len(tables))
+
+            # Step 3. Figure out what still needs copying
+            self.progress.set_state("Checking on port progress")
             setup_res = yield defer.gatherResults(
                 [
                     self.setup_table(table)

@@ -539,7 +542,8 @@ class Porter(object):
                 consumeErrors=True,
             )

-            # Process tables.
+            # Step 4. Do the copying.
             self.progress.set_state("Copying to postgres")
             yield defer.gatherResults(
                 [
                     self.handle_table(*res)

@@ -548,6 +552,9 @@ class Porter(object):
                 consumeErrors=True,
             )

+            # Step 5. Do final post-processing
+            yield self._setup_state_group_id_seq()
+
             self.progress.done()
         except:
             global end_error_exec_info

@@ -569,7 +576,7 @@ class Porter(object):
         def conv(j, col):
             if j in bool_cols:
                 return bool(col)
-            elif isinstance(col, basestring) and "\0" in col:
+            elif isinstance(col, string_types) and "\0" in col:
                 logger.warn("DROPPING ROW: NUL value in table %s col %s: %r", table, headers[j], col)
                 raise BadValueException();
             return col

@@ -707,6 +714,16 @@ class Porter(object):

         defer.returnValue((done, remaining + done))

+    def _setup_state_group_id_seq(self):
+        def r(txn):
+            txn.execute("SELECT MAX(id) FROM state_groups")
+            next_id = txn.fetchone()[0]+1
+            txn.execute(
+                "ALTER SEQUENCE state_group_id_seq RESTART WITH %s",
+                (next_id,),
+            )
+        return self.postgres_store.runInteraction("setup_state_group_id_seq", r)
+

 ##############################################
 ###### The following is simply UI stuff ######
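For context, `_setup_state_group_id_seq` above restarts PostgreSQL's `state_group_id_seq` so that ids allocated after the port don't collide with rows copied from SQLite. A standalone sketch of the same operation using psycopg2 directly (the DSN is a placeholder):

```python
# Standalone sketch of the sequence fix-up; the connection DSN is a
# placeholder for your own database settings.
import psycopg2

conn = psycopg2.connect("dbname=synapse user=synapse_user")
with conn:
    with conn.cursor() as txn:
        txn.execute("SELECT MAX(id) FROM state_groups")
        next_id = txn.fetchone()[0] + 1
        # Restart the sequence so newly-allocated state group ids start
        # after everything copied from SQLite.
        txn.execute(
            "ALTER SEQUENCE state_group_id_seq RESTART WITH %s",
            (next_id,),
        )
```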
@@ -16,4 +16,4 @@
 """ This is a reference implementation of a Matrix home server.
 """

-__version__ = "0.26.0"
+__version__ = "0.30.0"
@@ -57,7 +57,7 @@ class Auth(object):
         self.TOKEN_NOT_FOUND_HTTP_STATUS = 401

         self.token_cache = LruCache(CACHE_SIZE_FACTOR * 10000)
-        register_cache("token_cache", self.token_cache)
+        register_cache("cache", "token_cache", self.token_cache)

     @defer.inlineCallbacks
     def check_from_context(self, event, context, do_sig_check=True):

@@ -204,8 +204,8 @@ class Auth(object):

             ip_addr = self.hs.get_ip_from_request(request)
             user_agent = request.requestHeaders.getRawHeaders(
-                "User-Agent",
-                default=[""]
+                b"User-Agent",
+                default=[b""]
             )[0]
             if user and access_token and ip_addr:
                 self.store.insert_client_ip(

@@ -672,7 +672,7 @@ def has_access_token(request):
         bool: False if no access_token was given, True otherwise.
     """
     query_params = request.args.get("access_token")
-    auth_headers = request.requestHeaders.getRawHeaders("Authorization")
+    auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
     return bool(query_params) or bool(auth_headers)


@@ -692,8 +692,8 @@ def get_access_token_from_request(request, token_not_found_http_status=401):
        AuthError: If there isn't an access_token in the request.
    """

-    auth_headers = request.requestHeaders.getRawHeaders("Authorization")
-    query_params = request.args.get("access_token")
+    auth_headers = request.requestHeaders.getRawHeaders(b"Authorization")
+    query_params = request.args.get(b"access_token")
    if auth_headers:
        # Try the get the access_token from a "Authorization: Bearer"
        # header
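These `b"..."` changes are part of the ongoing Python 3 port: Twisted keys raw headers (and `Request.args`) by byte strings. A minimal standalone sketch of the header half, using only `twisted.web.http_headers` - the token value is a placeholder:

```python
# Sketch of byte-keyed header access; the token value is a placeholder.
from twisted.web.http_headers import Headers

headers = Headers()
headers.addRawHeader(b"Authorization", b"Bearer MDAxyz")

# getRawHeaders returns a list of values, or the default if the header is
# absent - hence the default=[b""] and trailing [0] idiom used above.
auth = headers.getRawHeaders(b"Authorization", default=[b""])[0]
assert auth == b"Bearer MDAxyz"
```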
@@ -16,6 +16,9 @@

 """Contains constants from the specification."""

+# the "depth" field on events is limited to 2**63 - 1
+MAX_DEPTH = 2**63 - 1
+

 class Membership(object):
@@ -15,9 +15,12 @@

 """Contains exceptions and error codes."""

-import json
 import logging

+import simplejson as json
+from six import iteritems
+from six.moves import http_client
+
 logger = logging.getLogger(__name__)


@@ -49,6 +52,8 @@ class Codes(object):
     THREEPID_DENIED = "M_THREEPID_DENIED"
     INVALID_USERNAME = "M_INVALID_USERNAME"
     SERVER_NOT_TRUSTED = "M_SERVER_NOT_TRUSTED"
+    CONSENT_NOT_GIVEN = "M_CONSENT_NOT_GIVEN"
+    CANNOT_LEAVE_SERVER_NOTICE_ROOM = "M_CANNOT_LEAVE_SERVER_NOTICE_ROOM"


 class CodeMessageException(RuntimeError):
@@ -136,6 +141,32 @@ class SynapseError(CodeMessageException):
         return res


+class ConsentNotGivenError(SynapseError):
+    """The error returned to the client when the user has not consented to the
+    privacy policy.
+    """
+    def __init__(self, msg, consent_uri):
+        """Constructs a ConsentNotGivenError
+
+        Args:
+            msg (str): The human-readable error message
+            consent_uri (str): The URL where the user can give their consent
+        """
+        super(ConsentNotGivenError, self).__init__(
+            code=http_client.FORBIDDEN,
+            msg=msg,
+            errcode=Codes.CONSENT_NOT_GIVEN
+        )
+        self._consent_uri = consent_uri
+
+    def error_dict(self):
+        return cs_error(
+            self.msg,
+            self.errcode,
+            consent_uri=self._consent_uri
+        )
+
+
 class RegistrationError(SynapseError):
     """An error raised when a registration event fails."""
     pass
@@ -290,13 +321,13 @@ def cs_error(msg, code=Codes.UNKNOWN, **kwargs):

     Args:
         msg (str): The error message.
-        code (int): The error code.
+        code (str): The error code.
         kwargs : Additional keys to add to the response.
     Returns:
         A dict representing the error response JSON.
     """
     err = {"error": msg, "errcode": code}
-    for key, value in kwargs.iteritems():
+    for key, value in iteritems(kwargs):
         err[key] = value
     return err
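Putting the new error class and the updated `cs_error` together, a short sketch of the JSON body a blocked client would receive - the message and URI here are placeholders:

```python
# Sketch of the error body produced by ConsentNotGivenError; the message and
# URI are placeholders.
e = ConsentNotGivenError(
    msg="You must agree to the privacy policy first",
    consent_uri="https://server.example.com/_matrix/consent?u=user&h=abcd...",
)
print(e.error_dict())
# {'error': 'You must agree to the privacy policy first',
#  'errcode': 'M_CONSENT_NOT_GIVEN',
#  'consent_uri': 'https://server.example.com/_matrix/consent?u=user&h=abcd...'}
```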
@@ -17,7 +17,7 @@ from synapse.storage.presence import UserPresenceState
 from synapse.types import UserID, RoomID
 from twisted.internet import defer

-import ujson as json
+import simplejson as json
 import jsonschema
 from jsonschema import FormatChecker
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

@@ -14,6 +15,12 @@
 # limitations under the License.

 """Contains the URL paths to prefix various aspects of the server with. """
+from hashlib import sha256
+import hmac
+
+from six.moves.urllib.parse import urlencode
+
+from synapse.config import ConfigError

 CLIENT_PREFIX = "/_matrix/client/api/v1"
 CLIENT_V2_ALPHA_PREFIX = "/_matrix/client/v2_alpha"
@@ -25,3 +32,46 @@ SERVER_KEY_PREFIX = "/_matrix/key/v1"
 SERVER_KEY_V2_PREFIX = "/_matrix/key/v2"
 MEDIA_PREFIX = "/_matrix/media/r0"
 LEGACY_MEDIA_PREFIX = "/_matrix/media/v1"
+
+
+class ConsentURIBuilder(object):
+    def __init__(self, hs_config):
+        """
+        Args:
+            hs_config (synapse.config.homeserver.HomeServerConfig):
+        """
+        if hs_config.form_secret is None:
+            raise ConfigError(
+                "form_secret not set in config",
+            )
+        if hs_config.public_baseurl is None:
+            raise ConfigError(
+                "public_baseurl not set in config",
+            )
+
+        self._hmac_secret = hs_config.form_secret.encode("utf-8")
+        self._public_baseurl = hs_config.public_baseurl
+
+    def build_user_consent_uri(self, user_id):
+        """Build a URI which we can give to the user to do their privacy
+        policy consent
+
+        Args:
+            user_id (str): mxid or username of user
+
+        Returns:
+            (str) the URI where the user can do consent
+        """
+        mac = hmac.new(
+            key=self._hmac_secret,
+            msg=user_id,
+            digestmod=sha256,
+        ).hexdigest()
+        consent_uri = "%s_matrix/consent?%s" % (
+            self._public_baseurl,
+            urlencode({
+                "u": user_id,
+                "h": mac
+            }),
+        )
+        return consent_uri
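A hypothetical usage sketch of the builder above (the `FakeConfig` class simply stands in for a loaded `HomeServerConfig` with `form_secret` and `public_baseurl` set, as the constructor requires):

```python
# Hypothetical usage of ConsentURIBuilder; FakeConfig is a stand-in for a
# loaded HomeServerConfig.
class FakeConfig(object):
    form_secret = "<form_secret>"
    public_baseurl = "https://server.example.com/"

builder = ConsentURIBuilder(FakeConfig())
print(builder.build_user_consent_uri("@user:server.example.com"))
# e.g. https://server.example.com/_matrix/consent?u=%40user%3Aserver.example.com&h=1234abcd...
```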
@@ -32,11 +32,11 @@ from synapse.replication.tcp.client import ReplicationClientHandler
 from synapse.server import HomeServer
 from synapse.storage.engines import create_engine
 from synapse.util.httpresourcetree import create_resource_tree
-from synapse.util.logcontext import LoggingContext, preserve_fn
+from synapse.util.logcontext import LoggingContext, run_in_background
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
-from twisted.internet import reactor
-from twisted.web.resource import Resource
+from twisted.internet import reactor, defer
+from twisted.web.resource import NoResource

 logger = logging.getLogger("synapse.app.appservice")

@@ -64,7 +64,7 @@ class AppserviceServer(HomeServer):
         if name == "metrics":
             resources[METRICS_PREFIX] = MetricsResource(self)

-        root_resource = create_resource_tree(resources, Resource())
+        root_resource = create_resource_tree(resources, NoResource())

         _base.listen_tcp(
             bind_addresses,

@@ -74,6 +74,7 @@ class AppserviceServer(HomeServer):
                 site_tag,
                 listener_config,
                 root_resource,
+                self.version_string,
             )
         )

@@ -112,9 +113,14 @@ class ASReplicationHandler(ReplicationClientHandler):

         if stream_name == "events":
             max_stream_id = self.store.get_room_max_stream_ordering()
-            preserve_fn(
-                self.appservice_handler.notify_interested_services
-            )(max_stream_id)
+            run_in_background(self._notify_app_services, max_stream_id)
+
+    @defer.inlineCallbacks
+    def _notify_app_services(self, room_stream_id):
+        try:
+            yield self.appservice_handler.notify_interested_services(room_stream_id)
+        except Exception:
+            logger.exception("Error notifying application services of event")


 def start(config_options):
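The same `preserve_fn` to `run_in_background` rewrite recurs in several of the worker apps below (federation_sender, pusher, synchrotron, user_dir). The shape of the pattern, in a minimal self-contained sketch - `do_work` is a hypothetical stand-in for a real deferred-returning operation:

```python
# Minimal sketch of the pattern used in these diffs: kick off background work
# with run_in_background, and have the background function catch its own
# exceptions so a failure cannot break the replication loop that started it.
import logging

from twisted.internet import defer
from synapse.util.logcontext import run_in_background

logger = logging.getLogger(__name__)

def do_work(arg):
    # hypothetical stand-in for a real deferred-returning operation
    return defer.succeed(arg)

@defer.inlineCallbacks
def _background_task(arg):
    try:
        yield do_work(arg)
    except Exception:
        # swallow and log: an unhandled error here must not propagate
        logger.exception("Error in background task")

# fire-and-forget: the caller does not wait for the result
run_in_background(_background_task, "some value")
```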
@@ -44,7 +44,7 @@ from synapse.util.logcontext import LoggingContext
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
 from twisted.internet import reactor
-from twisted.web.resource import Resource
+from twisted.web.resource import NoResource

 logger = logging.getLogger("synapse.app.client_reader")

@@ -88,7 +88,7 @@ class ClientReaderServer(HomeServer):
                     "/_matrix/client/api/v1": resource,
                 })

-        root_resource = create_resource_tree(resources, Resource())
+        root_resource = create_resource_tree(resources, NoResource())

         _base.listen_tcp(
             bind_addresses,

@@ -98,6 +98,7 @@ class ClientReaderServer(HomeServer):
                     site_tag,
                     listener_config,
                     root_resource,
+                    self.version_string,
                 )
             )
@@ -52,7 +52,7 @@ from synapse.util.logcontext import LoggingContext
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
 from twisted.internet import reactor
-from twisted.web.resource import Resource
+from twisted.web.resource import NoResource

 logger = logging.getLogger("synapse.app.event_creator")

@@ -104,7 +104,7 @@ class EventCreatorServer(HomeServer):
                     "/_matrix/client/api/v1": resource,
                 })

-        root_resource = create_resource_tree(resources, Resource())
+        root_resource = create_resource_tree(resources, NoResource())

         _base.listen_tcp(
             bind_addresses,

@@ -114,6 +114,7 @@ class EventCreatorServer(HomeServer):
                     site_tag,
                     listener_config,
                     root_resource,
+                    self.version_string,
                 )
             )
@@ -41,7 +41,7 @@ from synapse.util.logcontext import LoggingContext
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
 from twisted.internet import reactor
-from twisted.web.resource import Resource
+from twisted.web.resource import NoResource

 logger = logging.getLogger("synapse.app.federation_reader")

@@ -77,7 +77,7 @@ class FederationReaderServer(HomeServer):
                     FEDERATION_PREFIX: TransportLayerServer(self),
                 })

-        root_resource = create_resource_tree(resources, Resource())
+        root_resource = create_resource_tree(resources, NoResource())

         _base.listen_tcp(
             bind_addresses,

@@ -87,6 +87,7 @@ class FederationReaderServer(HomeServer):
                     site_tag,
                     listener_config,
                     root_resource,
+                    self.version_string,
                 )
             )
@@ -38,11 +38,11 @@ from synapse.server import HomeServer
 from synapse.storage.engines import create_engine
 from synapse.util.async import Linearizer
 from synapse.util.httpresourcetree import create_resource_tree
-from synapse.util.logcontext import LoggingContext, preserve_fn
+from synapse.util.logcontext import LoggingContext, run_in_background
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
 from twisted.internet import defer, reactor
-from twisted.web.resource import Resource
+from twisted.web.resource import NoResource

 logger = logging.getLogger("synapse.app.federation_sender")

@@ -91,7 +91,7 @@ class FederationSenderServer(HomeServer):
         if name == "metrics":
             resources[METRICS_PREFIX] = MetricsResource(self)

-        root_resource = create_resource_tree(resources, Resource())
+        root_resource = create_resource_tree(resources, NoResource())

         _base.listen_tcp(
             bind_addresses,

@@ -101,6 +101,7 @@ class FederationSenderServer(HomeServer):
                 site_tag,
                 listener_config,
                 root_resource,
+                self.version_string,
             )
         )

@@ -229,7 +230,7 @@ class FederationSenderHandler(object):
         # presence, typing, etc.
         if stream_name == "federation":
             send_queue.process_rows_for_federation(self.federation_sender, rows)
-            preserve_fn(self.update_token)(token)
+            run_in_background(self.update_token, token)

         # We also need to poke the federation sender when new events happen
         elif stream_name == "events":

@@ -237,19 +238,22 @@ class FederationSenderHandler(object):

     @defer.inlineCallbacks
     def update_token(self, token):
-        self.federation_position = token
-
-        # We linearize here to ensure we don't have races updating the token
-        with (yield self._fed_position_linearizer.queue(None)):
-            if self._last_ack < self.federation_position:
-                yield self.store.update_federation_out_pos(
-                    "federation", self.federation_position
-                )
+        try:
+            self.federation_position = token

-            # We ACK this token over replication so that the master can drop
-            # its in memory queues
-            self.replication_client.send_federation_ack(self.federation_position)
-            self._last_ack = self.federation_position
+            # We linearize here to ensure we don't have races updating the token
+            with (yield self._fed_position_linearizer.queue(None)):
+                if self._last_ack < self.federation_position:
+                    yield self.store.update_federation_out_pos(
+                        "federation", self.federation_position
+                    )
+
+                # We ACK this token over replication so that the master can drop
+                # its in memory queues
+                self.replication_client.send_federation_ack(self.federation_position)
+                self._last_ack = self.federation_position
+        except Exception:
+            logger.exception("Error updating federation stream position")


 if __name__ == '__main__':
@@ -44,7 +44,7 @@ from synapse.util.logcontext import LoggingContext
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
 from twisted.internet import defer, reactor
-from twisted.web.resource import Resource
+from twisted.web.resource import NoResource

 logger = logging.getLogger("synapse.app.frontend_proxy")

@@ -90,7 +90,7 @@ class KeyUploadServlet(RestServlet):
         # They're actually trying to upload something, proxy to main synapse.
         # Pass through the auth headers, if any, in case the access token
         # is there.
-        auth_headers = request.requestHeaders.getRawHeaders("Authorization", [])
+        auth_headers = request.requestHeaders.getRawHeaders(b"Authorization", [])
         headers = {
             "Authorization": auth_headers,
         }

@@ -142,7 +142,7 @@ class FrontendProxyServer(HomeServer):
             "/_matrix/client/api/v1": resource,
         })

-        root_resource = create_resource_tree(resources, Resource())
+        root_resource = create_resource_tree(resources, NoResource())

         _base.listen_tcp(
             bind_addresses,

@@ -152,6 +152,7 @@ class FrontendProxyServer(HomeServer):
                 site_tag,
                 listener_config,
                 root_resource,
+                self.version_string,
             )
         )
@@ -34,8 +34,8 @@ from synapse.module_api import ModuleApi
 from synapse.http.additional_resource import AdditionalResource
 from synapse.http.server import RootRedirect
 from synapse.http.site import SynapseSite
-from synapse.metrics import register_memory_metrics
-from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
+from synapse.metrics import RegistryProxy
+from synapse.metrics.resource import METRICS_PREFIX
 from synapse.python_dependencies import CONDITIONAL_REQUIREMENTS, \
     check_requirements
 from synapse.replication.http import ReplicationRestResource, REPLICATION_PREFIX

@@ -48,6 +48,7 @@ from synapse.server import HomeServer
 from synapse.storage import are_all_users_on_domain
 from synapse.storage.engines import IncorrectDatabaseSetup, create_engine
 from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database
+from synapse.util.caches import CACHE_SIZE_FACTOR
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.logcontext import LoggingContext
 from synapse.util.manhole import manhole

@@ -56,10 +57,12 @@ from synapse.util.rlimit import change_resource_limit
 from synapse.util.versionstring import get_version_string
 from twisted.application import service
 from twisted.internet import defer, reactor
-from twisted.web.resource import EncodingResourceWrapper, Resource
+from twisted.web.resource import EncodingResourceWrapper, NoResource
 from twisted.web.server import GzipEncoderFactory
 from twisted.web.static import File

+from prometheus_client.twisted import MetricsResource
+
 logger = logging.getLogger("synapse.app.homeserver")


@@ -126,7 +129,7 @@ class SynapseHomeServer(HomeServer):
         if WEB_CLIENT_PREFIX in resources:
             root_resource = RootRedirect(WEB_CLIENT_PREFIX)
         else:
-            root_resource = Resource()
+            root_resource = NoResource()

         root_resource = create_resource_tree(resources, root_resource)

@@ -139,6 +142,7 @@ class SynapseHomeServer(HomeServer):
                     site_tag,
                     listener_config,
                     root_resource,
+                    self.version_string,
                 ),
                 self.tls_server_context_factory,
             )

@@ -152,6 +156,7 @@ class SynapseHomeServer(HomeServer):
                     site_tag,
                     listener_config,
                     root_resource,
+                    self.version_string,
                 )
             )
             logger.info("Synapse now listening on port %d", port)

@@ -181,6 +186,15 @@ class SynapseHomeServer(HomeServer):
                 "/_matrix/client/versions": client_resource,
             })

+        if name == "consent":
+            from synapse.rest.consent.consent_resource import ConsentResource
+            consent_resource = ConsentResource(self)
+            if compress:
+                consent_resource = gz_wrap(consent_resource)
+            resources.update({
+                "/_matrix/consent": consent_resource,
+            })
+
         if name == "federation":
             resources.update({
                 FEDERATION_PREFIX: TransportLayerServer(self),

@@ -218,7 +232,7 @@ class SynapseHomeServer(HomeServer):
             resources[WEB_CLIENT_PREFIX] = build_resource_for_web_client(self)

         if name == "metrics" and self.get_config().enable_metrics:
-            resources[METRICS_PREFIX] = MetricsResource(self)
+            resources[METRICS_PREFIX] = MetricsResource(RegistryProxy())

         if name == "replication":
             resources[REPLICATION_PREFIX] = ReplicationRestResource(self)

@@ -350,8 +364,6 @@ def setup(config_options):
         hs.get_datastore().start_doing_background_updates()
         hs.get_federation_client().start_get_pdu_cache()

-        register_memory_metrics(hs)
-
     reactor.callWhenRunning(start)

     return hs

@@ -402,6 +414,10 @@ def run(hs):

     stats = {}

+    # Contains the list of processes we will be monitoring
+    # currently either 0 or 1
+    stats_process = []
+
     @defer.inlineCallbacks
     def phone_stats_home():
         logger.info("Gathering stats for reporting")

@@ -425,8 +441,21 @@ def run(hs):
         stats["daily_active_rooms"] = yield hs.get_datastore().count_daily_active_rooms()
         stats["daily_messages"] = yield hs.get_datastore().count_daily_messages()

+        r30_results = yield hs.get_datastore().count_r30_users()
+        for name, count in r30_results.iteritems():
+            stats["r30_users_" + name] = count
+
+        daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()
+        stats["daily_sent_messages"] = daily_sent_messages
+        stats["cache_factor"] = CACHE_SIZE_FACTOR
+        stats["event_cache_size"] = hs.config.event_cache_size
+
+        if len(stats_process) > 0:
+            stats["memory_rss"] = 0
+            stats["cpu_average"] = 0
+            for process in stats_process:
+                stats["memory_rss"] += process.memory_info().rss
+                stats["cpu_average"] += int(process.cpu_percent(interval=None))
+
         logger.info("Reporting stats to matrix.org: %s" % (stats,))
         try:

@@ -437,10 +466,40 @@ def run(hs):
         except Exception as e:
             logger.warn("Error reporting stats: %s", e)

+    def performance_stats_init():
+        try:
+            import psutil
+            process = psutil.Process()
+            # Ensure we can fetch both, and make the initial request for cpu_percent
+            # so the next request will use this as the initial point.
+            process.memory_info().rss
+            process.cpu_percent(interval=None)
+            logger.info("report_stats can use psutil")
+            stats_process.append(process)
+        except (ImportError, AttributeError):
+            logger.warn(
+                "report_stats enabled but psutil is not installed or incorrect version."
+                " Disabling reporting of memory/cpu stats."
+                " Ensuring psutil is available will help matrix.org track performance"
+                " changes across releases."
+            )
+
+    def generate_user_daily_visit_stats():
+        hs.get_datastore().generate_user_daily_visits()
+
+    # Rather than update on per session basis, batch up the requests.
+    # If you increase the loop period, the accuracy of user_daily_visits
+    # table will decrease
+    clock.looping_call(generate_user_daily_visit_stats, 5 * 60 * 1000)
+
     if hs.config.report_stats:
         logger.info("Scheduling stats reporting for 3 hour intervals")
         clock.looping_call(phone_stats_home, 3 * 60 * 60 * 1000)

+        # We need to defer this init for the cases that we daemonize
+        # otherwise the process ID we get is that of the non-daemon process
+        clock.call_later(0, performance_stats_init)
+
         # We wait 5 minutes to send the first set of stats as the server can
         # be quite busy the first few minutes
         clock.call_later(5 * 60, phone_stats_home)
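The metrics listener now serves `prometheus_client`'s Twisted resource rather than synapse's previous home-grown one. A standalone sketch of that resource mounted on a bare Twisted site - the port and mount point here are arbitrary choices for illustration:

```python
# Sketch of serving prometheus metrics with prometheus_client's Twisted
# resource, outside of synapse; port 9090 and the /metrics path are arbitrary.
from prometheus_client import REGISTRY
from prometheus_client.twisted import MetricsResource
from twisted.internet import reactor
from twisted.web.resource import Resource
from twisted.web.server import Site

root = Resource()
root.putChild(b"metrics", MetricsResource(registry=REGISTRY))
reactor.listenTCP(9090, Site(root))
reactor.run()
```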
@@ -43,7 +43,7 @@ from synapse.util.logcontext import LoggingContext
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
 from twisted.internet import reactor
-from twisted.web.resource import Resource
+from twisted.web.resource import NoResource

 logger = logging.getLogger("synapse.app.media_repository")

@@ -84,7 +84,7 @@ class MediaRepositoryServer(HomeServer):
             ),
         })

-        root_resource = create_resource_tree(resources, Resource())
+        root_resource = create_resource_tree(resources, NoResource())

         _base.listen_tcp(
             bind_addresses,

@@ -94,6 +94,7 @@ class MediaRepositoryServer(HomeServer):
                 site_tag,
                 listener_config,
                 root_resource,
+                self.version_string,
             )
         )
@@ -33,11 +33,11 @@ from synapse.server import HomeServer
 from synapse.storage import DataStore
 from synapse.storage.engines import create_engine
 from synapse.util.httpresourcetree import create_resource_tree
-from synapse.util.logcontext import LoggingContext, preserve_fn
+from synapse.util.logcontext import LoggingContext, run_in_background
 from synapse.util.manhole import manhole
 from synapse.util.versionstring import get_version_string
 from twisted.internet import defer, reactor
-from twisted.web.resource import Resource
+from twisted.web.resource import NoResource

 logger = logging.getLogger("synapse.app.pusher")

@@ -94,7 +94,7 @@ class PusherServer(HomeServer):
         if name == "metrics":
             resources[METRICS_PREFIX] = MetricsResource(self)

-        root_resource = create_resource_tree(resources, Resource())
+        root_resource = create_resource_tree(resources, NoResource())

         _base.listen_tcp(
             bind_addresses,

@@ -104,6 +104,7 @@ class PusherServer(HomeServer):
                 site_tag,
                 listener_config,
                 root_resource,
+                self.version_string,
             )
         )

@@ -140,24 +141,27 @@ class PusherReplicationHandler(ReplicationClientHandler):

     def on_rdata(self, stream_name, token, rows):
         super(PusherReplicationHandler, self).on_rdata(stream_name, token, rows)
-        preserve_fn(self.poke_pushers)(stream_name, token, rows)
+        run_in_background(self.poke_pushers, stream_name, token, rows)

     @defer.inlineCallbacks
     def poke_pushers(self, stream_name, token, rows):
-        if stream_name == "pushers":
-            for row in rows:
-                if row.deleted:
-                    yield self.stop_pusher(row.user_id, row.app_id, row.pushkey)
-                else:
-                    yield self.start_pusher(row.user_id, row.app_id, row.pushkey)
-        elif stream_name == "events":
-            yield self.pusher_pool.on_new_notifications(
-                token, token,
-            )
-        elif stream_name == "receipts":
-            yield self.pusher_pool.on_new_receipts(
-                token, token, set(row.room_id for row in rows)
-            )
+        try:
+            if stream_name == "pushers":
+                for row in rows:
+                    if row.deleted:
+                        yield self.stop_pusher(row.user_id, row.app_id, row.pushkey)
+                    else:
+                        yield self.start_pusher(row.user_id, row.app_id, row.pushkey)
+            elif stream_name == "events":
+                yield self.pusher_pool.on_new_notifications(
+                    token, token,
+                )
+            elif stream_name == "receipts":
+                yield self.pusher_pool.on_new_receipts(
+                    token, token, set(row.room_id for row in rows)
+                )
+        except Exception:
+            logger.exception("Error poking pushers")

     def stop_pusher(self, user_id, app_id, pushkey):
         key = "%s:%s" % (app_id, pushkey)
@ -51,12 +51,14 @@ from synapse.storage.engines import create_engine
|
|||
from synapse.storage.presence import UserPresenceState
|
||||
from synapse.storage.roommember import RoomMemberStore
|
||||
from synapse.util.httpresourcetree import create_resource_tree
|
||||
from synapse.util.logcontext import LoggingContext, preserve_fn
|
||||
from synapse.util.logcontext import LoggingContext, run_in_background
|
||||
from synapse.util.manhole import manhole
|
||||
from synapse.util.stringutils import random_string
|
||||
from synapse.util.versionstring import get_version_string
|
||||
from twisted.internet import defer, reactor
|
||||
from twisted.web.resource import Resource
|
||||
from twisted.web.resource import NoResource
|
||||
|
||||
from six import iteritems
|
||||
|
||||
logger = logging.getLogger("synapse.app.synchrotron")
|
||||
|
||||
|
@ -211,7 +213,7 @@ class SynchrotronPresence(object):
|
|||
|
||||
def get_currently_syncing_users(self):
|
||||
return [
|
||||
user_id for user_id, count in self.user_to_num_current_syncs.iteritems()
|
||||
user_id for user_id, count in iteritems(self.user_to_num_current_syncs)
|
||||
if count > 0
|
||||
]
|
||||
|
||||
|
@ -269,7 +271,7 @@ class SynchrotronServer(HomeServer):
|
|||
"/_matrix/client/api/v1": resource,
|
||||
})
|
||||
|
||||
root_resource = create_resource_tree(resources, Resource())
|
||||
root_resource = create_resource_tree(resources, NoResource())
|
||||
|
||||
_base.listen_tcp(
|
||||
bind_addresses,
|
||||
|
@ -279,6 +281,7 @@ class SynchrotronServer(HomeServer):
|
|||
site_tag,
|
||||
listener_config,
|
||||
root_resource,
|
||||
self.version_string,
|
||||
)
|
||||
)
|
||||
|
||||
|
@ -325,8 +328,7 @@ class SyncReplicationHandler(ReplicationClientHandler):
|
|||
|
||||
def on_rdata(self, stream_name, token, rows):
|
||||
super(SyncReplicationHandler, self).on_rdata(stream_name, token, rows)
|
||||
|
||||
preserve_fn(self.process_and_notify)(stream_name, token, rows)
|
||||
run_in_background(self.process_and_notify, stream_name, token, rows)
|
||||
|
||||
def get_streams_to_replicate(self):
        args = super(SyncReplicationHandler, self).get_streams_to_replicate()

@@ -338,55 +340,58 @@ class SyncReplicationHandler(ReplicationClientHandler):

    @defer.inlineCallbacks
    def process_and_notify(self, stream_name, token, rows):
-        if stream_name == "events":
-            # We shouldn't get multiple rows per token for events stream, so
-            # we don't need to optimise this for multiple rows.
-            for row in rows:
-                event = yield self.store.get_event(row.event_id)
-                extra_users = ()
-                if event.type == EventTypes.Member:
-                    extra_users = (event.state_key,)
-                max_token = self.store.get_room_max_stream_ordering()
-                self.notifier.on_new_room_event(
-                    event, token, max_token, extra_users
-                )
-        elif stream_name == "push_rules":
-            self.notifier.on_new_event(
-                "push_rules_key", token, users=[row.user_id for row in rows],
-            )
-        elif stream_name in ("account_data", "tag_account_data",):
-            self.notifier.on_new_event(
-                "account_data_key", token, users=[row.user_id for row in rows],
-            )
-        elif stream_name == "receipts":
-            self.notifier.on_new_event(
-                "receipt_key", token, rooms=[row.room_id for row in rows],
-            )
-        elif stream_name == "typing":
-            self.typing_handler.process_replication_rows(token, rows)
-            self.notifier.on_new_event(
-                "typing_key", token, rooms=[row.room_id for row in rows],
-            )
-        elif stream_name == "to_device":
-            entities = [row.entity for row in rows if row.entity.startswith("@")]
-            if entities:
-                self.notifier.on_new_event(
-                    "to_device_key", token, users=entities,
-                )
-        elif stream_name == "device_lists":
-            all_room_ids = set()
-            for row in rows:
-                room_ids = yield self.store.get_rooms_for_user(row.user_id)
-                all_room_ids.update(room_ids)
-            self.notifier.on_new_event(
-                "device_list_key", token, rooms=all_room_ids,
-            )
-        elif stream_name == "presence":
-            yield self.presence_handler.process_replication_rows(token, rows)
-        elif stream_name == "groups":
-            self.notifier.on_new_event(
-                "groups_key", token, users=[row.user_id for row in rows],
-            )
+        try:
+            if stream_name == "events":
+                # We shouldn't get multiple rows per token for events stream, so
+                # we don't need to optimise this for multiple rows.
+                for row in rows:
+                    event = yield self.store.get_event(row.event_id)
+                    extra_users = ()
+                    if event.type == EventTypes.Member:
+                        extra_users = (event.state_key,)
+                    max_token = self.store.get_room_max_stream_ordering()
+                    self.notifier.on_new_room_event(
+                        event, token, max_token, extra_users
+                    )
+            elif stream_name == "push_rules":
+                self.notifier.on_new_event(
+                    "push_rules_key", token, users=[row.user_id for row in rows],
+                )
+            elif stream_name in ("account_data", "tag_account_data",):
+                self.notifier.on_new_event(
+                    "account_data_key", token, users=[row.user_id for row in rows],
+                )
+            elif stream_name == "receipts":
+                self.notifier.on_new_event(
+                    "receipt_key", token, rooms=[row.room_id for row in rows],
+                )
+            elif stream_name == "typing":
+                self.typing_handler.process_replication_rows(token, rows)
+                self.notifier.on_new_event(
+                    "typing_key", token, rooms=[row.room_id for row in rows],
+                )
+            elif stream_name == "to_device":
+                entities = [row.entity for row in rows if row.entity.startswith("@")]
+                if entities:
+                    self.notifier.on_new_event(
+                        "to_device_key", token, users=entities,
+                    )
+            elif stream_name == "device_lists":
+                all_room_ids = set()
+                for row in rows:
+                    room_ids = yield self.store.get_rooms_for_user(row.user_id)
+                    all_room_ids.update(room_ids)
+                self.notifier.on_new_event(
+                    "device_list_key", token, rooms=all_room_ids,
+                )
+            elif stream_name == "presence":
+                yield self.presence_handler.process_replication_rows(token, rows)
+            elif stream_name == "groups":
+                self.notifier.on_new_event(
+                    "groups_key", token, users=[row.user_id for row in rows],
+                )
+        except Exception:
+            logger.exception("Error processing replication")


def start(config_options):
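A recurring pattern in this changeset: the body of a background callback is wrapped in a broad try/except so that a single bad row is logged rather than killing the whole replication loop. A minimal sketch of the pattern for illustration (handle_row is a hypothetical stand-in for the real per-stream handlers):

    import logging

    logger = logging.getLogger(__name__)

    def process_and_notify(stream_name, token, rows):
        try:
            for row in rows:
                handle_row(stream_name, token, row)
        except Exception:
            # swallow after logging: the caller keeps consuming later tokens
            logger.exception("Error processing replication")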
@@ -38,7 +38,7 @@ def pid_running(pid):
    try:
        os.kill(pid, 0)
        return True
-    except OSError, err:
+    except OSError as err:
        if err.errno == errno.EPERM:
            return True
        return False

@@ -98,7 +98,7 @@ def stop(pidfile, app):
    try:
        os.kill(pid, signal.SIGTERM)
        write("stopped %s" % (app,), colour=GREEN)
-    except OSError, err:
+    except OSError as err:
        if err.errno == errno.ESRCH:
            write("%s not running" % (app,), colour=YELLOW)
        elif err.errno == errno.EPERM:

@@ -252,6 +252,7 @@ def main():
            for running_pid in running_pids:
                while pid_running(running_pid):
                    time.sleep(0.2)
+            write("All processes exited; now restarting...")

    if action == "start" or action == "restart":
        if start_stop_synapse:
@@ -39,11 +39,11 @@ from synapse.storage.engines import create_engine
from synapse.storage.user_directory import UserDirectoryStore
from synapse.util.caches.stream_change_cache import StreamChangeCache
from synapse.util.httpresourcetree import create_resource_tree
-from synapse.util.logcontext import LoggingContext, preserve_fn
+from synapse.util.logcontext import LoggingContext, run_in_background
from synapse.util.manhole import manhole
from synapse.util.versionstring import get_version_string
-from twisted.internet import reactor
-from twisted.web.resource import Resource
+from twisted.internet import reactor, defer
+from twisted.web.resource import NoResource

logger = logging.getLogger("synapse.app.user_dir")

@@ -116,7 +116,7 @@ class UserDirectoryServer(HomeServer):
            "/_matrix/client/api/v1": resource,
        })

-        root_resource = create_resource_tree(resources, Resource())
+        root_resource = create_resource_tree(resources, NoResource())

        _base.listen_tcp(
            bind_addresses,

@@ -126,6 +126,7 @@ class UserDirectoryServer(HomeServer):
                site_tag,
                listener_config,
                root_resource,
+                self.version_string,
            )
        )

@@ -164,7 +165,14 @@ class UserDirectoryReplicationHandler(ReplicationClientHandler):
            stream_name, token, rows
        )
        if stream_name == "current_state_deltas":
-            preserve_fn(self.user_directory.notify_new_event)()
+            run_in_background(self._notify_directory)
+
+    @defer.inlineCallbacks
+    def _notify_directory(self):
+        try:
+            yield self.user_directory.notify_new_event()
+        except Exception:
+            logger.exception("Error notifying user directory of state update")


def start(config_options):
@@ -21,6 +21,8 @@ from twisted.internet import defer

import logging
import re

+from six import string_types
+
logger = logging.getLogger(__name__)

@@ -146,7 +148,7 @@ class ApplicationService(object):
        )

        regex = regex_obj.get("regex")
-        if isinstance(regex, basestring):
+        if isinstance(regex, string_types):
            regex_obj["regex"] = re.compile(regex)  # Pre-compile regex
        else:
            raise ValueError(
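For context on the six helper adopted throughout these files: string_types is (str, unicode) on python 2 and (str,) on python 3, so the old basestring checks keep working on both. A tiny illustration:

    from six import string_types

    assert isinstance("ascii", string_types)
    assert isinstance(u"unicode", string_types)
    assert not isinstance(123, string_types)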
@@ -18,7 +18,6 @@ from synapse.api.constants import ThirdPartyEntityKind
from synapse.api.errors import CodeMessageException
from synapse.http.client import SimpleHttpClient
from synapse.events.utils import serialize_event
-from synapse.util.logcontext import preserve_fn, make_deferred_yieldable
from synapse.util.caches.response_cache import ResponseCache
from synapse.types import ThirdPartyInstanceID

@@ -73,7 +72,8 @@ class ApplicationServiceApi(SimpleHttpClient):
        super(ApplicationServiceApi, self).__init__(hs)
        self.clock = hs.get_clock()

-        self.protocol_meta_cache = ResponseCache(hs, timeout_ms=HOUR_IN_MS)
+        self.protocol_meta_cache = ResponseCache(hs, "as_protocol_meta",
+                                                 timeout_ms=HOUR_IN_MS)

    @defer.inlineCallbacks
    def query_user(self, service, user_id):

@@ -193,12 +193,7 @@ class ApplicationServiceApi(SimpleHttpClient):
            defer.returnValue(None)

        key = (service.id, protocol)
-        result = self.protocol_meta_cache.get(key)
-        if not result:
-            result = self.protocol_meta_cache.set(
-                key, preserve_fn(_get)()
-            )
-        return make_deferred_yieldable(result)
+        return self.protocol_meta_cache.wrap(key, _get)

    @defer.inlineCallbacks
    def push_bulk(self, service, events, txn_id=None):
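The new ResponseCache.wrap folds the old get/if-miss/set dance into one call. The real method lives in synapse.util.caches.response_cache; the sketch below is only an approximation inferred from the code it replaces here (run_in_background and make_deferred_yieldable are from synapse.util.logcontext):

    def wrap(self, key, callback, *args, **kwargs):
        # roughly: return the cached or in-flight deferred if present,
        # otherwise start the computation and cache the resulting deferred
        result = self.get(key)
        if not result:
            result = self.set(key, run_in_background(callback, *args, **kwargs))
        return make_deferred_yieldable(result)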
@@ -51,7 +51,7 @@ components.
from twisted.internet import defer

from synapse.appservice import ApplicationServiceState
-from synapse.util.logcontext import preserve_fn
+from synapse.util.logcontext import run_in_background
from synapse.util.metrics import Measure

import logging

@@ -106,7 +106,7 @@ class _ServiceQueuer(object):
    def enqueue(self, service, event):
        # if this service isn't being sent something
        self.queued_events.setdefault(service.id, []).append(event)
-        preserve_fn(self._send_request)(service)
+        run_in_background(self._send_request, service)

    @defer.inlineCallbacks
    def _send_request(self, service):

@@ -152,10 +152,10 @@ class _TransactionController(object):
            if sent:
                yield txn.complete(self.store)
            else:
-                preserve_fn(self._start_recoverer)(service)
-        except Exception as e:
-            logger.exception(e)
-            preserve_fn(self._start_recoverer)(service)
+                run_in_background(self._start_recoverer, service)
+        except Exception:
+            logger.exception("Error creating appservice transaction")
+            run_in_background(self._start_recoverer, service)

    @defer.inlineCallbacks
    def on_recovered(self, recoverer):

@@ -176,17 +176,20 @@ class _TransactionController(object):

    @defer.inlineCallbacks
    def _start_recoverer(self, service):
-        yield self.store.set_appservice_state(
-            service,
-            ApplicationServiceState.DOWN
-        )
-        logger.info(
-            "Application service falling behind. Starting recoverer. AS ID %s",
-            service.id
-        )
-        recoverer = self.recoverer_fn(service, self.on_recovered)
-        self.add_recoverers([recoverer])
-        recoverer.recover()
+        try:
+            yield self.store.set_appservice_state(
+                service,
+                ApplicationServiceState.DOWN
+            )
+            logger.info(
+                "Application service falling behind. Starting recoverer. AS ID %s",
+                service.id
+            )
+            recoverer = self.recoverer_fn(service, self.on_recovered)
+            self.add_recoverers([recoverer])
+            recoverer.recover()
+        except Exception:
+            logger.exception("Error starting AS recoverer")

    @defer.inlineCallbacks
    def _is_service_up(self, service):
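The mechanical shape of the preserve_fn to run_in_background migration seen here and in the following files: instead of wrapping the function and then calling the wrapper, the function and its arguments go in a single call. For example, from this very hunk:

    # before: wrap the callable, then invoke it
    preserve_fn(self._send_request)(service)

    # after: one call; arguments are passed straight through
    run_in_background(self._send_request, service)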
@@ -12,3 +12,9 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+
+from ._base import ConfigError
+
+# export ConfigError if somebody does import *
+# this is largely a fudge to stop PEP8 moaning about the import
+__all__ = ["ConfigError"]
@@ -19,6 +19,8 @@ import os
import yaml
from textwrap import dedent

+from six import integer_types
+

class ConfigError(Exception):
    pass

@@ -49,7 +51,7 @@ Missing mandatory `server_name` config option.
class Config(object):
    @staticmethod
    def parse_size(value):
-        if isinstance(value, int) or isinstance(value, long):
+        if isinstance(value, integer_types):
            return value
        sizes = {"K": 1024, "M": 1024 * 1024}
        size = 1

@@ -61,7 +63,7 @@ class Config(object):

    @staticmethod
    def parse_duration(value):
-        if isinstance(value, int) or isinstance(value, long):
+        if isinstance(value, integer_types):
            return value
        second = 1000
        minute = 60 * second

@@ -279,31 +281,31 @@ class Config(object):
            )
            if not cls.path_exists(config_dir_path):
                os.makedirs(config_dir_path)
-            with open(config_path, "wb") as config_file:
-                config_bytes, config = obj.generate_config(
+            with open(config_path, "w") as config_file:
+                config_str, config = obj.generate_config(
                    config_dir_path=config_dir_path,
                    server_name=server_name,
                    report_stats=(config_args.report_stats == "yes"),
                    is_generating_file=True
                )
                obj.invoke_all("generate_files", config)
-                config_file.write(config_bytes)
-            print (
+                config_file.write(config_str)
+            print((
                "A config file has been generated in %r for server name"
                " %r with corresponding SSL keys and self-signed"
                " certificates. Please review this file and customise it"
                " to your needs."
-            ) % (config_path, server_name)
-            print (
+            ) % (config_path, server_name))
+            print(
                "If this server name is incorrect, you will need to"
                " regenerate the SSL certificates"
            )
            return
        else:
-            print (
+            print((
                "Config file %r already exists. Generating any missing key"
                " files."
-            ) % (config_path,)
+            ) % (config_path,))
            generate_keys = True

        parser = argparse.ArgumentParser(
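A worked example of parse_size after the change. The suffix-handling tail of the method is not shown in this hunk, so the expected results below assume it multiplies by the 'K'/'M' factor from the sizes dict shown above:

    assert Config.parse_size(2048) == 2048               # ints pass straight through
    assert Config.parse_size("10K") == 10 * 1024         # 10240
    assert Config.parse_size("10M") == 10 * 1024 * 1024  # 10485760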
@@ -17,10 +17,12 @@ from ._base import Config, ConfigError
from synapse.appservice import ApplicationService
from synapse.types import UserID

-import urllib
import yaml
import logging

+from six import string_types
+from six.moves.urllib import parse as urlparse
+
logger = logging.getLogger(__name__)

@@ -89,21 +91,21 @@ def _load_appservice(hostname, as_info, config_filename):
        "id", "as_token", "hs_token", "sender_localpart"
    ]
    for field in required_string_fields:
-        if not isinstance(as_info.get(field), basestring):
+        if not isinstance(as_info.get(field), string_types):
            raise KeyError("Required string field: '%s' (%s)" % (
                field, config_filename,
            ))

    # 'url' must either be a string or explicitly null, not missing
    # to avoid accidentally turning off push for ASes.
-    if (not isinstance(as_info.get("url"), basestring) and
+    if (not isinstance(as_info.get("url"), string_types) and
            as_info.get("url", "") is not None):
        raise KeyError(
            "Required string field or explicit null: 'url' (%s)" % (config_filename,)
        )

    localpart = as_info["sender_localpart"]
-    if urllib.quote(localpart) != localpart:
+    if urlparse.quote(localpart) != localpart:
        raise ValueError(
            "sender_localpart needs characters which are not URL encoded."
        )

@@ -128,7 +130,7 @@ def _load_appservice(hostname, as_info, config_filename):
                "Expected namespace entry in %s to be an object,"
                " but got %s", ns, regex_obj
            )
-            if not isinstance(regex_obj.get("regex"), basestring):
+            if not isinstance(regex_obj.get("regex"), string_types):
                raise ValueError(
                    "Missing/bad type 'regex' key in %s", regex_obj
                )
85
synapse/config/consent_config.py
Normal file

@@ -0,0 +1,85 @@
# -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ._base import Config

DEFAULT_CONFIG = """\
# User Consent configuration
#
# Parts of this section are required if enabling the 'consent' resource under
# 'listeners', in particular 'template_dir' and 'version'.
#
# 'template_dir' gives the location of the templates for the HTML forms.
# This directory should contain one subdirectory per language (eg, 'en', 'fr'),
# and each language directory should contain the policy document (named as
# '<version>.html') and a success page (success.html).
#
# 'version' specifies the 'current' version of the policy document. It defines
# the version to be served by the consent resource if there is no 'v'
# parameter.
#
# 'server_notice_content', if enabled, will send a user a "Server Notice"
# asking them to consent to the privacy policy. The 'server_notices' section
# must also be configured for this to work. Notices will *not* be sent to
# guest users unless 'send_server_notice_to_guests' is set to true.
#
# 'block_events_error', if set, will block any attempts to send events
# until the user consents to the privacy policy. The value of the setting is
# used as the text of the error.
#
# user_consent:
#   template_dir: res/templates/privacy
#   version: 1.0
#   server_notice_content:
#     msgtype: m.text
#     body: >-
#       To continue using this homeserver you must review and agree to the
#       terms and conditions at %(consent_uri)s
#   send_server_notice_to_guests: True
#   block_events_error: >-
#     To continue using this homeserver you must review and agree to the
#     terms and conditions at %(consent_uri)s
#
"""


class ConsentConfig(Config):
    def __init__(self):
        super(ConsentConfig, self).__init__()

        self.user_consent_version = None
        self.user_consent_template_dir = None
        self.user_consent_server_notice_content = None
        self.user_consent_server_notice_to_guests = False
        self.block_events_without_consent_error = None

    def read_config(self, config):
        consent_config = config.get("user_consent")
        if consent_config is None:
            return
        self.user_consent_version = str(consent_config["version"])
        self.user_consent_template_dir = consent_config["template_dir"]
        self.user_consent_server_notice_content = consent_config.get(
            "server_notice_content",
        )
        self.block_events_without_consent_error = consent_config.get(
            "block_events_error",
        )
        self.user_consent_server_notice_to_guests = bool(consent_config.get(
            "send_server_notice_to_guests", False,
        ))

    def default_config(self, **kwargs):
        return DEFAULT_CONFIG
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -12,7 +13,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
from .tls import TlsConfig
from .server import ServerConfig
from .logger import LoggingConfig

@@ -37,6 +37,8 @@ from .push import PushConfig
from .spam_checker import SpamCheckerConfig
from .groups import GroupsConfig
from .user_directory import UserDirectoryConfig
+from .consent_config import ConsentConfig
+from .server_notices_config import ServerNoticesConfig


class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig,

@@ -45,12 +47,15 @@ class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig,
                       AppServiceConfig, KeyConfig, SAML2Config, CasConfig,
                       JWTConfig, PasswordConfig, EmailConfig,
                       WorkerConfig, PasswordAuthProviderConfig, PushConfig,
-                       SpamCheckerConfig, GroupsConfig, UserDirectoryConfig,):
+                       SpamCheckerConfig, GroupsConfig, UserDirectoryConfig,
+                       ConsentConfig,
+                       ServerNoticesConfig,
+                       ):
    pass


if __name__ == '__main__':
    import sys
    sys.stdout.write(
-        HomeServerConfig().generate_config(sys.argv[1], sys.argv[2])[0]
+        HomeServerConfig().generate_config(sys.argv[1], sys.argv[2], True)[0]
    )
@@ -59,14 +59,20 @@ class KeyConfig(Config):

        self.expire_access_token = config.get("expire_access_token", False)

+        # a secret which is used to calculate HMACs for form values, to stop
+        # falsification of values
+        self.form_secret = config.get("form_secret", None)
+
    def default_config(self, config_dir_path, server_name, is_generating_file=False,
                       **kwargs):
        base_key_name = os.path.join(config_dir_path, server_name)

        if is_generating_file:
            macaroon_secret_key = random_string_with_symbols(50)
+            form_secret = '"%s"' % random_string_with_symbols(50)
        else:
            macaroon_secret_key = None
+            form_secret = 'null'

        return """\
        macaroon_secret_key: "%(macaroon_secret_key)s"

@@ -74,6 +80,10 @@ class KeyConfig(Config):
        # Used to enable access token expiration.
        expire_access_token: False

+        # a secret which is used to calculate HMACs for form values, to stop
+        # falsification of values
+        form_secret: %(form_secret)s
+
        ## Signing Keys ##

        # Path to the signing key to sign messages with
@@ -117,7 +117,7 @@ class LoggingConfig(Config):
        log_config = config.get("log_config")
        if log_config and not os.path.exists(log_config):
            log_file = self.abspath("homeserver.log")
-            with open(log_config, "wb") as log_config_file:
+            with open(log_config, "w") as log_config_file:
                log_config_file.write(
                    DEFAULT_LOG_CONFIG.substitute(log_file=log_file)
                )
@@ -77,7 +77,9 @@ class RegistrationConfig(Config):

        # Set the number of bcrypt rounds used to generate password hash.
        # Larger numbers increase the work factor needed to generate the hash.
-        # The default number of rounds is 12.
+        # The default number is 12 (which equates to 2^12 rounds).
+        # N.B. that increasing this will exponentially increase the time required
+        # to register or login - e.g. 24 => 2^24 rounds which will take >20 mins.
        bcrypt_rounds: 12

        # Allows users to register as guests without a password/email/etc, and
86
synapse/config/server_notices_config.py
Normal file

@@ -0,0 +1,86 @@
# -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._base import Config
from synapse.types import UserID

DEFAULT_CONFIG = """\
# Server Notices room configuration
#
# Uncomment this section to enable a room which can be used to send notices
# from the server to users. It is a special room which cannot be left; notices
# come from a special "notices" user id.
#
# If you uncomment this section, you *must* define the system_mxid_localpart
# setting, which defines the id of the user which will be used to send the
# notices.
#
# It's also possible to override the room name, the display name of the
# "notices" user, and the avatar for the user.
#
# server_notices:
#   system_mxid_localpart: notices
#   system_mxid_display_name: "Server Notices"
#   system_mxid_avatar_url: "mxc://server.com/oumMVlgDnLYFaPVkExemNVVZ"
#   room_name: "Server Notices"
"""


class ServerNoticesConfig(Config):
    """Configuration for the server notices room.

    Attributes:
        server_notices_mxid (str|None):
            The MXID to use for server notices.
            None if server notices are not enabled.

        server_notices_mxid_display_name (str|None):
            The display name to use for the server notices user.
            None if server notices are not enabled.

        server_notices_mxid_avatar_url (str|None):
            The avatar URL to use for the server notices user.
            None if server notices are not enabled.

        server_notices_room_name (str|None):
            The name to use for the server notices room.
            None if server notices are not enabled.
    """
    def __init__(self):
        super(ServerNoticesConfig, self).__init__()
        self.server_notices_mxid = None
        self.server_notices_mxid_display_name = None
        self.server_notices_mxid_avatar_url = None
        self.server_notices_room_name = None

    def read_config(self, config):
        c = config.get("server_notices")
        if c is None:
            return

        mxid_localpart = c['system_mxid_localpart']
        self.server_notices_mxid = UserID(
            mxid_localpart, self.server_name,
        ).to_string()
        self.server_notices_mxid_display_name = c.get(
            'system_mxid_display_name', None,
        )
        self.server_notices_mxid_avatar_url = c.get(
            'system_mxid_avatar_url', None,
        )
        # todo: i18n
        self.server_notices_room_name = c.get('room_name', "Server Notices")

    def default_config(self, **kwargs):
        return DEFAULT_CONFIG
@@ -133,7 +133,7 @@ class TlsConfig(Config):
        tls_dh_params_path = config["tls_dh_params_path"]

        if not self.path_exists(tls_private_key_path):
-            with open(tls_private_key_path, "w") as private_key_file:
+            with open(tls_private_key_path, "wb") as private_key_file:
                tls_private_key = crypto.PKey()
                tls_private_key.generate_key(crypto.TYPE_RSA, 2048)
                private_key_pem = crypto.dump_privatekey(

@@ -148,7 +148,7 @@ class TlsConfig(Config):
        )

        if not self.path_exists(tls_certificate_path):
-            with open(tls_certificate_path, "w") as certificate_file:
+            with open(tls_certificate_path, "wb") as certificate_file:
                cert = crypto.X509()
                subject = cert.get_subject()
                subject.CN = config["server_name"]
@@ -13,8 +13,8 @@
# limitations under the License.

from twisted.internet import ssl
-from OpenSSL import SSL
-from twisted.internet._sslverify import _OpenSSLECCurve, _defaultCurveName
+from OpenSSL import SSL, crypto
+from twisted.internet._sslverify import _defaultCurveName

import logging

@@ -32,8 +32,9 @@ class ServerContextFactory(ssl.ContextFactory):
    @staticmethod
    def configure_context(context, config):
        try:
-            _ecCurve = _OpenSSLECCurve(_defaultCurveName)
-            _ecCurve.addECKeyToContext(context)
+            _ecCurve = crypto.get_elliptic_curve(_defaultCurveName)
+            context.set_tmp_ecdh(_ecCurve)
+
        except Exception:
            logger.exception("Failed to enable elliptic curve for TLS")
        context.set_options(SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3)
@@ -19,7 +19,8 @@ from synapse.api.errors import SynapseError, Codes
from synapse.util import unwrapFirstError, logcontext
from synapse.util.logcontext import (
    PreserveLoggingContext,
-    preserve_fn
+    preserve_fn,
+    run_in_background,
)
from synapse.util.metrics import Measure

@@ -127,7 +128,7 @@ class Keyring(object):

        verify_requests.append(verify_request)

-        preserve_fn(self._start_key_lookups)(verify_requests)
+        run_in_background(self._start_key_lookups, verify_requests)

        # Pass those keys to handle_key_deferred so that the json object
        # signatures can be verified

@@ -146,53 +147,56 @@ class Keyring(object):
            verify_requests (List[VerifyKeyRequest]):
        """

-        # create a deferred for each server we're going to look up the keys
-        # for; we'll resolve them once we have completed our lookups.
-        # These will be passed into wait_for_previous_lookups to block
-        # any other lookups until we have finished.
-        # The deferreds are called with no logcontext.
-        server_to_deferred = {
-            rq.server_name: defer.Deferred()
-            for rq in verify_requests
-        }
-
-        # We want to wait for any previous lookups to complete before
-        # proceeding.
-        yield self.wait_for_previous_lookups(
-            [rq.server_name for rq in verify_requests],
-            server_to_deferred,
-        )
-
-        # Actually start fetching keys.
-        self._get_server_verify_keys(verify_requests)
-
-        # When we've finished fetching all the keys for a given server_name,
-        # resolve the deferred passed to `wait_for_previous_lookups` so that
-        # any lookups waiting will proceed.
-        #
-        # map from server name to a set of request ids
-        server_to_request_ids = {}
-
-        for verify_request in verify_requests:
-            server_name = verify_request.server_name
-            request_id = id(verify_request)
-            server_to_request_ids.setdefault(server_name, set()).add(request_id)
-
-        def remove_deferreds(res, verify_request):
-            server_name = verify_request.server_name
-            request_id = id(verify_request)
-            server_to_request_ids[server_name].discard(request_id)
-            if not server_to_request_ids[server_name]:
-                d = server_to_deferred.pop(server_name, None)
-                if d:
-                    d.callback(None)
-            return res
-
-        for verify_request in verify_requests:
-            verify_request.deferred.addBoth(
-                remove_deferreds, verify_request,
-            )
+        try:
+            # create a deferred for each server we're going to look up the keys
+            # for; we'll resolve them once we have completed our lookups.
+            # These will be passed into wait_for_previous_lookups to block
+            # any other lookups until we have finished.
+            # The deferreds are called with no logcontext.
+            server_to_deferred = {
+                rq.server_name: defer.Deferred()
+                for rq in verify_requests
+            }
+
+            # We want to wait for any previous lookups to complete before
+            # proceeding.
+            yield self.wait_for_previous_lookups(
+                [rq.server_name for rq in verify_requests],
+                server_to_deferred,
+            )
+
+            # Actually start fetching keys.
+            self._get_server_verify_keys(verify_requests)
+
+            # When we've finished fetching all the keys for a given server_name,
+            # resolve the deferred passed to `wait_for_previous_lookups` so that
+            # any lookups waiting will proceed.
+            #
+            # map from server name to a set of request ids
+            server_to_request_ids = {}
+
+            for verify_request in verify_requests:
+                server_name = verify_request.server_name
+                request_id = id(verify_request)
+                server_to_request_ids.setdefault(server_name, set()).add(request_id)
+
+            def remove_deferreds(res, verify_request):
+                server_name = verify_request.server_name
+                request_id = id(verify_request)
+                server_to_request_ids[server_name].discard(request_id)
+                if not server_to_request_ids[server_name]:
+                    d = server_to_deferred.pop(server_name, None)
+                    if d:
+                        d.callback(None)
+                return res
+
+            for verify_request in verify_requests:
+                verify_request.deferred.addBoth(
+                    remove_deferreds, verify_request,
+                )
+        except Exception:
+            logger.exception("Error starting key lookups")

    @defer.inlineCallbacks
    def wait_for_previous_lookups(self, server_names, server_to_deferred):
        """Waits for any previous key lookups for the given servers to finish.

@@ -313,7 +317,7 @@ class Keyring(object):
        if not verify_request.deferred.called:
            verify_request.deferred.errback(err)

-        preserve_fn(do_iterations)().addErrback(on_err)
+        run_in_background(do_iterations).addErrback(on_err)

    @defer.inlineCallbacks
    def get_keys_from_store(self, server_name_and_key_ids):

@@ -329,8 +333,9 @@ class Keyring(object):
        """
        res = yield logcontext.make_deferred_yieldable(defer.gatherResults(
            [
-                preserve_fn(self.store.get_server_verify_keys)(
-                    server_name, key_ids
+                run_in_background(
+                    self.store.get_server_verify_keys,
+                    server_name, key_ids,
                ).addCallback(lambda ks, server: (server, ks), server_name)
                for server_name, key_ids in server_name_and_key_ids
            ],

@@ -352,13 +357,13 @@ class Keyring(object):
            logger.exception(
                "Unable to get key from %r: %s %s",
                perspective_name,
-                type(e).__name__, str(e.message),
+                type(e).__name__, str(e),
            )
            defer.returnValue({})

        results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
            [
-                preserve_fn(get_key)(p_name, p_keys)
+                run_in_background(get_key, p_name, p_keys)
                for p_name, p_keys in self.perspective_servers.items()
            ],
            consumeErrors=True,

@@ -384,7 +389,7 @@ class Keyring(object):
            logger.info(
                "Unable to get key %r for %r directly: %s %s",
                key_ids, server_name,
-                type(e).__name__, str(e.message),
+                type(e).__name__, str(e),
            )

        if not keys:

@@ -398,7 +403,7 @@ class Keyring(object):

        results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
            [
-                preserve_fn(get_key)(server_name, key_ids)
+                run_in_background(get_key, server_name, key_ids)
                for server_name, key_ids in server_name_and_key_ids
            ],
            consumeErrors=True,

@@ -481,7 +486,8 @@ class Keyring(object):

        yield logcontext.make_deferred_yieldable(defer.gatherResults(
            [
-                preserve_fn(self.store_keys)(
+                run_in_background(
+                    self.store_keys,
                    server_name=server_name,
                    from_server=perspective_name,
                    verify_keys=response_keys,

@@ -539,7 +545,8 @@ class Keyring(object):

        yield logcontext.make_deferred_yieldable(defer.gatherResults(
            [
-                preserve_fn(self.store_keys)(
+                run_in_background(
+                    self.store_keys,
                    server_name=key_server_name,
                    from_server=server_name,
                    verify_keys=verify_keys,

@@ -615,7 +622,8 @@ class Keyring(object):

        yield logcontext.make_deferred_yieldable(defer.gatherResults(
            [
-                preserve_fn(self.store.store_server_keys_json)(
+                run_in_background(
+                    self.store.store_server_keys_json,
                    server_name=server_name,
                    key_id=key_id,
                    from_server=server_name,

@@ -716,7 +724,8 @@ class Keyring(object):
        # TODO(markjh): Store whether the keys have expired.
        return logcontext.make_deferred_yieldable(defer.gatherResults(
            [
-                preserve_fn(self.store.store_server_verify_key)(
+                run_in_background(
+                    self.store.store_server_verify_key,
                    server_name, server_name, key.time_added, key
                )
                for key_id, key in verify_keys.items()

@@ -734,7 +743,7 @@ def _handle_key_deferred(verify_request):
    except IOError as e:
        logger.warn(
            "Got IOError when downloading keys for %s: %s %s",
-            server_name, type(e).__name__, str(e.message),
+            server_name, type(e).__name__, str(e),
        )
        raise SynapseError(
            502,

@@ -744,7 +753,7 @@ def _handle_key_deferred(verify_request):
    except Exception as e:
        logger.exception(
            "Got Exception when downloading keys for %s: %s %s",
-            server_name, type(e).__name__, str(e.message),
+            server_name, type(e).__name__, str(e),
        )
        raise SynapseError(
            401,
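The run_in_background + gatherResults combination used throughout this file is the standard fan-out in this codebase: start each lookup off the current logcontext, then wait for all of them together. A stripped-down sketch, where fetch_key is a hypothetical stand-in for the real per-server callbacks:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def fetch_all(server_name_and_key_ids):
        # kick off one background lookup per server, then gather the results;
        # consumeErrors + unwrapFirstError re-raises the first failure cleanly
        results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
            [
                run_in_background(fetch_key, server_name, key_ids)
                for server_name, key_ids in server_name_and_key_ids
            ],
            consumeErrors=True,
        ).addErrback(unwrapFirstError))
        defer.returnValue(results)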
@@ -47,14 +47,26 @@ class _EventInternalMetadata(object):


def _event_dict_property(key):
+    # We want to be able to use hasattr with the event dict properties.
+    # However, (on python3) hasattr expects AttributeError to be raised. Hence,
+    # we need to transform the KeyError into an AttributeError
    def getter(self):
-        return self._event_dict[key]
+        try:
+            return self._event_dict[key]
+        except KeyError:
+            raise AttributeError(key)

    def setter(self, v):
-        self._event_dict[key] = v
+        try:
+            self._event_dict[key] = v
+        except KeyError:
+            raise AttributeError(key)

    def delete(self):
-        del self._event_dict[key]
+        try:
+            del self._event_dict[key]
+        except KeyError:
+            raise AttributeError(key)

    return property(
        getter,
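Why the KeyError-to-AttributeError translation matters: python 3's hasattr only treats AttributeError as "attribute missing"; any other exception propagates. A small demonstration (DemoEvent is a hypothetical stand-in for the real event classes):

    class DemoEvent(object):
        sender = _event_dict_property("sender")

        def __init__(self, event_dict):
            self._event_dict = event_dict

    # with the translation above, a missing key reads as a missing attribute;
    # without it, hasattr on python 3 would propagate the KeyError
    assert not hasattr(DemoEvent({}), "sender")
    assert hasattr(DemoEvent({"sender": "@user:example.com"}), "sender")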
@@ -20,6 +20,8 @@ from frozendict import frozendict

import re

+from six import string_types
+
# Split strings on "." but not "\." This uses a negative lookbehind assertion for '\'
# (?<!stuff) matches if the current position in the string is not preceded
# by a match for 'stuff'.

@@ -277,7 +279,7 @@ def serialize_event(e, time_now_ms, as_client_event=True,

    if only_event_fields:
        if (not isinstance(only_event_fields, list) or
-                not all(isinstance(f, basestring) for f in only_event_fields)):
+                not all(isinstance(f, string_types) for f in only_event_fields)):
            raise TypeError("only_event_fields must be a list of strings")
        d = only_fields(d, only_event_fields)
@@ -17,6 +17,8 @@ from synapse.types import EventID, RoomID, UserID
from synapse.api.errors import SynapseError
from synapse.api.constants import EventTypes, Membership

+from six import string_types
+

class EventValidator(object):

@@ -49,7 +51,7 @@ class EventValidator(object):
            strings.append("state_key")

        for s in strings:
-            if not isinstance(getattr(event, s), basestring):
+            if not isinstance(getattr(event, s), string_types):
                raise SynapseError(400, "Not '%s' a string type" % (s,))

        if event.type == EventTypes.Member:

@@ -88,5 +90,5 @@ class EventValidator(object):
        for s in keys:
            if s not in d:
                raise SynapseError(400, "'%s' not in content" % (s,))
-            if not isinstance(d[s], basestring):
+            if not isinstance(d[s], string_types):
                raise SynapseError(400, "Not '%s' a string type" % (s,))
@@ -14,7 +14,10 @@
# limitations under the License.
import logging

-from synapse.api.errors import SynapseError
+import six
+
+from synapse.api.constants import MAX_DEPTH
+from synapse.api.errors import SynapseError, Codes
from synapse.crypto.event_signing import check_event_content_hash
from synapse.events import FrozenEvent
from synapse.events.utils import prune_event

@@ -190,11 +193,23 @@ def event_from_pdu_json(pdu_json, outlier=False):
        FrozenEvent

    Raises:
-        SynapseError: if the pdu is missing required fields
+        SynapseError: if the pdu is missing required fields or is otherwise
+            not a valid matrix event
    """
    # we could probably enforce a bunch of other fields here (room_id, sender,
    # origin, etc etc)
-    assert_params_in_request(pdu_json, ('event_id', 'type'))
+    assert_params_in_request(pdu_json, ('event_id', 'type', 'depth'))
+
+    depth = pdu_json['depth']
+    if not isinstance(depth, six.integer_types):
+        raise SynapseError(400, "Depth %r not an integer" % (depth, ),
+                           Codes.BAD_JSON)
+
+    if depth < 0:
+        raise SynapseError(400, "Depth too small", Codes.BAD_JSON)
+    elif depth > MAX_DEPTH:
+        raise SynapseError(400, "Depth too large", Codes.BAD_JSON)

    event = FrozenEvent(
        pdu_json
    )
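To make the new validation concrete, here is how some hypothetical PDUs now fare (bodies trimmed to just the checked fields):

    # accepted: depth present, an integer, within [0, MAX_DEPTH]
    event_from_pdu_json({"event_id": "$a:hs", "type": "m.room.message", "depth": 5})

    # each of these is now rejected with a SynapseError: the first via
    # assert_params_in_request, the others with 400 / Codes.BAD_JSON
    event_from_pdu_json({"event_id": "$b:hs", "type": "m.room.message"})                # depth missing
    event_from_pdu_json({"event_id": "$c:hs", "type": "m.room.message", "depth": "5"})  # not an integer
    event_from_pdu_json({"event_id": "$d:hs", "type": "m.room.message", "depth": -1})   # too small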
@@ -19,6 +19,8 @@ import itertools
import logging
import random

+from six.moves import range
+
from twisted.internet import defer

from synapse.api.constants import Membership

@@ -30,20 +32,17 @@ from synapse.federation.federation_base import (
    FederationBase,
    event_from_pdu_json,
)
-import synapse.metrics
from synapse.util import logcontext, unwrapFirstError
from synapse.util.caches.expiringcache import ExpiringCache
-from synapse.util.logcontext import make_deferred_yieldable, preserve_fn
+from synapse.util.logcontext import make_deferred_yieldable, run_in_background
from synapse.util.logutils import log_function
from synapse.util.retryutils import NotRetryingDestination

+from prometheus_client import Counter
+
logger = logging.getLogger(__name__)


-# synapse.federation.federation_client is a silly name
-metrics = synapse.metrics.get_metrics_for("synapse.federation.client")
-
-sent_queries_counter = metrics.register_counter("sent_queries", labels=["type"])
+sent_queries_counter = Counter("synapse_federation_client_sent_queries", "", ["type"])


PDU_RETRY_TIME_MS = 1 * 60 * 1000

@@ -106,7 +105,7 @@ class FederationClient(FederationBase):
            a Deferred which will eventually yield a JSON object from the
            response
        """
-        sent_queries_counter.inc(query_type)
+        sent_queries_counter.labels(query_type).inc()

        return self.transport_layer.make_query(
            destination, query_type, args, retry_on_dns_fail=retry_on_dns_fail,

@@ -125,7 +124,7 @@ class FederationClient(FederationBase):
            a Deferred which will eventually yield a JSON object from the
            response
        """
-        sent_queries_counter.inc("client_device_keys")
+        sent_queries_counter.labels("client_device_keys").inc()
        return self.transport_layer.query_client_keys(
            destination, content, timeout
        )

@@ -135,7 +134,7 @@ class FederationClient(FederationBase):
        """Query the device keys for a list of user ids hosted on a remote
        server.
        """
-        sent_queries_counter.inc("user_devices")
+        sent_queries_counter.labels("user_devices").inc()
        return self.transport_layer.query_user_devices(
            destination, user_id, timeout
        )

@@ -152,7 +151,7 @@ class FederationClient(FederationBase):
            a Deferred which will eventually yield a JSON object from the
            response
        """
-        sent_queries_counter.inc("client_one_time_keys")
+        sent_queries_counter.labels("client_one_time_keys").inc()
        return self.transport_layer.claim_client_keys(
            destination, content, timeout
        )

@@ -394,7 +393,7 @@ class FederationClient(FederationBase):
            seen_events = yield self.store.get_events(event_ids, allow_rejected=True)
            signed_events = seen_events.values()
        else:
-            seen_events = yield self.store.have_events(event_ids)
+            seen_events = yield self.store.have_seen_events(event_ids)
            signed_events = []

        failed_to_fetch = set()

@@ -413,11 +412,12 @@ class FederationClient(FederationBase):

        batch_size = 20
        missing_events = list(missing_events)
-        for i in xrange(0, len(missing_events), batch_size):
+        for i in range(0, len(missing_events), batch_size):
            batch = set(missing_events[i:i + batch_size])

            deferreds = [
-                preserve_fn(self.get_pdu)(
+                run_in_background(
+                    self.get_pdu,
                    destinations=random_server_list(),
                    event_id=e_id,
                )
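The prometheus_client idiom differs from the old synapse.metrics one: a labelled counter selects a per-label child via labels() before inc(). For example:

    from prometheus_client import Counter

    sent_queries_counter = Counter(
        "synapse_federation_client_sent_queries", "", ["type"],
    )

    # old API: sent_queries_counter.inc("client_device_keys")
    sent_queries_counter.labels("client_device_keys").inc()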
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -26,30 +27,32 @@ from synapse.federation.federation_base import (

from synapse.federation.persistence import TransactionActions
from synapse.federation.units import Edu, Transaction
-import synapse.metrics
from synapse.types import get_domain_from_id
from synapse.util import async
from synapse.util.caches.response_cache import ResponseCache
from synapse.util.logcontext import make_deferred_yieldable, preserve_fn
from synapse.util.logutils import log_function

+from prometheus_client import Counter
+
+from six import iteritems
+
# when processing incoming transactions, we try to handle multiple rooms in
# parallel, up to this limit.
TRANSACTION_CONCURRENCY_LIMIT = 10

logger = logging.getLogger(__name__)

-# synapse.federation.federation_server is a silly name
-metrics = synapse.metrics.get_metrics_for("synapse.federation.server")
+received_pdus_counter = Counter("synapse_federation_server_received_pdus", "")

-received_pdus_counter = metrics.register_counter("received_pdus")
+received_edus_counter = Counter("synapse_federation_server_received_edus", "")

-received_edus_counter = metrics.register_counter("received_edus")
-
-received_queries_counter = metrics.register_counter("received_queries", labels=["type"])
+received_queries_counter = Counter(
+    "synapse_federation_server_received_queries", "", ["type"]
+)


class FederationServer(FederationBase):

    def __init__(self, hs):
        super(FederationServer, self).__init__(hs)

@@ -65,7 +68,7 @@ class FederationServer(FederationBase):

        # We cache responses to state queries, as they take a while and often
        # come in waves.
-        self._state_resp_cache = ResponseCache(hs, timeout_ms=30000)
+        self._state_resp_cache = ResponseCache(hs, "state_resp", timeout_ms=30000)

    @defer.inlineCallbacks
    @log_function

@@ -129,7 +132,7 @@ class FederationServer(FederationBase):

        logger.debug("[%s] Transaction is new", transaction.transaction_id)

-        received_pdus_counter.inc_by(len(transaction.pdus))
+        received_pdus_counter.inc(len(transaction.pdus))

        pdus_by_room = {}

@@ -212,16 +215,17 @@ class FederationServer(FederationBase):
        if not in_room:
            raise AuthError(403, "Host not in room.")

-        result = self._state_resp_cache.get((room_id, event_id))
-        if not result:
-            with (yield self._server_linearizer.queue((origin, room_id))):
-                d = self._state_resp_cache.set(
-                    (room_id, event_id),
-                    preserve_fn(self._on_context_state_request_compute)(room_id, event_id)
-                )
-                resp = yield make_deferred_yieldable(d)
-        else:
-            resp = yield make_deferred_yieldable(result)
+        # we grab the linearizer to protect ourselves from servers which hammer
+        # us. In theory we might already have the response to this query
+        # in the cache so we could return it without waiting for the linearizer
+        # - but that's non-trivial to get right, and anyway somewhat defeats
+        # the point of the linearizer.
+        with (yield self._server_linearizer.queue((origin, room_id))):
+            resp = yield self._state_resp_cache.wrap(
+                (room_id, event_id),
+                self._on_context_state_request_compute,
+                room_id, event_id,
+            )

        defer.returnValue((200, resp))

@@ -289,7 +293,7 @@ class FederationServer(FederationBase):

    @defer.inlineCallbacks
    def on_query_request(self, query_type, args):
-        received_queries_counter.inc(query_type)
+        received_queries_counter.labels(query_type).inc()
        resp = yield self.registry.on_query(query_type, args)
        defer.returnValue((200, resp))

@@ -425,9 +429,9 @@ class FederationServer(FederationBase):
            "Claimed one-time-keys: %s",
            ",".join((
                "%s for %s:%s" % (key_id, user_id, device_id)
-                for user_id, user_keys in json_result.iteritems()
-                for device_id, device_keys in user_keys.iteritems()
-                for key_id, _ in device_keys.iteritems()
+                for user_id, user_keys in iteritems(json_result)
+                for device_id, device_keys in iteritems(user_keys)
+                for key_id, _ in iteritems(device_keys)
            )),
        )

@@ -494,13 +498,33 @@ class FederationServer(FederationBase):
    def _handle_received_pdu(self, origin, pdu):
        """ Process a PDU received in a federation /send/ transaction.

+        If the event is invalid, then this method throws a FederationError.
+        (The error will then be logged and sent back to the sender (which
+        probably won't do anything with it), and other events in the
+        transaction will be processed as normal).
+
+        It is likely that we'll then receive other events which refer to
+        this rejected_event in their prev_events, etc. When that happens,
+        we'll attempt to fetch the rejected event again, which will presumably
+        fail, so those second-generation events will also get rejected.
+
+        Eventually, we get to the point where there are more than 10 events
+        between any new events and the original rejected event. Since we
+        only try to backfill 10 events deep on received pdu, we then accept the
+        new event, possibly introducing a discontinuity in the DAG, with new
+        forward extremities, so normal service is approximately returned,
+        until we try to backfill across the discontinuity.
+
        Args:
            origin (str): server which sent the pdu
            pdu (FrozenEvent): received pdu

        Returns (Deferred): completes with None
-        Raises: FederationError if the signatures / hash do not match
-        """
+
+        Raises: FederationError if the signatures / hash do not match, or
+            if the event was unacceptable for any other reason (eg, too large,
+            too many prev_events, couldn't find the prev_events)
+        """
        # check that it's actually being sent from a valid destination to
        # workaround bug #1753 in 0.18.5 and 0.18.6
        if origin != get_domain_from_id(pdu.event_id):
@@ -33,19 +33,18 @@ from .units import Edu

from synapse.storage.presence import UserPresenceState
from synapse.util.metrics import Measure
-import synapse.metrics
+from synapse.metrics import LaterGauge

from blist import sorteddict
from collections import namedtuple

import logging

+from six import itervalues, iteritems
+
logger = logging.getLogger(__name__)


-metrics = synapse.metrics.get_metrics_for(__name__)
-
-
class FederationRemoteSendQueue(object):
    """A drop in replacement for TransactionQueue"""

@@ -75,10 +74,8 @@ class FederationRemoteSendQueue(object):
        # lambda binds to the queue rather than to the name of the queue which
        # changes. ARGH.
        def register(name, queue):
-            metrics.register_callback(
-                queue_name + "_size",
-                lambda: len(queue),
-            )
+            LaterGauge("synapse_federation_send_queue_%s_size" % (queue_name,),
+                       "", lambda: len(queue))

        for queue_name in [
            "presence_map", "presence_changed", "keyed_edu", "keyed_edu_changed",

@@ -122,7 +119,7 @@ class FederationRemoteSendQueue(object):

        user_ids = set(
            user_id
-            for uids in self.presence_changed.itervalues()
+            for uids in itervalues(self.presence_changed)
            for user_id in uids
        )

@@ -276,7 +273,7 @@ class FederationRemoteSendQueue(object):
        # stream position.
        keyed_edus = {self.keyed_edu_changed[k]: k for k in keys[i:j]}

-        for ((destination, edu_key), pos) in keyed_edus.iteritems():
+        for ((destination, edu_key), pos) in iteritems(keyed_edus):
            rows.append((pos, KeyedEduRow(
                key=edu_key,
                edu=self.keyed_edu[(destination, edu_key)],

@@ -309,7 +306,7 @@ class FederationRemoteSendQueue(object):
        j = keys.bisect_right(to_token) + 1
        device_messages = {self.device_messages[k]: k for k in keys[i:j]}

-        for (destination, pos) in device_messages.iteritems():
+        for (destination, pos) in iteritems(device_messages):
            rows.append((pos, DeviceRow(
                destination=destination,
            )))

@@ -528,19 +525,19 @@ def process_rows_for_federation(transaction_queue, rows):
    if buff.presence:
        transaction_queue.send_presence(buff.presence)

-    for destination, edu_map in buff.keyed_edus.iteritems():
+    for destination, edu_map in iteritems(buff.keyed_edus):
        for key, edu in edu_map.items():
            transaction_queue.send_edu(
                edu.destination, edu.edu_type, edu.content, key=key,
            )

-    for destination, edu_list in buff.edus.iteritems():
+    for destination, edu_list in iteritems(buff.edus):
        for edu in edu_list:
            transaction_queue.send_edu(
                edu.destination, edu.edu_type, edu.content, key=None,
            )

-    for destination, failure_list in buff.failures.iteritems():
+    for destination, failure_list in iteritems(buff.failures):
        for failure in failure_list:
            transaction_queue.send_failure(destination, failure)
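LaterGauge (from synapse.metrics) reports a value computed at scrape time rather than a counter updated inline, which suits queue sizes. The signature below is inferred from the call sites in this changeset, not from the library documentation:

    pending = {}

    LaterGauge(
        "synapse_federation_transaction_queue_pending_destinations",
        "",                     # description
        [],                     # label names
        lambda: len(pending),   # evaluated when prometheus scrapes
    )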
|
|
@ -26,23 +26,23 @@ from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter
|
|||
from synapse.util.metrics import measure_func
|
||||
from synapse.handlers.presence import format_user_presence_state, get_interested_remotes
|
||||
import synapse.metrics
|
||||
from synapse.metrics import LaterGauge
|
||||
from synapse.metrics import (
|
||||
sent_edus_counter,
|
||||
sent_transactions_counter,
|
||||
events_processed_counter,
|
||||
)
|
||||
|
||||
from prometheus_client import Counter
|
||||
|
||||
import logging
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
metrics = synapse.metrics.get_metrics_for(__name__)
|
||||
|
||||
client_metrics = synapse.metrics.get_metrics_for("synapse.federation.client")
|
||||
sent_pdus_destination_dist = client_metrics.register_distribution(
|
||||
"sent_pdu_destinations"
|
||||
sent_pdus_destination_dist = Counter(
|
||||
"synapse_federation_transaction_queue_sent_pdu_destinations", ""
|
||||
)
|
||||
sent_edus_counter = client_metrics.register_counter("sent_edus")
|
||||
|
||||
sent_transactions_counter = client_metrics.register_counter("sent_transactions")
|
||||
|
||||
events_processed_counter = client_metrics.register_counter("events_processed")
|
||||
|
||||
|
||||
class TransactionQueue(object):
|
||||
|
@ -69,8 +69,10 @@ class TransactionQueue(object):
|
|||
# done
|
||||
self.pending_transactions = {}
|
||||
|
||||
metrics.register_callback(
|
||||
"pending_destinations",
|
||||
LaterGauge(
|
||||
"synapse_federation_transaction_queue_pending_destinations",
|
||||
"",
|
||||
[],
|
||||
lambda: len(self.pending_transactions),
|
||||
)
|
||||
|
||||
|
@ -94,12 +96,16 @@ class TransactionQueue(object):
|
|||
# Map of destination -> (edu_type, key) -> Edu
|
||||
self.pending_edus_keyed_by_dest = edus_keyed = {}
|
||||
|
||||
metrics.register_callback(
|
||||
"pending_pdus",
|
||||
LaterGauge(
|
||||
"synapse_federation_transaction_queue_pending_pdus",
|
||||
"",
|
||||
[],
|
||||
lambda: sum(map(len, pdus.values())),
|
||||
)
|
||||
metrics.register_callback(
|
||||
"pending_edus",
|
||||
LaterGauge(
|
||||
"synapse_federation_transaction_queue_pending_edus",
|
||||
"",
|
||||
[],
|
||||
lambda: (
|
||||
sum(map(len, edus.values()))
|
||||
+ sum(map(len, presence.values()))
|
||||
|
@ -169,7 +175,7 @@ class TransactionQueue(object):
|
|||
while True:
|
||||
last_token = yield self.store.get_federation_out_pos("events")
|
||||
next_token, events = yield self.store.get_all_new_events_stream(
|
||||
last_token, self._last_poked_id, limit=20,
|
||||
last_token, self._last_poked_id, limit=100,
|
||||
)
|
||||
|
||||
logger.debug("Handling %s -> %s", last_token, next_token)
|
||||
|
@ -177,24 +183,33 @@ class TransactionQueue(object):
|
|||
if not events and next_token >= self._last_poked_id:
|
||||
break
|
||||
|
||||
for event in events:
|
||||
@defer.inlineCallbacks
|
||||
def handle_event(event):
|
||||
# Only send events for this server.
|
||||
send_on_behalf_of = event.internal_metadata.get_send_on_behalf_of()
|
||||
is_mine = self.is_mine_id(event.event_id)
|
||||
if not is_mine and send_on_behalf_of is None:
|
||||
continue
|
||||
return
|
||||
|
||||
try:
|
||||
# Get the state from before the event.
|
||||
# We need to make sure that this is the state from before
|
||||
# the event and not from after it.
|
||||
# Otherwise if the last member on a server in a room is
|
||||
# banned then it won't receive the event because it won't
|
||||
# be in the room after the ban.
|
||||
destinations = yield self.state.get_current_hosts_in_room(
|
||||
event.room_id, latest_event_ids=[
|
||||
prev_id for prev_id, _ in event.prev_events
|
||||
],
|
||||
)
|
||||
except Exception:
|
||||
logger.exception(
|
||||
"Failed to calculate hosts in room for event: %s",
|
||||
event.event_id,
|
||||
)
|
||||
return
|
||||
|
||||
# Get the state from before the event.
|
||||
# We need to make sure that this is the state from before
|
||||
# the event and not from after it.
|
||||
# Otherwise if the last member on a server in a room is
|
||||
# banned then it won't receive the event because it won't
|
||||
# be in the room after the ban.
|
||||
destinations = yield self.state.get_current_hosts_in_room(
|
||||
event.room_id, latest_event_ids=[
|
||||
prev_id for prev_id, _ in event.prev_events
|
||||
],
|
||||
)
|
||||
destinations = set(destinations)
|
||||
|
||||
if send_on_behalf_of is not None:
|
||||
|
@@ -207,12 +222,41 @@ class TransactionQueue(object):

                    self._send_pdu(event, destinations)

-                events_processed_counter.inc_by(len(events))
+                @defer.inlineCallbacks
+                def handle_room_events(events):
+                    for event in events:
+                        yield handle_event(event)
+
+                events_by_room = {}
+                for event in events:
+                    events_by_room.setdefault(event.room_id, []).append(event)
+
+                yield logcontext.make_deferred_yieldable(defer.gatherResults(
+                    [
+                        logcontext.run_in_background(handle_room_events, evs)
+                        for evs in events_by_room.itervalues()
+                    ],
+                    consumeErrors=True
+                ))

                yield self.store.update_federation_out_pos(
                    "events", next_token
                )

+                if events:
+                    now = self.clock.time_msec()
+                    ts = yield self.store.get_received_ts(events[-1].event_id)
+
+                    synapse.metrics.event_processing_lag.labels(
+                        "federation_sender").set(now - ts)
+                    synapse.metrics.event_processing_last_ts.labels(
+                        "federation_sender").set(ts)
+
+                    events_processed_counter.inc(len(events))
+
+                    synapse.metrics.event_processing_positions.labels(
+                        "federation_sender").set(next_token)

            finally:
                self._is_processing = False
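The fan-out above groups pending events by room: ordering is preserved within a room (handle_room_events yields on each event in turn) while distinct rooms proceed concurrently, and consumeErrors=True stops one room's failure from cancelling its siblings. A rough asyncio analogue of the same grouping-and-gather pattern (illustrative names, not Synapse APIs):

    import asyncio

    async def handle_event(event):
        print("sent", event["id"], "for room", event["room_id"])

    async def handle_room_events(events):
        # strictly in order within one room
        for event in events:
            await handle_event(event)

    async def main(events):
        by_room = {}
        for event in events:
            by_room.setdefault(event["room_id"], []).append(event)
        # rooms run concurrently; return_exceptions plays the role of consumeErrors
        await asyncio.gather(
            *(handle_room_events(evs) for evs in by_room.values()),
            return_exceptions=True,
        )

    asyncio.run(main([
        {"id": 1, "room_id": "!a"},
        {"id": 2, "room_id": "!a"},
        {"id": 3, "room_id": "!b"},
    ]))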
@@ -234,7 +278,7 @@ class TransactionQueue(object):
        if not destinations:
            return

-        sent_pdus_destination_dist.inc_by(len(destinations))
+        sent_pdus_destination_dist.inc(len(destinations))

        for destination in destinations:
            self.pending_pdus_by_dest.setdefault(destination, []).append(
@@ -282,6 +326,8 @@ class TransactionQueue(object):
                    break

                yield self._process_presence_inner(states_map.values())
+            except Exception:
+                logger.exception("Error sending presence states to servers")
            finally:
                self._processing_pending_presence = False
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -20,6 +21,7 @@ from synapse.api.urls import FEDERATION_PREFIX as PREFIX
from synapse.util.logutils import log_function

import logging
+import urllib


logger = logging.getLogger(__name__)
@@ -49,7 +51,7 @@ class TransportLayerClient(object):
        logger.debug("get_room_state dest=%s, room=%s",
                     destination, room_id)

-        path = PREFIX + "/state/%s/" % room_id
+        path = _create_path(PREFIX, "/state/%s/", room_id)
        return self.client.get_json(
            destination, path=path, args={"event_id": event_id},
        )

@@ -71,7 +73,7 @@ class TransportLayerClient(object):
        logger.debug("get_room_state_ids dest=%s, room=%s",
                     destination, room_id)

-        path = PREFIX + "/state_ids/%s/" % room_id
+        path = _create_path(PREFIX, "/state_ids/%s/", room_id)
        return self.client.get_json(
            destination, path=path, args={"event_id": event_id},
        )

@@ -93,7 +95,7 @@ class TransportLayerClient(object):
        logger.debug("get_pdu dest=%s, event_id=%s",
                     destination, event_id)

-        path = PREFIX + "/event/%s/" % (event_id, )
+        path = _create_path(PREFIX, "/event/%s/", event_id)
        return self.client.get_json(destination, path=path, timeout=timeout)

    @log_function

@@ -119,7 +121,7 @@ class TransportLayerClient(object):
            # TODO: raise?
            return

-        path = PREFIX + "/backfill/%s/" % (room_id,)
+        path = _create_path(PREFIX, "/backfill/%s/", room_id)

        args = {
            "v": event_tuples,
@@ -157,9 +159,11 @@ class TransportLayerClient(object):
        # generated by the json_data_callback.
        json_data = transaction.get_dict()

+        path = _create_path(PREFIX, "/send/%s/", transaction.transaction_id)
+
        response = yield self.client.put_json(
            transaction.destination,
-            path=PREFIX + "/send/%s/" % transaction.transaction_id,
+            path=path,
            data=json_data,
            json_data_callback=json_data_callback,
            long_retries=True,

@@ -177,7 +181,7 @@ class TransportLayerClient(object):
    @log_function
    def make_query(self, destination, query_type, args, retry_on_dns_fail,
                   ignore_backoff=False):
-        path = PREFIX + "/query/%s" % query_type
+        path = _create_path(PREFIX, "/query/%s", query_type)

        content = yield self.client.get_json(
            destination=destination,
@@ -222,7 +226,7 @@ class TransportLayerClient(object):
                "make_membership_event called with membership='%s', must be one of %s" %
                (membership, ",".join(valid_memberships))
            )
-        path = PREFIX + "/make_%s/%s/%s" % (membership, room_id, user_id)
+        path = _create_path(PREFIX, "/make_%s/%s/%s", membership, room_id, user_id)

        ignore_backoff = False
        retry_on_dns_fail = False

@@ -248,7 +252,7 @@ class TransportLayerClient(object):
    @defer.inlineCallbacks
    @log_function
    def send_join(self, destination, room_id, event_id, content):
-        path = PREFIX + "/send_join/%s/%s" % (room_id, event_id)
+        path = _create_path(PREFIX, "/send_join/%s/%s", room_id, event_id)

        response = yield self.client.put_json(
            destination=destination,

@@ -261,7 +265,7 @@ class TransportLayerClient(object):
    @defer.inlineCallbacks
    @log_function
    def send_leave(self, destination, room_id, event_id, content):
-        path = PREFIX + "/send_leave/%s/%s" % (room_id, event_id)
+        path = _create_path(PREFIX, "/send_leave/%s/%s", room_id, event_id)

        response = yield self.client.put_json(
            destination=destination,

@@ -280,7 +284,7 @@ class TransportLayerClient(object):
    @defer.inlineCallbacks
    @log_function
    def send_invite(self, destination, room_id, event_id, content):
-        path = PREFIX + "/invite/%s/%s" % (room_id, event_id)
+        path = _create_path(PREFIX, "/invite/%s/%s", room_id, event_id)

        response = yield self.client.put_json(
            destination=destination,

@@ -322,7 +326,7 @@ class TransportLayerClient(object):
    @defer.inlineCallbacks
    @log_function
    def exchange_third_party_invite(self, destination, room_id, event_dict):
-        path = PREFIX + "/exchange_third_party_invite/%s" % (room_id,)
+        path = _create_path(PREFIX, "/exchange_third_party_invite/%s", room_id,)

        response = yield self.client.put_json(
            destination=destination,
@@ -335,7 +339,7 @@ class TransportLayerClient(object):
    @defer.inlineCallbacks
    @log_function
    def get_event_auth(self, destination, room_id, event_id):
-        path = PREFIX + "/event_auth/%s/%s" % (room_id, event_id)
+        path = _create_path(PREFIX, "/event_auth/%s/%s", room_id, event_id)

        content = yield self.client.get_json(
            destination=destination,

@@ -347,7 +351,7 @@ class TransportLayerClient(object):
    @defer.inlineCallbacks
    @log_function
    def send_query_auth(self, destination, room_id, event_id, content):
-        path = PREFIX + "/query_auth/%s/%s" % (room_id, event_id)
+        path = _create_path(PREFIX, "/query_auth/%s/%s", room_id, event_id)

        content = yield self.client.post_json(
            destination=destination,

@@ -409,7 +413,7 @@ class TransportLayerClient(object):
        Returns:
            A dict containing the device keys.
        """
-        path = PREFIX + "/user/devices/" + user_id
+        path = _create_path(PREFIX, "/user/devices/%s", user_id)

        content = yield self.client.get_json(
            destination=destination,

@@ -459,7 +463,7 @@ class TransportLayerClient(object):
    @log_function
    def get_missing_events(self, destination, room_id, earliest_events,
                           latest_events, limit, min_depth, timeout):
-        path = PREFIX + "/get_missing_events/%s" % (room_id,)
+        path = _create_path(PREFIX, "/get_missing_events/%s", room_id,)

        content = yield self.client.post_json(
            destination=destination,
@@ -479,7 +483,7 @@ class TransportLayerClient(object):
    def get_group_profile(self, destination, group_id, requester_user_id):
        """Get a group profile
        """
-        path = PREFIX + "/groups/%s/profile" % (group_id,)
+        path = _create_path(PREFIX, "/groups/%s/profile", group_id,)

        return self.client.get_json(
            destination=destination,

@@ -498,7 +502,7 @@ class TransportLayerClient(object):
            requester_user_id (str)
            content (dict): The new profile of the group
        """
-        path = PREFIX + "/groups/%s/profile" % (group_id,)
+        path = _create_path(PREFIX, "/groups/%s/profile", group_id,)

        return self.client.post_json(
            destination=destination,

@@ -512,7 +516,7 @@ class TransportLayerClient(object):
    def get_group_summary(self, destination, group_id, requester_user_id):
        """Get a group summary
        """
-        path = PREFIX + "/groups/%s/summary" % (group_id,)
+        path = _create_path(PREFIX, "/groups/%s/summary", group_id,)

        return self.client.get_json(
            destination=destination,

@@ -525,7 +529,7 @@ class TransportLayerClient(object):
    def get_rooms_in_group(self, destination, group_id, requester_user_id):
        """Get all rooms in a group
        """
-        path = PREFIX + "/groups/%s/rooms" % (group_id,)
+        path = _create_path(PREFIX, "/groups/%s/rooms", group_id,)

        return self.client.get_json(
            destination=destination,

@@ -538,7 +542,7 @@ class TransportLayerClient(object):
                          content):
        """Add a room to a group
        """
-        path = PREFIX + "/groups/%s/room/%s" % (group_id, room_id,)
+        path = _create_path(PREFIX, "/groups/%s/room/%s", group_id, room_id,)

        return self.client.post_json(
            destination=destination,
@@ -552,7 +556,10 @@ class TransportLayerClient(object):
                                 config_key, content):
        """Update room in group
        """
-        path = PREFIX + "/groups/%s/room/%s/config/%s" % (group_id, room_id, config_key,)
+        path = _create_path(
+            PREFIX, "/groups/%s/room/%s/config/%s",
+            group_id, room_id, config_key,
+        )

        return self.client.post_json(
            destination=destination,

@@ -565,7 +572,7 @@ class TransportLayerClient(object):
    def remove_room_from_group(self, destination, group_id, requester_user_id, room_id):
        """Remove a room from a group
        """
-        path = PREFIX + "/groups/%s/room/%s" % (group_id, room_id,)
+        path = _create_path(PREFIX, "/groups/%s/room/%s", group_id, room_id,)

        return self.client.delete_json(
            destination=destination,

@@ -578,7 +585,7 @@ class TransportLayerClient(object):
    def get_users_in_group(self, destination, group_id, requester_user_id):
        """Get users in a group
        """
-        path = PREFIX + "/groups/%s/users" % (group_id,)
+        path = _create_path(PREFIX, "/groups/%s/users", group_id,)

        return self.client.get_json(
            destination=destination,

@@ -591,7 +598,7 @@ class TransportLayerClient(object):
    def get_invited_users_in_group(self, destination, group_id, requester_user_id):
        """Get users that have been invited to a group
        """
-        path = PREFIX + "/groups/%s/invited_users" % (group_id,)
+        path = _create_path(PREFIX, "/groups/%s/invited_users", group_id,)

        return self.client.get_json(
            destination=destination,
@@ -604,7 +611,23 @@ class TransportLayerClient(object):
    def accept_group_invite(self, destination, group_id, user_id, content):
        """Accept a group invite
        """
-        path = PREFIX + "/groups/%s/users/%s/accept_invite" % (group_id, user_id)
+        path = _create_path(
+            PREFIX, "/groups/%s/users/%s/accept_invite",
+            group_id, user_id,
+        )

        return self.client.post_json(
            destination=destination,
            path=path,
            data=content,
            ignore_backoff=True,
        )

+    @log_function
+    def join_group(self, destination, group_id, user_id, content):
+        """Attempts to join a group
+        """
+        path = _create_path(PREFIX, "/groups/%s/users/%s/join", group_id, user_id)
+
+        return self.client.post_json(
+            destination=destination,

@@ -617,7 +640,7 @@ class TransportLayerClient(object):
    def invite_to_group(self, destination, group_id, user_id, requester_user_id, content):
        """Invite a user to a group
        """
-        path = PREFIX + "/groups/%s/users/%s/invite" % (group_id, user_id)
+        path = _create_path(PREFIX, "/groups/%s/users/%s/invite", group_id, user_id)

        return self.client.post_json(
            destination=destination,

@@ -633,7 +656,7 @@ class TransportLayerClient(object):
            invited.
        """

-        path = PREFIX + "/groups/local/%s/users/%s/invite" % (group_id, user_id)
+        path = _create_path(PREFIX, "/groups/local/%s/users/%s/invite", group_id, user_id)

        return self.client.post_json(
            destination=destination,

@@ -647,7 +670,7 @@ class TransportLayerClient(object):
                               user_id, content):
        """Remove a user from a group
        """
-        path = PREFIX + "/groups/%s/users/%s/remove" % (group_id, user_id)
+        path = _create_path(PREFIX, "/groups/%s/users/%s/remove", group_id, user_id)

        return self.client.post_json(
            destination=destination,

@@ -664,7 +687,7 @@ class TransportLayerClient(object):
            kicked from the group.
        """

-        path = PREFIX + "/groups/local/%s/users/%s/remove" % (group_id, user_id)
+        path = _create_path(PREFIX, "/groups/local/%s/users/%s/remove", group_id, user_id)

        return self.client.post_json(
            destination=destination,
@@ -679,7 +702,7 @@ class TransportLayerClient(object):
            the attestations
        """

-        path = PREFIX + "/groups/%s/renew_attestation/%s" % (group_id, user_id)
+        path = _create_path(PREFIX, "/groups/%s/renew_attestation/%s", group_id, user_id)

        return self.client.post_json(
            destination=destination,

@@ -694,11 +717,12 @@ class TransportLayerClient(object):
        """Update a room entry in a group summary
        """
        if category_id:
-            path = PREFIX + "/groups/%s/summary/categories/%s/rooms/%s" % (
+            path = _create_path(
+                PREFIX, "/groups/%s/summary/categories/%s/rooms/%s",
                group_id, category_id, room_id,
            )
        else:
-            path = PREFIX + "/groups/%s/summary/rooms/%s" % (group_id, room_id,)
+            path = _create_path(PREFIX, "/groups/%s/summary/rooms/%s", group_id, room_id,)

        return self.client.post_json(
            destination=destination,

@@ -714,11 +738,12 @@ class TransportLayerClient(object):
        """Delete a room entry in a group summary
        """
        if category_id:
-            path = PREFIX + "/groups/%s/summary/categories/%s/rooms/%s" % (
+            path = _create_path(
+                PREFIX, "/groups/%s/summary/categories/%s/rooms/%s",
                group_id, category_id, room_id,
            )
        else:
-            path = PREFIX + "/groups/%s/summary/rooms/%s" % (group_id, room_id,)
+            path = _create_path(PREFIX, "/groups/%s/summary/rooms/%s", group_id, room_id,)

        return self.client.delete_json(
            destination=destination,
@@ -731,7 +756,7 @@ class TransportLayerClient(object):
    def get_group_categories(self, destination, group_id, requester_user_id):
        """Get all categories in a group
        """
-        path = PREFIX + "/groups/%s/categories" % (group_id,)
+        path = _create_path(PREFIX, "/groups/%s/categories", group_id,)

        return self.client.get_json(
            destination=destination,

@@ -744,7 +769,7 @@ class TransportLayerClient(object):
    def get_group_category(self, destination, group_id, requester_user_id, category_id):
        """Get category info in a group
        """
-        path = PREFIX + "/groups/%s/categories/%s" % (group_id, category_id,)
+        path = _create_path(PREFIX, "/groups/%s/categories/%s", group_id, category_id,)

        return self.client.get_json(
            destination=destination,

@@ -758,7 +783,7 @@ class TransportLayerClient(object):
                              content):
        """Update a category in a group
        """
-        path = PREFIX + "/groups/%s/categories/%s" % (group_id, category_id,)
+        path = _create_path(PREFIX, "/groups/%s/categories/%s", group_id, category_id,)

        return self.client.post_json(
            destination=destination,

@@ -773,7 +798,7 @@ class TransportLayerClient(object):
                              category_id):
        """Delete a category in a group
        """
-        path = PREFIX + "/groups/%s/categories/%s" % (group_id, category_id,)
+        path = _create_path(PREFIX, "/groups/%s/categories/%s", group_id, category_id,)

        return self.client.delete_json(
            destination=destination,

@@ -786,7 +811,7 @@ class TransportLayerClient(object):
    def get_group_roles(self, destination, group_id, requester_user_id):
        """Get all roles in a group
        """
-        path = PREFIX + "/groups/%s/roles" % (group_id,)
+        path = _create_path(PREFIX, "/groups/%s/roles", group_id,)

        return self.client.get_json(
            destination=destination,

@@ -799,7 +824,7 @@ class TransportLayerClient(object):
    def get_group_role(self, destination, group_id, requester_user_id, role_id):
        """Get a role's info
        """
-        path = PREFIX + "/groups/%s/roles/%s" % (group_id, role_id,)
+        path = _create_path(PREFIX, "/groups/%s/roles/%s", group_id, role_id,)

        return self.client.get_json(
            destination=destination,

@@ -813,7 +838,7 @@ class TransportLayerClient(object):
                          content):
        """Update a role in a group
        """
-        path = PREFIX + "/groups/%s/roles/%s" % (group_id, role_id,)
+        path = _create_path(PREFIX, "/groups/%s/roles/%s", group_id, role_id,)

        return self.client.post_json(
            destination=destination,

@@ -827,7 +852,7 @@ class TransportLayerClient(object):
    def delete_group_role(self, destination, group_id, requester_user_id, role_id):
        """Delete a role in a group
        """
-        path = PREFIX + "/groups/%s/roles/%s" % (group_id, role_id,)
+        path = _create_path(PREFIX, "/groups/%s/roles/%s", group_id, role_id,)

        return self.client.delete_json(
            destination=destination,
@@ -842,11 +867,12 @@ class TransportLayerClient(object):
        """Update a user's entry in a group summary
        """
        if role_id:
-            path = PREFIX + "/groups/%s/summary/roles/%s/users/%s" % (
+            path = _create_path(
+                PREFIX, "/groups/%s/summary/roles/%s/users/%s",
                group_id, role_id, user_id,
            )
        else:
-            path = PREFIX + "/groups/%s/summary/users/%s" % (group_id, user_id,)
+            path = _create_path(PREFIX, "/groups/%s/summary/users/%s", group_id, user_id,)

        return self.client.post_json(
            destination=destination,

@@ -856,17 +882,33 @@ class TransportLayerClient(object):
            ignore_backoff=True,
        )

+    @log_function
+    def set_group_join_policy(self, destination, group_id, requester_user_id,
+                              content):
+        """Sets the join policy for a group
+        """
+        path = _create_path(PREFIX, "/groups/%s/settings/m.join_policy", group_id,)
+
+        return self.client.put_json(
+            destination=destination,
+            path=path,
+            args={"requester_user_id": requester_user_id},
+            data=content,
+            ignore_backoff=True,
+        )
+
    @log_function
    def delete_group_summary_user(self, destination, group_id, requester_user_id,
                                  user_id, role_id):
        """Delete a user's entry in a group summary
        """
        if role_id:
-            path = PREFIX + "/groups/%s/summary/roles/%s/users/%s" % (
+            path = _create_path(
+                PREFIX, "/groups/%s/summary/roles/%s/users/%s",
                group_id, role_id, user_id,
            )
        else:
-            path = PREFIX + "/groups/%s/summary/users/%s" % (group_id, user_id,)
+            path = _create_path(PREFIX, "/groups/%s/summary/users/%s", group_id, user_id,)

        return self.client.delete_json(
            destination=destination,
@@ -889,3 +931,22 @@ class TransportLayerClient(object):
            data=content,
            ignore_backoff=True,
        )
+
+
+def _create_path(prefix, path, *args):
+    """Creates a path from the prefix, path template and args. Ensures that
+    all args are url encoded.
+
+    Example:
+
+        _create_path(PREFIX, "/event/%s/", event_id)
+
+    Args:
+        prefix (str)
+        path (str): String template for the path
+        args ([str]): Args to insert into path. Each arg will be url encoded
+
+    Returns:
+        str
+    """
+    return prefix + path % tuple(urllib.quote(arg, "") for arg in args)
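The helper exists purely for escaping: without it, an identifier containing "/" or ":" would corrupt the request path. A quick demonstration (Python 2 urllib.quote, matching the code above; urllib.parse.quote is the Python 3 equivalent):

    import urllib

    PREFIX = "/_matrix/federation/v1"

    def _create_path(prefix, path, *args):
        return prefix + path % tuple(urllib.quote(arg, "") for arg in args)

    print(_create_path(PREFIX, "/event/%s/", "$abc/def:example.org"))
    # /_matrix/federation/v1/event/%24abc%2Fdef%3Aexample.org/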
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -24,7 +25,7 @@ from synapse.http.servlet import (
)
from synapse.util.ratelimitutils import FederationRateLimiter
from synapse.util.versionstring import get_version_string
-from synapse.util.logcontext import preserve_fn
+from synapse.util.logcontext import run_in_background
from synapse.types import ThirdPartyInstanceID, get_domain_from_id

import functools

@@ -93,12 +94,6 @@ class Authenticator(object):
            "signatures": {},
        }

-        if (
-            self.federation_domain_whitelist is not None and
-            self.server_name not in self.federation_domain_whitelist
-        ):
-            raise FederationDeniedError(self.server_name)
-
        if content is not None:
            json_request["content"] = content

@@ -137,6 +132,12 @@ class Authenticator(object):
        json_request["origin"] = origin
        json_request["signatures"].setdefault(origin, {})[key] = sig

+        if (
+            self.federation_domain_whitelist is not None and
+            origin not in self.federation_domain_whitelist
+        ):
+            raise FederationDeniedError(origin)
+
        if not json_request["signatures"]:
            raise NoAuthenticationError(
                401, "Missing Authorization headers", Codes.UNAUTHORIZED,

@@ -151,11 +152,18 @@ class Authenticator(object):
        # alive
        retry_timings = yield self.store.get_destination_retry_timings(origin)
        if retry_timings and retry_timings["retry_last_ts"]:
-            logger.info("Marking origin %r as up", origin)
-            preserve_fn(self.store.set_destination_retry_timings)(origin, 0, 0)
+            run_in_background(self._reset_retry_timings, origin)

        defer.returnValue(origin)

+    @defer.inlineCallbacks
+    def _reset_retry_timings(self, origin):
+        try:
+            logger.info("Marking origin %r as up", origin)
+            yield self.store.set_destination_retry_timings(origin, 0, 0)
+        except Exception:
+            logger.exception("Error resetting retry timings on %s", origin)
+

class BaseFederationServlet(object):
    REQUIRE_AUTH = True
@@ -802,6 +810,23 @@ class FederationGroupsAcceptInviteServlet(BaseFederationServlet):
        defer.returnValue((200, new_content))


+class FederationGroupsJoinServlet(BaseFederationServlet):
+    """Attempt to join a group
+    """
+    PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/join$"
+
+    @defer.inlineCallbacks
+    def on_POST(self, origin, content, query, group_id, user_id):
+        if get_domain_from_id(user_id) != origin:
+            raise SynapseError(403, "user_id doesn't match origin")
+
+        new_content = yield self.handler.join_group(
+            group_id, user_id, content,
+        )
+
+        defer.returnValue((200, new_content))
+
+
class FederationGroupsRemoveUserServlet(BaseFederationServlet):
    """Leave or kick a user from the group
    """

@@ -1124,6 +1149,24 @@ class FederationGroupsBulkPublicisedServlet(BaseFederationServlet):
        defer.returnValue((200, resp))


+class FederationGroupsSettingJoinPolicyServlet(BaseFederationServlet):
+    """Sets whether a group is joinable without an invite or knock
+    """
+    PATH = "/groups/(?P<group_id>[^/]*)/settings/m.join_policy$"
+
+    @defer.inlineCallbacks
+    def on_PUT(self, origin, content, query, group_id):
+        requester_user_id = parse_string_from_args(query, "requester_user_id")
+        if get_domain_from_id(requester_user_id) != origin:
+            raise SynapseError(403, "requester_user_id doesn't match origin")
+
+        new_content = yield self.handler.set_group_join_policy(
+            group_id, requester_user_id, content
+        )
+
+        defer.returnValue((200, new_content))
+
+
FEDERATION_SERVLET_CLASSES = (
    FederationSendServlet,
    FederationPullServlet,

@@ -1163,6 +1206,7 @@ GROUP_SERVER_SERVLET_CLASSES = (
    FederationGroupsInvitedUsersServlet,
    FederationGroupsInviteServlet,
    FederationGroupsAcceptInviteServlet,
+    FederationGroupsJoinServlet,
    FederationGroupsRemoveUserServlet,
    FederationGroupsSummaryRoomsServlet,
    FederationGroupsCategoriesServlet,

@@ -1172,6 +1216,7 @@ GROUP_SERVER_SERVLET_CLASSES = (
    FederationGroupsSummaryUsersServlet,
    FederationGroupsAddRoomsServlet,
    FederationGroupsAddRoomsConfigServlet,
+    FederationGroupsSettingJoinPolicyServlet,
)
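Each servlet's PATH is an ordinary regex with named groups, matched against the federation request path. For illustration:

    import re

    PATH = "/groups/(?P<group_id>[^/]*)/users/(?P<user_id>[^/]*)/join$"

    m = re.search(PATH, "/groups/+cats:example.org/users/@jo:example.org/join")
    print(m.group("group_id"), m.group("user_id"))
    # +cats:example.org @jo:example.org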
@@ -74,8 +74,6 @@ class Transaction(JsonEncodedObject):
        "previous_ids",
        "pdus",
        "edus",
-        "transaction_id",
-        "destination",
        "pdu_failures",
    ]
@@ -42,7 +42,7 @@ from twisted.internet import defer

from synapse.api.errors import SynapseError
from synapse.types import get_domain_from_id
-from synapse.util.logcontext import preserve_fn
+from synapse.util.logcontext import run_in_background

from signedjson.sign import sign_json
@@ -165,31 +165,35 @@ class GroupAttestionRenewer(object):

        @defer.inlineCallbacks
        def _renew_attestation(group_id, user_id):
-            if not self.is_mine_id(group_id):
-                destination = get_domain_from_id(group_id)
-            elif not self.is_mine_id(user_id):
-                destination = get_domain_from_id(user_id)
-            else:
-                logger.warn(
-                    "Incorrectly trying to do attestations for user: %r in %r",
-                    user_id, group_id,
-                )
-                yield self.store.remove_attestation_renewal(group_id, user_id)
-                return
-
-            attestation = self.attestations.create_attestation(group_id, user_id)
-
-            yield self.transport_client.renew_group_attestation(
-                destination, group_id, user_id,
-                content={"attestation": attestation},
-            )
-
-            yield self.store.update_attestation_renewal(
-                group_id, user_id, attestation
-            )
+            try:
+                if not self.is_mine_id(group_id):
+                    destination = get_domain_from_id(group_id)
+                elif not self.is_mine_id(user_id):
+                    destination = get_domain_from_id(user_id)
+                else:
+                    logger.warn(
+                        "Incorrectly trying to do attestations for user: %r in %r",
+                        user_id, group_id,
+                    )
+                    yield self.store.remove_attestation_renewal(group_id, user_id)
+                    return
+
+                attestation = self.attestations.create_attestation(group_id, user_id)
+
+                yield self.transport_client.renew_group_attestation(
+                    destination, group_id, user_id,
+                    content={"attestation": attestation},
+                )
+
+                yield self.store.update_attestation_renewal(
+                    group_id, user_id, attestation
+                )
+            except Exception:
+                logger.exception("Error renewing attestation of %r in %r",
+                                 user_id, group_id)

        for row in rows:
            group_id = row["group_id"]
            user_id = row["user_id"]

-            preserve_fn(_renew_attestation)(group_id, user_id)
+            run_in_background(_renew_attestation, group_id, user_id)
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2017 Vector Creations Ltd
+# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -19,6 +20,8 @@ from synapse.api.errors import SynapseError
from synapse.types import GroupID, RoomID, UserID, get_domain_from_id
from twisted.internet import defer

+from six import string_types
+
logger = logging.getLogger(__name__)
@@ -205,6 +208,28 @@ class GroupsServerHandler(object):

        defer.returnValue({})

+    @defer.inlineCallbacks
+    def set_group_join_policy(self, group_id, requester_user_id, content):
+        """Sets the group join policy.
+
+        Currently supported policies are:
+         - "invite": an invite must be received and accepted in order to join.
+         - "open": anyone can join.
+        """
+        yield self.check_group_is_ours(
+            group_id, requester_user_id, and_exists=True, and_is_admin=requester_user_id
+        )
+
+        join_policy = _parse_join_policy_from_contents(content)
+        if join_policy is None:
+            raise SynapseError(
+                400, "No value specified for 'm.join_policy'"
+            )
+
+        yield self.store.set_group_join_policy(group_id, join_policy=join_policy)
+
+        defer.returnValue({})
+
    @defer.inlineCallbacks
    def get_group_categories(self, group_id, requester_user_id):
        """Get all categories in a group (as seen by user)
@@ -381,9 +406,16 @@ class GroupsServerHandler(object):

        yield self.check_group_is_ours(group_id, requester_user_id)

-        group_description = yield self.store.get_group(group_id)
+        group = yield self.store.get_group(group_id)
+
+        if group:
+            cols = [
+                "name", "short_description", "long_description",
+                "avatar_url", "is_public",
+            ]
+            group_description = {key: group[key] for key in cols}
+            group_description["is_openly_joinable"] = group["join_policy"] == "open"

        if group_description:
            defer.returnValue(group_description)
        else:
            raise SynapseError(404, "Unknown group")
|
|||
"long_description"):
|
||||
if keyname in content:
|
||||
value = content[keyname]
|
||||
if not isinstance(value, basestring):
|
||||
if not isinstance(value, string_types):
|
||||
raise SynapseError(400, "%r value is not a string" % (keyname,))
|
||||
profile[keyname] = value
|
||||
|
||||
|
@@ -654,6 +686,40 @@ class GroupsServerHandler(object):
        else:
            raise SynapseError(502, "Unknown state returned by HS")

+    @defer.inlineCallbacks
+    def _add_user(self, group_id, user_id, content):
+        """Add a user to a group based on a content dict.
+
+        See accept_invite, join_group.
+        """
+        if not self.hs.is_mine_id(user_id):
+            local_attestation = self.attestations.create_attestation(
+                group_id, user_id,
+            )
+
+            remote_attestation = content["attestation"]
+
+            yield self.attestations.verify_attestation(
+                remote_attestation,
+                user_id=user_id,
+                group_id=group_id,
+            )
+        else:
+            local_attestation = None
+            remote_attestation = None
+
+        is_public = _parse_visibility_from_contents(content)
+
+        yield self.store.add_user_to_group(
+            group_id, user_id,
+            is_admin=False,
+            is_public=is_public,
+            local_attestation=local_attestation,
+            remote_attestation=remote_attestation,
+        )
+
+        defer.returnValue(local_attestation)
+
    @defer.inlineCallbacks
    def accept_invite(self, group_id, requester_user_id, content):
        """User tries to accept an invite to the group.
@@ -670,30 +736,27 @@ class GroupsServerHandler(object):
        if not is_invited:
            raise SynapseError(403, "User not invited to group")

-        if not self.hs.is_mine_id(requester_user_id):
-            local_attestation = self.attestations.create_attestation(
-                group_id, requester_user_id,
-            )
-            remote_attestation = content["attestation"]
-
-            yield self.attestations.verify_attestation(
-                remote_attestation,
-                user_id=requester_user_id,
-                group_id=group_id,
-            )
-        else:
-            local_attestation = None
-            remote_attestation = None
-
-        is_public = _parse_visibility_from_contents(content)
-
-        yield self.store.add_user_to_group(
-            group_id, requester_user_id,
-            is_admin=False,
-            is_public=is_public,
-            local_attestation=local_attestation,
-            remote_attestation=remote_attestation,
+        local_attestation = yield self._add_user(group_id, requester_user_id, content)
+
+        defer.returnValue({
+            "state": "join",
+            "attestation": local_attestation,
+        })
+
+    @defer.inlineCallbacks
+    def join_group(self, group_id, requester_user_id, content):
+        """User tries to join the group.
+
+        This will error if the group requires an invite/knock to join
+        """
+
+        group_info = yield self.check_group_is_ours(
+            group_id, requester_user_id, and_exists=True
+        )
+        if group_info['join_policy'] != "open":
+            raise SynapseError(403, "Group is not publicly joinable")
+
+        local_attestation = yield self._add_user(group_id, requester_user_id, content)

        defer.returnValue({
            "state": "join",
@@ -835,6 +898,31 @@ class GroupsServerHandler(object):
        })


+def _parse_join_policy_from_contents(content):
+    """Given a content for a request, return the specified join policy or None
+    """
+
+    join_policy_dict = content.get("m.join_policy")
+    if join_policy_dict:
+        return _parse_join_policy_dict(join_policy_dict)
+    else:
+        return None
+
+
+def _parse_join_policy_dict(join_policy_dict):
+    """Given a dict for the "m.join_policy" config return the join policy specified
+    """
+    join_policy_type = join_policy_dict.get("type")
+    if not join_policy_type:
+        return "invite"
+
+    if join_policy_type not in ("invite", "open"):
+        raise SynapseError(
+            400, "Synapse only supports 'invite'/'open' join rule"
+        )
+    return join_policy_type
+
+
def _parse_visibility_from_contents(content):
    """Given a content for a request parse out whether the entity should be
    public or not
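In short: a missing or empty "type" defaults to invite-only, "open" allows joining without an invite, and anything else is rejected. A standalone re-implementation of the same rules, for illustration:

    def parse_join_policy(join_policy_dict):
        join_policy_type = join_policy_dict.get("type")
        if not join_policy_type:
            return "invite"   # absent type defaults to invite-only
        if join_policy_type not in ("invite", "open"):
            raise ValueError("Synapse only supports 'invite'/'open' join rule")
        return join_policy_type

    print(parse_join_policy({}))                  # invite
    print(parse_join_policy({"type": "open"}))    # open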
@@ -14,9 +14,7 @@
# limitations under the License.

from .register import RegistrationHandler
-from .room import (
-    RoomCreationHandler, RoomContextHandler,
-)
+from .room import RoomContextHandler
from .message import MessageHandler
from .federation import FederationHandler
from .directory import DirectoryHandler

@@ -47,7 +45,6 @@ class Handlers(object):
    def __init__(self, hs):
        self.registration_handler = RegistrationHandler(hs)
        self.message_handler = MessageHandler(hs)
-        self.room_creation_handler = RoomCreationHandler(hs)
        self.federation_handler = FederationHandler(hs)
        self.directory_handler = DirectoryHandler(hs)
        self.admin_handler = AdminHandler(hs)
@@ -18,15 +18,16 @@ from twisted.internet import defer
import synapse
from synapse.api.constants import EventTypes
from synapse.util.metrics import Measure
-from synapse.util.logcontext import make_deferred_yieldable, preserve_fn
+from synapse.util.logcontext import (
+    make_deferred_yieldable, run_in_background,
+)
+from prometheus_client import Counter

import logging

logger = logging.getLogger(__name__)

-metrics = synapse.metrics.get_metrics_for(__name__)
-
-events_processed_counter = metrics.register_counter("events_processed")
+events_processed_counter = Counter("synapse_handlers_appservice_events_processed", "")


def log_failure(failure):
@@ -84,11 +85,16 @@ class ApplicationServicesHandler(object):
                    if not events:
                        break

+                    events_by_room = {}
+                    for event in events:
+                        events_by_room.setdefault(event.room_id, []).append(event)
+
+                    @defer.inlineCallbacks
+                    def handle_event(event):
                        # Gather interested services
                        services = yield self._get_services_for_event(event)
                        if len(services) == 0:
-                            continue  # no services need notifying
+                            return  # no services need notifying

                        # Do we know this user exists? If not, poke the user
                        # query API for all services which match that user regex.

@@ -104,13 +110,32 @@ class ApplicationServicesHandler(object):

                        # Fork off pushes to these services
                        for service in services:
-                            preserve_fn(self.scheduler.submit_event_for_as)(
-                                service, event
-                            )
+                            self.scheduler.submit_event_for_as(service, event)

-                    events_processed_counter.inc_by(len(events))
+                    @defer.inlineCallbacks
+                    def handle_room_events(events):
+                        for event in events:
+                            yield handle_event(event)
+
+                    yield make_deferred_yieldable(defer.gatherResults([
+                        run_in_background(handle_room_events, evs)
+                        for evs in events_by_room.itervalues()
+                    ], consumeErrors=True))

                    yield self.store.set_appservice_last_pos(upper_bound)

+                    now = self.clock.time_msec()
+                    ts = yield self.store.get_received_ts(events[-1].event_id)
+
+                    synapse.metrics.event_processing_positions.labels(
+                        "appservice_sender").set(upper_bound)
+
+                    events_processed_counter.inc(len(events))
+
+                    synapse.metrics.event_processing_lag.labels(
+                        "appservice_sender").set(now - ts)
+                    synapse.metrics.event_processing_last_ts.labels(
+                        "appservice_sender").set(ts)
                finally:
                    self.is_processing = False
@@ -167,7 +192,10 @@ class ApplicationServicesHandler(object):
        services = yield self._get_services_for_3pn(protocol)

        results = yield make_deferred_yieldable(defer.DeferredList([
-            preserve_fn(self.appservice_api.query_3pe)(service, kind, protocol, fields)
+            run_in_background(
+                self.appservice_api.query_3pe,
+                service, kind, protocol, fields,
+            )
            for service in services
        ], consumeErrors=True))
@@ -228,11 +256,15 @@ class ApplicationServicesHandler(object):
            event based on the service regex.
        """
        services = self.store.get_app_services()
-        interested_list = [
-            s for s in services if (
-                yield s.is_interested(event, self.store)
-            )
-        ]
+
+        # we can't use a list comprehension here. Since python 3, list
+        # comprehensions use a generator internally. This means you can't yield
+        # inside of a list comprehension anymore.
+        interested_list = []
+        for s in services:
+            if (yield s.is_interested(event, self.store)):
+                interested_list.append(s)

        defer.returnValue(interested_list)

    def _get_services_for_user(self, user_id):
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2017 New Vector Ltd
+# Copyright 2017, 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -12,9 +12,11 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-from twisted.internet import defer
+from twisted.internet import defer, reactor

from ._base import BaseHandler
from synapse.types import UserID, create_requester
+from synapse.util.logcontext import run_in_background

import logging
@@ -27,6 +29,15 @@ class DeactivateAccountHandler(BaseHandler):
        super(DeactivateAccountHandler, self).__init__(hs)
        self._auth_handler = hs.get_auth_handler()
        self._device_handler = hs.get_device_handler()
+        self._room_member_handler = hs.get_room_member_handler()
+        self.user_directory_handler = hs.get_user_directory_handler()
+
+        # Flag that indicates whether the process to part users from rooms is running
+        self._user_parter_running = False
+
+        # Start the user parter loop so it can resume parting users from rooms where
+        # it left off (if it has work left to do).
+        reactor.callWhenRunning(self._start_user_parting)

    @defer.inlineCallbacks
    def deactivate_account(self, user_id):
@@ -50,3 +61,73 @@ class DeactivateAccountHandler(BaseHandler):

        yield self.store.user_delete_threepids(user_id)
        yield self.store.user_set_password_hash(user_id, None)
+
+        # Add the user to a table of users pending deactivation (ie.
+        # removal from all the rooms they're a member of)
+        yield self.store.add_user_pending_deactivation(user_id)
+
+        # delete from user directory
+        yield self.user_directory_handler.handle_user_deactivated(user_id)
+
+        # Now start the process that goes through that list and
+        # parts users from rooms (if it isn't already running)
+        self._start_user_parting()
+
+    def _start_user_parting(self):
+        """
+        Start the process that goes through the table of users
+        pending deactivation, if it isn't already running.
+
+        Returns:
+            None
+        """
+        if not self._user_parter_running:
+            run_in_background(self._user_parter_loop)
+
+    @defer.inlineCallbacks
+    def _user_parter_loop(self):
+        """Loop that parts deactivated users from rooms
+
+        Returns:
+            None
+        """
+        self._user_parter_running = True
+        logger.info("Starting user parter")
+        try:
+            while True:
+                user_id = yield self.store.get_user_pending_deactivation()
+                if user_id is None:
+                    break
+                logger.info("User parter parting %r", user_id)
+                yield self._part_user(user_id)
+                yield self.store.del_user_pending_deactivation(user_id)
+                logger.info("User parter finished parting %r", user_id)
+            logger.info("User parter finished: stopping")
+        finally:
+            self._user_parter_running = False
+
+    @defer.inlineCallbacks
+    def _part_user(self, user_id):
+        """Causes the given user_id to leave all the rooms they're joined to
+
+        Returns:
+            None
+        """
+        user = UserID.from_string(user_id)
+
+        rooms_for_user = yield self.store.get_rooms_for_user(user_id)
+        for room_id in rooms_for_user:
+            logger.info("User parter parting %r from %r", user_id, room_id)
+            try:
+                yield self._room_member_handler.update_membership(
+                    create_requester(user),
+                    user,
+                    room_id,
+                    "leave",
+                    ratelimit=False,
+                )
+            except Exception:
+                logger.exception(
+                    "Failed to part user %r from room %r: ignoring and continuing",
+                    user_id, room_id,
+                )
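The parter is a small durable work queue: the user is written to a pending table up front, and a single background loop drains the table, deleting each row only after the parting succeeds, so a restart resumes exactly where it left off. A minimal sketch of that pattern (an in-memory list stands in for the table; names are illustrative):

    import asyncio

    pending = ["@alice:hs", "@bob:hs"]   # stands in for the pending-deactivation table
    state = {"running": False}

    async def part_user(user_id):
        print("parting", user_id)        # real code would leave every joined room

    async def parter_loop():
        if state["running"]:             # at most one drain loop at a time
            return
        state["running"] = True
        try:
            while pending:
                user_id = pending[0]
                await part_user(user_id)
                pending.pop(0)           # delete the row only once the work is done
        finally:
            state["running"] = False

    asyncio.run(parter_loop())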
@@ -26,6 +26,8 @@ from ._base import BaseHandler

import logging

+from six import itervalues, iteritems
+
logger = logging.getLogger(__name__)
@@ -155,7 +157,7 @@ class DeviceHandler(BaseHandler):

        try:
            yield self.store.delete_device(user_id, device_id)
-        except errors.StoreError, e:
+        except errors.StoreError as e:
            if e.code == 404:
                # no match
                pass

@@ -204,7 +206,7 @@ class DeviceHandler(BaseHandler):

        try:
            yield self.store.delete_devices(user_id, device_ids)
-        except errors.StoreError, e:
+        except errors.StoreError as e:
            if e.code == 404:
                # no match
                pass

@@ -243,7 +245,7 @@ class DeviceHandler(BaseHandler):
                new_display_name=content.get("display_name")
            )
            yield self.notify_device_update(user_id, [device_id])
-        except errors.StoreError, e:
+        except errors.StoreError as e:
            if e.code == 404:
                raise errors.NotFoundError()
            else:
@@ -318,7 +320,7 @@ class DeviceHandler(BaseHandler):
            # The user may have left the room
            # TODO: Check if they actually did or if we were just invited.
            if room_id not in room_ids:
-                for key, event_id in current_state_ids.iteritems():
+                for key, event_id in iteritems(current_state_ids):
                    etype, state_key = key
                    if etype != EventTypes.Member:
                        continue

@@ -338,7 +340,7 @@ class DeviceHandler(BaseHandler):
            # special-case for an empty prev state: include all members
            # in the changed list
            if not event_ids:
-                for key, event_id in current_state_ids.iteritems():
+                for key, event_id in iteritems(current_state_ids):
                    etype, state_key = key
                    if etype != EventTypes.Member:
                        continue

@@ -354,10 +356,10 @@ class DeviceHandler(BaseHandler):

            # Check if we've joined the room? If so we just blindly add all the users to
            # the "possibly changed" users.
-            for state_dict in prev_state_ids.itervalues():
+            for state_dict in itervalues(prev_state_ids):
                member_event = state_dict.get((EventTypes.Member, user_id), None)
                if not member_event or member_event != current_member_id:
-                    for key, event_id in current_state_ids.iteritems():
+                    for key, event_id in iteritems(current_state_ids):
                        etype, state_key = key
                        if etype != EventTypes.Member:
                            continue

@@ -367,14 +369,14 @@ class DeviceHandler(BaseHandler):
            # If there has been any change in membership, include them in the
            # possibly changed list. We'll check if they are joined below,
            # and we're not toooo worried about spuriously adding users.
-            for key, event_id in current_state_ids.iteritems():
+            for key, event_id in iteritems(current_state_ids):
                etype, state_key = key
                if etype != EventTypes.Member:
                    continue

                # check if this member has changed since any of the extremities
                # at the stream_ordering, and add them to the list if so.
-                for state_dict in prev_state_ids.itervalues():
+                for state_dict in itervalues(prev_state_ids):
                    prev_event_id = state_dict.get(key, None)
                    if not prev_event_id or prev_event_id != event_id:
                        if state_key != user_id:
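The six helpers used above pick the right iteration primitive per interpreter: the lazy dict.iteritems()/itervalues() on Python 2 and the plain view objects on Python 3. A two-line illustration:

    from six import iteritems, itervalues

    state = {("m.room.member", "@a:hs"): "$ev1"}
    for key, event_id in iteritems(state):
        print(key, event_id)           # ('m.room.member', '@a:hs') $ev1
    print(list(itervalues(state)))     # ['$ev1']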
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

@@ -13,17 +14,18 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-import ujson as json
+import simplejson as json
import logging

from canonicaljson import encode_canonical_json
from twisted.internet import defer
+from six import iteritems

from synapse.api.errors import (
    SynapseError, CodeMessageException, FederationDeniedError,
)
from synapse.types import get_domain_from_id, UserID
-from synapse.util.logcontext import preserve_fn, make_deferred_yieldable
+from synapse.util.logcontext import make_deferred_yieldable, run_in_background
from synapse.util.retryutils import NotRetryingDestination

logger = logging.getLogger(__name__)
@@ -91,7 +93,7 @@ class E2eKeysHandler(object):
        remote_queries_not_in_cache = {}
        if remote_queries:
            query_list = []
-            for user_id, device_ids in remote_queries.iteritems():
+            for user_id, device_ids in iteritems(remote_queries):
                if device_ids:
                    query_list.extend((user_id, device_id) for device_id in device_ids)
                else:

@@ -102,9 +104,9 @@ class E2eKeysHandler(object):
                    query_list
                )
            )
-            for user_id, devices in remote_results.iteritems():
+            for user_id, devices in iteritems(remote_results):
                user_devices = results.setdefault(user_id, {})
-                for device_id, device in devices.iteritems():
+                for device_id, device in iteritems(devices):
                    keys = device.get("keys", None)
                    device_display_name = device.get("device_display_name", None)
                    if keys:

@@ -134,28 +136,13 @@ class E2eKeysHandler(object):
                if user_id in destination_query:
                    results[user_id] = keys

-            except CodeMessageException as e:
-                failures[destination] = {
-                    "status": e.code, "message": e.message
-                }
-            except NotRetryingDestination as e:
-                failures[destination] = {
-                    "status": 503, "message": "Not ready for retry",
-                }
-            except FederationDeniedError as e:
-                failures[destination] = {
-                    "status": 403, "message": "Federation Denied",
-                }
            except Exception as e:
                # include ConnectionRefused and other errors
-                failures[destination] = {
-                    "status": 503, "message": e.message
-                }
+                failures[destination] = _exception_to_failure(e)

        yield make_deferred_yieldable(defer.gatherResults([
-            preserve_fn(do_remote_query)(destination)
+            run_in_background(do_remote_query, destination)
            for destination in remote_queries_not_in_cache
-        ]))
+        ], consumeErrors=True))

        defer.returnValue({
            "device_keys": results, "failures": failures,
@@ -252,32 +239,21 @@ class E2eKeysHandler(object):
                for user_id, keys in remote_result["one_time_keys"].items():
                    if user_id in device_keys:
                        json_result[user_id] = keys
-            except CodeMessageException as e:
-                failures[destination] = {
-                    "status": e.code, "message": e.message
-                }
-            except NotRetryingDestination as e:
-                failures[destination] = {
-                    "status": 503, "message": "Not ready for retry",
-                }
            except Exception as e:
                # include ConnectionRefused and other errors
-                failures[destination] = {
-                    "status": 503, "message": e.message
-                }
+                failures[destination] = _exception_to_failure(e)

        yield make_deferred_yieldable(defer.gatherResults([
-            preserve_fn(claim_client_keys)(destination)
+            run_in_background(claim_client_keys, destination)
            for destination in remote_queries
-        ]))
+        ], consumeErrors=True))

        logger.info(
            "Claimed one-time-keys: %s",
            ",".join((
                "%s for %s:%s" % (key_id, user_id, device_id)
-                for user_id, user_keys in json_result.iteritems()
-                for device_id, device_keys in user_keys.iteritems()
-                for key_id, _ in device_keys.iteritems()
+                for user_id, user_keys in iteritems(json_result)
+                for device_id, device_keys in iteritems(user_keys)
+                for key_id, _ in iteritems(device_keys)
            )),
        )
@@ -362,6 +338,31 @@ class E2eKeysHandler(object):
        )


+def _exception_to_failure(e):
+    if isinstance(e, CodeMessageException):
+        return {
+            "status": e.code, "message": e.message,
+        }
+
+    if isinstance(e, NotRetryingDestination):
+        return {
+            "status": 503, "message": "Not ready for retry",
+        }
+
+    if isinstance(e, FederationDeniedError):
+        return {
+            "status": 403, "message": "Federation Denied",
+        }
+
+    # include ConnectionRefused and other errors
+    #
+    # Note that some Exceptions (notably twisted's ResponseFailed etc) don't
+    # give a string for e.message, which simplejson then fails to serialize.
+    return {
+        "status": 503, "message": str(e.message),
+    }
+
+
def _one_time_keys_match(old_key_json, new_key):
    old_key = json.loads(old_key_json)
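Centralising this mapping keeps every failure dict JSON-serializable no matter which exception was raised, including ones whose .message is not a string. A standalone sketch of the same idea (simplified status table, not Synapse's exact exception classes):

    def exception_to_failure(e):
        # map known failure modes to an HTTP-ish status; anything else is a 503
        statuses = {"NotRetryingDestination": 503, "FederationDeniedError": 403}
        status = statuses.get(type(e).__name__, 503)
        return {"status": status, "message": str(e)}

    class FederationDeniedError(Exception):
        pass

    print(exception_to_failure(FederationDeniedError("denied")))
    # {'status': 403, 'message': 'denied'}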
@@ -48,6 +48,7 @@ class EventStreamHandler(BaseHandler):

        self.notifier = hs.get_notifier()
        self.state = hs.get_state_handler()
+        self._server_notices_sender = hs.get_server_notices_sender()

    @defer.inlineCallbacks
    @log_function

@@ -58,6 +59,10 @@ class EventStreamHandler(BaseHandler):

        If `only_keys` is not None, events from keys will be sent down.
        """

+        # send any outstanding server notices to the user.
+        yield self._server_notices_sender.on_user_syncing(auth_user_id)
+
        auth_user = UserID.from_string(auth_user_id)
        presence_handler = self.hs.get_presence_handler()
@@ -15,8 +15,17 @@
# limitations under the License.

"""Contains handlers for federation events."""

+import itertools
+import logging
+import sys
+
from signedjson.key import decode_verify_key_bytes
from signedjson.sign import verify_signed_json
+import six
+from six.moves import http_client
+from six import iteritems
+from twisted.internet import defer
from unpaddedbase64 import decode_base64

from ._base import BaseHandler

@@ -43,10 +52,6 @@ from synapse.util.retryutils import NotRetryingDestination

from synapse.util.distributor import user_joined_room

-from twisted.internet import defer
-
-import itertools
-import logging

logger = logging.getLogger(__name__)
@@ -77,6 +82,7 @@ class FederationHandler(BaseHandler):
        self.pusher_pool = hs.get_pusherpool()
        self.spam_checker = hs.get_spam_checker()
        self.event_creation_handler = hs.get_event_creation_handler()
+        self._server_notices_mxid = hs.config.server_notices_mxid

        # When joining a room we need to queue any events for that room up
        self.room_queues = {}
@@ -115,6 +121,19 @@ class FederationHandler(BaseHandler):
            logger.debug("Already seen pdu %s", pdu.event_id)
            return

+        # do some initial sanity-checking of the event. In particular, make
+        # sure it doesn't have hundreds of prev_events or auth_events, which
+        # could cause a huge state resolution or cascade of event fetches.
+        try:
+            self._sanity_check_event(pdu)
+        except SynapseError as err:
+            raise FederationError(
+                "ERROR",
+                err.code,
+                err.msg,
+                affected=pdu.event_id,
+            )
+
        # If we are currently in the process of joining this room, then we
        # queue up events for later processing.
        if pdu.room_id in self.room_queues:
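The body of _sanity_check_event is not part of this hunk; a plausible shape for such a check, purely illustrative (the thresholds here are invented, not Synapse's actual limits):

    from collections import namedtuple

    Event = namedtuple("Event", ["prev_events", "auth_events"])

    def sanity_check_event(ev, max_prev=20, max_auth=10):
        # refuse events that reference an implausible number of prior events,
        # which could otherwise trigger a huge state resolution
        if len(ev.prev_events) > max_prev:
            raise ValueError("Too many prev_events")
        if len(ev.auth_events) > max_auth:
            raise ValueError("Too many auth_events")

    sanity_check_event(Event(prev_events=["$a"], auth_events=["$b"]))  # ok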
@@ -149,10 +168,6 @@ class FederationHandler(BaseHandler):

        auth_chain = []

-        have_seen = yield self.store.have_events(
-            [ev for ev, _ in pdu.prev_events]
-        )
-
        fetch_state = False

        # Get missing pdus if necessary.

@@ -168,7 +183,7 @@ class FederationHandler(BaseHandler):
            )

            prevs = {e_id for e_id, _ in pdu.prev_events}
-            seen = set(have_seen.keys())
+            seen = yield self.store.have_seen_events(prevs)

            if min_depth and pdu.depth < min_depth:
                # This is so that we don't notify the user about this
@@ -196,8 +211,7 @@ class FederationHandler(BaseHandler):

                # Update the set of things we've seen after trying to
                # fetch the missing stuff
-                have_seen = yield self.store.have_events(prevs)
-                seen = set(have_seen.iterkeys())
+                seen = yield self.store.have_seen_events(prevs)

                if not prevs - seen:
                    logger.info(

@@ -248,8 +262,7 @@ class FederationHandler(BaseHandler):
            min_depth (int): Minimum depth of events to return.
        """
        # We recalculate seen, since it may have changed.
-        have_seen = yield self.store.have_events(prevs)
-        seen = set(have_seen.keys())
+        seen = yield self.store.have_seen_events(prevs)

        if not prevs - seen:
            return
@@ -361,9 +374,7 @@ class FederationHandler(BaseHandler):
        if auth_chain:
            event_ids |= {e.event_id for e in auth_chain}

-        seen_ids = set(
-            (yield self.store.have_events(event_ids)).keys()
-        )
+        seen_ids = yield self.store.have_seen_events(event_ids)

        if state and auth_chain is not None:
            # If we have any state or auth_chain given to us by the replication
@@ -469,18 +480,18 @@ class FederationHandler(BaseHandler):
        # to get all state ids that we're interested in.
        event_map = yield self.store.get_events([
            e_id
-            for key_to_eid in event_to_state_ids.values()
-            for key, e_id in key_to_eid.items()
+            for key_to_eid in event_to_state_ids.itervalues()
+            for key, e_id in key_to_eid.iteritems()
            if key[0] != EventTypes.Member or check_match(key[1])
        ])

        event_to_state = {
            e_id: {
                key: event_map[inner_e_id]
-                for key, inner_e_id in key_to_eid.items()
+                for key, inner_e_id in key_to_eid.iteritems()
                if inner_e_id in event_map
            }
-            for e_id, key_to_eid in event_to_state_ids.items()
+            for e_id, key_to_eid in event_to_state_ids.iteritems()
        }

        def redact_disallowed(event, state):
@ -495,7 +506,7 @@ class FederationHandler(BaseHandler):
|
|||
# membership states for the requesting server to determine
|
||||
# if the server is either in the room or has been invited
|
||||
# into the room.
|
||||
for ev in state.values():
|
||||
for ev in state.itervalues():
|
||||
if ev.type != EventTypes.Member:
|
||||
continue
|
||||
try:
|
||||
|
@ -527,9 +538,16 @@ class FederationHandler(BaseHandler):
|
|||
def backfill(self, dest, room_id, limit, extremities):
|
||||
""" Trigger a backfill request to `dest` for the given `room_id`
|
||||
|
||||
This will attempt to get more events from the remote. This may return
|
||||
be successfull and still return no events if the other side has no new
|
||||
events to offer.
|
||||
This will attempt to get more events from the remote. If the other side
|
||||
has no new events to offer, this will return an empty list.
|
||||
|
||||
As the events are received, we check their signatures, and also do some
|
||||
sanity-checking on them. If any of the backfilled events are invalid,
|
||||
this method throws a SynapseError.
|
||||
|
||||
TODO: make this more useful to distinguish failures of the remote
|
||||
server from invalid events (there is probably no point in trying to
|
||||
re-fetch invalid events from every other HS in the room.)
|
||||
"""
|
||||
if dest == self.server_name:
|
||||
raise SynapseError(400, "Can't backfill from self.")
|
||||
|
@ -541,6 +559,16 @@ class FederationHandler(BaseHandler):
|
|||
extremities=extremities,
|
||||
)
|
||||
|
||||
# ideally we'd sanity check the events here for excess prev_events etc,
|
||||
# but it's hard to reject events at this point without completely
|
||||
# breaking backfill in the same way that it is currently broken by
|
||||
# events whose signature we cannot verify (#3121).
|
||||
#
|
||||
# So for now we accept the events anyway. #3124 tracks this.
|
||||
#
|
||||
# for ev in events:
|
||||
# self._sanity_check_event(ev)
|
||||
|
||||
# Don't bother processing events we already have.
|
||||
seen_events = yield self.store.have_events_in_timeline(
|
||||
set(e.event_id for e in events)
|
||||
|
@ -613,7 +641,8 @@ class FederationHandler(BaseHandler):
|
|||
|
||||
results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
|
||||
[
|
||||
logcontext.preserve_fn(self.replication_layer.get_pdu)(
|
||||
logcontext.run_in_background(
|
||||
self.replication_layer.get_pdu,
|
||||
[dest],
|
||||
event_id,
|
||||
outlier=True,
|
||||
|
@ -633,7 +662,7 @@ class FederationHandler(BaseHandler):
|
|||
|
||||
failed_to_fetch = missing_auth - set(auth_events)
|
||||
|
||||
seen_events = yield self.store.have_events(
|
||||
seen_events = yield self.store.have_seen_events(
|
||||
set(auth_events.keys()) | set(state_events.keys())
|
||||
)
|
||||
|
||||
|
@ -723,9 +752,19 @@ class FederationHandler(BaseHandler):
|
|||
curr_state = yield self.state_handler.get_current_state(room_id)
|
||||
|
||||
def get_domains_from_state(state):
|
||||
"""Get joined domains from state
|
||||
|
||||
Args:
|
||||
state (dict[tuple, FrozenEvent]): State map from type/state
|
||||
key to event.
|
||||
|
||||
Returns:
|
||||
list[tuple[str, int]]: Returns a list of servers with the
|
||||
lowest depth of their joins. Sorted by lowest depth first.
|
||||
"""
|
||||
joined_users = [
|
||||
(state_key, int(event.depth))
|
||||
for (e_type, state_key), event in state.items()
|
||||
for (e_type, state_key), event in state.iteritems()
|
||||
if e_type == EventTypes.Member
|
||||
and event.membership == Membership.JOIN
|
||||
]
|
||||
|
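
The new docstring pins down the return shape of `get_domains_from_state`. A toy illustration of the same transformation, using a hypothetical two-user state map with a namedtuple standing in for synapse's FrozenEvent:

    # Sketch: mimics get_domains_from_state on a simplified state map.
    from collections import namedtuple

    Event = namedtuple("Event", ["membership", "depth"])

    state = {
        ("m.room.member", "@alice:one.example"): Event("join", 3),
        ("m.room.member", "@bob:two.example"): Event("join", 7),
        ("m.room.name", ""): Event(None, 5),
    }

    joined_users = [
        (state_key, int(event.depth))
        for (e_type, state_key), event in state.items()
        if e_type == "m.room.member" and event.membership == "join"
    ]

    joined_domains = {}
    for user_id, depth in joined_users:
        domain = user_id.split(":", 1)[1]
        joined_domains[domain] = min(depth, joined_domains.get(domain, depth))

    # Sorted by lowest join depth first, matching the documented return type.
    print(sorted(joined_domains.items(), key=lambda d: d[1]))
    # [('one.example', 3), ('two.example', 7)]
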
@@ -742,7 +781,7 @@ class FederationHandler(BaseHandler):
             except Exception:
                 pass

-            return sorted(joined_domains.items(), key=lambda d: d[1])
+            return sorted(joined_domains.iteritems(), key=lambda d: d[1])

         curr_domains = get_domains_from_state(curr_state)

@@ -759,7 +798,7 @@ class FederationHandler(BaseHandler):
                 yield self.backfill(
                     dom, room_id,
                     limit=100,
-                    extremities=[e for e in extremities.keys()]
+                    extremities=extremities,
                 )
                 # If this succeeded then we probably already have the
                 # appropriate stuff.

@@ -805,7 +844,7 @@ class FederationHandler(BaseHandler):
         tried_domains = set(likely_domains)
         tried_domains.add(self.server_name)

-        event_ids = list(extremities.keys())
+        event_ids = list(extremities.iterkeys())

         logger.debug("calling resolve_state_groups in _maybe_backfill")
         resolve = logcontext.preserve_fn(

@@ -815,34 +854,69 @@ class FederationHandler(BaseHandler):
             [resolve(room_id, [e]) for e in event_ids],
             consumeErrors=True,
         ))

+        # dict[str, dict[tuple, str]], a map from event_id to state map of
+        # event_ids.
         states = dict(zip(event_ids, [s.state for s in states]))

         state_map = yield self.store.get_events(
-            [e_id for ids in states.values() for e_id in ids],
+            [e_id for ids in states.itervalues() for e_id in ids.itervalues()],
             get_prev_content=False
         )
         states = {
             key: {
                 k: state_map[e_id]
-                for k, e_id in state_dict.items()
+                for k, e_id in state_dict.iteritems()
                 if e_id in state_map
-            } for key, state_dict in states.items()
+            } for key, state_dict in states.iteritems()
         }

         for e_id, _ in sorted_extremeties_tuple:
             likely_domains = get_domains_from_state(states[e_id])

             success = yield try_backfill([
-                dom for dom in likely_domains
+                dom for dom, _ in likely_domains
                 if dom not in tried_domains
             ])
             if success:
                 defer.returnValue(True)

-            tried_domains.update(likely_domains)
+            tried_domains.update(dom for dom, _ in likely_domains)

         defer.returnValue(False)

+    def _sanity_check_event(self, ev):
+        """
+        Do some early sanity checks of a received event
+
+        In particular, checks it doesn't have an excessive number of
+        prev_events or auth_events, which could cause a huge state resolution
+        or cascade of event fetches.
+
+        Args:
+            ev (synapse.events.EventBase): event to be checked
+
+        Returns: None
+
+        Raises:
+            SynapseError if the event does not pass muster
+        """
+        if len(ev.prev_events) > 20:
+            logger.warn("Rejecting event %s which has %i prev_events",
+                        ev.event_id, len(ev.prev_events))
+            raise SynapseError(
+                http_client.BAD_REQUEST,
+                "Too many prev_events",
+            )
+
+        if len(ev.auth_events) > 10:
+            logger.warn("Rejecting event %s which has %i auth_events",
+                        ev.event_id, len(ev.auth_events))
+            raise SynapseError(
+                http_client.BAD_REQUEST,
+                "Too many auth_events",
+            )
+
     @defer.inlineCallbacks
     def send_invite(self, target_host, event):
         """ Sends the invite to the remote server for signing.
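
The `_sanity_check_event` guard above caps fan-in at 20 prev_events and 10 auth_events before any expensive fetching begins. A standalone sketch of the same guard shape, with a stand-in event type (the limits are taken from the diff; everything else is illustrative):

    # Sketch: validate cheap structural properties before expensive work.
    class Event(object):
        def __init__(self, event_id, prev_events, auth_events):
            self.event_id = event_id
            self.prev_events = prev_events
            self.auth_events = auth_events

    MAX_PREV_EVENTS = 20   # limit taken from the diff
    MAX_AUTH_EVENTS = 10   # limit taken from the diff

    def sanity_check_event(ev):
        if len(ev.prev_events) > MAX_PREV_EVENTS:
            raise ValueError("Too many prev_events: %i" % len(ev.prev_events))
        if len(ev.auth_events) > MAX_AUTH_EVENTS:
            raise ValueError("Too many auth_events: %i" % len(ev.auth_events))

    sanity_check_event(Event("$ok:hs", ["$p:hs"], ["$a:hs"]))  # passes
    try:
        sanity_check_event(
            Event("$bad:hs", ["$p%d:hs" % i for i in range(25)], []))
    except ValueError as e:
        print(e)  # Too many prev_events: 25
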
@@ -967,7 +1041,7 @@ class FederationHandler(BaseHandler):
             # lots of requests for missing prev_events which we do actually
             # have. Hence we fire off the deferred, but don't wait for it.

-            logcontext.preserve_fn(self._handle_queued_pdus)(room_queue)
+            logcontext.run_in_background(self._handle_queued_pdus, room_queue)

         defer.returnValue(True)

@@ -1121,6 +1195,13 @@ class FederationHandler(BaseHandler):
         if not self.is_mine_id(event.state_key):
             raise SynapseError(400, "The invite event must be for this server")

+        # block any attempts to invite the server notices mxid
+        if event.state_key == self._server_notices_mxid:
+            raise SynapseError(
+                http_client.FORBIDDEN,
+                "Cannot invite this user",
+            )
+
         event.internal_metadata.outlier = True
         event.internal_metadata.invite_from_remote = True

@@ -1308,7 +1389,7 @@ class FederationHandler(BaseHandler):
         )

         if state_groups:
-            _, state = state_groups.items().pop()
+            _, state = list(iteritems(state_groups)).pop()

             results = {
                 (e.type, e.state_key): e for e in state
             }

@@ -1457,18 +1538,21 @@ class FederationHandler(BaseHandler):
                 backfilled=backfilled,
             )
         except:  # noqa: E722, as we reraise the exception this is fine.
             # Ensure that we actually remove the entries in the push actions
             # staging area
-            logcontext.preserve_fn(
-                self.store.remove_push_actions_from_staging
-            )(event.event_id)
-            raise
+            tp, value, tb = sys.exc_info()
+
+            logcontext.run_in_background(
+                self.store.remove_push_actions_from_staging,
+                event.event_id,
+            )
+
+            six.reraise(tp, value, tb)

         if not backfilled:
             # this intentionally does not yield: we don't care about the result
             # and don't need to wait for it.
-            logcontext.preserve_fn(self.pusher_pool.on_new_notifications)(
-                event_stream_id, max_stream_id
+            logcontext.run_in_background(
+                self.pusher_pool.on_new_notifications,
+                event_stream_id, max_stream_id,
             )

         defer.returnValue((context, event_stream_id, max_stream_id))
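
The bare `raise` is replaced by capturing `sys.exc_info()` first: the cleanup call in between may itself run exception-handling code, which (on Python 2 especially) can clobber the "current" exception a bare `raise` would rethrow. `six.reraise` then rethrows the original with its traceback intact on both Python 2 and 3. A minimal self-contained illustration:

    # Sketch: why exc_info is captured before doing cleanup work.
    import sys

    import six

    def cleanup():
        # cleanup that handles its own internal errors; on Python 2 this
        # can replace the exception a bare `raise` would rethrow
        try:
            raise RuntimeError("cleanup hiccup")
        except RuntimeError:
            pass

    try:
        try:
            raise ValueError("the real error")
        except:  # noqa: E722, we reraise below
            tp, value, tb = sys.exc_info()  # snapshot before cleanup
            cleanup()
            six.reraise(tp, value, tb)      # rethrow original, traceback intact
    except ValueError as e:
        print(e)  # the real error
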
@@ -1482,7 +1566,8 @@ class FederationHandler(BaseHandler):
         """
         contexts = yield logcontext.make_deferred_yieldable(defer.gatherResults(
             [
-                logcontext.preserve_fn(self._prep_event)(
+                logcontext.run_in_background(
+                    self._prep_event,
                     origin,
                     ev_info["event"],
                     state=ev_info.get("state"),
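
Throughout this commit, `preserve_fn(f)(*args)` is rewritten as `run_in_background(f, *args)`. Both start `f` without yielding on its result; the new helper just folds the wrap-and-call into one step. A hedged sketch of the calling-convention change (simplified: synapse's real helpers also manage log contexts, which is the reason they exist):

    # Sketch: the calling-convention change only, ignoring logcontexts.
    def preserve_fn(f):
        # old style: returns a wrapped callable, invoked separately
        def wrapped(*args, **kwargs):
            return f(*args, **kwargs)
        return wrapped

    def run_in_background(f, *args, **kwargs):
        # new style: wraps and invokes in a single call
        return f(*args, **kwargs)

    def get_pdu(destinations, event_id, outlier=False):
        return "fetched %s from %r" % (event_id, destinations)

    print(preserve_fn(get_pdu)(["hs1"], "$e:hs", outlier=True))     # old shape
    print(run_in_background(get_pdu, ["hs1"], "$e:hs", outlier=True))  # new
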
@@ -1736,7 +1821,8 @@ class FederationHandler(BaseHandler):
         event_key = None

         if event_auth_events - current_state:
-            have_events = yield self.store.have_events(
+            # TODO: can we use store.have_seen_events here instead?
+            have_events = yield self.store.get_seen_events_with_rejections(
                 event_auth_events - current_state
             )
         else:

@@ -1759,12 +1845,12 @@ class FederationHandler(BaseHandler):
                 origin, event.room_id, event.event_id
             )

-            seen_remotes = yield self.store.have_events(
+            seen_remotes = yield self.store.have_seen_events(
                 [e.event_id for e in remote_auth_chain]
             )

             for e in remote_auth_chain:
-                if e.event_id in seen_remotes.keys():
+                if e.event_id in seen_remotes:
                     continue

                 if e.event_id == event.event_id:

@@ -1791,7 +1877,7 @@ class FederationHandler(BaseHandler):
             except AuthError:
                 pass

-            have_events = yield self.store.have_events(
+            have_events = yield self.store.get_seen_events_with_rejections(
                 [e_id for e_id, _ in event.auth_events]
             )
             seen_events = set(have_events.keys())

@@ -1810,7 +1896,8 @@ class FederationHandler(BaseHandler):

         different_events = yield logcontext.make_deferred_yieldable(
             defer.gatherResults([
-                logcontext.preserve_fn(self.store.get_event)(
+                logcontext.run_in_background(
+                    self.store.get_event,
                     d,
                     allow_none=True,
                     allow_rejected=False,

@@ -1876,13 +1963,13 @@ class FederationHandler(BaseHandler):
                 local_auth_chain,
             )

-            seen_remotes = yield self.store.have_events(
+            seen_remotes = yield self.store.have_seen_events(
                 [e.event_id for e in result["auth_chain"]]
             )

             # 3. Process any remote auth chain events we haven't seen.
             for ev in result["auth_chain"]:
-                if ev.event_id in seen_remotes.keys():
+                if ev.event_id in seen_remotes:
                     continue

                 if ev.event_id == event.event_id:

@@ -1948,7 +2035,7 @@ class FederationHandler(BaseHandler):
             this will not be included in the current_state in the context.
         """
         state_updates = {
-            k: a.event_id for k, a in auth_events.iteritems()
+            k: a.event_id for k, a in iteritems(auth_events)
             if k != event_key
         }
         context.current_state_ids = dict(context.current_state_ids)

@@ -1958,7 +2045,7 @@ class FederationHandler(BaseHandler):
             context.delta_ids.update(state_updates)
         context.prev_state_ids = dict(context.prev_state_ids)
         context.prev_state_ids.update({
-            k: a.event_id for k, a in auth_events.iteritems()
+            k: a.event_id for k, a in iteritems(auth_events)
         })
         context.state_group = yield self.store.store_state_group(
             event.event_id,

@@ -2010,7 +2097,7 @@ class FederationHandler(BaseHandler):

         def get_next(it, opt=None):
             try:
-                return it.next()
+                return next(it)
             except Exception:
                 return opt
synapse/handlers/groups_local.py

@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2017 Vector Creations Ltd
+# Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

@@ -14,6 +15,7 @@
 # limitations under the License.

 from twisted.internet import defer
+from six import iteritems

 from synapse.api.errors import SynapseError
 from synapse.types import get_domain_from_id

@@ -90,6 +92,8 @@ class GroupsLocalHandler(object):
     get_group_role = _create_rerouter("get_group_role")
     get_group_roles = _create_rerouter("get_group_roles")

+    set_group_join_policy = _create_rerouter("set_group_join_policy")
+
     @defer.inlineCallbacks
     def get_group_summary(self, group_id, requester_user_id):
         """Get the group summary for a group.

@@ -226,7 +230,45 @@ class GroupsLocalHandler(object):
     def join_group(self, group_id, user_id, content):
         """Request to join a group
         """
-        raise NotImplementedError()  # TODO
+        if self.is_mine_id(group_id):
+            yield self.groups_server_handler.join_group(
+                group_id, user_id, content
+            )
+            local_attestation = None
+            remote_attestation = None
+        else:
+            local_attestation = self.attestations.create_attestation(group_id, user_id)
+            content["attestation"] = local_attestation
+
+            res = yield self.transport_client.join_group(
+                get_domain_from_id(group_id), group_id, user_id, content,
+            )
+
+            remote_attestation = res["attestation"]
+
+            yield self.attestations.verify_attestation(
+                remote_attestation,
+                group_id=group_id,
+                user_id=user_id,
+                server_name=get_domain_from_id(group_id),
+            )
+
+        # TODO: Check that the group is public and we're being added publically
+        is_publicised = content.get("publicise", False)
+
+        token = yield self.store.register_user_group_membership(
+            group_id, user_id,
+            membership="join",
+            is_admin=False,
+            local_attestation=local_attestation,
+            remote_attestation=remote_attestation,
+            is_publicised=is_publicised,
+        )
+        self.notifier.on_new_event(
+            "groups_key", token, users=[user_id],
+        )
+
+        defer.returnValue({})

     @defer.inlineCallbacks
     def accept_invite(self, group_id, user_id, content):
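
The newly implemented `join_group` branches on whether the group lives on this homeserver: local joins go straight to the groups server handler, while remote joins exchange attestations over federation before the membership is stored. A hedged sketch of that routing shape (stand-in functions, not synapse's real handler API):

    # Sketch: local-vs-remote routing keyed on the server part of a group ID.
    def get_domain_from_id(group_id):
        return group_id.split(":", 1)[1]

    def join_group(group_id, user_id, my_server="one.example"):
        if get_domain_from_id(group_id) == my_server:
            # local: no attestations needed
            return {"route": "local", "attestation": None}
        # remote: we attest that user_id really lives on my_server, and
        # expect the remote side to attest the membership back
        return {"route": "federation", "attestation": (group_id, user_id)}

    print(join_group("+cats:one.example", "@alice:one.example"))
    print(join_group("+dogs:two.example", "@alice:one.example"))
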
@@ -408,7 +450,7 @@ class GroupsLocalHandler(object):

         results = {}
         failed_results = []
-        for destination, dest_user_ids in destinations.iteritems():
+        for destination, dest_user_ids in iteritems(destinations):
             try:
                 r = yield self.transport_client.bulk_get_publicised_groups(
                     destination, list(dest_user_ids),
synapse/handlers/identity.py

@@ -15,6 +15,11 @@
 # limitations under the License.

 """Utilities for interacting with Identity Servers"""
+
+import logging
+
+import simplejson as json
+
 from twisted.internet import defer

 from synapse.api.errors import (

@@ -24,9 +29,6 @@ from ._base import BaseHandler
 from synapse.util.async import run_on_reactor
 from synapse.api.errors import SynapseError, Codes

-import json
-import logging
-
 logger = logging.getLogger(__name__)
synapse/handlers/initial_sync.py

@@ -27,7 +27,7 @@ from synapse.types import (
 from synapse.util import unwrapFirstError
 from synapse.util.async import concurrently_execute
 from synapse.util.caches.snapshot_cache import SnapshotCache
-from synapse.util.logcontext import make_deferred_yieldable, preserve_fn
+from synapse.util.logcontext import make_deferred_yieldable, run_in_background
 from synapse.visibility import filter_events_for_client

 from ._base import BaseHandler

@@ -166,7 +166,8 @@ class InitialSyncHandler(BaseHandler):
             (messages, token), current_state = yield make_deferred_yieldable(
                 defer.gatherResults(
                     [
-                        preserve_fn(self.store.get_recent_events_for_room)(
+                        run_in_background(
+                            self.store.get_recent_events_for_room,
                             event.room_id,
                             limit=limit,
                             end_token=room_end_token,

@@ -180,8 +181,8 @@ class InitialSyncHandler(BaseHandler):
                 self.store, user_id, messages
             )

-            start_token = now_token.copy_and_replace("room_key", token[0])
-            end_token = now_token.copy_and_replace("room_key", token[1])
+            start_token = now_token.copy_and_replace("room_key", token)
+            end_token = now_token.copy_and_replace("room_key", room_end_token)

             time_now = self.clock.time_msec()

             d["messages"] = {

@@ -324,8 +325,8 @@ class InitialSyncHandler(BaseHandler):
             self.store, user_id, messages, is_peeking=is_peeking
         )

-        start_token = StreamToken.START.copy_and_replace("room_key", token[0])
-        end_token = StreamToken.START.copy_and_replace("room_key", token[1])
+        start_token = StreamToken.START.copy_and_replace("room_key", token)
+        end_token = StreamToken.START.copy_and_replace("room_key", stream_token)

         time_now = self.clock.time_msec()

@@ -391,9 +392,10 @@ class InitialSyncHandler(BaseHandler):

         presence, receipts, (messages, token) = yield defer.gatherResults(
             [
-                preserve_fn(get_presence)(),
-                preserve_fn(get_receipts)(),
-                preserve_fn(self.store.get_recent_events_for_room)(
+                run_in_background(get_presence),
+                run_in_background(get_receipts),
+                run_in_background(
+                    self.store.get_recent_events_for_room,
                     room_id,
                     limit=limit,
                     end_token=now_token.room_key,

@@ -406,8 +408,8 @@ class InitialSyncHandler(BaseHandler):
             self.store, user_id, messages, is_peeking=is_peeking,
         )

-        start_token = now_token.copy_and_replace("room_key", token[0])
-        end_token = now_token.copy_and_replace("room_key", token[1])
+        start_token = now_token.copy_and_replace("room_key", token)
+        end_token = now_token

         time_now = self.clock.time_msec()
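
These hunks track a change in what `get_recent_events_for_room` returns: instead of a `(start, end)` token pair, it now appears to return a single start token, with the end of the window being whatever token the caller already had. A hedged sketch of the pagination-window idea (made-up token values, not synapse's real token format):

    # Sketch: a backwards window of recent events is described by a start
    # token (older edge, returned by the store) and an end token (newer
    # edge, which the caller already holds).
    def get_recent_events_for_room(room_id, limit, end_token):
        events = ["$e%d:hs" % i for i in range(limit)]
        start_token = "s%d" % (int(end_token[1:]) - limit)  # older edge
        return events, start_token

    now_token = "s100"
    messages, token = get_recent_events_for_room(
        "!room:hs", limit=10, end_token=now_token)

    start_token = token    # "s90": where to paginate further back from
    end_token = now_token  # "s100": the point the client is synced to
    print(start_token, end_token)
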
synapse/handlers/message.py

@@ -13,11 +13,23 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import logging
+import simplejson
+import sys
+
+from canonicaljson import encode_canonical_json
+import six
+from six import string_types, itervalues, iteritems
 from twisted.internet import defer, reactor
+from twisted.internet.defer import succeed
+from twisted.python.failure import Failure

-from synapse.api.constants import EventTypes, Membership
-from synapse.api.errors import AuthError, Codes, SynapseError
+from synapse.api.constants import EventTypes, Membership, MAX_DEPTH
+from synapse.api.errors import (
+    AuthError, Codes, SynapseError,
+    ConsentNotGivenError,
+)
+from synapse.api.urls import ConsentURIBuilder
 from synapse.crypto.event_signing import add_hashes_and_signatures
 from synapse.events.utils import serialize_event
 from synapse.events.validator import EventValidator

@@ -25,21 +37,15 @@ from synapse.types import (
     UserID, RoomAlias, RoomStreamToken,
 )
 from synapse.util.async import run_on_reactor, ReadWriteLock, Limiter
-from synapse.util.logcontext import preserve_fn, run_in_background
+from synapse.util.logcontext import run_in_background
 from synapse.util.metrics import measure_func
-from synapse.util.frozenutils import unfreeze
+from synapse.util.frozenutils import frozendict_json_encoder
 from synapse.util.stringutils import random_string
 from synapse.visibility import filter_events_for_client
 from synapse.replication.http.send_event import send_event_to_master

 from ._base import BaseHandler

-from canonicaljson import encode_canonical_json
-
-import logging
-import random
-import ujson
-
 logger = logging.getLogger(__name__)

@@ -86,14 +92,14 @@ class MessageHandler(BaseHandler):
         # map from purge id to PurgeStatus
         self._purges_by_id = {}

-    def start_purge_history(self, room_id, topological_ordering,
+    def start_purge_history(self, room_id, token,
                             delete_local_events=False):
         """Start off a history purge on a room.

         Args:
             room_id (str): The room to purge from

-            topological_ordering (int): minimum topo ordering to preserve
+            token (str): topological token to delete events before
             delete_local_events (bool): True to delete local events as well as
                 remote ones

@@ -115,19 +121,19 @@ class MessageHandler(BaseHandler):
         self._purges_by_id[purge_id] = PurgeStatus()
         run_in_background(
             self._purge_history,
-            purge_id, room_id, topological_ordering, delete_local_events,
+            purge_id, room_id, token, delete_local_events,
         )
         return purge_id

     @defer.inlineCallbacks
-    def _purge_history(self, purge_id, room_id, topological_ordering,
+    def _purge_history(self, purge_id, room_id, token,
                        delete_local_events):
         """Carry out a history purge on a room.

         Args:
             purge_id (str): The id for this purge
             room_id (str): The room to purge from
-            topological_ordering (int): minimum topo ordering to preserve
+            token (str): topological token to delete events before
             delete_local_events (bool): True to delete local events as well as
                 remote ones

@@ -138,7 +144,7 @@ class MessageHandler(BaseHandler):
         try:
             with (yield self.pagination_lock.write(room_id)):
                 yield self.store.purge_history(
-                    room_id, topological_ordering, delete_local_events,
+                    room_id, token, delete_local_events,
                 )
             logger.info("[purge] complete")
             self._purges_by_id[purge_id].status = PurgeStatus.STATUS_COMPLETE

@@ -397,7 +403,7 @@ class MessageHandler(BaseHandler):
                 "avatar_url": profile.avatar_url,
                 "display_name": profile.display_name,
             }
-            for user_id, profile in users_with_profile.iteritems()
+            for user_id, profile in iteritems(users_with_profile)
         })

@@ -431,9 +437,12 @@ class EventCreationHandler(object):

         self.spam_checker = hs.get_spam_checker()

+        if self.config.block_events_without_consent_error is not None:
+            self._consent_uri_builder = ConsentURIBuilder(self.config)
+
     @defer.inlineCallbacks
     def create_event(self, requester, event_dict, token_id=None, txn_id=None,
-                     prev_event_ids=None):
+                     prev_events_and_hashes=None):
         """
         Given a dict from a client, create a new event.

@@ -447,50 +456,136 @@ class EventCreationHandler(object):
             event_dict (dict): An entire event
             token_id (str)
             txn_id (str)
-            prev_event_ids (list): The prev event ids to use when creating the event
+
+            prev_events_and_hashes (list[(str, dict[str, str], int)]|None):
+                the forward extremities to use as the prev_events for the
+                new event. For each event, a tuple of (event_id, hashes, depth)
+                where *hashes* is a map from algorithm to hash.
+
+                If None, they will be requested from the database.

         Returns:
             Tuple of created event (FrozenEvent), Context
         """
         builder = self.event_builder_factory.new(event_dict)

-        with (yield self.limiter.queue(builder.room_id)):
-            self.validator.validate_new(builder)
-
-            if builder.type == EventTypes.Member:
-                membership = builder.content.get("membership", None)
-                target = UserID.from_string(builder.state_key)
-
-                if membership in {Membership.JOIN, Membership.INVITE}:
-                    # If event doesn't include a display name, add one.
-                    profile = self.profile_handler
-                    content = builder.content
-
-                    try:
-                        if "displayname" not in content:
-                            content["displayname"] = yield profile.get_displayname(target)
-                        if "avatar_url" not in content:
-                            content["avatar_url"] = yield profile.get_avatar_url(target)
-                    except Exception as e:
-                        logger.info(
-                            "Failed to get profile information for %r: %s",
-                            target, e
-                        )
-
-            if token_id is not None:
-                builder.internal_metadata.token_id = token_id
-
-            if txn_id is not None:
-                builder.internal_metadata.txn_id = txn_id
-
-            event, context = yield self.create_new_client_event(
-                builder=builder,
-                requester=requester,
-                prev_event_ids=prev_event_ids,
-            )
+        self.validator.validate_new(builder)
+
+        if builder.type == EventTypes.Member:
+            membership = builder.content.get("membership", None)
+            target = UserID.from_string(builder.state_key)
+
+            if membership in {Membership.JOIN, Membership.INVITE}:
+                # If event doesn't include a display name, add one.
+                profile = self.profile_handler
+                content = builder.content
+
+                try:
+                    if "displayname" not in content:
+                        content["displayname"] = yield profile.get_displayname(target)
+                    if "avatar_url" not in content:
+                        content["avatar_url"] = yield profile.get_avatar_url(target)
+                except Exception as e:
+                    logger.info(
+                        "Failed to get profile information for %r: %s",
+                        target, e
+                    )
+
+        is_exempt = yield self._is_exempt_from_privacy_policy(builder)
+        if not is_exempt:
+            yield self.assert_accepted_privacy_policy(requester)
+
+        if token_id is not None:
+            builder.internal_metadata.token_id = token_id
+
+        if txn_id is not None:
+            builder.internal_metadata.txn_id = txn_id
+
+        event, context = yield self.create_new_client_event(
+            builder=builder,
+            requester=requester,
+            prev_events_and_hashes=prev_events_and_hashes,
+        )

         defer.returnValue((event, context))

+    def _is_exempt_from_privacy_policy(self, builder):
+        """"Determine if an event to be sent is exempt from having to consent
+        to the privacy policy
+
+        Args:
+            builder (synapse.events.builder.EventBuilder): event being created
+
+        Returns:
+            Deferred[bool]: true if the event can be sent without the user
+                consenting
+        """
+        # the only thing the user can do is join the server notices room.
+        if builder.type == EventTypes.Member:
+            membership = builder.content.get("membership", None)
+            if membership == Membership.JOIN:
+                return self._is_server_notices_room(builder.room_id)
+        return succeed(False)
+
+    @defer.inlineCallbacks
+    def _is_server_notices_room(self, room_id):
+        if self.config.server_notices_mxid is None:
+            defer.returnValue(False)
+        user_ids = yield self.store.get_users_in_room(room_id)
+        defer.returnValue(self.config.server_notices_mxid in user_ids)
+
+    @defer.inlineCallbacks
+    def assert_accepted_privacy_policy(self, requester):
+        """Check if a user has accepted the privacy policy
+
+        Called when the given user is about to do something that requires
+        privacy consent. We see if the user is exempt and otherwise check that
+        they have given consent. If they have not, a ConsentNotGiven error is
+        raised.
+
+        Args:
+            requester (synapse.types.Requester):
+                The user making the request
+
+        Returns:
+            Deferred[None]: returns normally if the user has consented or is
+                exempt
+
+        Raises:
+            ConsentNotGivenError: if the user has not given consent yet
+        """
+        if self.config.block_events_without_consent_error is None:
+            return
+
+        # exempt AS users from needing consent
+        if requester.app_service is not None:
+            return
+
+        user_id = requester.user.to_string()
+
+        # exempt the system notices user
+        if (
+            self.config.server_notices_mxid is not None and
+            user_id == self.config.server_notices_mxid
+        ):
+            return
+
+        u = yield self.store.get_user_by_id(user_id)
+        assert u is not None
+        if u["consent_version"] == self.config.user_consent_version:
+            return
+
+        consent_uri = self._consent_uri_builder.build_user_consent_uri(
+            requester.user.localpart,
+        )
+        msg = self.config.block_events_without_consent_error % {
+            'consent_uri': consent_uri,
+        }
+        raise ConsentNotGivenError(
+            msg=msg,
+            consent_uri=consent_uri,
+        )
+
     @defer.inlineCallbacks
     def send_nonmember_event(self, requester, event, context, ratelimit=True):
         """
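
`assert_accepted_privacy_policy` short-circuits for exempt senders (application services and the server notices user) and only then compares the stored consent version against the configured one. A hedged sketch of the same gate, with plain dicts standing in for synapse's config object and storage layer, and an illustrative consent URI:

    # Sketch: the consent gate, simplified.
    config = {
        "block_events_without_consent_error":
            "Please accept the policy at %(consent_uri)s",
        "user_consent_version": "1.0",
        "server_notices_mxid": "@server:hs",
    }

    users = {"@alice:hs": {"consent_version": None}}

    class ConsentNotGivenError(Exception):
        pass

    def assert_accepted_privacy_policy(user_id, is_appservice=False):
        if config["block_events_without_consent_error"] is None:
            return                     # feature disabled
        if is_appservice or user_id == config["server_notices_mxid"]:
            return                     # exempt senders
        if users[user_id]["consent_version"] == config["user_consent_version"]:
            return                     # already consented
        consent_uri = "https://hs.example/consent?u=%s" % user_id  # illustrative
        raise ConsentNotGivenError(
            config["block_events_without_consent_error"] % {
                "consent_uri": consent_uri,
            }
        )

    try:
        assert_accepted_privacy_policy("@alice:hs")
    except ConsentNotGivenError as e:
        print(e)
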
@@ -557,64 +652,80 @@ class EventCreationHandler(object):

         See self.create_event and self.send_nonmember_event.
         """
-        event, context = yield self.create_event(
-            requester,
-            event_dict,
-            token_id=requester.access_token_id,
-            txn_id=txn_id
-        )
-
-        spam_error = self.spam_checker.check_event_for_spam(event)
-        if spam_error:
-            if not isinstance(spam_error, basestring):
-                spam_error = "Spam is not permitted here"
-            raise SynapseError(
-                403, spam_error, Codes.FORBIDDEN
-            )
-
-        yield self.send_nonmember_event(
-            requester,
-            event,
-            context,
-            ratelimit=ratelimit,
-        )
+        # We limit the number of concurrent event sends in a room so that we
+        # don't fork the DAG too much. If we don't limit then we can end up in
+        # a situation where event persistence can't keep up, causing
+        # extremities to pile up, which in turn leads to state resolution
+        # taking longer.
+        with (yield self.limiter.queue(event_dict["room_id"])):
+            event, context = yield self.create_event(
+                requester,
+                event_dict,
+                token_id=requester.access_token_id,
+                txn_id=txn_id
+            )
+
+            spam_error = self.spam_checker.check_event_for_spam(event)
+            if spam_error:
+                if not isinstance(spam_error, string_types):
+                    spam_error = "Spam is not permitted here"
+                raise SynapseError(
+                    403, spam_error, Codes.FORBIDDEN
+                )
+
+            yield self.send_nonmember_event(
+                requester,
+                event,
+                context,
+                ratelimit=ratelimit,
+            )
         defer.returnValue(event)

     @measure_func("create_new_client_event")
     @defer.inlineCallbacks
-    def create_new_client_event(self, builder, requester=None, prev_event_ids=None):
-        if prev_event_ids:
-            prev_events = yield self.store.add_event_hashes(prev_event_ids)
-            prev_max_depth = yield self.store.get_max_depth_of_events(prev_event_ids)
-            depth = prev_max_depth + 1
-        else:
-            latest_ret = yield self.store.get_latest_event_ids_and_hashes_in_room(
-                builder.room_id,
-            )
+    def create_new_client_event(self, builder, requester=None,
+                                prev_events_and_hashes=None):
+        """Create a new event for a local client
+
+        Args:
+            builder (EventBuilder):
+
+            requester (synapse.types.Requester|None):
+
+            prev_events_and_hashes (list[(str, dict[str, str], int)]|None):
+                the forward extremities to use as the prev_events for the
+                new event. For each event, a tuple of (event_id, hashes, depth)
+                where *hashes* is a map from algorithm to hash.
+
+                If None, they will be requested from the database.
+
+        Returns:
+            Deferred[(synapse.events.EventBase, synapse.events.snapshot.EventContext)]
+        """
+
+        if prev_events_and_hashes is not None:
+            assert len(prev_events_and_hashes) <= 10, \
+                "Attempting to create an event with %i prev_events" % (
+                    len(prev_events_and_hashes),
+            )
+        else:
+            prev_events_and_hashes = \
+                yield self.store.get_prev_events_for_room(builder.room_id)

-            # We want to limit the max number of prev events we point to in our
-            # new event
-            if len(latest_ret) > 10:
-                # Sort by reverse depth, so we point to the most recent.
-                latest_ret.sort(key=lambda a: -a[2])
-                new_latest_ret = latest_ret[:5]
-
-                # We also randomly point to some of the older events, to make
-                # sure that we don't completely ignore the older events.
-                if latest_ret[5:]:
-                    sample_size = min(5, len(latest_ret[5:]))
-                    new_latest_ret.extend(random.sample(latest_ret[5:], sample_size))
-                latest_ret = new_latest_ret
-
-            if latest_ret:
-                depth = max([d for _, _, d in latest_ret]) + 1
-            else:
-                depth = 1
+        if prev_events_and_hashes:
+            depth = max([d for _, _, d in prev_events_and_hashes]) + 1
+            # we cap depth of generated events, to ensure that they are not
+            # rejected by other servers (and so that they can be persisted in
+            # the db)
+            depth = min(depth, MAX_DEPTH)
+        else:
+            depth = 1

-            prev_events = [
-                (event_id, prev_hashes)
-                for event_id, prev_hashes, _ in latest_ret
-            ]
+        prev_events = [
+            (event_id, prev_hashes)
+            for event_id, prev_hashes, _ in prev_events_and_hashes
+        ]

         builder.prev_events = prev_events
         builder.depth = depth
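
The new code derives the event's depth as one more than the deepest prev event, then clamps it to `MAX_DEPTH` so generated events stay persistable and acceptable to other servers. A tiny worked example of that arithmetic, with illustrative tuples in the documented `(event_id, hashes, depth)` shape:

    # Sketch: depth computation over forward extremities, with the cap.
    MAX_DEPTH = 2 ** 63 - 1  # illustrative bound; the real constant lives
                             # in synapse.api.constants

    prev_events_and_hashes = [
        ("$a:hs", {"sha256": "..."}, 41),
        ("$b:hs", {"sha256": "..."}, 44),
    ]

    if prev_events_and_hashes:
        depth = max(d for _, _, d in prev_events_and_hashes) + 1  # 45
        depth = min(depth, MAX_DEPTH)  # clamp runaway depths
    else:
        depth = 1  # first event in the room

    prev_events = [(eid, hashes) for eid, hashes, _ in prev_events_and_hashes]
    print(depth, prev_events)
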
@@ -678,8 +789,8 @@ class EventCreationHandler(object):

         # Ensure that we can round trip before trying to persist in db
         try:
-            dump = ujson.dumps(unfreeze(event.content))
-            ujson.loads(dump)
+            dump = frozendict_json_encoder.encode(event.content)
+            simplejson.loads(dump)
         except Exception:
             logger.exception("Failed to encode content: %r", event.content)
             raise
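
The round-trip check moves from `ujson` plus an explicit `unfreeze` to a frozendict-aware `simplejson` encoder, so frozen event content can be serialised without copying it first. A hedged sketch of such an encoder (the real one lives in `synapse.util.frozenutils`; `frozendict` here is the PyPI package synapse used at the time):

    # Sketch: a JSONEncoder that can serialise frozendicts, so frozen
    # event content round-trips without unfreezing.
    import simplejson
    from frozendict import frozendict

    class FrozendictEncoder(simplejson.JSONEncoder):
        def default(self, obj):
            if isinstance(obj, frozendict):
                # expose the mapping to the normal encoding machinery
                return dict(obj)
            return super(FrozendictEncoder, self).default(obj)

    frozendict_json_encoder = FrozendictEncoder()

    content = frozendict({"body": "hello", "msgtype": "m.text"})
    dump = frozendict_json_encoder.encode(content)
    assert simplejson.loads(dump)["body"] == "hello"
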
@@ -713,8 +824,14 @@ class EventCreationHandler(object):
         except:  # noqa: E722, as we reraise the exception this is fine.
             # Ensure that we actually remove the entries in the push actions
             # staging area, if we calculated them.
-            preserve_fn(self.store.remove_push_actions_from_staging)(event.event_id)
-            raise
+            tp, value, tb = sys.exc_info()
+
+            run_in_background(
+                self.store.remove_push_actions_from_staging,
+                event.event_id,
+            )
+
+            six.reraise(tp, value, tb)

     @defer.inlineCallbacks
     def persist_and_notify_client_event(

@@ -765,7 +882,7 @@ class EventCreationHandler(object):

         state_to_include_ids = [
             e_id
-            for k, e_id in context.current_state_ids.iteritems()
+            for k, e_id in iteritems(context.current_state_ids)
             if k[0] in self.hs.config.room_invite_state_types
             or k == (EventTypes.Member, event.sender)
         ]

@@ -779,7 +896,7 @@ class EventCreationHandler(object):
                 "content": e.content,
                 "sender": e.sender,
             }
-            for e in state_to_include.itervalues()
+            for e in itervalues(state_to_include)
         ]

         invitee = UserID.from_string(event.state_key)

@@ -834,22 +951,33 @@ class EventCreationHandler(object):

         # this intentionally does not yield: we don't care about the result
         # and don't need to wait for it.
-        preserve_fn(self.pusher_pool.on_new_notifications)(
+        run_in_background(
+            self.pusher_pool.on_new_notifications,
             event_stream_id, max_stream_id
         )

         @defer.inlineCallbacks
         def _notify():
             yield run_on_reactor()
-            self.notifier.on_new_room_event(
-                event, event_stream_id, max_stream_id,
-                extra_users=extra_users
-            )
+            try:
+                self.notifier.on_new_room_event(
+                    event, event_stream_id, max_stream_id,
+                    extra_users=extra_users
+                )
+            except Exception:
+                logger.exception("Error notifying about new room event")

-        preserve_fn(_notify)()
+        run_in_background(_notify)

         if event.type == EventTypes.Message:
-            presence = self.hs.get_presence_handler()
             # We don't want to block sending messages on any presence code. This
             # matters as sometimes presence code can take a while.
-            preserve_fn(presence.bump_presence_active_time)(requester.user)
+            run_in_background(self._bump_active_time, requester.user)
+
+    @defer.inlineCallbacks
+    def _bump_active_time(self, user):
+        try:
+            presence = self.hs.get_presence_handler()
+            yield presence.bump_presence_active_time(user)
+        except Exception:
+            logger.exception("Error bumping presence active time")
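
A pattern repeated across this commit: anything launched with `run_in_background` gets its own try/except that logs, because nothing ever waits on the returned Deferred to observe a failure. A minimal sketch of that wrapper shape using plain Twisted (the names mirror the diff; the bodies are stand-ins, and synapse's real `run_in_background` also preserves the log context):

    # Sketch: fire-and-forget tasks must swallow-and-log their own errors,
    # since no caller ever yields on them.
    import logging

    from twisted.internet import defer

    logger = logging.getLogger(__name__)

    @defer.inlineCallbacks
    def _bump_active_time(user):
        try:
            # stand-in for presence.bump_presence_active_time(user)
            yield defer.succeed(None)
        except Exception:
            # without this, the failure would only surface as an
            # unhandled Deferred error long after the fact
            logger.exception("Error bumping presence active time")

    def run_in_background(f, *args, **kwargs):
        # simplified: no logcontext handling here
        return defer.maybeDeferred(f, *args, **kwargs)

    run_in_background(_bump_active_time, "@alice:hs")
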
synapse/handlers/presence.py

@@ -25,38 +25,42 @@ The methods that define policy are:
 from twisted.internet import defer, reactor
 from contextlib import contextmanager

+from six import itervalues, iteritems
+
 from synapse.api.errors import SynapseError
 from synapse.api.constants import PresenceState
 from synapse.storage.presence import UserPresenceState

 from synapse.util.caches.descriptors import cachedInlineCallbacks
 from synapse.util.async import Linearizer
-from synapse.util.logcontext import preserve_fn
+from synapse.util.logcontext import run_in_background
 from synapse.util.logutils import log_function
 from synapse.util.metrics import Measure
 from synapse.util.wheel_timer import WheelTimer
 from synapse.types import UserID, get_domain_from_id
-import synapse.metrics
+from synapse.metrics import LaterGauge

 import logging

+from prometheus_client import Counter
+
 logger = logging.getLogger(__name__)

-metrics = synapse.metrics.get_metrics_for(__name__)
-
-notified_presence_counter = metrics.register_counter("notified_presence")
-federation_presence_out_counter = metrics.register_counter("federation_presence_out")
-presence_updates_counter = metrics.register_counter("presence_updates")
-timers_fired_counter = metrics.register_counter("timers_fired")
-federation_presence_counter = metrics.register_counter("federation_presence")
-bump_active_time_counter = metrics.register_counter("bump_active_time")
+notified_presence_counter = Counter("synapse_handler_presence_notified_presence", "")
+federation_presence_out_counter = Counter(
+    "synapse_handler_presence_federation_presence_out", "")
+presence_updates_counter = Counter("synapse_handler_presence_presence_updates", "")
+timers_fired_counter = Counter("synapse_handler_presence_timers_fired", "")
+federation_presence_counter = Counter("synapse_handler_presence_federation_presence", "")
+bump_active_time_counter = Counter("synapse_handler_presence_bump_active_time", "")

-get_updates_counter = metrics.register_counter("get_updates", labels=["type"])
+get_updates_counter = Counter("synapse_handler_presence_get_updates", "", ["type"])

-notify_reason_counter = metrics.register_counter("notify_reason", labels=["reason"])
-state_transition_counter = metrics.register_counter(
-    "state_transition", labels=["from", "to"]
+notify_reason_counter = Counter(
+    "synapse_handler_presence_notify_reason", "", ["reason"])
+state_transition_counter = Counter(
+    "synapse_handler_presence_state_transition", "", ["from", "to"]
 )

@@ -87,6 +91,11 @@ assert LAST_ACTIVE_GRANULARITY < IDLE_TIMER
 class PresenceHandler(object):

     def __init__(self, hs):
+        """
+
+        Args:
+            hs (synapse.server.HomeServer):
+        """
         self.is_mine = hs.is_mine
         self.is_mine_id = hs.is_mine_id
         self.clock = hs.get_clock()

@@ -94,7 +103,6 @@ class PresenceHandler(object):
         self.wheel_timer = WheelTimer()
         self.notifier = hs.get_notifier()
         self.federation = hs.get_federation_sender()
-
         self.state = hs.get_state_handler()

         federation_registry = hs.get_federation_registry()

@@ -137,8 +145,9 @@ class PresenceHandler(object):
             for state in active_presence
         }

-        metrics.register_callback(
-            "user_to_current_state_size", lambda: len(self.user_to_current_state)
+        LaterGauge(
+            "synapse_handlers_presence_user_to_current_state_size", "", [],
+            lambda: len(self.user_to_current_state)
         )

         now = self.clock.time_msec()

@@ -208,7 +217,8 @@ class PresenceHandler(object):
             60 * 1000,
         )

-        metrics.register_callback("wheel_timer_size", lambda: len(self.wheel_timer))
+        LaterGauge("synapse_handlers_presence_wheel_timer_size", "", [],
+                   lambda: len(self.wheel_timer))

     @defer.inlineCallbacks
     def _on_shutdown(self):

@@ -254,6 +264,14 @@ class PresenceHandler(object):

         logger.info("Finished _persist_unpersisted_changes")

+    @defer.inlineCallbacks
+    def _update_states_and_catch_exception(self, new_states):
+        try:
+            res = yield self._update_states(new_states)
+            defer.returnValue(res)
+        except Exception:
+            logger.exception("Error updating presence")
+
     @defer.inlineCallbacks
     def _update_states(self, new_states):
         """Updates presence of users. Sets the appropriate timeouts. Pokes

@@ -303,10 +321,10 @@ class PresenceHandler(object):

         # TODO: We should probably ensure there are no races hereafter

-        presence_updates_counter.inc_by(len(new_states))
+        presence_updates_counter.inc(len(new_states))

         if to_notify:
-            notified_presence_counter.inc_by(len(to_notify))
+            notified_presence_counter.inc(len(to_notify))
             yield self._persist_and_notify(to_notify.values())

         self.unpersisted_users_changes |= set(s.user_id for s in new_states)

@@ -317,7 +335,7 @@ class PresenceHandler(object):
             if user_id not in to_notify
         }
         if to_federation_ping:
-            federation_presence_out_counter.inc_by(len(to_federation_ping))
+            federation_presence_out_counter.inc(len(to_federation_ping))

             self._push_to_remotes(to_federation_ping.values())

@@ -355,7 +373,7 @@ class PresenceHandler(object):
                 for user_id in users_to_check
             ]

-            timers_fired_counter.inc_by(len(states))
+            timers_fired_counter.inc(len(states))

             changes = handle_timeouts(
                 states,

@@ -364,7 +382,7 @@ class PresenceHandler(object):
                 now=now,
             )

-            preserve_fn(self._update_states)(changes)
+            run_in_background(self._update_states_and_catch_exception, changes)
         except Exception:
             logger.exception("Exception in _handle_timeouts loop")

@@ -422,20 +440,23 @@ class PresenceHandler(object):

         @defer.inlineCallbacks
         def _end():
-            if affect_presence:
+            try:
                 self.user_to_num_current_syncs[user_id] -= 1

                 prev_state = yield self.current_state_for_user(user_id)
                 yield self._update_states([prev_state.copy_and_replace(
                     last_user_sync_ts=self.clock.time_msec(),
                 )])
+            except Exception:
+                logger.exception("Error updating presence after sync")

         @contextmanager
         def _user_syncing():
             try:
                 yield
             finally:
-                preserve_fn(_end)()
+                if affect_presence:
+                    run_in_background(_end)

         defer.returnValue(_user_syncing())

@@ -452,61 +473,6 @@ class PresenceHandler(object):
             syncing_user_ids.update(user_ids)
         return syncing_user_ids

-    @defer.inlineCallbacks
-    def update_external_syncs(self, process_id, syncing_user_ids):
-        """Update the syncing users for an external process
-
-        Args:
-            process_id(str): An identifier for the process the users are
-                syncing against. This allows synapse to process updates
-                as user start and stop syncing against a given process.
-            syncing_user_ids(set(str)): The set of user_ids that are
-                currently syncing on that server.
-        """
-
-        # Grab the previous list of user_ids that were syncing on that process
-        prev_syncing_user_ids = (
-            self.external_process_to_current_syncs.get(process_id, set())
-        )
-        # Grab the current presence state for both the users that are syncing
-        # now and the users that were syncing before this update.
-        prev_states = yield self.current_state_for_users(
-            syncing_user_ids | prev_syncing_user_ids
-        )
-        updates = []
-        time_now_ms = self.clock.time_msec()
-
-        # For each new user that is syncing check if we need to mark them as
-        # being online.
-        for new_user_id in syncing_user_ids - prev_syncing_user_ids:
-            prev_state = prev_states[new_user_id]
-            if prev_state.state == PresenceState.OFFLINE:
-                updates.append(prev_state.copy_and_replace(
-                    state=PresenceState.ONLINE,
-                    last_active_ts=time_now_ms,
-                    last_user_sync_ts=time_now_ms,
-                ))
-            else:
-                updates.append(prev_state.copy_and_replace(
-                    last_user_sync_ts=time_now_ms,
-                ))
-
-        # For each user that is still syncing or stopped syncing update the
-        # last sync time so that we will correctly apply the grace period when
-        # they stop syncing.
-        for old_user_id in prev_syncing_user_ids:
-            prev_state = prev_states[old_user_id]
-            updates.append(prev_state.copy_and_replace(
-                last_user_sync_ts=time_now_ms,
-            ))
-
-        yield self._update_states(updates)
-
-        # Update the last updated time for the process. We expire the entries
-        # if we don't receive an update in the given timeframe.
-        self.external_process_last_updated_ms[process_id] = self.clock.time_msec()
-        self.external_process_to_current_syncs[process_id] = syncing_user_ids
-
     @defer.inlineCallbacks
     def update_external_syncs_row(self, process_id, user_id, is_syncing, sync_time_msec):
         """Update the syncing users for an external process as a delta.

@@ -570,7 +536,7 @@ class PresenceHandler(object):
             prev_state.copy_and_replace(
                 last_user_sync_ts=time_now_ms,
             )
-            for prev_state in prev_states.itervalues()
+            for prev_state in itervalues(prev_states)
         ])
         self.external_process_last_updated_ms.pop(process_id, None)

@@ -593,14 +559,14 @@ class PresenceHandler(object):
             for user_id in user_ids
         }

-        missing = [user_id for user_id, state in states.iteritems() if not state]
+        missing = [user_id for user_id, state in iteritems(states) if not state]
         if missing:
             # There are things not in our in memory cache. Lets pull them out of
             # the database.
             res = yield self.store.get_presence_for_users(missing)
             states.update(res)

-            missing = [user_id for user_id, state in states.iteritems() if not state]
+            missing = [user_id for user_id, state in iteritems(states) if not state]
             if missing:
                 new = {
                     user_id: UserPresenceState.default(user_id)

@@ -696,7 +662,7 @@ class PresenceHandler(object):
                 updates.append(prev_state.copy_and_replace(**new_fields))

         if updates:
-            federation_presence_counter.inc_by(len(updates))
+            federation_presence_counter.inc(len(updates))
             yield self._update_states(updates)

     @defer.inlineCallbacks

@@ -971,28 +937,28 @@ def should_notify(old_state, new_state):
         return False

     if old_state.status_msg != new_state.status_msg:
-        notify_reason_counter.inc("status_msg_change")
+        notify_reason_counter.labels("status_msg_change").inc()
         return True

     if old_state.state != new_state.state:
-        notify_reason_counter.inc("state_change")
-        state_transition_counter.inc(old_state.state, new_state.state)
+        notify_reason_counter.labels("state_change").inc()
+        state_transition_counter.labels(old_state.state, new_state.state).inc()
         return True

     if old_state.state == PresenceState.ONLINE:
         if new_state.currently_active != old_state.currently_active:
-            notify_reason_counter.inc("current_active_change")
+            notify_reason_counter.labels("current_active_change").inc()
             return True

         if new_state.last_active_ts - old_state.last_active_ts > LAST_ACTIVE_GRANULARITY:
             # Only notify about last active bumps if we're not currently acive
             if not new_state.currently_active:
-                notify_reason_counter.inc("last_active_change_online")
+                notify_reason_counter.labels("last_active_change_online").inc()
                 return True

     elif new_state.last_active_ts - old_state.last_active_ts > LAST_ACTIVE_GRANULARITY:
         # Always notify for a transition where last active gets bumped.
-        notify_reason_counter.inc("last_active_change_not_online")
+        notify_reason_counter.labels("last_active_change_not_online").inc()
         return True

     return False

@@ -1066,14 +1032,14 @@ class PresenceEventSource(object):
             if changed is not None and len(changed) < 500:
                 # For small deltas, its quicker to get all changes and then
                 # work out if we share a room or they're in our presence list
-                get_updates_counter.inc("stream")
+                get_updates_counter.labels("stream").inc()
                 for other_user_id in changed:
                     if other_user_id in users_interested_in:
                         user_ids_changed.add(other_user_id)
             else:
                 # Too many possible updates. Find all users we can see and check
                 # if any of them have changed.
-                get_updates_counter.inc("full")
+                get_updates_counter.labels("full").inc()

                 if from_key:
                     user_ids_changed = stream_change_cache.get_entities_changed(

@@ -1088,7 +1054,7 @@ class PresenceEventSource(object):
             defer.returnValue((updates.values(), max_token))
         else:
             defer.returnValue(([
-                s for s in updates.itervalues()
+                s for s in itervalues(updates)
                 if s.state != PresenceState.OFFLINE
             ], max_token))

@@ -1345,11 +1311,11 @@ def get_interested_remotes(store, states, state_handler):
     # hosts in those rooms.
     room_ids_to_states, users_to_states = yield get_interested_parties(store, states)

-    for room_id, states in room_ids_to_states.iteritems():
+    for room_id, states in iteritems(room_ids_to_states):
         hosts = yield state_handler.get_current_hosts_in_room(room_id)
         hosts_and_states.append((hosts, states))

-    for user_id, states in users_to_states.iteritems():
+    for user_id, states in iteritems(users_to_states):
         host = get_domain_from_id(user_id)
         hosts_and_states.append(([host], states))
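
The metrics rewrite in the presence handler above moves from synapse's in-house registry (`metrics.register_counter`, `inc_by`, `inc(label)`) to `prometheus_client`, where label values are bound with `.labels(...)` before `.inc()`, and callback-driven gauges become `LaterGauge`. A short sketch of the labelled-counter half, using only the real `prometheus_client` API:

    # Sketch: labelled counters in prometheus_client, mirroring the calls
    # that replace notify_reason_counter.inc("state_change") etc.
    from prometheus_client import Counter

    notify_reason_counter = Counter(
        "synapse_handler_presence_notify_reason", "", ["reason"])

    # old registry call:      notify_reason_counter.inc("state_change")
    # prometheus_client call:
    notify_reason_counter.labels("state_change").inc()

    # unlabelled counters take an optional increment amount, which is
    # why inc_by(n) collapses to inc(n)
    presence_updates_counter = Counter(
        "synapse_handler_presence_presence_updates", "")
    presence_updates_counter.inc(5)
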
synapse/handlers/receipts.py

@@ -135,37 +135,40 @@ class ReceiptsHandler(BaseHandler):
         """Given a list of receipts, works out which remote servers should be
         poked and pokes them.
         """
-        # TODO: Some of this stuff should be coallesced.
-        for receipt in receipts:
-            room_id = receipt["room_id"]
-            receipt_type = receipt["receipt_type"]
-            user_id = receipt["user_id"]
-            event_ids = receipt["event_ids"]
-            data = receipt["data"]
-
-            users = yield self.state.get_current_user_in_room(room_id)
-            remotedomains = set(get_domain_from_id(u) for u in users)
-            remotedomains = remotedomains.copy()
-            remotedomains.discard(self.server_name)
-
-            logger.debug("Sending receipt to: %r", remotedomains)
-
-            for domain in remotedomains:
-                self.federation.send_edu(
-                    destination=domain,
-                    edu_type="m.receipt",
-                    content={
-                        room_id: {
-                            receipt_type: {
-                                user_id: {
-                                    "event_ids": event_ids,
-                                    "data": data,
-                                }
-                            }
-                        }
-                    },
-                    key=(room_id, receipt_type, user_id),
-                )
+        try:
+            # TODO: Some of this stuff should be coallesced.
+            for receipt in receipts:
+                room_id = receipt["room_id"]
+                receipt_type = receipt["receipt_type"]
+                user_id = receipt["user_id"]
+                event_ids = receipt["event_ids"]
+                data = receipt["data"]
+
+                users = yield self.state.get_current_user_in_room(room_id)
+                remotedomains = set(get_domain_from_id(u) for u in users)
+                remotedomains = remotedomains.copy()
+                remotedomains.discard(self.server_name)
+
+                logger.debug("Sending receipt to: %r", remotedomains)
+
+                for domain in remotedomains:
+                    self.federation.send_edu(
+                        destination=domain,
+                        edu_type="m.receipt",
+                        content={
+                            room_id: {
+                                receipt_type: {
+                                    user_id: {
+                                        "event_ids": event_ids,
+                                        "data": data,
+                                    }
+                                }
+                            }
+                        },
+                        key=(room_id, receipt_type, user_id),
+                    )
+        except Exception:
+            logger.exception("Error pushing receipts to remote servers")

     @defer.inlineCallbacks
     def get_receipts_for_room(self, room_id, to_key):
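
The EDU body built above nests room, receipt type, and user, which lets a single `m.receipt` EDU carry many receipts at once. A tiny sketch of assembling and reading that structure (illustrative IDs):

    # Sketch: the nested m.receipt EDU content, room -> type -> user.
    def make_receipt_content(room_id, receipt_type, user_id, event_ids, data):
        return {
            room_id: {
                receipt_type: {
                    user_id: {
                        "event_ids": event_ids,
                        "data": data,
                    }
                }
            }
        }

    content = make_receipt_content(
        "!room:hs", "m.read", "@alice:hs", ["$evt:hs"], {"ts": 1527000000000},
    )

    # a receiving server walks the same nesting back out
    for room_id, by_type in content.items():
        for receipt_type, by_user in by_type.items():
            for user_id, body in by_user.items():
                print(room_id, receipt_type, user_id, body["event_ids"])
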
synapse/handlers/register.py
@@ -23,8 +23,8 @@ from synapse.api.errors import (
 )
 from synapse.http.client import CaptchaServerHttpClient
 from synapse import types
-from synapse.types import UserID
-from synapse.util.async import run_on_reactor
+from synapse.types import UserID, create_requester, RoomID, RoomAlias
+from synapse.util.async import run_on_reactor, Linearizer
 from synapse.util.threepids import check_3pid_allowed
 from ._base import BaseHandler
 
@@ -34,6 +34,11 @@ logger = logging.getLogger(__name__)
 class RegistrationHandler(BaseHandler):
 
     def __init__(self, hs):
+        """
+
+        Args:
+            hs (synapse.server.HomeServer):
+        """
         super(RegistrationHandler, self).__init__(hs)
 
         self.auth = hs.get_auth()
@@ -46,6 +51,11 @@ class RegistrationHandler(BaseHandler):
 
         self.macaroon_gen = hs.get_macaroon_generator()
 
+        self._generate_user_id_linearizer = Linearizer(
+            name="_generate_user_id_linearizer",
+        )
+        self._server_notices_mxid = hs.config.server_notices_mxid
+
     @defer.inlineCallbacks
     def check_username(self, localpart, guest_access_token=None,
                        assigned_user_id=None):
@@ -201,10 +211,17 @@ class RegistrationHandler(BaseHandler):
             token = None
             attempts += 1
 
+        # auto-join the user to any rooms we're supposed to dump them into
+        fake_requester = create_requester(user_id)
+        for r in self.hs.config.auto_join_rooms:
+            try:
+                yield self._join_user_to_room(fake_requester, r)
+            except Exception as e:
+                logger.error("Failed to join new user to %r: %r", r, e)
+
         # We used to generate default identicons here, but nowadays
         # we want clients to generate their own as part of their branding
         # rather than there being consistent matrix-wide ones, so we don't.
-
         defer.returnValue((user_id, token))
 
     @defer.inlineCallbacks
@@ -327,6 +344,14 @@ class RegistrationHandler(BaseHandler):
             yield identity_handler.bind_threepid(c, user_id)
 
     def check_user_id_not_appservice_exclusive(self, user_id, allowed_appservice=None):
+        # don't allow people to register the server notices mxid
+        if self._server_notices_mxid is not None:
+            if user_id == self._server_notices_mxid:
+                raise SynapseError(
+                    400, "This user ID is reserved.",
+                    errcode=Codes.EXCLUSIVE
+                )
+
         # valid user IDs must not clash with any user ID namespaces claimed by
         # application services.
         services = self.store.get_app_services()
@@ -345,9 +370,11 @@ class RegistrationHandler(BaseHandler):
     @defer.inlineCallbacks
     def _generate_user_id(self, reseed=False):
-        if reseed or self._next_generated_user_id is None:
-            self._next_generated_user_id = (
-                yield self.store.find_next_generated_user_id_localpart()
-            )
+        with (yield self._generate_user_id_linearizer.queue(())):
+            if reseed or self._next_generated_user_id is None:
+                self._next_generated_user_id = (
+                    yield self.store.find_next_generated_user_id_localpart()
+                )
 
         id = self._next_generated_user_id
         self._next_generated_user_id += 1
@@ -477,3 +504,28 @@ class RegistrationHandler(BaseHandler):
         )
 
         defer.returnValue((user_id, access_token))
+
+    @defer.inlineCallbacks
+    def _join_user_to_room(self, requester, room_identifier):
+        room_id = None
+        room_member_handler = self.hs.get_room_member_handler()
+        if RoomID.is_valid(room_identifier):
+            room_id = room_identifier
+        elif RoomAlias.is_valid(room_identifier):
+            room_alias = RoomAlias.from_string(room_identifier)
+            room_id, remote_room_hosts = (
+                yield room_member_handler.lookup_room_alias(room_alias)
+            )
+            room_id = room_id.to_string()
+        else:
+            raise SynapseError(400, "%s was not legal room ID or room alias" % (
+                room_identifier,
+            ))
+
+        yield room_member_handler.update_membership(
+            requester=requester,
+            target=requester.user,
+            room_id=room_id,
+            remote_room_hosts=remote_room_hosts,
+            action="join",
+        )
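The Linearizer added to _generate_user_id serializes concurrent registrations so that no two of them can be handed the same generated localpart. A minimal sketch of the same idea using only the standard library (synapse's Linearizer is Deferred-based; a plain lock stands in for it here):

import threading

class UserIdAllocator(object):
    """Hands out monotonically increasing numeric localparts.

    The lock plays the role of synapse's Linearizer: concurrent callers
    are serialized, so none of them can observe the same counter value.
    """
    def __init__(self, first_id=1):
        self._lock = threading.Lock()
        self._next_id = first_id

    def allocate(self):
        with self._lock:
            allocated = self._next_id
            self._next_id += 1
            return str(allocated)

allocator = UserIdAllocator()
print(allocator.allocate())  # "1"
print(allocator.allocate())  # "2"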
synapse/handlers/room.py
@@ -68,14 +68,27 @@ class RoomCreationHandler(BaseHandler):
         self.event_creation_handler = hs.get_event_creation_handler()
 
     @defer.inlineCallbacks
-    def create_room(self, requester, config, ratelimit=True):
+    def create_room(self, requester, config, ratelimit=True,
+                    creator_join_profile=None):
         """ Creates a new room.
 
         Args:
-            requester (Requester): The user who requested the room creation.
+            requester (synapse.types.Requester):
+                The user who requested the room creation.
             config (dict) : A dict of configuration options.
             ratelimit (bool): set to False to disable the rate limiter
+
+            creator_join_profile (dict|None):
+                Set to override the displayname and avatar for the creating
+                user in this room. If unset, displayname and avatar will be
+                derived from the user's profile. If set, should contain the
+                values to go in the body of the 'join' event (typically
+                `avatar_url` and/or `displayname`.
+
         Returns:
-            The new room ID.
+            Deferred[dict]:
+                a dict containing the keys `room_id` and, if an alias was
+                requested, `room_alias`.
         Raises:
             SynapseError if the room ID couldn't be stored, or something went
             horribly wrong.
@@ -113,6 +126,10 @@ class RoomCreationHandler(BaseHandler):
         except Exception:
             raise SynapseError(400, "Invalid user_id: %s" % (i,))
 
+        yield self.event_creation_handler.assert_accepted_privacy_policy(
+            requester,
+        )
+
         invite_3pid_list = config.get("invite_3pid", [])
 
         visibility = config.get("visibility", None)
@@ -176,7 +193,8 @@ class RoomCreationHandler(BaseHandler):
             initial_state=initial_state,
             creation_content=creation_content,
             room_alias=room_alias,
-            power_level_content_override=config.get("power_level_content_override", {})
+            power_level_content_override=config.get("power_level_content_override", {}),
+            creator_join_profile=creator_join_profile,
         )
 
         if "name" in config:
@@ -256,6 +274,7 @@ class RoomCreationHandler(BaseHandler):
             creation_content,
             room_alias,
             power_level_content_override,
+            creator_join_profile,
     ):
         def create(etype, content, **kwargs):
             e = {
@@ -299,6 +318,7 @@ class RoomCreationHandler(BaseHandler):
             room_id,
             "join",
             ratelimit=False,
+            content=creator_join_profile,
         )
 
         # We treat the power levels override specially as this needs to be one
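creator_join_profile ends up as the content of the creator's join event; when it is unset, the displayname and avatar are derived from the user's profile, as the new docstring says. A sketch of that fallback, using a hypothetical helper that is not part of synapse:

def join_event_content(derived_profile, creator_join_profile=None):
    """Pick the content for the creating user's m.room.member join event.

    creator_join_profile, when given, wins outright; otherwise we fall
    back to the displayname/avatar derived from the user's profile.
    """
    if creator_join_profile is not None:
        return dict(creator_join_profile, membership="join")
    return {
        "membership": "join",
        "displayname": derived_profile.get("displayname"),
        "avatar_url": derived_profile.get("avatar_url"),
    }

print(join_event_content(
    {"displayname": "Alice", "avatar_url": "mxc://example.org/abc"},
    creator_join_profile={"displayname": "Server Notices"},
))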
synapse/handlers/room_list.py
@@ -15,12 +15,13 @@
 
 from twisted.internet import defer
 
+from six.moves import range
+
 from ._base import BaseHandler
 
 from synapse.api.constants import (
     EventTypes, JoinRules,
 )
-from synapse.util.logcontext import make_deferred_yieldable, preserve_fn
 from synapse.util.async import concurrently_execute
 from synapse.util.caches.descriptors import cachedInlineCallbacks
 from synapse.util.caches.response_cache import ResponseCache
@@ -44,8 +45,9 @@ EMTPY_THIRD_PARTY_ID = ThirdPartyInstanceID(None, None)
 class RoomListHandler(BaseHandler):
     def __init__(self, hs):
         super(RoomListHandler, self).__init__(hs)
-        self.response_cache = ResponseCache(hs)
-        self.remote_response_cache = ResponseCache(hs, timeout_ms=30 * 1000)
+        self.response_cache = ResponseCache(hs, "room_list")
+        self.remote_response_cache = ResponseCache(hs, "remote_room_list",
+                                                   timeout_ms=30 * 1000)
 
     def get_local_public_room_list(self, limit=None, since_token=None,
                                    search_filter=None,
@@ -77,18 +79,11 @@ class RoomListHandler(BaseHandler):
         )
 
         key = (limit, since_token, network_tuple)
-        result = self.response_cache.get(key)
-        if not result:
-            logger.info("No cached result, calculating one.")
-            result = self.response_cache.set(
-                key,
-                preserve_fn(self._get_public_room_list)(
-                    limit, since_token, network_tuple=network_tuple
-                )
-            )
-        else:
-            logger.info("Using cached deferred result.")
-        return make_deferred_yieldable(result)
+        return self.response_cache.wrap(
+            key,
+            self._get_public_room_list,
+            limit, since_token, network_tuple=network_tuple,
+        )
 
     @defer.inlineCallbacks
     def _get_public_room_list(self, limit=None, since_token=None,
@@ -207,7 +202,7 @@ class RoomListHandler(BaseHandler):
         step = len(rooms_to_scan) if len(rooms_to_scan) != 0 else 1
 
         chunk = []
-        for i in xrange(0, len(rooms_to_scan), step):
+        for i in range(0, len(rooms_to_scan), step):
             batch = rooms_to_scan[i:i + step]
             logger.info("Processing %i rooms for result", len(batch))
             yield concurrently_execute(
@@ -422,18 +417,14 @@ class RoomListHandler(BaseHandler):
             server_name, limit, since_token, include_all_networks,
             third_party_instance_id,
         )
-        result = self.remote_response_cache.get(key)
-        if not result:
-            result = self.remote_response_cache.set(
-                key,
-                repl_layer.get_public_rooms(
-                    server_name, limit=limit, since_token=since_token,
-                    search_filter=search_filter,
-                    include_all_networks=include_all_networks,
-                    third_party_instance_id=third_party_instance_id,
-                )
-            )
-        return result
+        return self.remote_response_cache.wrap(
+            key,
+            repl_layer.get_public_rooms,
+            server_name, limit=limit, since_token=since_token,
+            search_filter=search_filter,
+            include_all_networks=include_all_networks,
+            third_party_instance_id=third_party_instance_id,
+        )
 
 
 class RoomListNextBatch(namedtuple("RoomListNextBatch", (
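Both call sites above collapse the get/set dance into ResponseCache.wrap, which either returns the in-flight result for a key or starts the computation itself. A minimal synchronous sketch of that pattern (the real class is Deferred-based and supports timeouts; this stand-in is illustrative):

class MiniResponseCache(object):
    """De-duplicates identical requests by key.

    wrap() returns the cached result when the key is already known, and
    otherwise invokes the callback and caches whatever it returns.
    """
    def __init__(self, name):
        self.name = name
        self._results = {}

    def wrap(self, key, callback, *args, **kwargs):
        if key not in self._results:
            self._results[key] = callback(*args, **kwargs)
        return self._results[key]

cache = MiniResponseCache("room_list")
calls = []

def expensive_room_list(limit):
    calls.append(limit)
    return ["!room:example.org"][:limit]

assert cache.wrap((10,), expensive_room_list, 10) == cache.wrap((10,), expensive_room_list, 10)
assert calls == [10]  # the second wrap() call was served from the cache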
synapse/handlers/room_member.py
@@ -17,11 +17,14 @@
 import abc
 import logging
 
+from six.moves import http_client
+
 from signedjson.key import decode_verify_key_bytes
 from signedjson.sign import verify_signed_json
 from twisted.internet import defer
 from unpaddedbase64 import decode_base64
 
 import synapse.server
+import synapse.types
 from synapse.api.constants import (
     EventTypes, Membership,
@@ -46,6 +49,11 @@ class RoomMemberHandler(object):
     __metaclass__ = abc.ABCMeta
 
     def __init__(self, hs):
+        """
+
+        Args:
+            hs (synapse.server.HomeServer):
+        """
         self.hs = hs
         self.store = hs.get_datastore()
         self.auth = hs.get_auth()
@@ -63,6 +71,7 @@ class RoomMemberHandler(object):
 
         self.clock = hs.get_clock()
         self.spam_checker = hs.get_spam_checker()
+        self._server_notices_mxid = self.config.server_notices_mxid
 
     @abc.abstractmethod
     def _remote_join(self, requester, remote_room_hosts, room_id, user, content):
@@ -149,7 +158,7 @@ class RoomMemberHandler(object):
     @defer.inlineCallbacks
     def _local_membership_update(
         self, requester, target, room_id, membership,
-        prev_event_ids,
+        prev_events_and_hashes,
         txn_id=None,
         ratelimit=True,
         content=None,
@@ -175,7 +184,7 @@ class RoomMemberHandler(object):
             },
             token_id=requester.access_token_id,
             txn_id=txn_id,
-            prev_event_ids=prev_event_ids,
+            prev_events_and_hashes=prev_events_and_hashes,
         )
 
         # Check if this event matches the previous membership event for the user.
@@ -290,11 +299,26 @@ class RoomMemberHandler(object):
         if is_blocked:
             raise SynapseError(403, "This room has been blocked on this server")
 
-        if effective_membership_state == "invite":
+        if effective_membership_state == Membership.INVITE:
+            # block any attempts to invite the server notices mxid
+            if target.to_string() == self._server_notices_mxid:
+                raise SynapseError(
+                    http_client.FORBIDDEN,
+                    "Cannot invite this user",
+                )
+
             block_invite = False
-            is_requester_admin = yield self.auth.is_server_admin(
-                requester.user,
-            )
+
+            if (self._server_notices_mxid is not None and
+                    requester.user.to_string() == self._server_notices_mxid):
+                # allow the server notices mxid to send invites
+                is_requester_admin = True
+
+            else:
+                is_requester_admin = yield self.auth.is_server_admin(
+                    requester.user,
+                )
+
             if not is_requester_admin:
                 if self.config.block_non_admin_invites:
                     logger.info(
@@ -314,7 +338,12 @@ class RoomMemberHandler(object):
                     403, "Invites have been disabled on this server",
                 )
 
-        latest_event_ids = yield self.store.get_latest_event_ids_in_room(room_id)
+        prev_events_and_hashes = yield self.store.get_prev_events_for_room(
+            room_id,
+        )
+        latest_event_ids = (
+            event_id for (event_id, _, _) in prev_events_and_hashes
+        )
+
         current_state_ids = yield self.state_handler.get_current_state_ids(
             room_id, latest_event_ids=latest_event_ids,
         )
@@ -344,6 +373,20 @@ class RoomMemberHandler(object):
             if same_sender and same_membership and same_content:
                 defer.returnValue(old_state)
 
+        # we don't allow people to reject invites to the server notice
+        # room, but they can leave it once they are joined.
+        if (
+            old_membership == Membership.INVITE and
+            effective_membership_state == Membership.LEAVE
+        ):
+            is_blocked = yield self._is_server_notice_room(room_id)
+            if is_blocked:
+                raise SynapseError(
+                    http_client.FORBIDDEN,
+                    "You cannot reject this invite",
+                    errcode=Codes.CANNOT_LEAVE_SERVER_NOTICE_ROOM,
+                )
+
         is_host_in_room = yield self._is_host_in_room(current_state_ids)
 
         if effective_membership_state == Membership.JOIN:
@@ -403,7 +446,7 @@ class RoomMemberHandler(object):
             membership=effective_membership_state,
             txn_id=txn_id,
             ratelimit=ratelimit,
-            prev_event_ids=latest_event_ids,
+            prev_events_and_hashes=prev_events_and_hashes,
             content=content,
         )
         defer.returnValue(res)
@@ -839,6 +882,13 @@ class RoomMemberHandler(object):
 
         defer.returnValue(False)
 
+    @defer.inlineCallbacks
+    def _is_server_notice_room(self, room_id):
+        if self._server_notices_mxid is None:
+            defer.returnValue(False)
+        user_ids = yield self.store.get_users_in_room(room_id)
+        defer.returnValue(self._server_notices_mxid in user_ids)
+
 
 class RoomMemberMasterHandler(RoomMemberHandler):
     def __init__(self, hs):
@@ -852,6 +902,14 @@ class RoomMemberMasterHandler(RoomMemberHandler):
     def _remote_join(self, requester, remote_room_hosts, room_id, user, content):
         """Implements RoomMemberHandler._remote_join
         """
+        # filter ourselves out of remote_room_hosts: do_invite_join ignores it
+        # and if it is the only entry we'd like to return a 404 rather than a
+        # 500.
+
+        remote_room_hosts = [
+            host for host in remote_room_hosts if host != self.hs.hostname
+        ]
+
        if len(remote_room_hosts) == 0:
            raise SynapseError(404, "No known servers")
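The last rule above is easy to state in isolation: a user may not go straight from invite to leave, i.e. reject the invite, while the server-notices user is in the room, though they can always leave after joining. A standalone sketch of that check (function and value names are illustrative, not synapse's):

def can_leave(old_membership, new_membership, room_members,
              server_notices_mxid="@server:example.org"):
    """True unless this is a rejection of an invite to the notices room.

    Mirrors the rule added above: rejecting (invite -> leave) is blocked
    while the server-notices user is in the room; leaving after joining
    is always allowed.
    """
    rejecting = old_membership == "invite" and new_membership == "leave"
    in_notice_room = server_notices_mxid in room_members
    return not (rejecting and in_notice_room)

assert not can_leave("invite", "leave", {"@server:example.org", "@bob:example.org"})
assert can_leave("join", "leave", {"@server:example.org", "@bob:example.org"})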
synapse/handlers/sync.py
@@ -16,7 +16,7 @@
 
 from synapse.api.constants import Membership, EventTypes
 from synapse.util.async import concurrently_execute
-from synapse.util.logcontext import LoggingContext, make_deferred_yieldable, preserve_fn
+from synapse.util.logcontext import LoggingContext
 from synapse.util.metrics import Measure, measure_func
 from synapse.util.caches.response_cache import ResponseCache
 from synapse.push.clientformat import format_push_rules_for_user
@@ -29,6 +29,8 @@ import collections
 import logging
 import itertools
 
+from six import itervalues, iteritems
+
 logger = logging.getLogger(__name__)
 
 
@@ -53,6 +55,7 @@ class TimelineBatch(collections.namedtuple("TimelineBatch", [
         to tell if room needs to be part of the sync result.
         """
         return bool(self.events)
+    __bool__ = __nonzero__  # python3
 
 
 class JoinedSyncResult(collections.namedtuple("JoinedSyncResult", [
@@ -77,6 +80,7 @@ class JoinedSyncResult(collections.namedtuple("JoinedSyncResult", [
             # nb the notification count does not, er, count: if there's nothing
             # else in the result, we don't need to send it.
         )
+    __bool__ = __nonzero__  # python3
 
 
 class ArchivedSyncResult(collections.namedtuple("ArchivedSyncResult", [
@@ -96,6 +100,7 @@ class ArchivedSyncResult(collections.namedtuple("ArchivedSyncResult", [
             or self.state
             or self.account_data
         )
+    __bool__ = __nonzero__  # python3
 
 
 class InvitedSyncResult(collections.namedtuple("InvitedSyncResult", [
@@ -107,6 +112,7 @@ class InvitedSyncResult(collections.namedtuple("InvitedSyncResult", [
     def __nonzero__(self):
         """Invited rooms should always be reported to the client"""
         return True
+    __bool__ = __nonzero__  # python3
 
 
 class GroupsSyncResult(collections.namedtuple("GroupsSyncResult", [
@@ -118,6 +124,7 @@ class GroupsSyncResult(collections.namedtuple("GroupsSyncResult", [
 
     def __nonzero__(self):
         return bool(self.join or self.invite or self.leave)
+    __bool__ = __nonzero__  # python3
 
 
 class DeviceLists(collections.namedtuple("DeviceLists", [
@@ -128,6 +135,7 @@ class DeviceLists(collections.namedtuple("DeviceLists", [
 
     def __nonzero__(self):
        return bool(self.changed or self.left)
+    __bool__ = __nonzero__  # python3
 
 
 class SyncResult(collections.namedtuple("SyncResult", [
@@ -160,6 +168,7 @@ class SyncResult(collections.namedtuple("SyncResult", [
             self.device_lists or
             self.groups
         )
+    __bool__ = __nonzero__  # python3
 
 
 class SyncHandler(object):
@@ -170,7 +179,7 @@ class SyncHandler(object):
         self.presence_handler = hs.get_presence_handler()
         self.event_sources = hs.get_event_sources()
         self.clock = hs.get_clock()
-        self.response_cache = ResponseCache(hs)
+        self.response_cache = ResponseCache(hs, "sync")
         self.state = hs.get_state_handler()
 
     def wait_for_sync_for_user(self, sync_config, since_token=None, timeout=0,
@@ -181,15 +190,11 @@ class SyncHandler(object):
         Returns:
             A Deferred SyncResult.
         """
-        result = self.response_cache.get(sync_config.request_key)
-        if not result:
-            result = self.response_cache.set(
-                sync_config.request_key,
-                preserve_fn(self._wait_for_sync_for_user)(
-                    sync_config, since_token, timeout, full_state
-                )
-            )
-        return make_deferred_yieldable(result)
+        return self.response_cache.wrap(
+            sync_config.request_key,
+            self._wait_for_sync_for_user,
+            sync_config, since_token, timeout, full_state,
+        )
 
     @defer.inlineCallbacks
     def _wait_for_sync_for_user(self, sync_config, since_token, timeout,
@@ -273,7 +278,7 @@ class SyncHandler(object):
             # result returned by the event source is poor form (it might cache
             # the object)
             room_id = event["room_id"]
-            event_copy = {k: v for (k, v) in event.iteritems()
+            event_copy = {k: v for (k, v) in iteritems(event)
                           if k != "room_id"}
             ephemeral_by_room.setdefault(room_id, []).append(event_copy)
 
@@ -292,7 +297,7 @@ class SyncHandler(object):
             for event in receipts:
                 room_id = event["room_id"]
                 # exclude room id, as above
-                event_copy = {k: v for (k, v) in event.iteritems()
+                event_copy = {k: v for (k, v) in iteritems(event)
                               if k != "room_id"}
                 ephemeral_by_room.setdefault(room_id, []).append(event_copy)
 
@@ -323,7 +328,7 @@ class SyncHandler(object):
             current_state_ids = frozenset()
             if any(e.is_state() for e in recents):
                 current_state_ids = yield self.state.get_current_state_ids(room_id)
-                current_state_ids = frozenset(current_state_ids.itervalues())
+                current_state_ids = frozenset(itervalues(current_state_ids))
 
             recents = yield filter_events_for_client(
                 self.store,
@@ -352,12 +357,24 @@ class SyncHandler(object):
                 since_key = since_token.room_key
 
             while limited and len(recents) < timeline_limit and max_repeat:
-                events, end_key = yield self.store.get_room_events_stream_for_room(
-                    room_id,
-                    limit=load_limit + 1,
-                    from_key=since_key,
-                    to_key=end_key,
-                )
+                # If we have a since_key then we are trying to get any events
+                # that have happened since `since_key` up to `end_key`, so we
+                # can just use `get_room_events_stream_for_room`.
+                # Otherwise, we want to return the last N events in the room
+                # in toplogical ordering.
+                if since_key:
+                    events, end_key = yield self.store.get_room_events_stream_for_room(
+                        room_id,
+                        limit=load_limit + 1,
+                        from_key=since_key,
+                        to_key=end_key,
+                    )
+                else:
+                    events, end_key = yield self.store.get_recent_events_for_room(
+                        room_id,
+                        limit=load_limit + 1,
+                        end_token=end_key,
+                    )
                 loaded_recents = sync_config.filter_collection.filter_room_timeline(
                     events
                 )
@@ -368,7 +385,7 @@ class SyncHandler(object):
                 current_state_ids = frozenset()
                 if any(e.is_state() for e in loaded_recents):
                     current_state_ids = yield self.state.get_current_state_ids(room_id)
-                    current_state_ids = frozenset(current_state_ids.itervalues())
+                    current_state_ids = frozenset(itervalues(current_state_ids))
 
                 loaded_recents = yield filter_events_for_client(
                     self.store,
@@ -427,7 +444,7 @@ class SyncHandler(object):
         Returns:
             A Deferred map from ((type, state_key)->Event)
         """
-        last_events, token = yield self.store.get_recent_events_for_room(
+        last_events, _ = yield self.store.get_recent_events_for_room(
             room_id, end_token=stream_position.room_key, limit=1,
         )
 
@@ -1026,7 +1043,7 @@ class SyncHandler(object):
         if since_token:
             for joined_sync in sync_result_builder.joined:
                 it = itertools.chain(
-                    joined_sync.timeline.events, joined_sync.state.itervalues()
+                    joined_sync.timeline.events, itervalues(joined_sync.state)
                 )
                 for event in it:
                     if event.type == EventTypes.Member:
@@ -1104,7 +1121,7 @@ class SyncHandler(object):
         newly_left_rooms = []
         room_entries = []
         invited = []
-        for room_id, events in mem_change_events_by_room_id.iteritems():
+        for room_id, events in iteritems(mem_change_events_by_room_id):
             non_joins = [e for e in events if e.membership != Membership.JOIN]
             has_join = len(non_joins) != len(events)
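The repeated `__bool__ = __nonzero__` alias is the stock Python 2/3 compatibility idiom: Python 2 consults __nonzero__ for truthiness while Python 3 consults __bool__, so defining one and aliasing the other keeps empty sync results falsy on both interpreters. A tiny self-contained illustration:

import collections

class Batch(collections.namedtuple("Batch", ["events"])):
    __slots__ = []

    def __nonzero__(self):
        # Python 2 truthiness hook
        return bool(self.events)
    __bool__ = __nonzero__  # Python 3 consults __bool__ instead

assert not Batch(events=[])
assert Batch(events=["event"])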
synapse/handlers/typing.py
@@ -16,7 +16,7 @@
 from twisted.internet import defer
 
 from synapse.api.errors import SynapseError, AuthError
-from synapse.util.logcontext import preserve_fn
+from synapse.util.logcontext import run_in_background
 from synapse.util.metrics import Measure
 from synapse.util.wheel_timer import WheelTimer
 from synapse.types import UserID, get_domain_from_id
@@ -97,7 +97,8 @@ class TypingHandler(object):
         if self.hs.is_mine_id(member.user_id):
             last_fed_poke = self._member_last_federation_poke.get(member, None)
             if not last_fed_poke or last_fed_poke + FEDERATION_PING_INTERVAL <= now:
-                preserve_fn(self._push_remote)(
+                run_in_background(
+                    self._push_remote,
                     member=member,
                     typing=True
                 )
@@ -196,7 +197,7 @@ class TypingHandler(object):
     def _push_update(self, member, typing):
         if self.hs.is_mine_id(member.user_id):
             # Only send updates for changes to our own users.
-            preserve_fn(self._push_remote)(member, typing)
+            run_in_background(self._push_remote, member, typing)
 
         self._push_update_local(
             member=member,
@@ -205,28 +206,31 @@ class TypingHandler(object):
 
     @defer.inlineCallbacks
     def _push_remote(self, member, typing):
-        users = yield self.state.get_current_user_in_room(member.room_id)
-        self._member_last_federation_poke[member] = self.clock.time_msec()
+        try:
+            users = yield self.state.get_current_user_in_room(member.room_id)
+            self._member_last_federation_poke[member] = self.clock.time_msec()
 
-        now = self.clock.time_msec()
-        self.wheel_timer.insert(
-            now=now,
-            obj=member,
-            then=now + FEDERATION_PING_INTERVAL,
-        )
+            now = self.clock.time_msec()
+            self.wheel_timer.insert(
+                now=now,
+                obj=member,
+                then=now + FEDERATION_PING_INTERVAL,
+            )
 
-        for domain in set(get_domain_from_id(u) for u in users):
-            if domain != self.server_name:
-                self.federation.send_edu(
-                    destination=domain,
-                    edu_type="m.typing",
-                    content={
-                        "room_id": member.room_id,
-                        "user_id": member.user_id,
-                        "typing": typing,
-                    },
-                    key=member,
-                )
+            for domain in set(get_domain_from_id(u) for u in users):
+                if domain != self.server_name:
+                    self.federation.send_edu(
+                        destination=domain,
+                        edu_type="m.typing",
+                        content={
+                            "room_id": member.room_id,
+                            "user_id": member.user_id,
+                            "typing": typing,
+                        },
+                        key=member,
+                    )
+        except Exception:
+            logger.exception("Error pushing typing notif to remotes")
 
     @defer.inlineCallbacks
     def _recv_edu(self, origin, content):
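Both call sites switch to run_in_background, and the background body gains a try/except, so a federation failure is logged instead of being lost inside a fire-and-forget task. A minimal stand-in for the pattern using threads rather than Twisted (this run_in_background is a sketch, not synapse's helper):

import logging
import threading

logger = logging.getLogger(__name__)

def run_in_background(f, *args):
    # stand-in for synapse's Twisted helper of the same name
    threading.Thread(target=f, args=args).start()

def push_remote(member, typing):
    try:
        raise ConnectionError("remote unreachable")  # simulated federation failure
    except Exception:
        # without this, the error would vanish inside the background task
        logger.exception("Error pushing typing notif to remotes")

logging.basicConfig()
run_in_background(push_remote, "@alice:example.org", True)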
synapse/handlers/user_directory.py
@@ -22,6 +22,7 @@ from synapse.util.metrics import Measure
 from synapse.util.async import sleep
 from synapse.types import get_localpart_from_id
 
+from six import iteritems
 
 logger = logging.getLogger(__name__)
 
@@ -122,6 +123,13 @@ class UserDirectoryHandler(object):
             user_id, profile.display_name, profile.avatar_url, None,
         )
 
+    @defer.inlineCallbacks
+    def handle_user_deactivated(self, user_id):
+        """Called when a user ID is deactivated
+        """
+        yield self.store.remove_from_user_dir(user_id)
+        yield self.store.remove_from_user_in_public_room(user_id)
+
     @defer.inlineCallbacks
     def _unsafe_process(self):
         # If self.pos is None then means we haven't fetched it from DB
@@ -403,7 +411,7 @@ class UserDirectoryHandler(object):
 
         if change:
             users_with_profile = yield self.state.get_current_user_in_room(room_id)
-            for user_id, profile in users_with_profile.iteritems():
+            for user_id, profile in iteritems(users_with_profile):
                 yield self._handle_new_user(room_id, user_id, profile)
         else:
             users = yield self.store.get_users_in_public_due_to_room(room_id)
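The iteritems/itervalues substitutions here and in sync.py are the mechanical part of the Python 3 port: six dispatches to dict.iteritems() on Python 2 and dict.items() on Python 3, so the same code runs on both. For example:

from six import iteritems

profiles = {"@alice:example.org": {"display_name": "Alice"}}
for user_id, profile in iteritems(profiles):
    # dict.iteritems() on py2, dict.items() on py3
    print(user_id, profile["display_name"])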
synapse/http/__init__.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,3 +13,24 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from twisted.internet.defer import CancelledError
+from twisted.python import failure
+
+from synapse.api.errors import SynapseError
+
+
+class RequestTimedOutError(SynapseError):
+    """Exception representing timeout of an outbound request"""
+    def __init__(self):
+        super(RequestTimedOutError, self).__init__(504, "Timed out")
+
+
+def cancelled_to_request_timed_out_error(value, timeout):
+    """Turns CancelledErrors into RequestTimedOutErrors.
+
+    For use with async.add_timeout_to_deferred
+    """
+    if isinstance(value, failure.Failure):
+        value.trap(CancelledError)
+        raise RequestTimedOutError()
+    return value
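As its docstring says, cancelled_to_request_timed_out_error is meant to be paired with synapse's add_timeout_to_deferred: when the timeout fires and cancels the Deferred, the resulting CancelledError is converted into a 504 rather than surfacing as a bare cancellation. A hand-rolled sketch of the same conversion wired up as a plain errback (the scheduling below is illustrative, not the real wiring):

from twisted.internet import defer, reactor

from synapse.http import cancelled_to_request_timed_out_error

d = defer.Deferred()
# if nothing fires d within 60s, cancel it; the errback then re-raises
# the CancelledError as a RequestTimedOutError (504)
d.addErrback(cancelled_to_request_timed_out_error, timeout=60)
reactor.callLater(60, d.cancel)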
synapse/http/additional_resource.py
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from synapse.http.server import wrap_request_handler
+from synapse.http.server import wrap_json_request_handler
 from twisted.web.resource import Resource
 from twisted.web.server import NOT_DONE_YET
 
@@ -42,14 +42,13 @@ class AdditionalResource(Resource):
         Resource.__init__(self)
         self._handler = handler
 
-        # these are required by the request_handler wrapper
-        self.version_string = hs.version_string
+        # required by the request_handler wrapper
         self.clock = hs.get_clock()
 
     def render(self, request):
         self._async_render(request)
         return NOT_DONE_YET
 
-    @wrap_request_handler
+    @wrap_json_request_handler
     def _async_render(self, request):
         return self._handler(request)
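render() returning NOT_DONE_YET after kicking off the real work is the standard twisted.web idiom for asynchronous resources: the response is written and finished later rather than when render returns. A bare-bones example of the idiom, independent of synapse:

from twisted.internet import reactor
from twisted.web.resource import Resource
from twisted.web.server import NOT_DONE_YET

class SlowHello(Resource):
    isLeaf = True

    def render_GET(self, request):
        # answer one second later; returning NOT_DONE_YET tells twisted.web
        # that we will call request.finish() ourselves
        def respond():
            request.write(b"hello\n")
            request.finish()
        reactor.callLater(1, respond)
        return NOT_DONE_YET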
Some files were not shown because too many files have changed in this diff.