Date: Tue, 31 May 2016 20:28:42 +0100
Subject: [PATCH 044/414] handle emotes & notices correctly in email notifs
---
res/templates/notif.html | 6 +++++-
res/templates/notif.txt | 6 +++++-
2 files changed, 10 insertions(+), 2 deletions(-)
diff --git a/res/templates/notif.html b/res/templates/notif.html
index 834840861..88b921ca9 100644
--- a/res/templates/notif.html
+++ b/res/templates/notif.html
@@ -17,11 +17,15 @@
{% if loop.index0 == 0 or notif.messages[loop.index0 - 1].sender_name != notif.messages[loop.index0].sender_name %}
- {{ message.sender_name }}
+ {% if message.msgtype == "m.emote" %}*{% endif %} {{ message.sender_name }}
{% endif %}
{% if message.msgtype == "m.text" %}
{{ message.body_text_html }}
+ {% elif message.msgtype == "m.emote" %}
+ {{ message.body_text_html }}
+ {% elif message.msgtype == "m.notice" %}
+ {{ message.body_text_html }}
{% elif message.msgtype == "m.image" %}
{% elif message.msgtype == "m.file" %}
diff --git a/res/templates/notif.txt b/res/templates/notif.txt
index a3ddac80c..a37bee983 100644
--- a/res/templates/notif.txt
+++ b/res/templates/notif.txt
@@ -1,7 +1,11 @@
{% for message in notif.messages %}
-{{ message.sender_name }} ({{ message.ts|format_ts("%H:%M") }})
+{% if message.msgtype == "m.emote" %}* {% endif %}{{ message.sender_name }} ({{ message.ts|format_ts("%H:%M") }})
{% if message.msgtype == "m.text" %}
{{ message.body_text_plain }}
+{% elif message.msgtype == "m.emote" %}
+{{ message.body_text_plain }}
+{% elif message.msgtype == "m.notice" %}
+{{ message.body_text_plain }}
{% elif message.msgtype == "m.image" %}
{{ message.body_text_plain }}
{% elif message.msgtype == "m.file" %}
From 6ecb2ca4ec3fae8c6f2e837b4ec99cc6929de638 Mon Sep 17 00:00:00 2001
From: David Baker
Date: Wed, 1 Jun 2016 09:48:55 +0100
Subject: [PATCH 045/414] pep8
---
synapse/federation/transport/server.py | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index da9e7a326..a1a334955 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -134,7 +134,8 @@ class Authenticator(object):
class BaseFederationServlet(object):
- def __init__(self, handler, authenticator, ratelimiter, server_name, room_list_handler):
+ def __init__(self, handler, authenticator, ratelimiter, server_name,
+ room_list_handler):
self.handler = handler
self.authenticator = authenticator
self.ratelimiter = ratelimiter
@@ -492,6 +493,7 @@ class OpenIdUserInfo(BaseFederationServlet):
def _wrap(self, code):
return code
+
class PublicRoomList(BaseFederationServlet):
"""
Fetch the public room list for this server.
From 43db0d9f6a314679bd25b82354e5c469e7a010b9 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 1 Jun 2016 10:31:09 +0100
Subject: [PATCH 046/414] Add get_users_with_read_receipts_in_room cache
---
synapse/push/bulk_push_rule_evaluator.py | 8 +++----
synapse/storage/receipts.py | 28 ++++++++++++++++++++++++
2 files changed, 32 insertions(+), 4 deletions(-)
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index 25f2fb9da..1e5c4b073 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
@@ -87,13 +87,13 @@ def evaluator_for_event(event, hs, store):
all_in_room = yield store.get_users_in_room(room_id)
all_in_room = set(all_in_room)
- receipts = yield store.get_receipts_for_room(room_id, "m.read")
+ users_with_receipts = yield store.get_users_with_read_receipts_in_room(room_id)
# any users with pushers must be ours: they have pushers
user_ids = set(users_with_pushers)
- for r in receipts:
- if hs.is_mine_id(r['user_id']) and r['user_id'] in all_in_room:
- user_ids.add(r['user_id'])
+ for uid in users_with_receipts:
+ if hs.is_mine_id(uid) and uid in all_in_room:
+ user_ids.add(uid)
# if this event is an invite event, we may need to run rules for the user
# who's been invited, otherwise they won't get told they've been invited
diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py
index fdcf28f3e..964f30dff 100644
--- a/synapse/storage/receipts.py
+++ b/synapse/storage/receipts.py
@@ -34,6 +34,26 @@ class ReceiptsStore(SQLBaseStore):
"ReceiptsRoomChangeCache", self._receipts_id_gen.get_current_token()
)
+ @cachedInlineCallbacks()
+ def get_users_with_read_receipts_in_room(self, room_id):
+ receipts = yield self.get_receipts_for_room(room_id, "m.read")
+ defer.returnValue(set(r['user_id'] for r in receipts))
+
+ def _invalidate_get_users_with_receipts_in_room(self, room_id, receipt_type,
+ user_id):
+ if receipt_type != "m.read":
+ return
+
+ # Returns an ObservableDeferred
+ res = self.get_users_with_read_receipts_in_room.cache.get((room_id,), None)
+
+ if res and res.called and user_id in res.result:
+ # We'd only be adding to the set, so no point invalidating if the
+ # user is already there
+ return
+
+ self.get_users_with_read_receipts_in_room.invalidate((room_id,))
+
@cached(num_args=2)
def get_receipts_for_room(self, room_id, receipt_type):
return self._simple_select_list(
@@ -228,6 +248,10 @@ class ReceiptsStore(SQLBaseStore):
txn.call_after(
self.get_receipts_for_room.invalidate, (room_id, receipt_type)
)
+ txn.call_after(
+ self._invalidate_get_users_with_receipts_in_room,
+ room_id, receipt_type, user_id,
+ )
txn.call_after(
self.get_receipts_for_user.invalidate, (user_id, receipt_type)
)
@@ -373,6 +397,10 @@ class ReceiptsStore(SQLBaseStore):
txn.call_after(
self.get_receipts_for_room.invalidate, (room_id, receipt_type)
)
+ txn.call_after(
+ self._invalidate_get_users_with_receipts_in_room,
+ room_id, receipt_type, user_id,
+ )
txn.call_after(
self.get_receipts_for_user.invalidate, (user_id, receipt_type)
)
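
The invalidation shortcut above is worth spelling out: a new read receipt can
only ever add a user to the cached set, so if the user is already present in a
completed cache entry there is nothing to invalidate. A self-contained sketch
of that logic (FakeCache and FakeEntry are illustrative stand-ins for
synapse's cache and its ObservableDeferred entries):

    class FakeEntry(object):
        def __init__(self, result):
            self.called = True      # mimics a fired ObservableDeferred
            self.result = result

    class FakeCache(object):
        def __init__(self):
            self.data = {}
            self.invalidations = 0
        def get(self, key, default=None):
            return self.data.get(key, default)
        def invalidate(self, key):
            self.invalidations += 1
            self.data.pop(key, None)

    def maybe_invalidate(cache, room_id, user_id):
        res = cache.get((room_id,), None)
        if res and res.called and user_id in res.result:
            return                  # user already in the set: keep the entry
        cache.invalidate((room_id,))

    cache = FakeCache()
    cache.data[("!room:hs",)] = FakeEntry({"@alice:hs"})
    maybe_invalidate(cache, "!room:hs", "@alice:hs")  # no-op
    maybe_invalidate(cache, "!room:hs", "@bob:hs")    # invalidates
    assert cache.invalidations == 1
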
From 195254cae80f4748c3fc0ac3b46000047c2e6cc0 Mon Sep 17 00:00:00 2001
From: David Baker
Date: Wed, 1 Jun 2016 11:14:16 +0100
Subject: [PATCH 047/414] Inject fake room list handler in tests
Otherwise it tries to start the remote public room list updating looping call, which breaks.
---
tests/utils.py | 2 ++
1 file changed, 2 insertions(+)
diff --git a/tests/utils.py b/tests/utils.py
index 59d985b5f..006abedbc 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -67,6 +67,7 @@ def setup_test_homeserver(name="test", datastore=None, config=None, **kargs):
version_string="Synapse/tests",
database_engine=create_engine(config.database_config),
get_db_conn=db_pool.get_db_conn,
+ room_list_handler=object(),
**kargs
)
hs.setup()
@@ -75,6 +76,7 @@ def setup_test_homeserver(name="test", datastore=None, config=None, **kargs):
name, db_pool=None, datastore=datastore, config=config,
version_string="Synapse/tests",
database_engine=create_engine(config.database_config),
+ room_list_handler=object(),
**kargs
)
From d60eed07109f61ebe2120e1eb566e5bb7095fbad Mon Sep 17 00:00:00 2001
From: David Baker
Date: Wed, 1 Jun 2016 11:45:43 +0100
Subject: [PATCH 048/414] Limit number of notifications in an email
notification
---
synapse/storage/event_push_actions.py | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py
index 4dae51a17..940e11d7a 100644
--- a/synapse/storage/event_push_actions.py
+++ b/synapse/storage/event_push_actions.py
@@ -119,7 +119,8 @@ class EventPushActionsStore(SQLBaseStore):
@defer.inlineCallbacks
def get_unread_push_actions_for_user_in_range(self, user_id,
min_stream_ordering,
- max_stream_ordering=None):
+ max_stream_ordering=None,
+ limit=20):
def get_after_receipt(txn):
sql = (
"SELECT ep.event_id, ep.room_id, ep.stream_ordering, ep.actions, "
@@ -151,7 +152,8 @@ class EventPushActionsStore(SQLBaseStore):
if max_stream_ordering is not None:
sql += " AND ep.stream_ordering <= ?"
args.append(max_stream_ordering)
- sql += " ORDER BY ep.stream_ordering ASC"
+ sql += " ORDER BY ep.stream_ordering ASC LIMIT ?"
+ args.append(limit)
txn.execute(sql, args)
return txn.fetchall()
after_read_receipt = yield self.runInteraction(
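
Note that the limit is bound as a query parameter rather than interpolated
into the SQL string; both SQLite and Postgres accept a placeholder in the
LIMIT clause. A runnable sketch of the pattern (table and values are
illustrative):

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE event_push_actions (stream_ordering INTEGER, event_id TEXT)")
    conn.executemany(
        "INSERT INTO event_push_actions VALUES (?, ?)",
        [(i, "$ev%d" % i) for i in range(100)],
    )

    sql = "SELECT event_id FROM event_push_actions WHERE stream_ordering > ?"
    args = [10]
    sql += " ORDER BY stream_ordering ASC LIMIT ?"
    args.append(20)  # cap the number of notifications, as in the patch
    rows = conn.execute(sql, args).fetchall()
    assert len(rows) == 20
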
From c8285564a3772db387fd5c94b1a82329dc320e36 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 1 Jun 2016 11:08:45 +0100
Subject: [PATCH 049/414] Use state to calculate get_users_in_room
---
synapse/push/action_generator.py | 2 +-
synapse/push/bulk_push_rule_evaluator.py | 27 +++++++++------
synapse/storage/events.py | 3 --
synapse/storage/pusher.py | 42 ++++++++++++++++--------
synapse/storage/roommember.py | 3 --
5 files changed, 47 insertions(+), 30 deletions(-)
diff --git a/synapse/push/action_generator.py b/synapse/push/action_generator.py
index 9b208668b..46e768e35 100644
--- a/synapse/push/action_generator.py
+++ b/synapse/push/action_generator.py
@@ -40,7 +40,7 @@ class ActionGenerator:
def handle_push_actions_for_event(self, event, context):
with Measure(self.clock, "handle_push_actions_for_event"):
bulk_evaluator = yield evaluator_for_event(
- event, self.hs, self.store
+ event, self.hs, self.store, context.current_state
)
actions_by_user = yield bulk_evaluator.action_for_event_by_user(
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index 1e5c4b073..8c59e59e0 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
@@ -21,7 +21,7 @@ from twisted.internet import defer
from .baserules import list_with_base_rules
from .push_rule_evaluator import PushRuleEvaluatorForEvent
-from synapse.api.constants import EventTypes
+from synapse.api.constants import EventTypes, Membership
from synapse.visibility import filter_events_for_clients
@@ -72,20 +72,24 @@ def _get_rules(room_id, user_ids, store):
@defer.inlineCallbacks
-def evaluator_for_event(event, hs, store):
+def evaluator_for_event(event, hs, store, current_state):
room_id = event.room_id
-
- # users in the room who have pushers need to get push rules run because
- # that's how their pushers work
- users_with_pushers = yield store.get_users_with_pushers_in_room(room_id)
-
# We also will want to generate notifs for other people in the room so
# their unread counts are correct in the event stream, but to avoid
# generating them for bot / AS users etc, we only do so for people who've
# sent a read receipt into the room.
- all_in_room = yield store.get_users_in_room(room_id)
- all_in_room = set(all_in_room)
+ all_in_room = set(
+ e.state_key for e in current_state.values()
+ if e.type == EventTypes.Member and e.membership == Membership.JOIN
+ )
+
+ # users in the room who have pushers need to get push rules run because
+ # that's how their pushers work
+ if_users_with_pushers = yield store.get_if_users_have_pushers(all_in_room)
+ users_with_pushers = set(
+ uid for uid, have_pusher in if_users_with_pushers.items() if have_pusher
+ )
users_with_receipts = yield store.get_users_with_read_receipts_in_room(room_id)
@@ -143,7 +147,10 @@ class BulkPushRuleEvaluator:
self.store, user_tuples, [event], {event.event_id: current_state}
)
- room_members = yield self.store.get_users_in_room(self.room_id)
+ room_members = set(
+ e.state_key for e in current_state.values()
+ if e.type == EventTypes.Member and e.membership == Membership.JOIN
+ )
evaluator = PushRuleEvaluatorForEvent(event, len(room_members))
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index 4655669ba..2b3f79577 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -342,9 +342,6 @@ class EventsStore(SQLBaseStore):
txn.call_after(self._get_current_state_for_key.invalidate_all)
txn.call_after(self.get_rooms_for_user.invalidate_all)
txn.call_after(self.get_users_in_room.invalidate, (event.room_id,))
- txn.call_after(
- self.get_users_with_pushers_in_room.invalidate, (event.room_id,)
- )
txn.call_after(self.get_joined_hosts_for_room.invalidate, (event.room_id,))
txn.call_after(self.get_room_name_and_aliases.invalidate, (event.room_id,))
diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py
index 9e8e2e296..39d5349ea 100644
--- a/synapse/storage/pusher.py
+++ b/synapse/storage/pusher.py
@@ -18,7 +18,7 @@ from twisted.internet import defer
from canonicaljson import encode_canonical_json
-from synapse.util.caches.descriptors import cachedInlineCallbacks
+from synapse.util.caches.descriptors import cachedInlineCallbacks, cachedList
import logging
import simplejson as json
@@ -135,19 +135,35 @@ class PusherStore(SQLBaseStore):
"get_all_updated_pushers", get_all_updated_pushers_txn
)
- @cachedInlineCallbacks(num_args=1)
- def get_users_with_pushers_in_room(self, room_id):
- users = yield self.get_users_in_room(room_id)
-
+ @cachedInlineCallbacks(lru=True, num_args=1)
+ def get_if_user_has_pusher(self, user_id):
-        result = yield self._simple_select_many_batch(
+        result = yield self._simple_select_one_onecol(
table='pushers',
- column='user_name',
- iterable=users,
- retcols=['user_name'],
- desc='get_users_with_pushers_in_room'
+ keyvalues={
+                'user_name': user_id,
+ },
+ retcol='user_name',
+ desc='get_if_user_has_pusher',
+ allow_none=True,
)
- defer.returnValue([r['user_name'] for r in result])
+ defer.returnValue(bool(result))
+
+ @cachedList(cached_method_name="get_if_user_has_pusher",
+ list_name="user_ids", num_args=1, inlineCallbacks=True)
+ def get_if_users_have_pushers(self, user_ids):
+ rows = yield self._simple_select_many_batch(
+ table='pushers',
+ column='user_name',
+ iterable=user_ids,
+ retcols=['user_name'],
+ desc='get_if_users_have_pushers'
+ )
+
+ result = {user_id: False for user_id in user_ids}
+ result.update({r['user_name']: True for r in rows})
+
+ defer.returnValue(result)
@defer.inlineCallbacks
def add_pusher(self, user_id, access_token, kind, app_id,
@@ -178,16 +194,16 @@ class PusherStore(SQLBaseStore):
},
)
if newly_inserted:
- # get_users_with_pushers_in_room only cares if the user has
+ # get_if_user_has_pusher only cares if the user has
# at least *one* pusher.
- txn.call_after(self.get_users_with_pushers_in_room.invalidate_all)
+ txn.call_after(self.get_if_user_has_pusher.invalidate, (user_id,))
yield self.runInteraction("add_pusher", f)
@defer.inlineCallbacks
def delete_pusher_by_app_id_pushkey_user_id(self, app_id, pushkey, user_id):
def delete_pusher_txn(txn, stream_id):
- txn.call_after(self.get_users_with_pushers_in_room.invalidate_all)
+ txn.call_after(self.get_if_user_has_pusher.invalidate, (user_id,))
self._simple_delete_one_txn(
txn,
diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py
index face685ed..41b395e07 100644
--- a/synapse/storage/roommember.py
+++ b/synapse/storage/roommember.py
@@ -58,9 +58,6 @@ class RoomMemberStore(SQLBaseStore):
txn.call_after(self.get_rooms_for_user.invalidate, (event.state_key,))
txn.call_after(self.get_joined_hosts_for_room.invalidate, (event.room_id,))
txn.call_after(self.get_users_in_room.invalidate, (event.room_id,))
- txn.call_after(
- self.get_users_with_pushers_in_room.invalidate, (event.room_id,)
- )
txn.call_after(
self._membership_stream_cache.entity_has_changed,
event.state_key, event.internal_metadata.stream_ordering
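
The recurring idiom in this patch derives the joined membership directly from
the event's current state rather than going through the get_users_in_room
cache. In isolation, with stand-in events (the string literals correspond to
EventTypes.Member and Membership.JOIN):

    from collections import namedtuple

    # illustrative stand-in for synapse's state events
    FakeEvent = namedtuple("FakeEvent", ["type", "state_key", "membership"])

    current_state = {
        ("m.room.member", "@alice:hs"): FakeEvent("m.room.member", "@alice:hs", "join"),
        ("m.room.member", "@bob:hs"): FakeEvent("m.room.member", "@bob:hs", "leave"),
        ("m.room.name", ""): FakeEvent("m.room.name", "", None),
    }

    all_in_room = set(
        e.state_key for e in current_state.values()
        if e.type == "m.room.member" and e.membership == "join"
    )
    assert all_in_room == {"@alice:hs"}
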
From 991af8b0d6406b633386384d823e5c3a9c2ceb8b Mon Sep 17 00:00:00 2001
From: David Baker
Date: Wed, 1 Jun 2016 17:40:52 +0100
Subject: [PATCH 050/414] WIP on unsubscribing email notifs without logging in
---
synapse/api/auth.py | 25 +++++++++------
synapse/rest/client/v1/pusher.py | 55 +++++++++++++++++++++++++++++++-
2 files changed, 70 insertions(+), 10 deletions(-)
diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index 2474a1453..2ece59bb1 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -13,7 +13,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-"""This module contains classes for authenticating the user."""
from canonicaljson import encode_canonical_json
from signedjson.key import decode_verify_key_bytes
from signedjson.sign import verify_signed_json, SignatureVerifyException
@@ -42,13 +41,20 @@ AuthEventTypes = (
class Auth(object):
-
+ """
+ FIXME: This class contains a mix of functions for authenticating users
+ of our client-server API and authenticating events added to room graphs.
+ """
def __init__(self, hs):
self.hs = hs
self.clock = hs.get_clock()
self.store = hs.get_datastore()
self.state = hs.get_state_handler()
self.TOKEN_NOT_FOUND_HTTP_STATUS = 401
+        # Docs for these currently live at
+ # https://github.com/matrix-org/matrix-doc/blob/master/drafts/macaroons_caveats.rst
+ # In addition, we have type == delete_pusher which grants access only to
+ # delete pushers.
self._KNOWN_CAVEAT_PREFIXES = set([
"gen = ",
"guest = ",
@@ -507,7 +513,7 @@ class Auth(object):
return default
@defer.inlineCallbacks
- def get_user_by_req(self, request, allow_guest=False):
+ def get_user_by_req(self, request, allow_guest=False, rights="access"):
""" Get a registered user's ID.
Args:
@@ -529,7 +535,7 @@ class Auth(object):
)
access_token = request.args["access_token"][0]
- user_info = yield self.get_user_by_access_token(access_token)
+ user_info = yield self.get_user_by_access_token(access_token, rights)
user = user_info["user"]
token_id = user_info["token_id"]
is_guest = user_info["is_guest"]
@@ -590,7 +596,7 @@ class Auth(object):
defer.returnValue(user_id)
@defer.inlineCallbacks
- def get_user_by_access_token(self, token):
+ def get_user_by_access_token(self, token, rights="access"):
""" Get a registered user's ID.
Args:
@@ -601,7 +607,7 @@ class Auth(object):
AuthError if no user by that token exists or the token is invalid.
"""
try:
- ret = yield self.get_user_from_macaroon(token)
+ ret = yield self.get_user_from_macaroon(token, rights)
except AuthError:
# TODO(daniel): Remove this fallback when all existing access tokens
# have been re-issued as macaroons.
@@ -609,11 +615,11 @@ class Auth(object):
defer.returnValue(ret)
@defer.inlineCallbacks
- def get_user_from_macaroon(self, macaroon_str):
+ def get_user_from_macaroon(self, macaroon_str, rights="access"):
try:
macaroon = pymacaroons.Macaroon.deserialize(macaroon_str)
- self.validate_macaroon(macaroon, "access", self.hs.config.expire_access_token)
+ self.validate_macaroon(macaroon, rights, self.hs.config.expire_access_token)
user_prefix = "user_id = "
user = None
@@ -667,7 +673,8 @@ class Auth(object):
Args:
macaroon(pymacaroons.Macaroon): The macaroon to validate
- type_string(str): The kind of token this is (e.g. "access", "refresh")
+ type_string(str): The kind of token required (e.g. "access", "refresh",
+ "delete_pusher")
verify_expiry(bool): Whether to verify whether the macaroon has expired.
This should really always be True, but no clients currently implement
token refresh, so we can't enforce expiry yet.
diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py
index ab928a16d..fa7a0992d 100644
--- a/synapse/rest/client/v1/pusher.py
+++ b/synapse/rest/client/v1/pusher.py
@@ -17,7 +17,11 @@ from twisted.internet import defer
from synapse.api.errors import SynapseError, Codes
from synapse.push import PusherConfigException
-from synapse.http.servlet import parse_json_object_from_request
+from synapse.http.servlet import (
+ parse_json_object_from_request, parse_string, RestServlet
+)
+from synapse.http.server import finish_request
+from synapse.api.errors import StoreError
from .base import ClientV1RestServlet, client_path_patterns
@@ -136,6 +140,55 @@ class PushersSetRestServlet(ClientV1RestServlet):
return 200, {}
+class PushersRemoveRestServlet(RestServlet):
+ """
+    To allow a pusher to be deleted by clicking a link (i.e. a GET request)
+ """
+ PATTERNS = client_path_patterns("/pushers/remove$")
+ SUCCESS_HTML = "You have been unsubscribed"
+
+    def __init__(self, hs):
+        super(PushersRemoveRestServlet, self).__init__()
+        self.hs = hs
+        self.notifier = hs.get_notifier()
+        self.auth = hs.get_auth()
+
+ @defer.inlineCallbacks
+ def on_GET(self, request):
+        requester = yield self.auth.get_user_by_req(request, rights="delete_pusher")
+ user = requester.user
+
+ app_id = parse_string(request, "app_id", required=True)
+ pushkey = parse_string(request, "pushkey", required=True)
+
+ pusher_pool = self.hs.get_pusherpool()
+
+ try:
+ yield pusher_pool.remove_pusher(
+ app_id=app_id,
+ pushkey=pushkey,
+ user_id=user.to_string(),
+ )
+        except StoreError as se:
+            if se.code != 404:
+                raise
+            # A 404 is fine: they're already unsubscribed
+
+ self.notifier.on_new_replication_data()
+
+ request.setResponseCode(200)
+ request.setHeader(b"Content-Type", b"text/html; charset=utf-8")
+ request.setHeader(b"Server", self.hs.version_string)
+ request.setHeader(b"Content-Length", b"%d" % (
+ len(PushersRemoveRestServlet.SUCCESS_HTML),
+ ))
+ request.write(PushersRemoveRestServlet.SUCCESS_HTML)
+ finish_request(request)
+ defer.returnValue(None)
+
+ def on_OPTIONS(self, _):
+ return 200, {}
+
+
def register_servlets(hs, http_server):
PushersRestServlet(hs).register(http_server)
PushersSetRestServlet(hs).register(http_server)
+ PushersRemoveRestServlet(hs).register(http_server)
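
The access-control idea here is a scoped token: a macaroon whose
"type = delete_pusher" caveat makes it useless for anything except this
endpoint. A standalone pymacaroons sketch (the location, identifier and key
are illustrative, not synapse's real values):

    import pymacaroons

    macaroon = pymacaroons.Macaroon(
        location="example.com",
        identifier="key_id_1",
        key="a secret signing key",
    )
    macaroon.add_first_party_caveat("user_id = @alice:example.com")
    macaroon.add_first_party_caveat("type = delete_pusher")
    token = macaroon.serialize()

    # the pusher-removal endpoint would accept exactly these caveats
    v = pymacaroons.Verifier()
    v.satisfy_exact("user_id = @alice:example.com")
    v.satisfy_exact("type = delete_pusher")
    assert v.verify(pymacaroons.Macaroon.deserialize(token), "a secret signing key")
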
From e0deeff23eec099ad6bcb6e7170f524dc14982e4 Mon Sep 17 00:00:00 2001
From: David Baker
Date: Wed, 1 Jun 2016 17:58:58 +0100
Subject: [PATCH 051/414] Fix room list spidering
---
synapse/handlers/room.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 77063b021..9fd34588d 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -441,7 +441,7 @@ class RoomListHandler(BaseHandler):
self.hs.config.secondary_directory_servers
)
self.remote_list_request_cache.set((), deferred)
- yield deferred
+ self.remote_list_cache = yield deferred
@defer.inlineCallbacks
def get_aggregated_public_room_list(self):
From aaa70e26a2eb37fbdf728393148e003dc9866afd Mon Sep 17 00:00:00 2001
From: Matthew Hodgson
Date: Wed, 1 Jun 2016 22:13:47 +0100
Subject: [PATCH 052/414] special case m.room.third_party_invite event auth to
match invites, otherwise they get out of sync and you get
https://github.com/vector-im/vector-web/issues/1208
---
synapse/api/auth.py | 18 ++++++++++++++++++
1 file changed, 18 insertions(+)
diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index 2474a1453..007a0998a 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -120,6 +120,24 @@ class Auth(object):
return allowed
self.check_event_sender_in_room(event, auth_events)
+
+ # Special case to allow m.room.third_party_invite events wherever
+ # a user is allowed to issue invites. Fixes
+ # https://github.com/vector-im/vector-web/issues/1208 hopefully
+ if event.type == EventTypes.ThirdPartyInvite:
+ user_level = self._get_user_power_level(event.user_id, auth_events)
+ invite_level = self._get_named_level(auth_events, "invite", 0)
+
+ if user_level < invite_level:
+ raise AuthError(
+ 403, (
+ "You cannot issue a third party invite for %s." %
+                        (event.content["display_name"],)
+ )
+ )
+ else:
+ return True
+
self._can_send_event(event, auth_events)
if event.type == EventTypes.PowerLevels:
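
The check gates third party invites on the same power level as ordinary
invites. A simplified stand-in for the _get_user_power_level /
_get_named_level pair (field names follow the m.room.power_levels content
schema):

    def can_third_party_invite(power_levels, sender):
        user_level = power_levels.get("users", {}).get(
            sender, power_levels.get("users_default", 0)
        )
        invite_level = power_levels.get("invite", 0)
        return user_level >= invite_level

    pl = {"users": {"@admin:hs": 100}, "users_default": 0, "invite": 50}
    assert can_third_party_invite(pl, "@admin:hs")
    assert not can_third_party_invite(pl, "@rando:hs")
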
From e793866398ffb3e222a86ebb15b9d24220accbc8 Mon Sep 17 00:00:00 2001
From: David Baker
Date: Thu, 2 Jun 2016 09:41:13 +0100
Subject: [PATCH 053/414] Use user_id in email greeting if display name is null
---
synapse/push/mailer.py | 2 ++
1 file changed, 2 insertions(+)
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py
index 3ae92d157..fe5d67a03 100644
--- a/synapse/push/mailer.py
+++ b/synapse/push/mailer.py
@@ -122,6 +122,8 @@ class Mailer(object):
user_display_name = yield self.store.get_profile_displayname(
UserID.from_string(user_id).localpart
)
+ if user_display_name is None:
+ user_display_name = user_id
except StoreError:
user_display_name = user_id
From a15ad608496fd29fb8bf289152c23adca822beca Mon Sep 17 00:00:00 2001
From: David Baker
Date: Thu, 2 Jun 2016 11:44:15 +0100
Subject: [PATCH 054/414] Email unsubscribing that may, in theory, work
Were it not for the fact that you can't use the base handler in the pusher because it pulls in the world. Committing while I fix that on a different branch.
---
synapse/handlers/auth.py | 5 +++++
synapse/push/emailpusher.py | 2 +-
synapse/push/mailer.py | 21 ++++++++++++++++-----
3 files changed, 22 insertions(+), 6 deletions(-)
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index 26c865e17..200793b5e 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -529,6 +529,11 @@ class AuthHandler(BaseHandler):
macaroon.add_first_party_caveat("time < %d" % (expiry,))
return macaroon.serialize()
+ def generate_delete_pusher_token(self, user_id):
+ macaroon = self._generate_base_macaroon(user_id)
+ macaroon.add_first_party_caveat("type = delete_pusher")
+ return macaroon.serialize()
+
def validate_short_term_login_token_and_get_user_id(self, login_token):
try:
macaroon = pymacaroons.Macaroon.deserialize(login_token)
diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py
index a72cba830..46d7c0434 100644
--- a/synapse/push/emailpusher.py
+++ b/synapse/push/emailpusher.py
@@ -273,5 +273,5 @@ class EmailPusher(object):
logger.info("Sending notif email for user %r", self.user_id)
yield self.mailer.send_notification_mail(
- self.user_id, self.email, push_actions, reason
+ self.app_id, self.user_id, self.email, push_actions, reason
)
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py
index 3ae92d157..95250bad7 100644
--- a/synapse/push/mailer.py
+++ b/synapse/push/mailer.py
@@ -81,6 +81,7 @@ class Mailer(object):
def __init__(self, hs):
self.hs = hs
self.store = self.hs.get_datastore()
+ self.handlers = self.hs.get_handlers()
self.state_handler = self.hs.get_state_handler()
loader = jinja2.FileSystemLoader(self.hs.config.email_template_dir)
self.app_name = self.hs.config.email_app_name
@@ -95,7 +96,8 @@ class Mailer(object):
)
@defer.inlineCallbacks
- def send_notification_mail(self, user_id, email_address, push_actions, reason):
+ def send_notification_mail(self, app_id, user_id, email_address,
+ push_actions, reason):
raw_from = email.utils.parseaddr(self.hs.config.email_notif_from)[1]
raw_to = email.utils.parseaddr(email_address)[1]
@@ -157,7 +159,7 @@ class Mailer(object):
template_vars = {
"user_display_name": user_display_name,
- "unsubscribe_link": self.make_unsubscribe_link(),
+ "unsubscribe_link": self.make_unsubscribe_link(app_id, email_address),
"summary_text": summary_text,
"app_name": self.app_name,
"rooms": rooms,
@@ -423,9 +425,18 @@ class Mailer(object):
notif['room_id'], notif['event_id']
)
- def make_unsubscribe_link(self):
- # XXX: matrix.to
- return "https://vector.im/#/settings"
+    def make_unsubscribe_link(self, user_id, app_id, email_address):
+        params = {
+            "access_token": self.handlers.auth_handler.generate_delete_pusher_token(user_id),
+ "app_id": app_id,
+ "pushkey": email_address,
+ }
+
+ # XXX: make r0 once API is stable
+ return "%s_matrix/client/unstable/pushers/remove?%s" % (
+ self.hs.config.public_baseurl,
+ urllib.urlencode(params),
+ )
def mxc_to_http_filter(self, value, width, height, resize_method="crop"):
if value[0:6] != "mxc://":
From f84b89f0c6b2e67897fec8639b79bf1d45c8f2b6 Mon Sep 17 00:00:00 2001
From: Matthew Hodgson
Date: Thu, 2 Jun 2016 13:29:48 +0100
Subject: [PATCH 055/414] if an email pusher specifies a brand param, use it
---
synapse/push/emailpusher.py | 7 ++++++-
synapse/push/mailer.py | 4 ++--
2 files changed, 8 insertions(+), 3 deletions(-)
diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py
index a72cba830..e38ed0200 100644
--- a/synapse/push/emailpusher.py
+++ b/synapse/push/emailpusher.py
@@ -72,7 +72,12 @@ class EmailPusher(object):
self.processing = False
if self.hs.config.email_enable_notifs:
- self.mailer = Mailer(self.hs)
+ if 'data' in pusherdict and 'brand' in pusherdict['data']:
+ app_name = pusherdict['data']['brand']
+ else:
+ app_name = self.hs.config.email_app_name
+
+ self.mailer = Mailer(self.hs, app_name)
else:
self.mailer = None
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py
index fe5d67a03..0e9d8ccb5 100644
--- a/synapse/push/mailer.py
+++ b/synapse/push/mailer.py
@@ -78,12 +78,12 @@ ALLOWED_ATTRS = {
class Mailer(object):
- def __init__(self, hs):
+ def __init__(self, hs, app_name):
self.hs = hs
self.store = self.hs.get_datastore()
self.state_handler = self.hs.get_state_handler()
loader = jinja2.FileSystemLoader(self.hs.config.email_template_dir)
- self.app_name = self.hs.config.email_app_name
+ self.app_name = app_name
env = jinja2.Environment(loader=loader)
env.filters["format_ts"] = format_ts_filter
env.filters["mxc_to_http"] = self.mxc_to_http_filter
From 4a10510cd5aff790127a185ecefc83b881a717cc Mon Sep 17 00:00:00 2001
From: David Baker
Date: Thu, 2 Jun 2016 13:31:45 +0100
Subject: [PATCH 056/414] Split out the auth handler
---
synapse/handlers/__init__.py | 2 --
synapse/handlers/register.py | 2 +-
synapse/rest/client/v1/login.py | 11 ++++++-----
synapse/rest/client/v2_alpha/account.py | 4 ++--
synapse/rest/client/v2_alpha/auth.py | 2 +-
synapse/rest/client/v2_alpha/register.py | 2 +-
synapse/rest/client/v2_alpha/tokenrefresh.py | 2 +-
synapse/server.py | 5 +++++
tests/rest/client/v2_alpha/test_register.py | 2 +-
tests/utils.py | 15 +++++----------
10 files changed, 23 insertions(+), 24 deletions(-)
diff --git a/synapse/handlers/__init__.py b/synapse/handlers/__init__.py
index c0069e23d..d28e07f0d 100644
--- a/synapse/handlers/__init__.py
+++ b/synapse/handlers/__init__.py
@@ -24,7 +24,6 @@ from .federation import FederationHandler
from .profile import ProfileHandler
from .directory import DirectoryHandler
from .admin import AdminHandler
-from .auth import AuthHandler
from .identity import IdentityHandler
from .receipts import ReceiptsHandler
from .search import SearchHandler
@@ -50,7 +49,6 @@ class Handlers(object):
self.directory_handler = DirectoryHandler(hs)
self.admin_handler = AdminHandler(hs)
self.receipts_handler = ReceiptsHandler(hs)
- self.auth_handler = AuthHandler(hs)
self.identity_handler = IdentityHandler(hs)
self.search_handler = SearchHandler(hs)
self.room_context_handler = RoomContextHandler(hs)
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index 16f33f837..bbc07b045 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -413,7 +413,7 @@ class RegistrationHandler(BaseHandler):
defer.returnValue((user_id, token))
def auth_handler(self):
- return self.hs.get_handlers().auth_handler
+ return self.hs.get_auth_handler()
@defer.inlineCallbacks
def guest_access_token_for(self, medium, address, inviter_user_id):
diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py
index 3b5544851..8df9d10ef 100644
--- a/synapse/rest/client/v1/login.py
+++ b/synapse/rest/client/v1/login.py
@@ -58,6 +58,7 @@ class LoginRestServlet(ClientV1RestServlet):
self.cas_required_attributes = hs.config.cas_required_attributes
self.servername = hs.config.server_name
self.http_client = hs.get_simple_http_client()
+ self.auth_handler = self.hs.get_auth_handler()
def on_GET(self, request):
flows = []
@@ -143,7 +144,7 @@ class LoginRestServlet(ClientV1RestServlet):
user_id, self.hs.hostname
).to_string()
- auth_handler = self.handlers.auth_handler
+ auth_handler = self.auth_handler
user_id, access_token, refresh_token = yield auth_handler.login_with_password(
user_id=user_id,
password=login_submission["password"])
@@ -160,7 +161,7 @@ class LoginRestServlet(ClientV1RestServlet):
@defer.inlineCallbacks
def do_token_login(self, login_submission):
token = login_submission['token']
- auth_handler = self.handlers.auth_handler
+ auth_handler = self.auth_handler
user_id = (
yield auth_handler.validate_short_term_login_token_and_get_user_id(token)
)
@@ -194,7 +195,7 @@ class LoginRestServlet(ClientV1RestServlet):
raise LoginError(401, "Unauthorized", errcode=Codes.UNAUTHORIZED)
user_id = UserID.create(user, self.hs.hostname).to_string()
- auth_handler = self.handlers.auth_handler
+ auth_handler = self.auth_handler
user_exists = yield auth_handler.does_user_exist(user_id)
if user_exists:
user_id, access_token, refresh_token = (
@@ -243,7 +244,7 @@ class LoginRestServlet(ClientV1RestServlet):
raise LoginError(401, "Invalid JWT", errcode=Codes.UNAUTHORIZED)
user_id = UserID.create(user, self.hs.hostname).to_string()
- auth_handler = self.handlers.auth_handler
+ auth_handler = self.auth_handler
user_exists = yield auth_handler.does_user_exist(user_id)
if user_exists:
user_id, access_token, refresh_token = (
@@ -412,7 +413,7 @@ class CasTicketServlet(ClientV1RestServlet):
raise LoginError(401, "Unauthorized", errcode=Codes.UNAUTHORIZED)
user_id = UserID.create(user, self.hs.hostname).to_string()
- auth_handler = self.handlers.auth_handler
+ auth_handler = self.auth_handler
user_exists = yield auth_handler.does_user_exist(user_id)
if not user_exists:
user_id, _ = (
diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py
index c88c27053..9a84873a5 100644
--- a/synapse/rest/client/v2_alpha/account.py
+++ b/synapse/rest/client/v2_alpha/account.py
@@ -35,7 +35,7 @@ class PasswordRestServlet(RestServlet):
super(PasswordRestServlet, self).__init__()
self.hs = hs
self.auth = hs.get_auth()
- self.auth_handler = hs.get_handlers().auth_handler
+ self.auth_handler = hs.get_auth_handler()
@defer.inlineCallbacks
def on_POST(self, request):
@@ -97,7 +97,7 @@ class ThreepidRestServlet(RestServlet):
self.hs = hs
self.identity_handler = hs.get_handlers().identity_handler
self.auth = hs.get_auth()
- self.auth_handler = hs.get_handlers().auth_handler
+ self.auth_handler = hs.get_auth_handler()
@defer.inlineCallbacks
def on_GET(self, request):
diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py
index 78181b7b1..58d3cad6a 100644
--- a/synapse/rest/client/v2_alpha/auth.py
+++ b/synapse/rest/client/v2_alpha/auth.py
@@ -104,7 +104,7 @@ class AuthRestServlet(RestServlet):
super(AuthRestServlet, self).__init__()
self.hs = hs
self.auth = hs.get_auth()
- self.auth_handler = hs.get_handlers().auth_handler
+ self.auth_handler = hs.get_auth_handler()
self.registration_handler = hs.get_handlers().registration_handler
@defer.inlineCallbacks
diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py
index 1ecc02d94..2088c316d 100644
--- a/synapse/rest/client/v2_alpha/register.py
+++ b/synapse/rest/client/v2_alpha/register.py
@@ -49,7 +49,7 @@ class RegisterRestServlet(RestServlet):
self.hs = hs
self.auth = hs.get_auth()
self.store = hs.get_datastore()
- self.auth_handler = hs.get_handlers().auth_handler
+ self.auth_handler = hs.get_auth_handler()
self.registration_handler = hs.get_handlers().registration_handler
self.identity_handler = hs.get_handlers().identity_handler
diff --git a/synapse/rest/client/v2_alpha/tokenrefresh.py b/synapse/rest/client/v2_alpha/tokenrefresh.py
index a158c2209..8270e8787 100644
--- a/synapse/rest/client/v2_alpha/tokenrefresh.py
+++ b/synapse/rest/client/v2_alpha/tokenrefresh.py
@@ -38,7 +38,7 @@ class TokenRefreshRestServlet(RestServlet):
body = parse_json_object_from_request(request)
try:
old_refresh_token = body["refresh_token"]
- auth_handler = self.hs.get_handlers().auth_handler
+ auth_handler = self.hs.get_auth_handler()
(user_id, new_refresh_token) = yield self.store.exchange_refresh_token(
old_refresh_token, auth_handler.generate_refresh_token)
new_access_token = yield auth_handler.issue_access_token(user_id)
diff --git a/synapse/server.py b/synapse/server.py
index 7cf22b1ee..dd4b81c65 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -33,6 +33,7 @@ from synapse.handlers.presence import PresenceHandler
from synapse.handlers.sync import SyncHandler
from synapse.handlers.typing import TypingHandler
from synapse.handlers.room import RoomListHandler
+from synapse.handlers.auth import AuthHandler
from synapse.handlers.appservice import ApplicationServicesHandler
from synapse.state import StateHandler
from synapse.storage import DataStore
@@ -89,6 +90,7 @@ class HomeServer(object):
'sync_handler',
'typing_handler',
'room_list_handler',
+ 'auth_handler',
'application_service_api',
'application_service_scheduler',
'application_service_handler',
@@ -190,6 +192,9 @@ class HomeServer(object):
def build_room_list_handler(self):
return RoomListHandler(self)
+ def build_auth_handler(self):
+ return AuthHandler(self)
+
def build_application_service_api(self):
return ApplicationServiceApi(self)
diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py
index affd42c01..cda0a2b27 100644
--- a/tests/rest/client/v2_alpha/test_register.py
+++ b/tests/rest/client/v2_alpha/test_register.py
@@ -33,7 +33,6 @@ class RegisterRestServletTestCase(unittest.TestCase):
# do the dance to hook it up to the hs global
self.handlers = Mock(
- auth_handler=self.auth_handler,
registration_handler=self.registration_handler,
identity_handler=self.identity_handler,
login_handler=self.login_handler
@@ -42,6 +41,7 @@ class RegisterRestServletTestCase(unittest.TestCase):
self.hs.hostname = "superbig~testing~thing.com"
self.hs.get_auth = Mock(return_value=self.auth)
self.hs.get_handlers = Mock(return_value=self.handlers)
+ self.hs.get_auth_handler = Mock(return_value=self.auth_handler)
self.hs.config.enable_registration = True
# init the thing we're testing
diff --git a/tests/utils.py b/tests/utils.py
index 006abedbc..e19ae581e 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -81,16 +81,11 @@ def setup_test_homeserver(name="test", datastore=None, config=None, **kargs):
)
# bcrypt is far too slow to be doing in unit tests
- def swap_out_hash_for_testing(old_build_handlers):
- def build_handlers():
- handlers = old_build_handlers()
- auth_handler = handlers.auth_handler
- auth_handler.hash = lambda p: hashlib.md5(p).hexdigest()
- auth_handler.validate_hash = lambda p, h: hashlib.md5(p).hexdigest() == h
- return handlers
- return build_handlers
-
- hs.build_handlers = swap_out_hash_for_testing(hs.build_handlers)
+ # Need to let the HS build an auth handler and then mess with it
+ # because AuthHandler's constructor requires the HS, so we can't make one
+ # beforehand and pass it in to the HS's constructor (chicken / egg)
+ hs.get_auth_handler().hash = lambda p: hashlib.md5(p).hexdigest()
+ hs.get_auth_handler().validate_hash = lambda p, h: hashlib.md5(p).hexdigest() == h
fed = kargs.get("resource_for_federation", None)
if fed:
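
The move works because HomeServer generates a cached get_<name>() accessor
for every name in its dependency list, calling build_<name>() at most once. A
minimal sketch of that pattern (MiniHomeServer is illustrative; the real
class generates the accessors programmatically):

    class MiniHomeServer(object):
        DEPENDENCIES = ["auth_handler"]

        def __init__(self):
            self._cache = {}

        def _get(self, name):
            # build each dependency lazily, exactly once
            if name not in self._cache:
                self._cache[name] = getattr(self, "build_%s" % name)()
            return self._cache[name]

        def get_auth_handler(self):
            return self._get("auth_handler")

        def build_auth_handler(self):
            return object()  # stands in for AuthHandler(self)

    hs = MiniHomeServer()
    assert hs.get_auth_handler() is hs.get_auth_handler()  # built once, cached
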
From 356f13c0696526032c211c103dad2f57d18473fa Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 2 Jun 2016 14:07:38 +0100
Subject: [PATCH 057/414] Disable INCLUDE_ALL_UNREAD_NOTIFS
---
synapse/push/emailpusher.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py
index e38ed0200..2c21ed308 100644
--- a/synapse/push/emailpusher.py
+++ b/synapse/push/emailpusher.py
@@ -44,7 +44,8 @@ THROTTLE_RESET_AFTER_MS = (12 * 60 * 60 * 1000)
# does each email include all unread notifs, or just the ones which have happened
# since the last mail?
-INCLUDE_ALL_UNREAD_NOTIFS = True
+# XXX: this is currently broken as it includes ones from parted rooms(!)
+INCLUDE_ALL_UNREAD_NOTIFS = False
class EmailPusher(object):
From 70599ce9252997d32d0bf9f26a4e02c99bbe474d Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Thu, 2 Jun 2016 15:20:15 +0100
Subject: [PATCH 058/414] Allow external processes to mark a user as syncing.
(#812)
* Add infrastructure to the presence handler to track sync requests in external processes
* Expire stale entries for dead external processes
* Add an http endpoint for marking users as syncing
Add some docstrings and comments.
* Fixes
---
synapse/handlers/presence.py | 119 ++++++++++++++++++++---
synapse/replication/presence_resource.py | 59 +++++++++++
synapse/replication/resource.py | 2 +
tests/handlers/test_presence.py | 16 ++-
4 files changed, 174 insertions(+), 22 deletions(-)
create mode 100644 synapse/replication/presence_resource.py
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index 37f57301f..fc8538b41 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -68,6 +68,10 @@ FEDERATION_TIMEOUT = 30 * 60 * 1000
# How often to resend presence to remote servers
FEDERATION_PING_INTERVAL = 25 * 60 * 1000
+# How long we will wait before assuming that the syncs from an external process
+# are dead.
+EXTERNAL_PROCESS_EXPIRY = 5 * 60 * 1000
+
assert LAST_ACTIVE_GRANULARITY < IDLE_TIMER
@@ -158,10 +162,21 @@ class PresenceHandler(object):
self.serial_to_user = {}
self._next_serial = 1
- # Keeps track of the number of *ongoing* syncs. While this is non zero
- # a user will never go offline.
+ # Keeps track of the number of *ongoing* syncs on this process. While
+ # this is non zero a user will never go offline.
self.user_to_num_current_syncs = {}
+ # Keeps track of the number of *ongoing* syncs on other processes.
+ # While any sync is ongoing on another process the user will never
+ # go offline.
+ # Each process has a unique identifier and an update frequency. If
+ # no update is received from that process within the update period then
+ # we assume that all the sync requests on that process have stopped.
+ # Stored as a dict from process_id to set of user_id, and a dict of
+ # process_id to millisecond timestamp last updated.
+ self.external_process_to_current_syncs = {}
+ self.external_process_last_updated_ms = {}
+
# Start a LoopingCall in 30s that fires every 5s.
# The initial delay is to allow disconnected clients a chance to
# reconnect before we treat them as offline.
@@ -272,13 +287,26 @@ class PresenceHandler(object):
# Fetch the list of users that *may* have timed out. Things may have
# changed since the timeout was set, so we won't necessarily have to
# take any action.
- users_to_check = self.wheel_timer.fetch(now)
+ users_to_check = set(self.wheel_timer.fetch(now))
+
+        # Check whether the lists of syncing processes from external
+        # processes have expired.
+        expired_process_ids = [
+            process_id for process_id, last_update
+            in self.external_process_last_updated_ms.items()
+            if now - last_update > EXTERNAL_PROCESS_EXPIRY
+        ]
+        for process_id in expired_process_ids:
+            users_to_check.update(
+                self.external_process_to_current_syncs.pop(process_id, ())
+            )
+            self.external_process_last_updated_ms.pop(process_id)
states = [
self.user_to_current_state.get(
user_id, UserPresenceState.default(user_id)
)
- for user_id in set(users_to_check)
+ for user_id in users_to_check
]
timers_fired_counter.inc_by(len(states))
@@ -286,7 +314,7 @@ class PresenceHandler(object):
changes = handle_timeouts(
states,
is_mine_fn=self.is_mine_id,
- user_to_num_current_syncs=self.user_to_num_current_syncs,
+ syncing_users=self.get_syncing_users(),
now=now,
)
@@ -363,6 +391,73 @@ class PresenceHandler(object):
defer.returnValue(_user_syncing())
+ def get_currently_syncing_users(self):
+ """Get the set of user ids that are currently syncing on this HS.
+ Returns:
+ set(str): A set of user_id strings.
+ """
+ syncing_user_ids = {
+ user_id for user_id, count in self.user_to_num_current_syncs.items()
+ if count
+ }
+        for user_ids in self.external_process_to_current_syncs.values():
+            syncing_user_ids.update(user_ids)
+ return syncing_user_ids
+
+ @defer.inlineCallbacks
+ def update_external_syncs(self, process_id, syncing_user_ids):
+ """Update the syncing users for an external process
+
+ Args:
+ process_id(str): An identifier for the process the users are
+ syncing against. This allows synapse to process updates
+            as users start and stop syncing against a given process.
+ syncing_user_ids(set(str)): The set of user_ids that are
+ currently syncing on that server.
+ """
+
+ # Grab the previous list of user_ids that were syncing on that process
+ prev_syncing_user_ids = (
+ self.external_process_to_current_syncs.get(process_id, set())
+ )
+ # Grab the current presence state for both the users that are syncing
+ # now and the users that were syncing before this update.
+ prev_states = yield self.current_state_for_users(
+ syncing_user_ids | prev_syncing_user_ids
+ )
+ updates = []
+ time_now_ms = self.clock.time_msec()
+
+ # For each new user that is syncing check if we need to mark them as
+ # being online.
+ for new_user_id in syncing_user_ids - prev_syncing_user_ids:
+ prev_state = prev_states[new_user_id]
+ if prev_state.state == PresenceState.OFFLINE:
+ updates.append(prev_state.copy_and_replace(
+ state=PresenceState.ONLINE,
+ last_active_ts=time_now_ms,
+ last_user_sync_ts=time_now_ms,
+ ))
+ else:
+ updates.append(prev_state.copy_and_replace(
+ last_user_sync_ts=time_now_ms,
+ ))
+
+ # For each user that is still syncing or stopped syncing update the
+ # last sync time so that we will correctly apply the grace period when
+ # they stop syncing.
+ for old_user_id in prev_syncing_user_ids:
+ prev_state = prev_states[old_user_id]
+ updates.append(prev_state.copy_and_replace(
+ last_user_sync_ts=time_now_ms,
+ ))
+
+ yield self._update_states(updates)
+
+ # Update the last updated time for the process. We expire the entries
+ # if we don't receive an update in the given timeframe.
+ self.external_process_last_updated_ms[process_id] = self.clock.time_msec()
+ self.external_process_to_current_syncs[process_id] = syncing_user_ids
+
@defer.inlineCallbacks
def current_state_for_user(self, user_id):
"""Get the current presence state for a user.
@@ -935,15 +1030,14 @@ class PresenceEventSource(object):
return self.get_new_events(user, from_key=None, include_offline=False)
-def handle_timeouts(user_states, is_mine_fn, user_to_num_current_syncs, now):
+def handle_timeouts(user_states, is_mine_fn, syncing_user_ids, now):
"""Checks the presence of users that have timed out and updates as
appropriate.
Args:
user_states(list): List of UserPresenceState's to check.
is_mine_fn (fn): Function that returns if a user_id is ours
- user_to_num_current_syncs (dict): Mapping of user_id to number of currently
- active syncs.
+ syncing_user_ids (set): Set of user_ids with active syncs.
now (int): Current time in ms.
Returns:
@@ -954,21 +1048,20 @@ def handle_timeouts(user_states, is_mine_fn, user_to_num_current_syncs, now):
for state in user_states:
is_mine = is_mine_fn(state.user_id)
- new_state = handle_timeout(state, is_mine, user_to_num_current_syncs, now)
+ new_state = handle_timeout(state, is_mine, syncing_user_ids, now)
if new_state:
changes[state.user_id] = new_state
return changes.values()
-def handle_timeout(state, is_mine, user_to_num_current_syncs, now):
+def handle_timeout(state, is_mine, syncing_user_ids, now):
"""Checks the presence of the user to see if any of the timers have elapsed
Args:
state (UserPresenceState)
is_mine (bool): Whether the user is ours
- user_to_num_current_syncs (dict): Mapping of user_id to number of currently
- active syncs.
+ syncing_user_ids (set): Set of user_ids with active syncs.
now (int): Current time in ms.
Returns:
@@ -1002,7 +1095,7 @@ def handle_timeout(state, is_mine, user_to_num_current_syncs, now):
# If there have been no syncs for a while (and none ongoing),
# set presence to offline
- if not user_to_num_current_syncs.get(user_id, 0):
+ if user_id not in syncing_user_ids:
if now - state.last_user_sync_ts > SYNC_ONLINE_TIMEOUT:
state = state.copy_and_replace(
state=PresenceState.OFFLINE,
diff --git a/synapse/replication/presence_resource.py b/synapse/replication/presence_resource.py
new file mode 100644
index 000000000..fc18130ab
--- /dev/null
+++ b/synapse/replication/presence_resource.py
@@ -0,0 +1,59 @@
+# Copyright 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.http.server import respond_with_json_bytes, request_handler
+from synapse.http.servlet import parse_json_object_from_request
+
+from twisted.web.resource import Resource
+from twisted.web.server import NOT_DONE_YET
+from twisted.internet import defer
+
+
+class PresenceResource(Resource):
+ """
+ HTTP endpoint for marking users as syncing.
+
+ POST /_synapse/replication/presence HTTP/1.1
+ Content-Type: application/json
+
+ {
+ "process_id": "",
+ "syncing_users": [""]
+ }
+ """
+
+ def __init__(self, hs):
+ Resource.__init__(self) # Resource is old-style, so no super()
+
+ self.version_string = hs.version_string
+ self.clock = hs.get_clock()
+ self.presence_handler = hs.get_presence_handler()
+
+ def render_POST(self, request):
+ self._async_render_POST(request)
+ return NOT_DONE_YET
+
+ @request_handler()
+ @defer.inlineCallbacks
+ def _async_render_POST(self, request):
+ content = parse_json_object_from_request(request)
+
+ process_id = content["process_id"]
+ syncing_user_ids = content["syncing_users"]
+
+ yield self.presence_handler.update_external_syncs(
+ process_id, set(syncing_user_ids)
+ )
+
+ respond_with_json_bytes(request, 200, "{}")
diff --git a/synapse/replication/resource.py b/synapse/replication/resource.py
index 847f212a3..8c2d487ff 100644
--- a/synapse/replication/resource.py
+++ b/synapse/replication/resource.py
@@ -16,6 +16,7 @@
from synapse.http.servlet import parse_integer, parse_string
from synapse.http.server import request_handler, finish_request
from synapse.replication.pusher_resource import PusherResource
+from synapse.replication.presence_resource import PresenceResource
from twisted.web.resource import Resource
from twisted.web.server import NOT_DONE_YET
@@ -115,6 +116,7 @@ class ReplicationResource(Resource):
self.clock = hs.get_clock()
self.putChild("remove_pushers", PusherResource(hs))
+ self.putChild("syncing_users", PresenceResource(hs))
def render_GET(self, request):
self._async_render_GET(request)
diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py
index 87c795fcf..b531ba854 100644
--- a/tests/handlers/test_presence.py
+++ b/tests/handlers/test_presence.py
@@ -264,7 +264,7 @@ class PresenceTimeoutTestCase(unittest.TestCase):
)
new_state = handle_timeout(
- state, is_mine=True, user_to_num_current_syncs={}, now=now
+ state, is_mine=True, syncing_user_ids=set(), now=now
)
self.assertIsNotNone(new_state)
@@ -282,7 +282,7 @@ class PresenceTimeoutTestCase(unittest.TestCase):
)
new_state = handle_timeout(
- state, is_mine=True, user_to_num_current_syncs={}, now=now
+ state, is_mine=True, syncing_user_ids=set(), now=now
)
self.assertIsNotNone(new_state)
@@ -300,9 +300,7 @@ class PresenceTimeoutTestCase(unittest.TestCase):
)
new_state = handle_timeout(
- state, is_mine=True, user_to_num_current_syncs={
- user_id: 1,
- }, now=now
+ state, is_mine=True, syncing_user_ids=set([user_id]), now=now
)
self.assertIsNotNone(new_state)
@@ -321,7 +319,7 @@ class PresenceTimeoutTestCase(unittest.TestCase):
)
new_state = handle_timeout(
- state, is_mine=True, user_to_num_current_syncs={}, now=now
+ state, is_mine=True, syncing_user_ids=set(), now=now
)
self.assertIsNotNone(new_state)
@@ -340,7 +338,7 @@ class PresenceTimeoutTestCase(unittest.TestCase):
)
new_state = handle_timeout(
- state, is_mine=True, user_to_num_current_syncs={}, now=now
+ state, is_mine=True, syncing_user_ids=set(), now=now
)
self.assertIsNone(new_state)
@@ -358,7 +356,7 @@ class PresenceTimeoutTestCase(unittest.TestCase):
)
new_state = handle_timeout(
- state, is_mine=False, user_to_num_current_syncs={}, now=now
+ state, is_mine=False, syncing_user_ids=set(), now=now
)
self.assertIsNotNone(new_state)
@@ -377,7 +375,7 @@ class PresenceTimeoutTestCase(unittest.TestCase):
)
new_state = handle_timeout(
- state, is_mine=True, user_to_num_current_syncs={}, now=now
+ state, is_mine=True, syncing_user_ids=set(), now=now
)
self.assertIsNotNone(new_state)
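
The expiry rule in isolation: any process that has not checked in within
EXTERNAL_PROCESS_EXPIRY ms has all of its syncing users handed back to the
timeout checker. A self-contained sketch (worker names and timestamps are
illustrative):

    EXTERNAL_PROCESS_EXPIRY = 5 * 60 * 1000

    def expire_stale_processes(current_syncs, last_updated_ms, now):
        users_to_check = set()
        expired = [
            process_id for process_id, last_update in last_updated_ms.items()
            if now - last_update > EXTERNAL_PROCESS_EXPIRY
        ]
        for process_id in expired:
            users_to_check.update(current_syncs.pop(process_id, ()))
            last_updated_ms.pop(process_id)
        return users_to_check

    current_syncs = {"worker1": {"@a:hs"}, "worker2": {"@b:hs"}}
    last_updated_ms = {"worker1": 0, "worker2": 1000000}
    assert expire_stale_processes(current_syncs, last_updated_ms, 1000001) == {"@a:hs"}
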
From 661a540dd1de89a3ab3a8f6ca0f780ea7d264176 Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Thu, 2 Jun 2016 15:20:28 +0100
Subject: [PATCH 059/414] Deduplicate presence entries in sync (#818)
---
synapse/handlers/sync.py | 3 +++
1 file changed, 3 insertions(+)
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 3b89582d7..5307b62b8 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -637,6 +637,9 @@ class SyncHandler(object):
)
presence.extend(states)
+ # Deduplicate the presence entries so that there's at most one per user
+ presence = {p["content"]["user_id"]: p for p in presence}.values()
+
presence = sync_config.filter_collection.filter_presence(
presence
)
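
The deduplication works because a dict comprehension keyed on user_id keeps
only the last entry seen for each user. In isolation (the event dicts are
illustrative):

    presence = [
        {"type": "m.presence", "content": {"user_id": "@a:hs", "presence": "online"}},
        {"type": "m.presence", "content": {"user_id": "@b:hs", "presence": "online"}},
        {"type": "m.presence", "content": {"user_id": "@a:hs", "presence": "unavailable"}},
    ]
    presence = {p["content"]["user_id"]: p for p in presence}.values()
    assert sorted(p["content"]["presence"] for p in presence) == ["online", "unavailable"]
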
From 80f34d7b574e1a6b8bc922df41bd53b59260fcf2 Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Thu, 2 Jun 2016 15:23:09 +0100
Subject: [PATCH 060/414] Fix setting the _clock in SQLBaseStore
---
synapse/storage/_base.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index 56a0dd80f..32c6677d4 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -152,6 +152,7 @@ class SQLBaseStore(object):
def __init__(self, hs):
self.hs = hs
+ self._clock = hs.get_clock()
self._db_pool = hs.get_db_pool()
self._previous_txn_total_time = 0
From 56d15a05306896555ded3025f8a808bda04872fa Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Thu, 2 Jun 2016 16:28:54 +0100
Subject: [PATCH 061/414] Store the typing users as user_id strings. (#819)
Rather than storing them as UserID objects.
---
synapse/handlers/typing.py | 64 ++++++++++++++++++++---------------
tests/handlers/test_typing.py | 4 +--
2 files changed, 38 insertions(+), 30 deletions(-)
diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py
index d46f05f42..3c54307be 100644
--- a/synapse/handlers/typing.py
+++ b/synapse/handlers/typing.py
@@ -30,7 +30,7 @@ logger = logging.getLogger(__name__)
# A tiny object useful for storing a user's membership in a room, as a mapping
# key
-RoomMember = namedtuple("RoomMember", ("room_id", "user"))
+RoomMember = namedtuple("RoomMember", ("room_id", "user_id"))
class TypingHandler(object):
@@ -38,7 +38,7 @@ class TypingHandler(object):
self.store = hs.get_datastore()
self.server_name = hs.config.server_name
self.auth = hs.get_auth()
- self.is_mine = hs.is_mine
+ self.is_mine_id = hs.is_mine_id
self.notifier = hs.get_notifier()
self.clock = hs.get_clock()
@@ -67,20 +67,23 @@ class TypingHandler(object):
@defer.inlineCallbacks
def started_typing(self, target_user, auth_user, room_id, timeout):
- if not self.is_mine(target_user):
+ target_user_id = target_user.to_string()
+ auth_user_id = auth_user.to_string()
+
+ if not self.is_mine_id(target_user_id):
raise SynapseError(400, "User is not hosted on this Home Server")
- if target_user != auth_user:
+ if target_user_id != auth_user_id:
raise AuthError(400, "Cannot set another user's typing state")
- yield self.auth.check_joined_room(room_id, target_user.to_string())
+ yield self.auth.check_joined_room(room_id, target_user_id)
logger.debug(
- "%s has started typing in %s", target_user.to_string(), room_id
+ "%s has started typing in %s", target_user_id, room_id
)
until = self.clock.time_msec() + timeout
- member = RoomMember(room_id=room_id, user=target_user)
+ member = RoomMember(room_id=room_id, user_id=target_user_id)
was_present = member in self._member_typing_until
@@ -104,25 +107,28 @@ class TypingHandler(object):
yield self._push_update(
room_id=room_id,
- user=target_user,
+ user_id=target_user_id,
typing=True,
)
@defer.inlineCallbacks
def stopped_typing(self, target_user, auth_user, room_id):
- if not self.is_mine(target_user):
+ target_user_id = target_user.to_string()
+ auth_user_id = auth_user.to_string()
+
+ if not self.is_mine_id(target_user_id):
raise SynapseError(400, "User is not hosted on this Home Server")
- if target_user != auth_user:
+ if target_user_id != auth_user_id:
raise AuthError(400, "Cannot set another user's typing state")
- yield self.auth.check_joined_room(room_id, target_user.to_string())
+ yield self.auth.check_joined_room(room_id, target_user_id)
logger.debug(
- "%s has stopped typing in %s", target_user.to_string(), room_id
+ "%s has stopped typing in %s", target_user_id, room_id
)
- member = RoomMember(room_id=room_id, user=target_user)
+ member = RoomMember(room_id=room_id, user_id=target_user_id)
if member in self._member_typing_timer:
self.clock.cancel_call_later(self._member_typing_timer[member])
@@ -132,8 +138,9 @@ class TypingHandler(object):
@defer.inlineCallbacks
def user_left_room(self, user, room_id):
- if self.is_mine(user):
- member = RoomMember(room_id=room_id, user=user)
+ user_id = user.to_string()
+ if self.is_mine_id(user_id):
+            member = RoomMember(room_id=room_id, user_id=user_id)
yield self._stopped_typing(member)
@defer.inlineCallbacks
@@ -144,7 +151,7 @@ class TypingHandler(object):
yield self._push_update(
room_id=member.room_id,
- user=member.user,
+ user_id=member.user_id,
typing=False,
)
@@ -156,7 +163,7 @@ class TypingHandler(object):
del self._member_typing_timer[member]
@defer.inlineCallbacks
- def _push_update(self, room_id, user, typing):
+ def _push_update(self, room_id, user_id, typing):
domains = yield self.store.get_joined_hosts_for_room(room_id)
deferreds = []
@@ -164,7 +171,7 @@ class TypingHandler(object):
if domain == self.server_name:
self._push_update_local(
room_id=room_id,
- user=user,
+ user_id=user_id,
typing=typing
)
else:
@@ -173,7 +180,7 @@ class TypingHandler(object):
edu_type="m.typing",
content={
"room_id": room_id,
- "user_id": user.to_string(),
+ "user_id": user_id,
"typing": typing,
},
))
@@ -183,23 +190,26 @@ class TypingHandler(object):
@defer.inlineCallbacks
def _recv_edu(self, origin, content):
room_id = content["room_id"]
- user = UserID.from_string(content["user_id"])
+ user_id = content["user_id"]
+
+ # Check that the string is a valid user id
+ UserID.from_string(user_id)
domains = yield self.store.get_joined_hosts_for_room(room_id)
if self.server_name in domains:
self._push_update_local(
room_id=room_id,
- user=user,
+ user_id=user_id,
typing=content["typing"]
)
- def _push_update_local(self, room_id, user, typing):
+ def _push_update_local(self, room_id, user_id, typing):
room_set = self._room_typing.setdefault(room_id, set())
if typing:
- room_set.add(user)
+ room_set.add(user_id)
else:
- room_set.discard(user)
+ room_set.discard(user_id)
self._latest_room_serial += 1
self._room_serials[room_id] = self._latest_room_serial
@@ -215,9 +225,7 @@ class TypingHandler(object):
for room_id, serial in self._room_serials.items():
if last_id < serial and serial <= current_id:
typing = self._room_typing[room_id]
- typing_bytes = json.dumps([
- u.to_string() for u in typing
- ], ensure_ascii=False)
+ typing_bytes = json.dumps(list(typing), ensure_ascii=False)
rows.append((serial, room_id, typing_bytes))
rows.sort()
return rows
@@ -239,7 +247,7 @@ class TypingNotificationEventSource(object):
"type": "m.typing",
"room_id": room_id,
"content": {
- "user_ids": [u.to_string() for u in typing],
+ "user_ids": list(typing),
},
}
diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py
index abb739ae5..ab9899b7d 100644
--- a/tests/handlers/test_typing.py
+++ b/tests/handlers/test_typing.py
@@ -251,12 +251,12 @@ class TypingNotificationsTestCase(unittest.TestCase):
# Gut-wrenching
from synapse.handlers.typing import RoomMember
- member = RoomMember(self.room_id, self.u_apple)
+ member = RoomMember(self.room_id, self.u_apple.to_string())
self.handler._member_typing_until[member] = 1002000
self.handler._member_typing_timer[member] = (
self.clock.call_later(1002, lambda: 0)
)
- self.handler._room_typing[self.room_id] = set((self.u_apple,))
+ self.handler._room_typing[self.room_id] = set((self.u_apple.to_string(),))
self.assertEquals(self.event_source.get_current_key(), 0)
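A note on the refactor above: keeping plain user_id strings in RoomMember (rather than UserID objects) is what allows the stream code to serialise the typing set directly with json.dumps. A minimal standalone sketch, with illustrative values:

    import json
    from collections import namedtuple

    RoomMember = namedtuple("RoomMember", ("room_id", "user_id"))

    member = RoomMember(room_id="!room:test", user_id="@apple:test")
    typing = {member.user_id}

    # Strings serialise as-is; UserID objects would each need .to_string()
    print(json.dumps(list(typing), ensure_ascii=False))  # ["@apple:test"]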
From 07a555991600137c830eb3b06f90a305c8f1e3d8 Mon Sep 17 00:00:00 2001
From: David Baker
Date: Thu, 2 Jun 2016 17:17:16 +0100
Subject: [PATCH 062/414] Fix error in email notification string formatting
---
synapse/push/mailer.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py
index 0e9d8ccb5..63c7ec18a 100644
--- a/synapse/push/mailer.py
+++ b/synapse/push/mailer.py
@@ -41,7 +41,7 @@ logger = logging.getLogger(__name__)
MESSAGE_FROM_PERSON_IN_ROOM = "You have a message on %(app)s from %(person)s " \
- "in the %s room..."
+ "in the %(room)s room..."
MESSAGE_FROM_PERSON = "You have a message on %(app)s from %(person)s..."
MESSAGES_FROM_PERSON = "You have messages on %(app)s from %(person)s..."
MESSAGES_IN_ROOM = "You have messages on %(app)s in the %(room)s room..."
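For reference, the failure mode fixed here: when the right-hand operand of % is a mapping, an unkeyed %s consumes the mapping itself, so the old string rendered the whole parameter dict where the room name belonged. A standalone illustration:

    params = {"app": "Matrix", "person": "Alice", "room": "Den"}
    broken = "from %(person)s in the %s room..." % params
    fixed = "from %(person)s in the %(room)s room..." % params
    print(broken)  # from Alice in the {'app': 'Matrix', ...} room...
    print(fixed)   # from Alice in the Den room...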
From 2675c1e40ebc3392ce719ac2304b97e98c7fefb4 Mon Sep 17 00:00:00 2001
From: Matthew Hodgson
Date: Thu, 2 Jun 2016 17:21:12 +0100
Subject: [PATCH 063/414] add some branding debugging
---
synapse/push/mailer.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py
index 0e9d8ccb5..944f3b481 100644
--- a/synapse/push/mailer.py
+++ b/synapse/push/mailer.py
@@ -84,6 +84,7 @@ class Mailer(object):
self.state_handler = self.hs.get_state_handler()
loader = jinja2.FileSystemLoader(self.hs.config.email_template_dir)
self.app_name = app_name
+ logger.info("Created Mailer for app_name %s", app_name)
env = jinja2.Environment(loader=loader)
env.filters["format_ts"] = format_ts_filter
env.filters["mxc_to_http"] = self.mxc_to_http_filter
From 1f31cc37f8611f9ae5612ef5be82e63735fbdf34 Mon Sep 17 00:00:00 2001
From: David Baker
Date: Thu, 2 Jun 2016 17:21:31 +0100
Subject: [PATCH 064/414] Working unsubscribe links going straight to the HS
and authed by macaroons that let you delete pushers and nothing else
---
synapse/api/auth.py | 7 +++++++
synapse/app/pusher.py | 23 ++++++++++++++++++++++-
synapse/push/mailer.py | 8 ++++----
synapse/rest/client/v1/pusher.py | 4 +++-
4 files changed, 36 insertions(+), 6 deletions(-)
diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index 463bd8b69..31e1abb96 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -660,6 +660,13 @@ class Auth(object):
"is_guest": True,
"token_id": None,
}
+ elif rights == "delete_pusher":
+ # We don't store these tokens in the database
+ ret = {
+ "user": user,
+ "is_guest": False,
+ "token_id": None,
+ }
else:
# This codepath exists so that we can actually return a
# token ID, because we use token IDs in place of device
diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py
index 135dd58c1..f1de1e7ce 100644
--- a/synapse/app/pusher.py
+++ b/synapse/app/pusher.py
@@ -21,6 +21,7 @@ from synapse.config._base import ConfigError
from synapse.config.database import DatabaseConfig
from synapse.config.logger import LoggingConfig
from synapse.config.emailconfig import EmailConfig
+from synapse.config.key import KeyConfig
from synapse.http.site import SynapseSite
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse.storage.roommember import RoomMemberStore
@@ -63,6 +64,26 @@ class SlaveConfig(DatabaseConfig):
self.pid_file = self.abspath(config.get("pid_file"))
self.public_baseurl = config["public_baseurl"]
+ # some things used by the auth handler but not actually used in the
+ # pusher codebase
+ self.bcrypt_rounds = None
+ self.ldap_enabled = None
+ self.ldap_server = None
+ self.ldap_port = None
+ self.ldap_tls = None
+ self.ldap_search_base = None
+ self.ldap_search_property = None
+ self.ldap_email_property = None
+ self.ldap_full_name_property = None
+
+ # We would otherwise try to use the registration shared secret as the
+ # macaroon shared secret if there was no macaroon_shared_secret, but
+ # that means pulling in RegistrationConfig too. We don't need to be
+ # backwards compatible in the pusher codebase so just make people set
+ # macaroon_shared_secret. We set this to None to prevent it referencing
+ # an undefined key.
+ self.registration_shared_secret = None
+
def default_config(self, server_name, **kwargs):
pid_file = self.abspath("pusher.pid")
return """\
@@ -95,7 +116,7 @@ class SlaveConfig(DatabaseConfig):
""" % locals()
-class PusherSlaveConfig(SlaveConfig, LoggingConfig, EmailConfig):
+class PusherSlaveConfig(SlaveConfig, LoggingConfig, EmailConfig, KeyConfig):
pass
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py
index e877d8fda..60d3700af 100644
--- a/synapse/push/mailer.py
+++ b/synapse/push/mailer.py
@@ -81,7 +81,7 @@ class Mailer(object):
def __init__(self, hs, app_name):
self.hs = hs
self.store = self.hs.get_datastore()
- self.handlers = self.hs.get_handlers()
+ self.auth_handler = self.hs.get_auth_handler()
self.state_handler = self.hs.get_state_handler()
loader = jinja2.FileSystemLoader(self.hs.config.email_template_dir)
self.app_name = app_name
@@ -161,7 +161,7 @@ class Mailer(object):
template_vars = {
"user_display_name": user_display_name,
- "unsubscribe_link": self.make_unsubscribe_link(app_id, email_address),
+ "unsubscribe_link": self.make_unsubscribe_link(user_id, app_id, email_address),
"summary_text": summary_text,
"app_name": self.app_name,
"rooms": rooms,
@@ -427,9 +427,9 @@ class Mailer(object):
notif['room_id'], notif['event_id']
)
- def make_unsubscribe_link(self, app_id, email_address):
+ def make_unsubscribe_link(self, user_id, app_id, email_address):
params = {
- "access_token": self.handlers.auth.generate_delete_pusher_token(),
+ "access_token": self.auth_handler.generate_delete_pusher_token(user_id),
"app_id": app_id,
"pushkey": email_address,
}
diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py
index fa7a0992d..9a2ed6ed8 100644
--- a/synapse/rest/client/v1/pusher.py
+++ b/synapse/rest/client/v1/pusher.py
@@ -149,11 +149,13 @@ class PushersRemoveRestServlet(RestServlet):
def __init__(self, hs):
super(RestServlet, self).__init__()
+ self.hs = hs
self.notifier = hs.get_notifier()
+ self.auth = hs.get_v1auth()
@defer.inlineCallbacks
def on_GET(self, request):
- requester = yield self.auth.get_user_by_req(request, "delete_pusher")
+ requester = yield self.auth.get_user_by_req(request, rights="delete_pusher")
user = requester.user
app_id = parse_string(request, "app_id", required=True)
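For context, a rough sketch of how such a single-purpose token could be minted with the pymacaroons library (the helper and caveat strings below are illustrative; the real implementation is generate_delete_pusher_token on the auth handler):

    import pymacaroons

    def make_delete_pusher_token(server_name, macaroon_secret_key, user_id):
        macaroon = pymacaroons.Macaroon(
            location=server_name,
            identifier="key",
            key=macaroon_secret_key,
        )
        macaroon.add_first_party_caveat("gen = 1")
        macaroon.add_first_party_caveat("user_id = %s" % (user_id,))
        # No expiry caveat, so the emailed link keeps working; the type
        # caveat stops the token being accepted for anything else.
        macaroon.add_first_party_caveat("type = delete_pusher")
        return macaroon.serialize()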
From 745ddb4dd04d0346f27f65a1e5508900b58e658a Mon Sep 17 00:00:00 2001
From: David Baker
Date: Thu, 2 Jun 2016 17:38:41 +0100
Subject: [PATCH 065/414] peppate
---
synapse/push/mailer.py | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py
index 60d3700af..3c9a66008 100644
--- a/synapse/push/mailer.py
+++ b/synapse/push/mailer.py
@@ -161,7 +161,9 @@ class Mailer(object):
template_vars = {
"user_display_name": user_display_name,
- "unsubscribe_link": self.make_unsubscribe_link(user_id, app_id, email_address),
+ "unsubscribe_link": self.make_unsubscribe_link(
+ user_id, app_id, email_address
+ ),
"summary_text": summary_text,
"app_name": self.app_name,
"rooms": rooms,
From 79d1f072f4fc9a6b2e9773e8cb700b26bc2dff51 Mon Sep 17 00:00:00 2001
From: Matthew Hodgson
Date: Thu, 2 Jun 2016 21:34:40 +0100
Subject: [PATCH 066/414] brand the email from header
---
synapse/config/emailconfig.py | 2 +-
synapse/push/mailer.py | 9 ++++++++-
2 files changed, 9 insertions(+), 2 deletions(-)
diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py
index 90bdd08f0..a18716127 100644
--- a/synapse/config/emailconfig.py
+++ b/synapse/config/emailconfig.py
@@ -89,7 +89,7 @@ class EmailConfig(Config):
# enable_notifs: false
# smtp_host: "localhost"
# smtp_port: 25
- # notif_from: Your Friendly Matrix Home Server <noreply@example.com>
+ # notif_from: "Your Friendly %(app)s Home Server <noreply@example.com>"
# app_name: Matrix
# template_dir: res/templates
# notif_template_html: notif_mail.html
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py
index 944f3b481..c1e9057eb 100644
--- a/synapse/push/mailer.py
+++ b/synapse/push/mailer.py
@@ -97,7 +97,14 @@ class Mailer(object):
@defer.inlineCallbacks
def send_notification_mail(self, user_id, email_address, push_actions, reason):
- raw_from = email.utils.parseaddr(self.hs.config.email_notif_from)[1]
+ try:
+ from_string = self.hs.config.email_notif_from % {
+ "app": self.app_name
+ }
+ except TypeError:
+ from_string = self.hs.config.email_notif_from
+
+ raw_from = email.utils.parseaddr(from_string)[1]
raw_to = email.utils.parseaddr(email_address)[1]
if raw_to == '':
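The try/except above keeps old-style notif_from values working: with a mapping on the right-hand side, a string with no format specifiers passes through % unchanged, while an incompatible specifier raises TypeError and falls back to the unbranded value. A standalone sketch with hypothetical values:

    def brand_from(notif_from, app_name):
        try:
            return notif_from % {"app": app_name}
        except TypeError:
            # e.g. a %d specifier cannot take the mapping; stay unbranded
            return notif_from

    print(brand_from("Your Friendly %(app)s Home Server <no-reply@example.com>", "Riot"))
    print(brand_from("Plain old sender <no-reply@example.com>", "Riot"))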
From 0eae0757232169b833224f48208aed9fdc9c6fe6 Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Fri, 3 Jun 2016 10:58:03 +0100
Subject: [PATCH 067/414] Add slaved stores for filters, tokens, and push rules
---
.../replication/slave/storage/appservice.py | 30 +++++++++
.../replication/slave/storage/filtering.py | 24 +++++++
.../replication/slave/storage/push_rule.py | 67 +++++++++++++++++++
.../replication/slave/storage/registration.py | 30 +++++++++
4 files changed, 151 insertions(+)
create mode 100644 synapse/replication/slave/storage/appservice.py
create mode 100644 synapse/replication/slave/storage/filtering.py
create mode 100644 synapse/replication/slave/storage/push_rule.py
create mode 100644 synapse/replication/slave/storage/registration.py
diff --git a/synapse/replication/slave/storage/appservice.py b/synapse/replication/slave/storage/appservice.py
new file mode 100644
index 000000000..25792d942
--- /dev/null
+++ b/synapse/replication/slave/storage/appservice.py
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015, 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import BaseSlavedStore
+from synapse.storage import DataStore
+from synapse.config.appservice import load_appservices
+
+
+class SlavedApplicationServiceStore(BaseSlavedStore):
+ def __init__(self, db_conn, hs):
+ super(SlavedApplicationServiceStore, self).__init__(db_conn, hs)
+ self.services_cache = load_appservices(
+ hs.config.server_name,
+ hs.config.app_service_config_files
+ )
+
+ get_app_service_by_token = DataStore.get_app_service_by_token.__func__
+ get_app_service_by_user_id = DataStore.get_app_service_by_user_id.__func__
diff --git a/synapse/replication/slave/storage/filtering.py b/synapse/replication/slave/storage/filtering.py
new file mode 100644
index 000000000..5037f395b
--- /dev/null
+++ b/synapse/replication/slave/storage/filtering.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015, 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import BaseSlavedStore
+from synapse.storage.filtering import FilteringStore
+
+
+class SlavedFilteringStore(BaseSlavedStore):
+ def __init__(self, db_conn, hs):
+ super(SlavedFilteringStore, self).__init__(db_conn, hs)
+
+ get_user_filter = FilteringStore.__dict__["get_user_filter"]
diff --git a/synapse/replication/slave/storage/push_rule.py b/synapse/replication/slave/storage/push_rule.py
new file mode 100644
index 000000000..21ceb0213
--- /dev/null
+++ b/synapse/replication/slave/storage/push_rule.py
@@ -0,0 +1,67 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015, 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .events import SlavedEventStore
+from ._slaved_id_tracker import SlavedIdTracker
+from synapse.storage import DataStore
+from synapse.storage.push_rule import PushRuleStore
+from synapse.util.caches.stream_change_cache import StreamChangeCache
+
+
+class SlavedPushRuleStore(SlavedEventStore):
+ def __init__(self, db_conn, hs):
+ super(SlavedPushRuleStore, self).__init__(db_conn, hs)
+ self._push_rules_stream_id_gen = SlavedIdTracker(
+ db_conn, "push_rules_stream", "stream_id",
+ )
+ self.push_rules_stream_cache = StreamChangeCache(
+ "PushRulesStreamChangeCache",
+ self._push_rules_stream_id_gen.get_current_token(),
+ )
+
+ get_push_rules_for_user = PushRuleStore.__dict__["get_push_rules_for_user"]
+ get_push_rules_enabled_for_user = (
+ PushRuleStore.__dict__["get_push_rules_enabled_for_user"]
+ )
+ have_push_rules_changed_for_user = (
+ DataStore.have_push_rules_changed_for_user.__func__
+ )
+
+ def get_push_rules_stream_token(self):
+ return (
+ self._push_rules_stream_id_gen.get_current_token(),
+ self._stream_id_gen.get_current_token(),
+ )
+
+ def stream_positions(self):
+ result = super(SlavedPushRuleStore, self).stream_positions()
+ result["push_rules"] = self._push_rules_stream_id_gen.get_current_token()
+ return result
+
+ def process_replication(self, result):
+ stream = result.get("push_rules")
+ if stream:
+ for row in stream["rows"]:
+ position = row[0]
+ user_id = row[2]
+ self.get_push_rules_for_user.invalidate((user_id,))
+ self.get_push_rules_enabled_for_user.invalidate((user_id,))
+ self.push_rules_stream_cache.entity_has_changed(
+ user_id, position
+ )
+
+ self._push_rules_stream_id_gen.advance(int(stream["position"]))
+
+ return super(SlavedPushRuleStore, self).process_replication(result)
diff --git a/synapse/replication/slave/storage/registration.py b/synapse/replication/slave/storage/registration.py
new file mode 100644
index 000000000..307833f9e
--- /dev/null
+++ b/synapse/replication/slave/storage/registration.py
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015, 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import BaseSlavedStore
+from synapse.storage import DataStore
+from synapse.storage.registration import RegistrationStore
+
+
+class SlavedRegistrationStore(BaseSlavedStore):
+ def __init__(self, db_conn, hs):
+ super(SlavedRegistrationStore, self).__init__(db_conn, hs)
+
+ # TODO: use the cached version and invalidate deleted tokens
+ get_user_by_access_token = RegistrationStore.__dict__[
+ "get_user_by_access_token"
+ ].orig
+
+ _query_for_auth = DataStore._query_for_auth.__func__
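A note on the two borrowing idioms these slaved stores use (Python 2 era semantics): SomeStore.__dict__["name"] fetches the raw class attribute, so a @cached descriptor comes across intact and the slave gets its own cache, whereas DataStore.name.__func__ unwraps an unbound method into a plain function that can be attached to a class that does not inherit from DataStore. A minimal sketch of the __dict__ form:

    class Base(object):
        def greet(self):
            return "hello from %s" % (type(self).__name__,)

    class Slaved(object):
        # the raw function out of the class dict, re-attached wholesale
        greet = Base.__dict__["greet"]

    print(Slaved().greet())  # hello from Slaved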
From f88d747f7959808884451245aeba65edf7c490bf Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Fri, 3 Jun 2016 11:03:10 +0100
Subject: [PATCH 068/414] Add a comment explaining why the filter cache doesn't
need expiring
---
synapse/replication/slave/storage/filtering.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/synapse/replication/slave/storage/filtering.py b/synapse/replication/slave/storage/filtering.py
index 5037f395b..819ed6288 100644
--- a/synapse/replication/slave/storage/filtering.py
+++ b/synapse/replication/slave/storage/filtering.py
@@ -21,4 +21,5 @@ class SlavedFilteringStore(BaseSlavedStore):
def __init__(self, db_conn, hs):
super(SlavedFilteringStore, self).__init__(db_conn, hs)
+ # Filters are immutable so this cache doesn't need to be expired
get_user_filter = FilteringStore.__dict__["get_user_filter"]
From 9c26b390a2112590fe4252057dc1f081cb99a6b1 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 1 Jun 2016 11:34:06 +0100
Subject: [PATCH 069/414] Only get local users
---
synapse/push/bulk_push_rule_evaluator.py | 7 +++++--
synapse/storage/pusher.py | 2 +-
2 files changed, 6 insertions(+), 3 deletions(-)
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index 8c59e59e0..d50db3b73 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
@@ -83,10 +83,13 @@ def evaluator_for_event(event, hs, store, current_state):
e.state_key for e in current_state.values()
if e.type == EventTypes.Member and e.membership == Membership.JOIN
)
+ local_users_in_room = set(uid for uid in all_in_room if hs.is_mine_id(uid))
# users in the room who have pushers need to get push rules run because
# that's how their pushers work
- if_users_with_pushers = yield store.get_if_users_have_pushers(all_in_room)
+ if_users_with_pushers = yield store.get_if_users_have_pushers(
+ local_users_in_room
+ )
users_with_pushers = set(
uid for uid, have_pusher in if_users_with_pushers.items() if have_pusher
)
@@ -96,7 +99,7 @@ def evaluator_for_event(event, hs, store, current_state):
# any users with pushers must be ours: they have pushers
user_ids = set(users_with_pushers)
for uid in users_with_receipts:
- if hs.is_mine_id(uid) and uid in all_in_room:
+ if uid in local_users_in_room:
user_ids.add(uid)
# if this event is an invite event, we may need to run rules for the user
diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py
index 39d5349ea..a7d7c54d7 100644
--- a/synapse/storage/pusher.py
+++ b/synapse/storage/pusher.py
@@ -135,7 +135,7 @@ class PusherStore(SQLBaseStore):
"get_all_updated_pushers", get_all_updated_pushers_txn
)
- @cachedInlineCallbacks(lru=True, num_args=1)
+ @cachedInlineCallbacks(lru=True, num_args=1, max_entries=15000)
def get_if_user_has_pusher(self, user_id):
result = yield self._simple_select_many_batch(
table='pushers',
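The filtering above relies on the invariant that only users local to this homeserver can have pushers here, so remote members are dropped before the bulk pusher lookup. Sketch with an illustrative is_mine_id:

    def is_mine_id(user_id, server_name="example.com"):
        return user_id.split(":", 1)[1] == server_name

    all_in_room = ["@a:example.com", "@b:elsewhere.org", "@c:example.com"]
    local_users_in_room = set(uid for uid in all_in_room if is_mine_id(uid))
    print(sorted(local_users_in_room))  # ['@a:example.com', '@c:example.com']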
From 59f2d7352224af97e8f091f673858dde42b00197 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 1 Jun 2016 15:45:37 +0100
Subject: [PATCH 070/414] Remove unnecessary sets
---
synapse/push/bulk_push_rule_evaluator.py | 9 +++------
1 file changed, 3 insertions(+), 6 deletions(-)
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index d50db3b73..af5212a5d 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
@@ -79,25 +79,24 @@ def evaluator_for_event(event, hs, store, current_state):
# generating them for bot / AS users etc, we only do so for people who've
# sent a read receipt into the room.
- all_in_room = set(
+ local_users_in_room = set(
e.state_key for e in current_state.values()
if e.type == EventTypes.Member and e.membership == Membership.JOIN
+ and hs.is_mine_id(e.state_key)
)
- local_users_in_room = set(uid for uid in all_in_room if hs.is_mine_id(uid))
# users in the room who have pushers need to get push rules run because
# that's how their pushers work
if_users_with_pushers = yield store.get_if_users_have_pushers(
local_users_in_room
)
- users_with_pushers = set(
+ user_ids = set(
uid for uid, have_pusher in if_users_with_pushers.items() if have_pusher
)
users_with_receipts = yield store.get_users_with_read_receipts_in_room(room_id)
# any users with pushers must be ours: they have pushers
- user_ids = set(users_with_pushers)
for uid in users_with_receipts:
if uid in local_users_in_room:
user_ids.add(uid)
@@ -111,8 +110,6 @@ def evaluator_for_event(event, hs, store, current_state):
if has_pusher:
user_ids.add(invited_user)
- user_ids = list(user_ids)
-
rules_by_user = yield _get_rules(room_id, user_ids, store)
defer.returnValue(BulkPushRuleEvaluator(
From 3ae915b27e4531031ee325931b3c62bc200ce798 Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Fri, 3 Jun 2016 11:05:53 +0100
Subject: [PATCH 071/414] Add a slaved store for presence
---
synapse/replication/slave/storage/presence.py | 59 +++++++++++++++++++
synapse/storage/__init__.py | 6 +-
2 files changed, 62 insertions(+), 3 deletions(-)
create mode 100644 synapse/replication/slave/storage/presence.py
diff --git a/synapse/replication/slave/storage/presence.py b/synapse/replication/slave/storage/presence.py
new file mode 100644
index 000000000..703f4a49b
--- /dev/null
+++ b/synapse/replication/slave/storage/presence.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import BaseSlavedStore
+from ._slaved_id_tracker import SlavedIdTracker
+
+from synapse.util.caches.stream_change_cache import StreamChangeCache
+from synapse.storage import DataStore
+
+
+class SlavedPresenceStore(BaseSlavedStore):
+ def __init__(self, db_conn, hs):
+ super(SlavedPresenceStore, self).__init__(db_conn, hs)
+ self._presence_id_gen = SlavedIdTracker(
+ db_conn, "presence_stream", "stream_id",
+ )
+
+ self._presence_on_startup = self._get_active_presence(db_conn)
+
+ self.presence_stream_cache = StreamChangeCache(
+ "PresenceStreamChangeCache", self._presence_id_gen.get_current_token()
+ )
+
+ _get_active_presence = DataStore._get_active_presence.__func__
+ take_presence_startup_info = DataStore.take_presence_startup_info.__func__
+ get_presence_for_users = DataStore.get_presence_for_users.__func__
+
+ def get_current_presence_token(self):
+ return self._presence_id_gen.get_current_token()
+
+ def stream_positions(self):
+ result = super(SlavedPresenceStore, self).stream_positions()
+ position = self._presence_id_gen.get_current_token()
+ result["presence"] = position
+ return result
+
+ def process_replication(self, result):
+ stream = result.get("presence")
+ if stream:
+ self._presence_id_gen.advance(int(stream["position"]))
+ for row in stream["rows"]:
+ position, user_id = row[:2]
+ self.presence_stream_cache.entity_has_changed(
+ user_id, position
+ )
+
+ return super(SlavedPresenceStore, self).process_replication(result)
diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py
index 8581796b7..6928a213e 100644
--- a/synapse/storage/__init__.py
+++ b/synapse/storage/__init__.py
@@ -149,7 +149,7 @@ class DataStore(RoomMemberStore, RoomStore,
"AccountDataAndTagsChangeCache", account_max,
)
- self.__presence_on_startup = self._get_active_presence(db_conn)
+ self._presence_on_startup = self._get_active_presence(db_conn)
presence_cache_prefill, min_presence_val = self._get_cache_dict(
db_conn, "presence_stream",
@@ -190,8 +190,8 @@ class DataStore(RoomMemberStore, RoomStore,
super(DataStore, self).__init__(hs)
def take_presence_startup_info(self):
- active_on_startup = self.__presence_on_startup
- self.__presence_on_startup = None
+ active_on_startup = self._presence_on_startup
+ self._presence_on_startup = None
return active_on_startup
def _get_active_presence(self, db_conn):
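The rename from __presence_on_startup to _presence_on_startup is not cosmetic: double-underscore attributes are name-mangled per defining class, so a DataStore method borrowed onto the slaved store (via .__func__, as above) would look up the wrong attribute. An illustrative example:

    class DataStore(object):
        def __init__(self):
            self.__presence = "startup"   # stored as _DataStore__presence

        def take_presence(self):
            return self.__presence        # compiled to read _DataStore__presence

    class SlavedStore(object):
        def __init__(self):
            self.__presence = "startup"   # stored as _SlavedStore__presence

        take_presence = DataStore.__dict__["take_presence"]

    try:
        SlavedStore().take_presence()
    except AttributeError as e:
        print(e)  # no attribute '_DataStore__presence'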
From 6a0afa582aa5bf816e082af31ac44e2a8fee28c0 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 1 Jun 2016 14:27:07 +0100
Subject: [PATCH 072/414] Load push rules in storage layer, so that they get
cached
---
synapse/handlers/sync.py | 5 ++--
synapse/push/bulk_push_rule_evaluator.py | 28 -----------------
synapse/push/clientformat.py | 30 ++++++++++++++-----
synapse/rest/client/v1/push_rule.py | 6 ++--
synapse/storage/push_rule.py | 38 +++++++++++++++++++++++-
5 files changed, 63 insertions(+), 44 deletions(-)
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 5307b62b8..be26a491f 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -198,9 +198,8 @@ class SyncHandler(object):
@defer.inlineCallbacks
def push_rules_for_user(self, user):
user_id = user.to_string()
- rawrules = yield self.store.get_push_rules_for_user(user_id)
- enabled_map = yield self.store.get_push_rules_enabled_for_user(user_id)
- rules = format_push_rules_for_user(user, rawrules, enabled_map)
+ rules = yield self.store.get_push_rules_for_user(user_id)
+ rules = format_push_rules_for_user(user, rules)
defer.returnValue(rules)
@defer.inlineCallbacks
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index af5212a5d..6e42121b1 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
@@ -18,7 +18,6 @@ import ujson as json
from twisted.internet import defer
-from .baserules import list_with_base_rules
from .push_rule_evaluator import PushRuleEvaluatorForEvent
from synapse.api.constants import EventTypes, Membership
@@ -38,36 +37,9 @@ def decode_rule_json(rule):
@defer.inlineCallbacks
def _get_rules(room_id, user_ids, store):
rules_by_user = yield store.bulk_get_push_rules(user_ids)
- rules_enabled_by_user = yield store.bulk_get_push_rules_enabled(user_ids)
rules_by_user = {k: v for k, v in rules_by_user.items() if v is not None}
- rules_by_user = {
- uid: list_with_base_rules([
- decode_rule_json(rule_list)
- for rule_list in rules_by_user.get(uid, [])
- ])
- for uid in user_ids
- }
-
- # We apply the rules-enabled map here: bulk_get_push_rules doesn't
- # fetch disabled rules, but this won't account for any server default
- # rules the user has disabled, so we need to do this too.
- for uid in user_ids:
- user_enabled_map = rules_enabled_by_user.get(uid)
- if not user_enabled_map:
- continue
-
- for i, rule in enumerate(rules_by_user[uid]):
- rule_id = rule['rule_id']
-
- if rule_id in user_enabled_map:
- if rule.get('enabled', True) != bool(user_enabled_map[rule_id]):
- # Rules are cached across users.
- rule = dict(rule)
- rule['enabled'] = bool(user_enabled_map[rule_id])
- rules_by_user[uid][i] = rule
-
defer.returnValue(rules_by_user)
diff --git a/synapse/push/clientformat.py b/synapse/push/clientformat.py
index ae9db9ec2..b3983f794 100644
--- a/synapse/push/clientformat.py
+++ b/synapse/push/clientformat.py
@@ -23,10 +23,7 @@ import copy
import simplejson as json
-def format_push_rules_for_user(user, rawrules, enabled_map):
- """Converts a list of rawrules and a enabled map into nested dictionaries
- to match the Matrix client-server format for push rules"""
-
+def load_rules_for_user(user, rawrules, enabled_map):
ruleslist = []
for rawrule in rawrules:
rule = dict(rawrule)
@@ -35,7 +32,26 @@ def format_push_rules_for_user(user, rawrules, enabled_map):
ruleslist.append(rule)
# We're going to be mutating this a lot, so do a deep copy
- ruleslist = copy.deepcopy(list_with_base_rules(ruleslist))
+ rules = list(list_with_base_rules(ruleslist))
+
+ for i, rule in enumerate(rules):
+ rule_id = rule['rule_id']
+ if rule_id in enabled_map:
+ if rule.get('enabled', True) != bool(enabled_map[rule_id]):
+ # Rules are cached across users.
+ rule = dict(rule)
+ rule['enabled'] = bool(enabled_map[rule_id])
+ rules[i] = rule
+
+ return rules
+
+
+def format_push_rules_for_user(user, ruleslist):
+ """Converts a list of rawrules and a enabled map into nested dictionaries
+ to match the Matrix client-server format for push rules"""
+
+ # We're going to be mutating this a lot, so do a deep copy
+ ruleslist = copy.deepcopy(ruleslist)
rules = {'global': {}, 'device': {}}
@@ -60,9 +76,7 @@ def format_push_rules_for_user(user, rawrules, enabled_map):
template_rule = _rule_to_template(r)
if template_rule:
- if r['rule_id'] in enabled_map:
- template_rule['enabled'] = enabled_map[r['rule_id']]
- elif 'enabled' in r:
+ if 'enabled' in r:
template_rule['enabled'] = r['enabled']
else:
template_rule['enabled'] = True
diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py
index 02d837ee6..6bb4821ec 100644
--- a/synapse/rest/client/v1/push_rule.py
+++ b/synapse/rest/client/v1/push_rule.py
@@ -128,11 +128,9 @@ class PushRuleRestServlet(ClientV1RestServlet):
# we build up the full structure and then decide which bits of it
# to send which means doing unnecessary work sometimes but is
# probably not going to make a whole lot of difference
- rawrules = yield self.store.get_push_rules_for_user(user_id)
+ rules = yield self.store.get_push_rules_for_user(user_id)
- enabled_map = yield self.store.get_push_rules_enabled_for_user(user_id)
-
- rules = format_push_rules_for_user(requester.user, rawrules, enabled_map)
+ rules = format_push_rules_for_user(requester.user, rules)
path = request.postpath[1:]
diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py
index ebb97c847..786d6f6d6 100644
--- a/synapse/storage/push_rule.py
+++ b/synapse/storage/push_rule.py
@@ -15,6 +15,7 @@
from ._base import SQLBaseStore
from synapse.util.caches.descriptors import cachedInlineCallbacks, cachedList
+from synapse.push.baserules import list_with_base_rules
from twisted.internet import defer
import logging
@@ -23,6 +24,29 @@ import simplejson as json
logger = logging.getLogger(__name__)
+def _load_rules(rawrules, enabled_map):
+ ruleslist = []
+ for rawrule in rawrules:
+ rule = dict(rawrule)
+ rule["conditions"] = json.loads(rawrule["conditions"])
+ rule["actions"] = json.loads(rawrule["actions"])
+ ruleslist.append(rule)
+
+ # We're going to be mutating this a lot, so do a deep copy
+ rules = list(list_with_base_rules(ruleslist))
+
+ for i, rule in enumerate(rules):
+ rule_id = rule['rule_id']
+ if rule_id in enabled_map:
+ if rule.get('enabled', True) != bool(enabled_map[rule_id]):
+ # Rules are cached across users.
+ rule = dict(rule)
+ rule['enabled'] = bool(enabled_map[rule_id])
+ rules[i] = rule
+
+ return rules
+
+
class PushRuleStore(SQLBaseStore):
@cachedInlineCallbacks(lru=True)
def get_push_rules_for_user(self, user_id):
@@ -42,7 +66,11 @@ class PushRuleStore(SQLBaseStore):
key=lambda row: (-int(row["priority_class"]), -int(row["priority"]))
)
- defer.returnValue(rows)
+ enabled_map = yield self.get_push_rules_enabled_for_user(user_id)
+
+ rules = _load_rules(rows, enabled_map)
+
+ defer.returnValue(rules)
@cachedInlineCallbacks(lru=True)
def get_push_rules_enabled_for_user(self, user_id):
@@ -85,6 +113,14 @@ class PushRuleStore(SQLBaseStore):
for row in rows:
results.setdefault(row['user_name'], []).append(row)
+
+ enabled_map_by_user = yield self.bulk_get_push_rules_enabled(user_ids)
+
+ for user_id, rules in results.items():
+ results[user_id] = _load_rules(
+ rules, enabled_map_by_user.get(user_id, {})
+ )
+
defer.returnValue(results)
@cachedList(cached_method_name="get_push_rules_enabled_for_user",
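The "Rules are cached across users" comment in _load_rules is the crux of this change: the rule dicts are shared between users via the cache, so a rule must be copied before one user's enabled override is applied. A standalone sketch:

    shared_rule = {"rule_id": ".m.rule.contains_user_name", "enabled": True}
    rules = [shared_rule]
    enabled_map = {".m.rule.contains_user_name": False}

    for i, rule in enumerate(rules):
        rule_id = rule["rule_id"]
        if rule_id in enabled_map:
            if rule.get("enabled", True) != bool(enabled_map[rule_id]):
                rule = dict(rule)          # copy; never mutate the shared dict
                rule["enabled"] = bool(enabled_map[rule_id])
                rules[i] = rule

    print(shared_rule["enabled"])  # True  - other users are unaffected
    print(rules[0]["enabled"])     # False - this user's override applied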
From 597013caa5e22c7134b6ca6e398659ba76047b15 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 1 Jun 2016 18:01:22 +0100
Subject: [PATCH 073/414] Make cachedList go a bit faster
---
synapse/metrics/metric.py | 22 +++++++++-------
synapse/util/caches/descriptors.py | 42 +++++++++++++++++++++++-------
2 files changed, 45 insertions(+), 19 deletions(-)
diff --git a/synapse/metrics/metric.py b/synapse/metrics/metric.py
index 368fc2498..6f82b360b 100644
--- a/synapse/metrics/metric.py
+++ b/synapse/metrics/metric.py
@@ -15,6 +15,7 @@
from itertools import chain
+from collections import Counter
# TODO(paul): I can't believe Python doesn't have one of these
@@ -55,30 +56,29 @@ class CounterMetric(BaseMetric):
"""The simplest kind of metric; one that stores a monotonically-increasing
integer that counts events."""
+ __slots__ = ("counts")
+
def __init__(self, *args, **kwargs):
super(CounterMetric, self).__init__(*args, **kwargs)
- self.counts = {}
+ self.counts = Counter()
# Scalar metrics are never empty
if self.is_scalar():
self.counts[()] = 0
def inc_by(self, incr, *values):
- if len(values) != self.dimension():
- raise ValueError(
- "Expected as many values to inc() as labels (%d)" % (self.dimension())
- )
+ # if len(values) != self.dimension():
+ # raise ValueError(
+ # "Expected as many values to inc() as labels (%d)" % (self.dimension())
+ # )
# TODO: should assert that the tag values are all strings
- if values not in self.counts:
- self.counts[values] = incr
- else:
- self.counts[values] += incr
+ self.counts[values] += incr
def inc(self, *values):
- self.inc_by(1, *values)
+ self.counts[values] += 1
def render_item(self, k):
return ["%s%s %d" % (self.name, self._render_key(k), self.counts[k])]
@@ -132,6 +132,8 @@ class CacheMetric(object):
This metric generates standard metric name pairs, so that monitoring rules
can easily be applied to measure hit ratio."""
+ __slots__ = ("name", "hits", "total", "size")
+
def __init__(self, name, size_callback, labels=[]):
self.name = name
diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py
index 758f5982b..4bbb16ed3 100644
--- a/synapse/util/caches/descriptors.py
+++ b/synapse/util/caches/descriptors.py
@@ -32,6 +32,7 @@ import os
import functools
import inspect
import threading
+import itertools
logger = logging.getLogger(__name__)
@@ -43,6 +44,14 @@ CACHE_SIZE_FACTOR = float(os.environ.get("SYNAPSE_CACHE_FACTOR", 0.1))
class Cache(object):
+ __slots__ = (
+ "cache",
+ "max_entries",
+ "name",
+ "keylen",
+ "sequence",
+ "thread",
+ )
def __init__(self, name, max_entries=1000, keylen=1, lru=True, tree=False):
if lru:
@@ -293,16 +302,21 @@ class CacheListDescriptor(object):
# cached is a dict arg -> deferred, where deferred results in a
# 2-tuple (`arg`, `result`)
- cached = {}
+ results = {}
+ cached_defers = {}
missing = []
for arg in list_args:
key = list(keyargs)
key[self.list_pos] = arg
try:
- res = cache.get(tuple(key)).observe()
- res.addCallback(lambda r, arg: (arg, r), arg)
- cached[arg] = res
+ res = cache.get(tuple(key))
+ if not res.called:
+ res = res.observe()
+ res.addCallback(lambda r, arg: (arg, r), arg)
+ cached_defers[arg] = res
+ else:
+ results[arg] = res.result
except KeyError:
missing.append(arg)
@@ -340,12 +354,22 @@ class CacheListDescriptor(object):
res = observer.observe()
res.addCallback(lambda r, arg: (arg, r), arg)
- cached[arg] = res
+ cached_defers[arg] = res
- return preserve_context_over_deferred(defer.gatherResults(
- cached.values(),
- consumeErrors=True,
- ).addErrback(unwrapFirstError).addCallback(lambda res: dict(res)))
+ if cached_defers:
+ return preserve_context_over_deferred(defer.gatherResults(
+ cached_defers.values(),
+ consumeErrors=True,
+ ).addCallback(
+ lambda res: {
+ k: v
+ for k, v in itertools.chain(results.items(), res)
+ }
+ )).addErrback(
+ unwrapFirstError
+ )
+ else:
+ return results
obj.__dict__[self.orig.__name__] = wrapped
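The shape of the optimisation above, as a standalone sketch (Twisted assumed): entries whose deferreds have already fired are unpacked synchronously into a plain dict, and gatherResults is only paid for the entries still pending.

    from twisted.internet import defer

    def collect(cached):
        results, cached_defers = {}, []
        for key, d in cached.items():
            if d.called:
                results[key] = d.result    # no observer or callback needed
            else:
                cached_defers.append(d.addCallback(lambda r, k=key: (k, r)))
        if not cached_defers:
            return results                 # fully synchronous fast path
        return defer.gatherResults(cached_defers, consumeErrors=True).addCallback(
            lambda fired: dict(list(results.items()) + list(fired))
        )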
From e043ede4a2f18a47b67bf19368600183554824f7 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 2 Jun 2016 11:52:32 +0100
Subject: [PATCH 074/414] Small optimisation to CacheListDescriptor
---
synapse/metrics/metric.py | 22 ++++++++++------------
synapse/util/async.py | 9 +++++++++
synapse/util/caches/descriptors.py | 4 ++--
3 files changed, 21 insertions(+), 14 deletions(-)
diff --git a/synapse/metrics/metric.py b/synapse/metrics/metric.py
index 6f82b360b..368fc2498 100644
--- a/synapse/metrics/metric.py
+++ b/synapse/metrics/metric.py
@@ -15,7 +15,6 @@
from itertools import chain
-from collections import Counter
# TODO(paul): I can't believe Python doesn't have one of these
@@ -56,29 +55,30 @@ class CounterMetric(BaseMetric):
"""The simplest kind of metric; one that stores a monotonically-increasing
integer that counts events."""
- __slots__ = ("counts")
-
def __init__(self, *args, **kwargs):
super(CounterMetric, self).__init__(*args, **kwargs)
- self.counts = Counter()
+ self.counts = {}
# Scalar metrics are never empty
if self.is_scalar():
self.counts[()] = 0
def inc_by(self, incr, *values):
- # if len(values) != self.dimension():
- # raise ValueError(
- # "Expected as many values to inc() as labels (%d)" % (self.dimension())
- # )
+ if len(values) != self.dimension():
+ raise ValueError(
+ "Expected as many values to inc() as labels (%d)" % (self.dimension())
+ )
# TODO: should assert that the tag values are all strings
- self.counts[values] += incr
+ if values not in self.counts:
+ self.counts[values] = incr
+ else:
+ self.counts[values] += incr
def inc(self, *values):
- self.counts[values] += 1
+ self.inc_by(1, *values)
def render_item(self, k):
return ["%s%s %d" % (self.name, self._render_key(k), self.counts[k])]
@@ -132,8 +132,6 @@ class CacheMetric(object):
This metric generates standard metric name pairs, so that monitoring rules
can easily be applied to measure hit ratio."""
- __slots__ = ("name", "hits", "total", "size")
-
def __init__(self, name, size_callback, labels=[]):
self.name = name
diff --git a/synapse/util/async.py b/synapse/util/async.py
index 0d6f48e2d..40be7fe7e 100644
--- a/synapse/util/async.py
+++ b/synapse/util/async.py
@@ -102,6 +102,15 @@ class ObservableDeferred(object):
def observers(self):
return self._observers
+ def has_called(self):
+ return self._result is not None
+
+ def has_succeeded(self):
+ return self._result is not None and self._result[0] is True
+
+ def get_result(self):
+ return self._result[1]
+
def __getattr__(self, name):
return getattr(self._deferred, name)
diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py
index 4bbb16ed3..5be409727 100644
--- a/synapse/util/caches/descriptors.py
+++ b/synapse/util/caches/descriptors.py
@@ -311,12 +311,12 @@ class CacheListDescriptor(object):
try:
res = cache.get(tuple(key))
- if not res.called:
+ if not res.has_succeeded():
res = res.observe()
res.addCallback(lambda r, arg: (arg, r), arg)
cached_defers[arg] = res
else:
- results[arg] = res.result
+ results[arg] = res.get_result()
except KeyError:
missing.append(arg)
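The switch from .called to has_succeeded matters because a failed Deferred is still "called", so the fast path from the previous commit could have handed a Failure back as a cached value. Sketch (Twisted assumed):

    from twisted.internet import defer

    d = defer.Deferred()
    d.errback(RuntimeError("boom"))
    print(d.called)                # True, even though it holds a Failure
    d.addErrback(lambda f: None)   # consume it so nothing is logged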
From 81cf449daa8b310899014f5564f5fdf10289e79c Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Fri, 3 Jun 2016 11:19:27 +0100
Subject: [PATCH 075/414] Add methods to events, account data and receipt
slaves
Adds the methods needed by /sync to the slaved events,
account data and receipt stores.
---
.../replication/slave/storage/account_data.py | 41 ++++++++++++++++++-
synapse/replication/slave/storage/events.py | 21 ++++++++--
synapse/replication/slave/storage/receipts.py | 25 ++++++++++-
3 files changed, 81 insertions(+), 6 deletions(-)
diff --git a/synapse/replication/slave/storage/account_data.py b/synapse/replication/slave/storage/account_data.py
index f59b0eabb..735c03c7e 100644
--- a/synapse/replication/slave/storage/account_data.py
+++ b/synapse/replication/slave/storage/account_data.py
@@ -15,7 +15,10 @@
from ._base import BaseSlavedStore
from ._slaved_id_tracker import SlavedIdTracker
+from synapse.storage import DataStore
from synapse.storage.account_data import AccountDataStore
+from synapse.storage.tags import TagsStore
+from synapse.util.caches.stream_change_cache import StreamChangeCache
class SlavedAccountDataStore(BaseSlavedStore):
@@ -25,6 +28,14 @@ class SlavedAccountDataStore(BaseSlavedStore):
self._account_data_id_gen = SlavedIdTracker(
db_conn, "account_data_max_stream_id", "stream_id",
)
+ self._account_data_stream_cache = StreamChangeCache(
+ "AccountDataAndTagsChangeCache",
+ self._account_data_id_gen.get_current_token(),
+ )
+
+ get_account_data_for_user = (
+ AccountDataStore.__dict__["get_account_data_for_user"]
+ )
get_global_account_data_by_type_for_users = (
AccountDataStore.__dict__["get_global_account_data_by_type_for_users"]
@@ -34,6 +45,16 @@ class SlavedAccountDataStore(BaseSlavedStore):
AccountDataStore.__dict__["get_global_account_data_by_type_for_user"]
)
+ get_tags_for_user = TagsStore.__dict__["get_tags_for_user"]
+
+ get_updated_tags = DataStore.get_updated_tags.__func__
+ get_updated_account_data_for_user = (
+ DataStore.get_updated_account_data_for_user.__func__
+ )
+
+ def get_max_account_data_stream_id(self):
+ return self._account_data_id_gen.get_current_token()
+
def stream_positions(self):
result = super(SlavedAccountDataStore, self).stream_positions()
position = self._account_data_id_gen.get_current_token()
@@ -47,15 +68,33 @@ class SlavedAccountDataStore(BaseSlavedStore):
if stream:
self._account_data_id_gen.advance(int(stream["position"]))
for row in stream["rows"]:
- user_id, data_type = row[1:3]
+ position, user_id, data_type = row[:3]
self.get_global_account_data_by_type_for_user.invalidate(
(data_type, user_id,)
)
+ self.get_account_data_for_user.invalidate((user_id,))
+ self._account_data_stream_cache.entity_has_changed(
+ user_id, position
+ )
stream = result.get("room_account_data")
if stream:
self._account_data_id_gen.advance(int(stream["position"]))
+ for row in stream["rows"]:
+ position, user_id = row[:2]
+ self.get_account_data_for_user.invalidate((user_id,))
+ self._account_data_stream_cache.entity_has_changed(
+ user_id, position
+ )
stream = result.get("tag_account_data")
if stream:
self._account_data_id_gen.advance(int(stream["position"]))
+ for row in stream["rows"]:
+ position, user_id = row[:2]
+ self.get_tags_for_user.invalidate((user_id,))
+ self._account_data_stream_cache.entity_has_changed(
+ user_id, position
+ )
+
+ return super(SlavedAccountDataStore, self).process_replication(result)
diff --git a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py
index c0d741452..cbc1ae419 100644
--- a/synapse/replication/slave/storage/events.py
+++ b/synapse/replication/slave/storage/events.py
@@ -23,6 +23,7 @@ from synapse.storage.roommember import RoomMemberStore
from synapse.storage.event_federation import EventFederationStore
from synapse.storage.event_push_actions import EventPushActionsStore
from synapse.storage.state import StateStore
+from synapse.storage.stream import StreamStore
from synapse.util.caches.stream_change_cache import StreamChangeCache
import ujson as json
@@ -57,6 +58,9 @@ class SlavedEventStore(BaseSlavedStore):
"EventsRoomStreamChangeCache", min_event_val,
prefilled_cache=event_cache_prefill,
)
+ self._membership_stream_cache = StreamChangeCache(
+ "MembershipStreamChangeCache", events_max,
+ )
# Cached functions can't be accessed through a class instance so we need
# to reach inside the __dict__ to extract them.
@@ -87,6 +91,9 @@ class SlavedEventStore(BaseSlavedStore):
_get_state_group_from_group = (
StateStore.__dict__["_get_state_group_from_group"]
)
+ get_recent_event_ids_for_room = (
+ StreamStore.__dict__["get_recent_event_ids_for_room"]
+ )
get_unread_push_actions_for_user_in_range = (
DataStore.get_unread_push_actions_for_user_in_range.__func__
@@ -109,10 +116,16 @@ class SlavedEventStore(BaseSlavedStore):
DataStore.get_room_events_stream_for_room.__func__
)
get_events_around = DataStore.get_events_around.__func__
+ get_state_for_event = DataStore.get_state_for_event.__func__
get_state_for_events = DataStore.get_state_for_events.__func__
get_state_groups = DataStore.get_state_groups.__func__
+ get_recent_events_for_room = DataStore.get_recent_events_for_room.__func__
+ get_room_events_stream_for_rooms = (
+ DataStore.get_room_events_stream_for_rooms.__func__
+ )
+ get_stream_token_for_event = DataStore.get_stream_token_for_event.__func__
- _set_before_and_after = DataStore._set_before_and_after
+ _set_before_and_after = staticmethod(DataStore._set_before_and_after)
_get_events = DataStore._get_events.__func__
_get_events_from_cache = DataStore._get_events_from_cache.__func__
@@ -220,9 +233,9 @@ class SlavedEventStore(BaseSlavedStore):
self.get_rooms_for_user.invalidate((event.state_key,))
# self.get_joined_hosts_for_room.invalidate((event.room_id,))
self.get_users_in_room.invalidate((event.room_id,))
- # self._membership_stream_cache.entity_has_changed(
- # event.state_key, event.internal_metadata.stream_ordering
- # )
+ self._membership_stream_cache.entity_has_changed(
+ event.state_key, event.internal_metadata.stream_ordering
+ )
self.get_invited_rooms_for_user.invalidate((event.state_key,))
if not event.is_state():
diff --git a/synapse/replication/slave/storage/receipts.py b/synapse/replication/slave/storage/receipts.py
index ec007516d..ac9662d39 100644
--- a/synapse/replication/slave/storage/receipts.py
+++ b/synapse/replication/slave/storage/receipts.py
@@ -18,6 +18,7 @@ from ._slaved_id_tracker import SlavedIdTracker
from synapse.storage import DataStore
from synapse.storage.receipts import ReceiptsStore
+from synapse.util.caches.stream_change_cache import StreamChangeCache
# So, um, we want to borrow a load of functions intended for reading from
# a DataStore, but we don't want to take functions that either write to the
@@ -37,11 +38,28 @@ class SlavedReceiptsStore(BaseSlavedStore):
db_conn, "receipts_linearized", "stream_id"
)
+ self._receipts_stream_cache = StreamChangeCache(
+ "ReceiptsRoomChangeCache", self._receipts_id_gen.get_current_token()
+ )
+
get_receipts_for_user = ReceiptsStore.__dict__["get_receipts_for_user"]
+ get_linearized_receipts_for_room = (
+ ReceiptsStore.__dict__["get_linearized_receipts_for_room"]
+ )
+ _get_linearized_receipts_for_rooms = (
+ ReceiptsStore.__dict__["_get_linearized_receipts_for_rooms"]
+ )
+ get_last_receipt_event_id_for_user = (
+ ReceiptsStore.__dict__["get_last_receipt_event_id_for_user"]
+ )
get_max_receipt_stream_id = DataStore.get_max_receipt_stream_id.__func__
get_all_updated_receipts = DataStore.get_all_updated_receipts.__func__
+ get_linearized_receipts_for_rooms = (
+ DataStore.get_linearized_receipts_for_rooms.__func__
+ )
+
def stream_positions(self):
result = super(SlavedReceiptsStore, self).stream_positions()
result["receipts"] = self._receipts_id_gen.get_current_token()
@@ -52,10 +70,15 @@ class SlavedReceiptsStore(BaseSlavedStore):
if stream:
self._receipts_id_gen.advance(int(stream["position"]))
for row in stream["rows"]:
- room_id, receipt_type, user_id = row[1:4]
+ position, room_id, receipt_type, user_id = row[:4]
self.invalidate_caches_for_receipt(room_id, receipt_type, user_id)
+ self._receipts_stream_cache.entity_has_changed(room_id, position)
return super(SlavedReceiptsStore, self).process_replication(result)
def invalidate_caches_for_receipt(self, room_id, receipt_type, user_id):
self.get_receipts_for_user.invalidate((user_id, receipt_type))
+ self.get_linearized_receipts_for_room.invalidate_many((room_id,))
+ self.get_last_receipt_event_id_for_user.invalidate(
+ (user_id, room_id, receipt_type)
+ )
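The StreamChangeCaches wired up here let /sync ask "has anything for this entity changed since token X?" without a database round-trip. A toy version of the idea:

    class ToyStreamChangeCache(object):
        def __init__(self, current_pos):
            self._earliest_known = current_pos
            self._last_changed = {}        # entity -> last stream position

        def entity_has_changed(self, entity, pos):
            prev = self._last_changed.get(entity, 0)
            self._last_changed[entity] = max(prev, pos)

        def has_entity_changed(self, entity, since):
            if since < self._earliest_known:
                return True                # too old to know; assume changed
            return self._last_changed.get(entity, 0) > since

    cache = ToyStreamChangeCache(current_pos=10)
    cache.entity_has_changed("@user:hs", 12)
    print(cache.has_entity_changed("@user:hs", since=11))   # True
    print(cache.has_entity_changed("@other:hs", since=11))  # False: skip the DB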
From ccb56fc24bd7d2675dc21796b29333538ca0d5fa Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 2 Jun 2016 12:47:06 +0100
Subject: [PATCH 076/414] Make get_joined_hosts_for_room use get_users_in_room
---
synapse/storage/roommember.py | 19 +++----------------
1 file changed, 3 insertions(+), 16 deletions(-)
diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py
index 41b395e07..64b4bd371 100644
--- a/synapse/storage/roommember.py
+++ b/synapse/storage/roommember.py
@@ -238,23 +238,10 @@ class RoomMemberStore(SQLBaseStore):
return results
- @cached(max_entries=5000)
+ @cachedInlineCallbacks(max_entries=5000)
def get_joined_hosts_for_room(self, room_id):
- return self.runInteraction(
- "get_joined_hosts_for_room",
- self._get_joined_hosts_for_room_txn,
- room_id,
- )
-
- def _get_joined_hosts_for_room_txn(self, txn, room_id):
- rows = self._get_members_rows_txn(
- txn,
- room_id, membership=Membership.JOIN
- )
-
- joined_domains = set(get_domain_from_id(r["user_id"]) for r in rows)
-
- return joined_domains
+ user_ids = yield self.get_users_in_room(room_id)
+ defer.returnValue(set(get_domain_from_id(uid) for uid in user_ids))
def _get_members_events_txn(self, txn, room_id, membership=None, user_id=None):
rows = self._get_members_rows_txn(
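Deriving the hosts from get_users_in_room keeps the heavy lifting, and the cache invalidation, in one place; the domain extraction itself is trivial. Sketch with an illustrative get_domain_from_id:

    def get_domain_from_id(user_id):
        return user_id.split(":", 1)[1]

    user_ids = ["@a:one.example", "@b:two.example", "@c:one.example"]
    print(set(get_domain_from_id(uid) for uid in user_ids))
    # {'one.example', 'two.example'}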
From 4c04222fa55d35ad3a75c5538ec477046b6c5b30 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 2 Jun 2016 13:02:33 +0100
Subject: [PATCH 077/414] Poke notifier on next reactor tick
---
synapse/handlers/message.py | 11 +++++++----
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index c41dafdef..15caf1950 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -26,9 +26,9 @@ from synapse.types import (
UserID, RoomAlias, RoomStreamToken, StreamToken, get_domain_from_id
)
from synapse.util import unwrapFirstError
-from synapse.util.async import concurrently_execute
+from synapse.util.async import concurrently_execute, run_on_reactor
from synapse.util.caches.snapshot_cache import SnapshotCache
-from synapse.util.logcontext import PreserveLoggingContext, preserve_fn
+from synapse.util.logcontext import preserve_fn
from synapse.visibility import filter_events_for_client
from ._base import BaseHandler
@@ -908,13 +908,16 @@ class MessageHandler(BaseHandler):
"Failed to get destination from event %s", s.event_id
)
- with PreserveLoggingContext():
- # Don't block waiting on waking up all the listeners.
+ @defer.inlineCallbacks
+ def _notify():
+ yield run_on_reactor()
self.notifier.on_new_room_event(
event, event_stream_id, max_stream_id,
extra_users=extra_users
)
+ preserve_fn(_notify)()
+
# If invite, remove room_state from unsigned before sending.
event.unsigned.pop("invite_room_state", None)
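The pattern above reschedules the notifier poke onto the reactor so that the current call stack unwinds, and the response can go out, before every listener is woken. A standalone sketch (Twisted assumed; notifier and event are stand-ins):

    from twisted.internet import defer, reactor
    from twisted.internet.task import deferLater

    @defer.inlineCallbacks
    def _notify(notifier, event):
        # resume on the next reactor tick, after the caller has returned
        yield deferLater(reactor, 0, lambda: None)
        notifier.on_new_room_event(event)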
From 73c711243382a48b9b67fddf5ed9df2d1ee1be43 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 2 Jun 2016 11:29:44 +0100
Subject: [PATCH 078/414] Change CacheMetrics to be quicker
We change it so that each cache has an individual CacheMetric, instead
of having one global CacheMetric. This means that when a cache tries to
increment a counter it does not need to go through as many layers of indirection.
---
synapse/metrics/__init__.py | 16 +++-----
synapse/metrics/metric.py | 44 +++++++++++-----------
synapse/util/caches/__init__.py | 20 +++++++---
synapse/util/caches/descriptors.py | 17 +++++++--
synapse/util/caches/dictionary_cache.py | 8 ++--
synapse/util/caches/expiringcache.py | 8 ++--
synapse/util/caches/stream_change_cache.py | 16 ++++----
tests/metrics/test_metric.py | 23 +++++------
8 files changed, 82 insertions(+), 70 deletions(-)
diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py
index 5664d5a38..c38f24485 100644
--- a/synapse/metrics/__init__.py
+++ b/synapse/metrics/__init__.py
@@ -33,11 +33,7 @@ from .metric import (
logger = logging.getLogger(__name__)
-# We'll keep all the available metrics in a single toplevel dict, one shared
-# for the entire process. We don't currently support per-HomeServer instances
-# of metrics, because in practice any one python VM will host only one
-# HomeServer anyway. This makes a lot of implementation neater
-all_metrics = {}
+all_metrics = []
class Metrics(object):
@@ -53,7 +49,7 @@ class Metrics(object):
metric = metric_class(full_name, *args, **kwargs)
- all_metrics[full_name] = metric
+ all_metrics.append(metric)
return metric
def register_counter(self, *args, **kwargs):
@@ -84,12 +80,12 @@ def render_all():
# TODO(paul): Internal hack
update_resource_metrics()
- for name in sorted(all_metrics.keys()):
+ for metric in all_metrics:
try:
- strs += all_metrics[name].render()
+ strs += metric.render()
except Exception:
- strs += ["# FAILED to render %s" % name]
- logger.exception("Failed to render %s metric", name)
+ strs += ["# FAILED to render"]
+ logger.exception("Failed to render metric")
strs.append("") # to generate a final CRLF
diff --git a/synapse/metrics/metric.py b/synapse/metrics/metric.py
index 368fc2498..341043952 100644
--- a/synapse/metrics/metric.py
+++ b/synapse/metrics/metric.py
@@ -47,9 +47,6 @@ class BaseMetric(object):
for k, v in zip(self.labels, values)])
)
- def render(self):
- return map_concat(self.render_item, sorted(self.counts.keys()))
-
class CounterMetric(BaseMetric):
"""The simplest kind of metric; one that stores a monotonically-increasing
@@ -83,6 +80,9 @@ class CounterMetric(BaseMetric):
def render_item(self, k):
return ["%s%s %d" % (self.name, self._render_key(k), self.counts[k])]
+ def render(self):
+ return map_concat(self.render_item, sorted(self.counts.keys()))
+
class CallbackMetric(BaseMetric):
"""A metric that returns the numeric value returned by a callback whenever
@@ -126,30 +126,30 @@ class DistributionMetric(object):
class CacheMetric(object):
- """A combination of two CounterMetrics, one to count cache hits and one to
- count a total, and a callback metric to yield the current size.
+ __slots__ = ("name", "cache_name", "hits", "misses", "size_callback")
- This metric generates standard metric name pairs, so that monitoring rules
- can easily be applied to measure hit ratio."""
-
- def __init__(self, name, size_callback, labels=[]):
+ def __init__(self, name, size_callback, cache_name):
self.name = name
+ self.cache_name = cache_name
- self.hits = CounterMetric(name + ":hits", labels=labels)
- self.total = CounterMetric(name + ":total", labels=labels)
+ self.hits = 0
+ self.misses = 0
- self.size = CallbackMetric(
- name + ":size",
- callback=size_callback,
- labels=labels,
- )
+ self.size_callback = size_callback
- def inc_hits(self, *values):
- self.hits.inc(*values)
- self.total.inc(*values)
+ def inc_hits(self):
+ self.hits += 1
- def inc_misses(self, *values):
- self.total.inc(*values)
+ def inc_misses(self):
+ self.misses += 1
def render(self):
- return self.hits.render() + self.total.render() + self.size.render()
+ size = self.size_callback()
+ hits = self.hits
+ total = self.misses + self.hits
+
+ return [
+ """%s:hits{name="%s"} %d""" % (self.name, self.cache_name, hits),
+ """%s:total{name="%s"} %d""" % (self.name, self.cache_name, total),
+ """%s:size{name="%s"} %d""" % (self.name, self.cache_name, size),
+ ]
diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py
index d53569ca4..ebd715c5d 100644
--- a/synapse/util/caches/__init__.py
+++ b/synapse/util/caches/__init__.py
@@ -24,11 +24,21 @@ DEBUG_CACHES = False
metrics = synapse.metrics.get_metrics_for("synapse.util.caches")
caches_by_name = {}
-cache_counter = metrics.register_cache(
- "cache",
- lambda: {(name,): len(caches_by_name[name]) for name in caches_by_name.keys()},
- labels=["name"],
-)
+# cache_counter = metrics.register_cache(
+# "cache",
+# lambda: {(name,): len(caches_by_name[name]) for name in caches_by_name.keys()},
+# labels=["name"],
+# )
+
+
+def register_cache(name, cache):
+ caches_by_name[name] = cache
+ return metrics.register_cache(
+ "cache",
+ lambda: len(cache),
+ name,
+ )
+
_string_cache = LruCache(int(5000 * CACHE_SIZE_FACTOR))
caches_by_name["string_cache"] = _string_cache
diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py
index 758f5982b..5d25c9e76 100644
--- a/synapse/util/caches/descriptors.py
+++ b/synapse/util/caches/descriptors.py
@@ -22,7 +22,7 @@ from synapse.util.logcontext import (
PreserveLoggingContext, preserve_context_over_deferred, preserve_context_over_fn
)
-from . import caches_by_name, DEBUG_CACHES, cache_counter
+from . import DEBUG_CACHES, register_cache
from twisted.internet import defer
@@ -43,6 +43,15 @@ CACHE_SIZE_FACTOR = float(os.environ.get("SYNAPSE_CACHE_FACTOR", 0.1))
class Cache(object):
+ __slots__ = (
+ "cache",
+ "max_entries",
+ "name",
+ "keylen",
+ "sequence",
+ "thread",
+ "metrics",
+ )
def __init__(self, name, max_entries=1000, keylen=1, lru=True, tree=False):
if lru:
@@ -59,7 +68,7 @@ class Cache(object):
self.keylen = keylen
self.sequence = 0
self.thread = None
- caches_by_name[name] = self.cache
+ self.metrics = register_cache(name, self.cache)
def check_thread(self):
expected_thread = self.thread
@@ -74,10 +83,10 @@ class Cache(object):
def get(self, key, default=_CacheSentinel):
val = self.cache.get(key, _CacheSentinel)
if val is not _CacheSentinel:
- cache_counter.inc_hits(self.name)
+ self.metrics.inc_hits()
return val
- cache_counter.inc_misses(self.name)
+ self.metrics.inc_misses()
if default is _CacheSentinel:
raise KeyError()
diff --git a/synapse/util/caches/dictionary_cache.py b/synapse/util/caches/dictionary_cache.py
index f92d80542..b0ca1bb79 100644
--- a/synapse/util/caches/dictionary_cache.py
+++ b/synapse/util/caches/dictionary_cache.py
@@ -15,7 +15,7 @@
from synapse.util.caches.lrucache import LruCache
from collections import namedtuple
-from . import caches_by_name, cache_counter
+from . import register_cache
import threading
import logging
@@ -43,7 +43,7 @@ class DictionaryCache(object):
__slots__ = []
self.sentinel = Sentinel()
- caches_by_name[name] = self.cache
+ self.metrics = register_cache(name, self.cache)
def check_thread(self):
expected_thread = self.thread
@@ -58,7 +58,7 @@ class DictionaryCache(object):
def get(self, key, dict_keys=None):
entry = self.cache.get(key, self.sentinel)
if entry is not self.sentinel:
- cache_counter.inc_hits(self.name)
+ self.metrics.inc_hits()
if dict_keys is None:
return DictionaryEntry(entry.full, dict(entry.value))
@@ -69,7 +69,7 @@ class DictionaryCache(object):
if k in entry.value
})
- cache_counter.inc_misses(self.name)
+ self.metrics.inc_misses()
return DictionaryEntry(False, {})
def invalidate(self, key):
diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py
index 2b68c1ac9..080388958 100644
--- a/synapse/util/caches/expiringcache.py
+++ b/synapse/util/caches/expiringcache.py
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from synapse.util.caches import cache_counter, caches_by_name
+from synapse.util.caches import register_cache
import logging
@@ -49,7 +49,7 @@ class ExpiringCache(object):
self._cache = {}
- caches_by_name[cache_name] = self._cache
+ self.metrics = register_cache(cache_name, self._cache)
def start(self):
if not self._expiry_ms:
@@ -78,9 +78,9 @@ class ExpiringCache(object):
def __getitem__(self, key):
try:
entry = self._cache[key]
- cache_counter.inc_hits(self._cache_name)
+ self.metrics.inc_hits()
except KeyError:
- cache_counter.inc_misses(self._cache_name)
+ self.metrics.inc_misses()
raise
if self._reset_expiry_on_get:
diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py
index ea8a74ca6..3c051dabc 100644
--- a/synapse/util/caches/stream_change_cache.py
+++ b/synapse/util/caches/stream_change_cache.py
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from synapse.util.caches import cache_counter, caches_by_name
+from synapse.util.caches import register_cache
from blist import sorteddict
@@ -42,7 +42,7 @@ class StreamChangeCache(object):
self._cache = sorteddict()
self._earliest_known_stream_pos = current_stream_pos
self.name = name
- caches_by_name[self.name] = self._cache
+ self.metrics = register_cache(self.name, self._cache)
for entity, stream_pos in prefilled_cache.items():
self.entity_has_changed(entity, stream_pos)
@@ -53,19 +53,19 @@ class StreamChangeCache(object):
assert type(stream_pos) is int
if stream_pos < self._earliest_known_stream_pos:
- cache_counter.inc_misses(self.name)
+ self.metrics.inc_misses()
return True
latest_entity_change_pos = self._entity_to_key.get(entity, None)
if latest_entity_change_pos is None:
- cache_counter.inc_hits(self.name)
+ self.metrics.inc_hits()
return False
if stream_pos < latest_entity_change_pos:
- cache_counter.inc_misses(self.name)
+ self.metrics.inc_misses()
return True
- cache_counter.inc_hits(self.name)
+ self.metrics.inc_hits()
return False
def get_entities_changed(self, entities, stream_pos):
@@ -82,10 +82,10 @@ class StreamChangeCache(object):
self._cache[k] for k in keys[i:]
).intersection(entities)
- cache_counter.inc_hits(self.name)
+ self.metrics.inc_hits()
else:
result = entities
- cache_counter.inc_misses(self.name)
+ self.metrics.inc_misses()
return result
diff --git a/tests/metrics/test_metric.py b/tests/metrics/test_metric.py
index f3c1927ce..f85455a5a 100644
--- a/tests/metrics/test_metric.py
+++ b/tests/metrics/test_metric.py
@@ -61,9 +61,6 @@ class CounterMetricTestCase(unittest.TestCase):
'vector{method="PUT"} 1',
])
- # Check that passing too few values errors
- self.assertRaises(ValueError, counter.inc)
-
class CallbackMetricTestCase(unittest.TestCase):
@@ -138,27 +135,27 @@ class CacheMetricTestCase(unittest.TestCase):
def test_cache(self):
d = dict()
- metric = CacheMetric("cache", lambda: len(d))
+ metric = CacheMetric("cache", lambda: len(d), "cache_name")
self.assertEquals(metric.render(), [
- 'cache:hits 0',
- 'cache:total 0',
- 'cache:size 0',
+ 'cache:hits{name="cache_name"} 0',
+ 'cache:total{name="cache_name"} 0',
+ 'cache:size{name="cache_name"} 0',
])
metric.inc_misses()
d["key"] = "value"
self.assertEquals(metric.render(), [
- 'cache:hits 0',
- 'cache:total 1',
- 'cache:size 1',
+ 'cache:hits{name="cache_name"} 0',
+ 'cache:total{name="cache_name"} 1',
+ 'cache:size{name="cache_name"} 1',
])
metric.inc_hits()
self.assertEquals(metric.render(), [
- 'cache:hits 1',
- 'cache:total 2',
- 'cache:size 1',
+ 'cache:hits{name="cache_name"} 1',
+ 'cache:total{name="cache_name"} 2',
+ 'cache:size{name="cache_name"} 1',
])
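
The refactor trades the labelled CounterMetric pair for two plain integer attributes, so inc_hits() and inc_misses() become a single integer add on the hot path and render() formats the Prometheus-style lines itself. A usage sketch of the new constructor, with an illustrative cache name:

    d = {}
    metric = CacheMetric("cache", lambda: len(d), "my_cache")

    metric.inc_misses()      # lookup missed...
    d["key"] = "value"       # ...so the cache was populated
    metric.inc_hits()        # next lookup hits

    print("\n".join(metric.render()))
    # cache:hits{name="my_cache"} 1
    # cache:total{name="my_cache"} 2
    # cache:size{name="my_cache"} 1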
From 58a224a6515dceacebc729f1e6fbb87a22f3a35a Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 3 Jun 2016 11:47:07 +0100
Subject: [PATCH 079/414] Pull out update_results_dict
---
synapse/util/caches/descriptors.py | 15 +++++++--------
1 file changed, 7 insertions(+), 8 deletions(-)
diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py
index 5be409727..799fd2a9c 100644
--- a/synapse/util/caches/descriptors.py
+++ b/synapse/util/caches/descriptors.py
@@ -32,7 +32,7 @@ import os
import functools
import inspect
import threading
-import itertools
+
logger = logging.getLogger(__name__)
@@ -357,17 +357,16 @@ class CacheListDescriptor(object):
cached_defers[arg] = res
if cached_defers:
+ def update_results_dict(res):
+ results.update(res)
+ return results
+
return preserve_context_over_deferred(defer.gatherResults(
cached_defers.values(),
consumeErrors=True,
- ).addCallback(
- lambda res: {
- k: v
- for k, v in itertools.chain(results.items(), res)
- }
- )).addErrback(
+ ).addCallback(update_results_dict).addErrback(
unwrapFirstError
- )
+ ))
else:
return results
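
The new callback relies on dict.update() accepting an iterable of (key, value) pairs as well as a mapping, so the list that gatherResults fires with (one pair per cached deferred) can be merged into results in a single call. The shape in isolation, with illustrative names:

    from twisted.internet import defer

    def gather_into(results, pending_defers):
        # each deferred in pending_defers fires with a (key, value) tuple
        def update_results_dict(res):
            results.update(res)  # dict.update() takes a list of pairs
            return results

        return defer.gatherResults(
            pending_defers, consumeErrors=True,
        ).addCallback(update_results_dict)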
From abb151f3c9bf78f2825dba18da6bbc88ce61d32c Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Fri, 3 Jun 2016 11:57:26 +0100
Subject: [PATCH 080/414] Add a separate process that can handle /sync requests
---
synapse/app/synchrotron.py | 467 +++++++++++++++++++++++++++++++++++++
1 file changed, 467 insertions(+)
create mode 100644 synapse/app/synchrotron.py
diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py
new file mode 100644
index 000000000..f592ad352
--- /dev/null
+++ b/synapse/app/synchrotron.py
@@ -0,0 +1,467 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import synapse
+
+from synapse.api.constants import EventTypes
+from synapse.config._base import ConfigError
+from synapse.config.database import DatabaseConfig
+from synapse.config.logger import LoggingConfig
+from synapse.config.appservice import AppServiceConfig
+from synapse.events import FrozenEvent
+from synapse.handlers.presence import PresenceHandler
+from synapse.http.site import SynapseSite
+from synapse.http.server import JsonResource
+from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
+from synapse.rest.client.v2_alpha import sync
+from synapse.replication.slave.storage.events import SlavedEventStore
+from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
+from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
+from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
+from synapse.replication.slave.storage.registration import SlavedRegistrationStore
+from synapse.replication.slave.storage.filtering import SlavedFilteringStore
+from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
+from synapse.replication.slave.storage.presence import SlavedPresenceStore
+from synapse.server import HomeServer
+from synapse.storage.engines import create_engine
+from synapse.storage.presence import UserPresenceState
+from synapse.storage.roommember import RoomMemberStore
+from synapse.util.async import sleep
+from synapse.util.httpresourcetree import create_resource_tree
+from synapse.util.logcontext import LoggingContext
+from synapse.util.manhole import manhole
+from synapse.util.rlimit import change_resource_limit
+from synapse.util.stringutils import random_string
+from synapse.util.versionstring import get_version_string
+
+from twisted.internet import reactor, defer
+from twisted.web.resource import Resource
+
+from daemonize import Daemonize
+
+import sys
+import logging
+import contextlib
+import ujson as json
+
+logger = logging.getLogger("synapse.app.synchrotron")
+
+
+class SynchrotronConfig(DatabaseConfig, LoggingConfig, AppServiceConfig):
+ def read_config(self, config):
+ self.replication_url = config["replication_url"]
+ self.server_name = config["server_name"]
+ self.use_insecure_ssl_client_just_for_testing_do_not_use = config.get(
+ "use_insecure_ssl_client_just_for_testing_do_not_use", False
+ )
+ self.user_agent_suffix = None
+ self.listeners = config["listeners"]
+ self.soft_file_limit = config.get("soft_file_limit")
+ self.daemonize = config.get("daemonize")
+ self.pid_file = self.abspath(config.get("pid_file"))
+ self.macaroon_secret_key = config["macaroon_secret_key"]
+ self.expire_access_token = config.get("expire_access_token", False)
+
+ def default_config(self, server_name, **kwargs):
+ pid_file = self.abspath("synchrotron.pid")
+ return """\
+ # Slave configuration
+
+ # The replication listener on the synapse to talk to.
+ #replication_url: https://localhost:{replication_port}/_synapse/replication
+
+ server_name: "%(server_name)s"
+
+ listeners:
+ # Enable a /sync listener on the synchrotron
+ #- type: http
+ # port: {http_port}
+ # bind_address: ""
+ # Enable an SSH manhole listener on the synchrotron
+ # - type: manhole
+ # port: {manhole_port}
+ # bind_address: 127.0.0.1
+ # Enable a metric listener on the synchrotron
+ # - type: http
+ # port: {metrics_port}
+ # bind_address: 127.0.0.1
+ # resources:
+ # - names: ["metrics"]
+ # compress: False
+
+ report_stats: False
+
+ daemonize: False
+
+ pid_file: %(pid_file)s
+ """ % locals()
+
+
+class SynchrotronSlavedStore(
+ SlavedPushRuleStore,
+ SlavedEventStore,
+ SlavedReceiptsStore,
+ SlavedAccountDataStore,
+ SlavedApplicationServiceStore,
+ SlavedRegistrationStore,
+ SlavedFilteringStore,
+ SlavedPresenceStore,
+):
+ def get_presence_list_accepted(self, user_localpart):
+ return ()
+
+ def insert_client_ip(self, user, access_token, ip, user_agent):
+ pass
+
+ # XXX: This is a bit broken because we don't persist forgotten rooms
+ # in a way that they can be streamed. This means that we don't have a
+ # way to invalidate the forgotten rooms cache correctly.
+ # For now we expire the cache every hour.
+ BROKEN_CACHE_EXPIRY_MS = 60 * 60 * 1000
+ who_forgot_in_room = (
+ RoomMemberStore.__dict__["who_forgot_in_room"]
+ )
+
+
+class SynchrotronPresence(object):
+ def __init__(self, hs):
+ self.http_client = hs.get_simple_http_client()
+ self.store = hs.get_datastore()
+ self.user_to_num_current_syncs = {}
+ self.syncing_users_url = hs.config.replication_url + "/syncing_users"
+ self.clock = hs.get_clock()
+
+ active_presence = self.store.take_presence_startup_info()
+ self.user_to_current_state = {
+ state.user_id: state
+ for state in active_presence
+ }
+
+ self.process_id = random_string(16)
+ logger.info("Presence process_id is %r", self.process_id)
+
+ def set_state(self, user, state):
+ # TODO: How's this supposed to work?
+ pass
+
+ get_states = PresenceHandler.get_states.__func__
+ current_state_for_users = PresenceHandler.current_state_for_users.__func__
+
+ @defer.inlineCallbacks
+ def user_syncing(self, user_id, affect_presence):
+ if affect_presence:
+ curr_sync = self.user_to_num_current_syncs.get(user_id, 0)
+ self.user_to_num_current_syncs[user_id] = curr_sync + 1
+ # TODO: Send this less frequently.
+ # TODO: Make sure this doesn't race. Currently we can lose updates
+ # if two users come online in quick succession and the second HTTP
+ # request to the master completes before the first.
+ # TODO: Don't block the sync request on this HTTP hit.
+ yield self._send_syncing_users()
+
+ def _end():
+ if affect_presence:
+ self.user_to_num_current_syncs[user_id] -= 1
+
+ @contextlib.contextmanager
+ def _user_syncing():
+ try:
+ yield
+ finally:
+ _end()
+
+ defer.returnValue(_user_syncing())
+
+ def _send_syncing_users(self):
+ return self.http_client.post_json_get_json(self.syncing_users_url, {
+ "process_id": self.process_id,
+ "syncing_users": [
+ user_id for user_id, count in self.user_to_num_current_syncs.items()
+ if count > 0
+ ],
+ })
+
+ def process_replication(self, result):
+ stream = result.get("presence", {"rows": []})
+ for row in stream["rows"]:
+ (
+ position, user_id, state, last_active_ts,
+ last_federation_update_ts, last_user_sync_ts, status_msg,
+ currently_active
+ ) = row
+ self.user_to_current_state[user_id] = UserPresenceState(
+ user_id, state, last_active_ts,
+ last_federation_update_ts, last_user_sync_ts, status_msg,
+ currently_active
+ )
+
+
+class SynchrotronTyping(object):
+ def __init__(self, hs):
+ self._latest_room_serial = 0
+ self._room_serials = {}
+ self._room_typing = {}
+
+ def stream_positions(self):
+ return {"typing": self._latest_room_serial}
+
+ def process_replication(self, result):
+ stream = result.get("typing")
+ if stream:
+ self._latest_room_serial = int(stream["position"])
+
+ for row in stream["rows"]:
+ position, room_id, typing_json = row
+ typing = json.loads(typing_json)
+ self._room_serials[room_id] = position
+ self._room_typing[room_id] = typing
+
+
+class SynchrotronApplicationService(object):
+ def notify_interested_services(self, event):
+ pass
+
+
+class SynchrotronServer(HomeServer):
+ def get_db_conn(self, run_new_connection=True):
+ # Any param beginning with cp_ is a parameter for adbapi, and should
+ # not be passed to the database engine.
+ db_params = {
+ k: v for k, v in self.db_config.get("args", {}).items()
+ if not k.startswith("cp_")
+ }
+ db_conn = self.database_engine.module.connect(**db_params)
+
+ if run_new_connection:
+ self.database_engine.on_new_connection(db_conn)
+ return db_conn
+
+ def setup(self):
+ logger.info("Setting up.")
+ self.datastore = SynchrotronSlavedStore(self.get_db_conn(), self)
+ logger.info("Finished setting up.")
+
+ def _listen_http(self, listener_config):
+ port = listener_config["port"]
+ bind_address = listener_config.get("bind_address", "")
+ site_tag = listener_config.get("tag", port)
+ resources = {}
+ for res in listener_config["resources"]:
+ for name in res["names"]:
+ if name == "metrics":
+ resources[METRICS_PREFIX] = MetricsResource(self)
+ elif name == "client":
+ resource = JsonResource(self, canonical_json=False)
+ sync.register_servlets(self, resource)
+ resources.update({
+ "/_matrix/client/r0": resource,
+ "/_matrix/client/unstable": resource,
+ "/_matrix/client/v2_alpha": resource,
+ })
+
+ root_resource = create_resource_tree(resources, Resource())
+ reactor.listenTCP(
+ port,
+ SynapseSite(
+ "synapse.access.http.%s" % (site_tag,),
+ site_tag,
+ listener_config,
+ root_resource,
+ ),
+ interface=bind_address
+ )
+ logger.info("Synapse synchrotron now listening on port %d", port)
+
+ def start_listening(self):
+ for listener in self.config.listeners:
+ if listener["type"] == "http":
+ self._listen_http(listener)
+ elif listener["type"] == "manhole":
+ reactor.listenTCP(
+ listener["port"],
+ manhole(
+ username="matrix",
+ password="rabbithole",
+ globals={"hs": self},
+ ),
+ interface=listener.get("bind_address", '127.0.0.1')
+ )
+ else:
+ logger.warn("Unrecognized listener type: %s", listener["type"])
+
+ @defer.inlineCallbacks
+ def replicate(self):
+ http_client = self.get_simple_http_client()
+ store = self.get_datastore()
+ replication_url = self.config.replication_url
+ clock = self.get_clock()
+ notifier = self.get_notifier()
+ presence_handler = self.get_presence_handler()
+ typing_handler = self.get_typing_handler()
+
+ def expire_broken_caches():
+ store.who_forgot_in_room.invalidate_all()
+
+ def notify_from_stream(
+ result, stream_name, stream_key, room=None, user=None
+ ):
+ stream = result.get(stream_name)
+ if stream:
+ position_index = stream["field_names"].index("position")
+ if room:
+ room_index = stream["field_names"].index(room)
+ if user:
+ user_index = stream["field_names"].index(user)
+
+ users = ()
+ rooms = ()
+ for row in stream["rows"]:
+ position = row[position_index]
+
+ if user:
+ users = (row[user_index],)
+
+ if room:
+ rooms = (row[room_index],)
+
+ notifier.on_new_event(
+ stream_key, position, users=users, rooms=rooms
+ )
+
+ def notify(result):
+ stream = result.get("events")
+ if stream:
+ max_position = stream["position"]
+ for row in stream["rows"]:
+ position = row[0]
+ internal = json.loads(row[1])
+ event_json = json.loads(row[2])
+ event = FrozenEvent(event_json, internal_metadata_dict=internal)
+ extra_users = ()
+ if event.type == EventTypes.Member:
+ extra_users = (event.state_key,)
+ notifier.on_new_room_event(
+ event, position, max_position, extra_users
+ )
+
+ notify_from_stream(
+ result, "push_rules", "push_rules_key", user="user_id"
+ )
+ notify_from_stream(
+ result, "user_account_data", "account_data_key", user="user_id"
+ )
+ notify_from_stream(
+ result, "room_account_data", "account_data_key", user="user_id"
+ )
+ notify_from_stream(
+ result, "tag_account_data", "account_data_key", user="user_id"
+ )
+ notify_from_stream(
+ result, "receipts", "receipt_key", room="room_id"
+ )
+ notify_from_stream(
+ result, "typing", "typing_key", room="room_id"
+ )
+
+ next_expire_broken_caches_ms = 0
+ while True:
+ try:
+ args = store.stream_positions()
+ args.update(typing_handler.stream_positions())
+ args["timeout"] = 30000
+ result = yield http_client.get_json(replication_url, args=args)
+ now_ms = clock.time_msec()
+ if now_ms > next_expire_broken_caches_ms:
+ expire_broken_caches()
+ next_expire_broken_caches_ms = (
+ now_ms + store.BROKEN_CACHE_EXPIRY_MS
+ )
+ yield store.process_replication(result)
+ typing_handler.process_replication(result)
+ presence_handler.process_replication(result)
+ notify(result)
+ except Exception:
+ logger.exception("Error replicating from %r", replication_url)
+ yield sleep(5)
+
+ def build_presence_handler(self):
+ return SynchrotronPresence(self)
+
+ def build_typing_handler(self):
+ return SynchrotronTyping(self)
+
+
+def setup(config_options):
+ try:
+ config = SynchrotronConfig.load_config(
+ "Synapse synchrotron", config_options
+ )
+ except ConfigError as e:
+ sys.stderr.write("\n" + e.message + "\n")
+ sys.exit(1)
+
+ if not config:
+ sys.exit(0)
+
+ config.setup_logging()
+
+ database_engine = create_engine(config.database_config)
+
+ ss = SynchrotronServer(
+ config.server_name,
+ db_config=config.database_config,
+ config=config,
+ version_string=get_version_string("Synapse", synapse),
+ database_engine=database_engine,
+ application_service_handler=SynchrotronApplicationService(),
+ )
+
+ ss.setup()
+ ss.start_listening()
+
+ change_resource_limit(ss.config.soft_file_limit)
+
+ def start():
+ ss.get_datastore().start_profiling()
+ ss.replicate()
+
+ reactor.callWhenRunning(start)
+
+ return ss
+
+
+if __name__ == '__main__':
+ with LoggingContext("main"):
+ ps = setup(sys.argv[1:])
+
+ if ps.config.daemonize:
+ def run():
+ with LoggingContext("run"):
+ change_resource_limit(ps.config.soft_file_limit)
+ reactor.run()
+
+ daemon = Daemonize(
+ app="synapse-pusher",
+ pid=ps.config.pid_file,
+ action=run,
+ auto_close_fds=False,
+ verbose=True,
+ logger=logger,
+ )
+
+ daemon.start()
+ else:
+ reactor.run()
From a7ff5a17702812ae586228396d534a8ed3d88475 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 3 Jun 2016 13:40:55 +0100
Subject: [PATCH 081/414] Presence metrics. Change def of small delta
---
synapse/handlers/presence.py | 15 ++++++++++-----
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index fc8538b41..eb877763e 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -50,6 +50,9 @@ timers_fired_counter = metrics.register_counter("timers_fired")
federation_presence_counter = metrics.register_counter("federation_presence")
bump_active_time_counter = metrics.register_counter("bump_active_time")
+full_update_presence_counter = metrics.register_counter("full_update_presence")
+partial_update_presence_counter = metrics.register_counter("partial_update_presence")
+
# If a user was last active in the last LAST_ACTIVE_GRANULARITY, consider them
# "currently_active"
@@ -974,13 +977,13 @@ class PresenceEventSource(object):
user_ids_changed = set()
changed = None
- if from_key and max_token - from_key < 100:
- # For small deltas, its quicker to get all changes and then
- # work out if we share a room or they're in our presence list
+ if from_key:
changed = stream_change_cache.get_all_entities_changed(from_key)
- # get_all_entities_changed can return None
- if changed is not None:
+ if changed is not None and len(changed) < 100:
+ # For small deltas, it's quicker to get all changes and then
+ # work out if we share a room or they're in our presence list
+ partial_update_presence_counter.inc()
for other_user_id in changed:
if other_user_id in friends:
user_ids_changed.add(other_user_id)
@@ -992,6 +995,8 @@ class PresenceEventSource(object):
else:
# Too many possible updates. Find all users we can see and check
# if any of them have changed.
+ full_update_presence_counter.inc()
+
user_ids_to_check = set()
for room_id in room_ids:
users = yield self.store.get_users_in_room(room_id)
From 4ce84a1acd89a7f61896e92605e5463864848122 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 3 Jun 2016 13:49:16 +0100
Subject: [PATCH 082/414] Change metric style
---
synapse/handlers/presence.py | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index eb877763e..0e19f777b 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -50,8 +50,7 @@ timers_fired_counter = metrics.register_counter("timers_fired")
federation_presence_counter = metrics.register_counter("federation_presence")
bump_active_time_counter = metrics.register_counter("bump_active_time")
-full_update_presence_counter = metrics.register_counter("full_update_presence")
-partial_update_presence_counter = metrics.register_counter("partial_update_presence")
+get_updates_counter = metrics.register_counter("get_updates", labels=["type"])
# If a user was last active in the last LAST_ACTIVE_GRANULARITY, consider them
@@ -980,10 +979,10 @@ class PresenceEventSource(object):
if from_key:
changed = stream_change_cache.get_all_entities_changed(from_key)
- if changed is not None and len(changed) < 100:
+ if changed is not None and len(changed) < 500:
# For small deltas, it's quicker to get all changes and then
# work out if we share a room or they're in our presence list
- partial_update_presence_counter.inc()
+ get_updates_counter.inc("stream")
for other_user_id in changed:
if other_user_id in friends:
user_ids_changed.add(other_user_id)
@@ -995,7 +994,7 @@ class PresenceEventSource(object):
else:
# Too many possible updates. Find all users we can see and check
# if any of them have changed.
- full_update_presence_counter.inc()
+ get_updates_counter.inc("full")
user_ids_to_check = set()
for room_id in room_ids:
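
Folding the two counters into one labelled counter keeps the metric namespace flat while still letting monitoring split by code path. Going by the CounterMetric rendering exercised in the tests earlier, usage comes out roughly as:

    get_updates_counter = metrics.register_counter("get_updates", labels=["type"])

    get_updates_counter.inc("stream")  # small delta: walk the change stream
    get_updates_counter.inc("full")    # large delta: recheck all visible users
    # renders as:
    #   get_updates{type="full"} 1
    #   get_updates{type="stream"} 1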
From ab116bdb0c2d8e295b1473af84c453d212dc07ea Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 3 Jun 2016 14:03:42 +0100
Subject: [PATCH 083/414] Fix typo
---
synapse/handlers/typing.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py
index 3c54307be..861b8f798 100644
--- a/synapse/handlers/typing.py
+++ b/synapse/handlers/typing.py
@@ -140,7 +140,7 @@ class TypingHandler(object):
def user_left_room(self, user, room_id):
user_id = user.to_string()
if self.is_mine_id(user_id):
- member = RoomMember(room_id=room_id, user=user_id)
+ member = RoomMember(room_id=room_id, user_id=user_id)
yield self._stopped_typing(member)
@defer.inlineCallbacks
From 80aade380545a0b661e2bbef48e175900ed4d41f Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Fri, 3 Jun 2016 14:24:19 +0100
Subject: [PATCH 084/414] Send updates to the syncing users every ten seconds
or immediately if they've just come online
---
synapse/app/synchrotron.py | 53 +++++++++++++++++++++++++++++++-------
1 file changed, 43 insertions(+), 10 deletions(-)
diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py
index f592ad352..7b45c87a9 100644
--- a/synapse/app/synchrotron.py
+++ b/synapse/app/synchrotron.py
@@ -16,7 +16,7 @@
import synapse
-from synapse.api.constants import EventTypes
+from synapse.api.constants import EventTypes, PresenceState
from synapse.config._base import ConfigError
from synapse.config.database import DatabaseConfig
from synapse.config.logger import LoggingConfig
@@ -41,7 +41,7 @@ from synapse.storage.presence import UserPresenceState
from synapse.storage.roommember import RoomMemberStore
from synapse.util.async import sleep
from synapse.util.httpresourcetree import create_resource_tree
-from synapse.util.logcontext import LoggingContext
+from synapse.util.logcontext import LoggingContext, preserve_fn
from synapse.util.manhole import manhole
from synapse.util.rlimit import change_resource_limit
from synapse.util.stringutils import random_string
@@ -135,6 +135,8 @@ class SynchrotronSlavedStore(
RoomMemberStore.__dict__["who_forgot_in_room"]
)
+UPDATE_SYNCING_USERS_MS = 10 * 1000
+
class SynchrotronPresence(object):
def __init__(self, hs):
@@ -153,6 +155,13 @@ class SynchrotronPresence(object):
self.process_id = random_string(16)
logger.info("Presence process_id is %r", self.process_id)
+ self._sending_sync = False
+ self._need_to_send_sync = False
+ self.clock.looping_call(
+ self._send_syncing_users_regularly,
+ UPDATE_SYNCING_USERS_MS,
+ )
+
def set_state(self, user, state):
# TODO: How's this supposed to work?
pass
@@ -165,12 +174,10 @@ class SynchrotronPresence(object):
if affect_presence:
curr_sync = self.user_to_num_current_syncs.get(user_id, 0)
self.user_to_num_current_syncs[user_id] = curr_sync + 1
- # TODO: Send this less frequently.
- # TODO: Make sure this doesn't race. Currently we can lose updates
- # if two users come online in quick succession and the second HTTP
- # request to the master completes before the first.
- # TODO: Don't block the sync request on this HTTP hit.
- yield self._send_syncing_users()
+ prev_states = yield self.current_state_for_users([user_id])
+ if prev_states[user_id].state == PresenceState.OFFLINE:
+ # TODO: Don't block the sync request on this HTTP hit.
+ yield self._send_syncing_users_now()
def _end():
if affect_presence:
@@ -185,8 +192,24 @@ class SynchrotronPresence(object):
defer.returnValue(_user_syncing())
- def _send_syncing_users(self):
- return self.http_client.post_json_get_json(self.syncing_users_url, {
+ def _send_syncing_users_regularly(self):
+ # Only send an update if we aren't in the middle of sending one.
+ if not self._sending_sync:
+ preserve_fn(self._send_syncing_users_now)()
+
+ @defer.inlineCallbacks
+ def _send_syncing_users_now(self):
+ if self._sending_sync:
+ # We don't want to race with sending another update.
+ # Instead we wait for that update to finish and send another
+ # update afterwards.
+ self._need_to_send_sync = True
+ return
+
+ # Flag that we are sending an update.
+ self._sending_sync = True
+
+ yield self.http_client.post_json_get_json(self.syncing_users_url, {
"process_id": self.process_id,
"syncing_users": [
user_id for user_id, count in self.user_to_num_current_syncs.items()
@@ -194,6 +217,16 @@ class SynchrotronPresence(object):
],
})
+ # Unset the flag as we are no longer sending an update.
+ self._sending_sync = False
+ if self._need_to_send_sync:
+ # If something happened while we were sending the update then
+ # we might need to send another update.
+ # TODO: Check if the update that was sent matches the current state
+ # as we only need to send an update if they are different.
+ self._need_to_send_sync = False
+ yield self._send_syncing_users_now()
+
def process_replication(self, result):
stream = result.get("presence", {"rows": []})
for row in stream["rows"]:
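
The _sending_sync/_need_to_send_sync pair is a coalescing guard: at most one update is in flight, and any request arriving meanwhile is folded into a single follow-up send instead of queueing. The pattern in isolation, as a sketch with illustrative names:

    @defer.inlineCallbacks
    def send_now(self):
        if self._sending:
            self._need_resend = True   # coalesce with the in-flight send
            return

        self._sending = True
        yield self._do_send()          # the actual HTTP hit
        self._sending = False

        if self._need_resend:
            self._need_resend = False
            yield self.send_now()      # state may have changed meanwhile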
From eef541a2919649e6d756d45a29d47fe76cfe02e2 Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Fri, 3 Jun 2016 14:42:35 +0100
Subject: [PATCH 085/414] Move insert_client_ip to a separate class
---
synapse/storage/__init__.py | 48 ++-----------------------
synapse/storage/client_ips.py | 68 +++++++++++++++++++++++++++++++++++
2 files changed, 71 insertions(+), 45 deletions(-)
create mode 100644 synapse/storage/client_ips.py
diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py
index 6928a213e..e93c3de66 100644
--- a/synapse/storage/__init__.py
+++ b/synapse/storage/__init__.py
@@ -17,7 +17,7 @@ from twisted.internet import defer
from .appservice import (
ApplicationServiceStore, ApplicationServiceTransactionStore
)
-from ._base import Cache, LoggingTransaction
+from ._base import LoggingTransaction
from .directory import DirectoryStore
from .events import EventsStore
from .presence import PresenceStore, UserPresenceState
@@ -45,6 +45,7 @@ from .search import SearchStore
from .tags import TagsStore
from .account_data import AccountDataStore
from .openid import OpenIdStore
+from .client_ips import ClientIpStore
from .util.id_generators import IdGenerator, StreamIdGenerator, ChainedIdGenerator
@@ -58,12 +59,6 @@ import logging
logger = logging.getLogger(__name__)
-# Number of msec of granularity to store the user IP 'last seen' time. Smaller
-# times give more inserts into the database even for readonly API hits
-# 120 seconds == 2 minutes
-LAST_SEEN_GRANULARITY = 120 * 1000
-
-
class DataStore(RoomMemberStore, RoomStore,
RegistrationStore, StreamStore, ProfileStore,
PresenceStore, TransactionStore,
@@ -84,6 +79,7 @@ class DataStore(RoomMemberStore, RoomStore,
AccountDataStore,
EventPushActionsStore,
OpenIdStore,
+ ClientIpStore,
):
def __init__(self, db_conn, hs):
@@ -91,11 +87,6 @@ class DataStore(RoomMemberStore, RoomStore,
self._clock = hs.get_clock()
self.database_engine = hs.database_engine
- self.client_ip_last_seen = Cache(
- name="client_ip_last_seen",
- keylen=4,
- )
-
self._stream_id_gen = StreamIdGenerator(
db_conn, "events", "stream_ordering",
extra_tables=[("local_invites", "stream_id")]
@@ -216,39 +207,6 @@ class DataStore(RoomMemberStore, RoomStore,
return [UserPresenceState(**row) for row in rows]
- @defer.inlineCallbacks
- def insert_client_ip(self, user, access_token, ip, user_agent):
- now = int(self._clock.time_msec())
- key = (user.to_string(), access_token, ip)
-
- try:
- last_seen = self.client_ip_last_seen.get(key)
- except KeyError:
- last_seen = None
-
- # Rate-limited inserts
- if last_seen is not None and (now - last_seen) < LAST_SEEN_GRANULARITY:
- defer.returnValue(None)
-
- self.client_ip_last_seen.prefill(key, now)
-
- # It's safe not to lock here: a) no unique constraint,
- # b) LAST_SEEN_GRANULARITY makes concurrent updates incredibly unlikely
- yield self._simple_upsert(
- "user_ips",
- keyvalues={
- "user_id": user.to_string(),
- "access_token": access_token,
- "ip": ip,
- "user_agent": user_agent,
- },
- values={
- "last_seen": now,
- },
- desc="insert_client_ip",
- lock=False,
- )
-
@defer.inlineCallbacks
def count_daily_users(self):
"""
diff --git a/synapse/storage/client_ips.py b/synapse/storage/client_ips.py
new file mode 100644
index 000000000..a90990e00
--- /dev/null
+++ b/synapse/storage/client_ips.py
@@ -0,0 +1,68 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import SQLBaseStore, Cache
+
+from twisted.internet import defer
+
+
+# Number of msec of granularity to store the user IP 'last seen' time. Smaller
+# times give more inserts into the database even for readonly API hits
+# 120 seconds == 2 minutes
+LAST_SEEN_GRANULARITY = 120 * 1000
+
+
+class ClientIpStore(SQLBaseStore):
+
+ def __init__(self, hs):
+ self.client_ip_last_seen = Cache(
+ name="client_ip_last_seen",
+ keylen=4,
+ )
+
+ super(ClientIpStore, self).__init__(hs)
+
+ @defer.inlineCallbacks
+ def insert_client_ip(self, user, access_token, ip, user_agent):
+ now = int(self._clock.time_msec())
+ key = (user.to_string(), access_token, ip)
+
+ try:
+ last_seen = self.client_ip_last_seen.get(key)
+ except KeyError:
+ last_seen = None
+
+ # Rate-limited inserts
+ if last_seen is not None and (now - last_seen) < LAST_SEEN_GRANULARITY:
+ defer.returnValue(None)
+
+ self.client_ip_last_seen.prefill(key, now)
+
+ # It's safe not to lock here: a) no unique constraint,
+ # b) LAST_SEEN_GRANULARITY makes concurrent updates incredibly unlikely
+ yield self._simple_upsert(
+ "user_ips",
+ keyvalues={
+ "user_id": user.to_string(),
+ "access_token": access_token,
+ "ip": ip,
+ "user_agent": user_agent,
+ },
+ values={
+ "last_seen": now,
+ },
+ desc="insert_client_ip",
+ lock=False,
+ )
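
The LAST_SEEN_GRANULARITY check carries the whole optimisation: the cache remembers the last write time per (user, token, ip) key and any write inside the window is skipped before touching the database. The same idea with a plain dict, as a self-contained sketch:

    LAST_SEEN_GRANULARITY = 120 * 1000  # ms; coarser means fewer DB writes

    def should_write(last_seen_cache, key, now_ms):
        last_seen = last_seen_cache.get(key)
        if last_seen is not None and (now_ms - last_seen) < LAST_SEEN_GRANULARITY:
            return False  # written recently; skip the upsert
        last_seen_cache[key] = now_ms
        return True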
From 0b3c80a234cd8f16c8714af7e7b719dc2e635b20 Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Fri, 3 Jun 2016 14:55:01 +0100
Subject: [PATCH 086/414] Use ClientIpStore to record client ips
---
synapse/app/synchrotron.py | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py
index 7b45c87a9..0446a1643 100644
--- a/synapse/app/synchrotron.py
+++ b/synapse/app/synchrotron.py
@@ -27,6 +27,7 @@ from synapse.http.site import SynapseSite
from synapse.http.server import JsonResource
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse.rest.client.v2_alpha import sync
+from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
@@ -36,6 +37,7 @@ from synapse.replication.slave.storage.filtering import SlavedFilteringStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.presence import SlavedPresenceStore
from synapse.server import HomeServer
+from synapse.storage.client_ips import ClientIpStore
from synapse.storage.engines import create_engine
from synapse.storage.presence import UserPresenceState
from synapse.storage.roommember import RoomMemberStore
@@ -119,13 +121,12 @@ class SynchrotronSlavedStore(
SlavedRegistrationStore,
SlavedFilteringStore,
SlavedPresenceStore,
+ BaseSlavedStore,
+ ClientIpStore, # After BaseSlavedStore because the constructor is different
):
def get_presence_list_accepted(self, user_localpart):
return ()
- def insert_client_ip(self, user, access_token, ip, user_agent):
- pass
-
# XXX: This is a bit broken because we don't persist forgotten rooms
# in a way that they can be streamed. This means that we don't have a
# way to invalidate the forgotten rooms cache correctly.
From da491e75b2d46c885f7fbb9240501c223e7c59bd Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Fri, 3 Jun 2016 14:56:36 +0100
Subject: [PATCH 087/414] Appease flake8
---
synapse/app/synchrotron.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py
index 0446a1643..af06ce70d 100644
--- a/synapse/app/synchrotron.py
+++ b/synapse/app/synchrotron.py
@@ -122,7 +122,7 @@ class SynchrotronSlavedStore(
SlavedFilteringStore,
SlavedPresenceStore,
BaseSlavedStore,
- ClientIpStore, # After BaseSlavedStore because the constructor is different
+ ClientIpStore,  # After BaseSlavedStore because the constructor is different
):
def get_presence_list_accepted(self, user_localpart):
return ()
From 48340e4f13a8090feac070ebb507e7629d03b530 Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Fri, 3 Jun 2016 15:02:27 +0100
Subject: [PATCH 088/414] Clear the list of ongoing syncs on shutdown
---
synapse/app/synchrotron.py | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py
index af06ce70d..f4b416f77 100644
--- a/synapse/app/synchrotron.py
+++ b/synapse/app/synchrotron.py
@@ -163,6 +163,8 @@ class SynchrotronPresence(object):
UPDATE_SYNCING_USERS_MS,
)
+ reactor.addSystemEventTrigger("before", "shutdown", self._on_shutdown)
+
def set_state(self, user, state):
# TODO: How's this supposed to work?
pass
@@ -193,6 +195,13 @@ class SynchrotronPresence(object):
defer.returnValue(_user_syncing())
+ @defer.inlineCallbacks
+ def _on_shutdown(self):
+ # When the synchrotron is shutdown tell the master to clear the in
+ # progress syncs for this process
+ self.user_to_num_current_syncs.clear()
+ yield self._send_syncing_users_now()
+
def _send_syncing_users_regularly(self):
# Only send an update if we aren't in the middle of sending one.
if not self._sending_sync:
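
Twisted runs "before"-phase shutdown triggers ahead of stopping the reactor and waits on any Deferred they return, which is what lets the final _send_syncing_users_now() HTTP request complete. The general form, with an illustrative callback:

    from twisted.internet import defer, reactor

    @defer.inlineCallbacks
    def on_shutdown():
        # last chance to talk to the network; the reactor waits for this
        yield tell_master_no_syncing_users()  # illustrative name

    reactor.addSystemEventTrigger("before", "shutdown", on_shutdown)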
From 21961c93c72c5d99d44bf6a264b641c18ac0219b Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 3 Jun 2016 15:31:56 +0100
Subject: [PATCH 089/414] Bump changelog and version
---
CHANGES.rst | 50 +++++++++++++++++++++++++++++++++++++++++++++
synapse/__init__.py | 2 +-
2 files changed, 51 insertions(+), 1 deletion(-)
diff --git a/CHANGES.rst b/CHANGES.rst
index b027fb970..776681de5 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -1,3 +1,53 @@
+Changes in synapse v0.16.0-rc1 (2016-06-03)
+===========================================
+
+Features:
+
+* Add email notifications for missed messages (PR #759, #786, #799, #810, #815,
+ #821)
+* Add a ``url_preview_ip_range_whitelist`` config param (PR #760)
+* Add /report endpoint (PR #762)
+* Add basic ignore user API (PR #763)
+* Add an OpenID-ish mechanism for proving that you own a given ``user_id`` (PR #765)
+* Allow clients to specify a ``server_name`` to avoid 'No known servers' (PR #794)
+* Add ``secondary_directory_servers`` option to fetch the room list from other servers
+ (PR #808, #813)
+
+Changes:
+
+* Report per-request metrics for everything that uses request_handler (PR
+ #756)
+* Correctly handle ``NULL`` password hashes from the database (PR #775)
+* Allow receipts for events we haven't seen in the db (PR #784)
+* Make synctl read a cache factor from config file (PR #785)
+* Increment the badge count per missed conversation, not per message (PR #793)
+* Special case m.room.third_party_invite event auth to match invites (PR #814)
+
+
+Bug fixes:
+
+* Fix typo in event_auth servlet path (PR #757)
+* Fix password reset (PR #758)
+
+
+Performance improvements:
+
+* Reduce database inserts when sending transactions (PR #767)
+* Queue events by room for persistence (PR #768)
+* Add cache to ``get_user_by_id`` (PR #772)
+* Add and use ``get_domain_from_id`` (PR #773)
+* Use tree cache for ``get_linearized_receipts_for_room`` (PR #779)
+* Remove unused indices (PR #782)
+* Add caches to ``bulk_get_push_rules*`` (PR #804)
+* Cache ``get_event_reference_hashes`` (PR #806)
+* Add ``get_users_with_read_receipts_in_room`` cache (PR #809)
+* Use state to calculate ``get_users_in_room`` (PR #811)
+* Load push rules in storage layer so that they get cached (PR #825)
+* Make ``get_joined_hosts_for_room`` use ``get_users_in_room`` (PR #828)
+* Poke notifier on next reactor tick (PR #829)
+* Change CacheMetrics to be quicker (PR #830)
+
+
Changes in synapse v0.15.0-rc1 (2016-04-26)
===========================================
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 988318f5e..3b290db79 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -16,4 +16,4 @@
""" This is a reference implementation of a Matrix home server.
"""
-__version__ = "0.15.0-rc1"
+__version__ = "0.16.0-rc1"
From c11614bcdc9acf87388554e11f1c8d911bd85b57 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 3 Jun 2016 15:50:15 +0100
Subject: [PATCH 090/414] Note that v0.15.x was never released
---
CHANGES.rst | 2 ++
1 file changed, 2 insertions(+)
diff --git a/CHANGES.rst b/CHANGES.rst
index 776681de5..e77b31b58 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -1,6 +1,8 @@
Changes in synapse v0.16.0-rc1 (2016-06-03)
===========================================
+Version 0.15 was not released. See v0.15.0-rc1 below for additional changes.
+
Features:
* Add email notifications for missed messages (PR #759, #786, #799, #810, #815,
From 06d40c8b9841cd877e70e205d55a08f423ff2ec9 Mon Sep 17 00:00:00 2001
From: David Baker
Date: Fri, 3 Jun 2016 16:31:23 +0100
Subject: [PATCH 091/414] Add substitutions to email notif From
---
synapse/push/mailer.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py
index 88402e42a..933a53fc3 100644
--- a/synapse/push/mailer.py
+++ b/synapse/push/mailer.py
@@ -186,7 +186,7 @@ class Mailer(object):
multipart_msg = MIMEMultipart('alternative')
multipart_msg['Subject'] = "[%s] %s" % (self.app_name, summary_text)
- multipart_msg['From'] = self.hs.config.email_notif_from
+ multipart_msg['From'] = self.hs.config.email_notif_from % (self.app_name, )
multipart_msg['To'] = email_address
multipart_msg['Date'] = email.utils.formatdate()
multipart_msg['Message-ID'] = email.utils.make_msgid()
From fbf608decbf85051379dc24446b1b6e89ff97e8c Mon Sep 17 00:00:00 2001
From: David Baker
Date: Fri, 3 Jun 2016 16:38:39 +0100
Subject: [PATCH 092/414] Oops, we're using the dict form
---
synapse/push/mailer.py | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py
index 933a53fc3..011bc4d2b 100644
--- a/synapse/push/mailer.py
+++ b/synapse/push/mailer.py
@@ -186,7 +186,9 @@ class Mailer(object):
multipart_msg = MIMEMultipart('alternative')
multipart_msg['Subject'] = "[%s] %s" % (self.app_name, summary_text)
- multipart_msg['From'] = self.hs.config.email_notif_from % (self.app_name, )
+ multipart_msg['From'] = self.hs.config.email_notif_from % {
+ "app": self.app_name
+ }
multipart_msg['To'] = email_address
multipart_msg['Date'] = email.utils.formatdate()
multipart_msg['Message-ID'] = email.utils.make_msgid()
From 72c4d482e99d30fe96e2b24389629abe5b572626 Mon Sep 17 00:00:00 2001
From: David Baker
Date: Fri, 3 Jun 2016 16:39:50 +0100
Subject: [PATCH 093/414] 3rd time lucky: we'd already calculated it above
---
synapse/push/mailer.py | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py
index 011bc4d2b..e5c3929cd 100644
--- a/synapse/push/mailer.py
+++ b/synapse/push/mailer.py
@@ -186,9 +186,7 @@ class Mailer(object):
multipart_msg = MIMEMultipart('alternative')
multipart_msg['Subject'] = "[%s] %s" % (self.app_name, summary_text)
- multipart_msg['From'] = self.hs.config.email_notif_from % {
- "app": self.app_name
- }
+ multipart_msg['From'] = from_string
multipart_msg['To'] = email_address
multipart_msg['Date'] = email.utils.formatdate()
multipart_msg['Message-ID'] = email.utils.make_msgid()
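
The net effect of these three patches is that the configured From header is %-formatted with a dict, so the template names its substitution rather than relying on argument order. Assuming a config value shaped like the one here:

    email_notif_from = "%(app)s <noreply@example.com>"  # illustrative value

    from_string = email_notif_from % {"app": "Matrix"}
    # -> "Matrix <noreply@example.com>"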
From 05e01f21d7012c1853ff566c8a76aa66087bfbd7 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 3 Jun 2016 17:12:48 +0100
Subject: [PATCH 094/414] Remove event fetching from DB threads
---
synapse/replication/slave/storage/events.py | 5 -
synapse/storage/appservice.py | 21 ++-
synapse/storage/events.py | 138 --------------------
synapse/storage/room.py | 46 ++++---
synapse/storage/search.py | 29 ++--
synapse/storage/stream.py | 34 +++--
tests/storage/test_appservice.py | 2 +-
7 files changed, 75 insertions(+), 200 deletions(-)
diff --git a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py
index cbc1ae419..877c68508 100644
--- a/synapse/replication/slave/storage/events.py
+++ b/synapse/replication/slave/storage/events.py
@@ -131,15 +131,10 @@ class SlavedEventStore(BaseSlavedStore):
_get_events_from_cache = DataStore._get_events_from_cache.__func__
_invalidate_get_event_cache = DataStore._invalidate_get_event_cache.__func__
- _parse_events_txn = DataStore._parse_events_txn.__func__
- _get_events_txn = DataStore._get_events_txn.__func__
- _get_event_txn = DataStore._get_event_txn.__func__
_enqueue_events = DataStore._enqueue_events.__func__
_do_fetch = DataStore._do_fetch.__func__
- _fetch_events_txn = DataStore._fetch_events_txn.__func__
_fetch_event_rows = DataStore._fetch_event_rows.__func__
_get_event_from_row = DataStore._get_event_from_row.__func__
- _get_event_from_row_txn = DataStore._get_event_from_row_txn.__func__
_get_rooms_for_user_where_membership_is_txn = (
DataStore._get_rooms_for_user_where_membership_is_txn.__func__
)
diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py
index feb9d228a..ffb7d4a25 100644
--- a/synapse/storage/appservice.py
+++ b/synapse/storage/appservice.py
@@ -298,6 +298,7 @@ class ApplicationServiceTransactionStore(SQLBaseStore):
dict(txn_id=txn_id, as_id=service.id)
)
+ @defer.inlineCallbacks
def get_oldest_unsent_txn(self, service):
"""Get the oldest transaction which has not been sent for this
service.
@@ -308,12 +309,23 @@ class ApplicationServiceTransactionStore(SQLBaseStore):
A Deferred which resolves to an AppServiceTransaction or
None.
"""
- return self.runInteraction(
+ entry = yield self.runInteraction(
"get_oldest_unsent_appservice_txn",
self._get_oldest_unsent_txn,
service
)
+ if not entry:
+ defer.returnValue(None)
+
+ event_ids = json.loads(entry["event_ids"])
+
+ events = yield self.get_events(event_ids)
+
+ defer.returnValue(AppServiceTransaction(
+ service=service, id=entry["txn_id"], events=events
+ ))
+
def _get_oldest_unsent_txn(self, txn, service):
# Monotonically increasing txn ids, so just select the smallest
# one in the txns table (we delete them when they are sent)
@@ -328,12 +340,7 @@ class ApplicationServiceTransactionStore(SQLBaseStore):
entry = rows[0]
- event_ids = json.loads(entry["event_ids"])
- events = self._get_events_txn(txn, event_ids)
-
- return AppServiceTransaction(
- service=service, id=entry["txn_id"], events=events
- )
+ return entry
def _get_last_txn(self, txn, service_id):
txn.execute(
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index 2b3f79577..b710505a7 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -762,41 +762,6 @@ class EventsStore(SQLBaseStore):
if e_id in event_map and event_map[e_id]
])
- def _get_events_txn(self, txn, event_ids, check_redacted=True,
- get_prev_content=False, allow_rejected=False):
- if not event_ids:
- return []
-
- event_map = self._get_events_from_cache(
- event_ids,
- check_redacted=check_redacted,
- get_prev_content=get_prev_content,
- allow_rejected=allow_rejected,
- )
-
- missing_events_ids = [e for e in event_ids if e not in event_map]
-
- if not missing_events_ids:
- return [
- event_map[e_id] for e_id in event_ids
- if e_id in event_map and event_map[e_id]
- ]
-
- missing_events = self._fetch_events_txn(
- txn,
- missing_events_ids,
- check_redacted=check_redacted,
- get_prev_content=get_prev_content,
- allow_rejected=allow_rejected,
- )
-
- event_map.update(missing_events)
-
- return [
- event_map[e_id] for e_id in event_ids
- if e_id in event_map and event_map[e_id]
- ]
-
def _invalidate_get_event_cache(self, event_id):
for check_redacted in (False, True):
for get_prev_content in (False, True):
@@ -804,18 +769,6 @@ class EventsStore(SQLBaseStore):
(event_id, check_redacted, get_prev_content)
)
- def _get_event_txn(self, txn, event_id, check_redacted=True,
- get_prev_content=False, allow_rejected=False):
-
- events = self._get_events_txn(
- txn, [event_id],
- check_redacted=check_redacted,
- get_prev_content=get_prev_content,
- allow_rejected=allow_rejected,
- )
-
- return events[0] if events else None
-
def _get_events_from_cache(self, events, check_redacted, get_prev_content,
allow_rejected):
event_map = {}
@@ -981,34 +934,6 @@ class EventsStore(SQLBaseStore):
return rows
- def _fetch_events_txn(self, txn, events, check_redacted=True,
- get_prev_content=False, allow_rejected=False):
- if not events:
- return {}
-
- rows = self._fetch_event_rows(
- txn, events,
- )
-
- if not allow_rejected:
- rows[:] = [r for r in rows if not r["rejects"]]
-
- res = [
- self._get_event_from_row_txn(
- txn,
- row["internal_metadata"], row["json"], row["redacts"],
- check_redacted=check_redacted,
- get_prev_content=get_prev_content,
- rejected_reason=row["rejects"],
- )
- for row in rows
- ]
-
- return {
- r.event_id: r
- for r in res
- }
-
@defer.inlineCallbacks
def _get_event_from_row(self, internal_metadata, js, redacted,
check_redacted=True, get_prev_content=False,
@@ -1070,69 +995,6 @@ class EventsStore(SQLBaseStore):
defer.returnValue(ev)
- def _get_event_from_row_txn(self, txn, internal_metadata, js, redacted,
- check_redacted=True, get_prev_content=False,
- rejected_reason=None):
- d = json.loads(js)
- internal_metadata = json.loads(internal_metadata)
-
- if rejected_reason:
- rejected_reason = self._simple_select_one_onecol_txn(
- txn,
- table="rejections",
- keyvalues={"event_id": rejected_reason},
- retcol="reason",
- )
-
- ev = FrozenEvent(
- d,
- internal_metadata_dict=internal_metadata,
- rejected_reason=rejected_reason,
- )
-
- if check_redacted and redacted:
- ev = prune_event(ev)
-
- redaction_id = self._simple_select_one_onecol_txn(
- txn,
- table="redactions",
- keyvalues={"redacts": ev.event_id},
- retcol="event_id",
- )
-
- ev.unsigned["redacted_by"] = redaction_id
- # Get the redaction event.
-
- because = self._get_event_txn(
- txn,
- redaction_id,
- check_redacted=False
- )
-
- if because:
- ev.unsigned["redacted_because"] = because
-
- if get_prev_content and "replaces_state" in ev.unsigned:
- prev = self._get_event_txn(
- txn,
- ev.unsigned["replaces_state"],
- get_prev_content=False,
- )
- if prev:
- ev.unsigned["prev_content"] = prev.content
- ev.unsigned["prev_sender"] = prev.sender
-
- self._get_event_cache.prefill(
- (ev.event_id, check_redacted, get_prev_content), ev
- )
-
- return ev
-
- def _parse_events_txn(self, txn, rows):
- event_ids = [r["event_id"] for r in rows]
-
- return self._get_events_txn(txn, event_ids)
-
@defer.inlineCallbacks
def count_daily_messages(self):
"""
diff --git a/synapse/storage/room.py b/synapse/storage/room.py
index 26933e593..97f9f1929 100644
--- a/synapse/storage/room.py
+++ b/synapse/storage/room.py
@@ -194,32 +194,44 @@ class RoomStore(SQLBaseStore):
@cachedInlineCallbacks()
def get_room_name_and_aliases(self, room_id):
- def f(txn):
+ def get_room_name(txn):
sql = (
- "SELECT event_id FROM current_state_events "
- "WHERE room_id = ? "
+ "SELECT name FROM room_names"
+ " INNER JOIN current_state_events USING (room_id, event_id)"
+ " WHERE room_id = ?"
+ " LIMIT 1"
)
- sql += " AND ((type = 'm.room.name' AND state_key = '')"
- sql += " OR type = 'm.room.aliases')"
-
txn.execute(sql, (room_id,))
- results = self.cursor_to_dict(txn)
+ rows = txn.fetchall()
+ if rows:
+ return rows[0][0]
+ else:
+ return None
- return self._parse_events_txn(txn, results)
- events = yield self.runInteraction("get_room_name_and_aliases", f)
+ def get_room_aliases(txn):
+ sql = (
+ "SELECT content FROM current_state_events"
+ " INNER JOIN events USING (room_id, event_id)"
+ " WHERE room_id = ?"
+ )
+ txn.execute(sql, (room_id,))
+ return [row[0] for row in txn.fetchall()]
+
+ name = yield self.runInteraction("get_room_name", get_room_name)
+ alias_contents = yield self.runInteraction("get_room_aliases", get_room_aliases)
- name = None
aliases = []
- for e in events:
- if e.type == 'm.room.name':
- if 'name' in e.content:
- name = e.content['name']
- elif e.type == 'm.room.aliases':
- if 'aliases' in e.content:
- aliases.extend(e.content['aliases'])
+ for c in alias_contents:
+ try:
+ content = json.loads(c)
+ except Exception:
+ continue
+
+ aliases.extend(content.get('aliases', []))
defer.returnValue((name, aliases))
diff --git a/synapse/storage/search.py b/synapse/storage/search.py
index 022429962..12941d177 100644
--- a/synapse/storage/search.py
+++ b/synapse/storage/search.py
@@ -21,6 +21,7 @@ from synapse.storage.engines import PostgresEngine, Sqlite3Engine
import logging
import re
+import ujson as json
logger = logging.getLogger(__name__)
@@ -52,7 +53,7 @@ class SearchStore(BackgroundUpdateStore):
def reindex_search_txn(txn):
sql = (
- "SELECT stream_ordering, event_id FROM events"
+ "SELECT stream_ordering, event_id, room_id, type, content FROM events"
" WHERE ? <= stream_ordering AND stream_ordering < ?"
" AND (%s)"
" ORDER BY stream_ordering DESC"
@@ -61,28 +62,30 @@ class SearchStore(BackgroundUpdateStore):
txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size))
- rows = txn.fetchall()
+ rows = self.cursor_to_dict(txn)
if not rows:
return 0
- min_stream_id = rows[-1][0]
- event_ids = [row[1] for row in rows]
-
- events = self._get_events_txn(txn, event_ids)
+ min_stream_id = rows[-1]["stream_ordering"]
event_search_rows = []
- for event in events:
+ for row in rows:
try:
- event_id = event.event_id
- room_id = event.room_id
- content = event.content
- if event.type == "m.room.message":
+ event_id = row["event_id"]
+ room_id = row["room_id"]
+ etype = row["type"]
+ try:
+ content = json.loads(row["content"])
+ except:
+ continue
+
+ if etype == "m.room.message":
key = "content.body"
value = content["body"]
- elif event.type == "m.room.topic":
+ elif etype == "m.room.topic":
key = "content.topic"
value = content["topic"]
- elif event.type == "m.room.name":
+ elif etype == "m.room.name":
key = "content.name"
value = content["name"]
except (KeyError, AttributeError):
diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py
index 95b12559a..b9ad965fd 100644
--- a/synapse/storage/stream.py
+++ b/synapse/storage/stream.py
@@ -132,29 +132,25 @@ class StreamStore(SQLBaseStore):
return True
return False
- ret = self._get_events_txn(
- txn,
- # apply the filter on the room id list
- [
- r["event_id"] for r in rows
- if app_service_interested(r)
- ],
- get_prev_content=True
- )
+ return [r for r in rows if app_service_interested(r)]
- self._set_before_and_after(ret, rows)
+ rows = yield self.runInteraction("get_appservice_room_stream", f)
- if rows:
- key = "s%d" % max(r["stream_ordering"] for r in rows)
- else:
- # Assume we didn't get anything because there was nothing to
- # get.
- key = to_key
+ ret = yield self._get_events(
+ [r["event_id"] for r in rows],
+ get_prev_content=True
+ )
- return ret, key
+ self._set_before_and_after(ret, rows, topo_order=from_id is None)
- results = yield self.runInteraction("get_appservice_room_stream", f)
- defer.returnValue(results)
+ if rows:
+ key = "s%d" % max(r["stream_ordering"] for r in rows)
+ else:
+ # Assume we didn't get anything because there was nothing to
+ # get.
+ key = to_key
+
+ defer.returnValue((ret, key))
@defer.inlineCallbacks
def get_room_events_stream_for_rooms(self, room_ids, from_key, to_key, limit=0,
diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py
index 573419812..f44c4870e 100644
--- a/tests/storage/test_appservice.py
+++ b/tests/storage/test_appservice.py
@@ -357,7 +357,7 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase):
other_events = [Mock(event_id="e5"), Mock(event_id="e6")]
# we aren't testing store._base stuff here, so mock this out
- self.store._get_events_txn = Mock(return_value=events)
+ self.store.get_events = Mock(return_value=events)
yield self._insert_txn(self.as_list[1]["id"], 9, other_events)
yield self._insert_txn(service.id, 10, events)
From 10ea3f46ba3eda2f7c220a5e5902b687feb3042c Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 3 Jun 2016 17:55:32 +0100
Subject: [PATCH 095/414] Change the way we cache events
---
synapse/storage/events.py | 80 ++++++++++++++++++++-------------------
1 file changed, 41 insertions(+), 39 deletions(-)
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index b710505a7..779743b8f 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -139,6 +139,9 @@ class _EventPeristenceQueue(object):
pass
+_EventCacheEntry = namedtuple("_EventCacheEntry", ("event", "redacted_event"))
+
+
class EventsStore(SQLBaseStore):
EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts"
@@ -741,7 +744,6 @@ class EventsStore(SQLBaseStore):
event_map = self._get_events_from_cache(
event_ids,
check_redacted=check_redacted,
- get_prev_content=get_prev_content,
allow_rejected=allow_rejected,
)
@@ -751,40 +753,49 @@ class EventsStore(SQLBaseStore):
missing_events = yield self._enqueue_events(
missing_events_ids,
check_redacted=check_redacted,
- get_prev_content=get_prev_content,
allow_rejected=allow_rejected,
)
event_map.update(missing_events)
- defer.returnValue([
+ events = [
event_map[e_id] for e_id in event_id_list
if e_id in event_map and event_map[e_id]
- ])
+ ]
+
+ if get_prev_content:
+ for event in events:
+ if "replaces_state" in event.unsigned:
+ prev = yield self.get_event(
+ event.unsigned["replaces_state"],
+ get_prev_content=False,
+ allow_none=True,
+ )
+ if prev:
+ event.unsigned = dict(event.unsigned)
+ event.unsigned["prev_content"] = prev.content
+ event.unsigned["prev_sender"] = prev.sender
+
+ defer.returnValue(events)
def _invalidate_get_event_cache(self, event_id):
- for check_redacted in (False, True):
- for get_prev_content in (False, True):
- self._get_event_cache.invalidate(
- (event_id, check_redacted, get_prev_content)
- )
+ self._get_event_cache.invalidate((event_id,))
- def _get_events_from_cache(self, events, check_redacted, get_prev_content,
- allow_rejected):
+ def _get_events_from_cache(self, events, check_redacted, allow_rejected):
event_map = {}
for event_id in events:
- try:
- ret = self._get_event_cache.get(
- (event_id, check_redacted, get_prev_content,)
- )
+ ret = self._get_event_cache.get((event_id,), None)
+ if not ret:
+ continue
- if allow_rejected or not ret.rejected_reason:
- event_map[event_id] = ret
+ if allow_rejected or not ret.event.rejected_reason:
+ if check_redacted and ret.redacted_event:
+ event_map[event_id] = ret.redacted_event
else:
- event_map[event_id] = None
- except KeyError:
- pass
+ event_map[event_id] = ret.event
+ else:
+ event_map[event_id] = None
return event_map
@@ -855,8 +866,7 @@ class EventsStore(SQLBaseStore):
reactor.callFromThread(fire, event_list)
@defer.inlineCallbacks
- def _enqueue_events(self, events, check_redacted=True,
- get_prev_content=False, allow_rejected=False):
+ def _enqueue_events(self, events, check_redacted=True, allow_rejected=False):
"""Fetches events from the database using the _event_fetch_list. This
allows batch and bulk fetching of events - it allows us to fetch events
without having to create a new transaction for each request for events.
@@ -895,7 +905,6 @@ class EventsStore(SQLBaseStore):
preserve_fn(self._get_event_from_row)(
row["internal_metadata"], row["json"], row["redacts"],
check_redacted=check_redacted,
- get_prev_content=get_prev_content,
rejected_reason=row["rejects"],
)
for row in rows
@@ -936,8 +945,7 @@ class EventsStore(SQLBaseStore):
@defer.inlineCallbacks
def _get_event_from_row(self, internal_metadata, js, redacted,
- check_redacted=True, get_prev_content=False,
- rejected_reason=None):
+ check_redacted=True, rejected_reason=None):
d = json.loads(js)
internal_metadata = json.loads(internal_metadata)
@@ -949,14 +957,17 @@ class EventsStore(SQLBaseStore):
desc="_get_event_from_row",
)
- ev = FrozenEvent(
+ original_ev = FrozenEvent(
d,
internal_metadata_dict=internal_metadata,
rejected_reason=rejected_reason,
)
+ ev = original_ev
+ redacted_event = None
if check_redacted and redacted:
ev = prune_event(ev)
+ redacted_event = ev
redaction_id = yield self._simple_select_one_onecol(
table="redactions",
@@ -979,19 +990,10 @@ class EventsStore(SQLBaseStore):
# will serialise this field correctly
ev.unsigned["redacted_because"] = because
- if get_prev_content and "replaces_state" in ev.unsigned:
- prev = yield self.get_event(
- ev.unsigned["replaces_state"],
- get_prev_content=False,
- allow_none=True,
- )
- if prev:
- ev.unsigned["prev_content"] = prev.content
- ev.unsigned["prev_sender"] = prev.sender
-
- self._get_event_cache.prefill(
- (ev.event_id, check_redacted, get_prev_content), ev
- )
+ self._get_event_cache.prefill((ev.event_id,), _EventCacheEntry(
+ event=original_ev,
+ redacted_event=redacted_event,
+ ))
defer.returnValue(ev)
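The shape of the new cache is worth spelling out. Below is an illustrative
sketch (not the EventsStore itself; the event objects and FrozenEvent are
elided): one _EventCacheEntry per event ID, holding the original event plus
its pruned form when a redaction exists, so a single slot serves every
combination of caller options:

    from collections import namedtuple

    _EventCacheEntry = namedtuple("_EventCacheEntry", ("event", "redacted_event"))

    cache = {}  # event_id -> _EventCacheEntry

    def prefill(event, redacted_event=None):
        # One slot per event_id, regardless of check_redacted/get_prev_content.
        cache[event.event_id] = _EventCacheEntry(event, redacted_event)

    def lookup(event_id, check_redacted):
        entry = cache.get(event_id)
        if entry is None:
            return None  # cache miss; fall through to the database
        if check_redacted and entry.redacted_event:
            return entry.redacted_event
        return entry.event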
From 8f79084bd44f76223048c1bd6d836f904edcc95e Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Fri, 3 Jun 2016 18:03:40 +0100
Subject: [PATCH 096/414] Add get_presence_list_accepted to the broken caches
in synchrotron
---
synapse/app/synchrotron.py | 13 +++++++++----
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py
index f4b416f77..c77854fab 100644
--- a/synapse/app/synchrotron.py
+++ b/synapse/app/synchrotron.py
@@ -39,7 +39,7 @@ from synapse.replication.slave.storage.presence import SlavedPresenceStore
from synapse.server import HomeServer
from synapse.storage.client_ips import ClientIpStore
from synapse.storage.engines import create_engine
-from synapse.storage.presence import UserPresenceState
+from synapse.storage.presence import PresenceStore, UserPresenceState
from synapse.storage.roommember import RoomMemberStore
from synapse.util.async import sleep
from synapse.util.httpresourcetree import create_resource_tree
@@ -124,9 +124,6 @@ class SynchrotronSlavedStore(
BaseSlavedStore,
ClientIpStore, # After BaseSlavedStre because the constructor is different
):
- def get_presence_list_accepted(self, user_localpart):
- return ()
-
# XXX: This is a bit broken because we don't persist forgotten rooms
# in a way that they can be streamed. This means that we don't have a
# way to invalidate the forgotten rooms cache correctly.
@@ -136,6 +133,13 @@ class SynchrotronSlavedStore(
RoomMemberStore.__dict__["who_forgot_in_room"]
)
+ # XXX: This is a bit broken because we don't persist the accepted list in a
+ # way that can be replicated. This means that we don't have a way to
+ # invalidate the cache correctly.
+ get_presence_list_accepted = PresenceStore.__dict__[
+ "get_presence_list_accepted"
+ ]
+
UPDATE_SYNCING_USERS_MS = 10 * 1000
@@ -357,6 +361,7 @@ class SynchrotronServer(HomeServer):
def expire_broken_caches():
store.who_forgot_in_room.invalidate_all()
+ store.get_presence_list_accepted.invalidate_all()
def notify_from_stream(
result, stream_name, stream_key, room=None, user=None
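The PresenceStore.__dict__[...] idiom pulls the class attribute itself, i.e.
the cache-decorated descriptor, onto the slaved store without inheriting the
rest of PresenceStore. A toy illustration with a stand-in memoizing
descriptor (the real @cached machinery is considerably more involved):

    class cached(object):
        # Stand-in for synapse's @cached descriptor, exposing invalidate_all().
        def __init__(self, fn):
            self.fn, self.cache = fn, {}
        def __get__(self, obj, owner):
            def wrapper(*args):
                if args not in self.cache:
                    self.cache[args] = self.fn(obj, *args)
                return self.cache[args]
            wrapper.invalidate_all = self.cache.clear
            return wrapper

    class FullStore(object):
        @cached
        def get_presence_list_accepted(self, user_localpart):
            return []  # the real method would query the database

    class SlavedStore(object):
        # Reuse the decorated attribute, cache machinery and all.
        get_presence_list_accepted = FullStore.__dict__["get_presence_list_accepted"]

    store = SlavedStore()
    store.get_presence_list_accepted("alice")
    store.get_presence_list_accepted.invalidate_all()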
From ac9716f1546ae486cac435b8a577cc2c54b666d6 Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Fri, 3 Jun 2016 18:10:00 +0100
Subject: [PATCH 097/414] Fix spelling
---
synapse/app/synchrotron.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py
index c77854fab..aa81e1c5d 100644
--- a/synapse/app/synchrotron.py
+++ b/synapse/app/synchrotron.py
@@ -122,7 +122,7 @@ class SynchrotronSlavedStore(
SlavedFilteringStore,
SlavedPresenceStore,
BaseSlavedStore,
- ClientIpStore, # After BaseSlavedStre because the constructor is different
+ ClientIpStore, # After BaseSlavedStore because the constructor is different
):
# XXX: This is a bit broken because we don't persist forgotten rooms
# in a way that they can be streamed. This means that we don't have a
From cffe46408f40db082df76adc263cf5014031ae54 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 3 Jun 2016 18:25:21 +0100
Subject: [PATCH 098/414] Don't rely on options when inserting event into cache
---
synapse/storage/events.py | 83 ++++++++++++++++++++-------------------
1 file changed, 43 insertions(+), 40 deletions(-)
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index 779743b8f..5db24e86f 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -741,13 +741,12 @@ class EventsStore(SQLBaseStore):
event_id_list = event_ids
event_ids = set(event_ids)
- event_map = self._get_events_from_cache(
+ event_entry_map = self._get_events_from_cache(
event_ids,
- check_redacted=check_redacted,
allow_rejected=allow_rejected,
)
- missing_events_ids = [e for e in event_ids if e not in event_map]
+ missing_events_ids = [e for e in event_ids if e not in event_entry_map]
if missing_events_ids:
missing_events = yield self._enqueue_events(
@@ -756,32 +755,40 @@ class EventsStore(SQLBaseStore):
allow_rejected=allow_rejected,
)
- event_map.update(missing_events)
+ event_entry_map.update(missing_events)
- events = [
- event_map[e_id] for e_id in event_id_list
- if e_id in event_map and event_map[e_id]
- ]
+ events = []
+ for event_id in event_id_list:
+ entry = event_entry_map.get(event_id, None)
+ if not entry:
+ continue
- if get_prev_content:
- for event in events:
- if "replaces_state" in event.unsigned:
- prev = yield self.get_event(
- event.unsigned["replaces_state"],
- get_prev_content=False,
- allow_none=True,
- )
- if prev:
- event.unsigned = dict(event.unsigned)
- event.unsigned["prev_content"] = prev.content
- event.unsigned["prev_sender"] = prev.sender
+ if allow_rejected or not entry.event.rejected_reason:
+ if check_redacted and entry.redacted_event:
+ event = entry.redacted_event
+ else:
+ event = entry.event
+
+ events.append(event)
+
+ if get_prev_content:
+ if "replaces_state" in event.unsigned:
+ prev = yield self.get_event(
+ event.unsigned["replaces_state"],
+ get_prev_content=False,
+ allow_none=True,
+ )
+ if prev:
+ event.unsigned = dict(event.unsigned)
+ event.unsigned["prev_content"] = prev.content
+ event.unsigned["prev_sender"] = prev.sender
defer.returnValue(events)
def _invalidate_get_event_cache(self, event_id):
self._get_event_cache.invalidate((event_id,))
- def _get_events_from_cache(self, events, check_redacted, allow_rejected):
+ def _get_events_from_cache(self, events, allow_rejected):
event_map = {}
for event_id in events:
@@ -790,10 +797,7 @@ class EventsStore(SQLBaseStore):
continue
if allow_rejected or not ret.event.rejected_reason:
- if check_redacted and ret.redacted_event:
- event_map[event_id] = ret.redacted_event
- else:
- event_map[event_id] = ret.event
+ event_map[event_id] = ret
else:
event_map[event_id] = None
@@ -904,7 +908,6 @@ class EventsStore(SQLBaseStore):
[
preserve_fn(self._get_event_from_row)(
row["internal_metadata"], row["json"], row["redacts"],
- check_redacted=check_redacted,
rejected_reason=row["rejects"],
)
for row in rows
@@ -913,7 +916,7 @@ class EventsStore(SQLBaseStore):
)
defer.returnValue({
- e.event_id: e
+ e.event.event_id: e
for e in res if e
})
@@ -945,7 +948,7 @@ class EventsStore(SQLBaseStore):
@defer.inlineCallbacks
def _get_event_from_row(self, internal_metadata, js, redacted,
- check_redacted=True, rejected_reason=None):
+ rejected_reason=None):
d = json.loads(js)
internal_metadata = json.loads(internal_metadata)
@@ -954,7 +957,7 @@ class EventsStore(SQLBaseStore):
table="rejections",
keyvalues={"event_id": rejected_reason},
retcol="reason",
- desc="_get_event_from_row",
+ desc="_get_event_from_row_rejected_reason",
)
original_ev = FrozenEvent(
@@ -963,20 +966,18 @@ class EventsStore(SQLBaseStore):
rejected_reason=rejected_reason,
)
- ev = original_ev
redacted_event = None
- if check_redacted and redacted:
- ev = prune_event(ev)
- redacted_event = ev
+ if redacted:
+ redacted_event = prune_event(original_ev)
redaction_id = yield self._simple_select_one_onecol(
table="redactions",
- keyvalues={"redacts": ev.event_id},
+ keyvalues={"redacts": redacted_event.event_id},
retcol="event_id",
- desc="_get_event_from_row",
+ desc="_get_event_from_row_redactions",
)
- ev.unsigned["redacted_by"] = redaction_id
+ redacted_event.unsigned["redacted_by"] = redaction_id
# Get the redaction event.
because = yield self.get_event(
@@ -988,14 +989,16 @@ class EventsStore(SQLBaseStore):
if because:
# It's fine to add the event directly, since get_pdu_json
# will serialise this field correctly
- ev.unsigned["redacted_because"] = because
+ redacted_event.unsigned["redacted_because"] = because
- self._get_event_cache.prefill((ev.event_id,), _EventCacheEntry(
+ cache_entry = _EventCacheEntry(
event=original_ev,
redacted_event=redacted_event,
- ))
+ )
- defer.returnValue(ev)
+ self._get_event_cache.prefill((original_ev.event_id,), cache_entry)
+
+ defer.returnValue(cache_entry)
@defer.inlineCallbacks
def count_daily_messages(self):
From 70aee0717c22acf7eabb5f158cbaf527137bc90e Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 6 Jun 2016 11:08:12 +0100
Subject: [PATCH 099/414] Add events to cache when we persist them
---
synapse/storage/events.py | 41 +++++++++++++++++++++++++++++++++++++++
1 file changed, 41 insertions(+)
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index 5db24e86f..16398dc0a 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -635,6 +635,8 @@ class EventsStore(SQLBaseStore):
],
)
+ self._add_to_cache(txn, events_and_contexts)
+
if backfilled:
# Backfilled events come before the current state so we don't need
# to update the current state table
@@ -676,6 +678,45 @@ class EventsStore(SQLBaseStore):
return
+ def _add_to_cache(self, txn, events_and_contexts):
+ to_prefill = []
+
+ rows = []
+ N = 200
+ for i in range(0, len(events_and_contexts), N):
+ ev_map = {
+ e[0].event_id: e[0]
+ for e in events_and_contexts[i:i + N]
+ }
+ if not ev_map:
+ break
+
+ sql = (
+ "SELECT "
+ " e.event_id as event_id, "
+ " r.redacts as redacts,"
+ " rej.event_id as rejects "
+ " FROM events as e"
+ " LEFT JOIN rejections as rej USING (event_id)"
+ " LEFT JOIN redactions as r ON e.event_id = r.redacts"
+ " WHERE e.event_id IN (%s)"
+ ) % (",".join(["?"] * len(ev_map)),)
+
+ txn.execute(sql, ev_map.keys())
+ rows = self.cursor_to_dict(txn)
+ for row in rows:
+ event = ev_map[row["event_id"]]
+ if not row["rejects"] and not row["redacts"]:
+ to_prefill.append(_EventCacheEntry(
+ event=event,
+ redacted_event=None,
+ ))
+
+ def prefill():
+ for cache_entry in to_prefill:
+ self._get_event_cache.prefill((cache_entry[0].event_id,), cache_entry)
+ txn.call_after(prefill)
+
def _store_redaction(self, txn, event):
# invalidate the cache for the redacted event
txn.call_after(self._invalidate_get_event_cache, event.redacts)
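The prefill is deliberately deferred with txn.call_after, so the cache is
only populated once the transaction commits; a rolled-back persist can never
leave phantom events in the cache. A minimal sketch of that pattern, with a
hypothetical stand-in for the transaction wrapper:

    class Txn(object):
        # Hypothetical stand-in for synapse's transaction wrapper.
        def __init__(self):
            self._after_callbacks = []
        def call_after(self, fn, *args):
            self._after_callbacks.append((fn, args))
        def commit(self):
            # Only after a successful commit do the callbacks run.
            for fn, args in self._after_callbacks:
                fn(*args)

    cache = {}
    txn = Txn()
    txn.call_after(cache.setdefault, "$event:hs", "cache entry")
    assert "$event:hs" not in cache  # nothing visible before commit
    txn.commit()
    assert cache["$event:hs"] == "cache entry"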
From 7aa778fba9bb81087c3a1029e0a0d4ff55b1a065 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 6 Jun 2016 11:58:09 +0100
Subject: [PATCH 100/414] Add metric counter for number of persisted events
---
synapse/storage/events.py | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index 5db24e86f..ff4f742f6 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -27,6 +27,9 @@ from synapse.api.constants import EventTypes
from canonicaljson import encode_canonical_json
from collections import deque, namedtuple
+import synapse
+import synapse.metrics
+
import logging
import math
@@ -35,6 +38,10 @@ import ujson as json
logger = logging.getLogger(__name__)
+metrics = synapse.metrics.get_metrics_for(__name__)
+persist_event_counter = metrics.register_counter("persisted_events")
+
+
def encode_json(json_object):
if USE_FROZEN_DICTS:
# ujson doesn't like frozen_dicts
@@ -261,6 +268,7 @@ class EventsStore(SQLBaseStore):
events_and_contexts=chunk,
backfilled=backfilled,
)
+ persist_event_counter.inc_by(len(chunk))
@defer.inlineCallbacks
@log_function
@@ -278,6 +286,7 @@ class EventsStore(SQLBaseStore):
current_state=current_state,
backfilled=backfilled,
)
+ persist_event_counter.inc()
except _RollbackButIsFineException:
pass
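Both persistence paths feed the same counter: the bulk path bumps it by the
chunk size, the single-event path by one. A stand-in sketch of the counter
semantics (the real object comes from synapse.metrics):

    class Counter(object):
        # Stand-in with the same inc/inc_by surface as the registered metric.
        def __init__(self):
            self.value = 0
        def inc(self):
            self.value += 1
        def inc_by(self, n):
            self.value += n

    persist_event_counter = Counter()
    persist_event_counter.inc_by(3)  # a chunk of three events persisted
    persist_event_counter.inc()      # one event persisted individually
    assert persist_event_counter.value == 4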
From 377eb480ca66a376e85cf8927f7f9112ed60e8bc Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 6 Jun 2016 15:14:21 +0100
Subject: [PATCH 101/414] Fire after 30s not 8h
---
synapse/handlers/presence.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index 0e19f777b..2e772da66 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -183,7 +183,7 @@ class PresenceHandler(object):
# The initial delay is to allow disconnected clients a chance to
# reconnect before we treat them as offline.
self.clock.call_later(
- 30 * 1000,
+ 30,
self.clock.looping_call,
self._handle_timeouts,
5000,
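The bug was a units mix-up: the clock's call_later takes seconds (it wraps
Twisted's reactor.callLater), so 30 * 1000 scheduled the first timeout sweep
30,000 seconds, roughly 8.3 hours, after startup. For instance:

    from twisted.internet import reactor

    # reactor.callLater takes a delay in seconds, not milliseconds.
    reactor.callLater(30, lambda: None)         # fires after 30 seconds
    reactor.callLater(30 * 1000, lambda: None)  # fires after ~8.3 hours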
From 96dc600579cd6ef9937b0e007f51aa4da0fc122d Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 6 Jun 2016 15:44:41 +0100
Subject: [PATCH 102/414] Fix typos
---
synapse/handlers/presence.py | 70 +++++++++++++++++++-----------------
1 file changed, 37 insertions(+), 33 deletions(-)
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index 2e772da66..94160a5be 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -283,44 +283,48 @@ class PresenceHandler(object):
"""Checks the presence of users that have timed out and updates as
appropriate.
"""
+ logger.info("Handling presence timeouts")
now = self.clock.time_msec()
- with Measure(self.clock, "presence_handle_timeouts"):
- # Fetch the list of users that *may* have timed out. Things may have
- # changed since the timeout was set, so we won't necessarily have to
- # take any action.
- users_to_check = set(self.wheel_timer.fetch(now))
+ try:
+ with Measure(self.clock, "presence_handle_timeouts"):
+ # Fetch the list of users that *may* have timed out. Things may have
+ # changed since the timeout was set, so we won't necessarily have to
+ # take any action.
+ users_to_check = set(self.wheel_timer.fetch(now))
- # Check whether the lists of syncing processes from an external
- # process have expired.
- expired_process_ids = [
- process_id for process_id, last_update
- in self.external_process_last_update.items()
- if now - last_update > EXTERNAL_PROCESS_EXPIRY
- ]
- for process_id in expired_process_ids:
- users_to_check.update(
- self.external_process_to_current_syncs.pop(process_id, ())
+ # Check whether the lists of syncing processes from an external
+ # process have expired.
+ expired_process_ids = [
+ process_id for process_id, last_update
+ in self.external_process_last_updated_ms.items()
+ if now - last_update > EXTERNAL_PROCESS_EXPIRY
+ ]
+ for process_id in expired_process_ids:
+ users_to_check.update(
+ self.external_process_to_current_syncs.pop(process_id, ())
+ )
+ self.external_process_last_updated_ms.pop(process_id)
+
+ states = [
+ self.user_to_current_state.get(
+ user_id, UserPresenceState.default(user_id)
+ )
+ for user_id in users_to_check
+ ]
+
+ timers_fired_counter.inc_by(len(states))
+
+ changes = handle_timeouts(
+ states,
+ is_mine_fn=self.is_mine_id,
+ syncing_user_ids=self.get_currently_syncing_users(),
+ now=now,
)
- self.external_process_last_update.pop(process_id)
- states = [
- self.user_to_current_state.get(
- user_id, UserPresenceState.default(user_id)
- )
- for user_id in users_to_check
- ]
-
- timers_fired_counter.inc_by(len(states))
-
- changes = handle_timeouts(
- states,
- is_mine_fn=self.is_mine_id,
- syncing_users=self.get_syncing_users(),
- now=now,
- )
-
- preserve_fn(self._update_states)(changes)
+ preserve_fn(self._update_states)(changes)
+ except:
+ logger.exception("Exception in _handle_timeouts loop")
@defer.inlineCallbacks
def bump_presence_active_time(self, user):
From 216a05b3e39e08b0600a39fc111b4d669d06ff7c Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 6 Jun 2016 16:00:09 +0100
Subject: [PATCH 103/414] .values() returns list of sets
---
synapse/handlers/presence.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index 94160a5be..6b70fa381 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -406,7 +406,8 @@ class PresenceHandler(object):
user_id for user_id, count in self.user_to_num_current_syncs.items()
if count
}
- syncing_user_ids.update(self.external_process_to_current_syncs.values())
+ for user_ids in self.external_process_to_current_syncs.values():
+ syncing_user_ids.update(user_ids)
return syncing_user_ids
@defer.inlineCallbacks
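The subject line is the whole bug: external_process_to_current_syncs maps
each process ID to a *set* of user IDs, so .values() yields sets, and
set.update() iterates its argument, meaning it tries to add each set itself
as an (unhashable) element rather than the members. A minimal reproduction:

    external_process_to_current_syncs = {
        "proc1": {"@a:hs", "@b:hs"},
        "proc2": {"@c:hs"},
    }
    syncing_user_ids = set()
    try:
        # Broken: each value is a set, and sets are unhashable as elements.
        syncing_user_ids.update(external_process_to_current_syncs.values())
    except TypeError:
        pass  # TypeError: unhashable type: 'set'
    # Fixed: flatten one level, as the patch does.
    for user_ids in external_process_to_current_syncs.values():
        syncing_user_ids.update(user_ids)
    assert syncing_user_ids == {"@a:hs", "@b:hs", "@c:hs"}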
From 5ef84da4f11f1b1cceb0c44d9867bb597ee68e64 Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Mon, 6 Jun 2016 16:05:28 +0100
Subject: [PATCH 104/414] Yield on the sleeps intended to backoff replication
---
synapse/app/pusher.py | 2 +-
synapse/app/synchrotron.py | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py
index f1de1e7ce..3c3fa3805 100644
--- a/synapse/app/pusher.py
+++ b/synapse/app/pusher.py
@@ -311,7 +311,7 @@ class PusherServer(HomeServer):
poke_pushers(result)
except:
logger.exception("Error replicating from %r", replication_url)
- sleep(30)
+ yield sleep(30)
def setup(config_options):
diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py
index aa81e1c5d..7273055cc 100644
--- a/synapse/app/synchrotron.py
+++ b/synapse/app/synchrotron.py
@@ -443,7 +443,7 @@ class SynchrotronServer(HomeServer):
notify(result)
except:
logger.exception("Error replicating from %r", replication_url)
- sleep(5)
+ yield sleep(5)
def build_presence_handler(self):
return SynchrotronPresence(self)
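Without the yield, the Deferred returned by sleep() is discarded and the
except branch loops straight back into the next attempt; yielding inside an
@defer.inlineCallbacks generator suspends it until the timer fires. A sketch
of the retry loop, where poll_once stands in for one replication poll and
sleep is approximated via deferLater:

    from twisted.internet import defer, reactor
    from twisted.internet.task import deferLater

    def sleep(seconds):
        # Approximation of synapse.util.async.sleep: a Deferred that
        # fires after the given delay.
        return deferLater(reactor, seconds, lambda: None)

    @defer.inlineCallbacks
    def replicate_loop(poll_once):
        while True:
            try:
                yield poll_once()
            except Exception:
                # Back off before retrying; "sleep(30)" without the yield
                # would drop the Deferred and spin immediately.
                yield sleep(30)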
From 4a5bbb1941ae63f1d6632aa35e80274e56c8dbb9 Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Mon, 6 Jun 2016 16:37:12 +0100
Subject: [PATCH 105/414] Fix a KeyError in the synchrotron presence
---
synapse/app/synchrotron.py | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py
index aa81e1c5d..3d0d5cc15 100644
--- a/synapse/app/synchrotron.py
+++ b/synapse/app/synchrotron.py
@@ -187,7 +187,10 @@ class SynchrotronPresence(object):
yield self._send_syncing_users_now()
def _end():
- if affect_presence:
+ # We check that the user_id is in user_to_num_current_syncs because
+ # user_to_num_current_syncs may have been cleared if we are
+ # shutting down.
+ if affect_presence and user_id in self.user_to_num_current_syncs:
self.user_to_num_current_syncs[user_id] -= 1
@contextlib.contextmanager
From 310197bab5cf8ed2c26fae522f15f092dbcdff58 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 7 Jun 2016 09:34:50 +0100
Subject: [PATCH 106/414] Fix AS retries
---
synapse/storage/appservice.py | 4 ++--
tests/storage/test_appservice.py | 6 +++---
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py
index ffb7d4a25..a28157163 100644
--- a/synapse/storage/appservice.py
+++ b/synapse/storage/appservice.py
@@ -320,10 +320,10 @@ class ApplicationServiceTransactionStore(SQLBaseStore):
event_ids = json.loads(entry["event_ids"])
- events = yield self.get_events(event_ids)
+ event_map = yield self.get_events(event_ids)
defer.returnValue(AppServiceTransaction(
- service=service, id=entry["txn_id"], events=events
+ service=service, id=entry["txn_id"], events=event_map.values()
))
def _get_oldest_unsent_txn(self, txn, service):
diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py
index f44c4870e..6db4b966d 100644
--- a/tests/storage/test_appservice.py
+++ b/tests/storage/test_appservice.py
@@ -353,21 +353,21 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase):
@defer.inlineCallbacks
def test_get_oldest_unsent_txn(self):
service = Mock(id=self.as_list[0]["id"])
- events = [Mock(event_id="e1"), Mock(event_id="e2")]
+ events = {"e1": Mock(event_id="e1"), "e2": Mock(event_id="e2")}
other_events = [Mock(event_id="e5"), Mock(event_id="e6")]
# we aren't testing store._base stuff here, so mock this out
self.store.get_events = Mock(return_value=events)
yield self._insert_txn(self.as_list[1]["id"], 9, other_events)
- yield self._insert_txn(service.id, 10, events)
+ yield self._insert_txn(service.id, 10, events.values())
yield self._insert_txn(service.id, 11, other_events)
yield self._insert_txn(service.id, 12, other_events)
txn = yield self.store.get_oldest_unsent_txn(service)
self.assertEquals(service, txn.service)
self.assertEquals(10, txn.id)
- self.assertEquals(events, txn.events)
+ self.assertEquals(events.values(), txn.events)
@defer.inlineCallbacks
def test_get_appservices_by_state_single(self):
From 84379062f9ec259abc302af321d4ed8f5a958c01 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 7 Jun 2016 10:24:50 +0100
Subject: [PATCH 107/414] Fix AS retries, but with correct ordering
---
synapse/storage/appservice.py | 4 ++--
tests/storage/test_appservice.py | 8 ++++----
2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py
index a28157163..d1ee533fa 100644
--- a/synapse/storage/appservice.py
+++ b/synapse/storage/appservice.py
@@ -320,10 +320,10 @@ class ApplicationServiceTransactionStore(SQLBaseStore):
event_ids = json.loads(entry["event_ids"])
- event_map = yield self.get_events(event_ids)
+ events = yield self._get_events(event_ids)
defer.returnValue(AppServiceTransaction(
- service=service, id=entry["txn_id"], events=event_map.values()
+ service=service, id=entry["txn_id"], events=events
))
def _get_oldest_unsent_txn(self, txn, service):
diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py
index 6db4b966d..3e2862daa 100644
--- a/tests/storage/test_appservice.py
+++ b/tests/storage/test_appservice.py
@@ -353,21 +353,21 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase):
@defer.inlineCallbacks
def test_get_oldest_unsent_txn(self):
service = Mock(id=self.as_list[0]["id"])
- events = {"e1": Mock(event_id="e1"), "e2": Mock(event_id="e2")}
+ events = [Mock(event_id="e1"), Mock(event_id="e2")]
other_events = [Mock(event_id="e5"), Mock(event_id="e6")]
# we aren't testing store._base stuff here, so mock this out
- self.store.get_events = Mock(return_value=events)
+ self.store._get_events = Mock(return_value=events)
yield self._insert_txn(self.as_list[1]["id"], 9, other_events)
- yield self._insert_txn(service.id, 10, events.values())
+ yield self._insert_txn(service.id, 10, events)
yield self._insert_txn(service.id, 11, other_events)
yield self._insert_txn(service.id, 12, other_events)
txn = yield self.store.get_oldest_unsent_txn(service)
self.assertEquals(service, txn.service)
self.assertEquals(10, txn.id)
- self.assertEquals(events.values(), txn.events)
+ self.assertEquals(events, txn.events)
@defer.inlineCallbacks
def test_get_appservices_by_state_single(self):
From 88625db05f274ad855fb51b33c84c09c947a6bd0 Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Tue, 7 Jun 2016 11:33:36 +0100
Subject: [PATCH 108/414] Notify users for events in rooms they join.
Change how the notifier updates the map from room_id to user streams on
receiving a join event. Make it update the map when it notifies for the
join event, rather than using the "user_joined_room" distributor signal.
---
synapse/notifier.py | 14 ++++++--------
1 file changed, 6 insertions(+), 8 deletions(-)
diff --git a/synapse/notifier.py b/synapse/notifier.py
index cbec4d30a..30883a069 100644
--- a/synapse/notifier.py
+++ b/synapse/notifier.py
@@ -14,7 +14,7 @@
# limitations under the License.
from twisted.internet import defer
-from synapse.api.constants import EventTypes
+from synapse.api.constants import EventTypes, Membership
from synapse.api.errors import AuthError
from synapse.util.logutils import log_function
@@ -152,10 +152,6 @@ class Notifier(object):
self.appservice_handler = hs.get_application_service_handler()
self.state_handler = hs.get_state_handler()
- hs.get_distributor().observe(
- "user_joined_room", self._user_joined_room
- )
-
self.clock.looping_call(
self.remove_expired_streams, self.UNUSED_STREAM_EXPIRY_MS
)
@@ -248,6 +244,9 @@ class Notifier(object):
)
app_streams |= app_user_streams
+ if event.type == EventTypes.Member and event.membership == Membership.JOIN:
+ self._user_joined_room(event.state_key, event.room_id)
+
self.on_new_event(
"room_key", room_stream_id,
users=extra_users,
@@ -483,9 +482,8 @@ class Notifier(object):
user_stream.appservice, set()
).add(user_stream)
- def _user_joined_room(self, user, room_id):
- user = str(user)
- new_user_stream = self.user_to_user_stream.get(user)
+ def _user_joined_room(self, user_id, room_id):
+ new_user_stream = self.user_to_user_stream.get(user_id)
if new_user_stream is not None:
room_streams = self.room_to_user_streams.setdefault(room_id, set())
room_streams.add(new_user_stream)
From 75331c5fca6d2207094b8cbf0b3bb34cc52a4ec4 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 7 Jun 2016 13:33:13 +0100
Subject: [PATCH 109/414] Change the way we do stats
---
synapse/metrics/__init__.py | 10 +++-------
1 file changed, 3 insertions(+), 7 deletions(-)
diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py
index f317034b8..ef14bcd84 100644
--- a/synapse/metrics/__init__.py
+++ b/synapse/metrics/__init__.py
@@ -153,11 +153,7 @@ reactor_metrics = get_metrics_for("reactor")
tick_time = reactor_metrics.register_distribution("tick_time")
pending_calls_metric = reactor_metrics.register_distribution("pending_calls")
-gc_time = (
- reactor_metrics.register_distribution("gc_time_gen0"),
- reactor_metrics.register_distribution("gc_time_gen2"),
- reactor_metrics.register_distribution("gc_time_gen2"),
-)
+gc_time = reactor_metrics.register_distribution("gc_time", labels=["gen"])
def runUntilCurrentTimer(func):
@@ -190,7 +186,7 @@ def runUntilCurrentTimer(func):
# one if necessary.
threshold = gc.get_threshold()
counts = gc.get_count()
- for i in [2, 1, 0]:
+ for i in (0, 1, 2):
if threshold[i] < counts[i]:
logger.info("Collecting gc %d", i)
@@ -198,7 +194,7 @@ def runUntilCurrentTimer(func):
gc.collect(i)
end = time.time() * 1000
- gc_time[i].inc_by(end - start)
+ gc_time.inc_by(end - start, i)
return ret
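The removed tuple also shows why the labelled form is safer: it registered
"gc_time_gen2" twice, so gen-1 timings were recorded under the gen-2 name, a
copy-paste slip a single labelled metric cannot reproduce. A stand-in sketch
of the labelled shape (the real register_distribution lives in
synapse.metrics):

    class LabelledDistribution(object):
        # Stand-in: one metric object, one series per label value.
        def __init__(self, name, labels):
            self.name, self.labels, self.samples = name, labels, {}
        def inc_by(self, value, *label_values):
            self.samples.setdefault(label_values, []).append(value)

    gc_time = LabelledDistribution("gc_time", labels=["gen"])
    gc_time.inc_by(1.5, 0)   # a gen-0 collection took 1.5 ms
    gc_time.inc_by(12.0, 2)  # a gen-2 collection took 12 ms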
From 48e65099b52383743a47844b6369e173b9a96f90 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 7 Jun 2016 13:40:22 +0100
Subject: [PATCH 110/414] Also record number of unreachable objects
---
synapse/metrics/__init__.py | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py
index ef14bcd84..b29cec3de 100644
--- a/synapse/metrics/__init__.py
+++ b/synapse/metrics/__init__.py
@@ -154,6 +154,7 @@ tick_time = reactor_metrics.register_distribution("tick_time")
pending_calls_metric = reactor_metrics.register_distribution("pending_calls")
gc_time = reactor_metrics.register_distribution("gc_time", labels=["gen"])
+gc_unreachable = reactor_metrics.register_counter("gc_unreachable", labels=["gen"])
def runUntilCurrentTimer(func):
@@ -186,15 +187,16 @@ def runUntilCurrentTimer(func):
# one if necessary.
threshold = gc.get_threshold()
counts = gc.get_count()
- for i in (0, 1, 2):
+ for i in (2, 1, 0):
if threshold[i] < counts[i]:
logger.info("Collecting gc %d", i)
start = time.time() * 1000
- gc.collect(i)
+ unreachable = gc.collect(i)
end = time.time() * 1000
gc_time.inc_by(end - start, i)
+ gc_unreachable.inc_by(unreachable, i)
return ret
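gc.collect(generation) returns the number of unreachable objects it found,
which is exactly what the new per-generation counter records:

    import gc

    for gen in (2, 1, 0):
        unreachable = gc.collect(gen)  # count of unreachable objects found
        assert unreachable >= 0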
From 0b2158719c43eab87ab7a9448ae1d85008b92b92 Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Tue, 7 Jun 2016 15:07:11 +0100
Subject: [PATCH 111/414] Remove dead code.
Loading push rules now happens in the datastore, so we can remove
the methods that loaded them outside the datastore.
The ``waiting_for_join_list`` in the federation handler isn't populated by
anything, so it can be removed.
The ``_get_members_events_txn`` method isn't called from anywhere
so it can be removed.
---
synapse/handlers/federation.py | 13 ------------
synapse/push/bulk_push_rule_evaluator.py | 8 --------
synapse/push/clientformat.py | 26 ------------------------
synapse/storage/roommember.py | 7 -------
4 files changed, 54 deletions(-)
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 648a505e6..ff83c608e 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -66,10 +66,6 @@ class FederationHandler(BaseHandler):
self.hs = hs
- self.distributor.observe("user_joined_room", self.user_joined_room)
-
- self.waiting_for_join_list = {}
-
self.store = hs.get_datastore()
self.replication_layer = hs.get_replication_layer()
self.state_handler = hs.get_state_handler()
@@ -1091,15 +1087,6 @@ class FederationHandler(BaseHandler):
def get_min_depth_for_context(self, context):
return self.store.get_min_depth(context)
- @log_function
- def user_joined_room(self, user, room_id):
- waiters = self.waiting_for_join_list.get(
- (user.to_string(), room_id),
- []
- )
- while waiters:
- waiters.pop().callback(None)
-
@defer.inlineCallbacks
@log_function
def _handle_new_event(self, origin, event, state=None, auth_events=None,
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index 6e42121b1..756e5da51 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
@@ -14,7 +14,6 @@
# limitations under the License.
import logging
-import ujson as json
from twisted.internet import defer
@@ -27,13 +26,6 @@ from synapse.visibility import filter_events_for_clients
logger = logging.getLogger(__name__)
-def decode_rule_json(rule):
- rule = dict(rule)
- rule['conditions'] = json.loads(rule['conditions'])
- rule['actions'] = json.loads(rule['actions'])
- return rule
-
-
@defer.inlineCallbacks
def _get_rules(room_id, user_ids, store):
rules_by_user = yield store.bulk_get_push_rules(user_ids)
diff --git a/synapse/push/clientformat.py b/synapse/push/clientformat.py
index b3983f794..e0331b2d2 100644
--- a/synapse/push/clientformat.py
+++ b/synapse/push/clientformat.py
@@ -13,37 +13,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from synapse.push.baserules import list_with_base_rules
-
from synapse.push.rulekinds import (
PRIORITY_CLASS_MAP, PRIORITY_CLASS_INVERSE_MAP
)
import copy
-import simplejson as json
-
-
-def load_rules_for_user(user, rawrules, enabled_map):
- ruleslist = []
- for rawrule in rawrules:
- rule = dict(rawrule)
- rule["conditions"] = json.loads(rawrule["conditions"])
- rule["actions"] = json.loads(rawrule["actions"])
- ruleslist.append(rule)
-
- # We're going to be mutating this a lot, so do a deep copy
- rules = list(list_with_base_rules(ruleslist))
-
- for i, rule in enumerate(rules):
- rule_id = rule['rule_id']
- if rule_id in enabled_map:
- if rule.get('enabled', True) != bool(enabled_map[rule_id]):
- # Rules are cached across users.
- rule = dict(rule)
- rule['enabled'] = bool(enabled_map[rule_id])
- rules[i] = rule
-
- return rules
def format_push_rules_for_user(user, ruleslist):
diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py
index 64b4bd371..8bd693be7 100644
--- a/synapse/storage/roommember.py
+++ b/synapse/storage/roommember.py
@@ -243,13 +243,6 @@ class RoomMemberStore(SQLBaseStore):
user_ids = yield self.get_users_in_room(room_id)
defer.returnValue(set(get_domain_from_id(uid) for uid in user_ids))
- def _get_members_events_txn(self, txn, room_id, membership=None, user_id=None):
- rows = self._get_members_rows_txn(
- txn,
- room_id, membership, user_id,
- )
- return [r["event_id"] for r in rows]
-
def _get_members_rows_txn(self, txn, room_id, membership=None, user_id=None):
where_clause = "c.room_id = ?"
where_values = [room_id]
From dded389ac16ec023c986df400d25ca94a4a28677 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 7 Jun 2016 15:45:56 +0100
Subject: [PATCH 112/414] Allow setting of gc.set_thresholds
---
synapse/app/homeserver.py | 5 +++++
synapse/app/pusher.py | 5 +++++
synapse/app/synchrotron.py | 15 ++++++++++-----
synapse/config/server.py | 19 ++++++++++++++++++-
4 files changed, 38 insertions(+), 6 deletions(-)
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index df675c0ed..22e1721fc 100755
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -16,6 +16,7 @@
import synapse
+import gc
import logging
import os
import sys
@@ -351,6 +352,8 @@ class SynapseService(service.Service):
def startService(self):
hs = setup(self.config)
change_resource_limit(hs.config.soft_file_limit)
+ if hs.config.gc_thresholds:
+ gc.set_threshold(*hs.config.gc_thresholds)
def stopService(self):
return self._port.stopListening()
@@ -422,6 +425,8 @@ def run(hs):
# sys.settrace(logcontext_tracer)
with LoggingContext("run"):
change_resource_limit(hs.config.soft_file_limit)
+ if hs.config.gc_thresholds:
+ gc.set_threshold(*hs.config.gc_thresholds)
reactor.run()
if hs.config.daemonize:
diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py
index 3c3fa3805..7e2bf7ecc 100644
--- a/synapse/app/pusher.py
+++ b/synapse/app/pusher.py
@@ -43,6 +43,7 @@ from twisted.web.resource import Resource
from daemonize import Daemonize
+import gc
import sys
import logging
@@ -342,6 +343,8 @@ def setup(config_options):
ps.start_listening()
change_resource_limit(ps.config.soft_file_limit)
+ if ps.config.gc_thresholds:
+ gc.set_threshold(*ps.config.gc_thresholds)
def start():
ps.replicate()
@@ -361,6 +364,8 @@ if __name__ == '__main__':
def run():
with LoggingContext("run"):
change_resource_limit(ps.config.soft_file_limit)
+ if ps.config.gc_thresholds:
+ gc.set_threshold(*ps.config.gc_thresholds)
reactor.run()
daemon = Daemonize(
diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py
index 5c552ffb2..f9673ab8d 100644
--- a/synapse/app/synchrotron.py
+++ b/synapse/app/synchrotron.py
@@ -57,6 +57,7 @@ from daemonize import Daemonize
import sys
import logging
import contextlib
+import gc
import ujson as json
logger = logging.getLogger("synapse.app.synchrotron")
@@ -484,6 +485,8 @@ def setup(config_options):
ss.start_listening()
change_resource_limit(ss.config.soft_file_limit)
+ if ss.config.gc_thresholds:
+ gc.set_threshold(*ss.config.gc_thresholds)
def start():
ss.get_datastore().start_profiling()
@@ -496,17 +499,19 @@ def setup(config_options):
if __name__ == '__main__':
with LoggingContext("main"):
- ps = setup(sys.argv[1:])
+ ss = setup(sys.argv[1:])
- if ps.config.daemonize:
+ if ss.config.daemonize:
def run():
with LoggingContext("run"):
- change_resource_limit(ps.config.soft_file_limit)
+ change_resource_limit(ss.config.soft_file_limit)
+ if ss.config.gc_thresholds:
+ gc.set_threshold(*ss.config.gc_thresholds)
reactor.run()
daemon = Daemonize(
- app="synapse-pusher",
- pid=ps.config.pid_file,
+ app="synapse-synchrotron",
+ pid=ss.config.pid_file,
action=run,
auto_close_fds=False,
verbose=True,
diff --git a/synapse/config/server.py b/synapse/config/server.py
index c2d8f8a52..44b8d422e 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from ._base import Config
+from ._base import Config, ConfigError
class ServerConfig(Config):
@@ -38,6 +38,20 @@ class ServerConfig(Config):
self.listeners = config.get("listeners", [])
+ thresholds = config.get("gc_thresholds", None)
+ if thresholds is not None:
+ try:
+ assert len(thresholds) == 3
+ self.gc_thresholds = (
+ int(thresholds[0]), int(thresholds[1]), int(thresholds[2]),
+ )
+ except:
+ raise ConfigError(
+ "Value of `gc_threshold` must be a list of three integers if set"
+ )
+ else:
+ self.gc_thresholds = None
+
bind_port = config.get("bind_port")
if bind_port:
self.listeners = []
@@ -157,6 +171,9 @@ class ServerConfig(Config):
# hard limit.
soft_file_limit: 0
+ # The GC threshold parameters to pass to `gc.set_threshold`, if defined
+ # gc_thresholds: [700, 10, 10]
+
# A list of other Home Servers to fetch the public room directory from
# and include in the public room directory of this home server
# This is a temporary stopgap solution to populate new server with a
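The plumbing ends in a direct call to the stdlib, so a homeserver.yaml entry
of ``gc_thresholds: [700, 10, 10]`` amounts to:

    import gc

    gc.set_threshold(700, 10, 10)
    assert gc.get_threshold() == (700, 10, 10)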
From 2d1d1025fac846e2746dc627c0ebb6542c1488d3 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 7 Jun 2016 16:26:25 +0100
Subject: [PATCH 113/414] Add gc_threshold to pusher and synchrotron
---
synapse/app/pusher.py | 14 ++++++++++++++
synapse/app/synchrotron.py | 14 ++++++++++++++
2 files changed, 28 insertions(+)
diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py
index 7e2bf7ecc..4ec23d84c 100644
--- a/synapse/app/pusher.py
+++ b/synapse/app/pusher.py
@@ -65,6 +65,20 @@ class SlaveConfig(DatabaseConfig):
self.pid_file = self.abspath(config.get("pid_file"))
self.public_baseurl = config["public_baseurl"]
+ thresholds = config.get("gc_thresholds", None)
+ if thresholds is not None:
+ try:
+ assert len(thresholds) == 3
+ self.gc_thresholds = (
+ int(thresholds[0]), int(thresholds[1]), int(thresholds[2]),
+ )
+ except:
+ raise ConfigError(
+ "Value of `gc_threshold` must be a list of three integers if set"
+ )
+ else:
+ self.gc_thresholds = None
+
# some things used by the auth handler but not actually used in the
# pusher codebase
self.bcrypt_rounds = None
diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py
index f9673ab8d..297e19945 100644
--- a/synapse/app/synchrotron.py
+++ b/synapse/app/synchrotron.py
@@ -78,6 +78,20 @@ class SynchrotronConfig(DatabaseConfig, LoggingConfig, AppServiceConfig):
self.macaroon_secret_key = config["macaroon_secret_key"]
self.expire_access_token = config.get("expire_access_token", False)
+ thresholds = config.get("gc_thresholds", None)
+ if thresholds is not None:
+ try:
+ assert len(thresholds) == 3
+ self.gc_thresholds = (
+ int(thresholds[0]), int(thresholds[1]), int(thresholds[2]),
+ )
+ except:
+ raise ConfigError(
+ "Value of `gc_threshold` must be a list of three integers if set"
+ )
+ else:
+ self.gc_thresholds = None
+
def default_config(self, server_name, **kwargs):
pid_file = self.abspath("synchroton.pid")
return """\
From 64935d11f7730702cafba8591512ddb57e8fadf1 Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Tue, 7 Jun 2016 16:35:28 +0100
Subject: [PATCH 114/414] Add script for running sytest with dendron
---
jenkins-dendron-postgres.sh | 84 +++++++++++++++++++++++++++++++++++++
1 file changed, 84 insertions(+)
create mode 100755 jenkins-dendron-postgres.sh
diff --git a/jenkins-dendron-postgres.sh b/jenkins-dendron-postgres.sh
new file mode 100755
index 000000000..8e3a4c51a
--- /dev/null
+++ b/jenkins-dendron-postgres.sh
@@ -0,0 +1,84 @@
+#!/bin/bash
+
+set -eux
+
+: ${WORKSPACE:="$(pwd)"}
+
+export PYTHONDONTWRITEBYTECODE=yep
+export SYNAPSE_CACHE_FACTOR=1
+
+# Output test results as junit xml
+export TRIAL_FLAGS="--reporter=subunit"
+export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
+# Write coverage reports to a separate file for each process
+export COVERAGE_OPTS="-p"
+export DUMP_COVERAGE_COMMAND="coverage help"
+
+# Output flake8 violations to violations.flake8.log
+# Don't exit with non-0 status code on Jenkins,
+# so that the build steps continue and a later step can decide whether to
+# UNSTABLE or FAILURE this build.
+export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished with status code \$?"
+
+rm .coverage* || echo "No coverage files to remove"
+
+tox --notest -e py27
+
+TOX_BIN=$WORKSPACE/.tox/py27/bin
+python synapse/python_dependencies.py | xargs -n1 $TOX_BIN/pip install
+$TOX_BIN/pip install psycopg2
+$TOX_BIN/pip install lxml
+
+: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"}
+
+if [[ ! -e .dendron-base ]]; then
+ git clone https://github.com/matrix-org/dendron.git .dendron-base --mirror
+else
+ (cd .dendron-base; git fetch -p)
+fi
+
+rm -rf dendron
+git clone .dendron-base dendron --shared
+cd dendron
+
+: ${GOPATH:=${WORKSPACE}/.gopath}
+if [[ "${GOPATH}" != *:* ]]; then
+ mkdir -p "${GOPATH}"
+ export PATH="${GOPATH}/bin:${PATH}"
+fi
+export GOPATH
+
+git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop)
+
+go get github.com/constabulary/gb/...
+gb generate
+gb build
+
+cd ..
+
+
+if [[ ! -e .sytest-base ]]; then
+ git clone https://github.com/matrix-org/sytest.git .sytest-base --mirror
+else
+ (cd .sytest-base; git fetch -p)
+fi
+
+rm -rf sytest
+git clone .sytest-base sytest --shared
+cd sytest
+
+git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop)
+
+: ${PORT_BASE:=8000}
+
+./jenkins/prep_sytest_for_postgres.sh
+
+echo >&2 "Running sytest with PostgreSQL";
+./jenkins/install_and_run.sh --python $TOX_BIN/python \
+ --synapse-directory $WORKSPACE \
+ --dendron $WORKSPACE/dendron/bin/dendron \
+ --synchrotron \
+ --pusher \
+ --port-base $PORT_BASE
+
+cd ..
From 18f0cc7d993408a754e7ff26e9474a969adf762a Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 7 Jun 2016 16:51:01 +0100
Subject: [PATCH 115/414] Record some more GC metrics
---
synapse/metrics/__init__.py | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py
index b29cec3de..8f69aa1ff 100644
--- a/synapse/metrics/__init__.py
+++ b/synapse/metrics/__init__.py
@@ -156,6 +156,11 @@ pending_calls_metric = reactor_metrics.register_distribution("pending_calls")
gc_time = reactor_metrics.register_distribution("gc_time", labels=["gen"])
gc_unreachable = reactor_metrics.register_counter("gc_unreachable", labels=["gen"])
+reactor_metrics.register_callback("gc_total_objects", lambda: len(gc.get_objects()))
+reactor_metrics.register_callback(
+ "gc_counts", lambda: {(i,): v for i, v in enumerate(gc.get_count())}, labels=["gen"]
+)
+
def runUntilCurrentTimer(func):
From 0f2165ccf4fd0ae6636018cea7e1b91141179e88 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 7 Jun 2016 17:00:45 +0100
Subject: [PATCH 116/414] Don't track total objects as it's too expensive to
calculate
---
synapse/metrics/__init__.py | 1 -
1 file changed, 1 deletion(-)
diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py
index 8f69aa1ff..bdd7292a3 100644
--- a/synapse/metrics/__init__.py
+++ b/synapse/metrics/__init__.py
@@ -156,7 +156,6 @@ pending_calls_metric = reactor_metrics.register_distribution("pending_calls")
gc_time = reactor_metrics.register_distribution("gc_time", labels=["gen"])
gc_unreachable = reactor_metrics.register_counter("gc_unreachable", labels=["gen"])
-reactor_metrics.register_callback("gc_total_objects", lambda: len(gc.get_objects()))
reactor_metrics.register_callback(
"gc_counts", lambda: {(i,): v for i, v in enumerate(gc.get_count())}, labels=["gen"]
)
From bab916bccc57734fb96f7f9be66b1b15b2ed4dbf Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 8 Jun 2016 11:05:45 +0100
Subject: [PATCH 117/414] Bump version and changelog to v0.16.0-rc2
---
CHANGES.rst | 27 +++++++++++++++++++++++++++
synapse/__init__.py | 2 +-
2 files changed, 28 insertions(+), 1 deletion(-)
diff --git a/CHANGES.rst b/CHANGES.rst
index e77b31b58..40f7ebd73 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -1,3 +1,30 @@
+Changes in synapse v0.16.0-rc2 (2016-06-08)
+===========================================
+
+Features:
+
+* Add configuration option for tuning GC via ``gc.set_threshold`` (PR #849)
+
+Changes:
+
+* Record metrics about GC (PR #771, #847, #852)
+* Add metric counter for number of persisted events (PR #841)
+
+Bug fixes:
+
+* Fix 'From' header in email notifications (PR #843)
+* Fix presence where timeouts were not being fired for the first 8h after
+ restarts (PR #842)
+* Fix bug where synapse sent malformed transactions to AS's when retrying
+ transactions (Commits 310197b, 843790)
+
+Performance Improvements:
+
+* Remove event fetching from DB threads (PR #835)
+* Change the way we cache events (PR #836)
+* Add events to cache when we persist them (PR #840)
+
+
Changes in synapse v0.16.0-rc1 (2016-06-03)
===========================================
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 3b290db79..ad088a788 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -16,4 +16,4 @@
""" This is a reference implementation of a Matrix home server.
"""
-__version__ = "0.16.0-rc1"
+__version__ = "0.16.0-rc2"
From 66503a69c9070230c99737976bff73f68079e4d2 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 8 Jun 2016 11:13:56 +0100
Subject: [PATCH 118/414] Update commit hash in changelog
---
CHANGES.rst | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/CHANGES.rst b/CHANGES.rst
index 40f7ebd73..6194b3eb6 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -16,7 +16,7 @@ Bug fixes:
* Fix presence where timeouts were not being fired for the first 8h after
restarts (PR #842)
* Fix bug where synapse sent malformed transactions to AS's when retrying
- transactions (Commits 310197b, 843790)
+ transactions (Commits 310197b, 8437906)
Performance Improvements:
From 1a815fb04f1d17286be27379dd7463936606bd3a Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 8 Jun 2016 11:33:30 +0100
Subject: [PATCH 119/414] Don't hit DB for noop replication queries
---
synapse/handlers/typing.py | 3 +++
synapse/storage/account_data.py | 3 +++
synapse/storage/presence.py | 3 +++
synapse/storage/push_rule.py | 3 +++
synapse/storage/tags.py | 3 +++
5 files changed, 15 insertions(+)
diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py
index 861b8f798..5589296c0 100644
--- a/synapse/handlers/typing.py
+++ b/synapse/handlers/typing.py
@@ -221,6 +221,9 @@ class TypingHandler(object):
def get_all_typing_updates(self, last_id, current_id):
# TODO: Work out a way to do this without scanning the entire state.
+ if last_id == current_id:
+ return []
+
rows = []
for room_id, serial in self._room_serials.items():
if last_id < serial and serial <= current_id:
diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py
index ec7e8d40d..3fa226e92 100644
--- a/synapse/storage/account_data.py
+++ b/synapse/storage/account_data.py
@@ -138,6 +138,9 @@ class AccountDataStore(SQLBaseStore):
A deferred pair of lists of tuples of stream_id int, user_id string,
room_id string, type string, and content string.
"""
+ if last_room_id == current_id and last_global_id == current_id:
+ return defer.succeed(([], []))
+
def get_updated_account_data_txn(txn):
sql = (
"SELECT stream_id, user_id, account_data_type, content"
diff --git a/synapse/storage/presence.py b/synapse/storage/presence.py
index 3fab57a7e..d03f7c541 100644
--- a/synapse/storage/presence.py
+++ b/synapse/storage/presence.py
@@ -118,6 +118,9 @@ class PresenceStore(SQLBaseStore):
)
def get_all_presence_updates(self, last_id, current_id):
+ if last_id == current_id:
+ return defer.succeed([])
+
def get_all_presence_updates_txn(txn):
sql = (
"SELECT stream_id, user_id, state, last_active_ts,"
diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py
index 786d6f6d6..8183b7f1b 100644
--- a/synapse/storage/push_rule.py
+++ b/synapse/storage/push_rule.py
@@ -421,6 +421,9 @@ class PushRuleStore(SQLBaseStore):
def get_all_push_rule_updates(self, last_id, current_id, limit):
"""Get all the push rules changes that have happend on the server"""
+ if last_id == current_id:
+ return defer.succeed([])
+
def get_all_push_rule_updates_txn(txn):
sql = (
"SELECT stream_id, event_stream_ordering, user_id, rule_id,"
diff --git a/synapse/storage/tags.py b/synapse/storage/tags.py
index 9da23f34c..5a2c1aa59 100644
--- a/synapse/storage/tags.py
+++ b/synapse/storage/tags.py
@@ -68,6 +68,9 @@ class TagsStore(SQLBaseStore):
A deferred list of tuples of stream_id int, user_id string,
room_id string, tag string and content string.
"""
+ if last_id == current_id:
+ defer.returnValue([])
+
def get_all_updated_tags_txn(txn):
sql = (
"SELECT stream_id, user_id, room_id"
From 17aab5827a1a1eace4e44d130eef7da4dda6984f Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 8 Jun 2016 11:55:31 +0100
Subject: [PATCH 120/414] Add some logging for when servers ask for missing
events
---
synapse/federation/federation_server.py | 19 +++++++++++++++++++
1 file changed, 19 insertions(+)
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index f1d231b9d..9f2a64ded 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -377,10 +377,20 @@ class FederationServer(FederationBase):
@log_function
def on_get_missing_events(self, origin, room_id, earliest_events,
latest_events, limit, min_depth):
+ logger.info(
+ "on_get_missing_events: earliest_events: %r, latest_events: %r,"
+ " limit: %d, min_depth: %d",
+ earliest_events, latest_events, limit, min_depth
+ )
missing_events = yield self.handler.on_get_missing_events(
origin, room_id, earliest_events, latest_events, limit, min_depth
)
+ if len(missing_events) < 5:
+ logger.info("Returning %d events: %r", len(missing_events), missing_events)
+ else:
+ logger.info("Returning %d events", len(missing_events))
+
time_now = self._clock.time_msec()
defer.returnValue({
@@ -490,6 +500,11 @@ class FederationServer(FederationBase):
latest = set(latest)
latest |= seen
+ logger.info(
+ "Missing %d events for room %r: %r...",
+ len(prevs - seen), pdu.room_id, list(prevs - seen)[:5]
+ )
+
missing_events = yield self.get_missing_events(
origin,
pdu.room_id,
@@ -517,6 +532,10 @@ class FederationServer(FederationBase):
prevs = {e_id for e_id, _ in pdu.prev_events}
seen = set(have_seen.keys())
if prevs - seen:
+ logger.info(
+ "Still missing %d events for room %r: %r...",
+ len(prevs - seen), pdu.room_id, list(prevs - seen)[:5]
+ )
fetch_state = True
if fetch_state:
From 1fd6eb695d1fffbe830faf50c13607116300095b Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 8 Jun 2016 14:15:18 +0100
Subject: [PATCH 121/414] Enable auth on federation PublicRoomList
---
synapse/federation/transport/server.py | 5 -----
1 file changed, 5 deletions(-)
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index a1a334955..ab9f38f01 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -532,11 +532,6 @@ class PublicRoomList(BaseFederationServlet):
data = yield self.room_list_handler.get_local_public_room_list()
defer.returnValue((200, data))
- # Avoid doing remote HS authorization checks which are done by default by
- # BaseFederationServlet.
- def _wrap(self, code):
- return code
-
SERVLET_CLASSES = (
FederationSendServlet,
From efeabd31801224cbacd31b61ff0d869b70b1820d Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 8 Jun 2016 14:23:15 +0100
Subject: [PATCH 122/414] Log user that is making /publicRooms calls
---
synapse/rest/client/v1/room.py | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py
index db52a1fc3..604c2a565 100644
--- a/synapse/rest/client/v1/room.py
+++ b/synapse/rest/client/v1/room.py
@@ -279,6 +279,13 @@ class PublicRoomListRestServlet(ClientV1RestServlet):
@defer.inlineCallbacks
def on_GET(self, request):
+ try:
+ yield self.auth.get_user_by_req(request)
+ except AuthError:
+ # This endpoint isn't authed, but it's useful to know who's hitting
+ # it if they *do* supply an access token
+ pass
+
handler = self.hs.get_room_list_handler()
data = yield handler.get_aggregated_public_room_list()
From d88faf92d16d9384433452e4fb7901fd2bd6eda4 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 8 Jun 2016 14:39:31 +0100
Subject: [PATCH 123/414] Fix up federation PublicRoomList
---
synapse/federation/transport/server.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index ab9f38f01..6fc3e2207 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -528,7 +528,7 @@ class PublicRoomList(BaseFederationServlet):
PATH = "/publicRooms"
@defer.inlineCallbacks
- def on_GET(self, request):
+ def on_GET(self, origin, content, query):
data = yield self.room_list_handler.get_local_public_room_list()
defer.returnValue((200, data))
From 690029d1a3ebd26f56656a723fefdeafd71310e4 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 8 Jun 2016 14:47:42 +0100
Subject: [PATCH 124/414] Don't make rooms visible by default
---
synapse/rest/client/v1/room.py | 2 --
1 file changed, 2 deletions(-)
diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py
index 604c2a565..86fbe2747 100644
--- a/synapse/rest/client/v1/room.py
+++ b/synapse/rest/client/v1/room.py
@@ -72,8 +72,6 @@ class RoomCreateRestServlet(ClientV1RestServlet):
def get_room_config(self, request):
user_supplied_config = parse_json_object_from_request(request)
- # default visibility
- user_supplied_config.setdefault("visibility", "public")
return user_supplied_config
def on_OPTIONS(self, request):
From defa28efa186013fab18b3da76f60273cb6c3bb1 Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Wed, 8 Jun 2016 15:11:31 +0100
Subject: [PATCH 125/414] Disable the synchrotron on jenkins until the sytest
support lands (#855)
* Disable the synchrotron on jenkins until the sytest support lands
* Poke jenkins
* Poke jenkins
* Poke jenkins
* Poke jenkins
* Poke jenkins
* Poke jenkins
* Poke jenkins
* Poke jenkins
---
jenkins-dendron-postgres.sh | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/jenkins-dendron-postgres.sh b/jenkins-dendron-postgres.sh
index 8e3a4c51a..d15836e6b 100755
--- a/jenkins-dendron-postgres.sh
+++ b/jenkins-dendron-postgres.sh
@@ -73,11 +73,12 @@ git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling b
./jenkins/prep_sytest_for_postgres.sh
+mkdir -p var
+
echo >&2 "Running sytest with PostgreSQL";
./jenkins/install_and_run.sh --python $TOX_BIN/python \
--synapse-directory $WORKSPACE \
--dendron $WORKSPACE/dendron/bin/dendron \
- --synchrotron \
--pusher \
--port-base $PORT_BASE
From 81c07a32fd27260b5112dcc87845a9e87fa5db58 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 8 Jun 2016 15:43:37 +0100
Subject: [PATCH 126/414] Pull full state for each room all at once
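Fetching the full room state once and reading each key out of the map
avoids a round of per-key lookups. A minimal sketch of the access
pattern the diff below relies on (the state map is keyed by
``(event_type, state_key)``; ``StateEvent`` is a stand-in for synapse's
event class)::

    from collections import namedtuple

    StateEvent = namedtuple("StateEvent", ["type", "membership", "content"])

    def count_joined_members(current_state):
        # current_state maps (event_type, state_key) -> event, as
        # returned by a single get_current_state() call.
        return len([
            1 for event in current_state.values()
            if event.type == "m.room.member"
            and event.membership == "join"
        ])

    def describe_room(room_id, current_state):
        result = {"room_id": room_id}
        result["num_joined_members"] = count_joined_members(current_state)
        name_event = current_state.get(("m.room.name", ""))
        if name_event:
            name = name_event.content.get("name")
            if name:
                result["name"] = name
        return result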
---
synapse/handlers/room.py | 32 ++++++++++++++++----------------
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 9fd34588d..ae44c7a55 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -20,7 +20,7 @@ from ._base import BaseHandler
from synapse.types import UserID, RoomAlias, RoomID, RoomStreamToken
from synapse.api.constants import (
- EventTypes, JoinRules, RoomCreationPreset,
+ EventTypes, JoinRules, RoomCreationPreset, Membership,
)
from synapse.api.errors import AuthError, StoreError, SynapseError
from synapse.util import stringutils
@@ -367,14 +367,10 @@ class RoomListHandler(BaseHandler):
@defer.inlineCallbacks
def handle_room(room_id):
- # We pull each bit of state out indvidually to avoid pulling the
- # full state into memory. Due to how the caching works this should
- # be fairly quick, even if not originally in the cache.
- def get_state(etype, state_key):
- return self.state_handler.get_current_state(room_id, etype, state_key)
+ current_state = yield self.state_handler.get_current_state(room_id)
# Double check that this is actually a public room.
- join_rules_event = yield get_state(EventTypes.JoinRules, "")
+ join_rules_event = current_state.get((EventTypes.JoinRules, ""))
if join_rules_event:
join_rule = join_rules_event.content.get("join_rule", None)
if join_rule and join_rule != JoinRules.PUBLIC:
@@ -382,47 +378,51 @@ class RoomListHandler(BaseHandler):
result = {"room_id": room_id}
- joined_users = yield self.store.get_users_in_room(room_id)
- if len(joined_users) == 0:
+ num_joined_users = len([
+ 1 for _, event in current_state.items()
+ if event.type == EventTypes.Member
+ and event.membership == Membership.JOIN
+ ])
+ if num_joined_users == 0:
return
- result["num_joined_members"] = len(joined_users)
+ result["num_joined_members"] = num_joined_users
aliases = yield self.store.get_aliases_for_room(room_id)
if aliases:
result["aliases"] = aliases
- name_event = yield get_state(EventTypes.Name, "")
+ name_event = current_state.get((EventTypes.Name, ""))
if name_event:
name = name_event.content.get("name", None)
if name:
result["name"] = name
- topic_event = yield get_state(EventTypes.Topic, "")
+ topic_event = current_state.get((EventTypes.Topic, ""))
if topic_event:
topic = topic_event.content.get("topic", None)
if topic:
result["topic"] = topic
- canonical_event = yield get_state(EventTypes.CanonicalAlias, "")
+ canonical_event = current_state.get((EventTypes.CanonicalAlias, ""))
if canonical_event:
canonical_alias = canonical_event.content.get("alias", None)
if canonical_alias:
result["canonical_alias"] = canonical_alias
- visibility_event = yield get_state(EventTypes.RoomHistoryVisibility, "")
+ visibility_event = current_state.get((EventTypes.RoomHistoryVisibility, ""))
visibility = None
if visibility_event:
visibility = visibility_event.content.get("history_visibility", None)
result["world_readable"] = visibility == "world_readable"
- guest_event = yield get_state(EventTypes.GuestAccess, "")
+ guest_event = current_state.get((EventTypes.GuestAccess, ""))
guest = None
if guest_event:
guest = guest_event.content.get("guest_access", None)
result["guest_can_join"] = guest == "can_join"
- avatar_event = yield get_state("m.room.avatar", "")
+ avatar_event = current_state.get(("m.room.avatar", ""))
if avatar_event:
avatar_url = avatar_event.content.get("url", None)
if avatar_url:
From 6e7dc7c7dde377794c23d5db6f25ffacfb08e82a Mon Sep 17 00:00:00 2001
From: Negar Fazeli
Date: Wed, 8 Jun 2016 19:16:46 +0200
Subject: [PATCH 127/414] Fix a bug caused by a change in the auth_handler function
Fix the relevant unit test cases
---
synapse/handlers/register.py | 4 ++--
tests/handlers/test_register.py | 9 +++------
2 files changed, 5 insertions(+), 8 deletions(-)
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index bbc07b045..e0aaefe7b 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -388,8 +388,8 @@ class RegistrationHandler(BaseHandler):
user = UserID(localpart, self.hs.hostname)
user_id = user.to_string()
- auth_handler = self.hs.get_handlers().auth_handler
- token = auth_handler.generate_short_term_login_token(user_id, duration_seconds)
+ token = self.auth_handler().generate_short_term_login_token(
+ user_id, duration_seconds)
if need_register:
yield self.store.register(
diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py
index 9d5c653b4..69a5e5b1d 100644
--- a/tests/handlers/test_register.py
+++ b/tests/handlers/test_register.py
@@ -41,14 +41,15 @@ class RegistrationTestCase(unittest.TestCase):
handlers=None,
http_client=None,
expire_access_token=True)
+ self.auth_handler = Mock(
+ generate_short_term_login_token=Mock(return_value='secret'))
self.hs.handlers = RegistrationHandlers(self.hs)
self.handler = self.hs.get_handlers().registration_handler
self.hs.get_handlers().profile_handler = Mock()
self.mock_handler = Mock(spec=[
"generate_short_term_login_token",
])
-
- self.hs.get_handlers().auth_handler = self.mock_handler
+ self.hs.get_auth_handler = Mock(return_value=self.auth_handler)
@defer.inlineCallbacks
def test_user_is_created_and_logged_in_if_doesnt_exist(self):
@@ -56,8 +57,6 @@ class RegistrationTestCase(unittest.TestCase):
local_part = "someone"
display_name = "someone"
user_id = "@someone:test"
- mock_token = self.mock_handler.generate_short_term_login_token
- mock_token.return_value = 'secret'
result_user_id, result_token = yield self.handler.get_or_create_user(
local_part, display_name, duration_ms)
self.assertEquals(result_user_id, user_id)
@@ -75,8 +74,6 @@ class RegistrationTestCase(unittest.TestCase):
local_part = "frank"
display_name = "Frank"
user_id = "@frank:test"
- mock_token = self.mock_handler.generate_short_term_login_token
- mock_token.return_value = 'secret'
result_user_id, result_token = yield self.handler.get_or_create_user(
local_part, display_name, duration_ms)
self.assertEquals(result_user_id, user_id)
From 95f305c35a790e8f10fef7e16268dfaba6bc4c31 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 9 Jun 2016 10:57:11 +0100
Subject: [PATCH 128/414] Remove redundant exception log in /events
---
synapse/rest/client/v1/events.py | 41 +++++++++++++++-----------------
1 file changed, 19 insertions(+), 22 deletions(-)
diff --git a/synapse/rest/client/v1/events.py b/synapse/rest/client/v1/events.py
index d1afa0f0d..498bb9e18 100644
--- a/synapse/rest/client/v1/events.py
+++ b/synapse/rest/client/v1/events.py
@@ -45,30 +45,27 @@ class EventStreamRestServlet(ClientV1RestServlet):
raise SynapseError(400, "Guest users must specify room_id param")
if "room_id" in request.args:
room_id = request.args["room_id"][0]
- try:
- handler = self.handlers.event_stream_handler
- pagin_config = PaginationConfig.from_request(request)
- timeout = EventStreamRestServlet.DEFAULT_LONGPOLL_TIME_MS
- if "timeout" in request.args:
- try:
- timeout = int(request.args["timeout"][0])
- except ValueError:
- raise SynapseError(400, "timeout must be in milliseconds.")
- as_client_event = "raw" not in request.args
+ handler = self.handlers.event_stream_handler
+ pagin_config = PaginationConfig.from_request(request)
+ timeout = EventStreamRestServlet.DEFAULT_LONGPOLL_TIME_MS
+ if "timeout" in request.args:
+ try:
+ timeout = int(request.args["timeout"][0])
+ except ValueError:
+ raise SynapseError(400, "timeout must be in milliseconds.")
- chunk = yield handler.get_stream(
- requester.user.to_string(),
- pagin_config,
- timeout=timeout,
- as_client_event=as_client_event,
- affect_presence=(not is_guest),
- room_id=room_id,
- is_guest=is_guest,
- )
- except:
- logger.exception("Event stream failed")
- raise
+ as_client_event = "raw" not in request.args
+
+ chunk = yield handler.get_stream(
+ requester.user.to_string(),
+ pagin_config,
+ timeout=timeout,
+ as_client_event=as_client_event,
+ affect_presence=(not is_guest),
+ room_id=room_id,
+ is_guest=is_guest,
+ )
defer.returnValue((200, chunk))
From eba4ff1bcbd99ae5b23f7cdae2306662319d3b4a Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 9 Jun 2016 11:29:43 +0100
Subject: [PATCH 129/414] 502 on /thumbnail when can't contact remote server
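The download of the remote file is wrapped so that any failure is
surfaced as a 502 rather than a generic 500. The shape of the fix, as a
sketch with stand-ins for the federation client call and error class::

    import logging

    logger = logging.getLogger(__name__)

    class GatewayError(Exception):
        # Stand-in for SynapseError(502, ...) used in the diff below.
        def __init__(self, code, msg):
            super(GatewayError, self).__init__(msg)
            self.code = code

    def download_remote_media(fetch, server_name, media_id):
        # ``fetch`` stands in for the federation client's get_file call.
        try:
            return fetch(server_name, media_id)
        except Exception as e:
            # Log the underlying cause, surface a 502 to the client.
            logger.warning("Failed to fetch remote media: %r", e)
            raise GatewayError(502, "Failed to fetch remote media")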
---
synapse/rest/media/v1/media_repository.py | 14 ++++++++++----
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py
index d96bf9afe..2468c3ac4 100644
--- a/synapse/rest/media/v1/media_repository.py
+++ b/synapse/rest/media/v1/media_repository.py
@@ -26,6 +26,7 @@ from .thumbnailer import Thumbnailer
from synapse.http.matrixfederationclient import MatrixFederationHttpClient
from synapse.util.stringutils import random_string
+from synapse.api.errors import SynapseError
from twisted.internet import defer, threads
@@ -134,10 +135,15 @@ class MediaRepository(object):
request_path = "/".join((
"/_matrix/media/v1/download", server_name, media_id,
))
- length, headers = yield self.client.get_file(
- server_name, request_path, output_stream=f,
- max_size=self.max_upload_size,
- )
+ try:
+ length, headers = yield self.client.get_file(
+ server_name, request_path, output_stream=f,
+ max_size=self.max_upload_size,
+ )
+ except Exception as e:
+ logger.warn("Failed to fetch remoted media %r", e)
+ raise SynapseError(502, "Failed to fetch remoted media")
+
media_type = headers["Content-Type"][0]
time_now_ms = self.clock.time_msec()
From a31befbcd0f8238920b0ca198d35ef657c78e766 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 9 Jun 2016 13:23:41 +0100
Subject: [PATCH 130/414] Bump version and changelog
---
CHANGES.rst | 10 ++++++++++
synapse/__init__.py | 2 +-
2 files changed, 11 insertions(+), 1 deletion(-)
diff --git a/CHANGES.rst b/CHANGES.rst
index 6194b3eb6..ad974c586 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -1,3 +1,13 @@
+Changes in synapse v0.16.0 (2016-06-09)
+=======================================
+
+NB: As of v0.14 all AS config files must have an ID specified.
+
+
+Bug fixes:
+
+* Don't make rooms published by default (PR #857)
+
Changes in synapse v0.16.0-rc2 (2016-06-08)
===========================================
diff --git a/synapse/__init__.py b/synapse/__init__.py
index ad088a788..dc211e963 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -16,4 +16,4 @@
""" This is a reference implementation of a Matrix home server.
"""
-__version__ = "0.16.0-rc2"
+__version__ = "0.16.0"
From 8327d5df709f1726c961743d937852e487648f5b Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 9 Jun 2016 14:16:26 +0100
Subject: [PATCH 131/414] Change CHANGELOG
---
CHANGES.rst | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/CHANGES.rst b/CHANGES.rst
index ad974c586..32f18e709 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -1,7 +1,7 @@
Changes in synapse v0.16.0 (2016-06-09)
=======================================
-NB: As of v0.14 all AS config files must have an ID specified.
+NB: As of v0.14 all AS config files must have an ID field.
Bug fixes:
From 7dbb473339bc41daf6c05b64756f97e011f653f5 Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Thu, 9 Jun 2016 18:50:38 +0100
Subject: [PATCH 132/414] Add function to load config without generating it
Renames ``load_config`` to ``load_or_generate_config``
Adds a method called ``load_config`` that just loads the
config.
The main synapse.app.homeserver will continue to use
``load_or_generate_config`` to retain backwards compat.
However new worker processes can use ``load_config`` to
load the config avoiding some of the cruft needed to generate
the config.
As the new ``load_config`` method is expected to be used by new
configs it removes support for the legacy commandline overrides
that ``load_or_generate_config`` supports
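A worker entry point can therefore be as small as the following sketch
(error handling mirrors ``synapse.app.homeserver``; the example argv is
illustrative)::

    import sys

    from synapse.config._base import ConfigError
    from synapse.config.homeserver import HomeServerConfig

    def read_worker_config(argv):
        # Workers only load an existing config; they never generate
        # one, so the plain ``load_config`` entry point suffices.
        try:
            return HomeServerConfig.load_config("Synapse worker", argv)
        except ConfigError as e:
            sys.stderr.write("\n" + str(e) + "\n")
            sys.exit(1)

    # e.g. read_worker_config(["-c", "/etc/synapse/homeserver.yaml"])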
---
synapse/app/homeserver.py | 3 +-
synapse/config/_base.py | 145 ++++++++++++++++++++++++----------
tests/config/test_generate.py | 2 +-
tests/config/test_load.py | 22 +++++-
4 files changed, 125 insertions(+), 47 deletions(-)
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 22e1721fc..40ffd9bf0 100755
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -266,10 +266,9 @@ def setup(config_options):
HomeServer
"""
try:
- config = HomeServerConfig.load_config(
+ config = HomeServerConfig.load_or_generate_config(
"Synapse Homeserver",
config_options,
- generate_section="Homeserver"
)
except ConfigError as e:
sys.stderr.write("\n" + e.message + "\n")
diff --git a/synapse/config/_base.py b/synapse/config/_base.py
index 7449f3649..af9f17bf7 100644
--- a/synapse/config/_base.py
+++ b/synapse/config/_base.py
@@ -157,9 +157,40 @@ class Config(object):
return default_config, config
@classmethod
- def load_config(cls, description, argv, generate_section=None):
- obj = cls()
+ def load_config(cls, description, argv):
+ config_parser = argparse.ArgumentParser(
+ description=description,
+ )
+ config_parser.add_argument(
+ "-c", "--config-path",
+ action="append",
+ metavar="CONFIG_FILE",
+ help="Specify config file. Can be given multiple times and"
+ " may specify directories containing *.yaml files."
+ )
+ config_parser.add_argument(
+ "--keys-directory",
+ metavar="DIRECTORY",
+ help="Where files such as certs and signing keys are stored when"
+ " their location is given explicitly in the config."
+ " Defaults to the directory containing the last config file",
+ )
+
+ config_args = config_parser.parse_args(argv)
+
+ config_files = find_config_files(search_paths=config_args.config_path)
+
+ obj = cls()
+ obj.read_config_files(
+ config_files,
+ keys_directory=config_args.keys_directory,
+ generate_keys=False,
+ )
+ return obj
+
+ @classmethod
+ def load_or_generate_config(cls, description, argv):
config_parser = argparse.ArgumentParser(add_help=False)
config_parser.add_argument(
"-c", "--config-path",
@@ -176,7 +207,7 @@ class Config(object):
config_parser.add_argument(
"--report-stats",
action="store",
- help="Stuff",
+ help="Whether the generated config reports anonymized usage statistics",
choices=["yes", "no"]
)
config_parser.add_argument(
@@ -197,36 +228,11 @@ class Config(object):
)
config_args, remaining_args = config_parser.parse_known_args(argv)
+ config_files = find_config_files(search_paths=config_args.config_path)
+
generate_keys = config_args.generate_keys
- config_files = []
- if config_args.config_path:
- for config_path in config_args.config_path:
- if os.path.isdir(config_path):
- # We accept specifying directories as config paths, we search
- # inside that directory for all files matching *.yaml, and then
- # we apply them in *sorted* order.
- files = []
- for entry in os.listdir(config_path):
- entry_path = os.path.join(config_path, entry)
- if not os.path.isfile(entry_path):
- print (
- "Found subdirectory in config directory: %r. IGNORING."
- ) % (entry_path, )
- continue
-
- if not entry.endswith(".yaml"):
- print (
- "Found file in config directory that does not"
- " end in '.yaml': %r. IGNORING."
- ) % (entry_path, )
- continue
-
- files.append(entry_path)
-
- config_files.extend(sorted(files))
- else:
- config_files.append(config_path)
+ obj = cls()
if config_args.generate_config:
if config_args.report_stats is None:
@@ -299,28 +305,43 @@ class Config(object):
" -c CONFIG-FILE\""
)
- if config_args.keys_directory:
- config_dir_path = config_args.keys_directory
- else:
- config_dir_path = os.path.dirname(config_args.config_path[-1])
- config_dir_path = os.path.abspath(config_dir_path)
+ obj.read_config_files(
+ config_files,
+ keys_directory=config_args.keys_directory,
+ generate_keys=generate_keys,
+ )
+
+ if generate_keys:
+ return None
+
+ obj.invoke_all("read_arguments", args)
+
+ return obj
+
+ def read_config_files(self, config_files, keys_directory=None,
+ generate_keys=False):
+ if not keys_directory:
+ keys_directory = os.path.dirname(config_files[-1])
+
+ config_dir_path = os.path.abspath(keys_directory)
specified_config = {}
for config_file in config_files:
- yaml_config = cls.read_config_file(config_file)
+ yaml_config = self.read_config_file(config_file)
specified_config.update(yaml_config)
if "server_name" not in specified_config:
raise ConfigError(MISSING_SERVER_NAME)
server_name = specified_config["server_name"]
- _, config = obj.generate_config(
+ _, config = self.generate_config(
config_dir_path=config_dir_path,
server_name=server_name,
is_generating_file=False,
)
config.pop("log_config")
config.update(specified_config)
+
if "report_stats" not in config:
raise ConfigError(
MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS + "\n" +
@@ -328,11 +349,51 @@ class Config(object):
)
if generate_keys:
- obj.invoke_all("generate_files", config)
+ self.invoke_all("generate_files", config)
return
- obj.invoke_all("read_config", config)
+ self.invoke_all("read_config", config)
- obj.invoke_all("read_arguments", args)
- return obj
+def find_config_files(search_paths):
+ """Finds config files using a list of search paths. If a path is a file
+ then that file path is added to the list. If a search path is a directory
+ then all the "*.yaml" files in that directory are added to the list in
+ sorted order.
+
+ Args:
+ search_paths(list(str)): A list of paths to search.
+
+ Returns:
+ list(str): A list of file paths.
+ """
+
+ config_files = []
+ if search_paths:
+ for config_path in search_paths:
+ if os.path.isdir(config_path):
+ # We accept specifying directories as config paths, we search
+ # inside that directory for all files matching *.yaml, and then
+ # we apply them in *sorted* order.
+ files = []
+ for entry in os.listdir(config_path):
+ entry_path = os.path.join(config_path, entry)
+ if not os.path.isfile(entry_path):
+ print (
+ "Found subdirectory in config directory: %r. IGNORING."
+ ) % (entry_path, )
+ continue
+
+ if not entry.endswith(".yaml"):
+ print (
+ "Found file in config directory that does not"
+ " end in '.yaml': %r. IGNORING."
+ ) % (entry_path, )
+ continue
+
+ files.append(entry_path)
+
+ config_files.extend(sorted(files))
+ else:
+ config_files.append(config_path)
+ return config_files
diff --git a/tests/config/test_generate.py b/tests/config/test_generate.py
index 4329d7397..8f57fbeb2 100644
--- a/tests/config/test_generate.py
+++ b/tests/config/test_generate.py
@@ -30,7 +30,7 @@ class ConfigGenerationTestCase(unittest.TestCase):
shutil.rmtree(self.dir)
def test_generate_config_generates_files(self):
- HomeServerConfig.load_config("", [
+ HomeServerConfig.load_or_generate_config("", [
"--generate-config",
"-c", self.file,
"--report-stats=yes",
diff --git a/tests/config/test_load.py b/tests/config/test_load.py
index bf46233c5..161a87d7e 100644
--- a/tests/config/test_load.py
+++ b/tests/config/test_load.py
@@ -34,6 +34,8 @@ class ConfigLoadingTestCase(unittest.TestCase):
self.generate_config_and_remove_lines_containing("server_name")
with self.assertRaises(Exception):
HomeServerConfig.load_config("", ["-c", self.file])
+ with self.assertRaises(Exception):
+ HomeServerConfig.load_or_generate_config("", ["-c", self.file])
def test_generates_and_loads_macaroon_secret_key(self):
self.generate_config()
@@ -54,11 +56,24 @@ class ConfigLoadingTestCase(unittest.TestCase):
"was: %r" % (config.macaroon_secret_key,)
)
+ config = HomeServerConfig.load_or_generate_config("", ["-c", self.file])
+ self.assertTrue(
+ hasattr(config, "macaroon_secret_key"),
+ "Want config to have attr macaroon_secret_key"
+ )
+ if len(config.macaroon_secret_key) < 5:
+ self.fail(
+ "Want macaroon secret key to be string of at least length 5,"
+ "was: %r" % (config.macaroon_secret_key,)
+ )
+
def test_load_succeeds_if_macaroon_secret_key_missing(self):
self.generate_config_and_remove_lines_containing("macaroon")
config1 = HomeServerConfig.load_config("", ["-c", self.file])
config2 = HomeServerConfig.load_config("", ["-c", self.file])
+ config3 = HomeServerConfig.load_or_generate_config("", ["-c", self.file])
self.assertEqual(config1.macaroon_secret_key, config2.macaroon_secret_key)
+ self.assertEqual(config1.macaroon_secret_key, config3.macaroon_secret_key)
def test_disable_registration(self):
self.generate_config()
@@ -70,14 +85,17 @@ class ConfigLoadingTestCase(unittest.TestCase):
config = HomeServerConfig.load_config("", ["-c", self.file])
self.assertFalse(config.enable_registration)
+ config = HomeServerConfig.load_or_generate_config("", ["-c", self.file])
+ self.assertFalse(config.enable_registration)
+
# Check that either config value is clobbered by the command line.
- config = HomeServerConfig.load_config("", [
+ config = HomeServerConfig.load_or_generate_config("", [
"-c", self.file, "--enable-registration"
])
self.assertTrue(config.enable_registration)
def generate_config(self):
- HomeServerConfig.load_config("", [
+ HomeServerConfig.load_or_generate_config("", [
"--generate-config",
"-c", self.file,
"--report-stats=yes",
From 50f69e2ef266cf08aaff0311705fcf56dc1bd9f3 Mon Sep 17 00:00:00 2001
From: Bartek Rutkowski
Date: Fri, 10 Jun 2016 11:33:43 +0100
Subject: [PATCH 133/414] Change /bin/bash to /bin/sh in tox.ini
No features of Bash are used here, so using /bin/sh makes it more portable to systems that don't have Bash natively (like BSD systems).
---
tox.ini | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/tox.ini b/tox.ini
index 757b7189c..52d93c65e 100644
--- a/tox.ini
+++ b/tox.ini
@@ -11,7 +11,7 @@ deps =
setenv =
PYTHONDONTWRITEBYTECODE = no_byte_code
commands =
- /bin/bash -c "find {toxinidir} -name '*.pyc' -delete ; coverage run {env:COVERAGE_OPTS:} --source={toxinidir}/synapse \
+ /bin/sh -c "find {toxinidir} -name '*.pyc' -delete ; coverage run {env:COVERAGE_OPTS:} --source={toxinidir}/synapse \
{envbindir}/trial {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:}"
{env:DUMP_COVERAGE_COMMAND:coverage report -m}
@@ -26,4 +26,4 @@ skip_install = True
basepython = python2.7
deps =
flake8
-commands = /bin/bash -c "flake8 synapse tests {env:PEP8SUFFIX:}"
+commands = /bin/sh -c "flake8 synapse tests {env:PEP8SUFFIX:}"
From fdc015c6e9b023c5cb87491b7e64efd46eedd129 Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Fri, 10 Jun 2016 16:30:26 +0100
Subject: [PATCH 134/414] Enable testing the synchrotron on jenkins
---
jenkins-dendron-postgres.sh | 1 +
1 file changed, 1 insertion(+)
diff --git a/jenkins-dendron-postgres.sh b/jenkins-dendron-postgres.sh
index d15836e6b..7e6f24aa7 100755
--- a/jenkins-dendron-postgres.sh
+++ b/jenkins-dendron-postgres.sh
@@ -80,6 +80,7 @@ echo >&2 "Running sytest with PostgreSQL";
--synapse-directory $WORKSPACE \
--dendron $WORKSPACE/dendron/bin/dendron \
--pusher \
+ --synchrotron \
--port-base $PORT_BASE
cd ..
From 33546b58aa0a235d159269b8705a8999d219f41a Mon Sep 17 00:00:00 2001
From: Matthew Hodgson
Date: Sun, 12 Jun 2016 23:11:29 +0100
Subject: [PATCH 135/414] point to the CAPTCHA docs
---
synapse/config/captcha.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/synapse/config/captcha.py b/synapse/config/captcha.py
index b54dbabbe..7ba0c2de6 100644
--- a/synapse/config/captcha.py
+++ b/synapse/config/captcha.py
@@ -27,6 +27,7 @@ class CaptchaConfig(Config):
def default_config(self, **kwargs):
return """\
## Captcha ##
+ # See docs/CAPTCHA_SETUP for full details of configuring this.
# This Home Server's ReCAPTCHA public key.
recaptcha_public_key: "YOUR_PUBLIC_KEY"
From 36e2aade8790f3f2d86e8f6cc8a6de21e8bec4fa Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 14 Jun 2016 13:25:29 +0100
Subject: [PATCH 136/414] Make get_domain_from_id throw SynapseError on invalid
ID
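A malformed ID now surfaces as a 400 client error instead of an
``IndexError``; for example (sketch, using the patched helper)::

    from synapse.api.errors import SynapseError
    from synapse.types import get_domain_from_id

    assert get_domain_from_id("@alice:example.com") == "example.com"
    try:
        get_domain_from_id("no-colon-here")
    except SynapseError as e:
        assert e.code == 400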
---
synapse/types.py | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/synapse/types.py b/synapse/types.py
index 7b6ae44bd..f639651a7 100644
--- a/synapse/types.py
+++ b/synapse/types.py
@@ -22,7 +22,10 @@ Requester = namedtuple("Requester", ["user", "access_token_id", "is_guest"])
def get_domain_from_id(string):
- return string.split(":", 1)[1]
+ try:
+ return string.split(":", 1)[1]
+ except IndexError:
+ raise SynapseError(400, "Invalid ID: %r" % (string,))
class DomainSpecificString(
From 255c229f23635f7dc0299de5d54460eea2e2af1c Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Wed, 15 Jun 2016 10:23:03 +0100
Subject: [PATCH 137/414] Work around TLS bug in twisted
Wrap up twisted's FileBodyProducer to work around
https://twistedmatrix.com/trac/ticket/8473. Hopefully this fixes
https://matrix.org/jira/browse/SYN-700.
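Because the subclass keeps the ``FileBodyProducer`` name and signature,
existing call sites pick it up unchanged. A sketch of typical use with
a twisted ``Agent`` (python 2 era; method and URI are illustrative)::

    from StringIO import StringIO

    from twisted.internet import reactor
    from twisted.web.client import Agent

    from synapse.http.client import FileBodyProducer

    agent = Agent(reactor)
    d = agent.request(
        "PUT", "http://example.com/upload",
        bodyProducer=FileBodyProducer(StringIO("payload")),
    )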
---
synapse/http/client.py | 28 ++++++++++++++++++++++++++--
1 file changed, 26 insertions(+), 2 deletions(-)
diff --git a/synapse/http/client.py b/synapse/http/client.py
index c7fa69243..3ec9bc7fa 100644
--- a/synapse/http/client.py
+++ b/synapse/http/client.py
@@ -24,12 +24,13 @@ from synapse.http.endpoint import SpiderEndpoint
from canonicaljson import encode_canonical_json
-from twisted.internet import defer, reactor, ssl, protocol
+from twisted.internet import defer, reactor, ssl, protocol, task
from twisted.internet.endpoints import SSL4ClientEndpoint, TCP4ClientEndpoint
from twisted.web.client import (
BrowserLikeRedirectAgent, ContentDecoderAgent, GzipDecoder, Agent,
- readBody, FileBodyProducer, PartialDownloadError,
+ readBody, PartialDownloadError,
)
+from twisted.web.client import FileBodyProducer as TwistedFileBodyProducer
from twisted.web.http import PotentialDataLoss
from twisted.web.http_headers import Headers
from twisted.web._newclient import ResponseDone
@@ -468,3 +469,26 @@ class InsecureInterceptableContextFactory(ssl.ContextFactory):
def creatorForNetloc(self, hostname, port):
return self
+
+
+class FileBodyProducer(TwistedFileBodyProducer):
+ """Workaround for https://twistedmatrix.com/trac/ticket/8473
+
+ We override the pauseProducing and resumeProducing methods in twisted's
+ FileBodyProducer so that they do not raise exceptions if the task has
+ already completed.
+ """
+
+ def pauseProducing(self):
+ try:
+ super(FileBodyProducer, self).pauseProducing()
+ except task.TaskDone:
+ # task has already completed
+ pass
+
+ def resumeProducing(self):
+ try:
+ super(FileBodyProducer, self).resumeProducing()
+ except task.NotPaused:
+ # task was not paused (probably because it had already completed)
+ pass
From b31c49d6760b4cdeefc8e0b43d6639be4576e249 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 15 Jun 2016 10:58:07 +0100
Subject: [PATCH 138/414] Correctly mark backfilled events as backfilled
---
synapse/handlers/federation.py | 30 ++++++++++++++++--------------
1 file changed, 16 insertions(+), 14 deletions(-)
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index ff83c608e..c2df43e2f 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -345,19 +345,21 @@ class FederationHandler(BaseHandler):
)
missing_auth = required_auth - set(auth_events)
- results = yield defer.gatherResults(
- [
- self.replication_layer.get_pdu(
- [dest],
- event_id,
- outlier=True,
- timeout=10000,
- )
- for event_id in missing_auth
- ],
- consumeErrors=True
- ).addErrback(unwrapFirstError)
- auth_events.update({a.event_id: a for a in results})
+ if missing_auth:
+ logger.info("Missing auth for backfill: %r", missing_auth)
+ results = yield defer.gatherResults(
+ [
+ self.replication_layer.get_pdu(
+ [dest],
+ event_id,
+ outlier=True,
+ timeout=10000,
+ )
+ for event_id in missing_auth
+ ],
+ consumeErrors=True
+ ).addErrback(unwrapFirstError)
+ auth_events.update({a.event_id: a for a in results})
ev_infos = []
for a in auth_events.values():
@@ -399,7 +401,7 @@ class FederationHandler(BaseHandler):
# previous to work out the state.
# TODO: We can probably do something more clever here.
yield self._handle_new_event(
- dest, event
+ dest, event, backfilled=True,
)
defer.returnValue(events)
From d41a1a91d3cce28e5416a91b7494d079e4c765f0 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 15 Jun 2016 15:12:59 +0100
Subject: [PATCH 139/414] Linearize fetching of gaps on incoming events
This potentially stops the server from doing multiple requests for the
same data.
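The mechanism is a per-room ``Linearizer``: a second PDU arriving for
the same room waits for the first gap-fill to finish, then re-checks
what is still missing before fetching. A minimal sketch of that shape
(``do_fetch`` stands in for the expensive backfill call)::

    from twisted.internet import defer

    from synapse.util.async import Linearizer

    linearizer = Linearizer()

    @defer.inlineCallbacks
    def fill_gap(store, room_id, prevs, do_fetch):
        # Only one gap-fill per room at a time; later callers queue.
        with (yield linearizer.queue(room_id)):
            # Re-check: an earlier caller may have fetched these.
            have_seen = yield store.have_events(prevs)
            missing = set(prevs) - set(have_seen.keys())
            if missing:
                yield do_fetch(room_id, missing)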
---
synapse/federation/federation_base.py | 3 +
synapse/federation/federation_client.py | 2 +
synapse/federation/federation_server.py | 76 +++++++++++++++----------
synapse/federation/replication.py | 2 +
4 files changed, 52 insertions(+), 31 deletions(-)
diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py
index a0b7cb796..da2f5e8cf 100644
--- a/synapse/federation/federation_base.py
+++ b/synapse/federation/federation_base.py
@@ -31,6 +31,9 @@ logger = logging.getLogger(__name__)
class FederationBase(object):
+ def __init__(self, hs):
+ pass
+
@defer.inlineCallbacks
def _check_sigs_and_hash_and_fetch(self, origin, pdus, outlier=False,
include_none=False):
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index d835c1b03..b06387051 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -52,6 +52,8 @@ sent_queries_counter = metrics.register_counter("sent_queries", labels=["type"])
class FederationClient(FederationBase):
+ def __init__(self, hs):
+ super(FederationClient, self).__init__(hs)
def start_get_pdu_cache(self):
self._get_pdu_cache = ExpiringCache(
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 9f2a64ded..fe92457ba 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -19,6 +19,7 @@ from twisted.internet import defer
from .federation_base import FederationBase
from .units import Transaction, Edu
+from synapse.util.async import Linearizer
from synapse.util.logutils import log_function
from synapse.events import FrozenEvent
import synapse.metrics
@@ -44,6 +45,11 @@ received_queries_counter = metrics.register_counter("received_queries", labels=[
class FederationServer(FederationBase):
+ def __init__(self, hs):
+ super(FederationServer, self).__init__(hs)
+
+ self._room_pdu_linearizer = Linearizer()
+
def set_handler(self, handler):
"""Sets the handler that the replication layer will use to communicate
receipt of new PDUs from other home servers. The required methods are
@@ -491,43 +497,51 @@ class FederationServer(FederationBase):
pdu.internal_metadata.outlier = True
elif min_depth and pdu.depth > min_depth:
if get_missing and prevs - seen:
- latest = yield self.store.get_latest_event_ids_in_room(
- pdu.room_id
- )
+ # If we're missing stuff, ensure we only fetch stuff one
+ # at a time.
+ with (yield self._room_pdu_linearizer.queue(pdu.room_id)):
+ # We recalculate seen, since it may have changed.
+ have_seen = yield self.store.have_events(prevs)
+ seen = set(have_seen.keys())
- # We add the prev events that we have seen to the latest
- # list to ensure the remote server doesn't give them to us
- latest = set(latest)
- latest |= seen
+ if prevs - seen:
+ latest = yield self.store.get_latest_event_ids_in_room(
+ pdu.room_id
+ )
- logger.info(
- "Missing %d events for room %r: %r...",
- len(prevs - seen), pdu.room_id, list(prevs - seen)[:5]
- )
+ # We add the prev events that we have seen to the latest
+ # list to ensure the remote server doesn't give them to us
+ latest = set(latest)
+ latest |= seen
- missing_events = yield self.get_missing_events(
- origin,
- pdu.room_id,
- earliest_events_ids=list(latest),
- latest_events=[pdu],
- limit=10,
- min_depth=min_depth,
- )
+ logger.info(
+ "Missing %d events for room %r: %r...",
+ len(prevs - seen), pdu.room_id, list(prevs - seen)[:5]
+ )
- # We want to sort these by depth so we process them and
- # tell clients about them in order.
- missing_events.sort(key=lambda x: x.depth)
+ missing_events = yield self.get_missing_events(
+ origin,
+ pdu.room_id,
+ earliest_events_ids=list(latest),
+ latest_events=[pdu],
+ limit=10,
+ min_depth=min_depth,
+ )
- for e in missing_events:
- yield self._handle_new_pdu(
- origin,
- e,
- get_missing=False
- )
+ # We want to sort these by depth so we process them and
+ # tell clients about them in order.
+ missing_events.sort(key=lambda x: x.depth)
- have_seen = yield self.store.have_events(
- [ev for ev, _ in pdu.prev_events]
- )
+ for e in missing_events:
+ yield self._handle_new_pdu(
+ origin,
+ e,
+ get_missing=False
+ )
+
+ have_seen = yield self.store.have_events(
+ [ev for ev, _ in pdu.prev_events]
+ )
prevs = {e_id for e_id, _ in pdu.prev_events}
seen = set(have_seen.keys())
diff --git a/synapse/federation/replication.py b/synapse/federation/replication.py
index 3e062a5ea..ea66a5dcb 100644
--- a/synapse/federation/replication.py
+++ b/synapse/federation/replication.py
@@ -72,5 +72,7 @@ class ReplicationLayer(FederationClient, FederationServer):
self.hs = hs
+ super(ReplicationLayer, self).__init__(hs)
+
def __str__(self):
return "" % self.server_name
From 0ef0655b83adee8671358e35c42e2e646ef8d2fd Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 15 Jun 2016 15:50:17 +0100
Subject: [PATCH 140/414] Bump version and changelog
---
CHANGES.rst | 27 ++++++++++++++++++++++++++-
synapse/__init__.py | 2 +-
2 files changed, 27 insertions(+), 2 deletions(-)
diff --git a/CHANGES.rst b/CHANGES.rst
index 32f18e709..d5f465792 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -1,3 +1,28 @@
+Changes in synapse v0.16.1-rc1 (2016-06-15)
+===========================================
+
+Features: None
+
+Changes:
+
+* Log requester for ``/publicRoom`` endpoints when possible (PR #856)
+* 502 on ``/thumbnail`` when can't connect to remote server (PR #862)
+* Linearize fetching of gaps on incoming events (PR #871)
+
+
+Bug fixes:
+
+* Fix bug where rooms were marked as published by default (PR #857)
+* Fix bug where joining room with an event with invalid sender (PR #868)
+* Fix bug where backfilled events were sent down sync streams (PR #869)
+* Fix bug where outgoing connections could wedge indefinitely (PR #870)
+
+
+Performance improvements:
+
+* Improve ``/publicRooms`` performance (PR #859)
+
+
Changes in synapse v0.16.0 (2016-06-09)
=======================================
@@ -28,7 +53,7 @@ Bug fixes:
* Fix bug where synapse sent malformed transactions to AS's when retrying
transactions (Commits 310197b, 8437906)
-Performance Improvements:
+Performance improvements:
* Remove event fetching from DB threads (PR #835)
* Change the way we cache events (PR #836)
diff --git a/synapse/__init__.py b/synapse/__init__.py
index dc211e963..faaa86d97 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -16,4 +16,4 @@
""" This is a reference implementation of a Matrix home server.
"""
-__version__ = "0.16.0"
+__version__ = "0.16.1-rc1"
From 0477368e9afb7de9d8f95352f47973e51f0a837c Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 15 Jun 2016 16:06:26 +0100
Subject: [PATCH 141/414] Update change log
---
CHANGES.rst | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/CHANGES.rst b/CHANGES.rst
index d5f465792..1a47aae85 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -15,7 +15,8 @@ Bug fixes:
* Fix bug where rooms were marked as published by default (PR #857)
* Fix bug where joining room with an event with invalid sender (PR #868)
* Fix bug where backfilled events were sent down sync streams (PR #869)
-* Fix bug where outgoing connections could wedge indefinitely (PR #870)
+* Fix bug where outgoing connections could wedge indefinitely, causing push
+ notifications to be unreliable (PR #870)
Performance improvements:
From a60169ea0987df41ee540eefbb77cf3ff53446bc Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 15 Jun 2016 16:57:48 +0100
Subject: [PATCH 142/414] Handle og props with no content
---
synapse/rest/media/v1/preview_url_resource.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index 37dd1de89..fc72896e0 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -252,7 +252,8 @@ class PreviewUrlResource(Resource):
og = {}
for tag in tree.xpath("//*/meta[starts-with(@property, 'og:')]"):
- og[tag.attrib['property']] = tag.attrib['content']
+ if 'content' in tag.attrib:
+ og[tag.attrib['property']] = tag.attrib['content']
# TODO: grab article: meta tags too, e.g.:
From 1e9026e484be0f90256ae60c05eed9d1f87cf6b9 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 15 Jun 2016 16:58:05 +0100
Subject: [PATCH 143/414] Handle floats as img widths
---
synapse/rest/media/v1/preview_url_resource.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index fc72896e0..a6807df62 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -280,7 +280,7 @@ class PreviewUrlResource(Resource):
# TODO: consider inlined CSS styles as well as width & height attribs
images = tree.xpath("//img[@src][number(@width)>10][number(@height)>10]")
images = sorted(images, key=lambda i: (
- -1 * int(i.attrib['width']) * int(i.attrib['height'])
+ -1 * float(i.attrib['width']) * float(i.attrib['height'])
))
if not images:
images = tree.xpath("//img[@src]")
From 09a17f965cf55dca45983473ed744f539b9ec92e Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 15 Jun 2016 16:58:12 +0100
Subject: [PATCH 144/414] Line lengths
---
synapse/rest/media/v1/preview_url_resource.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index a6807df62..74c64f137 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -288,9 +288,9 @@ class PreviewUrlResource(Resource):
og['og:image'] = images[0].attrib['src']
# pre-cache the image for posterity
- # FIXME: it might be cleaner to use the same flow as the main /preview_url request
- # itself and benefit from the same caching etc. But for now we just rely on the
- # caching on the master request to speed things up.
+ # FIXME: it might be cleaner to use the same flow as the main /preview_url
+ # request itself and benefit from the same caching etc. But for now we
+ # just rely on the caching on the master request to speed things up.
if 'og:image' in og and og['og:image']:
image_info = yield self._download_url(
self._rebase_url(og['og:image'], media_info['uri']), requester.user
From ed5f43a55accc8502a60b721871b208db704de3e Mon Sep 17 00:00:00 2001
From: Salvatore LaMendola
Date: Thu, 16 Jun 2016 00:43:42 -0400
Subject: [PATCH 145/414] Fix TypeError in call to bcrypt.hashpw
At the very least, this TypeError caused logins to fail on my own running
instance of Synapse, and the simple (explicit) UTF-8 conversion resolved
login errors for me.
Signed-off-by: Salvatore LaMendola
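The underlying issue is ``bcrypt.hashpw`` rejecting a unicode hash;
encoding to bytes before the comparison avoids the ``TypeError``. A
minimal sketch of the round trip (encoding the password too is a
defensive extra, not part of the one-line fix below)::

    import bcrypt

    def validate_hash(password, stored_hash):
        # hashpw must never see a unicode hash; encode both arguments.
        password = password.encode("utf-8")
        stored_hash = stored_hash.encode("utf-8")
        return bcrypt.hashpw(password, stored_hash) == stored_hash

    stored = bcrypt.hashpw(b"s3kr1t", bcrypt.gensalt()).decode("utf-8")
    assert validate_hash("s3kr1t", stored)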
---
synapse/handlers/auth.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index 200793b5e..b38f81e99 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -626,6 +626,6 @@ class AuthHandler(BaseHandler):
Whether self.hash(password) == stored_hash (bool).
"""
if stored_hash:
- return bcrypt.hashpw(password, stored_hash) == stored_hash
+ return bcrypt.hashpw(password, stored_hash.encode('utf-8')) == stored_hash
else:
return False
From 885ee861f7270fef1370a2d63e009a8fceaf8dd5 Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Thu, 16 Jun 2016 11:06:12 +0100
Subject: [PATCH 146/414] Inline the synchrotron and pusher configs into the
main config
---
synapse/app/pusher.py | 169 ++++++++++-------------------------
synapse/app/synchrotron.py | 135 +++++++---------------------
synapse/config/homeserver.py | 4 +-
synapse/config/logger.py | 86 +++++++++---------
synapse/config/server.py | 31 ++++---
5 files changed, 144 insertions(+), 281 deletions(-)
diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py
index 4ec23d84c..6c8c02fb3 100644
--- a/synapse/app/pusher.py
+++ b/synapse/app/pusher.py
@@ -18,10 +18,9 @@ import synapse
from synapse.server import HomeServer
from synapse.config._base import ConfigError
-from synapse.config.database import DatabaseConfig
-from synapse.config.logger import LoggingConfig
-from synapse.config.emailconfig import EmailConfig
-from synapse.config.key import KeyConfig
+from synapse.config.workers import clobber_with_worker_config
+from synapse.config.logger import setup_logging
+from synapse.config.homeserver import HomeServerConfig
from synapse.http.site import SynapseSite
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse.storage.roommember import RoomMemberStore
@@ -43,98 +42,12 @@ from twisted.web.resource import Resource
from daemonize import Daemonize
-import gc
import sys
import logging
logger = logging.getLogger("synapse.app.pusher")
-class SlaveConfig(DatabaseConfig):
- def read_config(self, config):
- self.replication_url = config["replication_url"]
- self.server_name = config["server_name"]
- self.use_insecure_ssl_client_just_for_testing_do_not_use = config.get(
- "use_insecure_ssl_client_just_for_testing_do_not_use", False
- )
- self.user_agent_suffix = None
- self.start_pushers = True
- self.listeners = config["listeners"]
- self.soft_file_limit = config.get("soft_file_limit")
- self.daemonize = config.get("daemonize")
- self.pid_file = self.abspath(config.get("pid_file"))
- self.public_baseurl = config["public_baseurl"]
-
- thresholds = config.get("gc_thresholds", None)
- if thresholds is not None:
- try:
- assert len(thresholds) == 3
- self.gc_thresholds = (
- int(thresholds[0]), int(thresholds[1]), int(thresholds[2]),
- )
- except:
- raise ConfigError(
- "Value of `gc_threshold` must be a list of three integers if set"
- )
- else:
- self.gc_thresholds = None
-
- # some things used by the auth handler but not actually used in the
- # pusher codebase
- self.bcrypt_rounds = None
- self.ldap_enabled = None
- self.ldap_server = None
- self.ldap_port = None
- self.ldap_tls = None
- self.ldap_search_base = None
- self.ldap_search_property = None
- self.ldap_email_property = None
- self.ldap_full_name_property = None
-
- # We would otherwise try to use the registration shared secret as the
- # macaroon shared secret if there was no macaroon_shared_secret, but
- # that means pulling in RegistrationConfig too. We don't need to be
- # backwards compaitible in the pusher codebase so just make people set
- # macaroon_shared_secret. We set this to None to prevent it referencing
- # an undefined key.
- self.registration_shared_secret = None
-
- def default_config(self, server_name, **kwargs):
- pid_file = self.abspath("pusher.pid")
- return """\
- # Slave configuration
-
- # The replication listener on the synapse to talk to.
- #replication_url: https://localhost:{replication_port}/_synapse/replication
-
- server_name: "%(server_name)s"
-
- listeners: []
- # Enable a ssh manhole listener on the pusher.
- # - type: manhole
- # port: {manhole_port}
- # bind_address: 127.0.0.1
- # Enable a metric listener on the pusher.
- # - type: http
- # port: {metrics_port}
- # bind_address: 127.0.0.1
- # resources:
- # - names: ["metrics"]
- # compress: False
-
- report_stats: False
-
- daemonize: False
-
- pid_file: %(pid_file)s
-
- """ % locals()
-
-
-class PusherSlaveConfig(SlaveConfig, LoggingConfig, EmailConfig, KeyConfig):
- pass
-
-
class PusherSlaveStore(
SlavedEventStore, SlavedPusherStore, SlavedReceiptsStore,
SlavedAccountDataStore
@@ -232,8 +145,8 @@ class PusherServer(HomeServer):
)
logger.info("Synapse pusher now listening on port %d", port)
- def start_listening(self):
- for listener in self.config.listeners:
+ def start_listening(self, listeners):
+ for listener in listeners:
if listener["type"] == "http":
self._listen_http(listener)
elif listener["type"] == "manhole":
@@ -329,19 +242,32 @@ class PusherServer(HomeServer):
yield sleep(30)
-def setup(config_options):
+def setup(worker_name, config_options):
try:
- config = PusherSlaveConfig.load_config(
+ config = HomeServerConfig.load_config(
"Synapse pusher", config_options
)
except ConfigError as e:
sys.stderr.write("\n" + e.message + "\n")
sys.exit(1)
- if not config:
- sys.exit(0)
+ worker_config = config.workers[worker_name]
- config.setup_logging()
+ setup_logging(worker_config.log_config, worker_config.log_file)
+
+ clobber_with_worker_config(config, worker_config)
+
+ if config.start_pushers:
+ sys.stderr.write(
+ "\nThe pushers must be disabled in the main synapse process"
+ "\nbefore they can be run in a separate worker."
+ "\nPlease add ``start_pushers: false`` to the main config"
+ "\n"
+ )
+ sys.exit(1)
+
+ # Force the pushers to start since they will be disabled in the main config
+ config.start_pushers = True
database_engine = create_engine(config.database_config)
@@ -354,11 +280,15 @@ def setup(config_options):
)
ps.setup()
- ps.start_listening()
+ ps.start_listening(worker_config.listeners)
- change_resource_limit(ps.config.soft_file_limit)
- if ps.config.gc_thresholds:
- gc.set_threshold(*ps.config.gc_thresholds)
+ def run():
+ with LoggingContext("run"):
+ logger.info("Running")
+ change_resource_limit(worker_config.soft_file_limit)
+ if worker_config.gc_thresholds:
+ ps.set_threshold(worker_config.gc_thresholds)
+ reactor.run()
def start():
ps.replicate()
@@ -367,30 +297,21 @@ def setup(config_options):
reactor.callWhenRunning(start)
- return ps
+ if worker_config.daemonize:
+ daemon = Daemonize(
+ app="synapse-pusher",
+ pid=worker_config.pid_file,
+ action=run,
+ auto_close_fds=False,
+ verbose=True,
+ logger=logger,
+ )
+ daemon.start()
+ else:
+ run()
if __name__ == '__main__':
with LoggingContext("main"):
- ps = setup(sys.argv[1:])
-
- if ps.config.daemonize:
- def run():
- with LoggingContext("run"):
- change_resource_limit(ps.config.soft_file_limit)
- if ps.config.gc_thresholds:
- gc.set_threshold(*ps.config.gc_thresholds)
- reactor.run()
-
- daemon = Daemonize(
- app="synapse-pusher",
- pid=ps.config.pid_file,
- action=run,
- auto_close_fds=False,
- verbose=True,
- logger=logger,
- )
-
- daemon.start()
- else:
- reactor.run()
+ worker_name = sys.argv[1]
+ ps = setup(worker_name, sys.argv[2:])
diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py
index 297e19945..7a607faef 100644
--- a/synapse/app/synchrotron.py
+++ b/synapse/app/synchrotron.py
@@ -18,9 +18,9 @@ import synapse
from synapse.api.constants import EventTypes, PresenceState
from synapse.config._base import ConfigError
-from synapse.config.database import DatabaseConfig
-from synapse.config.logger import LoggingConfig
-from synapse.config.appservice import AppServiceConfig
+from synapse.config.homeserver import HomeServerConfig
+from synapse.config.logger import setup_logging
+from synapse.config.workers import clobber_with_worker_config
from synapse.events import FrozenEvent
from synapse.handlers.presence import PresenceHandler
from synapse.http.site import SynapseSite
@@ -57,76 +57,11 @@ from daemonize import Daemonize
import sys
import logging
import contextlib
-import gc
import ujson as json
logger = logging.getLogger("synapse.app.synchrotron")
-class SynchrotronConfig(DatabaseConfig, LoggingConfig, AppServiceConfig):
- def read_config(self, config):
- self.replication_url = config["replication_url"]
- self.server_name = config["server_name"]
- self.use_insecure_ssl_client_just_for_testing_do_not_use = config.get(
- "use_insecure_ssl_client_just_for_testing_do_not_use", False
- )
- self.user_agent_suffix = None
- self.listeners = config["listeners"]
- self.soft_file_limit = config.get("soft_file_limit")
- self.daemonize = config.get("daemonize")
- self.pid_file = self.abspath(config.get("pid_file"))
- self.macaroon_secret_key = config["macaroon_secret_key"]
- self.expire_access_token = config.get("expire_access_token", False)
-
- thresholds = config.get("gc_thresholds", None)
- if thresholds is not None:
- try:
- assert len(thresholds) == 3
- self.gc_thresholds = (
- int(thresholds[0]), int(thresholds[1]), int(thresholds[2]),
- )
- except:
- raise ConfigError(
- "Value of `gc_threshold` must be a list of three integers if set"
- )
- else:
- self.gc_thresholds = None
-
- def default_config(self, server_name, **kwargs):
- pid_file = self.abspath("synchroton.pid")
- return """\
- # Slave configuration
-
- # The replication listener on the synapse to talk to.
- #replication_url: https://localhost:{replication_port}/_synapse/replication
-
- server_name: "%(server_name)s"
-
- listeners:
- # Enable a /sync listener on the synchrontron
- #- type: http
- # port: {http_port}
- # bind_address: ""
- # Enable a ssh manhole listener on the synchrotron
- # - type: manhole
- # port: {manhole_port}
- # bind_address: 127.0.0.1
- # Enable a metric listener on the synchrotron
- # - type: http
- # port: {metrics_port}
- # bind_address: 127.0.0.1
- # resources:
- # - names: ["metrics"]
- # compress: False
-
- report_stats: False
-
- daemonize: False
-
- pid_file: %(pid_file)s
- """ % locals()
-
-
class SynchrotronSlavedStore(
SlavedPushRuleStore,
SlavedEventStore,
@@ -350,8 +285,8 @@ class SynchrotronServer(HomeServer):
)
logger.info("Synapse synchrotron now listening on port %d", port)
- def start_listening(self):
- for listener in self.config.listeners:
+ def start_listening(self, listeners):
+ for listener in listeners:
if listener["type"] == "http":
self._listen_http(listener)
elif listener["type"] == "manhole":
@@ -470,19 +405,20 @@ class SynchrotronServer(HomeServer):
return SynchrotronTyping(self)
-def setup(config_options):
+def start(worker_name, config_options):
try:
- config = SynchrotronConfig.load_config(
+ config = HomeServerConfig.load_config(
"Synapse synchrotron", config_options
)
except ConfigError as e:
sys.stderr.write("\n" + e.message + "\n")
sys.exit(1)
- if not config:
- sys.exit(0)
+ worker_config = config.workers[worker_name]
- config.setup_logging()
+ setup_logging(worker_config.log_config, worker_config.log_file)
+
+ clobber_with_worker_config(config, worker_config)
database_engine = create_engine(config.database_config)
@@ -496,11 +432,15 @@ def setup(config_options):
)
ss.setup()
- ss.start_listening()
+ ss.start_listening(worker_config.listeners)
- change_resource_limit(ss.config.soft_file_limit)
- if ss.config.gc_thresholds:
- ss.set_threshold(*ss.config.gc_thresholds)
+ def run():
+ with LoggingContext("run"):
+ logger.info("Running")
+ change_resource_limit(worker_config.soft_file_limit)
+ if worker_config.gc_thresholds:
+ ss.set_threshold(worker_config.gc_thresholds)
+ reactor.run()
def start():
ss.get_datastore().start_profiling()
@@ -508,30 +448,21 @@ def setup(config_options):
reactor.callWhenRunning(start)
- return ss
+ if worker_config.daemonize:
+ daemon = Daemonize(
+ app="synapse-synchrotron",
+ pid=worker_config.pid_file,
+ action=run,
+ auto_close_fds=False,
+ verbose=True,
+ logger=logger,
+ )
+ daemon.start()
+ else:
+ run()
if __name__ == '__main__':
with LoggingContext("main"):
- ss = setup(sys.argv[1:])
-
- if ss.config.daemonize:
- def run():
- with LoggingContext("run"):
- change_resource_limit(ss.config.soft_file_limit)
- if ss.config.gc_thresholds:
- gc.set_threshold(*ss.config.gc_thresholds)
- reactor.run()
-
- daemon = Daemonize(
- app="synapse-synchrotron",
- pid=ss.config.pid_file,
- action=run,
- auto_close_fds=False,
- verbose=True,
- logger=logger,
- )
-
- daemon.start()
- else:
- reactor.run()
+ worker_name = sys.argv[1]
+ start(worker_name, sys.argv[2:])
diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py
index fc2445484..79b0534b3 100644
--- a/synapse/config/homeserver.py
+++ b/synapse/config/homeserver.py
@@ -32,13 +32,15 @@ from .password import PasswordConfig
from .jwt import JWTConfig
from .ldap import LDAPConfig
from .emailconfig import EmailConfig
+from .workers import WorkerConfig
class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig,
RatelimitConfig, ContentRepositoryConfig, CaptchaConfig,
VoipConfig, RegistrationConfig, MetricsConfig, ApiConfig,
AppServiceConfig, KeyConfig, SAML2Config, CasConfig,
- JWTConfig, LDAPConfig, PasswordConfig, EmailConfig,):
+ JWTConfig, LDAPConfig, PasswordConfig, EmailConfig,
+ WorkerConfig,):
pass
diff --git a/synapse/config/logger.py b/synapse/config/logger.py
index 5047db898..dc68683fb 100644
--- a/synapse/config/logger.py
+++ b/synapse/config/logger.py
@@ -126,54 +126,58 @@ class LoggingConfig(Config):
)
def setup_logging(self):
- log_format = (
- "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
- " - %(message)s"
- )
- if self.log_config is None:
+ setup_logging(self.log_config, self.log_file, self.verbosity)
- level = logging.INFO
- level_for_storage = logging.INFO
- if self.verbosity:
- level = logging.DEBUG
- if self.verbosity > 1:
- level_for_storage = logging.DEBUG
- # FIXME: we need a logging.WARN for a -q quiet option
- logger = logging.getLogger('')
- logger.setLevel(level)
+def setup_logging(log_config=None, log_file=None, verbosity=None):
+ log_format = (
+ "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
+ " - %(message)s"
+ )
+ if log_config is None:
- logging.getLogger('synapse.storage').setLevel(level_for_storage)
+ level = logging.INFO
+ level_for_storage = logging.INFO
+ if verbosity:
+ level = logging.DEBUG
+ if verbosity > 1:
+ level_for_storage = logging.DEBUG
- formatter = logging.Formatter(log_format)
- if self.log_file:
- # TODO: Customisable file size / backup count
- handler = logging.handlers.RotatingFileHandler(
- self.log_file, maxBytes=(1000 * 1000 * 100), backupCount=3
- )
+ # FIXME: we need a logging.WARN for a -q quiet option
+ logger = logging.getLogger('')
+ logger.setLevel(level)
- def sighup(signum, stack):
- logger.info("Closing log file due to SIGHUP")
- handler.doRollover()
- logger.info("Opened new log file due to SIGHUP")
+ logging.getLogger('synapse.storage').setLevel(level_for_storage)
- # TODO(paul): obviously this is a terrible mechanism for
- # stealing SIGHUP, because it means no other part of synapse
- # can use it instead. If we want to catch SIGHUP anywhere
- # else as well, I'd suggest we find a nicer way to broadcast
- # it around.
- if getattr(signal, "SIGHUP"):
- signal.signal(signal.SIGHUP, sighup)
- else:
- handler = logging.StreamHandler()
- handler.setFormatter(formatter)
+ formatter = logging.Formatter(log_format)
+ if log_file:
+ # TODO: Customisable file size / backup count
+ handler = logging.handlers.RotatingFileHandler(
+ log_file, maxBytes=(1000 * 1000 * 100), backupCount=3
+ )
- handler.addFilter(LoggingContextFilter(request=""))
+ def sighup(signum, stack):
+ logger.info("Closing log file due to SIGHUP")
+ handler.doRollover()
+ logger.info("Opened new log file due to SIGHUP")
- logger.addHandler(handler)
+ # TODO(paul): obviously this is a terrible mechanism for
+ # stealing SIGHUP, because it means no other part of synapse
+ # can use it instead. If we want to catch SIGHUP anywhere
+ # else as well, I'd suggest we find a nicer way to broadcast
+ # it around.
+ if getattr(signal, "SIGHUP"):
+ signal.signal(signal.SIGHUP, sighup)
else:
- with open(self.log_config, 'r') as f:
- logging.config.dictConfig(yaml.load(f))
+ handler = logging.StreamHandler()
+ handler.setFormatter(formatter)
- observer = PythonLoggingObserver()
- observer.start()
+ handler.addFilter(LoggingContextFilter(request=""))
+
+ logger.addHandler(handler)
+ else:
+ with open(log_config, 'r') as f:
+ logging.config.dictConfig(yaml.load(f))
+
+ observer = PythonLoggingObserver()
+ observer.start()
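With setup_logging now a module-level function, a worker entry point can configure logging without instantiating a full LoggingConfig. A minimal sketch (the log file path is hypothetical):

```python
# Sketch only: setup_logging is module-level after this patch, so worker
# entry points can call it directly with per-worker settings.
from synapse.config.logger import setup_logging

# With no structured log config, this falls through to the default format
# and a RotatingFileHandler on the given (hypothetical) path.
setup_logging(log_config=None, log_file="/var/log/synapse/worker.log")
```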
diff --git a/synapse/config/server.py b/synapse/config/server.py
index 44b8d422e..f370b22c3 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -38,19 +38,7 @@ class ServerConfig(Config):
self.listeners = config.get("listeners", [])
- thresholds = config.get("gc_thresholds", None)
- if thresholds is not None:
- try:
- assert len(thresholds) == 3
- self.gc_thresholds = (
- int(thresholds[0]), int(thresholds[1]), int(thresholds[2]),
- )
- except:
- raise ConfigError(
- "Value of `gc_threshold` must be a list of three integers if set"
- )
- else:
- self.gc_thresholds = None
+ self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None))
bind_port = config.get("bind_port")
if bind_port:
@@ -264,3 +252,20 @@ class ServerConfig(Config):
type=int,
help="Turn on the twisted telnet manhole"
" service on the given port.")
+
+
+def read_gc_thresholds(thresholds):
+ """Reads the three integer thresholds for garbage collection. Ensures that
+ the thresholds are integers if thresholds are supplied.
+ """
+ if thresholds is None:
+ return None
+ try:
+ assert len(thresholds) == 3
+ return (
+ int(thresholds[0]), int(thresholds[1]), int(thresholds[2]),
+ )
+ except:
+ raise ConfigError(
+ "Value of `gc_threshold` must be a list of three integers if set"
+ )
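A minimal sketch of what the extracted helper accepts and returns, based on the definition above (values illustrative):

```python
# read_gc_thresholds normalises a YAML value such as
#   gc_thresholds: [700, 10, 10]
# to a tuple of three ints, passes None through, and raises ConfigError
# for anything else.
from synapse.config.server import read_gc_thresholds

assert read_gc_thresholds(None) is None
assert read_gc_thresholds([700, 10, 10]) == (700, 10, 10)
assert read_gc_thresholds(["700", "10", "10"]) == (700, 10, 10)  # ints coerced
```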
From dbb5a39b64e3b52978ecb98f8f64b7b50acf9b59 Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Thu, 16 Jun 2016 11:09:15 +0100
Subject: [PATCH 147/414] Add worker config module
---
synapse/config/workers.py | 71 +++++++++++++++++++++++++++++++++++++++
1 file changed, 71 insertions(+)
create mode 100644 synapse/config/workers.py
diff --git a/synapse/config/workers.py b/synapse/config/workers.py
new file mode 100644
index 000000000..fd19e38b8
--- /dev/null
+++ b/synapse/config/workers.py
@@ -0,0 +1,71 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016 matrix.org
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections
+
+from ._base import Config
+from .server import read_gc_thresholds
+
+
+Worker = collections.namedtuple("Worker", [
+ "app",
+ "listeners",
+ "pid_file",
+ "daemonize",
+ "log_file",
+ "log_config",
+ "event_cache_size",
+ "soft_file_limit",
+ "gc_thresholds",
+ "replication_url",
+])
+
+
+def clobber_with_worker_config(config, worker_config):
+ """Overrides some of the keys of the main config with worker-specific
+ values."""
+ config.event_cache_size = worker_config.event_cache_size
+ config.replication_url = worker_config.replication_url
+
+
+def read_worker_config(config):
+ return Worker(
+ app=config["app"],
+ listeners=config.get("listeners", []),
+ pid_file=config.get("pid_file"),
+ daemonize=config["daemonize"],
+ log_file=config.get("log_file"),
+ log_config=config.get("log_config"),
+ event_cache_size=Config.parse_size(config.get("event_cache_size", "10K")),
+ soft_file_limit=config.get("soft_file_limit"),
+ gc_thresholds=read_gc_thresholds(config.get("gc_thresholds")),
+ replication_url=config.get("replication_url"),
+ )
+
+
+class WorkerConfig(Config):
+ """The workers are processes run separately to the main synapse process.
+ Each worker has a name that identifies it within the config file.
+ They have their own pid_file and listener configuration. They use the
+ replication_url to talk to the main synapse process. They have their
+ own cache size tuning, gc threshold tuning and open file limits."""
+
+ def read_config(self, config):
+ workers = config.get("workers", {})
+
+ self.workers = {
+ worker_name: read_worker_config(worker_config)
+ for worker_name, worker_config in workers.items()
+ }
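For reference, a sketch of the parsed config shape that read_config expects here. Names and values are illustrative; per read_worker_config, "app" and "daemonize" are the only required keys per worker:

```python
# Hypothetical parsed YAML handed to WorkerConfig.read_config.
config = {
    "workers": {
        "pusher1": {
            "app": "synapse.app.pusher",   # required
            "daemonize": True,             # required
            "pid_file": "/var/run/synapse-pusher1.pid",
            "log_file": "/var/log/synapse-pusher1.log",
            "event_cache_size": "5K",      # parsed by Config.parse_size
            "gc_thresholds": [700, 10, 10],
            "replication_url": "http://127.0.0.1:8008/_synapse/replication",
        },
    },
}
```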
From 80a1bc7db517298baec24c1f11a144552719fb7b Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Thu, 16 Jun 2016 11:29:45 +0100
Subject: [PATCH 148/414] Comment on what's going on in
clobber_with_worker_config
---
synapse/config/workers.py | 13 ++++++++++++-
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/synapse/config/workers.py b/synapse/config/workers.py
index fd19e38b8..4f4658c0a 100644
--- a/synapse/config/workers.py
+++ b/synapse/config/workers.py
@@ -35,8 +35,19 @@ Worker = collections.namedtuple("Worker", [
def clobber_with_worker_config(config, worker_config):
"""Overrides some of the keys of the main config with worker-specific
- values."""
+ values. We only need to override the keys that are accessed deep
+ within synapse code. Most of the keys that we want to override in
+ the workers are accessed in setup code that is rewritten specifically
+ for the workers. In that new code we can access the worker config directly,
+ so we don't need to override the values in the main config."""
+
+ # TODO: The event_cache_size is accessed in the db setup. It should be
+ # possible to rejig that code so that the cache size is pulled from the
+ # worker config directly.
config.event_cache_size = worker_config.event_cache_size
+
+ # TODO: The replication_url should only be accessed within worker specific
+ # code so it really shouldn't need to be clobbered in the main config.
config.replication_url = worker_config.replication_url
From bde13833cb42fc6e09928ffb4f4efad9244abffa Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Thu, 16 Jun 2016 12:44:40 +0100
Subject: [PATCH 149/414] Access replication_url from the worker config
directly
---
synapse/app/pusher.py | 5 +++--
synapse/app/synchrotron.py | 5 +++--
synapse/config/workers.py | 4 ----
3 files changed, 6 insertions(+), 8 deletions(-)
diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py
index 6c8c02fb3..a26a3bd39 100644
--- a/synapse/app/pusher.py
+++ b/synapse/app/pusher.py
@@ -112,7 +112,7 @@ class PusherServer(HomeServer):
def remove_pusher(self, app_id, push_key, user_id):
http_client = self.get_simple_http_client()
- replication_url = self.config.replication_url
+ replication_url = self.worker_config.replication_url
url = replication_url + "/remove_pushers"
return http_client.post_json_get_json(url, {
"remove": [{
@@ -166,7 +166,7 @@ class PusherServer(HomeServer):
def replicate(self):
http_client = self.get_simple_http_client()
store = self.get_datastore()
- replication_url = self.config.replication_url
+ replication_url = self.worker_config.replication_url
pusher_pool = self.get_pusherpool()
clock = self.get_clock()
@@ -275,6 +275,7 @@ def setup(worker_name, config_options):
config.server_name,
db_config=config.database_config,
config=config,
+ worker_config=worker_config,
version_string=get_version_string("Synapse", synapse),
database_engine=database_engine,
)
diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py
index 7a607faef..4443c73e6 100644
--- a/synapse/app/synchrotron.py
+++ b/synapse/app/synchrotron.py
@@ -98,7 +98,7 @@ class SynchrotronPresence(object):
self.http_client = hs.get_simple_http_client()
self.store = hs.get_datastore()
self.user_to_num_current_syncs = {}
- self.syncing_users_url = hs.config.replication_url + "/syncing_users"
+ self.syncing_users_url = hs.worker_config.replication_url + "/syncing_users"
self.clock = hs.get_clock()
active_presence = self.store.take_presence_startup_info()
@@ -306,7 +306,7 @@ class SynchrotronServer(HomeServer):
def replicate(self):
http_client = self.get_simple_http_client()
store = self.get_datastore()
- replication_url = self.config.replication_url
+ replication_url = self.worker_config.replication_url
clock = self.get_clock()
notifier = self.get_notifier()
presence_handler = self.get_presence_handler()
@@ -426,6 +426,7 @@ def start(worker_name, config_options):
config.server_name,
db_config=config.database_config,
config=config,
+ worker_config=worker_config,
version_string=get_version_string("Synapse", synapse),
database_engine=database_engine,
application_service_handler=SynchrotronApplicationService(),
diff --git a/synapse/config/workers.py b/synapse/config/workers.py
index 4f4658c0a..f2c77ef59 100644
--- a/synapse/config/workers.py
+++ b/synapse/config/workers.py
@@ -46,10 +46,6 @@ def clobber_with_worker_config(config, worker_config):
# worker config directly.
config.event_cache_size = worker_config.event_cache_size
- # TODO: The replication_url should only be accessed within worker specific
- # code so it really shouldn't need to be clobbered in the main config.
- config.replication_url = worker_config.replication_url
-
def read_worker_config(config):
return Worker(
From 364d6167926d5d8b2a312e3d35623d2e05330e0a Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Thu, 16 Jun 2016 12:53:15 +0100
Subject: [PATCH 150/414] Access the event_cache_size directly from the server
object.
This means that the workers can override the event_cache_size
directly without clobbering the value in the main synapse config.
---
synapse/app/pusher.py | 6 +++---
synapse/app/synchrotron.py | 6 +++---
synapse/config/workers.py | 14 --------------
synapse/server.py | 3 +++
synapse/storage/_base.py | 2 +-
5 files changed, 10 insertions(+), 21 deletions(-)
diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py
index a26a3bd39..5d4db4f89 100644
--- a/synapse/app/pusher.py
+++ b/synapse/app/pusher.py
@@ -18,7 +18,6 @@ import synapse
from synapse.server import HomeServer
from synapse.config._base import ConfigError
-from synapse.config.workers import clobber_with_worker_config
from synapse.config.logger import setup_logging
from synapse.config.homeserver import HomeServerConfig
from synapse.http.site import SynapseSite
@@ -241,6 +240,9 @@ class PusherServer(HomeServer):
logger.exception("Error replicating from %r", replication_url)
yield sleep(30)
+ def get_event_cache_size(self):
+ return self.worker_config.event_cache_size
+
def setup(worker_name, config_options):
try:
@@ -255,8 +257,6 @@ def setup(worker_name, config_options):
setup_logging(worker_config.log_config, worker_config.log_file)
- clobber_with_worker_config(config, worker_config)
-
if config.start_pushers:
sys.stderr.write(
"\nThe pushers must be disabled in the main synapse process"
diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py
index 4443c73e6..d10bb2b3f 100644
--- a/synapse/app/synchrotron.py
+++ b/synapse/app/synchrotron.py
@@ -20,7 +20,6 @@ from synapse.api.constants import EventTypes, PresenceState
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
-from synapse.config.workers import clobber_with_worker_config
from synapse.events import FrozenEvent
from synapse.handlers.presence import PresenceHandler
from synapse.http.site import SynapseSite
@@ -404,6 +403,9 @@ class SynchrotronServer(HomeServer):
def build_typing_handler(self):
return SynchrotronTyping(self)
+ def get_event_cache_size(self):
+ return self.worker_config.event_cache_size
+
def start(worker_name, config_options):
try:
@@ -418,8 +420,6 @@ def start(worker_name, config_options):
setup_logging(worker_config.log_config, worker_config.log_file)
- clobber_with_worker_config(config, worker_config)
-
database_engine = create_engine(config.database_config)
ss = SynchrotronServer(
diff --git a/synapse/config/workers.py b/synapse/config/workers.py
index f2c77ef59..503358e03 100644
--- a/synapse/config/workers.py
+++ b/synapse/config/workers.py
@@ -33,20 +33,6 @@ Worker = collections.namedtuple("Worker", [
])
-def clobber_with_worker_config(config, worker_config):
- """Overrides some of the keys of the main config with worker-specific
- values. We only need to override the keys that are accessed deep
- within synapse code. Most of the keys that we want to override in
- the workers are accessed in setup code that is rewritten specifically
- for the workers. In that new code we can access the worker config directly,
- so we don't need to override the values in the main config."""
-
- # TODO: The event_cache_size is accessed in the db setup. It should be
- # possible to rejig that code so that the cache size is pulled from the
- # worker config directly.
- config.event_cache_size = worker_config.event_cache_size
-
-
def read_worker_config(config):
return Worker(
app=config["app"],
diff --git a/synapse/server.py b/synapse/server.py
index dd4b81c65..b3c31ece7 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -236,6 +236,9 @@ class HomeServer(object):
def remove_pusher(self, app_id, push_key, user_id):
return self.get_pusherpool().remove_pusher(app_id, push_key, user_id)
+ def get_event_cache_size(self):
+ return self.config.event_cache_size
+
def _make_dependency_method(depname):
def _get(hs):
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index 32c6677d4..2932880cc 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -166,7 +166,7 @@ class SQLBaseStore(object):
self._get_event_counters = PerformanceCounters()
self._get_event_cache = Cache("*getEvent*", keylen=3, lru=True,
- max_entries=hs.config.event_cache_size)
+ max_entries=hs.get_event_cache_size())
self._state_group_cache = DictionaryCache(
"*stateGroupCache*", 2000 * CACHE_SIZE_FACTOR
From a352b68acf473f59012340b7f481f3dfd6544ac6 Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Thu, 16 Jun 2016 17:29:50 +0100
Subject: [PATCH 151/414] Use worker_ prefixes for worker config, use existing
support for multiple config files
---
synapse/app/pusher.py | 29 ++++++++++------------
synapse/app/synchrotron.py | 29 ++++++++++------------
synapse/config/workers.py | 49 +++++++-------------------------------
synapse/server.py | 3 ---
synapse/storage/_base.py | 2 +-
5 files changed, 33 insertions(+), 79 deletions(-)
diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py
index 5d4db4f89..9ac26d52c 100644
--- a/synapse/app/pusher.py
+++ b/synapse/app/pusher.py
@@ -111,7 +111,7 @@ class PusherServer(HomeServer):
def remove_pusher(self, app_id, push_key, user_id):
http_client = self.get_simple_http_client()
- replication_url = self.worker_config.replication_url
+ replication_url = self.config.worker_replication_url
url = replication_url + "/remove_pushers"
return http_client.post_json_get_json(url, {
"remove": [{
@@ -165,7 +165,7 @@ class PusherServer(HomeServer):
def replicate(self):
http_client = self.get_simple_http_client()
store = self.get_datastore()
- replication_url = self.worker_config.replication_url
+ replication_url = self.config.worker_replication_url
pusher_pool = self.get_pusherpool()
clock = self.get_clock()
@@ -240,11 +240,8 @@ class PusherServer(HomeServer):
logger.exception("Error replicating from %r", replication_url)
yield sleep(30)
- def get_event_cache_size(self):
- return self.worker_config.event_cache_size
-
-def setup(worker_name, config_options):
+def start(config_options):
try:
config = HomeServerConfig.load_config(
"Synapse pusher", config_options
@@ -253,9 +250,9 @@ def setup(worker_name, config_options):
sys.stderr.write("\n" + e.message + "\n")
sys.exit(1)
- worker_config = config.workers[worker_name]
+ assert config.worker_app == "synapse.app.pusher"
- setup_logging(worker_config.log_config, worker_config.log_file)
+ setup_logging(config.worker_log_config, config.worker_log_file)
if config.start_pushers:
sys.stderr.write(
@@ -275,20 +272,19 @@ def setup(worker_name, config_options):
config.server_name,
db_config=config.database_config,
config=config,
- worker_config=worker_config,
version_string=get_version_string("Synapse", synapse),
database_engine=database_engine,
)
ps.setup()
- ps.start_listening(worker_config.listeners)
+ ps.start_listening(config.worker_listeners)
def run():
with LoggingContext("run"):
logger.info("Running")
- change_resource_limit(worker_config.soft_file_limit)
- if worker_config.gc_thresholds:
- ps.set_threshold(worker_config.gc_thresholds)
+ change_resource_limit(config.soft_file_limit)
+ if config.gc_thresholds:
+ ps.set_threshold(config.gc_thresholds)
reactor.run()
def start():
@@ -298,10 +294,10 @@ def setup(worker_name, config_options):
reactor.callWhenRunning(start)
- if worker_config.daemonize:
+ if config.worker_daemonize:
daemon = Daemonize(
app="synapse-pusher",
- pid=worker_config.pid_file,
+ pid=config.worker_pid_file,
action=run,
auto_close_fds=False,
verbose=True,
@@ -314,5 +310,4 @@ def setup(worker_name, config_options):
if __name__ == '__main__':
with LoggingContext("main"):
- worker_name = sys.argv[1]
- ps = setup(worker_name, sys.argv[2:])
+ ps = start(sys.argv[1:])
diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py
index d10bb2b3f..160db8637 100644
--- a/synapse/app/synchrotron.py
+++ b/synapse/app/synchrotron.py
@@ -97,7 +97,7 @@ class SynchrotronPresence(object):
self.http_client = hs.get_simple_http_client()
self.store = hs.get_datastore()
self.user_to_num_current_syncs = {}
- self.syncing_users_url = hs.worker_config.replication_url + "/syncing_users"
+ self.syncing_users_url = hs.config.worker_replication_url + "/syncing_users"
self.clock = hs.get_clock()
active_presence = self.store.take_presence_startup_info()
@@ -305,7 +305,7 @@ class SynchrotronServer(HomeServer):
def replicate(self):
http_client = self.get_simple_http_client()
store = self.get_datastore()
- replication_url = self.worker_config.replication_url
+ replication_url = self.config.worker_replication_url
clock = self.get_clock()
notifier = self.get_notifier()
presence_handler = self.get_presence_handler()
@@ -403,11 +403,8 @@ class SynchrotronServer(HomeServer):
def build_typing_handler(self):
return SynchrotronTyping(self)
- def get_event_cache_size(self):
- return self.worker_config.event_cache_size
-
-def start(worker_name, config_options):
+def start(config_options):
try:
config = HomeServerConfig.load_config(
"Synapse synchrotron", config_options
@@ -416,9 +413,9 @@ def start(worker_name, config_options):
sys.stderr.write("\n" + e.message + "\n")
sys.exit(1)
- worker_config = config.workers[worker_name]
+ assert config.worker_app == "synapse.app.synchrotron"
- setup_logging(worker_config.log_config, worker_config.log_file)
+ setup_logging(config.worker_log_config, config.worker_log_file)
database_engine = create_engine(config.database_config)
@@ -426,21 +423,20 @@ def start(worker_name, config_options):
config.server_name,
db_config=config.database_config,
config=config,
- worker_config=worker_config,
version_string=get_version_string("Synapse", synapse),
database_engine=database_engine,
application_service_handler=SynchrotronApplicationService(),
)
ss.setup()
- ss.start_listening(worker_config.listeners)
+ ss.start_listening(config.worker_listeners)
def run():
with LoggingContext("run"):
logger.info("Running")
- change_resource_limit(worker_config.soft_file_limit)
- if worker_config.gc_thresholds:
- ss.set_threshold(worker_config.gc_thresholds)
+ change_resource_limit(config.soft_file_limit)
+ if config.gc_thresholds:
+ ss.set_threshold(config.gc_thresholds)
reactor.run()
def start():
@@ -449,10 +445,10 @@ def start(worker_name, config_options):
reactor.callWhenRunning(start)
- if worker_config.daemonize:
+ if config.worker_daemonize:
daemon = Daemonize(
app="synapse-synchrotron",
- pid=worker_config.pid_file,
+ pid=config.worker_pid_file,
action=run,
auto_close_fds=False,
verbose=True,
@@ -465,5 +461,4 @@ def start(worker_name, config_options):
if __name__ == '__main__':
with LoggingContext("main"):
- worker_name = sys.argv[1]
- start(worker_name, sys.argv[2:])
+ start(sys.argv[1:])
diff --git a/synapse/config/workers.py b/synapse/config/workers.py
index 503358e03..904789d15 100644
--- a/synapse/config/workers.py
+++ b/synapse/config/workers.py
@@ -13,52 +13,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import collections
-
from ._base import Config
-from .server import read_gc_thresholds
-
-
-Worker = collections.namedtuple("Worker", [
- "app",
- "listeners",
- "pid_file",
- "daemonize",
- "log_file",
- "log_config",
- "event_cache_size",
- "soft_file_limit",
- "gc_thresholds",
- "replication_url",
-])
-
-
-def read_worker_config(config):
- return Worker(
- app=config["app"],
- listeners=config.get("listeners", []),
- pid_file=config.get("pid_file"),
- daemonize=config["daemonize"],
- log_file=config.get("log_file"),
- log_config=config.get("log_config"),
- event_cache_size=Config.parse_size(config.get("event_cache_size", "10K")),
- soft_file_limit=config.get("soft_file_limit"),
- gc_thresholds=read_gc_thresholds(config.get("gc_thresholds")),
- replication_url=config.get("replication_url"),
- )
class WorkerConfig(Config):
"""The workers are processes run separately to the main synapse process.
- Each worker has a name that identifies it within the config file.
They have their own pid_file and listener configuration. They use the
- replication_url to talk to the main synapse process. They have their
- own cache size tuning, gc threshold tuning and open file limits."""
+ replication_url to talk to the main synapse process."""
def read_config(self, config):
- workers = config.get("workers", {})
-
- self.workers = {
- worker_name: read_worker_config(worker_config)
- for worker_name, worker_config in workers.items()
- }
+ self.worker_app = config.get("worker_app")
+ self.worker_listeners = config.get("worker_listeners")
+ self.worker_daemonize = config.get("worker_daemonize")
+ self.worker_pid_file = config.get("worker_pid_file")
+ self.worker_log_file = config.get("worker_log_file")
+ self.worker_log_config = config.get("worker_log_config")
+ self.worker_replication_url = config.get("worker_replication_url")
diff --git a/synapse/server.py b/synapse/server.py
index b3c31ece7..dd4b81c65 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -236,9 +236,6 @@ class HomeServer(object):
def remove_pusher(self, app_id, push_key, user_id):
return self.get_pusherpool().remove_pusher(app_id, push_key, user_id)
- def get_event_cache_size(self):
- return self.config.event_cache_size
-
def _make_dependency_method(depname):
def _get(hs):
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index 2932880cc..32c6677d4 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -166,7 +166,7 @@ class SQLBaseStore(object):
self._get_event_counters = PerformanceCounters()
self._get_event_cache = Cache("*getEvent*", keylen=3, lru=True,
- max_entries=hs.get_event_cache_size())
+ max_entries=hs.config.event_cache_size)
self._state_group_cache = DictionaryCache(
"*stateGroupCache*", 2000 * CACHE_SIZE_FACTOR
From 8c75040c25495bf29f4c76ca0fcc032975210012 Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Fri, 17 Jun 2016 11:48:12 +0100
Subject: [PATCH 152/414] Fix setting gc thresholds in the workers
---
synapse/app/pusher.py | 3 ++-
synapse/app/synchrotron.py | 3 ++-
2 files changed, 4 insertions(+), 2 deletions(-)
diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py
index 9ac26d52c..4f1d18ab5 100644
--- a/synapse/app/pusher.py
+++ b/synapse/app/pusher.py
@@ -43,6 +43,7 @@ from daemonize import Daemonize
import sys
import logging
+import gc
logger = logging.getLogger("synapse.app.pusher")
@@ -284,7 +285,7 @@ def start(config_options):
logger.info("Running")
change_resource_limit(config.soft_file_limit)
if config.gc_thresholds:
- ps.set_threshold(config.gc_thresholds)
+ gc.set_threshold(*config.gc_thresholds)
reactor.run()
def start():
diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py
index 160db8637..8cf5bbbb6 100644
--- a/synapse/app/synchrotron.py
+++ b/synapse/app/synchrotron.py
@@ -56,6 +56,7 @@ from daemonize import Daemonize
import sys
import logging
import contextlib
+import gc
import ujson as json
logger = logging.getLogger("synapse.app.synchrotron")
@@ -436,7 +437,7 @@ def start(config_options):
logger.info("Running")
change_resource_limit(config.soft_file_limit)
if config.gc_thresholds:
- ss.set_threshold(config.gc_thresholds)
+ gc.set_threshold(*config.gc_thresholds)
reactor.run()
def start():
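The fix in both entry points is the argument unpacking: gc.set_threshold takes three separate integers, not a tuple. A quick illustration:

```python
import gc

gc_thresholds = (700, 10, 10)       # illustrative values
gc.set_threshold(*gc_thresholds)    # same as gc.set_threshold(700, 10, 10)
# gc.set_threshold(gc_thresholds)   # would raise TypeError
print(gc.get_threshold())           # (700, 10, 10)
```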
From ded01c3bf65fd6bb83c9d3546ea44859208e4578 Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Fri, 17 Jun 2016 13:49:16 +0100
Subject: [PATCH 153/414] Fix ``KeyError: 'msgtype'``. Use ``.get``
Fixes a key error where the mailer tried to get the ``msgtype`` of an
event that was missing a ``msgtype``.
```
File "synapse/push/mailer.py", line 264, in get_notif_vars
File "synapse/push/mailer.py", line 285, in get_message_vars
File ".../frozendict/__init__.py", line 10, in __getitem__
return self.__dict[key]
KeyError: 'msgtype'
```
---
synapse/push/mailer.py | 29 +++++++++++++++--------------
1 file changed, 15 insertions(+), 14 deletions(-)
diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py
index e5c3929cd..1028731bc 100644
--- a/synapse/push/mailer.py
+++ b/synapse/push/mailer.py
@@ -273,16 +273,16 @@ class Mailer(object):
sender_state_event = room_state[("m.room.member", event.sender)]
sender_name = name_from_member_event(sender_state_event)
- sender_avatar_url = None
- if "avatar_url" in sender_state_event.content:
- sender_avatar_url = sender_state_event.content["avatar_url"]
+ sender_avatar_url = sender_state_event.content.get("avatar_url")
# 'hash' for deterministically picking default images: use
# sender_hash % the number of default images to choose from
sender_hash = string_ordinal_total(event.sender)
+ msgtype = event.content.get("msgtype")
+
ret = {
- "msgtype": event.content["msgtype"],
+ "msgtype": msgtype,
"is_historical": event.event_id != notif['event_id'],
"id": event.event_id,
"ts": event.origin_server_ts,
@@ -291,9 +291,9 @@ class Mailer(object):
"sender_hash": sender_hash,
}
- if event.content["msgtype"] == "m.text":
+ if msgtype == "m.text":
self.add_text_message_vars(ret, event)
- elif event.content["msgtype"] == "m.image":
+ elif msgtype == "m.image":
self.add_image_message_vars(ret, event)
if "body" in event.content:
@@ -302,16 +302,17 @@ class Mailer(object):
return ret
def add_text_message_vars(self, messagevars, event):
- if "format" in event.content:
- msgformat = event.content["format"]
- else:
- msgformat = None
+ msgformat = event.content.get("format")
+
messagevars["format"] = msgformat
- if msgformat == "org.matrix.custom.html":
- messagevars["body_text_html"] = safe_markup(event.content["formatted_body"])
- else:
- messagevars["body_text_html"] = safe_text(event.content["body"])
+ formatted_body = event.content.get("formatted_body")
+ body = event.content.get("body")
+
+ if msgformat == "org.matrix.custom.html" and formatted_body:
+ messagevars["body_text_html"] = safe_markup(formatted_body)
+ elif body:
+ messagevars["body_text_html"] = safe_text(body)
return messagevars
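The underlying pattern, in miniature: indexing raises KeyError on a missing key, while .get returns None so the branches above simply fail to match:

```python
content = {"body": "hello"}        # no "msgtype" key, as in the traceback

msgtype = content.get("msgtype")   # None, no exception raised
if msgtype == "m.text":
    pass                           # branch not taken for this event
# content["msgtype"] would raise KeyError: 'msgtype'
```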
From 2884712ca733f45d32468ecf2ede7a1518e85be4 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 17 Jun 2016 14:47:33 +0100
Subject: [PATCH 154/414] Only re-sign our own events
---
synapse/federation/federation_server.py | 15 +++++++++------
synapse/handlers/federation.py | 15 +++++++++------
2 files changed, 18 insertions(+), 12 deletions(-)
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index fe92457ba..2a589524a 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -193,13 +193,16 @@ class FederationServer(FederationBase):
)
for event in auth_chain:
- event.signatures.update(
- compute_event_signature(
- event,
- self.hs.hostname,
- self.hs.config.signing_key[0]
+ # We sign these again because there was a bug where we
+ # incorrectly signed things the first time round
+ if self.hs.is_mine_id(event.event_id):
+ event.signatures.update(
+ compute_event_signature(
+ event,
+ self.hs.hostname,
+ self.hs.config.signing_key[0]
+ )
)
- )
else:
raise NotImplementedError("Specify an event")
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index c2df43e2f..6c0bc7eaf 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -1018,13 +1018,16 @@ class FederationHandler(BaseHandler):
res = results.values()
for event in res:
- event.signatures.update(
- compute_event_signature(
- event,
- self.hs.hostname,
- self.hs.config.signing_key[0]
+ # We sign these again because there was a bug where we
+ # incorrectly signed things the first time round
+ if self.hs.is_mine_id(event.event_id):
+ event.signatures.update(
+ compute_event_signature(
+ event,
+ self.hs.hostname,
+ self.hs.config.signing_key[0]
+ )
)
- )
defer.returnValue(res)
else:
From 3e41de05cc13220f5cd88ae78002adf782728322 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 17 Jun 2016 15:11:22 +0100
Subject: [PATCH 155/414] Turn use_frozen_events off by default
---
synapse/config/server.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/synapse/config/server.py b/synapse/config/server.py
index f370b22c3..7840dc3ad 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -27,7 +27,7 @@ class ServerConfig(Config):
self.daemonize = config.get("daemonize")
self.print_pidfile = config.get("print_pidfile")
self.user_agent_suffix = config.get("user_agent_suffix")
- self.use_frozen_dicts = config.get("use_frozen_dicts", True)
+ self.use_frozen_dicts = config.get("use_frozen_dicts", False)
self.public_baseurl = config.get("public_baseurl")
self.secondary_directory_servers = config.get("secondary_directory_servers", [])
From 0113ad36ee7bc315aa162c42277b90764825f219 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 17 Jun 2016 15:13:13 +0100
Subject: [PATCH 156/414] Enable use_frozen_events in tests
---
tests/utils.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/tests/utils.py b/tests/utils.py
index e19ae581e..6e41ae1ff 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -54,6 +54,7 @@ def setup_test_homeserver(name="test", datastore=None, config=None, **kargs):
config.trusted_third_party_id_servers = []
config.room_invite_state_types = []
+ config.use_frozen_dicts = True
config.database_config = {"name": "sqlite3"}
if "clock" not in kargs:
From 120c2387053bdc30824d6b15931532664f739192 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 17 Jun 2016 16:10:37 +0100
Subject: [PATCH 157/414] Disable responding with canonical json for federation
---
synapse/federation/transport/server.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index 6fc3e2207..8a1965f45 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -37,7 +37,7 @@ class TransportLayerServer(JsonResource):
self.hs = hs
self.clock = hs.get_clock()
- super(TransportLayerServer, self).__init__(hs)
+ super(TransportLayerServer, self).__init__(hs, canonical_json=False)
self.authenticator = Authenticator(hs)
self.ratelimiter = FederationRateLimiter(
From 8f4a9bbc16e6b54f1ab110085e42884fd16abb6a Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 17 Jun 2016 16:43:45 +0100
Subject: [PATCH 158/414] Linearize some federation endpoints based on (origin,
room_id)
---
synapse/federation/federation_server.py | 141 +++++++++++++-----------
synapse/federation/transport/server.py | 2 +-
2 files changed, 77 insertions(+), 66 deletions(-)
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index 2a589524a..85f5e752f 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -49,6 +49,7 @@ class FederationServer(FederationBase):
super(FederationServer, self).__init__(hs)
self._room_pdu_linearizer = Linearizer()
+ self._server_linearizer = Linearizer()
def set_handler(self, handler):
"""Sets the handler that the replication layer will use to communicate
@@ -89,11 +90,14 @@ class FederationServer(FederationBase):
@defer.inlineCallbacks
@log_function
def on_backfill_request(self, origin, room_id, versions, limit):
- pdus = yield self.handler.on_backfill_request(
- origin, room_id, versions, limit
- )
+ with (yield self._server_linearizer.queue((origin, room_id))):
+ pdus = yield self.handler.on_backfill_request(
+ origin, room_id, versions, limit
+ )
- defer.returnValue((200, self._transaction_from_pdus(pdus).get_dict()))
+ res = self._transaction_from_pdus(pdus).get_dict()
+
+ defer.returnValue((200, res))
@defer.inlineCallbacks
@log_function
@@ -184,27 +188,28 @@ class FederationServer(FederationBase):
@defer.inlineCallbacks
@log_function
def on_context_state_request(self, origin, room_id, event_id):
- if event_id:
- pdus = yield self.handler.get_state_for_pdu(
- origin, room_id, event_id,
- )
- auth_chain = yield self.store.get_auth_chain(
- [pdu.event_id for pdu in pdus]
- )
+ with (yield self._server_linearizer.queue((origin, room_id))):
+ if event_id:
+ pdus = yield self.handler.get_state_for_pdu(
+ origin, room_id, event_id,
+ )
+ auth_chain = yield self.store.get_auth_chain(
+ [pdu.event_id for pdu in pdus]
+ )
- for event in auth_chain:
- # We sign these again because there was a bug where we
- # incorrectly signed things the first time round
- if self.hs.is_mine_id(event.event_id):
- event.signatures.update(
- compute_event_signature(
- event,
- self.hs.hostname,
- self.hs.config.signing_key[0]
+ for event in auth_chain:
+ # We sign these again because there was a bug where we
+ # incorrectly signed things the first time round
+ if self.hs.is_mine_id(event.event_id):
+ event.signatures.update(
+ compute_event_signature(
+ event,
+ self.hs.hostname,
+ self.hs.config.signing_key[0]
+ )
)
- )
- else:
- raise NotImplementedError("Specify an event")
+ else:
+ raise NotImplementedError("Specify an event")
defer.returnValue((200, {
"pdus": [pdu.get_pdu_json() for pdu in pdus],
@@ -283,14 +288,16 @@ class FederationServer(FederationBase):
@defer.inlineCallbacks
def on_event_auth(self, origin, room_id, event_id):
- time_now = self._clock.time_msec()
- auth_pdus = yield self.handler.on_event_auth(event_id)
- defer.returnValue((200, {
- "auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus],
- }))
+ with (yield self._server_linearizer.queue((origin, room_id))):
+ time_now = self._clock.time_msec()
+ auth_pdus = yield self.handler.on_event_auth(event_id)
+ res = {
+ "auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus],
+ }
+ defer.returnValue((200, res))
@defer.inlineCallbacks
- def on_query_auth_request(self, origin, content, event_id):
+ def on_query_auth_request(self, origin, content, room_id, event_id):
"""
Content is a dict with keys::
auth_chain (list): A list of events that give the auth chain.
@@ -309,32 +316,33 @@ class FederationServer(FederationBase):
Returns:
Deferred: Results in `dict` with the same format as `content`
"""
- auth_chain = [
- self.event_from_pdu_json(e)
- for e in content["auth_chain"]
- ]
+ with (yield self._server_linearizer.queue((origin, room_id))):
+ auth_chain = [
+ self.event_from_pdu_json(e)
+ for e in content["auth_chain"]
+ ]
- signed_auth = yield self._check_sigs_and_hash_and_fetch(
- origin, auth_chain, outlier=True
- )
+ signed_auth = yield self._check_sigs_and_hash_and_fetch(
+ origin, auth_chain, outlier=True
+ )
- ret = yield self.handler.on_query_auth(
- origin,
- event_id,
- signed_auth,
- content.get("rejects", []),
- content.get("missing", []),
- )
+ ret = yield self.handler.on_query_auth(
+ origin,
+ event_id,
+ signed_auth,
+ content.get("rejects", []),
+ content.get("missing", []),
+ )
- time_now = self._clock.time_msec()
- send_content = {
- "auth_chain": [
- e.get_pdu_json(time_now)
- for e in ret["auth_chain"]
- ],
- "rejects": ret.get("rejects", []),
- "missing": ret.get("missing", []),
- }
+ time_now = self._clock.time_msec()
+ send_content = {
+ "auth_chain": [
+ e.get_pdu_json(time_now)
+ for e in ret["auth_chain"]
+ ],
+ "rejects": ret.get("rejects", []),
+ "missing": ret.get("missing", []),
+ }
defer.returnValue(
(200, send_content)
@@ -386,21 +394,24 @@ class FederationServer(FederationBase):
@log_function
def on_get_missing_events(self, origin, room_id, earliest_events,
latest_events, limit, min_depth):
- logger.info(
- "on_get_missing_events: earliest_events: %r, latest_events: %r,"
- " limit: %d, min_depth: %d",
- earliest_events, latest_events, limit, min_depth
- )
- missing_events = yield self.handler.on_get_missing_events(
- origin, room_id, earliest_events, latest_events, limit, min_depth
- )
+ with (yield self._server_linearizer.queue((origin, room_id))):
+ logger.info(
+ "on_get_missing_events: earliest_events: %r, latest_events: %r,"
+ " limit: %d, min_depth: %d",
+ earliest_events, latest_events, limit, min_depth
+ )
+ missing_events = yield self.handler.on_get_missing_events(
+ origin, room_id, earliest_events, latest_events, limit, min_depth
+ )
- if len(missing_events) < 5:
- logger.info("Returning %d events: %r", len(missing_events), missing_events)
- else:
- logger.info("Returning %d events", len(missing_events))
+ if len(missing_events) < 5:
+ logger.info(
+ "Returning %d events: %r", len(missing_events), missing_events
+ )
+ else:
+ logger.info("Returning %d events", len(missing_events))
- time_now = self._clock.time_msec()
+ time_now = self._clock.time_msec()
defer.returnValue({
"events": [ev.get_pdu_json(time_now) for ev in missing_events],
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index 8a1965f45..26fa88ae8 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -388,7 +388,7 @@ class FederationQueryAuthServlet(BaseFederationServlet):
@defer.inlineCallbacks
def on_POST(self, origin, content, query, context, event_id):
new_content = yield self.handler.on_query_auth_request(
- origin, content, event_id
+ origin, content, context, event_id
)
defer.returnValue((200, new_content))
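The shape of the pattern applied throughout this patch: queue() hands back a context manager, so requests sharing an (origin, room_id) key run one at a time while requests for other keys proceed concurrently. A sketch (the handler method is hypothetical):

```python
from twisted.internet import defer

@defer.inlineCallbacks
def on_some_request(self, origin, room_id):
    with (yield self._server_linearizer.queue((origin, room_id))):
        # Only one request per (origin, room_id) executes here at a time;
        # later requests for the same key wait on the yielded deferred.
        result = yield self.do_expensive_work(origin, room_id)  # hypothetical
    defer.returnValue(result)
```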
From 9f1800fba852314332d7e682484e456d28838619 Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Fri, 17 Jun 2016 19:14:16 +0100
Subject: [PATCH 159/414] Remove registered_users from the distributor.
The only observer was the profile handler, which used the signal to create
the user's profile. I've made it so that the profile is created within
store.register, in the same transaction that creates the user.
This required some slight changes to the registration code for upgrading
guest users, since it previously relied on the distributor swallowing errors
if the profile already existed.
---
synapse/handlers/profile.py | 7 -------
synapse/handlers/register.py | 23 ++++++++++-------------
synapse/storage/profile.py | 6 ------
synapse/storage/registration.py | 17 ++++++++++++++---
synapse/util/distributor.py | 4 ----
5 files changed, 24 insertions(+), 33 deletions(-)
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index e37409170..711a6a567 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -36,13 +36,6 @@ class ProfileHandler(BaseHandler):
"profile", self.on_profile_query
)
- distributor = hs.get_distributor()
-
- distributor.observe("registered_user", self.registered_user)
-
- def registered_user(self, user):
- return self.store.create_profile(user.localpart)
-
@defer.inlineCallbacks
def get_displayname(self, target_user):
if self.hs.is_mine(target_user):
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index e0aaefe7b..4fb12915d 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -23,7 +23,6 @@ from synapse.api.errors import (
from ._base import BaseHandler
from synapse.util.async import run_on_reactor
from synapse.http.client import CaptchaServerHttpClient
-from synapse.util.distributor import registered_user
import logging
import urllib
@@ -37,8 +36,6 @@ class RegistrationHandler(BaseHandler):
super(RegistrationHandler, self).__init__(hs)
self.auth = hs.get_auth()
- self.distributor = hs.get_distributor()
- self.distributor.declare("registered_user")
self.captcha_client = CaptchaServerHttpClient(hs)
self._next_generated_user_id = None
@@ -140,9 +137,10 @@ class RegistrationHandler(BaseHandler):
password_hash=password_hash,
was_guest=was_guest,
make_guest=make_guest,
+ create_profile_with_localpart=(
+ None if was_guest else user.localpart
+ ),
)
-
- yield registered_user(self.distributor, user)
else:
# autogen a sequential user ID
attempts = 0
@@ -160,7 +158,8 @@ class RegistrationHandler(BaseHandler):
user_id=user_id,
token=token,
password_hash=password_hash,
- make_guest=make_guest
+ make_guest=make_guest,
+ create_profile_with_localpart=user.localpart,
)
except SynapseError:
# if user id is taken, just generate another
@@ -168,7 +167,6 @@ class RegistrationHandler(BaseHandler):
user_id = None
token = None
attempts += 1
- yield registered_user(self.distributor, user)
# We used to generate default identicons here, but nowadays
# we want clients to generate their own as part of their branding
@@ -201,8 +199,8 @@ class RegistrationHandler(BaseHandler):
token=token,
password_hash="",
appservice_id=service_id,
+ create_profile_with_localpart=user.localpart,
)
- yield registered_user(self.distributor, user)
defer.returnValue((user_id, token))
@defer.inlineCallbacks
@@ -248,9 +246,9 @@ class RegistrationHandler(BaseHandler):
yield self.store.register(
user_id=user_id,
token=token,
- password_hash=None
+ password_hash=None,
+ create_profile_with_localpart=user.localpart,
)
- yield registered_user(self.distributor, user)
except Exception as e:
yield self.store.add_access_token_to_user(user_id, token)
# Ignore Registration errors
@@ -395,10 +393,9 @@ class RegistrationHandler(BaseHandler):
yield self.store.register(
user_id=user_id,
token=token,
- password_hash=None
+ password_hash=None,
+ create_profile_with_localpart=user.localpart,
)
-
- yield registered_user(self.distributor, user)
else:
yield self.store.user_delete_access_tokens(user_id=user_id)
yield self.store.add_access_token_to_user(user_id=user_id, token=token)
diff --git a/synapse/storage/profile.py b/synapse/storage/profile.py
index 26a40905a..c3c3f9ffd 100644
--- a/synapse/storage/profile.py
+++ b/synapse/storage/profile.py
@@ -17,12 +17,6 @@ from ._base import SQLBaseStore
class ProfileStore(SQLBaseStore):
- def create_profile(self, user_localpart):
- return self._simple_insert(
- table="profiles",
- values={"user_id": user_localpart},
- desc="create_profile",
- )
def get_profile_displayname(self, user_localpart):
return self._simple_select_one_onecol(
diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py
index bda84a744..3de9e0f70 100644
--- a/synapse/storage/registration.py
+++ b/synapse/storage/registration.py
@@ -76,7 +76,8 @@ class RegistrationStore(SQLBaseStore):
@defer.inlineCallbacks
def register(self, user_id, token, password_hash,
- was_guest=False, make_guest=False, appservice_id=None):
+ was_guest=False, make_guest=False, appservice_id=None,
+ create_profile_with_localpart=None):
"""Attempts to register an account.
Args:
@@ -88,6 +89,8 @@ class RegistrationStore(SQLBaseStore):
make_guest (boolean): True if the new user should be a guest,
false to add a regular user account.
appservice_id (str): The ID of the appservice registering the user.
+ create_profile_with_localpart (str): Optionally create a profile for
+ the given localpart.
Raises:
StoreError if the user_id could not be registered.
"""
@@ -99,7 +102,8 @@ class RegistrationStore(SQLBaseStore):
password_hash,
was_guest,
make_guest,
- appservice_id
+ appservice_id,
+ create_profile_with_localpart,
)
self.get_user_by_id.invalidate((user_id,))
self.is_guest.invalidate((user_id,))
@@ -112,7 +116,8 @@ class RegistrationStore(SQLBaseStore):
password_hash,
was_guest,
make_guest,
- appservice_id
+ appservice_id,
+ create_profile_with_localpart,
):
now = int(self.clock.time())
@@ -157,6 +162,12 @@ class RegistrationStore(SQLBaseStore):
(next_id, user_id, token,)
)
+ if create_profile_with_localpart:
+ txn.execute(
+ "INSERT INTO profiles(user_id) VALUES (?)",
+ (create_profile_with_localpart,)
+ )
+
@cached()
def get_user_by_id(self, user_id):
return self._simple_select_one(
diff --git a/synapse/util/distributor.py b/synapse/util/distributor.py
index d7cccc06b..e68f94ce7 100644
--- a/synapse/util/distributor.py
+++ b/synapse/util/distributor.py
@@ -27,10 +27,6 @@ import logging
logger = logging.getLogger(__name__)
-def registered_user(distributor, user):
- return distributor.fire("registered_user", user)
-
-
def user_left_room(distributor, user, room_id):
return preserve_context_over_fn(
distributor.fire,
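A condensed sketch of the new behaviour: both rows are written by one transaction function, so they succeed or roll back together. Only the profiles insert appears verbatim in the diff; the users-table columns here are assumed:

```python
def _register_txn(txn, user_id, create_profile_with_localpart):
    txn.execute(
        "INSERT INTO users(name, creation_ts) VALUES (?, ?)",  # columns assumed
        (user_id, 0),
    )
    if create_profile_with_localpart:
        # Same transaction: a failure rolls back the user row as well.
        txn.execute(
            "INSERT INTO profiles(user_id) VALUES (?)",
            (create_profile_with_localpart,),
        )
```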
From 0c13d45522c5c8c0b68322498a220969eb894ad5 Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Fri, 17 Jun 2016 19:18:53 +0100
Subject: [PATCH 160/414] Add a comment on why we don't create a profile for
upgrading users
---
synapse/handlers/register.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index 4fb12915d..0b7517221 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -138,6 +138,7 @@ class RegistrationHandler(BaseHandler):
was_guest=was_guest,
make_guest=make_guest,
create_profile_with_localpart=(
+ # If the user was a guest then they already have a profile
None if was_guest else user.localpart
),
)
From 41e4b2efeafa6e2f4cbfef4f30620b9f58b020a4 Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Fri, 17 Jun 2016 19:20:47 +0100
Subject: [PATCH 161/414] Add the create_profile method back since the tests
use it
---
synapse/storage/profile.py | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/synapse/storage/profile.py b/synapse/storage/profile.py
index c3c3f9ffd..26a40905a 100644
--- a/synapse/storage/profile.py
+++ b/synapse/storage/profile.py
@@ -17,6 +17,12 @@ from ._base import SQLBaseStore
class ProfileStore(SQLBaseStore):
+ def create_profile(self, user_localpart):
+ return self._simple_insert(
+ table="profiles",
+ values={"user_id": user_localpart},
+ desc="create_profile",
+ )
def get_profile_displayname(self, user_localpart):
return self._simple_select_one_onecol(
From 4d362a61ea3173f1be0ac58147db29acfbe1b4c3 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 20 Jun 2016 14:17:15 +0100
Subject: [PATCH 162/414] Bump version and changelog
---
CHANGES.rst | 15 +++++++++++++++
synapse/__init__.py | 2 +-
2 files changed, 16 insertions(+), 1 deletion(-)
diff --git a/CHANGES.rst b/CHANGES.rst
index 1a47aae85..ecaaa189d 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -1,3 +1,18 @@
+Changes in synapse v0.16.1 (2016-06-20)
+=======================================
+
+Bug fixes:
+
+* Fix assorted bugs in ``/preview_url`` (PR #872)
+* Fix TypeError when setting unicode passwords (PR #873)
+
+
+Performance improvements:
+
+* Turn ``use_frozen_events`` off by default (PR #877)
+* Disable responding with canonical json for federation (PR #878)
+
+
Changes in synapse v0.16.1-rc1 (2016-06-15)
===========================================
diff --git a/synapse/__init__.py b/synapse/__init__.py
index faaa86d97..3cd79b124 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -16,4 +16,4 @@
""" This is a reference implementation of a Matrix home server.
"""
-__version__ = "0.16.1-rc1"
+__version__ = "0.16.1"
From d5fb561709cf2181cd5b8fffd2cf70a3fb52e5ab Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Mon, 20 Jun 2016 17:53:38 +0100
Subject: [PATCH 163/414] Optionally make committing to postgres asynchronous.
Useful when running tests, where you don't care whether the server loses
data that it claims to have committed.
---
synapse/storage/engines/__init__.py | 2 +-
synapse/storage/engines/postgres.py | 13 ++++++++++++-
synapse/storage/engines/sqlite3.py | 2 +-
3 files changed, 14 insertions(+), 3 deletions(-)
diff --git a/synapse/storage/engines/__init__.py b/synapse/storage/engines/__init__.py
index 7bb5de1fe..338b49561 100644
--- a/synapse/storage/engines/__init__.py
+++ b/synapse/storage/engines/__init__.py
@@ -32,7 +32,7 @@ def create_engine(database_config):
if engine_class:
module = importlib.import_module(name)
- return engine_class(module)
+ return engine_class(module, database_config)
raise RuntimeError(
"Unsupported database engine '%s'" % (name,)
diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py
index c2290943b..a6ae79dfa 100644
--- a/synapse/storage/engines/postgres.py
+++ b/synapse/storage/engines/postgres.py
@@ -19,9 +19,10 @@ from ._base import IncorrectDatabaseSetup
class PostgresEngine(object):
single_threaded = False
- def __init__(self, database_module):
+ def __init__(self, database_module, database_config):
self.module = database_module
self.module.extensions.register_type(self.module.extensions.UNICODE)
+ self.synchronous_commit = database_config.get("synchronous_commit", True)
def check_database(self, txn):
txn.execute("SHOW SERVER_ENCODING")
@@ -40,9 +41,19 @@ class PostgresEngine(object):
db_conn.set_isolation_level(
self.module.extensions.ISOLATION_LEVEL_REPEATABLE_READ
)
+ # Asynchronous commit, don't wait for the server to call fsync before
+ # ending the transaction.
+ # https://www.postgresql.org/docs/current/static/wal-async-commit.html
+ if not self.synchronous_commit:
+ cursor = db_conn.cursor()
+ cursor.execute("SET synchronous_commit TO OFF")
+ cursor.close()
def is_deadlock(self, error):
if isinstance(error, self.module.DatabaseError):
+ # https://www.postgresql.org/docs/current/static/errcodes-appendix.html
+ # "40001" serialization_failure
+ # "40P01" deadlock_detected
return error.pgcode in ["40001", "40P01"]
return False
diff --git a/synapse/storage/engines/sqlite3.py b/synapse/storage/engines/sqlite3.py
index 14203aa50..755c9a1f0 100644
--- a/synapse/storage/engines/sqlite3.py
+++ b/synapse/storage/engines/sqlite3.py
@@ -21,7 +21,7 @@ import struct
class Sqlite3Engine(object):
single_threaded = True
- def __init__(self, database_module):
+ def __init__(self, database_module, database_config):
self.module = database_module
def check_database(self, txn):
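A sketch of opting out for a test database. The "name" key selects the engine as in synapse's existing database config; the other values are illustrative:

```python
from synapse.storage.engines import create_engine

database_config = {
    "name": "psycopg2",            # selects PostgresEngine
    "synchronous_commit": False,   # engine issues SET synchronous_commit TO OFF
    "args": {"database": "synapse_test"},  # connection args, illustrative
}
engine = create_engine(database_config)
```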
From 6b40e4f52ad4bc0cbab4a0178d3f033d049d84fa Mon Sep 17 00:00:00 2001
From: David Baker
Date: Tue, 21 Jun 2016 11:37:56 +0100
Subject: [PATCH 164/414] Fix substitution failure in mail template
---
res/templates/notif_mail.html | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/res/templates/notif_mail.html b/res/templates/notif_mail.html
index 8aee68b59..535bea764 100644
--- a/res/templates/notif_mail.html
+++ b/res/templates/notif_mail.html
@@ -36,7 +36,7 @@
Sending email at {{ reason.now|format_ts("%c") }} due to activity in room {{ reason.room_name }} because
an event was received at {{ reason.received_at|format_ts("%c") }}
- which is more than {{ "%.1f"|format(reason.delay_before_mail_ms / (60*1000)) }} (delay_before_mail_ms) mins ago,
+ which is more than {{ "%.1f"|format(reason.delay_before_mail_ms / (60*1000)) }} ({{ reason.delay_before_mail_ms }}) mins ago,
{% if reason.last_sent_ts %}
and the last time we sent a mail for this room was {{ reason.last_sent_ts|format_ts("%c") }},
which is more than {{ "%.1f"|format(reason.throttle_ms / (60*1000)) }} (current throttle_ms) mins ago.
From 13e334506cf9093d2872ede95f1527c0c42d71fd Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Tue, 21 Jun 2016 11:47:39 +0100
Subject: [PATCH 165/414] Remove the legacy v0 content upload API.
The existing content can still be downloaded. The last upload to the
matrix.org server was in January 2015, so it is probably safe to remove
the upload API.
---
synapse/app/homeserver.py | 3 +-
synapse/config/server.py | 20 ----
synapse/rest/media/v0/content_repository.py | 112 +-------------------
3 files changed, 3 insertions(+), 132 deletions(-)
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 40ffd9bf0..9c2dd3295 100755
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -147,7 +147,7 @@ class SynapseHomeServer(HomeServer):
MEDIA_PREFIX: media_repo,
LEGACY_MEDIA_PREFIX: media_repo,
CONTENT_REPO_PREFIX: ContentRepoResource(
- self, self.config.uploads_path, self.auth, self.content_addr
+ self, self.config.uploads_path
),
})
@@ -301,7 +301,6 @@ def setup(config_options):
db_config=config.database_config,
tls_server_context_factory=tls_server_context_factory,
config=config,
- content_addr=config.content_addr,
version_string=version_string,
database_engine=database_engine,
)
diff --git a/synapse/config/server.py b/synapse/config/server.py
index 7840dc3ad..d7e6f2051 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -107,26 +107,6 @@ class ServerConfig(Config):
]
})
- # Attempt to guess the content_addr for the v0 content repository
- content_addr = config.get("content_addr")
- if not content_addr:
- for listener in self.listeners:
- if listener["type"] == "http" and not listener.get("tls", False):
- unsecure_port = listener["port"]
- break
- else:
- raise RuntimeError("Could not determine 'content_addr'")
-
- host = self.server_name
- if ':' not in host:
- host = "%s:%d" % (host, unsecure_port)
- else:
- host = host.split(':')[0]
- host = "%s:%d" % (host, unsecure_port)
- content_addr = "http://%s" % (host,)
-
- self.content_addr = content_addr
-
def default_config(self, server_name, **kwargs):
if ":" in server_name:
bind_port = int(server_name.split(":")[1])
diff --git a/synapse/rest/media/v0/content_repository.py b/synapse/rest/media/v0/content_repository.py
index d9fc045fc..956bd5da7 100644
--- a/synapse/rest/media/v0/content_repository.py
+++ b/synapse/rest/media/v0/content_repository.py
@@ -15,14 +15,12 @@
from synapse.http.server import respond_with_json_bytes, finish_request
-from synapse.util.stringutils import random_string
from synapse.api.errors import (
- cs_exception, SynapseError, CodeMessageException, Codes, cs_error
+ Codes, cs_error
)
from twisted.protocols.basic import FileSender
from twisted.web import server, resource
-from twisted.internet import defer
import base64
import simplejson as json
@@ -50,64 +48,10 @@ class ContentRepoResource(resource.Resource):
"""
isLeaf = True
- def __init__(self, hs, directory, auth, external_addr):
+ def __init__(self, hs, directory):
resource.Resource.__init__(self)
self.hs = hs
self.directory = directory
- self.auth = auth
- self.external_addr = external_addr.rstrip('/')
- self.max_upload_size = hs.config.max_upload_size
-
- if not os.path.isdir(self.directory):
- os.mkdir(self.directory)
- logger.info("ContentRepoResource : Created %s directory.",
- self.directory)
-
- @defer.inlineCallbacks
- def map_request_to_name(self, request):
- # auth the user
- requester = yield self.auth.get_user_by_req(request)
-
- # namespace all file uploads on the user
- prefix = base64.urlsafe_b64encode(
- requester.user.to_string()
- ).replace('=', '')
-
- # use a random string for the main portion
- main_part = random_string(24)
-
- # suffix with a file extension if we can make one. This is nice to
- # provide a hint to clients on the file information. We will also reuse
- # this info to spit back the content type to the client.
- suffix = ""
- if request.requestHeaders.hasHeader("Content-Type"):
- content_type = request.requestHeaders.getRawHeaders(
- "Content-Type")[0]
- suffix = "." + base64.urlsafe_b64encode(content_type)
- if (content_type.split("/")[0].lower() in
- ["image", "video", "audio"]):
- file_ext = content_type.split("/")[-1]
- # be a little paranoid and only allow a-z
- file_ext = re.sub("[^a-z]", "", file_ext)
- suffix += "." + file_ext
-
- file_name = prefix + main_part + suffix
- file_path = os.path.join(self.directory, file_name)
- logger.info("User %s is uploading a file to path %s",
- request.user.user_id.to_string(),
- file_path)
-
- # keep trying to make a non-clashing file, with a sensible max attempts
- attempts = 0
- while os.path.exists(file_path):
- main_part = random_string(24)
- file_name = prefix + main_part + suffix
- file_path = os.path.join(self.directory, file_name)
- attempts += 1
- if attempts > 25: # really? Really?
- raise SynapseError(500, "Unable to create file.")
-
- defer.returnValue(file_path)
def render_GET(self, request):
# no auth here on purpose, to allow anyone to view, even across home
@@ -155,58 +99,6 @@ class ContentRepoResource(resource.Resource):
return server.NOT_DONE_YET
- def render_POST(self, request):
- self._async_render(request)
- return server.NOT_DONE_YET
-
def render_OPTIONS(self, request):
respond_with_json_bytes(request, 200, {}, send_cors=True)
return server.NOT_DONE_YET
-
- @defer.inlineCallbacks
- def _async_render(self, request):
- try:
- # TODO: The checks here are a bit late. The content will have
- # already been uploaded to a tmp file at this point
- content_length = request.getHeader("Content-Length")
- if content_length is None:
- raise SynapseError(
- msg="Request must specify a Content-Length", code=400
- )
- if int(content_length) > self.max_upload_size:
- raise SynapseError(
- msg="Upload request body is too large",
- code=413,
- )
-
- fname = yield self.map_request_to_name(request)
-
- # TODO I have a suspicious feeling this is just going to block
- with open(fname, "wb") as f:
- f.write(request.content.read())
-
- # FIXME (erikj): These should use constants.
- file_name = os.path.basename(fname)
- # FIXME: we can't assume what the repo's public mounted path is
- # ...plus self-signed SSL won't work to remote clients anyway
- # ...and we can't assume that it's SSL anyway, as we might want to
- # serve it via the non-SSL listener...
- url = "%s/_matrix/content/%s" % (
- self.external_addr, file_name
- )
-
- respond_with_json_bytes(request, 200,
- json.dumps({"content_token": url}),
- send_cors=True)
-
- except CodeMessageException as e:
- logger.exception(e)
- respond_with_json_bytes(request, e.code,
- json.dumps(cs_exception(e)))
- except Exception as e:
- logger.error("Failed to store file: %s" % e)
- respond_with_json_bytes(
- request,
- 500,
- json.dumps({"error": "Internal server error"}),
- send_cors=True)
From 5cc7564c5c56880ff98af934b9169eac4fe895d3 Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Tue, 21 Jun 2016 16:38:05 +0100
Subject: [PATCH 166/414] Optionally start or stop workers in synctl.
Optionally start or stop an individual worker by passing -w with
the path to the worker config.
Optionally start or stop every worker and the main synapse by
passing -a with a path to a directory containing worker configs.
The "-w" is intended to be used to bounce individual workers proceses.
THe "-a" is intended for when you want to restart all the workers
simultaneuously, for example when performing database upgrades.
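For example (paths hypothetical), `synctl restart -w /etc/synapse/synchrotron.yaml`
bounces a single worker, while `synctl restart -a /etc/synapse/workers`
restarts every worker alongside the main synapse process.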
---
synapse/app/synctl.py | 186 +++++++++++++++++++++++++++++++++++-------
1 file changed, 155 insertions(+), 31 deletions(-)
diff --git a/synapse/app/synctl.py b/synapse/app/synctl.py
index 39f4bf6e5..bb41962d4 100755
--- a/synapse/app/synctl.py
+++ b/synapse/app/synctl.py
@@ -14,11 +14,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import sys
+import argparse
+import collections
+import glob
import os
import os.path
-import subprocess
import signal
+import subprocess
+import sys
import yaml
SYNAPSE = ["python", "-B", "-m", "synapse.app.homeserver"]
@@ -28,60 +31,181 @@ RED = "\x1b[1;31m"
NORMAL = "\x1b[m"
+def write(message, colour=NORMAL, stream=sys.stdout):
+ if colour == NORMAL:
+ stream.write(message + "\n")
+ else:
+ stream.write(colour + message + NORMAL + "\n")
+
+
def start(configfile):
- print ("Starting ...")
+ write("Starting ...")
args = SYNAPSE
args.extend(["--daemonize", "-c", configfile])
try:
subprocess.check_call(args)
- print (GREEN + "started" + NORMAL)
+ write("started synapse.app.homeserver(%r)" % (configfile,), colour=GREEN)
except subprocess.CalledProcessError as e:
- print (
- RED +
- "error starting (exit code: %d); see above for logs" % e.returncode +
- NORMAL
+ write(
+ "error starting (exit code: %d); see above for logs" % e.returncode,
+ colour=RED,
)
-def stop(pidfile):
+def start_worker(app, configfile, worker_configfile):
+ args = [
+ "python", "-B",
+ "-m", app,
+ "-c", configfile,
+ "-c", worker_configfile
+ ]
+
+ try:
+ subprocess.check_call(args)
+ write("started %s(%r)" % (app, worker_configfile), colour=GREEN)
+ except subprocess.CalledProcessError as e:
+ write(
+ "error starting %s(%r) (exit code: %d); see above for logs" % (
+ app, worker_configfile, e.returncode,
+ ),
+ colour=RED,
+ )
+
+
+def stop(pidfile, app):
if os.path.exists(pidfile):
pid = int(open(pidfile).read())
os.kill(pid, signal.SIGTERM)
- print (GREEN + "stopped" + NORMAL)
+ write("stopped %s" % (app,), colour=GREEN)
+
+
+Worker = collections.namedtuple("Worker", [
+ "app", "configfile", "pidfile", "cache_factor"
+])
def main():
- configfile = sys.argv[2] if len(sys.argv) == 3 else "homeserver.yaml"
- if not os.path.exists(configfile):
- sys.stderr.write(
- "No config file found\n"
- "To generate a config file, run '%s -c %s --generate-config"
- " --server-name='\n" % (
- " ".join(SYNAPSE), configfile
- )
+ parser = argparse.ArgumentParser()
+
+ parser.add_argument(
+ "action",
+ choices=["start", "stop", "restart"],
+ help="whether to start, stop or restart the synapse",
+ )
+ parser.add_argument(
+ "configfile",
+ nargs="?",
+ default="homeserver.yaml",
+ help="the homeserver config file, defaults to homserver.yaml",
+ )
+ parser.add_argument(
+ "-w", "--worker",
+ metavar="WORKERCONFIG",
+ help="start or stop a single worker",
+ )
+ parser.add_argument(
+ "-a", "--all-processes",
+ metavar="WORKERCONFIGDIR",
+ help="start or stop all the workers in the given directory"
+ " and the main synapse process",
+ )
+
+ options = parser.parse_args()
+
+ if options.worker and options.all_processes:
+ write(
+ 'Cannot use "--worker" with "--all-processes"',
+ stream=sys.stderr
)
sys.exit(1)
- config = yaml.load(open(configfile))
+ configfile = options.configfile
+
+ if not os.path.exists(configfile):
+ write(
+ "No config file found\n"
+ "To generate a config file, run '%s -c %s --generate-config"
+ " --server-name='\n" % (
+ " ".join(SYNAPSE), options.configfile
+ ),
+ stream=sys.stderr,
+ )
+ sys.exit(1)
+
+ with open(configfile) as stream:
+ config = yaml.load(stream)
+
pidfile = config["pid_file"]
- cache_factor = config.get("synctl_cache_factor", None)
+ cache_factor = config.get("synctl_cache_factor")
+ start_stop_synapse = True
if cache_factor:
os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor)
- action = sys.argv[1] if sys.argv[1:] else "usage"
- if action == "start":
- start(configfile)
- elif action == "stop":
- stop(pidfile)
- elif action == "restart":
- stop(pidfile)
- start(configfile)
- else:
- sys.stderr.write("Usage: %s [start|stop|restart] [configfile]\n" % (sys.argv[0],))
- sys.exit(1)
+ worker_configfiles = []
+ if options.worker:
+ start_stop_synapse = False
+ worker_configfile = options.worker
+ if not os.path.exists(worker_configfile):
+ write(
+ "No worker config found at %r" % (worker_configfile,),
+ stream=sys.stderr,
+ )
+ sys.exit(1)
+ worker_configfiles.append(worker_configfile)
+
+ if options.all_processes:
+ worker_configdir = options.all_processes
+ if not os.path.isdir(worker_configdir):
+ write(
+ "No worker config directory found at %r" % (worker_configdir,),
+ stream=sys.stderr,
+ )
+ sys.exit(1)
+ worker_configfiles.extend(sorted(glob.glob(
+ os.path.join(worker_configdir, "*.yaml")
+ )))
+
+ workers = []
+ for worker_configfile in worker_configfiles:
+ with open(worker_configfile) as stream:
+ worker_config = yaml.load(stream)
+ worker_app = worker_config["worker_app"]
+ worker_pidfile = worker_config["worker_pid_file"]
+ worker_daemonize = worker_config["worker_daemonize"]
+ assert worker_daemonize # TODO print something more user friendly
+ worker_cache_factor = worker_config.get("synctl_cache_factor")
+ workers.append(Worker(
+ worker_app, worker_configfile, worker_pidfile, worker_cache_factor,
+ ))
+
+ action = options.action
+
+ if action == "stop" or action == "restart":
+ for worker in workers:
+ stop(worker.pidfile, worker.app)
+
+ if start_stop_synapse:
+ stop(pidfile, "synapse.app.homeserver")
+
+ # TODO: Wait for synapse to actually shutdown before starting it again
+
+ if action == "start" or action == "restart":
+ if start_stop_synapse:
+ start(configfile)
+
+ for worker in workers:
+ if worker.cache_factor:
+ os.environ["SYNAPSE_CACHE_FACTOR"] = str(worker.cache_factor)
+
+ start_worker(worker.app, configfile, worker.configfile)
+
+ if cache_factor:
+ os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor)
+ else:
+ os.environ.pop("SYNAPSE_CACHE_FACTOR", None)
if __name__ == "__main__":
From 0a32208e5dde4980a5962f17e9b27f2e28e1f3f1 Mon Sep 17 00:00:00 2001
From: Martin Weinelt
Date: Mon, 6 Jun 2016 02:05:57 +0200
Subject: [PATCH 167/414] Rework ldap integration with ldap3
Use the pure-python ldap3 library, which eliminates the need for a
system dependency.
Offer both a `search` and `simple_bind` mode, for more sophisticated
ldap scenarios.
- `search` tries to find a matching DN within the `user_base` while
employing the `user_filter`, then tries the bind when a single
matching DN was found.
- `simple_bind` tries the bind against a specific DN by combining the
localpart and `user_base`
Offer support for STARTTLS on a plain connection.
The configuration was changed to reflect these new possibilities.
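As a rough illustration of the two modes (a minimal sketch against the ldap3
API this patch adopts; the URI, DNs and passwords are placeholder assumptions):

    import ldap3

    server = ldap3.Server("ldap://ldap.example.com:389")

    # simple_bind: build the DN directly from the localpart and the base
    conn = ldap3.Connection(server, "cn=alice,ou=users,dc=example,dc=com",
                            "alices-password")
    conn.start_tls()  # optional STARTTLS upgrade of the plain connection
    authenticated = conn.bind()

    # search: bind with service credentials, find the matching DN, re-bind
    conn = ldap3.Connection(server, "cn=search,dc=example,dc=com",
                            "service-password")
    conn.bind()
    conn.search("ou=users,dc=example,dc=com", "(cn=alice)")
    if len(conn.response) == 1:
        user_dn = conn.response[0]["dn"]
        conn = ldap3.Connection(server, user_dn, "alices-password",
                                auto_bind=True)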
Signed-off-by: Martin Weinelt
---
synapse/config/ldap.py | 102 +++++++++++-----
synapse/handlers/auth.py | 211 +++++++++++++++++++++++++++------
synapse/python_dependencies.py | 3 +
tests/utils.py | 1 +
4 files changed, 253 insertions(+), 64 deletions(-)
diff --git a/synapse/config/ldap.py b/synapse/config/ldap.py
index 9c14593a9..d83c2230b 100644
--- a/synapse/config/ldap.py
+++ b/synapse/config/ldap.py
@@ -13,40 +13,88 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from ._base import Config
+from ._base import Config, ConfigError
+
+
+MISSING_LDAP3 = (
+ "Missing ldap3 library. This is required for LDAP Authentication."
+)
+
+
+class LDAPMode(object):
+ SIMPLE = "simple",
+ SEARCH = "search",
+
+ LIST = (SIMPLE, SEARCH)
class LDAPConfig(Config):
def read_config(self, config):
- ldap_config = config.get("ldap_config", None)
- if ldap_config:
- self.ldap_enabled = ldap_config.get("enabled", False)
- self.ldap_server = ldap_config["server"]
- self.ldap_port = ldap_config["port"]
- self.ldap_tls = ldap_config.get("tls", False)
- self.ldap_search_base = ldap_config["search_base"]
- self.ldap_search_property = ldap_config["search_property"]
- self.ldap_email_property = ldap_config["email_property"]
- self.ldap_full_name_property = ldap_config["full_name_property"]
- else:
- self.ldap_enabled = False
- self.ldap_server = None
- self.ldap_port = None
- self.ldap_tls = False
- self.ldap_search_base = None
- self.ldap_search_property = None
- self.ldap_email_property = None
- self.ldap_full_name_property = None
+ ldap_config = config.get("ldap_config", {})
+
+ self.ldap_enabled = ldap_config.get("enabled", False)
+
+ if self.ldap_enabled:
+ # verify dependencies are available
+ try:
+ import ldap3
+ ldap3 # to stop unused lint
+ except ImportError:
+ raise ConfigError(MISSING_LDAP3)
+
+ self.ldap_mode = LDAPMode.SIMPLE
+
+ # verify config sanity
+ self.require_keys(ldap_config, [
+ "uri",
+ "base",
+ "attributes",
+ ])
+
+ self.ldap_uri = ldap_config["uri"]
+ self.ldap_start_tls = ldap_config.get("start_tls", False)
+ self.ldap_base = ldap_config["base"]
+ self.ldap_attributes = ldap_config["attributes"]
+
+ if "bind_dn" in ldap_config:
+ self.ldap_mode = LDAPMode.SEARCH
+ self.require_keys(ldap_config, [
+ "bind_dn",
+ "bind_password",
+ ])
+
+ self.ldap_bind_dn = ldap_config["bind_dn"]
+ self.ldap_bind_password = ldap_config["bind_password"]
+ self.ldap_filter = ldap_config.get("filter", None)
+
+ # verify attribute lookup
+ self.require_keys(ldap_config['attributes'], [
+ "uid",
+ "name",
+ "mail",
+ ])
+
+ def require_keys(self, config, required):
+ missing = [key for key in required if key not in config]
+ if missing:
+ raise ConfigError(
+ "LDAP enabled but missing required config values: {}".format(
+ ", ".join(missing)
+ )
+ )
def default_config(self, **kwargs):
return """\
# ldap_config:
# enabled: true
- # server: "ldap://localhost"
- # port: 389
- # tls: false
- # search_base: "ou=Users,dc=example,dc=com"
- # search_property: "cn"
- # email_property: "email"
- # full_name_property: "givenName"
+ # uri: "ldap://ldap.example.com:389"
+ # start_tls: true
+ # base: "ou=users,dc=example,dc=com"
+ # attributes:
+ # uid: "cn"
+ # mail: "email"
+ # name: "givenName"
+ # #bind_dn:
+ # #bind_password:
+ # #filter: "(objectClass=posixAccount)"
"""
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index b38f81e99..968095c14 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -20,6 +20,7 @@ from synapse.api.constants import LoginType
from synapse.types import UserID
from synapse.api.errors import AuthError, LoginError, Codes, StoreError, SynapseError
from synapse.util.async import run_on_reactor
+from synapse.config.ldap import LDAPMode
from twisted.web.client import PartialDownloadError
@@ -28,6 +29,12 @@ import bcrypt
import pymacaroons
import simplejson
+try:
+ import ldap3
+except ImportError:
+ ldap3 = None
+ pass
+
import synapse.util.stringutils as stringutils
@@ -50,17 +57,20 @@ class AuthHandler(BaseHandler):
self.INVALID_TOKEN_HTTP_STATUS = 401
self.ldap_enabled = hs.config.ldap_enabled
- self.ldap_server = hs.config.ldap_server
- self.ldap_port = hs.config.ldap_port
- self.ldap_tls = hs.config.ldap_tls
- self.ldap_search_base = hs.config.ldap_search_base
- self.ldap_search_property = hs.config.ldap_search_property
- self.ldap_email_property = hs.config.ldap_email_property
- self.ldap_full_name_property = hs.config.ldap_full_name_property
-
- if self.ldap_enabled is True:
- import ldap
- logger.info("Import ldap version: %s", ldap.__version__)
+ if self.ldap_enabled:
+ if not ldap3:
+ raise RuntimeError(
+ 'Missing ldap3 library. This is required for LDAP Authentication.'
+ )
+ self.ldap_mode = hs.config.ldap_mode
+ self.ldap_uri = hs.config.ldap_uri
+ self.ldap_start_tls = hs.config.ldap_start_tls
+ self.ldap_base = hs.config.ldap_base
+ self.ldap_filter = hs.config.ldap_filter
+ self.ldap_attributes = hs.config.ldap_attributes
+ if self.ldap_mode == LDAPMode.SEARCH:
+ self.ldap_bind_dn = hs.config.ldap_bind_dn
+ self.ldap_bind_password = hs.config.ldap_bind_password
self.hs = hs # FIXME better possibility to access registrationHandler later?
@@ -452,40 +462,167 @@ class AuthHandler(BaseHandler):
@defer.inlineCallbacks
def _check_ldap_password(self, user_id, password):
- if not self.ldap_enabled:
- logger.debug("LDAP not configured")
+ """ Attempt to authenticate a user against an LDAP Server
+ and register an account if none exists.
+
+ Returns:
+ True if authentication against LDAP was successful
+ """
+
+ if not ldap3 or not self.ldap_enabled:
defer.returnValue(False)
- import ldap
+ if self.ldap_mode not in LDAPMode.LIST:
+ raise RuntimeError(
+ 'Invalid ldap mode specified: {mode}'.format(
+ mode=self.ldap_mode
+ )
+ )
- logger.info("Authenticating %s with LDAP" % user_id)
try:
- ldap_url = "%s:%s" % (self.ldap_server, self.ldap_port)
- logger.debug("Connecting LDAP server at %s" % ldap_url)
- l = ldap.initialize(ldap_url)
- if self.ldap_tls:
- logger.debug("Initiating TLS")
- self._connection.start_tls_s()
+ server = ldap3.Server(self.ldap_uri)
+ logger.debug(
+ "Attempting ldap connection with %s",
+ self.ldap_uri
+ )
- local_name = UserID.from_string(user_id).localpart
-
- dn = "%s=%s, %s" % (
- self.ldap_search_property,
- local_name,
- self.ldap_search_base)
- logger.debug("DN for LDAP authentication: %s" % dn)
-
- l.simple_bind_s(dn.encode('utf-8'), password.encode('utf-8'))
-
- if not (yield self.does_user_exist(user_id)):
- handler = self.hs.get_handlers().registration_handler
- user_id, access_token = (
- yield handler.register(localpart=local_name)
+ localpart = UserID.from_string(user_id).localpart
+ if self.ldap_mode == LDAPMode.SIMPLE:
+ # bind with the local user's ldap credentials
+ bind_dn = "{prop}={value},{base}".format(
+ prop=self.ldap_attributes['uid'],
+ value=localpart,
+ base=self.ldap_base
+ )
+ conn = ldap3.Connection(server, bind_dn, password)
+ logger.debug(
+ "Established ldap connection in simple mode: %s",
+ conn
)
+ if self.ldap_start_tls:
+ conn.start_tls()
+ logger.debug(
+ "Upgraded ldap connection in simple mode through StartTLS: %s",
+ conn
+ )
+
+ conn.bind()
+
+ elif self.ldap_mode == LDAPMode.SEARCH:
+ # connect with preconfigured credentials and search for local user
+ conn = ldap3.Connection(
+ server,
+ self.ldap_bind_dn,
+ self.ldap_bind_password
+ )
+ logger.debug(
+ "Established ldap connection in search mode: %s",
+ conn
+ )
+
+ if self.ldap_start_tls:
+ conn.start_tls()
+ logger.debug(
+ "Upgraded ldap connection in search mode through StartTLS: %s",
+ conn
+ )
+
+ conn.bind()
+
+ # find matching dn
+ query = "({prop}={value})".format(
+ prop=self.ldap_attributes['uid'],
+ value=localpart
+ )
+ if self.ldap_filter:
+ query = "(&{query}{filter})".format(
+ query=query,
+ filter=self.ldap_filter
+ )
+ logger.debug("ldap search filter: %s", query)
+ result = conn.search(self.ldap_base, query)
+
+ if result and len(conn.response) == 1:
+ # found exactly one result
+ user_dn = conn.response[0]['dn']
+ logger.debug('ldap search found dn: %s', user_dn)
+
+ # unbind and reconnect, rebind with found dn
+ conn.unbind()
+ conn = ldap3.Connection(
+ server,
+ user_dn,
+ password,
+ auto_bind=True
+ )
+ else:
+ # found 0 or > 1 results, abort!
+ logger.warn(
+ "ldap search returned unexpected (%d!=1) amount of results",
+ len(conn.response)
+ )
+ defer.returnValue(False)
+
+ logger.info(
+ "User authenticated against ldap server: %s",
+ conn
+ )
+
+ # check for existing account, if none exists, create one
+ if not (yield self.does_user_exist(user_id)):
+ # query user metadata for account creation
+ query = "({prop}={value})".format(
+ prop=self.ldap_attributes['uid'],
+ value=localpart
+ )
+
+ if self.ldap_mode == LDAPMode.SEARCH and self.ldap_filter:
+ query = "(&{filter}{user_filter})".format(
+ filter=query,
+ user_filter=self.ldap_filter
+ )
+ logger.debug("ldap registration filter: %s", query)
+
+ result = conn.search(
+ search_base=self.ldap_base,
+ search_filter=query,
+ attributes=[
+ self.ldap_attributes['name'],
+ self.ldap_attributes['mail']
+ ]
+ )
+
+ if len(conn.response) == 1:
+ attrs = conn.response[0]['attributes']
+ mail = attrs[self.ldap_attributes['mail']][0]
+ name = attrs[self.ldap_attributes['name']][0]
+
+ # create account
+ registration_handler = self.hs.get_handlers().registration_handler
+ user_id, access_token = (
+ yield registration_handler.register(localpart=localpart)
+ )
+
+ # TODO: bind email, set displayname with data from ldap directory
+
+ logger.info(
+ "ldap registration successful: %d: %s (%s, %)",
+ user_id,
+ localpart,
+ name,
+ mail
+ )
+ else:
+ logger.warn(
+ "ldap registration failed: unexpected (%d!=1) amount of results",
+ len(conn.response)
+ )
+ defer.returnValue(False)
+
defer.returnValue(True)
- except ldap.LDAPError, e:
- logger.warn("LDAP error: %s", e)
+ except ldap3.core.exceptions.LDAPException as e:
+ logger.warn("Error during ldap authentication: %s", e)
defer.returnValue(False)
@defer.inlineCallbacks
diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py
index e0a7a1977..e024cec0a 100644
--- a/synapse/python_dependencies.py
+++ b/synapse/python_dependencies.py
@@ -48,6 +48,9 @@ CONDITIONAL_REQUIREMENTS = {
"Jinja2>=2.8": ["Jinja2>=2.8"],
"bleach>=1.4.2": ["bleach>=1.4.2"],
},
+ "ldap": {
+ "ldap3>=1.0": ["ldap3>=1.0"],
+ },
}
diff --git a/tests/utils.py b/tests/utils.py
index 6e41ae1ff..ed547bc39 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -56,6 +56,7 @@ def setup_test_homeserver(name="test", datastore=None, config=None, **kargs):
config.use_frozen_dicts = True
config.database_config = {"name": "sqlite3"}
+ config.ldap_enabled = False
if "clock" not in kargs:
kargs["clock"] = MockClock()
From 3a4120e49a15f27368a231b32245e32a4ccadb06 Mon Sep 17 00:00:00 2001
From: David Baker
Date: Wed, 22 Jun 2016 17:47:18 +0100
Subject: [PATCH 168/414] Put most recent 20 messages in notif
Fixes https://github.com/vector-im/vector-web/issues/1648
---
synapse/storage/event_push_actions.py | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py
index 940e11d7a..5aaaf4b19 100644
--- a/synapse/storage/event_push_actions.py
+++ b/synapse/storage/event_push_actions.py
@@ -152,7 +152,7 @@ class EventPushActionsStore(SQLBaseStore):
if max_stream_ordering is not None:
sql += " AND ep.stream_ordering <= ?"
args.append(max_stream_ordering)
- sql += " ORDER BY ep.stream_ordering ASC LIMIT ?"
+ sql += " ORDER BY ep.stream_ordering DESC LIMIT ?"
args.append(limit)
txn.execute(sql, args)
return txn.fetchall()
@@ -176,7 +176,8 @@ class EventPushActionsStore(SQLBaseStore):
if max_stream_ordering is not None:
sql += " AND ep.stream_ordering <= ?"
args.append(max_stream_ordering)
- sql += " ORDER BY ep.stream_ordering ASC"
+ sql += " ORDER BY ep.stream_ordering DESC LIMIT ?"
+ args.append(limit)
txn.execute(sql, args)
return txn.fetchall()
no_read_receipt = yield self.runInteraction(
@@ -191,7 +192,7 @@ class EventPushActionsStore(SQLBaseStore):
"actions": json.loads(row[3]),
"received_ts": row[4],
} for row in after_read_receipt + no_read_receipt
- ])
+ ][0:limit])
@defer.inlineCallbacks
def get_time_of_last_push_action_before(self, stream_ordering):
From f73fdb04a6cc361e9396c9b22f81544ecfb895bd Mon Sep 17 00:00:00 2001
From: David Baker
Date: Wed, 22 Jun 2016 17:51:40 +0100
Subject: [PATCH 169/414] Style
---
synapse/storage/event_push_actions.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py
index 5aaaf4b19..2e85cf5f5 100644
--- a/synapse/storage/event_push_actions.py
+++ b/synapse/storage/event_push_actions.py
@@ -192,7 +192,7 @@ class EventPushActionsStore(SQLBaseStore):
"actions": json.loads(row[3]),
"received_ts": row[4],
} for row in after_read_receipt + no_read_receipt
- ][0:limit])
+ ][:limit])
@defer.inlineCallbacks
def get_time_of_last_push_action_before(self, stream_ordering):
From b5fb7458d501d3e0e24062b2a479232246f13d4e Mon Sep 17 00:00:00 2001
From: David Baker
Date: Wed, 22 Jun 2016 18:07:14 +0100
Subject: [PATCH 170/414] Actually we need to order these properly
otherwise we'll end up returning the wrong 20
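The fix is easiest to see in isolation (a minimal sketch; the dict fields
mirror those built in the diff below):

    def merge_notifs(after_read_receipt, no_read_receipt, limit):
        # Each query's rows are ordered on their own, but the concatenation
        # is not, so sort the combined list by received_ts, newest first.
        notifs = after_read_receipt + no_read_receipt
        notifs.sort(key=lambda r: -(r["received_ts"] or 0))
        return notifs[:limit]

    a = [{"event_id": "$a", "received_ts": 300}]
    b = [{"event_id": "$b", "received_ts": 500},
         {"event_id": "$c", "received_ts": None}]
    print(merge_notifs(a, b, 20))  # $b first, then $a, then $c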
---
synapse/storage/event_push_actions.py | 14 ++++++++++++--
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py
index 2e85cf5f5..5f1b6f63a 100644
--- a/synapse/storage/event_push_actions.py
+++ b/synapse/storage/event_push_actions.py
@@ -184,7 +184,8 @@ class EventPushActionsStore(SQLBaseStore):
"get_unread_push_actions_for_user_in_range", get_no_receipt
)
- defer.returnValue([
+ # Make a list of dicts from the two sets of results.
+ notifs = [
{
"event_id": row[0],
"room_id": row[1],
@@ -192,7 +193,16 @@ class EventPushActionsStore(SQLBaseStore):
"actions": json.loads(row[3]),
"received_ts": row[4],
} for row in after_read_receipt + no_read_receipt
- ][:limit])
+ ]
+
+ # Now sort it so it's ordered correctly, since currently it will
+ # contain results from the first query, correctly ordered, followed
+ # by results from the second query, but we want them all ordered
+ # by received_ts
+ notifs.sort(key=lambda r: -(r['received_ts'] or 0))
+
+ # Now return the first `limit`
+ defer.returnValue(notifs[:limit])
@defer.inlineCallbacks
def get_time_of_last_push_action_before(self, stream_ordering):
From 870c45913ef17584a65d0acf98336f1ddd6bf1c0 Mon Sep 17 00:00:00 2001
From: David Baker
Date: Fri, 24 Jun 2016 11:41:11 +0100
Subject: [PATCH 171/414] Use similar naming we use in email notifs for push
Fixes https://github.com/vector-im/vector-web/issues/1654
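The shape of the context handed to the pusher changes roughly like this
(illustrative values only):

    # before: room name and aliases were fetched and sent separately
    ctx = {"name": "My Room", "aliases": ["#room:example.org"]}
    # after: one human-readable name plus the sender's display name
    ctx = {"name": "My Room", "sender_display_name": "Alice"}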
---
synapse/push/httppusher.py | 9 +++--
synapse/push/push_tools.py | 33 ++++++++--------
synapse/replication/slave/storage/events.py | 8 ----
synapse/storage/events.py | 7 ----
synapse/storage/room.py | 43 ---------------------
synapse/util/presentable_names.py | 5 ++-
6 files changed, 26 insertions(+), 79 deletions(-)
diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py
index 399280484..2acc6cc21 100644
--- a/synapse/push/httppusher.py
+++ b/synapse/push/httppusher.py
@@ -38,6 +38,7 @@ class HttpPusher(object):
self.hs = hs
self.store = self.hs.get_datastore()
self.clock = self.hs.get_clock()
+ self.state_handler = self.hs.get_state_handler()
self.user_id = pusherdict['user_name']
self.app_id = pusherdict['app_id']
self.app_display_name = pusherdict['app_display_name']
@@ -237,7 +238,9 @@ class HttpPusher(object):
@defer.inlineCallbacks
def _build_notification_dict(self, event, tweaks, badge):
- ctx = yield push_tools.get_context_for_event(self.hs.get_datastore(), event)
+ ctx = yield push_tools.get_context_for_event(
+ self.state_handler, event, self.user_id
+ )
d = {
'notification': {
@@ -269,8 +272,8 @@ class HttpPusher(object):
if 'content' in event:
d['notification']['content'] = event.content
- if len(ctx['aliases']):
- d['notification']['room_alias'] = ctx['aliases'][0]
+ # We no longer send aliases separately, instead, we send the human
+ # readable name of the room, which may be an alias.
if 'sender_display_name' in ctx and len(ctx['sender_display_name']) > 0:
d['notification']['sender_display_name'] = ctx['sender_display_name']
if 'name' in ctx and len(ctx['name']) > 0:
diff --git a/synapse/push/push_tools.py b/synapse/push/push_tools.py
index 89a3b5e90..d91ca34a8 100644
--- a/synapse/push/push_tools.py
+++ b/synapse/push/push_tools.py
@@ -14,7 +14,9 @@
# limitations under the License.
from twisted.internet import defer
-
+from synapse.util.presentable_names import (
+ calculate_room_name, name_from_member_event
+)
@defer.inlineCallbacks
def get_badge_count(store, user_id):
@@ -45,24 +47,21 @@ def get_badge_count(store, user_id):
@defer.inlineCallbacks
-def get_context_for_event(store, ev):
- name_aliases = yield store.get_room_name_and_aliases(
- ev.room_id
- )
+def get_context_for_event(state_handler, ev, user_id):
+ ctx = {}
- ctx = {'aliases': name_aliases[1]}
- if name_aliases[0] is not None:
- ctx['name'] = name_aliases[0]
+ room_state = yield state_handler.get_current_state(ev.room_id)
- their_member_events_for_room = yield store.get_current_state(
- room_id=ev.room_id,
- event_type='m.room.member',
- state_key=ev.user_id
+ # we no longer bother setting room_alias, and make room_name the
+ # human-readable name instead, be that m.room.name, an alias or
+ # a list of people in the room
+ name = calculate_room_name(
+ room_state, user_id, fallback_to_single_member=False
)
- for mev in their_member_events_for_room:
- if mev.content['membership'] == 'join' and 'displayname' in mev.content:
- dn = mev.content['displayname']
- if dn is not None:
- ctx['sender_display_name'] = dn
+ if name:
+ ctx['name'] = name
+
+ sender_state_event = room_state[("m.room.member", ev.sender)]
+ ctx['sender_display_name'] = name_from_member_event(sender_state_event)
defer.returnValue(ctx)
diff --git a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py
index 877c68508..86e0721ac 100644
--- a/synapse/replication/slave/storage/events.py
+++ b/synapse/replication/slave/storage/events.py
@@ -64,7 +64,6 @@ class SlavedEventStore(BaseSlavedStore):
# Cached functions can't be accessed through a class instance so we need
# to reach inside the __dict__ to extract them.
- get_room_name_and_aliases = RoomStore.__dict__["get_room_name_and_aliases"]
get_rooms_for_user = RoomMemberStore.__dict__["get_rooms_for_user"]
get_users_in_room = RoomMemberStore.__dict__["get_users_in_room"]
get_latest_event_ids_in_room = EventFederationStore.__dict__[
@@ -202,7 +201,6 @@ class SlavedEventStore(BaseSlavedStore):
self.get_rooms_for_user.invalidate_all()
self.get_users_in_room.invalidate((event.room_id,))
# self.get_joined_hosts_for_room.invalidate((event.room_id,))
- self.get_room_name_and_aliases.invalidate((event.room_id,))
self._invalidate_get_event_cache(event.event_id)
@@ -246,9 +244,3 @@ class SlavedEventStore(BaseSlavedStore):
self._get_current_state_for_key.invalidate((
event.room_id, event.type, event.state_key
))
-
- if event.type in [EventTypes.Name, EventTypes.Aliases]:
- self.get_room_name_and_aliases.invalidate(
- (event.room_id,)
- )
- pass
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index 6d978ffcd..88a6ff731 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -355,7 +355,6 @@ class EventsStore(SQLBaseStore):
txn.call_after(self.get_rooms_for_user.invalidate_all)
txn.call_after(self.get_users_in_room.invalidate, (event.room_id,))
txn.call_after(self.get_joined_hosts_for_room.invalidate, (event.room_id,))
- txn.call_after(self.get_room_name_and_aliases.invalidate, (event.room_id,))
# Add an entry to the current_state_resets table to record the point
# where we clobbered the current state
@@ -666,12 +665,6 @@ class EventsStore(SQLBaseStore):
(event.room_id, event.type, event.state_key,)
)
- if event.type in [EventTypes.Name, EventTypes.Aliases]:
- txn.call_after(
- self.get_room_name_and_aliases.invalidate,
- (event.room_id,)
- )
-
self._simple_upsert_txn(
txn,
"current_state_events",
diff --git a/synapse/storage/room.py b/synapse/storage/room.py
index 97f9f1929..fb89ce01b 100644
--- a/synapse/storage/room.py
+++ b/synapse/storage/room.py
@@ -192,49 +192,6 @@ class RoomStore(SQLBaseStore):
# This should be unreachable.
raise Exception("Unrecognized database engine")
- @cachedInlineCallbacks()
- def get_room_name_and_aliases(self, room_id):
- def get_room_name(txn):
- sql = (
- "SELECT name FROM room_names"
- " INNER JOIN current_state_events USING (room_id, event_id)"
- " WHERE room_id = ?"
- " LIMIT 1"
- )
-
- txn.execute(sql, (room_id,))
- rows = txn.fetchall()
- if rows:
- return rows[0][0]
- else:
- return None
-
- return [row[0] for row in txn.fetchall()]
-
- def get_room_aliases(txn):
- sql = (
- "SELECT content FROM current_state_events"
- " INNER JOIN events USING (room_id, event_id)"
- " WHERE room_id = ?"
- )
- txn.execute(sql, (room_id,))
- return [row[0] for row in txn.fetchall()]
-
- name = yield self.runInteraction("get_room_name", get_room_name)
- alias_contents = yield self.runInteraction("get_room_aliases", get_room_aliases)
-
- aliases = []
-
- for c in alias_contents:
- try:
- content = json.loads(c)
- except:
- continue
-
- aliases.extend(content.get('aliases', []))
-
- defer.returnValue((name, aliases))
-
def add_event_report(self, room_id, event_id, user_id, reason, content,
received_ts):
next_id = self._event_reports_id_gen.get_next()
diff --git a/synapse/util/presentable_names.py b/synapse/util/presentable_names.py
index a6866f611..4c54812e6 100644
--- a/synapse/util/presentable_names.py
+++ b/synapse/util/presentable_names.py
@@ -25,7 +25,8 @@ ALIAS_RE = re.compile(r"^#.*:.+$")
ALL_ALONE = "Empty Room"
-def calculate_room_name(room_state, user_id, fallback_to_members=True):
+def calculate_room_name(room_state, user_id, fallback_to_members=True,
+ fallback_to_single_member=True):
"""
Works out a user-facing name for the given room as per Matrix
spec recommendations.
@@ -129,6 +130,8 @@ def calculate_room_name(room_state, user_id, fallback_to_members=True):
return name_from_member_event(all_members[0])
else:
return ALL_ALONE
+ elif len(other_members) == 1 and not fallback_to_single_member:
+ return None
else:
return descriptor_from_member_events(other_members)
From 46b7362304c0ea056c65323a80a84e231c544e86 Mon Sep 17 00:00:00 2001
From: David Baker
Date: Fri, 24 Jun 2016 11:44:57 +0100
Subject: [PATCH 172/414] pep8
---
synapse/replication/slave/storage/events.py | 1 -
1 file changed, 1 deletion(-)
diff --git a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py
index 86e0721ac..369d83946 100644
--- a/synapse/replication/slave/storage/events.py
+++ b/synapse/replication/slave/storage/events.py
@@ -18,7 +18,6 @@ from ._slaved_id_tracker import SlavedIdTracker
from synapse.api.constants import EventTypes
from synapse.events import FrozenEvent
from synapse.storage import DataStore
-from synapse.storage.room import RoomStore
from synapse.storage.roommember import RoomMemberStore
from synapse.storage.event_federation import EventFederationStore
from synapse.storage.event_push_actions import EventPushActionsStore
From aa3a4944d51c60886984211a7f8ae6b7fbac765d Mon Sep 17 00:00:00 2001
From: David Baker
Date: Fri, 24 Jun 2016 11:45:23 +0100
Subject: [PATCH 173/414] more pep8
---
synapse/storage/room.py | 1 -
1 file changed, 1 deletion(-)
diff --git a/synapse/storage/room.py b/synapse/storage/room.py
index fb89ce01b..8251f5867 100644
--- a/synapse/storage/room.py
+++ b/synapse/storage/room.py
@@ -18,7 +18,6 @@ from twisted.internet import defer
from synapse.api.errors import StoreError
from ._base import SQLBaseStore
-from synapse.util.caches.descriptors import cachedInlineCallbacks
from .engines import PostgresEngine, Sqlite3Engine
import collections
From 0b640aa56bce86ca56d9fe3cd9c1fec6620ff18b Mon Sep 17 00:00:00 2001
From: David Baker
Date: Fri, 24 Jun 2016 11:47:11 +0100
Subject: [PATCH 174/414] even more pep8
---
synapse/push/push_tools.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/synapse/push/push_tools.py b/synapse/push/push_tools.py
index d91ca34a8..6f2d1ad57 100644
--- a/synapse/push/push_tools.py
+++ b/synapse/push/push_tools.py
@@ -18,6 +18,7 @@ from synapse.util.presentable_names import (
calculate_room_name, name_from_member_event
)
+
@defer.inlineCallbacks
def get_badge_count(store, user_id):
invites, joins = yield defer.gatherResults([
From 2455ad8468ea3d372d0f3b3828efa10419ad68ad Mon Sep 17 00:00:00 2001
From: David Baker
Date: Fri, 24 Jun 2016 13:34:20 +0100
Subject: [PATCH 175/414] Remove room name & alias test
as get_room_name_and_alias is now gone
---
.../replication/slave/storage/test_events.py | 41 -------------------
1 file changed, 41 deletions(-)
diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py
index 17587fda0..f33e6f60f 100644
--- a/tests/replication/slave/storage/test_events.py
+++ b/tests/replication/slave/storage/test_events.py
@@ -58,47 +58,6 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase):
def tearDown(self):
[unpatch() for unpatch in self.unpatches]
- @defer.inlineCallbacks
- def test_room_name_and_aliases(self):
- create = yield self.persist(type="m.room.create", key="", creator=USER_ID)
- yield self.persist(type="m.room.member", key=USER_ID, membership="join")
- yield self.persist(type="m.room.name", key="", name="name1")
- yield self.persist(
- type="m.room.aliases", key="blue", aliases=["#1:blue"]
- )
- yield self.replicate()
- yield self.check(
- "get_room_name_and_aliases", (ROOM_ID,), ("name1", ["#1:blue"])
- )
-
- # Set the room name.
- yield self.persist(type="m.room.name", key="", name="name2")
- yield self.replicate()
- yield self.check(
- "get_room_name_and_aliases", (ROOM_ID,), ("name2", ["#1:blue"])
- )
-
- # Set the room aliases.
- yield self.persist(
- type="m.room.aliases", key="blue", aliases=["#2:blue"]
- )
- yield self.replicate()
- yield self.check(
- "get_room_name_and_aliases", (ROOM_ID,), ("name2", ["#2:blue"])
- )
-
- # Leave and join the room clobbering the state.
- yield self.persist(type="m.room.member", key=USER_ID, membership="leave")
- yield self.persist(
- type="m.room.member", key=USER_ID, membership="join",
- reset_state=[create]
- )
- yield self.replicate()
-
- yield self.check(
- "get_room_name_and_aliases", (ROOM_ID,), (None, [])
- )
-
@defer.inlineCallbacks
def test_room_members(self):
create = yield self.persist(type="m.room.create", key="", creator=USER_ID)
From f7fe0e5f67e44c07e100226f54e183f82f2c98eb Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Fri, 24 Jun 2016 13:53:03 +0100
Subject: [PATCH 176/414] Fix the sytests to use a port-range rather than a
port base
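With the defaults used below (PORT_BASE=8000, PORT_COUNT=20) the scripts now
pass --port-range 8000:8019 where they previously passed --port-base 8000.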
---
jenkins-dendron-postgres.sh | 3 ++-
jenkins-postgres.sh | 3 ++-
jenkins-sqlite.sh | 5 +++--
3 files changed, 7 insertions(+), 4 deletions(-)
diff --git a/jenkins-dendron-postgres.sh b/jenkins-dendron-postgres.sh
index 7e6f24aa7..50268e098 100755
--- a/jenkins-dendron-postgres.sh
+++ b/jenkins-dendron-postgres.sh
@@ -70,6 +70,7 @@ cd sytest
git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop)
: ${PORT_BASE:=8000}
+: ${PORT_COUNT=20}
./jenkins/prep_sytest_for_postgres.sh
@@ -81,6 +82,6 @@ echo >&2 "Running sytest with PostgreSQL";
--dendron $WORKSPACE/dendron/bin/dendron \
--pusher \
--synchrotron \
- --port-base $PORT_BASE
+ --port-range ${PORT_BASE}:$((PORT_BASE+PORT_COUNT-1))
cd ..
diff --git a/jenkins-postgres.sh b/jenkins-postgres.sh
index ae6b11159..2f0768fcb 100755
--- a/jenkins-postgres.sh
+++ b/jenkins-postgres.sh
@@ -44,6 +44,7 @@ cd sytest
git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop)
: ${PORT_BASE:=8000}
+: ${PORT_COUNT=20}
./jenkins/prep_sytest_for_postgres.sh
@@ -51,7 +52,7 @@ echo >&2 "Running sytest with PostgreSQL";
./jenkins/install_and_run.sh --coverage \
--python $TOX_BIN/python \
--synapse-directory $WORKSPACE \
- --port-base $PORT_BASE
+ --port-range ${PORT_BASE}:$((PORT_BASE+PORT_COUNT-1))
cd ..
cp sytest/.coverage.* .
diff --git a/jenkins-sqlite.sh b/jenkins-sqlite.sh
index 9398d9db1..da603c5af 100755
--- a/jenkins-sqlite.sh
+++ b/jenkins-sqlite.sh
@@ -41,11 +41,12 @@ cd sytest
git checkout "${GIT_BRANCH}" || (echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop" ; git checkout develop)
-: ${PORT_BASE:=8500}
+: ${PORT_COUNT=20}
+: ${PORT_BASE:=8000}
./jenkins/install_and_run.sh --coverage \
--python $TOX_BIN/python \
--synapse-directory $WORKSPACE \
- --port-base $PORT_BASE
+ --port-range ${PORT_BASE}:$((PORT_BASE+PORT_COUNT-1))
cd ..
cp sytest/.coverage.* .
From 70d820c87595f037f0c17dc525604aaaa0cf148c Mon Sep 17 00:00:00 2001
From: Rick Cogley
Date: Sun, 26 Jun 2016 19:07:07 +0900
Subject: [PATCH 177/414] Update to reflect new location at github.
Additionally, there does not appear to be a turnserver.conf.default, but rather just /etc/turnserver.conf.
---
docs/turn-howto.rst | 34 ++++++++++++++++++----------------
1 file changed, 18 insertions(+), 16 deletions(-)
diff --git a/docs/turn-howto.rst b/docs/turn-howto.rst
index e2c73458e..4f2794111 100644
--- a/docs/turn-howto.rst
+++ b/docs/turn-howto.rst
@@ -9,19 +9,21 @@ the Home Server to generate credentials that are valid for use on the TURN
server through the use of a secret shared between the Home Server and the
TURN server.
-This document described how to install coturn
-(https://code.google.com/p/coturn/) which also supports the TURN REST API,
+This document describes how to install coturn
+(https://github.com/coturn/coturn) which also supports the TURN REST API,
and integrate it with synapse.
coturn Setup
============
+You may be able to setup coturn via your package manager, or set it up manually using the usual ``configure, make, make install`` process.
+
1. Check out coturn::
- svn checkout http://coturn.googlecode.com/svn/trunk/ coturn
- cd coturn
+ svn checkout https://github.com/coturn/coturn.git coturn
+ cd coturn
2. Configure it::
- ./configure
+ ./configure
You may need to install libevent2: if so, you should do so
in the way recommended by your operating system.
@@ -29,22 +31,21 @@ coturn Setup
database is unnecessary for this purpose.
3. Build and install it::
- make
- make install
+ make
+ make install
- 4. Make a config file in /etc/turnserver.conf. You can customise
- a config file from turnserver.conf.default. The relevant
+ 4. Create or edit the config file in ``/etc/turnserver.conf``. The relevant
lines, with example values, are::
- lt-cred-mech
- use-auth-secret
- static-auth-secret=[your secret key here]
- realm=turn.myserver.org
+ lt-cred-mech
+ use-auth-secret
+ static-auth-secret=[your secret key here]
+ realm=turn.myserver.org
- See turnserver.conf.default for explanations of the options.
+ See turnserver.conf for explanations of the options.
One way to generate the static-auth-secret is with pwgen::
- pwgen -s 64 1
+ pwgen -s 64 1
5. Ensure your firewall allows traffic into the TURN server on
the ports you've configured it to listen on (remember to allow
@@ -54,7 +55,7 @@ coturn Setup
import your private key and certificate.
7. Start the turn server::
- bin/turnserver -o
+ bin/turnserver -o
synapse Setup
@@ -91,3 +92,4 @@ Now, restart synapse::
./synctl restart
...and your Home Server now supports VoIP relaying!
+
From 63bb8f0df9946fa8084193578b44e6a931f66d51 Mon Sep 17 00:00:00 2001
From: Matthew Hodgson
Date: Mon, 27 Jun 2016 13:13:17 +0400
Subject: [PATCH 178/414] remove vector.im from default secondary DS list
---
synapse/config/server.py | 1 -
1 file changed, 1 deletion(-)
diff --git a/synapse/config/server.py b/synapse/config/server.py
index d7e6f2051..51eaf423c 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -149,7 +149,6 @@ class ServerConfig(Config):
# room directory.
# secondary_directory_servers:
# - matrix.org
- # - vector.im
# List of ports that Synapse should listen on, their purpose and their
# configuration.
From 551fe80bed666cf89225d650915391bbca84c165 Mon Sep 17 00:00:00 2001
From: Rick Cogley
Date: Tue, 28 Jun 2016 12:47:55 +0900
Subject: [PATCH 179/414] Remove double spaces
Reading the RST spec, I was trying to get line breaks to appear by entering double spaces after the lines in the code blocks. That does not work anyway, so, as pointed out, I've removed them.
---
docs/turn-howto.rst | 23 +++++++++++------------
1 file changed, 11 insertions(+), 12 deletions(-)
diff --git a/docs/turn-howto.rst b/docs/turn-howto.rst
index 4f2794111..f0c5601ea 100644
--- a/docs/turn-howto.rst
+++ b/docs/turn-howto.rst
@@ -19,11 +19,11 @@ coturn Setup
You may be able to setup coturn via your package manager, or set it up manually using the usual ``configure, make, make install`` process.
1. Check out coturn::
- svn checkout https://github.com/coturn/coturn.git coturn
- cd coturn
+ svn checkout https://github.com/coturn/coturn.git coturn
+ cd coturn
2. Configure it::
- ./configure
+ ./configure
You may need to install libevent2: if so, you should do so
in the way recommended by your operating system.
@@ -31,21 +31,21 @@ You may be able to setup coturn via your package manager, or set it up manually
database is unnecessary for this purpose.
3. Build and install it::
- make
- make install
+ make
+ make install
4. Create or edit the config file in ``/etc/turnserver.conf``. The relevant
lines, with example values, are::
- lt-cred-mech
- use-auth-secret
- static-auth-secret=[your secret key here]
- realm=turn.myserver.org
+ lt-cred-mech
+ use-auth-secret
+ static-auth-secret=[your secret key here]
+ realm=turn.myserver.org
See turnserver.conf for explanations of the options.
One way to generate the static-auth-secret is with pwgen::
- pwgen -s 64 1
+ pwgen -s 64 1
5. Ensure your firewall allows traffic into the TURN server on
the ports you've configured it to listen on (remember to allow
@@ -55,7 +55,7 @@ You may be able to setup coturn via your package manager, or set it up manually
import your private key and certificate.
7. Start the turn server::
- bin/turnserver -o
+ bin/turnserver -o
synapse Setup
@@ -92,4 +92,3 @@ Now, restart synapse::
./synctl restart
...and your Home Server now supports VoIP relaying!
-
From 1ea358b28b46edffdf62a52e8a2b3faf8b2aae1d Mon Sep 17 00:00:00 2001
From: Rick Cogley
Date: Tue, 28 Jun 2016 18:27:54 +0900
Subject: [PATCH 180/414] Update turn-howto.rst to use git clone
svn checkout is not logical for a checkout from GitHub, so I changed the checkout to "git clone".
thanks @dbkr
Signed-off-by: Rick Cogley
---
docs/turn-howto.rst | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/turn-howto.rst b/docs/turn-howto.rst
index f0c5601ea..afddebd53 100644
--- a/docs/turn-howto.rst
+++ b/docs/turn-howto.rst
@@ -19,7 +19,7 @@ coturn Setup
You may be able to setup coturn via your package manager, or set it up manually using the usual ``configure, make, make install`` process.
1. Check out coturn::
- svn checkout https://github.com/coturn/coturn.git coturn
+ git clone https://github.com/coturn/coturn.git coturn
cd coturn
2. Configure it::
From 56ec5869c98c97869f908c0309d2f9c4b648eda2 Mon Sep 17 00:00:00 2001
From: Rick Cogley
Date: Tue, 28 Jun 2016 18:34:38 +0900
Subject: [PATCH 181/414] Update turn-howto.rst to use git clone (2)
It is not logical to use svn checkout against a GitHub repo, so I changed it to git clone.
Signed-off-by: Rick Cogley
---
docs/turn-howto.rst | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/docs/turn-howto.rst b/docs/turn-howto.rst
index afddebd53..04c010071 100644
--- a/docs/turn-howto.rst
+++ b/docs/turn-howto.rst
@@ -19,18 +19,21 @@ coturn Setup
You may be able to setup coturn via your package manager, or set it up manually using the usual ``configure, make, make install`` process.
1. Check out coturn::
+
git clone https://github.com/coturn/coturn.git coturn
cd coturn
2. Configure it::
+
./configure
- You may need to install libevent2: if so, you should do so
+ You may need to install ``libevent2``: if so, you should do so
in the way recommended by your operating system.
You can ignore warnings about lack of database support: a
database is unnecessary for this purpose.
3. Build and install it::
+
make
make install
@@ -55,6 +58,7 @@ You may be able to setup coturn via your package manager, or set it up manually
import your private key and certificate.
7. Start the turn server::
+
bin/turnserver -o
From 314b146b2e3082fc6bc61296f5c2ea5d7735f01e Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 29 Jun 2016 11:41:20 +0100
Subject: [PATCH 182/414] Track approximate last access time for remote media
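The write path batches recently accessed media keys in memory and flushes
them on a timer with a single executemany. A standalone sketch against
sqlite3 (the table and column names follow the schema delta below; the rest
is illustrative):

    import sqlite3
    import time

    db = sqlite3.connect(":memory:")
    db.execute("CREATE TABLE remote_media_cache"
               " (media_origin TEXT, media_id TEXT, last_access_ts BIGINT)")
    db.execute("INSERT INTO remote_media_cache VALUES ('example.org', 'abc', NULL)")

    recently_accessed = {("example.org", "abc")}  # filled as media is served

    def flush(now_ms):
        # one batched UPDATE for everything accessed since the last flush
        db.executemany(
            "UPDATE remote_media_cache SET last_access_ts = ?"
            " WHERE media_origin = ? AND media_id = ?",
            [(now_ms, origin, media_id) for origin, media_id in recently_accessed],
        )
        recently_accessed.clear()

    flush(int(time.time() * 1000))  # called from a looping timer in the real code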
---
synapse/rest/media/v1/media_repository.py | 24 ++++++++++++++
synapse/storage/media_repository.py | 15 +++++++++
synapse/storage/prepare_database.py | 2 +-
.../schema/delta/33/remote_media_ts.py | 31 +++++++++++++++++++
4 files changed, 71 insertions(+), 1 deletion(-)
create mode 100644 synapse/storage/schema/delta/33/remote_media_ts.py
diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py
index 2468c3ac4..1a287b6fe 100644
--- a/synapse/rest/media/v1/media_repository.py
+++ b/synapse/rest/media/v1/media_repository.py
@@ -43,6 +43,9 @@ import urlparse
logger = logging.getLogger(__name__)
+UPDATE_RECENTLY_ACCESSED_REMOTES_TS = 60 * 1000
+
+
class MediaRepository(object):
def __init__(self, hs, filepaths):
self.auth = hs.get_auth()
@@ -57,6 +60,22 @@ class MediaRepository(object):
self.dynamic_thumbnails = hs.config.dynamic_thumbnails
self.thumbnail_requirements = hs.config.thumbnail_requirements
+ self.recently_accessed_remotes = set()
+
+ self.clock.looping_call(
+ self._update_recently_accessed_remotes,
+ UPDATE_RECENTLY_ACCESSED_REMOTES_TS
+ )
+
+ @defer.inlineCallbacks
+ def _update_recently_accessed_remotes(self):
+ media = self.recently_accessed_remotes
+ self.recently_accessed_remotes = set()
+
+ yield self.store.update_cached_last_access_time(
+ media, self.clock.time_msec()
+ )
+
@staticmethod
def _makedirs(filepath):
dirname = os.path.dirname(filepath)
@@ -119,6 +138,11 @@ class MediaRepository(object):
media_info = yield self._download_remote_file(
server_name, media_id
)
+ else:
+ self.recently_accessed_remotes.add((server_name, media_id))
+ yield self.store.update_cached_last_access_time(
+ [(server_name, media_id)], self.clock.time_msec()
+ )
defer.returnValue(media_info)
@defer.inlineCallbacks
diff --git a/synapse/storage/media_repository.py b/synapse/storage/media_repository.py
index a820fcf07..44e4d3830 100644
--- a/synapse/storage/media_repository.py
+++ b/synapse/storage/media_repository.py
@@ -157,10 +157,25 @@ class MediaRepositoryStore(SQLBaseStore):
"created_ts": time_now_ms,
"upload_name": upload_name,
"filesystem_id": filesystem_id,
+ "last_access_ts": time_now_ms,
},
desc="store_cached_remote_media",
)
+ def update_cached_last_access_time(self, origin_id_tuples, time_ts):
+ def update_cache_txn(txn):
+ sql = (
+ "UPDATE remote_media_cache SET last_access_ts = ?"
+ " WHERE media_origin = ? AND media_id = ?"
+ )
+
+ txn.executemany(sql, (
+ (time_ts, media_origin, media_id)
+ for media_origin, media_id in origin_id_tuples
+ ))
+
+ return self.runInteraction("update_cached_last_access_time", update_cache_txn)
+
def get_remote_media_thumbnails(self, origin, media_id):
return self._simple_select_list(
"remote_media_cache_thumbnails",
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py
index c8487c883..8801669a6 100644
--- a/synapse/storage/prepare_database.py
+++ b/synapse/storage/prepare_database.py
@@ -25,7 +25,7 @@ logger = logging.getLogger(__name__)
# Remember to update this number every time a change is made to database
# schema files, so the users will be informed on server restarts.
-SCHEMA_VERSION = 32
+SCHEMA_VERSION = 33
dir_path = os.path.abspath(os.path.dirname(__file__))
diff --git a/synapse/storage/schema/delta/33/remote_media_ts.py b/synapse/storage/schema/delta/33/remote_media_ts.py
new file mode 100644
index 000000000..55ae43f39
--- /dev/null
+++ b/synapse/storage/schema/delta/33/remote_media_ts.py
@@ -0,0 +1,31 @@
+# Copyright 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import time
+
+
+ALTER_TABLE = "ALTER TABLE remote_media_cache ADD COLUMN last_access_ts BIGINT"
+
+
+def run_create(cur, database_engine, *args, **kwargs):
+ cur.execute(ALTER_TABLE)
+
+
+def run_upgrade(cur, database_engine, *args, **kwargs):
+ cur.execute(
+ database_engine.convert_param_style(
+ "UPDATE remote_media_cache SET last_access_ts = ?"
+ ),
+ (int(time.time() * 1000),)
+ )
From a70688445dd7a9fa41a55a642fb9a394f291ae45 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 29 Jun 2016 14:57:59 +0100
Subject: [PATCH 183/414] Implement purge_media_cache admin API
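Once deployed, an admin can drive the endpoint over HTTP (a hedged sketch
using the requests library; the URL prefix follows the usual v1 client API
mount point and the access token is a placeholder):

    import time

    import requests

    month_ago_ms = int((time.time() - 30 * 24 * 3600) * 1000)
    resp = requests.post(
        "https://localhost:8448/_matrix/client/api/v1/admin/purge_media_cache",
        params={"before_ts": month_ago_ms, "access_token": "ADMIN_TOKEN"},
        verify=False,  # self-signed certificates are common here
    )
    print(resp.json())  # e.g. {"deleted": 3}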
---
synapse/rest/client/v1/admin.py | 32 ++++++++++
synapse/rest/media/v1/filepath.py | 6 ++
synapse/rest/media/v1/media_repository.py | 78 +++++++++++++++++------
synapse/server.py | 5 ++
synapse/storage/media_repository.py | 29 +++++++++
5 files changed, 130 insertions(+), 20 deletions(-)
diff --git a/synapse/rest/client/v1/admin.py b/synapse/rest/client/v1/admin.py
index aa05b3f02..8ec8569a4 100644
--- a/synapse/rest/client/v1/admin.py
+++ b/synapse/rest/client/v1/admin.py
@@ -46,5 +46,37 @@ class WhoisRestServlet(ClientV1RestServlet):
defer.returnValue((200, ret))
+class PurgeMediaCacheRestServlet(ClientV1RestServlet):
+ PATTERNS = client_path_patterns("/admin/purge_media_cache")
+
+ def __init__(self, hs):
+ self.media_repository = hs.get_media_repository()
+ super(PurgeMediaCacheRestServlet, self).__init__(hs)
+
+ @defer.inlineCallbacks
+ def on_POST(self, request):
+ requester = yield self.auth.get_user_by_req(request)
+ is_admin = yield self.auth.is_server_admin(requester.user)
+
+ if not is_admin:
+ raise AuthError(403, "You are not a server admin")
+
+ before_ts = request.args.get("before_ts", None)
+ if not before_ts:
+ raise SynapseError(400, "Missing 'before_ts' arg")
+
+ logger.info("before_ts: %r", before_ts[0])
+
+ try:
+ before_ts = int(before_ts[0])
+ except Exception:
+ raise SynapseError(400, "Invalid 'before_ts' arg")
+
+ ret = yield self.media_repository.delete_old_remote_media(before_ts)
+
+ defer.returnValue((200, ret))
+
+
def register_servlets(hs, http_server):
WhoisRestServlet(hs).register(http_server)
+ PurgeMediaCacheRestServlet(hs).register(http_server)
diff --git a/synapse/rest/media/v1/filepath.py b/synapse/rest/media/v1/filepath.py
index 422ab86fb..0137458f7 100644
--- a/synapse/rest/media/v1/filepath.py
+++ b/synapse/rest/media/v1/filepath.py
@@ -65,3 +65,9 @@ class MediaFilePaths(object):
file_id[0:2], file_id[2:4], file_id[4:],
file_name
)
+
+ def remote_media_thumbnail_dir(self, server_name, file_id):
+ return os.path.join(
+ self.base_path, "remote_thumbnail", server_name,
+ file_id[0:2], file_id[2:4], file_id[4:],
+ )
diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py
index 1a287b6fe..844628c12 100644
--- a/synapse/rest/media/v1/media_repository.py
+++ b/synapse/rest/media/v1/media_repository.py
@@ -30,11 +30,13 @@ from synapse.api.errors import SynapseError
from twisted.internet import defer, threads
-from synapse.util.async import ObservableDeferred
+from synapse.util.async import Linearizer
from synapse.util.stringutils import is_ascii
from synapse.util.logcontext import preserve_context_over_fn
import os
+import errno
+import shutil
import cgi
import logging
@@ -47,7 +49,7 @@ UPDATE_RECENTLY_ACCESSED_REMOTES_TS = 60 * 1000
class MediaRepository(object):
- def __init__(self, hs, filepaths):
+ def __init__(self, hs):
self.auth = hs.get_auth()
self.client = MatrixFederationHttpClient(hs)
self.clock = hs.get_clock()
@@ -55,11 +57,12 @@ class MediaRepository(object):
self.store = hs.get_datastore()
self.max_upload_size = hs.config.max_upload_size
self.max_image_pixels = hs.config.max_image_pixels
- self.filepaths = filepaths
- self.downloads = {}
+ self.filepaths = MediaFilePaths(hs.config.media_store_path)
self.dynamic_thumbnails = hs.config.dynamic_thumbnails
self.thumbnail_requirements = hs.config.thumbnail_requirements
+ self.remote_media_linearizer = Linearizer()
+
self.recently_accessed_remotes = set()
self.clock.looping_call(
@@ -112,22 +115,12 @@ class MediaRepository(object):
defer.returnValue("mxc://%s/%s" % (self.server_name, media_id))
+ @defer.inlineCallbacks
def get_remote_media(self, server_name, media_id):
key = (server_name, media_id)
- download = self.downloads.get(key)
- if download is None:
- download = self._get_remote_media_impl(server_name, media_id)
- download = ObservableDeferred(
- download,
- consumeErrors=True
- )
- self.downloads[key] = download
-
- @download.addBoth
- def callback(media_info):
- del self.downloads[key]
- return media_info
- return download.observe()
+ with (yield self.remote_media_linearizer.queue(key)):
+ media_info = yield self._get_remote_media_impl(server_name, media_id)
+ defer.returnValue(media_info)
@defer.inlineCallbacks
def _get_remote_media_impl(self, server_name, media_id):
@@ -440,6 +433,52 @@ class MediaRepository(object):
"height": m_height,
})
+ @defer.inlineCallbacks
+ def delete_old_remote_media(self, before_ts):
+ old_media = yield self.store.get_remote_media_before(before_ts)
+
+ deleted = 0
+
+ for media in old_media:
+ origin = media["media_origin"]
+ media_id = media["media_id"]
+ file_id = media["filesystem_id"]
+ key = (origin, media_id)
+
+ logger.info("Deleting: %r", key)
+
+ with (yield self.remote_media_linearizer.queue(key)):
+ full_path = self.filepaths.remote_media_filepath(origin, file_id)
+ full_dir = os.path.dirname(full_path)
+ try:
+ os.remove(full_path)
+ except OSError as e:
+ logger.warn("Failed to remove file: %r", full_path)
+ if e.errno == errno.ENOENT:
+ pass
+ else:
+ continue
+
+ try:
+ os.removedirs(full_dir)
+ except OSError:
+ pass
+
+ thumbnail_dir = self.filepaths.remote_media_thumbnail_dir(
+ origin, file_id
+ )
+ shutil.rmtree(thumbnail_dir, ignore_errors=True)
+
+ yield self.store.delete_remote_media(origin, media_id)
+ try:
+ os.removedirs(thumbnail_dir)
+ except OSError:
+ pass
+
+ deleted += 1
+
+ defer.returnValue({"deleted": deleted})
+
class MediaRepositoryResource(Resource):
"""File uploading and downloading.
@@ -488,9 +527,8 @@ class MediaRepositoryResource(Resource):
def __init__(self, hs):
Resource.__init__(self)
- filepaths = MediaFilePaths(hs.config.media_store_path)
- media_repo = MediaRepository(hs, filepaths)
+ media_repo = hs.get_media_repository()
self.putChild("upload", UploadResource(hs, media_repo))
self.putChild("download", DownloadResource(hs, media_repo))
diff --git a/synapse/server.py b/synapse/server.py
index dd4b81c65..d49a1a8a9 100644
--- a/synapse/server.py
+++ b/synapse/server.py
@@ -45,6 +45,7 @@ from synapse.crypto.keyring import Keyring
from synapse.push.pusherpool import PusherPool
from synapse.events.builder import EventBuilderFactory
from synapse.api.filtering import Filtering
+from synapse.rest.media.v1.media_repository import MediaRepository
from synapse.http.matrixfederationclient import MatrixFederationHttpClient
@@ -113,6 +114,7 @@ class HomeServer(object):
'filtering',
'http_client_context_factory',
'simple_http_client',
+ 'media_repository',
]
def __init__(self, hostname, **kwargs):
@@ -233,6 +235,9 @@ class HomeServer(object):
**self.db_config.get("args", {})
)
+ def build_media_repository(self):
+ return MediaRepository(self)
+
def remove_pusher(self, app_id, push_key, user_id):
return self.get_pusherpool().remove_pusher(app_id, push_key, user_id)
diff --git a/synapse/storage/media_repository.py b/synapse/storage/media_repository.py
index 44e4d3830..4c0f82353 100644
--- a/synapse/storage/media_repository.py
+++ b/synapse/storage/media_repository.py
@@ -205,3 +205,32 @@ class MediaRepositoryStore(SQLBaseStore):
},
desc="store_remote_media_thumbnail",
)
+
+ def get_remote_media_before(self, before_ts):
+ sql = (
+ "SELECT media_origin, media_id, filesystem_id"
+ " FROM remote_media_cache"
+ " WHERE last_access_ts < ?"
+ )
+
+ return self._execute(
+ "get_remote_media_before", self.cursor_to_dict, sql, before_ts
+ )
+
+ def delete_remote_media(self, media_origin, media_id):
+ def delete_remote_media_txn(txn):
+ self._simple_delete_txn(
+ txn,
+ "remote_media_cache",
+ keyvalues={
+ "media_origin": media_origin, "media_id": media_id
+ },
+ )
+ self._simple_delete_txn(
+ txn,
+ "remote_media_cache_thumbnails",
+ keyvalues={
+ "media_origin": media_origin, "media_id": media_id
+ },
+ )
+ return self.runInteraction("delete_remote_media", delete_remote_media_txn)
From f52cb4cd7893ebf4ec3c793c215b3b5eb8efc232 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 29 Jun 2016 15:24:50 +0100
Subject: [PATCH 184/414] Remove race
---
synapse/rest/media/v1/media_repository.py | 11 -----------
1 file changed, 11 deletions(-)
diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py
index 844628c12..692e07841 100644
--- a/synapse/rest/media/v1/media_repository.py
+++ b/synapse/rest/media/v1/media_repository.py
@@ -449,7 +449,6 @@ class MediaRepository(object):
with (yield self.remote_media_linearizer.queue(key)):
full_path = self.filepaths.remote_media_filepath(origin, file_id)
- full_dir = os.path.dirname(full_path)
try:
os.remove(full_path)
except OSError as e:
@@ -459,22 +458,12 @@ class MediaRepository(object):
else:
continue
- try:
- os.removedirs(full_dir)
- except OSError:
- pass
-
thumbnail_dir = self.filepaths.remote_media_thumbnail_dir(
origin, file_id
)
shutil.rmtree(thumbnail_dir, ignore_errors=True)
yield self.store.delete_remote_media(origin, media_id)
- try:
- os.removedirs(thumbnail_dir)
- except OSError:
- pass
-
deleted += 1
defer.returnValue({"deleted": deleted})
From f328d95cef99763d056171846253ed68cab58214 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 30 Jun 2016 15:40:58 +0100
Subject: [PATCH 185/414] Feature: Add deactivate account admin API
Allows server admins to "deactivate" accounts, which:
- Revokes all access tokens
- Removes all threepids
- Removes password
The API is a POST to `/admin/deactivate/<user_id>`
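For illustration, a hypothetical call to the new endpoint (host, token and user ID are placeholders; the user ID must be URL-escaped):

    import requests

    resp = requests.post(
        "http://localhost:8008/_matrix/client/api/v1"
        "/admin/deactivate/%40badguy%3Aexample.com",
        params={"access_token": "placeholder-admin-token"},
    )
    assert resp.json() == {}  # empty object on success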
---
synapse/rest/client/v1/admin.py | 26 ++++++++++++++++++++++++++
synapse/storage/_base.py | 5 +++++
synapse/storage/registration.py | 9 +++++++++
3 files changed, 40 insertions(+)
diff --git a/synapse/rest/client/v1/admin.py b/synapse/rest/client/v1/admin.py
index 8ec8569a4..e54c472e0 100644
--- a/synapse/rest/client/v1/admin.py
+++ b/synapse/rest/client/v1/admin.py
@@ -77,6 +77,32 @@ class PurgeMediaCacheRestServlet(ClientV1RestServlet):
defer.returnValue((200, ret))
+class DeactivateAccountRestServlet(ClientV1RestServlet):
+ PATTERNS = client_path_patterns("/admin/deactivate/(?P<target_user_id>[^/]*)")
+
+ def __init__(self, hs):
+ self.store = hs.get_datastore()
+ super(DeactivateAccountRestServlet, self).__init__(hs)
+
+ @defer.inlineCallbacks
+ def on_POST(self, request, target_user_id):
+ UserID.from_string(target_user_id)
+ requester = yield self.auth.get_user_by_req(request)
+ is_admin = yield self.auth.is_server_admin(requester.user)
+
+ if not is_admin:
+ raise AuthError(403, "You are not a server admin")
+
+ # FIXME: Theoretically there is a race here wherein user resets password
+ # using threepid.
+ yield self.store.user_delete_access_tokens(target_user_id)
+ yield self.store.user_delete_threepids(target_user_id)
+ yield self.store.user_set_password_hash(target_user_id, None)
+
+ defer.returnValue((200, {}))
+
+
def register_servlets(hs, http_server):
WhoisRestServlet(hs).register(http_server)
PurgeMediaCacheRestServlet(hs).register(http_server)
+ DeactivateAccountRestServlet(hs).register(http_server)
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index 32c6677d4..d766a3029 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -807,6 +807,11 @@ class SQLBaseStore(object):
if txn.rowcount > 1:
raise StoreError(500, "more than one row matched")
+ def _simple_delete(self, table, keyvalues, desc):
+ return self.runInteraction(
+ desc, self._simple_delete_txn, table, keyvalues
+ )
+
@staticmethod
def _simple_delete_txn(txn, table, keyvalues):
sql = "DELETE FROM %s WHERE %s" % (
diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py
index 3de9e0f70..5c75dbab5 100644
--- a/synapse/storage/registration.py
+++ b/synapse/storage/registration.py
@@ -384,6 +384,15 @@ class RegistrationStore(SQLBaseStore):
defer.returnValue(ret['user_id'])
defer.returnValue(None)
+ def user_delete_threepids(self, user_id):
+ return self._simple_delete(
+ "user_threepids",
+ keyvalues={
+ "user_id": user_id,
+ },
+ desc="user_delete_threepids",
+ )
+
@defer.inlineCallbacks
def count_all_users(self):
"""Counts all users registered on the homeserver."""
From be8be535f73e51a29cfa30f1eac266a7a08b695b Mon Sep 17 00:00:00 2001
From: David Baker
Date: Thu, 30 Jun 2016 17:51:28 +0100
Subject: [PATCH 186/414] requestToken update
Don't send requestToken request to untrusted ID servers
Also correct the THREEPID_IN_USE error to add the M_ prefix. This is a backwards incompatible change, but the only thing using this is the Angular client, which is now unmaintained, so it's probably better to just do this now.
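A sketch of the new behaviour, with the handler wiring elided and placeholder addresses (the server name here is assumed not to be in the trusted id list):

    from twisted.internet import defer
    from synapse.api.errors import Codes, SynapseError

    @defer.inlineCallbacks
    def demo(identity_handler, client_secret):
        try:
            yield identity_handler.requestEmailToken(
                "untrusted.example.com", "a@example.com", client_secret, 1
            )
        except SynapseError as e:
            # untrusted ID servers are now rejected up front
            assert e.errcode == Codes.SERVER_NOT_TRUSTED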
---
synapse/api/errors.py | 3 ++-
synapse/handlers/identity.py | 41 ++++++++++++++++++++++++------------
2 files changed, 29 insertions(+), 15 deletions(-)
diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index b106fbed6..b219b46a4 100644
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -42,8 +42,9 @@ class Codes(object):
TOO_LARGE = "M_TOO_LARGE"
EXCLUSIVE = "M_EXCLUSIVE"
THREEPID_AUTH_FAILED = "M_THREEPID_AUTH_FAILED"
- THREEPID_IN_USE = "THREEPID_IN_USE"
+ THREEPID_IN_USE = "M_THREEPID_IN_USE"
INVALID_USERNAME = "M_INVALID_USERNAME"
+ SERVER_NOT_TRUSTED = "M_SERVER_NOT_TRUSTED"
class CodeMessageException(RuntimeError):
diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py
index 656ce124f..559e5d5a7 100644
--- a/synapse/handlers/identity.py
+++ b/synapse/handlers/identity.py
@@ -21,7 +21,7 @@ from synapse.api.errors import (
)
from ._base import BaseHandler
from synapse.util.async import run_on_reactor
-from synapse.api.errors import SynapseError
+from synapse.api.errors import SynapseError, Codes
import json
import logging
@@ -41,6 +41,20 @@ class IdentityHandler(BaseHandler):
hs.config.use_insecure_ssl_client_just_for_testing_do_not_use
)
+ def _should_trust_id_server(self, id_server):
+ if id_server not in self.trusted_id_servers:
+ if self.trust_any_id_server_just_for_testing_do_not_use:
+ logger.warn(
+ "Trusting untrustworthy ID server %r even though it isn't"
+ " in the trusted id list for testing because"
+ " 'use_insecure_ssl_client_just_for_testing_do_not_use'"
+ " is set in the config",
+ id_server,
+ )
+ else:
+ return False
+ return True
+
@defer.inlineCallbacks
def threepid_from_creds(self, creds):
yield run_on_reactor()
@@ -59,19 +73,12 @@ class IdentityHandler(BaseHandler):
else:
raise SynapseError(400, "No client_secret in creds")
- if id_server not in self.trusted_id_servers:
- if self.trust_any_id_server_just_for_testing_do_not_use:
- logger.warn(
- "Trusting untrustworthy ID server %r even though it isn't"
- " in the trusted id list for testing because"
- " 'use_insecure_ssl_client_just_for_testing_do_not_use'"
- " is set in the config",
- id_server,
- )
- else:
- logger.warn('%s is not a trusted ID server: rejecting 3pid ' +
- 'credentials', id_server)
- defer.returnValue(None)
+ if not self._should_trust_id_server(id_server):
+ logger.warn(
+ '%s is not a trusted ID server: rejecting 3pid ' +
+ 'credentials', id_server
+ )
+ defer.returnValue(None)
data = {}
try:
@@ -129,6 +136,12 @@ class IdentityHandler(BaseHandler):
def requestEmailToken(self, id_server, email, client_secret, send_attempt, **kwargs):
yield run_on_reactor()
+ if not self._should_trust_id_server(id_server):
+ raise SynapseError(
+ 400, "Untrusted ID server '%s'" % id_server,
+ Codes.SERVER_NOT_TRUSTED
+ )
+
params = {
'email': email,
'client_secret': client_secret,
From 5a6ef20ef625f1ac2cfb4011ce75ca9453b6a70e Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Fri, 1 Jul 2016 09:08:35 +0100
Subject: [PATCH 187/414] code_style.rst: add link to google style
---
docs/code_style.rst | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/docs/code_style.rst b/docs/code_style.rst
index dc40a7ab7..8cde76149 100644
--- a/docs/code_style.rst
+++ b/docs/code_style.rst
@@ -43,7 +43,8 @@ Basically, PEP8
together, or want to deliberately extend or preserve vertical/horizontal
space)
-Comments should follow the google code style. This is so that we can generate
-documentation with sphinx (http://sphinxcontrib-napoleon.readthedocs.org/en/latest/)
+Comments should follow the `google code style <https://google-styleguide.googlecode.com/svn/trunk/pyguide.html?showone=Comments#Comments>`_.
+This is so that we can generate documentation with
+`sphinx <http://sphinxcontrib-napoleon.readthedocs.org/en/latest/>`_.
Code should pass pep8 --max-line-length=100 without any warnings.
From 41f072fd0ee62e1df37ad8bb98489395a32ca6d3 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Fri, 1 Jul 2016 09:09:40 +0100
Subject: [PATCH 188/414] code_style.rst: *fix* link to google style
---
docs/code_style.rst | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/code_style.rst b/docs/code_style.rst
index 8cde76149..39710ab4a 100644
--- a/docs/code_style.rst
+++ b/docs/code_style.rst
@@ -43,7 +43,7 @@ Basically, PEP8
together, or want to deliberately extend or preserve vertical/horizontal
space)
-Comments should follow the `google code style <https://google-styleguide.googlecode.com/svn/trunk/pyguide.html?showone=Comments#Comments>`_.
+Comments should follow the `google code style <http://google.github.io/styleguide/pyguide.html?showone=Comments#Comments>`_.
This is so that we can generate documentation with
`sphinx `_.
From 1238203bc47166d1d4ca686e108e84add3bf98b4 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Fri, 1 Jul 2016 09:36:51 +0100
Subject: [PATCH 189/414] code_style.rst: add link to sphinx examples
---
docs/code_style.rst | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/docs/code_style.rst b/docs/code_style.rst
index 39710ab4a..8d73d17be 100644
--- a/docs/code_style.rst
+++ b/docs/code_style.rst
@@ -45,6 +45,8 @@ Basically, PEP8
Comments should follow the `google code style <http://google.github.io/styleguide/pyguide.html?showone=Comments#Comments>`_.
This is so that we can generate documentation with
-`sphinx <http://sphinxcontrib-napoleon.readthedocs.org/en/latest/>`_.
+`sphinx <http://sphinxcontrib-napoleon.readthedocs.org/en/latest/>`_. See the
+`examples <http://sphinxcontrib-napoleon.readthedocs.org/en/latest/example_google.html>`_
+in the sphinx documentation.
Code should pass pep8 --max-line-length=100 without any warnings.
From fc8007dbec40212ae85285aea600111ce2d06912 Mon Sep 17 00:00:00 2001
From: Kent Shikama
Date: Sun, 3 Jul 2016 15:08:15 +0900
Subject: [PATCH 190/414] Optionally include password hash in createUser
endpoint
Signed-off-by: Kent Shikama
---
synapse/handlers/register.py | 4 ++--
synapse/rest/client/v1/register.py | 4 +++-
2 files changed, 5 insertions(+), 3 deletions(-)
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index 0b7517221..e255f2da8 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -358,7 +358,7 @@ class RegistrationHandler(BaseHandler):
defer.returnValue(data)
@defer.inlineCallbacks
- def get_or_create_user(self, localpart, displayname, duration_seconds):
+ def get_or_create_user(self, localpart, displayname, duration_seconds, password_hash=None):
"""Creates a new user if the user does not exist,
else revokes all previous access tokens and generates a new one.
@@ -394,7 +394,7 @@ class RegistrationHandler(BaseHandler):
yield self.store.register(
user_id=user_id,
token=token,
- password_hash=None,
+ password_hash=password_hash,
create_profile_with_localpart=user.localpart,
)
else:
diff --git a/synapse/rest/client/v1/register.py b/synapse/rest/client/v1/register.py
index e3f4fbb0b..ef56d1e90 100644
--- a/synapse/rest/client/v1/register.py
+++ b/synapse/rest/client/v1/register.py
@@ -410,12 +410,14 @@ class CreateUserRestServlet(ClientV1RestServlet):
raise SynapseError(400, "Failed to parse 'duration_seconds'")
if duration_seconds > self.direct_user_creation_max_duration:
duration_seconds = self.direct_user_creation_max_duration
+ password_hash = user_json["password_hash"].encode("utf-8") if user_json["password_hash"] else None
handler = self.handlers.registration_handler
user_id, token = yield handler.get_or_create_user(
localpart=localpart,
displayname=displayname,
- duration_seconds=duration_seconds
+ duration_seconds=duration_seconds,
+ password_hash=password_hash
)
defer.returnValue({
From 2e5a31f1973b49ec1a89cfc042e00b51ba7e70fc Mon Sep 17 00:00:00 2001
From: Kent Shikama
Date: Mon, 4 Jul 2016 22:00:13 +0900
Subject: [PATCH 191/414] Use .get() instead of [] to access password_hash
---
synapse/rest/client/v1/register.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/synapse/rest/client/v1/register.py b/synapse/rest/client/v1/register.py
index ef56d1e90..a923d5a19 100644
--- a/synapse/rest/client/v1/register.py
+++ b/synapse/rest/client/v1/register.py
@@ -410,7 +410,7 @@ class CreateUserRestServlet(ClientV1RestServlet):
raise SynapseError(400, "Failed to parse 'duration_seconds'")
if duration_seconds > self.direct_user_creation_max_duration:
duration_seconds = self.direct_user_creation_max_duration
- password_hash = user_json["password_hash"].encode("utf-8") if user_json["password_hash"] else None
+ password_hash = user_json["password_hash"].encode("utf-8") if user_json.get("password_hash") else None
handler = self.handlers.registration_handler
user_id, token = yield handler.get_or_create_user(
From bb069079bbd0ce761403416ed4f77051352ed347 Mon Sep 17 00:00:00 2001
From: Kent Shikama
Date: Mon, 4 Jul 2016 22:07:11 +0900
Subject: [PATCH 192/414] Fix style violations
Signed-off-by: Kent Shikama
---
synapse/handlers/register.py | 3 ++-
synapse/rest/client/v1/register.py | 3 ++-
2 files changed, 4 insertions(+), 2 deletions(-)
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index e255f2da8..88c82ba7d 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -358,7 +358,8 @@ class RegistrationHandler(BaseHandler):
defer.returnValue(data)
@defer.inlineCallbacks
- def get_or_create_user(self, localpart, displayname, duration_seconds, password_hash=None):
+ def get_or_create_user(self, localpart, displayname, duration_seconds,
+ password_hash=None):
"""Creates a new user if the user does not exist,
else revokes all previous access tokens and generates a new one.
diff --git a/synapse/rest/client/v1/register.py b/synapse/rest/client/v1/register.py
index a923d5a19..d791d5e07 100644
--- a/synapse/rest/client/v1/register.py
+++ b/synapse/rest/client/v1/register.py
@@ -410,7 +410,8 @@ class CreateUserRestServlet(ClientV1RestServlet):
raise SynapseError(400, "Failed to parse 'duration_seconds'")
if duration_seconds > self.direct_user_creation_max_duration:
duration_seconds = self.direct_user_creation_max_duration
- password_hash = user_json["password_hash"].encode("utf-8") if user_json.get("password_hash") else None
+ password_hash = user_json["password_hash"].encode("utf-8") \
+ if user_json.get("password_hash") else None
handler = self.handlers.registration_handler
user_id, token = yield handler.get_or_create_user(
From f18d7546c63ae30c4058d1ec6ab2d5c3b001d257 Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Mon, 4 Jul 2016 15:48:25 +0100
Subject: [PATCH 193/414] Use a query that postgresql optimises better for
get_events_around
---
synapse/storage/stream.py | 28 ++++++++++++++++------------
1 file changed, 16 insertions(+), 12 deletions(-)
diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py
index b9ad965fd..4dd11284e 100644
--- a/synapse/storage/stream.py
+++ b/synapse/storage/stream.py
@@ -591,25 +591,28 @@ class StreamStore(SQLBaseStore):
query_before = (
"SELECT topological_ordering, stream_ordering, event_id FROM events"
- " WHERE room_id = ? AND (topological_ordering < ?"
- " OR (topological_ordering = ? AND stream_ordering < ?))"
- " ORDER BY topological_ordering DESC, stream_ordering DESC"
- " LIMIT ?"
+ " WHERE room_id = ? AND topological_ordering < ?"
+ " UNION ALL "
+ " SELECT topological_ordering, stream_ordering, event_id FROM events"
+ " WHERE room_id = ? AND topological_ordering = ? AND stream_ordering < ?"
+ " ORDER BY topological_ordering DESC, stream_ordering DESC LIMIT ?"
)
query_after = (
"SELECT topological_ordering, stream_ordering, event_id FROM events"
- " WHERE room_id = ? AND (topological_ordering > ?"
- " OR (topological_ordering = ? AND stream_ordering > ?))"
- " ORDER BY topological_ordering ASC, stream_ordering ASC"
- " LIMIT ?"
+ " WHERE room_id = ? AND topological_ordering > ?"
+ " UNION ALL"
+ " SELECT topological_ordering, stream_ordering, event_id FROM events"
+ " WHERE room_id = ? AND topological_ordering = ? AND stream_ordering > ?"
+ " ORDER BY topological_ordering ASC, stream_ordering ASC LIMIT ?"
)
txn.execute(
query_before,
(
- room_id, topological_ordering, topological_ordering,
- stream_ordering, before_limit,
+ room_id, topological_ordering,
+ room_id, topological_ordering, stream_ordering,
+ before_limit,
)
)
@@ -630,8 +633,9 @@ class StreamStore(SQLBaseStore):
txn.execute(
query_after,
(
- room_id, topological_ordering, topological_ordering,
- stream_ordering, after_limit,
+ room_id, topological_ordering,
+ room_id, topological_ordering, stream_ordering,
+ after_limit,
)
)
From a67bf0b074acfca69647030beb9b775359fe684d Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 4 Jul 2016 16:02:50 +0100
Subject: [PATCH 194/414] Add storage function to purge history for a room
---
synapse/storage/events.py | 140 ++++++++++++++++++++++++++++++++++++++
1 file changed, 140 insertions(+)
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index 88a6ff731..98c917ce1 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -1281,6 +1281,146 @@ class EventsStore(SQLBaseStore):
)
return self.runInteraction("get_all_new_events", get_all_new_events_txn)
+ def _delete_old_state_txn(self, txn, room_id, topological_ordering):
+ """Deletes old room state
+ """
+
+ # Tables that should be pruned:
+ # event_auth
+ # event_backward_extremities
+ # event_content_hashes
+ # event_destinations
+ # event_edge_hashes
+ # event_edges
+ # event_forward_extremities
+ # event_json
+ # event_push_actions
+ # event_reference_hashes
+ # event_search
+ # event_signatures
+ # event_to_state_groups
+ # events
+ # rejections
+ # room_depth
+ # state_groups
+ # state_groups_state
+
+ # First ensure that we're not about to delete all the forward extremities
+ txn.execute(
+ "SELECT e.event_id, e.depth FROM events as e "
+ "INNER JOIN event_forward_extremities as f "
+ "ON e.event_id = f.event_id "
+ "AND e.room_id = f.room_id "
+ "WHERE f.room_id = ?",
+ (room_id,)
+ )
+ rows = txn.fetchall()
+ max_depth = max(row[1] for row in rows)  # rows are (event_id, depth)
+
+ if max_depth <= topological_ordering:
+ raise Exception("topological_ordering is greater than or equal to forward extremities")
+
+ txn.execute(
+ "SELECT event_id, state_key FROM events"
+ " LEFT JOIN state_events USING (room_id, event_id)"
+ " WHERE room_id = ? AND topological_ordering < ?",
+ (room_id, topological_ordering,)
+ )
+ event_rows = txn.fetchall()
+
+ # We calculate the new entries for the backward extremities by finding
+ # all events that point to events that are to be purged
+ txn.execute(
+ "SELECT e.event_id FROM events as e"
+ " INNER JOIN event_edges as ed ON e.event_id = ed.prev_event_id"
+ " INNER JOIN events as e2 ON e2.event_id = ed.event_id"
+ " WHERE e.room_id = ? AND e.topological_ordering < ?"
+ " AND e2.topological_ordering >= ?",
+ (room_id, topological_ordering, topological_ordering)
+ )
+ new_backwards_extrems = txn.fetchall()
+
+ # Get all state groups that are only referenced by events that are
+ # to be deleted.
+ txn.execute(
+ "SELECT state_group FROM event_to_state_groups"
+ " INNER JOIN events USING (event_id)"
+ " WHERE state_group IN ("
+ " SELECT DISTINCT state_group FROM events"
+ " INNER JOIN event_to_state_groups USING (event_id)"
+ " WHERE room_id = ? AND topological_ordering < ?"
+ " )"
+ " GROUP BY state_group HAVING MAX(topological_ordering) < ?",
+ (room_id, topological_ordering, topological_ordering)
+ )
+ state_rows = txn.fetchall()
+ txn.executemany(
+ "DELETE FROM state_groups_state WHERE state_group = ?",
+ state_rows
+ )
+ txn.executemany(
+ "DELETE FROM state_groups WHERE id = ?",
+ state_rows
+ )
+ # Delete all non-state
+ txn.executemany(
+ "DELETE FROM event_to_state_groups WHERE event_id = ?",
+ [(event_id,) for event_id, _ in event_rows]
+ )
+
+ txn.execute(
+ "UPDATE room_depth SET min_depth = ? WHERE room_id = ?",
+ (topological_ordering, room_id,)
+ )
+
+ # Delete all remote non-state events
+ to_delete = [
+ (event_id,) for event_id, state_key in event_rows
+ if state_key is None and not self.hs.is_mine_id(event_id)
+ ]
+ to_not_delete = [
+ (event_id,) for event_id, state_key in event_rows
+ if state_key is not None or self.hs.is_mine_id(event_id)
+ ]
+ for table in (
+ "events",
+ "event_json",
+ "event_auth",
+ "event_content_hashes",
+ "event_destinations",
+ "event_edge_hashes",
+ "event_edges",
+ "event_forward_extremities",
+ "event_push_actions",
+ "event_reference_hashes",
+ "event_search",
+ "event_signatures",
+ "rejections",
+ "event_backward_extremities",
+ ):
+ txn.executemany(
+ "DELETE FROM %s WHERE event_id = ?" % (table,),
+ to_delete
+ )
+
+ # Update backward extremities
+ txn.executemany(
+ "INSERT INTO event_backward_extremities (room_id, event_id)"
+ " VALUES (?, ?)",
+ [(room_id, event_id) for event_id, in new_backwards_extrems]
+ )
+
+ txn.executemany(
+ "DELETE FROM events WHERE event_id = ?",
+ to_delete
+ )
+ # Mark all state and own events as outliers
+ txn.executemany(
+ "UPDATE events SET outlier = ?"
+ " WHERE event_id = ?",
+ to_not_delete
+ )
+
AllNewEventsResult = namedtuple("AllNewEventsResult", [
"new_forward_events", "new_backfill_events",
From 8bdaf5f7afaee98a8cf25d2fb170fe4b2aa97f3d Mon Sep 17 00:00:00 2001
From: Kent Shikama
Date: Tue, 5 Jul 2016 02:13:52 +0900
Subject: [PATCH 195/414] Add pepper to password hashing
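An illustrative sketch only (placeholder pepper value, Python 2 as in the tree): the pepper is appended to the password on both the hashing and validation paths, so it must be set once and never changed:

    import bcrypt

    pepper = "placeholder-random-string"  # set once, never change it
    password = "correct horse battery staple"

    stored_hash = bcrypt.hashpw(password + pepper, bcrypt.gensalt(12))
    # validation must apply the same pepper before comparing
    assert bcrypt.hashpw(password + pepper, stored_hash) == stored_hash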
Signed-off-by: Kent Shikama
---
synapse/config/password.py | 6 +++++-
synapse/handlers/auth.py | 5 +++--
2 files changed, 8 insertions(+), 3 deletions(-)
diff --git a/synapse/config/password.py b/synapse/config/password.py
index dec801ef4..ea822f2bb 100644
--- a/synapse/config/password.py
+++ b/synapse/config/password.py
@@ -23,10 +23,14 @@ class PasswordConfig(Config):
def read_config(self, config):
password_config = config.get("password_config", {})
self.password_enabled = password_config.get("enabled", True)
+ self.pepper = password_config.get("pepper", "")
def default_config(self, config_dir_path, server_name, **kwargs):
return """
# Enable password for login.
password_config:
enabled: true
- """
+ # Uncomment for extra security for your passwords.
+ # DO NOT CHANGE THIS AFTER INITIAL SETUP!
+ #pepper: "HR32t0xZcQnzn3O0ZkEVuetdFvH1W6TeEPw6JjH0Cl+qflVOseGyFJlJR7ACLnywjN9"
+ """
\ No newline at end of file
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index 968095c14..fd5fadf73 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -750,7 +750,7 @@ class AuthHandler(BaseHandler):
Returns:
Hashed password (str).
"""
- return bcrypt.hashpw(password, bcrypt.gensalt(self.bcrypt_rounds))
+ return bcrypt.hashpw(password + self.hs.config.password_config.pepper, bcrypt.gensalt(self.bcrypt_rounds))
def validate_hash(self, password, stored_hash):
"""Validates that self.hash(password) == stored_hash.
@@ -763,6 +763,7 @@ class AuthHandler(BaseHandler):
Whether self.hash(password) == stored_hash (bool).
"""
if stored_hash:
- return bcrypt.hashpw(password, stored_hash.encode('utf-8')) == stored_hash
+ return bcrypt.hashpw(password + self.hs.config.password_config.pepper,
+ stored_hash.encode('utf-8')) == stored_hash
else:
return False
From 0fb76c71ac4bdd00e7524cf11668c13754d29a08 Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Mon, 4 Jul 2016 19:44:55 +0100
Subject: [PATCH 196/414] Use different SQL for postgres and sqlite3 when
using multicolumn indexes
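A standalone mirror of the idea, for illustration only (the real helpers below also handle stream-only tokens and the inclusive flag):

    def demo_lower_bound(topological, stream, postgres):
        if postgres:
            # row-value comparison, which postgres can drive straight off
            # the (topological_ordering, stream_ordering) multicolumn index
            return "((%d,%d) < (topological_ordering,stream_ordering))" % (
                topological, stream,
            )
        # expanded OR form for engines without that optimisation
        return (
            "(%d < topological_ordering"
            " OR (%d = topological_ordering AND %d < stream_ordering))"
        ) % (topological, topological, stream)

    print(demo_lower_bound(3, 7, postgres=True))
    print(demo_lower_bound(3, 7, postgres=False))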
---
synapse/storage/event_push_actions.py | 18 ++---
synapse/storage/stream.py | 100 +++++++++++++-------------
2 files changed, 59 insertions(+), 59 deletions(-)
diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py
index 5f1b6f63a..e3e2e8083 100644
--- a/synapse/storage/event_push_actions.py
+++ b/synapse/storage/event_push_actions.py
@@ -16,6 +16,8 @@
from ._base import SQLBaseStore
from twisted.internet import defer
from synapse.util.caches.descriptors import cachedInlineCallbacks
+from synapse.types import RoomStreamToken
+from .stream import lower_bound
import logging
import ujson as json
@@ -73,6 +75,9 @@ class EventPushActionsStore(SQLBaseStore):
stream_ordering = results[0][0]
topological_ordering = results[0][1]
+ token = RoomStreamToken(
+ topological_ordering, stream_ordering
+ )
sql = (
"SELECT sum(notif), sum(highlight)"
@@ -80,15 +85,10 @@ class EventPushActionsStore(SQLBaseStore):
" WHERE"
" user_id = ?"
" AND room_id = ?"
- " AND ("
- " topological_ordering > ?"
- " OR (topological_ordering = ? AND stream_ordering > ?)"
- ")"
- )
- txn.execute(sql, (
- user_id, room_id,
- topological_ordering, topological_ordering, stream_ordering
- ))
+ " AND %s"
+ ) % (lower_bound(token, self.database_engine, inclusive=""),)
+
+ txn.execute(sql, (user_id, room_id))
row = txn.fetchone()
if row:
return {
diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py
index 4dd11284e..23b3a40aa 100644
--- a/synapse/storage/stream.py
+++ b/synapse/storage/stream.py
@@ -40,6 +40,7 @@ from synapse.util.caches.descriptors import cached
from synapse.api.constants import EventTypes
from synapse.types import RoomStreamToken
from synapse.util.logcontext import preserve_fn
+from synapse.storage.engines import PostgresEngine
import logging
@@ -54,25 +55,41 @@ _STREAM_TOKEN = "stream"
_TOPOLOGICAL_TOKEN = "topological"
-def lower_bound(token):
+def lower_bound(token, engine, inclusive=""):
if token.topological is None:
- return "(%d < %s)" % (token.stream, "stream_ordering")
+ return "(%d <%s %s)" % (token.stream, inclusive, "stream_ordering")
else:
- return "(%d < %s OR (%d = %s AND %d < %s))" % (
+ if isinstance(engine, PostgresEngine):
+ # Postgres doesn't optimise ``(x < a) OR (x=a AND y<b)`` as well
+ # as it optimises ``(x,y) < (a,b)`` on multicolumn indexes. So we
+ # use the latter form when running against postgres.
+ return "((%d,%d) <%s (%s,%s))" % (
+ token.topological, token.stream, inclusive,
+ "topological_ordering", "stream_ordering",
+ )
+ return "(%d < %s OR (%d = %s AND %d <%s %s))" % (
token.topological, "topological_ordering",
token.topological, "topological_ordering",
- token.stream, "stream_ordering",
+ token.stream, inclusive, "stream_ordering",
)
-def upper_bound(token):
+def upper_bound(token, engine, inclusive="="):
if token.topological is None:
- return "(%d >= %s)" % (token.stream, "stream_ordering")
+ return "(%d >%s %s)" % (token.stream, inclusive, "stream_ordering")
else:
- return "(%d > %s OR (%d = %s AND %d >= %s))" % (
+ if isinstance(engine, PostgresEngine):
+ # Postgres doesn't optimise ``(x > a) OR (x=a AND y>b)`` as well
+ # as it optimises ``(x,y) > (a,b)`` on multicolumn indexes. So we
+ # use the latter form when running against postgres.
+ return "((%d,%d) >%s (%s,%s))" % (
+ token.topological, token.stream, inclusive,
+ "topological_ordering", "stream_ordering",
+ )
+ return "(%d > %s OR (%d = %s AND %d >%s %s))" % (
token.topological, "topological_ordering",
token.topological, "topological_ordering",
- token.stream, "stream_ordering",
+ token.stream, inclusive, "stream_ordering",
)
@@ -308,18 +325,22 @@ class StreamStore(SQLBaseStore):
args = [False, room_id]
if direction == 'b':
order = "DESC"
- bounds = upper_bound(RoomStreamToken.parse(from_key))
+ bounds = upper_bound(
+ RoomStreamToken.parse(from_key), self.database_engine
+ )
if to_key:
- bounds = "%s AND %s" % (
- bounds, lower_bound(RoomStreamToken.parse(to_key))
- )
+ bounds = "%s AND %s" % (bounds, lower_bound(
+ RoomStreamToken.parse(to_key), self.database_engine
+ ))
else:
order = "ASC"
- bounds = lower_bound(RoomStreamToken.parse(from_key))
+ bounds = lower_bound(
+ RoomStreamToken.parse(from_key), self.database_engine
+ )
if to_key:
- bounds = "%s AND %s" % (
- bounds, upper_bound(RoomStreamToken.parse(to_key))
- )
+ bounds = "%s AND %s" % (bounds, upper_bound(
+ RoomStreamToken.parse(to_key), self.database_engine
+ ))
if int(limit) > 0:
args.append(int(limit))
@@ -586,35 +607,24 @@ class StreamStore(SQLBaseStore):
retcols=["stream_ordering", "topological_ordering"],
)
- stream_ordering = results["stream_ordering"]
- topological_ordering = results["topological_ordering"]
+ token = RoomStreamToken(
+ results["topological_ordering"],
+ results["stream_ordering"],
+ )
query_before = (
"SELECT topological_ordering, stream_ordering, event_id FROM events"
- " WHERE room_id = ? AND topological_ordering < ?"
- " UNION ALL "
- " SELECT topological_ordering, stream_ordering, event_id FROM events"
- " WHERE room_id = ? AND topological_ordering = ? AND stream_ordering < ?"
+ " WHERE room_id = ? AND %s"
" ORDER BY topological_ordering DESC, stream_ordering DESC LIMIT ?"
- )
+ ) % (upper_bound(token, self.database_engine, inclusive=""),)
query_after = (
"SELECT topological_ordering, stream_ordering, event_id FROM events"
- " WHERE room_id = ? AND topological_ordering > ?"
- " UNION ALL"
- " SELECT topological_ordering, stream_ordering, event_id FROM events"
- " WHERE room_id = ? AND topological_ordering = ? AND stream_ordering > ?"
+ " WHERE room_id = ? AND %s"
" ORDER BY topological_ordering ASC, stream_ordering ASC LIMIT ?"
- )
+ ) % (lower_bound(token, self.database_engine, inclusive=""),)
- txn.execute(
- query_before,
- (
- room_id, topological_ordering,
- room_id, topological_ordering, stream_ordering,
- before_limit,
- )
- )
+ txn.execute(query_before, (room_id, before_limit))
rows = self.cursor_to_dict(txn)
events_before = [r["event_id"] for r in rows]
@@ -626,18 +636,11 @@ class StreamStore(SQLBaseStore):
))
else:
start_token = str(RoomStreamToken(
- topological_ordering,
- stream_ordering - 1,
+ token.topological,
+ token.stream - 1,
))
- txn.execute(
- query_after,
- (
- room_id, topological_ordering,
- room_id, topological_ordering, stream_ordering,
- after_limit,
- )
- )
+ txn.execute(query_after, (room_id, after_limit))
rows = self.cursor_to_dict(txn)
events_after = [r["event_id"] for r in rows]
@@ -648,10 +651,7 @@ class StreamStore(SQLBaseStore):
rows[-1]["stream_ordering"],
))
else:
- end_token = str(RoomStreamToken(
- topological_ordering,
- stream_ordering,
- ))
+ end_token = str(token)
return {
"before": {
From 2d21d43c34751cffb5f324bd58ceff060f65f679 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 5 Jul 2016 10:28:51 +0100
Subject: [PATCH 197/414] Add purge_history API
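A hypothetical invocation (host, token, room and event IDs are all placeholders; the IDs must be URL-escaped):

    import requests

    resp = requests.post(
        "http://localhost:8008/_matrix/client/api/v1/admin/purge_history"
        "/%21somewhere%3Aexample.com/%24someevent%3Aexample.com",
        params={"access_token": "placeholder-admin-token"},
    )
    assert resp.json() == {}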
---
synapse/handlers/federation.py | 2 +-
synapse/handlers/message.py | 13 +++++++++++++
synapse/rest/client/v1/admin.py | 18 ++++++++++++++++++
synapse/storage/events.py | 6 ++++++
4 files changed, 38 insertions(+), 1 deletion(-)
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 6c0bc7eaf..351b21824 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -1413,7 +1413,7 @@ class FederationHandler(BaseHandler):
local_view = dict(auth_events)
remote_view = dict(auth_events)
remote_view.update({
- (d.type, d.state_key): d for d in different_events
+ (d.type, d.state_key): d for d in different_events if d
})
new_state, prev_state = self.state_handler.resolve_events(
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 15caf1950..878809d50 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -50,6 +50,19 @@ class MessageHandler(BaseHandler):
self.validator = EventValidator()
self.snapshot_cache = SnapshotCache()
+ @defer.inlineCallbacks
+ def purge_history(self, room_id, event_id):
+ event = yield self.store.get_event(event_id)
+
+ if event.room_id != room_id:
+ raise SynapseError(400, "Event is for wrong room.")
+
+ depth = event.depth
+
+ # TODO: Lock.
+
+ yield self.store.delete_old_state(room_id, depth)
+
@defer.inlineCallbacks
def get_messages(self, requester, room_id=None, pagin_config=None,
as_client_event=True):
diff --git a/synapse/rest/client/v1/admin.py b/synapse/rest/client/v1/admin.py
index e54c472e0..71537a7d0 100644
--- a/synapse/rest/client/v1/admin.py
+++ b/synapse/rest/client/v1/admin.py
@@ -77,6 +77,24 @@ class PurgeMediaCacheRestServlet(ClientV1RestServlet):
defer.returnValue((200, ret))
+class PurgeHistoryRestServlet(ClientV1RestServlet):
+ PATTERNS = client_path_patterns(
+ "/admin/purge_history/(?P[^/]*)/(?P[^/]*)"
+ )
+
+ @defer.inlineCallbacks
+ def on_POST(self, request, room_id, event_id):
+ requester = yield self.auth.get_user_by_req(request)
+ is_admin = yield self.auth.is_server_admin(requester.user)
+
+ if not is_admin:
+ raise AuthError(403, "You are not a server admin")
+
+ yield self.handlers.message_handler.purge_history(room_id, event_id)
+
+ defer.returnValue((200, {}))
+
+
class DeactivateAccountRestServlet(ClientV1RestServlet):
PATTERNS = client_path_patterns("/admin/deactivate/(?P<target_user_id>[^/]*)")
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index 98c917ce1..c3b498bb3 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -1281,6 +1281,12 @@ class EventsStore(SQLBaseStore):
)
return self.runInteraction("get_all_new_events", get_all_new_events_txn)
+ def delete_old_state(self, room_id, topological_ordering):
+ return self.runInteraction(
+ "delete_old_state",
+ self._delete_old_state_txn, room_id, topological_ordering
+ )
+
def _delete_old_state_txn(self, txn, room_id, topological_ordering):
"""Deletes old room state
"""
From d44d11d864714d4d99953bdae6625973519f120f Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Tue, 5 Jul 2016 10:39:13 +0100
Subject: [PATCH 198/414] Use true/false for boolean parameter inclusive to
avoid potential for SQL injection, and possibly make the code clearer
---
synapse/storage/event_push_actions.py | 2 +-
synapse/storage/stream.py | 10 ++++++----
2 files changed, 7 insertions(+), 5 deletions(-)
diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py
index e3e2e8083..3d93285f8 100644
--- a/synapse/storage/event_push_actions.py
+++ b/synapse/storage/event_push_actions.py
@@ -86,7 +86,7 @@ class EventPushActionsStore(SQLBaseStore):
" user_id = ?"
" AND room_id = ?"
" AND %s"
- ) % (lower_bound(token, self.database_engine, inclusive=""),)
+ ) % (lower_bound(token, self.database_engine, inclusive=False),)
txn.execute(sql, (user_id, room_id))
row = txn.fetchone()
diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py
index 23b3a40aa..56304999d 100644
--- a/synapse/storage/stream.py
+++ b/synapse/storage/stream.py
@@ -55,7 +55,8 @@ _STREAM_TOKEN = "stream"
_TOPOLOGICAL_TOKEN = "topological"
-def lower_bound(token, engine, inclusive=""):
+def lower_bound(token, engine, inclusive=False):
+ inclusive = "=" if inclusive else ""
if token.topological is None:
return "(%d <%s %s)" % (token.stream, inclusive, "stream_ordering")
else:
@@ -74,7 +75,8 @@ def lower_bound(token, engine, inclusive=""):
)
-def upper_bound(token, engine, inclusive="="):
+def upper_bound(token, engine, inclusive=True):
+ inclusive = "=" if inclusive else ""
if token.topological is None:
return "(%d >%s %s)" % (token.stream, inclusive, "stream_ordering")
else:
@@ -616,13 +618,13 @@ class StreamStore(SQLBaseStore):
"SELECT topological_ordering, stream_ordering, event_id FROM events"
" WHERE room_id = ? AND %s"
" ORDER BY topological_ordering DESC, stream_ordering DESC LIMIT ?"
- ) % (upper_bound(token, self.database_engine, inclusive=""),)
+ ) % (upper_bound(token, self.database_engine, inclusive=False),)
query_after = (
"SELECT topological_ordering, stream_ordering, event_id FROM events"
" WHERE room_id = ? AND %s"
" ORDER BY topological_ordering ASC, stream_ordering ASC LIMIT ?"
- ) % (lower_bound(token, self.database_engine, inclusive=""),)
+ ) % (lower_bound(token, self.database_engine, inclusive=False),)
txn.execute(query_before, (room_id, before_limit))
From 507b8bb0910ef6fae9c7d9cb1405a33c4e4b6e8e Mon Sep 17 00:00:00 2001
From: Kent Shikama
Date: Tue, 5 Jul 2016 18:42:35 +0900
Subject: [PATCH 199/414] Add comment to prompt changing of pepper
---
synapse/config/password.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/synapse/config/password.py b/synapse/config/password.py
index ea822f2bb..7c5cb5f0e 100644
--- a/synapse/config/password.py
+++ b/synapse/config/password.py
@@ -31,6 +31,7 @@ class PasswordConfig(Config):
password_config:
enabled: true
# Uncomment for extra security for your passwords.
+ # Change to a secret random string.
# DO NOT CHANGE THIS AFTER INITIAL SETUP!
#pepper: "HR32t0xZcQnzn3O0ZkEVuetdFvH1W6TeEPw6JjH0Cl+qflVOseGyFJlJR7ACLnywjN9"
"""
\ No newline at end of file
From 1ee258430724618c7014bb176186c23b0b5b06f0 Mon Sep 17 00:00:00 2001
From: Kent Shikama
Date: Tue, 5 Jul 2016 19:01:00 +0900
Subject: [PATCH 200/414] Fix pep8
---
synapse/config/password.py | 2 +-
synapse/handlers/auth.py | 3 ++-
2 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/synapse/config/password.py b/synapse/config/password.py
index 7c5cb5f0e..058a3a534 100644
--- a/synapse/config/password.py
+++ b/synapse/config/password.py
@@ -34,4 +34,4 @@ class PasswordConfig(Config):
# Change to a secret random string.
# DO NOT CHANGE THIS AFTER INITIAL SETUP!
#pepper: "HR32t0xZcQnzn3O0ZkEVuetdFvH1W6TeEPw6JjH0Cl+qflVOseGyFJlJR7ACLnywjN9"
- """
\ No newline at end of file
+ """
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index fd5fadf73..be46681c6 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -750,7 +750,8 @@ class AuthHandler(BaseHandler):
Returns:
Hashed password (str).
"""
- return bcrypt.hashpw(password + self.hs.config.password_config.pepper, bcrypt.gensalt(self.bcrypt_rounds))
+ return bcrypt.hashpw(password + self.hs.config.password_config.pepper,
+ bcrypt.gensalt(self.bcrypt_rounds))
def validate_hash(self, password, stored_hash):
"""Validates that self.hash(password) == stored_hash.
From 14362bf3590eb95a50201a84c8e16d5626b86249 Mon Sep 17 00:00:00 2001
From: Kent Shikama
Date: Tue, 5 Jul 2016 19:12:53 +0900
Subject: [PATCH 201/414] Fix password config
---
synapse/config/password.py | 2 +-
synapse/handlers/auth.py | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/synapse/config/password.py b/synapse/config/password.py
index 058a3a534..00b1ea3df 100644
--- a/synapse/config/password.py
+++ b/synapse/config/password.py
@@ -23,7 +23,7 @@ class PasswordConfig(Config):
def read_config(self, config):
password_config = config.get("password_config", {})
self.password_enabled = password_config.get("enabled", True)
- self.pepper = password_config.get("pepper", "")
+ self.password_pepper = password_config.get("pepper", "")
def default_config(self, config_dir_path, server_name, **kwargs):
return """
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index be46681c6..e259213a3 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -750,7 +750,7 @@ class AuthHandler(BaseHandler):
Returns:
Hashed password (str).
"""
- return bcrypt.hashpw(password + self.hs.config.password_config.pepper,
+ return bcrypt.hashpw(password + self.hs.config.password_pepper,
bcrypt.gensalt(self.bcrypt_rounds))
def validate_hash(self, password, stored_hash):
@@ -764,7 +764,7 @@ class AuthHandler(BaseHandler):
Whether self.hash(password) == stored_hash (bool).
"""
if stored_hash:
- return bcrypt.hashpw(password + self.hs.config.password_config.pepper,
+ return bcrypt.hashpw(password + self.hs.config.password_pepper,
stored_hash.encode('utf-8')) == stored_hash
else:
return False
From 252ee2d979f8814ff5bd0f9acb76b9ba3ce86b52 Mon Sep 17 00:00:00 2001
From: Kent Shikama
Date: Tue, 5 Jul 2016 19:15:51 +0900
Subject: [PATCH 202/414] Remove default password pepper string
---
synapse/config/password.py | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/synapse/config/password.py b/synapse/config/password.py
index 00b1ea3df..66f0d93ee 100644
--- a/synapse/config/password.py
+++ b/synapse/config/password.py
@@ -30,8 +30,7 @@ class PasswordConfig(Config):
# Enable password for login.
password_config:
enabled: true
- # Uncomment for extra security for your passwords.
# Change to a secret random string.
# DO NOT CHANGE THIS AFTER INITIAL SETUP!
- #pepper: "HR32t0xZcQnzn3O0ZkEVuetdFvH1W6TeEPw6JjH0Cl+qflVOseGyFJlJR7ACLnywjN9"
+ #pepper: ""
"""
From b6b0132ac7cac86e8cc5457783311b4db59e5870 Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Tue, 5 Jul 2016 13:55:18 +0100
Subject: [PATCH 203/414] Make get_events_around more efficient on sqlite3
---
synapse/storage/stream.py | 62 +++++++++++++++++++++++++++++++--------
1 file changed, 49 insertions(+), 13 deletions(-)
diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py
index 56304999d..f18fb63c5 100644
--- a/synapse/storage/stream.py
+++ b/synapse/storage/stream.py
@@ -40,7 +40,7 @@ from synapse.util.caches.descriptors import cached
from synapse.api.constants import EventTypes
from synapse.types import RoomStreamToken
from synapse.util.logcontext import preserve_fn
-from synapse.storage.engines import PostgresEngine
+from synapse.storage.engines import PostgresEngine, Sqlite3Engine
import logging
@@ -614,19 +614,55 @@ class StreamStore(SQLBaseStore):
results["stream_ordering"],
)
- query_before = (
- "SELECT topological_ordering, stream_ordering, event_id FROM events"
- " WHERE room_id = ? AND %s"
- " ORDER BY topological_ordering DESC, stream_ordering DESC LIMIT ?"
- ) % (upper_bound(token, self.database_engine, inclusive=False),)
+ if isinstance(self.database_engine, Sqlite3Engine):
+ # SQLite3 doesn't optimise ``(x < a) OR (x = a AND y < b)``,
+ # so we pass it the UNION ALL of the two queries instead.
- query_after = (
- "SELECT topological_ordering, stream_ordering, event_id FROM events"
- " WHERE room_id = ? AND %s"
- " ORDER BY topological_ordering ASC, stream_ordering ASC LIMIT ?"
- ) % (lower_bound(token, self.database_engine, inclusive=False),)
+ query_before = (
+ "SELECT topological_ordering, stream_ordering, event_id FROM events"
+ " WHERE room_id = ? AND topological_ordering < ?"
+ " UNION ALL"
+ " SELECT topological_ordering, stream_ordering, event_id FROM events"
+ " WHERE room_id = ? AND topological_ordering = ? AND stream_ordering < ?"
+ " ORDER BY topological_ordering DESC, stream_ordering DESC LIMIT ?"
+ )
+ before_args = (
+ room_id, token.topological,
+ room_id, token.topological, token.stream,
+ before_limit,
+ )
- txn.execute(query_before, (room_id, before_limit))
+ query_after = (
+ "SELECT topological_ordering, stream_ordering, event_id FROM events"
+ " WHERE room_id = ? AND topological_ordering > ?"
+ " UNION ALL"
+ " SELECT topological_ordering, stream_ordering, event_id FROM events"
+ " WHERE room_id = ? AND topological_ordering = ? AND stream_ordering > ?"
+ " ORDER BY topological_ordering ASC, stream_ordering ASC LIMIT ?"
+ )
+ after_args = (
+ room_id, token.topological,
+ room_id, token.topological, token.stream,
+ after_limit,
+ )
+ else:
+ query_before = (
+ "SELECT topological_ordering, stream_ordering, event_id FROM events"
+ " WHERE room_id = ? AND %s"
+ " ORDER BY topological_ordering DESC, stream_ordering DESC LIMIT ?"
+ ) % (upper_bound(token, self.database_engine, inclusive=False),)
+
+ before_args = (room_id, before_limit),
+
+ query_after = (
+ "SELECT topological_ordering, stream_ordering, event_id FROM events"
+ " WHERE room_id = ? AND %s"
+ " ORDER BY topological_ordering ASC, stream_ordering ASC LIMIT ?"
+ ) % (lower_bound(token, self.database_engine, inclusive=False),)
+
+ after_args = (room_id, after_limit)
+
+ txn.execute(query_before, before_args)
rows = self.cursor_to_dict(txn)
events_before = [r["event_id"] for r in rows]
@@ -642,7 +678,7 @@ class StreamStore(SQLBaseStore):
token.stream - 1,
))
- txn.execute(query_after, (room_id, after_limit))
+ txn.execute(query_after, after_args)
rows = self.cursor_to_dict(txn)
events_after = [r["event_id"] for r in rows]
From dd2ccee27d834107e86cc18f46a5e4d4aa88d3c9 Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Tue, 5 Jul 2016 14:06:07 +0100
Subject: [PATCH 204/414] Fix typo
---
synapse/storage/stream.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py
index f18fb63c5..c08c5b997 100644
--- a/synapse/storage/stream.py
+++ b/synapse/storage/stream.py
@@ -652,7 +652,7 @@ class StreamStore(SQLBaseStore):
" ORDER BY topological_ordering DESC, stream_ordering DESC LIMIT ?"
) % (upper_bound(token, self.database_engine, inclusive=False),)
- before_args = (room_id, before_limit),
+ before_args = (room_id, before_limit)
query_after = (
"SELECT topological_ordering, stream_ordering, event_id FROM events"
From 7335f0addae9ff473403eaaffd7d2b02a9f1991f Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 5 Jul 2016 14:44:25 +0100
Subject: [PATCH 205/414] Add ReadWriteLock
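A minimal usage sketch, in the same deferred-context-manager style as Linearizer (names are illustrative):

    from twisted.internet import defer

    @defer.inlineCallbacks
    def read_task(rwlock, key):
        # any number of readers may hold the lock at once
        with (yield rwlock.read(key)):
            pass  # ... read ...

    @defer.inlineCallbacks
    def write_task(rwlock, key):
        # a writer waits for all existing readers and blocks new ones
        with (yield rwlock.write(key)):
            pass  # ... write ...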
---
synapse/util/async.py | 82 +++++++++++++++++++++++++++++++++++++
tests/util/test_rwlock.py | 85 +++++++++++++++++++++++++++++++++++++++
2 files changed, 167 insertions(+)
create mode 100644 tests/util/test_rwlock.py
diff --git a/synapse/util/async.py b/synapse/util/async.py
index 40be7fe7e..c84b23ff4 100644
--- a/synapse/util/async.py
+++ b/synapse/util/async.py
@@ -194,3 +194,85 @@ class Linearizer(object):
self.key_to_defer.pop(key, None)
defer.returnValue(_ctx_manager())
+
+
+class ReadWriteLock(object):
+ """A deferred style read write lock.
+
+ Example:
+
+ with (yield read_write_lock.read("test_key")):
+ # do some work
+ """
+
+ # IMPLEMENTATION NOTES
+ #
+ # We track the most recent queued reader and writer deferreds (which get
+ # resolved when they release the lock).
+ #
+ # Read: We know it's safe to acquire a read lock when the latest writer has
+ # been resolved. The new reader is appended to the list of latest readers.
+ #
+ # Write: We know it's safe to acquire the write lock when both the latest
+ # writers and readers have been resolved. The new writer replaces the latest
+ # writer.
+
+ def __init__(self):
+ # Latest readers queued
+ self.key_to_current_readers = {}
+
+ # Latest writer queued
+ self.key_to_current_writer = {}
+
+ @defer.inlineCallbacks
+ def read(self, key):
+ new_defer = defer.Deferred()
+
+ curr_readers = self.key_to_current_readers.setdefault(key, set())
+ curr_writer = self.key_to_current_writer.get(key, None)
+
+ curr_readers.add(new_defer)
+
+ # We wait for the latest writer to finish writing. We can safely ignore
+ # any existing readers... as they're readers.
+ yield curr_writer
+
+ @contextmanager
+ def _ctx_manager():
+ try:
+ yield
+ finally:
+ new_defer.callback(None)
+ self.key_to_current_readers.get(key, set()).discard(new_defer)
+
+ defer.returnValue(_ctx_manager())
+
+ @defer.inlineCallbacks
+ def write(self, key):
+ new_defer = defer.Deferred()
+
+ curr_readers = self.key_to_current_readers.get(key, set())
+ curr_writer = self.key_to_current_writer.get(key, None)
+
+ # We wait on all latest readers and writer.
+ to_wait_on = list(curr_readers)
+ if curr_writer:
+ to_wait_on.append(curr_writer)
+
+ # We can clear the list of current readers since the new writer waits
+ # for them to finish.
+ curr_readers.clear()
+ self.key_to_current_writer[key] = new_defer
+
+ yield defer.gatherResults(to_wait_on)
+
+ @contextmanager
+ def _ctx_manager():
+ try:
+ yield
+ finally:
+ new_defer.callback(None)
+ if self.key_to_current_writer[key] == new_defer:
+ self.key_to_current_writer.pop(key)
+
+ defer.returnValue(_ctx_manager())
diff --git a/tests/util/test_rwlock.py b/tests/util/test_rwlock.py
new file mode 100644
index 000000000..1d745ae1a
--- /dev/null
+++ b/tests/util/test_rwlock.py
@@ -0,0 +1,85 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from tests import unittest
+
+from synapse.util.async import ReadWriteLock
+
+
+class ReadWriteLockTestCase(unittest.TestCase):
+
+ def _assert_called_before_not_after(self, lst, first_false):
+ for i, d in enumerate(lst[:first_false]):
+ self.assertTrue(d.called, msg="%d was unexpectedly false" % i)
+
+ for i, d in enumerate(lst[first_false:]):
+ self.assertFalse(
+ d.called, msg="%d was unexpectedly true" % (i + first_false)
+ )
+
+ def test_rwlock(self):
+ rwlock = ReadWriteLock()
+
+ key = object()
+
+ ds = [
+ rwlock.read(key), # 0
+ rwlock.read(key), # 1
+ rwlock.write(key), # 2
+ rwlock.write(key), # 3
+ rwlock.read(key), # 4
+ rwlock.read(key), # 5
+ rwlock.write(key), # 6
+ ]
+
+ self._assert_called_before_not_after(ds, 2)
+
+ with ds[0].result:
+ self._assert_called_before_not_after(ds, 2)
+ self._assert_called_before_not_after(ds, 2)
+
+ with ds[1].result:
+ self._assert_called_before_not_after(ds, 2)
+ self._assert_called_before_not_after(ds, 3)
+
+ with ds[2].result:
+ self._assert_called_before_not_after(ds, 3)
+ self._assert_called_before_not_after(ds, 4)
+
+ with ds[3].result:
+ self._assert_called_before_not_after(ds, 4)
+ self._assert_called_before_not_after(ds, 6)
+
+ with ds[5].result:
+ self._assert_called_before_not_after(ds, 6)
+ self._assert_called_before_not_after(ds, 6)
+
+ with ds[4].result:
+ self._assert_called_before_not_after(ds, 6)
+ self._assert_called_before_not_after(ds, 7)
+
+ with ds[6].result:
+ pass
+
+ d = rwlock.write(key)
+ self.assertTrue(d.called)
+ with d.result:
+ pass
+
+ d = rwlock.read(key)
+ self.assertTrue(d.called)
+ with d.result:
+ pass
From 8f8798bc0d572af103274fc07d3adac67ce7f51a Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 5 Jul 2016 15:30:25 +0100
Subject: [PATCH 206/414] Add ReadWriteLock for pagination and history prune
---
synapse/handlers/message.py | 76 +++++++++++++++++++------------------
synapse/storage/stream.py | 4 +-
2 files changed, 41 insertions(+), 39 deletions(-)
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 878809d50..ad2753c1b 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -26,7 +26,7 @@ from synapse.types import (
UserID, RoomAlias, RoomStreamToken, StreamToken, get_domain_from_id
)
from synapse.util import unwrapFirstError
-from synapse.util.async import concurrently_execute, run_on_reactor
+from synapse.util.async import concurrently_execute, run_on_reactor, ReadWriteLock
from synapse.util.caches.snapshot_cache import SnapshotCache
from synapse.util.logcontext import preserve_fn
from synapse.visibility import filter_events_for_client
@@ -50,6 +50,8 @@ class MessageHandler(BaseHandler):
self.validator = EventValidator()
self.snapshot_cache = SnapshotCache()
+ self.pagination_lock = ReadWriteLock()
+
@defer.inlineCallbacks
def purge_history(self, room_id, event_id):
event = yield self.store.get_event(event_id)
@@ -59,9 +61,8 @@ class MessageHandler(BaseHandler):
depth = event.depth
- # TODO: Lock.
-
- yield self.store.delete_old_state(room_id, depth)
+ with (yield self.pagination_lock.write(room_id)):
+ yield self.store.delete_old_state(room_id, depth)
@defer.inlineCallbacks
def get_messages(self, requester, room_id=None, pagin_config=None,
@@ -98,42 +99,43 @@ class MessageHandler(BaseHandler):
source_config = pagin_config.get_source_config("room")
- membership, member_event_id = yield self._check_in_room_or_world_readable(
- room_id, user_id
- )
-
- if source_config.direction == 'b':
- # if we're going backwards, we might need to backfill. This
- # requires that we have a topo token.
- if room_token.topological:
- max_topo = room_token.topological
- else:
- max_topo = yield self.store.get_max_topological_token_for_stream_and_room(
- room_id, room_token.stream
- )
-
- if membership == Membership.LEAVE:
- # If they have left the room then clamp the token to be before
- # they left the room, to save the effort of loading from the
- # database.
- leave_token = yield self.store.get_topological_token_for_event(
- member_event_id
- )
- leave_token = RoomStreamToken.parse(leave_token)
- if leave_token.topological < max_topo:
- source_config.from_key = str(leave_token)
-
- yield self.hs.get_handlers().federation_handler.maybe_backfill(
- room_id, max_topo
+ with (yield self.pagination_lock.read(room_id)):
+ membership, member_event_id = yield self._check_in_room_or_world_readable(
+ room_id, user_id
)
- events, next_key = yield data_source.get_pagination_rows(
- requester.user, source_config, room_id
- )
+ if source_config.direction == 'b':
+ # if we're going backwards, we might need to backfill. This
+ # requires that we have a topo token.
+ if room_token.topological:
+ max_topo = room_token.topological
+ else:
+ max_topo = yield self.store.get_max_topological_token(
+ room_id, room_token.stream
+ )
- next_token = pagin_config.from_token.copy_and_replace(
- "room_key", next_key
- )
+ if membership == Membership.LEAVE:
+ # If they have left the room then clamp the token to be before
+ # they left the room, to save the effort of loading from the
+ # database.
+ leave_token = yield self.store.get_topological_token_for_event(
+ member_event_id
+ )
+ leave_token = RoomStreamToken.parse(leave_token)
+ if leave_token.topological < max_topo:
+ source_config.from_key = str(leave_token)
+
+ yield self.hs.get_handlers().federation_handler.maybe_backfill(
+ room_id, max_topo
+ )
+
+ events, next_key = yield data_source.get_pagination_rows(
+ requester.user, source_config, room_id
+ )
+
+ next_token = pagin_config.from_token.copy_and_replace(
+ "room_key", next_key
+ )
if not events:
defer.returnValue({
diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py
index b9ad965fd..3dda2dab5 100644
--- a/synapse/storage/stream.py
+++ b/synapse/storage/stream.py
@@ -487,13 +487,13 @@ class StreamStore(SQLBaseStore):
row["topological_ordering"], row["stream_ordering"],)
)
- def get_max_topological_token_for_stream_and_room(self, room_id, stream_key):
+ def get_max_topological_token(self, room_id, stream_key):
sql = (
"SELECT max(topological_ordering) FROM events"
" WHERE room_id = ? AND stream_ordering < ?"
)
return self._execute(
- "get_max_topological_token_for_stream_and_room", None,
+ "get_max_topological_token", None,
sql, room_id, stream_key,
).addCallback(
lambda r: r[0][0] if r else 0
From caf33b2d9be1b992098a00ee61cf4b4009ee3a09 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 5 Jul 2016 17:18:19 +0100
Subject: [PATCH 207/414] Protect password when registering using shared secret
---
scripts/register_new_matrix_user | 11 ++++++++---
synapse/rest/client/v1/register.py | 11 +++++++----
2 files changed, 15 insertions(+), 7 deletions(-)
diff --git a/scripts/register_new_matrix_user b/scripts/register_new_matrix_user
index 27a6250b1..6d055fd01 100755
--- a/scripts/register_new_matrix_user
+++ b/scripts/register_new_matrix_user
@@ -25,12 +25,17 @@ import urllib2
import yaml
-def request_registration(user, password, server_location, shared_secret):
+def request_registration(user, password, server_location, shared_secret, admin=False):
mac = hmac.new(
key=shared_secret,
- msg=user,
digestmod=hashlib.sha1,
- ).hexdigest()
+ )
+
+ mac.update(user)
+ mac.update(password)
+ mac.update("admin" if admin else "notadmin")
+
+ mac = mac.hexdigest()
data = {
"user": user,
diff --git a/synapse/rest/client/v1/register.py b/synapse/rest/client/v1/register.py
index d791d5e07..0eb7490e5 100644
--- a/synapse/rest/client/v1/register.py
+++ b/synapse/rest/client/v1/register.py
@@ -324,6 +324,8 @@ class RegisterRestServlet(ClientV1RestServlet):
raise SynapseError(400, "Shared secret registration is not enabled")
user = register_json["user"].encode("utf-8")
+ password = register_json["password"].encode("utf-8")
+ admin = register_json.get("admin", None)
# str() because otherwise hmac complains that 'unicode' does not
# have the buffer interface
@@ -331,11 +333,12 @@ class RegisterRestServlet(ClientV1RestServlet):
want_mac = hmac.new(
key=self.hs.config.registration_shared_secret,
- msg=user,
digestmod=sha1,
- ).hexdigest()
-
- password = register_json["password"].encode("utf-8")
+ )
+ want_mac.update(user)
+ want_mac.update(password)
+ want_mac.update("admin" if admin else "notadmin")
+ want_mac = want_mac.hexdigest()
if compare_digest(want_mac, got_mac):
handler = self.handlers.registration_handler
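
For reference, the MAC construction this patch introduces can be reproduced standalone. A minimal sketch mirroring the script above (the secret and credentials are illustrative; note that a later patch in this series adds "\x00" separators between the fields):

    import hashlib
    import hmac

    def registration_mac(shared_secret, user, password, admin=False):
        # The MAC now covers every security-relevant field, not just the
        # username, so password and admin flag can't be swapped in transit.
        mac = hmac.new(key=shared_secret, digestmod=hashlib.sha1)
        mac.update(user)
        mac.update(password)
        mac.update(b"admin" if admin else b"notadmin")
        return mac.hexdigest()

    print(registration_mac(b"registration-shared-secret", b"alice", b"hunter2"))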
From 651faee698d5ff4806d1e0e7f5cd4c438bf434f1 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 5 Jul 2016 17:30:22 +0100
Subject: [PATCH 208/414] Add an admin option to shared secret registration
---
scripts/register_new_matrix_user | 19 ++++++++--
synapse/handlers/register.py | 4 +-
synapse/rest/client/v1/register.py | 1 +
synapse/storage/registration.py | 61 +++++++++++++++++++-----------
4 files changed, 58 insertions(+), 27 deletions(-)
diff --git a/scripts/register_new_matrix_user b/scripts/register_new_matrix_user
index 6d055fd01..987bf32d1 100755
--- a/scripts/register_new_matrix_user
+++ b/scripts/register_new_matrix_user
@@ -42,6 +42,7 @@ def request_registration(user, password, server_location, shared_secret, admin=F
"password": password,
"mac": mac,
"type": "org.matrix.login.shared_secret",
+ "admin": admin,
}
server_location = server_location.rstrip("/")
@@ -73,7 +74,7 @@ def request_registration(user, password, server_location, shared_secret, admin=F
sys.exit(1)
-def register_new_user(user, password, server_location, shared_secret):
+def register_new_user(user, password, server_location, shared_secret, admin):
if not user:
try:
default_user = getpass.getuser()
@@ -104,7 +105,14 @@ def register_new_user(user, password, server_location, shared_secret):
print "Passwords do not match"
sys.exit(1)
- request_registration(user, password, server_location, shared_secret)
+ if not admin:
+ admin = raw_input("Make admin [no]: ")
+ if admin in ("y", "yes", "true"):
+ admin = True
+ else:
+ admin = False
+
+ request_registration(user, password, server_location, shared_secret, bool(admin))
if __name__ == "__main__":
@@ -124,6 +132,11 @@ if __name__ == "__main__":
default=None,
help="New password for user. Will prompt if omitted.",
)
+ parser.add_argument(
+ "-a", "--admin",
+ action="store_true",
+ help="Register new user as an admin. Will prompt if omitted.",
+ )
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(
@@ -156,4 +169,4 @@ if __name__ == "__main__":
else:
secret = args.shared_secret
- register_new_user(args.user, args.password, args.server_url, secret)
+ register_new_user(args.user, args.password, args.server_url, secret, args.admin)
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index 88c82ba7d..8c3381df8 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -90,7 +90,8 @@ class RegistrationHandler(BaseHandler):
password=None,
generate_token=True,
guest_access_token=None,
- make_guest=False
+ make_guest=False,
+ admin=False,
):
"""Registers a new client on the server.
@@ -141,6 +142,7 @@ class RegistrationHandler(BaseHandler):
# If the user was a guest then they already have a profile
None if was_guest else user.localpart
),
+ admin=admin,
)
else:
# autogen a sequential user ID
diff --git a/synapse/rest/client/v1/register.py b/synapse/rest/client/v1/register.py
index 0eb7490e5..25d63a0b0 100644
--- a/synapse/rest/client/v1/register.py
+++ b/synapse/rest/client/v1/register.py
@@ -345,6 +345,7 @@ class RegisterRestServlet(ClientV1RestServlet):
user_id, token = yield handler.register(
localpart=user,
password=password,
+ admin=bool(admin),
)
self._remove_session(session)
defer.returnValue({
diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py
index 5c75dbab5..4999175dd 100644
--- a/synapse/storage/registration.py
+++ b/synapse/storage/registration.py
@@ -77,7 +77,7 @@ class RegistrationStore(SQLBaseStore):
@defer.inlineCallbacks
def register(self, user_id, token, password_hash,
was_guest=False, make_guest=False, appservice_id=None,
- create_profile_with_localpart=None):
+ create_profile_with_localpart=None, admin=False):
"""Attempts to register an account.
Args:
@@ -104,6 +104,7 @@ class RegistrationStore(SQLBaseStore):
make_guest,
appservice_id,
create_profile_with_localpart,
+ admin
)
self.get_user_by_id.invalidate((user_id,))
self.is_guest.invalidate((user_id,))
@@ -118,6 +119,7 @@ class RegistrationStore(SQLBaseStore):
make_guest,
appservice_id,
create_profile_with_localpart,
+ admin,
):
now = int(self.clock.time())
@@ -125,29 +127,42 @@ class RegistrationStore(SQLBaseStore):
try:
if was_guest:
- txn.execute("UPDATE users SET"
- " password_hash = ?,"
- " upgrade_ts = ?,"
- " is_guest = ?"
- " WHERE name = ?",
- [password_hash, now, 1 if make_guest else 0, user_id])
+ txn.execute(
+ "UPDATE users SET"
+ " password_hash = ?,"
+ " upgrade_ts = ?,"
+ " is_guest = ?,"
+ " admin = ?"
+ " WHERE name = ?",
+ (password_hash, now, 1 if make_guest else 0, admin, user_id,)
+ )
+ self._simple_update_one_txn(
+ txn,
+ "users",
+ keyvalues={
+ "name": user_id,
+ },
+ updatevalues={
+ "password_hash": password_hash,
+ "upgrade_ts": now,
+ "is_guest": 1 if make_guest else 0,
+ "appservice_id": appservice_id,
+ "admin": admin,
+ }
+ )
else:
- txn.execute("INSERT INTO users "
- "("
- " name,"
- " password_hash,"
- " creation_ts,"
- " is_guest,"
- " appservice_id"
- ") "
- "VALUES (?,?,?,?,?)",
- [
- user_id,
- password_hash,
- now,
- 1 if make_guest else 0,
- appservice_id,
- ])
+ self._simple_insert_txn(
+ txn,
+ "users",
+ values={
+ "name": user_id,
+ "password_hash": password_hash,
+ "creation_ts": now,
+ "is_guest": 1 if make_guest else 0,
+ "appservice_id": appservice_id,
+ "admin": admin,
+ }
+ )
except self.database_engine.module.IntegrityError:
raise StoreError(
400, "User ID already taken.", errcode=Codes.USER_IN_USE
From 4adf93e0f743338c929860a1384beabeae9fded8 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 5 Jul 2016 17:34:25 +0100
Subject: [PATCH 209/414] Fix for postgres
---
synapse/storage/registration.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py
index 4999175dd..232dcfd9e 100644
--- a/synapse/storage/registration.py
+++ b/synapse/storage/registration.py
@@ -147,7 +147,7 @@ class RegistrationStore(SQLBaseStore):
"upgrade_ts": now,
"is_guest": 1 if make_guest else 0,
"appservice_id": appservice_id,
- "admin": admin,
+ "admin": 1 if admin else 0,
}
)
else:
@@ -160,7 +160,7 @@ class RegistrationStore(SQLBaseStore):
"creation_ts": now,
"is_guest": 1 if make_guest else 0,
"appservice_id": appservice_id,
- "admin": admin,
+ "admin": 1 if admin else 0,
}
)
except self.database_engine.module.IntegrityError:
From be3548f7e14f411b0bb4d176ea0977672ed58252 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 5 Jul 2016 17:46:51 +0100
Subject: [PATCH 210/414] Remove spurious txn
---
synapse/storage/registration.py | 9 ---------
1 file changed, 9 deletions(-)
diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py
index 232dcfd9e..0a6834149 100644
--- a/synapse/storage/registration.py
+++ b/synapse/storage/registration.py
@@ -127,15 +127,6 @@ class RegistrationStore(SQLBaseStore):
try:
if was_guest:
- txn.execute(
- "UPDATE users SET"
- " password_hash = ?,"
- " upgrade_ts = ?,"
- " is_guest = ?,"
- " admin = ?"
- " WHERE name = ?",
- (password_hash, now, 1 if make_guest else 0, admin, user_id,)
- )
self._simple_update_one_txn(
txn,
"users",
From 896bc6cd464c4e2807a6751bd2de8039bbe1fc63 Mon Sep 17 00:00:00 2001
From: Kent Shikama
Date: Wed, 6 Jul 2016 12:17:54 +0900
Subject: [PATCH 211/414] Update hash_password script
Signed-off-by: Kent Shikama
---
scripts/hash_password | 18 +++++++++++++++++-
1 file changed, 17 insertions(+), 1 deletion(-)
diff --git a/scripts/hash_password b/scripts/hash_password
index e78460098..215ab25cf 100755
--- a/scripts/hash_password
+++ b/scripts/hash_password
@@ -1,10 +1,16 @@
#!/usr/bin/env python
import argparse
+
+import sys
+
import bcrypt
import getpass
+import yaml
+
bcrypt_rounds=12
+password_pepper = ""
def prompt_for_pass():
password = getpass.getpass("Password: ")
@@ -28,12 +34,22 @@ if __name__ == "__main__":
default=None,
help="New password for user. Will prompt if omitted.",
)
+ parser.add_argument(
+ "-c", "--config",
+ type=argparse.FileType('r'),
+ help="Path to server config file. Used to read in bcrypt_rounds and password_pepper.",
+ )
args = parser.parse_args()
+ if "config" in args and args.config:
+ config = yaml.safe_load(args.config)
+ bcrypt_rounds = config.get("bcrypt_rounds", bcrypt_rounds)
+ password_config = config.get("password_config", {})
+ password_pepper = password_config.get("pepper", password_pepper)
password = args.password
if not password:
password = prompt_for_pass()
- print bcrypt.hashpw(password, bcrypt.gensalt(bcrypt_rounds))
+ print bcrypt.hashpw(password + password_pepper, bcrypt.gensalt(bcrypt_rounds))
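
To see the pepper end to end, a minimal sketch (assuming the same `bcrypt` package the script uses; the pepper value is illustrative). Verification appends the pepper again and uses the hashpw-and-compare idiom, which works because bcrypt embeds the salt in the stored hash:

    import bcrypt

    pepper = b"myS3cretPepper"   # would come from password_config.pepper

    stored = bcrypt.hashpw(b"hunter2" + pepper, bcrypt.gensalt(12))

    def check_password(candidate, stored_hash):
        # Re-hash the peppered candidate using the salt embedded in stored_hash.
        return bcrypt.hashpw(candidate + pepper, stored_hash) == stored_hash

    print(check_password(b"hunter2", stored))  # True
    print(check_password(b"letmein", stored))  # False

Since the pepper is a server-side secret rather than per-user data, leaked password hashes cannot be cracked offline without also obtaining the config file.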
From 8d9a884cee0b3ee5b18b0d037592bb9e5c3ae943 Mon Sep 17 00:00:00 2001
From: Kent Shikama
Date: Wed, 6 Jul 2016 12:18:19 +0900
Subject: [PATCH 212/414] Update password config comment
Signed-off-by: Kent Shikama
---
synapse/config/password.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/synapse/config/password.py b/synapse/config/password.py
index 66f0d93ee..a4bd17139 100644
--- a/synapse/config/password.py
+++ b/synapse/config/password.py
@@ -30,7 +30,7 @@ class PasswordConfig(Config):
# Enable password for login.
password_config:
enabled: true
- # Change to a secret random string.
+ # Uncomment and change to a secret random string for extra security.
# DO NOT CHANGE THIS AFTER INITIAL SETUP!
#pepper: ""
"""
From 0da24cac8bde47961396f7da774d8dc8ed847107 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 6 Jul 2016 11:04:44 +0100
Subject: [PATCH 213/414] Add null separator to hmac
---
scripts/register_new_matrix_user | 2 ++
synapse/rest/client/v1/register.py | 2 ++
2 files changed, 4 insertions(+)
diff --git a/scripts/register_new_matrix_user b/scripts/register_new_matrix_user
index 987bf32d1..12ed20d62 100755
--- a/scripts/register_new_matrix_user
+++ b/scripts/register_new_matrix_user
@@ -32,7 +32,9 @@ def request_registration(user, password, server_location, shared_secret, admin=F
)
mac.update(user)
+ mac.update("\x00")
mac.update(password)
+ mac.update("\x00")
mac.update("admin" if admin else "notadmin")
mac = mac.hexdigest()
diff --git a/synapse/rest/client/v1/register.py b/synapse/rest/client/v1/register.py
index 25d63a0b0..83872f5f6 100644
--- a/synapse/rest/client/v1/register.py
+++ b/synapse/rest/client/v1/register.py
@@ -336,7 +336,9 @@ class RegisterRestServlet(ClientV1RestServlet):
digestmod=sha1,
)
want_mac.update(user)
+ want_mac.update("\x00")
want_mac.update(password)
+ want_mac.update("\x00")
want_mac.update("admin" if admin else "notadmin")
want_mac = want_mac.hexdigest()
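
Why the separators matter: HMAC sees only a single byte stream, so without a delimiter two different field tuples can concatenate to identical bytes and hence identical MACs. A quick demonstration:

    import hashlib
    import hmac

    def mac_of_fields(secret, fields, sep=b""):
        m = hmac.new(secret, digestmod=hashlib.sha1)
        m.update(sep.join(fields))
        return m.hexdigest()

    fields_a = (b"bob", b"1password")   # user "bob",  password "1password"
    fields_b = (b"bob1", b"password")   # user "bob1", password "password"

    # Without a separator the two requests are indistinguishable to the MAC:
    print(mac_of_fields(b"secret", fields_a) ==
          mac_of_fields(b"secret", fields_b))              # True
    # With the null separator they diverge:
    print(mac_of_fields(b"secret", fields_a, sep=b"\x00") ==
          mac_of_fields(b"secret", fields_b, sep=b"\x00"))  # False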
From 76b18df3d95cd881017a9aa5c8473409928faecd Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 6 Jul 2016 11:16:10 +0100
Subject: [PATCH 214/414] Check that there are no null bytes in user and
 password
---
synapse/rest/client/v1/register.py | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/synapse/rest/client/v1/register.py b/synapse/rest/client/v1/register.py
index 83872f5f6..ce7099b18 100644
--- a/synapse/rest/client/v1/register.py
+++ b/synapse/rest/client/v1/register.py
@@ -327,6 +327,12 @@ class RegisterRestServlet(ClientV1RestServlet):
password = register_json["password"].encode("utf-8")
admin = register_json.get("admin", None)
+ # It's important to check these, as we use null bytes as HMAC field separators
+ if "\x00" in user:
+ raise SynapseError(400, "Invalid user")
+ if "\x00" in password:
+ raise SynapseError(400, "Invalid password")
+
# str() because otherwise hmac complains that 'unicode' does not
# have the buffer interface
got_mac = str(register_json["mac"])
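
This check closes the remaining gap in the separator scheme: if a field may itself contain "\x00", an attacker can move bytes across the field boundary without changing the MAC. A sketch of the collision being prevented (the helper and values are illustrative):

    import hashlib
    import hmac

    def registration_mac(secret, user, password, admin=b"notadmin"):
        m = hmac.new(secret, digestmod=hashlib.sha1)
        m.update(user + b"\x00" + password + b"\x00" + admin)
        return m.hexdigest()

    # Two different (user, password) pairs, one and the same MAC:
    m1 = registration_mac(b"secret", b"alice", b"pw\x00extra")
    m2 = registration_mac(b"secret", b"alice\x00pw", b"extra")
    print(m1 == m2)  # True - embedded null bytes forge the field boundary

Rejecting any field containing "\x00", as the servlet now does, rules this out entirely.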
From 67f2c901ea4196d869380c1c5cdd8569934857ed Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 6 Jul 2016 15:56:59 +0100
Subject: [PATCH 215/414] Add rest servlet. Fix SQL.
---
synapse/rest/client/v1/admin.py | 1 +
synapse/storage/events.py | 9 ++++-----
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/synapse/rest/client/v1/admin.py b/synapse/rest/client/v1/admin.py
index 71537a7d0..b0cb31a44 100644
--- a/synapse/rest/client/v1/admin.py
+++ b/synapse/rest/client/v1/admin.py
@@ -124,3 +124,4 @@ def register_servlets(hs, http_server):
WhoisRestServlet(hs).register(http_server)
PurgeMediaCacheRestServlet(hs).register(http_server)
DeactivateAccountRestServlet(hs).register(http_server)
+ PurgeHistoryRestServlet(hs).register(http_server)
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index c3b498bb3..23ebd5d4c 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -1384,10 +1384,6 @@ class EventsStore(SQLBaseStore):
(event_id,) for event_id, state_key in event_rows
if state_key is None and not self.hs.is_mine_id(event_id)
]
- to_not_delete = [
- (event_id,) for event_id, state_key in event_rows
- if state_key is not None or self.hs.is_mine_id(event_id)
- ]
for table in (
"events",
"event_json",
@@ -1424,7 +1420,10 @@ class EventsStore(SQLBaseStore):
txn.executemany(
"UPDATE events SET outlier = ?"
" WHERE event_id = ?",
- to_not_delete
+ [
+ (True, event_id,) for event_id, state_key in event_rows
+ if state_key is not None or self.hs.is_mine_id(event_id)
+ ]
)
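
The rewritten hunk feeds `txn.executemany` straight from a filtered comprehension instead of materialising a separate `to_not_delete` list first. A runnable sketch of the same pattern (schema heavily simplified, and the `is_mine_id` condition dropped):

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE events (event_id TEXT, outlier INTEGER)")
    conn.executemany(
        "INSERT INTO events VALUES (?, 0)", [("$a",), ("$b",), ("$c",)]
    )

    # (event_id, state_key) pairs; only state events are marked as outliers.
    event_rows = [("$a", "m.room.member"), ("$b", None), ("$c", "m.room.name")]
    conn.executemany(
        "UPDATE events SET outlier = ? WHERE event_id = ?",
        [
            (True, event_id) for event_id, state_key in event_rows
            if state_key is not None
        ],
    )
    print(conn.execute("SELECT * FROM events ORDER BY event_id").fetchall())
    # $a and $c are now outliers (1); $b is untouched (0)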
From c98e1479bd39a64add0456299644e96480151625 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 7 Jul 2016 11:41:07 +0100
Subject: [PATCH 216/414] Return 400 rather than 500
---
synapse/storage/events.py | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index 23ebd5d4c..c2136f3fd 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -23,6 +23,7 @@ from synapse.util.async import ObservableDeferred
from synapse.util.logcontext import preserve_fn, PreserveLoggingContext
from synapse.util.logutils import log_function
from synapse.api.constants import EventTypes
+from synapse.api.errors import SynapseError
from canonicaljson import encode_canonical_json
from collections import deque, namedtuple
@@ -1324,7 +1325,9 @@ class EventsStore(SQLBaseStore):
max_depth = max(row[0] for row in rows)
if max_depth <= topological_ordering:
- raise Exception("topological_ordering is greater than forward extremeties")
+ raise SynapseError(
+ 400, "topological_ordering is greater than forward extremeties"
+ )
txn.execute(
"SELECT event_id, state_key FROM events"
From b92e7955be10209fdd13cdb799b1ac55c981d086 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 7 Jul 2016 11:42:15 +0100
Subject: [PATCH 217/414] Comment
---
synapse/storage/events.py | 3 +++
1 file changed, 3 insertions(+)
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index c2136f3fd..b58294216 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -1325,6 +1325,9 @@ class EventsStore(SQLBaseStore):
max_depth = max(row[0] for row in rows)
if max_depth <= topological_ordering:
+ # We need to ensure we don't delete all the events from the database,
+ # otherwise we wouldn't be able to send any events (due to not
+ # having any backwards extremities)
raise SynapseError(
400, "topological_ordering is greater than forward extremeties"
)
From 067596d341a661e008195f7f3a6887ade7cafa32 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 7 Jul 2016 16:11:37 +0100
Subject: [PATCH 218/414] Fix bug where we did not correctly explode when
multiple user_ids were set in macaroon
---
synapse/api/auth.py | 15 ++++++++++-----
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index 31e1abb96..a4d658a9d 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -637,17 +637,22 @@ class Auth(object):
try:
macaroon = pymacaroons.Macaroon.deserialize(macaroon_str)
- self.validate_macaroon(macaroon, rights, self.hs.config.expire_access_token)
-
user_prefix = "user_id = "
user = None
+ user_id = None
guest = False
for caveat in macaroon.caveats:
if caveat.caveat_id.startswith(user_prefix):
- user = UserID.from_string(caveat.caveat_id[len(user_prefix):])
+ user_id = caveat.caveat_id[len(user_prefix):]
+ user = UserID.from_string(user_id)
elif caveat.caveat_id == "guest = true":
guest = True
+ self.validate_macaroon(
+ macaroon, rights, self.hs.config.expire_access_token,
+ user_id=user_id,
+ )
+
if user is None:
raise AuthError(
self.TOKEN_NOT_FOUND_HTTP_STATUS, "No user caveat in macaroon",
@@ -692,7 +697,7 @@ class Auth(object):
errcode=Codes.UNKNOWN_TOKEN
)
- def validate_macaroon(self, macaroon, type_string, verify_expiry):
+ def validate_macaroon(self, macaroon, type_string, verify_expiry, user_id):
"""
validate that a Macaroon is understood by and was signed by this server.
@@ -707,7 +712,7 @@ class Auth(object):
v = pymacaroons.Verifier()
v.satisfy_exact("gen = 1")
v.satisfy_exact("type = " + type_string)
- v.satisfy_general(lambda c: c.startswith("user_id = "))
+ v.satisfy_exact("user_id = %s" % user_id)
v.satisfy_exact("guest = true")
if verify_expiry:
v.satisfy_general(self._verify_expiry)
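
The shape of the bug, reproduced with the pymacaroons library directly (token values illustrative): a macaroon smuggling a second user_id caveat satisfies a startswith-based verifier, while pinning the verifier to the single extracted user_id makes the conflicting caveat fail.

    import pymacaroons

    key = "server-secret"
    m = pymacaroons.Macaroon(location="synapse", identifier="key1", key=key)
    m.add_first_party_caveat("gen = 1")
    m.add_first_party_caveat("type = access")
    m.add_first_party_caveat("user_id = @alice:example.org")
    m.add_first_party_caveat("user_id = @mallory:example.org")  # smuggled!

    # Old behaviour: any caveat starting with "user_id = " is satisfied.
    v = pymacaroons.Verifier()
    v.satisfy_exact("gen = 1")
    v.satisfy_exact("type = access")
    v.satisfy_general(lambda c: c.startswith("user_id = "))
    print(v.verify(m, key))  # passes despite the conflicting user_id caveats

    # New behaviour: only the one user_id actually extracted is accepted,
    # so the second, conflicting caveat makes verification fail.
    v2 = pymacaroons.Verifier()
    v2.satisfy_exact("gen = 1")
    v2.satisfy_exact("type = access")
    v2.satisfy_exact("user_id = @alice:example.org")
    try:
        v2.verify(m, key)
    except Exception as e:
        print("rejected: %s" % e)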
From f90cf150e2b51124bb6848980394c4368e0de73a Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 7 Jul 2016 16:33:00 +0100
Subject: [PATCH 219/414] Bump version and changelog
---
CHANGES.rst | 8 ++++++++
synapse/__init__.py | 2 +-
2 files changed, 9 insertions(+), 1 deletion(-)
diff --git a/CHANGES.rst b/CHANGES.rst
index ecaaa189d..e1d5e876d 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -1,3 +1,11 @@
+Changes in synapse v0.16.1-r1 (2016-07-08)
+==========================================
+
+THIS IS A CRITICAL SECURITY UPDATE.
+
+This fixes a bug which allowed users' accounts to be accessed by unauthorised
+users.
+
Changes in synapse v0.16.1 (2016-06-20)
=======================================
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 3cd79b124..2750ad3f7 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -16,4 +16,4 @@
""" This is a reference implementation of a Matrix home server.
"""
-__version__ = "0.16.1"
+__version__ = "0.16.1-r1"
From 10c843fcfbd6c3f6bcc13c5b9c71c9007ee54480 Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Fri, 8 Jul 2016 15:15:55 +0100
Subject: [PATCH 220/414] Ensure that the guest user is in the database when
upgrading accounts
---
synapse/storage/registration.py | 13 +++++++++++++
1 file changed, 13 insertions(+)
diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py
index 0a6834149..3a675e53f 100644
--- a/synapse/storage/registration.py
+++ b/synapse/storage/registration.py
@@ -127,11 +127,24 @@ class RegistrationStore(SQLBaseStore):
try:
if was_guest:
+ # Ensure that the guest user actually exists
+ self._simple_select_one_txn(
+ txn,
+ "users",
+ keyvalues={
+ "name": user_id,
+ "is_guest": 1,
+ },
+ retcols=("name",),
+ allow_none=False,
+ )
+
self._simple_update_one_txn(
txn,
"users",
keyvalues={
"name": user_id,
+ "is_guest": 1,
},
updatevalues={
"password_hash": password_hash,
From dfde67a6fe22535558552060820abfca047540f3 Mon Sep 17 00:00:00 2001
From: Mark Haines
Date: Fri, 8 Jul 2016 15:57:06 +0100
Subject: [PATCH 221/414] Add a comment explaining allow_none
---
synapse/storage/registration.py | 2 ++
1 file changed, 2 insertions(+)
diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py
index 3a675e53f..d957a629d 100644
--- a/synapse/storage/registration.py
+++ b/synapse/storage/registration.py
@@ -128,6 +128,8 @@ class RegistrationStore(SQLBaseStore):
try:
if was_guest:
# Ensure that the guest user actually exists
+ # ``allow_none=False`` makes this raise an exception
+ # if the row isn't in the database.
self._simple_select_one_txn(
txn,
"users",
From 385aec401015b12b763f630abf48ad2b8b30649c Mon Sep 17 00:00:00 2001
From: David Baker
Date: Fri, 8 Jul 2016 17:42:48 +0100
Subject: [PATCH 222/414] Implement
https://github.com/matrix-org/matrix-doc/pull/346/files
---
synapse/api/errors.py | 1 +
synapse/rest/client/v2_alpha/account.py | 59 +++++++++++++++++++++++++
2 files changed, 60 insertions(+)
diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index b219b46a4..004164685 100644
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -43,6 +43,7 @@ class Codes(object):
EXCLUSIVE = "M_EXCLUSIVE"
THREEPID_AUTH_FAILED = "M_THREEPID_AUTH_FAILED"
THREEPID_IN_USE = "M_THREEPID_IN_USE"
+ THREEPID_NOT_FOUND = "M_THREEPID_NOT_FOUND"
INVALID_USERNAME = "M_INVALID_USERNAME"
SERVER_NOT_TRUSTED = "M_SERVER_NOT_TRUSTED"
diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py
index 9a84873a5..1c37f9131 100644
--- a/synapse/rest/client/v2_alpha/account.py
+++ b/synapse/rest/client/v2_alpha/account.py
@@ -36,11 +36,16 @@ class PasswordRestServlet(RestServlet):
self.hs = hs
self.auth = hs.get_auth()
self.auth_handler = hs.get_auth_handler()
+ self.identity_handler = hs.get_handlers().identity_handler
@defer.inlineCallbacks
def on_POST(self, request):
yield run_on_reactor()
+ if '/account/password/email/requestToken' in request.path:
+ ret = yield self.onPasswordEmailTokenRequest(request)
+ defer.returnValue(ret)
+
body = parse_json_object_from_request(request)
authed, result, params, _ = yield self.auth_handler.check_auth([
@@ -85,6 +90,29 @@ class PasswordRestServlet(RestServlet):
defer.returnValue((200, {}))
+ @defer.inlineCallbacks
+ def onPasswordEmailTokenRequest(self, request):
+ body = parse_json_object_from_request(request)
+
+ required = ['id_server', 'client_secret', 'email', 'send_attempt']
+ absent = []
+ for k in required:
+ if k not in body:
+ absent.append(k)
+
+ if len(absent) > 0:
+ raise SynapseError(400, "Missing params: %r" % absent, Codes.MISSING_PARAM)
+
+ existingUid = yield self.hs.get_datastore().get_user_id_by_threepid(
+ 'email', body['email']
+ )
+
+ if existingUid is None:
+ raise SynapseError(400, "Email not found", Codes.THREEPID_NOT_FOUND)
+
+ ret = yield self.identity_handler.requestEmailToken(**body)
+ defer.returnValue((200, ret))
+
def on_OPTIONS(self, _):
return 200, {}
@@ -115,6 +143,10 @@ class ThreepidRestServlet(RestServlet):
def on_POST(self, request):
yield run_on_reactor()
+ if '/account/3pid/email/requestToken' in request.path:
+ ret = yield self.onThreepidEmailTokenRequest(request)
+ defer.returnValue(ret)
+
body = parse_json_object_from_request(request)
threePidCreds = body.get('threePidCreds')
@@ -155,6 +187,33 @@ class ThreepidRestServlet(RestServlet):
defer.returnValue((200, {}))
+ @defer.inlineCallbacks
+ def onThreepidEmailTokenRequest(self, request):
+ body = parse_json_object_from_request(request)
+
+ logger.error("hi")
+
+ required = ['id_server', 'client_secret', 'email', 'send_attempt']
+ absent = []
+ for k in required:
+ if k not in body:
+ absent.append(k)
+
+ if len(absent) > 0:
+ raise SynapseError(400, "Missing params: %r" % absent, Codes.MISSING_PARAM)
+
+ existingUid = yield self.hs.get_datastore().get_user_id_by_threepid(
+ 'email', body['email']
+ )
+
+ logger.error("existing %r", existingUid)
+
+ if existingUid is not None:
+ raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE)
+
+ ret = yield self.identity_handler.requestEmailToken(**body)
+ defer.returnValue((200, ret))
+
def register_servlets(hs, http_server):
PasswordRestServlet(hs).register(http_server)
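
All three requestToken handlers in this patch repeat the same required-parameter scan. A hypothetical helper capturing the pattern (this refactor is not part of the patch; in the servlets the failure is a SynapseError(400, ..., Codes.MISSING_PARAM) rather than a ValueError):

    def assert_params_in_body(body, required):
        """Collect every required key absent from body and fail naming them all."""
        absent = [k for k in required if k not in body]
        if absent:
            raise ValueError("Missing params: %r" % (absent,))

    assert_params_in_body(
        {"id_server": "matrix.org", "client_secret": "s",
         "email": "a@example.com", "send_attempt": 1},
        ["id_server", "client_secret", "email", "send_attempt"],
    )  # passes silently; drop a key and it raises, listing the missing ones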
From 9c491366c51b2a0ed23e1f3ead80b7ac4307d46f Mon Sep 17 00:00:00 2001
From: David Baker
Date: Mon, 11 Jul 2016 09:07:40 +0100
Subject: [PATCH 223/414] Oops, remove debug logging
---
synapse/rest/client/v2_alpha/account.py | 4 ----
1 file changed, 4 deletions(-)
diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py
index 1c37f9131..e2bbfc9d9 100644
--- a/synapse/rest/client/v2_alpha/account.py
+++ b/synapse/rest/client/v2_alpha/account.py
@@ -191,8 +191,6 @@ class ThreepidRestServlet(RestServlet):
def onThreepidEmailTokenRequest(self, request):
body = parse_json_object_from_request(request)
- logger.error("hi")
-
required = ['id_server', 'client_secret', 'email', 'send_attempt']
absent = []
for k in required:
@@ -206,8 +204,6 @@ class ThreepidRestServlet(RestServlet):
'email', body['email']
)
- logger.error("existing %r", existingUid)
-
if existingUid is not None:
raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE)
From a5db0026ede13159e340db8612bf4cafba8f6ab6 Mon Sep 17 00:00:00 2001
From: David Baker
Date: Mon, 11 Jul 2016 09:57:07 +0100
Subject: [PATCH 224/414] Separate out requestTokens to separate handlers
---
synapse/rest/client/v2_alpha/account.py | 93 ++++++++++++++----------
synapse/rest/client/v2_alpha/register.py | 65 ++++++++++-------
2 files changed, 93 insertions(+), 65 deletions(-)
diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py
index e2bbfc9d9..8a5361762 100644
--- a/synapse/rest/client/v2_alpha/account.py
+++ b/synapse/rest/client/v2_alpha/account.py
@@ -28,24 +28,54 @@ import logging
logger = logging.getLogger(__name__)
+class PasswordRequestTokenRestServlet(RestServlet):
+ PATTERNS = client_v2_patterns("/account/password/email/requestToken$")
+
+ def __init__(self, hs):
+ super(PasswordRequestTokenRestServlet, self).__init__()
+ self.hs = hs
+ self.identity_handler = hs.get_handlers().identity_handler
+
+ @defer.inlineCallbacks
+ def on_POST(self, request):
+ body = parse_json_object_from_request(request)
+
+ required = ['id_server', 'client_secret', 'email', 'send_attempt']
+ absent = []
+ for k in required:
+ if k not in body:
+ absent.append(k)
+
+ if len(absent) > 0:
+ raise SynapseError(400, "Missing params: %r" % absent, Codes.MISSING_PARAM)
+
+ existingUid = yield self.hs.get_datastore().get_user_id_by_threepid(
+ 'email', body['email']
+ )
+
+ if existingUid is None:
+ raise SynapseError(400, "Email not found", Codes.THREEPID_NOT_FOUND)
+
+ ret = yield self.identity_handler.requestEmailToken(**body)
+ defer.returnValue((200, ret))
+
+ def on_OPTIONS(self, _):
+ return 200, {}
+
+
class PasswordRestServlet(RestServlet):
- PATTERNS = client_v2_patterns("/account/password")
+ PATTERNS = client_v2_patterns("/account/password$")
def __init__(self, hs):
super(PasswordRestServlet, self).__init__()
self.hs = hs
self.auth = hs.get_auth()
self.auth_handler = hs.get_auth_handler()
- self.identity_handler = hs.get_handlers().identity_handler
@defer.inlineCallbacks
def on_POST(self, request):
yield run_on_reactor()
- if '/account/password/email/requestToken' in request.path:
- ret = yield self.onPasswordEmailTokenRequest(request)
- defer.returnValue(ret)
-
body = parse_json_object_from_request(request)
authed, result, params, _ = yield self.auth_handler.check_auth([
@@ -90,8 +120,20 @@ class PasswordRestServlet(RestServlet):
defer.returnValue((200, {}))
+ def on_OPTIONS(self, _):
+ return 200, {}
+
+
+class ThreepidRequestTokenRestServlet(RestServlet):
+ PATTERNS = client_v2_patterns("/account/3pid/email/requestToken$")
+
+ def __init__(self, hs):
+ self.hs = hs
+ super(ThreepidRequestTokenRestServlet, self).__init__()
+ self.identity_handler = hs.get_handlers().identity_handler
+
@defer.inlineCallbacks
- def onPasswordEmailTokenRequest(self, request):
+ def on_POST(self, request):
body = parse_json_object_from_request(request)
required = ['id_server', 'client_secret', 'email', 'send_attempt']
@@ -107,8 +149,10 @@ class PasswordRestServlet(RestServlet):
'email', body['email']
)
- if existingUid is None:
- raise SynapseError(400, "Email not found", Codes.THREEPID_NOT_FOUND)
+ logger.error("existing %r", existingUid)
+
+ if existingUid is not None:
+ raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE)
ret = yield self.identity_handler.requestEmailToken(**body)
defer.returnValue((200, ret))
@@ -118,7 +162,7 @@ class PasswordRestServlet(RestServlet):
class ThreepidRestServlet(RestServlet):
- PATTERNS = client_v2_patterns("/account/3pid")
+ PATTERNS = client_v2_patterns("/account/3pid$")
def __init__(self, hs):
super(ThreepidRestServlet, self).__init__()
@@ -143,10 +187,6 @@ class ThreepidRestServlet(RestServlet):
def on_POST(self, request):
yield run_on_reactor()
- if '/account/3pid/email/requestToken' in request.path:
- ret = yield self.onThreepidEmailTokenRequest(request)
- defer.returnValue(ret)
-
body = parse_json_object_from_request(request)
threePidCreds = body.get('threePidCreds')
@@ -187,30 +227,9 @@ class ThreepidRestServlet(RestServlet):
defer.returnValue((200, {}))
- @defer.inlineCallbacks
- def onThreepidEmailTokenRequest(self, request):
- body = parse_json_object_from_request(request)
-
- required = ['id_server', 'client_secret', 'email', 'send_attempt']
- absent = []
- for k in required:
- if k not in body:
- absent.append(k)
-
- if len(absent) > 0:
- raise SynapseError(400, "Missing params: %r" % absent, Codes.MISSING_PARAM)
-
- existingUid = yield self.hs.get_datastore().get_user_id_by_threepid(
- 'email', body['email']
- )
-
- if existingUid is not None:
- raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE)
-
- ret = yield self.identity_handler.requestEmailToken(**body)
- defer.returnValue((200, ret))
-
def register_servlets(hs, http_server):
+ PasswordRequestTokenRestServlet(hs).register(http_server)
PasswordRestServlet(hs).register(http_server)
+ ThreepidRequestTokenRestServlet(hs).register(http_server)
ThreepidRestServlet(hs).register(http_server)
diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py
index 2088c316d..e5944b99b 100644
--- a/synapse/rest/client/v2_alpha/register.py
+++ b/synapse/rest/client/v2_alpha/register.py
@@ -41,8 +41,43 @@ else:
logger = logging.getLogger(__name__)
+class RegisterRequestTokenRestServlet(RestServlet):
+ PATTERNS = client_v2_patterns("/register/email/requestToken$")
+
+ def __init__(self, hs):
+ super(RegisterRequestTokenRestServlet, self).__init__()
+ self.hs = hs
+ self.identity_handler = hs.get_handlers().identity_handler
+
+ @defer.inlineCallbacks
+ def on_POST(self, request):
+ body = parse_json_object_from_request(request)
+
+ required = ['id_server', 'client_secret', 'email', 'send_attempt']
+ absent = []
+ for k in required:
+ if k not in body:
+ absent.append(k)
+
+ if len(absent) > 0:
+ raise SynapseError(400, "Missing params: %r" % absent, Codes.MISSING_PARAM)
+
+ existingUid = yield self.hs.get_datastore().get_user_id_by_threepid(
+ 'email', body['email']
+ )
+
+ if existingUid is not None:
+ raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE)
+
+ ret = yield self.identity_handler.requestEmailToken(**body)
+ defer.returnValue((200, ret))
+
+ def on_OPTIONS(self, _):
+ return 200, {}
+
+
class RegisterRestServlet(RestServlet):
- PATTERNS = client_v2_patterns("/register")
+ PATTERNS = client_v2_patterns("/register$")
def __init__(self, hs):
super(RegisterRestServlet, self).__init__()
@@ -70,10 +105,6 @@ class RegisterRestServlet(RestServlet):
"Do not understand membership kind: %s" % (kind,)
)
- if '/register/email/requestToken' in request.path:
- ret = yield self.onEmailTokenRequest(request)
- defer.returnValue(ret)
-
body = parse_json_object_from_request(request)
# we do basic sanity checks here because the auth layer will store these
@@ -305,29 +336,6 @@ class RegisterRestServlet(RestServlet):
"refresh_token": refresh_token,
})
- @defer.inlineCallbacks
- def onEmailTokenRequest(self, request):
- body = parse_json_object_from_request(request)
-
- required = ['id_server', 'client_secret', 'email', 'send_attempt']
- absent = []
- for k in required:
- if k not in body:
- absent.append(k)
-
- if len(absent) > 0:
- raise SynapseError(400, "Missing params: %r" % absent, Codes.MISSING_PARAM)
-
- existingUid = yield self.hs.get_datastore().get_user_id_by_threepid(
- 'email', body['email']
- )
-
- if existingUid is not None:
- raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE)
-
- ret = yield self.identity_handler.requestEmailToken(**body)
- defer.returnValue((200, ret))
-
@defer.inlineCallbacks
def _do_guest_registration(self):
if not self.hs.config.allow_guest_access:
@@ -345,4 +353,5 @@ class RegisterRestServlet(RestServlet):
def register_servlets(hs, http_server):
+ RegisterRequestTokenRestServlet(hs).register(http_server)
RegisterRestServlet(hs).register(http_server)
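
The trailing "$" added to each pattern is what lets the dispatch-on-request.path hacks go: an unanchored pattern for /account/password also matched the longer requestToken path. Illustrated with plain re (the real client_v2_patterns adds version prefixes, omitted here):

    import re

    unanchored = re.compile("/account/password")
    anchored = re.compile("/account/password$")

    path = "/account/password/email/requestToken"
    print(bool(unanchored.match(path)))  # True  - old pattern claimed this path too
    print(bool(anchored.match(path)))    # False - only the requestToken servlet matches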
From 75fa7f6b3ceae5cf1eeda8f28149796eecdcd133 Mon Sep 17 00:00:00 2001
From: David Baker |