Merge pull request #647 from matrix-org/markjh/pushers_stream
Add replication stream for pushers
commit add89a03a6
8 changed files with 120 additions and 18 deletions
synapse/notifier.py

@@ -282,6 +282,12 @@ class Notifier(object):
             self.notify_replication()
 
+    def on_new_replication_data(self):
+        """Used to inform replication listeners that something has happened
+        without waking up any of the normal user event streams"""
+        with PreserveLoggingContext():
+            self.notify_replication()
+
     @defer.inlineCallbacks
     def wait_for_events(self, user_id, timeout, callback, room_ids=None,
                         from_token=StreamToken.START):
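The new on_new_replication_data hook is for code paths that change replicated state without touching any client-visible stream. A minimal sketch of a caller, assuming the usual hs.get_datastore()/hs.get_notifier() accessors (the handler class itself is hypothetical; both methods it calls are introduced in this diff):

from twisted.internet import defer


class ExamplePusherWriter(object):
    """Hypothetical handler that mutates pusher state, then pokes replication."""

    def __init__(self, hs):
        self.store = hs.get_datastore()
        self.notifier = hs.get_notifier()

    @defer.inlineCallbacks
    def remove_pusher(self, app_id, pushkey, user_id):
        yield self.store.delete_pusher_by_app_id_pushkey_user_id(
            app_id, pushkey, user_id
        )
        # No user event stream changed, so only replication listeners wake.
        self.notifier.on_new_replication_data()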
synapse/replication/resource.py

@@ -37,6 +37,7 @@ STREAM_NAMES = (
     ("user_account_data", "room_account_data", "tag_account_data",),
     ("backfill",),
     ("push_rules",),
+    ("pushers",),
 )

@@ -65,6 +66,7 @@ class ReplicationResource(Resource):
     * "tag_account_data": Per room per user tags.
     * "backfill": Old events that have been backfilled from other servers.
     * "push_rules": Per user changes to push rules.
+    * "pushers": Per user changes to their pushers.
 
     The API takes two additional query parameters:

@@ -120,6 +122,7 @@ class ReplicationResource(Resource):
         stream_token = yield self.sources.get_current_token()
         backfill_token = yield self.store.get_current_backfill_token()
         push_rules_token, room_stream_token = self.store.get_push_rules_stream_token()
+        pushers_token = self.store.get_pushers_stream_token()
 
         defer.returnValue(_ReplicationToken(
             room_stream_token,

@@ -129,6 +132,7 @@ class ReplicationResource(Resource):
             int(stream_token.account_data_key),
             backfill_token,
             push_rules_token,
+            pushers_token,
         ))
 
     @request_handler

@@ -151,6 +155,7 @@ class ReplicationResource(Resource):
         yield self.typing(writer, current_token)  # TODO: implement limit
         yield self.receipts(writer, current_token, limit)
         yield self.push_rules(writer, current_token, limit)
+        yield self.pushers(writer, current_token, limit)
         self.streams(writer, current_token)
 
         logger.info("Replicated %d rows", writer.total)

@@ -297,6 +302,24 @@ class ReplicationResource(Resource):
             "priority_class", "priority", "conditions", "actions"
         ))
 
+    @defer.inlineCallbacks
+    def pushers(self, writer, current_token, limit):
+        current_position = current_token.pushers
+
+        pushers = parse_integer(writer.request, "pushers")
+        if pushers is not None:
+            updated, deleted = yield self.store.get_all_updated_pushers(
+                pushers, current_position, limit
+            )
+            writer.write_header_and_rows("pushers", updated, (
+                "position", "user_id", "access_token", "profile_tag", "kind",
+                "app_id", "app_display_name", "device_display_name", "pushkey",
+                "ts", "lang", "data"
+            ))
+            writer.write_header_and_rows("deleted", deleted, (
+                "position", "user_id", "app_id", "pushkey"
+            ))
+
 
 class _Writer(object):
     """Writes the streams as a JSON object as the response to the request"""

@@ -327,7 +350,7 @@
 
 class _ReplicationToken(collections.namedtuple("_ReplicationToken", (
     "events", "presence", "typing", "receipts", "account_data", "backfill",
-    "push_rules"
+    "push_rules", "pushers"
 ))):
     __slots__ = []
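Taken together, the resource changes above expose a "pushers" stream alongside the existing ones, keyed by the "pushers" query parameter. A sketch of how a consumer might poll it; the /_synapse/replication mount point and the position/field_names/rows response shape are assumptions based on the surrounding code, not guaranteed by this diff:

import json
import urllib2  # the codebase is Python 2 at this point


def poll_pushers(base_url, token, limit=100):
    """Fetch pusher rows after `token`; returns (rows, new_token)."""
    url = "%s/_synapse/replication?pushers=%d&limit=%d" % (base_url, token, limit)
    body = json.loads(urllib2.urlopen(url).read())
    stream = body.get("pushers")
    if stream is None:
        return [], token  # nothing new on this stream
    rows = [dict(zip(stream["field_names"], row)) for row in stream["rows"]]
    return rows, stream["position"]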
synapse/rest/client/v1/pusher.py

@@ -29,6 +29,10 @@ logger = logging.getLogger(__name__)
 class PusherRestServlet(ClientV1RestServlet):
     PATTERNS = client_path_patterns("/pushers/set$")
 
+    def __init__(self, hs):
+        super(PusherRestServlet, self).__init__(hs)
+        self.notifier = hs.get_notifier()
+
     @defer.inlineCallbacks
     def on_POST(self, request):
         requester = yield self.auth.get_user_by_req(request)

@@ -87,6 +91,8 @@ class PusherRestServlet(ClientV1RestServlet):
             raise SynapseError(400, "Config Error: " + pce.message,
                                errcode=Codes.MISSING_PARAM)
 
+        self.notifier.on_new_replication_data()
+
        defer.returnValue((200, {}))
 
     def on_OPTIONS(self, _):
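For reference, this servlet backs the client-server endpoint POST /pushers/set; after this change every successful pusher update also nudges the replication stream. A representative request body, with invented values:

# Illustrative /pushers/set payload (all values made up)
pusher_config = {
    "app_id": "com.example.app",
    "pushkey": "example-push-key",
    "kind": "http",
    "app_display_name": "Example App",
    "device_display_name": "Alice's Phone",
    "profile_tag": "mobile",
    "lang": "en",
    "data": {"url": "https://push.example.com/_matrix/push/v1/notify"},
}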
synapse/storage/__init__.py

@@ -119,12 +119,15 @@ class DataStore(RoomMemberStore, RoomStore,
         self._state_groups_id_gen = IdGenerator(db_conn, "state_groups", "id")
         self._access_tokens_id_gen = IdGenerator(db_conn, "access_tokens", "id")
         self._refresh_tokens_id_gen = IdGenerator(db_conn, "refresh_tokens", "id")
-        self._pushers_id_gen = IdGenerator(db_conn, "pushers", "id")
         self._push_rule_id_gen = IdGenerator(db_conn, "push_rules", "id")
         self._push_rules_enable_id_gen = IdGenerator(db_conn, "push_rules_enable", "id")
         self._push_rules_stream_id_gen = ChainedIdGenerator(
             self._stream_id_gen, db_conn, "push_rules_stream", "stream_id"
         )
+        self._pushers_id_gen = StreamIdGenerator(
+            db_conn, "pushers", "id",
+            extra_tables=[("deleted_pushers", "stream_id")],
+        )
 
         events_max = self._stream_id_gen.get_max_token()
         event_cache_prefill, min_event_val = self._get_cache_dict(
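The pushers generator switches from IdGenerator to StreamIdGenerator because a replication position must never advance past a row that is still being written. A simplified, self-contained sketch of that behaviour (not Synapse's actual class, which additionally seeds itself from the extra_tables configured above):

import contextlib
import threading
from collections import deque


class MiniStreamIdGenerator(object):
    """Toy model: ids are handed out in a context manager, and the public
    position only moves once every earlier write has completed."""

    def __init__(self, current):
        self._lock = threading.Lock()
        self._current_max = current
        self._unfinished = deque()

    @contextlib.contextmanager
    def get_next(self):
        with self._lock:
            self._current_max += 1
            next_id = self._current_max
            self._unfinished.append(next_id)
        try:
            yield next_id  # caller persists its row while holding the id
        finally:
            with self._lock:
                self._unfinished.remove(next_id)

    def get_max_token(self):
        with self._lock:
            if self._unfinished:
                return self._unfinished[0] - 1  # oldest write still in flight
            return self._current_max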
synapse/storage/pusher.py

@@ -16,8 +16,6 @@
 from ._base import SQLBaseStore
 from twisted.internet import defer
 
-from synapse.api.errors import StoreError
-
 from canonicaljson import encode_canonical_json
 
 import logging

@@ -79,12 +77,41 @@ class PusherStore(SQLBaseStore):
         rows = yield self.runInteraction("get_all_pushers", get_pushers)
         defer.returnValue(rows)
 
+    def get_pushers_stream_token(self):
+        return self._pushers_id_gen.get_max_token()
+
+    def get_all_updated_pushers(self, last_id, current_id, limit):
+        def get_all_updated_pushers_txn(txn):
+            sql = (
+                "SELECT id, user_name, access_token, profile_tag, kind,"
+                " app_id, app_display_name, device_display_name, pushkey, ts,"
+                " lang, data"
+                " FROM pushers"
+                " WHERE ? < id AND id <= ?"
+                " ORDER BY id ASC LIMIT ?"
+            )
+            txn.execute(sql, (last_id, current_id, limit))
+            updated = txn.fetchall()
+
+            sql = (
+                "SELECT stream_id, user_id, app_id, pushkey"
+                " FROM deleted_pushers"
+                " WHERE ? < stream_id AND stream_id <= ?"
+                " ORDER BY stream_id ASC LIMIT ?"
+            )
+            txn.execute(sql, (last_id, current_id, limit))
+            deleted = txn.fetchall()
+
+            return (updated, deleted)
+        return self.runInteraction(
+            "get_all_updated_pushers", get_all_updated_pushers_txn
+        )
+
     @defer.inlineCallbacks
     def add_pusher(self, user_id, access_token, kind, app_id,
                    app_display_name, device_display_name,
                    pushkey, pushkey_ts, lang, data, profile_tag=""):
-        try:
-            next_id = self._pushers_id_gen.get_next()
+        with self._pushers_id_gen.get_next() as stream_id:
             yield self._simple_upsert(
                 "pushers",
                 dict(

@@ -101,23 +128,29 @@ class PusherStore(SQLBaseStore):
                     lang=lang,
                     data=encode_canonical_json(data),
                     profile_tag=profile_tag,
                 ),
                 insertion_values=dict(
-                    id=next_id,
+                    id=stream_id,
                 ),
                 desc="add_pusher",
             )
-        except Exception as e:
-            logger.error("create_pusher with failed: %s", e)
-            raise StoreError(500, "Problem creating pusher.")
 
     @defer.inlineCallbacks
     def delete_pusher_by_app_id_pushkey_user_id(self, app_id, pushkey, user_id):
-        yield self._simple_delete_one(
-            "pushers",
-            {"app_id": app_id, "pushkey": pushkey, 'user_name': user_id},
-            desc="delete_pusher_by_app_id_pushkey_user_id",
-        )
+        def delete_pusher_txn(txn, stream_id):
+            self._simple_delete_one_txn(
+                txn,
+                "pushers",
+                {"app_id": app_id, "pushkey": pushkey, "user_name": user_id}
+            )
+            self._simple_upsert_txn(
+                txn,
+                "deleted_pushers",
+                {"app_id": app_id, "pushkey": pushkey, "user_id": user_id},
+                {"stream_id": stream_id},
+            )
+        with self._pushers_id_gen.get_next() as stream_id:
+            yield self.runInteraction(
+                "delete_pusher", delete_pusher_txn, stream_id
+            )
 
     @defer.inlineCallbacks
     def update_pusher_last_token(self, app_id, pushkey, user_id, last_token):
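get_all_updated_pushers hands back two row sets whose columns follow the SELECTs above. A sketch of what a hypothetical replica-side cache could do with one batch (the cache layout is invented):

def apply_pusher_batch(cache, updated, deleted):
    """Apply one (updated, deleted) batch to a dict keyed by
    (user_id, app_id, pushkey). Illustrative only."""
    for row in updated:
        (row_id, user_name, access_token, profile_tag, kind, app_id,
         app_display_name, device_display_name, pushkey, ts, lang, data) = row
        cache[(user_name, app_id, pushkey)] = {
            "kind": kind,
            "app_display_name": app_display_name,
            "device_display_name": device_display_name,
            "profile_tag": profile_tag,
            "ts": ts,
            "lang": lang,
            "data": data,
        }
    for stream_id, user_id, app_id, pushkey in deleted:
        # Drop the pusher if it was deleted.
        cache.pop((user_id, app_id, pushkey), None)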
25  synapse/storage/schema/delta/30/deleted_pushers.sql  Normal file

@@ -0,0 +1,25 @@
+/* Copyright 2016 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE IF NOT EXISTS deleted_pushers(
+    stream_id BIGINT NOT NULL,
+    app_id TEXT NOT NULL,
+    pushkey TEXT NOT NULL,
+    user_id TEXT NOT NULL,
+    /* We only track the most recent delete for each app_id, pushkey and user_id. */
+    UNIQUE (app_id, pushkey, user_id)
+);
+
+CREATE INDEX deleted_pushers_stream_id ON deleted_pushers (stream_id);
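The UNIQUE constraint is what keeps deleted_pushers bounded: repeated deletes of the same pusher collapse into one row carrying the newest stream_id (Synapse achieves this via _simple_upsert_txn above). A quick illustration of the intended semantics using an in-memory SQLite database, with INSERT OR REPLACE standing in for the upsert:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE deleted_pushers("
    " stream_id BIGINT NOT NULL, app_id TEXT NOT NULL,"
    " pushkey TEXT NOT NULL, user_id TEXT NOT NULL,"
    " UNIQUE (app_id, pushkey, user_id))"
)
row = ("com.example.app", "a_push_key", "@alice:example.com")
for stream_id in (5, 9):  # the same pusher deleted twice
    conn.execute(
        "INSERT OR REPLACE INTO deleted_pushers VALUES (?, ?, ?, ?)",
        (stream_id,) + row,
    )
print(conn.execute("SELECT stream_id FROM deleted_pushers").fetchall())
# -> [(9,)]: one row per pusher, at the most recent deletion's position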
synapse/storage/util/id_generators.py

@@ -49,9 +49,14 @@ class StreamIdGenerator(object):
         with stream_id_gen.get_next() as stream_id:
             # ... persist event ...
     """
-    def __init__(self, db_conn, table, column):
+    def __init__(self, db_conn, table, column, extra_tables=[]):
         self._lock = threading.Lock()
         self._current_max = _load_max_id(db_conn, table, column)
+        for table, column in extra_tables:
+            self._current_max = max(
+                self._current_max,
+                _load_max_id(db_conn, table, column)
+            )
         self._unfinished_ids = deque()
 
     def get_next(self):
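The extra_tables hook exists because pusher ids and deleted_pushers stream_ids are drawn from one id space: on startup the generator must seed from the maximum across both tables, or it could hand out an id already consumed by a delete. Roughly what the seeding computes (a sketch; _load_max_id itself falls back to 1 for an empty table):

def load_current_pushers_position(txn):
    # Equivalent of _load_max_id("pushers", "id") combined with
    # _load_max_id("deleted_pushers", "stream_id").
    txn.execute("SELECT COALESCE(MAX(id), 1) FROM pushers")
    (max_pusher_id,) = txn.fetchone()
    txn.execute("SELECT COALESCE(MAX(stream_id), 1) FROM deleted_pushers")
    (max_deleted_id,) = txn.fetchone()
    return max(max_pusher_id, max_deleted_id)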
tests/replication/test_resource.py

@@ -131,6 +131,7 @@ class ReplicationResourceCase(unittest.TestCase):
     test_timeout_tag_account_data = _test_timeout("tag_account_data")
     test_timeout_backfill = _test_timeout("backfill")
     test_timeout_push_rules = _test_timeout("push_rules")
+    test_timeout_pushers = _test_timeout("pushers")
 
     @defer.inlineCallbacks
     def send_text_message(self, room_id, message):