Merge branch 'release-v1.4.0' of github.com:matrix-org/synapse into develop

This commit is contained in:
Erik Johnston 2019-10-02 11:08:07 +01:00
commit ecd254bc49
11 changed files with 193 additions and 19 deletions

1
changelog.d/6141.bugfix Normal file
View file

@ -0,0 +1 @@
Fix bad performance of censoring redactions background task.

1
changelog.d/6145.bugfix Normal file
View file

@ -0,0 +1 @@
Fix fetching censored redactions from DB, which caused APIs like initial sync to fail if it tried to include the censored redaction.

1
changelog.d/6146.bugfix Normal file
View file

@ -0,0 +1 @@
Fix exceptions when storing large retry intervals for down remote servers.

View file

@ -1389,6 +1389,18 @@ class EventsStore(
], ],
) )
for event, _ in events_and_contexts:
if not event.internal_metadata.is_redacted():
# If we're persisting an unredacted event we go and ensure
# that we mark any redactions that reference this event as
# requiring censoring.
self._simple_update_txn(
txn,
table="redactions",
keyvalues={"redacts": event.event_id},
updatevalues={"have_censored": False},
)
def _store_rejected_events_txn(self, txn, events_and_contexts): def _store_rejected_events_txn(self, txn, events_and_contexts):
"""Add rows to the 'rejections' table for received events which were """Add rows to the 'rejections' table for received events which were
rejected rejected
@ -1552,9 +1564,15 @@ class EventsStore(
def _store_redaction(self, txn, event): def _store_redaction(self, txn, event):
# invalidate the cache for the redacted event # invalidate the cache for the redacted event
txn.call_after(self._invalidate_get_event_cache, event.redacts) txn.call_after(self._invalidate_get_event_cache, event.redacts)
txn.execute(
"INSERT INTO redactions (event_id, redacts) VALUES (?,?)", self._simple_insert_txn(
(event.event_id, event.redacts), txn,
table="redactions",
values={
"event_id": event.event_id,
"redacts": event.redacts,
"received_ts": self._clock.time_msec(),
},
) )
@defer.inlineCallbacks @defer.inlineCallbacks
@ -1571,36 +1589,29 @@ class EventsStore(
if self.hs.config.redaction_retention_period is None: if self.hs.config.redaction_retention_period is None:
return return
max_pos = yield self.find_first_stream_ordering_after_ts( before_ts = self._clock.time_msec() - self.hs.config.redaction_retention_period
self._clock.time_msec() - self.hs.config.redaction_retention_period
)
# We fetch all redactions that: # We fetch all redactions that:
# 1. point to an event we have, # 1. point to an event we have,
# 2. has a stream ordering from before the cut off, and # 2. has a received_ts from before the cut off, and
# 3. we haven't yet censored. # 3. we haven't yet censored.
# #
# This is limited to 100 events to ensure that we don't try and do too # This is limited to 100 events to ensure that we don't try and do too
# much at once. We'll get called again so this should eventually catch # much at once. We'll get called again so this should eventually catch
# up. # up.
#
# We use the range [-max_pos, max_pos] to handle backfilled events,
# which are given negative stream ordering.
sql = """ sql = """
SELECT redact_event.event_id, redacts FROM redactions SELECT redactions.event_id, redacts FROM redactions
INNER JOIN events AS redact_event USING (event_id) LEFT JOIN events AS original_event ON (
INNER JOIN events AS original_event ON ( redacts = original_event.event_id
redact_event.room_id = original_event.room_id
AND redacts = original_event.event_id
) )
WHERE NOT have_censored WHERE NOT have_censored
AND ? <= redact_event.stream_ordering AND redact_event.stream_ordering <= ? AND redactions.received_ts <= ?
ORDER BY redact_event.stream_ordering ASC ORDER BY redactions.received_ts ASC
LIMIT ? LIMIT ?
""" """
rows = yield self._execute( rows = yield self._execute(
"_censor_redactions_fetch", None, sql, -max_pos, max_pos, 100 "_censor_redactions_fetch", None, sql, before_ts, 100
) )
updates = [] updates = []

View file

@ -67,6 +67,10 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore):
self.DELETE_SOFT_FAILED_EXTREMITIES, self._cleanup_extremities_bg_update self.DELETE_SOFT_FAILED_EXTREMITIES, self._cleanup_extremities_bg_update
) )
self.register_background_update_handler(
"redactions_received_ts", self._redactions_received_ts
)
@defer.inlineCallbacks @defer.inlineCallbacks
def _background_reindex_fields_sender(self, progress, batch_size): def _background_reindex_fields_sender(self, progress, batch_size):
target_min_stream_id = progress["target_min_stream_id_inclusive"] target_min_stream_id = progress["target_min_stream_id_inclusive"]
@ -397,3 +401,60 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore):
) )
return num_handled return num_handled
@defer.inlineCallbacks
def _redactions_received_ts(self, progress, batch_size):
    """Background update handler that backfills the `received_ts` column
    in the `redactions` table for rows created before the column existed.

    Walks the table in `event_id` order, `batch_size` rows at a time,
    persisting the last processed ID in the background-update progress so
    the scan resumes where it left off.

    Args:
        progress (dict): serialized progress; `last_event_id` is the
            highest redaction event ID already handled ("" on first run).
        batch_size (int): maximum number of redaction rows to process in
            this invocation.

    Returns:
        Deferred[int]: number of rows handled; 0 means the scan is
        complete and the background update has been de-registered.
    """
    last_event_id = progress.get("last_event_id", "")

    def _redactions_received_ts_txn(txn):
        # Fetch the set of event IDs that we want to update, strictly
        # after the last batch's upper bound.
        sql = """
            SELECT event_id FROM redactions
            WHERE event_id > ?
            ORDER BY event_id ASC
            LIMIT ?
        """

        txn.execute(sql, (last_event_id, batch_size))

        rows = txn.fetchall()
        if not rows:
            # Nothing left to backfill.
            return 0

        # Rows are sorted ascending, so the last row bounds this batch.
        upper_event_id, = rows[-1]

        # Update the redactions with the received_ts.
        #
        # Note: Not all events have an associated received_ts, so we
        # fall back to using origin_server_ts. If we for some reason don't
        # have an origin_server_ts, let's just use the current timestamp.
        #
        # We don't want to leave it null, as then we'll never try and
        # censor those redactions.
        sql = """
            UPDATE redactions
            SET received_ts = (
                SELECT COALESCE(received_ts, origin_server_ts, ?) FROM events
                WHERE events.event_id = redactions.event_id
            )
            WHERE ? <= event_id AND event_id <= ?
        """

        txn.execute(sql, (self._clock.time_msec(), last_event_id, upper_event_id))

        # Record progress inside the same transaction so a crash can't
        # lose our place.
        self._background_update_progress_txn(
            txn, "redactions_received_ts", {"last_event_id": upper_event_id}
        )

        return len(rows)

    count = yield self.runInteraction(
        "_redactions_received_ts", _redactions_received_ts_txn
    )

    if not count:
        # An empty batch means every row has been backfilled; retire the
        # background update.
        yield self._end_background_update("redactions_received_ts")

    return count

View file

@ -238,6 +238,20 @@ class EventsWorkerStore(SQLBaseStore):
# we have to recheck auth now. # we have to recheck auth now.
if not allow_rejected and entry.event.type == EventTypes.Redaction: if not allow_rejected and entry.event.type == EventTypes.Redaction:
if not hasattr(entry.event, "redacts"):
# A redacted redaction doesn't have a `redacts` key, in
# which case lets just withhold the event.
#
# Note: Most of the time if the redactions has been
# redacted we still have the un-redacted event in the DB
# and so we'll still see the `redacts` key. However, this
# isn't always true e.g. if we have censored the event.
logger.debug(
"Withholding redaction event %s as we don't have redacts key",
event_id,
)
continue
redacted_event_id = entry.event.redacts redacted_event_id = entry.event.redacts
event_map = yield self._get_events_from_cache_or_db([redacted_event_id]) event_map = yield self._get_events_from_cache_or_db([redacted_event_id])
original_event_entry = event_map.get(redacted_event_id) original_event_entry = event_map.get(redacted_event_id)

View file

@ -0,0 +1,18 @@
/* Copyright 2019 The Matrix.org Foundation C.I.C
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

-- We want to store large retry intervals so we upgrade the column from INT
-- to BIGINT. We don't need to do this on SQLite: its columns use dynamic
-- typing, so an INTEGER column can already hold 64-bit values.
ALTER TABLE destinations ALTER retry_interval SET DATA TYPE BIGINT;

View file

@ -0,0 +1,20 @@
/* Copyright 2019 The Matrix.org Foundation C.I.C.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

-- Record when each redaction was received, so the censoring background
-- task can find redactions older than the retention cut-off.
ALTER TABLE redactions ADD COLUMN received_ts BIGINT;

-- Partial index covering only the redactions the censoring task still
-- has to process.
CREATE INDEX redactions_have_censored_ts ON redactions(received_ts) WHERE not have_censored;

-- Backfill received_ts for pre-existing rows via a background update
-- (handled by `_redactions_received_ts`).
INSERT INTO background_updates (update_name, progress_json) VALUES
  ('redactions_received_ts', '{}');

View file

@ -29,7 +29,7 @@ MIN_RETRY_INTERVAL = 10 * 60 * 1000
RETRY_MULTIPLIER = 5 RETRY_MULTIPLIER = 5
# a cap on the backoff. (Essentially none) # a cap on the backoff. (Essentially none)
MAX_RETRY_INTERVAL = 2 ** 63 MAX_RETRY_INTERVAL = 2 ** 62
class NotRetryingDestination(Exception): class NotRetryingDestination(Exception):

View file

@ -118,6 +118,8 @@ class RedactionTestCase(unittest.HomeserverTestCase):
self.get_success(self.store.persist_event(event, context)) self.get_success(self.store.persist_event(event, context))
return event
def test_redact(self): def test_redact(self):
self.get_success( self.get_success(
self.inject_room_member(self.room1, self.u_alice, Membership.JOIN) self.inject_room_member(self.room1, self.u_alice, Membership.JOIN)
@ -361,3 +363,37 @@ class RedactionTestCase(unittest.HomeserverTestCase):
) )
self.assert_dict({"content": {}}, json.loads(event_json)) self.assert_dict({"content": {}}, json.loads(event_json))
def test_redact_redaction(self):
    """Tests that we can redact a redaction and can fetch it again.
    """
    # Alice joins the room and posts a message we can redact.
    join = self.inject_room_member(self.room1, self.u_alice, Membership.JOIN)
    self.get_success(join)

    msg_event = self.get_success(self.inject_message(self.room1, self.u_alice, "t"))

    # Redact the message, then redact that redaction itself.
    first_redact_event = self.get_success(
        self.inject_redaction(
            self.room1, msg_event.event_id, self.u_alice, "Redacting message"
        )
    )

    second_redaction = self.inject_redaction(
        self.room1,
        first_redact_event.event_id,
        self.u_alice,
        "Redacting redaction",
    )
    self.get_success(second_redaction)

    # Now lets jump to the future where we have censored the redaction event
    # in the DB.
    self.reactor.advance(60 * 60 * 24 * 31)

    # We just want to check that fetching the event doesn't raise an exception.
    self.get_success(
        self.store.get_event(first_redact_event.event_id, allow_none=True)
    )

View file

@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from synapse.util.retryutils import MAX_RETRY_INTERVAL
from tests.unittest import HomeserverTestCase from tests.unittest import HomeserverTestCase
@ -45,3 +47,12 @@ class TransactionStoreTestCase(HomeserverTestCase):
""" """
d = self.store.set_destination_retry_timings("example.com", 1000, 50, 100) d = self.store.set_destination_retry_timings("example.com", 1000, 50, 100)
self.get_success(d) self.get_success(d)
def test_large_destination_retry(self):
    """Storing and re-fetching retry timings at MAX_RETRY_INTERVAL must not
    error — exercises the widened (BIGINT) retry_interval column.
    """
    d = self.store.set_destination_retry_timings(
        "example.com", MAX_RETRY_INTERVAL, MAX_RETRY_INTERVAL, MAX_RETRY_INTERVAL
    )
    self.get_success(d)

    # Round-trip: reading the row back must also succeed.
    d = self.store.get_destination_retry_timings("example.com")
    self.get_success(d)