forked from MirrorHub/synapse
Fix instantiation of message retention purge jobs
When figuring out which topological token to start a purge job at, we need to do the following:

1. Figure out a timestamp before which events will be purged
2. Select the first stream ordering after that timestamp
3. Select info about the first event after that stream ordering
4. Build a topological token from that info

In some situations (e.g. quiet rooms with a short max_lifetime), there might not be an event after the stream ordering at step 3, in which case the purge aborts with the error `No event found`.

To mitigate that, this patch fetches the first event _before_ the stream ordering instead of after.
parent b5ce7f5874
commit 855af069a4
2 changed files with 48 additions and 13 deletions
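To make the failure mode concrete, here is a small self-contained sketch of those four steps over an in-memory event list. It is illustrative only: the event tuples and the three helper functions are simplified stand-ins invented for this example, not Synapse's actual storage API.

# Toy model of the purge-token computation (invented helpers, not Synapse code).
# Each event is (stream_ordering, origin_server_ts); the room is "quiet": its
# newest event is much older than the retention cutoff.
events = [(1, 1000), (2, 2000), (3, 3000)]

def find_first_stream_ordering_after_ts(ts):
    # Step 2: first stream ordering whose event is newer than ts. In a quiet
    # room every event is older, so this lands past the end of the room.
    for stream, event_ts in events:
        if event_ts > ts:
            return stream
    return events[-1][0] + 1

def get_room_event_after_stream_ordering(stream_ordering):
    # Old step 3: first event at or after the given stream ordering.
    matches = [e for e in events if e[0] >= stream_ordering]
    return min(matches) if matches else None

def get_room_event_before_stream_ordering(stream_ordering):
    # New step 3: first event at or before the given stream ordering.
    matches = [e for e in events if e[0] <= stream_ordering]
    return max(matches) if matches else None

ts = 10000  # Step 1: purge everything sent before this timestamp.
stream_ordering = find_first_stream_ordering_after_ts(ts)  # 4, past the last event

print(get_room_event_after_stream_ordering(stream_ordering))   # None -> "No event found"
print(get_room_event_before_stream_ordering(stream_ordering))  # (3, 3000) -> purge can start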
@@ -156,7 +156,7 @@ class PaginationHandler(object):

             stream_ordering = yield self.store.find_first_stream_ordering_after_ts(ts)

-            r = yield self.store.get_room_event_after_stream_ordering(
+            r = yield self.store.get_room_event_before_stream_ordering(
                 room_id, stream_ordering,
             )
             if not r:
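For context, the handler only needs the returned (stream ordering, topological ordering, event_id) triple to build the topological token that bounds the purge (step 4 above). A minimal sketch of that last step, using a made-up row and writing out the "t<topological>-<stream>" token format by hand rather than going through Synapse's RoomStreamToken type:

# Hypothetical row, shaped like what get_room_event_before_stream_ordering returns.
r = (1234, 56, "$someevent:example.com")  # (stream ordering, topological ordering, event_id)

if not r:
    # With the old "after" lookup this branch could be hit in a quiet room,
    # aborting the purge with "No event found"; "before" avoids it whenever
    # the room has any history at all.
    raise Exception("No event found")

stream, topological, _event_id = r
token = "t%d-%d" % (topological, stream)  # topological token, e.g. "t56-1234"
print("Purging %s up to %s" % ("!room:example.com", token))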
@@ -536,21 +536,56 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             Deferred[(int, int, str)]:
                 (stream ordering, topological ordering, event_id)
         """
-
-        def _f(txn):
-            sql = (
-                "SELECT stream_ordering, topological_ordering, event_id"
-                " FROM events"
-                " WHERE room_id = ? AND stream_ordering >= ?"
-                " AND NOT outlier"
-                " ORDER BY stream_ordering"
-                " LIMIT 1"
-            )
-            txn.execute(sql, (room_id, stream_ordering))
-            return txn.fetchone()
-
-        return self.db.runInteraction("get_room_event_after_stream_ordering", _f)
+        return self.db.runInteraction(
+            "get_room_event_after_stream_ordering",
+            self.get_room_event_around_stream_ordering_txn,
+            room_id, stream_ordering, "f",
+        )
+
+    def get_room_event_before_stream_ordering(self, room_id, stream_ordering):
+        """Gets details of the first event in a room at or before a stream ordering
+
+        Args:
+            room_id (str):
+            stream_ordering (int):
+
+        Returns:
+            Deferred[(int, int, str)]:
+                (stream ordering, topological ordering, event_id)
+        """
+        return self.db.runInteraction(
+            "get_room_event_before_stream_ordering",
+            self.get_room_event_around_stream_ordering_txn,
+            room_id, stream_ordering, "b",
+        )
+
+    def get_room_event_around_stream_ordering_txn(
+        self, txn, room_id, stream_ordering, dir="f"
+    ):
+        """Gets details of the first event in a room either at or after, or at or
+        before, a given stream ordering, depending on the provided direction.
+
+        Args:
+            room_id (str):
+            stream_ordering (int):
+            dir (str): Direction to look in the room's history, either "f"
+                (forward) or "b" (backward).
+
+        Returns:
+            Deferred[(int, int, str)]:
+                (stream ordering, topological ordering, event_id)
+        """
+        sql = (
+            "SELECT stream_ordering, topological_ordering, event_id"
+            " FROM events"
+            " WHERE room_id = ? AND stream_ordering %s ?"
+            " AND NOT outlier"
+            " ORDER BY stream_ordering"
+            " LIMIT 1"
+        ) % ("<=" if dir == "b" else ">=",)
+        txn.execute(sql, (room_id, stream_ordering))
+        return txn.fetchone()

     @defer.inlineCallbacks
     def get_room_events_max_id(self, room_id=None):
         """Returns the current token for rooms stream.
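The refactored helper builds a single SQL statement and swaps only the comparison operator based on dir. Its behaviour can be checked in isolation against an in-memory SQLite table that mirrors just the columns the query touches; the table contents below are made up for the demo:

import sqlite3

# Minimal stand-in for the events table, with only the columns the query uses.
conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE events (room_id TEXT, stream_ordering INTEGER,"
    " topological_ordering INTEGER, event_id TEXT, outlier BOOLEAN)"
)
conn.executemany(
    "INSERT INTO events VALUES (?, ?, ?, ?, 0)",
    [("!quiet:example.com", s, s, "$ev%d:example.com" % s) for s in (1, 2, 3)],
)

def get_room_event_around_stream_ordering(room_id, stream_ordering, dir="f"):
    # Same shape as the patched query: only the operator changes with direction.
    sql = (
        "SELECT stream_ordering, topological_ordering, event_id"
        " FROM events"
        " WHERE room_id = ? AND stream_ordering %s ?"
        " AND NOT outlier"
        " ORDER BY stream_ordering"
        " LIMIT 1"
    ) % ("<=" if dir == "b" else ">=",)
    return conn.execute(sql, (room_id, stream_ordering)).fetchone()

# A stream ordering past the room's last event, as in a quiet room:
print(get_room_event_around_stream_ordering("!quiet:example.com", 10, "f"))  # None
print(get_room_event_around_stream_ordering("!quiet:example.com", 10, "b"))  # (1, 1, '$ev1:example.com')

Note that the ORDER BY stays ascending in both directions, so the backward lookup returns the oldest matching event rather than the one closest to the given stream ordering.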