Don't needlessly batch in add_event_to_cache (#10784)

We've already batched up the events by this point, and other places in
events.py assume that we have. Removing the redundant batching here makes it
easier to adjust the batch sizes in one place.
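
To make the reasoning concrete: the persistence path chunks the full list of events once, near the top, and every later step (including the cache prefill touched by this diff) then receives an already-bounded batch. The sketch below only illustrates that shape; the function and helper names are illustrative, not Synapse's actual API.

from typing import Iterator, List, Sequence, TypeVar

T = TypeVar("T")


def batch_iter(items: Sequence[T], size: int) -> Iterator[List[T]]:
    """Split `items` into consecutive chunks of at most `size` elements."""
    for i in range(0, len(items), size):
        yield list(items[i : i + size])


def persist_events(events_and_contexts: Sequence[T], batch_size: int = 100) -> None:
    # The batch size is chosen once, here, at the top of the persistence path.
    for batch in batch_iter(events_and_contexts, batch_size):
        # Downstream steps (event tables, state updates, cache prefill, ...)
        # receive an already-bounded list and should not re-chunk it.
        add_batch_to_cache(batch)


def add_batch_to_cache(batch: List[T]) -> None:
    # Stand-in for the cache prefill in the diff below: it can issue a single
    # query covering the whole batch instead of re-batching in groups of 200.
    print(f"prefilling cache with {len(batch)} events")


if __name__ == "__main__":
    persist_events(list(range(250)), batch_size=100)  # prints 100, 100, 50
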
Erik Johnston 2021-09-10 10:16:52 +01:00 committed by GitHub
parent 273b6861f2
commit 7f0565e029
2 changed files with 24 additions and 26 deletions

changelog.d/10784.misc (new file, 1 addition)

@@ -0,0 +1 @@
+Minor speed ups when joining large rooms over federation.

synapse/storage/databases/main/events.py

@@ -1547,35 +1547,32 @@ class PersistEventsStore:
         to_prefill = []
 
         rows = []
-        N = 200
-        for i in range(0, len(events_and_contexts), N):
-            ev_map = {e[0].event_id: e[0] for e in events_and_contexts[i : i + N]}
-            if not ev_map:
-                break
 
-            sql = (
-                "SELECT "
-                " e.event_id as event_id, "
-                " r.redacts as redacts,"
-                " rej.event_id as rejects "
-                " FROM events as e"
-                " LEFT JOIN rejections as rej USING (event_id)"
-                " LEFT JOIN redactions as r ON e.event_id = r.redacts"
-                " WHERE "
-            )
+        ev_map = {e.event_id: e for e, _ in events_and_contexts}
+        if not ev_map:
+            return
 
-            clause, args = make_in_list_sql_clause(
-                self.database_engine, "e.event_id", list(ev_map)
-            )
+        sql = (
+            "SELECT "
+            " e.event_id as event_id, "
+            " r.redacts as redacts,"
+            " rej.event_id as rejects "
+            " FROM events as e"
+            " LEFT JOIN rejections as rej USING (event_id)"
+            " LEFT JOIN redactions as r ON e.event_id = r.redacts"
+            " WHERE "
+        )
 
-            txn.execute(sql + clause, args)
-            rows = self.db_pool.cursor_to_dict(txn)
-            for row in rows:
-                event = ev_map[row["event_id"]]
-                if not row["rejects"] and not row["redacts"]:
-                    to_prefill.append(
-                        _EventCacheEntry(event=event, redacted_event=None)
-                    )
+        clause, args = make_in_list_sql_clause(
+            self.database_engine, "e.event_id", list(ev_map)
+        )
+
+        txn.execute(sql + clause, args)
+        rows = self.db_pool.cursor_to_dict(txn)
+        for row in rows:
+            event = ev_map[row["event_id"]]
+            if not row["rejects"] and not row["redacts"]:
+                to_prefill.append(_EventCacheEntry(event=event, redacted_event=None))
 
         def prefill():
             for cache_entry in to_prefill:
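
A note on the query shape in the new code: make_in_list_sql_clause (a helper elsewhere in Synapse's storage layer) turns a column name and a list of values into a parameterised membership clause plus its bound arguments, so the rejection/redaction lookup for the whole batch becomes a single round trip. The standalone snippet below is only a rough approximation of that idea against SQLite, not the real helper, and the table schema is invented for the example.

import sqlite3
from typing import Any, Iterable, List, Tuple


def make_in_list_clause(column: str, values: Iterable[Any]) -> Tuple[str, List[Any]]:
    # Rough approximation of the helper used in the diff: build a
    # parameterised "column IN (?, ?, ...)" clause for the given values.
    # (An empty list would need special-casing; the diff avoids that case
    # by returning early when ev_map is empty.)
    values = list(values)
    placeholders = ", ".join("?" for _ in values)
    return f"{column} IN ({placeholders})", values


if __name__ == "__main__":
    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE events (event_id TEXT, rejected INTEGER)")
    conn.executemany(
        "INSERT INTO events VALUES (?, ?)",
        [("$a", 0), ("$b", 1), ("$c", 0)],
    )

    clause, args = make_in_list_clause("event_id", ["$a", "$b", "$c"])
    # One query covers every event in the batch, rather than one query per
    # sub-batch of 200 as in the removed loop.
    cur = conn.execute(f"SELECT event_id, rejected FROM events WHERE {clause}", args)
    print(cur.fetchall())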