Split state groups into a separate data store (#6296)
commit 75d8f26ac8 (parent fa780e9721)
28 changed files with 1159 additions and 1168 deletions
changelog.d/6245.misc (new file)
@@ -0,0 +1 @@
+Split out state storage into separate data store.
@@ -51,11 +51,12 @@ from synapse.storage.data_stores.main.registration import (
 from synapse.storage.data_stores.main.room import RoomBackgroundUpdateStore
 from synapse.storage.data_stores.main.roommember import RoomMemberBackgroundUpdateStore
 from synapse.storage.data_stores.main.search import SearchBackgroundUpdateStore
-from synapse.storage.data_stores.main.state import StateBackgroundUpdateStore
+from synapse.storage.data_stores.main.state import MainStateBackgroundUpdateStore
 from synapse.storage.data_stores.main.stats import StatsStore
 from synapse.storage.data_stores.main.user_directory import (
     UserDirectoryBackgroundUpdateStore,
 )
+from synapse.storage.data_stores.state.bg_updates import StateBackgroundUpdateStore
 from synapse.storage.database import Database, make_conn
 from synapse.storage.engines import create_engine
 from synapse.storage.prepare_database import prepare_database
@@ -138,6 +139,7 @@ class Store(
     RoomMemberBackgroundUpdateStore,
     SearchBackgroundUpdateStore,
     StateBackgroundUpdateStore,
+    MainStateBackgroundUpdateStore,
     UserDirectoryBackgroundUpdateStore,
     StatsStore,
 ):
@@ -496,9 +498,7 @@ class Porter(object):
     def run(self):
         try:
             self.sqlite_store = yield self.build_db_store(
-                DatabaseConnectionConfig(
-                    "master", self.sqlite_config, data_stores=["main"]
-                )
+                DatabaseConnectionConfig("master-sqlite", self.sqlite_config)
             )

             # Check if all background updates are done, abort if not.
@@ -34,10 +34,12 @@ class DatabaseConnectionConfig:
         module name, and `args` for the args to give to the database
         connector.
         data_stores: The list of data stores that should be provisioned on the
-            database.
+            database. Defaults to all data stores.
     """

-    def __init__(self, name: str, db_config: dict, data_stores: List[str]):
+    def __init__(
+        self, name: str, db_config: dict, data_stores: List[str] = ["main", "state"]
+    ):
         if db_config["name"] not in ("sqlite3", "psycopg2"):
             raise ConfigError("Unsupported database type %r" % (db_config["name"],))

@@ -62,9 +64,7 @@ class DatabaseConfig(Config):
         if database_config is None:
             database_config = {"name": "sqlite3", "args": {}}

-        self.databases = [
-            DatabaseConnectionConfig("master", database_config, data_stores=["main"])
-        ]
+        self.databases = [DatabaseConnectionConfig("master", database_config)]

         self.set_databasepath(config.get("database_path"))

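For illustration, here is a minimal sketch, not part of the patch, of how the new default argument behaves at the call sites this commit touches; the class body is simplified, and everything beyond the `data_stores` attribute is an assumption:

```python
from typing import List


class DatabaseConnectionConfig:
    """Simplified stand-in for the real class; validation is omitted."""

    def __init__(
        self, name: str, db_config: dict, data_stores: List[str] = ["main", "state"]
    ):
        # The mutable default is safe here because it is only ever read.
        self.name = name
        self.data_stores = data_stores


# Omitting data_stores now provisions every data store on the connection ...
master = DatabaseConnectionConfig("master", {"name": "sqlite3", "args": {}})
assert master.data_stores == ["main", "state"]

# ... while a caller can still pin an explicit subset, as the old call sites did.
main_only = DatabaseConnectionConfig(
    "master", {"name": "sqlite3", "args": {}}, data_stores=["main"]
)
assert main_only.data_stores == ["main"]
```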
@@ -15,6 +15,7 @@

 import logging

+from synapse.storage.data_stores.state import StateGroupDataStore
 from synapse.storage.database import Database, make_conn
 from synapse.storage.engines import create_engine
 from synapse.storage.prepare_database import prepare_database
@@ -55,6 +56,10 @@ class DataStores(object):
                 logger.info("Starting 'main' data store")
                 self.main = main_store_class(database, db_conn, hs)

+                if "state" in database_config.data_stores:
+                    logger.info("Starting 'state' data store")
+                    self.state = StateGroupDataStore(database, db_conn, hs)
+
                 db_conn.commit()

                 self.databases.append(database)
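To make the provisioning flow above concrete, a toy model follows; the function signature and `FakeStore` class are stand-ins, not Synapse's real API:

```python
# Each database config names the data stores it hosts; only the matching
# store objects are instantiated against that connection.
class FakeStore:
    def __init__(self, database, db_conn, hs):
        self.database = database


def provision(database, db_conn, hs, database_config):
    store_classes = {"main": FakeStore, "state": FakeStore}
    stores = {}
    for name in database_config.data_stores:
        # mirrors: self.state = StateGroupDataStore(database, db_conn, hs)
        stores[name] = store_classes[name](database, db_conn, hs)
    return stores
```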
@@ -1757,163 +1757,6 @@ class EventsStore(

         return state_groups

-    def purge_unreferenced_state_groups(
-        self, room_id: str, state_groups_to_delete
-    ) -> defer.Deferred:
-        """Deletes no longer referenced state groups and de-deltas any state
-        groups that reference them.
-
-        Args:
-            room_id: The room the state groups belong to (must all be in the
-                same room).
-            state_groups_to_delete (Collection[int]): Set of all state groups
-                to delete.
-        """
-
-        return self.db.runInteraction(
-            "purge_unreferenced_state_groups",
-            self._purge_unreferenced_state_groups,
-            room_id,
-            state_groups_to_delete,
-        )
-
-    def _purge_unreferenced_state_groups(self, txn, room_id, state_groups_to_delete):
-        logger.info(
-            "[purge] found %i state groups to delete", len(state_groups_to_delete)
-        )
-
-        rows = self.db.simple_select_many_txn(
-            txn,
-            table="state_group_edges",
-            column="prev_state_group",
-            iterable=state_groups_to_delete,
-            keyvalues={},
-            retcols=("state_group",),
-        )
-
-        remaining_state_groups = set(
-            row["state_group"]
-            for row in rows
-            if row["state_group"] not in state_groups_to_delete
-        )
-
-        logger.info(
-            "[purge] de-delta-ing %i remaining state groups",
-            len(remaining_state_groups),
-        )
-
-        # Now we turn the state groups that reference to-be-deleted state
-        # groups to non delta versions.
-        for sg in remaining_state_groups:
-            logger.info("[purge] de-delta-ing remaining state group %s", sg)
-            curr_state = self._get_state_groups_from_groups_txn(txn, [sg])
-            curr_state = curr_state[sg]
-
-            self.db.simple_delete_txn(
-                txn, table="state_groups_state", keyvalues={"state_group": sg}
-            )
-
-            self.db.simple_delete_txn(
-                txn, table="state_group_edges", keyvalues={"state_group": sg}
-            )
-
-            self.db.simple_insert_many_txn(
-                txn,
-                table="state_groups_state",
-                values=[
-                    {
-                        "state_group": sg,
-                        "room_id": room_id,
-                        "type": key[0],
-                        "state_key": key[1],
-                        "event_id": state_id,
-                    }
-                    for key, state_id in iteritems(curr_state)
-                ],
-            )
-
-        logger.info("[purge] removing redundant state groups")
-        txn.executemany(
-            "DELETE FROM state_groups_state WHERE state_group = ?",
-            ((sg,) for sg in state_groups_to_delete),
-        )
-        txn.executemany(
-            "DELETE FROM state_groups WHERE id = ?",
-            ((sg,) for sg in state_groups_to_delete),
-        )
-
-    @defer.inlineCallbacks
-    def get_previous_state_groups(self, state_groups):
-        """Fetch the previous groups of the given state groups.
-
-        Args:
-            state_groups (Iterable[int])
-
-        Returns:
-            Deferred[dict[int, int]]: mapping from state group to previous
-                state group.
-        """
-
-        rows = yield self.db.simple_select_many_batch(
-            table="state_group_edges",
-            column="prev_state_group",
-            iterable=state_groups,
-            keyvalues={},
-            retcols=("prev_state_group", "state_group"),
-            desc="get_previous_state_groups",
-        )
-
-        return {row["state_group"]: row["prev_state_group"] for row in rows}
-
-    def purge_room_state(self, room_id, state_groups_to_delete):
-        """Deletes all record of a room from state tables
-
-        Args:
-            room_id (str):
-            state_groups_to_delete (list[int]): State groups to delete
-        """
-
-        return self.db.runInteraction(
-            "purge_room_state",
-            self._purge_room_state_txn,
-            room_id,
-            state_groups_to_delete,
-        )
-
-    def _purge_room_state_txn(self, txn, room_id, state_groups_to_delete):
-        # first we have to delete the state groups states
-        logger.info("[purge] removing %s from state_groups_state", room_id)
-
-        self.db.simple_delete_many_txn(
-            txn,
-            table="state_groups_state",
-            column="state_group",
-            iterable=state_groups_to_delete,
-            keyvalues={},
-        )
-
-        # ... and the state group edges
-        logger.info("[purge] removing %s from state_group_edges", room_id)
-
-        self.db.simple_delete_many_txn(
-            txn,
-            table="state_group_edges",
-            column="state_group",
-            iterable=state_groups_to_delete,
-            keyvalues={},
-        )
-
-        # ... and the state groups
-        logger.info("[purge] removing %s from state_groups", room_id)
-
-        self.db.simple_delete_many_txn(
-            txn,
-            table="state_groups",
-            column="id",
-            iterable=state_groups_to_delete,
-            keyvalues={},
-        )
-
     async def is_event_after(self, event_id1, event_id2):
         """Returns True if event_id1 is after event_id2 in the stream
         """
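The removed `_purge_unreferenced_state_groups` de-deltas survivors before deleting their ancestors. A self-contained toy model of that step, with hypothetical in-memory stand-ins for the `state_group_edges` and `state_groups_state` tables:

```python
def resolve_full_state(group, edges, rows):
    """Walk prev_state_group edges to the root, then apply deltas downwards
    so that newer keys override older ones."""
    chain = []
    g = group
    while g is not None:
        chain.append(g)
        g = edges.get(g)  # prev_state_group, or None at the root
    state = {}
    for g in reversed(chain):
        state.update(rows.get(g, {}))
    return state


edges = {3: 2, 2: 1}  # state_group -> prev_state_group
rows = {
    1: {("m.room.create", ""): "$a"},
    2: {("m.room.name", ""): "$b"},
    3: {("m.room.name", ""): "$c"},
}

# Before groups {1, 2} are deleted, group 3 is rewritten as a full snapshot
# and its edge row dropped -- the effect of the "de-delta-ing" loop above.
rows[3] = resolve_full_state(3, edges, rows)
del edges[3]
assert rows[3] == {("m.room.create", ""): "$a", ("m.room.name", ""): "$c"}
```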
@@ -20,7 +20,6 @@ DROP INDEX IF EXISTS events_room_id; -- Prefix of events_room_stream
 DROP INDEX IF EXISTS events_order; -- Prefix of events_order_topo_stream_room
 DROP INDEX IF EXISTS events_topological_ordering; -- Prefix of events_order_topo_stream_room
 DROP INDEX IF EXISTS events_stream_ordering; -- Duplicate of PRIMARY KEY
-DROP INDEX IF EXISTS state_groups_id; -- Duplicate of PRIMARY KEY
 DROP INDEX IF EXISTS event_to_state_groups_id; -- Duplicate of PRIMARY KEY
 DROP INDEX IF EXISTS event_push_actions_room_id_event_id_user_id_profile_tag; -- Duplicate of UNIQUE CONSTRAINT

@@ -975,40 +975,6 @@ CREATE TABLE state_events (



-CREATE TABLE state_group_edges (
-    state_group bigint NOT NULL,
-    prev_state_group bigint NOT NULL
-);
-
-
-
-CREATE SEQUENCE state_group_id_seq
-    START WITH 1
-    INCREMENT BY 1
-    NO MINVALUE
-    NO MAXVALUE
-    CACHE 1;
-
-
-
-CREATE TABLE state_groups (
-    id bigint NOT NULL,
-    room_id text NOT NULL,
-    event_id text NOT NULL
-);
-
-
-
-CREATE TABLE state_groups_state (
-    state_group bigint NOT NULL,
-    room_id text NOT NULL,
-    type text NOT NULL,
-    state_key text NOT NULL,
-    event_id text NOT NULL
-);
-
-
-
 CREATE TABLE stats_stream_pos (
     lock character(1) DEFAULT 'X'::bpchar NOT NULL,
     stream_id bigint,
@@ -1482,12 +1448,6 @@ ALTER TABLE ONLY state_events
     ADD CONSTRAINT state_events_event_id_key UNIQUE (event_id);


-
-ALTER TABLE ONLY state_groups
-    ADD CONSTRAINT state_groups_pkey PRIMARY KEY (id);
-
-
-
 ALTER TABLE ONLY stats_stream_pos
     ADD CONSTRAINT stats_stream_pos_lock_key UNIQUE (lock);

@@ -1928,18 +1888,6 @@ CREATE UNIQUE INDEX room_stats_room_ts ON room_stats USING btree (room_id, ts);



-CREATE INDEX state_group_edges_idx ON state_group_edges USING btree (state_group);
-
-
-
-CREATE INDEX state_group_edges_prev_idx ON state_group_edges USING btree (prev_state_group);
-
-
-
-CREATE INDEX state_groups_state_type_idx ON state_groups_state USING btree (state_group, type, state_key);
-
-
-
 CREATE INDEX stream_ordering_to_exterm_idx ON stream_ordering_to_exterm USING btree (stream_ordering);


@@ -42,8 +42,6 @@ CREATE INDEX ev_edges_id ON event_edges(event_id);
 CREATE INDEX ev_edges_prev_id ON event_edges(prev_event_id);
 CREATE TABLE room_depth( room_id TEXT NOT NULL, min_depth INTEGER NOT NULL, UNIQUE (room_id) );
 CREATE INDEX room_depth_room ON room_depth(room_id);
-CREATE TABLE state_groups( id BIGINT PRIMARY KEY, room_id TEXT NOT NULL, event_id TEXT NOT NULL );
-CREATE TABLE state_groups_state( state_group BIGINT NOT NULL, room_id TEXT NOT NULL, type TEXT NOT NULL, state_key TEXT NOT NULL, event_id TEXT NOT NULL );
 CREATE TABLE event_to_state_groups( event_id TEXT NOT NULL, state_group BIGINT NOT NULL, UNIQUE (event_id) );
 CREATE TABLE local_media_repository ( media_id TEXT, media_type TEXT, media_length INTEGER, created_ts BIGINT, upload_name TEXT, user_id TEXT, quarantined_by TEXT, url_cache TEXT, last_access_ts BIGINT, UNIQUE (media_id) );
 CREATE TABLE local_media_repository_thumbnails ( media_id TEXT, thumbnail_width INTEGER, thumbnail_height INTEGER, thumbnail_type TEXT, thumbnail_method TEXT, thumbnail_length INTEGER, UNIQUE ( media_id, thumbnail_width, thumbnail_height, thumbnail_type ) );
|
@ -120,9 +118,6 @@ CREATE TABLE device_max_stream_id ( stream_id BIGINT NOT NULL );
|
||||||
CREATE TABLE public_room_list_stream ( stream_id BIGINT NOT NULL, room_id TEXT NOT NULL, visibility BOOLEAN NOT NULL , appservice_id TEXT, network_id TEXT);
|
CREATE TABLE public_room_list_stream ( stream_id BIGINT NOT NULL, room_id TEXT NOT NULL, visibility BOOLEAN NOT NULL , appservice_id TEXT, network_id TEXT);
|
||||||
CREATE INDEX public_room_list_stream_idx on public_room_list_stream( stream_id );
|
CREATE INDEX public_room_list_stream_idx on public_room_list_stream( stream_id );
|
||||||
CREATE INDEX public_room_list_stream_rm_idx on public_room_list_stream( room_id, stream_id );
|
CREATE INDEX public_room_list_stream_rm_idx on public_room_list_stream( room_id, stream_id );
|
||||||
CREATE TABLE state_group_edges( state_group BIGINT NOT NULL, prev_state_group BIGINT NOT NULL );
|
|
||||||
CREATE INDEX state_group_edges_idx ON state_group_edges(state_group);
|
|
||||||
CREATE INDEX state_group_edges_prev_idx ON state_group_edges(prev_state_group);
|
|
||||||
CREATE TABLE stream_ordering_to_exterm ( stream_ordering BIGINT NOT NULL, room_id TEXT NOT NULL, event_id TEXT NOT NULL );
|
CREATE TABLE stream_ordering_to_exterm ( stream_ordering BIGINT NOT NULL, room_id TEXT NOT NULL, event_id TEXT NOT NULL );
|
||||||
CREATE INDEX stream_ordering_to_exterm_idx on stream_ordering_to_exterm( stream_ordering );
|
CREATE INDEX stream_ordering_to_exterm_idx on stream_ordering_to_exterm( stream_ordering );
|
||||||
CREATE INDEX stream_ordering_to_exterm_rm_idx on stream_ordering_to_exterm( room_id, stream_ordering );
|
CREATE INDEX stream_ordering_to_exterm_rm_idx on stream_ordering_to_exterm( room_id, stream_ordering );
|
||||||
|
@@ -254,6 +249,5 @@ CREATE INDEX user_ips_last_seen_only ON user_ips (last_seen);
 CREATE INDEX users_creation_ts ON users (creation_ts);
 CREATE INDEX event_to_state_groups_sg_index ON event_to_state_groups (state_group);
 CREATE UNIQUE INDEX device_lists_remote_cache_unique_id ON device_lists_remote_cache (user_id, device_id);
-CREATE INDEX state_groups_state_type_idx ON state_groups_state(state_group, type, state_key);
 CREATE UNIQUE INDEX device_lists_remote_extremeties_unique_idx ON device_lists_remote_extremeties (user_id);
 CREATE UNIQUE INDEX user_ips_user_token_ip_unique_index ON user_ips (user_id, access_token, ip);
@@ -17,8 +17,7 @@ import logging
 from collections import namedtuple
 from typing import Iterable, Tuple

-from six import iteritems, itervalues
-from six.moves import range
+from six import iteritems

 from twisted.internet import defer

@@ -29,11 +28,9 @@ from synapse.events.snapshot import EventContext
 from synapse.storage._base import SQLBaseStore
 from synapse.storage.data_stores.main.events_worker import EventsWorkerStore
 from synapse.storage.database import Database
-from synapse.storage.engines import PostgresEngine
 from synapse.storage.state import StateFilter
-from synapse.util.caches import get_cache_factor_for, intern_string
+from synapse.util.caches import intern_string
 from synapse.util.caches.descriptors import cached, cachedList
-from synapse.util.caches.dictionary_cache import DictionaryCache
 from synapse.util.stringutils import to_ascii

 logger = logging.getLogger(__name__)
@@ -55,207 +52,14 @@ class _GetStateGroupDelta(
         return len(self.delta_ids) if self.delta_ids else 0


-class StateGroupBackgroundUpdateStore(SQLBaseStore):
-    """Defines functions related to state groups needed to run the state backgroud
-    updates.
-    """
-
-    def _count_state_group_hops_txn(self, txn, state_group):
-        """Given a state group, count how many hops there are in the tree.
-
-        This is used to ensure the delta chains don't get too long.
-        """
-        if isinstance(self.database_engine, PostgresEngine):
-            sql = """
-                WITH RECURSIVE state(state_group) AS (
-                    VALUES(?::bigint)
-                    UNION ALL
-                    SELECT prev_state_group FROM state_group_edges e, state s
-                    WHERE s.state_group = e.state_group
-                )
-                SELECT count(*) FROM state;
-            """
-
-            txn.execute(sql, (state_group,))
-            row = txn.fetchone()
-            if row and row[0]:
-                return row[0]
-            else:
-                return 0
-        else:
-            # We don't use WITH RECURSIVE on sqlite3 as there are distributions
-            # that ship with an sqlite3 version that doesn't support it (e.g. wheezy)
-            next_group = state_group
-            count = 0
-
-            while next_group:
-                next_group = self.db.simple_select_one_onecol_txn(
-                    txn,
-                    table="state_group_edges",
-                    keyvalues={"state_group": next_group},
-                    retcol="prev_state_group",
-                    allow_none=True,
-                )
-                if next_group:
-                    count += 1
-
-            return count
-
-    def _get_state_groups_from_groups_txn(
-        self, txn, groups, state_filter=StateFilter.all()
-    ):
-        results = {group: {} for group in groups}
-
-        where_clause, where_args = state_filter.make_sql_filter_clause()
-
-        # Unless the filter clause is empty, we're going to append it after an
-        # existing where clause
-        if where_clause:
-            where_clause = " AND (%s)" % (where_clause,)
-
-        if isinstance(self.database_engine, PostgresEngine):
-            # Temporarily disable sequential scans in this transaction. This is
-            # a temporary hack until we can add the right indices in
-            txn.execute("SET LOCAL enable_seqscan=off")
-
-            # The below query walks the state_group tree so that the "state"
-            # table includes all state_groups in the tree. It then joins
-            # against `state_groups_state` to fetch the latest state.
-            # It assumes that previous state groups are always numerically
-            # lesser.
-            # The PARTITION is used to get the event_id in the greatest state
-            # group for the given type, state_key.
-            # This may return multiple rows per (type, state_key), but last_value
-            # should be the same.
-            sql = """
-                WITH RECURSIVE state(state_group) AS (
-                    VALUES(?::bigint)
-                    UNION ALL
-                    SELECT prev_state_group FROM state_group_edges e, state s
-                    WHERE s.state_group = e.state_group
-                )
-                SELECT DISTINCT type, state_key, last_value(event_id) OVER (
-                    PARTITION BY type, state_key ORDER BY state_group ASC
-                    ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
-                ) AS event_id FROM state_groups_state
-                WHERE state_group IN (
-                    SELECT state_group FROM state
-                )
-            """
-
-            for group in groups:
-                args = [group]
-                args.extend(where_args)
-
-                txn.execute(sql + where_clause, args)
-                for row in txn:
-                    typ, state_key, event_id = row
-                    key = (typ, state_key)
-                    results[group][key] = event_id
-        else:
-            max_entries_returned = state_filter.max_entries_returned()
-
-            # We don't use WITH RECURSIVE on sqlite3 as there are distributions
-            # that ship with an sqlite3 version that doesn't support it (e.g. wheezy)
-            for group in groups:
-                next_group = group
-
-                while next_group:
-                    # We did this before by getting the list of group ids, and
-                    # then passing that list to sqlite to get latest event for
-                    # each (type, state_key). However, that was terribly slow
-                    # without the right indices (which we can't add until
-                    # after we finish deduping state, which requires this func)
-                    args = [next_group]
-                    args.extend(where_args)
-
-                    txn.execute(
-                        "SELECT type, state_key, event_id FROM state_groups_state"
-                        " WHERE state_group = ? " + where_clause,
-                        args,
-                    )
-                    results[group].update(
-                        ((typ, state_key), event_id)
-                        for typ, state_key, event_id in txn
-                        if (typ, state_key) not in results[group]
-                    )
-
-                    # If the number of entries in the (type,state_key)->event_id dict
-                    # matches the number of (type,state_keys) types we were searching
-                    # for, then we must have found them all, so no need to go walk
-                    # further down the tree... UNLESS our types filter contained
-                    # wildcards (i.e. Nones) in which case we have to do an exhaustive
-                    # search
-                    if (
-                        max_entries_returned is not None
-                        and len(results[group]) == max_entries_returned
-                    ):
-                        break
-
-                    next_group = self.db.simple_select_one_onecol_txn(
-                        txn,
-                        table="state_group_edges",
-                        keyvalues={"state_group": next_group},
-                        retcol="prev_state_group",
-                        allow_none=True,
-                    )
-
-        return results
-
-
 # this inherits from EventsWorkerStore because it calls self.get_events
-class StateGroupWorkerStore(
-    EventsWorkerStore, StateGroupBackgroundUpdateStore, SQLBaseStore
-):
+class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
     """The parts of StateGroupStore that can be called from workers.
     """

-    STATE_GROUP_DEDUPLICATION_UPDATE_NAME = "state_group_state_deduplication"
-    STATE_GROUP_INDEX_UPDATE_NAME = "state_group_state_type_index"
-    CURRENT_STATE_INDEX_UPDATE_NAME = "current_state_members_idx"
-
     def __init__(self, database: Database, db_conn, hs):
         super(StateGroupWorkerStore, self).__init__(database, db_conn, hs)

-        # Originally the state store used a single DictionaryCache to cache the
-        # event IDs for the state types in a given state group to avoid hammering
-        # on the state_group* tables.
-        #
-        # The point of using a DictionaryCache is that it can cache a subset
-        # of the state events for a given state group (i.e. a subset of the keys for a
-        # given dict which is an entry in the cache for a given state group ID).
-        #
-        # However, this poses problems when performing complicated queries
-        # on the store - for instance: "give me all the state for this group, but
-        # limit members to this subset of users", as DictionaryCache's API isn't
-        # rich enough to say "please cache any of these fields, apart from this subset".
-        # This is problematic when lazy loading members, which requires this behaviour,
-        # as without it the cache has no choice but to speculatively load all
-        # state events for the group, which negates the efficiency being sought.
-        #
-        # Rather than overcomplicating DictionaryCache's API, we instead split the
-        # state_group_cache into two halves - one for tracking non-member events,
-        # and the other for tracking member_events. This means that lazy loading
-        # queries can be made in a cache-friendly manner by querying both caches
-        # separately and then merging the result. So for the example above, you
-        # would query the members cache for a specific subset of state keys
-        # (which DictionaryCache will handle efficiently and fine) and the non-members
-        # cache for all state (which DictionaryCache will similarly handle fine)
-        # and then just merge the results together.
-        #
-        # We size the non-members cache to be smaller than the members cache as the
-        # vast majority of state in Matrix (today) is member events.
-
-        self._state_group_cache = DictionaryCache(
-            "*stateGroupCache*",
-            # TODO: this hasn't been tuned yet
-            50000 * get_cache_factor_for("stateGroupCache"),
-        )
-        self._state_group_members_cache = DictionaryCache(
-            "*stateGroupMembersCache*",
-            500000 * get_cache_factor_for("stateGroupMembersCache"),
-        )
-
     @defer.inlineCallbacks
     def get_room_version(self, room_id):
         """Get the room_version of a given room
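The helpers deleted above walk the delta chain twice over: once as a Postgres recursive CTE and once as a hop-by-hop loop for older SQLite versions. A minimal Python model of the traversal they share, illustrative only and not from the patch:

```python
def state_chain(group, edges):
    """The set of groups the recursive CTE visits: the group itself plus all
    of its ancestors reached via prev_state_group edges."""
    out = [group]
    while group in edges:
        group = edges[group]
        out.append(group)
    return out


# For group 3 with edges {3: 2, 2: 1} the chain is [3, 2, 1]; the full state
# is the newest event_id per (type, state_key) across those groups, and the
# hop count grows with len(chain), which is why new groups are only stored
# as deltas while the chain stays short.
assert state_chain(3, {3: 2, 2: 1}) == [3, 2, 1]
```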
@@ -431,229 +235,6 @@ class StateGroupWorkerStore(

         return event.content.get("canonical_alias")

-    @cached(max_entries=10000, iterable=True)
-    def get_state_group_delta(self, state_group):
-        """Given a state group try to return a previous group and a delta between
-        the old and the new.
-
-        Returns:
-            (prev_group, delta_ids), where both may be None.
-        """
-
-        def _get_state_group_delta_txn(txn):
-            prev_group = self.db.simple_select_one_onecol_txn(
-                txn,
-                table="state_group_edges",
-                keyvalues={"state_group": state_group},
-                retcol="prev_state_group",
-                allow_none=True,
-            )
-
-            if not prev_group:
-                return _GetStateGroupDelta(None, None)
-
-            delta_ids = self.db.simple_select_list_txn(
-                txn,
-                table="state_groups_state",
-                keyvalues={"state_group": state_group},
-                retcols=("type", "state_key", "event_id"),
-            )
-
-            return _GetStateGroupDelta(
-                prev_group,
-                {(row["type"], row["state_key"]): row["event_id"] for row in delta_ids},
-            )
-
-        return self.db.runInteraction(
-            "get_state_group_delta", _get_state_group_delta_txn
-        )
-
-    @defer.inlineCallbacks
-    def get_state_groups_ids(self, _room_id, event_ids):
-        """Get the event IDs of all the state for the state groups for the given events
-
-        Args:
-            _room_id (str): id of the room for these events
-            event_ids (iterable[str]): ids of the events
-
-        Returns:
-            Deferred[dict[int, dict[tuple[str, str], str]]]:
-                dict of state_group_id -> (dict of (type, state_key) -> event id)
-        """
-        if not event_ids:
-            return {}
-
-        event_to_groups = yield self._get_state_group_for_events(event_ids)
-
-        groups = set(itervalues(event_to_groups))
-        group_to_state = yield self._get_state_for_groups(groups)
-
-        return group_to_state
-
-    @defer.inlineCallbacks
-    def get_state_ids_for_group(self, state_group):
-        """Get the event IDs of all the state in the given state group
-
-        Args:
-            state_group (int)
-
-        Returns:
-            Deferred[dict]: Resolves to a map of (type, state_key) -> event_id
-        """
-        group_to_state = yield self._get_state_for_groups((state_group,))
-
-        return group_to_state[state_group]
-
-    @defer.inlineCallbacks
-    def get_state_groups(self, room_id, event_ids):
-        """ Get the state groups for the given list of event_ids
-
-        Returns:
-            Deferred[dict[int, list[EventBase]]]:
-                dict of state_group_id -> list of state events.
-        """
-        if not event_ids:
-            return {}
-
-        group_to_ids = yield self.get_state_groups_ids(room_id, event_ids)
-
-        state_event_map = yield self.get_events(
-            [
-                ev_id
-                for group_ids in itervalues(group_to_ids)
-                for ev_id in itervalues(group_ids)
-            ],
-            get_prev_content=False,
-        )
-
-        return {
-            group: [
-                state_event_map[v]
-                for v in itervalues(event_id_map)
-                if v in state_event_map
-            ]
-            for group, event_id_map in iteritems(group_to_ids)
-        }
-
-    @defer.inlineCallbacks
-    def _get_state_groups_from_groups(self, groups, state_filter):
-        """Returns the state groups for a given set of groups, filtering on
-        types of state events.
-
-        Args:
-            groups(list[int]): list of state group IDs to query
-            state_filter (StateFilter): The state filter used to fetch state
-                from the database.
-        Returns:
-            Deferred[dict[int, dict[tuple[str, str], str]]]:
-                dict of state_group_id -> (dict of (type, state_key) -> event id)
-        """
-        results = {}
-
-        chunks = [groups[i : i + 100] for i in range(0, len(groups), 100)]
-        for chunk in chunks:
-            res = yield self.db.runInteraction(
-                "_get_state_groups_from_groups",
-                self._get_state_groups_from_groups_txn,
-                chunk,
-                state_filter,
-            )
-            results.update(res)
-
-        return results
-
-    @defer.inlineCallbacks
-    def get_state_for_events(self, event_ids, state_filter=StateFilter.all()):
-        """Given a list of event_ids and type tuples, return a list of state
-        dicts for each event.
-
-        Args:
-            event_ids (list[string])
-            state_filter (StateFilter): The state filter used to fetch state
-                from the database.
-
-        Returns:
-            deferred: A dict of (event_id) -> (type, state_key) -> [state_events]
-        """
-        event_to_groups = yield self._get_state_group_for_events(event_ids)
-
-        groups = set(itervalues(event_to_groups))
-        group_to_state = yield self._get_state_for_groups(groups, state_filter)
-
-        state_event_map = yield self.get_events(
-            [ev_id for sd in itervalues(group_to_state) for ev_id in itervalues(sd)],
-            get_prev_content=False,
-        )
-
-        event_to_state = {
-            event_id: {
-                k: state_event_map[v]
-                for k, v in iteritems(group_to_state[group])
-                if v in state_event_map
-            }
-            for event_id, group in iteritems(event_to_groups)
-        }
-
-        return {event: event_to_state[event] for event in event_ids}
-
-    @defer.inlineCallbacks
-    def get_state_ids_for_events(self, event_ids, state_filter=StateFilter.all()):
-        """
-        Get the state dicts corresponding to a list of events, containing the event_ids
-        of the state events (as opposed to the events themselves)
-
-        Args:
-            event_ids(list(str)): events whose state should be returned
-            state_filter (StateFilter): The state filter used to fetch state
-                from the database.
-
-        Returns:
-            A deferred dict from event_id -> (type, state_key) -> event_id
-        """
-        event_to_groups = yield self._get_state_group_for_events(event_ids)
-
-        groups = set(itervalues(event_to_groups))
-        group_to_state = yield self._get_state_for_groups(groups, state_filter)
-
-        event_to_state = {
-            event_id: group_to_state[group]
-            for event_id, group in iteritems(event_to_groups)
-        }
-
-        return {event: event_to_state[event] for event in event_ids}
-
-    @defer.inlineCallbacks
-    def get_state_for_event(self, event_id, state_filter=StateFilter.all()):
-        """
-        Get the state dict corresponding to a particular event
-
-        Args:
-            event_id(str): event whose state should be returned
-            state_filter (StateFilter): The state filter used to fetch state
-                from the database.
-
-        Returns:
-            A deferred dict from (type, state_key) -> state_event
-        """
-        state_map = yield self.get_state_for_events([event_id], state_filter)
-        return state_map[event_id]
-
-    @defer.inlineCallbacks
-    def get_state_ids_for_event(self, event_id, state_filter=StateFilter.all()):
-        """
-        Get the state dict corresponding to a particular event
-
-        Args:
-            event_id(str): event whose state should be returned
-            state_filter (StateFilter): The state filter used to fetch state
-                from the database.
-
-        Returns:
-            A deferred dict from (type, state_key) -> state_event
-        """
-        state_map = yield self.get_state_ids_for_events([event_id], state_filter)
-        return state_map[event_id]
-
     @cached(max_entries=50000)
     def _get_state_group_for_event(self, event_id):
         return self.db.simple_select_one_onecol(
@@ -684,329 +265,6 @@ class StateGroupWorkerStore(

         return {row["event_id"]: row["state_group"] for row in rows}

-    def _get_state_for_group_using_cache(self, cache, group, state_filter):
-        """Checks if group is in cache. See `_get_state_for_groups`
-
-        Args:
-            cache(DictionaryCache): the state group cache to use
-            group(int): The state group to lookup
-            state_filter (StateFilter): The state filter used to fetch state
-                from the database.
-
-        Returns 2-tuple (`state_dict`, `got_all`).
-        `got_all` is a bool indicating if we successfully retrieved all
-        requests state from the cache, if False we need to query the DB for the
-        missing state.
-        """
-        is_all, known_absent, state_dict_ids = cache.get(group)
-
-        if is_all or state_filter.is_full():
-            # Either we have everything or want everything, either way
-            # `is_all` tells us whether we've gotten everything.
-            return state_filter.filter_state(state_dict_ids), is_all
-
-        # tracks whether any of our requested types are missing from the cache
-        missing_types = False
-
-        if state_filter.has_wildcards():
-            # We don't know if we fetched all the state keys for the types in
-            # the filter that are wildcards, so we have to assume that we may
-            # have missed some.
-            missing_types = True
-        else:
-            # There aren't any wild cards, so `concrete_types()` returns the
-            # complete list of event types we're wanting.
-            for key in state_filter.concrete_types():
-                if key not in state_dict_ids and key not in known_absent:
-                    missing_types = True
-                    break
-
-        return state_filter.filter_state(state_dict_ids), not missing_types
-
-    @defer.inlineCallbacks
-    def _get_state_for_groups(self, groups, state_filter=StateFilter.all()):
-        """Gets the state at each of a list of state groups, optionally
-        filtering by type/state_key
-
-        Args:
-            groups (iterable[int]): list of state groups for which we want
-                to get the state.
-            state_filter (StateFilter): The state filter used to fetch state
-                from the database.
-        Returns:
-            Deferred[dict[int, dict[tuple[str, str], str]]]:
-                dict of state_group_id -> (dict of (type, state_key) -> event id)
-        """
-
-        member_filter, non_member_filter = state_filter.get_member_split()
-
-        # Now we look them up in the member and non-member caches
-        (
-            non_member_state,
-            incomplete_groups_nm,
-        ) = yield self._get_state_for_groups_using_cache(
-            groups, self._state_group_cache, state_filter=non_member_filter
-        )
-
-        (
-            member_state,
-            incomplete_groups_m,
-        ) = yield self._get_state_for_groups_using_cache(
-            groups, self._state_group_members_cache, state_filter=member_filter
-        )
-
-        state = dict(non_member_state)
-        for group in groups:
-            state[group].update(member_state[group])
-
-        # Now fetch any missing groups from the database
-
-        incomplete_groups = incomplete_groups_m | incomplete_groups_nm
-
-        if not incomplete_groups:
-            return state
-
-        cache_sequence_nm = self._state_group_cache.sequence
-        cache_sequence_m = self._state_group_members_cache.sequence
-
-        # Help the cache hit ratio by expanding the filter a bit
-        db_state_filter = state_filter.return_expanded()
-
-        group_to_state_dict = yield self._get_state_groups_from_groups(
-            list(incomplete_groups), state_filter=db_state_filter
-        )
-
-        # Now lets update the caches
-        self._insert_into_cache(
-            group_to_state_dict,
-            db_state_filter,
-            cache_seq_num_members=cache_sequence_m,
-            cache_seq_num_non_members=cache_sequence_nm,
-        )
-
-        # And finally update the result dict, by filtering out any extra
-        # stuff we pulled out of the database.
-        for group, group_state_dict in iteritems(group_to_state_dict):
-            # We just replace any existing entries, as we will have loaded
-            # everything we need from the database anyway.
-            state[group] = state_filter.filter_state(group_state_dict)
-
-        return state
-
-    def _get_state_for_groups_using_cache(self, groups, cache, state_filter):
-        """Gets the state at each of a list of state groups, optionally
-        filtering by type/state_key, querying from a specific cache.
-
-        Args:
-            groups (iterable[int]): list of state groups for which we want
-                to get the state.
-            cache (DictionaryCache): the cache of group ids to state dicts which
-                we will pass through - either the normal state cache or the specific
-                members state cache.
-            state_filter (StateFilter): The state filter used to fetch state
-                from the database.
-
-        Returns:
-            tuple[dict[int, dict[tuple[str, str], str]], set[int]]: Tuple of
-                dict of state_group_id -> (dict of (type, state_key) -> event id)
-                of entries in the cache, and the state group ids either missing
-                from the cache or incomplete.
-        """
-        results = {}
-        incomplete_groups = set()
-        for group in set(groups):
-            state_dict_ids, got_all = self._get_state_for_group_using_cache(
-                cache, group, state_filter
-            )
-            results[group] = state_dict_ids
-
-            if not got_all:
-                incomplete_groups.add(group)
-
-        return results, incomplete_groups
-
-    def _insert_into_cache(
-        self,
-        group_to_state_dict,
-        state_filter,
-        cache_seq_num_members,
-        cache_seq_num_non_members,
-    ):
-        """Inserts results from querying the database into the relevant cache.
-
-        Args:
-            group_to_state_dict (dict): The new entries pulled from database.
-                Map from state group to state dict
-            state_filter (StateFilter): The state filter used to fetch state
-                from the database.
-            cache_seq_num_members (int): Sequence number of member cache since
-                last lookup in cache
-            cache_seq_num_non_members (int): Sequence number of member cache since
-                last lookup in cache
-        """
-
-        # We need to work out which types we've fetched from the DB for the
-        # member vs non-member caches. This should be as accurate as possible,
-        # but can be an underestimate (e.g. when we have wild cards)
-
-        member_filter, non_member_filter = state_filter.get_member_split()
-        if member_filter.is_full():
-            # We fetched all member events
-            member_types = None
-        else:
-            # `concrete_types()` will only return a subset when there are wild
-            # cards in the filter, but that's fine.
-            member_types = member_filter.concrete_types()
-
-        if non_member_filter.is_full():
-            # We fetched all non member events
-            non_member_types = None
-        else:
-            non_member_types = non_member_filter.concrete_types()
-
-        for group, group_state_dict in iteritems(group_to_state_dict):
-            state_dict_members = {}
-            state_dict_non_members = {}
-
-            for k, v in iteritems(group_state_dict):
-                if k[0] == EventTypes.Member:
-                    state_dict_members[k] = v
-                else:
-                    state_dict_non_members[k] = v
-
-            self._state_group_members_cache.update(
-                cache_seq_num_members,
-                key=group,
-                value=state_dict_members,
-                fetched_keys=member_types,
-            )
-
-            self._state_group_cache.update(
-                cache_seq_num_non_members,
-                key=group,
-                value=state_dict_non_members,
-                fetched_keys=non_member_types,
-            )
-
-    def store_state_group(
-        self, event_id, room_id, prev_group, delta_ids, current_state_ids
-    ):
-        """Store a new set of state, returning a newly assigned state group.
-
-        Args:
-            event_id (str): The event ID for which the state was calculated
-            room_id (str)
-            prev_group (int|None): A previous state group for the room, optional.
-            delta_ids (dict|None): The delta between state at `prev_group` and
-                `current_state_ids`, if `prev_group` was given. Same format as
-                `current_state_ids`.
-            current_state_ids (dict): The state to store. Map of (type, state_key)
-                to event_id.
-
-        Returns:
-            Deferred[int]: The state group ID
-        """
-
-        def _store_state_group_txn(txn):
-            if current_state_ids is None:
-                # AFAIK, this can never happen
-                raise Exception("current_state_ids cannot be None")
-
-            state_group = self.database_engine.get_next_state_group_id(txn)
-
-            self.db.simple_insert_txn(
-                txn,
-                table="state_groups",
-                values={"id": state_group, "room_id": room_id, "event_id": event_id},
-            )
-
-            # We persist as a delta if we can, while also ensuring the chain
-            # of deltas isn't tooo long, as otherwise read performance degrades.
-            if prev_group:
-                is_in_db = self.db.simple_select_one_onecol_txn(
-                    txn,
-                    table="state_groups",
-                    keyvalues={"id": prev_group},
-                    retcol="id",
-                    allow_none=True,
-                )
-                if not is_in_db:
-                    raise Exception(
-                        "Trying to persist state with unpersisted prev_group: %r"
-                        % (prev_group,)
-                    )
-
-                potential_hops = self._count_state_group_hops_txn(txn, prev_group)
-            if prev_group and potential_hops < MAX_STATE_DELTA_HOPS:
-                self.db.simple_insert_txn(
-                    txn,
-                    table="state_group_edges",
-                    values={"state_group": state_group, "prev_state_group": prev_group},
-                )
-
-                self.db.simple_insert_many_txn(
-                    txn,
-                    table="state_groups_state",
-                    values=[
-                        {
-                            "state_group": state_group,
-                            "room_id": room_id,
-                            "type": key[0],
-                            "state_key": key[1],
-                            "event_id": state_id,
-                        }
-                        for key, state_id in iteritems(delta_ids)
-                    ],
-                )
-            else:
-                self.db.simple_insert_many_txn(
-                    txn,
-                    table="state_groups_state",
-                    values=[
-                        {
-                            "state_group": state_group,
-                            "room_id": room_id,
-                            "type": key[0],
-                            "state_key": key[1],
-                            "event_id": state_id,
-                        }
-                        for key, state_id in iteritems(current_state_ids)
-                    ],
-                )
-
-            # Prefill the state group caches with this group.
-            # It's fine to use the sequence like this as the state group map
-            # is immutable. (If the map wasn't immutable then this prefill could
-            # race with another update)
-            current_member_state_ids = {
-                s: ev
-                for (s, ev) in iteritems(current_state_ids)
-                if s[0] == EventTypes.Member
-            }
-            txn.call_after(
-                self._state_group_members_cache.update,
-                self._state_group_members_cache.sequence,
-                key=state_group,
-                value=dict(current_member_state_ids),
-            )
-
-            current_non_member_state_ids = {
-                s: ev
-                for (s, ev) in iteritems(current_state_ids)
-                if s[0] != EventTypes.Member
-            }
-            txn.call_after(
-                self._state_group_cache.update,
-                self._state_group_cache.sequence,
-                key=state_group,
-                value=dict(current_non_member_state_ids),
-            )
-
-            return state_group
-
-        return self.db.runInteraction("store_state_group", _store_state_group_txn)
-
     @defer.inlineCallbacks
     def get_referenced_state_groups(self, state_groups):
         """Check if the state groups are referenced by events.
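The `_insert_into_cache` and `store_state_group` code removed above both partition state by membership, following the two-cache design described in the comment deleted earlier in this file. A condensed, stand-alone sketch of that split; `MEMBER` stands in for Synapse's `EventTypes.Member` constant:

```python
MEMBER = "m.room.member"  # EventTypes.Member in synapse


def split_state(state_dict):
    """Split a (type, state_key) -> event_id map into the two cache halves."""
    members, non_members = {}, {}
    for key, event_id in state_dict.items():
        (members if key[0] == MEMBER else non_members)[key] = event_id
    return members, non_members


members, non_members = split_state(
    {("m.room.member", "@alice:example.com"): "$m1", ("m.room.name", ""): "$n1"}
)
assert members == {("m.room.member", "@alice:example.com"): "$m1"}
assert non_members == {("m.room.name", ""): "$n1"}
```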
@@ -1031,22 +289,14 @@ class StateGroupWorkerStore(
         return set(row["state_group"] for row in rows)


-class StateBackgroundUpdateStore(StateGroupBackgroundUpdateStore):
+class MainStateBackgroundUpdateStore(SQLBaseStore):

-    STATE_GROUP_DEDUPLICATION_UPDATE_NAME = "state_group_state_deduplication"
-    STATE_GROUP_INDEX_UPDATE_NAME = "state_group_state_type_index"
     CURRENT_STATE_INDEX_UPDATE_NAME = "current_state_members_idx"
     EVENT_STATE_GROUP_INDEX_UPDATE_NAME = "event_to_state_groups_sg_index"

     def __init__(self, database: Database, db_conn, hs):
-        super(StateBackgroundUpdateStore, self).__init__(database, db_conn, hs)
-        self.db.updates.register_background_update_handler(
-            self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME,
-            self._background_deduplicate_state,
-        )
-        self.db.updates.register_background_update_handler(
-            self.STATE_GROUP_INDEX_UPDATE_NAME, self._background_index_state
-        )
+        super(MainStateBackgroundUpdateStore, self).__init__(database, db_conn, hs)
+
         self.db.updates.register_background_index_update(
             self.CURRENT_STATE_INDEX_UPDATE_NAME,
             index_name="current_state_events_member_index",
@@ -1061,181 +311,8 @@ class StateBackgroundUpdateStore(StateGroupBackgroundUpdateStore):
             columns=["state_group"],
         )

-    @defer.inlineCallbacks
-    def _background_deduplicate_state(self, progress, batch_size):
-        """This background update will slowly deduplicate state by reencoding
-        them as deltas.
-        """
-        last_state_group = progress.get("last_state_group", 0)
-        rows_inserted = progress.get("rows_inserted", 0)
-        max_group = progress.get("max_group", None)

-        BATCH_SIZE_SCALE_FACTOR = 100
+class StateStore(StateGroupWorkerStore, MainStateBackgroundUpdateStore):

-        batch_size = max(1, int(batch_size / BATCH_SIZE_SCALE_FACTOR))
-
-        if max_group is None:
-            rows = yield self.db.execute(
-                "_background_deduplicate_state",
-                None,
-                "SELECT coalesce(max(id), 0) FROM state_groups",
-            )
-            max_group = rows[0][0]
-
-        def reindex_txn(txn):
-            new_last_state_group = last_state_group
-            for count in range(batch_size):
-                txn.execute(
-                    "SELECT id, room_id FROM state_groups"
-                    " WHERE ? < id AND id <= ?"
-                    " ORDER BY id ASC"
-                    " LIMIT 1",
-                    (new_last_state_group, max_group),
-                )
-                row = txn.fetchone()
-                if row:
-                    state_group, room_id = row
-
-                if not row or not state_group:
-                    return True, count
-
-                txn.execute(
-                    "SELECT state_group FROM state_group_edges"
-                    " WHERE state_group = ?",
-                    (state_group,),
-                )
-
-                # If we reach a point where we've already started inserting
-                # edges we should stop.
-                if txn.fetchall():
-                    return True, count
-
-                txn.execute(
-                    "SELECT coalesce(max(id), 0) FROM state_groups"
-                    " WHERE id < ? AND room_id = ?",
-                    (state_group, room_id),
-                )
-                (prev_group,) = txn.fetchone()
-                new_last_state_group = state_group
-
-                if prev_group:
-                    potential_hops = self._count_state_group_hops_txn(txn, prev_group)
-                    if potential_hops >= MAX_STATE_DELTA_HOPS:
-                        # We want to ensure chains are at most this long,#
-                        # otherwise read performance degrades.
-                        continue
-
-                    prev_state = self._get_state_groups_from_groups_txn(
-                        txn, [prev_group]
-                    )
-                    prev_state = prev_state[prev_group]
-
-                    curr_state = self._get_state_groups_from_groups_txn(
-                        txn, [state_group]
-                    )
-                    curr_state = curr_state[state_group]
-
-                    if not set(prev_state.keys()) - set(curr_state.keys()):
-                        # We can only do a delta if the current has a strict super set
-                        # of keys
-
-                        delta_state = {
-                            key: value
-                            for key, value in iteritems(curr_state)
-                            if prev_state.get(key, None) != value
-                        }
-
-                        self.db.simple_delete_txn(
-                            txn,
-                            table="state_group_edges",
-                            keyvalues={"state_group": state_group},
-                        )
-
-                        self.db.simple_insert_txn(
-                            txn,
-                            table="state_group_edges",
-                            values={
-                                "state_group": state_group,
-                                "prev_state_group": prev_group,
-                            },
-                        )
-
-                        self.db.simple_delete_txn(
-                            txn,
-                            table="state_groups_state",
-                            keyvalues={"state_group": state_group},
-                        )
-
-                        self.db.simple_insert_many_txn(
-                            txn,
-                            table="state_groups_state",
-                            values=[
-                                {
-                                    "state_group": state_group,
-                                    "room_id": room_id,
-                                    "type": key[0],
-                                    "state_key": key[1],
-                                    "event_id": state_id,
-                                }
-                                for key, state_id in iteritems(delta_state)
-                            ],
-                        )
-
-            progress = {
-                "last_state_group": state_group,
-                "rows_inserted": rows_inserted + batch_size,
-                "max_group": max_group,
-            }
-
-            self.db.updates._background_update_progress_txn(
-                txn, self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME, progress
-            )
-
-            return False, batch_size
-
-        finished, result = yield self.db.runInteraction(
-            self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME, reindex_txn
-        )
-
-        if finished:
-            yield self.db.updates._end_background_update(
-                self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME
-            )
-
-        return result * BATCH_SIZE_SCALE_FACTOR
-
-    @defer.inlineCallbacks
-    def _background_index_state(self, progress, batch_size):
-        def reindex_txn(conn):
-            conn.rollback()
-            if isinstance(self.database_engine, PostgresEngine):
-                # postgres insists on autocommit for the index
-                conn.set_session(autocommit=True)
-                try:
-                    txn = conn.cursor()
-                    txn.execute(
-                        "CREATE INDEX CONCURRENTLY state_groups_state_type_idx"
-                        " ON state_groups_state(state_group, type, state_key)"
-                    )
-                    txn.execute("DROP INDEX IF EXISTS state_groups_state_id")
-                finally:
-                    conn.set_session(autocommit=False)
|
|
||||||
else:
|
|
||||||
txn = conn.cursor()
|
|
||||||
txn.execute(
|
|
||||||
"CREATE INDEX state_groups_state_type_idx"
|
|
||||||
" ON state_groups_state(state_group, type, state_key)"
|
|
||||||
)
|
|
||||||
txn.execute("DROP INDEX IF EXISTS state_groups_state_id")
|
|
||||||
|
|
||||||
yield self.db.runWithConnection(reindex_txn)
|
|
||||||
|
|
||||||
yield self.db.updates._end_background_update(self.STATE_GROUP_INDEX_UPDATE_NAME)
|
|
||||||
|
|
||||||
return 1
|
|
||||||
|
|
||||||
|
|
||||||
class StateStore(StateGroupWorkerStore, StateBackgroundUpdateStore):
|
|
||||||
""" Keeps track of the state at a given event.
|
""" Keeps track of the state at a given event.
|
||||||
|
|
||||||
This is done by the concept of `state groups`. Every event is a assigned
|
This is done by the concept of `state groups`. Every event is a assigned
|
||||||
|
|
synapse/storage/data_stores/state/__init__.py (new file, 16 lines)

@@ -0,0 +1,16 @@
# -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from synapse.storage.data_stores.state.store import StateGroupDataStore  # noqa: F401
synapse/storage/data_stores/state/bg_updates.py (new file, 374 lines)

@@ -0,0 +1,374 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from six import iteritems

from twisted.internet import defer

from synapse.storage._base import SQLBaseStore
from synapse.storage.database import Database
from synapse.storage.engines import PostgresEngine
from synapse.storage.state import StateFilter

logger = logging.getLogger(__name__)


MAX_STATE_DELTA_HOPS = 100


class StateGroupBackgroundUpdateStore(SQLBaseStore):
    """Defines functions related to state groups needed to run the state
    background updates.
    """

    def _count_state_group_hops_txn(self, txn, state_group):
        """Given a state group, count how many hops there are in the tree.

        This is used to ensure the delta chains don't get too long.
        """
        if isinstance(self.database_engine, PostgresEngine):
            sql = """
                WITH RECURSIVE state(state_group) AS (
                    VALUES(?::bigint)
                    UNION ALL
                    SELECT prev_state_group FROM state_group_edges e, state s
                    WHERE s.state_group = e.state_group
                )
                SELECT count(*) FROM state;
            """

            txn.execute(sql, (state_group,))
            row = txn.fetchone()
            if row and row[0]:
                return row[0]
            else:
                return 0
        else:
            # We don't use WITH RECURSIVE on sqlite3 as there are distributions
            # that ship with an sqlite3 version that doesn't support it (e.g. wheezy)
            next_group = state_group
            count = 0

            while next_group:
                next_group = self.db.simple_select_one_onecol_txn(
                    txn,
                    table="state_group_edges",
                    keyvalues={"state_group": next_group},
                    retcol="prev_state_group",
                    allow_none=True,
                )
                if next_group:
                    count += 1

            return count
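To make the hop count concrete, here is a toy, self-contained sketch of what _count_state_group_hops_txn measures. It is not Synapse code and the table contents are invented: each row in state_group_edges points a group at its predecessor, and the SQLite fallback above simply follows those links one at a time.

    # Toy illustration, standard library only.
    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute(
        "CREATE TABLE state_group_edges (state_group BIGINT, prev_state_group BIGINT)"
    )
    # A chain of deltas: 4 -> 3 -> 2 -> 1 (group 1 stores its state in full)
    conn.executemany(
        "INSERT INTO state_group_edges VALUES (?, ?)", [(4, 3), (3, 2), (2, 1)]
    )

    def count_hops(conn, state_group):
        """Mirror of the sqlite fallback: follow prev_state_group links."""
        count = 0
        next_group = state_group
        while next_group is not None:
            row = conn.execute(
                "SELECT prev_state_group FROM state_group_edges WHERE state_group = ?",
                (next_group,),
            ).fetchone()
            next_group = row[0] if row else None
            if next_group is not None:
                count += 1
        return count

    print(count_hops(conn, 4))  # 3 hops back to the root group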
    def _get_state_groups_from_groups_txn(
        self, txn, groups, state_filter=StateFilter.all()
    ):
        results = {group: {} for group in groups}

        where_clause, where_args = state_filter.make_sql_filter_clause()

        # Unless the filter clause is empty, we're going to append it after an
        # existing where clause
        if where_clause:
            where_clause = " AND (%s)" % (where_clause,)

        if isinstance(self.database_engine, PostgresEngine):
            # Temporarily disable sequential scans in this transaction. This is
            # a temporary hack until we can add the right indices in place
            txn.execute("SET LOCAL enable_seqscan=off")

            # The below query walks the state_group tree so that the "state"
            # table includes all state_groups in the tree. It then joins
            # against `state_groups_state` to fetch the latest state.
            # It assumes that previous state groups are always numerically
            # lesser.
            # The PARTITION is used to get the event_id in the greatest state
            # group for the given type, state_key.
            # This may return multiple rows per (type, state_key), but last_value
            # should be the same.
            sql = """
                WITH RECURSIVE state(state_group) AS (
                    VALUES(?::bigint)
                    UNION ALL
                    SELECT prev_state_group FROM state_group_edges e, state s
                    WHERE s.state_group = e.state_group
                )
                SELECT DISTINCT type, state_key, last_value(event_id) OVER (
                    PARTITION BY type, state_key ORDER BY state_group ASC
                    ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
                ) AS event_id FROM state_groups_state
                WHERE state_group IN (
                    SELECT state_group FROM state
                )
            """

            for group in groups:
                args = [group]
                args.extend(where_args)

                txn.execute(sql + where_clause, args)
                for row in txn:
                    typ, state_key, event_id = row
                    key = (typ, state_key)
                    results[group][key] = event_id
        else:
            max_entries_returned = state_filter.max_entries_returned()

            # We don't use WITH RECURSIVE on sqlite3 as there are distributions
            # that ship with an sqlite3 version that doesn't support it (e.g. wheezy)
            for group in groups:
                next_group = group

                while next_group:
                    # We did this before by getting the list of group ids, and
                    # then passing that list to sqlite to get latest event for
                    # each (type, state_key). However, that was terribly slow
                    # without the right indices (which we can't add until
                    # after we finish deduping state, which requires this func)
                    args = [next_group]
                    args.extend(where_args)

                    txn.execute(
                        "SELECT type, state_key, event_id FROM state_groups_state"
                        " WHERE state_group = ? " + where_clause,
                        args,
                    )
                    results[group].update(
                        ((typ, state_key), event_id)
                        for typ, state_key, event_id in txn
                        if (typ, state_key) not in results[group]
                    )

                    # If the number of entries in the (type,state_key)->event_id dict
                    # matches the number of (type,state_keys) types we were searching
                    # for, then we must have found them all, so no need to go walk
                    # further down the tree... UNLESS our types filter contained
                    # wildcards (i.e. Nones) in which case we have to do an exhaustive
                    # search
                    if (
                        max_entries_returned is not None
                        and len(results[group]) == max_entries_returned
                    ):
                        break

                    next_group = self.db.simple_select_one_onecol_txn(
                        txn,
                        table="state_group_edges",
                        keyvalues={"state_group": next_group},
                        retcol="prev_state_group",
                        allow_none=True,
                    )

        return results
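The lookup above is easier to follow with a miniature model. The sketch below is plain Python with invented ids, not Synapse code; it mirrors the SQLite branch of _get_state_groups_from_groups_txn: walk a group's chain of predecessors and keep the first event seen for each (type, state_key), so newer groups shadow older ones.

    groups = {1: {("m.room.create", ""): "$create"},          # root: full state
              2: {("m.room.member", "@a:hs"): "$join_a"},     # delta on top of 1
              3: {("m.room.member", "@a:hs"): "$leave_a"}}    # delta on top of 2
    edges = {3: 2, 2: 1}  # state_group -> prev_state_group

    def resolve(group):
        result = {}
        while group is not None:
            for key, event_id in groups[group].items():
                result.setdefault(key, event_id)  # newer groups win
            group = edges.get(group)
        return result

    assert resolve(3) == {("m.room.create", ""): "$create",
                          ("m.room.member", "@a:hs"): "$leave_a"}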

class StateBackgroundUpdateStore(StateGroupBackgroundUpdateStore):

    STATE_GROUP_DEDUPLICATION_UPDATE_NAME = "state_group_state_deduplication"
    STATE_GROUP_INDEX_UPDATE_NAME = "state_group_state_type_index"
    STATE_GROUPS_ROOM_INDEX_UPDATE_NAME = "state_groups_room_id_idx"

    def __init__(self, database: Database, db_conn, hs):
        super(StateBackgroundUpdateStore, self).__init__(database, db_conn, hs)
        self.db.updates.register_background_update_handler(
            self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME,
            self._background_deduplicate_state,
        )
        self.db.updates.register_background_update_handler(
            self.STATE_GROUP_INDEX_UPDATE_NAME, self._background_index_state
        )
        self.db.updates.register_background_index_update(
            self.STATE_GROUPS_ROOM_INDEX_UPDATE_NAME,
            index_name="state_groups_room_id_idx",
            table="state_groups",
            columns=["room_id"],
        )

    @defer.inlineCallbacks
    def _background_deduplicate_state(self, progress, batch_size):
        """This background update will slowly deduplicate state by re-encoding
        it as deltas.
        """
        last_state_group = progress.get("last_state_group", 0)
        rows_inserted = progress.get("rows_inserted", 0)
        max_group = progress.get("max_group", None)

        BATCH_SIZE_SCALE_FACTOR = 100

        batch_size = max(1, int(batch_size / BATCH_SIZE_SCALE_FACTOR))

        if max_group is None:
            rows = yield self.db.execute(
                "_background_deduplicate_state",
                None,
                "SELECT coalesce(max(id), 0) FROM state_groups",
            )
            max_group = rows[0][0]

        def reindex_txn(txn):
            new_last_state_group = last_state_group
            for count in range(batch_size):
                txn.execute(
                    "SELECT id, room_id FROM state_groups"
                    " WHERE ? < id AND id <= ?"
                    " ORDER BY id ASC"
                    " LIMIT 1",
                    (new_last_state_group, max_group),
                )
                row = txn.fetchone()
                if row:
                    state_group, room_id = row

                if not row or not state_group:
                    return True, count

                txn.execute(
                    "SELECT state_group FROM state_group_edges"
                    " WHERE state_group = ?",
                    (state_group,),
                )

                # If we reach a point where we've already started inserting
                # edges we should stop.
                if txn.fetchall():
                    return True, count

                txn.execute(
                    "SELECT coalesce(max(id), 0) FROM state_groups"
                    " WHERE id < ? AND room_id = ?",
                    (state_group, room_id),
                )
                (prev_group,) = txn.fetchone()
                new_last_state_group = state_group

                if prev_group:
                    potential_hops = self._count_state_group_hops_txn(txn, prev_group)
                    if potential_hops >= MAX_STATE_DELTA_HOPS:
                        # We want to ensure chains are at most this long,
                        # otherwise read performance degrades.
                        continue

                    prev_state = self._get_state_groups_from_groups_txn(
                        txn, [prev_group]
                    )
                    prev_state = prev_state[prev_group]

                    curr_state = self._get_state_groups_from_groups_txn(
                        txn, [state_group]
                    )
                    curr_state = curr_state[state_group]

                    if not set(prev_state.keys()) - set(curr_state.keys()):
                        # We can only do a delta if the current has a strict super set
                        # of keys

                        delta_state = {
                            key: value
                            for key, value in iteritems(curr_state)
                            if prev_state.get(key, None) != value
                        }

                        self.db.simple_delete_txn(
                            txn,
                            table="state_group_edges",
                            keyvalues={"state_group": state_group},
                        )

                        self.db.simple_insert_txn(
                            txn,
                            table="state_group_edges",
                            values={
                                "state_group": state_group,
                                "prev_state_group": prev_group,
                            },
                        )

                        self.db.simple_delete_txn(
                            txn,
                            table="state_groups_state",
                            keyvalues={"state_group": state_group},
                        )

                        self.db.simple_insert_many_txn(
                            txn,
                            table="state_groups_state",
                            values=[
                                {
                                    "state_group": state_group,
                                    "room_id": room_id,
                                    "type": key[0],
                                    "state_key": key[1],
                                    "event_id": state_id,
                                }
                                for key, state_id in iteritems(delta_state)
                            ],
                        )

            progress = {
                "last_state_group": state_group,
                "rows_inserted": rows_inserted + batch_size,
                "max_group": max_group,
            }

            self.db.updates._background_update_progress_txn(
                txn, self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME, progress
            )

            return False, batch_size

        finished, result = yield self.db.runInteraction(
            self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME, reindex_txn
        )

        if finished:
            yield self.db.updates._end_background_update(
                self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME
            )

        return result * BATCH_SIZE_SCALE_FACTOR

    @defer.inlineCallbacks
    def _background_index_state(self, progress, batch_size):
        def reindex_txn(conn):
            conn.rollback()
            if isinstance(self.database_engine, PostgresEngine):
                # postgres insists on autocommit for the index
                conn.set_session(autocommit=True)
                try:
                    txn = conn.cursor()
                    txn.execute(
                        "CREATE INDEX CONCURRENTLY state_groups_state_type_idx"
                        " ON state_groups_state(state_group, type, state_key)"
                    )
                    txn.execute("DROP INDEX IF EXISTS state_groups_state_id")
                finally:
                    conn.set_session(autocommit=False)
            else:
                txn = conn.cursor()
                txn.execute(
                    "CREATE INDEX state_groups_state_type_idx"
                    " ON state_groups_state(state_group, type, state_key)"
                )
                txn.execute("DROP INDEX IF EXISTS state_groups_state_id")

        yield self.db.runWithConnection(reindex_txn)

        yield self.db.updates._end_background_update(self.STATE_GROUP_INDEX_UPDATE_NAME)

        return 1
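The heart of the deduplication pass is the superset check followed by the delta computation. A minimal stand-alone rendering of just that step (invented event ids, no database, not Synapse code):

    prev_state = {("m.room.create", ""): "$create",
                  ("m.room.name", ""): "$name1"}
    curr_state = {("m.room.create", ""): "$create",
                  ("m.room.name", ""): "$name2",
                  ("m.room.topic", ""): "$topic"}

    # Nothing may be removed going prev -> curr, or a delta can't express it.
    if not set(prev_state) - set(curr_state):
        delta_state = {
            k: v for k, v in curr_state.items() if prev_state.get(k) != v
        }
        # Only the changed name and the new topic are stored for this group:
        print(delta_state)
        # {('m.room.name', ''): '$name2', ('m.room.topic', ''): '$topic'}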
New schema file (path not preserved in this rendering):

@@ -0,0 +1,19 @@
/* Copyright 2016 OpenMarket Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */


-- The following indices are redundant, other indices are equivalent or
-- supersets
DROP INDEX IF EXISTS state_groups_id; -- Duplicate of PRIMARY KEY
New schema file (path not preserved in this rendering):

@@ -0,0 +1,17 @@
/* Copyright 2019 The Matrix.org Foundation C.I.C.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

INSERT INTO background_updates (update_name, progress_json) VALUES
    ('state_groups_room_id_idx', '{}');
New schema file (path not preserved in this rendering):

@@ -0,0 +1,37 @@
/* Copyright 2019 The Matrix.org Foundation C.I.C
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

CREATE TABLE state_groups (
    id BIGINT PRIMARY KEY,
    room_id TEXT NOT NULL,
    event_id TEXT NOT NULL
);

CREATE TABLE state_groups_state (
    state_group BIGINT NOT NULL,
    room_id TEXT NOT NULL,
    type TEXT NOT NULL,
    state_key TEXT NOT NULL,
    event_id TEXT NOT NULL
);

CREATE TABLE state_group_edges (
    state_group BIGINT NOT NULL,
    prev_state_group BIGINT NOT NULL
);

CREATE INDEX state_group_edges_idx ON state_group_edges (state_group);
CREATE INDEX state_group_edges_prev_idx ON state_group_edges (prev_state_group);
CREATE INDEX state_groups_state_type_idx ON state_groups_state (state_group, type, state_key);
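As a sanity check, this schema can be exercised directly with the recursive query used on PostgreSQL. The sketch below uses Python's bundled sqlite3 (recent SQLite does support WITH RECURSIVE, unlike the old distributions the store code guards against), inlines a literal where the Postgres SQL uses the Postgres-specific ?::bigint cast, and invents the room and event ids:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.executescript("""
        CREATE TABLE state_groups (id BIGINT PRIMARY KEY, room_id TEXT NOT NULL,
            event_id TEXT NOT NULL);
        CREATE TABLE state_groups_state (state_group BIGINT NOT NULL,
            room_id TEXT NOT NULL, type TEXT NOT NULL, state_key TEXT NOT NULL,
            event_id TEXT NOT NULL);
        CREATE TABLE state_group_edges (state_group BIGINT NOT NULL,
            prev_state_group BIGINT NOT NULL);
    """)
    conn.execute("INSERT INTO state_groups VALUES (1, '!room', '$create')")
    conn.execute("INSERT INTO state_groups VALUES (2, '!room', '$join')")
    conn.execute(
        "INSERT INTO state_groups_state VALUES (1, '!room', 'm.room.create', '', '$create')"
    )
    conn.execute(
        "INSERT INTO state_groups_state VALUES (2, '!room', 'm.room.member', '@a:hs', '$join')"
    )
    conn.execute("INSERT INTO state_group_edges VALUES (2, 1)")

    rows = conn.execute("""
        WITH RECURSIVE state(state_group) AS (
            VALUES(2)
            UNION ALL
            SELECT prev_state_group FROM state_group_edges e, state s
            WHERE s.state_group = e.state_group
        )
        SELECT type, state_key, event_id FROM state_groups_state
        WHERE state_group IN (SELECT state_group FROM state)
    """).fetchall()
    print(rows)  # rows for both the create event and the member event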
New schema file (path not preserved in this rendering):

@@ -0,0 +1,21 @@
/* Copyright 2019 The Matrix.org Foundation C.I.C
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

CREATE SEQUENCE state_group_id_seq
    START WITH 1
    INCREMENT BY 1
    NO MINVALUE
    NO MAXVALUE
    CACHE 1;
synapse/storage/data_stores/state/store.py (new file, 640 lines)

@@ -0,0 +1,640 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
from collections import namedtuple

from six import iteritems
from six.moves import range

from twisted.internet import defer

from synapse.api.constants import EventTypes
from synapse.storage._base import SQLBaseStore
from synapse.storage.data_stores.state.bg_updates import StateBackgroundUpdateStore
from synapse.storage.database import Database
from synapse.storage.state import StateFilter
from synapse.util.caches import get_cache_factor_for
from synapse.util.caches.descriptors import cached
from synapse.util.caches.dictionary_cache import DictionaryCache

logger = logging.getLogger(__name__)


MAX_STATE_DELTA_HOPS = 100


class _GetStateGroupDelta(
    namedtuple("_GetStateGroupDelta", ("prev_group", "delta_ids"))
):
    """Return type of get_state_group_delta that implements __len__, which lets
    us use the iterable flag when caching
    """

    __slots__ = []

    def __len__(self):
        return len(self.delta_ids) if self.delta_ids else 0
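A note on the iterable flag used with @cached just below: as I read the cache helpers (treat this as an assumption, not something the diff states), iterable=True makes the cache size an entry by len() rather than counting it as one slot, so a delta with many rows costs proportionally more cache space; that is why the namedtuple grows a __len__. Demonstrating the behaviour of the return type alone, re-declared here so the snippet is self-contained:

    from collections import namedtuple

    class _GetStateGroupDelta(
        namedtuple("_GetStateGroupDelta", ("prev_group", "delta_ids"))
    ):
        __slots__ = []

        def __len__(self):
            return len(self.delta_ids) if self.delta_ids else 0

    # One delta row counts as length 1; the "no previous group" case as 0.
    print(len(_GetStateGroupDelta(41, {("m.room.member", "@a:hs"): "$join"})))  # 1
    print(len(_GetStateGroupDelta(None, None)))                                # 0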
class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
    """A data store for fetching/storing state groups.
    """

    def __init__(self, database: Database, db_conn, hs):
        super(StateGroupDataStore, self).__init__(database, db_conn, hs)

        # Originally the state store used a single DictionaryCache to cache the
        # event IDs for the state types in a given state group to avoid hammering
        # on the state_group* tables.
        #
        # The point of using a DictionaryCache is that it can cache a subset
        # of the state events for a given state group (i.e. a subset of the keys for a
        # given dict which is an entry in the cache for a given state group ID).
        #
        # However, this poses problems when performing complicated queries
        # on the store - for instance: "give me all the state for this group, but
        # limit members to this subset of users", as DictionaryCache's API isn't
        # rich enough to say "please cache any of these fields, apart from this subset".
        # This is problematic when lazy loading members, which requires this behaviour,
        # as without it the cache has no choice but to speculatively load all
        # state events for the group, which negates the efficiency being sought.
        #
        # Rather than overcomplicating DictionaryCache's API, we instead split the
        # state_group_cache into two halves - one for tracking non-member events,
        # and the other for tracking member_events. This means that lazy loading
        # queries can be made in a cache-friendly manner by querying both caches
        # separately and then merging the result. So for the example above, you
        # would query the members cache for a specific subset of state keys
        # (which DictionaryCache will handle efficiently and fine) and the non-members
        # cache for all state (which DictionaryCache will similarly handle fine)
        # and then just merge the results together.
        #
        # We size the non-members cache to be smaller than the members cache as the
        # vast majority of state in Matrix (today) is member events.

        self._state_group_cache = DictionaryCache(
            "*stateGroupCache*",
            # TODO: this hasn't been tuned yet
            50000 * get_cache_factor_for("stateGroupCache"),
        )
        self._state_group_members_cache = DictionaryCache(
            "*stateGroupMembersCache*",
            500000 * get_cache_factor_for("stateGroupMembersCache"),
        )
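The comment block above is the crux of the design. Here is a plain-dict sketch of the same idea (no caches, invented ids, not Synapse code): member and non-member state are held apart, so a lazy-loading query can take all non-member state but only the requested members, then merge.

    group_state = {("m.room.name", ""): "$name",
                   ("m.room.member", "@a:hs"): "$a",
                   ("m.room.member", "@b:hs"): "$b"}

    members = {k: v for k, v in group_state.items() if k[0] == "m.room.member"}
    non_members = {k: v for k, v in group_state.items() if k[0] != "m.room.member"}

    wanted_members = [("m.room.member", "@a:hs")]  # lazy loading: just @a
    result = dict(non_members)                     # everything non-member
    result.update((k, members[k]) for k in wanted_members if k in members)
    print(result)  # the room name plus @a's membership only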
    @cached(max_entries=10000, iterable=True)
    def get_state_group_delta(self, state_group):
        """Given a state group try to return a previous group and a delta between
        the old and the new.

        Returns:
            (prev_group, delta_ids), where both may be None.
        """

        def _get_state_group_delta_txn(txn):
            prev_group = self.db.simple_select_one_onecol_txn(
                txn,
                table="state_group_edges",
                keyvalues={"state_group": state_group},
                retcol="prev_state_group",
                allow_none=True,
            )

            if not prev_group:
                return _GetStateGroupDelta(None, None)

            delta_ids = self.db.simple_select_list_txn(
                txn,
                table="state_groups_state",
                keyvalues={"state_group": state_group},
                retcols=("type", "state_key", "event_id"),
            )

            return _GetStateGroupDelta(
                prev_group,
                {(row["type"], row["state_key"]): row["event_id"] for row in delta_ids},
            )

        return self.db.runInteraction(
            "get_state_group_delta", _get_state_group_delta_txn
        )
    @defer.inlineCallbacks
    def _get_state_groups_from_groups(self, groups, state_filter):
        """Returns the state groups for a given set of groups, filtering on
        types of state events.

        Args:
            groups(list[int]): list of state group IDs to query
            state_filter (StateFilter): The state filter used to fetch state
                from the database.
        Returns:
            Deferred[dict[int, dict[tuple[str, str], str]]]:
                dict of state_group_id -> (dict of (type, state_key) -> event id)
        """
        results = {}

        chunks = [groups[i : i + 100] for i in range(0, len(groups), 100)]
        for chunk in chunks:
            res = yield self.db.runInteraction(
                "_get_state_groups_from_groups",
                self._get_state_groups_from_groups_txn,
                chunk,
                state_filter,
            )
            results.update(res)

        return results
    def _get_state_for_group_using_cache(self, cache, group, state_filter):
        """Checks if group is in cache. See `_get_state_for_groups`

        Args:
            cache(DictionaryCache): the state group cache to use
            group(int): The state group to lookup
            state_filter (StateFilter): The state filter used to fetch state
                from the database.

        Returns 2-tuple (`state_dict`, `got_all`).
        `got_all` is a bool indicating if we successfully retrieved all
        requested state from the cache; if False we need to query the DB for the
        missing state.
        """
        is_all, known_absent, state_dict_ids = cache.get(group)

        if is_all or state_filter.is_full():
            # Either we have everything or want everything, either way
            # `is_all` tells us whether we've gotten everything.
            return state_filter.filter_state(state_dict_ids), is_all

        # tracks whether any of our requested types are missing from the cache
        missing_types = False

        if state_filter.has_wildcards():
            # We don't know if we fetched all the state keys for the types in
            # the filter that are wildcards, so we have to assume that we may
            # have missed some.
            missing_types = True
        else:
            # There aren't any wild cards, so `concrete_types()` returns the
            # complete list of event types we're wanting.
            for key in state_filter.concrete_types():
                if key not in state_dict_ids and key not in known_absent:
                    missing_types = True
                    break

        return state_filter.filter_state(state_dict_ids), not missing_types
    @defer.inlineCallbacks
    def _get_state_for_groups(self, groups, state_filter=StateFilter.all()):
        """Gets the state at each of a list of state groups, optionally
        filtering by type/state_key

        Args:
            groups (iterable[int]): list of state groups for which we want
                to get the state.
            state_filter (StateFilter): The state filter used to fetch state
                from the database.
        Returns:
            Deferred[dict[int, dict[tuple[str, str], str]]]:
                dict of state_group_id -> (dict of (type, state_key) -> event id)
        """

        member_filter, non_member_filter = state_filter.get_member_split()

        # Now we look them up in the member and non-member caches
        (
            non_member_state,
            incomplete_groups_nm,
        ) = yield self._get_state_for_groups_using_cache(
            groups, self._state_group_cache, state_filter=non_member_filter
        )

        (
            member_state,
            incomplete_groups_m,
        ) = yield self._get_state_for_groups_using_cache(
            groups, self._state_group_members_cache, state_filter=member_filter
        )

        state = dict(non_member_state)
        for group in groups:
            state[group].update(member_state[group])

        # Now fetch any missing groups from the database

        incomplete_groups = incomplete_groups_m | incomplete_groups_nm

        if not incomplete_groups:
            return state

        cache_sequence_nm = self._state_group_cache.sequence
        cache_sequence_m = self._state_group_members_cache.sequence

        # Help the cache hit ratio by expanding the filter a bit
        db_state_filter = state_filter.return_expanded()

        group_to_state_dict = yield self._get_state_groups_from_groups(
            list(incomplete_groups), state_filter=db_state_filter
        )

        # Now lets update the caches
        self._insert_into_cache(
            group_to_state_dict,
            db_state_filter,
            cache_seq_num_members=cache_sequence_m,
            cache_seq_num_non_members=cache_sequence_nm,
        )

        # And finally update the result dict, by filtering out any extra
        # stuff we pulled out of the database.
        for group, group_state_dict in iteritems(group_to_state_dict):
            # We just replace any existing entries, as we will have loaded
            # everything we need from the database anyway.
            state[group] = state_filter.filter_state(group_state_dict)

        return state
    def _get_state_for_groups_using_cache(self, groups, cache, state_filter):
        """Gets the state at each of a list of state groups, optionally
        filtering by type/state_key, querying from a specific cache.

        Args:
            groups (iterable[int]): list of state groups for which we want
                to get the state.
            cache (DictionaryCache): the cache of group ids to state dicts which
                we will pass through - either the normal state cache or the specific
                members state cache.
            state_filter (StateFilter): The state filter used to fetch state
                from the database.

        Returns:
            tuple[dict[int, dict[tuple[str, str], str]], set[int]]: Tuple of
            dict of state_group_id -> (dict of (type, state_key) -> event id)
            of entries in the cache, and the state group ids either missing
            from the cache or incomplete.
        """
        results = {}
        incomplete_groups = set()
        for group in set(groups):
            state_dict_ids, got_all = self._get_state_for_group_using_cache(
                cache, group, state_filter
            )
            results[group] = state_dict_ids

            if not got_all:
                incomplete_groups.add(group)

        return results, incomplete_groups

    def _insert_into_cache(
        self,
        group_to_state_dict,
        state_filter,
        cache_seq_num_members,
        cache_seq_num_non_members,
    ):
        """Inserts results from querying the database into the relevant cache.

        Args:
            group_to_state_dict (dict): The new entries pulled from database.
                Map from state group to state dict
            state_filter (StateFilter): The state filter used to fetch state
                from the database.
            cache_seq_num_members (int): Sequence number of member cache since
                last lookup in cache
            cache_seq_num_non_members (int): Sequence number of non-member cache
                since last lookup in cache
        """

        # We need to work out which types we've fetched from the DB for the
        # member vs non-member caches. This should be as accurate as possible,
        # but can be an underestimate (e.g. when we have wild cards)

        member_filter, non_member_filter = state_filter.get_member_split()
        if member_filter.is_full():
            # We fetched all member events
            member_types = None
        else:
            # `concrete_types()` will only return a subset when there are wild
            # cards in the filter, but that's fine.
            member_types = member_filter.concrete_types()

        if non_member_filter.is_full():
            # We fetched all non member events
            non_member_types = None
        else:
            non_member_types = non_member_filter.concrete_types()

        for group, group_state_dict in iteritems(group_to_state_dict):
            state_dict_members = {}
            state_dict_non_members = {}

            for k, v in iteritems(group_state_dict):
                if k[0] == EventTypes.Member:
                    state_dict_members[k] = v
                else:
                    state_dict_non_members[k] = v

            self._state_group_members_cache.update(
                cache_seq_num_members,
                key=group,
                value=state_dict_members,
                fetched_keys=member_types,
            )

            self._state_group_cache.update(
                cache_seq_num_non_members,
                key=group,
                value=state_dict_non_members,
                fetched_keys=non_member_types,
            )
    def store_state_group(
        self, event_id, room_id, prev_group, delta_ids, current_state_ids
    ):
        """Store a new set of state, returning a newly assigned state group.

        Args:
            event_id (str): The event ID for which the state was calculated
            room_id (str)
            prev_group (int|None): A previous state group for the room, optional.
            delta_ids (dict|None): The delta between state at `prev_group` and
                `current_state_ids`, if `prev_group` was given. Same format as
                `current_state_ids`.
            current_state_ids (dict): The state to store. Map of (type, state_key)
                to event_id.

        Returns:
            Deferred[int]: The state group ID
        """

        def _store_state_group_txn(txn):
            if current_state_ids is None:
                # AFAIK, this can never happen
                raise Exception("current_state_ids cannot be None")

            state_group = self.database_engine.get_next_state_group_id(txn)

            self.db.simple_insert_txn(
                txn,
                table="state_groups",
                values={"id": state_group, "room_id": room_id, "event_id": event_id},
            )

            # We persist as a delta if we can, while also ensuring the chain
            # of deltas isn't too long, as otherwise read performance degrades.
            if prev_group:
                is_in_db = self.db.simple_select_one_onecol_txn(
                    txn,
                    table="state_groups",
                    keyvalues={"id": prev_group},
                    retcol="id",
                    allow_none=True,
                )
                if not is_in_db:
                    raise Exception(
                        "Trying to persist state with unpersisted prev_group: %r"
                        % (prev_group,)
                    )

                potential_hops = self._count_state_group_hops_txn(txn, prev_group)
            if prev_group and potential_hops < MAX_STATE_DELTA_HOPS:
                self.db.simple_insert_txn(
                    txn,
                    table="state_group_edges",
                    values={"state_group": state_group, "prev_state_group": prev_group},
                )

                self.db.simple_insert_many_txn(
                    txn,
                    table="state_groups_state",
                    values=[
                        {
                            "state_group": state_group,
                            "room_id": room_id,
                            "type": key[0],
                            "state_key": key[1],
                            "event_id": state_id,
                        }
                        for key, state_id in iteritems(delta_ids)
                    ],
                )
            else:
                self.db.simple_insert_many_txn(
                    txn,
                    table="state_groups_state",
                    values=[
                        {
                            "state_group": state_group,
                            "room_id": room_id,
                            "type": key[0],
                            "state_key": key[1],
                            "event_id": state_id,
                        }
                        for key, state_id in iteritems(current_state_ids)
                    ],
                )

            # Prefill the state group caches with this group.
            # It's fine to use the sequence like this as the state group map
            # is immutable. (If the map wasn't immutable then this prefill could
            # race with another update)

            current_member_state_ids = {
                s: ev
                for (s, ev) in iteritems(current_state_ids)
                if s[0] == EventTypes.Member
            }
            txn.call_after(
                self._state_group_members_cache.update,
                self._state_group_members_cache.sequence,
                key=state_group,
                value=dict(current_member_state_ids),
            )

            current_non_member_state_ids = {
                s: ev
                for (s, ev) in iteritems(current_state_ids)
                if s[0] != EventTypes.Member
            }
            txn.call_after(
                self._state_group_cache.update,
                self._state_group_cache.sequence,
                key=state_group,
                value=dict(current_non_member_state_ids),
            )

            return state_group

        return self.db.runInteraction("store_state_group", _store_state_group_txn)
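A hypothetical call site (the surrounding function, the ids, and the group number 41 are all invented for illustration; state_store is assumed to be a StateGroupDataStore) showing how a caller persists a new group as a delta: only the changed keys go in delta_ids, while current_state_ids carries the full state for the cache prefill and the non-delta fallback.

    from twisted.internet import defer

    @defer.inlineCallbacks
    def persist_member_join(state_store, full_state_after_join):
        # full_state_after_join: dict of (type, state_key) -> event_id
        state_group = yield state_store.store_state_group(
            event_id="$join_event:hs",
            room_id="!room:hs",
            prev_group=41,
            delta_ids={("m.room.member", "@alice:hs"): "$join_event:hs"},
            current_state_ids=full_state_after_join,
        )
        return state_group  # the new group stores one delta row, not full state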
    def purge_unreferenced_state_groups(
        self, room_id: str, state_groups_to_delete
    ) -> defer.Deferred:
        """Deletes no longer referenced state groups and de-deltas any state
        groups that reference them.

        Args:
            room_id: The room the state groups belong to (must all be in the
                same room).
            state_groups_to_delete (Collection[int]): Set of all state groups
                to delete.
        """

        return self.db.runInteraction(
            "purge_unreferenced_state_groups",
            self._purge_unreferenced_state_groups,
            room_id,
            state_groups_to_delete,
        )

    def _purge_unreferenced_state_groups(self, txn, room_id, state_groups_to_delete):
        logger.info(
            "[purge] found %i state groups to delete", len(state_groups_to_delete)
        )

        rows = self.db.simple_select_many_txn(
            txn,
            table="state_group_edges",
            column="prev_state_group",
            iterable=state_groups_to_delete,
            keyvalues={},
            retcols=("state_group",),
        )

        remaining_state_groups = set(
            row["state_group"]
            for row in rows
            if row["state_group"] not in state_groups_to_delete
        )

        logger.info(
            "[purge] de-delta-ing %i remaining state groups",
            len(remaining_state_groups),
        )

        # Now we turn the state groups that reference to-be-deleted state
        # groups into non-delta versions.
        for sg in remaining_state_groups:
            logger.info("[purge] de-delta-ing remaining state group %s", sg)
            curr_state = self._get_state_groups_from_groups_txn(txn, [sg])
            curr_state = curr_state[sg]

            self.db.simple_delete_txn(
                txn, table="state_groups_state", keyvalues={"state_group": sg}
            )

            self.db.simple_delete_txn(
                txn, table="state_group_edges", keyvalues={"state_group": sg}
            )

            self.db.simple_insert_many_txn(
                txn,
                table="state_groups_state",
                values=[
                    {
                        "state_group": sg,
                        "room_id": room_id,
                        "type": key[0],
                        "state_key": key[1],
                        "event_id": state_id,
                    }
                    for key, state_id in iteritems(curr_state)
                ],
            )

        logger.info("[purge] removing redundant state groups")
        txn.executemany(
            "DELETE FROM state_groups_state WHERE state_group = ?",
            ((sg,) for sg in state_groups_to_delete),
        )
        txn.executemany(
            "DELETE FROM state_groups WHERE id = ?",
            ((sg,) for sg in state_groups_to_delete),
        )
    @defer.inlineCallbacks
    def get_previous_state_groups(self, state_groups):
        """Fetch the previous groups of the given state groups.

        Args:
            state_groups (Iterable[int])

        Returns:
            Deferred[dict[int, int]]: mapping from state group to previous
            state group.
        """

        rows = yield self.db.simple_select_many_batch(
            table="state_group_edges",
            column="prev_state_group",
            iterable=state_groups,
            keyvalues={},
            retcols=("prev_state_group", "state_group"),
            desc="get_previous_state_groups",
        )

        return {row["state_group"]: row["prev_state_group"] for row in rows}

    def purge_room_state(self, room_id, state_groups_to_delete):
        """Deletes all record of a room from state tables

        Args:
            room_id (str):
            state_groups_to_delete (list[int]): State groups to delete
        """

        return self.db.runInteraction(
            "purge_room_state",
            self._purge_room_state_txn,
            room_id,
            state_groups_to_delete,
        )

    def _purge_room_state_txn(self, txn, room_id, state_groups_to_delete):
        # first we have to delete the state groups states
        logger.info("[purge] removing %s from state_groups_state", room_id)

        self.db.simple_delete_many_txn(
            txn,
            table="state_groups_state",
            column="state_group",
            iterable=state_groups_to_delete,
            keyvalues={},
        )

        # ... and the state group edges
        logger.info("[purge] removing %s from state_group_edges", room_id)

        self.db.simple_delete_many_txn(
            txn,
            table="state_group_edges",
            column="state_group",
            iterable=state_groups_to_delete,
            keyvalues={},
        )

        # ... and the state groups
        logger.info("[purge] removing %s from state_groups", room_id)

        self.db.simple_delete_many_txn(
            txn,
            table="state_groups",
            column="id",
            iterable=state_groups_to_delete,
            keyvalues={},
        )
synapse/storage/persist_events.py

@@ -183,7 +183,7 @@ class EventsPersistenceStorage(object):
         # so we use separate variables here even though they point to the same
         # store for now.
         self.main_store = stores.main
-        self.state_store = stores.main
+        self.state_store = stores.state

         self._clock = hs.get_clock()
         self.is_mine_id = hs.is_mine_id
synapse/storage/prepare_database.py

@@ -42,7 +42,7 @@ class UpgradeDatabaseException(PrepareDatabaseException):
     pass


-def prepare_database(db_conn, database_engine, config, data_stores=["main"]):
+def prepare_database(db_conn, database_engine, config, data_stores=["main", "state"]):
     """Prepares a database for usage. Will either create all necessary tables
     or upgrade from an older schema version.
synapse/storage/purge_events.py

@@ -58,7 +58,7 @@ class PurgeEventsStorage(object):

         sg_to_delete = yield self._find_unreferenced_groups(state_groups)

-        yield self.stores.main.purge_unreferenced_state_groups(room_id, sg_to_delete)
+        yield self.stores.state.purge_unreferenced_state_groups(room_id, sg_to_delete)

     @defer.inlineCallbacks
     def _find_unreferenced_groups(self, state_groups):

@@ -102,7 +102,7 @@ class PurgeEventsStorage(object):
             # groups that are referenced.
             current_search -= referenced

-            edges = yield self.stores.main.get_previous_state_groups(current_search)
+            edges = yield self.stores.state.get_previous_state_groups(current_search)

             prevs = set(edges.values())
             # We don't bother re-handling groups we've already seen
synapse/storage/state.py

@@ -342,7 +342,7 @@ class StateGroupStorage(object):
            (prev_group, delta_ids)
        """

-        return self.stores.main.get_state_group_delta(state_group)
+        return self.stores.state.get_state_group_delta(state_group)

     @defer.inlineCallbacks
     def get_state_groups_ids(self, _room_id, event_ids):

@@ -362,7 +362,7 @@ class StateGroupStorage(object):
         event_to_groups = yield self.stores.main._get_state_group_for_events(event_ids)

         groups = set(itervalues(event_to_groups))
-        group_to_state = yield self.stores.main._get_state_for_groups(groups)
+        group_to_state = yield self.stores.state._get_state_for_groups(groups)

         return group_to_state

@@ -423,7 +423,7 @@ class StateGroupStorage(object):
            dict of state_group_id -> (dict of (type, state_key) -> event id)
        """

-        return self.stores.main._get_state_groups_from_groups(groups, state_filter)
+        return self.stores.state._get_state_groups_from_groups(groups, state_filter)

     @defer.inlineCallbacks
     def get_state_for_events(self, event_ids, state_filter=StateFilter.all()):

@@ -439,7 +439,7 @@ class StateGroupStorage(object):
         event_to_groups = yield self.stores.main._get_state_group_for_events(event_ids)

         groups = set(itervalues(event_to_groups))
-        group_to_state = yield self.stores.main._get_state_for_groups(
+        group_to_state = yield self.stores.state._get_state_for_groups(
             groups, state_filter
         )

@@ -476,7 +476,7 @@ class StateGroupStorage(object):
         event_to_groups = yield self.stores.main._get_state_group_for_events(event_ids)

         groups = set(itervalues(event_to_groups))
-        group_to_state = yield self.stores.main._get_state_for_groups(
+        group_to_state = yield self.stores.state._get_state_for_groups(
             groups, state_filter
         )

@@ -532,7 +532,7 @@ class StateGroupStorage(object):
            Deferred[dict[int, dict[tuple[str, str], str]]]:
            dict of state_group_id -> (dict of (type, state_key) -> event id)
        """
-        return self.stores.main._get_state_for_groups(groups, state_filter)
+        return self.stores.state._get_state_for_groups(groups, state_filter)

     def store_state_group(
         self, event_id, room_id, prev_group, delta_ids, current_state_ids

@@ -552,6 +552,6 @@ class StateGroupStorage(object):
         Returns:
             Deferred[int]: The state group ID
         """
-        return self.stores.main.store_state_group(
+        return self.stores.state.store_state_group(
             event_id, room_id, prev_group, delta_ids, current_state_ids
         )
tests/storage/test_state.py

@@ -35,7 +35,7 @@ class StateStoreTestCase(tests.unittest.TestCase):

         self.store = hs.get_datastore()
         self.storage = hs.get_storage()
-        self.state_datastore = self.store
+        self.state_datastore = self.storage.state.stores.state
         self.event_builder_factory = hs.get_event_builder_factory()
         self.event_creation_handler = hs.get_event_creation_handler()
tests/utils.py

@@ -231,7 +231,7 @@ def setup_test_homeserver(
         "args": {"database": ":memory:", "cp_min": 1, "cp_max": 1},
     }

-    database = DatabaseConnectionConfig("master", database_config, ["main"])
+    database = DatabaseConnectionConfig("master", database_config)
     config.database.databases = [database]

     db_engine = create_engine(database.config)