Mirror of https://gitlab.com/famedly/conduit.git (synced 2024-12-26 08:34:20 +01:00)
Merge branch 'stateres' into 'next'

Make state resolution more resistant and some sync performance improvements

See merge request famedly/conduit!490
Commit 3a1a72df98
12 changed files with 256 additions and 293 deletions
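
Most of the changes below replace owned HashSet<CompressedStateEvent> values with shared Arc<HashSet<CompressedStateEvent>> handles across the state compressor, state service, event handler and timeline, grow the stateinfo_cache tenfold, and rework how the new room state for an incoming event is derived. The following is a minimal, self-contained Rust sketch of the Arc-sharing pattern only; the types are simplified stand-ins, not Conduit's actual definitions.

// A minimal sketch (not Conduit's actual code) of the sharing pattern this
// merge request applies: compressed state snapshots are wrapped in `Arc` so
// that the stateinfo cache and every caller share one allocation instead of
// cloning whole `HashSet`s. `CompressedStateEvent` is a stand-in type here.
use std::{collections::HashSet, sync::Arc};

type CompressedStateEvent = [u8; 16]; // assumed stand-in for the compressed (shortstatekey, shorteventid) pair

struct StateDiff {
    parent: Option<u64>,
    added: Arc<HashSet<CompressedStateEvent>>,
    removed: Arc<HashSet<CompressedStateEvent>>,
}

fn main() {
    let full_state: Arc<HashSet<CompressedStateEvent>> =
        Arc::new([[0u8; 16], [1u8; 16]].into_iter().collect());

    // Handing the snapshot to a cache layer and to a diff is a reference-count
    // bump, not a deep copy of every compressed event.
    let cached = Arc::clone(&full_state);
    let diff = StateDiff {
        parent: None,
        added: Arc::clone(&full_state),
        removed: Arc::new(HashSet::new()),
    };

    // Read without consuming: `.iter()` replaces the old `.into_iter()` calls.
    let total = diff.added.iter().count() + cached.len() + diff.removed.len();
    println!(
        "{} compressed events visible across {} shared handles (parent: {:?})",
        total,
        Arc::strong_count(&full_state),
        diff.parent
    );
}

With the sets behind Arc, the compressor's cache layers and the timeline code can keep the same snapshot alive concurrently, and consumers iterate with .iter() instead of moving the set with .into_iter().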
Cargo.lock (generated, 22 lines changed)
@@ -2110,7 +2110,7 @@ dependencies = [
 [[package]]
 name = "ruma"
 version = "0.8.2"
-source = "git+https://github.com/ruma/ruma?rev=de9a5a6ecca197e59623c210bd21f53055f83568#de9a5a6ecca197e59623c210bd21f53055f83568"
+source = "git+https://github.com/ruma/ruma?rev=38294bd5206498c02b1001227d65654eb548308b#38294bd5206498c02b1001227d65654eb548308b"
 dependencies = [
  "assign",
  "js_int",
@@ -2128,7 +2128,7 @@ dependencies = [
 [[package]]
 name = "ruma-appservice-api"
 version = "0.8.1"
-source = "git+https://github.com/ruma/ruma?rev=de9a5a6ecca197e59623c210bd21f53055f83568#de9a5a6ecca197e59623c210bd21f53055f83568"
+source = "git+https://github.com/ruma/ruma?rev=38294bd5206498c02b1001227d65654eb548308b#38294bd5206498c02b1001227d65654eb548308b"
 dependencies = [
  "js_int",
  "ruma-common",
@@ -2139,7 +2139,7 @@ dependencies = [
 [[package]]
 name = "ruma-client-api"
 version = "0.16.2"
-source = "git+https://github.com/ruma/ruma?rev=de9a5a6ecca197e59623c210bd21f53055f83568#de9a5a6ecca197e59623c210bd21f53055f83568"
+source = "git+https://github.com/ruma/ruma?rev=38294bd5206498c02b1001227d65654eb548308b#38294bd5206498c02b1001227d65654eb548308b"
 dependencies = [
  "assign",
  "bytes",
@@ -2156,7 +2156,7 @@ dependencies = [
 [[package]]
 name = "ruma-common"
 version = "0.11.3"
-source = "git+https://github.com/ruma/ruma?rev=de9a5a6ecca197e59623c210bd21f53055f83568#de9a5a6ecca197e59623c210bd21f53055f83568"
+source = "git+https://github.com/ruma/ruma?rev=38294bd5206498c02b1001227d65654eb548308b#38294bd5206498c02b1001227d65654eb548308b"
 dependencies = [
  "base64 0.21.2",
  "bytes",
@@ -2184,7 +2184,7 @@ dependencies = [
 [[package]]
 name = "ruma-federation-api"
 version = "0.7.1"
-source = "git+https://github.com/ruma/ruma?rev=de9a5a6ecca197e59623c210bd21f53055f83568#de9a5a6ecca197e59623c210bd21f53055f83568"
+source = "git+https://github.com/ruma/ruma?rev=38294bd5206498c02b1001227d65654eb548308b#38294bd5206498c02b1001227d65654eb548308b"
 dependencies = [
  "js_int",
  "ruma-common",
@@ -2195,7 +2195,7 @@ dependencies = [
 [[package]]
 name = "ruma-identifiers-validation"
 version = "0.9.1"
-source = "git+https://github.com/ruma/ruma?rev=de9a5a6ecca197e59623c210bd21f53055f83568#de9a5a6ecca197e59623c210bd21f53055f83568"
+source = "git+https://github.com/ruma/ruma?rev=38294bd5206498c02b1001227d65654eb548308b#38294bd5206498c02b1001227d65654eb548308b"
 dependencies = [
  "js_int",
  "thiserror",
@@ -2204,7 +2204,7 @@ dependencies = [
 [[package]]
 name = "ruma-identity-service-api"
 version = "0.7.1"
-source = "git+https://github.com/ruma/ruma?rev=de9a5a6ecca197e59623c210bd21f53055f83568#de9a5a6ecca197e59623c210bd21f53055f83568"
+source = "git+https://github.com/ruma/ruma?rev=38294bd5206498c02b1001227d65654eb548308b#38294bd5206498c02b1001227d65654eb548308b"
 dependencies = [
  "js_int",
  "ruma-common",
@@ -2214,7 +2214,7 @@ dependencies = [
 [[package]]
 name = "ruma-macros"
 version = "0.11.3"
-source = "git+https://github.com/ruma/ruma?rev=de9a5a6ecca197e59623c210bd21f53055f83568#de9a5a6ecca197e59623c210bd21f53055f83568"
+source = "git+https://github.com/ruma/ruma?rev=38294bd5206498c02b1001227d65654eb548308b#38294bd5206498c02b1001227d65654eb548308b"
 dependencies = [
  "once_cell",
  "proc-macro-crate",
@@ -2229,7 +2229,7 @@ dependencies = [
 [[package]]
 name = "ruma-push-gateway-api"
 version = "0.7.1"
-source = "git+https://github.com/ruma/ruma?rev=de9a5a6ecca197e59623c210bd21f53055f83568#de9a5a6ecca197e59623c210bd21f53055f83568"
+source = "git+https://github.com/ruma/ruma?rev=38294bd5206498c02b1001227d65654eb548308b#38294bd5206498c02b1001227d65654eb548308b"
 dependencies = [
  "js_int",
  "ruma-common",
@@ -2240,7 +2240,7 @@ dependencies = [
 [[package]]
 name = "ruma-signatures"
 version = "0.13.1"
-source = "git+https://github.com/ruma/ruma?rev=de9a5a6ecca197e59623c210bd21f53055f83568#de9a5a6ecca197e59623c210bd21f53055f83568"
+source = "git+https://github.com/ruma/ruma?rev=38294bd5206498c02b1001227d65654eb548308b#38294bd5206498c02b1001227d65654eb548308b"
 dependencies = [
  "base64 0.21.2",
  "ed25519-dalek",
@@ -2256,7 +2256,7 @@ dependencies = [
 [[package]]
 name = "ruma-state-res"
 version = "0.9.1"
-source = "git+https://github.com/ruma/ruma?rev=de9a5a6ecca197e59623c210bd21f53055f83568#de9a5a6ecca197e59623c210bd21f53055f83568"
+source = "git+https://github.com/ruma/ruma?rev=38294bd5206498c02b1001227d65654eb548308b#38294bd5206498c02b1001227d65654eb548308b"
 dependencies = [
  "itertools",
  "js_int",

@@ -26,7 +26,7 @@ tower-http = { version = "0.4.1", features = ["add-extension", "cors", "sensitiv

 # Used for matrix spec type definitions and helpers
 #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
-ruma = { git = "https://github.com/ruma/ruma", rev = "de9a5a6ecca197e59623c210bd21f53055f83568", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
+ruma = { git = "https://github.com/ruma/ruma", rev = "38294bd5206498c02b1001227d65654eb548308b", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
 #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
 #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }


@@ -743,15 +743,17 @@ async fn join_room_by_id_helper(
         info!("Saving state from send_join");
         let (statehash_before_join, new, removed) = services().rooms.state_compressor.save_state(
             room_id,
-            state
-                .into_iter()
-                .map(|(k, id)| {
-                    services()
-                        .rooms
-                        .state_compressor
-                        .compress_state_event(k, &id)
-                })
-                .collect::<Result<_>>()?,
+            Arc::new(
+                state
+                    .into_iter()
+                    .map(|(k, id)| {
+                        services()
+                            .rooms
+                            .state_compressor
+                            .compress_state_event(k, &id)
+                    })
+                    .collect::<Result<_>>()?,
+            ),
         )?;

         services()

@@ -16,7 +16,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
             .1;
         let mut result = HashMap::new();
         let mut i = 0;
-        for compressed in full_state.into_iter() {
+        for compressed in full_state.iter() {
             let parsed = services()
                 .rooms
                 .state_compressor
@@ -45,7 +45,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {

         let mut result = HashMap::new();
         let mut i = 0;
-        for compressed in full_state {
+        for compressed in full_state.iter() {
             let (_, eventid) = services()
                 .rooms
                 .state_compressor
@@ -95,7 +95,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
             .expect("there is always one layer")
             .1;
         Ok(full_state
-            .into_iter()
+            .iter()
            .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes()))
            .and_then(|compressed| {
                services()

@@ -1,4 +1,4 @@
-use std::{collections::HashSet, mem::size_of};
+use std::{collections::HashSet, mem::size_of, sync::Arc};

 use crate::{
     database::KeyValueDatabase,
@@ -37,20 +37,20 @@ impl service::rooms::state_compressor::Data for KeyValueDatabase {

         Ok(StateDiff {
             parent,
-            added,
-            removed,
+            added: Arc::new(added),
+            removed: Arc::new(removed),
         })
     }

     fn save_statediff(&self, shortstatehash: u64, diff: StateDiff) -> Result<()> {
         let mut value = diff.parent.unwrap_or(0).to_be_bytes().to_vec();
-        for new in &diff.added {
+        for new in diff.added.iter() {
             value.extend_from_slice(&new[..]);
         }

         if !diff.removed.is_empty() {
             value.extend_from_slice(&0_u64.to_be_bytes());
-            for removed in &diff.removed {
+            for removed in diff.removed.iter() {
                 value.extend_from_slice(&removed[..]);
             }
         }

@@ -587,8 +587,8 @@ impl KeyValueDatabase {

                 services().rooms.state_compressor.save_state_from_diff(
                     current_sstatehash,
-                    statediffnew,
-                    statediffremoved,
+                    Arc::new(statediffnew),
+                    Arc::new(statediffremoved),
                     2, // every state change is 2 event changes on average
                     states_parents,
                 )?;

@@ -90,7 +90,7 @@ impl Services {
                 state_compressor: rooms::state_compressor::Service {
                     db,
                     stateinfo_cache: Mutex::new(LruCache::new(
-                        (100.0 * config.conduit_cache_capacity_modifier) as usize,
+                        (1000.0 * config.conduit_cache_capacity_modifier) as usize,
                     )),
                 },
                 timeline: rooms::timeline::Service {

@@ -38,6 +38,8 @@ use tracing::{debug, error, info, trace, warn};

 use crate::{service::*, services, Error, PduEvent, Result};

+use super::state_compressor::CompressedStateEvent;
+
 pub struct Service;

 impl Service {
@@ -62,9 +64,8 @@ impl Service {
     /// 12. Ensure that the state is derived from the previous current state (i.e. we calculated by
     ///     doing state res where one of the inputs was a previously trusted set of state, don't just
     ///     trust a set of state we got from a remote)
-    /// 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail"
-    ///     it
-    /// 14. Use state resolution to find new room state
+    /// 13. Use state resolution to find new room state
+    /// 14. Check if the event passes auth based on the "current state" of the room, if not soft fail it
     // We use some AsyncRecursiveType hacks here so we can call this async funtion recursively
     #[tracing::instrument(skip(self, value, is_timeline_event, pub_key_map))]
     pub(crate) async fn handle_incoming_pdu<'a>(
@ -304,7 +305,7 @@ impl Service {
|
|||
) {
|
||||
Err(e) => {
|
||||
// Drop
|
||||
warn!("Dropping bad event {}: {}", event_id, e);
|
||||
warn!("Dropping bad event {}: {}", event_id, e,);
|
||||
return Err(Error::BadRequest(
|
||||
ErrorKind::InvalidParam,
|
||||
"Signature verification failed",
|
||||
|
@@ -735,8 +736,26 @@ impl Service {
         }
         info!("Auth check succeeded");

-        // We start looking at current room state now, so lets lock the room
+        // Soft fail check before doing state res
+        let auth_events = services().rooms.state.get_auth_events(
+            room_id,
+            &incoming_pdu.kind,
+            &incoming_pdu.sender,
+            incoming_pdu.state_key.as_deref(),
+            &incoming_pdu.content,
+        )?;
+
+        let soft_fail = !state_res::event_auth::auth_check(
+            &room_version,
+            &incoming_pdu,
+            None::<PduEvent>,
+            |k, s| auth_events.get(&(k.clone(), s.to_owned())),
+        )
+        .map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed."))?;
+
+        // 13. Use state resolution to find new room state
+
+        // We start looking at current room state now, so lets lock the room
         let mutex_state = Arc::clone(
             services()
                 .globals
@@ -772,35 +791,54 @@ impl Service {
         });

         info!("Compressing state at event");
-        let state_ids_compressed = state_at_incoming_event
-            .iter()
-            .map(|(shortstatekey, id)| {
-                services()
-                    .rooms
-                    .state_compressor
-                    .compress_state_event(*shortstatekey, id)
-            })
-            .collect::<Result<_>>()?;
+        let state_ids_compressed = Arc::new(
+            state_at_incoming_event
+                .iter()
+                .map(|(shortstatekey, id)| {
+                    services()
+                        .rooms
+                        .state_compressor
+                        .compress_state_event(*shortstatekey, id)
+                })
+                .collect::<Result<_>>()?,
+        );

-        // 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it
+        if incoming_pdu.state_key.is_some() {
+            info!("Preparing for stateres to derive new room state");
+
+            // We also add state after incoming event to the fork states
+            let mut state_after = state_at_incoming_event.clone();
+            if let Some(state_key) = &incoming_pdu.state_key {
+                let shortstatekey = services().rooms.short.get_or_create_shortstatekey(
+                    &incoming_pdu.kind.to_string().into(),
+                    state_key,
+                )?;
+
+                state_after.insert(shortstatekey, Arc::from(&*incoming_pdu.event_id));
+            }
+
+            let new_room_state = self
+                .resolve_state(room_id, room_version_id, state_after)
+                .await?;
+
+            // Set the new room state to the resolved state
+            info!("Forcing new room state");
+
+            let (sstatehash, new, removed) = services()
+                .rooms
+                .state_compressor
+                .save_state(room_id, new_room_state)?;
+
+            services()
+                .rooms
+                .state
+                .force_state(room_id, sstatehash, new, removed, &state_lock)
+                .await?;
+        }
+
+        // 14. Check if the event passes auth based on the "current state" of the room, if not soft fail it
         info!("Starting soft fail auth check");
-
-        let auth_events = services().rooms.state.get_auth_events(
-            room_id,
-            &incoming_pdu.kind,
-            &incoming_pdu.sender,
-            incoming_pdu.state_key.as_deref(),
-            &incoming_pdu.content,
-        )?;
-
-        let soft_fail = !state_res::event_auth::auth_check(
-            &room_version,
-            &incoming_pdu,
-            None::<PduEvent>,
-            |k, s| auth_events.get(&(k.clone(), s.to_owned())),
-        )
-        .map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed."))?;

         if soft_fail {
             services().rooms.timeline.append_incoming_pdu(
                 &incoming_pdu,
@@ -823,181 +861,6 @@ impl Service {
             ));
         }

-        if incoming_pdu.state_key.is_some() {
-            info!("Loading current room state ids");
-            let current_sstatehash = services()
-                .rooms
-                .state
-                .get_room_shortstatehash(room_id)?
-                .expect("every room has state");
-
-            let current_state_ids = services()
-                .rooms
-                .state_accessor
-                .state_full_ids(current_sstatehash)
-                .await?;
-
-            info!("Preparing for stateres to derive new room state");
-            let mut extremity_sstatehashes = HashMap::new();
-
-            info!(?extremities, "Loading extremities");
-            for id in &extremities {
-                match services().rooms.timeline.get_pdu(id)? {
-                    Some(leaf_pdu) => {
-                        extremity_sstatehashes.insert(
-                            services()
-                                .rooms
-                                .state_accessor
-                                .pdu_shortstatehash(&leaf_pdu.event_id)?
-                                .ok_or_else(|| {
-                                    error!(
-                                        "Found extremity pdu with no statehash in db: {:?}",
-                                        leaf_pdu
-                                    );
-                                    Error::bad_database("Found pdu with no statehash in db.")
-                                })?,
-                            leaf_pdu,
-                        );
-                    }
-                    _ => {
-                        error!("Missing state snapshot for {:?}", id);
-                        return Err(Error::BadDatabase("Missing state snapshot."));
-                    }
-                }
-            }
-
-            let mut fork_states = Vec::new();
-
-            // 12. Ensure that the state is derived from the previous current state (i.e. we calculated
-            // by doing state res where one of the inputs was a previously trusted set of state,
-            // don't just trust a set of state we got from a remote).
-
-            // We do this by adding the current state to the list of fork states
-            extremity_sstatehashes.remove(&current_sstatehash);
-            fork_states.push(current_state_ids);
-
-            // We also add state after incoming event to the fork states
-            let mut state_after = state_at_incoming_event.clone();
-            if let Some(state_key) = &incoming_pdu.state_key {
-                let shortstatekey = services().rooms.short.get_or_create_shortstatekey(
-                    &incoming_pdu.kind.to_string().into(),
-                    state_key,
-                )?;
-
-                state_after.insert(shortstatekey, Arc::from(&*incoming_pdu.event_id));
-            }
-            fork_states.push(state_after);
-
-            let mut update_state = false;
-            // 14. Use state resolution to find new room state
-            let new_room_state = if fork_states.is_empty() {
-                panic!("State is empty");
-            } else if fork_states.iter().skip(1).all(|f| &fork_states[0] == f) {
-                info!("State resolution trivial");
-                // There was only one state, so it has to be the room's current state (because that is
-                // always included)
-                fork_states[0]
-                    .iter()
-                    .map(|(k, id)| {
-                        services()
-                            .rooms
-                            .state_compressor
-                            .compress_state_event(*k, id)
-                    })
-                    .collect::<Result<_>>()?
-            } else {
-                info!("Loading auth chains");
-                // We do need to force an update to this room's state
-                update_state = true;
-
-                let mut auth_chain_sets = Vec::new();
-                for state in &fork_states {
-                    auth_chain_sets.push(
-                        services()
-                            .rooms
-                            .auth_chain
-                            .get_auth_chain(
-                                room_id,
-                                state.iter().map(|(_, id)| id.clone()).collect(),
-                            )
-                            .await?
-                            .collect(),
-                    );
-                }
-
-                info!("Loading fork states");
-
-                let fork_states: Vec<_> = fork_states
-                    .into_iter()
-                    .map(|map| {
-                        map.into_iter()
-                            .filter_map(|(k, id)| {
-                                services()
-                                    .rooms
-                                    .short
-                                    .get_statekey_from_short(k)
-                                    .map(|(ty, st_key)| ((ty.to_string().into(), st_key), id))
-                                    .ok()
-                            })
-                            .collect::<StateMap<_>>()
-                    })
-                    .collect();
-
-                info!("Resolving state");
-
-                let lock = services().globals.stateres_mutex.lock();
-                let state = match state_res::resolve(
-                    room_version_id,
-                    &fork_states,
-                    auth_chain_sets,
-                    |id| {
-                        let res = services().rooms.timeline.get_pdu(id);
-                        if let Err(e) = &res {
-                            error!("LOOK AT ME Failed to fetch event: {}", e);
-                        }
-                        res.ok().flatten()
-                    },
-                ) {
-                    Ok(new_state) => new_state,
-                    Err(_) => {
-                        return Err(Error::bad_database("State resolution failed, either an event could not be found or deserialization"));
-                    }
-                };
-
-                drop(lock);
-
-                info!("State resolution done. Compressing state");
-
-                state
-                    .into_iter()
-                    .map(|((event_type, state_key), event_id)| {
-                        let shortstatekey = services().rooms.short.get_or_create_shortstatekey(
-                            &event_type.to_string().into(),
-                            &state_key,
-                        )?;
-                        services()
-                            .rooms
-                            .state_compressor
-                            .compress_state_event(shortstatekey, &event_id)
-                    })
-                    .collect::<Result<_>>()?
-            };
-
-            // Set the new room state to the resolved state
-            if update_state {
-                info!("Forcing new room state");
-                let (sstatehash, new, removed) = services()
-                    .rooms
-                    .state_compressor
-                    .save_state(room_id, new_room_state)?;
-                services()
-                    .rooms
-                    .state
-                    .force_state(room_id, sstatehash, new, removed, &state_lock)
-                    .await?;
-            }
-        }

         info!("Appending pdu to timeline");
         extremities.insert(incoming_pdu.event_id.clone());
@@ -1021,6 +884,94 @@ impl Service {
         Ok(pdu_id)
     }

+    async fn resolve_state(
+        &self,
+        room_id: &RoomId,
+        room_version_id: &RoomVersionId,
+        incoming_state: HashMap<u64, Arc<EventId>>,
+    ) -> Result<Arc<HashSet<CompressedStateEvent>>> {
+        info!("Loading current room state ids");
+        let current_sstatehash = services()
+            .rooms
+            .state
+            .get_room_shortstatehash(room_id)?
+            .expect("every room has state");
+
+        let current_state_ids = services()
+            .rooms
+            .state_accessor
+            .state_full_ids(current_sstatehash)
+            .await?;
+
+        let fork_states = [current_state_ids, incoming_state];
+
+        let mut auth_chain_sets = Vec::new();
+        for state in &fork_states {
+            auth_chain_sets.push(
+                services()
+                    .rooms
+                    .auth_chain
+                    .get_auth_chain(room_id, state.iter().map(|(_, id)| id.clone()).collect())
+                    .await?
+                    .collect(),
+            );
+        }
+
+        info!("Loading fork states");
+
+        let fork_states: Vec<_> = fork_states
+            .into_iter()
+            .map(|map| {
+                map.into_iter()
+                    .filter_map(|(k, id)| {
+                        services()
+                            .rooms
+                            .short
+                            .get_statekey_from_short(k)
+                            .map(|(ty, st_key)| ((ty.to_string().into(), st_key), id))
+                            .ok()
+                    })
+                    .collect::<StateMap<_>>()
+            })
+            .collect();
+
+        info!("Resolving state");
+
+        let lock = services().globals.stateres_mutex.lock();
+        let state = match state_res::resolve(room_version_id, &fork_states, auth_chain_sets, |id| {
+            let res = services().rooms.timeline.get_pdu(id);
+            if let Err(e) = &res {
+                error!("LOOK AT ME Failed to fetch event: {}", e);
+            }
+            res.ok().flatten()
+        }) {
+            Ok(new_state) => new_state,
+            Err(_) => {
+                return Err(Error::bad_database("State resolution failed, either an event could not be found or deserialization"));
+            }
+        };
+
+        drop(lock);
+
+        info!("State resolution done. Compressing state");
+
+        let new_room_state = state
+            .into_iter()
+            .map(|((event_type, state_key), event_id)| {
+                let shortstatekey = services()
+                    .rooms
+                    .short
+                    .get_or_create_shortstatekey(&event_type.to_string().into(), &state_key)?;
+                services()
+                    .rooms
+                    .state_compressor
+                    .compress_state_event(shortstatekey, &event_id)
+            })
+            .collect::<Result<_>>()?;
+
+        Ok(Arc::new(new_room_state))
+    }
+
     /// Find the event and auth it. Once the event is validated (steps 1 - 8)
     /// it is appended to the outliers Tree.
     ///

@@ -32,11 +32,11 @@ impl Service {
         &self,
         room_id: &RoomId,
         shortstatehash: u64,
-        statediffnew: HashSet<CompressedStateEvent>,
-        _statediffremoved: HashSet<CompressedStateEvent>,
+        statediffnew: Arc<HashSet<CompressedStateEvent>>,
+        _statediffremoved: Arc<HashSet<CompressedStateEvent>>,
         state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
     ) -> Result<()> {
-        for event_id in statediffnew.into_iter().filter_map(|new| {
+        for event_id in statediffnew.iter().filter_map(|new| {
             services()
                 .rooms
                 .state_compressor
@@ -107,7 +107,7 @@ impl Service {
         &self,
         event_id: &EventId,
         room_id: &RoomId,
-        state_ids_compressed: HashSet<CompressedStateEvent>,
+        state_ids_compressed: Arc<HashSet<CompressedStateEvent>>,
     ) -> Result<u64> {
         let shorteventid = services()
             .rooms
@@ -152,9 +152,9 @@ impl Service {
                 .copied()
                 .collect();

-            (statediffnew, statediffremoved)
+            (Arc::new(statediffnew), Arc::new(statediffremoved))
         } else {
-            (state_ids_compressed, HashSet::new())
+            (state_ids_compressed, Arc::new(HashSet::new()))
         };
         services().rooms.state_compressor.save_state_from_diff(
             shortstatehash,
@@ -234,8 +234,8 @@ impl Service {

         services().rooms.state_compressor.save_state_from_diff(
             shortstatehash,
-            statediffnew,
-            statediffremoved,
+            Arc::new(statediffnew),
+            Arc::new(statediffremoved),
             2,
             states_parents,
         )?;
@@ -396,7 +396,7 @@ impl Service {
             .1;

         Ok(full_state
-            .into_iter()
+            .iter()
             .filter_map(|compressed| {
                 services()
                     .rooms

@@ -1,12 +1,12 @@
-use std::collections::HashSet;
+use std::{collections::HashSet, sync::Arc};

 use super::CompressedStateEvent;
 use crate::Result;

 pub struct StateDiff {
     pub parent: Option<u64>,
-    pub added: HashSet<CompressedStateEvent>,
-    pub removed: HashSet<CompressedStateEvent>,
+    pub added: Arc<HashSet<CompressedStateEvent>>,
+    pub removed: Arc<HashSet<CompressedStateEvent>>,
 }

 pub trait Data: Send + Sync {

@@ -20,10 +20,10 @@ pub struct Service {
         LruCache<
             u64,
             Vec<(
-                u64,                           // sstatehash
-                HashSet<CompressedStateEvent>, // full state
-                HashSet<CompressedStateEvent>, // added
-                HashSet<CompressedStateEvent>, // removed
+                u64,                                // sstatehash
+                Arc<HashSet<CompressedStateEvent>>, // full state
+                Arc<HashSet<CompressedStateEvent>>, // added
+                Arc<HashSet<CompressedStateEvent>>, // removed
             )>,
         >,
     >,
@@ -39,10 +39,10 @@ impl Service {
         shortstatehash: u64,
     ) -> Result<
         Vec<(
-            u64,                           // sstatehash
-            HashSet<CompressedStateEvent>, // full state
-            HashSet<CompressedStateEvent>, // added
-            HashSet<CompressedStateEvent>, // removed
+            u64,                                // sstatehash
+            Arc<HashSet<CompressedStateEvent>>, // full state
+            Arc<HashSet<CompressedStateEvent>>, // added
+            Arc<HashSet<CompressedStateEvent>>, // removed
         )>,
     > {
         if let Some(r) = self
@@ -62,13 +62,19 @@ impl Service {

         if let Some(parent) = parent {
             let mut response = self.load_shortstatehash_info(parent)?;
-            let mut state = response.last().unwrap().1.clone();
+            let mut state = (*response.last().unwrap().1).clone();
             state.extend(added.iter().copied());
+            let removed = (*removed).clone();
             for r in &removed {
                 state.remove(r);
             }

-            response.push((shortstatehash, state, added, removed));
+            response.push((shortstatehash, Arc::new(state), added, Arc::new(removed)));

+            self.stateinfo_cache
+                .lock()
+                .unwrap()
+                .insert(shortstatehash, response.clone());
+
             Ok(response)
         } else {
@@ -135,14 +141,14 @@ impl Service {
     pub fn save_state_from_diff(
         &self,
         shortstatehash: u64,
-        statediffnew: HashSet<CompressedStateEvent>,
-        statediffremoved: HashSet<CompressedStateEvent>,
+        statediffnew: Arc<HashSet<CompressedStateEvent>>,
+        statediffremoved: Arc<HashSet<CompressedStateEvent>>,
         diff_to_sibling: usize,
         mut parent_states: Vec<(
-            u64,                           // sstatehash
-            HashSet<CompressedStateEvent>, // full state
-            HashSet<CompressedStateEvent>, // added
-            HashSet<CompressedStateEvent>, // removed
+            u64,                                // sstatehash
+            Arc<HashSet<CompressedStateEvent>>, // full state
+            Arc<HashSet<CompressedStateEvent>>, // added
+            Arc<HashSet<CompressedStateEvent>>, // removed
         )>,
     ) -> Result<()> {
         let diffsum = statediffnew.len() + statediffremoved.len();
@@ -152,29 +158,29 @@ impl Service {
            // To many layers, we have to go deeper
            let parent = parent_states.pop().unwrap();

-            let mut parent_new = parent.2;
-            let mut parent_removed = parent.3;
+            let mut parent_new = (*parent.2).clone();
+            let mut parent_removed = (*parent.3).clone();

-            for removed in statediffremoved {
-                if !parent_new.remove(&removed) {
+            for removed in statediffremoved.iter() {
+                if !parent_new.remove(removed) {
                     // It was not added in the parent and we removed it
-                    parent_removed.insert(removed);
+                    parent_removed.insert(removed.clone());
                 }
                 // Else it was added in the parent and we removed it again. We can forget this change
             }

-            for new in statediffnew {
-                if !parent_removed.remove(&new) {
+            for new in statediffnew.iter() {
+                if !parent_removed.remove(new) {
                     // It was not touched in the parent and we added it
-                    parent_new.insert(new);
+                    parent_new.insert(new.clone());
                 }
                 // Else it was removed in the parent and we added it again. We can forget this change
             }

             self.save_state_from_diff(
                 shortstatehash,
-                parent_new,
-                parent_removed,
+                Arc::new(parent_new),
+                Arc::new(parent_removed),
                 diffsum,
                 parent_states,
             )?;
@@ -205,29 +211,29 @@ impl Service {

         if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff {
             // Diff too big, we replace above layer(s)
-            let mut parent_new = parent.2;
-            let mut parent_removed = parent.3;
+            let mut parent_new = (*parent.2).clone();
+            let mut parent_removed = (*parent.3).clone();

-            for removed in statediffremoved {
-                if !parent_new.remove(&removed) {
+            for removed in statediffremoved.iter() {
+                if !parent_new.remove(removed) {
                     // It was not added in the parent and we removed it
-                    parent_removed.insert(removed);
+                    parent_removed.insert(removed.clone());
                 }
                 // Else it was added in the parent and we removed it again. We can forget this change
             }

-            for new in statediffnew {
-                if !parent_removed.remove(&new) {
+            for new in statediffnew.iter() {
+                if !parent_removed.remove(new) {
                     // It was not touched in the parent and we added it
-                    parent_new.insert(new);
+                    parent_new.insert(new.clone());
                 }
                 // Else it was removed in the parent and we added it again. We can forget this change
             }

             self.save_state_from_diff(
                 shortstatehash,
-                parent_new,
-                parent_removed,
+                Arc::new(parent_new),
+                Arc::new(parent_removed),
                 diffsum,
                 parent_states,
             )?;
@@ -250,11 +256,11 @@ impl Service {
     pub fn save_state(
         &self,
         room_id: &RoomId,
-        new_state_ids_compressed: HashSet<CompressedStateEvent>,
+        new_state_ids_compressed: Arc<HashSet<CompressedStateEvent>>,
     ) -> Result<(
         u64,
-        HashSet<CompressedStateEvent>,
-        HashSet<CompressedStateEvent>,
+        Arc<HashSet<CompressedStateEvent>>,
+        Arc<HashSet<CompressedStateEvent>>,
     )> {
         let previous_shortstatehash = services().rooms.state.get_room_shortstatehash(room_id)?;

@@ -271,7 +277,11 @@ impl Service {
             .get_or_create_shortstatehash(&state_hash)?;

         if Some(new_shortstatehash) == previous_shortstatehash {
-            return Ok((new_shortstatehash, HashSet::new(), HashSet::new()));
+            return Ok((
+                new_shortstatehash,
+                Arc::new(HashSet::new()),
+                Arc::new(HashSet::new()),
+            ));
         }

         let states_parents = previous_shortstatehash
@@ -290,9 +300,9 @@ impl Service {
                 .copied()
                 .collect();

-            (statediffnew, statediffremoved)
+            (Arc::new(statediffnew), Arc::new(statediffremoved))
         } else {
-            (new_state_ids_compressed, HashSet::new())
+            (new_state_ids_compressed, Arc::new(HashSet::new()))
         };

         if !already_existed {

@@ -946,7 +946,7 @@ impl Service {
         pdu: &PduEvent,
         pdu_json: CanonicalJsonObject,
         new_room_leaves: Vec<OwnedEventId>,
-        state_ids_compressed: HashSet<CompressedStateEvent>,
+        state_ids_compressed: Arc<HashSet<CompressedStateEvent>>,
         soft_fail: bool,
         state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
     ) -> Result<Option<Vec<u8>>> {
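
For orientation, here is a hedged, stub-based sketch of the reordered flow that the event handler changes above implement: the soft-fail auth verdict is computed against the event's auth events before state resolution, state resolution (the new resolve_state helper) then only merges two forks, the current room state and the state after the incoming event, and the soft-fail verdict is applied when the PDU is appended. All names below are illustrative stubs, not Conduit's API.

// Illustrative stubs only; the real logic lives in handle_incoming_pdu and
// resolve_state in the diff above.
use std::collections::HashMap;

type StateMap = HashMap<(String, String), String>; // (event type, state key) -> event id

fn auth_check(_event: &str, _auth_events: &StateMap) -> bool {
    true // stub: the real check is state_res::event_auth::auth_check
}

fn resolve(forks: &[StateMap]) -> StateMap {
    forks.last().cloned().unwrap_or_default() // stub for state_res::resolve
}

fn handle_timeline_event(event: &str, is_state_event: bool) {
    let current_state = StateMap::new(); // stub: current room state
    let state_at_event = StateMap::new(); // stub: state at the incoming event

    // Soft fail check before doing state res: the verdict is computed early...
    let soft_fail = !auth_check(event, &current_state);

    // 13. Use state resolution to find the new room state; only two forks now.
    if is_state_event {
        let new_room_state = resolve(&[current_state, state_at_event]);
        println!("forcing new room state with {} entries", new_room_state.len());
    }

    // 14. ...and applied when the event is appended to the timeline.
    if soft_fail {
        println!("{event} appended, but soft-failed (excluded from current state)");
    } else {
        println!("{event} appended normally");
    }
}

fn main() {
    handle_timeline_event("$example_event", true);
}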