mirror of https://gitlab.com/famedly/conduit.git synced 2024-11-04 17:08:52 +01:00

improvement: make more things async

Timo Kösters 2022-06-18 16:38:41 +02:00
parent 9b898248c7
commit 0bc03e90a1
No known key found for this signature in database
GPG key ID: 24DA7517711A2BA4
7 changed files with 244 additions and 155 deletions
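
In short: the heavy state-loading and auth-chain helpers (state_full_ids, state_full, room_state_full, get_auth_chain, the admin command processing) become async fns, every caller gains an .await, and their hot loops now hand control back to the tokio scheduler every 100 iterations so loading the state of one very large room no longer blocks other tasks on the same worker thread. A minimal sketch of that yield pattern (illustrative only, not taken from the diff; the function name and item type are placeholders):

// Cooperative-yield pattern used throughout this commit (illustrative sketch).
async fn process_all(items: Vec<u64>) {
    for (i, item) in items.into_iter().enumerate() {
        // ... do a chunk of synchronous work with `item` here ...
        let _ = item;

        // Every 100 iterations, yield back to the tokio scheduler so other
        // tasks on this worker thread get a chance to run.
        if (i + 1) % 100 == 0 {
            tokio::task::yield_now().await;
        }
    }
}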

View file

@@ -137,7 +137,7 @@ pub async fn get_context_route(
             .expect("All rooms have state"),
     };
 
-    let state_ids = db.rooms.state_full_ids(shortstatehash)?;
+    let state_ids = db.rooms.state_full_ids(shortstatehash).await?;
 
     let end_token = events_after
         .last()

View file

@@ -29,7 +29,7 @@ use ruma::{
 };
 use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
 use std::{
-    collections::{hash_map::Entry, BTreeMap, HashMap, HashSet},
+    collections::{hash_map::Entry, BTreeMap, HashMap},
     iter,
     sync::{Arc, RwLock},
     time::{Duration, Instant},
@@ -48,19 +48,20 @@ pub async fn join_room_by_id_route(
 ) -> Result<join_room_by_id::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");
 
-    let mut servers: HashSet<_> = db
-        .rooms
-        .invite_state(sender_user, &body.room_id)?
-        .unwrap_or_default()
-        .iter()
-        .filter_map(|event| serde_json::from_str(event.json().get()).ok())
-        .filter_map(|event: serde_json::Value| event.get("sender").cloned())
-        .filter_map(|sender| sender.as_str().map(|s| s.to_owned()))
-        .filter_map(|sender| UserId::parse(sender).ok())
-        .map(|user| user.server_name().to_owned())
-        .collect();
+    let mut servers = Vec::new(); // There is no body.server_name for /roomId/join
+    servers.extend(
+        db.rooms
+            .invite_state(sender_user, &body.room_id)?
+            .unwrap_or_default()
+            .iter()
+            .filter_map(|event| serde_json::from_str(event.json().get()).ok())
+            .filter_map(|event: serde_json::Value| event.get("sender").cloned())
+            .filter_map(|sender| sender.as_str().map(|s| s.to_owned()))
+            .filter_map(|sender| UserId::parse(sender).ok())
+            .map(|user| user.server_name().to_owned()),
+    );
 
-    servers.insert(body.room_id.server_name().to_owned());
+    servers.push(body.room_id.server_name().to_owned());
 
     let ret = join_room_by_id_helper(
         &db,
@@ -91,19 +92,20 @@ pub async fn join_room_by_id_or_alias_route(
     let (servers, room_id) = match Box::<RoomId>::try_from(body.room_id_or_alias) {
         Ok(room_id) => {
-            let mut servers: HashSet<_> = db
-                .rooms
-                .invite_state(sender_user, &room_id)?
-                .unwrap_or_default()
-                .iter()
-                .filter_map(|event| serde_json::from_str(event.json().get()).ok())
-                .filter_map(|event: serde_json::Value| event.get("sender").cloned())
-                .filter_map(|sender| sender.as_str().map(|s| s.to_owned()))
-                .filter_map(|sender| UserId::parse(sender).ok())
-                .map(|user| user.server_name().to_owned())
-                .collect();
+            let mut servers = body.server_name.clone();
+            servers.extend(
+                db.rooms
+                    .invite_state(sender_user, &room_id)?
+                    .unwrap_or_default()
+                    .iter()
+                    .filter_map(|event| serde_json::from_str(event.json().get()).ok())
+                    .filter_map(|event: serde_json::Value| event.get("sender").cloned())
+                    .filter_map(|sender| sender.as_str().map(|s| s.to_owned()))
+                    .filter_map(|sender| UserId::parse(sender).ok())
+                    .map(|user| user.server_name().to_owned()),
+            );
 
-            servers.insert(room_id.server_name().to_owned());
+            servers.push(room_id.server_name().to_owned());
 
             (servers, room_id)
         }
         Err(room_alias) => {
@@ -413,7 +415,8 @@ pub async fn get_member_events_route(
     Ok(get_member_events::v3::Response {
         chunk: db
             .rooms
-            .room_state_full(&body.room_id)?
+            .room_state_full(&body.room_id)
+            .await?
             .iter()
             .filter(|(key, _)| key.0 == StateEventType::RoomMember)
             .map(|(_, pdu)| pdu.to_member_event().into())
@@ -462,7 +465,7 @@ async fn join_room_by_id_helper(
     db: &Database,
     sender_user: Option<&UserId>,
     room_id: &RoomId,
-    servers: &HashSet<Box<ServerName>>,
+    servers: &[Box<ServerName>],
     _third_party_signed: Option<&IncomingThirdPartySigned>,
 ) -> Result<join_room_by_id::v3::Response> {
     let sender_user = sender_user.expect("user is authenticated");
@@ -478,7 +481,7 @@ async fn join_room_by_id_helper(
     let state_lock = mutex_state.lock().await;
 
     // Ask a remote server if we don't have this room
-    if !db.rooms.exists(room_id)? && room_id.server_name() != db.globals.server_name() {
+    if !db.rooms.exists(room_id)? {
         let mut make_join_response_and_server = Err(Error::BadServerResponse(
             "No server available to assist in joining.",
         ));
@@ -1032,6 +1035,13 @@ pub(crate) async fn invite_helper<'a>(
         return Ok(());
     }
 
+    if !db.rooms.is_joined(sender_user, &room_id)? {
+        return Err(Error::BadRequest(
+            ErrorKind::Forbidden,
+            "You don't have permission to view this room.",
+        ));
+    }
+
     let mutex_state = Arc::clone(
         db.globals
             .roomid_mutex_state

View file

@@ -124,7 +124,8 @@ pub async fn get_state_events_route(
     Ok(get_state_events::v3::Response {
         room_state: db
            .rooms
-            .room_state_full(&body.room_id)?
+            .room_state_full(&body.room_id)
+            .await?
            .values()
            .map(|pdu| pdu.to_state_event())
            .collect(),

View file

@@ -230,18 +230,20 @@ async fn sync_helper(
     for room_id in all_joined_rooms {
         let room_id = room_id?;
 
-        // Get and drop the lock to wait for remaining operations to finish
-        // This will make sure the we have all events until next_batch
-        let mutex_insert = Arc::clone(
-            db.globals
-                .roomid_mutex_insert
-                .write()
-                .unwrap()
-                .entry(room_id.clone())
-                .or_default(),
-        );
-        let insert_lock = mutex_insert.lock().unwrap();
-        drop(insert_lock);
+        {
+            // Get and drop the lock to wait for remaining operations to finish
+            // This will make sure the we have all events until next_batch
+            let mutex_insert = Arc::clone(
+                db.globals
+                    .roomid_mutex_insert
+                    .write()
+                    .unwrap()
+                    .entry(room_id.clone())
+                    .or_default(),
+            );
+            let insert_lock = mutex_insert.lock().unwrap();
+            drop(insert_lock);
+        }
 
         let timeline_pdus;
         let limited;
@@ -296,10 +298,12 @@ async fn sync_helper(
 
         // Database queries:
 
-        let current_shortstatehash = db
-            .rooms
-            .current_shortstatehash(&room_id)?
-            .expect("All rooms have state");
+        let current_shortstatehash = if let Some(s) = db.rooms.current_shortstatehash(&room_id)? {
+            s
+        } else {
+            error!("Room {} has no state", room_id);
+            continue;
+        };
 
         let since_shortstatehash = db.rooms.get_token_shortstatehash(&room_id, since)?;
@@ -377,11 +381,12 @@ async fn sync_helper(
                 let (joined_member_count, invited_member_count, heroes) = calculate_counts()?;
 
-                let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?;
+                let current_state_ids = db.rooms.state_full_ids(current_shortstatehash).await?;
 
                 let mut state_events = Vec::new();
                 let mut lazy_loaded = HashSet::new();
 
+                let mut i = 0;
                 for (shortstatekey, id) in current_state_ids {
                     let (event_type, state_key) = db.rooms.get_statekey_from_short(shortstatekey)?;
@@ -394,6 +399,11 @@ async fn sync_helper(
                             }
                         };
                         state_events.push(pdu);
+
+                        i += 1;
+                        if i % 100 == 0 {
+                            tokio::task::yield_now().await;
+                        }
                     } else if !lazy_load_enabled
                         || body.full_state
                         || timeline_users.contains(&state_key)
@@ -411,6 +421,11 @@ async fn sync_helper(
                             lazy_loaded.insert(uid);
                         }
                         state_events.push(pdu);
+
+                        i += 1;
+                        if i % 100 == 0 {
+                            tokio::task::yield_now().await;
+                        }
                     }
                 }
@@ -462,8 +477,8 @@ async fn sync_helper(
                 let mut lazy_loaded = HashSet::new();
 
                 if since_shortstatehash != current_shortstatehash {
-                    let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?;
-                    let since_state_ids = db.rooms.state_full_ids(since_shortstatehash)?;
+                    let current_state_ids = db.rooms.state_full_ids(current_shortstatehash).await?;
+                    let since_state_ids = db.rooms.state_full_ids(since_shortstatehash).await?;
 
                     for (key, id) in current_state_ids {
                         if body.full_state || since_state_ids.get(&key) != Some(&id) {
@@ -490,6 +505,7 @@ async fn sync_helper(
                             }
                             state_events.push(pdu);
+                            tokio::task::yield_now().await;
                         }
                     }
                 }
@@ -753,17 +769,19 @@ async fn sync_helper(
     for result in all_left_rooms {
         let (room_id, left_state_events) = result?;
 
-        // Get and drop the lock to wait for remaining operations to finish
-        let mutex_insert = Arc::clone(
-            db.globals
-                .roomid_mutex_insert
-                .write()
-                .unwrap()
-                .entry(room_id.clone())
-                .or_default(),
-        );
-        let insert_lock = mutex_insert.lock().unwrap();
-        drop(insert_lock);
+        {
+            // Get and drop the lock to wait for remaining operations to finish
+            let mutex_insert = Arc::clone(
+                db.globals
+                    .roomid_mutex_insert
+                    .write()
+                    .unwrap()
+                    .entry(room_id.clone())
+                    .or_default(),
+            );
+            let insert_lock = mutex_insert.lock().unwrap();
+            drop(insert_lock);
+        }
 
         let left_count = db.rooms.get_left_count(&room_id, &sender_user)?;
@@ -793,17 +811,19 @@ async fn sync_helper(
     for result in all_invited_rooms {
         let (room_id, invite_state_events) = result?;
 
-        // Get and drop the lock to wait for remaining operations to finish
-        let mutex_insert = Arc::clone(
-            db.globals
-                .roomid_mutex_insert
-                .write()
-                .unwrap()
-                .entry(room_id.clone())
-                .or_default(),
-        );
-        let insert_lock = mutex_insert.lock().unwrap();
-        drop(insert_lock);
+        {
+            // Get and drop the lock to wait for remaining operations to finish
+            let mutex_insert = Arc::clone(
+                db.globals
+                    .roomid_mutex_insert
+                    .write()
+                    .unwrap()
+                    .entry(room_id.clone())
+                    .or_default(),
+            );
+            let insert_lock = mutex_insert.lock().unwrap();
+            drop(insert_lock);
+        }
 
         let invite_count = db.rooms.get_invite_count(&room_id, &sender_user)?;

View file

@@ -116,7 +116,7 @@ impl Admin {
                            send_message(content, guard, &state_lock);
                        }
                        AdminRoomEvent::ProcessMessage(room_message) => {
-                            let reply_message = process_admin_message(&*guard, room_message);
+                            let reply_message = process_admin_message(&*guard, room_message).await;
 
                            send_message(reply_message, guard, &state_lock);
                        }
@@ -143,7 +143,7 @@ impl Admin {
 }
 
 // Parse and process a message from the admin room
-fn process_admin_message(db: &Database, room_message: String) -> RoomMessageEventContent {
+async fn process_admin_message(db: &Database, room_message: String) -> RoomMessageEventContent {
     let mut lines = room_message.lines();
     let command_line = lines.next().expect("each string has at least one line");
     let body: Vec<_> = lines.collect();
@@ -161,7 +161,7 @@ fn process_admin_message(db: &Database, room_message: String) -> RoomMessageEven
        }
    };
 
-    match process_admin_command(db, admin_command, body) {
+    match process_admin_command(db, admin_command, body).await {
        Ok(reply_message) => reply_message,
        Err(error) => {
            let markdown_message = format!(
@@ -290,7 +290,7 @@ enum AdminCommand {
    EnableRoom { room_id: Box<RoomId> },
 }
 
-fn process_admin_command(
+async fn process_admin_command(
    db: &Database,
    command: AdminCommand,
    body: Vec<&str>,
@@ -404,7 +404,9 @@ fn process_admin_command(
                    Error::bad_database("Invalid room id field in event in database")
                })?;
                let start = Instant::now();
-                let count = server_server::get_auth_chain(room_id, vec![event_id], db)?.count();
+                let count = server_server::get_auth_chain(room_id, vec![event_id], db)
+                    .await?
+                    .count();
                let elapsed = start.elapsed();
                RoomMessageEventContent::text_plain(format!(
                    "Loaded auth chain with length {} in {:?}",

View file

@@ -144,20 +144,28 @@ impl Rooms {
     /// Builds a StateMap by iterating over all keys that start
     /// with state_hash, this gives the full state for the given state_hash.
     #[tracing::instrument(skip(self))]
-    pub fn state_full_ids(&self, shortstatehash: u64) -> Result<BTreeMap<u64, Arc<EventId>>> {
+    pub async fn state_full_ids(&self, shortstatehash: u64) -> Result<BTreeMap<u64, Arc<EventId>>> {
         let full_state = self
             .load_shortstatehash_info(shortstatehash)?
             .pop()
             .expect("there is always one layer")
             .1;
-        full_state
-            .into_iter()
-            .map(|compressed| self.parse_compressed_state_event(compressed))
-            .collect()
+        let mut result = BTreeMap::new();
+        let mut i = 0;
+        for compressed in full_state.into_iter() {
+            let parsed = self.parse_compressed_state_event(compressed)?;
+            result.insert(parsed.0, parsed.1);
+
+            i += 1;
+            if i % 100 == 0 {
+                tokio::task::yield_now().await;
+            }
+        }
+
+        Ok(result)
     }
 
     #[tracing::instrument(skip(self))]
-    pub fn state_full(
+    pub async fn state_full(
         &self,
         shortstatehash: u64,
     ) -> Result<HashMap<(StateEventType, String), Arc<PduEvent>>> {
@@ -166,14 +174,13 @@ impl Rooms {
             .pop()
             .expect("there is always one layer")
             .1;
-        Ok(full_state
-            .into_iter()
-            .map(|compressed| self.parse_compressed_state_event(compressed))
-            .filter_map(|r| r.ok())
-            .map(|(_, eventid)| self.get_pdu(&eventid))
-            .filter_map(|r| r.ok().flatten())
-            .map(|pdu| {
-                Ok::<_, Error>((
+
+        let mut result = HashMap::new();
+        let mut i = 0;
+        for compressed in full_state {
+            let (_, eventid) = self.parse_compressed_state_event(compressed)?;
+            if let Some(pdu) = self.get_pdu(&eventid)? {
+                result.insert(
                     (
                         pdu.kind.to_string().into(),
                         pdu.state_key
@@ -182,10 +189,16 @@ impl Rooms {
                             .clone(),
                     ),
                     pdu,
-                ))
-            })
-            .filter_map(|r| r.ok())
-            .collect())
+                );
+            }
+
+            i += 1;
+            if i % 100 == 0 {
+                tokio::task::yield_now().await;
+            }
+        }
+
+        Ok(result)
     }
 
     /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`).
@@ -228,7 +241,6 @@ impl Rooms {
     }
 
     /// Returns the state hash for this pdu.
-    #[tracing::instrument(skip(self))]
     pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result<Option<u64>> {
         self.eventid_shorteventid
             .get(event_id.as_bytes())?
@@ -531,7 +543,6 @@ impl Rooms {
         }
     }
 
-    #[tracing::instrument(skip(self, globals))]
     pub fn compress_state_event(
         &self,
         shortstatekey: u64,
@@ -548,7 +559,6 @@ impl Rooms {
     }
 
     /// Returns shortstatekey, event id
-    #[tracing::instrument(skip(self, compressed_event))]
     pub fn parse_compressed_state_event(
         &self,
         compressed_event: CompressedStateEvent,
@@ -707,7 +717,6 @@ impl Rooms {
     }
 
     /// Returns (shortstatehash, already_existed)
-    #[tracing::instrument(skip(self, globals))]
     fn get_or_create_shortstatehash(
         &self,
         state_hash: &StateHashId,
@@ -728,7 +737,6 @@ impl Rooms {
         })
     }
 
-    #[tracing::instrument(skip(self, globals))]
     pub fn get_or_create_shorteventid(
         &self,
         event_id: &EventId,
@@ -759,7 +767,6 @@ impl Rooms {
         Ok(short)
     }
 
-    #[tracing::instrument(skip(self))]
     pub fn get_shortroomid(&self, room_id: &RoomId) -> Result<Option<u64>> {
         self.roomid_shortroomid
             .get(room_id.as_bytes())?
@@ -770,7 +777,6 @@ impl Rooms {
             .transpose()
     }
 
-    #[tracing::instrument(skip(self))]
     pub fn get_shortstatekey(
         &self,
         event_type: &StateEventType,
@@ -808,7 +814,6 @@ impl Rooms {
         Ok(short)
     }
 
-    #[tracing::instrument(skip(self, globals))]
     pub fn get_or_create_shortroomid(
         &self,
         room_id: &RoomId,
@@ -826,7 +831,6 @@ impl Rooms {
         })
     }
 
-    #[tracing::instrument(skip(self, globals))]
     pub fn get_or_create_shortstatekey(
         &self,
         event_type: &StateEventType,
@@ -867,7 +871,6 @@ impl Rooms {
         Ok(short)
     }
 
-    #[tracing::instrument(skip(self))]
     pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result<Arc<EventId>> {
         if let Some(id) = self
             .shorteventid_cache
@@ -896,7 +899,6 @@ impl Rooms {
         Ok(event_id)
     }
 
-    #[tracing::instrument(skip(self))]
     pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> {
         if let Some(id) = self
             .shortstatekey_cache
@@ -940,12 +942,12 @@ impl Rooms {
 
     /// Returns the full room state.
     #[tracing::instrument(skip(self))]
-    pub fn room_state_full(
+    pub async fn room_state_full(
         &self,
         room_id: &RoomId,
     ) -> Result<HashMap<(StateEventType, String), Arc<PduEvent>>> {
         if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? {
-            self.state_full(current_shortstatehash)
+            self.state_full(current_shortstatehash).await
         } else {
             Ok(HashMap::new())
         }
@@ -982,14 +984,12 @@ impl Rooms {
     }
 
     /// Returns the `count` of this pdu's id.
-    #[tracing::instrument(skip(self))]
     pub fn pdu_count(&self, pdu_id: &[u8]) -> Result<u64> {
         utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::<u64>()..])
             .map_err(|_| Error::bad_database("PDU has invalid count bytes."))
     }
 
     /// Returns the `count` of this pdu's id.
-    #[tracing::instrument(skip(self))]
     pub fn get_pdu_count(&self, event_id: &EventId) -> Result<Option<u64>> {
         self.eventid_pduid
             .get(event_id.as_bytes())?
@@ -1018,7 +1018,6 @@ impl Rooms {
     }
 
     /// Returns the json of a pdu.
-    #[tracing::instrument(skip(self))]
     pub fn get_pdu_json(&self, event_id: &EventId) -> Result<Option<CanonicalJsonObject>> {
         self.eventid_pduid
             .get(event_id.as_bytes())?
@@ -1037,7 +1036,6 @@ impl Rooms {
     }
 
     /// Returns the json of a pdu.
-    #[tracing::instrument(skip(self))]
     pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result<Option<CanonicalJsonObject>> {
         self.eventid_outlierpdu
             .get(event_id.as_bytes())?
@@ -1048,7 +1046,6 @@ impl Rooms {
     }
 
     /// Returns the json of a pdu.
-    #[tracing::instrument(skip(self))]
     pub fn get_non_outlier_pdu_json(
         &self,
         event_id: &EventId,
@@ -1068,7 +1065,6 @@ impl Rooms {
     }
 
     /// Returns the pdu's id.
-    #[tracing::instrument(skip(self))]
     pub fn get_pdu_id(&self, event_id: &EventId) -> Result<Option<Vec<u8>>> {
         self.eventid_pduid.get(event_id.as_bytes())
     }
@@ -1076,7 +1072,6 @@ impl Rooms {
     /// Returns the pdu.
     ///
     /// Checks the `eventid_outlierpdu` Tree if not found in the timeline.
-    #[tracing::instrument(skip(self))]
     pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result<Option<PduEvent>> {
         self.eventid_pduid
             .get(event_id.as_bytes())?
@@ -1095,7 +1090,6 @@ impl Rooms {
     /// Returns the pdu.
     ///
     /// Checks the `eventid_outlierpdu` Tree if not found in the timeline.
-    #[tracing::instrument(skip(self))]
     pub fn get_pdu(&self, event_id: &EventId) -> Result<Option<Arc<PduEvent>>> {
         if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) {
             return Ok(Some(Arc::clone(p)));
@@ -1132,7 +1126,6 @@ impl Rooms {
     /// Returns the pdu.
     ///
     /// This does __NOT__ check the outliers `Tree`.
-    #[tracing::instrument(skip(self))]
     pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result<Option<PduEvent>> {
         self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| {
             Ok(Some(
@@ -1143,7 +1136,6 @@ impl Rooms {
     }
 
     /// Returns the pdu as a `BTreeMap<String, CanonicalJsonValue>`.
-    #[tracing::instrument(skip(self))]
     pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result<Option<CanonicalJsonObject>> {
         self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| {
             Ok(Some(
@@ -1232,7 +1224,6 @@ impl Rooms {
     }
 
     /// Returns the pdu from the outlier tree.
-    #[tracing::instrument(skip(self))]
     pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result<Option<PduEvent>> {
         self.eventid_outlierpdu
             .get(event_id.as_bytes())?

View file

@@ -691,7 +691,7 @@ pub async fn send_transaction_message_route(
                    .roomid_mutex_federation
                    .write()
                    .unwrap()
-                    .entry(room_id.clone())
+                    .entry(room_id.to_owned())
                    .or_default(),
            );
            let mutex_lock = mutex.lock().await;
@@ -1054,6 +1054,25 @@ pub(crate) async fn handle_incoming_pdu<'a>(
                }
            }
 
+            if let Some((time, tries)) = db
+                .globals
+                .bad_event_ratelimiter
+                .read()
+                .unwrap()
+                .get(&*prev_id)
+            {
+                // Exponential backoff
+                let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries);
+                if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
+                    min_elapsed_duration = Duration::from_secs(60 * 60 * 24);
+                }
+
+                if time.elapsed() < min_elapsed_duration {
+                    info!("Backing off from {}", prev_id);
+                    continue;
+                }
+            }
+
            if errors >= 5 {
                break;
            }
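
The ratelimiter consulted above stores an (Instant, tries) pair per failing prev event (see the matching write further down), and the backoff window grows as 5 minutes times the square of the retry count, capped at 24 hours. A small sketch of that window calculation (illustrative only; backoff_window is a made-up helper, not a function in the codebase):

use std::time::Duration;

// Backoff window as computed in the hunk above: 5 min * tries^2, capped at 24 h.
fn backoff_window(tries: u32) -> Duration {
    let window = Duration::from_secs(5 * 60) * tries * tries;
    window.min(Duration::from_secs(60 * 60 * 24))
}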
@@ -1068,7 +1087,6 @@ pub(crate) async fn handle_incoming_pdu<'a>(
                .write()
                .unwrap()
                .insert(room_id.to_owned(), ((*prev_id).to_owned(), start_time));
-            let event_id = pdu.event_id.clone();
            if let Err(e) = upgrade_outlier_to_timeline_pdu(
                pdu,
                json,
@@ -1081,7 +1099,21 @@ pub(crate) async fn handle_incoming_pdu<'a>(
            .await
            {
                errors += 1;
-                warn!("Prev event {} failed: {}", event_id, e);
+                warn!("Prev event {} failed: {}", prev_id, e);
+                match db
+                    .globals
+                    .bad_event_ratelimiter
+                    .write()
+                    .unwrap()
+                    .entry((*prev_id).to_owned())
+                {
+                    hash_map::Entry::Vacant(e) => {
+                        e.insert((Instant::now(), 1));
+                    }
+                    hash_map::Entry::Occupied(mut e) => {
+                        *e.get_mut() = (Instant::now(), e.get().1 + 1)
+                    }
+                }
            }
            let elapsed = start_time.elapsed();
            db.globals
@@ -1091,7 +1123,7 @@ pub(crate) async fn handle_incoming_pdu<'a>(
                .remove(&room_id.to_owned());
            warn!(
                "Handling prev event {} took {}m{}s",
-                event_id,
+                prev_id,
                elapsed.as_secs() / 60,
                elapsed.as_secs() % 60
            );
@@ -1321,8 +1353,11 @@ async fn upgrade_outlier_to_timeline_pdu(
        .pdu_shortstatehash(prev_event)
        .map_err(|_| "Failed talking to db".to_owned())?;
 
-    let state =
-        prev_event_sstatehash.map(|shortstatehash| db.rooms.state_full_ids(shortstatehash));
+    let state = if let Some(shortstatehash) = prev_event_sstatehash {
+        Some(db.rooms.state_full_ids(shortstatehash).await)
+    } else {
+        None
+    };
 
    if let Some(Ok(mut state)) = state {
        info!("Using cached state");
@@ -1378,6 +1413,7 @@ async fn upgrade_outlier_to_timeline_pdu(
            let mut leaf_state: BTreeMap<_, _> = db
                .rooms
                .state_full_ids(sstatehash)
+                .await
                .map_err(|_| "Failed to ask db for room state.".to_owned())?;
 
            if let Some(state_key) = &prev_event.state_key {
@@ -1409,6 +1445,7 @@ async fn upgrade_outlier_to_timeline_pdu(
                auth_chain_sets.push(
                    get_auth_chain(room_id, starting_events, db)
+                        .await
                        .map_err(|_| "Failed to load auth chain.".to_owned())?
                        .collect(),
                );
@@ -1535,6 +1572,7 @@ async fn upgrade_outlier_to_timeline_pdu(
    let state_at_incoming_event =
        state_at_incoming_event.expect("we always set this to some above");
 
+    info!("Starting auth check");
    // 11. Check the auth of the event passes based on the state of the event
    let check_result = state_res::event_auth::auth_check(
        &room_version,
@@ -1554,7 +1592,7 @@ async fn upgrade_outlier_to_timeline_pdu(
    if !check_result {
        return Err("Event has failed auth check with state at the event.".into());
    }
-    info!("Auth check succeeded.");
+    info!("Auth check succeeded");
 
    // We start looking at current room state now, so lets lock the room
@@ -1570,6 +1608,7 @@ async fn upgrade_outlier_to_timeline_pdu(
    // Now we calculate the set of extremities this room has after the incoming event has been
    // applied. We start with the previous extremities (aka leaves)
+    info!("Calculating extremities");
    let mut extremities = db
        .rooms
        .get_pdu_leaves(room_id)
@@ -1585,28 +1624,7 @@ async fn upgrade_outlier_to_timeline_pdu(
    // Only keep those extremities were not referenced yet
    extremities.retain(|id| !matches!(db.rooms.is_event_referenced(room_id, id), Ok(true)));
 
-    let current_sstatehash = db
-        .rooms
-        .current_shortstatehash(room_id)
-        .map_err(|_| "Failed to load current state hash.".to_owned())?
-        .expect("every room has state");
-
-    let current_state_ids = db
-        .rooms
-        .state_full_ids(current_sstatehash)
-        .map_err(|_| "Failed to load room state.")?;
-
-    let auth_events = db
-        .rooms
-        .get_auth_events(
-            room_id,
-            &incoming_pdu.kind,
-            &incoming_pdu.sender,
-            incoming_pdu.state_key.as_deref(),
-            &incoming_pdu.content,
-        )
-        .map_err(|_| "Failed to get_auth_events.".to_owned())?;
+    info!("Compressing state at event");
    let state_ids_compressed = state_at_incoming_event
        .iter()
        .map(|(shortstatekey, id)| {
@@ -1619,6 +1637,17 @@ async fn upgrade_outlier_to_timeline_pdu(
    // 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it
    info!("Starting soft fail auth check");
 
+    let auth_events = db
+        .rooms
+        .get_auth_events(
+            room_id,
+            &incoming_pdu.kind,
+            &incoming_pdu.sender,
+            incoming_pdu.state_key.as_deref(),
+            &incoming_pdu.content,
+        )
+        .map_err(|_| "Failed to get_auth_events.".to_owned())?;
+
    let soft_fail = !state_res::event_auth::auth_check(
        &room_version,
        &incoming_pdu,
@@ -1651,6 +1680,19 @@ async fn upgrade_outlier_to_timeline_pdu(
    }
 
    if incoming_pdu.state_key.is_some() {
+        info!("Loading current room state ids");
+        let current_sstatehash = db
+            .rooms
+            .current_shortstatehash(room_id)
+            .map_err(|_| "Failed to load current state hash.".to_owned())?
+            .expect("every room has state");
+
+        let current_state_ids = db
+            .rooms
+            .state_full_ids(current_sstatehash)
+            .await
+            .map_err(|_| "Failed to load room state.")?;
+
        info!("Preparing for stateres to derive new room state");
        let mut extremity_sstatehashes = HashMap::new();
@@ -1738,6 +1780,7 @@ async fn upgrade_outlier_to_timeline_pdu(
                    state.iter().map(|(_, id)| id.clone()).collect(),
                    db,
                )
+                .await
                .map_err(|_| "Failed to load auth chain.".to_owned())?
                .collect(),
            );
@@ -1899,11 +1942,17 @@ pub(crate) fn fetch_and_handle_outliers<'a>(
                let mut todo_auth_events = vec![Arc::clone(id)];
                let mut events_in_reverse_order = Vec::new();
                let mut events_all = HashSet::new();
+                let mut i = 0;
                while let Some(next_id) = todo_auth_events.pop() {
                    if events_all.contains(&next_id) {
                        continue;
                    }
 
+                    i += 1;
+                    if i % 100 == 0 {
+                        tokio::task::yield_now().await;
+                    }
+
                    if let Ok(Some(_)) = db.rooms.get_pdu(&next_id) {
                        trace!("Found {} in db", id);
                        continue;
@@ -2242,7 +2291,7 @@ fn append_incoming_pdu<'a>(
 }
 
 #[tracing::instrument(skip(starting_events, db))]
-pub(crate) fn get_auth_chain<'a>(
+pub(crate) async fn get_auth_chain<'a>(
    room_id: &RoomId,
    starting_events: Vec<Arc<EventId>>,
    db: &'a Database,
@@ -2251,10 +2300,15 @@ pub(crate) fn get_auth_chain<'a>(
    let mut buckets = vec![BTreeSet::new(); NUM_BUCKETS];
 
+    let mut i = 0;
    for id in starting_events {
        let short = db.rooms.get_or_create_shorteventid(&id, &db.globals)?;
        let bucket_id = (short % NUM_BUCKETS as u64) as usize;
        buckets[bucket_id].insert((short, id.clone()));
+
+        i += 1;
+        if i % 100 == 0 {
+            tokio::task::yield_now().await;
+        }
    }
 
    let mut full_auth_chain = HashSet::new();
@@ -2277,6 +2331,7 @@ pub(crate) fn get_auth_chain<'a>(
            let mut chunk_cache = HashSet::new();
            let mut hits2 = 0;
            let mut misses2 = 0;
+            let mut i = 0;
            for (sevent_id, event_id) in chunk {
                if let Some(cached) = db.rooms.get_auth_chain_from_cache(&[sevent_id])? {
                    hits2 += 1;
@@ -2292,6 +2347,11 @@ pub(crate) fn get_auth_chain<'a>(
                        auth_chain.len()
                    );
                    chunk_cache.extend(auth_chain.iter());
+
+                    i += 1;
+                    if i % 100 == 0 {
+                        tokio::task::yield_now().await;
+                    }
                };
            }
            println!(
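
For context on the hunks above: get_auth_chain groups the starting events into NUM_BUCKETS buckets keyed by shorteventid % NUM_BUCKETS, so each bucket can be looked up in and written back to the auth-chain cache as a unit; the new counters only add yield points inside those loops. A rough sketch of the bucketing step (illustrative only; the bucket count of 50 and the helper name are assumptions, not taken from this diff):

use std::collections::BTreeSet;

// Assumed bucket count, for illustration only.
const NUM_BUCKETS: usize = 50;

// Group (shorteventid, event_id) pairs by shorteventid modulo NUM_BUCKETS so
// each bucket can be cached and reused as one unit.
fn bucket_events(events: Vec<(u64, String)>) -> Vec<BTreeSet<(u64, String)>> {
    let mut buckets = vec![BTreeSet::new(); NUM_BUCKETS];
    for (short, id) in events {
        let bucket_id = (short % NUM_BUCKETS as u64) as usize;
        buckets[bucket_id].insert((short, id));
    }
    buckets
}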
@@ -2512,7 +2572,7 @@ pub async fn get_event_authorization_route(
    let room_id = <&RoomId>::try_from(room_id_str)
        .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?;
 
-    let auth_chain_ids = get_auth_chain(room_id, vec![Arc::from(&*body.event_id)], &db)?;
+    let auth_chain_ids = get_auth_chain(room_id, vec![Arc::from(&*body.event_id)], &db).await?;
 
    Ok(get_event_authorization::v1::Response {
        auth_chain: auth_chain_ids
@@ -2557,7 +2617,8 @@ pub async fn get_room_state_route(
    let pdus = db
        .rooms
-        .state_full_ids(shortstatehash)?
+        .state_full_ids(shortstatehash)
+        .await?
        .into_iter()
        .map(|(_, id)| {
            PduEvent::convert_to_outgoing_federation_event(
@@ -2566,7 +2627,8 @@ pub async fn get_room_state_route(
        })
        .collect();
 
-    let auth_chain_ids = get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db)?;
+    let auth_chain_ids =
+        get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db).await?;
 
    Ok(get_room_state::v1::Response {
        auth_chain: auth_chain_ids
@@ -2616,12 +2678,14 @@ pub async fn get_room_state_ids_route(
    let pdu_ids = db
        .rooms
-        .state_full_ids(shortstatehash)?
+        .state_full_ids(shortstatehash)
+        .await?
        .into_iter()
        .map(|(_, id)| (*id).to_owned())
        .collect();
 
-    let auth_chain_ids = get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db)?;
+    let auth_chain_ids =
+        get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db).await?;
 
    Ok(get_room_state_ids::v1::Response {
        auth_chain_ids: auth_chain_ids.map(|id| (*id).to_owned()).collect(),
@@ -2927,12 +2991,13 @@ async fn create_join_event(
        ))?;
    drop(mutex_lock);
 
-    let state_ids = db.rooms.state_full_ids(shortstatehash)?;
+    let state_ids = db.rooms.state_full_ids(shortstatehash).await?;
    let auth_chain_ids = get_auth_chain(
        room_id,
        state_ids.iter().map(|(_, id)| id.clone()).collect(),
        db,
-    )?;
+    )
+    .await?;
 
    let servers = db
        .rooms