use crate::{client_server, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma};
use get_profile_information::v1::ProfileField;
use http::header::{HeaderValue, AUTHORIZATION, HOST};
use log::{debug, error, info, warn};
use regex::Regex;
use rocket::{response::content::Json, State};
use ruma::{
    api::{
        client::error::ErrorKind,
        federation::{
            device::get_devices::{self, v1::UserDevice},
            directory::{get_public_rooms, get_public_rooms_filtered},
            discovery::{
                get_remote_server_keys, get_server_keys, get_server_version, ServerSigningKeys,
                VerifyKey,
            },
            event::{get_event, get_missing_events, get_room_state_ids},
            membership::{
                create_invite,
                create_join_event::{self, RoomState},
                create_join_event_template,
            },
            query::{get_profile_information, get_room_information},
            transactions::{edu::Edu, send_transaction_message},
        },
        IncomingResponse, OutgoingRequest, OutgoingResponse, SendAccessToken,
    },
    directory::{IncomingFilter, IncomingRoomNetwork},
    events::{
        room::{
            create::CreateEventContent,
            member::{MemberEventContent, MembershipState},
        },
        EventType,
    },
    serde::{to_canonical_value, Raw},
    signatures::{CanonicalJsonObject, CanonicalJsonValue},
    uint, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId,
};
use state_res::{Event, EventMap, StateMap};
use std::{
    collections::{btree_map::Entry, BTreeMap, BTreeSet, HashSet},
    convert::{TryFrom, TryInto},
    fmt::Debug,
    future::Future,
    net::{IpAddr, SocketAddr},
    pin::Pin,
    result::Result as StdResult,
    sync::{Arc, RwLock},
    time::{Duration, SystemTime},
};

#[cfg(feature = "conduit_bin")]
use rocket::{get, post, put};

/// Wraps either a literal IP address plus port, or a hostname plus complement
/// (colon-plus-port if it was specified).
///
/// Note: A `FedDest::Named` might contain an IP address in string form if there
/// was no port specified to construct a SocketAddr with.
///
/// # Examples:
/// ```rust,ignore
/// FedDest::Literal("198.51.100.3:8448".parse()?);
/// FedDest::Literal("[2001:db8::4:5]:443".parse()?);
/// FedDest::Named("matrix.example.org".to_owned(), "".to_owned());
/// FedDest::Named("matrix.example.org".to_owned(), ":8448".to_owned());
/// FedDest::Named("198.51.100.5".to_owned(), "".to_owned());
/// ```
#[derive(Clone, Debug, PartialEq)]
enum FedDest {
    Literal(SocketAddr),
    Named(String, String),
}

impl FedDest {
    fn into_https_string(self) -> String {
        match self {
            Self::Literal(addr) => format!("https://{}", addr),
            Self::Named(host, port) => format!("https://{}{}", host, port),
        }
    }

    fn into_uri_string(self) -> String {
        match self {
            Self::Literal(addr) => addr.to_string(),
            Self::Named(host, ref port) => host + port,
        }
    }

    fn hostname(&self) -> String {
        match &self {
            Self::Literal(addr) => addr.ip().to_string(),
            Self::Named(host, _) => host.clone(),
        }
    }
}

#[tracing::instrument(skip(globals))]
pub async fn send_request<T: OutgoingRequest>(
    globals: &crate::database::globals::Globals,
    destination: &ServerName,
    request: T,
) -> Result<T::IncomingResponse>
where
    T: Debug,
{
    if !globals.allow_federation() {
        return Err(Error::bad_config("Federation is disabled."));
    }

    let maybe_result = globals
        .actual_destination_cache
        .read()
        .unwrap()
        .get(destination)
        .cloned();

    let (actual_destination, host) = if let Some(result) = maybe_result {
        result
    } else {
        let result = find_actual_destination(globals, &destination).await;
        let (actual_destination, host) = result.clone();
        let result_string = (result.0.into_https_string(), result.1.into_uri_string());
        globals
            .actual_destination_cache
            .write()
            .unwrap()
            .insert(Box::<ServerName>::from(destination), result_string.clone());
        let dest_hostname = actual_destination.hostname();
        let host_hostname = host.hostname();
        if dest_hostname != host_hostname {
            globals.tls_name_override.write().unwrap().insert(
                dest_hostname,
                webpki::DNSNameRef::try_from_ascii_str(&host_hostname)
                    .unwrap()
                    .to_owned(),
            );
        }
        result_string
    };

    let mut http_request = request
        .try_into_http_request::<Vec<u8>>(&actual_destination, SendAccessToken::IfRequired(""))
        .map_err(|e| {
            warn!("Failed to find destination {}: {}", actual_destination, e);
            Error::BadServerResponse("Invalid destination")
        })?;

    let mut request_map = serde_json::Map::new();

    if !http_request.body().is_empty() {
        request_map.insert(
            "content".to_owned(),
            serde_json::from_slice(http_request.body())
                .expect("body is valid json, we just created it"),
        );
    };

    request_map.insert("method".to_owned(), T::METADATA.method.to_string().into());
    request_map.insert(
        "uri".to_owned(),
        http_request
            .uri()
            .path_and_query()
            .expect("all requests have a path")
            .to_string()
            .into(),
    );
    request_map.insert("origin".to_owned(), globals.server_name().as_str().into());
    request_map.insert("destination".to_owned(), destination.as_str().into());

    let mut request_json =
        serde_json::from_value(request_map.into()).expect("valid JSON is valid BTreeMap");

    ruma::signatures::sign_json(
        globals.server_name().as_str(),
        globals.keypair(),
        &mut request_json,
    )
    .expect("our request json is what ruma expects");

    let request_json: serde_json::Map<String, serde_json::Value> =
        serde_json::from_slice(&serde_json::to_vec(&request_json).unwrap()).unwrap();

    let signatures = request_json["signatures"]
        .as_object()
        .unwrap()
        .values()
        .map(|v| {
            v.as_object()
                .unwrap()
                .iter()
                .map(|(k, v)| (k, v.as_str().unwrap()))
        });

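    // Each signature on the signed request JSON (normally exactly one: ours) is attached
    // as an `Authorization` header. Illustrative shape of the header value, with a
    // placeholder key name and signature:
    //   X-Matrix origin=example.org,key="ed25519:abc",sig="<base64 signature>"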
    for signature_server in signatures {
        for s in signature_server {
            http_request.headers_mut().insert(
                AUTHORIZATION,
                HeaderValue::from_str(&format!(
                    "X-Matrix origin={},key=\"{}\",sig=\"{}\"",
                    globals.server_name(),
                    s.0,
                    s.1
                ))
                .unwrap(),
            );
        }
    }

    http_request
        .headers_mut()
        .insert(HOST, HeaderValue::from_str(&host).unwrap());

    let mut reqwest_request = reqwest::Request::try_from(http_request)
        .expect("all http requests are valid reqwest requests");

    *reqwest_request.timeout_mut() = Some(Duration::from_secs(30));

    let url = reqwest_request.url().clone();
    let reqwest_response = globals.reqwest_client().execute(reqwest_request).await;

    // Because reqwest::Response -> http::Response is complicated:
    match reqwest_response {
        Ok(mut reqwest_response) => {
            let status = reqwest_response.status();
            let mut http_response = http::Response::builder().status(status);
            let headers = http_response.headers_mut().unwrap();

            for (k, v) in reqwest_response.headers_mut().drain() {
                if let Some(key) = k {
                    headers.insert(key, v);
                }
            }

            let status = reqwest_response.status();

            let body = reqwest_response.bytes().await.unwrap_or_else(|e| {
                warn!("server error {}", e);
                Vec::new().into()
            }); // TODO: handle timeout

            if status != 200 {
                info!(
                    "{} {}: {}",
                    url,
                    status,
                    String::from_utf8_lossy(&body)
                        .lines()
                        .collect::<Vec<_>>()
                        .join(" ")
                );
            }

            let response = T::IncomingResponse::try_from_http_response(
                http_response
                    .body(body)
                    .expect("reqwest body is valid http body"),
            );
            response.map_err(|_| Error::BadServerResponse("Server returned bad response."))
        }
        Err(e) => Err(e.into()),
    }
}

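/// Checks if `destination_str` is already an IP literal, with or without an explicit
/// port, and turns it into a `FedDest::Literal` (defaulting to port 8448).
///
/// A minimal sketch of the expected mapping (illustrative, not a compiled doctest):
/// ```rust,ignore
/// assert_eq!(
///     get_ip_with_port("198.51.100.3"),
///     Some(FedDest::Literal("198.51.100.3:8448".parse().unwrap()))
/// );
/// assert_eq!(
///     get_ip_with_port("[2001:db8::4:5]:443"),
///     Some(FedDest::Literal("[2001:db8::4:5]:443".parse().unwrap()))
/// );
/// assert_eq!(get_ip_with_port("example.com"), None);
/// ```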
#[tracing::instrument]
fn get_ip_with_port(destination_str: &str) -> Option<FedDest> {
    if let Ok(destination) = destination_str.parse::<SocketAddr>() {
        Some(FedDest::Literal(destination))
    } else if let Ok(ip_addr) = destination_str.parse::<IpAddr>() {
        Some(FedDest::Literal(SocketAddr::new(ip_addr, 8448)))
    } else {
        None
    }
}

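/// Splits a hostname into host and port, appending the default federation port
/// `:8448` when no port was given.
///
/// A minimal sketch of the expected mapping (illustrative, not a compiled doctest):
/// ```rust,ignore
/// assert_eq!(
///     add_port_to_hostname("example.com"),
///     FedDest::Named("example.com".to_owned(), ":8448".to_owned())
/// );
/// assert_eq!(
///     add_port_to_hostname("example.com:1337"),
///     FedDest::Named("example.com".to_owned(), ":1337".to_owned())
/// );
/// ```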
#[tracing::instrument]
fn add_port_to_hostname(destination_str: &str) -> FedDest {
    let (host, port) = match destination_str.find(':') {
        None => (destination_str, ":8448"),
        Some(pos) => destination_str.split_at(pos),
    };
    FedDest::Named(host.to_string(), port.to_string())
}

/// Returns: actual_destination, host header
/// Implemented according to the specification at https://matrix.org/docs/spec/server_server/r0.1.4#resolving-server-names
/// Numbers in comments below refer to bullet points in linked section of specification
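///
/// An illustrative walk-through, assuming `example.com` has no explicit port, no
/// .well-known delegation and no SRV record: the result is
/// `(FedDest::Named("example.com", ":8448"), FedDest::Named("example.com", ""))`,
/// i.e. requests go to `https://example.com:8448` with a `Host: example.com` header.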
#[tracing::instrument(skip(globals))]
async fn find_actual_destination(
    globals: &crate::database::globals::Globals,
    destination: &'_ ServerName,
) -> (FedDest, FedDest) {
    let destination_str = destination.as_str().to_owned();
    let mut hostname = destination_str.clone();
    let actual_destination = match get_ip_with_port(&destination_str) {
        Some(host_port) => {
            // 1: IP literal with provided or default port
            host_port
        }
        None => {
            if let Some(pos) = destination_str.find(':') {
                // 2: Hostname with included port
                let (host, port) = destination_str.split_at(pos);
                FedDest::Named(host.to_string(), port.to_string())
            } else {
                match request_well_known(globals, &destination.as_str()).await {
                    // 3: A .well-known file is available
                    Some(delegated_hostname) => {
                        hostname = delegated_hostname.clone();
                        match get_ip_with_port(&delegated_hostname) {
                            Some(host_and_port) => host_and_port, // 3.1: IP literal in .well-known file
                            None => {
                                if let Some(pos) = delegated_hostname.find(':') {
                                    // 3.2: Hostname with port in .well-known file
                                    let (host, port) = delegated_hostname.split_at(pos);
                                    FedDest::Named(host.to_string(), port.to_string())
                                } else {
                                    match query_srv_record(globals, &delegated_hostname).await {
                                        // 3.3: SRV lookup successful
                                        Some(hostname) => hostname,
                                        // 3.4: No SRV records, just use the hostname from .well-known
                                        None => add_port_to_hostname(&delegated_hostname),
                                    }
                                }
                            }
                        }
                    }
                    // 4: No .well-known or an error occurred
                    None => {
                        match query_srv_record(globals, &destination_str).await {
                            // 4: SRV record found
                            Some(hostname) => hostname,
                            // 5: No SRV record found
                            None => add_port_to_hostname(&destination_str),
                        }
                    }
                }
            }
        }
    };

    // Can't use get_ip_with_port here because we don't want to add a port
    // to an IP address if it wasn't specified
    let hostname = if let Ok(addr) = hostname.parse::<SocketAddr>() {
        FedDest::Literal(addr)
    } else if let Ok(addr) = hostname.parse::<IpAddr>() {
        FedDest::Named(addr.to_string(), "".to_string())
    } else if let Some(pos) = hostname.find(':') {
        let (host, port) = hostname.split_at(pos);
        FedDest::Named(host.to_string(), port.to_string())
    } else {
        FedDest::Named(hostname, "".to_string())
    };
    (actual_destination, hostname)
}

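/// Queries the SRV record `_matrix._tcp.<hostname>` and, if one exists, returns its
/// target and port as a `FedDest::Named`.
///
/// For example (illustrative), an SRV record pointing at `matrix.example.com` on port
/// 443 yields `FedDest::Named("matrix.example.com".to_owned(), ":443".to_owned())`.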
#[tracing::instrument(skip(globals))]
async fn query_srv_record(
    globals: &crate::database::globals::Globals,
    hostname: &'_ str,
) -> Option<FedDest> {
    if let Ok(Some(host_port)) = globals
        .dns_resolver()
        .srv_lookup(format!("_matrix._tcp.{}", hostname))
        .await
        .map(|srv| {
            srv.iter().next().map(|result| {
                FedDest::Named(
                    result
                        .target()
                        .to_string()
                        .trim_end_matches('.')
                        .to_string(),
                    format!(":{}", result.port()),
                )
            })
        })
    {
        Some(host_port)
    } else {
        None
    }
}

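/// Fetches `https://<destination>/.well-known/matrix/server` and returns the value of
/// its `m.server` field, if any.
///
/// For example (illustrative), a response body of `{"m.server": "matrix.example.com:443"}`
/// makes this function return `Some("matrix.example.com:443".to_owned())`.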
#[tracing::instrument(skip(globals))]
pub async fn request_well_known(
    globals: &crate::database::globals::Globals,
    destination: &str,
) -> Option<String> {
    let body: serde_json::Value = serde_json::from_str(
        &globals
            .reqwest_client()
            .get(&format!(
                "https://{}/.well-known/matrix/server",
                destination
            ))
            .send()
            .await
            .ok()?
            .text()
            .await
            .ok()?,
    )
    .ok()?;
    Some(body.get("m.server")?.as_str()?.to_owned())
}

#[cfg_attr(feature = "conduit_bin", get("/_matrix/federation/v1/version"))]
#[tracing::instrument(skip(db))]
pub fn get_server_version_route(
    db: State<'_, Database>,
) -> ConduitResult<get_server_version::v1::Response> {
    if !db.globals.allow_federation() {
        return Err(Error::bad_config("Federation is disabled."));
    }

    Ok(get_server_version::v1::Response {
        server: Some(get_server_version::v1::Server {
            name: Some("Conduit".to_owned()),
            version: Some(env!("CARGO_PKG_VERSION").to_owned()),
        }),
    }
    .into())
}

// Response type for this endpoint is Json because we need to calculate a signature for the response
#[cfg_attr(feature = "conduit_bin", get("/_matrix/key/v2/server"))]
#[tracing::instrument(skip(db))]
pub fn get_server_keys_route(db: State<'_, Database>) -> Json<String> {
    if !db.globals.allow_federation() {
        // TODO: Use proper types
        return Json("Federation is disabled.".to_owned());
    }

    let mut verify_keys = BTreeMap::new();
    verify_keys.insert(
        ServerSigningKeyId::try_from(
            format!("ed25519:{}", db.globals.keypair().version()).as_str(),
        )
        .expect("found invalid server signing keys in DB"),
        VerifyKey {
            key: base64::encode_config(db.globals.keypair().public_key(), base64::STANDARD_NO_PAD),
        },
    );
    let mut response = serde_json::from_slice(
        get_server_keys::v2::Response {
            server_key: ServerSigningKeys {
                server_name: db.globals.server_name().to_owned(),
                verify_keys,
                old_verify_keys: BTreeMap::new(),
                signatures: BTreeMap::new(),
                valid_until_ts: SystemTime::now() + Duration::from_secs(60 * 2),
            },
        }
        .try_into_http_response::<Vec<u8>>()
        .unwrap()
        .body(),
    )
    .unwrap();

    ruma::signatures::sign_json(
        db.globals.server_name().as_str(),
        db.globals.keypair(),
        &mut response,
    )
    .unwrap();

    Json(ruma::serde::to_canonical_json_string(&response).expect("JSON is canonical"))
}

#[cfg_attr(feature = "conduit_bin", get("/_matrix/key/v2/server/<_>"))]
#[tracing::instrument(skip(db))]
pub fn get_server_keys_deprecated_route(db: State<'_, Database>) -> Json<String> {
    get_server_keys_route(db)
}

#[cfg_attr(
    feature = "conduit_bin",
    post("/_matrix/federation/v1/publicRooms", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn get_public_rooms_filtered_route(
    db: State<'_, Database>,
    body: Ruma<get_public_rooms_filtered::v1::Request<'_>>,
) -> ConduitResult<get_public_rooms_filtered::v1::Response> {
    if !db.globals.allow_federation() {
        return Err(Error::bad_config("Federation is disabled."));
    }

    let response = client_server::get_public_rooms_filtered_helper(
        &db,
        None,
        body.limit,
        body.since.as_deref(),
        &body.filter,
        &body.room_network,
    )
    .await?
    .0;

    Ok(get_public_rooms_filtered::v1::Response {
        chunk: response
            .chunk
            .into_iter()
            .map(|c| {
                // Convert ruma::api::federation::directory::get_public_rooms::v1::PublicRoomsChunk
                // to ruma::api::client::r0::directory::PublicRoomsChunk
                Ok::<_, Error>(
                    serde_json::from_str(
                        &serde_json::to_string(&c)
                            .expect("PublicRoomsChunk::to_string always works"),
                    )
                    .expect("federation and client-server PublicRoomsChunk are the same type"),
                )
            })
            .filter_map(|r| r.ok())
            .collect(),
        prev_batch: response.prev_batch,
        next_batch: response.next_batch,
        total_room_count_estimate: response.total_room_count_estimate,
    }
    .into())
}

#[cfg_attr(
    feature = "conduit_bin",
    get("/_matrix/federation/v1/publicRooms", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn get_public_rooms_route(
    db: State<'_, Database>,
    body: Ruma<get_public_rooms::v1::Request<'_>>,
) -> ConduitResult<get_public_rooms::v1::Response> {
    if !db.globals.allow_federation() {
        return Err(Error::bad_config("Federation is disabled."));
    }

    let response = client_server::get_public_rooms_filtered_helper(
        &db,
        None,
        body.limit,
        body.since.as_deref(),
        &IncomingFilter::default(),
        &IncomingRoomNetwork::Matrix,
    )
    .await?
    .0;

    Ok(get_public_rooms::v1::Response {
        chunk: response
            .chunk
            .into_iter()
            .map(|c| {
                // Convert ruma::api::federation::directory::get_public_rooms::v1::PublicRoomsChunk
                // to ruma::api::client::r0::directory::PublicRoomsChunk
                Ok::<_, Error>(
                    serde_json::from_str(
                        &serde_json::to_string(&c)
                            .expect("PublicRoomsChunk::to_string always works"),
                    )
                    .expect("federation and client-server PublicRoomsChunk are the same type"),
                )
            })
            .filter_map(|r| r.ok())
            .collect(),
        prev_batch: response.prev_batch,
        next_batch: response.next_batch,
        total_room_count_estimate: response.total_room_count_estimate,
    }
    .into())
}

#[cfg_attr(
    feature = "conduit_bin",
    put("/_matrix/federation/v1/send/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn send_transaction_message_route<'a>(
    db: State<'a, Database>,
    body: Ruma<send_transaction_message::v1::Request<'_>>,
) -> ConduitResult<send_transaction_message::v1::Response> {
    if !db.globals.allow_federation() {
        return Err(Error::bad_config("Federation is disabled."));
    }

    for edu in body
        .edus
        .iter()
        .map(|edu| serde_json::from_str::<Edu>(edu.json().get()))
        .filter_map(|r| r.ok())
    {
        match edu {
            Edu::Presence(_) => {}
            Edu::Receipt(_) => {}
            Edu::Typing(typing) => {
                if typing.typing {
                    db.rooms.edus.typing_add(
                        &typing.user_id,
                        &typing.room_id,
                        3000 + utils::millis_since_unix_epoch(),
                        &db.globals,
                    )?;
                } else {
                    db.rooms
                        .edus
                        .typing_remove(&typing.user_id, &typing.room_id, &db.globals)?;
                }
            }
            Edu::DeviceListUpdate(_) => {}
            Edu::DirectToDevice(_) => {}
            Edu::_Custom(_) => {}
        }
    }

    let mut resolved_map = BTreeMap::new();

    let pub_key_map = RwLock::new(BTreeMap::new());

    // This is all the auth_events that have been recursively fetched so they don't have to be
    // deserialized over and over again.
    // TODO: make this persist across requests but not in a DB Tree (in globals?)
    // TODO: This could potentially also be some sort of trie (suffix tree) like structure so
    // that once an auth event is known it would know (using indexes maybe) all of the auth
    // events that it references.
    let mut auth_cache = EventMap::new();

    for pdu in &body.pdus {
        // We do not add the event_id field to the pdu here because of signature and hashes checks
        let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(pdu) {
            Ok(t) => t,
            Err(_) => {
                // Event could not be converted to canonical json
                continue;
            }
        };

        if let Err(e) = handle_incoming_pdu(
            &body.origin,
            &event_id,
            value,
            true,
            &db,
            &pub_key_map,
            &mut auth_cache,
        )
        .await
        {
            resolved_map.insert(event_id, Err(e));
        }
    }

    for pdu in &resolved_map {
        if let Err(e) = pdu.1 {
            if e != "Room is unknown to this server." {
                warn!("Incoming PDU failed {:?}", pdu);
            }
        }
    }

    Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into())
}

/// An async function that can recursively call itself.
type AsyncRecursiveResult<'a, T, E> = Pin<Box<dyn Future<Output = StdResult<T, E>> + 'a + Send>>;
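// Functions returning `AsyncRecursiveResult` wrap their body in `Box::pin(async move { ... })`,
// which is what lets `handle_incoming_pdu` and `fetch_and_handle_events` await each other
// recursively below.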

/// When receiving an event one needs to:
/// 0. Skip the PDU if we already know about it
/// 1. Check the server is in the room
/// 2. Check signatures, otherwise drop
/// 3. Check content hash, redact if doesn't match
/// 4. Fetch any missing auth events doing all checks listed here starting at 1. These are not
///    timeline events
/// 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are
///    also rejected "due to auth events"
/// 6. Reject "due to auth events" if the event doesn't pass auth based on the auth events
/// 7. Persist this event as an outlier
/// 8. If not timeline event: stop
/// 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline
///    events
/// 10. Fetch missing state and auth chain events by calling /state_ids at backwards extremities
///     doing all the checks in this list starting at 1. These are not timeline events
/// 11. Check the auth of the event passes based on the state of the event
/// 12. Ensure that the state is derived from the previous current state (i.e. we calculated by
///     doing state res where one of the inputs was a previously trusted set of state, don't just
///     trust a set of state we got from a remote)
/// 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail"
///     it
/// 14. Use state resolution to find new room state
// We use some AsyncRecursiveResult hacks here so we can call this async function recursively
pub fn handle_incoming_pdu<'a>(
    origin: &'a ServerName,
    event_id: &'a EventId,
    value: BTreeMap<String, CanonicalJsonValue>,
    is_timeline_event: bool,
    db: &'a Database,
    pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, String>>>,
    auth_cache: &'a mut EventMap<Arc<PduEvent>>,
) -> AsyncRecursiveResult<'a, Option<Vec<u8>>, String> {
    Box::pin(async move {
        // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json

        // 0. Skip the PDU if we already have it as a timeline event
        if let Ok(Some(pdu_id)) = db.rooms.get_pdu_id(&event_id) {
            return Ok(Some(pdu_id.to_vec()));
        }

        // 1. Check the server is in the room
        let room_id = match value
            .get("room_id")
            .map(|id| match id {
                CanonicalJsonValue::String(id) => RoomId::try_from(id.as_str()).ok(),
                _ => None,
            })
            .flatten()
        {
            Some(id) => id,
            None => {
                // Event is invalid
                return Err("Event needs a valid RoomId.".to_string());
            }
        };

        match db.rooms.exists(&room_id) {
            Ok(true) => {}
            _ => {
                return Err("Room is unknown to this server.".to_string());
            }
        }

        // We go through all the signatures we see on the value and fetch the corresponding signing
        // keys
        fetch_required_signing_keys(&value, &pub_key_map, db)
            .await
            .map_err(|e| e.to_string())?;

        // 2. Check signatures, otherwise drop
        // 3. check content hash, redact if doesn't match
        let create_event = db
            .rooms
            .room_state_get(&room_id, &EventType::RoomCreate, "")
            .map_err(|_| "Failed to ask database for event.".to_owned())?
            .ok_or_else(|| "Failed to find create event in db.".to_owned())?;

        let create_event_content =
            serde_json::from_value::<Raw<CreateEventContent>>(create_event.content.clone())
                .expect("Raw::from_value always works.")
                .deserialize()
                .map_err(|_| "Invalid create event in db.".to_owned())?;

        let room_version = create_event_content.room_version;

        let mut val = match ruma::signatures::verify_event(
            &*pub_key_map.read().map_err(|_| "RwLock is poisoned.")?,
            &value,
            &room_version,
        ) {
            Err(e) => {
                // Drop
                warn!("{:?}: {}", value, e);
                return Err("Signature verification failed".to_string());
            }
            Ok(ruma::signatures::Verified::Signatures) => {
                // Redact
                warn!("Calculated hash does not match: {}", event_id);
                match ruma::signatures::redact(&value, &room_version) {
                    Ok(obj) => obj,
                    Err(_) => return Err("Redaction failed".to_string()),
                }
            }
            Ok(ruma::signatures::Verified::All) => value,
        };

        // Now that we have checked the signature and hashes we can add the eventID and convert
        // to our PduEvent type
        val.insert(
            "event_id".to_owned(),
            to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"),
        );
        let incoming_pdu = serde_json::from_value::<PduEvent>(
            serde_json::to_value(&val).expect("CanonicalJsonObj is a valid JsonValue"),
        )
        .map_err(|_| "Event is not a valid PDU.".to_string())?;

        // 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events
        // 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events"
        debug!("Fetching auth events for {}", incoming_pdu.event_id);
        fetch_and_handle_events(
            db,
            origin,
            &incoming_pdu.auth_events,
            pub_key_map,
            auth_cache,
        )
        .await
        .map_err(|e| e.to_string())?;

        // 6. Reject "due to auth events" if the event doesn't pass auth based on the auth events
        debug!(
            "Auth check for {} based on auth events",
            incoming_pdu.event_id
        );

        // Build map of auth events
        let mut auth_events = BTreeMap::new();
        for id in &incoming_pdu.auth_events {
            let auth_event = auth_cache.get(id).ok_or_else(|| {
                "Auth event not found, event failed recursive auth checks.".to_string()
            })?;

            match auth_events.entry((
                auth_event.kind.clone(),
                auth_event
                    .state_key
                    .clone()
                    .expect("all auth events have state keys"),
            )) {
                Entry::Vacant(v) => {
                    v.insert(auth_event.clone());
                }
                Entry::Occupied(_) => {
                    return Err(
                        "Auth event's type and state_key combination exists multiple times."
                            .to_owned(),
                    )
                }
            }
        }

        // The original create event must be in the auth events
        if auth_events
            .get(&(EventType::RoomCreate, "".to_owned()))
            .map(|a| a.as_ref())
            != Some(&create_event)
        {
            return Err("Incoming event refers to wrong create event.".to_owned());
        }

        // If the previous event was the create event special rules apply
        let previous_create = if incoming_pdu.auth_events.len() == 1
            && incoming_pdu.prev_events == incoming_pdu.auth_events
        {
            auth_cache
                .get(&incoming_pdu.auth_events[0])
                .cloned()
                .filter(|maybe_create| **maybe_create == create_event)
        } else {
            None
        };

        let incoming_pdu = Arc::new(incoming_pdu.clone());

        if !state_res::event_auth::auth_check(
            &room_version,
            &incoming_pdu,
            previous_create.clone(),
            &auth_events,
            None, // TODO: third party invite
        )
        .map_err(|_e| "Auth check failed".to_string())?
        {
            return Err("Event has failed auth check with auth events.".to_string());
        }

        debug!("Validation successful.");

        // 7. Persist the event as an outlier.
        db.rooms
            .add_pdu_outlier(&incoming_pdu.event_id, &val)
            .map_err(|_| "Failed to add pdu as outlier.".to_owned())?;
        debug!("Added pdu as outlier.");

        // 8. if not timeline event: stop
        if !is_timeline_event {
            return Ok(None);
        }

        // TODO: 9. fetch any missing prev events doing all checks listed here starting at 1. These are timeline events

        // 10. Fetch missing state and auth chain events by calling /state_ids at backwards extremities
        // doing all the checks in this list starting at 1. These are not timeline events.

        // TODO: if we know the prev_events of the incoming event we can avoid the request and build
        // the state from a known point and resolve if > 1 prev_event

        debug!("Requesting state at event.");
        let mut state_at_incoming_event = None;
        let mut incoming_auth_events = Vec::new();

        if incoming_pdu.prev_events.len() == 1 {
            let prev_event = &incoming_pdu.prev_events[0];
            let state_vec = db
                .rooms
                .pdu_shortstatehash(prev_event)
                .map_err(|_| "Failed talking to db".to_owned())?
                .map(|shortstatehash| db.rooms.state_full_ids(shortstatehash).ok())
                .flatten();
            if let Some(mut state_vec) = state_vec {
                if db
                    .rooms
                    .get_pdu(prev_event)
                    .ok()
                    .flatten()
                    .ok_or_else(|| "Could not find prev event, but we know the state.".to_owned())?
                    .state_key
                    .is_some()
                {
                    state_vec.push(prev_event.clone());
                }
                state_at_incoming_event = Some(
                    fetch_and_handle_events(db, origin, &state_vec, pub_key_map, auth_cache)
                        .await
                        .map_err(|_| "Failed to fetch state events locally".to_owned())?
                        .into_iter()
                        .map(|pdu| {
                            (
                                (
                                    pdu.kind.clone(),
                                    pdu.state_key
                                        .clone()
                                        .expect("events from state_full_ids are state events"),
                                ),
                                pdu,
                            )
                        })
                        .collect(),
                );
            }
            // TODO: set incoming_auth_events?
        }

        if state_at_incoming_event.is_none() {
            // Call /state_ids to find out what the state at this pdu is. We trust the server's
            // response to some extent, but we still do a lot of checks on the events
            match db
                .sending
                .send_federation_request(
                    &db.globals,
                    origin,
                    get_room_state_ids::v1::Request {
                        room_id: &room_id,
                        event_id: &incoming_pdu.event_id,
                    },
                )
                .await
            {
                Ok(res) => {
                    debug!("Fetching state events at event.");
                    let state_vec = match fetch_and_handle_events(
                        &db,
                        origin,
                        &res.pdu_ids,
                        pub_key_map,
                        auth_cache,
                    )
                    .await
                    {
                        Ok(state) => state,
                        Err(_) => return Err("Failed to fetch state events.".to_owned()),
                    };

                    let mut state = BTreeMap::new();
                    for pdu in state_vec {
                        match state.entry((
                            pdu.kind.clone(),
                            pdu.state_key
                                .clone()
                                .ok_or_else(|| "Found non-state pdu in state events.".to_owned())?,
                        )) {
                            Entry::Vacant(v) => {
                                v.insert(pdu);
                            }
                            Entry::Occupied(_) => {
                                return Err(
                                    "State event's type and state_key combination exists multiple times.".to_owned(),
                                )
                            }
                        }
                    }

                    // The original create event must still be in the state
                    if state
                        .get(&(EventType::RoomCreate, "".to_owned()))
                        .map(|a| a.as_ref())
                        != Some(&create_event)
                    {
                        return Err("Incoming event refers to wrong create event.".to_owned());
                    }

                    debug!("Fetching auth chain events at event.");
                    incoming_auth_events = match fetch_and_handle_events(
                        &db,
                        origin,
                        &res.auth_chain_ids,
                        pub_key_map,
                        auth_cache,
                    )
                    .await
                    {
                        Ok(state) => state,
                        Err(_) => return Err("Failed to fetch auth chain.".to_owned()),
                    };

                    state_at_incoming_event = Some(state);
                }
                Err(_) => {
                    return Err("Fetching state for event failed".into());
                }
            };
        }

        let state_at_incoming_event =
            state_at_incoming_event.expect("we always set this to some above");

        // 11. Check the auth of the event passes based on the state of the event
        if !state_res::event_auth::auth_check(
            &room_version,
            &incoming_pdu,
            previous_create.clone(),
            &state_at_incoming_event,
            None, // TODO: third party invite
        )
        .map_err(|_e| "Auth check failed.".to_owned())?
        {
            return Err("Event has failed auth check with state at the event.".into());
        }
        debug!("Auth check succeeded.");

        // 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it
        let current_state = db
            .rooms
            .room_state_full(&room_id)
            .map_err(|_| "Failed to load room state.".to_owned())?
            .into_iter()
            .map(|(k, v)| (k, Arc::new(v)))
            .collect();

        if !state_res::event_auth::auth_check(
            &room_version,
            &incoming_pdu,
            previous_create,
            &current_state,
            None,
        )
        .map_err(|_e| "Auth check failed.".to_owned())?
        {
            // Soft fail, we leave the event as an outlier but don't add it to the timeline
            return Err("Event has been soft failed".into());
        };
        debug!("Auth check with current state succeeded.");

        // Now we calculate the set of extremities this room has after the incoming event has been
        // applied. We start with the previous extremities (aka leaves)
        let mut extremities = db
            .rooms
            .get_pdu_leaves(&room_id)
            .map_err(|_| "Failed to load room leaves".to_owned())?;

        // Remove any forward extremities that are referenced by this incoming event's prev_events
        for prev_event in &incoming_pdu.prev_events {
            if extremities.contains(prev_event) {
                extremities.remove(prev_event);
            }
        }

        let mut fork_states = BTreeSet::new();
        for id in &extremities {
            match db
                .rooms
                .get_pdu(&id)
                .map_err(|_| "Failed to ask db for pdu.".to_owned())?
            {
                Some(leaf_pdu) => {
                    let pdu_shortstatehash = db
                        .rooms
                        .pdu_shortstatehash(&leaf_pdu.event_id)
                        .map_err(|_| "Failed to ask db for pdu state hash.".to_owned())?
                        .ok_or_else(|| {
                            error!(
                                "Found extremity pdu with no statehash in db: {:?}",
                                leaf_pdu
                            );
                            "Found pdu with no statehash in db.".to_owned()
                        })?;

                    let mut leaf_state = db
                        .rooms
                        .state_full(pdu_shortstatehash)
                        .map_err(|_| "Failed to ask db for room state.".to_owned())?
                        .into_iter()
                        .map(|(k, v)| (k, Arc::new(v)))
                        .collect::<StateMap<_>>();

                    if let Some(state_key) = &leaf_pdu.state_key {
                        // Now it's the state after
                        let key = (leaf_pdu.kind.clone(), state_key.clone());
                        leaf_state.insert(key, Arc::new(leaf_pdu));
                    }

                    fork_states.insert(leaf_state);
                }
                _ => {
                    error!("Missing state snapshot for {:?}", id);
                    return Err("Missing state snapshot.".to_owned());
                }
            }
        }

        // 12. Ensure that the state is derived from the previous current state (i.e. we calculated
        // by doing state res where one of the inputs was a previously trusted set of state,
        // don't just trust a set of state we got from a remote).

        // We do this by adding the current state to the list of fork states
        fork_states.insert(current_state);

        // We also add state after incoming event to the fork states
        extremities.insert(incoming_pdu.event_id.clone());
        let mut state_after = state_at_incoming_event.clone();
        if let Some(state_key) = &incoming_pdu.state_key {
            state_after.insert(
                (incoming_pdu.kind.clone(), state_key.clone()),
                incoming_pdu.clone(),
            );
        }
        fork_states.insert(state_after.clone());

        let fork_states = fork_states.into_iter().collect::<Vec<_>>();

        let mut update_state = false;
        // 14. Use state resolution to find new room state
        let new_room_state = if fork_states.is_empty() {
            return Err("State is empty.".to_owned());
        } else if fork_states.len() == 1 {
            // There was only one state, so it has to be the room's current state (because that is
            // always included)
            debug!("Skipping stateres because there is no new state.");
            fork_states[0]
                .iter()
                .map(|(k, pdu)| (k.clone(), pdu.event_id.clone()))
                .collect()
        } else {
            // We do need to force an update to this room's state
            update_state = true;

            let mut auth_events = vec![];
            for map in &fork_states {
                let mut state_auth = vec![];
                for auth_id in map.values().flat_map(|pdu| &pdu.auth_events) {
                    match fetch_and_handle_events(
                        &db,
                        origin,
                        &[auth_id.clone()],
                        pub_key_map,
                        auth_cache,
                    )
                    .await
                    {
                        // This should always contain exactly one element when Ok
                        Ok(events) => state_auth.push(events[0].clone()),
                        Err(e) => {
                            debug!("Event was not present: {}", e);
                        }
                    }
                }
                auth_events.push(state_auth);
            }

            // Add everything we will need to event_map
            auth_cache.extend(
                auth_events
                    .iter()
                    .map(|pdus| pdus.iter().map(|pdu| (pdu.event_id().clone(), pdu.clone())))
                    .flatten(),
            );
            auth_cache.extend(
                incoming_auth_events
                    .into_iter()
                    .map(|pdu| (pdu.event_id().clone(), pdu)),
            );
            auth_cache.extend(
                state_after
                    .into_iter()
                    .map(|(_, pdu)| (pdu.event_id().clone(), pdu)),
            );

            match state_res::StateResolution::resolve(
                &room_id,
                &room_version,
                &fork_states
                    .into_iter()
                    .map(|map| {
                        map.into_iter()
                            .map(|(k, v)| (k, v.event_id.clone()))
                            .collect::<StateMap<_>>()
                    })
                    .collect::<Vec<_>>(),
                auth_events
                    .into_iter()
                    .map(|pdus| pdus.into_iter().map(|pdu| pdu.event_id().clone()).collect())
                    .collect(),
                auth_cache,
            ) {
                Ok(new_state) => new_state,
                Err(_) => {
                    return Err("State resolution failed, either an event could not be found or deserialization".into());
                }
            }
        };

        // Now that the event has passed all auth it is added into the timeline.
        // We use the `state_at_incoming_event` instead of `state_after` so we accurately
        // represent the state for this event.
        let pdu_id = append_incoming_pdu(
            &db,
            &incoming_pdu,
            val,
            extremities,
            &state_at_incoming_event,
        )
        .map_err(|_| "Failed to add pdu to db.".to_owned())?;
        debug!("Appended incoming pdu.");

        // Set the new room state to the resolved state
        if update_state {
            db.rooms
                .force_state(&room_id, new_room_state, &db)
                .map_err(|_| "Failed to set new room state.".to_owned())?;
        }
        debug!("Updated resolved state");

        // Event has passed all auth/stateres checks
        Ok(Some(pdu_id))
    })
}

2021-01-30 03:45:33 +01:00
|
|
|
/// Find the event and auth it. Once the event is validated (steps 1 - 8)
|
|
|
|
/// it is appended to the outliers Tree.
|
2021-01-15 03:32:22 +01:00
|
|
|
///
|
2021-03-25 23:55:40 +01:00
|
|
|
/// a. Look in the auth_cache
|
|
|
|
/// b. Look in the main timeline (pduid_pdu tree)
|
|
|
|
/// c. Look at outlier pdu tree
|
|
|
|
/// d. Ask origin server over federation
|
|
|
|
/// e. TODO: Ask other servers over federation?
|
2021-02-04 02:00:01 +01:00
|
|
|
///
|
|
|
|
/// If the event is unknown to the `auth_cache` it is added. This guarantees that any
|
|
|
|
/// event we need to know of will be present.
|
2021-03-23 12:59:27 +01:00
|
|
|
//#[tracing::instrument(skip(db, key_map, auth_cache))]
|
2021-04-16 18:18:29 +02:00
|
|
|
pub(crate) fn fetch_and_handle_events<'a>(
|
|
|
|
db: &'a Database,
|
|
|
|
origin: &'a ServerName,
|
|
|
|
events: &'a [EventId],
|
|
|
|
pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, String>>>,
|
|
|
|
auth_cache: &'a mut EventMap<Arc<PduEvent>>,
|
|
|
|
) -> AsyncRecursiveResult<'a, Vec<Arc<PduEvent>>, Error> {
|
|
|
|
Box::pin(async move {
|
|
|
|
let mut pdus = vec![];
|
|
|
|
for id in events {
|
|
|
|
// a. Look at auth cache
|
2021-04-19 11:53:46 +02:00
|
|
|
let pdu = match auth_cache.get(id) {
|
|
|
|
Some(pdu) => {
|
|
|
|
debug!("Found {} in cache", id);
|
|
|
|
// We already have the auth chain for events in cache
|
|
|
|
pdu.clone()
|
|
|
|
}
|
|
|
|
// b. Look in the main timeline (pduid_pdu tree)
|
|
|
|
// c. Look at outlier pdu tree
|
|
|
|
// (get_pdu checks both)
|
|
|
|
None => match db.rooms.get_pdu(&id)? {
|
2021-04-16 18:18:29 +02:00
|
|
|
Some(pdu) => {
|
2021-04-19 11:53:46 +02:00
|
|
|
debug!("Found {} in db", id);
|
|
|
|
// We need to fetch the auth chain
|
|
|
|
let _ = fetch_and_handle_events(
|
|
|
|
db,
|
|
|
|
origin,
|
|
|
|
&pdu.auth_events,
|
|
|
|
pub_key_map,
|
|
|
|
auth_cache,
|
|
|
|
)
|
|
|
|
.await?;
|
|
|
|
Arc::new(pdu)
|
2021-04-16 18:18:29 +02:00
|
|
|
}
|
2021-04-19 11:53:46 +02:00
|
|
|
None => {
|
|
|
|
// d. Ask origin server over federation
|
|
|
|
debug!("Fetching {} over federation.", id);
|
|
|
|
match db
|
|
|
|
.sending
|
|
|
|
.send_federation_request(
|
|
|
|
&db.globals,
|
2021-04-16 18:18:29 +02:00
|
|
|
origin,
|
2021-04-19 11:53:46 +02:00
|
|
|
get_event::v1::Request { event_id: &id },
|
2021-03-26 11:10:45 +01:00
|
|
|
)
|
2021-04-19 11:53:46 +02:00
|
|
|
.await
|
|
|
|
{
|
|
|
|
Ok(res) => {
|
|
|
|
debug!("Got {} over federation: {:?}", id, res);
|
|
|
|
let (event_id, mut value) =
|
|
|
|
crate::pdu::gen_event_id_canonical_json(&res.pdu)?;
|
|
|
|
// This will also fetch the auth chain
|
|
|
|
match handle_incoming_pdu(
|
2021-04-16 18:18:29 +02:00
|
|
|
origin,
|
|
|
|
&event_id,
|
|
|
|
value.clone(),
|
|
|
|
false,
|
|
|
|
db,
|
|
|
|
pub_key_map,
|
|
|
|
auth_cache,
|
|
|
|
)
|
|
|
|
.await
|
|
|
|
{
|
2021-04-19 11:53:46 +02:00
|
|
|
Ok(_) => {
|
|
|
|
value.insert(
|
|
|
|
"event_id".to_owned(),
|
|
|
|
to_canonical_value(&event_id)
|
|
|
|
.expect("EventId is a valid CanonicalJsonValue"),
|
|
|
|
);
|
|
|
|
|
|
|
|
Arc::new(serde_json::from_value(
|
|
|
|
serde_json::to_value(value).expect("canonicaljsonobject is valid value"),
|
|
|
|
).expect("This is possible because handle_incoming_pdu worked"))
|
|
|
|
}
|
2021-04-16 18:18:29 +02:00
|
|
|
Err(e) => {
|
|
|
|
warn!("Authentication of event {} failed: {:?}", id, e);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
2021-04-19 11:53:46 +02:00
|
|
|
}
|
|
|
|
Err(_) => {
|
|
|
|
warn!("Failed to fetch event: {}", id);
|
|
|
|
continue;
|
2021-04-16 18:18:29 +02:00
|
|
|
}
|
2021-04-14 09:39:06 +02:00
|
|
|
}
|
2021-04-19 11:53:46 +02:00
|
|
|
}
|
|
|
|
},
|
|
|
|
};
|
2021-04-16 18:18:29 +02:00
|
|
|
auth_cache.entry(id.clone()).or_insert_with(|| pdu.clone());
|
|
|
|
pdus.push(pdu);
|
|
|
|
}
|
|
|
|
Ok(pdus)
|
|
|
|
})
|
2021-01-12 14:26:52 +01:00
|
|
|
}
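// Illustrative sketch (not part of the original source): a typical call site, assuming an
// async context where `db`, `origin`, `pub_key_map` and a mutable `auth_cache` are already
// in scope and `missing` is a slice of `EventId`s whose auth chains we still need:
//
//     let fetched =
//         fetch_and_handle_events(&db, origin, &missing, &pub_key_map, &mut auth_cache).await?;
//     for pdu in &fetched {
//         debug!("Auth cache now contains {}", pdu.event_id);
//     }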
|
|
|
|
|
2021-01-15 03:32:22 +01:00
|
|
|
/// Search the DB for the signing keys of the given server; if we don't have them,
|
|
|
|
/// fetch them from the server and save them to our DB.
|
2021-03-13 16:30:12 +01:00
|
|
|
#[tracing::instrument(skip(db))]
|
2021-01-30 03:45:33 +01:00
|
|
|
pub(crate) async fn fetch_signing_keys(
|
2021-01-03 23:26:17 +01:00
|
|
|
db: &Database,
|
2021-01-14 20:39:56 +01:00
|
|
|
origin: &ServerName,
|
2021-03-22 14:04:11 +01:00
|
|
|
signature_ids: Vec<&String>,
|
2021-03-13 16:30:12 +01:00
|
|
|
) -> Result<BTreeMap<String, String>> {
|
2021-03-23 12:59:27 +01:00
|
|
|
let contains_all_ids =
|
|
|
|
|keys: &BTreeMap<String, String>| signature_ids.iter().all(|&id| keys.contains_key(id));
|
2021-03-13 16:30:12 +01:00
|
|
|
|
2021-03-22 14:04:11 +01:00
|
|
|
let mut result = db
|
|
|
|
.globals
|
|
|
|
.signing_keys_for(origin)?
|
|
|
|
.into_iter()
|
|
|
|
.map(|(k, v)| (k.to_string(), v.key))
|
|
|
|
.collect::<BTreeMap<_, _>>();
|
|
|
|
|
|
|
|
if contains_all_ids(&result) {
|
|
|
|
return Ok(result);
|
|
|
|
}
|
|
|
|
|
|
|
|
if let Ok(get_keys_response) = db
|
|
|
|
.sending
|
|
|
|
.send_federation_request(&db.globals, origin, get_server_keys::v2::Request::new())
|
|
|
|
.await
|
|
|
|
{
|
|
|
|
db.globals
|
|
|
|
.add_signing_key(origin, &get_keys_response.server_key)?;
|
|
|
|
|
|
|
|
result.extend(
|
|
|
|
get_keys_response
|
|
|
|
.server_key
|
|
|
|
.verify_keys
|
|
|
|
.into_iter()
|
|
|
|
.map(|(k, v)| (k.to_string(), v.key)),
|
|
|
|
);
|
|
|
|
result.extend(
|
|
|
|
get_keys_response
|
|
|
|
.server_key
|
|
|
|
.old_verify_keys
|
|
|
|
.into_iter()
|
|
|
|
.map(|(k, v)| (k.to_string(), v.key)),
|
|
|
|
);
|
|
|
|
|
|
|
|
if contains_all_ids(&result) {
|
|
|
|
return Ok(result);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for server in db.globals.trusted_servers() {
|
|
|
|
debug!("Asking {} for {}'s signing key", server, origin);
|
|
|
|
if let Ok(keys) = db
|
|
|
|
.sending
|
|
|
|
.send_federation_request(
|
|
|
|
&db.globals,
|
|
|
|
&server,
|
|
|
|
get_remote_server_keys::v2::Request::new(
|
|
|
|
origin,
|
|
|
|
SystemTime::now()
|
|
|
|
.checked_add(Duration::from_secs(3600))
|
|
|
|
.expect("SystemTime to large"),
|
|
|
|
),
|
|
|
|
)
|
|
|
|
.await
|
|
|
|
{
|
|
|
|
debug!("Got signing keys: {:?}", keys);
|
2021-04-05 21:46:10 +02:00
|
|
|
for k in keys.server_keys {
|
2021-03-22 14:04:11 +01:00
|
|
|
db.globals.add_signing_key(origin, &k)?;
|
|
|
|
result.extend(
|
|
|
|
k.verify_keys
|
|
|
|
.into_iter()
|
|
|
|
.map(|(k, v)| (k.to_string(), v.key)),
|
|
|
|
);
|
|
|
|
result.extend(
|
|
|
|
k.old_verify_keys
|
|
|
|
.into_iter()
|
|
|
|
.map(|(k, v)| (k.to_string(), v.key)),
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
|
|
|
if contains_all_ids(&result) {
|
|
|
|
return Ok(result);
|
2021-03-01 14:23:28 +01:00
|
|
|
}
|
2021-01-03 23:26:17 +01:00
|
|
|
}
|
|
|
|
}
|
2021-03-22 14:04:11 +01:00
|
|
|
|
2021-04-13 21:34:31 +02:00
|
|
|
warn!("Failed to find public key for server: {}", origin);
|
2021-03-22 14:04:11 +01:00
|
|
|
Err(Error::BadServerResponse(
|
|
|
|
"Failed to find public key for server",
|
|
|
|
))
|
2021-01-03 23:26:17 +01:00
|
|
|
}
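// Illustrative sketch (not part of the original source): fetching a single key id for a
// remote server, assuming `db: &Database` and `origin: &ServerName` are in scope and
// `key_id` is a `String` such as "ed25519:abc123":
//
//     let keys = fetch_signing_keys(db, origin, vec![&key_id]).await?;
//     if let Some(verify_key) = keys.get(&key_id) {
//         debug!("Verify key for {}: {}", origin, verify_key);
//     }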
|
2021-01-15 21:46:47 +01:00
|
|
|
|
2021-01-30 03:45:33 +01:00
|
|
|
/// Append the incoming event, setting the state snapshot to the state from the
|
|
|
|
/// server that sent the event.
|
2021-03-13 16:30:12 +01:00
|
|
|
#[tracing::instrument(skip(db))]
|
2021-01-30 03:45:33 +01:00
|
|
|
pub(crate) fn append_incoming_pdu(
|
|
|
|
db: &Database,
|
|
|
|
pdu: &PduEvent,
|
2021-04-16 18:18:29 +02:00
|
|
|
pdu_json: CanonicalJsonObject,
|
2021-03-25 23:55:40 +01:00
|
|
|
new_room_leaves: HashSet<EventId>,
|
2021-01-30 03:45:33 +01:00
|
|
|
state: &StateMap<Arc<PduEvent>>,
|
2021-04-16 18:18:29 +02:00
|
|
|
) -> Result<Vec<u8>> {
|
2021-01-29 17:20:33 +01:00
|
|
|
let count = db.globals.next_count()?;
|
|
|
|
let mut pdu_id = pdu.room_id.as_bytes().to_vec();
|
|
|
|
pdu_id.push(0xff);
|
|
|
|
pdu_id.extend_from_slice(&count.to_be_bytes());
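// The key built above therefore has the layout
//     <room_id bytes> 0xff <big-endian global count>
// so all pdus of a room share a common prefix and sort by insertion order.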
|
|
|
|
|
2021-01-15 03:32:22 +01:00
|
|
|
// We append to state before appending the pdu, so we don't have a moment in time with the
|
|
|
|
// pdu without its state. This is okay because append_pdu can't fail.
|
2021-03-25 23:55:40 +01:00
|
|
|
db.rooms
|
|
|
|
.set_event_state(&pdu.event_id, state, &db.globals)?;
|
2020-12-22 18:45:35 +01:00
|
|
|
|
|
|
|
db.rooms.append_pdu(
|
2021-01-19 01:08:59 +01:00
|
|
|
pdu,
|
2021-04-16 18:18:29 +02:00
|
|
|
pdu_json,
|
2020-12-22 18:45:35 +01:00
|
|
|
count,
|
|
|
|
pdu_id.clone().into(),
|
2021-03-25 23:55:40 +01:00
|
|
|
&new_room_leaves.into_iter().collect::<Vec<_>>(),
|
2021-01-15 17:05:57 +01:00
|
|
|
&db,
|
2020-12-22 18:45:35 +01:00
|
|
|
)?;
|
|
|
|
|
|
|
|
for appservice in db.appservice.iter_all().filter_map(|r| r.ok()) {
|
2021-03-04 14:39:16 +01:00
|
|
|
if let Some(namespaces) = appservice.1.get("namespaces") {
|
|
|
|
let users = namespaces
|
|
|
|
.get("users")
|
|
|
|
.and_then(|users| users.as_sequence())
|
|
|
|
.map_or_else(Vec::new, |users| {
|
|
|
|
users
|
|
|
|
.iter()
|
|
|
|
.map(|users| {
|
|
|
|
users
|
|
|
|
.get("regex")
|
|
|
|
.and_then(|regex| regex.as_str())
|
|
|
|
.and_then(|regex| Regex::new(regex).ok())
|
|
|
|
})
|
|
|
|
.filter_map(|o| o)
|
|
|
|
.collect::<Vec<_>>()
|
|
|
|
});
|
|
|
|
let aliases = namespaces
|
|
|
|
.get("aliases")
|
|
|
|
.and_then(|users| users.get("regex"))
|
|
|
|
.and_then(|regex| regex.as_str())
|
|
|
|
.and_then(|regex| Regex::new(regex).ok());
|
|
|
|
let rooms = namespaces
|
|
|
|
.get("rooms")
|
|
|
|
.and_then(|rooms| rooms.as_sequence());
|
|
|
|
|
|
|
|
let room_aliases = db.rooms.room_aliases(&pdu.room_id);
|
|
|
|
|
|
|
|
let bridge_user_id = appservice
|
|
|
|
.1
|
|
|
|
.get("sender_localpart")
|
|
|
|
.and_then(|string| string.as_str())
|
|
|
|
.and_then(|string| {
|
|
|
|
UserId::parse_with_server_name(string, db.globals.server_name()).ok()
|
|
|
|
});
|
|
|
|
|
|
|
|
#[allow(clippy::blocks_in_if_conditions)]
|
|
|
|
if bridge_user_id.map_or(false, |bridge_user_id| {
|
|
|
|
db.rooms
|
|
|
|
.is_joined(&bridge_user_id, &pdu.room_id)
|
|
|
|
.unwrap_or(false)
|
|
|
|
}) || users.iter().any(|users| {
|
|
|
|
users.is_match(pdu.sender.as_str())
|
|
|
|
|| pdu.kind == EventType::RoomMember
|
|
|
|
&& pdu
|
|
|
|
.state_key
|
|
|
|
.as_ref()
|
|
|
|
.map_or(false, |state_key| users.is_match(&state_key))
|
|
|
|
}) || aliases.map_or(false, |aliases| {
|
|
|
|
room_aliases
|
|
|
|
.filter_map(|r| r.ok())
|
|
|
|
.any(|room_alias| aliases.is_match(room_alias.as_str()))
|
|
|
|
}) || rooms.map_or(false, |rooms| rooms.contains(&pdu.room_id.as_str().into()))
|
|
|
|
|| db
|
|
|
|
.rooms
|
|
|
|
.room_members(&pdu.room_id)
|
|
|
|
.filter_map(|r| r.ok())
|
|
|
|
.any(|member| users.iter().any(|regex| regex.is_match(member.as_str())))
|
|
|
|
{
|
|
|
|
db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?;
|
|
|
|
}
|
|
|
|
}
|
2020-12-22 18:45:35 +01:00
|
|
|
}
|
|
|
|
|
2021-04-16 18:18:29 +02:00
|
|
|
Ok(pdu_id)
|
2020-12-22 18:45:35 +01:00
|
|
|
}
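// Note: `append_incoming_pdu` is called from `handle_incoming_pdu` above once an incoming
// event has passed the auth and state resolution checks.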
|
|
|
|
|
2021-04-07 15:56:57 +02:00
|
|
|
#[cfg_attr(
|
|
|
|
feature = "conduit_bin",
|
|
|
|
get("/_matrix/federation/v1/event/<_>", data = "<body>")
|
|
|
|
)]
|
|
|
|
#[tracing::instrument(skip(db, body))]
|
|
|
|
pub fn get_event_route<'a>(
|
|
|
|
db: State<'a, Database>,
|
|
|
|
body: Ruma<get_event::v1::Request<'_>>,
|
|
|
|
) -> ConduitResult<get_event::v1::Response> {
|
|
|
|
if !db.globals.allow_federation() {
|
|
|
|
return Err(Error::bad_config("Federation is disabled."));
|
|
|
|
}
|
|
|
|
|
|
|
|
Ok(get_event::v1::Response {
|
|
|
|
origin: db.globals.server_name().to_owned(),
|
|
|
|
origin_server_ts: SystemTime::now(),
|
|
|
|
pdu: PduEvent::convert_to_outgoing_federation_event(
|
2021-04-11 21:01:27 +02:00
|
|
|
db.rooms
|
|
|
|
.get_pdu_json(&body.event_id)?
|
|
|
|
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?,
|
2021-04-07 15:56:57 +02:00
|
|
|
),
|
|
|
|
}
|
|
|
|
.into())
|
|
|
|
}
|
|
|
|
|
2020-09-25 12:26:29 +02:00
|
|
|
#[cfg_attr(
|
|
|
|
feature = "conduit_bin",
|
|
|
|
post("/_matrix/federation/v1/get_missing_events/<_>", data = "<body>")
|
|
|
|
)]
|
2021-02-28 12:41:03 +01:00
|
|
|
#[tracing::instrument(skip(db, body))]
|
2020-09-25 12:26:29 +02:00
|
|
|
pub fn get_missing_events_route<'a>(
|
|
|
|
db: State<'a, Database>,
|
|
|
|
body: Ruma<get_missing_events::v1::Request<'_>>,
|
|
|
|
) -> ConduitResult<get_missing_events::v1::Response> {
|
2021-01-01 13:47:53 +01:00
|
|
|
if !db.globals.allow_federation() {
|
2020-11-14 23:13:06 +01:00
|
|
|
return Err(Error::bad_config("Federation is disabled."));
|
2020-10-06 21:04:51 +02:00
|
|
|
}
|
|
|
|
|
2020-09-25 12:26:29 +02:00
|
|
|
let mut queued_events = body.latest_events.clone();
|
|
|
|
let mut events = Vec::new();
|
|
|
|
|
|
|
|
let mut i = 0;
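// Walk backwards from the events the requesting server claims as its latest
// (`latest_events`), following `prev_events`, and stop at events it already knows
// (`earliest_events`) or when the requested limit is reached.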
|
|
|
|
while i < queued_events.len() && events.len() < u64::from(body.limit) as usize {
|
|
|
|
if let Some(pdu) = db.rooms.get_pdu_json(&queued_events[i])? {
|
2021-04-14 10:43:31 +02:00
|
|
|
let event_id =
|
|
|
|
serde_json::from_value(
|
2021-04-11 21:01:27 +02:00
|
|
|
serde_json::to_value(pdu.get("event_id").cloned().ok_or_else(|| {
|
|
|
|
Error::bad_database("Event in db has no event_id field.")
|
|
|
|
})?)
|
|
|
|
.expect("canonical json is valid json value"),
|
2020-09-25 12:26:29 +02:00
|
|
|
)
|
2021-04-14 10:43:31 +02:00
|
|
|
.map_err(|_| Error::bad_database("Invalid event_id field in pdu in db."))?;
|
|
|
|
|
|
|
|
if body.earliest_events.contains(&event_id) {
|
2020-09-25 12:26:29 +02:00
|
|
|
i += 1;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
queued_events.extend_from_slice(
|
|
|
|
&serde_json::from_value::<Vec<EventId>>(
|
2021-04-11 21:01:27 +02:00
|
|
|
serde_json::to_value(pdu.get("prev_events").cloned().ok_or_else(|| {
|
|
|
|
Error::bad_database("Event in db has no prev_events field.")
|
|
|
|
})?)
|
|
|
|
.expect("canonical json is valid json value"),
|
2020-09-25 12:26:29 +02:00
|
|
|
)
|
|
|
|
.map_err(|_| Error::bad_database("Invalid prev_events content in pdu in db."))?,
|
|
|
|
);
|
2021-04-11 21:01:27 +02:00
|
|
|
events.push(PduEvent::convert_to_outgoing_federation_event(pdu));
|
2020-09-25 12:26:29 +02:00
|
|
|
}
|
|
|
|
i += 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
Ok(get_missing_events::v1::Response { events }.into())
|
|
|
|
}
|
2020-10-05 22:19:22 +02:00
|
|
|
|
2021-03-18 00:09:57 +01:00
|
|
|
#[cfg_attr(
|
|
|
|
feature = "conduit_bin",
|
|
|
|
get("/_matrix/federation/v1/state_ids/<_>", data = "<body>")
|
|
|
|
)]
|
|
|
|
#[tracing::instrument(skip(db, body))]
|
|
|
|
pub fn get_room_state_ids_route<'a>(
|
|
|
|
db: State<'a, Database>,
|
|
|
|
body: Ruma<get_room_state_ids::v1::Request<'_>>,
|
|
|
|
) -> ConduitResult<get_room_state_ids::v1::Response> {
|
|
|
|
if !db.globals.allow_federation() {
|
|
|
|
return Err(Error::bad_config("Federation is disabled."));
|
|
|
|
}
|
|
|
|
|
|
|
|
let shortstatehash = db
|
|
|
|
.rooms
|
|
|
|
.pdu_shortstatehash(&body.event_id)?
|
|
|
|
.ok_or(Error::BadRequest(
|
|
|
|
ErrorKind::NotFound,
|
|
|
|
"Pdu state not found.",
|
|
|
|
))?;
|
|
|
|
|
|
|
|
let pdu_ids = db.rooms.state_full_ids(shortstatehash)?;
|
|
|
|
|
|
|
|
let mut auth_chain_ids = BTreeSet::<EventId>::new();
|
|
|
|
let mut todo = BTreeSet::new();
|
|
|
|
todo.insert(body.event_id.clone());
|
|
|
|
|
2021-03-24 11:52:10 +01:00
|
|
|
while let Some(event_id) = todo.iter().next().cloned() {
|
|
|
|
if let Some(pdu) = db.rooms.get_pdu(&event_id)? {
|
|
|
|
todo.extend(
|
|
|
|
pdu.auth_events
|
|
|
|
.clone()
|
|
|
|
.into_iter()
|
|
|
|
.collect::<BTreeSet<_>>()
|
|
|
|
.difference(&auth_chain_ids)
|
|
|
|
.cloned(),
|
|
|
|
);
|
|
|
|
auth_chain_ids.extend(pdu.auth_events.into_iter());
|
2021-03-18 00:09:57 +01:00
|
|
|
} else {
|
2021-03-24 11:52:10 +01:00
|
|
|
warn!("Could not find pdu mentioned in auth events.");
|
2021-03-18 00:09:57 +01:00
|
|
|
}
|
2021-03-24 11:52:10 +01:00
|
|
|
|
|
|
|
todo.remove(&event_id);
|
2021-03-18 00:09:57 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
Ok(get_room_state_ids::v1::Response {
|
|
|
|
auth_chain_ids: auth_chain_ids.into_iter().collect(),
|
|
|
|
pdu_ids,
|
|
|
|
}
|
|
|
|
.into())
|
|
|
|
}
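// The auth chain walk above also appears almost verbatim in `create_join_event_route`
// below. A hypothetical helper (sketch only, not part of the original source) could factor
// out the shared traversal:
//
//     fn collect_auth_chain_ids(
//         db: &Database,
//         starting: BTreeSet<EventId>,
//     ) -> Result<BTreeSet<EventId>> {
//         let mut auth_chain_ids = BTreeSet::new();
//         let mut todo = starting;
//         while let Some(event_id) = todo.iter().next().cloned() {
//             if let Some(pdu) = db.rooms.get_pdu(&event_id)? {
//                 todo.extend(
//                     pdu.auth_events
//                         .iter()
//                         .filter(|id| !auth_chain_ids.contains(*id))
//                         .cloned(),
//                 );
//                 auth_chain_ids.extend(pdu.auth_events.into_iter());
//             } else {
//                 warn!("Could not find pdu mentioned in auth events.");
//             }
//             todo.remove(&event_id);
//         }
//         Ok(auth_chain_ids)
//     }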
|
|
|
|
|
2021-04-16 18:18:29 +02:00
|
|
|
#[cfg_attr(
|
|
|
|
feature = "conduit_bin",
|
|
|
|
get("/_matrix/federation/v1/make_join/<_>/<_>", data = "<body>")
|
|
|
|
)]
|
|
|
|
#[tracing::instrument(skip(db, body))]
|
|
|
|
pub fn create_join_event_template_route<'a>(
|
|
|
|
db: State<'a, Database>,
|
|
|
|
body: Ruma<create_join_event_template::v1::Request<'_>>,
|
|
|
|
) -> ConduitResult<create_join_event_template::v1::Response> {
|
|
|
|
if !db.globals.allow_federation() {
|
|
|
|
return Err(Error::bad_config("Federation is disabled."));
|
|
|
|
}
|
|
|
|
|
|
|
|
if !db.rooms.exists(&body.room_id)? {
|
|
|
|
return Err(Error::BadRequest(
|
|
|
|
ErrorKind::NotFound,
|
|
|
|
"Server is not in room.",
|
|
|
|
));
|
|
|
|
}
|
|
|
|
|
|
|
|
if !body.ver.contains(&RoomVersionId::Version6) {
|
|
|
|
return Err(Error::BadRequest(
|
|
|
|
ErrorKind::IncompatibleRoomVersion {
|
|
|
|
room_version: RoomVersionId::Version6,
|
|
|
|
},
|
|
|
|
"Room version not supported.",
|
|
|
|
));
|
|
|
|
}
|
|
|
|
|
|
|
|
let prev_events = db
|
|
|
|
.rooms
|
|
|
|
.get_pdu_leaves(&body.room_id)?
|
|
|
|
.into_iter()
|
|
|
|
.take(20)
|
|
|
|
.collect::<Vec<_>>();
|
|
|
|
|
|
|
|
let create_event = db
|
|
|
|
.rooms
|
|
|
|
.room_state_get(&body.room_id, &EventType::RoomCreate, "")?;
|
|
|
|
|
|
|
|
let create_event_content = create_event
|
|
|
|
.as_ref()
|
|
|
|
.map(|create_event| {
|
|
|
|
Ok::<_, Error>(
|
|
|
|
serde_json::from_value::<Raw<CreateEventContent>>(create_event.content.clone())
|
|
|
|
.expect("Raw::from_value always works.")
|
|
|
|
.deserialize()
|
|
|
|
.map_err(|_| Error::bad_database("Invalid PowerLevels event in db."))?,
|
|
|
|
)
|
|
|
|
})
|
|
|
|
.transpose()?;
|
|
|
|
|
|
|
|
let create_prev_event = if prev_events.len() == 1
|
|
|
|
&& Some(&prev_events[0]) == create_event.as_ref().map(|c| &c.event_id)
|
|
|
|
{
|
|
|
|
create_event.map(Arc::new)
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
};
|
|
|
|
|
|
|
|
// If there was no create event yet, assume we are creating a version 6 room right now
|
|
|
|
let room_version = create_event_content.map_or(RoomVersionId::Version6, |create_event| {
|
|
|
|
create_event.room_version
|
|
|
|
});
|
|
|
|
|
|
|
|
let content = serde_json::to_value(MemberEventContent {
|
|
|
|
avatar_url: None,
|
|
|
|
displayname: None,
|
|
|
|
is_direct: None,
|
|
|
|
membership: MembershipState::Join,
|
|
|
|
third_party_invite: None,
|
|
|
|
})
|
|
|
|
.expect("member event is valid value");
|
|
|
|
|
|
|
|
let state_key = body.user_id.to_string();
|
|
|
|
let kind = EventType::RoomMember;
|
|
|
|
|
|
|
|
let auth_events = db.rooms.get_auth_events(
|
|
|
|
&body.room_id,
|
|
|
|
&kind,
|
|
|
|
&body.user_id,
|
|
|
|
Some(&state_key),
|
|
|
|
&content,
|
|
|
|
)?;
|
|
|
|
|
|
|
|
// Our depth is the maximum depth of prev_events + 1
|
|
|
|
let depth = prev_events
|
|
|
|
.iter()
|
|
|
|
.filter_map(|event_id| Some(db.rooms.get_pdu(event_id).ok()??.depth))
|
|
|
|
.max()
|
|
|
|
.unwrap_or_else(|| uint!(0))
|
|
|
|
+ uint!(1);
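// Illustrative only: prev_events with depths 4 and 7 yield a new depth of 8; an event
// with no known prev_events falls back to 0 + 1 = 1.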
|
|
|
|
|
|
|
|
let mut unsigned = BTreeMap::new();
|
|
|
|
|
|
|
|
if let Some(prev_pdu) = db.rooms.room_state_get(&body.room_id, &kind, &state_key)? {
|
|
|
|
unsigned.insert("prev_content".to_owned(), prev_pdu.content);
|
|
|
|
unsigned.insert(
|
|
|
|
"prev_sender".to_owned(),
|
|
|
|
serde_json::to_value(prev_pdu.sender).expect("UserId::to_value always works"),
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
|
|
|
let pdu = PduEvent {
|
|
|
|
event_id: ruma::event_id!("$thiswillbefilledinlater"),
|
|
|
|
room_id: body.room_id.clone(),
|
|
|
|
sender: body.user_id.clone(),
|
|
|
|
origin_server_ts: utils::millis_since_unix_epoch()
|
|
|
|
.try_into()
|
|
|
|
.expect("time is valid"),
|
|
|
|
kind,
|
|
|
|
content,
|
|
|
|
state_key: Some(state_key),
|
|
|
|
prev_events,
|
|
|
|
depth,
|
|
|
|
auth_events: auth_events
|
|
|
|
.iter()
|
|
|
|
.map(|(_, pdu)| pdu.event_id.clone())
|
|
|
|
.collect(),
|
|
|
|
redacts: None,
|
|
|
|
unsigned,
|
|
|
|
hashes: ruma::events::pdu::EventHash {
|
|
|
|
sha256: "aaa".to_owned(),
|
|
|
|
},
|
|
|
|
signatures: BTreeMap::new(),
|
|
|
|
};
|
|
|
|
|
|
|
|
let auth_check = state_res::auth_check(
|
|
|
|
&room_version,
|
|
|
|
&Arc::new(pdu.clone()),
|
|
|
|
create_prev_event,
|
|
|
|
&auth_events,
|
|
|
|
None, // TODO: third_party_invite
|
|
|
|
)
|
|
|
|
.map_err(|e| {
|
|
|
|
error!("{:?}", e);
|
|
|
|
Error::bad_database("Auth check failed.")
|
|
|
|
})?;
|
|
|
|
|
|
|
|
if !auth_check {
|
|
|
|
return Err(Error::BadRequest(
|
|
|
|
ErrorKind::InvalidParam,
|
|
|
|
"Event is not authorized.",
|
|
|
|
));
|
|
|
|
}
|
|
|
|
|
|
|
|
// Hash and sign
|
|
|
|
let mut pdu_json =
|
|
|
|
utils::to_canonical_object(&pdu).expect("event is valid, we just created it");
|
|
|
|
|
|
|
|
pdu_json.remove("event_id");
|
|
|
|
|
|
|
|
// Add origin because synapse likes that (and it's required in the spec)
|
|
|
|
pdu_json.insert(
|
|
|
|
"origin".to_owned(),
|
|
|
|
to_canonical_value(db.globals.server_name())
|
|
|
|
.expect("server name is a valid CanonicalJsonValue"),
|
|
|
|
);
|
|
|
|
|
|
|
|
Ok(create_join_event_template::v1::Response {
|
|
|
|
room_version: Some(RoomVersionId::Version6),
|
|
|
|
event: serde_json::from_value::<Raw<_>>(
|
|
|
|
serde_json::to_value(pdu_json).expect("CanonicalJson is valid serde_json::Value"),
|
|
|
|
)
|
|
|
|
.expect("Raw::from_value always works"),
|
|
|
|
}
|
|
|
|
.into())
|
|
|
|
}
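// In the join handshake the joining server first calls this make_join endpoint to obtain an
// unsigned membership event template, signs the filled-in event, and then submits it to the
// send_join endpoint handled by `create_join_event_route` below.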
|
|
|
|
|
|
|
|
#[cfg_attr(
|
|
|
|
feature = "conduit_bin",
|
|
|
|
put("/_matrix/federation/v2/send_join/<_>/<_>", data = "<body>")
|
|
|
|
)]
|
|
|
|
#[tracing::instrument(skip(db, body))]
|
|
|
|
pub async fn create_join_event_route<'a>(
|
|
|
|
db: State<'a, Database>,
|
|
|
|
body: Ruma<create_join_event::v2::Request<'_>>,
|
|
|
|
) -> ConduitResult<create_join_event::v2::Response> {
|
|
|
|
if !db.globals.allow_federation() {
|
|
|
|
return Err(Error::bad_config("Federation is disabled."));
|
|
|
|
}
|
|
|
|
|
|
|
|
// We need to return the state prior to joining, so keep a reference to it here
|
|
|
|
let shortstatehash =
|
|
|
|
db.rooms
|
|
|
|
.current_shortstatehash(&body.room_id)?
|
|
|
|
.ok_or(Error::BadRequest(
|
|
|
|
ErrorKind::NotFound,
|
|
|
|
"Pdu state not found.",
|
|
|
|
))?;
|
|
|
|
|
|
|
|
let pub_key_map = RwLock::new(BTreeMap::new());
|
|
|
|
let mut auth_cache = EventMap::new();
|
|
|
|
|
|
|
|
// We do not add the event_id field to the pdu here because of signature and hashes checks
|
|
|
|
let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(&body.pdu) {
|
|
|
|
Ok(t) => t,
|
|
|
|
Err(_) => {
|
|
|
|
// Event could not be converted to canonical json
|
|
|
|
return Err(Error::BadRequest(
|
|
|
|
ErrorKind::InvalidParam,
|
|
|
|
"Could not convert event to canonical json.",
|
|
|
|
));
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
let origin = serde_json::from_value::<Box<ServerName>>(
|
|
|
|
serde_json::to_value(value.get("origin").ok_or(Error::BadRequest(
|
|
|
|
ErrorKind::InvalidParam,
|
|
|
|
"Event needs an origin field.",
|
|
|
|
))?)
|
|
|
|
.expect("CanonicalJson is valid json value"),
|
|
|
|
)
|
|
|
|
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?;
|
|
|
|
|
|
|
|
let pdu_id = handle_incoming_pdu(
|
|
|
|
&origin,
|
|
|
|
&event_id,
|
|
|
|
value,
|
|
|
|
true,
|
|
|
|
&db,
|
|
|
|
&pub_key_map,
|
|
|
|
&mut auth_cache,
|
|
|
|
)
|
|
|
|
.await
|
|
|
|
.map_err(|_| {
|
|
|
|
Error::BadRequest(
|
|
|
|
ErrorKind::InvalidParam,
|
|
|
|
"Error while handling incoming PDU.",
|
|
|
|
)
|
|
|
|
})?
|
|
|
|
.ok_or(Error::BadRequest(
|
|
|
|
ErrorKind::InvalidParam,
|
|
|
|
"Could not accept incoming PDU as timeline event.",
|
|
|
|
))?;
|
|
|
|
|
|
|
|
let state_ids = db.rooms.state_full_ids(shortstatehash)?;
|
|
|
|
|
|
|
|
let mut auth_chain_ids = BTreeSet::<EventId>::new();
|
|
|
|
let mut todo = state_ids.iter().cloned().collect::<BTreeSet<_>>();
|
|
|
|
|
|
|
|
while let Some(event_id) = todo.iter().next().cloned() {
|
|
|
|
if let Some(pdu) = db.rooms.get_pdu(&event_id)? {
|
|
|
|
todo.extend(
|
|
|
|
pdu.auth_events
|
|
|
|
.clone()
|
|
|
|
.into_iter()
|
|
|
|
.collect::<BTreeSet<_>>()
|
|
|
|
.difference(&auth_chain_ids)
|
|
|
|
.cloned(),
|
|
|
|
);
|
|
|
|
auth_chain_ids.extend(pdu.auth_events.into_iter());
|
|
|
|
} else {
|
|
|
|
warn!("Could not find pdu mentioned in auth events.");
|
|
|
|
}
|
|
|
|
|
|
|
|
todo.remove(&event_id);
|
|
|
|
}
|
|
|
|
|
|
|
|
for server in db
|
|
|
|
.rooms
|
|
|
|
.room_servers(&body.room_id)
|
|
|
|
.filter_map(|r| r.ok())
|
|
|
|
.filter(|server| &**server != db.globals.server_name())
|
|
|
|
{
|
|
|
|
db.sending.send_pdu(&server, &pdu_id)?;
|
|
|
|
}
|
|
|
|
|
|
|
|
Ok(create_join_event::v2::Response {
|
|
|
|
room_state: RoomState {
|
|
|
|
auth_chain: auth_chain_ids
|
|
|
|
.iter()
|
|
|
|
.filter_map(|id| db.rooms.get_pdu_json(&id).ok().flatten())
|
2021-04-21 10:59:07 +02:00
|
|
|
.map(PduEvent::convert_to_outgoing_federation_event)
|
2021-04-16 18:18:29 +02:00
|
|
|
.collect(),
|
|
|
|
state: state_ids
|
|
|
|
.iter()
|
|
|
|
.filter_map(|id| db.rooms.get_pdu_json(&id).ok().flatten())
|
2021-04-21 10:59:07 +02:00
|
|
|
.map(PduEvent::convert_to_outgoing_federation_event)
|
2021-04-16 18:18:29 +02:00
|
|
|
.collect(),
|
|
|
|
},
|
|
|
|
}
|
|
|
|
.into())
|
|
|
|
}
|
|
|
|
|
2021-04-11 21:01:27 +02:00
|
|
|
#[cfg_attr(
|
|
|
|
feature = "conduit_bin",
|
|
|
|
put("/_matrix/federation/v2/invite/<_>/<_>", data = "<body>")
|
|
|
|
)]
|
|
|
|
#[tracing::instrument(skip(db, body))]
|
2021-04-13 21:34:31 +02:00
|
|
|
pub async fn create_invite_route<'a>(
|
2021-04-11 21:01:27 +02:00
|
|
|
db: State<'a, Database>,
|
|
|
|
body: Ruma<create_invite::v2::Request>,
|
|
|
|
) -> ConduitResult<create_invite::v2::Response> {
|
2021-04-16 18:18:29 +02:00
|
|
|
if !db.globals.allow_federation() {
|
|
|
|
return Err(Error::bad_config("Federation is disabled."));
|
|
|
|
}
|
|
|
|
|
2021-04-11 21:01:27 +02:00
|
|
|
if body.room_version < RoomVersionId::Version6 {
|
|
|
|
return Err(Error::BadRequest(
|
|
|
|
ErrorKind::IncompatibleRoomVersion {
|
|
|
|
room_version: body.room_version.clone(),
|
|
|
|
},
|
|
|
|
"Server does not support this room version.",
|
|
|
|
));
|
|
|
|
}
|
|
|
|
|
|
|
|
let mut signed_event = utils::to_canonical_object(&body.event)
|
|
|
|
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invite event is invalid."))?;
|
|
|
|
|
|
|
|
ruma::signatures::hash_and_sign_event(
|
|
|
|
db.globals.server_name().as_str(),
|
|
|
|
db.globals.keypair(),
|
|
|
|
&mut signed_event,
|
|
|
|
&body.room_version,
|
|
|
|
)
|
|
|
|
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Failed to sign event."))?;
|
|
|
|
|
2021-04-13 21:34:31 +02:00
|
|
|
// Generate event id
|
|
|
|
let event_id = EventId::try_from(&*format!(
|
|
|
|
"${}",
|
|
|
|
ruma::signatures::reference_hash(&signed_event, &body.room_version)
|
|
|
|
.expect("ruma can calculate reference hashes")
|
|
|
|
))
|
|
|
|
.expect("ruma's reference hashes are valid event ids");
|
|
|
|
|
|
|
|
// Add event_id back
|
|
|
|
signed_event.insert(
|
|
|
|
"event_id".to_owned(),
|
|
|
|
to_canonical_value(&event_id).expect("EventId is a valid CanonicalJsonValue"),
|
|
|
|
);
|
|
|
|
|
2021-04-11 21:01:27 +02:00
|
|
|
let sender = serde_json::from_value(
|
|
|
|
serde_json::to_value(
|
|
|
|
signed_event
|
|
|
|
.get("sender")
|
2021-04-14 10:43:31 +02:00
|
|
|
.ok_or(Error::BadRequest(
|
|
|
|
ErrorKind::InvalidParam,
|
|
|
|
"Event had no sender field.",
|
|
|
|
))?
|
2021-04-11 21:01:27 +02:00
|
|
|
.clone(),
|
|
|
|
)
|
|
|
|
.expect("CanonicalJsonValue to serde_json::Value always works"),
|
|
|
|
)
|
|
|
|
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "sender is not a user id."))?;
|
|
|
|
let invited_user = serde_json::from_value(
|
|
|
|
serde_json::to_value(
|
|
|
|
signed_event
|
|
|
|
.get("state_key")
|
2021-04-14 10:43:31 +02:00
|
|
|
.ok_or(Error::BadRequest(
|
|
|
|
ErrorKind::InvalidParam,
|
|
|
|
"Event had no state_key field.",
|
|
|
|
))?
|
2021-04-11 21:01:27 +02:00
|
|
|
.clone(),
|
|
|
|
)
|
|
|
|
.expect("CanonicalJsonValue to serde_json::Value always works"),
|
|
|
|
)
|
|
|
|
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "state_key is not a user id."))?;
|
|
|
|
|
|
|
|
let mut invite_state = body.invite_room_state.clone();
|
|
|
|
|
|
|
|
let mut event = serde_json::from_str::<serde_json::Map<String, serde_json::Value>>(
|
|
|
|
&body.event.json().to_string(),
|
|
|
|
)
|
|
|
|
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event bytes."))?;
|
|
|
|
|
|
|
|
event.insert("event_id".to_owned(), "$dummy".into());
|
|
|
|
|
2021-04-13 21:34:31 +02:00
|
|
|
let pdu = serde_json::from_value::<PduEvent>(event.into()).map_err(|e| {
|
|
|
|
warn!("Invalid invite event: {}", e);
|
|
|
|
Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event.")
|
|
|
|
})?;
|
|
|
|
|
|
|
|
invite_state.push(pdu.to_stripped_state_event());
|
|
|
|
|
|
|
|
// If the room already exists, the remote server will notify us about the join via /send
|
|
|
|
if !db.rooms.exists(&pdu.room_id)? {
|
|
|
|
db.rooms.update_membership(
|
|
|
|
&body.room_id,
|
|
|
|
&invited_user,
|
|
|
|
MembershipState::Invite,
|
|
|
|
&sender,
|
|
|
|
Some(invite_state),
|
2021-04-14 10:43:31 +02:00
|
|
|
&db,
|
2021-04-13 21:34:31 +02:00
|
|
|
)?;
|
|
|
|
}
|
2021-04-11 21:01:27 +02:00
|
|
|
|
|
|
|
Ok(create_invite::v2::Response {
|
|
|
|
event: PduEvent::convert_to_outgoing_federation_event(signed_event),
|
|
|
|
}
|
|
|
|
.into())
|
|
|
|
}
|
|
|
|
|
2021-04-21 10:51:34 +02:00
|
|
|
#[cfg_attr(
|
|
|
|
feature = "conduit_bin",
|
|
|
|
get("/_matrix/federation/v1/user/devices/<_>", data = "<body>")
|
|
|
|
)]
|
|
|
|
#[tracing::instrument(skip(db, body))]
|
|
|
|
pub fn get_devices_route<'a>(
|
|
|
|
db: State<'a, Database>,
|
|
|
|
body: Ruma<get_devices::v1::Request<'_>>,
|
|
|
|
) -> ConduitResult<get_devices::v1::Response> {
|
|
|
|
if !db.globals.allow_federation() {
|
|
|
|
return Err(Error::bad_config("Federation is disabled."));
|
|
|
|
}
|
|
|
|
|
|
|
|
Ok(get_devices::v1::Response {
|
|
|
|
user_id: body.user_id.clone(),
|
|
|
|
stream_id: db
|
|
|
|
.users
|
|
|
|
.get_devicelist_version(&body.user_id)?
|
|
|
|
.unwrap_or(0)
|
|
|
|
.try_into()
|
|
|
|
.expect("version will not grow that large"),
|
|
|
|
devices: db
|
|
|
|
.users
|
|
|
|
.all_devices_metadata(&body.user_id)
|
|
|
|
.filter_map(|r| r.ok())
|
|
|
|
.filter_map(|metadata| {
|
|
|
|
Some(UserDevice {
|
|
|
|
keys: db
|
|
|
|
.users
|
|
|
|
.get_device_keys(&body.user_id, &metadata.device_id)
|
|
|
|
.ok()??,
|
|
|
|
device_id: metadata.device_id,
|
|
|
|
device_display_name: metadata.display_name,
|
|
|
|
})
|
|
|
|
})
|
|
|
|
.collect(),
|
|
|
|
}
|
|
|
|
.into())
|
|
|
|
}
|
|
|
|
|
2021-04-16 18:18:29 +02:00
|
|
|
#[cfg_attr(
|
|
|
|
feature = "conduit_bin",
|
|
|
|
get("/_matrix/federation/v1/query/directory", data = "<body>")
|
|
|
|
)]
|
|
|
|
#[tracing::instrument(skip(db, body))]
|
|
|
|
pub fn get_room_information_route<'a>(
|
|
|
|
db: State<'a, Database>,
|
|
|
|
body: Ruma<get_room_information::v1::Request<'_>>,
|
|
|
|
) -> ConduitResult<get_room_information::v1::Response> {
|
|
|
|
if !db.globals.allow_federation() {
|
|
|
|
return Err(Error::bad_config("Federation is disabled."));
|
|
|
|
}
|
|
|
|
|
|
|
|
let room_id = db
|
|
|
|
.rooms
|
|
|
|
.id_from_alias(&body.room_alias)?
|
2021-04-21 14:06:39 +02:00
|
|
|
.ok_or(Error::BadRequest(
|
|
|
|
ErrorKind::NotFound,
|
|
|
|
"Room alias not found.",
|
|
|
|
))?;
|
2021-04-16 18:18:29 +02:00
|
|
|
|
|
|
|
Ok(get_room_information::v1::Response {
|
|
|
|
room_id,
|
|
|
|
servers: vec![db.globals.server_name().to_owned()],
|
|
|
|
}
|
|
|
|
.into())
|
|
|
|
}
|
|
|
|
|
2020-10-05 22:19:22 +02:00
|
|
|
#[cfg_attr(
|
|
|
|
feature = "conduit_bin",
|
|
|
|
get("/_matrix/federation/v1/query/profile", data = "<body>")
|
|
|
|
)]
|
2021-02-28 12:41:03 +01:00
|
|
|
#[tracing::instrument(skip(db, body))]
|
2020-10-05 22:19:22 +02:00
|
|
|
pub fn get_profile_information_route<'a>(
|
|
|
|
db: State<'a, Database>,
|
|
|
|
body: Ruma<get_profile_information::v1::Request<'_>>,
|
|
|
|
) -> ConduitResult<get_profile_information::v1::Response> {
|
2021-01-01 13:47:53 +01:00
|
|
|
if !db.globals.allow_federation() {
|
2020-11-14 23:13:06 +01:00
|
|
|
return Err(Error::bad_config("Federation is disabled."));
|
2020-10-06 21:04:51 +02:00
|
|
|
}
|
|
|
|
|
2020-10-05 22:19:22 +02:00
|
|
|
let mut displayname = None;
|
|
|
|
let mut avatar_url = None;
|
|
|
|
|
2020-12-05 00:16:17 +01:00
|
|
|
match &body.field {
|
|
|
|
// TODO: what to do with custom
|
|
|
|
Some(ProfileField::_Custom(_s)) => {}
|
2020-10-05 22:19:22 +02:00
|
|
|
Some(ProfileField::DisplayName) => displayname = db.users.displayname(&body.user_id)?,
|
|
|
|
Some(ProfileField::AvatarUrl) => avatar_url = db.users.avatar_url(&body.user_id)?,
|
|
|
|
None => {
|
|
|
|
displayname = db.users.displayname(&body.user_id)?;
|
|
|
|
avatar_url = db.users.avatar_url(&body.user_id)?;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
Ok(get_profile_information::v1::Response {
|
|
|
|
displayname,
|
|
|
|
avatar_url,
|
|
|
|
}
|
|
|
|
.into())
|
|
|
|
}
|
|
|
|
|
2021-04-13 18:17:51 +02:00
|
|
|
pub async fn fetch_required_signing_keys(
|
|
|
|
event: &BTreeMap<String, CanonicalJsonValue>,
|
2021-04-13 21:34:31 +02:00
|
|
|
pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, String>>>,
|
2021-04-13 18:17:51 +02:00
|
|
|
db: &Database,
|
|
|
|
) -> Result<()> {
|
|
|
|
// We go through all the signatures we see on the value and fetch the corresponding signing
|
|
|
|
// keys
|
2021-04-14 10:43:31 +02:00
|
|
|
for (signature_server, signature) in match event.get("signatures").ok_or(
|
|
|
|
Error::BadServerResponse("No signatures in server response pdu."),
|
|
|
|
)? {
|
2021-04-13 18:17:51 +02:00
|
|
|
CanonicalJsonValue::Object(map) => map,
|
|
|
|
_ => {
|
|
|
|
return Err(Error::BadServerResponse(
|
|
|
|
"Invalid signatures object in server response pdu.",
|
|
|
|
))
|
|
|
|
}
|
|
|
|
} {
|
|
|
|
let signature_object = match signature {
|
|
|
|
CanonicalJsonValue::Object(map) => map,
|
|
|
|
_ => {
|
|
|
|
return Err(Error::BadServerResponse(
|
|
|
|
"Invalid signatures content object in server response pdu.",
|
|
|
|
))
|
|
|
|
}
|
|
|
|
};
|
2020-10-06 21:04:51 +02:00
|
|
|
|
2021-04-13 18:17:51 +02:00
|
|
|
let signature_ids = signature_object.keys().collect::<Vec<_>>();
|
2020-10-05 22:19:22 +02:00
|
|
|
|
2021-04-13 18:17:51 +02:00
|
|
|
debug!("Fetching signing keys for {}", signature_server);
|
|
|
|
let keys = match fetch_signing_keys(
|
|
|
|
db,
|
|
|
|
&Box::<ServerName>::try_from(&**signature_server).map_err(|_| {
|
|
|
|
Error::BadServerResponse("Invalid servername in signatures of server response pdu.")
|
|
|
|
})?,
|
|
|
|
signature_ids,
|
|
|
|
)
|
|
|
|
.await
|
|
|
|
{
|
|
|
|
Ok(keys) => keys,
|
2021-04-14 09:39:06 +02:00
|
|
|
Err(_) => {
|
|
|
|
warn!("Signature verification failed: Could not fetch signing key.",);
|
|
|
|
continue;
|
2021-04-13 18:17:51 +02:00
|
|
|
}
|
|
|
|
};
|
2020-10-05 22:19:22 +02:00
|
|
|
|
2021-04-13 21:34:31 +02:00
|
|
|
pub_key_map
|
|
|
|
.write()
|
|
|
|
.map_err(|_| Error::bad_database("RwLock is poisoned."))?
|
|
|
|
.insert(signature_server.clone(), keys);
|
2020-10-05 22:19:22 +02:00
|
|
|
}
|
2021-04-13 18:17:51 +02:00
|
|
|
|
|
|
|
Ok(())
|
2020-10-05 22:19:22 +02:00
|
|
|
}
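// Illustrative sketch (not part of the original source): the `signatures` object walked
// above has roughly this shape,
//
//     "signatures": {
//         "other.server.name": {
//             "ed25519:key_id": "<base64-encoded signature>"
//         }
//     }
//
// so a successful run leaves `pub_key_map` mapping "other.server.name" to the verify keys
// needed to check those signatures.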
|
2020-12-08 12:34:46 +01:00
|
|
|
|
|
|
|
#[cfg(test)]
|
|
|
|
mod tests {
|
2021-04-24 12:27:46 +02:00
|
|
|
use super::{add_port_to_hostname, get_ip_with_port, FedDest};
|
2020-12-08 12:34:46 +01:00
|
|
|
|
|
|
|
#[test]
|
|
|
|
fn ips_get_default_ports() {
|
|
|
|
assert_eq!(
|
2021-04-21 05:35:44 +02:00
|
|
|
get_ip_with_port("1.1.1.1"),
|
|
|
|
Some(FedDest::Literal("1.1.1.1:8448".parse().unwrap()))
|
2020-12-08 12:34:46 +01:00
|
|
|
);
|
|
|
|
assert_eq!(
|
2021-04-21 05:35:44 +02:00
|
|
|
get_ip_with_port("dead:beef::"),
|
|
|
|
Some(FedDest::Literal("[dead:beef::]:8448".parse().unwrap()))
|
2020-12-08 12:34:46 +01:00
|
|
|
);
|
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
fn ips_keep_custom_ports() {
|
|
|
|
assert_eq!(
|
2021-04-21 05:35:44 +02:00
|
|
|
get_ip_with_port("1.1.1.1:1234"),
|
|
|
|
Some(FedDest::Literal("1.1.1.1:1234".parse().unwrap()))
|
2020-12-08 12:34:46 +01:00
|
|
|
);
|
|
|
|
assert_eq!(
|
2021-04-21 05:35:44 +02:00
|
|
|
get_ip_with_port("[dead::beef]:8933"),
|
|
|
|
Some(FedDest::Literal("[dead::beef]:8933".parse().unwrap()))
|
2020-12-08 12:34:46 +01:00
|
|
|
);
|
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
fn hostnames_get_default_ports() {
|
|
|
|
assert_eq!(
|
2021-04-21 05:35:44 +02:00
|
|
|
add_port_to_hostname("example.com"),
|
|
|
|
FedDest::Named(String::from("example.com"), String::from(":8448"))
|
2020-12-08 12:34:46 +01:00
|
|
|
)
|
|
|
|
}
|
|
|
|
|
|
|
|
#[test]
|
|
|
|
fn hostnames_keep_custom_ports() {
|
|
|
|
assert_eq!(
|
2021-04-21 05:35:44 +02:00
|
|
|
add_port_to_hostname("example.com:1337"),
|
|
|
|
FedDest::Named(String::from("example.com"), String::from(":1337"))
|
2020-12-08 12:34:46 +01:00
|
|
|
)
|
|
|
|
}
|
|
|
|
}
|