Mirror of https://gitlab.com/famedly/conduit.git (synced 2024-11-05 05:59:12 +01:00)

Commit 960ba8bd99: Merged current next

38 changed files with 1895 additions and 894 deletions
@@ -21,21 +21,18 @@ variables:
     - if: '$CI_COMMIT_BRANCH == "next"'
     - if: "$CI_COMMIT_TAG"
   interruptible: true
-  image: "rust:latest"
+  image: "rust:1.56"
   tags: ["docker"]
-  cache:
-    paths:
-      - cargohome
-      - target/
-    key: "build_cache--$TARGET--$CI_COMMIT_BRANCH--release"
   variables:
     CARGO_PROFILE_RELEASE_LTO: "true"
     CARGO_PROFILE_RELEASE_CODEGEN_UNITS: "1"
+    CARGO_INCREMENTAL: "false" # https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow
   before_script:
     - 'echo "Building for target $TARGET"'
-    - 'mkdir -p cargohome && CARGOHOME="cargohome"'
     - "rustc --version && cargo --version && rustup show" # Print version info for debugging
     - "rustup target add $TARGET"
+    # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results:
+    - if [ -n "${SCCACHE_BIN_URL}" ]; then curl $SCCACHE_BIN_URL --output /sccache && chmod +x /sccache && export RUSTC_WRAPPER=/sccache; fi
   script:
     - time cargo build --target $TARGET --release
     - 'cp "target/$TARGET/release/conduit" "conduit-$TARGET"'

@@ -216,20 +213,16 @@ test:cargo:
   image: "rust:latest"
   tags: ["docker"]
   variables:
-    CARGO_HOME: "cargohome"
+    CARGO_INCREMENTAL: "false" # https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow
-  cache:
-    paths:
-      - target
-      - cargohome
-    key: test_cache
   interruptible: true
   before_script:
-    - mkdir -p $CARGO_HOME && echo "using $CARGO_HOME to cache cargo deps"
+    # - mkdir -p $CARGO_HOME
     - apt-get update -yqq
-    - apt-get install -yqq --no-install-recommends build-essential libssl-dev pkg-config wget
+    - apt-get install -yqq --no-install-recommends build-essential libssl-dev pkg-config libclang-dev
     - rustup component add clippy rustfmt
-    - wget "https://faulty-storage.de/gitlab-report"
-    - chmod +x ./gitlab-report
+    - curl "https://faulty-storage.de/gitlab-report" --output ./gitlab-report && chmod +x ./gitlab-report
+    # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results:
+    - if [ -n "${SCCACHE_BIN_URL}" ]; then curl $SCCACHE_BIN_URL --output /sccache && chmod +x /sccache && export RUSTC_WRAPPER=/sccache; fi
   script:
     - rustc --version && cargo --version # Print version info for debugging
     - cargo fmt --all -- --check
@@ -42,6 +42,14 @@ could help.

 ## Appservice-specific instructions

+### Remove an appservice
+
+To remove an appservice go to your admin room and execute
+
+```@conduit:your.server.name: unregister_appservice <name>```
+
+where `<name>` one of the output of `list_appservices`.
+
 ### Tested appservices

 These appservices have been tested and work with Conduit without any extra steps:
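For illustration only (the appservice name below is hypothetical): if `list_appservices` printed an entry such as `my_bridge`, the corresponding removal command would be `@conduit:your.server.name: unregister_appservice my_bridge`.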
Cargo.lock (generated, 789 changes): diff suppressed because it is too large.
Cargo.toml (14 changes)

@@ -7,7 +7,8 @@ homepage = "https://conduit.rs"
 repository = "https://gitlab.com/famedly/conduit"
 readme = "README.md"
 version = "0.2.0"
-edition = "2018"
+rust-version = "1.56"
+edition = "2021"

 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

@@ -19,7 +20,7 @@ rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle request

 # Used for matrix spec type definitions and helpers
 #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
-ruma = { git = "https://github.com/ruma/ruma", rev = "16f031fabb7871fcd738b0f25391193ee4ca28a9", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
+ruma = { git = "https://github.com/ruma/ruma", rev = "08d60b3d376b63462f769d4b9bd3bbfb560d501a", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
 #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
 #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }

@@ -36,7 +37,7 @@ http = "0.2.4"
 # Used to find data directory for default db path
 directories = "3.0.2"
 # Used for ruma wrapper
-serde_json = { version = "1.0.67", features = ["raw_value"] }
+serde_json = { version = "1.0.70", features = ["raw_value"] }
 # Used for appservice registration files
 serde_yaml = "0.8.20"
 # Used for pdu definition

@@ -78,17 +79,20 @@ crossbeam = { version = "0.8.1", optional = true }
 num_cpus = "1.13.0"
 threadpool = "1.8.1"
 heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true }
+rocksdb = { version = "0.17.0", default-features = false, features = ["multi-threaded-cf", "zstd"], optional = true }
+
 thread_local = "1.1.3"
 # used for TURN server authentication
 hmac = "0.11.0"
 sha-1 = "0.9.8"

 [features]
-default = ["conduit_bin", "backend_sqlite"]
+default = ["conduit_bin", "backend_sqlite", "backend_rocksdb"]
 backend_sled = ["sled"]
 backend_sqlite = ["sqlite"]
 backend_heed = ["heed", "crossbeam"]
-sqlite = ["rusqlite", "parking_lot", "crossbeam", "tokio/signal"]
+backend_rocksdb = ["rocksdb"]
+sqlite = ["rusqlite", "parking_lot", "tokio/signal"]
 conduit_bin = [] # TODO: add rocket to this when it is optional

 [[bin]]
@@ -1,5 +1,5 @@
 # syntax=docker/dockerfile:1
-FROM docker.io/rust:1.53-alpine AS builder
+FROM docker.io/rust:1.58-alpine AS builder
 WORKDIR /usr/src/conduit

 # Install required packages to build Conduit and it's dependencies
@@ -1,11 +1,15 @@
 [global]
-# The server_name is the name of this server. It is used as a suffix for user
+# The server_name is the pretty name of this server. It is used as a suffix for user
 # and room ids. Examples: matrix.org, conduit.rs
-# The Conduit server needs to be reachable at https://your.server.name/ on port
-# 443 (client-server) and 8448 (federation) OR you can create /.well-known
-# files to redirect requests. See
+
+# The Conduit server needs all /_matrix/ requests to be reachable at
+# https://your.server.name/ on port 443 (client-server) and 8448 (federation).
+
+# If that's not possible for you, you can create /.well-known files to redirect
+# requests. See
 # https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client
-# and https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server
+# and
+# https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server
 # for more information

 # YOU NEED TO EDIT THIS

@@ -13,6 +17,7 @@

 # This is the only directory where Conduit will save its data
 database_path = "/var/lib/conduit/"
+database_backend = "rocksdb"

 # The port Conduit will be running on. You need to set up a reverse proxy in
 # your web server (e.g. apache or nginx), so all requests to /_matrix on port
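Read together with the new `database_backend` option added to the Config struct later in this commit (it falls back to "sqlite" when omitted), the relevant part of a server config would look roughly like the sketch below; the values simply mirror the example file above:

    [global]
    database_path = "/var/lib/conduit/"
    # "sqlite" is the default when this key is missing; "rocksdb" selects the new backend
    database_backend = "rocksdb"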
@@ -94,26 +94,20 @@ So...step by step:
      server_name <SUBDOMAIN>.<DOMAIN>;
      listen 80 default_server;

-     location /.well-known/matrix/ {
-        root /var/www;
-        default_type application/json;
-        add_header Access-Control-Allow-Origin *;
+     location /.well-known/matrix/server {
+        return 200 '{"m.server": "<SUBDOMAIN>.<DOMAIN>:443"}';
+        add_header Content-Type application/json;
      }
-  }
-  ```
-
-  - `./nginx/www/.well-known/matrix/client` (relative to the compose file, you can change this, but then also need to change the volume mapping)
-  ```json
-  {
-    "m.homeserver": {
-      "base_url": "https://<SUBDOMAIN>.<DOMAIN>"
-    }
-  }
-  ```
-  - `./nginx/www/.well-known/matrix/server` (relative to the compose file, you can change this, but then also need to change the volume mapping)
-  ```json
-  {
-    "m.server": "<SUBDOMAIN>.<DOMAIN>:443"
-  }
+
+     location /.well-known/matrix/client {
+        return 200 '{"m.homeserver": {"base_url": "https://<SUBDOMAIN>.<DOMAIN>"}}';
+        add_header Content-Type application/json;
+        add_header "Access-Control-Allow-Origin" *;
+     }
+
+     location / {
+        return 404;
+     }
  }
  ```
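Assuming the reverse proxy is already serving the domain, the new delegation blocks can be sanity-checked by fetching the well-known documents directly and comparing them with the JSON returned above:

    curl https://<SUBDOMAIN>.<DOMAIN>/.well-known/matrix/server
    # expected: {"m.server": "<SUBDOMAIN>.<DOMAIN>:443"}

    curl https://<SUBDOMAIN>.<DOMAIN>/.well-known/matrix/client
    # expected: {"m.homeserver": {"base_url": "https://<SUBDOMAIN>.<DOMAIN>"}}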
@@ -1 +0,0 @@
-1.53
@@ -1,6 +1,9 @@
 use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma};
-use ruma::api::client::{error::ErrorKind, r0::context::get_context};
-use std::convert::TryFrom;
+use ruma::{
+    api::client::{error::ErrorKind, r0::context::get_context},
+    events::EventType,
+};
+use std::{collections::HashSet, convert::TryFrom};

 #[cfg(feature = "conduit_bin")]
 use rocket::get;

@@ -21,6 +24,7 @@ pub async fn get_context_route(
     body: Ruma<get_context::Request<'_>>,
 ) -> ConduitResult<get_context::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+    let sender_device = body.sender_device.as_ref().expect("user is authenticated");

     if !db.rooms.is_joined(sender_user, &body.room_id)? {
         return Err(Error::BadRequest(

@@ -29,6 +33,8 @@ pub async fn get_context_route(
         ));
     }

+    let mut lazy_loaded = HashSet::new();
+
     let base_pdu_id = db
         .rooms
         .get_pdu_id(&body.event_id)?

@@ -45,8 +51,18 @@ pub async fn get_context_route(
         .ok_or(Error::BadRequest(
             ErrorKind::NotFound,
             "Base event not found.",
-        ))?
-        .to_room_event();
+        ))?;
+
+    if !db.rooms.lazy_load_was_sent_before(
+        sender_user,
+        sender_device,
+        &body.room_id,
+        &base_event.sender,
+    )? {
+        lazy_loaded.insert(base_event.sender.clone());
+    }
+
+    let base_event = base_event.to_room_event();

     let events_before: Vec<_> = db
         .rooms

@@ -60,6 +76,17 @@ pub async fn get_context_route(
         .filter_map(|r| r.ok()) // Remove buggy events
         .collect();

+    for (_, event) in &events_before {
+        if !db.rooms.lazy_load_was_sent_before(
+            sender_user,
+            sender_device,
+            &body.room_id,
+            &event.sender,
+        )? {
+            lazy_loaded.insert(event.sender.clone());
+        }
+    }
+
     let start_token = events_before
         .last()
         .and_then(|(pdu_id, _)| db.rooms.pdu_count(pdu_id).ok())

@@ -82,6 +109,17 @@ pub async fn get_context_route(
         .filter_map(|r| r.ok()) // Remove buggy events
         .collect();

+    for (_, event) in &events_after {
+        if !db.rooms.lazy_load_was_sent_before(
+            sender_user,
+            sender_device,
+            &body.room_id,
+            &event.sender,
+        )? {
+            lazy_loaded.insert(event.sender.clone());
+        }
+    }
+
     let end_token = events_after
         .last()
         .and_then(|(pdu_id, _)| db.rooms.pdu_count(pdu_id).ok())

@@ -92,18 +130,24 @@ pub async fn get_context_route(
         .map(|(_, pdu)| pdu.to_room_event())
         .collect();

-    let mut resp = get_context::Response::new();
-    resp.start = start_token;
-    resp.end = end_token;
-    resp.events_before = events_before;
-    resp.event = Some(base_event);
-    resp.events_after = events_after;
-    resp.state = db // TODO: State at event
-        .rooms
-        .room_state_full(&body.room_id)?
-        .values()
-        .map(|pdu| pdu.to_state_event())
-        .collect();
+    let mut state = Vec::new();
+    for ll_id in &lazy_loaded {
+        if let Some(member_event) =
+            db.rooms
+                .room_state_get(&body.room_id, &EventType::RoomMember, ll_id.as_str())?
+        {
+            state.push(member_event.to_state_event());
+        }
+    }
+
+    let resp = get_context::Response {
+        start: start_token,
+        end: end_token,
+        events_before,
+        event: Some(base_event),
+        events_after,
+        state,
+    };

     Ok(resp.into())
 }
@@ -85,7 +85,7 @@ pub async fn update_device_route(
     Ok(update_device::Response {}.into())
 }

-/// # `PUT /_matrix/client/r0/devices/{deviceId}`
+/// # `DELETE /_matrix/client/r0/devices/{deviceId}`
 ///
 /// Deletes the given device.
 ///
@@ -1,32 +1,47 @@
-use crate::{utils, ConduitResult};
-use ruma::api::client::r0::filter::{self, create_filter, get_filter};
+use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma};
+use ruma::api::client::{
+    error::ErrorKind,
+    r0::filter::{create_filter, get_filter},
+};

 #[cfg(feature = "conduit_bin")]
 use rocket::{get, post};

 /// # `GET /_matrix/client/r0/user/{userId}/filter/{filterId}`
 ///
-/// TODO: Loads a filter that was previously created.
-#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/user/<_>/filter/<_>"))]
-#[tracing::instrument]
-pub async fn get_filter_route() -> ConduitResult<get_filter::Response> {
-    // TODO
-    Ok(get_filter::Response::new(filter::IncomingFilterDefinition {
-        event_fields: None,
-        event_format: filter::EventFormat::default(),
-        account_data: filter::IncomingFilter::default(),
-        room: filter::IncomingRoomFilter::default(),
-        presence: filter::IncomingFilter::default(),
-    })
-    .into())
+/// Loads a filter that was previously created.
+///
+/// - A user can only access their own filters
+#[cfg_attr(
+    feature = "conduit_bin",
+    get("/_matrix/client/r0/user/<_>/filter/<_>", data = "<body>")
+)]
+#[tracing::instrument(skip(db, body))]
+pub async fn get_filter_route(
+    db: DatabaseGuard,
+    body: Ruma<get_filter::Request<'_>>,
+) -> ConduitResult<get_filter::Response> {
+    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+    let filter = match db.users.get_filter(sender_user, &body.filter_id)? {
+        Some(filter) => filter,
+        None => return Err(Error::BadRequest(ErrorKind::NotFound, "Filter not found.")),
+    };
+
+    Ok(get_filter::Response::new(filter).into())
 }

 /// # `PUT /_matrix/client/r0/user/{userId}/filter`
 ///
-/// TODO: Creates a new filter to be used by other endpoints.
-#[cfg_attr(feature = "conduit_bin", post("/_matrix/client/r0/user/<_>/filter"))]
-#[tracing::instrument]
-pub async fn create_filter_route() -> ConduitResult<create_filter::Response> {
-    // TODO
-    Ok(create_filter::Response::new(utils::random_string(10)).into())
+/// Creates a new filter to be used by other endpoints.
+#[cfg_attr(
+    feature = "conduit_bin",
+    post("/_matrix/client/r0/user/<_>/filter", data = "<body>")
+)]
+#[tracing::instrument(skip(db, body))]
+pub async fn create_filter_route(
+    db: DatabaseGuard,
+    body: Ruma<create_filter::Request<'_>>,
+) -> ConduitResult<create_filter::Response> {
+    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+    Ok(create_filter::Response::new(db.users.create_filter(sender_user, &body.filter)?).into())
 }
@@ -15,7 +15,7 @@ use ruma::{
         },
         federation,
     },
-    encryption::UnsignedDeviceInfo,
+    serde::Raw,
     DeviceId, DeviceKeyAlgorithm, UserId,
 };
 use serde_json::json;

@@ -42,16 +42,9 @@ pub async fn upload_keys_route(
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");
     let sender_device = body.sender_device.as_ref().expect("user is authenticated");

-    if let Some(one_time_keys) = &body.one_time_keys {
-        for (key_key, key_value) in one_time_keys {
-            db.users.add_one_time_key(
-                sender_user,
-                sender_device,
-                key_key,
-                key_value,
-                &db.globals,
-            )?;
-        }
+    for (key_key, key_value) in &body.one_time_keys {
+        db.users
+            .add_one_time_key(sender_user, sender_device, key_key, key_value, &db.globals)?;
     }

     if let Some(device_keys) = &body.device_keys {

@@ -279,7 +272,7 @@ pub async fn get_key_changes_route(
     device_list_updates.extend(
         db.users
             .keys_changed(
-                &sender_user.to_string(),
+                sender_user.as_str(),
                 body.from
                     .parse()
                     .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`."))?,

@@ -350,10 +343,8 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
                     Error::bad_database("all_device_keys contained nonexistent device.")
                 })?;

-                keys.unsigned = UnsignedDeviceInfo {
-                    device_display_name: metadata.display_name,
-                };
+                add_unsigned_device_display_name(&mut keys, metadata)
+                    .map_err(|_| Error::bad_database("invalid device keys in database"))?;

                 container.insert(device_id, keys);
             }
         }

@@ -369,10 +360,8 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
                     ),
                 )?;

-                keys.unsigned = UnsignedDeviceInfo {
-                    device_display_name: metadata.display_name,
-                };
+                add_unsigned_device_display_name(&mut keys, metadata)
+                    .map_err(|_| Error::bad_database("invalid device keys in database"))?;

                 container.insert(device_id.to_owned(), keys);
             }
             device_keys.insert(user_id.to_owned(), container);

@@ -441,6 +430,24 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
     })
 }

+fn add_unsigned_device_display_name(
+    keys: &mut Raw<ruma::encryption::DeviceKeys>,
+    metadata: ruma::api::client::r0::device::Device,
+) -> serde_json::Result<()> {
+    if let Some(display_name) = metadata.display_name {
+        let mut object = keys.deserialize_as::<serde_json::Map<String, serde_json::Value>>()?;
+
+        let unsigned = object.entry("unsigned").or_insert_with(|| json!({}));
+        if let serde_json::Value::Object(unsigned_object) = unsigned {
+            unsigned_object.insert("device_display_name".to_owned(), display_name.into());
+        }
+
+        *keys = Raw::from_json(serde_json::value::to_raw_value(&object)?);
+    }
+
+    Ok(())
+}
+
 pub(crate) async fn claim_keys_helper(
     one_time_keys_input: &BTreeMap<Box<UserId>, BTreeMap<Box<DeviceId>, DeviceKeyAlgorithm>>,
     db: &Database,
@@ -23,7 +23,7 @@ use ruma::{
         },
         EventType,
     },
-    serde::{to_canonical_value, CanonicalJsonObject, CanonicalJsonValue},
+    serde::{to_canonical_value, Base64, CanonicalJsonObject, CanonicalJsonValue},
     state_res::{self, RoomVersion},
     uint, EventId, RoomId, RoomVersionId, ServerName, UserId,
 };

@@ -787,7 +787,7 @@ async fn join_room_by_id_helper(
 fn validate_and_add_event_id(
     pdu: &RawJsonValue,
     room_version: &RoomVersionId,
-    pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, String>>>,
+    pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
     db: &Database,
 ) -> Result<(Box<EventId>, CanonicalJsonObject)> {
     let mut value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| {
@@ -6,7 +6,11 @@ use ruma::{
     },
     events::EventType,
 };
-use std::{collections::BTreeMap, convert::TryInto, sync::Arc};
+use std::{
+    collections::{BTreeMap, HashSet},
+    convert::TryInto,
+    sync::Arc,
+};

 #[cfg(feature = "conduit_bin")]
 use rocket::{get, put};

@@ -70,11 +74,11 @@ pub async fn send_message_event_route(
     }

     let mut unsigned = BTreeMap::new();
-    unsigned.insert("transaction_id".to_owned(), body.txn_id.clone().into());
+    unsigned.insert("transaction_id".to_owned(), body.txn_id.to_string().into());

     let event_id = db.rooms.build_and_append_pdu(
         PduBuilder {
-            event_type: EventType::from(&body.event_type),
+            event_type: EventType::from(&*body.event_type),
             content: serde_json::from_str(body.body.body.json().get())
                 .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?,
             unsigned: Some(unsigned),

@@ -117,6 +121,7 @@ pub async fn get_message_events_route(
     body: Ruma<get_message_events::Request<'_>>,
 ) -> ConduitResult<get_message_events::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+    let sender_device = body.sender_device.as_ref().expect("user is authenticated");

     if !db.rooms.is_joined(sender_user, &body.room_id)? {
         return Err(Error::BadRequest(

@@ -133,9 +138,18 @@ pub async fn get_message_events_route(

     let to = body.to.as_ref().map(|t| t.parse());

+    db.rooms
+        .lazy_load_confirm_delivery(sender_user, sender_device, &body.room_id, from)?;
+
     // Use limit or else 10
     let limit = body.limit.try_into().map_or(10_usize, |l: u32| l as usize);

+    let next_token;
+
+    let mut resp = get_message_events::Response::new();
+
+    let mut lazy_loaded = HashSet::new();
+
     match body.dir {
         get_message_events::Direction::Forward => {
             let events_after: Vec<_> = db

@@ -152,20 +166,27 @@ pub async fn get_message_events_route(
                 .take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to`
                 .collect();

-            let end_token = events_after.last().map(|(count, _)| count.to_string());
+            for (_, event) in &events_after {
+                if !db.rooms.lazy_load_was_sent_before(
+                    sender_user,
+                    sender_device,
+                    &body.room_id,
+                    &event.sender,
+                )? {
+                    lazy_loaded.insert(event.sender.clone());
+                }
+            }
+
+            next_token = events_after.last().map(|(count, _)| count).copied();

             let events_after: Vec<_> = events_after
                 .into_iter()
                 .map(|(_, pdu)| pdu.to_room_event())
                 .collect();

-            let mut resp = get_message_events::Response::new();
-            resp.start = Some(body.from.to_owned());
-            resp.end = end_token;
+            resp.start = body.from.to_owned();
+            resp.end = next_token.map(|count| count.to_string());
             resp.chunk = events_after;
-            resp.state = Vec::new();
-
-            Ok(resp.into())
         }
         get_message_events::Direction::Backward => {
             let events_before: Vec<_> = db

@@ -182,20 +203,49 @@ pub async fn get_message_events_route(
                 .take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to`
                 .collect();

-            let start_token = events_before.last().map(|(count, _)| count.to_string());
+            for (_, event) in &events_before {
+                if !db.rooms.lazy_load_was_sent_before(
+                    sender_user,
+                    sender_device,
+                    &body.room_id,
+                    &event.sender,
+                )? {
+                    lazy_loaded.insert(event.sender.clone());
+                }
+            }
+
+            next_token = events_before.last().map(|(count, _)| count).copied();

             let events_before: Vec<_> = events_before
                 .into_iter()
                 .map(|(_, pdu)| pdu.to_room_event())
                 .collect();

-            let mut resp = get_message_events::Response::new();
-            resp.start = Some(body.from.to_owned());
-            resp.end = start_token;
+            resp.start = body.from.to_owned();
+            resp.end = next_token.map(|count| count.to_string());
             resp.chunk = events_before;
-            resp.state = Vec::new();
-
-            Ok(resp.into())
         }
     }

+    resp.state = Vec::new();
+    for ll_id in &lazy_loaded {
+        if let Some(member_event) =
+            db.rooms
+                .room_state_get(&body.room_id, &EventType::RoomMember, ll_id.as_str())?
+        {
+            resp.state.push(member_event.to_state_event());
+        }
+    }
+
+    if let Some(next_token) = next_token {
+        db.rooms.lazy_load_mark_sent(
+            sender_user,
+            sender_device,
+            &body.room_id,
+            lazy_loaded,
+            next_token,
+        );
+    }
+
+    Ok(resp.into())
 }
@@ -52,7 +52,7 @@ pub async fn set_displayname_route(
             .room_state_get(
                 &room_id,
                 &EventType::RoomMember,
-                &sender_user.to_string(),
+                sender_user.as_str(),
             )?
             .ok_or_else(|| {
                 Error::bad_database(

@@ -195,7 +195,7 @@ pub async fn set_avatar_url_route(
             .room_state_get(
                 &room_id,
                 &EventType::RoomMember,
-                &sender_user.to_string(),
+                sender_user.as_str(),
             )?
             .ok_or_else(|| {
                 Error::bad_database(
@@ -44,7 +44,7 @@ pub async fn send_state_event_for_key_route(
         &db,
         sender_user,
         &body.room_id,
-        EventType::from(&body.event_type),
+        EventType::from(&*body.event_type),
         &body.body.body, // Yes, I hate it too
         body.state_key.to_owned(),
     )

@@ -86,7 +86,7 @@ pub async fn send_state_event_for_empty_key_route(
         &db,
         sender_user,
         &body.room_id,
-        EventType::from(&body.event_type),
+        EventType::from(&*body.event_type),
         &body.body.body,
         body.state_key.to_owned(),
     )
@@ -1,6 +1,10 @@
 use crate::{database::DatabaseGuard, ConduitResult, Database, Error, Result, Ruma, RumaResponse};
 use ruma::{
-    api::client::r0::{sync::sync_events, uiaa::UiaaResponse},
+    api::client::r0::{
+        filter::{IncomingFilterDefinition, LazyLoadOptions},
+        sync::sync_events,
+        uiaa::UiaaResponse,
+    },
     events::{
         room::member::{MembershipState, RoomMemberEventContent},
         AnySyncEphemeralRoomEvent, EventType,

@@ -36,13 +40,15 @@ use rocket::{get, tokio};
 /// Calling this endpoint with a `since` parameter from a previous `next_batch` returns:
 /// For joined rooms:
 /// - Some of the most recent events of each timeline that happened after since
-/// - If user joined the room after since: All state events and device list updates in that room
+/// - If user joined the room after since: All state events (unless lazy loading is activated) and
+///   all device list updates in that room
 /// - If the user was already in the room: A list of all events that are in the state now, but were
 ///   not in the state at `since`
 /// - If the state we send contains a member event: Joined and invited member counts, heroes
 /// - Device list updates that happened after `since`
 /// - If there are events in the timeline we send or the user send updated his read mark: Notification counts
 /// - EDUs that are active now (read receipts, typing updates, presence)
+/// - TODO: Allow multiple sync streams to support Pantalaimon
 ///
 /// For invited rooms:
 /// - If the user was invited after `since`: A subset of the state of the room at the point of the invite

@@ -77,34 +83,32 @@ pub async fn sync_events_route(
         Entry::Vacant(v) => {
             let (tx, rx) = tokio::sync::watch::channel(None);

+            v.insert((body.since.clone(), rx.clone()));
+
             tokio::spawn(sync_helper_wrapper(
                 Arc::clone(&arc_db),
                 sender_user.clone(),
                 sender_device.clone(),
-                body.since.clone(),
-                body.full_state,
-                body.timeout,
+                body,
                 tx,
             ));

-            v.insert((body.since.clone(), rx)).1.clone()
+            rx
         }
         Entry::Occupied(mut o) => {
             if o.get().0 != body.since {
                 let (tx, rx) = tokio::sync::watch::channel(None);

+                o.insert((body.since.clone(), rx.clone()));
+
                 tokio::spawn(sync_helper_wrapper(
                     Arc::clone(&arc_db),
                     sender_user.clone(),
                     sender_device.clone(),
-                    body.since.clone(),
-                    body.full_state,
-                    body.timeout,
+                    body,
                     tx,
                 ));

-                o.insert((body.since.clone(), rx.clone()));
-
                 rx
             } else {
                 o.get().1.clone()

@@ -135,18 +139,16 @@ async fn sync_helper_wrapper(
     db: Arc<DatabaseGuard>,
     sender_user: Box<UserId>,
     sender_device: Box<DeviceId>,
-    since: Option<String>,
-    full_state: bool,
-    timeout: Option<Duration>,
+    body: sync_events::IncomingRequest,
     tx: Sender<Option<ConduitResult<sync_events::Response>>>,
 ) {
+    let since = body.since.clone();
+
     let r = sync_helper(
         Arc::clone(&db),
         sender_user.clone(),
         sender_device.clone(),
-        since.clone(),
-        full_state,
-        timeout,
+        body,
     )
     .await;

@@ -179,9 +181,7 @@ async fn sync_helper(
     db: Arc<DatabaseGuard>,
     sender_user: Box<UserId>,
     sender_device: Box<DeviceId>,
-    since: Option<String>,
-    full_state: bool,
-    timeout: Option<Duration>,
+    body: sync_events::IncomingRequest,
     // bool = caching allowed
 ) -> Result<(sync_events::Response, bool), Error> {
     // TODO: match body.set_presence {

@@ -193,8 +193,26 @@ async fn sync_helper(
     let next_batch = db.globals.current_count()?;
     let next_batch_string = next_batch.to_string();

+    // Load filter
+    let filter = match body.filter {
+        None => IncomingFilterDefinition::default(),
+        Some(sync_events::IncomingFilter::FilterDefinition(filter)) => filter,
+        Some(sync_events::IncomingFilter::FilterId(filter_id)) => db
+            .users
+            .get_filter(&sender_user, &filter_id)?
+            .unwrap_or_default(),
+    };
+
+    let (lazy_load_enabled, lazy_load_send_redundant) = match filter.room.state.lazy_load_options {
+        LazyLoadOptions::Enabled {
+            include_redundant_members: redundant,
+        } => (true, redundant),
+        _ => (false, false),
+    };
+
     let mut joined_rooms = BTreeMap::new();
-    let since = since
+    let since = body
+        .since
         .clone()
         .and_then(|string| string.parse().ok())
         .unwrap_or(0);
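The `LazyLoadOptions` matched above come from the standard Matrix filter format, so a client opts in by supplying a filter along these lines (field names follow the client-server filtering spec, not anything Conduit-specific; shown only as a sketch):

    {
      "room": {
        "state": {
          "lazy_load_members": true,
          "include_redundant_members": false
        }
      }
    }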
@@ -264,6 +282,14 @@ async fn sync_helper(
             // limited unless there are events in non_timeline_pdus
             let limited = non_timeline_pdus.next().is_some();

+            let mut timeline_users = HashSet::new();
+            for (_, event) in &timeline_pdus {
+                timeline_users.insert(event.sender.as_str().to_owned());
+            }
+
+            db.rooms
+                .lazy_load_confirm_delivery(&sender_user, &sender_device, &room_id, since)?;
+
             // Database queries:

             let current_shortstatehash = db

@@ -344,14 +370,58 @@ async fn sync_helper(
                 state_events,
             ) = if since_shortstatehash.is_none() {
                 // Probably since = 0, we will do an initial sync

                 let (joined_member_count, invited_member_count, heroes) = calculate_counts()?;

                 let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?;
-                let state_events: Vec<_> = current_state_ids
-                    .iter()
-                    .map(|(_, id)| db.rooms.get_pdu(id))
-                    .filter_map(|r| r.ok().flatten())
-                    .collect();
+
+                let mut state_events = Vec::new();
+                let mut lazy_loaded = HashSet::new();
+
+                for (shortstatekey, id) in current_state_ids {
+                    let (event_type, state_key) = db.rooms.get_statekey_from_short(shortstatekey)?;
+
+                    if event_type != EventType::RoomMember {
+                        let pdu = match db.rooms.get_pdu(&id)? {
+                            Some(pdu) => pdu,
+                            None => {
+                                error!("Pdu in state not found: {}", id);
+                                continue;
+                            }
+                        };
+                        state_events.push(pdu);
+                    } else if !lazy_load_enabled
+                        || body.full_state
+                        || timeline_users.contains(&state_key)
+                    {
+                        let pdu = match db.rooms.get_pdu(&id)? {
+                            Some(pdu) => pdu,
+                            None => {
+                                error!("Pdu in state not found: {}", id);
+                                continue;
+                            }
+                        };
+                        lazy_loaded.insert(
+                            UserId::parse(state_key.as_ref())
+                                .expect("they are in timeline_users, so they should be correct"),
+                        );
+                        state_events.push(pdu);
+                    }
+                }
+
+                // Reset lazy loading because this is an initial sync
+                db.rooms
+                    .lazy_load_reset(&sender_user, &sender_device, &room_id)?;
+
+                // The state_events above should contain all timeline_users, let's mark them as lazy
+                // loaded.
+                db.rooms.lazy_load_mark_sent(
+                    &sender_user,
+                    &sender_device,
+                    &room_id,
+                    lazy_loaded,
+                    next_batch,
+                );

                 (
                     heroes,

@@ -383,24 +453,72 @@ async fn sync_helper(
                 let joined_since_last_sync = since_sender_member
                     .map_or(true, |member| member.membership != MembershipState::Join);

-                let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?;
-                let since_state_ids = db.rooms.state_full_ids(since_shortstatehash)?;
-
-                let state_events = if joined_since_last_sync {
-                    current_state_ids
-                        .iter()
-                        .map(|(_, id)| db.rooms.get_pdu(id))
-                        .filter_map(|r| r.ok().flatten())
-                        .collect::<Vec<_>>()
-                } else {
-                    current_state_ids
-                        .iter()
-                        .filter(|(key, id)| since_state_ids.get(key) != Some(id))
-                        .map(|(_, id)| db.rooms.get_pdu(id))
-                        .filter_map(|r| r.ok().flatten())
-                        .collect()
-                };
+                let mut state_events = Vec::new();
+                let mut lazy_loaded = HashSet::new();
+
+                if since_shortstatehash != current_shortstatehash {
+                    let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?;
+                    let since_state_ids = db.rooms.state_full_ids(since_shortstatehash)?;
+
+                    for (key, id) in current_state_ids {
+                        if body.full_state || since_state_ids.get(&key) != Some(&id) {
+                            let pdu = match db.rooms.get_pdu(&id)? {
+                                Some(pdu) => pdu,
+                                None => {
+                                    error!("Pdu in state not found: {}", id);
+                                    continue;
+                                }
+                            };
+
+                            if pdu.kind == EventType::RoomMember {
+                                match UserId::parse(
+                                    pdu.state_key
+                                        .as_ref()
+                                        .expect("State event has state key")
+                                        .clone(),
+                                ) {
+                                    Ok(state_key_userid) => {
+                                        lazy_loaded.insert(state_key_userid);
+                                    }
+                                    Err(e) => error!("Invalid state key for member event: {}", e),
+                                }
+                            }
+
+                            state_events.push(pdu);
+                        }
+                    }
+                }
+
+                for (_, event) in &timeline_pdus {
+                    if lazy_loaded.contains(&event.sender) {
+                        continue;
+                    }
+
+                    if !db.rooms.lazy_load_was_sent_before(
+                        &sender_user,
+                        &sender_device,
+                        &room_id,
+                        &event.sender,
+                    )? || lazy_load_send_redundant
+                    {
+                        if let Some(member_event) = db.rooms.room_state_get(
+                            &room_id,
+                            &EventType::RoomMember,
+                            event.sender.as_str(),
+                        )? {
+                            lazy_loaded.insert(event.sender.clone());
+                            state_events.push(member_event);
+                        }
+                    }
+                }
+
+                db.rooms.lazy_load_mark_sent(
+                    &sender_user,
+                    &sender_device,
+                    &room_id,
+                    lazy_loaded,
+                    next_batch,
+                );

                 let encrypted_room = db
                     .rooms

@@ -762,10 +880,12 @@ async fn sync_helper(
                 .users
                 .get_to_device_events(&sender_user, &sender_device)?,
         },
+        // Fallback keys are not yet supported
+        device_unused_fallback_key_types: None,
     };

     // TODO: Retry the endpoint instead of returning (waiting for #118)
-    if !full_state
+    if !body.full_state
         && response.rooms.is_empty()
         && response.presence.is_empty()
         && response.account_data.is_empty()

@@ -774,7 +894,7 @@ async fn sync_helper(
     {
         // Hang a few seconds so requests are not spammed
        // Stop hanging if new info arrives
-        let mut duration = timeout.unwrap_or_default();
+        let mut duration = body.timeout.unwrap_or_default();
         if duration.as_secs() > 30 {
             duration = Duration::from_secs(30);
         }
@@ -53,8 +53,8 @@ pub async fn send_event_to_device_route(
             serde_json::to_vec(&federation::transactions::edu::Edu::DirectToDevice(
                 DirectDeviceContent {
                     sender: sender_user.clone(),
-                    ev_type: EventType::from(&body.event_type),
-                    message_id: body.txn_id.clone(),
+                    ev_type: EventType::from(&*body.event_type),
+                    message_id: body.txn_id.to_string(),
                     messages,
                 },
             ))
@@ -1,3 +1,5 @@
+use std::{collections::BTreeMap, iter::FromIterator};
+
 use crate::ConduitResult;
 use ruma::api::client::unversioned::get_supported_versions;

@@ -17,11 +19,10 @@ use rocket::get;
 #[cfg_attr(feature = "conduit_bin", get("/_matrix/client/versions"))]
 #[tracing::instrument]
 pub async fn get_supported_versions_route() -> ConduitResult<get_supported_versions::Response> {
-    let mut resp =
-        get_supported_versions::Response::new(vec!["r0.5.0".to_owned(), "r0.6.0".to_owned()]);
-
-    resp.unstable_features
-        .insert("org.matrix.e2e_cross_signing".to_owned(), true);
+    let resp = get_supported_versions::Response {
+        versions: vec!["r0.5.0".to_owned(), "r0.6.0".to_owned()],
+        unstable_features: BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]),
+    };

     Ok(resp.into())
 }
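Serialized, the struct literal above corresponds to a /_matrix/client/versions response of roughly this shape (an illustration derived from the code, not a captured response):

    {
      "versions": ["r0.5.0", "r0.6.0"],
      "unstable_features": {
        "org.matrix.e2e_cross_signing": true
      }
    }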
src/database.rs (159 changes)
@@ -44,13 +44,17 @@ use self::proxy::ProxyConfig;
 #[derive(Clone, Debug, Deserialize)]
 pub struct Config {
     server_name: Box<ServerName>,
+    #[serde(default = "default_database_backend")]
+    database_backend: String,
     database_path: String,
     #[serde(default = "default_db_cache_capacity_mb")]
     db_cache_capacity_mb: f64,
+    #[serde(default = "default_rocksdb_max_open_files")]
+    rocksdb_max_open_files: i32,
     #[serde(default = "default_pdu_cache_capacity")]
     pdu_cache_capacity: u32,
-    #[serde(default = "default_sqlite_wal_clean_second_interval")]
-    sqlite_wal_clean_second_interval: u32,
+    #[serde(default = "default_cleanup_second_interval")]
+    cleanup_second_interval: u32,
     #[serde(default = "default_max_request_size")]
     max_request_size: u32,
     #[serde(default = "default_max_concurrent_requests")]

@@ -117,15 +121,23 @@ fn true_fn() -> bool {
     true
 }

+fn default_database_backend() -> String {
+    "sqlite".to_owned()
+}
+
 fn default_db_cache_capacity_mb() -> f64 {
-    200.0
+    10.0
+}
+
+fn default_rocksdb_max_open_files() -> i32 {
+    512
 }

 fn default_pdu_cache_capacity() -> u32 {
-    100_000
+    150_000
 }

-fn default_sqlite_wal_clean_second_interval() -> u32 {
+fn default_cleanup_second_interval() -> u32 {
     1 * 60 // every minute
 }

@@ -145,17 +157,8 @@ fn default_turn_ttl() -> u64 {
     60 * 60 * 24
 }

-#[cfg(feature = "sled")]
-pub type Engine = abstraction::sled::Engine;
-
-#[cfg(feature = "sqlite")]
-pub type Engine = abstraction::sqlite::Engine;
-
-#[cfg(feature = "heed")]
-pub type Engine = abstraction::heed::Engine;
-
 pub struct Database {
-    _db: Arc<Engine>,
+    _db: Arc<dyn DatabaseEngine>,
     pub globals: globals::Globals,
     pub users: users::Users,
     pub uiaa: uiaa::Uiaa,

@@ -183,28 +186,48 @@ impl Database {
         Ok(())
     }

-    fn check_sled_or_sqlite_db(config: &Config) -> Result<()> {
-        #[cfg(feature = "backend_sqlite")]
-        {
-            let path = Path::new(&config.database_path);
+    fn check_db_setup(config: &Config) -> Result<()> {
+        let path = Path::new(&config.database_path);

         let sled_exists = path.join("db").exists();
         let sqlite_exists = path.join("conduit.db").exists();
-            if sled_exists {
-                if sqlite_exists {
-                    // most likely an in-place directory, only warn
-                    warn!("Both sled and sqlite databases are detected in database directory");
-                    warn!("Currently running from the sqlite database, but consider removing sled database files to free up space")
-                } else {
-                    error!(
-                        "Sled database detected, conduit now uses sqlite for database operations"
-                    );
-                    error!("This database must be converted to sqlite, go to https://github.com/ShadowJonathan/conduit_toolbox#conduit_sled_to_sqlite");
-                    return Err(Error::bad_config(
-                        "sled database detected, migrate to sqlite",
-                    ));
-                }
-            }
-        }
+        let rocksdb_exists = path.join("IDENTITY").exists();
+
+        let mut count = 0;
+
+        if sled_exists {
+            count += 1;
+        }
+
+        if sqlite_exists {
+            count += 1;
+        }
+
+        if rocksdb_exists {
+            count += 1;
+        }
+
+        if count > 1 {
+            warn!("Multiple databases at database_path detected");
+            return Ok(());
+        }
+
+        if sled_exists && config.database_backend != "sled" {
+            return Err(Error::bad_config(
+                "Found sled at database_path, but is not specified in config.",
+            ));
+        }
+
+        if sqlite_exists && config.database_backend != "sqlite" {
+            return Err(Error::bad_config(
+                "Found sqlite at database_path, but is not specified in config.",
+            ));
+        }
+
+        if rocksdb_exists && config.database_backend != "rocksdb" {
+            return Err(Error::bad_config(
+                "Found rocksdb at database_path, but is not specified in config.",
+            ));
+        }

         Ok(())

@@ -212,14 +235,30 @@ impl Database {

     /// Load an existing database or create a new one.
     pub async fn load_or_create(config: &Config) -> Result<Arc<TokioRwLock<Self>>> {
-        Self::check_sled_or_sqlite_db(config)?;
+        Self::check_db_setup(config)?;

         if !Path::new(&config.database_path).exists() {
             std::fs::create_dir_all(&config.database_path)
                 .map_err(|_| Error::BadConfig("Database folder doesn't exists and couldn't be created (e.g. due to missing permissions). Please create the database folder yourself."))?;
         }

-        let builder = Engine::open(config)?;
+        let builder: Arc<dyn DatabaseEngine> = match &*config.database_backend {
+            "sqlite" => {
+                #[cfg(not(feature = "sqlite"))]
+                return Err(Error::BadConfig("Database backend not found."));
+                #[cfg(feature = "sqlite")]
+                Arc::new(Arc::<abstraction::sqlite::Engine>::open(config)?)
+            }
+            "rocksdb" => {
+                #[cfg(not(feature = "rocksdb"))]
+                return Err(Error::BadConfig("Database backend not found."));
+                #[cfg(feature = "rocksdb")]
+                Arc::new(Arc::<abstraction::rocksdb::Engine>::open(config)?)
+            }
+            _ => {
+                return Err(Error::BadConfig("Database backend not found."));
+            }
+        };

         if config.max_request_size < 1024 {
             eprintln!("ERROR: Max request size is less than 1KB. Please increase it.");

@@ -246,12 +285,12 @@ impl Database {
                 userid_masterkeyid: builder.open_tree("userid_masterkeyid")?,
                 userid_selfsigningkeyid: builder.open_tree("userid_selfsigningkeyid")?,
                 userid_usersigningkeyid: builder.open_tree("userid_usersigningkeyid")?,
+                userfilterid_filter: builder.open_tree("userfilterid_filter")?,
                 todeviceid_events: builder.open_tree("todeviceid_events")?,
             },
             uiaa: uiaa::Uiaa {
                 userdevicesessionid_uiaainfo: builder.open_tree("userdevicesessionid_uiaainfo")?,
-                userdevicesessionid_uiaarequest: builder
-                    .open_tree("userdevicesessionid_uiaarequest")?,
+                userdevicesessionid_uiaarequest: RwLock::new(BTreeMap::new()),
|
|
||||||
},
|
},
|
||||||
rooms: rooms::Rooms {
|
rooms: rooms::Rooms {
|
||||||
edus: rooms::RoomEdus {
|
edus: rooms::RoomEdus {
|
||||||
|
@ -286,6 +325,8 @@ impl Database {
|
||||||
userroomid_leftstate: builder.open_tree("userroomid_leftstate")?,
|
userroomid_leftstate: builder.open_tree("userroomid_leftstate")?,
|
||||||
roomuserid_leftcount: builder.open_tree("roomuserid_leftcount")?,
|
roomuserid_leftcount: builder.open_tree("roomuserid_leftcount")?,
|
||||||
|
|
||||||
|
lazyloadedids: builder.open_tree("lazyloadedids")?,
|
||||||
|
|
||||||
userroomid_notificationcount: builder.open_tree("userroomid_notificationcount")?,
|
userroomid_notificationcount: builder.open_tree("userroomid_notificationcount")?,
|
||||||
userroomid_highlightcount: builder.open_tree("userroomid_highlightcount")?,
|
userroomid_highlightcount: builder.open_tree("userroomid_highlightcount")?,
|
||||||
|
|
||||||
|
@ -321,6 +362,7 @@ impl Database {
|
||||||
statekeyshort_cache: Mutex::new(LruCache::new(1_000_000)),
|
statekeyshort_cache: Mutex::new(LruCache::new(1_000_000)),
|
||||||
our_real_users_cache: RwLock::new(HashMap::new()),
|
our_real_users_cache: RwLock::new(HashMap::new()),
|
||||||
appservice_in_room_cache: RwLock::new(HashMap::new()),
|
appservice_in_room_cache: RwLock::new(HashMap::new()),
|
||||||
|
lazy_load_waiting: Mutex::new(HashMap::new()),
|
||||||
stateinfo_cache: Mutex::new(LruCache::new(1000)),
|
stateinfo_cache: Mutex::new(LruCache::new(1000)),
|
||||||
},
|
},
|
||||||
account_data: account_data::AccountData {
|
account_data: account_data::AccountData {
|
||||||
|
@ -755,6 +797,15 @@ impl Database {
|
||||||
|
|
||||||
println!("Migration: 9 -> 10 finished");
|
println!("Migration: 9 -> 10 finished");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if db.globals.database_version()? < 11 {
|
||||||
|
db._db
|
||||||
|
.open_tree("userdevicesessionid_uiaarequest")?
|
||||||
|
.clear()?;
|
||||||
|
db.globals.bump_database_version(11)?;
|
||||||
|
|
||||||
|
println!("Migration: 10 -> 11 finished");
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let guard = db.read().await;
|
let guard = db.read().await;
|
||||||
|
@ -769,10 +820,7 @@ impl Database {
|
||||||
|
|
||||||
drop(guard);
|
drop(guard);
|
||||||
|
|
||||||
#[cfg(feature = "sqlite")]
|
Self::start_cleanup_task(Arc::clone(&db), config).await;
|
||||||
{
|
|
||||||
Self::start_wal_clean_task(Arc::clone(&db), config).await;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(db)
|
Ok(db)
|
||||||
}
|
}
|
||||||
|
@ -910,15 +958,8 @@ impl Database {
|
||||||
res
|
res
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(feature = "sqlite")]
|
|
||||||
#[tracing::instrument(skip(self))]
|
|
||||||
pub fn flush_wal(&self) -> Result<()> {
|
|
||||||
self._db.flush_wal()
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(feature = "sqlite")]
|
|
||||||
#[tracing::instrument(skip(db, config))]
|
#[tracing::instrument(skip(db, config))]
|
||||||
pub async fn start_wal_clean_task(db: Arc<TokioRwLock<Self>>, config: &Config) {
|
pub async fn start_cleanup_task(db: Arc<TokioRwLock<Self>>, config: &Config) {
|
||||||
use tokio::time::interval;
|
use tokio::time::interval;
|
||||||
|
|
||||||
#[cfg(unix)]
|
#[cfg(unix)]
|
||||||
|
@ -927,7 +968,7 @@ impl Database {
|
||||||
|
|
||||||
use std::time::{Duration, Instant};
|
use std::time::{Duration, Instant};
|
||||||
|
|
||||||
let timer_interval = Duration::from_secs(config.sqlite_wal_clean_second_interval as u64);
|
let timer_interval = Duration::from_secs(config.cleanup_second_interval as u64);
|
||||||
|
|
||||||
tokio::spawn(async move {
|
tokio::spawn(async move {
|
||||||
let mut i = interval(timer_interval);
|
let mut i = interval(timer_interval);
|
||||||
|
@ -938,23 +979,23 @@ impl Database {
|
||||||
#[cfg(unix)]
|
#[cfg(unix)]
|
||||||
tokio::select! {
|
tokio::select! {
|
||||||
_ = i.tick() => {
|
_ = i.tick() => {
|
||||||
info!("wal-trunc: Timer ticked");
|
info!("cleanup: Timer ticked");
|
||||||
}
|
}
|
||||||
_ = s.recv() => {
|
_ = s.recv() => {
|
||||||
info!("wal-trunc: Received SIGHUP");
|
info!("cleanup: Received SIGHUP");
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
#[cfg(not(unix))]
|
#[cfg(not(unix))]
|
||||||
{
|
{
|
||||||
i.tick().await;
|
i.tick().await;
|
||||||
info!("wal-trunc: Timer ticked")
|
info!("cleanup: Timer ticked")
|
||||||
}
|
}
|
||||||
|
|
||||||
let start = Instant::now();
|
let start = Instant::now();
|
||||||
if let Err(e) = db.read().await.flush_wal() {
|
if let Err(e) = db.read().await._db.cleanup() {
|
||||||
error!("wal-trunc: Errored: {}", e);
|
error!("cleanup: Errored: {}", e);
|
||||||
} else {
|
} else {
|
||||||
info!("wal-trunc: Flushed in {:?}", start.elapsed());
|
info!("cleanup: Finished in {:?}", start.elapsed());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|
|
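The defaults above back new config keys that the rest of this diff reads (`database_backend`, `db_cache_capacity_mb`, `rocksdb_max_open_files`, `cleanup_second_interval`). A minimal sketch of how they would hang off the `Config` struct via serde defaults; only these fields are taken from the diff, the rest of the struct is assumed:

// Sketch only: field names come from the default fns and the config.* accesses
// in this diff; everything else about Config is an assumption.
#[derive(Clone, Debug, serde::Deserialize)]
pub struct Config {
    #[serde(default = "default_database_backend")]
    pub database_backend: String, // "sqlite" or "rocksdb"
    #[serde(default = "default_db_cache_capacity_mb")]
    pub db_cache_capacity_mb: f64, // shared cache size in MB
    #[serde(default = "default_rocksdb_max_open_files")]
    pub rocksdb_max_open_files: i32,
    #[serde(default = "default_cleanup_second_interval")]
    pub cleanup_second_interval: u32, // how often the cleanup task runs
    // ... remaining fields unchanged
}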
@@ -12,10 +12,24 @@ pub mod sqlite;
 #[cfg(feature = "heed")]
 pub mod heed;

-pub trait DatabaseEngine: Sized {
-    fn open(config: &Config) -> Result<Arc<Self>>;
-    fn open_tree(self: &Arc<Self>, name: &'static str) -> Result<Arc<dyn Tree>>;
-    fn flush(self: &Arc<Self>) -> Result<()>;
+#[cfg(feature = "rocksdb")]
+pub mod rocksdb;
+
+#[cfg(any(feature = "sqlite", feature = "rocksdb", feature = "heed"))]
+pub mod watchers;
+
+pub trait DatabaseEngine: Send + Sync {
+    fn open(config: &Config) -> Result<Self>
+    where
+        Self: Sized;
+    fn open_tree(&self, name: &'static str) -> Result<Arc<dyn Tree>>;
+    fn flush(&self) -> Result<()>;
+    fn cleanup(&self) -> Result<()> {
+        Ok(())
+    }
+    fn memory_usage(&self) -> Result<String> {
+        Ok("Current database engine does not support memory usage reporting.".to_owned())
+    }
 }

 pub trait Tree: Send + Sync {
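With `DatabaseEngine` now a plain `Send + Sync` trait, the database layer can hold any backend behind `Arc<dyn DatabaseEngine>`. A rough usage sketch mirroring `load_or_create()` above; the function name and `"example_tree"` are illustrative, and the usual imports are assumed:

// Sketch: dispatching on the configured backend through the trait object.
fn open_example_tree(config: &Config) -> Result<Arc<dyn Tree>> {
    let engine: Arc<dyn DatabaseEngine> = match &*config.database_backend {
        #[cfg(feature = "sqlite")]
        "sqlite" => Arc::new(Arc::<abstraction::sqlite::Engine>::open(config)?),
        #[cfg(feature = "rocksdb")]
        "rocksdb" => Arc::new(Arc::<abstraction::rocksdb::Engine>::open(config)?),
        _ => return Err(Error::BadConfig("Database backend not found.")),
    };

    println!("{}", engine.memory_usage()?); // default message unless the backend overrides it
    engine.cleanup()?; // no-op by default; sqlite maps it to flush_wal()
    engine.open_tree("example_tree")
}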
@@ -1,15 +1,13 @@
-use super::super::Config;
+use super::{super::Config, watchers::Watchers};
 use crossbeam::channel::{bounded, Sender as ChannelSender};
 use threadpool::ThreadPool;

 use crate::{Error, Result};
 use std::{
-    collections::HashMap,
     future::Future,
     pin::Pin,
-    sync::{Arc, Mutex, RwLock},
+    sync::{Arc, Mutex},
 };
-use tokio::sync::oneshot::Sender;

 use super::{DatabaseEngine, Tree};

@@ -23,7 +21,7 @@ pub struct Engine {
 pub struct EngineTree {
     engine: Arc<Engine>,
     tree: Arc<heed::UntypedDatabase>,
-    watchers: RwLock<HashMap<Vec<u8>, Vec<Sender<()>>>>,
+    watchers: Watchers,
 }

 fn convert_error(error: heed::Error) -> Error {

@@ -60,7 +58,7 @@ impl DatabaseEngine for Engine {
                     .create_database(Some(name))
                     .map_err(convert_error)?,
             ),
-            watchers: RwLock::new(HashMap::new()),
+            watchers: Default::default(),
         }))
     }

@@ -145,29 +143,7 @@ impl Tree for EngineTree {
             .put(&mut txn, &key, &value)
             .map_err(convert_error)?;
         txn.commit().map_err(convert_error)?;
-
-        let watchers = self.watchers.read().unwrap();
-        let mut triggered = Vec::new();
-
-        for length in 0..=key.len() {
-            if watchers.contains_key(&key[..length]) {
-                triggered.push(&key[..length]);
-            }
-        }
-
-        drop(watchers);
-
-        if !triggered.is_empty() {
-            let mut watchers = self.watchers.write().unwrap();
-            for prefix in triggered {
-                if let Some(txs) = watchers.remove(prefix) {
-                    for tx in txs {
-                        let _ = tx.send(());
-                    }
-                }
-            }
-        };
-
+        self.watchers.wake(key);
         Ok(())
     }

@@ -223,18 +199,6 @@ impl Tree for EngineTree {

     #[tracing::instrument(skip(self, prefix))]
     fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin<Box<dyn Future<Output = ()> + Send + 'a>> {
-        let (tx, rx) = tokio::sync::oneshot::channel();
-
-        self.watchers
-            .write()
-            .unwrap()
-            .entry(prefix.to_vec())
-            .or_default()
-            .push(tx);
-
-        Box::pin(async move {
-            // Tx is never destroyed
-            rx.await.unwrap();
-        })
+        self.watchers.watch(prefix)
     }
 }
src/database/abstraction/rocksdb.rs (new file, 231 lines)
@@ -0,0 +1,231 @@
use super::{super::Config, watchers::Watchers, DatabaseEngine, Tree};
use crate::{utils, Result};
use std::{future::Future, pin::Pin, sync::Arc, sync::RwLock};

pub struct Engine {
    rocks: rocksdb::DBWithThreadMode<rocksdb::MultiThreaded>,
    max_open_files: i32,
    cache: rocksdb::Cache,
    old_cfs: Vec<String>,
}

pub struct RocksDbEngineTree<'a> {
    db: Arc<Engine>,
    name: &'a str,
    watchers: Watchers,
    write_lock: RwLock<()>,
}

fn db_options(max_open_files: i32, rocksdb_cache: &rocksdb::Cache) -> rocksdb::Options {
    let mut block_based_options = rocksdb::BlockBasedOptions::default();
    block_based_options.set_block_cache(rocksdb_cache);

    // "Difference of spinning disk"
    // https://zhangyuchi.gitbooks.io/rocksdbbook/content/RocksDB-Tuning-Guide.html
    block_based_options.set_block_size(4 * 1024);
    block_based_options.set_cache_index_and_filter_blocks(true);

    let mut db_opts = rocksdb::Options::default();
    db_opts.set_block_based_table_factory(&block_based_options);
    db_opts.set_optimize_filters_for_hits(true);
    db_opts.set_skip_stats_update_on_db_open(true);
    db_opts.set_level_compaction_dynamic_level_bytes(true);
    db_opts.set_target_file_size_base(256 * 1024 * 1024);
    //db_opts.set_compaction_readahead_size(2 * 1024 * 1024);
    //db_opts.set_use_direct_reads(true);
    //db_opts.set_use_direct_io_for_flush_and_compaction(true);
    db_opts.create_if_missing(true);
    db_opts.increase_parallelism(num_cpus::get() as i32);
    db_opts.set_max_open_files(max_open_files);
    db_opts.set_compression_type(rocksdb::DBCompressionType::Zstd);
    db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level);
    db_opts.optimize_level_style_compaction(10 * 1024 * 1024);

    let prefix_extractor = rocksdb::SliceTransform::create_fixed_prefix(1);
    db_opts.set_prefix_extractor(prefix_extractor);

    db_opts
}

impl DatabaseEngine for Arc<Engine> {
    fn open(config: &Config) -> Result<Self> {
        let cache_capacity_bytes = (config.db_cache_capacity_mb * 1024.0 * 1024.0) as usize;
        let rocksdb_cache = rocksdb::Cache::new_lru_cache(cache_capacity_bytes).unwrap();

        let db_opts = db_options(config.rocksdb_max_open_files, &rocksdb_cache);

        let cfs = rocksdb::DBWithThreadMode::<rocksdb::MultiThreaded>::list_cf(
            &db_opts,
            &config.database_path,
        )
        .unwrap_or_default();

        let db = rocksdb::DBWithThreadMode::<rocksdb::MultiThreaded>::open_cf_descriptors(
            &db_opts,
            &config.database_path,
            cfs.iter().map(|name| {
                rocksdb::ColumnFamilyDescriptor::new(
                    name,
                    db_options(config.rocksdb_max_open_files, &rocksdb_cache),
                )
            }),
        )?;

        Ok(Arc::new(Engine {
            rocks: db,
            max_open_files: config.rocksdb_max_open_files,
            cache: rocksdb_cache,
            old_cfs: cfs,
        }))
    }

    fn open_tree(&self, name: &'static str) -> Result<Arc<dyn Tree>> {
        if !self.old_cfs.contains(&name.to_owned()) {
            // Create if it didn't exist
            let _ = self
                .rocks
                .create_cf(name, &db_options(self.max_open_files, &self.cache));
        }

        Ok(Arc::new(RocksDbEngineTree {
            name,
            db: Arc::clone(self),
            watchers: Watchers::default(),
            write_lock: RwLock::new(()),
        }))
    }

    fn flush(&self) -> Result<()> {
        // TODO?
        Ok(())
    }

    fn memory_usage(&self) -> Result<String> {
        let stats =
            rocksdb::perf::get_memory_usage_stats(Some(&[&self.rocks]), Some(&[&self.cache]))?;
        Ok(format!(
            "Approximate memory usage of all the mem-tables: {:.3} MB\n\
             Approximate memory usage of un-flushed mem-tables: {:.3} MB\n\
             Approximate memory usage of all the table readers: {:.3} MB\n\
             Approximate memory usage by cache: {:.3} MB\n\
             Approximate memory usage by cache pinned: {:.3} MB\n\
             ",
            stats.mem_table_total as f64 / 1024.0 / 1024.0,
            stats.mem_table_unflushed as f64 / 1024.0 / 1024.0,
            stats.mem_table_readers_total as f64 / 1024.0 / 1024.0,
            stats.cache_total as f64 / 1024.0 / 1024.0,
            self.cache.get_pinned_usage() as f64 / 1024.0 / 1024.0,
        ))
    }
}

impl RocksDbEngineTree<'_> {
    fn cf(&self) -> Arc<rocksdb::BoundColumnFamily<'_>> {
        self.db.rocks.cf_handle(self.name).unwrap()
    }
}

impl Tree for RocksDbEngineTree<'_> {
    fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
        Ok(self.db.rocks.get_cf(&self.cf(), key)?)
    }

    fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> {
        let lock = self.write_lock.read().unwrap();
        self.db.rocks.put_cf(&self.cf(), key, value)?;
        drop(lock);

        self.watchers.wake(key);

        Ok(())
    }

    fn insert_batch<'a>(&self, iter: &mut dyn Iterator<Item = (Vec<u8>, Vec<u8>)>) -> Result<()> {
        for (key, value) in iter {
            self.db.rocks.put_cf(&self.cf(), key, value)?;
        }

        Ok(())
    }

    fn remove(&self, key: &[u8]) -> Result<()> {
        Ok(self.db.rocks.delete_cf(&self.cf(), key)?)
    }

    fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + 'a> {
        Box::new(
            self.db
                .rocks
                .iterator_cf(&self.cf(), rocksdb::IteratorMode::Start)
                .map(|(k, v)| (Vec::from(k), Vec::from(v))),
        )
    }

    fn iter_from<'a>(
        &'a self,
        from: &[u8],
        backwards: bool,
    ) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + 'a> {
        Box::new(
            self.db
                .rocks
                .iterator_cf(
                    &self.cf(),
                    rocksdb::IteratorMode::From(
                        from,
                        if backwards {
                            rocksdb::Direction::Reverse
                        } else {
                            rocksdb::Direction::Forward
                        },
                    ),
                )
                .map(|(k, v)| (Vec::from(k), Vec::from(v))),
        )
    }

    fn increment(&self, key: &[u8]) -> Result<Vec<u8>> {
        let lock = self.write_lock.write().unwrap();

        let old = self.db.rocks.get_cf(&self.cf(), &key)?;
        let new = utils::increment(old.as_deref()).unwrap();
        self.db.rocks.put_cf(&self.cf(), key, &new)?;

        drop(lock);
        Ok(new)
    }

    fn increment_batch<'a>(&self, iter: &mut dyn Iterator<Item = Vec<u8>>) -> Result<()> {
        let lock = self.write_lock.write().unwrap();

        for key in iter {
            let old = self.db.rocks.get_cf(&self.cf(), &key)?;
            let new = utils::increment(old.as_deref()).unwrap();
            self.db.rocks.put_cf(&self.cf(), key, new)?;
        }

        drop(lock);

        Ok(())
    }

    fn scan_prefix<'a>(
        &'a self,
        prefix: Vec<u8>,
    ) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + 'a> {
        Box::new(
            self.db
                .rocks
                .iterator_cf(
                    &self.cf(),
                    rocksdb::IteratorMode::From(&prefix, rocksdb::Direction::Forward),
                )
                .map(|(k, v)| (Vec::from(k), Vec::from(v)))
                .take_while(move |(k, _)| k.starts_with(&prefix)),
        )
    }

    fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin<Box<dyn Future<Output = ()> + Send + 'a>> {
        self.watchers.watch(prefix)
    }
}
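Each `Tree` opened on this backend maps to a RocksDB column family, and all column families share one LRU block cache sized from `db_cache_capacity_mb`. A rough standalone sketch, assuming the `DatabaseEngine` trait and `std::sync::Arc` are in scope; the tree name and values are placeholders, and in the server this path is reached through `Database::load_or_create()` instead:

// Sketch only.
fn rocksdb_smoke_test(config: &Config) -> Result<()> {
    let engine = Arc::<abstraction::rocksdb::Engine>::open(config)?; // opens or creates the DB
    let tree = engine.open_tree("example_tree")?; // creates the column family if missing

    tree.insert(b"key", b"value")?; // also wakes any prefix watchers
    assert_eq!(tree.get(b"key")?, Some(b"value".to_vec()));

    println!("{}", engine.memory_usage()?); // mem-table and block cache statistics
    Ok(())
}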
@@ -1,17 +1,15 @@
-use super::{DatabaseEngine, Tree};
+use super::{watchers::Watchers, DatabaseEngine, Tree};
 use crate::{database::Config, Result};
-use parking_lot::{Mutex, MutexGuard, RwLock};
+use parking_lot::{Mutex, MutexGuard};
 use rusqlite::{Connection, DatabaseName::Main, OptionalExtension};
 use std::{
     cell::RefCell,
-    collections::{hash_map, HashMap},
     future::Future,
     path::{Path, PathBuf},
     pin::Pin,
     sync::Arc,
 };
 use thread_local::ThreadLocal;
-use tokio::sync::watch;
 use tracing::debug;

 thread_local! {

@@ -82,8 +80,8 @@ impl Engine {
     }
 }

-impl DatabaseEngine for Engine {
-    fn open(config: &Config) -> Result<Arc<Self>> {
+impl DatabaseEngine for Arc<Engine> {
+    fn open(config: &Config) -> Result<Self> {
         let path = Path::new(&config.database_path).join("conduit.db");

         // calculates cache-size per permanent connection

@@ -94,7 +92,7 @@ impl DatabaseEngine for Engine {
             / ((num_cpus::get().max(1) * 2) + 1) as f64)
             as u32;

-        let writer = Mutex::new(Self::prepare_conn(&path, cache_size_per_thread)?);
+        let writer = Mutex::new(Engine::prepare_conn(&path, cache_size_per_thread)?);

         let arc = Arc::new(Engine {
             writer,

@@ -107,26 +105,30 @@ impl DatabaseEngine for Engine {
         Ok(arc)
     }

-    fn open_tree(self: &Arc<Self>, name: &str) -> Result<Arc<dyn Tree>> {
+    fn open_tree(&self, name: &str) -> Result<Arc<dyn Tree>> {
         self.write_lock().execute(&format!("CREATE TABLE IF NOT EXISTS {} ( \"key\" BLOB PRIMARY KEY, \"value\" BLOB NOT NULL )", name), [])?;

         Ok(Arc::new(SqliteTable {
             engine: Arc::clone(self),
             name: name.to_owned(),
-            watchers: RwLock::new(HashMap::new()),
+            watchers: Watchers::default(),
         }))
     }

-    fn flush(self: &Arc<Self>) -> Result<()> {
+    fn flush(&self) -> Result<()> {
         // we enabled PRAGMA synchronous=normal, so this should not be necessary
         Ok(())
     }
+
+    fn cleanup(&self) -> Result<()> {
+        self.flush_wal()
+    }
 }

 pub struct SqliteTable {
     engine: Arc<Engine>,
     name: String,
-    watchers: RwLock<HashMap<Vec<u8>, (watch::Sender<()>, watch::Receiver<()>)>>,
+    watchers: Watchers,
 }

 type TupleOfBytes = (Vec<u8>, Vec<u8>);

@@ -200,27 +202,7 @@ impl Tree for SqliteTable {
         let guard = self.engine.write_lock();
         self.insert_with_guard(&guard, key, value)?;
         drop(guard);
-
-        let watchers = self.watchers.read();
-        let mut triggered = Vec::new();
-
-        for length in 0..=key.len() {
-            if watchers.contains_key(&key[..length]) {
-                triggered.push(&key[..length]);
-            }
-        }
-
-        drop(watchers);
-
-        if !triggered.is_empty() {
-            let mut watchers = self.watchers.write();
-            for prefix in triggered {
-                if let Some(tx) = watchers.remove(prefix) {
-                    let _ = tx.0.send(());
-                }
-            }
-        };
-
+        self.watchers.wake(key);
         Ok(())
     }

@@ -365,19 +347,7 @@ impl Tree for SqliteTable {

     #[tracing::instrument(skip(self, prefix))]
     fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin<Box<dyn Future<Output = ()> + Send + 'a>> {
-        let mut rx = match self.watchers.write().entry(prefix.to_vec()) {
-            hash_map::Entry::Occupied(o) => o.get().1.clone(),
-            hash_map::Entry::Vacant(v) => {
-                let (tx, rx) = tokio::sync::watch::channel(());
-                v.insert((tx, rx.clone()));
-                rx
-            }
-        };
-
-        Box::pin(async move {
-            // Tx is never destroyed
-            rx.changed().await.unwrap();
-        })
+        self.watchers.watch(prefix)
     }

     #[tracing::instrument(skip(self))]
src/database/abstraction/watchers.rs (new file, 54 lines)
@@ -0,0 +1,54 @@
use std::{
    collections::{hash_map, HashMap},
    future::Future,
    pin::Pin,
    sync::RwLock,
};
use tokio::sync::watch;

#[derive(Default)]
pub(super) struct Watchers {
    watchers: RwLock<HashMap<Vec<u8>, (watch::Sender<()>, watch::Receiver<()>)>>,
}

impl Watchers {
    pub(super) fn watch<'a>(
        &'a self,
        prefix: &[u8],
    ) -> Pin<Box<dyn Future<Output = ()> + Send + 'a>> {
        let mut rx = match self.watchers.write().unwrap().entry(prefix.to_vec()) {
            hash_map::Entry::Occupied(o) => o.get().1.clone(),
            hash_map::Entry::Vacant(v) => {
                let (tx, rx) = tokio::sync::watch::channel(());
                v.insert((tx, rx.clone()));
                rx
            }
        };

        Box::pin(async move {
            // Tx is never destroyed
            rx.changed().await.unwrap();
        })
    }
    pub(super) fn wake(&self, key: &[u8]) {
        let watchers = self.watchers.read().unwrap();
        let mut triggered = Vec::new();

        for length in 0..=key.len() {
            if watchers.contains_key(&key[..length]) {
                triggered.push(&key[..length]);
            }
        }

        drop(watchers);

        if !triggered.is_empty() {
            let mut watchers = self.watchers.write().unwrap();
            for prefix in triggered {
                if let Some(tx) = watchers.remove(prefix) {
                    let _ = tx.0.send(());
                }
            }
        };
    }
}
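`Watchers` centralizes the prefix-watch logic that was previously duplicated in the sqlite and heed trees, and the new rocksdb tree reuses it: readers register a `tokio::sync::watch` channel per prefix, and a write to any key wakes every registered prefix of that key (including the empty prefix and the full key). A rough sketch of how a `Tree` implementation would tie it in; `ExampleTree` and the backend write are assumed, and a tokio runtime is required for the async side:

// Sketch only.
struct ExampleTree {
    watchers: Watchers,
}

impl ExampleTree {
    fn insert(&self, key: &[u8], _value: &[u8]) {
        // ... write key/value to the backend here ...
        self.watchers.wake(key); // wakes watchers of b"", b"k", ..., the full key
    }

    async fn wait_for(&self, prefix: &[u8]) {
        self.watchers.watch(prefix).await; // resolves once a matching key is written
    }
}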
@@ -12,8 +12,10 @@ use tracing::warn;

 pub enum AdminCommand {
     RegisterAppservice(serde_yaml::Value),
+    UnregisterAppservice(String),
     ListAppservices,
     ListLocalUsers,
+    ShowMemoryUsage,
     SendMessage(RoomMessageEventContent),
 }

@@ -109,6 +111,9 @@ impl Admin {
                 AdminCommand::RegisterAppservice(yaml) => {
                     guard.appservice.register_appservice(yaml).unwrap(); // TODO handle error
                 }
+                AdminCommand::UnregisterAppservice(service_name) => {
+                    guard.appservice.unregister_appservice(&service_name).unwrap(); // TODO: see above
+                }
                 AdminCommand::ListAppservices => {
                     if let Ok(appservices) = guard.appservice.iter_ids().map(|ids| ids.collect::<Vec<_>>()) {
                         let count = appservices.len();

@@ -122,6 +127,13 @@ impl Admin {
                         send_message(RoomMessageEventContent::text_plain("Failed to get appservices."), guard, &state_lock);
                     }
                 }
+                AdminCommand::ShowMemoryUsage => {
+                    if let Ok(response) = guard._db.memory_usage() {
+                        send_message(RoomMessageEventContent::text_plain(response), guard, &state_lock);
+                    } else {
+                        send_message(RoomMessageEventContent::text_plain("Failed to get database memory usage.".to_owned()), guard, &state_lock);
+                    }
+                }
                 AdminCommand::SendMessage(message) => {
                     send_message(message, guard, &state_lock);
                 }
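The two new variants are dispatched from the admin command parser in rooms/mod.rs further down in this diff. A short sketch of how they reach the admin task, assuming `db` is the loaded `Database` and "bridge_id" is a placeholder appservice id:

// Sketch only.
db.admin.send(AdminCommand::UnregisterAppservice("bridge_id".to_owned()));
db.admin.send(AdminCommand::ShowMemoryUsage);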
@@ -27,6 +27,21 @@ impl Appservice {
         Ok(())
     }

+    /// Remove an appservice registration
+    ///
+    /// # Arguments
+    ///
+    /// * `service_name` - the name you send to register the service previously
+    pub fn unregister_appservice(&self, service_name: &str) -> Result<()> {
+        self.id_appserviceregistrations
+            .remove(service_name.as_bytes())?;
+        self.cached_registrations
+            .write()
+            .unwrap()
+            .remove(service_name);
+        Ok(())
+    }
+
     pub fn get_registration(&self, id: &str) -> Result<Option<serde_yaml::Value>> {
         self.cached_registrations
             .read()
@@ -4,8 +4,10 @@ use ruma::{
         error::ErrorKind,
         r0::backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup},
     },
+    serde::Raw,
     RoomId, UserId,
 };
+use serde_json::json;
 use std::{collections::BTreeMap, sync::Arc};

 use super::abstraction::Tree;

@@ -20,7 +22,7 @@ impl KeyBackups {
     pub fn create_backup(
         &self,
         user_id: &UserId,
-        backup_metadata: &BackupAlgorithm,
+        backup_metadata: &Raw<BackupAlgorithm>,
         globals: &super::globals::Globals,
     ) -> Result<String> {
         let version = globals.next_count()?.to_string();

@@ -59,7 +61,7 @@ impl KeyBackups {
         &self,
         user_id: &UserId,
         version: &str,
-        backup_metadata: &BackupAlgorithm,
+        backup_metadata: &Raw<BackupAlgorithm>,
         globals: &super::globals::Globals,
     ) -> Result<String> {
         let mut key = user_id.as_bytes().to_vec();

@@ -73,12 +75,8 @@ impl KeyBackups {
             ));
         }

-        self.backupid_algorithm.insert(
-            &key,
-            serde_json::to_string(backup_metadata)
-                .expect("BackupAlgorithm::to_string always works")
-                .as_bytes(),
-        )?;
+        self.backupid_algorithm
+            .insert(&key, backup_metadata.json().get().as_bytes())?;
         self.backupid_etag
             .insert(&key, &globals.next_count()?.to_be_bytes())?;
         Ok(version.to_owned())

@@ -105,7 +103,10 @@ impl KeyBackups {
             .transpose()
     }

-    pub fn get_latest_backup(&self, user_id: &UserId) -> Result<Option<(String, BackupAlgorithm)>> {
+    pub fn get_latest_backup(
+        &self,
+        user_id: &UserId,
+    ) -> Result<Option<(String, Raw<BackupAlgorithm>)>> {
         let mut prefix = user_id.as_bytes().to_vec();
         prefix.push(0xff);
         let mut last_possible_key = prefix.clone();

@@ -133,7 +134,11 @@ impl KeyBackups {
             .transpose()
     }

-    pub fn get_backup(&self, user_id: &UserId, version: &str) -> Result<Option<BackupAlgorithm>> {
+    pub fn get_backup(
+        &self,
+        user_id: &UserId,
+        version: &str,
+    ) -> Result<Option<Raw<BackupAlgorithm>>> {
         let mut key = user_id.as_bytes().to_vec();
         key.push(0xff);
         key.extend_from_slice(version.as_bytes());

@@ -152,7 +157,7 @@ impl KeyBackups {
         version: &str,
         room_id: &RoomId,
         session_id: &str,
-        key_data: &KeyBackupData,
+        key_data: &Raw<KeyBackupData>,
         globals: &super::globals::Globals,
     ) -> Result<()> {
         let mut key = user_id.as_bytes().to_vec();

@@ -174,10 +179,8 @@ impl KeyBackups {
         key.push(0xff);
         key.extend_from_slice(session_id.as_bytes());

-        self.backupkeyid_backup.insert(
-            &key,
-            &serde_json::to_vec(&key_data).expect("KeyBackupData::to_vec always works"),
-        )?;
+        self.backupkeyid_backup
+            .insert(&key, key_data.json().get().as_bytes())?;

         Ok(())
     }

@@ -209,13 +212,13 @@ impl KeyBackups {
         &self,
         user_id: &UserId,
         version: &str,
-    ) -> Result<BTreeMap<Box<RoomId>, RoomKeyBackup>> {
+    ) -> Result<BTreeMap<Box<RoomId>, Raw<RoomKeyBackup>>> {
         let mut prefix = user_id.as_bytes().to_vec();
         prefix.push(0xff);
         prefix.extend_from_slice(version.as_bytes());
         prefix.push(0xff);

-        let mut rooms = BTreeMap::<Box<RoomId>, RoomKeyBackup>::new();
+        let mut rooms = BTreeMap::<Box<RoomId>, Raw<RoomKeyBackup>>::new();

         for result in self
             .backupkeyid_backup

@@ -241,7 +244,7 @@ impl KeyBackups {
                     Error::bad_database("backupkeyid_backup room_id is invalid room id.")
                 })?;

-                let key_data = serde_json::from_slice(&value).map_err(|_| {
+                let key_data: serde_json::Value = serde_json::from_slice(&value).map_err(|_| {
                     Error::bad_database("KeyBackupData in backupkeyid_backup is invalid.")
                 })?;

@@ -249,13 +252,25 @@ impl KeyBackups {
             })
         {
             let (room_id, session_id, key_data) = result?;
-            rooms
-                .entry(room_id)
-                .or_insert_with(|| RoomKeyBackup {
+            let room_key_backup = rooms.entry(room_id).or_insert_with(|| {
+                Raw::new(&RoomKeyBackup {
                     sessions: BTreeMap::new(),
                 })
-                .sessions
-                .insert(session_id, key_data);
+                .expect("RoomKeyBackup serialization")
+            });
+
+            let mut object = room_key_backup
+                .deserialize_as::<serde_json::Map<String, serde_json::Value>>()
+                .map_err(|_| Error::bad_database("RoomKeyBackup is not an object"))?;
+
+            let sessions = object.entry("session").or_insert_with(|| json!({}));
+            if let serde_json::Value::Object(unsigned_object) = sessions {
+                unsigned_object.insert(session_id, key_data);
+            }
+
+            *room_key_backup = Raw::from_json(
+                serde_json::value::to_raw_value(&object).expect("Value => RawValue serialization"),
+            );
         }

         Ok(rooms)

@@ -266,7 +281,7 @@ impl KeyBackups {
         user_id: &UserId,
         version: &str,
         room_id: &RoomId,
-    ) -> Result<BTreeMap<String, KeyBackupData>> {
+    ) -> Result<BTreeMap<String, Raw<KeyBackupData>>> {
         let mut prefix = user_id.as_bytes().to_vec();
         prefix.push(0xff);
         prefix.extend_from_slice(version.as_bytes());

@@ -304,7 +319,7 @@ impl KeyBackups {
         version: &str,
         room_id: &RoomId,
         session_id: &str,
-    ) -> Result<Option<KeyBackupData>> {
+    ) -> Result<Option<Raw<KeyBackupData>>> {
         let mut key = user_id.as_bytes().to_vec();
         key.push(0xff);
         key.extend_from_slice(version.as_bytes());
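Throughout this file the stored types move from deserialized structs to `ruma::serde::Raw<T>`, so key backup payloads are written to the tree exactly as the client supplied them (`value.json().get().as_bytes()`) instead of being re-serialized. A minimal sketch of the round trip under that pattern; the helper names are illustrative and the `Tree` handle is assumed:

// Sketch only: store and load a Raw<T> without re-serializing it.
fn store_raw<T>(tree: &dyn Tree, key: &[u8], value: &Raw<T>) -> Result<()> {
    tree.insert(key, value.json().get().as_bytes())
}

fn load_raw<T>(tree: &dyn Tree, key: &[u8]) -> Result<Option<Raw<T>>> {
    tree.get(key)?
        .map(|bytes| {
            serde_json::from_slice(&bytes)
                .map_err(|_| Error::bad_database("stored value is not valid JSON"))
        })
        .transpose()
}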
@@ -28,7 +28,7 @@ use ruma::{
     push::{Action, Ruleset, Tweak},
     serde::{CanonicalJsonObject, CanonicalJsonValue, Raw},
     state_res::{self, RoomVersion, StateMap},
-    uint, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId,
+    uint, DeviceId, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId,
 };
 use serde::Deserialize;
 use serde_json::value::to_raw_value;

@@ -79,6 +79,8 @@ pub struct Rooms {
     pub(super) userroomid_leftstate: Arc<dyn Tree>,
     pub(super) roomuserid_leftcount: Arc<dyn Tree>,

+    pub(super) lazyloadedids: Arc<dyn Tree>, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId
+
     pub(super) userroomid_notificationcount: Arc<dyn Tree>, // NotifyCount = u64
     pub(super) userroomid_highlightcount: Arc<dyn Tree>, // HightlightCount = u64

@@ -117,6 +119,8 @@ pub struct Rooms {
     pub(super) shortstatekey_cache: Mutex<LruCache<u64, (EventType, String)>>,
     pub(super) our_real_users_cache: RwLock<HashMap<Box<RoomId>, Arc<HashSet<Box<UserId>>>>>,
     pub(super) appservice_in_room_cache: RwLock<HashMap<Box<RoomId>, HashMap<String, bool>>>,
+    pub(super) lazy_load_waiting:
+        Mutex<HashMap<(Box<UserId>, Box<DeviceId>, Box<RoomId>, u64), HashSet<Box<UserId>>>>,
     pub(super) stateinfo_cache: Mutex<
         LruCache<
             u64,

@@ -1528,6 +1532,19 @@ impl Rooms {
                         ));
                     }
                 }
+                "unregister_appservice" => {
+                    if args.len() == 1 {
+                        db.admin.send(AdminCommand::UnregisterAppservice(
+                            args[0].to_owned(),
+                        ));
+                    } else {
+                        db.admin.send(AdminCommand::SendMessage(
+                            RoomMessageEventContent::text_plain(
+                                "Missing appservice identifier",
+                            ),
+                        ));
+                    }
+                }
                 "list_appservices" => {
                     db.admin.send(AdminCommand::ListAppservices);
                 }

@@ -1679,6 +1696,9 @@ impl Rooms {
                         ));
                     }
                 }
+                "database_memory_usage" => {
+                    db.admin.send(AdminCommand::ShowMemoryUsage);
+                }
                 _ => {
                     db.admin.send(AdminCommand::SendMessage(
                         RoomMessageEventContent::text_plain(format!(

@@ -2710,7 +2730,7 @@ impl Rooms {
         let state_lock = mutex_state.lock().await;

         let mut event: RoomMemberEventContent = serde_json::from_str(
-            self.room_state_get(room_id, &EventType::RoomMember, &user_id.to_string())?
+            self.room_state_get(room_id, &EventType::RoomMember, user_id.as_str())?
                 .ok_or(Error::BadRequest(
                     ErrorKind::BadState,
                     "Cannot leave a room you are not a member of.",

@@ -3445,8 +3465,7 @@ impl Rooms {
                 &key[0].to_be_bytes(),
                 &chain
                     .iter()
-                    .map(|s| s.to_be_bytes().to_vec())
-                    .flatten()
+                    .flat_map(|s| s.to_be_bytes().to_vec())
                     .collect::<Vec<u8>>(),
             )?;
         }

@@ -3456,4 +3475,94 @@ impl Rooms {

         Ok(())
     }
+
+    #[tracing::instrument(skip(self))]
+    pub fn lazy_load_was_sent_before(
+        &self,
+        user_id: &UserId,
+        device_id: &DeviceId,
+        room_id: &RoomId,
+        ll_user: &UserId,
+    ) -> Result<bool> {
+        let mut key = user_id.as_bytes().to_vec();
+        key.push(0xff);
+        key.extend_from_slice(device_id.as_bytes());
+        key.push(0xff);
+        key.extend_from_slice(room_id.as_bytes());
+        key.push(0xff);
+        key.extend_from_slice(ll_user.as_bytes());
+        Ok(self.lazyloadedids.get(&key)?.is_some())
+    }
+
+    #[tracing::instrument(skip(self))]
+    pub fn lazy_load_mark_sent(
+        &self,
+        user_id: &UserId,
+        device_id: &DeviceId,
+        room_id: &RoomId,
+        lazy_load: HashSet<Box<UserId>>,
+        count: u64,
+    ) {
+        self.lazy_load_waiting.lock().unwrap().insert(
+            (
+                user_id.to_owned(),
+                device_id.to_owned(),
+                room_id.to_owned(),
+                count,
+            ),
+            lazy_load,
+        );
+    }
+
+    #[tracing::instrument(skip(self))]
+    pub fn lazy_load_confirm_delivery(
+        &self,
+        user_id: &UserId,
+        device_id: &DeviceId,
+        room_id: &RoomId,
+        since: u64,
+    ) -> Result<()> {
+        if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&(
+            user_id.to_owned(),
+            device_id.to_owned(),
+            room_id.to_owned(),
+            since,
+        )) {
+            let mut prefix = user_id.as_bytes().to_vec();
+            prefix.push(0xff);
+            prefix.extend_from_slice(device_id.as_bytes());
+            prefix.push(0xff);
+            prefix.extend_from_slice(room_id.as_bytes());
+            prefix.push(0xff);
+
+            for ll_id in user_ids {
+                let mut key = prefix.clone();
+                key.extend_from_slice(ll_id.as_bytes());
+                self.lazyloadedids.insert(&key, &[])?;
+            }
+        }
+
+        Ok(())
+    }
+
+    #[tracing::instrument(skip(self))]
+    pub fn lazy_load_reset(
+        &self,
+        user_id: &UserId,
+        device_id: &DeviceId,
+        room_id: &RoomId,
+    ) -> Result<()> {
+        let mut prefix = user_id.as_bytes().to_vec();
+        prefix.push(0xff);
+        prefix.extend_from_slice(device_id.as_bytes());
+        prefix.push(0xff);
+        prefix.extend_from_slice(room_id.as_bytes());
+        prefix.push(0xff);
+
+        for (key, _) in self.lazyloadedids.scan_prefix(prefix) {
+            self.lazyloadedids.remove(&key)?;
+        }
+
+        Ok(())
+    }
 }
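The four lazy-loading helpers above are intended to be driven from the sync handler, which is not part of this hunk. A hedged sketch of the expected call order; every surrounding name (`members_referenced_by_timeline`, `next_batch`, `since`, the id variables) is assumed:

// Sketch only.
db.rooms.lazy_load_confirm_delivery(&user_id, &device_id, &room_id, since)?;

let mut newly_needed: HashSet<Box<UserId>> = HashSet::new();
for member in members_referenced_by_timeline {
    if !db.rooms.lazy_load_was_sent_before(&user_id, &device_id, &room_id, &member)? {
        newly_needed.insert(member); // send this m.room.member event in the response
    }
}

// Remember what was sent; it is persisted once the client comes back with `next_batch`.
db.rooms.lazy_load_mark_sent(&user_id, &device_id, &room_id, newly_needed, next_batch);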
@@ -524,7 +524,7 @@ impl Sending {
                     .unwrap(), // TODO: handle error
                     appservice::event::push_events::v1::Request {
                         events: &pdu_jsons,
-                        txn_id: &base64::encode_config(
+                        txn_id: (&*base64::encode_config(
                             Self::calculate_hash(
                                 &events
                                     .iter()

@@ -534,7 +534,8 @@ impl Sending {
                                     .collect::<Vec<_>>(),
                             ),
                             base64::URL_SAFE_NO_PAD,
-                        ),
+                        ))
+                            .into(),
                     },
                 )
                 .await

@@ -682,7 +683,7 @@ impl Sending {
                     pdus: &pdu_jsons,
                     edus: &edu_jsons,
                     origin_server_ts: MilliSecondsSinceUnixEpoch::now(),
-                    transaction_id: &base64::encode_config(
+                    transaction_id: (&*base64::encode_config(
                         Self::calculate_hash(
                             &events
                                 .iter()

@@ -692,7 +693,8 @@ impl Sending {
                                 .collect::<Vec<_>>(),
                         ),
                         base64::URL_SAFE_NO_PAD,
-                    ),
+                    ))
+                    .into(),
                 },
             )
             .await
@@ -1,7 +1,7 @@
 use std::sync::Arc;

 use crate::Result;
-use ruma::{DeviceId, UserId};
+use ruma::{identifiers::TransactionId, DeviceId, UserId};

 use super::abstraction::Tree;

@@ -14,7 +14,7 @@ impl TransactionIds {
         &self,
         user_id: &UserId,
         device_id: Option<&DeviceId>,
-        txn_id: &str,
+        txn_id: &TransactionId,
         data: &[u8],
     ) -> Result<()> {
         let mut key = user_id.as_bytes().to_vec();

@@ -32,7 +32,7 @@ impl TransactionIds {
         &self,
         user_id: &UserId,
         device_id: Option<&DeviceId>,
-        txn_id: &str,
+        txn_id: &TransactionId,
     ) -> Result<Option<Vec<u8>>> {
         let mut key = user_id.as_bytes().to_vec();
         key.push(0xff);
@@ -1,4 +1,6 @@
+use std::collections::BTreeMap;
 use std::sync::Arc;
+use std::sync::RwLock;

 use crate::{client_server::SESSION_ID_LENGTH, utils, Error, Result};
 use ruma::{

@@ -18,7 +20,8 @@ use super::abstraction::Tree;

 pub struct Uiaa {
     pub(super) userdevicesessionid_uiaainfo: Arc<dyn Tree>, // User-interactive authentication
-    pub(super) userdevicesessionid_uiaarequest: Arc<dyn Tree>, // UiaaRequest = canonical json value
+    pub(super) userdevicesessionid_uiaarequest:
+        RwLock<BTreeMap<(Box<UserId>, Box<DeviceId>, String), CanonicalJsonValue>>,
 }

 impl Uiaa {

@@ -147,16 +150,13 @@ impl Uiaa {
         session: &str,
         request: &CanonicalJsonValue,
     ) -> Result<()> {
-        let mut userdevicesessionid = user_id.as_bytes().to_vec();
-        userdevicesessionid.push(0xff);
-        userdevicesessionid.extend_from_slice(device_id.as_bytes());
-        userdevicesessionid.push(0xff);
-        userdevicesessionid.extend_from_slice(session.as_bytes());
-
-        self.userdevicesessionid_uiaarequest.insert(
-            &userdevicesessionid,
-            &serde_json::to_vec(request).expect("json value to vec always works"),
-        )?;
+        self.userdevicesessionid_uiaarequest
+            .write()
+            .unwrap()
+            .insert(
+                (user_id.to_owned(), device_id.to_owned(), session.to_owned()),
+                request.to_owned(),
+            );

         Ok(())
     }

@@ -166,23 +166,12 @@ impl Uiaa {
         user_id: &UserId,
         device_id: &DeviceId,
         session: &str,
-    ) -> Result<Option<CanonicalJsonValue>> {
-        let mut userdevicesessionid = user_id.as_bytes().to_vec();
-        userdevicesessionid.push(0xff);
-        userdevicesessionid.extend_from_slice(device_id.as_bytes());
-        userdevicesessionid.push(0xff);
-        userdevicesessionid.extend_from_slice(session.as_bytes());
-
+    ) -> Option<CanonicalJsonValue> {
         self.userdevicesessionid_uiaarequest
-            .get(&userdevicesessionid)?
-            .map(|bytes| {
-                serde_json::from_str::<CanonicalJsonValue>(
-                    &utils::string_from_bytes(&bytes)
-                        .map_err(|_| Error::bad_database("Invalid uiaa request bytes in db."))?,
-                )
-                .map_err(|_| Error::bad_database("Invalid uiaa request in db."))
-            })
-            .transpose()
+            .read()
+            .unwrap()
+            .get(&(user_id.to_owned(), device_id.to_owned(), session.to_owned()))
+            .map(|j| j.to_owned())
     }

     fn update_uiaa_session(
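UIAA requests now live only in the in-memory `RwLock<BTreeMap<...>>` rather than in a persisted tree, which is why the 10 -> 11 migration earlier in this diff clears the old `userdevicesessionid_uiaarequest` tree and why the getter no longer returns a `Result`. A hedged round-trip sketch; the setter and getter names (`set_uiaa_request`, `get_uiaa_request`) and the surrounding identifiers are assumed, since the function names are not visible in this hunk:

// Sketch only.
db.uiaa
    .set_uiaa_request(&user_id, &device_id, &session, &request_json)?;
// ... later, when the client retries with the same session id ...
if let Some(original) = db.uiaa.get_uiaa_request(&user_id, &device_id, &session) {
    // `original` is the CanonicalJsonValue captured above; it does not survive a restart.
}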
@ -1,6 +1,9 @@
|
||||||
use crate::{utils, Error, Result};
|
use crate::{utils, Error, Result};
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::client::{error::ErrorKind, r0::device::Device},
|
api::client::{
|
||||||
|
error::ErrorKind,
|
||||||
|
r0::{device::Device, filter::IncomingFilterDefinition},
|
||||||
|
},
|
||||||
encryption::{CrossSigningKey, DeviceKeys, OneTimeKey},
|
encryption::{CrossSigningKey, DeviceKeys, OneTimeKey},
|
||||||
events::{AnyToDeviceEvent, EventType},
|
events::{AnyToDeviceEvent, EventType},
|
||||||
identifiers::MxcUri,
|
identifiers::MxcUri,
|
||||||
|
@ -8,7 +11,12 @@ use ruma::{
|
||||||
DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, RoomAliasId, UInt,
|
DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, RoomAliasId, UInt,
|
||||||
UserId,
|
UserId,
|
||||||
};
|
};
|
||||||
use std::{collections::BTreeMap, convert::TryInto, mem, sync::Arc};
|
use std::{
|
||||||
|
collections::BTreeMap,
|
||||||
|
convert::{TryFrom, TryInto},
|
||||||
|
mem,
|
||||||
|
sync::Arc,
|
||||||
|
};
|
||||||
use tracing::warn;
|
use tracing::warn;
|
||||||
|
|
||||||
use super::abstraction::Tree;
|
use super::abstraction::Tree;
|
||||||
|
@ -31,6 +39,8 @@ pub struct Users {
|
||||||
pub(super) userid_selfsigningkeyid: Arc<dyn Tree>,
|
pub(super) userid_selfsigningkeyid: Arc<dyn Tree>,
|
||||||
pub(super) userid_usersigningkeyid: Arc<dyn Tree>,
|
pub(super) userid_usersigningkeyid: Arc<dyn Tree>,
|
||||||
|
|
||||||
|
pub(super) userfilterid_filter: Arc<dyn Tree>, // UserFilterId = UserId + FilterId
|
||||||
|
|
||||||
pub(super) todeviceid_events: Arc<dyn Tree>, // ToDeviceId = UserId + DeviceId + Count
|
pub(super) todeviceid_events: Arc<dyn Tree>, // ToDeviceId = UserId + DeviceId + Count
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -395,7 +405,7 @@ impl Users {
|
||||||
user_id: &UserId,
|
user_id: &UserId,
|
||||||
device_id: &DeviceId,
|
device_id: &DeviceId,
|
||||||
one_time_key_key: &DeviceKeyId,
|
one_time_key_key: &DeviceKeyId,
|
||||||
one_time_key_value: &OneTimeKey,
|
one_time_key_value: &Raw<OneTimeKey>,
|
||||||
globals: &super::globals::Globals,
|
globals: &super::globals::Globals,
|
||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
let mut key = user_id.as_bytes().to_vec();
|
let mut key = user_id.as_bytes().to_vec();
|
||||||
|
@ -445,7 +455,7 @@ impl Users {
|
||||||
device_id: &DeviceId,
|
device_id: &DeviceId,
|
||||||
key_algorithm: &DeviceKeyAlgorithm,
|
key_algorithm: &DeviceKeyAlgorithm,
|
||||||
globals: &super::globals::Globals,
|
globals: &super::globals::Globals,
|
||||||
) -> Result<Option<(Box<DeviceKeyId>, OneTimeKey)>> {
|
) -> Result<Option<(Box<DeviceKeyId>, Raw<OneTimeKey>)>> {
|
||||||
let mut prefix = user_id.as_bytes().to_vec();
|
let mut prefix = user_id.as_bytes().to_vec();
|
||||||
prefix.push(0xff);
|
prefix.push(0xff);
|
||||||
prefix.extend_from_slice(device_id.as_bytes());
|
prefix.extend_from_slice(device_id.as_bytes());
|
||||||
|
@ -516,7 +526,7 @@ impl Users {
|
||||||
&self,
|
&self,
|
||||||
user_id: &UserId,
|
user_id: &UserId,
|
||||||
device_id: &DeviceId,
|
device_id: &DeviceId,
|
||||||
device_keys: &DeviceKeys,
|
device_keys: &Raw<DeviceKeys>,
|
||||||
rooms: &super::rooms::Rooms,
|
rooms: &super::rooms::Rooms,
|
||||||
globals: &super::globals::Globals,
|
globals: &super::globals::Globals,
|
||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
|
@@ -545,9 +555,9 @@ impl Users {
     pub fn add_cross_signing_keys(
         &self,
         user_id: &UserId,
-        master_key: &CrossSigningKey,
-        self_signing_key: &Option<CrossSigningKey>,
-        user_signing_key: &Option<CrossSigningKey>,
+        master_key: &Raw<CrossSigningKey>,
+        self_signing_key: &Option<Raw<CrossSigningKey>>,
+        user_signing_key: &Option<Raw<CrossSigningKey>>,
         rooms: &super::rooms::Rooms,
         globals: &super::globals::Globals,
     ) -> Result<()> {
@@ -557,7 +567,12 @@ impl Users {
         prefix.push(0xff);
 
         // Master key
-        let mut master_key_ids = master_key.keys.values();
+        let mut master_key_ids = master_key
+            .deserialize()
+            .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid master key"))?
+            .keys
+            .into_values();
 
         let master_key_id = master_key_ids.next().ok_or(Error::BadRequest(
             ErrorKind::InvalidParam,
             "Master key contained no key.",
@@ -573,17 +588,22 @@ impl Users {
         let mut master_key_key = prefix.clone();
         master_key_key.extend_from_slice(master_key_id.as_bytes());
 
-        self.keyid_key.insert(
-            &master_key_key,
-            &serde_json::to_vec(&master_key).expect("CrossSigningKey::to_vec always works"),
-        )?;
+        self.keyid_key
+            .insert(&master_key_key, master_key.json().get().as_bytes())?;
 
         self.userid_masterkeyid
             .insert(user_id.as_bytes(), &master_key_key)?;
 
         // Self-signing key
         if let Some(self_signing_key) = self_signing_key {
-            let mut self_signing_key_ids = self_signing_key.keys.values();
+            let mut self_signing_key_ids = self_signing_key
+                .deserialize()
+                .map_err(|_| {
+                    Error::BadRequest(ErrorKind::InvalidParam, "Invalid self signing key")
+                })?
+                .keys
+                .into_values();
 
             let self_signing_key_id = self_signing_key_ids.next().ok_or(Error::BadRequest(
                 ErrorKind::InvalidParam,
                 "Self signing key contained no key.",
@@ -601,8 +621,7 @@ impl Users {
 
             self.keyid_key.insert(
                 &self_signing_key_key,
-                &serde_json::to_vec(&self_signing_key)
-                    .expect("CrossSigningKey::to_vec always works"),
+                self_signing_key.json().get().as_bytes(),
             )?;
 
             self.userid_selfsigningkeyid
@@ -611,7 +630,14 @@ impl Users {
 
         // User-signing key
         if let Some(user_signing_key) = user_signing_key {
-            let mut user_signing_key_ids = user_signing_key.keys.values();
+            let mut user_signing_key_ids = user_signing_key
+                .deserialize()
+                .map_err(|_| {
+                    Error::BadRequest(ErrorKind::InvalidParam, "Invalid user signing key")
+                })?
+                .keys
+                .into_values();
 
             let user_signing_key_id = user_signing_key_ids.next().ok_or(Error::BadRequest(
                 ErrorKind::InvalidParam,
                 "User signing key contained no key.",
@@ -629,8 +655,7 @@ impl Users {
 
             self.keyid_key.insert(
                 &user_signing_key_key,
-                &serde_json::to_vec(&user_signing_key)
-                    .expect("CrossSigningKey::to_vec always works"),
+                user_signing_key.json().get().as_bytes(),
             )?;
 
             self.userid_usersigningkeyid
@@ -763,7 +788,7 @@ impl Users {
         &self,
         user_id: &UserId,
         device_id: &DeviceId,
-    ) -> Result<Option<DeviceKeys>> {
+    ) -> Result<Option<Raw<DeviceKeys>>> {
         let mut key = user_id.as_bytes().to_vec();
         key.push(0xff);
         key.extend_from_slice(device_id.as_bytes());
@@ -780,25 +805,19 @@ impl Users {
         &self,
         user_id: &UserId,
         allowed_signatures: F,
-    ) -> Result<Option<CrossSigningKey>> {
+    ) -> Result<Option<Raw<CrossSigningKey>>> {
         self.userid_masterkeyid
             .get(user_id.as_bytes())?
             .map_or(Ok(None), |key| {
                 self.keyid_key.get(&key)?.map_or(Ok(None), |bytes| {
-                    let mut cross_signing_key = serde_json::from_slice::<CrossSigningKey>(&bytes)
-                        .map_err(|_| {
-                            Error::bad_database("CrossSigningKey in db is invalid.")
-                        })?;
-
-                    // A user is not allowed to see signatures from users other than himself and
-                    // the target user
-                    cross_signing_key.signatures = cross_signing_key
-                        .signatures
-                        .into_iter()
-                        .filter(|(user, _)| allowed_signatures(user))
-                        .collect();
-
-                    Ok(Some(cross_signing_key))
+                    let mut cross_signing_key = serde_json::from_slice::<serde_json::Value>(&bytes)
+                        .map_err(|_| Error::bad_database("CrossSigningKey in db is invalid."))?;
+                    clean_signatures(&mut cross_signing_key, user_id, allowed_signatures)?;
+
+                    Ok(Some(Raw::from_json(
+                        serde_json::value::to_raw_value(&cross_signing_key)
+                            .expect("Value to RawValue serialization"),
+                    )))
                 })
             })
     }
@@ -808,31 +827,25 @@ impl Users {
         &self,
         user_id: &UserId,
         allowed_signatures: F,
-    ) -> Result<Option<CrossSigningKey>> {
+    ) -> Result<Option<Raw<CrossSigningKey>>> {
         self.userid_selfsigningkeyid
             .get(user_id.as_bytes())?
             .map_or(Ok(None), |key| {
                 self.keyid_key.get(&key)?.map_or(Ok(None), |bytes| {
-                    let mut cross_signing_key = serde_json::from_slice::<CrossSigningKey>(&bytes)
-                        .map_err(|_| {
-                            Error::bad_database("CrossSigningKey in db is invalid.")
-                        })?;
-
-                    // A user is not allowed to see signatures from users other than himself and
-                    // the target user
-                    cross_signing_key.signatures = cross_signing_key
-                        .signatures
-                        .into_iter()
-                        .filter(|(user, _)| user == user_id || allowed_signatures(user))
-                        .collect();
-
-                    Ok(Some(cross_signing_key))
+                    let mut cross_signing_key = serde_json::from_slice::<serde_json::Value>(&bytes)
+                        .map_err(|_| Error::bad_database("CrossSigningKey in db is invalid."))?;
+                    clean_signatures(&mut cross_signing_key, user_id, allowed_signatures)?;
+
+                    Ok(Some(Raw::from_json(
+                        serde_json::value::to_raw_value(&cross_signing_key)
+                            .expect("Value to RawValue serialization"),
+                    )))
                 })
             })
     }
 
     #[tracing::instrument(skip(self, user_id))]
-    pub fn get_user_signing_key(&self, user_id: &UserId) -> Result<Option<CrossSigningKey>> {
+    pub fn get_user_signing_key(&self, user_id: &UserId) -> Result<Option<Raw<CrossSigningKey>>> {
         self.userid_usersigningkeyid
             .get(user_id.as_bytes())?
             .map_or(Ok(None), |key| {
@@ -1026,4 +1039,72 @@ impl Users {
         // TODO: Unhook 3PID
         Ok(())
     }
+
+    /// Creates a new sync filter. Returns the filter id.
+    #[tracing::instrument(skip(self))]
+    pub fn create_filter(
+        &self,
+        user_id: &UserId,
+        filter: &IncomingFilterDefinition,
+    ) -> Result<String> {
+        let filter_id = utils::random_string(4);
+
+        let mut key = user_id.as_bytes().to_vec();
+        key.push(0xff);
+        key.extend_from_slice(filter_id.as_bytes());
+
+        self.userfilterid_filter.insert(
+            &key,
+            &serde_json::to_vec(&filter).expect("filter is valid json"),
+        )?;
+
+        Ok(filter_id)
+    }
+
+    #[tracing::instrument(skip(self))]
+    pub fn get_filter(
+        &self,
+        user_id: &UserId,
+        filter_id: &str,
+    ) -> Result<Option<IncomingFilterDefinition>> {
+        let mut key = user_id.as_bytes().to_vec();
+        key.push(0xff);
+        key.extend_from_slice(filter_id.as_bytes());
+
+        let raw = self.userfilterid_filter.get(&key)?;
+
+        if let Some(raw) = raw {
+            serde_json::from_slice(&raw)
+                .map_err(|_| Error::bad_database("Invalid filter event in db."))
+        } else {
+            Ok(None)
+        }
+    }
+}
+
+/// Ensure that a user only sees signatures from themselves and the target user
+fn clean_signatures<F: Fn(&UserId) -> bool>(
+    cross_signing_key: &mut serde_json::Value,
+    user_id: &UserId,
+    allowed_signatures: F,
+) -> Result<(), Error> {
+    if let Some(signatures) = cross_signing_key
+        .get_mut("signatures")
+        .and_then(|v| v.as_object_mut())
+    {
+        // Don't allocate for the full size of the current signatures, but require
+        // at most one resize if nothing is dropped
+        let new_capacity = signatures.len() / 2;
+        for (user, signature) in
+            mem::replace(signatures, serde_json::Map::with_capacity(new_capacity))
+        {
+            let id = <&UserId>::try_from(user.as_str())
+                .map_err(|_| Error::bad_database("Invalid user ID in database."))?;
+            if id == user_id || allowed_signatures(id) {
+                signatures.insert(user, signature);
+            }
+        }
+    }
+
+    Ok(())
 }
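To make the new clean_signatures helper concrete: it walks the "signatures" object of a cross-signing key and keeps only entries from the requesting user or from users the caller marks as allowed. A standalone illustration of the same filtering, using plain string user IDs instead of ruma's UserId so it runs on its own (the function name and the JSON payload here are invented for the example):

    use serde_json::json;
    use std::mem;

    // Mirrors what clean_signatures does, with &str user IDs for simplicity.
    fn keep_visible_signatures(key: &mut serde_json::Value, me: &str, allowed: impl Fn(&str) -> bool) {
        if let Some(signatures) = key.get_mut("signatures").and_then(|v| v.as_object_mut()) {
            for (user, sig) in mem::replace(signatures, serde_json::Map::new()) {
                if user == me || allowed(&user) {
                    signatures.insert(user, sig);
                }
            }
        }
    }

    fn main() {
        let mut key = json!({
            "keys": { "ed25519:abc": "base64" },
            "signatures": {
                "@alice:example.org": { "ed25519:abc": "sig1" },
                "@bob:example.org": { "ed25519:def": "sig2" }
            }
        });
        keep_visible_signatures(&mut key, "@alice:example.org", |_| false);
        assert!(key["signatures"].get("@alice:example.org").is_some());
        assert!(key["signatures"].get("@bob:example.org").is_none());
    }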
@@ -39,6 +39,12 @@ pub enum Error {
     #[cfg(feature = "heed")]
     #[error("There was a problem with the connection to the heed database: {error}")]
     HeedError { error: String },
+    #[cfg(feature = "rocksdb")]
+    #[error("There was a problem with the connection to the rocksdb database: {source}")]
+    RocksDbError {
+        #[from]
+        source: rocksdb::Error,
+    },
     #[error("Could not generate an image.")]
     ImageError {
         #[from]

src/lib.rs (10 changed lines)
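The new RocksDbError variant follows the same pattern as the existing ones: an #[error(...)] display string plus an #[from] field, which look like the thiserror derive (assumed here, it is not named in the hunk). A minimal standalone sketch of that pattern with an invented error type:

    use thiserror::Error;

    #[derive(Debug, Error)]
    enum StoreError {
        // `#[from]` generates `From<std::io::Error>`, so `?` converts automatically.
        #[error("I/O problem: {source}")]
        Io {
            #[from]
            source: std::io::Error,
        },
        #[error("backend reported: {0}")]
        Backend(String),
    }

    fn read(path: &str) -> Result<Vec<u8>, StoreError> {
        Ok(std::fs::read(path)?) // io::Error becomes StoreError::Io via #[from]
    }

    fn main() {
        match read("/definitely/not/there") {
            Ok(bytes) => println!("read {} bytes", bytes.len()),
            Err(e) => println!("error: {}", e),
        }
    }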
@@ -7,21 +7,23 @@
 #![allow(clippy::suspicious_else_formatting)]
 #![deny(clippy::dbg_macro)]
 
-pub mod appservice_server;
-pub mod client_server;
+use std::ops::Deref;
+
 mod database;
 mod error;
 mod pdu;
 mod ruma_wrapper;
-pub mod server_server;
 mod utils;
 
+pub mod appservice_server;
+pub mod client_server;
+pub mod server_server;
+
 pub use database::{Config, Database};
 pub use error::{Error, Result};
 pub use pdu::PduEvent;
 pub use rocket::Config as RocketConfig;
 pub use ruma_wrapper::{ConduitResult, Ruma, RumaResponse};
-use std::ops::Deref;
 
 pub struct State<'r, T: Send + Sync + 'static>(pub &'r T);

src/main.rs (22 changed lines)
@@ -7,27 +7,9 @@
 #![allow(clippy::suspicious_else_formatting)]
 #![deny(clippy::dbg_macro)]
 
-pub mod appservice_server;
-pub mod client_server;
-pub mod server_server;
-
-mod database;
-mod error;
-mod pdu;
-mod ruma_wrapper;
-mod utils;
-
 use std::sync::Arc;
 
-use database::Config;
-pub use database::Database;
-pub use error::{Error, Result};
 use opentelemetry::trace::{FutureExt, Tracer};
-pub use pdu::PduEvent;
-pub use rocket::State;
-use ruma::api::client::error::ErrorKind;
-pub use ruma_wrapper::{ConduitResult, Ruma, RumaResponse};
-
 use rocket::{
     catch, catchers,
     figment::{
@@ -36,9 +18,13 @@ use rocket::{
     },
     routes, Request,
 };
+use ruma::api::client::error::ErrorKind;
 use tokio::sync::RwLock;
 use tracing_subscriber::{prelude::*, EnvFilter};
 
+pub use conduit::*; // Re-export everything from the library crate
+pub use rocket::State;
+
 fn setup_rocket(config: Figment, data: Arc<RwLock<Database>>) -> rocket::Rocket<rocket::Build> {
     rocket::custom(config)
         .manage(data)
@@ -296,14 +296,11 @@ where
                 .and_then(|auth| auth.get("session"))
                 .and_then(|session| session.as_str())
                 .and_then(|session| {
-                    db.uiaa
-                        .get_uiaa_request(
-                            &user_id,
-                            &sender_device.clone().unwrap_or_else(|| "".into()),
-                            session,
-                        )
-                        .ok()
-                        .flatten()
+                    db.uiaa.get_uiaa_request(
+                        &user_id,
+                        &sender_device.clone().unwrap_or_else(|| "".into()),
+                        session,
+                    )
                 })
             {
                 for (key, value) in initial_request {
@@ -44,12 +44,13 @@ use ruma::{
         room::{
             create::RoomCreateEventContent,
             member::{MembershipState, RoomMemberEventContent},
+            server_acl::RoomServerAclEventContent,
         },
         AnyEphemeralRoomEvent, EventType,
     },
     int,
     receipt::ReceiptType,
-    serde::JsonObject,
+    serde::{Base64, JsonObject},
     signatures::{CanonicalJsonObject, CanonicalJsonValue},
     state_res::{self, RoomVersion, StateMap},
     to_device::DeviceIdOrAllDevices,
@@ -551,7 +552,7 @@ pub fn get_server_keys_route(db: DatabaseGuard) -> Json<String> {
             .try_into()
             .expect("found invalid server signing keys in DB"),
         VerifyKey {
-            key: base64::encode_config(db.globals.keypair().public_key(), base64::STANDARD_NO_PAD),
+            key: Base64::new(db.globals.keypair().public_key().to_vec()),
        },
     );
     let mut response = serde_json::from_slice(
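The hand-rolled base64::encode_config call gives way to ruma's Base64 wrapper, so the raw key bytes travel in a dedicated type and the encoding lives in one place. A rough sketch of the new shape, assuming the ruma version used here (only the ruma::serde::Base64 path and Base64::new come from the diff; the byte slice stands in for Conduit's keypair):

    use ruma::serde::Base64;

    // Wrap raw public-key bytes; serde produces the base64 form when the
    // surrounding struct (e.g. VerifyKey) is serialized.
    fn verify_key(public_key: &[u8]) -> Base64 {
        Base64::new(public_key.to_vec())
    }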
@@ -740,6 +741,8 @@ pub async fn send_transaction_message_route(
            }
        };
 
+        acl_check(&body.origin, &room_id, &db)?;
+
         let mutex = Arc::clone(
             db.globals
                 .roomid_mutex_federation
@@ -854,7 +857,7 @@ pub async fn send_transaction_message_route(
                 // Check if this is a new transaction id
                 if db
                     .transaction_ids
-                    .existing_txnid(&sender, None, &message_id)?
+                    .existing_txnid(&sender, None, (&*message_id).into())?
                     .is_some()
                 {
                     continue;
@@ -902,7 +905,7 @@ pub async fn send_transaction_message_route(
 
                 // Save transaction id with empty data
                 db.transaction_ids
-                    .add_txnid(&sender, None, &message_id, &[])?;
+                    .add_txnid(&sender, None, (&*message_id).into(), &[])?;
             }
             Edu::_Custom(_) => {}
         }
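The existing_txnid / add_txnid pair above makes EDU handling idempotent: work keyed by (sender, transaction id) is skipped if it was already done. A standalone sketch of that idea with an in-memory set (Conduit keeps this in its transaction_ids tree; the struct and names below are invented for illustration):

    use std::collections::HashSet;

    struct SeenTransactions {
        seen: HashSet<(String, String)>,
    }

    impl SeenTransactions {
        fn new() -> Self {
            Self { seen: HashSet::new() }
        }

        /// Returns true only the first time a (sender, txn_id) pair is observed.
        fn is_new(&mut self, sender: &str, txn_id: &str) -> bool {
            self.seen.insert((sender.to_owned(), txn_id.to_owned()))
        }
    }

    fn main() {
        let mut txns = SeenTransactions::new();
        assert!(txns.is_new("@alice:example.org", "m123"));
        assert!(!txns.is_new("@alice:example.org", "m123")); // duplicate, skip the work
    }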
@@ -948,7 +951,7 @@ pub(crate) async fn handle_incoming_pdu<'a>(
     value: BTreeMap<String, CanonicalJsonValue>,
     is_timeline_event: bool,
     db: &'a Database,
-    pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, String>>>,
+    pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
 ) -> Result<Option<Vec<u8>>, String> {
     match db.rooms.exists(room_id) {
         Ok(true) => {}
@@ -1123,7 +1126,7 @@ fn handle_outlier_pdu<'a>(
     room_id: &'a RoomId,
     value: BTreeMap<String, CanonicalJsonValue>,
     db: &'a Database,
-    pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, String>>>,
+    pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
 ) -> AsyncRecursiveType<'a, Result<(Arc<PduEvent>, BTreeMap<String, CanonicalJsonValue>), String>> {
     Box::pin(async move {
         // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json
@@ -1285,7 +1288,7 @@ async fn upgrade_outlier_to_timeline_pdu(
     origin: &ServerName,
     db: &Database,
     room_id: &RoomId,
-    pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, String>>>,
+    pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
 ) -> Result<Option<Vec<u8>>, String> {
     if let Ok(Some(pduid)) = db.rooms.get_pdu_id(&incoming_pdu.event_id) {
         return Ok(Some(pduid));
@@ -1392,12 +1395,11 @@ async fn upgrade_outlier_to_timeline_pdu(
         let mut starting_events = Vec::with_capacity(leaf_state.len());
 
         for (k, id) in leaf_state {
-            let k = db
-                .rooms
-                .get_statekey_from_short(k)
-                .map_err(|_| "Failed to get_statekey_from_short.".to_owned())?;
-
-            state.insert(k, id.clone());
+            if let Ok(k) = db.rooms.get_statekey_from_short(k) {
+                state.insert(k, id.clone());
+            } else {
+                warn!("Failed to get_statekey_from_short.");
+            }
             starting_events.push(id);
         }
@@ -1687,25 +1689,6 @@ async fn upgrade_outlier_to_timeline_pdu(
             // We do this by adding the current state to the list of fork states
             extremity_sstatehashes.remove(&current_sstatehash);
             fork_states.push(current_state_ids);
-            dbg!(&extremity_sstatehashes);
-
-            for (sstatehash, leaf_pdu) in extremity_sstatehashes {
-                let mut leaf_state = db
-                    .rooms
-                    .state_full_ids(sstatehash)
-                    .map_err(|_| "Failed to ask db for room state.".to_owned())?;
-
-                if let Some(state_key) = &leaf_pdu.state_key {
-                    let shortstatekey = db
-                        .rooms
-                        .get_or_create_shortstatekey(&leaf_pdu.kind, state_key, &db.globals)
-                        .map_err(|_| "Failed to create shortstatekey.".to_owned())?;
-                    leaf_state.insert(shortstatekey, Arc::from(&*leaf_pdu.event_id));
-                    // Now it's the state after the pdu
-                }
-
-                fork_states.push(leaf_state);
-            }
 
             // We also add state after incoming event to the fork states
             let mut state_after = state_at_incoming_event.clone();
@@ -1755,11 +1738,16 @@ async fn upgrade_outlier_to_timeline_pdu(
                 .into_iter()
                 .map(|map| {
                     map.into_iter()
-                        .map(|(k, id)| db.rooms.get_statekey_from_short(k).map(|k| (k, id)))
-                        .collect::<Result<StateMap<_>>>()
+                        .filter_map(|(k, id)| {
+                            db.rooms
+                                .get_statekey_from_short(k)
+                                .map(|k| (k, id))
+                                .map_err(|e| warn!("Failed to get_statekey_from_short: {}", e))
+                                .ok()
+                        })
+                        .collect::<StateMap<_>>()
                 })
-                .collect::<Result<_>>()
-                .map_err(|_| "Failed to get_statekey_from_short.".to_owned())?;
+                .collect();
 
             let state = match state_res::resolve(
                 room_version_id,
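Both hunks above change the failure mode of the state-key lookup: a single unresolvable shortstatekey used to abort the whole collection, and now it is logged and skipped. A tiny standalone illustration of that filter_map pattern, with a plain Result-producing input standing in for get_statekey_from_short:

    fn main() {
        let lookups = vec![Ok(("k1", 1u64)), Err("unknown shortstatekey"), Ok(("k2", 2))];

        let state: std::collections::HashMap<_, _> = lookups
            .into_iter()
            .filter_map(|res| {
                res.map_err(|e| eprintln!("skipping entry: {}", e)) // warn!() in Conduit
                    .ok()
            })
            .collect();

        assert_eq!(state.len(), 2); // the bad entry is dropped, not fatal
    }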
@@ -1842,7 +1830,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>(
     events: &'a [Arc<EventId>],
     create_event: &'a PduEvent,
     room_id: &'a RoomId,
-    pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, String>>>,
+    pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
 ) -> AsyncRecursiveType<'a, Vec<(Arc<PduEvent>, Option<BTreeMap<String, CanonicalJsonValue>>)>> {
     Box::pin(async move {
         let back_off = |id| match db.globals.bad_event_ratelimiter.write().unwrap().entry(id) {
@@ -1871,73 +1859,104 @@ pub(crate) fn fetch_and_handle_outliers<'a>(
             // a. Look in the main timeline (pduid_pdu tree)
             // b. Look at outlier pdu tree
             // (get_pdu_json checks both)
-            let local_pdu = db.rooms.get_pdu(id);
-            let pdu = match local_pdu {
-                Ok(Some(pdu)) => {
-                    trace!("Found {} in db", id);
-                    (pdu, None)
-                }
-                Ok(None) => {
-                    // c. Ask origin server over federation
-                    warn!("Fetching {} over federation.", id);
-                    match db
-                        .sending
-                        .send_federation_request(
-                            &db.globals,
-                            origin,
-                            get_event::v1::Request { event_id: id },
-                        )
-                        .await
-                    {
-                        Ok(res) => {
-                            warn!("Got {} over federation", id);
-                            let (calculated_event_id, value) =
-                                match crate::pdu::gen_event_id_canonical_json(&res.pdu) {
-                                    Ok(t) => t,
-                                    Err(_) => {
-                                        back_off((**id).to_owned());
-                                        continue;
-                                    }
-                                };
-
-                            if calculated_event_id != **id {
-                                warn!("Server didn't return event id we requested: requested: {}, we got {}. Event: {:?}",
-                                    id, calculated_event_id, &res.pdu);
-                            }
-
-                            // This will also fetch the auth chain
-                            match handle_outlier_pdu(
-                                origin,
-                                create_event,
-                                id,
-                                room_id,
-                                value.clone(),
-                                db,
-                                pub_key_map,
-                            )
-                            .await
-                            {
-                                Ok((pdu, json)) => (pdu, Some(json)),
-                                Err(e) => {
-                                    warn!("Authentication of event {} failed: {:?}", id, e);
-                                    back_off((**id).to_owned());
-                                    continue;
-                                }
-                            }
-                        }
-                        Err(_) => {
-                            warn!("Failed to fetch event: {}", id);
-                            back_off((**id).to_owned());
-                            continue;
-                        }
-                    }
-                }
-                Err(e) => {
-                    warn!("Error loading {}: {}", id, e);
-                    continue;
-                }
-            };
-            pdus.push(pdu);
+            if let Ok(Some(local_pdu)) = db.rooms.get_pdu(id) {
+                trace!("Found {} in db", id);
+                pdus.push((local_pdu, None));
+                continue;
+            }
+
+            // c. Ask origin server over federation
+            // We also handle its auth chain here so we don't get a stack overflow in
+            // handle_outlier_pdu.
+            let mut todo_auth_events = vec![Arc::clone(id)];
+            let mut events_in_reverse_order = Vec::new();
+            let mut events_all = HashSet::new();
+            while let Some(next_id) = todo_auth_events.pop() {
+                if events_all.contains(&next_id) {
+                    continue;
+                }
+
+                if let Ok(Some(_)) = db.rooms.get_pdu(&next_id) {
+                    trace!("Found {} in db", id);
+                    continue;
+                }
+
+                warn!("Fetching {} over federation.", next_id);
+                match db
+                    .sending
+                    .send_federation_request(
+                        &db.globals,
+                        origin,
+                        get_event::v1::Request { event_id: &next_id },
+                    )
+                    .await
+                {
+                    Ok(res) => {
+                        warn!("Got {} over federation", next_id);
+                        let (calculated_event_id, value) =
+                            match crate::pdu::gen_event_id_canonical_json(&res.pdu) {
+                                Ok(t) => t,
+                                Err(_) => {
+                                    back_off((*next_id).to_owned());
+                                    continue;
+                                }
+                            };
+
+                        if calculated_event_id != *next_id {
+                            warn!("Server didn't return event id we requested: requested: {}, we got {}. Event: {:?}",
+                                next_id, calculated_event_id, &res.pdu);
+                        }
+
+                        if let Some(auth_events) =
+                            value.get("auth_events").and_then(|c| c.as_array())
+                        {
+                            for auth_event in auth_events {
+                                if let Ok(auth_event) =
+                                    serde_json::from_value(auth_event.clone().into())
+                                {
+                                    let a: Arc<EventId> = auth_event;
+                                    todo_auth_events.push(a);
+                                } else {
+                                    warn!("Auth event id is not valid");
+                                }
+                            }
+                        } else {
+                            warn!("Auth event list invalid");
+                        }
+
+                        events_in_reverse_order.push((next_id.clone(), value));
+                        events_all.insert(next_id);
+                    }
+                    Err(_) => {
+                        warn!("Failed to fetch event: {}", next_id);
+                        back_off((*next_id).to_owned());
+                    }
+                }
+            }
+
+            for (next_id, value) in events_in_reverse_order.iter().rev() {
+                match handle_outlier_pdu(
+                    origin,
+                    create_event,
+                    next_id,
+                    room_id,
+                    value.clone(),
+                    db,
+                    pub_key_map,
+                )
+                .await
+                {
+                    Ok((pdu, json)) => {
+                        if next_id == id {
+                            pdus.push((pdu, Some(json)));
+                        }
+                    }
+                    Err(e) => {
+                        warn!("Authentication of event {} failed: {:?}", next_id, e);
+                        back_off((**next_id).to_owned());
+                    }
+                }
+            }
         }
         pdus
     })
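The rewrite above replaces recursion into handle_outlier_pdu with an explicit work list plus a "seen" set, so deep auth chains no longer risk a stack overflow, and dependencies are processed before the events that reference them. A generic standalone sketch of that loop shape (the function and the deps_of closure are invented stand-ins for "ask the origin server and read auth_events"):

    use std::collections::HashSet;

    fn fetch_in_dependency_order(
        start: &str,
        deps_of: impl Fn(&str) -> Vec<String>,
    ) -> Vec<String> {
        let mut todo = vec![start.to_owned()];
        let mut seen = HashSet::new();
        let mut fetched_in_reverse = Vec::new();

        while let Some(next) = todo.pop() {
            if !seen.insert(next.clone()) {
                continue; // already fetched
            }
            todo.extend(deps_of(next.as_str()));
            fetched_in_reverse.push(next);
        }

        // Handle dependencies before the event that needed them, mirroring
        // `events_in_reverse_order.iter().rev()` above.
        fetched_in_reverse.reverse();
        fetched_in_reverse
    }

    fn main() {
        let deps = |id: &str| match id {
            "C" => vec!["B".to_owned()],
            "B" => vec!["A".to_owned()],
            _ => vec![],
        };
        assert_eq!(fetch_in_dependency_order("C", deps), ["A", "B", "C"]);
    }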
@@ -1950,9 +1969,9 @@ pub(crate) async fn fetch_signing_keys(
     db: &Database,
     origin: &ServerName,
     signature_ids: Vec<String>,
-) -> Result<BTreeMap<String, String>> {
+) -> Result<BTreeMap<String, Base64>> {
     let contains_all_ids =
-        |keys: &BTreeMap<String, String>| signature_ids.iter().all(|id| keys.contains_key(id));
+        |keys: &BTreeMap<String, Base64>| signature_ids.iter().all(|id| keys.contains_key(id));
 
     let permit = db
         .globals
@@ -2340,7 +2359,10 @@ pub fn get_event_route(
         .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?;
 
     if !db.rooms.server_in_room(sender_servername, room_id)? {
-        return Err(Error::BadRequest(ErrorKind::NotFound, "Event not found."));
+        return Err(Error::BadRequest(
+            ErrorKind::Forbidden,
+            "Server is not in room",
+        ));
     }
 
     Ok(get_event::v1::Response {
@@ -2379,6 +2401,8 @@ pub fn get_missing_events_route(
         ));
     }
 
+    acl_check(sender_servername, &body.room_id, &db)?;
+
     let mut queued_events = body.latest_events.clone();
     let mut events = Vec::new();
 
@@ -2448,6 +2472,15 @@ pub fn get_event_authorization_route(
         .as_ref()
         .expect("server is authenticated");
 
+    if !db.rooms.server_in_room(sender_servername, &body.room_id)? {
+        return Err(Error::BadRequest(
+            ErrorKind::Forbidden,
+            "Server is not in room.",
+        ));
+    }
+
+    acl_check(sender_servername, &body.room_id, &db)?;
+
     let event = db
         .rooms
         .get_pdu_json(&body.event_id)?
@@ -2461,10 +2494,6 @@ pub fn get_event_authorization_route(
     let room_id = <&RoomId>::try_from(room_id_str)
         .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?;
 
-    if !db.rooms.server_in_room(sender_servername, room_id)? {
-        return Err(Error::BadRequest(ErrorKind::NotFound, "Event not found."));
-    }
-
     let auth_chain_ids = get_auth_chain(room_id, vec![Arc::from(&*body.event_id)], &db)?;
 
     Ok(get_event_authorization::v1::Response {
@@ -2504,6 +2533,8 @@ pub fn get_room_state_route(
         ));
     }
 
+    acl_check(sender_servername, &body.room_id, &db)?;
+
     let shortstatehash = db
         .rooms
         .pdu_shortstatehash(&body.event_id)?
@@ -2567,6 +2598,8 @@ pub fn get_room_state_ids_route(
         ));
     }
 
+    acl_check(sender_servername, &body.room_id, &db)?;
+
     let shortstatehash = db
         .rooms
         .pdu_shortstatehash(&body.event_id)?
@@ -2610,10 +2643,17 @@ pub fn create_join_event_template_route(
     if !db.rooms.exists(&body.room_id)? {
         return Err(Error::BadRequest(
             ErrorKind::NotFound,
-            "Server is not in room.",
+            "Room is unknown to this server.",
         ));
     }
 
+    let sender_servername = body
+        .sender_servername
+        .as_ref()
+        .expect("server is authenticated");
+
+    acl_check(sender_servername, &body.room_id, &db)?;
+
     let prev_events: Vec<_> = db
         .rooms
         .get_pdu_leaves(&body.room_id)?
@@ -2766,6 +2806,7 @@ pub fn create_join_event_template_route(
 
 async fn create_join_event(
     db: &DatabaseGuard,
+    sender_servername: &ServerName,
     room_id: &RoomId,
     pdu: &RawJsonValue,
 ) -> Result<RoomState> {
@@ -2773,6 +2814,15 @@ async fn create_join_event(
         return Err(Error::bad_config("Federation is disabled."));
     }
 
+    if !db.rooms.exists(room_id)? {
+        return Err(Error::BadRequest(
+            ErrorKind::NotFound,
+            "Room is unknown to this server.",
+        ));
+    }
+
+    acl_check(sender_servername, room_id, db)?;
+
     // We need to return the state prior to joining, let's keep a reference to that here
     let shortstatehash = db
         .rooms
@@ -2872,7 +2922,12 @@ pub async fn create_join_event_v1_route(
     db: DatabaseGuard,
     body: Ruma<create_join_event::v1::Request<'_>>,
 ) -> ConduitResult<create_join_event::v1::Response> {
-    let room_state = create_join_event(&db, &body.room_id, &body.pdu).await?;
+    let sender_servername = body
+        .sender_servername
+        .as_ref()
+        .expect("server is authenticated");
+
+    let room_state = create_join_event(&db, sender_servername, &body.room_id, &body.pdu).await?;
 
     Ok(create_join_event::v1::Response { room_state }.into())
 }
@@ -2889,7 +2944,12 @@ pub async fn create_join_event_v2_route(
     db: DatabaseGuard,
     body: Ruma<create_join_event::v2::Request<'_>>,
 ) -> ConduitResult<create_join_event::v2::Response> {
-    let room_state = create_join_event(&db, &body.room_id, &body.pdu).await?;
+    let sender_servername = body
+        .sender_servername
+        .as_ref()
+        .expect("server is authenticated");
+
+    let room_state = create_join_event(&db, sender_servername, &body.room_id, &body.pdu).await?;
 
     Ok(create_join_event::v2::Response { room_state }.into())
 }
@@ -2910,6 +2970,13 @@ pub async fn create_invite_route(
         return Err(Error::bad_config("Federation is disabled."));
     }
 
+    let sender_servername = body
+        .sender_servername
+        .as_ref()
+        .expect("server is authenticated");
+
+    acl_check(sender_servername, &body.room_id, &db)?;
+
     if body.room_version != RoomVersionId::V5 && body.room_version != RoomVersionId::V6 {
         return Err(Error::BadRequest(
             ErrorKind::IncompatibleRoomVersion {
@@ -3183,7 +3250,7 @@ pub async fn claim_keys_route(
 #[tracing::instrument(skip(event, pub_key_map, db))]
 pub(crate) async fn fetch_required_signing_keys(
     event: &BTreeMap<String, CanonicalJsonValue>,
-    pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, String>>>,
+    pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
     db: &Database,
 ) -> Result<()> {
     let signatures = event
@@ -3237,7 +3304,7 @@ fn get_server_keys_from_cache(
     pdu: &RawJsonValue,
     servers: &mut BTreeMap<Box<ServerName>, BTreeMap<Box<ServerSigningKeyId>, QueryCriteria>>,
     room_version: &RoomVersionId,
-    pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap<String, BTreeMap<String, String>>>,
+    pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap<String, BTreeMap<String, Base64>>>,
     db: &Database,
 ) -> Result<()> {
     let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| {
@@ -3290,7 +3357,7 @@ fn get_server_keys_from_cache(
     let signature_ids = signature_object.keys().cloned().collect::<Vec<_>>();
 
     let contains_all_ids =
-        |keys: &BTreeMap<String, String>| signature_ids.iter().all(|id| keys.contains_key(id));
+        |keys: &BTreeMap<String, Base64>| signature_ids.iter().all(|id| keys.contains_key(id));
 
     let origin = <&ServerName>::try_from(signature_server.as_str()).map_err(|_| {
         Error::BadServerResponse("Invalid servername in signatures of server response pdu.")
@@ -3323,7 +3390,7 @@ pub(crate) async fn fetch_join_signing_keys(
 pub(crate) async fn fetch_join_signing_keys(
     event: &create_join_event::v2::Response,
     room_version: &RoomVersionId,
-    pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, String>>>,
+    pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
     db: &Database,
 ) -> Result<()> {
     let mut servers: BTreeMap<Box<ServerName>, BTreeMap<Box<ServerSigningKeyId>, QueryCriteria>> =
@@ -3423,6 +3490,35 @@ pub(crate) async fn fetch_join_signing_keys(
     Ok(())
 }
 
+/// Returns Ok if the acl allows the server
+fn acl_check(server_name: &ServerName, room_id: &RoomId, db: &Database) -> Result<()> {
+    let acl_event = match db
+        .rooms
+        .room_state_get(room_id, &EventType::RoomServerAcl, "")?
+    {
+        Some(acl) => acl,
+        None => return Ok(()),
+    };
+
+    let acl_event_content: RoomServerAclEventContent =
+        match serde_json::from_str(acl_event.content.get()) {
+            Ok(content) => content,
+            Err(_) => {
+                warn!("Invalid ACL event");
+                return Ok(());
+            }
+        };
+
+    if acl_event_content.is_allowed(server_name) {
+        Ok(())
+    } else {
+        Err(Error::BadRequest(
+            ErrorKind::Forbidden,
+            "Server was denied by ACL",
+        ))
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::{add_port_to_hostname, get_ip_with_port, FedDest};
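The new acl_check helper is what the federation routes above call before doing any work: it loads the room's m.room.server_acl state event, deserializes its content, and asks is_allowed for the requesting server. A small standalone sketch of that last step, assuming the ruma version used here (the content type path and is_allowed come from the diff; the JSON follows the m.room.server_acl content shape and the server names are made up):

    use ruma::{events::room::server_acl::RoomServerAclEventContent, ServerName};
    use std::convert::TryFrom;

    fn main() {
        let content: RoomServerAclEventContent = serde_json::from_str(
            r#"{"allow": ["*"], "deny": ["evil.example.org"], "allow_ip_literals": false}"#,
        )
        .expect("valid ACL content");

        let good = <&ServerName>::try_from("matrix.org").unwrap();
        let bad = <&ServerName>::try_from("evil.example.org").unwrap();
        assert!(content.is_allowed(good));   // matches the allow glob, not denied
        assert!(!content.is_allowed(bad));   // explicitly denied, so the request is rejected
    }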