
Merge remote-tracking branch 'origin/master' into federation

Timo Kösters 2020-09-12 22:13:53 +02:00
commit e6b1f54857
No known key found for this signature in database
GPG key ID: 24DA7517711A2BA4
22 changed files with 925 additions and 169 deletions

Cargo.lock (generated)

@ -1634,7 +1634,7 @@ dependencies = [
[[package]]
name = "ruma"
version = "0.0.1"
source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ca07bb61d88fd665464dab9707de6d47048fc225"
source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#63341000fbabce9b230b6665ce65c617944408fa"
dependencies = [
"ruma-api",
"ruma-appservice-api",
@ -1650,7 +1650,7 @@ dependencies = [
[[package]]
name = "ruma-api"
version = "0.17.0-alpha.1"
source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ca07bb61d88fd665464dab9707de6d47048fc225"
source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#63341000fbabce9b230b6665ce65c617944408fa"
dependencies = [
"http",
"percent-encoding",
@ -1665,7 +1665,7 @@ dependencies = [
[[package]]
name = "ruma-api-macros"
version = "0.17.0-alpha.1"
source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ca07bb61d88fd665464dab9707de6d47048fc225"
source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#63341000fbabce9b230b6665ce65c617944408fa"
dependencies = [
"proc-macro-crate",
"proc-macro2",
@ -1676,7 +1676,7 @@ dependencies = [
[[package]]
name = "ruma-appservice-api"
version = "0.2.0-alpha.1"
source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ca07bb61d88fd665464dab9707de6d47048fc225"
source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#63341000fbabce9b230b6665ce65c617944408fa"
dependencies = [
"ruma-api",
"ruma-common",
@ -1689,7 +1689,7 @@ dependencies = [
[[package]]
name = "ruma-client-api"
version = "0.10.0-alpha.1"
source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ca07bb61d88fd665464dab9707de6d47048fc225"
source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#63341000fbabce9b230b6665ce65c617944408fa"
dependencies = [
"assign",
"http",
@ -1708,7 +1708,7 @@ dependencies = [
[[package]]
name = "ruma-common"
version = "0.2.0"
source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ca07bb61d88fd665464dab9707de6d47048fc225"
source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#63341000fbabce9b230b6665ce65c617944408fa"
dependencies = [
"js_int",
"ruma-api",
@ -1722,7 +1722,7 @@ dependencies = [
[[package]]
name = "ruma-events"
version = "0.22.0-alpha.1"
source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ca07bb61d88fd665464dab9707de6d47048fc225"
source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#63341000fbabce9b230b6665ce65c617944408fa"
dependencies = [
"js_int",
"ruma-common",
@ -1737,7 +1737,7 @@ dependencies = [
[[package]]
name = "ruma-events-macros"
version = "0.22.0-alpha.1"
source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ca07bb61d88fd665464dab9707de6d47048fc225"
source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#63341000fbabce9b230b6665ce65c617944408fa"
dependencies = [
"proc-macro-crate",
"proc-macro2",
@ -1748,7 +1748,7 @@ dependencies = [
[[package]]
name = "ruma-federation-api"
version = "0.0.3"
source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ca07bb61d88fd665464dab9707de6d47048fc225"
source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#63341000fbabce9b230b6665ce65c617944408fa"
dependencies = [
"js_int",
"ruma-api",
@ -1763,7 +1763,7 @@ dependencies = [
[[package]]
name = "ruma-identifiers"
version = "0.17.4"
source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ca07bb61d88fd665464dab9707de6d47048fc225"
source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#63341000fbabce9b230b6665ce65c617944408fa"
dependencies = [
"rand",
"ruma-identifiers-macros",
@ -1775,7 +1775,7 @@ dependencies = [
[[package]]
name = "ruma-identifiers-macros"
version = "0.17.4"
source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ca07bb61d88fd665464dab9707de6d47048fc225"
source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#63341000fbabce9b230b6665ce65c617944408fa"
dependencies = [
"proc-macro2",
"quote",
@ -1786,7 +1786,7 @@ dependencies = [
[[package]]
name = "ruma-identifiers-validation"
version = "0.1.1"
source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ca07bb61d88fd665464dab9707de6d47048fc225"
source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#63341000fbabce9b230b6665ce65c617944408fa"
dependencies = [
"serde",
"strum",
@ -1795,7 +1795,7 @@ dependencies = [
[[package]]
name = "ruma-serde"
version = "0.2.3"
source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ca07bb61d88fd665464dab9707de6d47048fc225"
source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#63341000fbabce9b230b6665ce65c617944408fa"
dependencies = [
"form_urlencoded",
"itoa",
@ -1807,7 +1807,7 @@ dependencies = [
[[package]]
name = "ruma-signatures"
version = "0.6.0-dev.1"
source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#ca07bb61d88fd665464dab9707de6d47048fc225"
source = "git+https://github.com/timokoesters/ruma?branch=timo-fed-fixes#63341000fbabce9b230b6665ce65c617944408fa"
dependencies = [
"base64",
"ring",


@ -53,10 +53,10 @@ LABEL org.opencontainers.image.created=${CREATED} \
org.opencontainers.image.url="https://conduit.rs/" \
org.opencontainers.image.revision=${GIT_REF} \
org.opencontainers.image.source="https://git.koesters.xyz/timo/conduit.git" \
org.opencontainers.image.documentation.="" \
org.opencontainers.image.licenses="AGPL-3.0-only" \
org.opencontainers.image.documentation="" \
org.opencontainers.image.ref.name="" \
org.label-schema.docker.build="docker build . -t conduit_homeserver:latest --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)" \
org.label-schema.docker.build="docker build . -t matrixconduit/matrix-conduit:latest --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)" \
maintainer="Weasy666"
# Standard port on which Rocket launches
@ -81,11 +81,15 @@ RUN chown -cR www-data:www-data /srv/conduit
# Install packages needed to run Conduit
RUN apk add --no-cache \
ca-certificates \
curl \
libgcc
# Create a volume for the database, to persist its contents
VOLUME ["/srv/conduit/.local/share/conduit"]
# Test if Conduit is still alive, uses the same endpoint as Element
HEALTHCHECK --start-period=2s CMD curl --fail -s http://localhost:8000/_matrix/client/versions || curl -k --fail -s https://localhost:8000/_matrix/client/versions || exit 1
# Set user to www-data
USER www-data
# Set container home directory


@ -25,7 +25,14 @@ Clone the repo, build it with `cargo build --release` and call the binary
##### Using Docker
Build the docker image and run it with docker or docker-compose. [Read more](docker/README.md)
Pull and run the docker image with
``` bash
docker pull matrixconduit/matrix-conduit:latest
docker run -d matrixconduit/matrix-conduit:latest -p 8448:8000 -v db:/srv/conduit/.local/share/conduit
```
Or build and run it with docker or docker-compose. [Read more](docker/README.md)
#### What is it built on?


@ -3,18 +3,19 @@ version: '3'
services:
homeserver:
### If you already built the Conduit image with 'docker build', then you can uncomment the
### 'image' line and comment out the 'build' option.
# image: conduit_homeserver:latest
### If you want meaningful labels in you built Conduit image, you should run docker-compose like this:
### If you already built the Conduit image with 'docker build' or want to use the Docker Hub image,
### then you are ready to go.
image: matrixconduit/matrix-conduit:latest
### If you want to build a fresh image from the sources, then comment the image line and uncomment the
### build lines. If you want meaningful labels in your built Conduit image, you should run docker-compose like this:
### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d
build:
context: .
args:
CREATED:
VERSION:
LOCAL: "false"
GIT_REF: HEAD
# build:
# context: .
# args:
# CREATED:
# VERSION:
# LOCAL: 'false'
# GIT_REF: HEAD
restart: unless-stopped
ports:
- 8448:8000


@ -28,10 +28,10 @@ ARG GIT_REF=HEAD
To build the image you can use the following command
``` bash
docker build . -t conduit_homeserver:latest --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)
docker build . -t matrixconduit/matrix-conduit:latest --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)
```
which also will tag the resulting image as `conduit_homeserver:latest`.
which also will tag the resulting image as `matrixconduit/matrix-conduit:latest`.
**Note:** it omits the two optional `build-arg`s.
@ -40,7 +40,7 @@ which also will tag the resulting image as `conduit_homeserver:latest`.
After building the image you can simply run it with
``` bash
docker run conduit_homeserver:latest -p 8448:8000 -v db:/srv/conduit/.local/share/conduit -e ROCKET_SERVER_NAME="localhost:8000"
docker run -d matrixconduit/matrix-conduit:latest -p 8448:8000 -v db:/srv/conduit/.local/share/conduit -e ROCKET_SERVER_NAME="localhost:8000"
```
For detached mode, you also need to use the `-d` flag. You can pass in more env vars as shown here; for an overview of possible values, take a look at the `docker-compose.yml` file.
@ -49,7 +49,7 @@ If you just want to test Conduit for a short time, you can use the `--rm` flag,
## Docker-compose
If the docker command is not for you or your setup, you can also use one of the provided `docker-compose` files. Depending on your proxy setup, use the `docker-compose.traefik.yml` including `docker-compose.override.traefik.yml` or the normal `docker-compose.yml` for every other reverse proxy.
If the docker command is not for you or your setup, you can also use one of the provided `docker-compose` files. Depending on your proxy setup, use the [`docker-compose.traefik.yml`](docker-compose.traefik.yml) including [`docker-compose.override.traefik.yml`](docker-compose.override.traefik.yml) or the normal [`docker-compose.yml`](../docker-compose.yml) for every other reverse proxy.
### Build


@ -3,18 +3,19 @@ version: '3'
services:
homeserver:
### If you already built the Conduit image with 'docker build', then you can uncomment the
### 'image' line and comment out the 'build' option.
# image: conduit_homeserver:latest
### If you want meaningful labels in you built Conduit image, you should run docker-compose like this:
### If you already built the Conduit image with 'docker build' or want to use the Docker Hub image,
### then you are ready to go.
image: matrixconduit/matrix-conduit:latest
### If you want to build a fresh image from the sources, then comment the image line and uncomment the
### build lines. If you want meaningful labels in your built Conduit image, you should run docker-compose like this:
### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d
build:
context: .
args:
CREATED:
VERSION:
LOCAL: false
GIT_REF: HEAD
# build:
# context: .
# args:
# CREATED:
# VERSION:
# LOCAL: 'false'
# GIT_REF: HEAD
restart: unless-stopped
volumes:
- db:/srv/conduit/.local/share/conduit


@ -3,13 +3,15 @@ use crate::{ConduitResult, Database, Error, Ruma};
use ruma::api::client::{
error::ErrorKind,
r0::backup::{
add_backup_keys, create_backup, get_backup, get_backup_keys, get_latest_backup,
update_backup,
add_backup_key_session, add_backup_key_sessions, add_backup_keys, create_backup,
delete_backup, delete_backup_key_session, delete_backup_key_sessions, delete_backup_keys,
get_backup, get_backup_key_session, get_backup_key_sessions, get_backup_keys,
get_latest_backup, update_backup,
},
};
#[cfg(feature = "conduit_bin")]
use rocket::{get, post, put};
use rocket::{delete, get, post, put};
#[cfg_attr(
feature = "conduit_bin",
@ -95,7 +97,22 @@ pub fn get_backup_route(
.into())
}
/// Add the received backup_keys to the database.
#[cfg_attr(
feature = "conduit_bin",
delete("/_matrix/client/unstable/room_keys/version/<_>", data = "<body>")
)]
pub fn delete_backup_route(
db: State<'_, Database>,
body: Ruma<delete_backup::Request>,
) -> ConduitResult<delete_backup::Response> {
let sender_id = body.sender_id.as_ref().expect("user is authenticated");
db.key_backups.delete_backup(&sender_id, &body.version)?;
Ok(delete_backup::Response.into())
}
/// Add the received backup keys to the database.
#[cfg_attr(
feature = "conduit_bin",
put("/_matrix/client/unstable/room_keys/keys", data = "<body>")
@ -126,6 +143,62 @@ pub fn add_backup_keys_route(
.into())
}
/// Add the received backup keys to the database.
#[cfg_attr(
feature = "conduit_bin",
put("/_matrix/client/unstable/room_keys/keys/<_>", data = "<body>")
)]
pub fn add_backup_key_sessions_route(
db: State<'_, Database>,
body: Ruma<add_backup_key_sessions::Request>,
) -> ConduitResult<add_backup_key_sessions::Response> {
let sender_id = body.sender_id.as_ref().expect("user is authenticated");
for (session_id, key_data) in &body.sessions {
db.key_backups.add_key(
&sender_id,
&body.version,
&body.room_id,
&session_id,
&key_data,
&db.globals,
)?
}
Ok(add_backup_key_sessions::Response {
count: (db.key_backups.count_keys(sender_id, &body.version)? as u32).into(),
etag: db.key_backups.get_etag(sender_id, &body.version)?,
}
.into())
}
/// Add the received backup key to the database.
#[cfg_attr(
feature = "conduit_bin",
put("/_matrix/client/unstable/room_keys/keys/<_>/<_>", data = "<body>")
)]
pub fn add_backup_key_session_route(
db: State<'_, Database>,
body: Ruma<add_backup_key_session::Request>,
) -> ConduitResult<add_backup_key_session::Response> {
let sender_id = body.sender_id.as_ref().expect("user is authenticated");
db.key_backups.add_key(
&sender_id,
&body.version,
&body.room_id,
&body.session_id,
&body.session_data,
&db.globals,
)?;
Ok(add_backup_key_session::Response {
count: (db.key_backups.count_keys(sender_id, &body.version)? as u32).into(),
etag: db.key_backups.get_etag(sender_id, &body.version)?,
}
.into())
}
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/client/unstable/room_keys/keys", data = "<body>")
@ -140,3 +213,96 @@ pub fn get_backup_keys_route(
Ok(get_backup_keys::Response { rooms }.into())
}
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/client/unstable/room_keys/keys/<_>", data = "<body>")
)]
pub fn get_backup_key_sessions_route(
db: State<'_, Database>,
body: Ruma<get_backup_key_sessions::Request>,
) -> ConduitResult<get_backup_key_sessions::Response> {
let sender_id = body.sender_id.as_ref().expect("user is authenticated");
let sessions = db
.key_backups
.get_room(&sender_id, &body.version, &body.room_id);
Ok(get_backup_key_sessions::Response { sessions }.into())
}
#[cfg_attr(
feature = "conduit_bin",
get("/_matrix/client/unstable/room_keys/keys/<_>/<_>", data = "<body>")
)]
pub fn get_backup_key_session_route(
db: State<'_, Database>,
body: Ruma<get_backup_key_session::Request>,
) -> ConduitResult<get_backup_key_session::Response> {
let sender_id = body.sender_id.as_ref().expect("user is authenticated");
let key_data =
db.key_backups
.get_session(&sender_id, &body.version, &body.room_id, &body.session_id)?;
Ok(get_backup_key_session::Response { key_data }.into())
}
#[cfg_attr(
feature = "conduit_bin",
delete("/_matrix/client/unstable/room_keys/keys", data = "<body>")
)]
pub fn delete_backup_keys_route(
db: State<'_, Database>,
body: Ruma<delete_backup_keys::Request>,
) -> ConduitResult<delete_backup_keys::Response> {
let sender_id = body.sender_id.as_ref().expect("user is authenticated");
db.key_backups.delete_all_keys(&sender_id, &body.version)?;
Ok(delete_backup_keys::Response {
count: (db.key_backups.count_keys(sender_id, &body.version)? as u32).into(),
etag: db.key_backups.get_etag(sender_id, &body.version)?,
}
.into())
}
#[cfg_attr(
feature = "conduit_bin",
delete("/_matrix/client/unstable/room_keys/keys/<_>", data = "<body>")
)]
pub fn delete_backup_key_sessions_route(
db: State<'_, Database>,
body: Ruma<delete_backup_key_sessions::Request>,
) -> ConduitResult<delete_backup_key_sessions::Response> {
let sender_id = body.sender_id.as_ref().expect("user is authenticated");
db.key_backups
.delete_room_keys(&sender_id, &body.version, &body.room_id)?;
Ok(delete_backup_key_sessions::Response {
count: (db.key_backups.count_keys(sender_id, &body.version)? as u32).into(),
etag: db.key_backups.get_etag(sender_id, &body.version)?,
}
.into())
}
#[cfg_attr(
feature = "conduit_bin",
delete("/_matrix/client/unstable/room_keys/keys/<_>/<_>", data = "<body>")
)]
pub fn delete_backup_key_session_route(
db: State<'_, Database>,
body: Ruma<delete_backup_key_session::Request>,
) -> ConduitResult<delete_backup_key_session::Response> {
let sender_id = body.sender_id.as_ref().expect("user is authenticated");
db.key_backups
.delete_room_key(&sender_id, &body.version, &body.room_id, &body.session_id)?;
Ok(delete_backup_key_session::Response {
count: (db.key_backups.count_keys(sender_id, &body.version)? as u32).into(),
etag: db.key_backups.get_etag(sender_id, &body.version)?,
}
.into())
}


@ -1,13 +1,14 @@
use super::State;
use crate::{pdu::PduBuilder, ConduitResult, Database, Error, Ruma};
use crate::{pdu::PduBuilder, utils, ConduitResult, Database, Error, Ruma};
use ruma::{
api::client::{
error::ErrorKind,
r0::message::{get_message_events, send_message_event},
},
events::EventContent,
EventId,
};
use std::convert::TryInto;
use std::convert::{TryFrom, TryInto};
#[cfg(feature = "conduit_bin")]
use rocket::{get, put};
@ -21,6 +22,29 @@ pub fn send_message_event_route(
body: Ruma<send_message_event::Request<'_>>,
) -> ConduitResult<send_message_event::Response> {
let sender_id = body.sender_id.as_ref().expect("user is authenticated");
let device_id = body.device_id.as_ref().expect("user is authenticated");
// Check if this is a new transaction id
if let Some(response) = db
.transaction_ids
.existing_txnid(sender_id, device_id, &body.txn_id)?
{
// The client might have sent a txnid of the /sendToDevice endpoint
// This txnid has no response associated with it
if response.is_empty() {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Tried to use txn id already used for an incompatible endpoint.",
));
}
let event_id = EventId::try_from(
utils::string_from_bytes(&response)
.map_err(|_| Error::bad_database("Invalid txnid bytes in database."))?,
)
.map_err(|_| Error::bad_database("Invalid event id in txnid data."))?;
return Ok(send_message_event::Response { event_id }.into());
}
let mut unsigned = serde_json::Map::new();
unsigned.insert("transaction_id".to_owned(), body.txn_id.clone().into());
@ -45,6 +69,9 @@ pub fn send_message_event_route(
&db.account_data,
)?;
db.transaction_ids
.add_txnid(sender_id, device_id, &body.txn_id, event_id.as_bytes())?;
Ok(send_message_event::Response::new(event_id).into())
}
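
A stripped-down, in-memory sketch of the transaction-id deduplication introduced above (plain `std`, not the sled-backed `TransactionIds` tree added later in this commit; the struct and identifiers here are made up for illustration): the first request with a given `txn_id` records the event id it produced, and a retried request with the same id gets that stored event id back instead of appending a duplicate PDU.

```rust
use std::collections::HashMap;

/// Maps (user, device, txn_id) to the event id that was already created for
/// that transaction. In the sled-backed implementation an empty value marks
/// a /sendToDevice transaction, which has no event id.
struct TransactionCache {
    seen: HashMap<(String, String, String), String>,
}

impl TransactionCache {
    fn new() -> Self {
        Self { seen: HashMap::new() }
    }

    /// Returns the cached event id if this transaction id was already handled.
    fn existing(&self, user: &str, device: &str, txn_id: &str) -> Option<&str> {
        self.seen
            .get(&(user.to_owned(), device.to_owned(), txn_id.to_owned()))
            .map(String::as_str)
    }

    /// Records the outcome of a freshly handled transaction.
    fn record(&mut self, user: &str, device: &str, txn_id: &str, event_id: &str) {
        self.seen.insert(
            (user.to_owned(), device.to_owned(), txn_id.to_owned()),
            event_id.to_owned(),
        );
    }
}

fn main() {
    let mut cache = TransactionCache::new();
    let (user, device, txn) = ("@alice:example.org", "ADEVICE", "txn-1");

    // First attempt: nothing cached, so a new event is created and recorded.
    assert!(cache.existing(user, device, txn).is_none());
    cache.record(user, device, txn, "$first_event_id");

    // Retried request with the same txn_id: the stored event id is returned
    // instead of appending a duplicate PDU.
    assert_eq!(cache.existing(user, device, txn), Some("$first_event_id"));
}
```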


@ -217,11 +217,8 @@ pub fn get_profile_route(
db: State<'_, Database>,
body: Ruma<get_profile::Request<'_>>,
) -> ConduitResult<get_profile::Response> {
let avatar_url = db.users.avatar_url(&body.user_id)?;
let displayname = db.users.displayname(&body.user_id)?;
if avatar_url.is_none() && displayname.is_none() {
// Return 404 if we don't have a profile for this id
if !db.users.exists(&body.user_id)? {
// Return 404 if this user doesn't exist
return Err(Error::BadRequest(
ErrorKind::NotFound,
"Profile was not found.",
@ -229,8 +226,8 @@ pub fn get_profile_route(
}
Ok(get_profile::Response {
avatar_url,
displayname,
avatar_url: db.users.avatar_url(&body.user_id)?,
displayname: db.users.displayname(&body.user_id)?,
}
.into())
}


@ -34,13 +34,14 @@ pub fn set_read_marker_route(
)?;
if let Some(event) = &body.read_receipt {
db.rooms.edus.room_read_set(
db.rooms.edus.private_read_set(
&body.room_id,
&sender_id,
db.rooms.get_pdu_count(event)?.ok_or(Error::BadRequest(
ErrorKind::InvalidParam,
"Event does not exist.",
))?,
&db.globals,
)?;
let mut user_receipts = BTreeMap::new();
@ -58,7 +59,7 @@ pub fn set_read_marker_route(
},
);
db.rooms.edus.roomlatest_update(
db.rooms.edus.readreceipt_update(
&sender_id,
&body.room_id,
AnyEvent::Ephemeral(AnyEphemeralRoomEvent::Receipt(


@ -3,15 +3,15 @@ use crate::{pdu::PduBuilder, ConduitResult, Database, Error, Ruma};
use ruma::{
api::client::{
error::ErrorKind,
r0::room::{self, create_room, get_room_event},
r0::room::{self, create_room, get_room_event, upgrade_room},
},
events::{
room::{guest_access, history_visibility, join_rules, member, name, topic},
EventType,
},
RoomAliasId, RoomId, RoomVersionId,
Raw, RoomAliasId, RoomId, RoomVersionId,
};
use std::{collections::BTreeMap, convert::TryFrom};
use std::{cmp::max, collections::BTreeMap, convert::TryFrom};
#[cfg(feature = "conduit_bin")]
use rocket::{get, post};
@ -332,3 +332,196 @@ pub fn get_room_event_route(
}
.into())
}
#[cfg_attr(
feature = "conduit_bin",
post("/_matrix/client/r0/rooms/<_room_id>/upgrade", data = "<body>")
)]
pub fn upgrade_room_route(
db: State<'_, Database>,
body: Ruma<upgrade_room::Request<'_>>,
_room_id: String,
) -> ConduitResult<upgrade_room::Response> {
let sender_id = body.sender_id.as_ref().expect("user is authenticated");
// Validate the room version requested
let new_version =
RoomVersionId::try_from(body.new_version.clone()).expect("invalid room version id");
if !matches!(
new_version,
RoomVersionId::Version5 | RoomVersionId::Version6
) {
return Err(Error::BadRequest(
ErrorKind::UnsupportedRoomVersion,
"This server does not support that room version.",
));
}
// Create a replacement room
let replacement_room = RoomId::new(db.globals.server_name());
// Send a m.room.tombstone event to the old room to indicate that it is not intended to be used any further
// Fail if the sender does not have the required permissions
let tombstone_event_id = db.rooms.build_and_append_pdu(
PduBuilder {
event_type: EventType::RoomTombstone,
content: serde_json::to_value(ruma::events::room::tombstone::TombstoneEventContent {
body: "This room has been replaced".to_string(),
replacement_room: replacement_room.clone(),
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
sender_id,
&body.room_id,
&db.globals,
&db.account_data,
)?;
// Get the old room federations status
let federate = serde_json::from_value::<Raw<ruma::events::room::create::CreateEventContent>>(
db.rooms
.room_state_get(&body.room_id, &EventType::RoomCreate, "")?
.ok_or_else(|| Error::bad_database("Found room without m.room.create event."))?
.content,
)
.expect("Raw::from_value always works")
.deserialize()
.map_err(|_| Error::bad_database("Invalid room event in database."))?
.federate;
// Use the m.room.tombstone event as the predecessor
let predecessor = Some(ruma::events::room::create::PreviousRoom::new(
body.room_id.clone(),
tombstone_event_id,
));
// Send a m.room.create event containing a predecessor field and the applicable room_version
let mut create_event_content =
ruma::events::room::create::CreateEventContent::new(sender_id.clone());
create_event_content.federate = federate;
create_event_content.room_version = new_version;
create_event_content.predecessor = predecessor;
db.rooms.build_and_append_pdu(
PduBuilder {
event_type: EventType::RoomCreate,
content: serde_json::to_value(create_event_content)
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
sender_id,
&replacement_room,
&db.globals,
&db.account_data,
)?;
// Join the new room
db.rooms.build_and_append_pdu(
PduBuilder {
event_type: EventType::RoomMember,
content: serde_json::to_value(member::MemberEventContent {
membership: member::MembershipState::Join,
displayname: db.users.displayname(&sender_id)?,
avatar_url: db.users.avatar_url(&sender_id)?,
is_direct: None,
third_party_invite: None,
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some(sender_id.to_string()),
redacts: None,
},
sender_id,
&replacement_room,
&db.globals,
&db.account_data,
)?;
// Recommended transferable state events list from the specs
let transferable_state_events = vec![
EventType::RoomServerAcl,
EventType::RoomEncryption,
EventType::RoomName,
EventType::RoomAvatar,
EventType::RoomTopic,
EventType::RoomGuestAccess,
EventType::RoomHistoryVisibility,
EventType::RoomJoinRules,
EventType::RoomPowerLevels,
];
// Replicate transferable state events to the new room
for event_type in transferable_state_events {
let event_content = match db.rooms.room_state_get(&body.room_id, &event_type, "")? {
Some(v) => v.content.clone(),
None => continue, // Skipping missing events.
};
db.rooms.build_and_append_pdu(
PduBuilder {
event_type,
content: event_content,
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
sender_id,
&replacement_room,
&db.globals,
&db.account_data,
)?;
}
// Moves any local aliases to the new room
for alias in db.rooms.room_aliases(&body.room_id).filter_map(|r| r.ok()) {
db.rooms
.set_alias(&alias, Some(&replacement_room), &db.globals)?;
}
// Get the old room power levels
let mut power_levels_event_content =
serde_json::from_value::<Raw<ruma::events::room::power_levels::PowerLevelsEventContent>>(
db.rooms
.room_state_get(&body.room_id, &EventType::RoomPowerLevels, "")?
.ok_or_else(|| Error::bad_database("Found room without m.room.create event."))?
.content,
)
.expect("database contains invalid PDU")
.deserialize()
.map_err(|_| Error::bad_database("Invalid room event in database."))?;
// Setting events_default and invite to the greater of 50 and users_default + 1
let new_level = max(
50.into(),
power_levels_event_content.users_default + 1.into(),
);
power_levels_event_content.events_default = new_level;
power_levels_event_content.invite = new_level;
// Modify the power levels in the old room to prevent sending of events and inviting new users
db.rooms
.build_and_append_pdu(
PduBuilder {
event_type: EventType::RoomPowerLevels,
content: serde_json::to_value(power_levels_event_content)
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
sender_id,
&body.room_id,
&db.globals,
&db.account_data,
)
.ok();
// Return the replacement room id
Ok(upgrade_room::Response { replacement_room }.into())
}
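
For reference, a minimal sketch of the two event contents that tie the old and new room together during an upgrade: the `m.room.tombstone` content appended to the old room and the `m.room.create` content of the replacement room with its `predecessor` field (assuming `serde_json`, which is already used throughout this diff; the identifiers are made up and only show the resulting JSON shape):

```rust
use serde_json::json;

fn main() {
    // Hypothetical identifiers, for illustration only.
    let old_room = "!old:example.org";
    let replacement_room = "!new:example.org";
    let tombstone_event_id = "$tombstone_event_id";

    // Content of the m.room.tombstone event appended to the old room.
    let tombstone_content = json!({
        "body": "This room has been replaced",
        "replacement_room": replacement_room
    });

    // Content of the m.room.create event of the replacement room: it keeps
    // the old room's federation flag, sets the requested room version and
    // points back at the tombstone event through `predecessor`.
    let create_content = json!({
        "creator": "@alice:example.org",
        "m.federate": true,
        "room_version": "6",
        "predecessor": {
            "room_id": old_room,
            "event_id": tombstone_event_id
        }
    });

    println!("{}", tombstone_content);
    println!("{}", create_content);
}
```

In the route above these contents are built with the typed ruma event structs rather than raw JSON; the sketch only illustrates the shape they serialize to.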

View file

@ -81,7 +81,12 @@ pub async fn sync_events_route(
.rev()
.collect::<Vec<_>>();
let send_notification_counts = !timeline_pdus.is_empty();
let send_notification_counts = !timeline_pdus.is_empty()
|| db
.rooms
.edus
.last_privateread_update(&sender_id, &room_id)?
> since;
// The /sync response doesn't always return all messages, so we say the output is
// limited unless there are events in non_timeline_pdus
@ -234,7 +239,7 @@ pub async fn sync_events_route(
};
let notification_count = if send_notification_counts {
if let Some(last_read) = db.rooms.edus.room_read_get(&room_id, &sender_id)? {
if let Some(last_read) = db.rooms.edus.private_read_get(&room_id, &sender_id)? {
Some(
(db.rooms
.pdus_since(&sender_id, &room_id, last_read)?
@ -272,20 +277,15 @@ pub async fn sync_events_route(
let mut edus = db
.rooms
.edus
.roomlatests_since(&room_id, since)?
.readreceipts_since(&room_id, since)?
.filter_map(|r| r.ok()) // Filter out buggy events
.collect::<Vec<_>>();
if db
.rooms
.edus
.last_roomactive_update(&room_id, &db.globals)?
> since
{
if db.rooms.edus.last_typing_update(&room_id, &db.globals)? > since {
edus.push(
serde_json::from_str(
&serde_json::to_string(&AnySyncEphemeralRoomEvent::Typing(
db.rooms.edus.roomactives_all(&room_id)?,
db.rooms.edus.typings_all(&room_id)?,
))
.expect("event is valid, we just created it"),
)


@ -17,6 +17,16 @@ pub fn send_event_to_device_route(
body: Ruma<send_event_to_device::Request<'_>>,
) -> ConduitResult<send_event_to_device::Response> {
let sender_id = body.sender_id.as_ref().expect("user is authenticated");
let device_id = body.device_id.as_ref().expect("user is authenticated");
// Check if this is a new transaction id
if db
.transaction_ids
.existing_txnid(sender_id, device_id, &body.txn_id)?
.is_some()
{
return Ok(send_event_to_device::Response.into());
}
for (target_user_id, map) in &body.messages {
for (target_device_id_maybe, event) in map {
@ -52,5 +62,9 @@ pub fn send_event_to_device_route(
}
}
// Save transaction id with empty data
db.transaction_ids
.add_txnid(sender_id, device_id, &body.txn_id, &[])?;
Ok(send_event_to_device::Response.into())
}


@ -17,7 +17,7 @@ pub fn create_typing_event_route(
let sender_id = body.sender_id.as_ref().expect("user is authenticated");
if let Typing::Yes(duration) = body.state {
db.rooms.edus.roomactive_add(
db.rooms.edus.typing_add(
&sender_id,
&body.room_id,
duration.as_millis() as u64 + utils::millis_since_unix_epoch(),
@ -26,7 +26,7 @@ pub fn create_typing_event_route(
} else {
db.rooms
.edus
.roomactive_remove(&sender_id, &body.room_id, &db.globals)?;
.typing_remove(&sender_id, &body.room_id, &db.globals)?;
}
Ok(create_typing_event::Response.into())


@ -3,6 +3,7 @@ pub mod globals;
pub mod key_backups;
pub mod media;
pub mod rooms;
pub mod transaction_ids;
pub mod uiaa;
pub mod users;
@ -23,6 +24,7 @@ pub struct Database {
pub account_data: account_data::AccountData,
pub media: media::Media,
pub key_backups: key_backups::KeyBackups,
pub transaction_ids: transaction_ids::TransactionIds,
pub _db: sled::Db,
}
@ -88,10 +90,12 @@ impl Database {
},
rooms: rooms::Rooms {
edus: rooms::RoomEdus {
roomuserid_lastread: db.open_tree("roomuserid_lastread")?, // "Private" read receipt
roomlatestid_roomlatest: db.open_tree("roomlatestid_roomlatest")?, // Read receipts
roomactiveid_userid: db.open_tree("roomactiveid_userid")?, // Typing notifs
roomid_lastroomactiveupdate: db.open_tree("roomid_lastroomactiveupdate")?,
readreceiptid_readreceipt: db.open_tree("readreceiptid_readreceipt")?,
roomuserid_privateread: db.open_tree("roomuserid_privateread")?, // "Private" read receipt
roomuserid_lastprivatereadupdate: db
.open_tree("roomid_lastprivatereadupdate")?,
typingid_userid: db.open_tree("typingid_userid")?,
roomid_lasttypingupdate: db.open_tree("roomid_lasttypingupdate")?,
presenceid_presence: db.open_tree("presenceid_presence")?,
userid_lastpresenceupdate: db.open_tree("userid_lastpresenceupdate")?,
},
@ -107,6 +111,7 @@ impl Database {
userroomid_joined: db.open_tree("userroomid_joined")?,
roomuserid_joined: db.open_tree("roomuserid_joined")?,
roomuseroncejoinedids: db.open_tree("roomuseroncejoinedids")?,
userroomid_invited: db.open_tree("userroomid_invited")?,
roomuserid_invited: db.open_tree("roomuserid_invited")?,
userroomid_left: db.open_tree("userroomid_left")?,
@ -126,6 +131,9 @@ impl Database {
backupid_etag: db.open_tree("backupid_etag")?,
backupkeyid_backup: db.open_tree("backupkeyid_backupmetadata")?,
},
transaction_ids: transaction_ids::TransactionIds {
userdevicetxnid_response: db.open_tree("userdevicetxnid_response")?,
},
_db: db,
})
}
@ -166,14 +174,14 @@ impl Database {
futures.push(
self.rooms
.edus
.roomid_lastroomactiveupdate
.roomid_lasttypingupdate
.watch_prefix(&roomid_bytes),
);
futures.push(
self.rooms
.edus
.roomlatestid_roomlatest
.readreceiptid_readreceipt
.watch_prefix(&roomid_prefix),
);


@ -37,6 +37,28 @@ impl KeyBackups {
Ok(version)
}
pub fn delete_backup(&self, user_id: &UserId, version: &str) -> Result<()> {
let mut key = user_id.to_string().as_bytes().to_vec();
key.push(0xff);
key.extend_from_slice(&version.as_bytes());
self.backupid_algorithm.remove(&key)?;
self.backupid_etag.remove(&key)?;
key.push(0xff);
for outdated_key in self
.backupkeyid_backup
.scan_prefix(&key)
.keys()
.filter_map(|r| r.ok())
{
self.backupkeyid_backup.remove(outdated_key)?;
}
Ok(())
}
pub fn update_backup(
&self,
user_id: &UserId,
@ -163,6 +185,7 @@ impl KeyBackups {
let mut prefix = user_id.to_string().as_bytes().to_vec();
prefix.push(0xff);
prefix.extend_from_slice(version.as_bytes());
prefix.push(0xff);
let mut rooms = BTreeMap::<RoomId, Sessions>::new();
@ -204,4 +227,135 @@ impl KeyBackups {
Ok(rooms)
}
pub fn get_room(
&self,
user_id: &UserId,
version: &str,
room_id: &RoomId,
) -> BTreeMap<String, KeyData> {
let mut prefix = user_id.to_string().as_bytes().to_vec();
prefix.push(0xff);
prefix.extend_from_slice(version.as_bytes());
prefix.push(0xff);
prefix.extend_from_slice(room_id.as_bytes());
prefix.push(0xff);
self.backupkeyid_backup
.scan_prefix(&prefix)
.map(|r| {
let (key, value) = r?;
let mut parts = key.rsplit(|&b| b == 0xff);
let session_id =
utils::string_from_bytes(&parts.next().ok_or_else(|| {
Error::bad_database("backupkeyid_backup key is invalid.")
})?)
.map_err(|_| {
Error::bad_database("backupkeyid_backup session_id is invalid.")
})?;
let key_data = serde_json::from_slice(&value).map_err(|_| {
Error::bad_database("KeyData in backupkeyid_backup is invalid.")
})?;
Ok::<_, Error>((session_id, key_data))
})
.filter_map(|r| r.ok())
.collect()
}
pub fn get_session(
&self,
user_id: &UserId,
version: &str,
room_id: &RoomId,
session_id: &str,
) -> Result<Option<KeyData>> {
let mut key = user_id.to_string().as_bytes().to_vec();
key.push(0xff);
key.extend_from_slice(version.as_bytes());
key.push(0xff);
key.extend_from_slice(room_id.as_bytes());
key.push(0xff);
key.extend_from_slice(session_id.as_bytes());
self.backupkeyid_backup
.get(&key)?
.map(|value| {
serde_json::from_slice(&value)
.map_err(|_| Error::bad_database("KeyData in backupkeyid_backup is invalid."))
})
.transpose()
}
pub fn delete_all_keys(&self, user_id: &UserId, version: &str) -> Result<()> {
let mut key = user_id.to_string().as_bytes().to_vec();
key.push(0xff);
key.extend_from_slice(&version.as_bytes());
key.push(0xff);
for outdated_key in self
.backupkeyid_backup
.scan_prefix(&key)
.keys()
.filter_map(|r| r.ok())
{
self.backupkeyid_backup.remove(outdated_key)?;
}
Ok(())
}
pub fn delete_room_keys(
&self,
user_id: &UserId,
version: &str,
room_id: &RoomId,
) -> Result<()> {
let mut key = user_id.to_string().as_bytes().to_vec();
key.push(0xff);
key.extend_from_slice(&version.as_bytes());
key.push(0xff);
key.extend_from_slice(&room_id.as_bytes());
key.push(0xff);
for outdated_key in self
.backupkeyid_backup
.scan_prefix(&key)
.keys()
.filter_map(|r| r.ok())
{
self.backupkeyid_backup.remove(outdated_key)?;
}
Ok(())
}
pub fn delete_room_key(
&self,
user_id: &UserId,
version: &str,
room_id: &RoomId,
session_id: &str,
) -> Result<()> {
let mut key = user_id.to_string().as_bytes().to_vec();
key.push(0xff);
key.extend_from_slice(&version.as_bytes());
key.push(0xff);
key.extend_from_slice(&room_id.as_bytes());
key.push(0xff);
key.extend_from_slice(&session_id.as_bytes());
for outdated_key in self
.backupkeyid_backup
.scan_prefix(&key)
.keys()
.filter_map(|r| r.ok())
{
self.backupkeyid_backup.remove(outdated_key)?;
}
Ok(())
}
}
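
All of the lookups and deletions above rely on the same key convention: the parts of a key (user id, backup version, room id, session id) are joined with a `0xff` separator, so removing or listing a whole backup, room or session becomes a prefix scan. A standalone sketch of that convention (plain `std`, no sled; the helper name is made up):

```rust
/// Joins key parts with the 0xff separator used throughout the database
/// layer. 0xff is never a valid byte in UTF-8 strings, so it cannot collide
/// with any Matrix identifier. (Hypothetical helper, for illustration only.)
fn make_key(parts: &[&str]) -> Vec<u8> {
    let mut key = Vec::new();
    for (i, part) in parts.iter().enumerate() {
        if i > 0 {
            key.push(0xff);
        }
        key.extend_from_slice(part.as_bytes());
    }
    key
}

fn main() {
    let session_key = make_key(&[
        "@alice:example.org", // user id
        "backup-version-1",   // backup version
        "!room:example.org",  // room id
        "session-id",         // megolm session id
    ]);

    // A prefix built from the leading parts, terminated with 0xff, matches
    // every key stored below it; this is what the scan_prefix calls in
    // delete_all_keys / delete_room_keys / get_room exploit.
    let mut room_prefix = make_key(&["@alice:example.org", "backup-version-1", "!room:example.org"]);
    room_prefix.push(0xff);

    assert!(session_key.starts_with(&room_prefix));
}
```

The trailing `0xff` pushed onto the prefix is also why `get_keys` above gained an extra `prefix.push(0xff)`: it prevents one version or room id from matching another that merely starts with the same bytes.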


@ -12,7 +12,6 @@ use ruma::{
room::{
member,
power_levels::{self, PowerLevelsEventContent},
redaction,
},
EventType,
},
@ -47,6 +46,7 @@ pub struct Rooms {
pub(super) userroomid_joined: sled::Tree,
pub(super) roomuserid_joined: sled::Tree,
pub(super) roomuseroncejoinedids: sled::Tree,
pub(super) userroomid_invited: sled::Tree,
pub(super) roomuserid_invited: sled::Tree,
pub(super) userroomid_left: sled::Tree,
@ -477,30 +477,16 @@ impl Rooms {
self.append_to_state(&pdu_id, &pdu)?;
}
match &pdu.kind {
match pdu.kind {
EventType::RoomRedaction => {
if let Some(redact_id) = &pdu.redacts {
// TODO: Reason
let _reason = serde_json::from_value::<Raw<redaction::RedactionEventContent>>(
pdu.content,
)
.expect("Raw::from_value always works.")
.deserialize()
.map_err(|_| {
Error::BadRequest(
ErrorKind::InvalidParam,
"Invalid redaction event content.",
)
})?
.reason;
self.redact_pdu(&redact_id)?;
self.redact_pdu(&redact_id, &pdu)?;
}
}
EventType::RoomMember => {
if let Some(state_key) = pdu.state_key.as_ref() {
if let Some(state_key) = pdu.state_key {
// if the state_key fails
let target_user_id = UserId::try_from(state_key.as_str())
let target_user_id = UserId::try_from(state_key)
.expect("This state_key was previously validated");
// Update our membership info. We do this here in case a user is invited
// and immediately leaves; we need the DB to record the invite event for auth
@ -527,7 +513,7 @@ impl Rooms {
.split_terminator(|c: char| !c.is_alphanumeric())
.map(str::to_lowercase)
{
let mut key = pdu.room_id.as_bytes().to_vec();
let mut key = pdu.room_id.to_string().as_bytes().to_vec();
key.push(0xff);
key.extend_from_slice(word.as_bytes());
key.push(0xff);
@ -538,7 +524,9 @@ impl Rooms {
}
_ => {}
}
self.edus.room_read_set(&pdu.room_id, &pdu.sender, index)?;
self.edus
.private_read_set(&pdu.room_id, &pdu.sender, index, &globals)?;
Ok(pdu.event_id)
}
@ -922,12 +910,12 @@ impl Rooms {
}
/// Replace a PDU with the redacted form.
pub fn redact_pdu(&self, event_id: &EventId) -> Result<()> {
pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> {
if let Some(pdu_id) = self.get_pdu_id(event_id)? {
let mut pdu = self
.get_pdu_from_id(&pdu_id)?
.ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?;
pdu.redact()?;
pdu.redact(&reason)?;
self.replace_pdu(&pdu_id, &pdu)?;
Ok(())
} else {
@ -959,6 +947,91 @@ impl Rooms {
match &membership {
member::MembershipState::Join => {
// Check if the user never joined this room
if !self.once_joined(&user_id, &room_id)? {
// Add the user ID to the join list then
self.roomuseroncejoinedids.insert(&userroom_id, &[])?;
// Check if the room has a predecessor
if let Some(predecessor) = serde_json::from_value::<
Raw<ruma::events::room::create::CreateEventContent>,
>(
self.room_state_get(&room_id, &EventType::RoomCreate, "")?
.ok_or_else(|| {
Error::bad_database("Found room without m.room.create event.")
})?
.content,
)
.expect("Raw::from_value always works")
.deserialize()
.map_err(|_| Error::bad_database("Invalid room event in database."))?
.predecessor
{
// Copy user settings from predecessor to the current room:
// - Push rules
//
// TODO: finish this once push rules are implemented.
//
// let mut push_rules_event_content = account_data
// .get::<ruma::events::push_rules::PushRulesEvent>(
// None,
// user_id,
// EventType::PushRules,
// )?;
//
// NOTE: find where `predecessor.room_id` match
// and update to `room_id`.
//
// account_data
// .update(
// None,
// user_id,
// EventType::PushRules,
// &push_rules_event_content,
// globals,
// )
// .ok();
// Copy old tags to new room
if let Some(tag_event) = account_data.get::<ruma::events::tag::TagEvent>(
Some(&predecessor.room_id),
user_id,
EventType::Tag,
)? {
account_data
.update(Some(room_id), user_id, EventType::Tag, &tag_event, globals)
.ok();
};
// Copy direct chat flag
if let Some(mut direct_event) = account_data
.get::<ruma::events::direct::DirectEvent>(
None,
user_id,
EventType::Direct,
)? {
let mut room_ids_updated = false;
for room_ids in direct_event.content.0.values_mut() {
if room_ids.iter().any(|r| r == &predecessor.room_id) {
room_ids.push(room_id.clone());
room_ids_updated = true;
}
}
if room_ids_updated {
account_data.update(
None,
user_id,
EventType::Direct,
&direct_event,
globals,
)?;
}
};
}
}
self.userroomid_joined.insert(&userroom_id, &[])?;
self.roomuserid_joined.insert(&roomuser_id, &[])?;
self.userroomid_invited.remove(&userroom_id)?;
@ -1218,6 +1291,27 @@ impl Rooms {
})
}
/// Returns an iterator over all User IDs who ever joined a room.
pub fn room_useroncejoined(&self, room_id: &RoomId) -> impl Iterator<Item = Result<UserId>> {
self.roomuseroncejoinedids
.scan_prefix(room_id.to_string())
.keys()
.map(|key| {
Ok(UserId::try_from(
utils::string_from_bytes(
&key?
.rsplit(|&b| b == 0xff)
.next()
.expect("rsplit always returns an element"),
)
.map_err(|_| {
Error::bad_database("User ID in room_useroncejoined is invalid unicode.")
})?,
)
.map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid."))?)
})
}
/// Returns an iterator over all invited members of a room.
pub fn room_members_invited(&self, room_id: &RoomId) -> impl Iterator<Item = Result<UserId>> {
self.roomuserid_invited
@ -1302,6 +1396,14 @@ impl Rooms {
})
}
pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<bool> {
let mut userroom_id = user_id.to_string().as_bytes().to_vec();
userroom_id.push(0xff);
userroom_id.extend_from_slice(room_id.to_string().as_bytes());
Ok(self.roomuseroncejoinedids.get(userroom_id)?.is_some())
}
pub fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<bool> {
let mut userroom_id = user_id.as_bytes().to_vec();
userroom_id.push(0xff);


@ -14,17 +14,18 @@ use std::{
};
pub struct RoomEdus {
pub(in super::super) roomuserid_lastread: sled::Tree, // RoomUserId = Room + User
pub(in super::super) roomlatestid_roomlatest: sled::Tree, // Read Receipts, RoomLatestId = RoomId + Count + UserId
pub(in super::super) roomactiveid_userid: sled::Tree, // Typing, RoomActiveId = RoomId + TimeoutTime + Count
pub(in super::super) roomid_lastroomactiveupdate: sled::Tree, // LastRoomActiveUpdate = Count
pub(in super::super) readreceiptid_readreceipt: sled::Tree, // ReadReceiptId = RoomId + Count + UserId
pub(in super::super) roomuserid_privateread: sled::Tree, // RoomUserId = Room + User, PrivateRead = Count
pub(in super::super) roomuserid_lastprivatereadupdate: sled::Tree, // LastPrivateReadUpdate = Count
pub(in super::super) typingid_userid: sled::Tree, // TypingId = RoomId + TimeoutTime + Count
pub(in super::super) roomid_lasttypingupdate: sled::Tree, // LastRoomTypingUpdate = Count
pub(in super::super) presenceid_presence: sled::Tree, // PresenceId = RoomId + Count + UserId
pub(in super::super) userid_lastpresenceupdate: sled::Tree, // LastPresenceUpdate = Count
}
impl RoomEdus {
/// Adds an event which will be saved until a new event replaces it (e.g. read receipt).
pub fn roomlatest_update(
pub fn readreceipt_update(
&self,
user_id: &UserId,
room_id: &RoomId,
@ -36,7 +37,7 @@ impl RoomEdus {
// Remove old entry
if let Some(old) = self
.roomlatestid_roomlatest
.readreceiptid_readreceipt
.scan_prefix(&prefix)
.keys()
.rev()
@ -50,7 +51,7 @@ impl RoomEdus {
})
{
// This is the old room_latest
self.roomlatestid_roomlatest.remove(old)?;
self.readreceiptid_readreceipt.remove(old)?;
}
let mut room_latest_id = prefix;
@ -58,7 +59,7 @@ impl RoomEdus {
room_latest_id.push(0xff);
room_latest_id.extend_from_slice(&user_id.to_string().as_bytes());
self.roomlatestid_roomlatest.insert(
self.readreceiptid_readreceipt.insert(
room_latest_id,
&*serde_json::to_string(&event).expect("EduEvent::to_string always works"),
)?;
@ -67,7 +68,7 @@ impl RoomEdus {
}
/// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`.
pub fn roomlatests_since(
pub fn readreceipts_since(
&self,
room_id: &RoomId,
since: u64,
@ -79,7 +80,7 @@ impl RoomEdus {
first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since
Ok(self
.roomlatestid_roomlatest
.readreceiptid_readreceipt
.range(&*first_possible_edu..)
.filter_map(|r| r.ok())
.take_while(move |(k, _)| k.starts_with(&prefix))
@ -90,9 +91,60 @@ impl RoomEdus {
}))
}
/// Sets a user as typing until the timeout timestamp is reached or roomactive_remove is
/// Sets a private read marker at `count`.
pub fn private_read_set(
&self,
room_id: &RoomId,
user_id: &UserId,
count: u64,
globals: &super::super::globals::Globals,
) -> Result<()> {
let mut key = room_id.to_string().as_bytes().to_vec();
key.push(0xff);
key.extend_from_slice(&user_id.to_string().as_bytes());
self.roomuserid_privateread
.insert(&key, &count.to_be_bytes())?;
self.roomuserid_lastprivatereadupdate
.insert(&key, &globals.next_count()?.to_be_bytes())?;
Ok(())
}
/// Returns the private read marker.
pub fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result<Option<u64>> {
let mut key = room_id.to_string().as_bytes().to_vec();
key.push(0xff);
key.extend_from_slice(&user_id.to_string().as_bytes());
self.roomuserid_privateread.get(key)?.map_or(Ok(None), |v| {
Ok(Some(utils::u64_from_bytes(&v).map_err(|_| {
Error::bad_database("Invalid private read marker bytes")
})?))
})
}
/// Returns the count of the last typing update in this room.
pub fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result<u64> {
let mut key = room_id.to_string().as_bytes().to_vec();
key.push(0xff);
key.extend_from_slice(&user_id.to_string().as_bytes());
Ok(self
.roomuserid_lastprivatereadupdate
.get(&key)?
.map_or(Ok::<_, Error>(None), |bytes| {
Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| {
Error::bad_database("Count in roomuserid_lastprivatereadupdate is invalid.")
})?))
})?
.unwrap_or(0))
}
/// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is
/// called.
pub fn roomactive_add(
pub fn typing_add(
&self,
user_id: &UserId,
room_id: &RoomId,
@ -104,22 +156,22 @@ impl RoomEdus {
let count = globals.next_count()?.to_be_bytes();
let mut room_active_id = prefix;
room_active_id.extend_from_slice(&timeout.to_be_bytes());
room_active_id.push(0xff);
room_active_id.extend_from_slice(&count);
let mut room_typing_id = prefix;
room_typing_id.extend_from_slice(&timeout.to_be_bytes());
room_typing_id.push(0xff);
room_typing_id.extend_from_slice(&count);
self.roomactiveid_userid
.insert(&room_active_id, &*user_id.to_string().as_bytes())?;
self.typingid_userid
.insert(&room_typing_id, &*user_id.to_string().as_bytes())?;
self.roomid_lastroomactiveupdate
self.roomid_lasttypingupdate
.insert(&room_id.to_string().as_bytes(), &count)?;
Ok(())
}
/// Removes a user from typing before the timeout is reached.
pub fn roomactive_remove(
pub fn typing_remove(
&self,
user_id: &UserId,
room_id: &RoomId,
@ -132,19 +184,19 @@ impl RoomEdus {
let mut found_outdated = false;
// Maybe there are multiple ones from calling roomactive_add multiple times
// Maybe there are multiple ones from calling roomtyping_add multiple times
for outdated_edu in self
.roomactiveid_userid
.typingid_userid
.scan_prefix(&prefix)
.filter_map(|r| r.ok())
.filter(|(_, v)| v == user_id.as_bytes())
{
self.roomactiveid_userid.remove(outdated_edu.0)?;
self.typingid_userid.remove(outdated_edu.0)?;
found_outdated = true;
}
if found_outdated {
self.roomid_lastroomactiveupdate.insert(
self.roomid_lasttypingupdate.insert(
&room_id.to_string().as_bytes(),
&globals.next_count()?.to_be_bytes(),
)?;
@ -154,7 +206,7 @@ impl RoomEdus {
}
/// Makes sure that typing events with old timestamps get removed.
fn roomactives_maintain(
fn typings_maintain(
&self,
room_id: &RoomId,
globals: &super::super::globals::Globals,
@ -168,7 +220,7 @@ impl RoomEdus {
// Find all outdated edus before inserting a new one
for outdated_edu in self
.roomactiveid_userid
.typingid_userid
.scan_prefix(&prefix)
.keys()
.map(|key| {
@ -176,21 +228,21 @@ impl RoomEdus {
Ok::<_, Error>((
key.clone(),
utils::u64_from_bytes(key.split(|&b| b == 0xff).nth(1).ok_or_else(|| {
Error::bad_database("RoomActive has invalid timestamp or delimiters.")
Error::bad_database("RoomTyping has invalid timestamp or delimiters.")
})?)
.map_err(|_| Error::bad_database("RoomActive has invalid timestamp bytes."))?,
.map_err(|_| Error::bad_database("RoomTyping has invalid timestamp bytes."))?,
))
})
.filter_map(|r| r.ok())
.take_while(|&(_, timestamp)| timestamp < current_timestamp)
{
// This is an outdated edu (time > timestamp)
self.roomactiveid_userid.remove(outdated_edu.0)?;
self.typingid_userid.remove(outdated_edu.0)?;
found_outdated = true;
}
if found_outdated {
self.roomid_lastroomactiveupdate.insert(
self.roomid_lasttypingupdate.insert(
&room_id.to_string().as_bytes(),
&globals.next_count()?.to_be_bytes(),
)?;
@ -199,16 +251,16 @@ impl RoomEdus {
Ok(())
}
/// Returns an iterator over all active events (e.g. typing notifications).
pub fn last_roomactive_update(
/// Returns the count of the last typing update in this room.
pub fn last_typing_update(
&self,
room_id: &RoomId,
globals: &super::super::globals::Globals,
) -> Result<u64> {
self.roomactives_maintain(room_id, globals)?;
self.typings_maintain(room_id, globals)?;
Ok(self
.roomid_lastroomactiveupdate
.roomid_lasttypingupdate
.get(&room_id.to_string().as_bytes())?
.map_or(Ok::<_, Error>(None), |bytes| {
Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| {
@ -218,7 +270,7 @@ impl RoomEdus {
.unwrap_or(0))
}
pub fn roomactives_all(
pub fn typings_all(
&self,
room_id: &RoomId,
) -> Result<SyncEphemeralRoomEvent<ruma::events::typing::TypingEventContent>> {
@ -228,17 +280,15 @@ impl RoomEdus {
let mut user_ids = Vec::new();
for user_id in self
.roomactiveid_userid
.typingid_userid
.scan_prefix(prefix)
.values()
.map(|user_id| {
Ok::<_, Error>(
UserId::try_from(utils::string_from_bytes(&user_id?).map_err(|_| {
Error::bad_database("User ID in roomactiveid_userid is invalid unicode.")
Error::bad_database("User ID in typingid_userid is invalid unicode.")
})?)
.map_err(|_| {
Error::bad_database("User ID in roomactiveid_userid is invalid.")
})?,
.map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?,
)
})
{
@ -250,30 +300,6 @@ impl RoomEdus {
})
}
/// Sets a private read marker at `count`.
pub fn room_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result<()> {
let mut key = room_id.to_string().as_bytes().to_vec();
key.push(0xff);
key.extend_from_slice(&user_id.to_string().as_bytes());
self.roomuserid_lastread.insert(key, &count.to_be_bytes())?;
Ok(())
}
/// Returns the private read marker.
pub fn room_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result<Option<u64>> {
let mut key = room_id.to_string().as_bytes().to_vec();
key.push(0xff);
key.extend_from_slice(&user_id.to_string().as_bytes());
self.roomuserid_lastread.get(key)?.map_or(Ok(None), |v| {
Ok(Some(utils::u64_from_bytes(&v).map_err(|_| {
Error::bad_database("Invalid private read marker bytes")
})?))
})
}
/// Adds a presence event which will be saved until a new event replaces it.
///
/// Note: This method takes a RoomId because presence updates are always bound to rooms to
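
The timeout-based cleanup behind the renamed typing bookkeeping above (`typing_add`, `typings_maintain`, `typings_all`) can be pictured with a small in-memory sketch (plain `std`, not the sled trees; the struct and timestamps are made up): entries are keyed by their timeout timestamp, and maintenance drops everything that has already expired before the typing list is read.

```rust
use std::collections::BTreeMap;

/// In-memory stand-in for the `typingid_userid` tree: typing notifications
/// keyed by (timeout timestamp, global count), valued by the typing user.
struct TypingEdus {
    entries: BTreeMap<(u64, u64), String>,
    next_count: u64,
}

impl TypingEdus {
    fn new() -> Self {
        Self { entries: BTreeMap::new(), next_count: 0 }
    }

    /// Marks `user` as typing until `timeout` (milliseconds since the epoch).
    fn typing_add(&mut self, user: &str, timeout: u64) {
        let count = self.next_count;
        self.next_count += 1;
        self.entries.insert((timeout, count), user.to_owned());
    }

    /// Drops every entry whose timeout lies in the past, mirroring what
    /// `typings_maintain` does before the typing list is read.
    fn typings_maintain(&mut self, now: u64) {
        self.entries.retain(|&(timeout, _), _| timeout >= now);
    }

    /// Returns the users that are still typing.
    fn typings_all(&self) -> Vec<&str> {
        self.entries.values().map(String::as_str).collect()
    }
}

fn main() {
    let mut edus = TypingEdus::new();
    edus.typing_add("@alice:example.org", 10_000); // expires at t = 10s
    edus.typing_add("@bob:example.org", 60_000);   // expires at t = 60s

    edus.typings_maintain(30_000); // current time: t = 30s
    assert_eq!(edus.typings_all(), vec!["@bob:example.org"]);
}
```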


@ -0,0 +1,43 @@
use crate::Result;
use ruma::{DeviceId, UserId};
use sled::IVec;
pub struct TransactionIds {
pub(super) userdevicetxnid_response: sled::Tree, // Response can be empty (/sendToDevice) or the event id (/send)
}
impl TransactionIds {
pub fn add_txnid(
&self,
user_id: &UserId,
device_id: &DeviceId,
txn_id: &str,
data: &[u8],
) -> Result<()> {
let mut key = user_id.as_bytes().to_vec();
key.push(0xff);
key.extend_from_slice(device_id.as_bytes());
key.push(0xff);
key.extend_from_slice(txn_id.as_bytes());
self.userdevicetxnid_response.insert(key, data)?;
Ok(())
}
pub fn existing_txnid(
&self,
user_id: &UserId,
device_id: &DeviceId,
txn_id: &str,
) -> Result<Option<IVec>> {
let mut key = user_id.as_bytes().to_vec();
key.push(0xff);
key.extend_from_slice(device_id.as_bytes());
key.push(0xff);
key.extend_from_slice(txn_id.as_bytes());
// If there's no entry, this is a new transaction
Ok(self.userdevicetxnid_response.get(key)?)
}
}


@ -53,9 +53,16 @@ fn setup_rocket() -> rocket::Rocket {
client_server::claim_keys_route,
client_server::create_backup_route,
client_server::update_backup_route,
client_server::delete_backup_route,
client_server::get_latest_backup_route,
client_server::get_backup_route,
client_server::add_backup_key_sessions_route,
client_server::add_backup_keys_route,
client_server::delete_backup_key_session_route,
client_server::delete_backup_key_sessions_route,
client_server::delete_backup_keys_route,
client_server::get_backup_key_session_route,
client_server::get_backup_key_sessions_route,
client_server::get_backup_keys_route,
client_server::set_read_marker_route,
client_server::create_typing_event_route,
@ -111,6 +118,7 @@ fn setup_rocket() -> rocket::Rocket {
client_server::get_key_changes_route,
client_server::get_pushers_route,
client_server::set_pushers_route,
client_server::upgrade_room_route,
server_server::well_known_server,
server_server::get_server_version,
server_server::get_server_keys,


@ -34,7 +34,7 @@ pub struct PduEvent {
}
impl PduEvent {
pub fn redact(&mut self) -> Result<()> {
pub fn redact(&mut self, reason: &PduEvent) -> Result<()> {
self.unsigned.clear();
let allowed: &[&str] = match self.kind {
@ -70,7 +70,9 @@ impl PduEvent {
self.unsigned.insert(
"redacted_because".to_owned(),
json!({"content": {}, "type": "m.room.redaction"}),
serde_json::to_string(reason)
.expect("PduEvent::to_string always works")
.into(),
);
self.content = new_content.into();


@ -17,6 +17,7 @@ Can invite users to invite-only rooms
Can list tags for a room
Can logout all devices
Can logout current device
Can re-join room if re-invited
Can read configuration endpoint
Can recv a device message using /sync
Can recv device messages until they are acknowledged
@ -37,6 +38,7 @@ Current state appears in timeline in private history with many messages before
Deleted tags appear in an incremental v2 /sync
Deleting a non-existent alias should return a 404
Device messages wake up /sync
Device messages with the same txn_id are deduplicated
Events come down the correct room
GET /device/{deviceId}
GET /device/{deviceId} gives a 404 for unknown devices
@ -87,6 +89,7 @@ POST /rooms/:room_id/join can join a room
POST /rooms/:room_id/leave can leave a room
POST /rooms/:room_id/state/m.room.name sets name
POST /rooms/:room_id/state/m.room.topic sets topic
POST /rooms/:room_id/upgrade can upgrade a room version
POSTed media can be thumbnailed
PUT /device/{deviceId} gives a 404 for unknown devices
PUT /device/{deviceId} updates device fields
@ -113,7 +116,6 @@ Typing events appear in incremental sync
Typing events appear in initial sync
Uninvited users cannot join the room
User appears in user directory
User directory correctly update on display name change
User in dir while user still shares private rooms
User in shared private room does appear in user directory
User is offline if they set_presence=offline in their sync