pub mod abstraction;

pub mod account_data;
pub mod admin;
pub mod appservice;
pub mod globals;
pub mod key_backups;
pub mod media;
pub mod proxy;
pub mod pusher;
pub mod rooms;
pub mod sending;
pub mod transaction_ids;
pub mod uiaa;
pub mod users;

use crate::{utils, Error, Result};
use abstraction::DatabaseEngine;
use directories::ProjectDirs;
use log::error;
use lru_cache::LruCache;
use rocket::{
    futures::{channel::mpsc, stream::FuturesUnordered, StreamExt},
    outcome::IntoOutcome,
    request::{FromRequest, Request},
    try_outcome, State,
};
use ruma::{DeviceId, ServerName, UserId};
use serde::{de::IgnoredAny, Deserialize};
use std::{
    collections::{BTreeMap, HashMap},
    fs::{self, remove_dir_all},
    io::Write,
    ops::Deref,
    path::Path,
    sync::{Arc, RwLock},
};
use tokio::sync::{OwnedRwLockReadGuard, RwLock as TokioRwLock, Semaphore};

use self::proxy::ProxyConfig;

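/// Runtime configuration, deserialized from the server's config via serde.
/// Only `server_name` and `database_path` have no default and must always be
/// provided; the remaining fields fall back to the `default_*` helpers below
/// (or to `None`/`Default`). Unknown keys are collected into `catchall` via
/// `#[serde(flatten)]` so that `warn_deprecated` can report parameters that
/// are no longer used.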
#[derive(Clone, Debug, Deserialize)]
pub struct Config {
    server_name: Box<ServerName>,
    database_path: String,
    #[serde(default = "default_db_cache_capacity_mb")]
    db_cache_capacity_mb: f64,
    #[serde(default = "default_sqlite_read_pool_size")]
    sqlite_read_pool_size: usize,
    #[serde(default = "true_fn")]
    sqlite_wal_clean_timer: bool,
    #[serde(default = "default_sqlite_wal_clean_second_interval")]
    sqlite_wal_clean_second_interval: u32,
    #[serde(default = "default_sqlite_wal_clean_second_timeout")]
    sqlite_wal_clean_second_timeout: u32,
    #[serde(default = "default_max_request_size")]
    max_request_size: u32,
    #[serde(default = "default_max_concurrent_requests")]
    max_concurrent_requests: u16,
    #[serde(default = "true_fn")]
    allow_registration: bool,
    #[serde(default = "true_fn")]
    allow_encryption: bool,
    #[serde(default = "false_fn")]
    allow_federation: bool,
    #[serde(default = "false_fn")]
    pub allow_jaeger: bool,
    #[serde(default)]
    proxy: ProxyConfig,
    jwt_secret: Option<String>,
    #[serde(default = "Vec::new")]
    trusted_servers: Vec<Box<ServerName>>,
    #[serde(default = "default_log")]
    pub log: String,

    #[serde(flatten)]
    catchall: BTreeMap<String, IgnoredAny>,
}

const DEPRECATED_KEYS: &[&str] = &["cache_capacity"];

impl Config {
    pub fn warn_deprecated(&self) {
        let mut was_deprecated = false;
        for key in self
            .catchall
            .keys()
            .filter(|key| DEPRECATED_KEYS.iter().any(|s| s == key))
        {
            log::warn!("Config parameter {} is deprecated", key);
            was_deprecated = true;
        }

        if was_deprecated {
            log::warn!("Read the Conduit documentation and check whether any of your configuration parameters need to be adjusted");
        }
    }
}

fn false_fn() -> bool {
    false
}

fn true_fn() -> bool {
    true
}

fn default_db_cache_capacity_mb() -> f64 {
    200.0
}

fn default_sqlite_read_pool_size() -> usize {
    num_cpus::get().max(1)
}

fn default_sqlite_wal_clean_second_interval() -> u32 {
    60 * 60
}

fn default_sqlite_wal_clean_second_timeout() -> u32 {
    2
}

fn default_max_request_size() -> u32 {
    20 * 1024 * 1024 // Default to 20 MB
}

fn default_max_concurrent_requests() -> u16 {
    100
}

fn default_log() -> String {
    "info,state_res=warn,rocket=off,_=off,sled=off".to_owned()
}

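// Concrete storage backend, selected at compile time: exactly one of the
// backend features (`sled`, `rocksdb`, `sqlite`) is expected to be enabled so
// that exactly one `Engine` alias is defined.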
#[cfg(feature = "sled")]
pub type Engine = abstraction::sled::Engine;

#[cfg(feature = "rocksdb")]
pub type Engine = abstraction::rocksdb::Engine;

#[cfg(feature = "sqlite")]
pub type Engine = abstraction::sqlite::Engine;

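/// Handle to the database: the storage engine itself plus one wrapper struct
/// per domain (users, rooms, media, ...) holding the trees that domain uses.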
pub struct Database {
    _db: Arc<Engine>,
    pub globals: globals::Globals,
    pub users: users::Users,
    pub uiaa: uiaa::Uiaa,
    pub rooms: rooms::Rooms,
    pub account_data: account_data::AccountData,
    pub media: media::Media,
    pub key_backups: key_backups::KeyBackups,
    pub transaction_ids: transaction_ids::TransactionIds,
    pub sending: sending::Sending,
    pub admin: admin::Admin,
    pub appservice: appservice::Appservice,
    pub pusher: pusher::PushData,
}

impl Database {
    /// Tries to remove the old database but ignores all errors.
    pub fn try_remove(server_name: &str) -> Result<()> {
        let mut path = ProjectDirs::from("xyz", "koesters", "conduit")
            .ok_or_else(|| Error::bad_config("The OS didn't return a valid home directory path."))?
            .data_dir()
            .to_path_buf();
        path.push(server_name);
        let _ = remove_dir_all(path);

        Ok(())
    }

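    /// Warns, or refuses to start, if a leftover sled database is found in the
    /// configured database directory now that sqlite is used as the backend.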
    fn check_sled_or_sqlite_db(config: &Config) -> Result<()> {
        let path = Path::new(&config.database_path);

        #[cfg(feature = "backend_sqlite")]
        {
            let sled_exists = path.join("db").exists();
            let sqlite_exists = path.join("conduit.db").exists();
            if sled_exists {
                if sqlite_exists {
                    // Most likely an in-place migration; only warn.
                    log::warn!("Both sled and sqlite databases are detected in database directory");
                    log::warn!("Currently running from the sqlite database, but consider removing sled database files to free up space");
                } else {
                    log::error!(
                        "Sled database detected, conduit now uses sqlite for database operations"
                    );
                    log::error!("This database must be converted to sqlite, go to https://github.com/ShadowJonathan/conduit_toolbox#conduit_sled_to_sqlite");
                    return Err(Error::bad_config(
                        "sled database detected, migrate to sqlite",
                    ));
                }
            }
        }

        Ok(())
    }

    /// Load an existing database or create a new one.
    pub async fn load_or_create(config: Config) -> Result<Arc<TokioRwLock<Self>>> {
        Self::check_sled_or_sqlite_db(&config)?;

        let builder = Engine::open(&config)?;

        if config.max_request_size < 1024 {
            eprintln!("ERROR: Max request size is less than 1KB. Please increase it.");
        }

        let (admin_sender, admin_receiver) = mpsc::unbounded();
        let (sending_sender, sending_receiver) = mpsc::unbounded();

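        // Each `open_tree` call opens (or creates) a named key-value tree in the
        // backend; the wrappers below simply group the trees by the module that uses them.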
        let db = Arc::new(TokioRwLock::from(Self {
            _db: builder.clone(),
            users: users::Users {
                userid_password: builder.open_tree("userid_password")?,
                userid_displayname: builder.open_tree("userid_displayname")?,
                userid_avatarurl: builder.open_tree("userid_avatarurl")?,
                userdeviceid_token: builder.open_tree("userdeviceid_token")?,
                userdeviceid_metadata: builder.open_tree("userdeviceid_metadata")?,
                userid_devicelistversion: builder.open_tree("userid_devicelistversion")?,
                token_userdeviceid: builder.open_tree("token_userdeviceid")?,
                onetimekeyid_onetimekeys: builder.open_tree("onetimekeyid_onetimekeys")?,
                userid_lastonetimekeyupdate: builder.open_tree("userid_lastonetimekeyupdate")?,
                keychangeid_userid: builder.open_tree("keychangeid_userid")?,
                keyid_key: builder.open_tree("keyid_key")?,
                userid_masterkeyid: builder.open_tree("userid_masterkeyid")?,
                userid_selfsigningkeyid: builder.open_tree("userid_selfsigningkeyid")?,
                userid_usersigningkeyid: builder.open_tree("userid_usersigningkeyid")?,
                todeviceid_events: builder.open_tree("todeviceid_events")?,
            },
            uiaa: uiaa::Uiaa {
                userdevicesessionid_uiaainfo: builder.open_tree("userdevicesessionid_uiaainfo")?,
                userdevicesessionid_uiaarequest: builder
                    .open_tree("userdevicesessionid_uiaarequest")?,
            },
            rooms: rooms::Rooms {
                edus: rooms::RoomEdus {
                    readreceiptid_readreceipt: builder.open_tree("readreceiptid_readreceipt")?,
                    roomuserid_privateread: builder.open_tree("roomuserid_privateread")?, // "Private" read receipt
                    roomuserid_lastprivatereadupdate: builder
                        .open_tree("roomuserid_lastprivatereadupdate")?,
                    typingid_userid: builder.open_tree("typingid_userid")?,
                    roomid_lasttypingupdate: builder.open_tree("roomid_lasttypingupdate")?,
                    presenceid_presence: builder.open_tree("presenceid_presence")?,
                    userid_lastpresenceupdate: builder.open_tree("userid_lastpresenceupdate")?,
                },
                pduid_pdu: builder.open_tree("pduid_pdu")?,
                eventid_pduid: builder.open_tree("eventid_pduid")?,
                roomid_pduleaves: builder.open_tree("roomid_pduleaves")?,

                alias_roomid: builder.open_tree("alias_roomid")?,
                aliasid_alias: builder.open_tree("aliasid_alias")?,
                publicroomids: builder.open_tree("publicroomids")?,

                tokenids: builder.open_tree("tokenids")?,

                roomserverids: builder.open_tree("roomserverids")?,
                serverroomids: builder.open_tree("serverroomids")?,
                userroomid_joined: builder.open_tree("userroomid_joined")?,
                roomuserid_joined: builder.open_tree("roomuserid_joined")?,
                roomuseroncejoinedids: builder.open_tree("roomuseroncejoinedids")?,
                userroomid_invitestate: builder.open_tree("userroomid_invitestate")?,
                roomuserid_invitecount: builder.open_tree("roomuserid_invitecount")?,
                userroomid_leftstate: builder.open_tree("userroomid_leftstate")?,
                roomuserid_leftcount: builder.open_tree("roomuserid_leftcount")?,

                userroomid_notificationcount: builder.open_tree("userroomid_notificationcount")?,
                userroomid_highlightcount: builder.open_tree("userroomid_highlightcount")?,

                statekey_shortstatekey: builder.open_tree("statekey_shortstatekey")?,
                stateid_shorteventid: builder.open_tree("stateid_shorteventid")?,
                eventid_shorteventid: builder.open_tree("eventid_shorteventid")?,
                shorteventid_eventid: builder.open_tree("shorteventid_eventid")?,
                shorteventid_shortstatehash: builder.open_tree("shorteventid_shortstatehash")?,
                roomid_shortstatehash: builder.open_tree("roomid_shortstatehash")?,
                statehash_shortstatehash: builder.open_tree("statehash_shortstatehash")?,

                eventid_outlierpdu: builder.open_tree("eventid_outlierpdu")?,
                prevevent_parent: builder.open_tree("prevevent_parent")?,
                pdu_cache: RwLock::new(LruCache::new(1_000_000)),
            },
            account_data: account_data::AccountData {
                roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?,
            },
            media: media::Media {
                mediaid_file: builder.open_tree("mediaid_file")?,
            },
            key_backups: key_backups::KeyBackups {
                backupid_algorithm: builder.open_tree("backupid_algorithm")?,
                backupid_etag: builder.open_tree("backupid_etag")?,
                backupkeyid_backup: builder.open_tree("backupkeyid_backup")?,
            },
            transaction_ids: transaction_ids::TransactionIds {
                userdevicetxnid_response: builder.open_tree("userdevicetxnid_response")?,
            },
            sending: sending::Sending {
                servername_educount: builder.open_tree("servername_educount")?,
                servernamepduids: builder.open_tree("servernamepduids")?,
                servercurrentevents: builder.open_tree("servercurrentevents")?,
                maximum_requests: Arc::new(Semaphore::new(config.max_concurrent_requests as usize)),
                sender: sending_sender,
            },
            admin: admin::Admin {
                sender: admin_sender,
            },
            appservice: appservice::Appservice {
                cached_registrations: Arc::new(RwLock::new(HashMap::new())),
                id_appserviceregistrations: builder.open_tree("id_appserviceregistrations")?,
            },
            pusher: pusher::PushData {
                senderkey_pusher: builder.open_tree("senderkey_pusher")?,
            },
            globals: globals::Globals::load(
                builder.open_tree("global")?,
                builder.open_tree("server_signingkeys")?,
                config.clone(),
            )?,
        }));

        {
            let db = db.read().await;
            // MIGRATIONS
            // TODO: database versions of new dbs should probably not be 0
            if db.globals.database_version()? < 1 {
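                // Build the reverse index `serverroomids` (server -> room) from
                // the existing `roomserverids` (room -> server) entries.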
                for (roomserverid, _) in db.rooms.roomserverids.iter() {
                    let mut parts = roomserverid.split(|&b| b == 0xff);
                    let room_id = parts.next().expect("split always returns one element");
                    let servername = match parts.next() {
                        Some(s) => s,
                        None => {
                            error!("Migration: Invalid roomserverid in db.");
                            continue;
                        }
                    };
                    let mut serverroomid = servername.to_vec();
                    serverroomid.push(0xff);
                    serverroomid.extend_from_slice(room_id);

                    db.rooms.serverroomids.insert(&serverroomid, &[])?;
                }

                db.globals.bump_database_version(1)?;

                println!("Migration: 0 -> 1 finished");
            }

            if db.globals.database_version()? < 2 {
                // We accidentally inserted hashed versions of "" into the db instead of just ""
                for (userid, password) in db.users.userid_password.iter() {
                    let password = utils::string_from_bytes(&password);

                    let empty_hashed_password = password.map_or(false, |password| {
                        argon2::verify_encoded(&password, b"").unwrap_or(false)
                    });

                    if empty_hashed_password {
                        db.users.userid_password.insert(&userid, b"")?;
                    }
                }

                db.globals.bump_database_version(2)?;

                println!("Migration: 1 -> 2 finished");
            }

            if db.globals.database_version()? < 3 {
                // Move media to filesystem
                for (key, content) in db.media.mediaid_file.iter() {
                    if content.is_empty() {
                        continue;
                    }

                    let path = db.globals.get_media_file(&key);
                    let mut file = fs::File::create(path)?;
                    file.write_all(&content)?;
                    db.media.mediaid_file.insert(&key, &[])?;
                }

                db.globals.bump_database_version(3)?;

                println!("Migration: 2 -> 3 finished");
            }

            if db.globals.database_version()? < 4 {
                // Add federated users to db as deactivated
                for our_user in db.users.iter() {
                    let our_user = our_user?;
                    if db.users.is_deactivated(&our_user)? {
                        continue;
                    }
                    for room in db.rooms.rooms_joined(&our_user) {
                        for user in db.rooms.room_members(&room?) {
                            let user = user?;
                            if user.server_name() != db.globals.server_name() {
                                println!("Migration: Creating user {}", user);
                                db.users.create(&user, None)?;
                            }
                        }
                    }
                }

                db.globals.bump_database_version(4)?;

                println!("Migration: 3 -> 4 finished");
            }
        }

        let guard = db.read().await;

        // This data is probably outdated
        guard.rooms.edus.presenceid_presence.clear()?;

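        // Spawn the long-running admin and federation-sending handlers; each gets
        // its own `Arc` clone of the database plus the receiving end of its channel.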
        guard.admin.start_handler(Arc::clone(&db), admin_receiver);
        guard
            .sending
            .start_handler(Arc::clone(&db), sending_receiver);

        drop(guard);

        #[cfg(feature = "sqlite")]
        Self::start_wal_clean_task(&db, &config).await;

        Ok(db)
    }

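    /// Waits until something relevant to the given user and device changes: new
    /// PDUs or EDUs in joined rooms, account data, key changes, to-device events,
    /// or a sync-helper rotation. Long-polling endpoints such as `/sync` await
    /// this before recomputing their response.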
    pub async fn watch(&self, user_id: &UserId, device_id: &DeviceId) {
        let userid_bytes = user_id.as_bytes().to_vec();
        let mut userid_prefix = userid_bytes.clone();
        userid_prefix.push(0xff);

        let mut userdeviceid_prefix = userid_prefix.clone();
        userdeviceid_prefix.extend_from_slice(device_id.as_bytes());
        userdeviceid_prefix.push(0xff);

        let mut futures = FuturesUnordered::new();

        // Return when *any* user changed their keys
        // TODO: only send for users they share a room with
        futures.push(
            self.users
                .todeviceid_events
                .watch_prefix(&userdeviceid_prefix),
        );

        futures.push(self.rooms.userroomid_joined.watch_prefix(&userid_prefix));
        futures.push(
            self.rooms
                .userroomid_invitestate
                .watch_prefix(&userid_prefix),
        );
        futures.push(self.rooms.userroomid_leftstate.watch_prefix(&userid_prefix));

        // Events for rooms we are in
        for room_id in self.rooms.rooms_joined(user_id).filter_map(|r| r.ok()) {
            let roomid_bytes = room_id.as_bytes().to_vec();
            let mut roomid_prefix = roomid_bytes.clone();
            roomid_prefix.push(0xff);

            // PDUs
            futures.push(self.rooms.pduid_pdu.watch_prefix(&roomid_prefix));

            // EDUs
            futures.push(
                self.rooms
                    .edus
                    .roomid_lasttypingupdate
                    .watch_prefix(&roomid_bytes),
            );

            futures.push(
                self.rooms
                    .edus
                    .readreceiptid_readreceipt
                    .watch_prefix(&roomid_prefix),
            );

            // Key changes
            futures.push(self.users.keychangeid_userid.watch_prefix(&roomid_prefix));

            // Room account data
            let mut roomuser_prefix = roomid_prefix.clone();
            roomuser_prefix.extend_from_slice(&userid_prefix);

            futures.push(
                self.account_data
                    .roomuserdataid_accountdata
                    .watch_prefix(&roomuser_prefix),
            );
        }

        let mut globaluserdata_prefix = vec![0xff];
        globaluserdata_prefix.extend_from_slice(&userid_prefix);

        futures.push(
            self.account_data
                .roomuserdataid_accountdata
                .watch_prefix(&globaluserdata_prefix),
        );

        // More key changes (used when user is not joined to any rooms)
        futures.push(self.users.keychangeid_userid.watch_prefix(&userid_prefix));

        // One time keys
        futures.push(
            self.users
                .userid_lastonetimekeyupdate
                .watch_prefix(&userid_bytes),
        );

        futures.push(Box::pin(self.globals.rotate.watch()));

        // Wait until one of them finds something
        futures.next().await;
    }

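    /// Flushes the storage engine to disk and logs how long the flush took.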
    pub async fn flush(&self) -> Result<()> {
        let start = std::time::Instant::now();

        let res = self._db.flush();

        log::debug!("flush: took {:?}", start.elapsed());

        res
    }

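    /// Flushes the sqlite write-ahead log (sqlite backend only); called by the
    /// periodic WAL-clean task below.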
    #[cfg(feature = "sqlite")]
    pub fn flush_wal(&self) -> Result<()> {
        self._db.flush_wal()
    }

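    /// Spawns a background task that periodically (and on SIGHUP) acquires a
    /// write lock on the database and flushes the sqlite WAL. The task only
    /// keeps a `Weak` reference, so it stops once the database has been dropped.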
    #[cfg(feature = "sqlite")]
    pub async fn start_wal_clean_task(lock: &Arc<TokioRwLock<Self>>, config: &Config) {
        use tokio::{
            select,
            signal::unix::{signal, SignalKind},
            time::{interval, timeout},
        };

        use std::{
            sync::Weak,
            time::{Duration, Instant},
        };

        let weak: Weak<TokioRwLock<Database>> = Arc::downgrade(&lock);

        let lock_timeout = Duration::from_secs(config.sqlite_wal_clean_second_timeout as u64);
        let timer_interval = Duration::from_secs(config.sqlite_wal_clean_second_interval as u64);
        let do_timer = config.sqlite_wal_clean_timer;

        tokio::spawn(async move {
            let mut i = interval(timer_interval);
            let mut s = signal(SignalKind::hangup()).unwrap();

            loop {
                select! {
                    _ = i.tick(), if do_timer => {
                        log::info!(target: "wal-trunc", "Timer ticked")
                    }
                    _ = s.recv() => {
                        log::info!(target: "wal-trunc", "Received SIGHUP")
                    }
                };

                if let Some(arc) = Weak::upgrade(&weak) {
                    log::info!(target: "wal-trunc", "Rotating sync helpers...");
                    // This actually creates a very small race condition between firing this and trying to acquire the subsequent write lock.
                    // Though it is not a huge deal if the write lock doesn't "catch", as it'll harmlessly time out.
                    arc.read().await.globals.rotate.fire();

                    log::info!(target: "wal-trunc", "Locking...");
                    let guard = {
                        if let Ok(guard) = timeout(lock_timeout, arc.write()).await {
                            guard
                        } else {
                            log::info!(target: "wal-trunc", "Lock failed in timeout, canceled.");
                            continue;
                        }
                    };
                    log::info!(target: "wal-trunc", "Locked, flushing...");
                    let start = Instant::now();
                    if let Err(e) = guard.flush_wal() {
                        log::error!(target: "wal-trunc", "Errored: {}", e);
                    } else {
                        log::info!(target: "wal-trunc", "Flushed in {:?}", start.elapsed());
                    }
                } else {
                    break;
                }
            }
        });
    }
}

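/// Rocket request guard holding an owned read lock on the global `Database`
/// for the duration of a request; handlers reach the database through `Deref`.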
pub struct DatabaseGuard(OwnedRwLockReadGuard<Database>);

impl Deref for DatabaseGuard {
    type Target = OwnedRwLockReadGuard<Database>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

#[rocket::async_trait]
impl<'r> FromRequest<'r> for DatabaseGuard {
    type Error = ();

    async fn from_request(req: &'r Request<'_>) -> rocket::request::Outcome<Self, ()> {
        let db = try_outcome!(req.guard::<State<'_, Arc<TokioRwLock<Database>>>>().await);

        Ok(DatabaseGuard(Arc::clone(&db).read_owned().await)).or_forward(())
    }
}

impl From<OwnedRwLockReadGuard<Database>> for DatabaseGuard {
    fn from(guard: OwnedRwLockReadGuard<Database>) -> Self {
        DatabaseGuard(guard)
    }
}