Mirror of https://gitlab.com/famedly/conduit.git (synced 2024-11-16 14:20:52 +01:00)
improvement: allow rocksdb again

Commit 1d647a1a9a (parent b25354c747)
8 changed files with 664 additions and 297 deletions
Cargo.lock (generated, 727 lines changed)
File diff suppressed because it is too large.
Cargo.toml

@@ -78,6 +78,7 @@ crossbeam = { version = "0.8.1", optional = true }
 num_cpus = "1.13.0"
 threadpool = "1.8.1"
 heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true }
+rocksdb = { version = "0.16.0", features = ["multi-threaded-cf"], optional = true }
 thread_local = "1.1.3"
 # used for TURN server authentication
 hmac = "0.11.0"

@@ -87,7 +88,8 @@ sha-1 = "0.9.8"
 default = ["conduit_bin", "backend_sqlite"]
 backend_sled = ["sled"]
 backend_sqlite = ["sqlite"]
-backend_heed = ["heed", "crossbeam", "parking_lot"]
+backend_heed = ["heed", "crossbeam"]
+backend_rocksdb = ["rocksdb"]
 sqlite = ["rusqlite", "parking_lot", "crossbeam", "tokio/signal"]
 conduit_bin = [] # TODO: add rocket to this when it is optional
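The new backend_rocksdb feature simply forwards to the optional rocksdb dependency above, alongside the existing backends. As a usage sketch (assuming the feature names in this file), a rocksdb-backed build would be selected at compile time with:

cargo build --release --no-default-features --features conduit_bin,backend_rocksdb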
src/database.rs

@@ -154,6 +154,9 @@ pub type Engine = abstraction::sqlite::Engine;
 #[cfg(feature = "heed")]
 pub type Engine = abstraction::heed::Engine;
 
+#[cfg(feature = "rocksdb")]
+pub type Engine = abstraction::rocksdb::Engine;
+
 pub struct Database {
     _db: Arc<Engine>,
     pub globals: globals::Globals,
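Each backend feature gates its own Engine alias, so exactly one backend can be enabled per build; enabling two at once fails with duplicate Engine definitions. A guard like the following is not part of this commit, just a sketch of how to surface that mistake with a clearer message:

#[cfg(all(feature = "sqlite", feature = "rocksdb"))]
compile_error!("Enable at most one database backend feature.");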
@@ -314,10 +317,10 @@ impl Database {
                 .expect("pdu cache capacity fits into usize"),
             )),
             auth_chain_cache: Mutex::new(LruCache::new(1_000_000)),
-            shorteventid_cache: Mutex::new(LruCache::new(1_000_000)),
-            eventidshort_cache: Mutex::new(LruCache::new(1_000_000)),
-            shortstatekey_cache: Mutex::new(LruCache::new(1_000_000)),
-            statekeyshort_cache: Mutex::new(LruCache::new(1_000_000)),
+            shorteventid_cache: Mutex::new(LruCache::new(100_000_000)),
+            eventidshort_cache: Mutex::new(LruCache::new(100_000_000)),
+            shortstatekey_cache: Mutex::new(LruCache::new(100_000_000)),
+            statekeyshort_cache: Mutex::new(LruCache::new(100_000_000)),
             our_real_users_cache: RwLock::new(HashMap::new()),
             appservice_in_room_cache: RwLock::new(HashMap::new()),
             stateinfo_cache: Mutex::new(LruCache::new(1000)),
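The four short-ID caches grow from 1,000,000 to 100,000,000 entries here. LruCache::new takes a maximum entry count, not a byte budget, so memory use scales with entry size times capacity. A minimal sketch of the semantics, assuming the lru-cache crate Conduit uses:

use lru_cache::LruCache;

fn main() {
    // Capacity bounds the number of entries; the least-recently-used
    // entry is evicted once capacity is exceeded.
    let mut cache: LruCache<u64, Vec<u8>> = LruCache::new(100_000_000);
    cache.insert(1, vec![0u8; 32]);
    assert!(cache.contains_key(&1));
}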
src/database/abstraction.rs

@@ -12,7 +12,10 @@ pub mod sqlite;
 #[cfg(feature = "heed")]
 pub mod heed;
 
-#[cfg(any(feature = "sqlite", feature = "heed"))]
+#[cfg(feature = "rocksdb")]
+pub mod rocksdb;
+
+#[cfg(any(feature = "sqlite", feature = "rocksdb", feature = "heed"))]
 pub mod watchers;
 
 pub trait DatabaseEngine: Sized {
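The cfg(any(...)) line now compiles the shared watchers module whenever any backend that supports key-prefix watching is enabled; the new rocksdb tree below leans on it for watch_prefix. That module is not part of this diff, so here is a hedged sketch of the idea it implements, assuming tokio watch channels (illustrative, not Conduit's exact code):

use std::{
    collections::{hash_map::Entry, HashMap},
    future::Future,
    pin::Pin,
    sync::RwLock,
};
use tokio::sync::watch;

#[derive(Default)]
pub struct Watchers {
    watchers: RwLock<HashMap<Vec<u8>, (watch::Sender<()>, watch::Receiver<()>)>>,
}

impl Watchers {
    /// Returns a future that resolves once a key starting with `prefix` is written.
    pub fn watch<'a>(&'a self, prefix: &[u8]) -> Pin<Box<dyn Future<Output = ()> + Send + 'a>> {
        let mut rx = match self.watchers.write().unwrap().entry(prefix.to_vec()) {
            Entry::Occupied(o) => o.get().1.clone(),
            Entry::Vacant(v) => {
                let (tx, rx) = watch::channel(());
                v.insert((tx, rx.clone()));
                rx
            }
        };

        Box::pin(async move {
            let _ = rx.changed().await;
        })
    }

    /// Wakes every watcher whose prefix matches the written key.
    pub fn wake(&self, key: &[u8]) {
        let mut watchers = self.watchers.write().unwrap();
        let matching: Vec<Vec<u8>> = watchers
            .keys()
            .filter(|prefix| key.starts_with(prefix))
            .cloned()
            .collect();
        for prefix in matching {
            if let Some((tx, _)) = watchers.remove(&prefix) {
                let _ = tx.send(());
            }
        }
    }
}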
src/database/abstraction/rocksdb.rs (new file, 183 lines)
@@ -0,0 +1,183 @@ (new file; every line below is added)
use super::super::Config;
use super::watchers::Watchers; // shared watcher plumbing, gated in abstraction.rs above
use crate::{utils, Result};

use std::{future::Future, pin::Pin, sync::Arc};

use super::{DatabaseEngine, Tree};

use std::{collections::HashMap, sync::RwLock};

pub struct Engine {
    rocks: rocksdb::DBWithThreadMode<rocksdb::MultiThreaded>,
    old_cfs: Vec<String>,
}

pub struct RocksDbEngineTree<'a> {
    db: Arc<Engine>,
    name: &'a str,
    watchers: Watchers,
}

impl DatabaseEngine for Engine {
    fn open(config: &Config) -> Result<Arc<Self>> {
        let mut db_opts = rocksdb::Options::default();
        db_opts.create_if_missing(true);
        db_opts.set_max_open_files(16);
        db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level);
        db_opts.set_compression_type(rocksdb::DBCompressionType::Snappy);
        db_opts.set_target_file_size_base(256 << 20); // 256 MiB
        db_opts.set_write_buffer_size(256 << 20); // 256 MiB

        let mut block_based_options = rocksdb::BlockBasedOptions::default();
        block_based_options.set_block_size(512 << 10); // 512 KiB
        db_opts.set_block_based_table_factory(&block_based_options);

        // Existing column families must be listed and reopened explicitly; each
        // gets a 1-byte prefix extractor and the "increment" merge operator.
        let cfs = rocksdb::DBWithThreadMode::<rocksdb::MultiThreaded>::list_cf(
            &db_opts,
            &config.database_path,
        )
        .unwrap_or_default();

        let db = rocksdb::DBWithThreadMode::<rocksdb::MultiThreaded>::open_cf_descriptors(
            &db_opts,
            &config.database_path,
            cfs.iter().map(|name| {
                let mut options = rocksdb::Options::default();
                let prefix_extractor = rocksdb::SliceTransform::create_fixed_prefix(1);
                options.set_prefix_extractor(prefix_extractor);
                options.set_merge_operator_associative("increment", utils::increment_rocksdb);

                rocksdb::ColumnFamilyDescriptor::new(name, options)
            }),
        )?;

        Ok(Arc::new(Engine {
            rocks: db,
            old_cfs: cfs,
        }))
    }

    fn open_tree(self: &Arc<Self>, name: &'static str) -> Result<Arc<dyn Tree>> {
        if !self.old_cfs.contains(&name.to_owned()) {
            // Create if it didn't exist
            let mut options = rocksdb::Options::default();
            let prefix_extractor = rocksdb::SliceTransform::create_fixed_prefix(1);
            options.set_prefix_extractor(prefix_extractor);
            options.set_merge_operator_associative("increment", utils::increment_rocksdb);

            let _ = self.rocks.create_cf(name, &options);
            println!("created cf");
        }

        Ok(Arc::new(RocksDbEngineTree {
            name,
            db: Arc::clone(self),
            watchers: Watchers::default(),
        }))
    }

    fn flush(self: &Arc<Self>) -> Result<()> {
        // TODO?
        Ok(())
    }
}
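A hypothetical caller, to show how these two methods fit together (the tree name is illustrative, and this assumes a surrounding function returning crate::Result<()> with a config in scope; in Conduit the database layer does this wiring):

let engine = Engine::open(&config)?;
let tree = engine.open_tree("some_tree")?;
tree.insert(b"key", b"value")?;

The new file continues with the Tree implementation: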
impl RocksDbEngineTree<'_> {
    fn cf(&self) -> rocksdb::BoundColumnFamily<'_> {
        self.db.rocks.cf_handle(self.name).unwrap()
    }
}

impl Tree for RocksDbEngineTree<'_> {
    fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
        Ok(self.db.rocks.get_cf(self.cf(), key)?)
    }

    fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> {
        self.db.rocks.put_cf(self.cf(), key, value)?;
        self.watchers.wake(key); // notify pending watch_prefix() futures
        Ok(())
    }

    fn insert_batch<'a>(&self, iter: &mut dyn Iterator<Item = (Vec<u8>, Vec<u8>)>) -> Result<()> {
        for (key, value) in iter {
            self.db.rocks.put_cf(self.cf(), key, value)?;
        }

        Ok(())
    }

    fn remove(&self, key: &[u8]) -> Result<()> {
        Ok(self.db.rocks.delete_cf(self.cf(), key)?)
    }

    fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + 'a> {
        Box::new(
            self.db
                .rocks
                .iterator_cf(self.cf(), rocksdb::IteratorMode::Start)
                .map(|(k, v)| (Vec::from(k), Vec::from(v))),
        )
    }

    fn iter_from<'a>(
        &'a self,
        from: &[u8],
        backwards: bool,
    ) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + 'a> {
        Box::new(
            self.db
                .rocks
                .iterator_cf(
                    self.cf(),
                    rocksdb::IteratorMode::From(
                        from,
                        if backwards {
                            rocksdb::Direction::Reverse
                        } else {
                            rocksdb::Direction::Forward
                        },
                    ),
                )
                .map(|(k, v)| (Vec::from(k), Vec::from(v))),
        )
    }

    fn increment(&self, key: &[u8]) -> Result<Vec<u8>> {
        // TODO: make atomic
        let old = self.db.rocks.get_cf(self.cf(), &key)?;
        let new = utils::increment(old.as_deref()).unwrap();
        self.db.rocks.put_cf(self.cf(), key, &new)?;
        Ok(new)
    }

    fn increment_batch<'a>(&self, iter: &mut dyn Iterator<Item = Vec<u8>>) -> Result<()> {
        for key in iter {
            let old = self.db.rocks.get_cf(self.cf(), &key)?;
            let new = utils::increment(old.as_deref()).unwrap();
            self.db.rocks.put_cf(self.cf(), key, new)?;
        }

        Ok(())
    }

    fn scan_prefix<'a>(
        &'a self,
        prefix: Vec<u8>,
    ) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + 'a> {
        // Seek to the prefix, then stop at the first key that no longer matches.
        Box::new(
            self.db
                .rocks
                .iterator_cf(
                    self.cf(),
                    rocksdb::IteratorMode::From(&prefix, rocksdb::Direction::Forward),
                )
                .map(|(k, v)| (Vec::from(k), Vec::from(v)))
                .take_while(move |(k, _)| k.starts_with(&prefix)),
        )
    }

    fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin<Box<dyn Future<Output = ()> + Send + 'a>> {
        self.watchers.watch(prefix)
    }
}
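increment() is a plain read-modify-write with no locking, hence the "TODO: make atomic": two concurrent increments can read the same old value and lose an update. Since the engine already registers the "increment" merge operator, an atomic variant could hand the update to RocksDB instead. A hedged sketch (this method is an assumption, not part of the commit); note that a merge does not return the new value, so callers that need it back must re-read, which may be why the commit keeps get+put:

// Sketch: utils::increment_rocksdb ignores the operand bytes,
// so an empty operand just means "add one".
fn increment_atomic(&self, key: &[u8]) -> Result<()> {
    self.db.rocks.merge_cf(self.cf(), key, b"")?;
    Ok(())
}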
src/database/abstraction/sqlite.rs

@@ -132,7 +132,7 @@ type TupleOfBytes = (Vec<u8>, Vec<u8>);
 impl SqliteTable {
     #[tracing::instrument(skip(self, guard, key))]
     fn get_with_guard(&self, guard: &Connection, key: &[u8]) -> Result<Option<Vec<u8>>> {
-        //dbg!(&self.name);
+        dbg!(&self.name);
         Ok(guard
             .prepare(format!("SELECT value FROM {} WHERE key = ?", self.name).as_str())?
             .query_row([key], |row| row.get(0))

@@ -141,7 +141,7 @@ impl SqliteTable {
 
     #[tracing::instrument(skip(self, guard, key, value))]
     fn insert_with_guard(&self, guard: &Connection, key: &[u8], value: &[u8]) -> Result<()> {
-        //dbg!(&self.name);
+        dbg!(&self.name);
         guard.execute(
             format!(
                 "INSERT OR REPLACE INTO {} (key, value) VALUES (?, ?)",

@@ -168,14 +168,14 @@ impl SqliteTable {
 
         let statement_ref = NonAliasingBox(statement);
 
-        //let name = self.name.clone();
+        let name = self.name.clone();
 
         let iterator = Box::new(
             statement
                 .query_map([], |row| Ok((row.get_unwrap(0), row.get_unwrap(1))))
                 .unwrap()
                 .map(move |r| {
-                    //dbg!(&name);
+                    dbg!(&name);
                     r.unwrap()
                 }),
         );

@@ -263,7 +263,7 @@ impl Tree for SqliteTable {
         let guard = self.engine.read_lock_iterator();
         let from = from.to_vec(); // TODO change interface?
 
-        //let name = self.name.clone();
+        let name = self.name.clone();
 
         if backwards {
             let statement = Box::leak(Box::new(

@@ -282,7 +282,7 @@ impl Tree for SqliteTable {
                 .query_map([from], |row| Ok((row.get_unwrap(0), row.get_unwrap(1))))
                 .unwrap()
                 .map(move |r| {
-                    //dbg!(&name);
+                    dbg!(&name);
                     r.unwrap()
                 }),
             );

@@ -307,7 +307,7 @@ impl Tree for SqliteTable {
                 .query_map([from], |row| Ok((row.get_unwrap(0), row.get_unwrap(1))))
                 .unwrap()
                 .map(move |r| {
-                    //dbg!(&name);
+                    dbg!(&name);
                     r.unwrap()
                 }),
             );
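The sqlite hunks above uncomment the dbg! calls, so every table read, write, and iteration now prints the table name to stderr, presumably temporary instrumentation while the rocksdb backend was being compared against sqlite. A hedged alternative (not in the commit) would route the same signal through tracing, which these functions already use via #[tracing::instrument], so it can be filtered by log level:

// Hypothetical replacement for dbg!(&self.name):
tracing::debug!(table = %self.name, "sqlite table access");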
src/error.rs

@@ -39,6 +39,12 @@ pub enum Error {
     #[cfg(feature = "heed")]
     #[error("There was a problem with the connection to the heed database: {error}")]
     HeedError { error: String },
+    #[cfg(feature = "rocksdb")]
+    #[error("There was a problem with the connection to the rocksdb database: {source}")]
+    RocksDbError {
+        #[from]
+        source: rocksdb::Error,
+    },
     #[error("Could not generate an image.")]
     ImageError {
         #[from]
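The #[from] attribute makes thiserror generate From<rocksdb::Error> for Error, which is what lets the backend write Ok(self.db.rocks.get_cf(...)?) and have ? convert the error automatically. A minimal self-contained sketch of the mechanism (illustrative names, single-variant enum):

use thiserror::Error;

#[derive(Error, Debug)]
pub enum Error {
    #[error("There was a problem with the connection to the rocksdb database: {source}")]
    RocksDbError {
        #[from]
        source: rocksdb::Error,
    },
}

// `?` applies the generated From impl:
fn get(db: &rocksdb::DB, key: &[u8]) -> Result<Option<Vec<u8>>, Error> {
    Ok(db.get(key)?)
}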
src/utils.rs (11 lines changed)

@@ -29,6 +29,17 @@ pub fn increment(old: Option<&[u8]>) -> Option<Vec<u8>> {
     Some(number.to_be_bytes().to_vec())
 }
 
+#[cfg(feature = "rocksdb")]
+pub fn increment_rocksdb(
+    _new_key: &[u8],
+    old: Option<&[u8]>,
+    _operands: &mut rocksdb::MergeOperands,
+) -> Option<Vec<u8>> {
+    dbg!(_new_key);
+    dbg!(old);
+    increment(old)
+}
+
 pub fn generate_keypair() -> Vec<u8> {
     let mut value = random_string(8).as_bytes().to_vec();
     value.push(0xff);
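Note that increment_rocksdb ignores _operands: when RocksDB batches several pending merges into one callback, only a single increment is applied. A hedged sketch of a full fold, assuming &mut MergeOperands iterates over the queued operand slices as in rust-rocksdb of this era (not part of the commit):

#[cfg(feature = "rocksdb")]
pub fn increment_rocksdb_folded(
    _new_key: &[u8],
    old: Option<&[u8]>,
    operands: &mut rocksdb::MergeOperands,
) -> Option<Vec<u8>> {
    // Start from the stored value and apply one increment per queued merge.
    let mut value = old.map(|b| b.to_vec());
    for _operand in operands {
        value = increment(value.as_deref());
    }
    value
}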