mirror of https://gitlab.com/famedly/conduit.git synced 2024-12-29 10:24:34 +01:00

refactor: fix warnings

Timo Kösters 2022-01-17 14:39:37 +01:00
parent ee8e72f7a8
commit 8c90e7adfb
No known key found for this signature in database
GPG key ID: 356E705610F626D5
4 changed files with 24 additions and 37 deletions

View file

@@ -4,7 +4,6 @@ use std::{future::Future, pin::Pin, sync::Arc, sync::RwLock};
 pub struct Engine {
     rocks: rocksdb::DBWithThreadMode<rocksdb::MultiThreaded>,
-    cache_capacity_bytes: usize,
     max_open_files: i32,
     cache: rocksdb::Cache,
     old_cfs: Vec<String>,
@@ -17,11 +16,7 @@ pub struct RocksDbEngineTree<'a> {
     write_lock: RwLock<()>,
 }
 
-fn db_options(
-    cache_capacity_bytes: usize,
-    max_open_files: i32,
-    rocksdb_cache: &rocksdb::Cache,
-) -> rocksdb::Options {
+fn db_options(max_open_files: i32, rocksdb_cache: &rocksdb::Cache) -> rocksdb::Options {
     let mut block_based_options = rocksdb::BlockBasedOptions::default();
     block_based_options.set_block_cache(rocksdb_cache);
@@ -57,11 +52,7 @@ impl DatabaseEngine for Arc<Engine> {
         let cache_capacity_bytes = (config.db_cache_capacity_mb * 1024.0 * 1024.0) as usize;
         let rocksdb_cache = rocksdb::Cache::new_lru_cache(cache_capacity_bytes).unwrap();
 
-        let db_opts = db_options(
-            cache_capacity_bytes,
-            config.rocksdb_max_open_files,
-            &rocksdb_cache,
-        );
+        let db_opts = db_options(config.rocksdb_max_open_files, &rocksdb_cache);
 
         let cfs = rocksdb::DBWithThreadMode::<rocksdb::MultiThreaded>::list_cf(
             &db_opts,
@@ -75,18 +66,13 @@ impl DatabaseEngine for Arc<Engine> {
             cfs.iter().map(|name| {
                 rocksdb::ColumnFamilyDescriptor::new(
                     name,
-                    db_options(
-                        cache_capacity_bytes,
-                        config.rocksdb_max_open_files,
-                        &rocksdb_cache,
-                    ),
+                    db_options(config.rocksdb_max_open_files, &rocksdb_cache),
                 )
             }),
         )?;
 
         Ok(Arc::new(Engine {
             rocks: db,
-            cache_capacity_bytes,
             max_open_files: config.rocksdb_max_open_files,
             cache: rocksdb_cache,
             old_cfs: cfs,
@@ -96,10 +82,9 @@ impl DatabaseEngine for Arc<Engine> {
     fn open_tree(&self, name: &'static str) -> Result<Arc<dyn Tree>> {
         if !self.old_cfs.contains(&name.to_owned()) {
             // Create if it didn't exist
-            let _ = self.rocks.create_cf(
-                name,
-                &db_options(self.cache_capacity_bytes, self.max_open_files, &self.cache),
-            );
+            let _ = self
+                .rocks
+                .create_cf(name, &db_options(self.max_open_files, &self.cache));
         }
 
         Ok(Arc::new(RocksDbEngineTree {
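The hunks above drop `cache_capacity_bytes` from the `Engine` struct and from `db_options`: the LRU block cache already carries its capacity, fixed when the cache is created, so callers only need to pass the cache handle. Below is a minimal, self-contained sketch of that pattern; the option calls beyond `set_block_cache`, and the 64 MiB / 512-file values, are illustrative assumptions rather than part of this commit (with this era's rocksdb crate, `new_lru_cache` returns a `Result`, hence the `unwrap`).

```rust
use rocksdb::{BlockBasedOptions, Cache, Options};

// The capacity lives in the Cache itself, so db_options only needs a reference to it.
fn db_options(max_open_files: i32, rocksdb_cache: &Cache) -> Options {
    let mut block_based_options = BlockBasedOptions::default();
    block_based_options.set_block_cache(rocksdb_cache); // shared LRU block cache

    let mut db_opts = Options::default();
    db_opts.set_block_based_table_factory(&block_based_options);
    db_opts.set_max_open_files(max_open_files); // illustrative option, not from the diff
    db_opts.create_if_missing(true); // illustrative option, not from the diff
    db_opts
}

fn main() {
    // The capacity is decided exactly once, at cache creation (64 MiB is a made-up value).
    let cache = Cache::new_lru_cache(64 * 1024 * 1024).unwrap();

    // Every caller, including per-column-family options, reuses the same cache handle.
    let _db_opts = db_options(512, &cache);
    let _cf_opts = db_options(512, &cache);
}
```

In the real code the capacity still comes from `config.db_cache_capacity_mb`, as shown in the hunk above; only the redundant copy of it in `Engine` and in the `db_options` signature goes away.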

View file

@@ -534,7 +534,8 @@ impl Sending {
                                     .collect::<Vec<_>>(),
                             ),
                             base64::URL_SAFE_NO_PAD,
-                        )).into(),
+                        ))
+                        .into(),
                     },
                 )
                 .await
@@ -692,7 +693,8 @@ impl Sending {
                                     .collect::<Vec<_>>(),
                             ),
                             base64::URL_SAFE_NO_PAD,
-                        )).into(),
+                        ))
+                        .into(),
                     },
                 )
                 .await
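Both hunks above are formatting-only: the trailing `.into()` moves onto its own line after the `base64::encode_config(..., base64::URL_SAFE_NO_PAD)` call that derives the outgoing transaction ID. As a reference for what that call does, here is a minimal sketch using the base64 0.13-era API (the input bytes are made up):

```rust
fn main() {
    // URL_SAFE_NO_PAD yields a URL- and filename-safe alphabet with no `=` padding,
    // which is convenient for identifiers such as transaction IDs.
    let bytes = b"example transaction payload";
    let id: String = base64::encode_config(bytes, base64::URL_SAFE_NO_PAD);
    println!("{}", id); // ZXhhbXBsZSB0cmFuc2FjdGlvbiBwYXlsb2Fk
}
```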

View file

@@ -1,7 +1,7 @@
 use std::sync::Arc;
 
 use crate::Result;
-use ruma::{DeviceId, UserId, identifiers::TransactionId};
+use ruma::{identifiers::TransactionId, DeviceId, UserId};
 
 use super::abstraction::Tree;

View file

@@ -42,9 +42,9 @@ use ruma::{
     events::{
         receipt::{ReceiptEvent, ReceiptEventContent},
         room::{
-            server_acl::RoomServerAclEventContent,
             create::RoomCreateEventContent,
             member::{MembershipState, RoomMemberEventContent},
+            server_acl::RoomServerAclEventContent,
         },
         AnyEphemeralRoomEvent, EventType,
     },
@@ -3491,20 +3491,17 @@ pub(crate) async fn fetch_join_signing_keys(
 }
 
 /// Returns Ok if the acl allows the server
-fn acl_check(
-    server_name: &ServerName,
-    room_id: &RoomId,
-    db: &Database,
-) -> Result<()> {
+fn acl_check(server_name: &ServerName, room_id: &RoomId, db: &Database) -> Result<()> {
     let acl_event = match db
         .rooms
-        .room_state_get(room_id, &EventType::RoomServerAcl, "")? {
-        Some(acl) => acl,
-        None => return Ok(()),
-    };
+        .room_state_get(room_id, &EventType::RoomServerAcl, "")?
+    {
+        Some(acl) => acl,
+        None => return Ok(()),
+    };
 
-    let acl_event_content: RoomServerAclEventContent = match
-        serde_json::from_str(acl_event.content.get()) {
+    let acl_event_content: RoomServerAclEventContent =
+        match serde_json::from_str(acl_event.content.get()) {
             Ok(content) => content,
             Err(_) => {
                 warn!("Invalid ACL event");
@@ -3515,7 +3512,10 @@ fn acl_check(
     if acl_event_content.is_allowed(server_name) {
         Ok(())
     } else {
-        Err(Error::BadRequest(ErrorKind::Forbidden, "Server was denied by ACL"))
+        Err(Error::BadRequest(
+            ErrorKind::Forbidden,
+            "Server was denied by ACL",
+        ))
     }
 }
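The reflowed `acl_check` keeps its behaviour: when the room has no server ACL state event there is nothing to enforce and the function returns `Ok(())` early; otherwise the event content decides whether the requesting server is allowed. A minimal, self-contained sketch of that control flow with hypothetical types follows (this is not the ruma API, only the shape of the check):

```rust
// Hypothetical stand-in for RoomServerAclEventContent.
struct ServerAcl {
    deny: Vec<String>,
}

impl ServerAcl {
    fn is_allowed(&self, server_name: &str) -> bool {
        !self.deny.iter().any(|denied| denied.as_str() == server_name)
    }
}

// Hypothetical stand-in for acl_check: the Option plays the role of the room-state lookup.
fn acl_check(server_name: &str, acl_state: Option<&ServerAcl>) -> Result<(), &'static str> {
    // No server ACL event in the room state: every server is allowed.
    let acl = match acl_state {
        Some(acl) => acl,
        None => return Ok(()),
    };

    if acl.is_allowed(server_name) {
        Ok(())
    } else {
        Err("Server was denied by ACL")
    }
}

fn main() {
    let acl = ServerAcl { deny: vec!["evil.example".to_owned()] };
    assert!(acl_check("matrix.org", Some(&acl)).is_ok());
    assert!(acl_check("evil.example", Some(&acl)).is_err());
    assert!(acl_check("other.example", None).is_ok());
}
```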