// The Construct
//
// Copyright (C) The Construct Developers, Authors & Contributors
// Copyright (C) 2016-2020 Jason Volk <jason@zemos.net>
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice is present in all copies. The
// full license for this software is available in the LICENSE file.

/// Residence of the events database instance pointer.
decltype(ircd::m::dbs::events)
ircd::m::dbs::events;

/// Coarse variable for enabling the uncompressed cache on the events database;
/// note this conf item is only effective by setting an environmental variable
/// before daemon startup. It has no effect in any other regard.
decltype(ircd::m::dbs::cache_enable)
ircd::m::dbs::cache_enable
{
    { "name",     "ircd.m.dbs.cache.enable" },
    { "default",  true                      },
};

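// Illustrative sketch (comments only, not part of the build): conf items like
// the one above convert to their underlying type at the point of use, so a
// hypothetical caller could gate cache-dependent setup roughly as:
//
//   if(bool(ircd::m::dbs::cache_enable))
//       open_with_uncompressed_block_cache();   // hypothetical helper
//
// As the doc comment notes, changing this item after startup has no effect;
// it must be set in the environment before the daemon starts.
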
/// Coarse variable for enabling the compressed cache on the events database;
/// note this conf item is only effective by setting an environmental variable
/// before daemon startup. It has no effect in any other regard.
decltype(ircd::m::dbs::cache_comp_enable)
ircd::m::dbs::cache_comp_enable
{
    { "name",     "ircd.m.dbs.cache.comp.enable" },
    { "default",  false                          },
};

/// Coarse toggle for the prefetch phase before the transaction building
/// handlers (indexers) are called. If this is false, prefetching will be
/// disabled; otherwise the write_opts passed to write() control whether
/// prefetching is enabled.
decltype(ircd::m::dbs::prefetch_enable)
ircd::m::dbs::prefetch_enable
{
    { "name",     "ircd.m.dbs.prefetch.enable" },
    { "default",  true                         },
};

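// Illustrative sketch (comments only): the conf item above is the global gate
// for the prefetch phase, while write_opts carries the per-call gate; write()
// below runs _prefetch() only when both are true. A caller can therefore skip
// prefetching for a single write without touching the conf item:
//
//   ircd::m::dbs::write_opts opts;
//   opts.prefetch = false;   // this write skips _prefetch() regardless of
//                            // ircd.m.dbs.prefetch.enable
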
/// The size of the memory buffer for new writes to the DB (backed by the WAL
/// on disk). When this buffer is full it is flushed to sorted SST files on
/// disk. If this is 0, a per-column value can be used; otherwise this value
/// takes precedence as a total value for all columns. (db_write_buffer_size)
decltype(ircd::m::dbs::mem_write_buffer_size)
ircd::m::dbs::mem_write_buffer_size
{
    { "name",     "ircd.m.dbs.mem.write_buffer_size" },
    { "default",  0L                                 },
};

/// Value determines the size of writes when creating SST files (i.e. during
/// compaction). Consider that write calls are yield-points for IRCd and the
/// time spent filling the write buffer between calls may hog the CPU doing
/// compression during that time etc. (writable_file_max_buffer_size)
decltype(ircd::m::dbs::sst_write_buffer_size)
ircd::m::dbs::sst_write_buffer_size
{
    {
        { "name",     "ircd.m.dbs.sst.write_buffer_size" },
        { "default",  long(1_MiB)                        },
    }, []
    {
        static const string_view key{"writable_file_max_buffer_size"};
        const size_t &value{sst_write_buffer_size};
        if(events && !events->slave)
            db::setopt(*events, key, lex_cast(value));
    }
};

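// Illustrative note (comments only): the second member of the aggregate above
// is an on-set callback; whenever the conf item changes, the lambda forwards
// the new value into the open database through db::setopt(). Assuming the
// generic conf setter interface, a runtime adjustment might look roughly like
//
//   ircd::conf::set("ircd.m.dbs.sst.write_buffer_size", "2097152");
//
// (the setter spelling is an assumption for illustration; the callback is
// what this unit actually relies on).
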
//
// init
//

/// Initializes the m::dbs subsystem; sets up the events database. Held/called
/// by m::init. Most of the extern variables in m::dbs are not ready until
/// this call completes.
///
/// We also update the fs::basepath for the database directory to include our
/// servername in the path component. The fs::base::DB setting was generated
/// during the build and install process, and is unaware of our servername
/// at runtime. This change deconflicts multiple instances of IRCd running in
/// the same installation prefix using different servernames (i.e. clustering
/// on the same machine).
///
ircd::m::dbs::init::init(const string_view &servername,
                         std::string dbopts)
:our_dbpath
{
    fs::path_string(fs::path_views
    {
        fs::base::db, servername
    })
}
,their_dbpath
{
    fs::base::db
}
{
    // NOTE that this is a global change that leaks outside of ircd::m. The
    // database directory for the entire process is being changed here.
    fs::base::db.set(our_dbpath);

    // Recall the db directory init manually with the now-updated basepath
    db::init::directory();

    // Open the events database
    static const string_view &dbname{"events"};
    events = std::make_shared<database>(dbname, std::move(dbopts), desc::events);

    // Cache the columns for the event tuple in order for constant time lookup
    assert(event_columns == event::size());
    std::array<string_view, event::size()> keys; //TODO: why did this happen?
    _key_transform(event{}, begin(keys), end(keys)); //TODO: how did this happen?

    // Construct global convenience references for the event property columns.
    for(size_t i(0); i < keys.size(); ++i)
        event_column.at(i) = db::column
        {
            *events, keys.at(i), std::nothrow
        };

    // Construct global convenience references for the metadata columns
    event_idx = db::column{*events, desc::event_idx.name};
    event_json = db::column{*events, desc::event_json.name};
    event_refs = db::domain{*events, desc::event_refs.name};
    event_horizon = db::domain{*events, desc::event_horizon.name};
    event_sender = db::domain{*events, desc::event_sender.name};
    event_type = db::domain{*events, desc::event_type.name};
    event_state = db::domain{*events, desc::event_state.name};
    room_head = db::domain{*events, desc::room_head.name};
    room_events = db::domain{*events, desc::room_events.name};
    room_type = db::domain{*events, desc::room_type.name};
    room_joined = db::domain{*events, desc::room_joined.name};
    room_state = db::domain{*events, desc::room_state.name};
    room_state_space = db::domain{*events, desc::room_state_space.name};
}

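// Illustrative note (comments only): after the constructor above completes,
// the process-wide database basepath has the servername appended. With an
// installed base of /var/db/construct and a servername of example.org (both
// hypothetical), the events database would live under something like
// /var/db/construct/example.org/events, keeping multiple servers in the same
// prefix from clobbering each other's data.
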
/// Shuts down the m::dbs subsystem; closes the events database. The extern
/// variables in m::dbs will no longer be functioning after this call.
ircd::m::dbs::init::~init()
noexcept
{
    // Unref DB (should close)
    events = {};

    // restore the fs::base::DB path the way we found it.
    fs::base::db.set(their_dbpath);
}

/// Cancels all background work by the events database. This will make the
/// database shutdown more fluid, without waiting for large compactions.
static const ircd::run::changed
ircd_m_dbs_handle_quit
{
    ircd::run::level::QUIT, []
    {
        if(ircd::m::dbs::events)
            ircd::db::bgcancel(*ircd::m::dbs::events, false); // non-blocking
    }
};

//
// write_opts
//

decltype(ircd::m::dbs::write_opts::event_refs_all)
ircd::m::dbs::write_opts::event_refs_all{[]
{
    char full[event_refs_all.size()];
    memset(full, '1', sizeof(full));
    return decltype(event_refs_all)
    {
        full, sizeof(full)
    };
}()};

decltype(ircd::m::dbs::write_opts::appendix_all)
ircd::m::dbs::write_opts::appendix_all{[]
{
    char full[appendix_all.size()];
    memset(full, '1', sizeof(full));
    return decltype(appendix_all)
    {
        full, sizeof(full)
    };
}()};

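// Illustrative note (comments only): the two masks above are built from a
// buffer of '1' characters via the bitset-style string constructor, yielding
// a mask with every bit set. A caller typically starts from one of these and
// clears what it does not want, e.g.
//
//   ircd::m::dbs::write_opts opts;
//   opts.appendix = ircd::m::dbs::write_opts::appendix_all;  // everything on
//   opts.appendix.reset(ircd::m::dbs::appendix::ROOM_HEAD);  // except this
//
// (the reset() call mirrors the test() usage in the indexers below and is
// shown here as an assumption).
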
//
// Basic write suite
//

namespace ircd::m::dbs
{
    static size_t _prefetch(db::txn &, const event &, const write_opts &);
    static size_t _index(db::txn &, const event &, const write_opts &);
    static size_t blacklist(db::txn &txn, const event::id &, const write_opts &);
}

size_t
ircd::m::dbs::write(db::txn &txn,
                    const event &event,
                    const write_opts &opts)
try
{
    if(opts.event_idx == 0 && opts.blacklist)
        return blacklist(txn, event.event_id, opts);

    if(unlikely(opts.event_idx == 0))
        throw panic
        {
            "Cannot write to database: no index specified for event."
        };

    size_t ret(0);
    if(prefetch_enable && opts.prefetch)
        ret = _prefetch(txn, event, opts);

    if(likely(opts.index))
        ret = _index(txn, event, opts);

    return ret;
}
catch(const std::exception &e)
{
    log::error
    {
        log, "Event %s txn building error :%s",
        string_view{event.event_id},
        e.what()
    };

    throw;
}

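// Illustrative usage sketch (comments only): a caller stages one event into a
// transaction against the events database and then applies it, roughly:
//
//   ircd::db::txn txn{*ircd::m::dbs::events};
//   ircd::m::dbs::write_opts opts;
//   opts.event_idx = next_idx;              // caller-assigned index (placeholder)
//   ircd::m::dbs::write(txn, event, opts);
//   txn();                                  // commit; exact interface assumed
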
size_t
ircd::m::dbs::blacklist(db::txn &txn,
                        const event::id &event_id,
                        const write_opts &opts)
{
    // An entry in the event_idx column with a value of 0 is a blacklisting,
    // because 0 is not a valid event_idx; thus the value written here is
    // always zero.
    assert(opts.event_idx == 0);
    assert(!event_id.empty());

    static const m::event::idx &zero_idx{0UL};
    static const byte_view<string_view> zero_value
    {
        zero_idx
    };

    db::txn::append
    {
        txn, event_idx,
        {
            opts.op,
            string_view{event_id},
            zero_value
        }
    };

    return true;
}

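// Illustrative note (comments only): a blacklisted event is later recognized
// by finding an entry for its event_id in the event_idx column whose value
// decodes to 0, which distinguishes "known bad" from "never seen" (no entry
// at all).
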
//
// Internal interface
//

namespace ircd::m::dbs
{
    static size_t _prefetch_room_redact(db::txn &, const event &, const write_opts &);
    static void _index_room_redact(db::txn &, const event &, const write_opts &);

    static size_t _prefetch_room(db::txn &, const event &, const write_opts &);
    static void _index_room(db::txn &, const event &, const write_opts &);

    static size_t _prefetch_event(db::txn &, const event &, const write_opts &);
    static void _index_event(db::txn &, const event &, const write_opts &);
}

size_t
ircd::m::dbs::_index(db::txn &txn,
                     const event &event,
                     const write_opts &opts)
{
    size_t ret(0);
    _index_event(txn, event, opts);

    if(json::get<"room_id"_>(event))
        _index_room(txn, event, opts);

    return ret;
}

size_t
ircd::m::dbs::_prefetch(db::txn &txn,
                        const event &event,
                        const write_opts &opts)
{
    size_t ret(0);
    ret += _prefetch_event(txn, event, opts);

    if(json::get<"room_id"_>(event))
        ret += _prefetch_room(txn, event, opts);

    return ret;
}

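// Illustrative note (comments only): the two dispatchers above form a
// two-phase write. _prefetch() issues prefetch requests for the rows the
// indexers will need, so that by the time _index() runs its queries the data
// is ideally already cached and the transaction builders avoid stalling the
// calling context on disk I/O.
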
void
ircd::m::dbs::_index_event(db::txn &txn,
                           const event &event,
                           const write_opts &opts)
{
    if(opts.appendix.test(appendix::EVENT_ID))
        _index_event_id(txn, event, opts);

    if(opts.appendix.test(appendix::EVENT_COLS))
        _index_event_cols(txn, event, opts);

    if(opts.appendix.test(appendix::EVENT_JSON))
        _index_event_json(txn, event, opts);

    if(opts.appendix.test(appendix::EVENT_SENDER))
        _index_event_sender(txn, event, opts);

    if(opts.appendix.test(appendix::EVENT_TYPE))
        _index_event_type(txn, event, opts);

    if(opts.appendix.test(appendix::EVENT_STATE))
        _index_event_state(txn, event, opts);

    if(opts.appendix.test(appendix::EVENT_REFS) && opts.event_refs.any())
        _index_event_refs(txn, event, opts);

    if(opts.appendix.test(appendix::EVENT_HORIZON_RESOLVE) && opts.horizon_resolve.any())
        _index_event_horizon_resolve(txn, event, opts);
}

size_t
ircd::m::dbs::_prefetch_event(db::txn &txn,
                              const event &event,
                              const write_opts &opts)
{
    size_t ret(0);
    if(opts.appendix.test(appendix::EVENT_ID))
        ;//ret += _prefetch_event_id(txn, event, opts);

    if(opts.appendix.test(appendix::EVENT_COLS))
        ;//ret += _prefetch_event_cols(txn, event, opts);

    if(opts.appendix.test(appendix::EVENT_JSON))
        ;//ret += _prefetch_event_json(txn, event, opts);

    if(opts.appendix.test(appendix::EVENT_SENDER))
        ;//ret += _prefetch_event_sender(txn, event, opts);

    if(opts.appendix.test(appendix::EVENT_TYPE))
        ;//ret += _prefetch_event_type(txn, event, opts);

    if(opts.appendix.test(appendix::EVENT_STATE))
        ;//ret += _prefetch_event_state(txn, event, opts);

    if(opts.appendix.test(appendix::EVENT_REFS) && opts.event_refs.any())
        ret += _prefetch_event_refs(txn, event, opts);

    if(opts.appendix.test(appendix::EVENT_HORIZON_RESOLVE) && opts.horizon_resolve.any())
        ret += _prefetch_event_horizon_resolve(txn, event, opts);

    return ret;
}

void
ircd::m::dbs::_index_room(db::txn &txn,
                          const event &event,
                          const write_opts &opts)
{
    assert(!empty(json::get<"room_id"_>(event)));

    if(opts.appendix.test(appendix::ROOM_EVENTS))
        _index_room_events(txn, event, opts);

    if(opts.appendix.test(appendix::ROOM_TYPE))
        _index_room_type(txn, event, opts);

    if(opts.appendix.test(appendix::ROOM_HEAD))
        _index_room_head(txn, event, opts);

    if(opts.appendix.test(appendix::ROOM_HEAD_RESOLVE))
        _index_room_head_resolve(txn, event, opts);

    if(defined(json::get<"state_key"_>(event)))
    {
        if(opts.appendix.test(appendix::ROOM_STATE))
            _index_room_state(txn, event, opts);

        if(opts.appendix.test(appendix::ROOM_STATE_SPACE))
            _index_room_state_space(txn, event, opts);

        if(opts.appendix.test(appendix::ROOM_JOINED) && at<"type"_>(event) == "m.room.member")
            _index_room_joined(txn, event, opts);
    }

    if(opts.appendix.test(appendix::ROOM_REDACT) && json::get<"type"_>(event) == "m.room.redaction")
        _index_room_redact(txn, event, opts);
}

size_t
ircd::m::dbs::_prefetch_room(db::txn &txn,
                             const event &event,
                             const write_opts &opts)
{
    assert(!empty(json::get<"room_id"_>(event)));

    size_t ret(0);
    if(opts.appendix.test(appendix::ROOM_EVENTS))
        ;//ret += _prefetch_room_events(txn, event, opts);

    if(opts.appendix.test(appendix::ROOM_TYPE))
        ;//ret += _prefetch_room_type(txn, event, opts);

    if(opts.appendix.test(appendix::ROOM_HEAD))
        ;//ret += _prefetch_room_head(txn, event, opts);

    if(opts.appendix.test(appendix::ROOM_HEAD_RESOLVE))
        ;//ret += _prefetch_room_head_resolve(txn, event, opts);

    if(defined(json::get<"state_key"_>(event)))
    {
        if(opts.appendix.test(appendix::ROOM_STATE))
            ;//ret += _prefetch_room_state(txn, event, opts);

        if(opts.appendix.test(appendix::ROOM_STATE_SPACE))
            ;//ret += _prefetch_room_state_space(txn, event, opts);

        if(opts.appendix.test(appendix::ROOM_JOINED) && at<"type"_>(event) == "m.room.member")
            ;//ret += _prefetch_room_joined(txn, event, opts);
    }

    if(opts.appendix.test(appendix::ROOM_REDACT) && json::get<"type"_>(event) == "m.room.redaction")
        ret += _prefetch_room_redact(txn, event, opts);

    return ret;
}

// NOTE: QUERY
void
ircd::m::dbs::_index_room_redact(db::txn &txn,
                                 const event &event,
                                 const write_opts &opts)
{
    assert(opts.appendix.test(appendix::ROOM_REDACT));
    assert(json::get<"type"_>(event) == "m.room.redaction");

    const auto &target_id
    {
        at<"redacts"_>(event)
    };

    const m::event::idx target_idx
    {
        find_event_idx(target_id, opts)
    };

    if(unlikely(!target_idx))
    {
        log::dwarning
        {
            "Redaction from '%s' missing redaction target '%s'",
            string_view{event.event_id},
            target_id
        };

        if(opts.appendix.test(appendix::EVENT_HORIZON))
            _index_event_horizon(txn, event, opts, target_id);

        return;
    }

    char state_key_buf[event::STATE_KEY_MAX_SIZE];
    const string_view &state_key
    {
        m::get(std::nothrow, target_idx, "state_key", state_key_buf)
    };

    if(!state_key)
        return;

    char type_buf[event::TYPE_MAX_SIZE];
    const string_view &type
    {
        m::get(std::nothrow, target_idx, "type", type_buf)
    };

    assert(!empty(type));
    const ctx::critical_assertion ca;
    thread_local char buf[ROOM_STATE_SPACE_KEY_MAX_SIZE];
    const string_view &key
    {
        room_state_key(buf, at<"room_id"_>(event), type, state_key)
    };

    db::txn::append
    {
        txn, room_state,
        {
            db::op::DELETE,
            key,
        }
    };
}

size_t
ircd::m::dbs::_prefetch_room_redact(db::txn &txn,
                                    const event &event,
                                    const write_opts &opts)
{
    assert(opts.appendix.test(appendix::ROOM_REDACT));
    assert(json::get<"type"_>(event) == "m.room.redaction");

    const auto &target_id
    {
        at<"redacts"_>(event)
    };

    // If the prefetch was launched we can't do anything more here.
    if(prefetch_event_idx(target_id, opts))
        return 1;

    // If the result is cached we can peek at it for more prefetches.
    const m::event::idx target_idx
    {
        find_event_idx(target_id, opts)
    };

    if(unlikely(!target_idx))
        return 0;

    size_t ret(0);
    ret += m::prefetch(target_idx, "state_key");
    ret += m::prefetch(target_idx, "type");
    return ret;
}

// NOTE: QUERY
size_t
ircd::m::dbs::find_event_idx(const vector_view<event::idx> &idx,
                             const vector_view<const event::id> &event_id,
                             const write_opts &wopts)
{
    const size_t num
    {
        std::min(idx.size(), event_id.size())
    };

    size_t ret(0);
    if(wopts.interpose)
        for(size_t i(0); i < num; ++i)
        {
            idx[i] = wopts.interpose->val(db::op::SET, "_event_idx", event_id[i], 0UL);
            assert(!idx[i] || idx[i] >= vm::sequence::retired);
            ret += idx[i] != 0;
        }

    // Taken when everything satisfied by interpose
    if(ret == num || !wopts.allow_queries)
        return ret;

    // Only do parallel m::index() if there are no results from the prior
    // queries; they'll get clobbered by the parallel m::index().
    if(likely(!ret))
        return m::index(idx, event_id);

    // Fallback to serial queries.
    for(size_t i(0); i < num; ++i)
    {
        idx[i] = m::index(std::nothrow, event_id[i]);
        ret += idx[i] != 0;
    }

    return ret;
}

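// Illustrative usage sketch (comments only): the batch overload above
// resolves several event_id -> event_idx mappings at once, letting indexers
// avoid one lookup per reference, roughly:
//
//   std::array<ircd::m::event::idx, 2> idx;
//   const std::array<ircd::m::event::id, 2> ids{prev_id, auth_id};  // placeholders
//   const size_t found
//   {
//       ircd::m::dbs::find_event_idx(idx, ids, opts)   // unresolved entries stay 0
//   };
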
size_t
ircd::m::dbs::prefetch_event_idx(const vector_view<const event::id> &event_id,
                                 const write_opts &wopts)
{
    size_t ret(0);
    for(size_t i(0); i < event_id.size(); ++i)
    {
        if(wopts.interpose)
            if(wopts.interpose->has(db::op::SET, "_event_idx", event_id[i]))
                continue;

        if(wopts.allow_queries)
            ret += m::prefetch(event_id[i], "_event_idx");
    }

    return ret;
}