
ircd::m::vm::eval: Improve mfetch_keys related during eval.

Jason Volk 2020-12-10 02:15:26 -08:00
parent f8ad44b16a
commit d8115cccc9
3 changed files with 57 additions and 81 deletions


@@ -22,6 +22,7 @@ namespace ircd::m::vm
    string_view loghead(const mutable_buffer &, const eval &);
    string_view loghead(const eval &); // single tls buffer
    size_t fetch_keys(const eval &);
}

/// Event Evaluation Device
@@ -63,8 +64,6 @@ struct ircd::m::vm::eval
    vm::phase phase {vm::phase(0)};
    bool room_internal {false};

    void mfetch_keys() const;

  public:
    operator const event::id::buf &() const
    {

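The header hunks above replace the per-eval member mfetch_keys() with a namespace-level fetch_keys(const eval &) that returns how many keys were fetched, leaving the caller to decide when a prefetch is worthwhile. A minimal sketch of the intended call-site shape, mirroring the execute() hunk further down; it assumes ircd's headers and the opts/pdus members shown in this diff, and the wrapper function name is hypothetical:

// Sketch only: gate the batched key prefetch the way execute() now does.
// maybe_prefetch_keys() is a hypothetical wrapper, not part of this commit.
static size_t
maybe_prefetch_keys(const ircd::m::vm::eval &eval)
{
    using namespace ircd::m::vm;

    assert(eval.opts);
    const auto &opts(*eval.opts);

    // Only worth a parallel prefetch when verifying several events at once.
    const bool prefetch
    {
        opts.phase[phase::VERIFY]
        && opts.mfetch_keys
        && eval.pdus.size() > 1
    };

    // Returns the number of missing signing keys actually fetched.
    return prefetch? fetch_keys(eval): 0UL;
}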

@@ -37,6 +37,39 @@ ircd::m::vm::eval::executing;
decltype(ircd::m::vm::eval::injecting)
ircd::m::vm::eval::injecting;

size_t
ircd::m::vm::fetch_keys(const eval &eval)
{
    using m::fed::key::server_key;

    assert(eval.opts);
    const auto &opts
    {
        *eval.opts
    };

    std::set<server_key> miss;
    for(const auto &event : eval.pdus)
        for(const auto &[server_name, signatures] : at<"signatures"_>(event))
            for(const auto &[key_id, signature] : json::object(signatures))
                if(!m::keys::cache::has(json::get<"origin"_>(event), key_id))
                    miss.emplace(json::get<"origin"_>(event), key_id);

    const std::vector<server_key> queries
    (
        begin(miss), end(miss)
    );

    const size_t fetched
    {
        !queries.empty()?
            m::keys::fetch(queries):
            0UL
    };

    return fetched;
}

ircd::string_view
ircd::m::vm::loghead(const eval &eval)
{
@@ -412,68 +445,3 @@ ircd::m::vm::eval::for_each(const std::function<bool (eval &)> &closure)
    return true;
}

void
ircd::m::vm::eval::mfetch_keys()
const
{
    using m::fed::key::server_key;

    // Determine federation keys which we don't have.
    std::set<server_key> miss;
    for(const auto &event : this->pdus)
    {
        assert(opts);
        const auto &origin
        {
            json::get<"origin"_>(event)?
                string_view{json::get<"origin"_>(event)}:
                m::user::id{json::get<"sender"_>(event)}.host()
        };

        // When the node_id is set (eval on behalf of remote) we only parallel
        // fetch keys from that node for events from that node. This is to
        // prevent amplification. Note that these will still be evaluated and
        // key fetching may be attempted, but not here.
        if(opts->node_id && opts->node_id != origin)
            continue;

        for(const auto &[server_name, signatures] : at<"signatures"_>(event))
            for(const auto &[key_id, signature] : json::object(signatures))
                if(!m::keys::cache::has(origin, key_id))
                    miss.emplace(origin, key_id);
    }

    if(miss.empty())
        return;

    log::debug
    {
        log, "%s fetching %zu new keys from %zu events...",
        loghead(*this),
        miss.size(),
        this->pdus.size(),
    };

    const std::vector<server_key> queries
    (
        begin(miss), end(miss)
    );

    const size_t fetched
    {
        m::keys::fetch(queries)
    };

    if(!fetched)
        return;

    log::info
    {
        log, "%s fetched %zu of %zu new keys from %zu events",
        loghead(*this),
        fetched,
        miss.size(),
        this->pdus.size(),
    };
}

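Both the removed eval::mfetch_keys() above and the new vm::fetch_keys() share the same core pattern: walk each event's signatures object, record every (origin, key_id) pair whose key is not already cached, deduplicate those pairs in a std::set, and issue one batched fetch for the remainder. The removed member additionally fell back to the sender's host when the origin field was empty, skipped origins other than opts->node_id to avoid amplification, and logged its progress; the replacement leaves those concerns to the verification phase proper. A self-contained sketch of the dedupe-and-batch pattern with stand-in types; Event, key_cached() and batch_fetch() are hypothetical placeholders for ircd's event tuple, m::keys::cache::has() and m::keys::fetch():

// Standalone illustration of the prefetch pattern; not ircd code.
#include <map>
#include <set>
#include <string>
#include <utility>
#include <vector>

// Stand-in for an event's signature block: server name -> { key id -> signature }.
struct Event
{
    std::string origin;
    std::map<std::string, std::map<std::string, std::string>> signatures;
};

// (server name, key id); std::set both orders and deduplicates these.
using server_key = std::pair<std::string, std::string>;

// Placeholders for m::keys::cache::has() and m::keys::fetch().
static bool key_cached(const server_key &)
{
    return false; // pretend nothing is cached
}

static size_t batch_fetch(const std::vector<server_key> &queries)
{
    return queries.size(); // pretend every query succeeded
}

// Collect every signing key missing across a batch of events and fetch them
// in one request rather than once per event.
static size_t prefetch_missing_keys(const std::vector<Event> &events)
{
    std::set<server_key> miss;
    for(const auto &event : events)
        for(const auto &[server_name, keys] : event.signatures)
            for(const auto &[key_id, signature] : keys)
                if(!key_cached({event.origin, key_id}))
                    miss.emplace(event.origin, key_id);

    if(miss.empty())
        return 0;

    // One batched query covering all distinct misses, as in the diff.
    const std::vector<server_key> queries(begin(miss), end(miss));
    return batch_fetch(queries);
}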

@@ -197,9 +197,29 @@ ircd::m::vm::execute(eval &eval,
        eval.pdus, events
    };

    if(likely(opts.phase[phase::VERIFY] && opts.mfetch_keys))
        if(events.size() > 1)
            eval.mfetch_keys();

    const scope_count executing
    {
        eval::executing
    };

    const scope_restore eval_phase
    {
        eval.phase, phase::EXECUTE
    };

    const bool prefetch_keys
    {
        opts.phase[phase::VERIFY]
        && opts.mfetch_keys
        && events.size() > 1
    };

    const size_t prefetched_keys
    {
        prefetch_keys?
            fetch_keys(eval):
            0UL
    };

    size_t accepted(0), existed(0), i, j, k;
    for(i = 0; i < events.size(); i += j)
@@ -266,17 +286,6 @@ try
    // danger close; try increasing your stack size.
    const ctx::stack_usage_assertion sua;

    // m::vm bookkeeping that someone entered this function
    const scope_count executing
    {
        eval::executing
    };

    const scope_restore eval_phase
    {
        eval.phase, phase::EXECUTE
    };

    const scope_notify notify
    {
        vm::dock
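The execute() hunks above move the scope_count/scope_restore guards out of the inner try block to the top of the function, so the eval is already counted in eval::executing and reported in phase::EXECUTE while the batched key prefetch runs. A minimal sketch of what such RAII guards typically do, written as illustrative stand-ins rather than ircd's actual utilities:

// Illustrative stand-ins; ircd's scope_count and scope_restore may differ in detail.
#include <cstddef>
#include <utility>

// Increments a counter for the lifetime of the guard, decrements on scope exit.
struct scope_count_sketch
{
    size_t &counter;

    explicit scope_count_sketch(size_t &c)
    :counter{c}
    {
        ++counter;
    }

    ~scope_count_sketch()
    {
        --counter;
    }
};

// Assigns a replacement value and restores the original when the scope exits.
template<class T>
struct scope_restore_sketch
{
    T &target;
    T saved;

    scope_restore_sketch(T &t, T replacement)
    :target{t}
    ,saved{std::move(t)}
    {
        target = std::move(replacement);
    }

    ~scope_restore_sketch()
    {
        target = std::move(saved);
    }
};

Declared at the top of execute(), guards like these keep the counter and phase maintained for the whole call, including the prefetch, and unwind correctly on any exception.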