0
0
Fork 0
mirror of https://github.com/matrix-construct/construct synced 2024-06-09 21:48:55 +02:00

ircd::m::fetch: Add future result interface; refactor eval out of flow. (fixes #103) (closes #131)

modules: Split m_vm_fetch from m_fetch.
This commit is contained in:
Jason Volk 2019-08-26 15:12:43 -07:00
parent 13e2d56850
commit c57bc9077c
8 changed files with 497 additions and 591 deletions

View file

@ -17,6 +17,7 @@
///
namespace ircd::m::fetch
{
struct result;
struct request;
// Observers
@ -24,16 +25,22 @@ namespace ircd::m::fetch
bool exists(const m::event::id &);
size_t count();
// Control panel
bool start(const m::room::id &, const m::event::id &);
bool start(const room &);
bool cancel(request &);
size_t clear();
// Primary operations
ctx::future<result> start(const m::room::id &, const m::event::id &);
// Composed operations
void auth_chain(const room &, const net::hostport &);
}
/// Completed fetch. The result is an m::event view whose underlying JSON is
/// backed by the owned buffer below; it is delivered to callers through the
/// ctx::future<result> returned by fetch::start().
struct ircd::m::fetch::result
:m::event
{
// Owns the storage the m::event superclass points into (moved out of the
// request's network input buffer); must outlive any use of the event view.
unique_buffer<mutable_buffer> buf;
// Constructs the result by taking ownership of the request's buffer.
result(request &);
result() = default;
};
/// Fetch entity state. This is not meant for construction by users of this
/// interface.
struct ircd::m::fetch::request
@ -43,6 +50,7 @@ struct ircd::m::fetch::request
m::room::id::buf room_id;
m::event::id::buf event_id;
ctx::promise<result> promise;
unique_buffer<mutable_buffer> buf;
std::set<std::string, std::less<>> attempted;
string_view origin;

View file

@ -332,6 +332,7 @@ ircd::m::module_names
"web_root",
"web_hook",
"m_listen",
"m_vm_fetch",
"m_vm",
"m_init_backfill",
"stats",

View file

@ -98,7 +98,6 @@ endif IMAGEMAGICK
m_moduledir = @moduledir@
m_vm_la_SOURCES = m_vm.cc
m_noop_la_SOURCES = m_noop.cc
m_event_la_SOURCES = m_event.cc
m_event_append_la_SOURCES = m_event_append.cc
@ -154,9 +153,10 @@ m_init_bootstrap_la_SOURCES = m_init_bootstrap.cc
m_init_backfill_la_SOURCES = m_init_backfill.cc
m_listen_la_SOURCES = m_listen.cc
m_users_la_SOURCES = m_users.cc
m_vm_la_SOURCES = m_vm.cc
m_vm_fetch_la_SOURCES = m_vm_fetch.cc
m_module_LTLIBRARIES = \
m_vm.la \
m_noop.la \
m_event.la \
m_event_append.la \
@ -212,6 +212,8 @@ m_module_LTLIBRARIES = \
m_init_backfill.la \
m_listen.la \
m_users.la \
m_vm.la \
m_vm_fetch.la \
###
###############################################################################

View file

@ -6978,7 +6978,7 @@ console_cmd__event__horizon__flush(opt &out, const string_view &line)
if(!room_id)
return true;
m::fetch::start(room_id, event_id);
//m::fetch::start(room_id, event_id);
++count;
//TODO: XXX
@ -13767,33 +13767,30 @@ console_cmd__fetch(opt &out, const string_view &line)
const m::event::id &event_id
{
param["event_id"]?
m::event::id{param["event_id"]}:
m::event::id{}
m::event::id{param.at("event_id")}
};
if(!event_id)
auto future
{
if(!m::fetch::start(room_id))
{
out << "failed to start for "
<< room_id
<< std::endl;
m::fetch::start(room_id, event_id)
};
return true;
}
}
else if(!m::fetch::start(room_id, event_id))
const auto result
{
out << "failed to start for "
<< event_id << " in "
<< room_id
<< std::endl;
future.get()
};
return true;
}
out << "Received "
<< event_id << " in "
<< room_id
<< std::endl
<< std::endl
;
out << m::event{result}
<< std::endl
;
out << "starting..." << std::endl;
return true;
}
@ -13820,18 +13817,6 @@ console_cmd__fetch__list(opt &out, const string_view &line)
return true;
}
bool
console_cmd__fetch__clear(opt &out, const string_view &line)
{
const size_t cleared
{
m::fetch::clear()
};
out << "Cleared " << cleared << std::endl;
return true;
}
//
// synchron
//

View file

@ -50,36 +50,21 @@ ircd::m::fetch::requests_max
{ "default", 256L },
};
decltype(ircd::m::fetch::hook)
ircd::m::fetch::hook
{
hook_handle,
{
{ "_site", "vm.fetch" }
}
};
decltype(ircd::m::fetch::request_context)
ircd::m::fetch::request_context
{
"m.fetch.req", 512_KiB, &request_worker, context::POST
};
decltype(ircd::m::fetch::eval_context)
ircd::m::fetch::eval_context
{
"m.fetch.eval", 512_KiB, &eval_worker, context::POST
};
decltype(ircd::m::fetch::complete)
ircd::m::fetch::complete;
decltype(ircd::m::fetch::rooms)
ircd::m::fetch::rooms;
decltype(ircd::m::fetch::requests)
ircd::m::fetch::requests;
decltype(ircd::m::fetch::requests_mutex)
ircd::m::fetch::requests_mutex;
decltype(ircd::m::fetch::dock)
ircd::m::fetch::dock;
@ -95,246 +80,11 @@ ircd::m::fetch::init()
void
ircd::m::fetch::fini()
{
clear();
request_context.terminate();
eval_context.terminate();
request_context.join();
eval_context.join();
requests.clear();
complete.clear();
assert(requests.empty());
assert(complete.empty());
}
//
// fetch_phase
//
void
ircd::m::fetch::hook_handle(const event &event,
vm::eval &eval)
try
{
assert(eval.opts);
assert(eval.opts->fetch);
const auto &opts{*eval.opts};
const auto &type
{
at<"type"_>(event)
};
if(type == "m.room.create")
return;
const m::event::id &event_id
{
event.event_id
};
const m::room::id &room_id
{
at<"room_id"_>(event)
};
// Can't construct m::room with the event_id argument because it
// won't be found (we're evaluating that event here!) so we just set
// the member manually to make further use of the room struct.
m::room room{room_id};
room.event_id = event_id;
evaltab tab;
if(opts.fetch_auth_check)
hook_handle_auth(event, eval, tab, room);
if(opts.fetch_prev_check)
hook_handle_prev(event, eval, tab, room);
log::debug
{
log, "%s %s ac:%zu ae:%zu pc:%zu pe:%zu pf:%zu",
loghead(eval),
json::get<"room_id"_>(event),
tab.auth_count,
tab.auth_exists,
tab.prev_count,
tab.prev_exists,
tab.prev_fetched,
};
}
catch(const std::exception &e)
{
log::derror
{
log, "%s :%s",
loghead(eval),
e.what(),
};
throw;
}
void
ircd::m::fetch::hook_handle_auth(const event &event,
vm::eval &eval,
evaltab &tab,
const room &room)
{
// Count how many of the auth_events provided exist locally.
const auto &opts{*eval.opts};
const event::prev prev{event};
tab.auth_count = prev.auth_events_count();
for(size_t i(0); i < tab.auth_count; ++i)
{
const auto &auth_id
{
prev.auth_event(i)
};
tab.auth_exists += bool(m::exists(auth_id));
}
// We are satisfied at this point if all auth_events for this event exist,
// as those events have themselves been successfully evaluated and so forth.
assert(tab.auth_exists <= tab.auth_count);
if(tab.auth_exists == tab.auth_count)
return;
// At this point we are missing one or more auth_events for this event.
log::dwarning
{
log, "%s auth_events:%zu hit:%zu miss:%zu",
loghead(eval),
tab.auth_count,
tab.auth_exists,
tab.auth_count - tab.auth_exists,
};
// We need to figure out where best to sling a request to fetch these
// missing auth_events. We prefer the remote client conducting this eval
// with their /federation/send/ request which we stored in the opts.
const string_view &remote
{
opts.node_id?
opts.node_id:
!my_host(json::get<"origin"_>(event))?
string_view(json::get<"origin"_>(event)):
!my_host(room.room_id.host())? //TODO: XXX
room.room_id.host():
string_view{}
};
// Bail out here if we can't or won't attempt fetching auth_events.
if(!opts.fetch_auth || !bool(m::fetch::enable) || !remote)
throw vm::error
{
vm::fault::EVENT, "Failed to fetch auth_events for %s in %s",
string_view{event.event_id},
json::get<"room_id"_>(event)
};
// This is a blocking call to recursively fetch and evaluate the auth_chain
// for this event. Upon return all of the auth_events for this event will
// have themselves been fetched and auth'ed recursively or throws.
auth_chain(room, remote);
tab.auth_exists = tab.auth_count;
}
void
ircd::m::fetch::hook_handle_prev(const event &event,
vm::eval &eval,
evaltab &tab,
const room &room)
{
const auto &opts{*eval.opts};
const event::prev prev{event};
tab.prev_count = prev.prev_events_count();
for(size_t i(0); i < tab.prev_count; ++i)
{
const auto &prev_id
{
prev.prev_event(i)
};
if(m::exists(prev_id))
{
++tab.prev_exists;
continue;
}
const bool can_fetch
{
opts.fetch_prev && bool(m::fetch::enable)
};
const bool fetching
{
can_fetch && start(room.room_id, prev_id)
};
tab.prev_fetching += fetching;
}
// If we have all of the referenced prev_events we are satisfied here.
assert(tab.prev_exists <= tab.prev_count);
if(tab.prev_exists == tab.prev_count)
return;
// At this point one or more prev_events are missing; the fetches were
// launched asynchronously if the options allowed for it.
log::dwarning
{
log, "%s prev_events:%zu hit:%zu miss:%zu fetching:%zu",
loghead(eval),
tab.prev_count,
tab.prev_exists,
tab.prev_count - tab.prev_exists,
tab.prev_fetching,
};
// If the options want to wait for the fetch+evals of the prev_events to occur
// before we continue processing this event further, we block in here.
const bool &prev_wait{opts.fetch_prev_wait};
if(prev_wait && tab.prev_fetching) for(size_t i(0); i < tab.prev_count; ++i)
{
const auto &prev_id
{
prev.prev_event(i)
};
dock.wait([&prev_id]
{
return !requests.count(prev_id);
});
tab.prev_fetched += m::exists(prev_id);
}
// Aborts this event if the options want us to guarantee at least one
// prev_event was fetched and evaluated for this event. This is generally
// used in conjunction with the fetch_prev_wait option to be effective.
const bool &prev_any{opts.fetch_prev_any};
if(prev_any && tab.prev_exists + tab.prev_fetched == 0)
throw vm::error
{
vm::fault::EVENT, "Failed to fetch any prev_events for %s in %s",
string_view{event.event_id},
json::get<"room_id"_>(event)
};
// Aborts this event if the options want us to guarantee ALL of the
// prev_events were fetched and evaluated for this event.
const bool &prev_all{opts.fetch_prev_all};
if(prev_all && tab.prev_exists + tab.prev_fetched < tab.prev_count)
throw vm::error
{
vm::fault::EVENT, "Failed to fetch all %zu required prev_events for %s in %s",
tab.prev_count,
string_view{event.event_id},
json::get<"room_id"_>(event)
};
}
///////////////////////////////////////////////////////////////////////////////
@ -411,71 +161,7 @@ catch(const std::exception &e)
throw;
}
size_t
IRCD_MODULE_EXPORT
ircd::m::fetch::clear()
{
size_t ret{0};
for_each([&ret](auto &request)
{
ret += cancel(request);
return true;
});
return ret;
}
bool
IRCD_MODULE_EXPORT
ircd::m::fetch::cancel(request &request)
{
bool ret{false};
if(request.finished == -1)
return ret;
if(request.finished == 0)
{
assert(request.started);
ret |= server::cancel(request);
}
request.finished = -1;
return ret;
}
bool
IRCD_MODULE_EXPORT
ircd::m::fetch::start(const m::room &room)
{
if(room.event_id)
return start(room.room_id, room.event_id);
feds::opts opts;
opts.op = feds::op::head;
opts.room_id = room.room_id;
opts.closure_errors = false;
feds::execute
{
opts, [](const auto &result)
{
const json::object &event
{
result.object["event"]
};
return m::for_each(event::prev(event), [&result]
(const event::id &event_id)
{
start(result.request->room_id, event_id);
return true;
});
}
};
return true;
}
bool
ircd::ctx::future<ircd::m::fetch::result>
IRCD_MODULE_EXPORT
ircd::m::fetch::start(const m::room::id &room_id,
const m::event::id &event_id)
@ -495,8 +181,10 @@ ircd::m::fetch::start(const m::room::id &room_id,
reflect(run::level)
};
if(count() > size_t(requests_max))
return false;
dock.wait([]
{
return count() < size_t(requests_max);
});
return submit(event_id, room_id);
}
@ -532,46 +220,43 @@ ircd::m::fetch::for_each(const std::function<bool (request &)> &closure)
//
template<class... args>
bool
ircd::ctx::future<ircd::m::fetch::result>
ircd::m::fetch::submit(const m::event::id &event_id,
const m::room::id &room_id,
const size_t &bufsz,
args&&... a)
try
{
assert(room_id && event_id);
std::unique_lock lock
{
requests_mutex
};
const scope_notify dock
{
fetch::dock
};
auto it(requests.lower_bound(string_view(event_id)));
if(it != end(requests) && it->event_id == event_id)
{
assert(it->room_id == room_id);
return false;
return ctx::future<result>{}; //TODO: shared_future.
}
it = requests.emplace_hint(it, room_id, event_id, bufsz, std::forward<args>(a)...);
auto &request(const_cast<fetch::request &>(*it)); try
auto &request
{
while(!start(request))
request.origin = {};
}
catch(const std::exception &e)
{
fetch::cancel(request);
throw;
}
return true;
}
catch(const std::exception &e)
{
log::error
{
log, "Failed to start any fetch for %s in %s :%s",
string_view{event_id},
string_view{room_id},
e.what(),
const_cast<fetch::request &>(*it)
};
return false;
ctx::future<result> ret
{
request.promise
};
start(request);
return ret;
}
//
@ -587,18 +272,12 @@ try
dock.wait([]
{
return std::any_of(begin(requests), end(requests), []
(const request &r)
(const auto &request)
{
return r.finished <= 0;
return request.started || request.finished;
});
});
if(request_cleanup())
continue;
if(requests.empty())
continue;
request_handle();
}
}
@ -613,57 +292,30 @@ catch(const std::exception &e)
throw;
}
size_t
ircd::m::fetch::request_cleanup()
{
// assert that there is no race starting from here.
const ctx::critical_assertion ca;
size_t ret(0);
auto it(begin(requests));
while(it != end(requests))
{
if(it->finished == -1)
{
it = requests.erase(it);
++ret;
}
else ++it;
}
return ret;
}
void
ircd::m::fetch::request_handle()
{
std::unique_lock lock
{
requests_mutex
};
if(requests.empty())
return;
auto next
{
ctx::when_any(requests.begin(), requests.end())
};
if(!next.wait(seconds(timeout), std::nothrow))
bool timeout{true};
{
const auto now(ircd::time());
for(auto it(begin(requests)); it != end(requests); ++it)
{
auto &request(const_cast<fetch::request &>(*it));
if(request.finished < 0 || request.last == std::numeric_limits<time_t>::max())
continue;
if(request.finished == 0 && timedout(request, now))
{
retry(request);
continue;
}
if(request.finished > 0 && timedout(request, now))
{
request.finished = -1;
continue;
}
}
const unlock_guard unlock{lock};
timeout = !next.wait(seconds(timeout), std::nothrow);
};
if(timeout)
{
request_cleanup();
return;
}
@ -682,147 +334,56 @@ ircd::m::fetch::request_handle()
void
ircd::m::fetch::request_handle(const decltype(requests)::iterator &it)
try
{
auto &request
{
const_cast<fetch::request &>(*it)
};
if(!request.started || !request.last || request.finished < 0)
return;
if(!request.finished && !handle(request))
return;
assert(request.finished);
if(request.eptr)
{
request.finished = -1;
std::rethrow_exception(request.eptr);
__builtin_unreachable();
}
assert(!request.eptr);
request.last = std::numeric_limits<time_t>::max();
complete.emplace_back(it);
}
catch(const std::exception &e)
{
log::error
{
log, "request %s in %s :%s",
string_view{it->event_id},
string_view{it->room_id},
e.what()
};
requests.erase(it);
}
//
// eval worker
//
void
ircd::m::fetch::eval_worker()
try
size_t
ircd::m::fetch::request_cleanup()
{
while(1)
size_t ret(0);
const auto now(ircd::time());
for(auto it(begin(requests)); it != end(requests); ++it)
{
dock.wait([]
auto &request
{
return !complete.empty();
});
const_cast<fetch::request &>(*it)
};
eval_handle();
if(!request.started)
{
start(request);
continue;
}
if(!request.finished && timedout(request, now))
retry(request);
}
}
catch(const std::exception &e)
{
log::critical
for(auto it(begin(requests)); it != end(requests); )
{
log, "fetch eval worker :%s",
e.what()
};
auto &request
{
const_cast<fetch::request &>(*it)
};
throw;
}
if(request.finished)
{
it = requests.erase(it);
++ret;
}
else ++it;
}
void
ircd::m::fetch::eval_handle()
{
assert(!complete.empty());
const unwind pop{[]
{
assert(!complete.empty());
complete.pop_front();
}};
const auto it
{
complete.front()
};
eval_handle(it);
}
void
ircd::m::fetch::eval_handle(const decltype(requests)::iterator &it)
try
{
auto &request
{
const_cast<fetch::request &>(*it)
};
const unwind free{[&request]
{
request.finished = -1;
dock.notify_all();
}};
assert(!request.eptr);
log::debug
{
log, "eval handling %s in %s (r:%zu c:%zu)",
string_view{request.event_id},
string_view{request.room_id},
requests.size(),
complete.size(),
};
const m::event event
{
json::object{request}, request.event_id
};
m::vm::opts opts;
opts.infolog_accept = true;
opts.fetch_prev = false;
opts.fetch_state_wait = false;
opts.fetch_auth_wait = false;
opts.fetch_prev_wait = false;
m::vm::eval
{
event, opts
};
}
catch(const std::exception &e)
{
auto &request
{
const_cast<fetch::request &>(*it)
};
if(!request.eptr)
request.eptr = std::current_exception();
log::error
{
log, "fetch eval %s in %s :%s",
string_view{request.event_id},
string_view{request.room_id},
e.what()
};
return ret;
}
//
@ -830,15 +391,40 @@ catch(const std::exception &e)
//
bool
ircd::m::fetch::start(request &request)
ircd::m::fetch::start(request &request) try
{
m::v1::event::opts opts;
opts.dynamic = true;
if(!request.origin)
select_random_origin(request);
opts.remote = request.origin;
return start(request, opts);
assert(request.finished == 0);
if(!request.started)
request.started = ircd::time();
if(!request.origin)
{
select_random_origin(request);
opts.remote = request.origin;
}
while(request.origin)
{
if(start(request, opts))
return true;
select_random_origin(request);
opts.remote = request.origin;
}
assert(!request.finished);
finish(request);
return false;
}
catch(...)
{
assert(!request.finished);
request.eptr = std::current_exception();
finish(request);
return false;
}
bool
@ -865,7 +451,7 @@ try
log::debug
{
log, "Started request for %s in %s from '%s'",
log, "Starting request for %s in %s from '%s'",
string_view{request.event_id},
string_view{request.room_id},
string_view{request.origin},
@ -879,7 +465,7 @@ catch(const http::error &e)
log::logf
{
log, run::level == run::level::QUIT? log::DERROR: log::ERROR,
"Failed to start request for %s in %s to '%s' :%s %s",
"Starting request for %s in %s to '%s' :%s %s",
string_view{request.event_id},
string_view{request.room_id},
string_view{request.origin},
@ -894,7 +480,7 @@ catch(const std::exception &e)
log::logf
{
log, run::level == run::level::QUIT? log::DERROR: log::ERROR,
"Failed to start request for %s in %s to '%s' :%s",
"Starting request for %s in %s to '%s' :%s",
string_view{request.event_id},
string_view{request.room_id},
string_view{request.origin},
@ -975,7 +561,7 @@ ircd::m::fetch::handle(request &request)
log::debug
{
log, "%u %s for %s in %s from '%s'",
log, "Received %u %s good %s in %s from '%s'",
uint(code),
status(code),
string_view{request.event_id},
@ -989,7 +575,7 @@ ircd::m::fetch::handle(request &request)
log::derror
{
log, "Failure for %s in %s from '%s' :%s",
log, "Erroneous remote for %s in %s from '%s' :%s",
string_view{request.event_id},
string_view{request.room_id},
string_view{request.origin},
@ -1011,6 +597,7 @@ try
{
assert(!request.finished);
assert(request.started && request.last);
server::cancel(request);
request.eptr = std::exception_ptr{};
request.origin = {};
@ -1025,8 +612,34 @@ catch(...)
void
ircd::m::fetch::finish(request &request)
{
assert(request.started);
request.finished = ircd::time();
#if 0
log::logf
{
log, request.eptr? log::DERROR: log::DEBUG,
"%s in %s started:%ld finished:%d attempted:%zu abandon:%b %S%s",
string_view{request.event_id},
string_view{request.room_id},
request.started,
request.finished,
request.attempted.size(),
!request.promise,
request.eptr? " :" : "",
what(request.eptr),
};
#endif
if(!request.promise)
return;
if(request.eptr)
{
request.promise.set_exception(std::move(request.eptr));
return;
}
request.promise.set_value(result{request});
}
bool
@ -1073,3 +686,19 @@ ircd::m::fetch::request::request(const m::room::id &room_id,
,buf{bufsz}
{
}
//
// result::result
//
/// Materialize a result from a finished request: the m::event superclass is
/// constructed as a view over the "event" member of the request's response
/// JSON, and the response's dynamic input buffer is moved into this result
/// so the view remains valid after the request is destroyed.
ircd::m::fetch::result::result(request &request)
:m::event
{
static_cast<json::object>(request)["event"]
}
,buf
{
std::move(request.in.dynamic)
}
{
}

View file

@ -11,20 +11,15 @@
// Fetch unit state
namespace ircd::m::fetch
{
struct request; // m/fetch.h
struct evaltab;
static bool operator<(const request &a, const request &b) noexcept;
static bool operator<(const request &a, const string_view &b) noexcept;
static bool operator<(const string_view &a, const request &b) noexcept;
extern ctx::dock dock;
extern ctx::mutex requests_mutex;
extern std::set<request, std::less<>> requests;
extern std::multimap<room::id, request *> rooms;
extern std::deque<decltype(requests)::iterator> complete;
extern ctx::context eval_context;
extern ctx::context request_context;
extern hookfn<vm::eval &> hook;
extern conf::item<size_t> requests_max;
extern conf::item<seconds> auth_timeout;
extern conf::item<seconds> timeout;
@ -40,30 +35,18 @@ namespace ircd::m::fetch
static bool start(request &);
static bool handle(request &);
static void eval_handle(const decltype(requests)::iterator &);
static void eval_handle();
static void eval_worker();
static void request_handle(const decltype(requests)::iterator &);
static void request_handle();
static size_t request_cleanup();
static void request_worker();
template<class... args> static bool submit(const event::id &, const room::id &, const size_t &bufsz = 8_KiB, args&&...);
static void hook_handle_prev(const event &, vm::eval &, evaltab &, const room &);
static void hook_handle_auth(const event &, vm::eval &, evaltab &, const room &);
static void hook_handle(const event &, vm::eval &);
template<class... args>
static ctx::future<result>
submit(const event::id &,
const room::id &,
const size_t &bufsz = 8_KiB,
args&&...);
static void init();
static void fini();
}
struct ircd::m::fetch::evaltab
{
size_t auth_count {0};
size_t auth_exists {0};
size_t prev_count {0};
size_t prev_exists {0};
size_t prev_fetching {0};
size_t prev_fetched {0};
};

View file

@ -222,7 +222,17 @@ try
return true;
}
fetching += fetch::start(room_id, event_id);
auto future
{
fetch::start(room_id, event_id)
};
m::fetch::result result
{
future.get()
};
//TODO: XXX
return true;
});

288
modules/m_vm_fetch.cc Normal file
View file

@ -0,0 +1,288 @@
// Matrix Construct
//
// Copyright (C) Matrix Construct Developers, Authors & Contributors
// Copyright (C) 2016-2018 Jason Volk <jason@zemos.net>
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice is present in all copies. The
// full license for this software is available in the LICENSE file.
/// VM fetch unit: hooks into the vm.fetch evaluation phase to ensure an
/// event's referenced auth_events and prev_events are available locally
/// before evaluation proceeds.
namespace ircd::m::vm::fetch
{
struct evaltab;
// Phase handlers; hook_handle dispatches to the auth/prev sub-handlers.
static void hook_handle_prev(const event &, vm::eval &, evaltab &, const room &);
static void hook_handle_auth(const event &, vm::eval &, evaltab &, const room &);
static void hook_handle(const event &, vm::eval &);
// Master switch for fetching from remotes during eval.
extern conf::item<bool> enable;
// Hook registration binding hook_handle to the "vm.fetch" site.
extern hookfn<vm::eval &> hook;
extern log::log log;
}
/// Per-event tally accumulated across hook_handle_auth/hook_handle_prev;
/// reported in hook_handle's debug log line.
struct ircd::m::vm::fetch::evaltab
{
size_t auth_count {0};     // auth_events referenced by the event
size_t auth_exists {0};    // of those, how many exist locally
size_t prev_count {0};     // prev_events referenced by the event
size_t prev_exists {0};    // of those, how many exist locally
size_t prev_fetching {0};  // fetches launched for missing prev_events
size_t prev_fetched {0};   // prev_events that exist after waiting
};
/// Module header registered with the module loader.
ircd::mapi::header
IRCD_MODULE
{
"Matrix VM Fetch Unit"
};
/// Dedicated log facility for this unit.
decltype(ircd::m::vm::fetch::log)
ircd::m::vm::fetch::log
{
"m.vm.fetch"
};
/// Runtime switch; when false the unit will not fetch from remotes and
/// evaluation fails if required references are missing locally.
decltype(ircd::m::vm::fetch::enable)
ircd::m::vm::fetch::enable
{
{ "name", "ircd.m.vm.fetch.enable" },
{ "default", true },
};
/// Binds hook_handle into the vm.fetch evaluation phase.
decltype(ircd::m::vm::fetch::hook)
ircd::m::vm::fetch::hook
{
hook_handle,
{
{ "_site", "vm.fetch" }
}
};
//
// fetch_phase
//
void
ircd::m::vm::fetch::hook_handle(const event &event,
vm::eval &eval)
try
{
assert(eval.opts);
assert(eval.opts->fetch);
const auto &opts{*eval.opts};
const auto &type
{
at<"type"_>(event)
};
if(type == "m.room.create")
return;
const m::event::id &event_id
{
event.event_id
};
const m::room::id &room_id
{
at<"room_id"_>(event)
};
// Can't construct m::room with the event_id argument because it
// won't be found (we're evaluating that event here!) so we just set
// the member manually to make further use of the room struct.
m::room room{room_id};
room.event_id = event_id;
evaltab tab;
if(opts.fetch_auth_check)
hook_handle_auth(event, eval, tab, room);
if(opts.fetch_prev_check)
hook_handle_prev(event, eval, tab, room);
log::debug
{
log, "%s %s ac:%zu ae:%zu pc:%zu pe:%zu pf:%zu",
loghead(eval),
json::get<"room_id"_>(event),
tab.auth_count,
tab.auth_exists,
tab.prev_count,
tab.prev_exists,
tab.prev_fetched,
};
}
catch(const std::exception &e)
{
log::derror
{
log, "%s :%s",
loghead(eval),
e.what(),
};
throw;
}
/// Ensure all auth_events referenced by `event` exist locally, fetching the
/// room's auth_chain from a remote when some are missing and fetching is
/// permitted; otherwise throws vm::error(EVENT) to abort the evaluation.
void
ircd::m::vm::fetch::hook_handle_auth(const event &event,
vm::eval &eval,
evaltab &tab,
const room &room)
{
// Count how many of the auth_events provided exist locally.
const auto &opts{*eval.opts};
const event::prev prev{event};
tab.auth_count = prev.auth_events_count();
for(size_t i(0); i < tab.auth_count; ++i)
{
const auto &auth_id
{
prev.auth_event(i)
};
tab.auth_exists += bool(m::exists(auth_id));
}
// We are satisfied at this point if all auth_events for this event exist,
// as those events have themselves been successfully evaluated and so forth.
assert(tab.auth_exists <= tab.auth_count);
if(tab.auth_exists == tab.auth_count)
return;
// At this point we are missing one or more auth_events for this event.
log::dwarning
{
log, "%s auth_events:%zu hit:%zu miss:%zu",
loghead(eval),
tab.auth_count,
tab.auth_exists,
tab.auth_count - tab.auth_exists,
};
// We need to figure out where best to sling a request to fetch these
// missing auth_events. We prefer the remote client conducting this eval
// with their /federation/send/ request which we stored in the opts.
// Fallbacks: the event's origin server, then the room ID's server part,
// skipping any host that is ourselves.
const string_view &remote
{
opts.node_id?
opts.node_id:
!my_host(json::get<"origin"_>(event))?
string_view(json::get<"origin"_>(event)):
!my_host(room.room_id.host())? //TODO: XXX
room.room_id.host():
string_view{}
};
// Bail out here if we can't or won't attempt fetching auth_events.
if(!opts.fetch_auth || !bool(m::vm::fetch::enable) || !remote)
throw vm::error
{
vm::fault::EVENT, "Failed to fetch auth_events for %s in %s",
string_view{event.event_id},
json::get<"room_id"_>(event)
};
// This is a blocking call to recursively fetch and evaluate the auth_chain
// for this event. Upon return all of the auth_events for this event will
// have themselves been fetched and auth'ed recursively or throws.
m::fetch::auth_chain(room, remote);
tab.auth_exists = tab.auth_count;
}
/// Ensure the prev_events referenced by `event` exist locally, optionally
/// waiting for fetches, and throw vm::error(EVENT) if the eval options'
/// any/all requirements are not satisfied.
///
/// NOTE(review): actually launching prev fetches is disabled in this module
/// pending its port to the new future-based m::fetch interface — see the
/// //TODO markers below.
void
ircd::m::vm::fetch::hook_handle_prev(const event &event,
vm::eval &eval,
evaltab &tab,
const room &room)
{
// Count the referenced prev_events which already exist locally.
const auto &opts{*eval.opts};
const event::prev prev{event};
tab.prev_count = prev.prev_events_count();
for(size_t i(0); i < tab.prev_count; ++i)
{
const auto &prev_id
{
prev.prev_event(i)
};
if(m::exists(prev_id))
{
++tab.prev_exists;
continue;
}
const bool can_fetch
{
opts.fetch_prev && bool(m::vm::fetch::enable)
};
// Fetch launch is stubbed out ('&& false'); prev_fetching stays zero.
const bool fetching
{
//TODO: XXX
can_fetch && false //start(room.room_id, prev_id)
};
tab.prev_fetching += fetching;
}
// If we have all of the referenced prev_events we are satisfied here.
assert(tab.prev_exists <= tab.prev_count);
if(tab.prev_exists == tab.prev_count)
return;
// At this point one or more prev_events are missing; the fetches were
// launched asynchronously if the options allowed for it.
log::dwarning
{
log, "%s prev_events:%zu hit:%zu miss:%zu fetching:%zu",
loghead(eval),
tab.prev_count,
tab.prev_exists,
tab.prev_count - tab.prev_exists,
tab.prev_fetching,
};
// If the options want to wait for the fetch+evals of the prev_events to occur
// before we continue processing this event further, we block in here.
// This branch is currently unreachable since prev_fetching is never
// incremented above; the assert(0) guards against entering it.
const bool &prev_wait{opts.fetch_prev_wait};
if(prev_wait && tab.prev_fetching) for(size_t i(0); i < tab.prev_count; ++i)
{
const auto &prev_id
{
prev.prev_event(i)
};
//TODO: XXX
assert(0);
tab.prev_fetched += m::exists(prev_id);
}
// Aborts this event if the options want us to guarantee at least one
// prev_event was fetched and evaluated for this event. This is generally
// used in conjunction with the fetch_prev_wait option to be effective.
const bool &prev_any{opts.fetch_prev_any};
if(prev_any && tab.prev_exists + tab.prev_fetched == 0)
throw vm::error
{
vm::fault::EVENT, "Failed to fetch any prev_events for %s in %s",
string_view{event.event_id},
json::get<"room_id"_>(event)
};
// Aborts this event if the options want us to guarantee ALL of the
// prev_events were fetched and evaluated for this event.
const bool &prev_all{opts.fetch_prev_all};
if(prev_all && tab.prev_exists + tab.prev_fetched < tab.prev_count)
throw vm::error
{
vm::fault::EVENT, "Failed to fetch all %zu required prev_events for %s in %s",
tab.prev_count,
string_view{event.event_id},
json::get<"room_id"_>(event)
};
}