// Matrix Construct
//
// Copyright (C) Matrix Construct Developers, Authors & Contributors
// Copyright (C) 2016-2019 Jason Volk <jason@zemos.net>
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice is present in all copies. The
// full license for this software is available in the LICENSE file.

namespace ircd::m::init::backfill
{
	extern conf::item<bool> gossip_enable;
	extern conf::item<seconds> gossip_timeout;
	size_t gossip(const room::id &, const event::id &, const string_view &remote);

	bool handle_event(const room::id &, const event::id &, const string_view &hint, const bool &ask_one);
	void handle_missing(const room::id &);
	void handle_room(const room::id &);
	void worker();

	extern std::unique_ptr<context> worker_context;
	extern conf::item<bool> enable;
	extern conf::item<size_t> pool_size;
	extern conf::item<bool> local_joined_only;
	extern log::log log;
};

decltype(ircd::m::init::backfill::log)
ircd::m::init::backfill::log
{
	"m.init.backfill"
};

decltype(ircd::m::init::backfill::enable)
ircd::m::init::backfill::enable
{
	{ "name", "m.init.backfill.enable" },
	{ "default", true },
};

decltype(ircd::m::init::backfill::pool_size)
ircd::m::init::backfill::pool_size
{
	{ "name", "m.init.backfill.pool_size" },
	{ "default", 12L },
};

decltype(ircd::m::init::backfill::local_joined_only)
ircd::m::init::backfill::local_joined_only
{
	{ "name", "m.init.backfill.local_joined_only" },
	{ "default", true },
};

decltype(ircd::m::init::backfill::worker_context)
ircd::m::init::backfill::worker_context;

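/// Initialize the backfill unit. Unless disabled by configuration, or the
/// server is in a read-only/write-avoid mode, this launches the worker
/// context which performs the initial resynchronization in the background.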
void
ircd::m::init::backfill::init()
{
	if(!enable)
	{
		log::warning
		{
			log, "Initial synchronization of rooms from remote servers has"
			" been disabled by the configuration. Not fetching latest events."
		};

		return;
	}

	if(ircd::read_only || ircd::write_avoid)
	{
		log::warning
		{
			log, "Not performing initial backfill because the read-only or write-avoid flag is set."
		};

		return;
	}

	assert(!worker_context);
	worker_context.reset(new context
	{
		"m.init.backfill",
		512_KiB,
		&worker,
		context::POST
	});
}

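/// Tear down the backfill unit; the worker context is stopped and destroyed.
/// This is a no-op if the worker was never started.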
void
ircd::m::init::backfill::fini()
noexcept
{
	if(!worker_context)
		return;

	log::debug
	{
		log, "Terminating worker context..."
	};

	worker_context.reset(nullptr);
}

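/// Main loop of the backfill worker context. Waits for the server to reach
/// the RUN level, then iterates the locally known rooms and dispatches each
/// one to a pool of child contexts for resynchronization from remote servers.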
void
ircd::m::init::backfill::worker()
try
{
	run::changed::dock.wait([]
	{
		return run::level == run::level::RUN;
	});

	// Set a low priority for this context; see related pool_opts
	ionice(ctx::cur(), 4);
	nice(ctx::cur(), 4);

	// Prepare to iterate all of the rooms this server is aware of which
	// contain at least one member from another server in any state, and
	// one member from our server in a joined state.
	rooms::opts opts;
	opts.remote_only = true;
	opts.local_joined_only = local_joined_only;

	// This is only an estimate because the rooms on the server can change
	// before this task completes.
	const auto estimate
	{
		1UL //rooms::count(opts)
	};

	if(!estimate)
		return;

	log::notice
	{
		log, "Starting initial backfill of rooms from other servers...",
		estimate,
	};

	// Prepare a pool of child contexts to process rooms concurrently.
	// The context pool lives directly in this frame.
	static const ctx::pool::opts pool_opts
	{
		512_KiB, // stack sz
		size_t(pool_size), // pool sz
		-1, // queue max hard
		0, // queue max soft
		true, // queue max blocking
		true, // queue max warning
		3, // ionice
		3, // nice
	};

	ctx::pool pool
	{
		"m.init.backfill", pool_opts
	};

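	// Each pool worker runs each_room() for one room; the counters and dock
	// let this context wait below for all submitted rooms to complete.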
	ctx::dock dock;
	size_t count(0), complete(0);
	const auto each_room{[&estimate, &count, &complete, &dock]
	(const room::id &room_id)
	{
		const unwind completed{[&complete, &dock]
		{
			++complete;
			dock.notify_one();
		}};

		handle_room(room_id);
		ctx::interruption_point();

		handle_missing(room_id);
		ctx::interruption_point();

		log::info
		{
			log, "Initial backfill of %s complete:%zu", //estimate:%zu %02.2lf%%",
			string_view{room_id},
			complete,
			estimate,
			(complete / double(estimate)) * 100.0,
		};

		return true;
	}};

	// Iterate the room_id's, submitting a copy of each to the next pool
	// worker; the submission blocks when all pool workers are busy, as per
	// the pool::opts.
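	// Interruption of this context is suppressed for this scope; the
	// iteration and worker closures poll ctx::interruption_requested()
	// to stop cooperatively instead.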
	const ctx::uninterruptible ui;
	rooms::for_each(opts, [&pool, &each_room, &count]
	(const room::id &room_id)
	{
		if(unlikely(ctx::interruption_requested()))
			return false;

		++count;
		pool([&each_room, room_id(std::string(room_id))]
		{
			each_room(room_id);
		});

		return true;
	});

	if(complete < count)
		log::dwarning
		{
			log, "Waiting for initial resynchronization count:%zu complete:%zu rooms...",
			count,
			complete,
		};

	if(unlikely(ctx::interruption_requested()))
		pool.terminate();

	// All rooms have been submitted to the pool but the pool workers might
	// still be busy. If we unwind now the pool's dtor will kill the workers
	// so we synchronize their completion here.
	dock.wait([&complete, &count]
	{
		return complete >= count;
	});

	if(unlikely(ctx::interruption_requested()))
		return;

	log::notice
	{
		log, "Initial resynchronization of %zu rooms completed.",
		count,
	};
}
catch(const ctx::interrupted &e)
{
	log::derror
	{
		log, "Worker interrupted without completing resynchronization of all rooms."
	};

	throw;
}
catch(const ctx::terminated &e)
{
	log::error
	{
		log, "Worker terminated without completing resynchronization of all rooms."
	};

	throw;
}
catch(const std::exception &e)
{
	log::critical
	{
		log, "Worker fatal :%s",
		e.what(),
	};
}

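/// Resynchronize a single room. Queries every other server in the room for
/// its current head, compares depths against our local head, and fetches any
/// referenced events we don't already have.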
void
ircd::m::init::backfill::handle_room(const room::id &room_id)
try
{
	const m::room room
	{
		room_id
	};

	const room::origins origins
	{
		room
	};

	// When the room isn't public we need to supply a user_id of one of our
	// users in the room to satisfy matrix protocol requirements upstack.
	const auto user_id
	{
		m::any_user(room, my_host(), "join")
	};

	size_t respond(0), behind(0), equal(0), ahead(0);
	size_t exists(0), fetching(0), evaluated(0);
	std::set<std::string, std::less<>> errors;
	const auto &[top_event_id, top_event_depth, top_event_idx]
	{
		m::top(std::nothrow, room)
	};

	log::info
	{
		log, "Resynchronizing %s from %s [idx:%lu depth:%ld] from %zu joined servers...",
		string_view{room_id},
		string_view{top_event_id},
		top_event_idx,
		top_event_depth,
		origins.count(),
	};

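	// Ask each server in the room (excluding ourselves) for its current head
	// so we can compare depths and chase any prev_events we're missing.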
	feds::opts opts;
	opts.op = feds::op::head;
	opts.room_id = room_id;
	opts.user_id = user_id;
	opts.closure_errors = false; // exceptions will not propagate feds::execute
	opts.exclude_myself = true;
	const auto &top_depth(top_event_depth); // clang structured-binding & closure oops
	feds::execute(opts, [&](const auto &result)
	{
		const m::event event
		{
			result.object.get("event")
		};

		// The depth comes back as one greater than any existing
		// depth so we subtract one.
		const auto &depth
		{
			std::max(json::get<"depth"_>(event) - 1L, 0L)
		};

		++respond;
		ahead += depth > top_depth;
		equal += depth == top_depth;
		behind += depth < top_depth;
		const event::prev prev
		{
			event
		};

		return m::for_each(prev, [&](const event::id &event_id)
		{
			if(unlikely(ctx::interruption_requested()))
				return false;

			if(errors.count(event_id))
				return true;

			if(!m::exists(event::id(event_id)))
			{
				++fetching;
				if(!handle_event(room_id, event_id, result.origin, true))
				{
					// If we fail to process the event we cache that and cease here.
					errors.emplace(event_id);
					return true;
				}
				else ++evaluated;
			}
			else ++exists;

			// If the event already exists or was successfully obtained we
			// reward the remote with gossip of events which reference this
			// event which it is unlikely to have.
			if(gossip_enable)
				gossip(room_id, event_id, result.origin);

			return true;
		});
	});

	if(unlikely(ctx::interruption_requested()))
		return;

	log::info
	{
		log, "Acquired %s remote head; servers:%zu online:%zu"
		" depth:%ld lt:eq:gt %zu:%zu:%zu exist:%zu eval:%zu error:%zu",
		string_view{room_id},
		origins.count(),
		origins.count_online(),
		top_depth,
		behind,
		equal,
		ahead,
		exists,
		evaluated,
		errors.size(),
	};

	assert(ahead + equal + behind == respond);
}
catch(const std::exception &e)
{
	log::error
	{
		log, "Failed to synchronize recent %s :%s",
		string_view{room_id},
		e.what(),
	};
}

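/// Fetch events which are referenced in the room but which we don't have
/// locally, limited to references near the head of the room; deeper gaps are
/// not attempted here.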
void
ircd::m::init::backfill::handle_missing(const room::id &room_id)
try
{
	const m::room room
	{
		room_id
	};

	const m::room::events::missing missing
	{
		room
	};

	const int64_t &room_depth
	{
		m::depth(std::nothrow, room)
	};

	const ssize_t &viewport_size
	{
		m::room::events::viewport_size
	};

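	// Only consider missing references within roughly two viewports below
	// the current room depth.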
	const int64_t min_depth
	{
		std::max(room_depth - viewport_size * 2, 0L)
	};

	ssize_t attempted(0);
	std::set<std::string, std::less<>> fail;
	missing.for_each(min_depth, [&room_id, &fail, &attempted, &room_depth, &min_depth]
	(const auto &event_id, const int64_t &ref_depth, const auto &ref_idx)
	{
		if(unlikely(ctx::interruption_requested()))
			return false;

		auto it{fail.lower_bound(event_id)};
		if(it == end(fail) || *it != event_id)
		{
			log::debug
			{
				log, "Fetching missing %s ref_depth:%zd in %s head_depth:%zu min_depth:%zd",
				string_view{event_id},
				ref_depth,
				string_view{room_id},
				room_depth,
				min_depth,
			};

			if(!handle_event(room_id, event_id, string_view{}, false))
				fail.emplace_hint(it, event_id);
		}

		++attempted;
		return true;
	});

	if(unlikely(ctx::interruption_requested()))
		return;

	if(attempted - ssize_t(fail.size()) > 0L)
		log::info
		{
			log, "Fetched %zu recent missing events in %s attempted:%zu fail:%zu",
			attempted - fail.size(),
			string_view{room_id},
			attempted,
			fail.size(),
		};
}
catch(const std::exception &e)
{
	log::error
	{
		log, "Failed to synchronize missing %s :%s",
		string_view{room_id},
		e.what(),
	};
}

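/// Fetch a single event from the federation and evaluate it. When ask_hint_only
/// is set the fetch is limited to a single attempt at the hinted remote;
/// otherwise other servers may be tried. Returns false if the event could not
/// be acquired or evaluated.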
bool
ircd::m::init::backfill::handle_event(const room::id &room_id,
                                      const event::id &event_id,
                                      const string_view &hint,
                                      const bool &ask_hint_only)
try
{
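	// Submit a fetch request for this one event, preferring the hinted
	// remote, and block this context until the result arrives.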
	fetch::opts opts;
	opts.op = fetch::op::event;
	opts.room_id = room_id;
	opts.event_id = event_id;
	opts.backfill_limit = 1;
	opts.hint = hint;
	opts.attempt_limit = ask_hint_only;
	auto future
	{
		fetch::start(opts)
	};

	m::fetch::result result
	{
		future.get()
	};

	const json::object response
	{
		result
	};

	const json::array &pdus
	{
		json::object(result).at("pdus")
	};

	const m::event event
	{
		pdus.at(0), event_id
	};

	const auto &[viewport_depth, _]
	{
		m::viewport(room_id)
	};

	const bool below_viewport
	{
		json::get<"depth"_>(event) < viewport_depth
	};

	if(below_viewport)
		log::debug
		{
			log, "Will not fetch children of %s depth:%ld below viewport:%ld in %s",
			string_view{event_id},
			json::get<"depth"_>(event),
			viewport_depth,
			string_view{room_id},
		};

	m::vm::opts vmopts;
	vmopts.infolog_accept = true;
	vmopts.fetch_prev = !below_viewport;
	vmopts.fetch_state = below_viewport;
	vmopts.warnlog &= ~vm::fault::EXISTS;
	vmopts.node_id = hint;
	m::vm::eval eval
	{
		event, vmopts
	};

	log::info
	{
		log, "acquired %s in %s depth:%ld viewport:%ld state:%b",
		string_view{event_id},
		string_view{room_id},
		json::get<"depth"_>(event),
		viewport_depth,
		defined(json::get<"state_key"_>(event)),
	};

	return true;
}
catch(const std::exception &e)
{
	log::derror
	{
		log, "Failed to acquire %s synchronizing %s :%s",
		string_view{event_id},
		string_view{room_id},
		e.what(),
	};

	return false;
}

decltype(ircd::m::init::backfill::gossip_enable)
ircd::m::init::backfill::gossip_enable
{
	{ "name", "m.init.backfill.gossip.enable" },
	{ "default", true },
};

decltype(ircd::m::init::backfill::gossip_timeout)
ircd::m::init::backfill::gossip_timeout
{
	{ "name", "m.init.backfill.gossip.timeout" },
	{ "default", 5L },
};

/// The initial gossip protocol works by sending the remote server some events
/// which reference an event contained in the remote's head which we just
/// obtained. This is part of a family of active measures taken to reduce
/// forward extremities on other servers but without polluting the chain with
/// permanent data for this purpose such as with org.matrix.dummy_event.
size_t
ircd::m::init::backfill::gossip(const room::id &room_id,
                                const event::id &event_id,
                                const string_view &remote)
{
	size_t ret{0};
	const m::event::refs refs
	{
		m::index(event_id, std::nothrow)
	};

	static const size_t max{48};
	const size_t count
	{
		std::min(refs.count(dbs::ref::NEXT), max)
	};

	if(!count)
		return ret;

	const unique_mutable_buffer buf[]
	{
		{ event::MAX_SIZE * (count + 1) },
		{ 16_KiB },
	};

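	// Gather up to max event_idx's of events which reference (NEXT) the
	// subject event; these are what the remote is unlikely to have.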
	size_t i{0};
	std::array<event::idx, max> next_idx;
	refs.for_each(dbs::ref::NEXT, [&next_idx, &i]
	(const event::idx &event_idx, const auto &ref_type)
	{
		assert(ref_type == dbs::ref::NEXT);
		next_idx.at(i) = event_idx;
		return ++i < next_idx.size();
	});

	json::stack out{buf[0]};
	{
		json::stack::object top
		{
			out
		};

		json::stack::member
		{
			top, "origin", m::my_host()
		};

		json::stack::member
		{
			top, "origin_server_ts", json::value
			{
				long(ircd::time<milliseconds>())
			}
		};

		json::stack::array pdus
		{
			top, "pdus"
		};

		m::event::fetch event;
		for(assert(ret == 0); ret < i; ++ret)
			if(seek(event, next_idx.at(ret), std::nothrow))
				pdus.append(event.source);
	}

	const string_view txn
	{
		out.completed()
	};

	char idbuf[64];
	const string_view txnid
	{
		m::txn::create_id(idbuf, txn)
	};

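	// Send the assembled transaction to the remote and wait up to the
	// configured gossip timeout for an acknowledgement.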
	m::fed::send::opts opts;
	opts.remote = remote;
	m::fed::send request
	{
		txnid, txn, buf[1], std::move(opts)
	};

	http::code code{0};
	std::exception_ptr eptr;
	if(request.wait(seconds(gossip_timeout), std::nothrow)) try
	{
		code = request.get();
		ret += code == http::OK;
	}
	catch(...)
	{
		eptr = std::current_exception();
	}

	log::logf
	{
		log, code == http::OK? log::DEBUG : log::DERROR,
		"gossip %zu:%zu to %s reference to %s in %s :%s %s",
		ret,
		count,
		remote,
		string_view{event_id},
		string_view{room_id},
		code?
			status(code):
			"failed",
		eptr?
			what(eptr):
			string_view{},
	};

	return ret;
}