Mirror of https://github.com/matrix-construct/construct

ircd::m::sync: Add flag to trigger re-request forcing focused full_state.

Jason Volk 2020-06-03 21:57:28 -07:00
parent 1d45c9aecd
commit da43ac3d95
4 changed files with 93 additions and 26 deletions


@@ -101,6 +101,10 @@ struct ircd::m::sync::data
/// The json::stack master object
json::stack *out {nullptr};
+/// Set by a linear sync handler; indicates the handler cannot fulfill
+/// the request because the polylog sync handler should be used instead;
+bool reflow_full_state {false};
data(const m::user &user,
const m::events::range &range,
ircd::client *const &client = nullptr,
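
The flag declared above is the whole interface between a linear handler and the rest of /sync. A minimal standalone sketch of that contract, with illustrative names in place of ircd's actual types: a handler that cannot fulfill the request sets the flag and consumes nothing, and the caller then answers with a specially flagged next_batch instead of normal output.

#include <cstdint>
#include <iostream>

struct sync_data
{
    uint64_t event_idx {0};         // index of the event being proffered
    bool reflow_full_state {false}; // set by a linear handler that must defer to polylog
};

// Hypothetical linear handler: it cannot emit an entire room's state itself,
// so it requests a full_state re-request instead of consuming the event.
bool handle_own_join(sync_data &data)
{
    data.reflow_full_state = true;
    return false;
}

int main()
{
    sync_data data;
    data.event_idx = 42;
    if(!handle_own_join(data) && data.reflow_full_state)
        std::cout << "emit a 'P'-flagged next_batch pointing at " << data.event_idx << '\n';
}
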


@@ -468,9 +468,15 @@ ircd::m::sync::loghead(const data &data)
data.range.second,
vm::sequence::retired,
data.phased?
"|CRAZY"_sv : ""_sv,
"|CRAZY"_sv:
data.reflow_full_state?
"|REFLOW"_sv:
(data.args && data.args->full_state)?
"|FULLSTATE"_sv:
""_sv,
data.prefetch?
"|PREFETCH"_sv : ""_sv,
"|PREFETCH"_sv:
""_sv,
flush_count,
ircd::pretty(iecbuf[1], iec(flush_bytes)),
data.out?
@@ -608,6 +614,7 @@ try
,full_state
{
request.query.get("full_state", false)
+|| has(std::get<2>(since), 'P')
}
,set_presence
{
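
The added disjunct above means full_state can now be forced by the since token itself, not only by the query string. A small sketch of that decision in plain C++ (the real token layout and ircd's has()/make_since() helpers are not reproduced here); since_flags stands for whatever flag characters a previous response appended to its next_batch token:

#include <cassert>
#include <string_view>

bool force_full_state(bool query_full_state, std::string_view since_flags)
{
    // 'P' is the flag a prior response attaches when it wants a reflow.
    return query_full_state || since_flags.find('P') != std::string_view::npos;
}

int main()
{
    assert(force_full_state(false, "P")); // reflow flag alone forces full_state
    assert(!force_full_state(false, "")); // ordinary incremental sync
}
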


@@ -638,14 +638,31 @@ ircd::m::sync::longpoll::polled(data &data,
linear_proffer_event(data, scratch)
};
-if(!consumed)
-return false;
// In semaphore-mode we're just here to ride the longpoll's blocking
// behavior. We want the client to get an empty response.
if(args.semaphore)
return false;
+if(!consumed && !data.reflow_full_state)
+return false;
+assert(!data.reflow_full_state || data.event_idx);
+const auto next
+{
+data.event_idx && data.reflow_full_state?
+std::min(data.event_idx, vm::sequence::retired + 1):
+data.event_idx?
+std::min(data.event_idx + 1, vm::sequence::retired + 1):
+std::min(data.range.second + 1, vm::sequence::retired + 1)
+};
+const auto &flags
+{
+data.reflow_full_state?
+"P"_sv:
+string_view{}
+};
const json::vector vector
{
string_view
@@ -659,21 +676,15 @@ ircd::m::sync::longpoll::polled(data &data,
*data.out
};
-json::merge(top, vector);
-const auto next
-{
-data.event_idx?
-std::min(data.event_idx + 1, vm::sequence::retired + 1):
-std::min(data.range.second + 1, vm::sequence::retired + 1)
-};
+if(likely(consumed))
+json::merge(top, vector);
char since_buf[64];
json::stack::member
{
top, "next_batch", json::value
{
-make_since(since_buf, next), json::STRING
+make_since(since_buf, next, flags), json::STRING
}
};
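
The next-batch arithmetic introduced above is the key detail: on a reflow the token does not advance past the event that triggered it, so the client's follow-up request covers that event again, this time through the polylog full_state path. A standalone illustration of just that arithmetic (the parameter names are stand-ins for data.event_idx and vm::sequence::retired):

#include <algorithm>
#include <cstdint>
#include <iostream>

uint64_t next_batch(uint64_t event_idx, uint64_t retired, bool reflow)
{
    return reflow?
        std::min(event_idx, retired + 1):     // re-include the triggering event
        std::min(event_idx + 1, retired + 1); // normal advance past it
}

int main()
{
    // Event 1000 arrives while 1000 is also the latest retired sequence:
    std::cout << next_batch(1000, 1000, false) << '\n'; // 1001
    std::cout << next_batch(1000, 1000, true)  << '\n'; // 1000: replayed on the next request
}
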
@@ -747,19 +758,30 @@ try
{
last && completed?
data.range.second:
+last && data.reflow_full_state?
+std::min(last, data.range.second):
last?
std::min(last + 1, data.range.second):
0UL
};
+assert(!data.reflow_full_state || (last && !completed));
if(last)
{
+const auto &flags
+{
+data.reflow_full_state?
+"P"_sv:
+string_view{}
+};
char buf[64];
json::stack::member
{
top, "next_batch", json::value
{
-make_since(buf, next), json::STRING
+make_since(buf, next, flags), json::STRING
}
};
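
The non-longpoll path above applies the same rule with its own bounds: a completed pass returns the top of the range, a reflow stops at `last` itself, and an ordinary partial pass advances one past `last`. A small self-checking sketch of that selection (standalone stand-ins, not ircd's code):

#include <algorithm>
#include <cassert>
#include <cstdint>

uint64_t next_token(uint64_t last, uint64_t range_second, bool completed, bool reflow)
{
    return
        last && completed? range_second:
        last && reflow?    std::min(last, range_second):
        last?              std::min(last + 1, range_second):
        0UL;
}

int main()
{
    assert(next_token(500, 600, false, false) == 501); // partial pass: advance
    assert(next_token(500, 600, false, true)  == 500); // reflow: do not advance
    assert(next_token(600, 600, true,  false) == 600); // caught up to the range
}
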
@@ -833,6 +855,10 @@ ircd::m::sync::linear_proffer(data &data,
// to continue with the iteration. Otherwise if the next
// worst-case event does not fit, bad things.
wb.remaining() >= 68_KiB
+// When the handler reports this special-case we have
+// to stop at this iteration.
+&& !data.reflow_full_state
};
return enough_space_for_more;
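
The new conjunct above turns the buffer-space check into a general stop condition for the iteration. A minimal sketch of that loop-control rule under assumed types (the real code iterates events through ircd's event machinery; here a plain vector of serialized event sizes stands in):

#include <cassert>
#include <cstddef>
#include <vector>

struct window { std::size_t remaining; bool reflow_full_state; };

// Returns how many events were proffered before the iteration stopped.
std::size_t proffer(window &w, const std::vector<std::size_t> &event_sizes)
{
    constexpr std::size_t worst_case = 68 * 1024; // mirrors the 68_KiB guard above
    std::size_t count = 0;
    for(const auto size : event_sizes)
    {
        if(size > w.remaining)
            break;
        w.remaining -= size;
        ++count;
        // Stop when the next worst-case event cannot fit or a reflow was demanded.
        if(w.remaining < worst_case || w.reflow_full_state)
            break;
    }
    return count;
}

int main()
{
    window w {256 * 1024, false};
    assert(proffer(w, {16 * 1024, 32 * 1024, 200 * 1024}) == 3);
}
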


@@ -103,11 +103,22 @@ ircd::m::sync::room_state_linear_events(data &data)
const bool is_own_join
{
-is_own_membership && data.membership == "join"
+is_own_membership
+&& data.membership == "join"
};
+if(is_own_join)
+{
+// Special case gimmick; this effectively stops the linear-sync at this
+// event and has /sync respond with a token containing a flag. When the
+// client makes the next request with this flag we treat it as if they
+// were using the ?full_state=true query parameter. This will enter the
+// polylog handler instead of the linear handler (here) so as to
+// efficiently sync the entire room's state to the client; as we cannot
+// perform that feat from this handler.
+data.reflow_full_state = true;
+return false;
+}
const ssize_t &viewport_size
{
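
The comment above describes a full round trip; compressing it into one standalone program makes the sequence easier to follow. Everything here is hypothetical, including the "_P" token suffix, which only stands for "an opaque since token carrying a 'P' flag": the first request aborts the linear pass at the user's own join and hands back a flagged token, and the second request, presenting that token, is served the room's state through the polylog full_state path.

#include <cstdint>
#include <iostream>
#include <string>

struct response { std::string next_batch; bool full_room_state; };

response sync(const std::string &since_flags, bool own_join_in_window, uint64_t at)
{
    const bool reflowing = since_flags.find('P') != std::string::npos;
    if(own_join_in_window && !reflowing)
        return { std::to_string(at) + "_P", false }; // linear pass stopped; flag the token
    return { std::to_string(at + 1), reflowing };    // 'P' present: polylog full_state
}

int main()
{
    const auto first = sync("", true, 7777);   // own join encountered mid-window
    std::cout << first.next_batch << '\n';     // "7777_P" (hypothetical format)
    const auto second = sync("P", true, 7777); // client re-requests with the flag
    std::cout << std::boolalpha << second.full_room_state << '\n'; // true
}
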
@@ -234,7 +245,14 @@ bool
ircd::m::sync::_room_state_polylog(data &data)
{
assert(data.args);
-if(likely(!data.args->full_state))
+const bool full_state_all
+{
+data.args->full_state &&
+!has(std::get<2>(data.args->since), 'P')
+};
+if(likely(!full_state_all))
if(!data.phased && int64_t(data.range.first) > 0)
if(!apropos(data, data.room_head))
return false;
@@ -330,29 +348,41 @@ ircd::m::sync::room_state_polylog_events(data &data)
json::get<"state"_>(room_filter)
};
-const auto &lazyload_members
-{
-lazyload_members_enable &&
-json::get<"lazy_load_members"_>(state_filter)
-};
const room::state state
{
*data.room
};
-state.for_each([&data, &concurrent, &lazyload_members]
+const auto &lazyload_members
+{
+lazyload_members_enable
+&& json::get<"lazy_load_members"_>(state_filter)
+};
+const bool full_state_reflow
+{
+data.args->full_state
+&& has(std::get<2>(data.args->since), 'P')
+};
+const bool full_state_all
+{
+data.args->full_state
+&& !full_state_reflow
+};
+state.for_each([&data, &concurrent, &lazyload_members, &full_state_all]
(const string_view &type, const string_view &state_key, const event::idx &event_idx)
{
+// Skip this event if it's not in the sync range, except
+// when the request came with a `?full_state=true`
assert(data.args);
-if(likely(!data.args->full_state))
+if(likely(!full_state_all))
if(!apropos(data, event_idx))
return true;
// For crazyloading/lazyloading related membership event optimiztions.
-if(!data.args->full_state && type == "m.room.member")
+if(!full_state_all && type == "m.room.member")
{
if(lazyload_members)
return true;
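
The lambda shown above keys its skip rules on full_state_all only; full_state_reflow, computed for the 'P'-driven focused re-request the commit title describes, is not consumed within the lines captured here. A sketch of the per-event decision that is visible, with illustrative stand-ins for apropos() and the filter state:

#include <cstdint>
#include <string_view>

struct scope
{
    uint64_t range_first, range_second; // the sync window [first, second)
    bool full_state_all;                // ?full_state=true without the 'P' reflow flag
    bool lazyload_members;              // filter requested lazy-loaded membership
};

// Whether this event index falls inside the requested sync window.
bool apropos(const scope &s, uint64_t event_idx)
{
    return event_idx >= s.range_first && event_idx < s.range_second;
}

// Whether a room state event should be written into the response.
bool emit_state_event(const scope &s, std::string_view type, uint64_t event_idx)
{
    if(!s.full_state_all && !apropos(s, event_idx))
        return false; // outside the window and not a full snapshot: skip
    if(!s.full_state_all && type == "m.room.member" && s.lazyload_members)
        return false; // membership deferred to lazy-loading
    return true;
}

int main()
{
    const scope s {100, 200, false, true};
    return emit_state_event(s, "m.room.topic", 150)
        && !emit_state_event(s, "m.room.member", 150)? 0: 1;
}
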