2018-02-04 03:22:01 +01:00
|
|
|
// Matrix Construct
|
|
|
|
//
|
|
|
|
// Copyright (C) Matrix Construct Developers, Authors & Contributors
|
|
|
|
// Copyright (C) 2016-2018 Jason Volk <jason@zemos.net>
|
|
|
|
//
|
|
|
|
// Permission to use, copy, modify, and/or distribute this software for any
|
|
|
|
// purpose with or without fee is hereby granted, provided that the above
|
|
|
|
// copyright notice and this permission notice is present in all copies. The
|
|
|
|
// full license for this software is available in the LICENSE file.
|
2017-08-23 23:10:28 +02:00
|
|
|
|
2018-09-01 09:51:40 +02:00
|
|
|
#include "sync.h"
|
2017-08-23 23:10:28 +02:00
|
|
|
|
2018-09-01 09:51:40 +02:00
|
|
|
// Module API header: identifies this loadable module to the ircd module
// loader; the string names the Matrix C2S spec section implemented here.
ircd::mapi::header
IRCD_MODULE
{
	"Client 6.2.1 :Sync"
};
|
|
|
|
|
2018-09-01 09:51:40 +02:00
|
|
|
// HTTP resource registration for the /sync endpoint. The description text
// (defined below) is supplied as part of the resource's options.
decltype(ircd::m::sync::resource)
ircd::m::sync::resource
{
	"/_matrix/client/r0/sync",
	{
		description
	}
};
|
|
|
|
|
2018-09-01 09:51:40 +02:00
|
|
|
// Human-readable description for the /sync resource; quoted from the
// Matrix specification section 6.2.1.
decltype(ircd::m::sync::description)
ircd::m::sync::description
{R"(6.2.1

Synchronise the client's state with the latest state on the server. Clients
use this API when they first log in to get an initial snapshot of the state
on the server, and then continue to call this API to get incremental deltas
to the state, and to receive new messages.
)"};
|
|
|
|
|
2019-03-17 21:24:24 +01:00
|
|
|
// Help text for the ircd.client.sync.linear.delta.max conf item (below).
const auto linear_delta_max_help
{R"(
Maximum number of events to scan sequentially for a /sync. This determines
whether linear-sync or polylog-sync mode is used to satisfy the request. If
the difference between the since token (lower-bound) and the upper-bound of
the sync is within this value, the linear-sync mode is used. If it is more
than this value a polylog-sync mode is used. The latter is used because at
some threshold it becomes too expensive to scan a huge number of events to
grab only those that the client requires; it is cheaper to conduct a series
of random-access queries with polylog-sync instead. Note the exclusive
upper-bound of a sync is determined either by a non-spec query parameter
'next_batch' or the vm::sequence::retired+1.
)"};
|
|
|
|
|
|
|
|
// Help text for the ircd.client.sync.linear.buffer_size conf item (below).
const auto linear_buffer_size_help
{R"(
The size of the coalescing buffer used when conducting a linear-sync. During
the sequential scan of events, when an event is marked as required for the
client's sync it is stringified and appended to this buffer. The buffer has
the format of a json::vector of individual events. At the end of the linear
sync, the objects in this buffer are merged into a single spec /sync response.

When this buffer is full the linear sync must finish and respond to the client
with whatever it has. The event::idx of the last event that fit into the buffer
forms the basis for the next_batch so the client can continue with another linear
/sync to complete the range.
)"};
|
|
|
|
|
2019-01-10 22:19:07 +01:00
|
|
|
// High-watermark for the json::stack output; when the pending output
// reaches this size it is flushed to the client (see handle_get).
decltype(ircd::m::sync::flush_hiwat)
ircd::m::sync::flush_hiwat
{
	{ "name", "ircd.client.sync.flush.hiwat" },
	{ "default", long(48_KiB) },
};
|
|
|
|
|
|
|
|
// Size of the chunked-encoding response buffer allocated per request
// (passed to resource::response::chunked in handle_get).
decltype(ircd::m::sync::buffer_size)
ircd::m::sync::buffer_size
{
	{ "name", "ircd.client.sync.buffer_size" },
	{ "default", long(128_KiB) },
	{ "help", "Response chunk buffer size" },
};
|
|
|
|
|
|
|
|
// Coalescing buffer size for linear-sync mode; see linear_buffer_size_help
// above for the full rationale.
decltype(ircd::m::sync::linear_buffer_size)
ircd::m::sync::linear_buffer_size
{
	{ "name", "ircd.client.sync.linear.buffer_size" },
	{ "default", long(96_KiB) },
	{ "help", linear_buffer_size_help },
};
|
|
|
|
|
2019-02-27 02:02:21 +01:00
|
|
|
// Threshold selecting linear-sync vs polylog-sync; see linear_delta_max_help
// above for the full rationale.
decltype(ircd::m::sync::linear_delta_max)
ircd::m::sync::linear_delta_max
{
	{ "name", "ircd.client.sync.linear.delta.max" },
	{ "default", 1024 },
	{ "help", linear_delta_max_help },
};
|
|
|
|
|
2019-04-08 11:04:24 +02:00
|
|
|
// Enables the "phased" initial-sync feature which uses negative since
// tokens (see handle_get). Disabled by default and not persisted.
decltype(ircd::m::sync::polylog_phased)
ircd::m::sync::polylog_phased
{
	{ "name", "ircd.client.sync.polylog.phased" },
	{ "default", false },
	{ "persist", false },
};
|
|
|
|
|
2019-03-07 20:53:58 +01:00
|
|
|
// Debug/operational switch: when true, forces polylog-sync mode and
// suppresses the linear and longpoll paths (see handle_get / longpoll::poll).
decltype(ircd::m::sync::polylog_only)
ircd::m::sync::polylog_only
{
	{ "name", "ircd.client.sync.polylog.only" },
	{ "default", false },
};
|
|
|
|
|
|
|
|
// Allows a request to block in longpoll::poll() waiting for new events
// when neither shortpoll mode produced output (see handle_get).
decltype(ircd::m::sync::longpoll_enable)
ircd::m::sync::longpoll_enable
{
	{ "name", "ircd.client.sync.longpoll.enable" },
	{ "default", true },
};
|
|
|
|
|
2018-09-01 09:51:40 +02:00
|
|
|
//
// GET sync
//

// Registers handle_get for GET on the /sync resource. Requires an
// authenticated client. NOTE(review): the -1s appears to be the method's
// timeout option — presumably "no timeout" so /sync can longpoll; confirm
// against resource::method's options struct.
decltype(ircd::m::sync::method_get)
ircd::m::sync::method_get
{
	resource, "GET", handle_get,
	{
		method_get.REQUIRES_AUTH,
		-1s,
	}
};
|
|
|
|
|
2018-09-01 09:51:40 +02:00
|
|
|
// Entry point for GET /_matrix/client/r0/sync. Parses the request, selects
// one of three strategies — polylog (large ranges), linear (small ranges),
// or longpoll (nothing new yet) — and streams a chunked JSON response.
ircd::resource::response
ircd::m::sync::handle_get(client &client,
                          const resource::request &request)
{
	// Parse the request options
	const args args
	{
		request
	};

	// The range to `/sync`. We involve events starting at the range.first
	// index in this sync. We will not involve events with an index equal
	// or greater than the range.second. In this case the range.second does not
	// exist yet because it is one past the server's sequence::retired counter.
	const m::events::range range
	{
		args.since, std::min(args.next_batch, m::vm::sequence::retired + 1)
	};

	// The phased initial sync feature uses negative since tokens.
	const bool phased_range
	{
		int64_t(range.first) < 0L
	};

	// Check if the admin disabled phased sync.
	if(!polylog_phased && phased_range)
		throw m::NOT_FOUND
		{
			"Since parameter '%ld' must be >= 0.",
			range.first,
		};

	// When the range indexes are the same, the client is polling for the next
	// event which doesn't exist yet. There is no reason for the since parameter
	// to be greater than that, unless it's a negative integer and phased
	// sync is enabled
	if(!polylog_phased || !phased_range)
		if(range.first > range.second)
			throw m::NOT_FOUND
			{
				"Since parameter '%lu' is too far in the future."
				" Cannot be greater than '%lu'.",
				range.first,
				range.second
			};

	// Keep state for statistics of this sync here on the stack.
	stats stats;
	data data
	{
		request.user_id,
		range,
		&client,
		nullptr,   // data.out is wired up after the json::stack exists below
		&stats,
		args.filter_id
	};

	// A since token of zero means the client has no state yet.
	const bool initial_sync
	{
		range.first == 0UL
	};

	// Conditions for phased sync for this client
	data.phased =
	{
		(polylog_phased && args.phased) &&
		(phased_range || initial_sync)
	};

	// Start the chunked encoded response.
	resource::response::chunked response
	{
		client, http::OK, buffer_size
	};

	// JSON output stack writing into the response buffer; when the pending
	// output exceeds flush_hiwat it is flushed to the socket via sync::flush.
	json::stack out
	{
		response.buf,
		std::bind(&sync::flush, std::ref(data), std::ref(response), ph::_1),
		size_t(flush_hiwat)
	};
	data.out = &out;

	log::debug
	{
		log, "request %s", loghead(data)
	};

	// Nothing to scan: the since token is past the newest retired event.
	const bool should_longpoll
	{
		!data.phased &&
		range.first > vm::sequence::retired
	};

	// Small enough range for a sequential scan (see linear_delta_max_help).
	const bool should_linear
	{
		!data.phased &&
		!should_longpoll &&
		!bool(polylog_only) &&
		range.second - range.first <= size_t(linear_delta_max)
	};

	// Run the selected shortpoll strategy; true means output was produced.
	const bool shortpolled
	{
		should_longpoll?
			false:
		should_linear?
			linear_handle(data):
			polylog_handle(data)
	};

	// When shortpoll was successful, do nothing else.
	if(shortpolled)
		return {};

	// Shortpoll produced nothing; block waiting for new events if permitted.
	if(longpoll_enable && (!data.phased || initial_sync))
		if(longpoll::poll(data, args))
			return {};

	// Token the client should resume from after an empty response.
	const auto &next_batch
	{
		polylog_only?
			data.range.first:
			data.range.second
	};

	// A user-timeout occurred. According to the spec we return a
	// 200 with empty fields rather than a 408.
	empty_response(data, next_batch);
	return {};
}
|
2018-12-02 02:23:42 +01:00
|
|
|
|
2019-01-10 22:19:07 +01:00
|
|
|
// Emits a syntactically-complete but empty /sync response body carrying
// only the next_batch token; used when the request timed out with nothing
// to send (spec requires 200 with empty fields rather than 408).
void
ircd::m::sync::empty_response(data &data,
                              const uint64_t &next_batch)
{
	json::stack::object top
	{
		*data.out
	};

	// Empty objects added to output otherwise Riot b0rks.
	json::stack::object
	{
		top, "rooms"
	};

	json::stack::object
	{
		top, "presence"
	};

	// next_batch is serialized as a JSON string per the spec.
	json::stack::member
	{
		top, "next_batch", json::value
		{
			lex_cast(next_batch), json::STRING
		}
	};

	log::debug
	{
		log, "request %s timeout @%lu",
		loghead(data),
		next_batch
	};
}
|
2019-01-10 22:19:07 +01:00
|
|
|
|
|
|
|
// json::stack flush callback (bound in handle_get): writes the pending
// buffer out through the chunked response and accounts flush statistics.
// Returns the buffer actually written, as reported by response.flush.
ircd::const_buffer
ircd::m::sync::flush(data &data,
                     resource::response::chunked &response,
                     const const_buffer &buffer)
{
	const auto wrote
	{
		response.flush(buffer)
	};

	// stats pointer may be null; guard before accounting.
	if(data.stats)
	{
		data.stats->flush_bytes += size(wrote);
		data.stats->flush_count++;
	}

	return wrote;
}
|
2018-04-17 02:57:41 +02:00
|
|
|
|
2019-01-09 00:10:06 +01:00
|
|
|
// polylog
//
// Random access approach for large `since` ranges. The /sync schema itself is
// recursed. For every component in the schema, the handler seeks the events
// appropriate for the user and appends it to the output. Concretely, this
// involves a full iteration of the rooms a user is a member of, and a full
// iteration of the presence status for all users visible to a user, etc.
//
// This entire process occurs in a single pass. The schema is traced with
// json::stack and its buffer is flushed to the client periodically with
// chunked encoding.

// Returns true if any item handler committed output (i.e. the response is
// non-empty); on false the outer checkpoint decommits so nothing is sent
// from here and the caller may fall through to longpoll.
bool
ircd::m::sync::polylog_handle(data &data)
try
{
	// Outer checkpoint: allows discarding the entire response if no
	// item produced output.
	json::stack::checkpoint checkpoint
	{
		*data.out
	};

	json::stack::object top
	{
		*data.out
	};

	// Iterate every registered sync item (empty string_view = no filter).
	bool ret{false};
	m::sync::for_each(string_view{}, [&data, &ret]
	(item &item)
	{
		// Per-item checkpoint: discards this item's member if it
		// contributes nothing.
		json::stack::checkpoint checkpoint
		{
			*data.out
		};

		json::stack::object object
		{
			*data.out, item.member_name()
		};

		if(item.polylog(data))
		{
			ret = true;
			data.out->invalidate_checkpoints();
		}
		else checkpoint.decommit();

		// Always continue to the next item.
		return true;
	});

	if(ret)
	{
		// Phased sync walks backward: the next token is one below
		// range.first; otherwise it is the range's upper bound.
		const int64_t next_batch
		{
			data.phased?
				int64_t(data.range.first) - 1L:
				int64_t(data.range.second)
		};

		json::stack::member
		{
			*data.out, "next_batch", json::value
			{
				lex_cast(next_batch), json::STRING
			}
		};
	}

	// Nothing committed: drop the whole (empty) response body.
	if(!ret)
		checkpoint.decommit();

	if(!data.phased && stats_info) log::info
	{
		log, "request %s polylog commit:%b complete @%ld",
		loghead(data),
		ret,
		data.phased?
			data.range.first:
			data.range.second
	};

	return ret;
}
catch(const std::exception &e)
{
	log::error
	{
		log, "polylog %s FAILED :%s",
		loghead(data),
		e.what()
	};

	throw;
}
|
|
|
|
|
2019-01-10 05:39:12 +01:00
|
|
|
//
// linear
//
// Approach for small `since` ranges. The range of events is iterated and
// the event itself is presented to each handler in the schema. This also
// involves a json::stack trace of the schema so that if the handler determines
// the event is appropriate for syncing to the user the output buffer will
// contain a residue of a /sync response with a single event.
//
// After the iteration of events is complete we are left with several buffers
// of properly formatted individual /sync responses which we rewrite into a
// single response to overcome the inefficiency of request ping-pong under
// heavy load.

// Internal helpers for the linear strategy; defined below.
namespace ircd::m::sync
{
	static bool linear_proffer_event_one(data &);
	static size_t linear_proffer_event(data &, const mutable_buffer &);
	static std::pair<event::idx, bool> linear_proffer(data &, window_buffer &);
}
|
2019-01-10 05:39:12 +01:00
|
|
|
|
|
|
|
// Linear-sync driver: scans data.range sequentially into a coalescing
// buffer via linear_proffer(), then merges the per-event residues into a
// single response. Returns truthy (last != 0) when output was produced;
// otherwise the checkpoint decommits and nothing is sent from here.
bool
ircd::m::sync::linear_handle(data &data)
try
{
	// Allows discarding the whole response if the scan yields nothing.
	json::stack::checkpoint checkpoint
	{
		*data.out
	};

	json::stack::object top
	{
		*data.out
	};

	const unique_buffer<mutable_buffer> buf
	{
		// must be at least worst-case size of m::event plus some.
		std::max(size_t(linear_buffer_size), size_t(96_KiB))
	};

	// last: event::idx of the last event that fit in the buffer (0 if none);
	// completed: whether the whole range was iterated (buffer didn't fill).
	window_buffer wb{buf};
	const auto &[last, completed]
	{
		linear_proffer(data, wb)
	};

	// The filled portion of the buffer is a json::vector of individual
	// single-event /sync residues.
	const json::vector vector
	{
		wb.completed()
	};

	// next_batch: full range covered -> upper bound; buffer filled early ->
	// resume just after the last event that fit; no output -> 0.
	const auto next
	{
		last && completed?
			data.range.second:
		last?
			std::min(last + 1, data.range.second):
		0UL
	};

	if(last)
	{
		json::stack::member
		{
			top, "next_batch", json::value
			{
				lex_cast(next), json::STRING
			}
		};

		// Rewrite the individual residues into one spec response.
		json::merge(top, vector);
	}
	else checkpoint.decommit();

	log::debug
	{
		log, "request %s linear last:%lu %s@%lu",
		loghead(data),
		last,
		completed? "complete "_sv : string_view{},
		next
	};

	return last;
}
catch(const std::exception &e)
{
	log::error
	{
		log, "linear %s FAILED :%s",
		loghead(data),
		e.what()
	};

	throw;
}
|
|
|
|
|
2019-02-27 02:02:21 +01:00
|
|
|
/// Iterates the events in the data.range and creates a json::vector in
/// the supplied window_buffer. The return value is the event_idx of the
/// last event which fit in the buffer, or 0 of nothing was of interest
/// to our client in the event iteration. The boolean of the pair is true
/// when the whole range was iterated without the buffer running low.
std::pair<ircd::m::event::idx, bool>
ircd::m::sync::linear_proffer(data &data,
                              window_buffer &wb)
{
	event::idx ret(0);
	const auto closure{[&data, &wb, &ret]
	(const m::event::idx &event_idx, const m::event &event)
	{
		// Expose the current event to the item handlers through
		// `data` for the duration of this iteration only.
		const scope_restore their_event
		{
			data.event, &event
		};

		const scope_restore their_event_idx
		{
			data.event_idx, event_idx
		};

		// Reserve a window of the buffer; the inner closure returns the
		// bytes consumed (0 if the event was not proffered to the client).
		wb([&data, &ret, &event_idx]
		(const mutable_buffer &buf)
		{
			const auto consumed
			{
				linear_proffer_event(data, buf)
			};

			if(consumed)
				ret = event_idx;

			return consumed;
		});

		const bool enough_space_for_more
		{
			// The buffer must have at least this much more space
			// to continue with the iteration. Otherwise if the next
			// worst-case event does not fit, bad things.
			wb.remaining() >= 68_KiB
		};

		// Returning false stops the m::events iteration early.
		return enough_space_for_more;
	}};

	// true when the iteration ran the entire range to completion.
	const auto completed
	{
		m::events::for_each(data.range, closure)
	};

	return
	{
		ret, completed
	};
}
|
|
|
|
|
|
|
|
/// Sets up a json::stack for the iteration of handlers for
/// one event. Returns the number of bytes of the single-event
/// response residue written into buf, or 0 if no handler claimed
/// the event.
size_t
ircd::m::sync::linear_proffer_event(data &data,
                                    const mutable_buffer &buf)
{
	// Temporarily redirect data.out to a private stack over buf so
	// handlers write the residue here rather than the client response.
	json::stack out{buf};
	const scope_restore their_out
	{
		data.out, &out
	};

	json::stack::object top
	{
		*data.out
	};

	const bool success
	{
		linear_proffer_event_one(data)
	};

	// Explicitly close the object now so out.completed() below reflects
	// the finished residue before `out` goes out of scope.
	top.~object();
	return success?
		size(out.completed()):
		0UL;
}
|
|
|
|
|
|
|
|
/// Generates a candidate /sync response for a single event by
/// iterating all of the handlers. Returns true if at least one
/// item handler accepted the event (committed output).
bool
ircd::m::sync::linear_proffer_event_one(data &data)
{
	bool ret{false};
	m::sync::for_each(string_view{}, [&data, &ret]
	(item &item)
	{
		// Roll back any partial output from an item that declines
		// the event.
		json::stack::checkpoint checkpoint
		{
			*data.out
		};

		if(item.linear(data))
			ret = true;
		else
			checkpoint.rollback();

		// Always continue to the next item.
		return true;
	});

	return ret;
}
|
|
|
|
|
2018-10-07 07:17:46 +02:00
|
|
|
//
// longpoll
//

// Hook into the event-processing pipeline: handle_notify is invoked at
// the "vm.notify" site for each event so longpolling clients can be woken.
decltype(ircd::m::sync::longpoll::notified)
ircd::m::sync::longpoll::notified
{
	handle_notify,
	{
		{ "_site", "vm.notify" },
	}
};
|
|
|
|
|
|
|
|
// vm.notify hook: queues a newly accepted event for longpolling clients
// and wakes them via the dock. When no client is polling, the queue is
// cleared rather than grown.
void
ircd::m::sync::longpoll::handle_notify(const m::event &event,
                                       m::vm::eval &eval)
{
	assert(eval.opts);

	// Evaluations flagged to not notify clients are ignored here.
	if(!eval.opts->notify_clients)
		return;

	// No pollers: nothing will consume the queue, so keep it empty.
	if(!polling)
	{
		queue.clear();
		return;
	}

	queue.emplace_back(eval);
	dock.notify_all();
}
|
|
|
|
|
2018-12-02 02:23:42 +01:00
|
|
|
// Blocks the request until either a queued event is proffered to this
// client (returns true) or the client's timeout elapses / the connection
// is unusable (returns false, caller sends the empty response).
bool
ircd::m::sync::longpoll::poll(data &data,
                              const args &args)
try
{
	// Scratch space for rendering a single-event response residue.
	const unique_buffer<mutable_buffer> scratch
	{
		96_KiB
	};

	// RAII-count this context among the active pollers for the
	// duration of the loop.
	const scope_count polling{longpoll::polling}; do
	{
		// false return means the client's timeout elapsed.
		if(!dock.wait_until(args.timesout))
			break;

		assert(data.client && data.client->sock);
		if(unlikely(!data.client || !data.client->sock))
			break;

		// Probe the socket so we don't render output for a dead client.
		check(*data.client->sock);
		if(queue.empty())
			continue;

		const auto &accepted
		{
			queue.front()
		};

		// The last poller to process this queue entry pops it.
		const unwind pop{[]
		{
			if(longpoll::polling <= 1)
				queue.pop_front();
		}};

		// Admin forced polylog-only mode; don't serve from longpoll.
		if(polylog_only)
			break;

		if(handle(data, args, accepted, scratch))
			return true;
	}
	while(1);

	return false;
}
catch(const std::system_error &e)
{
	// Socket-level failures are demoted to debug-level error logging.
	log::derror
	{
		log, "longpoll %s failed :%s",
		loghead(data),
		e.what()
	};

	throw;
}
catch(const std::exception &e)
{
	log::error
	{
		log, "longpoll %s FAILED :%s",
		loghead(data),
		e.what()
	};

	throw;
}
|
2017-09-25 03:05:42 +02:00
|
|
|
|
2018-04-22 08:50:16 +02:00
|
|
|
// Proffers one queued event to this client's handlers. Returns false when
// no handler claimed the event (keep polling); true when a complete
// response including next_batch was written to the client.
bool
ircd::m::sync::longpoll::handle(data &data,
                                const args &args,
                                const accepted &event,
                                const mutable_buffer &scratch)
{
	// Expose this event (and its request-correlation txnid) to the item
	// handlers through `data` for the duration of this call.
	const scope_restore their_event
	{
		data.event, &event
	};

	const scope_restore their_event_idx
	{
		data.event_idx, event.event_idx
	};

	const scope_restore client_txnid
	{
		data.client_txnid, event.client_txnid
	};

	// Render the single-event residue into the scratch buffer.
	const size_t consumed
	{
		linear_proffer_event(data, scratch)
	};

	// Event not of interest to this client.
	if(!consumed)
		return false;

	const json::vector vector
	{
		string_view
		{
			buffer::data(scratch), consumed
		}
	};

	json::stack::object top
	{
		*data.out
	};

	// Rewrite the residue into the client's actual response stream.
	json::merge(top, vector);

	// Resume just past this event, clamped to one past the newest
	// retired event; fall back to the range's lower bound otherwise.
	const auto next
	{
		data.event_idx?
			std::min(data.event_idx + 1, vm::sequence::retired + 1):
			data.range.first
	};

	json::stack::member
	{
		top, "next_batch", json::value
		{
			lex_cast(next), json::STRING
		}
	};

	log::debug
	{
		log, "request %s longpoll hit:%lu complete @%lu",
		loghead(data),
		event.event_idx,
		next
	};

	return true;
}
|
2019-01-26 22:19:16 +01:00
|
|
|
|
|
|
|
//
// sync/args.h
//

// Upper clamp on the client-requested longpoll timeout (milliseconds).
ircd::conf::item<ircd::milliseconds>
ircd::m::sync::args::timeout_max
{
	{ "name", "ircd.client.sync.timeout.max" },
	{ "default", 15 * 1000L },
};
|
|
|
|
|
|
|
|
// Lower clamp on the client-requested longpoll timeout (milliseconds).
ircd::conf::item<ircd::milliseconds>
ircd::m::sync::args::timeout_min
{
	{ "name", "ircd.client.sync.timeout.min" },
	{ "default", 5 * 1000L },
};
|
|
|
|
|
|
|
|
// Timeout used when the client supplies none (milliseconds).
ircd::conf::item<ircd::milliseconds>
ircd::m::sync::args::timeout_default
{
	{ "name", "ircd.client.sync.timeout.default" },
	{ "default", 10 * 1000L },
};
|
|
|
|
|
|
|
|
//
// args::args
//

// Constructs the parsed-arguments view over the request. Member parsing
// (declared in sync/args.h) that fails lexical conversion is reported to
// the client as a 400 rather than propagating bad_lex_cast.
ircd::m::sync::args::args(const resource::request &request)
try
:request
{
	request
}
{
}
catch(const bad_lex_cast &e)
{
	throw m::BAD_REQUEST
	{
		"Since parameter invalid :%s", e.what()
	};
}
|