2018-02-04 03:22:01 +01:00
|
|
|
// Matrix Construct
|
|
|
|
//
|
|
|
|
// Copyright (C) Matrix Construct Developers, Authors & Contributors
|
|
|
|
// Copyright (C) 2016-2018 Jason Volk <jason@zemos.net>
|
|
|
|
//
|
|
|
|
// Permission to use, copy, modify, and/or distribute this software for any
|
|
|
|
// purpose with or without fee is hereby granted, provided that the above
|
|
|
|
// copyright notice and this permission notice is present in all copies. The
|
|
|
|
// full license for this software is available in the LICENSE file.
|
2017-08-23 23:10:28 +02:00
|
|
|
|
2019-09-09 21:05:53 +02:00
|
|
|
|
|
|
|
// Forward declarations and module-scope items for the /sync resource.
// The structs are defined in the sync/ headers included below; the statics
// are the three sync strategies plus shared plumbing defined in this unit.
namespace ircd::m::sync
{
	struct args;      // parsed query parameters of the /sync request
	struct stats;     // per-request statistics accumulators
	struct data;      // the ubiquitous per-request sync state structure
	struct response;

	// Flush callback wired into the json::stack; accounts stats and chunks out.
	static const_buffer flush(data &, resource::response::chunked &, const const_buffer &);

	// Sends the spec-mandated 200 with empty fields after a user-timeout.
	static void empty_response(data &, const uint64_t &next_batch);

	// The three sync strategies; each returns true if output was committed.
	static bool linear_handle(data &);
	static bool polylog_handle(data &);
	static bool longpoll_handle(data &);

	// Entry point for GET /_matrix/client/r0/sync
	static resource::response handle_get(client &, const resource::request &);

	// Configuration items (defined below; tunable at runtime via conf).
	extern conf::item<size_t> flush_hiwat;
	extern conf::item<size_t> buffer_size;
	extern conf::item<size_t> linear_buffer_size;
	extern conf::item<size_t> linear_delta_max;
	extern conf::item<bool> longpoll_enable;
	extern conf::item<bool> polylog_phased;
	extern conf::item<bool> polylog_only;

	extern resource::method method_get;
	extern const string_view description;
	extern resource resource;
}
|
|
|
|
|
|
|
|
#include "sync/args.h"
|
2017-08-23 23:10:28 +02:00
|
|
|
|
2018-09-01 09:51:40 +02:00
|
|
|
// Module header; the string names the spec section this module implements.
ircd::mapi::header
IRCD_MODULE
{
	"Client 6.2.1 :Sync"
};
|
|
|
|
|
2018-09-01 09:51:40 +02:00
|
|
|
// Registers the /sync endpoint path with the resource dispatcher.
decltype(ircd::m::sync::resource)
ircd::m::sync::resource
{
	"/_matrix/client/r0/sync",
	{
		description
	}
};
|
|
|
|
|
2018-09-01 09:51:40 +02:00
|
|
|
// Human-readable description attached to the resource registration above;
// the text is taken from the Matrix client-server spec section 6.2.1.
decltype(ircd::m::sync::description)
ircd::m::sync::description
{R"(6.2.1

Synchronise the client's state with the latest state on the server. Clients
use this API when they first log in to get an initial snapshot of the state
on the server, and then continue to call this API to get incremental deltas
to the state, and to receive new messages.
)"};
|
|
|
|
|
2019-03-17 21:24:24 +01:00
|
|
|
// Help text for the ircd.client.sync.linear.delta.max conf item below.
const auto linear_delta_max_help
{R"(
Maximum number of events to scan sequentially for a /sync. This determines
whether linear-sync or polylog-sync mode is used to satisfy the request. If
the difference between the since token (lower-bound) and the upper-bound of
the sync is within this value, the linear-sync mode is used. If it is more
than this value a polylog-sync mode is used. The latter is used because at
some threshold it becomes too expensive to scan a huge number of events to
grab only those that the client requires; it is cheaper to conduct a series
of random-access queries with polylog-sync instead. Note the exclusive
upper-bound of a sync is determined either by a non-spec query parameter
'next_batch' or the vm::sequence::retired+1.
)"};
|
|
|
|
|
|
|
|
// Help text for the ircd.client.sync.linear.buffer_size conf item below.
const auto linear_buffer_size_help
{R"(
The size of the coalescing buffer used when conducting a linear-sync. During
the sequential scan of events, when an event is marked as required for the
client's sync it is stringified and appended to this buffer. The buffer has
the format of a json::vector of individual events. At the end of the linear
sync, the objects in this buffer are merged into a single spec /sync response.

When this buffer is full the linear sync must finish and respond to the client
with whatever it has. The event::idx of the last event that fit into the buffer
forms the basis for the next_batch so the client can continue with another linear
/sync to complete the range.
)"};
|
|
|
|
|
2019-01-10 22:19:07 +01:00
|
|
|
// High-watermark for the json::stack: when this much output has accumulated
// the flush callback is invoked to emit a chunk to the client.
decltype(ircd::m::sync::flush_hiwat)
ircd::m::sync::flush_hiwat
{
	{ "name",    "ircd.client.sync.flush.hiwat" },
	{ "default", long(64_KiB)                   },
};
|
|
|
|
|
|
|
|
// Size of the chunked-encoding response buffer allocated per /sync request.
decltype(ircd::m::sync::buffer_size)
ircd::m::sync::buffer_size
{
	{ "name",    "ircd.client.sync.buffer_size" },
	{ "default", long(512_KiB)                  },
	{ "help",    "Response chunk buffer size"   },
};
|
|
|
|
|
|
|
|
// Size of the coalescing buffer used by linear-sync; see the help text.
decltype(ircd::m::sync::linear_buffer_size)
ircd::m::sync::linear_buffer_size
{
	{ "name",    "ircd.client.sync.linear.buffer_size" },
	{ "default", long(256_KiB)                         },
	{ "help",    linear_buffer_size_help               },
};
|
|
|
|
|
2019-02-27 02:02:21 +01:00
|
|
|
// Threshold (in number of events) selecting linear vs. polylog sync mode;
// see the help text above for the full rationale.
decltype(ircd::m::sync::linear_delta_max)
ircd::m::sync::linear_delta_max
{
	{ "name",    "ircd.client.sync.linear.delta.max" },
	{ "default", 1024                                },
	{ "help",    linear_delta_max_help               },
};
|
|
|
|
|
2019-04-08 11:04:24 +02:00
|
|
|
// Enables the phased initial-sync feature (negative since tokens). When
// disabled, requests with a negative since are rejected in handle_get().
decltype(ircd::m::sync::polylog_phased)
ircd::m::sync::polylog_phased
{
	{ "name",    "ircd.client.sync.polylog.phased" },
	{ "default", true                              },
};
|
|
|
|
|
2019-03-07 20:53:58 +01:00
|
|
|
// Developer switch forcing polylog mode; suppresses linear-sync selection.
decltype(ircd::m::sync::polylog_only)
ircd::m::sync::polylog_only
{
	{ "name",    "ircd.client.sync.polylog.only" },
	{ "default", false                           },
};
|
|
|
|
|
|
|
|
// Developer switch; when false, /sync never blocks in longpoll mode.
decltype(ircd::m::sync::longpoll_enable)
ircd::m::sync::longpoll_enable
{
	{ "name",    "ircd.client.sync.longpoll.enable" },
	{ "default", true                               },
};
|
|
|
|
|
2018-09-01 09:51:40 +02:00
|
|
|
//
|
|
|
|
// GET sync
|
|
|
|
//
|
|
|
|
|
|
|
|
// Binds handle_get to GET on the /sync resource. Requests require an
// access token; -1s disables the default request timeout because /sync
// implements its own (longpoll) timeout semantics.
decltype(ircd::m::sync::method_get)
ircd::m::sync::method_get
{
	resource, "GET", handle_get,
	{
		method_get.REQUIRES_AUTH,
		-1s,
	}
};
|
|
|
|
|
2018-09-01 09:51:40 +02:00
|
|
|
/// Entry point for GET /_matrix/client/r0/sync. Parses the request, selects
/// one of the three sync strategies (linear, polylog, longpoll), streams the
/// chunked-encoded JSON response, and falls back to an empty 200 response on
/// a user-timeout as the spec requires.
ircd::resource::response
ircd::m::sync::handle_get(client &client,
                          const resource::request &request)
{
	// Parse the request options
	const args args
	{
		request
	};

	// The range to `/sync`. We involve events starting at the range.first
	// index in this sync. We will not involve events with an index equal
	// or greater than the range.second. In this case the range.second does not
	// exist yet because it is one past the server's sequence::retired counter.
	const m::events::range range
	{
		args.since, std::min(args.next_batch, m::vm::sequence::retired + 1)
	};

	// The phased initial sync feature uses negative since tokens.
	const bool phased_range
	{
		int64_t(range.first) < 0L
	};

	// Check if the admin disabled phased sync.
	if(!polylog_phased && phased_range)
		throw m::NOT_FOUND
		{
			"Since parameter '%ld' must be >= 0.",
			range.first,
		};

	// When the range indexes are the same, the client is polling for the next
	// event which doesn't exist yet. There is no reason for the since parameter
	// to be greater than that, unless it's a negative integer and phased
	// sync is enabled
	if(!polylog_phased || !phased_range)
		if(range.first > range.second)
			throw m::NOT_FOUND
			{
				"Since parameter '%lu' is too far in the future."
				" Cannot be greater than '%lu'.",
				range.first,
				range.second
			};

	// Query and cache the device ID for the access token of this request on
	// the stack here for this sync.
	const device::id::buf device_id
	{
		device::access_token_to_id(request.access_token)
	};

	// Keep state for statistics of this sync here on the stack.
	stats stats;

	// The ubiquitous /sync data object is constructed on the stack here.
	// This is the main state structure for the sync::item iteration which
	// composes the /sync response.
	data data
	{
		request.user_id,
		range,
		&client,
		nullptr,
		&stats,
		&args,
		device_id,
	};

	// Determine if this is an initial-sync request.
	const bool initial_sync
	{
		range.first == 0UL
	};

	// Conditions for phased sync for this client
	data.phased =
	{
		polylog_phased && args.phased &&
		(
			phased_range ||
			(initial_sync && empty(args.since_token.second))
		)
	};

	// Start the chunked encoded response.
	resource::response::chunked response
	{
		client, http::OK, buffer_size
	};

	// Start the JSON stream for this response. As the sync items are iterated
	// the supplied response buffer will be flushed out to the supplied
	// callback; in this case, both are provided by the chunked encoding
	// response. Each flush will create and send a chunk containing in-progress
	// JSON. This will yield the ircd::ctx as this chunk is copied to the
	// kernel's TCP buffer, providing flow control for the sync composition.
	json::stack out
	{
		response.buf,
		std::bind(&sync::flush, std::ref(data), std::ref(response), ph::_1),
		size_t(flush_hiwat)
	};
	data.out = &out;

	log::debug
	{
		log, "request %s", loghead(data)
	};

	// Pre-determine if longpoll sync mode should be used. This may
	// indicate false now but after conducting a linear or even polylog
	// sync if we don't find any events for the client then we might
	// longpoll later.
	const bool should_longpoll
	{
		// longpoll can be disabled by a conf item (for developers).
		longpoll_enable

		// polylog-phased sync and longpoll are totally exclusive.
		&& !data.phased

		// initial_sync cannot hang on a longpoll; that would break clients.
		&& !initial_sync

		// When the since token is in advance of the vm sequence number
		// there's no events to consider for a sync.
		&& range.first > vm::sequence::retired

		// Spec sez that when ?full_state=1 to return immediately, so
		// that rules out longpoll
		&& !args.full_state
	};

	// Determine if linear sync mode should be used. If this is not used, and
	// longpoll mode is not used, then polylog mode must be used.
	const bool should_linear
	{
		// There is a conf item (for developers) to force polylog mode.
		!polylog_only

		// polylog-phased sync and linear are totally exclusive.
		&& !data.phased

		// If longpoll was already determined there's no need for linear
		&& !should_longpoll

		// The primary condition for a linear sync is the number of events
		// in the range being considered by the sync. That threshold is
		// supplied by a conf item.
		&& range.second - range.first <= size_t(linear_delta_max)

		// When the semaphore query param is set we don't need linear mode.
		&& !args.semaphore

		// When full_state is requested we skip to polylog sync because those
		// handlers are best suited for syncing a full room state.
		&& !args.full_state
	};

	// Determine if polylog sync mode should be used.
	const bool should_polylog
	{
		// Polylog mode is only used when neither of the other two modes
		// are determined.
		!should_longpoll && !should_linear

		// When the semaphore query param is set we don't need polylog mode.
		&& !args.semaphore
	};

	// Determine if an empty sync response should be returned to the user.
	// This is done by actually performing the sync operation based on the
	// mode decided. The return value from the operation will be false if
	// no output was generated by the sync operation, indicating we should
	// finally send an empty response.
	const bool complete
	{
		should_linear?
			linear_handle(data):
		should_polylog?
			polylog_handle(data):
			false
	};

	if(complete)
		return response;

	// Nothing was output; block in longpoll mode until something appears
	// or the client's timeout elapses.
	if(longpoll_handle(data))
		return response;

	const auto &next_batch
	{
		polylog_only?
			data.range.first:
			data.range.second
	};

	// A user-timeout occurred. According to the spec we return a
	// 200 with empty fields rather than a 408.
	empty_response(data, next_batch);
	return response;
}
|
2018-12-02 02:23:42 +01:00
|
|
|
|
2019-01-10 22:19:07 +01:00
|
|
|
/// Emits the spec-mandated empty /sync response after a user-timeout: an
/// object with empty "rooms" and "presence" and a "next_batch" token so the
/// client can immediately re-sync from where it left off.
void
ircd::m::sync::empty_response(data &data,
                              const uint64_t &next_batch)
{
	json::stack::object top
	{
		*data.out
	};

	// Empty objects added to output otherwise Riot b0rks.
	json::stack::object
	{
		top, "rooms"
	};

	json::stack::object
	{
		top, "presence"
	};

	// next_batch is serialized as a JSON string per the spec.
	json::stack::member
	{
		top, "next_batch", json::value
		{
			lex_cast(next_batch), json::STRING
		}
	};

	log::debug
	{
		log, "request %s timeout @%lu",
		loghead(data),
		next_batch
	};
}
|
2019-01-10 22:19:07 +01:00
|
|
|
|
|
|
|
/// json::stack flush callback: forwards the accumulated buffer to the
/// chunked response, accounts the transfer in the request's stats (when
/// present), and reports back what was written.
ircd::const_buffer
ircd::m::sync::flush(data &data,
                     resource::response::chunked &response,
                     const const_buffer &buffer)
{
	const auto sent
	{
		response.flush(buffer)
	};

	if(data.stats)
	{
		++data.stats->flush_count;
		data.stats->flush_bytes += size(sent);
	}

	return sent;
}
|
2018-04-17 02:57:41 +02:00
|
|
|
|
2019-01-09 00:10:06 +01:00
|
|
|
// polylog
|
|
|
|
//
|
2019-02-27 02:02:21 +01:00
|
|
|
// Random access approach for large `since` ranges. The /sync schema itself is
|
|
|
|
// recursed. For every component in the schema, the handler seeks the events
|
|
|
|
// appropriate for the user and appends it to the output. Concretely, this
|
|
|
|
// involves a full iteration of the rooms a user is a member of, and a full
|
|
|
|
// iteration of the presence status for all users visible to a user, etc.
|
|
|
|
//
|
|
|
|
// This entire process occurs in a single pass. The schema is traced with
|
|
|
|
// json::stack and its buffer is flushed to the client periodically with
|
|
|
|
// chunked encoding.
|
2019-01-09 00:10:06 +01:00
|
|
|
|
|
|
|
/// polylog-sync: random-access approach for large `since` ranges. Every
/// registered sync item handler is invoked once to query and append its
/// portion of the /sync schema. Returns true if any handler committed
/// output (in which case a next_batch was appended); on false the whole
/// response is rolled back via the outer checkpoint.
bool
ircd::m::sync::polylog_handle(data &data)
try
{
	// Outer checkpoint: allows discarding the entire response if no
	// handler produced output.
	json::stack::checkpoint checkpoint
	{
		*data.out
	};

	json::stack::object top
	{
		*data.out
	};

	bool ret{false};
	m::sync::for_each(string_view{}, [&data, &ret]
	(item &item)
	{
		// Per-item checkpoint: discards this item's member object if the
		// handler reports nothing of interest.
		json::stack::checkpoint checkpoint
		{
			*data.out
		};

		json::stack::object object
		{
			*data.out, item.member_name()
		};

		if(item.polylog(data))
		{
			ret = true;
			data.out->invalidate_checkpoints();
		}
		else checkpoint.decommit();

		// Always continue to the next item.
		return true;
	});

	if(ret)
	{
		// Phased syncs count down through negative since tokens; normal
		// syncs advance to the range's upper bound.
		const int64_t next_batch
		{
			data.phased?
				int64_t(data.range.first) - 1L:
				int64_t(data.range.second)
		};

		char buf[48];
		const string_view &next_batch_token
		{
			// The polylog phased since token. We pack two numbers separted by a '_'
			// character which cannot be urlencoded atm. The first is the usual
			// since token integer, which is negative for phased initial sync. The
			// second part is the next_batch upper-bound integer which is a snapshot
			// of the server's sequence number when the phased sync started.
			data.phased?
				fmt::sprintf
				{
					buf, "%ld_%lu", next_batch, data.range.second
				}:

			// The normal integer since token.
				fmt::sprintf
				{
					buf, "%ld", next_batch
				}
		};

		json::stack::member
		{
			*data.out, "next_batch", json::value
			{
				next_batch_token, json::STRING
			}
		};
	}

	// No handler produced output: roll the whole response back so the
	// caller can fall through to longpoll / empty-response.
	if(!ret)
		checkpoint.decommit();

	if(!data.phased && stats_info) log::info
	{
		log, "request %s polylog commit:%b complete @%ld",
		loghead(data),
		ret,
		data.phased?
			data.range.first:
			data.range.second
	};

	return ret;
}
catch(const std::exception &e)
{
	log::error
	{
		log, "polylog %s FAILED :%s",
		loghead(data),
		e.what()
	};

	throw;
}
|
|
|
|
|
2019-01-10 05:39:12 +01:00
|
|
|
//
|
|
|
|
// linear
|
|
|
|
//
|
2019-02-27 02:02:21 +01:00
|
|
|
// Approach for small `since` ranges. The range of events is iterated and
|
|
|
|
// the event itself is presented to each handler in the schema. This also
|
|
|
|
// involves a json::stack trace of the schema so that if the handler determines
|
|
|
|
// the event is appropriate for syncing to the user the output buffer will
|
|
|
|
// contain a residue of a /sync response with a single event.
|
|
|
|
//
|
|
|
|
// After the iteration of events is complete we are left with several buffers
|
|
|
|
// of properly formatted individual /sync responses which we rewrite into a
|
|
|
|
// single response to overcome the inefficiency of request ping-pong under
|
|
|
|
// heavy load.
|
|
|
|
|
|
|
|
// Internal helpers for linear-sync, defined below. linear_proffer drives
// the event iteration; linear_proffer_event stacks a single candidate
// response; linear_proffer_event_one runs the item handlers for one event.
namespace ircd::m::sync
{
	static bool linear_proffer_event_one(data &);
	static size_t linear_proffer_event(data &, const mutable_buffer &);
	static std::pair<event::idx, bool> linear_proffer(data &, window_buffer &);
}
|
2019-01-10 05:39:12 +01:00
|
|
|
|
|
|
|
/// linear-sync: sequential-scan approach for small `since` ranges. Events in
/// data.range are iterated and offered to the handlers; accepted events are
/// coalesced into a json::vector buffer which is then merged into a single
/// spec /sync response. Returns truthy (the last event idx) if anything was
/// emitted; otherwise the response is rolled back.
bool
ircd::m::sync::linear_handle(data &data)
try
{
	json::stack::checkpoint checkpoint
	{
		*data.out
	};

	json::stack::object top
	{
		*data.out
	};

	const unique_buffer<mutable_buffer> buf
	{
		// must be at least worst-case size of m::event plus some.
		std::max(size_t(linear_buffer_size), size_t(96_KiB))
	};

	window_buffer wb{buf};
	const auto &[last, completed]
	{
		linear_proffer(data, wb)
	};

	// The coalescing buffer now contains a json::vector of individual
	// single-event /sync responses.
	const json::vector vector
	{
		wb.completed()
	};

	// Choose the next_batch: the range's upper bound when the whole range
	// was covered; one past the last event that fit when the buffer filled;
	// zero when nothing was emitted.
	const auto next
	{
		last && completed?
			data.range.second:
		last?
			std::min(last + 1, data.range.second):
			0UL
	};

	if(last)
	{
		json::stack::member
		{
			top, "next_batch", json::value
			{
				lex_cast(next), json::STRING
			}
		};

		// Merge the individual event responses into the single response.
		json::merge(top, vector);
	}
	else checkpoint.decommit();

	log::debug
	{
		log, "request %s linear last:%lu %s@%lu events:%zu",
		loghead(data),
		last,
		completed? "complete "_sv : string_view{},
		next,
		vector.size(),
	};

	return last;
}
catch(const std::exception &e)
{
	log::error
	{
		log, "linear %s FAILED :%s",
		loghead(data),
		e.what()
	};

	throw;
}
|
|
|
|
|
2019-02-27 02:02:21 +01:00
|
|
|
/// Iterates the events in the data.range and creates a json::vector in
/// the supplied window_buffer. The return value is a pair of the event_idx
/// of the last event which fit in the buffer (or 0 if nothing was of
/// interest to our client in the event iteration) and whether the full
/// range was iterated without the buffer running out of space.
std::pair<ircd::m::event::idx, bool>
ircd::m::sync::linear_proffer(data &data,
                              window_buffer &wb)
{
	event::idx ret(0);
	const auto closure{[&data, &wb, &ret]
	(const m::event::idx &event_idx, const m::event &event)
	{
		// Point data.event / data.event_idx at the current event for the
		// duration of this iteration; restored on scope exit.
		const scope_restore their_event
		{
			data.event, &event
		};

		const scope_restore their_event_idx
		{
			data.event_idx, event_idx
		};

		// Consume window_buffer space with a candidate response for this
		// event; zero consumption means no handler wanted it.
		wb([&data, &ret, &event_idx]
		(const mutable_buffer &buf)
		{
			const auto consumed
			{
				linear_proffer_event(data, buf)
			};

			if(consumed)
				ret = event_idx;

			return consumed;
		});

		const bool enough_space_for_more
		{
			// The buffer must have at least this much more space
			// to continue with the iteration. Otherwise if the next
			// worst-case event does not fit, bad things.
			wb.remaining() >= 68_KiB
		};

		// Returning false stops the event iteration early.
		return enough_space_for_more;
	}};

	const auto completed
	{
		m::events::for_each(data.range, closure)
	};

	return
	{
		ret, completed
	};
}
|
|
|
|
|
|
|
|
/// Sets up a json::stack for the iteration of handlers for
/// one event. Returns the number of bytes of the candidate single-event
/// response written into buf, or 0 if no handler accepted the event.
size_t
ircd::m::sync::linear_proffer_event(data &data,
                                    const mutable_buffer &buf)
{
	json::stack out{buf};

	// Redirect data.out at our local stack for the handlers; restored
	// on scope exit.
	const scope_restore their_out
	{
		data.out, &out
	};

	json::stack::object top
	{
		*data.out
	};

	const bool success
	{
		linear_proffer_event_one(data)
	};

	// Explicitly close `top` so out.completed() reflects the finished
	// object before we measure it.
	// NOTE(review): explicit destructor call followed by the implicit
	// destruction at scope exit — presumably json::stack::object tolerates
	// a second (no-op) destruction; verify against its implementation.
	top.~object();
	return success?
		size(out.completed()):
		0UL;
}
|
|
|
|
|
|
|
|
/// Generates a candidate /sync response for a single event by
/// iterating all of the handlers. Returns true if at least one handler
/// committed output for the current event.
bool
ircd::m::sync::linear_proffer_event_one(data &data)
{
	bool ret{false};
	m::sync::for_each(string_view{}, [&data, &ret]
	(item &item)
	{
		// Roll back this handler's output unless it reports interest.
		json::stack::checkpoint checkpoint
		{
			*data.out
		};

		if(item.linear(data))
			ret = true;
		else
			checkpoint.rollback();

		// Always continue to the next item.
		return true;
	});

	return ret;
}
|
|
|
|
|
2018-10-07 07:17:46 +02:00
|
|
|
//
|
|
|
|
// longpoll
|
|
|
|
//
|
|
|
|
|
2019-07-08 04:56:32 +02:00
|
|
|
// Internal state and helpers for longpoll-sync. The dock is notified from
// the vm.notify hook each time an event is processed; poll/polled conduct
// the wait-and-check cycle for a blocked /sync request.
namespace ircd::m::sync::longpoll
{
	static bool polled(data &, const args &);
	static int poll(data &);
	static void handle_notify(const m::event &, m::vm::eval &);

	extern m::hookfn<m::vm::eval &> notified;
	extern ctx::dock dock;
}
|
|
|
|
|
|
|
|
// Waiters (longpolling /sync requests) block on this dock; it is notified
// by handle_notify() whenever the vm processes a notifiable event.
decltype(ircd::m::sync::longpoll::dock)
ircd::m::sync::longpoll::dock;
|
|
|
|
|
2018-10-07 07:17:46 +02:00
|
|
|
// Hook registration: handle_notify is invoked at the vm.notify site for
// every event evaluation.
decltype(ircd::m::sync::longpoll::notified)
ircd::m::sync::longpoll::notified
{
	handle_notify,
	{
		{ "_site", "vm.notify" },
	}
};
|
|
|
|
|
|
|
|
void
|
|
|
|
ircd::m::sync::longpoll::handle_notify(const m::event &event,
|
|
|
|
m::vm::eval &eval)
|
2019-07-14 08:07:29 +02:00
|
|
|
try
|
2018-10-07 07:17:46 +02:00
|
|
|
{
|
|
|
|
assert(eval.opts);
|
|
|
|
if(!eval.opts->notify_clients)
|
|
|
|
return;
|
|
|
|
|
|
|
|
dock.notify_all();
|
|
|
|
}
|
2019-07-14 08:07:29 +02:00
|
|
|
catch(const ctx::interrupted &)
|
|
|
|
{
|
|
|
|
throw;
|
|
|
|
}
|
|
|
|
catch(const std::exception &e)
|
|
|
|
{
|
|
|
|
log::critical
|
|
|
|
{
|
|
|
|
log, "request %s longpoll notify :%s",
|
|
|
|
loghead(eval),
|
|
|
|
e.what(),
|
|
|
|
};
|
|
|
|
}
|
2018-10-07 07:17:46 +02:00
|
|
|
|
2019-07-10 04:19:56 +02:00
|
|
|
/// Longpolling blocks the client's request until a relevant event is processed
/// by the m::vm. If no event is processed by a timeout this returns false.
/// poll() returns -1 to indicate "keep waiting", in which case the range's
/// upper bound is advanced and the wait continues; any other value is the
/// final handled/not-handled result.
bool
ircd::m::sync::longpoll_handle(data &data)
try
{
	int ret;
	while((ret = longpoll::poll(data)) == -1)
	{
		// When the client explicitly gives a next_batch token we have to
		// adhere to it and return an empty response before going past their
		// desired upper-bound for this /sync.
		if(data.args->next_batch_token)
			if(data.range.first >= data.range.second || data.range.second >= vm::sequence::retired)
				return false;

		++data.range.second;
		assert(data.range.first <= data.range.second);
	}

	return ret;
}
catch(const std::system_error &e)
{
	// System errors (e.g. client disconnects) are expected here; log at
	// a lower severity than true failures but still propagate.
	log::derror
	{
		log, "longpoll %s failed :%s",
		loghead(data),
		e.what()
	};

	throw;
}
catch(const std::exception &e)
{
	log::error
	{
		log, "longpoll %s FAILED :%s",
		loghead(data),
		e.what()
	};

	throw;
}
|
2017-09-25 03:05:42 +02:00
|
|
|
|
2019-07-10 04:19:56 +02:00
|
|
|
/// When the vm's sequence number is incremented our dock is notified and the
/// event at that next sequence number is fetched. That event gets proffered
/// around the linear sync handlers for whether it's relevant to the user
/// making the request on this stack.
///
/// If relevant, we respond immediately with that one event and finish the
/// request right there, providing them the next since token of one-past the
/// event_idx that was just synchronized.
///
/// If not relevant, we send nothing and continue checking events that come
/// through until the timeout. This will be an empty response providing the
/// client with the next since token of one past where we left off (vm's
/// current sequence number) to start the next /sync.
///
/// @returns
/// - true if a relevant event was hit and output to the client. If so, this
/// request is finished and nothing else can be sent to the client.
/// - false if a timeout occurred. Nothing was sent to the client so the
/// request must be finished upstack, or an exception may be thrown, etc.
/// - -1 to continue the polling loop when no relevant event was hit. Nothing
/// has been sent to the client yet here either.
///
int
ircd::m::sync::longpoll::poll(data &data)
{
	// Ready-predicate for the dock wait: there is something to check only
	// when the window's upper-bound refers to an event the vm has actually
	// retired.
	const auto ready{[&data]
	{
		assert(data.range.second <= m::vm::sequence::retired + 1);
		return data.range.second <= m::vm::sequence::retired;
	}};

	// Sleep until an event arrives or the request's deadline passes;
	// a wait_until() failure here is the timeout case (returns false/0).
	assert(data.args);
	if(!dock.wait_until(data.args->timesout, ready))
		return false;

	// Check if client went away while we were sleeping,
	// if so, just returning true is the easiest way out w/o throwing
	assert(data.client && data.client->sock);
	if(unlikely(!data.client || !data.client->sock))
		return true;

	// slightly more involved check of the socket before
	// we waste resources on the operation; throws.
	const auto &client(*data.client);
	net::check(*client.sock);

	// Keep in mind if the handler returns true that means
	// it made a hit and we can return true to exit longpoll
	// and end the request cleanly.
	if(polled(data, *data.args))
		return true;

	return -1;
}
|
|
|
|
|
2019-07-10 04:19:56 +02:00
|
|
|
/// Evaluate the event indexed by data.range.second (the upper-bound). The
/// sync system sees a data.range window of [since, U] where U is a counter
/// that starts at the `vm::sequence::retired` event_idx
bool
ircd::m::sync::longpoll::polled(data &data,
                                const args &args)
{
	// Fetch the event under consideration; a missing or invalid index is
	// simply not a hit for this iteration.
	const m::event::fetch event
	{
		data.range.second, std::nothrow
	};

	if(!event.valid)
		return false;

	// Point the sync data at this event/idx for the duration of the
	// proffer; both are restored on scope exit.
	const scope_restore their_event
	{
		data.event, &event
	};

	const scope_restore their_event_idx
	{
		data.event_idx, event.event_idx
	};

	// Scratch buffer the linear handlers render their output into.
	const unique_buffer<mutable_buffer> scratch
	{
		96_KiB
	};

	// Bytes the handlers wrote into scratch; zero means the event is not
	// relevant to this user and nothing will be sent.
	const size_t consumed
	{
		linear_proffer_event(data, scratch)
	};

	if(!consumed)
		return false;

	// In semaphore-mode we're just here to ride the longpoll's blocking
	// behavior. We want the client to get an empty response.
	if(args.semaphore)
		return false;

	// View the handlers' output as a vector of json objects to be merged
	// into the response.
	const json::vector vector
	{
		string_view
		{
			buffer::data(scratch), consumed
		}
	};

	json::stack::object top
	{
		*data.out
	};

	json::merge(top, vector);

	// next_batch is one-past the synchronized event_idx (or one-past the
	// window's upper-bound if no idx was recorded), clamped to one-past
	// the vm's currently retired sequence.
	const auto next
	{
		data.event_idx?
			std::min(data.event_idx + 1, vm::sequence::retired + 1):
			std::min(data.range.second + 1, vm::sequence::retired + 1)
	};

	json::stack::member
	{
		top, "next_batch", json::value
		{
			lex_cast(next), json::STRING
		}
	};

	log::debug
	{
		log, "request %s longpoll hit:%lu consumed:%zu complete @%lu",
		loghead(data),
		event.event_idx,
		consumed,
		next
	};

	return true;
}
|
2019-07-07 05:17:42 +02:00
|
|
|
|
2019-07-08 05:04:41 +02:00
|
|
|
//
// data
//

/// Assembles the state for one /sync request stack. `range` is the
/// [since, upper-bound) event_idx window this sync covers. Pointer
/// parameters (client, out, stats, args) are stored as-is; the filter is
/// fetched eagerly here when args were provided.
ircd::m::sync::data::data(const m::user &user,
                          const m::events::range &range,
                          ircd::client *const &client,
                          json::stack *const &out,
                          sync::stats *const &stats,
                          const sync::args *const &args,
                          const device::id &device_id)
:range
{
	range
}
,stats
{
	stats
}
,client
{
	client
}
,args
{
	args
}
,user
{
	user
}
,user_room
{
	user
}
,user_state
{
	user_room
}
,user_rooms
{
	user
}
,filter_buf
{
	// Only fetch the filter when args were provided; otherwise start with
	// an empty buffer (parsed below as an empty json::object).
	this->args?
		m::filter::get(this->args->filter_id, user):
		std::string{}
}
,filter
{
	json::object{filter_buf}
}
,device_id
{
	device_id
}
,out
{
	out
}
{
}
|
|
|
|
|
|
|
|
/// Out-of-line destructor; empty body, members destruct in reverse order.
ircd::m::sync::data::~data()
noexcept
{
}
|
|
|
|
|
2019-08-02 23:14:02 +02:00
|
|
|
//
// sync/args.h
//

/// Ceiling for the client-requested ?timeout= parameter (milliseconds).
ircd::conf::item<ircd::milliseconds>
ircd::m::sync::args::timeout_max
{
	{ "name", "ircd.client.sync.timeout.max" },
	{ "default", 180 * 1000L },
};

/// Floor for the client-requested ?timeout= parameter (milliseconds).
ircd::conf::item<ircd::milliseconds>
ircd::m::sync::args::timeout_min
{
	{ "name", "ircd.client.sync.timeout.min" },
	{ "default", 15 * 1000L },
};

/// Used when the client supplies no ?timeout= parameter (milliseconds).
ircd::conf::item<ircd::milliseconds>
ircd::m::sync::args::timeout_default
{
	{ "name", "ircd.client.sync.timeout.default" },
	{ "default", 90 * 1000L },
};
|
|
|
|
|
2019-07-07 05:17:42 +02:00
|
|
|
//
// args::args
//

/// Parse and normalize the /sync request's query-string parameters.
/// Throws m::BAD_REQUEST when a numeric parameter fails lexical cast.
ircd::m::sync::args::args(const resource::request &request)
try
:filter_id
{
	request.query["filter"]
}
,since_token
{
	// A since token of the form "A_B" is split here; the second part
	// (if present) serves as the default for next_batch below.
	split(request.query.get("since", "0"_sv), '_')
}
,since
{
	lex_cast<uint64_t>(since_token.first)
}
,next_batch_token
{
	request.query.get("next_batch", since_token.second)
}
,next_batch
{
	// -1 (wrapped to uint64_t max) sentinels "no next_batch given".
	uint64_t(lex_cast<int64_t>(next_batch_token?: "-1"_sv))
}
,timesout{[&request]
{
	// Clamp the requested timeout into [timeout_min, timeout_max] and
	// convert it to an absolute deadline for the longpoll wait.
	auto ret
	{
		request.query.get("timeout", milliseconds(timeout_default))
	};

	ret = std::min(ret, milliseconds(timeout_max));
	ret = std::max(ret, milliseconds(timeout_min));
	return now<steady_point>() + ret;
}()}
,full_state
{
	request.query.get("full_state", false)
}
,set_presence
{
	request.query.get("set_presence", true)
}
,phased
{
	request.query.get("phased", true)
}
,semaphore
{
	request.query.get("semaphore", false)
}
{
}
catch(const bad_lex_cast &e)
{
	// NOTE(review): this handler also fires for an unparsable ?next_batch=
	// value, though the message only mentions the since parameter — confirm
	// whether the message should be generalized.
	throw m::BAD_REQUEST
	{
		"Since parameter invalid :%s", e.what()
	};
}
|