2018-02-04 03:22:01 +01:00
|
|
|
// Matrix Construct
|
|
|
|
//
|
|
|
|
// Copyright (C) Matrix Construct Developers, Authors & Contributors
|
|
|
|
// Copyright (C) 2016-2018 Jason Volk <jason@zemos.net>
|
|
|
|
//
|
|
|
|
// Permission to use, copy, modify, and/or distribute this software for any
|
|
|
|
// purpose with or without fee is hereby granted, provided that the above
|
|
|
|
// copyright notice and this permission notice is present in all copies. The
|
|
|
|
// full license for this software is available in the LICENSE file.
|
2017-08-23 23:10:28 +02:00
|
|
|
|
2018-09-01 09:51:40 +02:00
|
|
|
#include "sync.h"
|
2017-08-23 23:10:28 +02:00
|
|
|
|
2018-09-01 09:51:40 +02:00
|
|
|
// Module API header: identifies this loadable module to the server's module
// loader. The string names the Matrix C2S spec section implemented here.
ircd::mapi::header
IRCD_MODULE
{
	"Client 6.2.1 :Sync"
};
|
|
|
|
|
2018-09-01 09:51:40 +02:00
|
|
|
// REST resource for the /sync endpoint; requests to this path are routed to
// the method handlers registered against it (see method_get below).
decltype(ircd::m::sync::resource)
ircd::m::sync::resource
{
	"/_matrix/client/r0/sync",
	{
		description
	}
};
|
|
|
|
|
2018-09-01 09:51:40 +02:00
|
|
|
// Human-readable description attached to the resource registration above;
// text is taken from the Matrix client-server specification section 6.2.1.
decltype(ircd::m::sync::description)
ircd::m::sync::description
{R"(6.2.1

Synchronise the client's state with the latest state on the server. Clients
use this API when they first log in to get an initial snapshot of the state
on the server, and then continue to call this API to get incremental deltas
to the state, and to receive new messages.

)"};
|
|
|
|
|
2019-01-10 22:19:07 +01:00
|
|
|
// Conf item: high-watermark (bytes) for the json::stack used by handle_get;
// when buffered output reaches this size the stack invokes sync::flush to
// send a chunk to the client.
decltype(ircd::m::sync::flush_hiwat)
ircd::m::sync::flush_hiwat
{
	{ "name", "ircd.client.sync.flush.hiwat" },
	{ "default", long(48_KiB) },
};
|
|
|
|
|
|
|
|
// Conf item: size of the chunked-encoding response buffer allocated per
// /sync request (passed to resource::response::chunked in handle_get).
decltype(ircd::m::sync::buffer_size)
ircd::m::sync::buffer_size
{
	{ "name", "ircd.client.sync.buffer_size" },
	{ "default", long(128_KiB) },
};
|
|
|
|
|
2019-02-27 02:02:21 +01:00
|
|
|
// Conf item: maximum width of the event-index range for which the "linear"
// strategy is used; wider ranges fall back to the "polylog" strategy
// (see should_linear in handle_get).
decltype(ircd::m::sync::linear_delta_max)
ircd::m::sync::linear_delta_max
{
	{ "name", "ircd.client.sync.linear.delta.max" },
	{ "default", 1024 },
};
|
|
|
|
|
2019-03-07 20:53:58 +01:00
|
|
|
// Conf item: when true, force the polylog strategy for every shortpoll and
// make longpoll::poll bail out without handling events (debug/ops knob).
decltype(ircd::m::sync::polylog_only)
ircd::m::sync::polylog_only
{
	{ "name", "ircd.client.sync.polylog_only" },
	{ "default", false },
};
|
|
|
|
|
|
|
|
// Conf item: gates the longpoll phase in handle_get; when false a request
// which didn't shortpoll proceeds directly to the empty timeout response.
decltype(ircd::m::sync::longpoll_enable)
ircd::m::sync::longpoll_enable
{
	{ "name", "ircd.client.sync.longpoll.enable" },
	{ "default", true },
};
|
|
|
|
|
2018-09-01 09:51:40 +02:00
|
|
|
//
|
|
|
|
// GET sync
|
|
|
|
//
|
|
|
|
|
|
|
|
//
// GET sync
//

// Registers handle_get for GET on the resource. Requires authentication;
// the -1s appears to disable the default request timeout so longpolls can
// outlive it — confirm against resource::method's options semantics.
decltype(ircd::m::sync::method_get)
ircd::m::sync::method_get
{
	resource, "GET", handle_get,
	{
		method_get.REQUIRES_AUTH,
		-1s,
	}
};
|
|
|
|
|
2018-09-01 09:51:40 +02:00
|
|
|
/// Top-level GET /sync handler. Parses the request, selects one of three
/// strategies (polylog, linear, longpoll) based on the requested range, and
/// streams a chunked JSON response to the client.
ircd::resource::response
ircd::m::sync::handle_get(client &client,
                          const resource::request &request)
{
	// Parse the request options
	const args args
	{
		request
	};

	// The range to `/sync`. We involve events starting at the range.first
	// index in this sync. We will not involve events with an index equal
	// or greater than the range.second. In this case the range.second does not
	// exist yet because it is one past the server's current_sequence counter.
	const m::events::range range
	{
		args.since, std::min(args.next_batch, m::vm::current_sequence + 1)
	};

	// When the range indexes are the same, the client is polling for the next
	// event which doesn't exist yet. There is no reason for the since parameter
	// to be greater than that.
	if(range.first > range.second)
		throw m::NOT_FOUND
		{
			"Since parameter is too far in the future..."
		};

	// Keep state for statistics of this sync here on the stack.
	stats stats;
	data data
	{
		request.user_id,
		range,
		&client,
		nullptr,   // data.out is wired up after `out` is constructed below
		&stats,
		args.filter_id
	};

	// Start the chunked encoded response.
	resource::response::chunked response
	{
		client, http::OK, buffer_size
	};

	// JSON output stack over the response buffer; when the buffered output
	// passes flush_hiwat, sync::flush sends a chunk to the client.
	json::stack out
	{
		response.buf,
		std::bind(&sync::flush, std::ref(data), std::ref(response), ph::_1),
		size_t(flush_hiwat)
	};
	data.out = &out;

	log::debug
	{
		log, "request %s", loghead(data)
	};

	// The client is ahead of the server's sequence: nothing to return yet,
	// so we must wait (longpoll) rather than scan.
	const bool should_longpoll
	{
		range.first > vm::current_sequence
	};

	// Small ranges use the linear single-pass-over-events strategy unless
	// the polylog_only conf item forces the polylog strategy.
	const bool should_linear
	{
		!should_longpoll &&
		!bool(polylog_only) &&
		range.second - range.first <= size_t(linear_delta_max)
	};

	// Runs the selected shortpoll strategy; true if it produced a response.
	const bool shortpolled
	{
		should_longpoll?
			false:
		should_linear?
			linear_handle(data):
			polylog_handle(data)
	};

	// When shortpoll was successful, do nothing else.
	if(shortpolled)
		return {};

	// Shortpoll produced nothing (or was skipped); wait for new events.
	if(longpoll_enable && longpoll::poll(data, args))
		return {};

	// next_batch for the empty response: with polylog_only the range start
	// is re-issued so the client retries the same window; otherwise advance.
	const auto &next_batch
	{
		polylog_only?
			data.range.first:
			data.range.second
	};

	// A user-timeout occurred. According to the spec we return a
	// 200 with empty fields rather than a 408.
	empty_response(data, next_batch);
	return {};
}
|
2018-12-02 02:23:42 +01:00
|
|
|
|
2019-01-10 22:19:07 +01:00
|
|
|
/// Writes the spec-mandated "nothing happened" /sync response: a 200 body
/// with empty rooms/presence objects and the supplied next_batch token.
void
ircd::m::sync::empty_response(data &data,
                              const uint64_t &next_batch)
{
	json::stack::object top
	{
		*data.out
	};

	// Empty objects added to output otherwise Riot b0rks.
	json::stack::object
	{
		top, "rooms"
	};

	json::stack::object
	{
		top, "presence"
	};

	// next_batch is serialized as a JSON string, per the spec's token type.
	json::stack::member
	{
		top, "next_batch", json::value
		{
			lex_cast(next_batch), json::STRING
		}
	};

	log::debug
	{
		log, "request %s timeout @%lu",
		loghead(data),
		next_batch
	};
}
|
2019-01-10 22:19:07 +01:00
|
|
|
|
|
|
|
/// json::stack flush callback for /sync responses: forwards the buffered
/// JSON to the client as one chunk and accounts the transfer in the
/// request's stats (when present). Returns the buffer actually written.
ircd::const_buffer
ircd::m::sync::flush(data &data,
                     resource::response::chunked &response,
                     const const_buffer &buffer)
{
	// Ship this chunk to the client now.
	const auto written
	{
		response.flush(buffer)
	};

	// Stats are optional on the data; skip accounting when absent.
	if(!data.stats)
		return written;

	data.stats->flush_count++;
	data.stats->flush_bytes += size(written);
	return written;
}
|
2018-04-17 02:57:41 +02:00
|
|
|
|
2019-01-09 00:10:06 +01:00
|
|
|
// polylog
|
|
|
|
//
|
2019-02-27 02:02:21 +01:00
|
|
|
// Random access approach for large `since` ranges. The /sync schema itself is
|
|
|
|
// recursed. For every component in the schema, the handler seeks the events
|
|
|
|
// appropriate for the user and appends it to the output. Concretely, this
|
|
|
|
// involves a full iteration of the rooms a user is a member of, and a full
|
|
|
|
// iteration of the presence status for all users visible to a user, etc.
|
|
|
|
//
|
|
|
|
// This entire process occurs in a single pass. The schema is traced with
|
|
|
|
// json::stack and its buffer is flushed to the client periodically with
|
|
|
|
// chunked encoding.
|
2019-01-09 00:10:06 +01:00
|
|
|
|
|
|
|
/// Polylog strategy: iterate every registered sync item and let each seek
/// and append its own content for this user/range. Each item's output is
/// wrapped in a checkpoint so a non-contributing item leaves no residue;
/// if no item contributed, the whole response object is rolled back and
/// false is returned so the caller can longpoll instead.
bool
ircd::m::sync::polylog_handle(data &data)
try
{
	// Outer checkpoint: lets us erase the entire `top` object on no-op.
	json::stack::checkpoint checkpoint
	{
		*data.out
	};

	json::stack::object top
	{
		*data.out
	};

	bool ret{false};
	m::sync::for_each(string_view{}, [&data, &ret]
	(item &item)
	{
		// Per-item checkpoint: rolled back when the item adds nothing.
		json::stack::checkpoint checkpoint
		{
			*data.out
		};

		json::stack::object object
		{
			*data.out, item.member_name()
		};

		if(item.polylog(data))
			ret = true;
		else
			checkpoint.rollback();

		return true;
	});

	// Only a non-empty response gets a next_batch token.
	if(ret)
		json::stack::member
		{
			*data.out, "next_batch", json::value
			{
				lex_cast(data.range.second), json::STRING
			}
		};

	if(!ret)
		checkpoint.rollback();

	if(stats_info) log::info
	{
		log, "request %s polylog commit:%b complete @%lu",
		loghead(data),
		ret,
		data.range.second
	};

	return ret;
}
catch(const std::exception &e)
{
	log::error
	{
		log, "polylog %s FAILED :%s",
		loghead(data),
		e.what()
	};

	throw;
}
|
|
|
|
|
2019-01-10 05:39:12 +01:00
|
|
|
//
|
|
|
|
// linear
|
|
|
|
//
|
2019-02-27 02:02:21 +01:00
|
|
|
// Approach for small `since` ranges. The range of events is iterated and
|
|
|
|
// the event itself is presented to each handler in the schema. This also
|
|
|
|
// involves a json::stack trace of the schema so that if the handler determines
|
|
|
|
// the event is appropriate for syncing to the user the output buffer will
|
|
|
|
// contain a residue of a /sync response with a single event.
|
|
|
|
//
|
|
|
|
// After the iteration of events is complete we are left with several buffers
|
|
|
|
// of properly formatted individual /sync responses which we rewrite into a
|
|
|
|
// single response to overcome the inefficiency of request ping-pong under
|
|
|
|
// heavy load.
|
|
|
|
|
|
|
|
// Forward declarations for the linear strategy's internal pipeline:
// linear_proffer drives the event iteration, linear_proffer_event frames
// one event's candidate response, linear_proffer_event_one runs the items.
namespace ircd::m::sync
{
	static bool linear_proffer_event_one(data &);
	static size_t linear_proffer_event(data &, const mutable_buffer &);
	static std::pair<event::idx, bool> linear_proffer(data &, window_buffer &);
}
|
2019-01-10 05:39:12 +01:00
|
|
|
|
|
|
|
/// Linear strategy entry point: proffers each event in the range into a
/// scratch window_buffer producing per-event response fragments, then
/// merges the fragments into a single response object. Returns true (as
/// bool of `last`) when at least one event was synced.
bool
ircd::m::sync::linear_handle(data &data)
try
{
	// Allows the whole `top` object to be erased when nothing was synced.
	json::stack::checkpoint checkpoint
	{
		*data.out
	};

	json::stack::object top
	{
		*data.out
	};

	// Scratch space for the per-event response fragments.
	const unique_buffer<mutable_buffer> buf
	{
		96_KiB //TODO: XXX
	};

	window_buffer wb{buf};

	// last: event_idx of the last event that fit; completed: whether the
	// whole range was iterated (buffer didn't fill up first).
	const auto &[last, completed]
	{
		linear_proffer(data, wb)
	};

	// The fragments in the buffer form a vector of JSON objects.
	const json::vector vector
	{
		wb.completed()
	};

	// next_batch: end of range when fully iterated; resume just past the
	// last synced event when truncated; 0 when nothing was synced.
	const auto next
	{
		last && completed?
			data.range.second:
		last?
			last + 1:
			0UL
	};

	if(last)
	{
		json::stack::member
		{
			top, "next_batch", json::value
			{
				lex_cast(next), json::STRING
			}
		};

		// Rewrite the per-event fragments into the single response.
		json::merge(top, vector);
	}
	else checkpoint.rollback();

	log::debug
	{
		log, "request %s linear last:%lu complete:%b @%lu",
		loghead(data),
		last,
		completed,
		next
	};

	return last;
}
catch(const std::exception &e)
{
	log::error
	{
		log, "linear %s FAILED :%s",
		loghead(data),
		e.what()
	};

	throw;
}
|
|
|
|
|
2019-02-27 02:02:21 +01:00
|
|
|
/// Iterates the events in the data.range and creates a json::vector in
/// the supplied window_buffer. The return value is the event_idx of the
/// last event which fit in the buffer, or 0 if nothing was of interest
/// to our client in the event iteration; the bool is whether the full
/// range was iterated without the buffer running low.
std::pair<ircd::m::event::idx, bool>
ircd::m::sync::linear_proffer(data &data,
                              window_buffer &wb)
{
	event::idx ret(0);
	const auto closure{[&data, &wb, &ret]
	(const m::event::idx &event_idx, const m::event &event)
	{
		// Expose the current event to the item handlers via `data`,
		// restored automatically when this event is done.
		const scope_restore their_event
		{
			data.event, &event
		};

		const scope_restore their_event_idx
		{
			data.event_idx, event_idx
		};

		// Consume a slice of the window for this event's fragment; a
		// zero-length result (nothing of interest) consumes nothing.
		wb([&data, &ret, &event_idx]
		(const mutable_buffer &buf)
		{
			const auto consumed
			{
				linear_proffer_event(data, buf)
			};

			if(consumed)
				ret = event_idx;

			return consumed;
		});

		// Stop iterating when the remaining window can't safely hold
		// another fragment.
		return wb.remaining() >= 65_KiB; //TODO: XXX
	}};

	const auto completed
	{
		m::events::for_each(data.range, closure)
	};

	return
	{
		ret, completed
	};
}
|
|
|
|
|
|
|
|
/// Sets up a json::stack for the iteration of handlers for
/// one event. Returns the number of bytes of the candidate response
/// fragment written into buf, or 0 when no handler claimed the event.
size_t
ircd::m::sync::linear_proffer_event(data &data,
                                    const mutable_buffer &buf)
{
	// Redirect data.out into a private stack over the caller's buffer
	// for the duration of this event.
	json::stack out{buf};
	const scope_restore their_out
	{
		data.out, &out
	};

	json::stack::object top
	{
		*data.out
	};

	const bool success
	{
		linear_proffer_event_one(data)
	};

	// Explicitly close the object now so out.completed() below measures
	// the finished fragment including the closing brace.
	top.~object();
	return success?
		size(out.completed()):
		0UL;
}
|
|
|
|
|
|
|
|
/// Generates a candidate /sync response for a single event by
|
|
|
|
/// iterating all of the handlers.
|
|
|
|
bool
|
|
|
|
ircd::m::sync::linear_proffer_event_one(data &data)
|
|
|
|
{
|
|
|
|
return !m::sync::for_each(string_view{}, [&data]
|
|
|
|
(item &item)
|
|
|
|
{
|
|
|
|
json::stack::checkpoint checkpoint
|
|
|
|
{
|
|
|
|
*data.out
|
|
|
|
};
|
|
|
|
|
|
|
|
if(item.linear(data))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
checkpoint.rollback();
|
|
|
|
return true;
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
2018-10-07 07:17:46 +02:00
|
|
|
//
|
|
|
|
// longpoll
|
|
|
|
//
|
|
|
|
|
|
|
|
//
// longpoll
//

// Hook registration: handle_notify is invoked at the "vm.notify" site,
// i.e. for events accepted by the event processing machine.
decltype(ircd::m::sync::longpoll::notified)
ircd::m::sync::longpoll::notified
{
	handle_notify,
	{
		{ "_site", "vm.notify" },
	}
};
|
|
|
|
|
|
|
|
/// vm.notify hook handler: queues accepted events for waiting longpollers
/// and wakes them. When no client is polling the queue is kept empty.
void
ircd::m::sync::longpoll::handle_notify(const m::event &event,
                                       m::vm::eval &eval)
{
	assert(eval.opts);
	if(!eval.opts->notify_clients)
		return;

	// No pollers: drop any backlog rather than let it grow unbounded.
	if(!polling)
	{
		queue.clear();
		return;
	}

	queue.emplace_back(eval);
	dock.notify_all();
}
|
|
|
|
|
2018-12-02 02:23:42 +01:00
|
|
|
/// Longpoll strategy: wait (up to args.timesout) for newly accepted events
/// and hand each to longpoll::handle. Returns true when a response was
/// produced; false on timeout (caller then sends the empty response).
bool
ircd::m::sync::longpoll::poll(data &data,
                              const args &args)
try
{
	// Count this fiber as a poller for the lifetime of the loop (RAII);
	// handle_notify only queues events while polling is non-zero.
	const scope_count polling{longpoll::polling}; do
	{
		// false return means the deadline passed with no notification.
		if(!dock.wait_until(args.timesout))
			break;

		// Bail out early (via the system_error path) if the client hung up
		// while we were waiting.
		assert(data.client && data.client->sock);
		check(*data.client->sock);

		if(queue.empty())
			continue;

		const auto &accepted
		{
			queue.front()
		};

		// Only the last poller to consume this entry pops it, so every
		// concurrent poller gets to see the event.
		const unwind pop{[]
		{
			if(longpoll::polling <= 1)
				queue.pop_front();
		}};

		// Debug/ops knob: never answer from longpoll when set.
		if(polylog_only)
			return false;

		if(handle(data, args, accepted))
			return true;
	}
	while(1);

	return false;
}
catch(const std::system_error &e)
{
	// e.g. client disconnects during the wait; expected, log at debug-error.
	log::derror
	{
		log, "longpoll %s failed :%s",
		loghead(data),
		e.what()
	};

	throw;
}
catch(const std::exception &e)
{
	log::error
	{
		log, "longpoll %s FAILED :%s",
		loghead(data),
		e.what()
	};

	throw;
}
|
2017-09-25 03:05:42 +02:00
|
|
|
|
2018-04-22 08:50:16 +02:00
|
|
|
/// Attempts to sync a single queued event to the longpolling client.
/// Returns true if some item handler produced output (response committed);
/// false rolls everything back so the poll loop keeps waiting.
bool
ircd::m::sync::longpoll::handle(data &data,
                                const args &args,
                                const accepted &event)
{
	// Expose the accepted event to the item handlers via `data`.
	const scope_restore their_event
	{
		data.event, &event
	};

	const scope_restore their_event_idx
	{
		data.event_idx, event.event_idx
	};

	// Propagate the originating client's transaction id so their own echo
	// can be annotated — presumably consumed by the item handlers; confirm.
	const scope_restore client_txnid
	{
		data.client_txnid, event.client_txnid
	};

	json::stack::checkpoint checkpoint
	{
		*data.out
	};

	json::stack::object top
	{
		*data.out
	};

	const bool ret
	{
		linear_proffer_event_one(data)
	};

	if(ret)
	{
		// Resume just past this event; fall back to the range end when
		// the event_idx is zero/unset.
		const auto next
		{
			data.event_idx?
				data.event_idx + 1:
				data.range.second
		};

		json::stack::member
		{
			*data.out, "next_batch", json::value
			{
				lex_cast(next), json::STRING
			}
		};

		log::debug
		{
			log, "request %s longpoll got:%lu complete @%lu",
			loghead(data),
			event.event_idx,
			next
		};
	}
	else checkpoint.rollback();

	return ret;
}
|
2019-01-26 22:19:16 +01:00
|
|
|
|
|
|
|
//
|
|
|
|
// sync/args.h
|
|
|
|
//
|
|
|
|
|
|
|
|
// Conf item: upper clamp (ms) for the client-supplied ?timeout parameter.
ircd::conf::item<ircd::milliseconds>
ircd::m::sync::args::timeout_max
{
	{ "name", "ircd.client.sync.timeout.max" },
	{ "default", 15 * 1000L },
};
|
|
|
|
|
|
|
|
// Conf item: lower clamp (ms) for the client-supplied ?timeout parameter.
ircd::conf::item<ircd::milliseconds>
ircd::m::sync::args::timeout_min
{
	{ "name", "ircd.client.sync.timeout.min" },
	{ "default", 5 * 1000L },
};
|
|
|
|
|
|
|
|
// Conf item: timeout (ms) used when the client supplies no ?timeout.
ircd::conf::item<ircd::milliseconds>
ircd::m::sync::args::timeout_default
{
	{ "name", "ircd.client.sync.timeout.default" },
	{ "default", 10 * 1000L },
};
|
|
|
|
|
|
|
|
//
|
|
|
|
// args::args
|
|
|
|
//
|
|
|
|
|
|
|
|
//
// args::args
//

/// Constructs the parsed /sync argument view over the request. Member
/// initializers elsewhere in the class presumably lex_cast the query
/// parameters; a failed cast surfaces here as bad_lex_cast and is
/// translated into a 400 M_BAD_REQUEST for the client.
ircd::m::sync::args::args(const resource::request &request)
try
:request
{
	request
}
{
}
catch(const bad_lex_cast &e)
{
	throw m::BAD_REQUEST
	{
		"Since parameter invalid :%s", e.what()
	};
}
|