// Matrix Construct
//
// Copyright (C) Matrix Construct Developers, Authors & Contributors
// Copyright (C) 2016-2018 Jason Volk <jason@zemos.net>
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice is present in all copies. The
// full license for this software is available in the LICENSE file.

//
// client::settings conf::item's
//

ircd::conf::item<size_t>
ircd::client::settings::max_client
{
    { "name", "ircd.client.max_client" },
    { "default", 16384L },
};

ircd::conf::item<size_t>
ircd::client::settings::max_client_per_peer
{
    { "name", "ircd.client.max_client_per_peer" },
    { "default", 24L },
};

ircd::conf::item<size_t>
ircd::client::settings::stack_size
{
    { "name", "ircd.client.stack_size" },
    { "default", ssize_t(1_MiB) },
};

ircd::conf::item<size_t>
ircd::client::settings::pool_size
{
    {
        { "name", "ircd.client.pool_size" },
        { "default", 96L },
    }, []
    {
        using client = ircd::client;
        client::pool.set(client::settings::pool_size);
    }
};

/// Linkage for the default settings
decltype(ircd::client::settings)
ircd::client::settings;

//
// client::conf conf::item's
//

ircd::conf::item<ircd::seconds>
ircd::client::conf::async_timeout_default
{
    { "name", "ircd.client.conf.async_timeout" },
    { "default", 305L },
};

ircd::conf::item<ircd::seconds>
ircd::client::conf::request_timeout_default
{
    { "name", "ircd.client.conf.request_timeout" },
    { "default", 33L },
};

ircd::conf::item<size_t>
ircd::client::conf::header_max_size_default
{
    { "name", "ircd.client.conf.header_max_size" },
    { "default", ssize_t(8_KiB) },
};

/// Linkage for the default conf
decltype(ircd::client::default_conf)
ircd::client::default_conf;

decltype(ircd::client::log)
ircd::client::log
{
    "client", 'C'
};

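/// Parameters for the request context pool, filled in from the
/// client::settings conf items above (stack size and pool size).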
decltype(ircd::client::pool_opts)
ircd::client::pool_opts
{
    size_t(settings.stack_size),
    size_t(settings.pool_size),
};

/// The pool of request contexts. When a client makes a request it does so by acquiring
/// a stack from this pool. The request handling and response logic can then be written
/// in a synchronous manner as if each connection had its own thread.
[[clang::always_destroy]]
decltype(ircd::client::pool)
ircd::client::pool
{
    "client", pool_opts
};

/// A general semaphore for the client system; used for coarse operations
/// like waiting for all clients to disconnect / system shutdown et al.
decltype(ircd::client::dock)
ircd::client::dock;

decltype(ircd::client::ctr)
ircd::client::ctr;

// Linkage for the container of all active clients for iteration purposes.
template<>
decltype(ircd::util::instance_multimap<ircd::net::ipport, ircd::client, ircd::net::ipport::cmp_ip>::map)
ircd::util::instance_multimap<ircd::net::ipport, ircd::client, ircd::net::ipport::cmp_ip>::map
{};

//
// init
//

ircd::client::init::init()
{
    spawn();
}

[[gnu::cold]]
ircd::client::init::~init()
noexcept
{
    const ctx::uninterruptible::nothrow ui;

    terminate_all();
    close_all();
    wait_all();

    log::debug
    {
        log, "All client contexts, connections, and requests are clear.",
    };

    assert(client::map.empty());
}

//
// util
//

const ircd::ipport &
ircd::local(const client &client)
{
    return client.local;
}

const ircd::ipport &
ircd::remote(const client &client)
{
    return client.it->first;
}

//
// tool
//

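/// Add the configured number of request contexts to the pool; called from
/// init to bring the pool up to ircd.client.pool_size.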
void
ircd::client::spawn()
{
    pool.add(size_t(settings.pool_size));
}

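/// Wait for all clients to disconnect and all request contexts to finish,
/// logging progress periodically, then join the context pool.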
void
ircd::client::wait_all()
{
    if(pool.active())
        log::dwarning
        {
            log, "Waiting on %zu active of %zu client request contexts; %zu pending; %zu queued.",
            pool.active(),
            pool.size(),
            pool.pending(),
            pool.queued()
        };

    static const auto is_empty
    {
        []() noexcept { return client::map.empty(); }
    };

    while(!dock.wait_for(seconds(3), is_empty))
    {
        for(const auto &[remote, client] : client::map)
        {
            log::dwarning
            {
                log, "Waiting for client %s",
                client->loghead(),
            };

            assert(!client->sock || client->sock->fini);
        }

        log::warning
        {
            log, "Waiting for %zu clients to close...",
            client::map.size(),
        };
    }

    log::debug
    {
        log, "Joining %zu active of %zu client request contexts; %zu pending; %zu queued",
        pool.active(),
        pool.size(),
        pool.pending(),
        pool.queued()
    };

    pool.join();
}

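/// Initiate a hard (RST) close on every connected client; errors from
/// individual sockets are logged and do not stop the sweep.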
void
ircd::client::close_all()
{
    if(!client::map.empty())
        log::debug
        {
            log, "Closing %zu clients", client::map.size()
        };

    auto it(begin(client::map));
    while(it != end(client::map))
    {
        auto c(shared_from(*it->second)); ++it; try
        {
            c->close(net::dc::RST, net::close_ignore);
        }
        catch(const std::exception &e)
        {
            log::derror
            {
                log, "Error disconnecting client @%p: %s", c.get(), e.what()
            };
        }
    }
}

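/// Interrupt all request contexts in the pool.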
void
ircd::client::interrupt_all()
{
    if(pool.active())
        log::warning
        {
            log, "Interrupting %zu active of %zu client request contexts; %zu pending; %zu queued",
            pool.active(),
            pool.size(),
            pool.pending(),
            pool.queued()
        };

    pool.interrupt();
}

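/// Terminate all request contexts in the pool.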
void
ircd::client::terminate_all()
{
    if(pool.active())
        log::warning
        {
            log, "Terminating %zu active of %zu client request contexts; %zu pending; %zu queued",
            pool.active(),
            pool.size(),
            pool.pending(),
            pool.queued()
        };

    pool.terminate();
}

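/// Entry point from the listener for a newly accepted socket: construct
/// the client instance and place it into async mode to await a request.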
void
ircd::client::create(net::listener &,
                     const std::shared_ptr<socket> &sock)
{
    const auto client
    {
        std::make_shared<ircd::client>(sock)
    };

    client->async();
}

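/// Count the active clients connected from a remote address (the map is
/// ordered by net::ipport::cmp_ip).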
size_t
ircd::client::count(const net::ipport &remote)
{
    return client::map.count(remote);
}

ircd::parse::read_closure
ircd::client::read_closure(client &client)
{
    // Returns a function the parser can call when it wants more data
    return [&client](char *&start, char *const &stop)
    {
        char *const got(start);
        read(client, start, stop);
        //std::cout << ">>>> " << std::distance(got, start) << std::endl;
        //std::cout << string_view{got, start} << std::endl;
        //std::cout << "----" << std::endl;
    };
}

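/// Read directly off the client's socket into [start, stop); start is
/// advanced by the number of bytes received and the prior position returned.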
char *
ircd::client::read(client &client,
                   char *&start,
                   char *const &stop)
{
    assert(client.sock);
    auto &sock(*client.sock);
    const mutable_buffer buf
    {
        start, stop
    };

    char *const base(start);
    start += net::read(sock, buf);
    return base;
}

//
// async loop
//

namespace ircd
{
    static bool handle_ec_default(client &, const error_code &);
    static bool handle_ec_timeout(client &);
    static bool handle_ec_short_read(client &);
    static bool handle_ec_eof(client &);
    static bool handle_ec(client &, const error_code &);
}

/// This function is the basis for the client's request loop. We still use
/// an asynchronous pattern until there is activity on the socket (a request)
/// in which case the switch to synchronous mode is made by jumping into an
/// ircd::context drawn from the request pool. When the request is finished,
/// the client exits back into asynchronous mode until the next request is
/// received; rinse and repeat.
///
/// This sequence exists to avoid any possible c10k-style limitation imposed by
/// dedicating a context and its stack space to the lifetime of a connection.
/// This is similar to the thread-per-request pattern before async was in vogue.
///
/// This call returns immediately so we no longer block the current context and
/// its stack while waiting for activity on idle connections between requests.
bool
ircd::client::async()
{
    assert(bool(this->sock));
    assert(bool(this->conf));
    auto &sock(*this->sock);
    if(unlikely(sock.fini))
        return false;

    const auto &timeout
    {
        conf->async_timeout
    };

    const net::wait_opts opts
    {
        net::ready::READ, timeout
    };

    auto handler
    {
        std::bind(client::handle_ready, shared_from(*this), ph::_1)
    };

    // Re-purpose the request time counter into an async timer by marking it.
    timer = ircd::timer{};

    sock(opts, std::move(handler));
    return true;
}

/// The client's socket is ready for reading. This intermediate handler
/// intercepts any errors, otherwise dispatches the client to the request
/// pool to be married with a stack. Right here this handler is executing on
/// the main stack (not in any ircd::context).
///
/// The context the closure ends up getting is the next available from the
/// request pool, which may not be available immediately so this handler might
/// be queued for some time after this call returns.
void
ircd::client::handle_ready(std::shared_ptr<client> client,
                           const error_code &ec)
{
    if(!handle_ec(*client, ec))
        return;

    auto handler
    {
        std::bind(client::handle_requests, std::move(client))
    };

    if(pool.avail() == 0)
        log::dwarning
        {
            log, "Client context pool exhausted. %zu requests queued.",
            pool.queued()
        };

    pool(std::move(handler));
}

/// A request context has been dispatched and is now handling this client.
/// This function is executing on that ircd::ctx stack. client::main() will
/// now be called and synchronous programming is possible. Afterward, the
/// client will release this ctx and its stack and fall back to async mode
/// or die.
void
ircd::client::handle_requests(std::shared_ptr<client> client)
try
{
    // The ircd::ctx now handling this request is referenced and accessible
    // in client for the duration of this handling.
    assert(ctx::current);
    assert(!client->reqctx);
    client->reqctx = ctx::current;
    client->ready_count++;
    const unwind reset{[&client]
    {
        assert(bool(client));
        assert(client->reqctx);
        assert(client->reqctx == ctx::current);
        client->reqctx = nullptr;
        if(pool.avail() <= 1)
            dock.notify_all();
    }};

    #ifdef RB_DEBUG
    util::timer timer;
    log::debug
    {
        log, "%s enter",
        client->loghead()
    };
    #endif

    if(!client->main())
    {
        client->close(net::dc::SSL_NOTIFY).wait();
        return;
    }

    #ifdef RB_DEBUG
    char buf[64];
    log::debug
    {
        log, "%s leave %s",
        client->loghead(),
        pretty(buf, timer.at<microseconds>(), true)
    };
    #endif

    client->async();
}
catch(const std::exception &e)
{
    log::error
    {
        log, "%s fault :%s",
        client->loghead(),
        e.what()
    };
}

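/// Triage an error_code from the async handler; returns true only when the
/// client should proceed to be dispatched to the request pool.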
bool
ircd::handle_ec(client &client,
                const error_code &ec)
{
    using std::errc;
    using boost::asio::error::get_ssl_category;
    using boost::asio::error::get_misc_category;

    if(unlikely(run::level != run::level::RUN && !ec))
    {
        log::dwarning
        {
            client::log, "%s refusing client request in runlevel %s",
            client.loghead(),
            reflect(run::level)
        };

        client.close(net::dc::RST, net::close_ignore);
        return false;
    }

    if(system_category(ec)) switch(ec.value())
    {
        case 0: return true;
        case int(errc::operation_canceled): return false;
        case int(errc::timed_out): return handle_ec_timeout(client);
        default: return handle_ec_default(client, ec);
    }
    else if(ec.category() == get_ssl_category()) switch(uint8_t(ec.value()))
    {
        #ifdef SSL_R_SHORT_READ
        case SSL_R_SHORT_READ: return handle_ec_short_read(client);
        #endif
        default: return handle_ec_default(client, ec);
    }
    else if(ec == net::eof)
        return handle_ec_eof(client);
    else
        return handle_ec_default(client, ec);
}

/// The client indicated they will not be sending the data we have been
/// waiting for. The proper behavior now is to initiate a clean shutdown.
bool
ircd::handle_ec_eof(client &client)
try
{
    log::debug
    {
        client::log, "%s end of file",
        client.loghead()
    };

    client.close(net::dc::SSL_NOTIFY, net::close_ignore);
    return false;
}
catch(const std::exception &e)
{
    log::error
    {
        client::log, "%s end of file :%s",
        client.loghead(),
        e.what()
    };

    return false;
}

/// The client terminated the connection, likely improperly, and SSL
/// is informing us with an opportunity to prevent truncation attacks.
/// Best behavior here is to just close the sd.
bool
ircd::handle_ec_short_read(client &client)
try
{
    log::dwarning
    {
        client::log, "%s short_read",
        client.loghead()
    };

    client.close(net::dc::RST, net::close_ignore);
    return false;
}
catch(const std::exception &e)
{
    log::error
    {
        client::log, "%s short_read :%s",
        client.loghead(),
        e.what()
    };

    return false;
}

/// The net:: system determined the client timed out because we set a timer
/// on the socket waiting for data which never arrived. The client may very
/// well still be there, so the best thing to do is to attempt a clean
/// disconnect.
bool
ircd::handle_ec_timeout(client &client)
try
{
    assert(bool(client.sock));
    log::debug
    {
        client::log, "%s disconnecting after inactivity timeout",
        client.loghead()
    };

    client.close(net::dc::SSL_NOTIFY, net::close_ignore);
    return false;
}
catch(const std::exception &e)
{
    log::derror
    {
        client::log, "%s timeout :%s",
        client.loghead(),
        e.what()
    };

    return false;
}

/// Unknown/untreated error. Probably not worth attempting a clean shutdown
/// so a hard / immediate disconnect is given instead.
bool
ircd::handle_ec_default(client &client,
                        const error_code &ec)
{
    char buf[256];
    log::derror
    {
        client::log, "%s :%s",
        client.loghead(),
        string(buf, ec)
    };

    client.close(net::dc::RST, net::close_ignore);
    return false;
}

//
// client
//

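/// Primary constructor. Indexes this client in the active-clients map by its
/// remote endpoint, sizes the head buffer from the conf, takes ownership of
/// the socket and records the local endpoint.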
ircd::client::client(std::shared_ptr<socket> sock)
:instance_multimap{[&sock]
() -> net::ipport
{
    assert(bool(sock));
    const auto &ep(sock->remote());
    return { ep.address(), ep.port() };
}()}
,head_buffer
{
    conf->header_max_size
}
,sock
{
    std::move(sock)
}
,local
{
    net::local_ipport(*this->sock)
}
{
    assert(size(head_buffer) >= 8_KiB);
}

ircd::client::~client()
noexcept try
{
    dock.notify_all();
    //assert(!sock || !connected(*sock));
}
catch(const std::exception &e)
{
    log::critical
    {
        log, "~client(%p): %s",
        this,
        e.what()
    };

    return;
}

/// Client main loop.
///
/// Before main(), the client had been sitting in async mode waiting for
/// socket activity. Once activity with data was detected indicating a request,
/// the client was dispatched to the request pool where it is paired to an
/// ircd::ctx with a stack. main() is then invoked on that ircd::ctx stack.
/// Nothing from the socket has been read into userspace before main().
///
/// This function parses requests off the socket in a loop until there are no
/// more requests or there is a fatal error. The ctx will "block" to wait for
/// more data off the socket during the middle of a request until the request
/// timeout is reached. main() will not "block" to wait for more data after a
/// request; it will simply `return true` which puts this client back into
/// async mode and relinquishes this stack. Returning false will disconnect
/// the client rather than putting it back into async mode.
///
/// Normal exceptions do not pass below main(); therefore anything unhandled is
/// an internal server error and the client is disconnected. The exception
/// handler here though is executing on a request ctx stack, and we can choose
/// to take advantage of that; in contrast to the handle_ec() switch which
/// handles errors on the main/callback stack and must be asynchronous.
///
bool
ircd::client::main()
try
{
    parse::buffer pb{head_buffer};
    parse::capstan pc{pb, read_closure(*this)}; do
    {
        if(!handle_request(pc))
            return false;

        // After the request, the head and content have been read off the socket
        // and the capstan has advanced to the end of the content. The catch is
        // that reading off the socket could have read too much, bleeding into
        // the next request. This is rare, but pb.remove() will memmove() the
        // bleed back to the beginning of the head buffer for the next loop.
        pb.remove();
    }
    while(pc.unparsed());

    return true;
}
catch(const std::system_error &e)
{
    return handle_ec(*this, e.code());
}
catch(const ctx::interrupted &e)
{
    log::warning
    {
        log, "%s request interrupted :%s",
        loghead(),
        e.what()
    };

    close(net::dc::SSL_NOTIFY, net::close_ignore);
    return false;
}
catch(const std::exception &e)
{
    log::critical
    {
        log, "%s :%s",
        loghead(),
        e.what()
    };

    return false;
}
catch(const ctx::terminated &)
{
    close(net::dc::RST, net::close_ignore);
    throw;
}

/// Handle a single request within the client main() loop.
///
/// This function returns false if the main() loop should exit
/// and thus disconnect the client. It should return true in most
/// cases even for lightly erroneous requests that won't affect
/// the next requests on the tape.
///
/// This function is timed. The timeout will prevent a client from
/// sending a partial request and leaving us waiting for the rest.
bool
ircd::client::handle_request(parse::capstan &pc)
try
{
    timer = ircd::timer{};
    ++request_count;

    // This timeout covers the reception of a complete HTTP head. If the
    // head was fragmented and has not entirely arrived yet this function
    // will block this request context below. The timeout limits that.
    net::scope_timeout timeout
    {
        *sock, conf->request_timeout
    };

    // This is the first read off the wire. The headers are entirely read and
    // the tape is advanced.
    const http::request::head head{pc};
    head_length = pc.parsed - data(head_buffer);
    content_consumed = std::min(pc.unparsed(), head.content_length);
    pc.parsed += content_consumed;
    assert(pc.parsed <= pc.read);

    // The resource being sought will have its own specific timeout, or none
    // at all. This timeout is now canceled to not conflict. Note that the
    // time spent so far is still being accumulated by client.timer.
    timeout.cancel();

    log::debug
    {
        resource::log, "%s HTTP %s `%s' content-length:%zu have:%zu",
        loghead(),
        head.method,
        head.path,
        head.content_length,
        content_consumed
    };

    // Sets values in this->client::request based on everything we know from
    // the head for this scope. This gets updated again in the resource::
    // unit for their scope with more data including the content.
    const scope_restore request
    {
        this->request, resource::request
        {
            head, string_view{} // no content considered yet
        }
    };

    bool ret
    {
        resource_request(head)
    };

    if(ret && iequals(head.connection, "close"_sv))
        ret = false;

    return ret;
}
catch(const ctx::interrupted &e)
{
    throw;
}
catch(const std::system_error &e)
{
    static const auto operation_canceled
    {
        make_error_code(std::errc::operation_canceled)
    };

    if(e.code() != operation_canceled)
        throw;

    if(!sock || sock->fini)
        return false;

    const ctx::exception_handler eh;
    resource::response
    {
        *this,
        http::REQUEST_TIMEOUT,
        {},
        0L,
        {}
    };

    return false;
}
catch(const http::error &e)
{
    log::logf
    {
        log, log::level::DERROR,
        "%s HTTP %u %s :%s",
        loghead(),
        uint(e.code),
        http::status(e.code),
        e.content
    };

    if(!sock || sock->fini)
        return false;

    const ctx::exception_handler eh;
    resource::response
    {
        *this,
        e.content,
        "text/html; charset=utf-8",
        e.code,
        e.headers
    };

    return false;
}

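/// Locate the resource and method for this request head and invoke it with
/// whatever content has already arrived. HTTP errors thrown by the handler
/// are converted into error responses here; the return value indicates
/// whether the connection can continue to the next request.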
bool
ircd::client::resource_request(const http::request::head &head)
try
{
    auto &resource
    {
        // throws HTTP 404 if not found.
        ircd::resource::find(head.path)
    };

    auto &method
    {
        // throws HTTP 405 if not found.
        resource[head.method]
    };

    const string_view content_partial
    {
        data(head_buffer) + head_length, content_consumed
    };

    method(*this, head, content_partial);
    discard_unconsumed(head);
    return true;
}
catch(const ctx::interrupted &)
{
    throw;
}
catch(const std::system_error &)
{
    throw;
}
catch(const http::error &e)
{
    const ctx::exception_handler eh;

    if(!empty(e.content))
        log::logf
        {
            resource::log, log::level::DERROR,
            "%s HTTP %u `%s' %s :%s",
            loghead(),
            uint(e.code),
            head.uri,
            http::status(e.code),
            e.content
        };

    if(!sock || sock->fini)
        return false;

    resource::response
    {
        *this,
        e.content,
        "text/html; charset=utf-8",
        e.code,
        e.headers
    };

    switch(e.code)
    {
        // These codes are "unrecoverable" errors and no more HTTP can be
        // conducted with this tape. The client must be disconnected.
        case http::BAD_REQUEST:
        case http::REQUEST_TIMEOUT:
        case http::PAYLOAD_TOO_LARGE:
        case http::INTERNAL_SERVER_ERROR:
            return false;

        // These codes are "recoverable" and allow the next HTTP request in
        // a pipeline to take place.
        default:
            discard_unconsumed(head);
            return true;
    }
}
catch(const std::exception &e)
{
    const ctx::exception_handler eh;

    log::error
    {
        resource::log, "%s HTTP 500 Internal Error `%s' :%s",
        loghead(),
        head.uri,
        e.what()
    };

    if(!sock || sock->fini)
        return false;

    resource::response
    {
        *this,
        e.what(),
        "text/html; charset=utf-8",
        http::INTERNAL_SERVER_ERROR
    };

    return false;
}

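/// Read and discard any remaining request content that the resource handler
/// did not consume, so the next pipelined request starts at a clean boundary.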
void
ircd::client::discard_unconsumed(const http::request::head &head)
{
    if(unlikely(!sock))
        return;

    const size_t unconsumed
    {
        head.content_length - content_consumed
    };

    if(!unconsumed)
        return;

    log::debug
    {
        log, "%s discarding %zu unconsumed of %zu bytes content...",
        loghead(),
        unconsumed,
        head.content_length
    };

    content_consumed += net::discard_all(*sock, unconsumed);
    assert(content_consumed == head.content_length);
}

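/// Initiate an asynchronous close of the client's socket; the returned future
/// resolves when the close sequence completes. If there is no socket or it is
/// already finishing, an already-satisfied future is returned.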
ircd::ctx::future<void>
ircd::client::close(const net::close_opts &opts)
{
    return likely(sock) && !sock->fini?
        net::close(*sock, opts):
        ctx::already;
}

void
ircd::client::close(const net::close_opts &opts,
                    net::close_callback callback)
{
    if(!sock)
        return;

    if(sock->fini)
        return callback({});

    net::close(*sock, opts, std::move(callback));
}

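/// Write the supplied buffers to the client's socket in their entirety;
/// throws a std::system_error if the socket is missing or already closed.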
size_t
ircd::client::write_all(const net::const_buffers &bufs)
{
    if(unlikely(!sock))
        throw std::system_error
        {
            make_error_code(std::errc::bad_file_descriptor)
        };

    if(unlikely(sock->fini))
        throw std::system_error
        {
            make_error_code(std::errc::not_connected)
        };

    return net::write_all(*sock, bufs);
}

/// Returns a string_view to a static (tls) buffer containing common
/// information used to prefix log calls for this client: i.e. id, remote
/// address, etc. This is meant to be used as the first argument to all log
/// calls apropos this client and should not be held over a context switch
/// as there is only one static buffer.
ircd::string_view
ircd::client::loghead()
const
{
    thread_local char buf[512];

    const string_view alpn
    {
        sock?
            sock->alpn:
            nullptr
    };

    char rembuf[64], locbuf[64];
    return fmt::sprintf
    {
        buf, "socket:%lu local:%s remote:%s client:%lu %s %lu:%lu",
        sock? net::id(*sock) : -1UL,
        string(locbuf, ircd::local(*this)),
        string(rembuf, ircd::remote(*this)),
        id,
        alpn?: "h1"_sv,
        ready_count,
        request_count,
    };
}