// Matrix Construct
//
// Copyright (C) Matrix Construct Developers, Authors & Contributors
// Copyright (C) 2016-2018 Jason Volk <jason@zemos.net>
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice is present in all copies. The
// full license for this software is available in the LICENSE file.

#pragma once
#define HAVE_IRCD_CTX_PROF_H

/// Profiling for the context system.
///
/// These facilities provide tools and statistics. The primary purpose here is
/// to alert developers of unwanted context behavior, in addition to optimizing
/// the overall performance of the context system.
///
/// The original use case is the embedded database backend: function calls are
/// made which may conduct blocking I/O before returning. This hangs the
/// current userspace context while it runs and thus BLOCKS EVERY CONTEXT in
/// the entire IRCd. This is still an asynchronous system, it just doesn't
/// have callbacks: we do not do I/O without a cooperative yield. Fortunately
/// there are mechanisms to mitigate this -- but we have to know for sure. A
/// database call which has been passed over for mitigation may still start a
/// blocking flush under load, etc. The profiler alerts us to this so it does
/// not silently degrade performance.
///
namespace ircd::ctx::prof
{
	enum class event :uint8_t;
	struct ticker;

	// util
	unsigned long long rdpmc();
	unsigned long long rdtsc();
	unsigned long long rdtscp();
	uint64_t cycles();

	string_view reflect(const event &);

	// totals
	const ticker &get();
	const uint64_t &get(const event &);

	// specific context
	const ticker &get(const ctx &c);
	const uint64_t &get(const ctx &c, const event &);

	// current slice state
	const ulong &cur_slice_start();
	ulong cur_slice_cycles();

	// test accessors
	bool slice_exceeded_warning(const ulong &cycles);
	bool slice_exceeded_assertion(const ulong &cycles);
	bool slice_exceeded_interrupt(const ulong &cycles);
	bool stack_exceeded_warning(const size_t &size);
	bool stack_exceeded_assertion(const size_t &size);

	// called at the appropriate point to mark the event (internal use).
	void mark(const event &);
}
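
// A minimal usage sketch (illustration only; `heavy_call` is a hypothetical
// stand-in for e.g. a database request and is not part of this interface).
// It samples the running slice counter around a call to estimate how many
// cycles the call consumed without yielding:
//
//   const auto before(ircd::ctx::prof::cur_slice_cycles());
//   heavy_call();
//   const auto after(ircd::ctx::prof::cur_slice_cycles());
//   if(ircd::ctx::prof::slice_exceeded_warning(after - before))
//       { /* this call alone would trip the slice warning threshold */ }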

namespace ircd::ctx::prof::settings
{
	extern conf::item<double> stack_usage_warning;      // percentage
	extern conf::item<double> stack_usage_assertion;    // percentage

	extern conf::item<ulong> slice_warning;      // Warn when the yield-to-yield cycles exceed this
	extern conf::item<ulong> slice_interrupt;    // Interrupt exception when exceeded (not a signal)
	extern conf::item<ulong> slice_assertion;    // abort() when exceeded (not a signal, must yield)
}
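
// Relationship sketch between these settings and the test accessors above
// (an assumption for illustration; consult the definitions for the exact
// semantics): slice_exceeded_warning(cycles) conceptually reduces to a
// comparison of the measured cycles against the configured threshold, e.g.
//
//   const bool exceeded(cycles >= ulong(settings::slice_warning));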

/// Profiling events for marking. These are currently used internally at the
/// appropriate point to mark(): the user of ircd::ctx has no reason to mark()
/// these events; this interface is not quite developed for general use yet.
enum class ircd::ctx::prof::event
:uint8_t
{
	SPAWN,      // Context spawn requested
	JOIN,       // Context join requested
	JOINED,     // Context join completed
	ENTER,      // Current context entered
	LEAVE,      // Current context leaving
	YIELD,      // Current context yielding
	CONTINUE,   // Current context continuing
	INTERRUPT,  // Current context detects interruption
	TERMINATE,  // Current context detects termination

	_NUM_
};
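
// Illustrative sketch (not part of this interface): reflect() maps an event
// to a human-readable name, which pairs naturally with the totals accessor
// when dumping per-event counters.
//
//   using namespace ircd::ctx::prof;
//   for(uint8_t i(0); i < uint8_t(event::_NUM_); ++i)
//   {
//       const auto e(event(i));
//       std::cout << reflect(e) << ": " << get(e) << std::endl;
//   }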

/// Structure aggregating any profiling-related state for a ctx.
struct ircd::ctx::prof::ticker
{
	// monotonic counter (rdtsc)
	ulong cycles {0};

	// monotonic counters for events
	std::array<uint64_t, num_of<prof::event>()> event {{0}};
};
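
// Reading a specific context's ticker (sketch; `some_ctx` stands in for any
// ctx reference you hold and is not defined here):
//
//   const auto &t(ircd::ctx::prof::get(some_ctx));
//   const auto &yields(t.event[size_t(ircd::ctx::prof::event::YIELD)]);
//   const auto &cycles(t.cycles); // monotonic cycle counter, per the comment above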

inline uint64_t
__attribute__((flatten, always_inline, gnu_inline, artificial))
ircd::ctx::prof::cycles()
{
	return rdtsc();
}
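
// Interval measurement sketch (illustration only; work() is a hypothetical
// placeholder): cycles() is a raw TSC read, so a span is measured by
// differencing two samples.
//
//   const auto start(ircd::ctx::prof::cycles());
//   work();
//   const auto elapsed(ircd::ctx::prof::cycles() - start);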

#if defined(__x86_64__) || defined(__i386__)
inline unsigned long long
__attribute__((always_inline, gnu_inline, artificial))
ircd::ctx::prof::rdtscp()
{
	uint32_t ia32_tsc_aux;
	return __builtin_ia32_rdtscp(&ia32_tsc_aux);
}
#else
inline unsigned long long
ircd::ctx::prof::rdtscp()
{
	static_assert(false, "TODO: Implement fallback here");
	return 0;
}
#endif

#if defined(__x86_64__) || defined(__i386__)
inline unsigned long long
__attribute__((always_inline, gnu_inline, artificial))
ircd::ctx::prof::rdtsc()
{
	return __builtin_ia32_rdtsc();
}
#else
inline unsigned long long
ircd::ctx::prof::rdtsc()
{
	static_assert(false, "TODO: Implement fallback here");
	return 0;
}
#endif

#if defined(__x86_64__) || defined(__i386__)
inline unsigned long long
__attribute__((always_inline, gnu_inline, artificial))
ircd::ctx::prof::rdpmc()
{
	return __builtin_ia32_rdpmc(0);
}
#else
inline unsigned long long
ircd::ctx::prof::rdpmc()
{
	static_assert(false, "TODO: Implement fallback here");
	return 0;
}
#endif
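
// On non-x86 targets the readers above are deliberately left as TODOs. A
// minimal portable stand-in (an assumption, not the project's chosen
// fallback) could derive a monotonic count from the steady clock, e.g.:
//
//   #include <chrono>
//   inline unsigned long long fallback_ticks()
//   {
//       const auto now(std::chrono::steady_clock::now().time_since_epoch());
//       return std::chrono::duration_cast<std::chrono::nanoseconds>(now).count();
//   }
//
// This trades cycle-level resolution for portability.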