// Matrix Construct
//
// Copyright (C) Matrix Construct Developers, Authors & Contributors
// Copyright (C) 2016-2018 Jason Volk <jason@zemos.net>
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice is present in all copies. The
// full license for this software is available in the LICENSE file.
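
// The RB_INC_* name below is a configure-generated computed include; its
// macro value presumably supplies the closing '>' for the directive itself.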
#include <RB_INC_MALLOC_H

// Uncomment or -D this #define to enable our own crude but simple ability to
// profile dynamic memory usage. Global `new` and `delete` will be captured
// here by this definition file into thread_local counters accessible via
// ircd::allocator::profile. This feature allows the developer to find out if
// allocations are occurring during some scope by sampling the counters.
//
// #define RB_PROF_ALLOC
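//
// A minimal sketch (not part of the interface) of how a developer might
// sample these counters around a scope when RB_PROF_ALLOC is enabled; the
// names used are the ones defined later in this unit:
//
//   const auto before(ircd::allocator::profile::this_thread);
//   {
//       // ... code under test ...
//   }
//   const auto delta
//   {
//       ircd::allocator::profile::this_thread - before
//   };
//   assert(delta.alloc_count == 0);  // e.g. expect an allocation-free scope
//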
#if defined(__GNU_LIBRARY__) && defined(HAVE_MALLOC_H)
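// Copies a textual dump of the glibc malloc arena statistics (mallinfo(3))
// into the supplied buffer and returns a view of what was written.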
ircd::string_view
ircd::allocator::info(const mutable_buffer &buf)
{
	std::stringstream out;
	pubsetbuf(out, buf);

	const auto ma
	{
		::mallinfo()
	};

	out << "arena: " << ma.arena << std::endl
	    << "ordblks: " << ma.ordblks << std::endl
	    << "smblks: " << ma.smblks << std::endl
	    << "hblks: " << ma.hblks << std::endl
	    << "hblkhd: " << ma.hblkhd << std::endl
	    << "usmblks: " << ma.usmblks << std::endl
	    << "fsmblks: " << ma.fsmblks << std::endl
	    << "uordblks: " << ma.uordblks << std::endl
	    << "fordblks: " << ma.fordblks << std::endl
	    << "keepcost: " << ma.keepcost << std::endl
	    ;

	return view(out, buf);
}
#else
ircd::string_view
ircd::allocator::info(const mutable_buffer &buf)
{
	return {};
}
#endif
#if defined(__GNU_LIBRARY__) && defined(HAVE_MALLOC_H)
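// Thin wrapper over glibc malloc_trim(3): releases free memory from the top
// of the heap back to the system, keeping `pad` bytes of slack; returns true
// when memory was actually released.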
bool
ircd::allocator::trim(const size_t &pad)
{
	return malloc_trim(pad);
}
#else
bool
ircd::allocator::trim(const size_t &pad)
{
	return false;
}
#endif

//
// allocator::state
//
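// The state tracks a bitset over a contiguous span of elements: a set bit
// marks a position in use and a clear bit marks it available. allocate() and
// next() below perform a first-fit search for a run of clear bits, with
// `last` serving as a rolling hint of where to resume searching.
//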
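
// Clears the bits for a range previously returned by allocate() and records
// the freed position as the hint for the next search.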
void
ircd::allocator::state::deallocate(const uint &pos,
                                   const size_type &n)
{
	for(size_t i(0); i < n; ++i)
	{
		assert(test(pos + i));
		btc(pos + i);
	}

	last = pos;
}
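
// Finds and marks a contiguous run of n free positions and advances `last`
// to just past the new range; throws std::bad_alloc when no such run exists.
// The hint argument is not used by this search.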
uint
ircd::allocator::state::allocate(const size_type &n,
                                 const uint &hint)
{
	const auto next(this->next(n));
	if(unlikely(next >= size)) // No block of n was found anywhere (next is past-the-end)
		throw std::bad_alloc();

	for(size_t i(0); i < n; ++i)
	{
		assert(!test(next + i));
		bts(next + i);
	}

	last = next + n;
	return next;
}
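
// Two-pass first-fit search for a run of n clear bits: scan forward from the
// `last` hint to the end, then wrap around and scan from the beginning up to
// `last`. Returns the starting position of the run, or `size` (past-the-end)
// when no run exists; allocate() translates that into std::bad_alloc.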
uint
ircd::allocator::state::next(const size_t &n)
const
{
	uint ret(last), rem(n);
	for(; ret < size && rem; ++ret)
		if(test(ret))
			rem = n;
		else
			--rem;

	if(likely(!rem))
		return ret - n;

	for(ret = 0, rem = n; ret < last && rem; ++ret)
		if(test(ret))
			rem = n;
		else
			--rem;

	if(unlikely(rem)) // No run of n was found anywhere; allocate() throws std::bad_alloc on this.
		return size;

	return ret - n;
}

//
// allocator::profile
//
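// The this_thread counters below are only advanced by this unit when it is
// built with RB_PROF_ALLOC defined (see the global operator new/delete
// overloads at the end of this file); without that define this unit leaves
// the counters untouched.
//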
thread_local ircd::allocator::profile
ircd::allocator::profile::this_thread
{};

ircd::allocator::profile
ircd::allocator::operator-(const profile &a,
                           const profile &b)
{
	profile ret(a);
	ret -= b;
	return ret;
}

ircd::allocator::profile
ircd::allocator::operator+(const profile &a,
                           const profile &b)
{
	profile ret(a);
	ret += b;
	return ret;
}

ircd::allocator::profile &
ircd::allocator::operator-=(profile &a,
                            const profile &b)
{
	a.alloc_count -= b.alloc_count;
	a.free_count -= b.free_count;
	a.alloc_bytes -= b.alloc_bytes;
	a.free_bytes -= b.free_bytes;
	return a;
}

ircd::allocator::profile &
ircd::allocator::operator+=(profile &a,
                            const profile &b)
{
	a.alloc_count += b.alloc_count;
	a.free_count += b.free_count;
	a.alloc_bytes += b.alloc_bytes;
	a.free_bytes += b.free_bytes;
	return a;
}

#ifdef RB_PROF_ALLOC // --------------------------------------------------
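
// These global operator new/delete replacements capture dynamic allocations
// and deallocations process-wide and tally counts and byte totals into the
// calling thread's profile::this_thread counters.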
__attribute__((alloc_size(1), malloc, returns_nonnull))
void *
operator new(const size_t size)
{
	void *const &ptr(::malloc(size));
	if(unlikely(!ptr))
		throw std::bad_alloc();

	auto &this_thread(ircd::allocator::profile::this_thread);
	this_thread.alloc_bytes += size;
	this_thread.alloc_count++;

	return ptr;
}

void
operator delete(void *const ptr)
{
	::free(ptr);

	auto &this_thread(ircd::allocator::profile::this_thread);
	this_thread.free_count++;
}

void
operator delete(void *const ptr,
                const size_t size)
{
	::free(ptr);

	auto &this_thread(ircd::allocator::profile::this_thread);
	this_thread.free_bytes += size;
	this_thread.free_count++;
}

#endif // RB_PROF_ALLOC --------------------------------------------------