// Matrix Construct
//
// Copyright (C) Matrix Construct Developers, Authors & Contributors
// Copyright (C) 2016-2018 Jason Volk <jason@zemos.net>
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice is present in all copies. The
// full license for this software is available in the LICENSE file.

#include "db.h"

//
// Misc / General linkages
//

/// Dedicated logging facility for the database subsystem
decltype(ircd::db::log)
ircd::db::log
{
	"db", 'D'
};

/// Dedicated logging facility for rocksdb's log callbacks
decltype(ircd::db::rog)
ircd::db::rog
{
	"db.rocksdb"
};
ircd::conf::item<size_t>
ircd::db::request_pool_stack_size
{
	{ "name", "ircd.db.request_pool.stack_size" },
	{ "default", long(128_KiB) },
};

ircd::conf::item<size_t>
ircd::db::request_pool_size
{
	{
		{ "name", "ircd.db.request_pool.size" },
		{ "default", 32L },
	}, []
	{
		request.set(size_t(request_pool_size));
	}
};

decltype(ircd::db::request_pool_opts)
ircd::db::request_pool_opts
{
	size_t(request_pool_stack_size),
	size_t(request_pool_size),
	-1,   // No hard limit
	0,    // Soft limit at any queued
	true, // Yield before hitting soft limit
};
/// Concurrent request pool. Requests to seek may be executed on this
/// pool in cases where a single context would find it advantageous.
/// Some examples are a db::row seek, or asynchronous prefetching.
///
/// The number of workers in this pool should upper bound at the
/// number of concurrent AIO requests which are effective on this
/// system. This is a static pool shared by all databases.
decltype(ircd::db::request)
ircd::db::request
{
	"db req", request_pool_opts
};
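// Illustrative usage sketch (comment only, not part of the build): a caller
// holding several independent seeks could submit each one as a closure to
// this pool instead of performing them serially on its own ircd::ctx. The
// closure-submission operator of ctx::pool is assumed here; this unit itself
// only exercises add(), set(), terminate() and join().
//
//   for(const auto &key : keys)
//       request([&] { /* perform one column seek for `key` */ });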
/// This mutex is necessary to serialize entry into rocksdb's write impl
/// otherwise there's a risk of a deadlock if their internal pthread
/// mutexes are contended. This is because a few parts of rocksdb are
/// incorrectly using std::mutex directly when they ought to be using their
/// rocksdb::port wrapper.
decltype(ircd::db::write_mutex)
ircd::db::write_mutex;

///////////////////////////////////////////////////////////////////////////////
//
// init
//
namespace ircd::db
{
	static std::string direct_io_test_file_path();
}

decltype(ircd::db::version_api)
ircd::db::version_api
{
	"RocksDB", info::versions::API, 0,
	{
		ROCKSDB_MAJOR, ROCKSDB_MINOR, ROCKSDB_PATCH,
	}
};

decltype(ircd::db::version_abi)
ircd::db::version_abi
{
	"RocksDB", info::versions::ABI //TODO: get this
};

//
// init::init
//
ircd::db::init::init()
try
{
	compressions();
	directory();
	test_direct_io();
	test_hw_crc32();
	request.add(request_pool_size);
}
catch(const std::exception &e)
{
	log::critical
	{
		log, "Cannot start database system :%s",
		e.what()
	};

	throw;
}

ircd::db::init::~init()
noexcept
{
	if(request.active())
		log::warning
		{
			log, "Terminating %zu active of %zu client request contexts; %zu pending; %zu queued",
			request.active(),
			request.size(),
			request.pending(),
			request.queued()
		};

	request.terminate();
	log::debug
	{
		log, "Waiting for %zu active of %zu client request contexts; %zu pending; %zu queued",
		request.active(),
		request.size(),
		request.pending(),
		request.queued()
	};

	request.join();
	log::debug
	{
		log, "All contexts joined; all requests are clear."
	};
}
void
ircd::db::init::directory()
try
{
	const auto dbdir
	{
		fs::path(fs::DB)
	};

	if(!fs::is_dir(dbdir) && (ircd::read_only || ircd::write_avoid))
		log::warning
		{
			log, "Not creating database directory `%s' in read-only/write-avoid mode.", dbdir
		};
	else if(fs::mkdir(dbdir))
		log::notice
		{
			log, "Created new database directory at `%s'", dbdir
		};
	else
		log::info
		{
			log, "Using database directory at `%s'", dbdir
		};
}
catch(const fs::error &e)
{
	log::error
	{
		log, "Database directory error: %s", e.what()
	};

	throw;
}
void
ircd::db::init::test_direct_io()
try
{
	const auto test_file_path
	{
		direct_io_test_file_path()
	};

	if(fs::support::direct_io(test_file_path))
		log::debug
		{
			log, "Detected Direct-IO works by opening test file at `%s'",
			test_file_path
		};
	else
		log::warning
		{
			log, "Direct-IO is not supported in the database directory `%s'"
			"; Concurrent database queries will not be possible.",
			fs::basepath::get(fs::DB)
		};
}
catch(const std::exception &e)
{
	log::error
	{
		log, "Failed to test if Direct-IO possible with test file `%s'"
		"; Concurrent database queries will not be possible :%s",
		direct_io_test_file_path(),
		e.what()
	};
}
std::string
ircd::db::direct_io_test_file_path()
{
	static const auto &test_file_name
	{
		"SUPPORTS_DIRECT_IO"_sv
	};

	return fs::path_string(fs::DB, test_file_name);
}

namespace rocksdb::crc32c
{
	extern std::string IsFastCrc32Supported();
}
void
ircd::db::init::test_hw_crc32()
try
{
	const auto supported_str
	{
		rocksdb::crc32c::IsFastCrc32Supported()
	};

	const bool supported
	{
		startswith(supported_str, "Supported")
	};

	assert(supported || startswith(supported_str, "Not supported"));

	if(!supported)
		log::warning
		{
			log, "crc32c hardware acceleration is not available on this platform."
		};
}
catch(const std::exception &e)
{
	log::error
	{
		log, "Failed to test crc32c hardware acceleration support :%s",
		e.what()
	};
}
decltype(ircd::db::compressions)
ircd::db::compressions;

void
ircd::db::init::compressions()
try
{
	auto supported
	{
		rocksdb::GetSupportedCompressions()
	};

	size_t i(0);
	for(const rocksdb::CompressionType &type_ : supported) try
	{
		auto &[string, type]
		{
			db::compressions.at(i++)
		};

		type = type_;
		throw_on_error
		{
			rocksdb::GetStringFromCompressionType(&string, type_)
		};

		log::debug
		{
			log, "Detected supported compression #%zu type:%lu :%s",
			i,
			type,
			string,
		};
	}
	catch(const std::exception &e)
	{
		log::error
		{
			log, "Failed to identify compression type:%u :%s",
			uint(type_),
			e.what()
		};
	}

	if(supported.empty())
		log::warning
		{
			"No compression libraries have been linked with the DB."
			" This is probably not what you want."
		};
}
catch(const std::exception &e)
{
	log::error
	{
		log, "Failed to initialize database compressions :%s",
		e.what()
	};

	throw;
}
///////////////////////////////////////////////////////////////////////////////
//
// database
//

/// Conf item toggles if full database checksum verification should occur
/// when any database is opened.
decltype(ircd::db::open_check)
ircd::db::open_check
{
	{ "name", "ircd.db.open.check" },
	{ "default", false },
	{ "persist", false },
};

/// Conf item determines the recovery mode to use when opening any database.
///
/// "absolute" - The default and is the same for an empty value. This means
/// any database corruptions are treated as an error on open and an exception
/// is thrown with nothing else done.
///
/// "point" - The database is rolled back to before any corruption. This will
/// lose some of the latest data last committed, but will open the database
/// and continue normally thereafter.
///
/// "skip" - The corrupted areas are skipped over and the database continues
/// normally with just those assets missing. This option is dangerous because
/// the database continues in a logically incoherent state which is only ok
/// for very specific applications.
///
/// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
///
/// IRCd's applications are NOT tolerant of skip recovery. You will create an
/// incoherent database. NEVER USE "skip" RECOVERY MODE.
///
/// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
///
decltype(ircd::db::open_recover)
ircd::db::open_recover
{
	{ "name", "ircd.db.open.recover" },
	{ "default", "absolute" },
	{ "persist", false },
};
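// For reference, the value of ircd.db.open.recover is translated later in
// this unit (see the DBOptions setup in database::database) roughly as:
//
//   "absolute" (default) -> rocksdb::WALRecoveryMode::kAbsoluteConsistency
//   "point"              -> rocksdb::WALRecoveryMode::kPointInTimeRecovery
//   "skip"               -> rocksdb::WALRecoveryMode::kSkipAnyCorruptedRecords
//   "tolerate"           -> rocksdb::WALRecoveryMode::kTolerateCorruptedTailRecords
//
// Note that "tolerate" is accepted by that translation even though it is not
// documented above; treat it strictly as a last-ditch salvage mode.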
void
ircd::db::sync(database &d)
{
	log::debug
	{
		log, "'%s': @%lu SYNC WAL",
		name(d),
		sequence(d)
	};

	throw_on_error
	{
		d.d->SyncWAL()
	};
}
/// Flushes all columns. Note that if blocking=true, blocking may occur for
/// each column individually.
void
ircd::db::flush(database &d,
                const bool &sync)
{
	log::debug
	{
		log, "'%s': @%lu FLUSH WAL",
		name(d),
		sequence(d)
	};

	throw_on_error
	{
		d.d->FlushWAL(sync)
	};
}
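// Illustrative note (comment only): flush() hands the write-ahead log to the
// kernel via FlushWAL(); pass sync=true, or call db::sync() afterward, when
// the caller also needs SyncWAL()'s durability barrier. A typical
// shutdown-style sequence, as also used by the database destructor below:
//
//   db::flush(d);   // FlushWAL(false)
//   db::sync(d);    // SyncWAL()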
/// Moves memory structures to SST files for all columns. This doesn't
/// necessarily sort anything that wasn't previously sorted, but it may create
/// new SST files and shouldn't be confused with a typical fflush().
/// Note that if blocking=true, blocking may occur for each column individually.
void
ircd::db::sort(database &d,
               const bool &blocking,
               const bool &now)
{
	for(const auto &c : d.columns)
	{
		db::column column{*c};
		db::sort(column, blocking, now);
	}
}

void
ircd::db::compact(database &d,
                  const compactor &cb)
{
	static const std::pair<string_view, string_view> range
	{
		{}, {}
	};

	for(const auto &c : d.columns)
	{
		db::column column{*c};
		compact(column, range, -1, cb);
	}
}

void
ircd::db::compact(database &d,
                  const std::pair<int, int> &level,
                  const compactor &cb)
{
	for(const auto &c : d.columns)
	{
		db::column column{*c};
		compact(column, level, cb);
	}
}
void
ircd::db::check(database &d)
{
	assert(d.d);
	throw_on_error
	{
		d.d->VerifyChecksum()
	};
}

void
ircd::db::resume(database &d)
{
	assert(d.d);
	const ctx::uninterruptible::nothrow ui;
	const std::lock_guard lock{write_mutex};
	const auto errors
	{
		db::errors(d)
	};

	log::debug
	{
		log, "'%s': Attempting to resume from %zu errors @%lu",
		name(d),
		errors.size(),
		sequence(d)
	};

	throw_on_error
	{
		d.d->Resume()
	};

	d.errors.clear();

	log::info
	{
		log, "'%s': Resumed normal operation at sequence number %lu; cleared %zu errors",
		name(d),
		sequence(d),
		errors.size()
	};
}

void
ircd::db::bgpause(database &d)
{
	assert(d.d);

	throw_on_error
	{
		d.d->PauseBackgroundWork()
	};

	log::debug
	{
		log, "'%s': Paused all background work",
		name(d)
	};
}

void
ircd::db::bgcontinue(database &d)
{
	assert(d.d);

	log::debug
	{
		log, "'%s': Continuing background work",
		name(d)
	};

	throw_on_error
	{
		d.d->ContinueBackgroundWork()
	};
}

void
ircd::db::bgcancel(database &d,
                   const bool &blocking)
{
	assert(d.d);
	log::debug
	{
		log, "'%s': Canceling all background work...",
		name(d)
	};

	rocksdb::CancelAllBackgroundWork(d.d.get(), blocking);
	if(!blocking)
		return;

	assert(d.env);
	assert(d.env->st);
	const ctx::uninterruptible::nothrow ui;
	for(auto &pool : d.env->st->pool) if(pool)
	{
		log::debug
		{
			log, "'%s': Waiting for tasks:%zu queued:%zu active:%zu in pool '%s'",
			name(d),
			pool->tasks.size(),
			pool->p.pending(),
			pool->p.active(),
			ctx::name(pool->p),
		};

		pool->wait();
	}

	const auto errors
	{
		property<uint64_t>(d, rocksdb::DB::Properties::kBackgroundErrors)
	};

	const auto level
	{
		errors? log::level::ERROR : log::level::DEBUG
	};

	log::logf
	{
		log, level,
		"'%s': Canceled all background work; errors:%lu",
		name(d),
		errors
	};
}
/// Writes a snapshot of this database to the directory specified. The
/// snapshot consists of hardlinks to the bulk data files of this db, but
/// copies the other stuff that usually gets corrupted. The directory can
/// then be opened as its own database either read-only or read-write.
/// Incremental backups and rollbacks can begin from this interface. Note
/// this may be an expensive blocking operation.
uint64_t
ircd::db::checkpoint(database &d)
{
	if(!d.checkpointer)
		throw error
		{
			"Checkpointing is not available for db(%p) '%s'",
			&d,
			name(d)
		};

	const std::lock_guard lock{write_mutex};
	const ctx::uninterruptible::nothrow ui;
	const auto seqnum
	{
		sequence(d)
	};

	const std::string dir
	{
		db::path(name(d), seqnum)
	};

	throw_on_error
	{
		d.checkpointer->CreateCheckpoint(dir, 0)
	};

	log::debug
	{
		log, "'%s': Checkpoint at sequence %lu in `%s' complete",
		name(d),
		seqnum,
		dir
	};

	return seqnum;
}
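// Illustrative usage sketch (comment only): a caller snapshots a database and
// the hardlinked copy can later be opened as its own checkpointed instance
// via the "name:checkpoint" syntax handled by db::namepoint() elsewhere in
// this unit. The database name below is hypothetical.
//
//   auto &d(db::database::get("events"));   // "events" is a hypothetical name
//   const auto seq(db::checkpoint(d));      // snapshot written to db::path("events", seq)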
/// This wraps RocksDB's "File Deletions" which means after RocksDB
/// compresses some file it then destroys the uncompressed version;
/// setting this to false will disable that and retain both versions.
/// This is useful when a direct reference is being manually held by
/// us into the uncompressed version which must remain valid.
void
ircd::db::fdeletions(database &d,
                     const bool &enable,
                     const bool &force)
{
	if(enable) throw_on_error
	{
		d.d->EnableFileDeletions(force)
	};
	else throw_on_error
	{
		d.d->DisableFileDeletions()
	};
}

void
ircd::db::setopt(database &d,
                 const string_view &key,
                 const string_view &val)
{
	const std::unordered_map<std::string, std::string> options
	{
		{ std::string{key}, std::string{val} }
	};

	throw_on_error
	{
		d.d->SetDBOptions(options)
	};
}
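// Illustrative usage sketch (comment only): setopt() forwards a single
// key/value pair to rocksdb::DB::SetDBOptions(), so the key must be one of
// RocksDB's dynamically-changeable DBOptions; the option named below is an
// assumption, not something this unit verifies.
//
//   db::setopt(d, "max_background_compactions", "1");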
/// Set the rdb logging level by translating our ircd::log::level to the
/// RocksDB enum. This translation is a reasonable convenience, as both
/// enums are similar enough.
void
ircd::db::loglevel(database &d,
                   const ircd::log::level &fac)
{
	using ircd::log::level;

	rocksdb::InfoLogLevel lev
	{
		rocksdb::WARN_LEVEL
	};

	switch(fac)
	{
		case level::CRITICAL:  lev = rocksdb::FATAL_LEVEL;  break;
		case level::ERROR:     lev = rocksdb::ERROR_LEVEL;  break;
		case level::WARNING:
		case level::NOTICE:    lev = rocksdb::WARN_LEVEL;   break;
		case level::INFO:      lev = rocksdb::INFO_LEVEL;   break;
		case level::DERROR:
		case level::DWARNING:
		case level::DEBUG:     lev = rocksdb::DEBUG_LEVEL;  break;
		case level::_NUM_:     assert(0);                   break;
	}

	d.logger->SetInfoLogLevel(lev);
}

/// Get the rdb logging level by translating the RocksDB enum back to our
/// ircd::log::level. This translation is a reasonable convenience, as both
/// enums are similar enough.
ircd::log::level
ircd::db::loglevel(const database &d)
{
	const auto &level
	{
		d.logger->GetInfoLogLevel()
	};

	switch(level)
	{
		default:
		case rocksdb::NUM_INFO_LOG_LEVELS:
			assert(0);

		case rocksdb::HEADER_LEVEL:
		case rocksdb::FATAL_LEVEL:     return log::level::CRITICAL;
		case rocksdb::ERROR_LEVEL:     return log::level::ERROR;
		case rocksdb::WARN_LEVEL:      return log::level::WARNING;
		case rocksdb::INFO_LEVEL:      return log::level::INFO;
		case rocksdb::DEBUG_LEVEL:     return log::level::DEBUG;
	}
}
ircd::db::options
ircd::db::getopt(const database &d)
{
	return options
	{
		d.d->GetDBOptions()
	};
}

size_t
ircd::db::bytes(const database &d)
{
	return std::accumulate(begin(d.columns), end(d.columns), size_t(0), []
	(auto ret, const auto &colptr)
	{
		db::column c{*colptr};
		return ret += db::bytes(c);
	});
}

size_t
ircd::db::file_count(const database &d)
{
	return std::accumulate(begin(d.columns), end(d.columns), size_t(0), []
	(auto ret, const auto &colptr)
	{
		db::column c{*colptr};
		return ret += db::file_count(c);
	});
}

/// Get the list of WAL (Write Ahead Log) files.
std::vector<std::string>
ircd::db::wals(const database &cd)
{
	auto &d
	{
		const_cast<database &>(cd)
	};

	std::vector<std::unique_ptr<rocksdb::LogFile>> vec;
	throw_on_error
	{
		d.d->GetSortedWalFiles(vec)
	};

	std::vector<std::string> ret(vec.size());
	std::transform(begin(vec), end(vec), begin(ret), []
	(const auto &file)
	{
		return file->PathName();
	});

	return ret;
}

/// Get the live file list for db; see overlord documentation.
std::vector<std::string>
ircd::db::files(const database &d)
{
	uint64_t ignored;
	return files(d, ignored);
}

/// Get the live file list for database relative to the database's directory.
/// One of the files is a manifest file which is over-allocated and its used
/// size is returned in the integer passed to the `msz` argument.
///
/// This list may not be completely up to date. The reliable way to get the
/// most current list is to flush all columns first and ensure no database
/// activity took place between the flushing and this query.
std::vector<std::string>
ircd::db::files(const database &cd,
                uint64_t &msz)
{
	std::vector<std::string> ret;
	auto &d(const_cast<database &>(cd));
	throw_on_error
	{
		d.d->GetLiveFiles(ret, &msz, false)
	};

	return ret;
}

const std::vector<std::string> &
ircd::db::errors(const database &d)
{
	return d.errors;
}
uint64_t
ircd::db::sequence(const database &cd)
{
	database &d(const_cast<database &>(cd));
	return d.d->GetLatestSequenceNumber();
}

rocksdb::Cache *
ircd::db::cache(database &d)
{
	return d.row_cache.get();
}

const rocksdb::Cache *
ircd::db::cache(const database &d)
{
	return d.row_cache.get();
}

template<>
ircd::db::prop_int
ircd::db::property(const database &cd,
                   const string_view &name)
{
	uint64_t ret(0);
	database &d(const_cast<database &>(cd));
	if(!d.d->GetAggregatedIntProperty(slice(name), &ret))
		throw not_found
		{
			"property '%s' for all columns in '%s' not found or not an integer.",
			name,
			db::name(d)
		};

	return ret;
}

std::shared_ptr<ircd::db::database::column>
ircd::db::shared_from(database::column &column)
{
	return column.shared_from_this();
}

std::shared_ptr<const ircd::db::database::column>
ircd::db::shared_from(const database::column &column)
{
	return column.shared_from_this();
}

const std::string &
ircd::db::uuid(const database &d)
{
	return d.uuid;
}

const std::string &
ircd::db::name(const database &d)
{
	return d.name;
}
//
// database
//

namespace ircd::db
{
	extern const description default_description;
}

// Instance list linkage
template<>
decltype(ircd::util::instance_list<ircd::db::database>::allocator)
ircd::util::instance_list<ircd::db::database>::allocator
{};

template<>
decltype(ircd::util::instance_list<ircd::db::database>::list)
ircd::util::instance_list<ircd::db::database>::list
{
	allocator
};

decltype(ircd::db::default_description)
ircd::db::default_description
{
	/// Requirement of RocksDB going back to LevelDB. This column must
	/// always exist in all descriptions and probably should be at idx[0].
	{ "default" }
};

ircd::db::database &
ircd::db::database::get(column &column)
{
	assert(column.d);
	return *column.d;
}

const ircd::db::database &
ircd::db::database::get(const column &column)
{
	assert(column.d);
	return *column.d;
}

ircd::db::database &
ircd::db::database::get(const string_view &name)
{
	const auto pair
	{
		namepoint(name)
	};

	return get(pair.first, pair.second);
}

ircd::db::database &
ircd::db::database::get(const string_view &name,
                        const uint64_t &checkpoint)
{
	auto *const &d
	{
		get(std::nothrow, name, checkpoint)
	};

	if(likely(d))
		return *d;

	throw checkpoint == uint64_t(-1)?
		std::out_of_range{"No database with that name exists"}:
		std::out_of_range{"No database with that name at that checkpoint exists"};
}

ircd::db::database *
ircd::db::database::get(std::nothrow_t,
                        const string_view &name)
{
	const auto pair
	{
		namepoint(name)
	};

	return get(std::nothrow, pair.first, pair.second);
}

ircd::db::database *
ircd::db::database::get(std::nothrow_t,
                        const string_view &name,
                        const uint64_t &checkpoint)
{
	for(auto *const &d : list)
		if(name == d->name)
			if(checkpoint == uint64_t(-1) || checkpoint == d->checkpoint)
				return d;

	return nullptr;
}

//
// database::database
//
ircd::db::database::database(const string_view &name,
                             std::string optstr)
:database
{
	name, std::move(optstr), default_description
}
{
}

ircd::db::database::database(const string_view &name,
                             std::string optstr,
                             description description)
:database
{
	namepoint(name).first, namepoint(name).second, std::move(optstr), std::move(description)
}
{
}
ircd::db::database::database(const string_view &name,
                             const uint64_t &checkpoint,
                             std::string optstr,
                             description description)
try
:name
{
	namepoint(name).first
}
,checkpoint
{
	// a -1 may have been generated by the db::namepoint() util when the user
	// supplied just a name without a checkpoint. In the context of database
	// opening/creation -1 just defaults to 0.
	checkpoint == uint64_t(-1)? 0 : checkpoint
}
,path
{
	db::path(this->name, this->checkpoint)
}
,optstr
{
	std::move(optstr)
}
,fsck
{
	false
}
,read_only
{
	ircd::read_only
}
,env
{
	std::make_shared<struct env>(this)
}
,stats
{
	std::make_shared<struct stats>(this)
}
,logger
{
	std::make_shared<struct logger>(this)
}
,events
{
	std::make_shared<struct events>(this)
}
,mergeop
{
	std::make_shared<struct mergeop>(this)
}
,wal_filter
{
	std::make_unique<struct wal_filter>(this)
}
,ssts
{
	// note: the sst file manager cannot be used for now because it will spawn
	// note: a pthread internally in rocksdb which does not use our callbacks
	// note: we gave in the supplied env. we really don't want that.
	//rocksdb::NewSstFileManager(env.get(), logger, {}, 0, true, nullptr, 0.05)
}
,row_cache
{
	std::make_shared<database::cache>(this, this->stats, this->name, 16_MiB)
}
,descriptors
{
	std::move(description)
}
,opts{[this]
{
	auto opts
	{
		std::make_unique<rocksdb::DBOptions>(make_dbopts(this->optstr, &this->optstr, &read_only, &fsck))
	};

	// Setup sundry
	opts->create_if_missing = true;
	opts->create_missing_column_families = true;

	// Uses thread_local counters in rocksdb and probably useless for ircd::ctx.
	opts->enable_thread_tracking = false;

	// MUST be 0 or std::threads are spawned in rocksdb.
	opts->max_file_opening_threads = 0;

	// TODO: We should hint rocksdb with a harder value so it doesn't
	// potentially eat up all our fd's.
	opts->max_open_files = ircd::info::rlimit_nofile / 2;

	// TODO: Check if these values can be increased; RocksDB may keep
	// thread_local state preventing values > 1.
	opts->max_background_jobs = 16;
	opts->max_background_flushes = 1;
	opts->max_background_compactions = 1;

	opts->max_total_wal_size = 32_MiB; //TODO: conf
	opts->db_write_buffer_size = 32_MiB; //TODO: conf
	//opts->max_log_file_size = 32_MiB; //TODO: conf

	//TODO: range_sync
	opts->bytes_per_sync = 0;
	opts->wal_bytes_per_sync = 0;

	// For the write-side of a compaction process: writes will be of approx
	// this size. The compaction process is composing a buffer of this size
	// between those writes. Too large a buffer will hog the CPU and starve
	// other ircd::ctx's. Too small a buffer will be inefficient.
	opts->writable_file_max_buffer_size = 4_MiB; //TODO: conf

	// For the read-side of the compaction process.
	opts->compaction_readahead_size = 128_KiB; //TODO: conf

	// MUST be 1 (no subcompactions) or rocksdb spawns internal std::thread.
	opts->max_subcompactions = 1;

	// Disable noise
	opts->stats_dump_period_sec = 0;

	// Disables the timer to delete unused files; this operation occurs
	// instead with our compaction operations so we don't need to complicate.
	opts->delete_obsolete_files_period_micros = 0;
	opts->keep_log_file_num = 16;

	// These values prevent codepaths from being taken in rocksdb which may
	// introduce issues for ircd::ctx. We should still fully investigate
	// if any of these features can safely be used.
	opts->allow_concurrent_memtable_write = true;
	opts->enable_write_thread_adaptive_yield = false;
	opts->enable_pipelined_write = false;
	opts->write_thread_max_yield_usec = 0;
	opts->write_thread_slow_yield_usec = 0;

	// Detect if O_DIRECT is possible if db::init left a file in the
	// database directory claiming such. User can force no direct io
	// with program option at startup (i.e -nodirect).
	opts->use_direct_reads = bool(fs::fd::opts::direct_io_enable)?
		fs::exists(direct_io_test_file_path()):
		false;

	// Use the determined direct io value for writes as well.
	//opts->use_direct_io_for_flush_and_compaction = opts->use_direct_reads;

	// Doesn't appear to be in effect when direct io is used. Not supported by
	// all filesystems so disabled for now.
	// TODO: use fs::support::fallocate() test similar to direct_io_test_file.
	opts->allow_fallocate = false;

	#ifdef RB_DEBUG
	opts->dump_malloc_stats = true;
	#endif

	// Default corruption tolerance is zero-tolerance; db fails to open with
	// error by default to inform the user. The rest of the options are
	// various relaxations for how to proceed.
	opts->wal_recovery_mode = rocksdb::WALRecoveryMode::kAbsoluteConsistency;

	// When corrupted after crash, the DB is rolled back before the first
	// corruption and erases everything after it, giving a consistent
	// state up at that point, though losing some recent data.
	if(string_view(open_recover) == "point")
		opts->wal_recovery_mode = rocksdb::WALRecoveryMode::kPointInTimeRecovery;

	// Skipping corrupted records will create gaps in the DB timeline where the
	// application (like a matrix timeline) cannot tolerate the unexpected gap.
	if(string_view(open_recover) == "skip")
		opts->wal_recovery_mode = rocksdb::WALRecoveryMode::kSkipAnyCorruptedRecords;

	// Tolerating corrupted records is very last-ditch for getting the database to
	// open in a catastrophe. We have no use for this option but should use it for
	//TODO: emergency salvage-mode.
	if(string_view(open_recover) == "tolerate")
		opts->wal_recovery_mode = rocksdb::WALRecoveryMode::kTolerateCorruptedTailRecords;

	// This prevents the creation of additional SST files and lots of I/O on
	// either DB open and close.
	opts->avoid_flush_during_recovery = true;
	opts->avoid_flush_during_shutdown = true;

	// Setup env
	opts->env = env.get();

	// Setup WAL filter
	opts->wal_filter = this->wal_filter.get();

	// Setup SST file mgmt
	opts->sst_file_manager = this->ssts;

	// Setup logging
	logger->SetInfoLogLevel(ircd::debugmode? rocksdb::DEBUG_LEVEL : rocksdb::WARN_LEVEL);
	opts->info_log_level = logger->GetInfoLogLevel();
	opts->info_log = logger;

	// Setup event and statistics callbacks
	opts->listeners.emplace_back(this->events);

	// Setup histogram collecting
	//this->stats->stats_level_ = rocksdb::kAll;
	this->stats->stats_level_ = rocksdb::kExceptTimeForMutex;
	opts->statistics = this->stats;

	// Setup performance metric options
	//rocksdb::SetPerfLevel(rocksdb::PerfLevel::kDisable);

	// Setup row cache.
	opts->row_cache = this->row_cache;

	return opts;
}()}
,column_names{[this]
{
	// Existing columns at path. If any are left the descriptor set did not
	// describe all of the columns found in the database at path.
	const auto required
	{
		db::column_names(path, *opts)
	};

	// As we find descriptors for all of the columns on the disk we'll
	// remove their names from this set. Anything remaining is undescribed
	// and that's a fatal error.
	std::set<string_view> existing
	{
		begin(required), end(required)
	};

	// The names of the columns extracted from the descriptor set
	decltype(this->column_names) ret;
	for(auto &descriptor : descriptors)
	{
		// Deprecated columns which have already been dropped won't appear
		// in the existing (required) list. We don't need to construct those.
		if(!existing.count(descriptor.name) && descriptor.drop)
			continue;

		// Construct the column instance and indicate that we have a description
		// for it by removing it from existing.
		ret.emplace(descriptor.name, std::make_shared<column>(*this, descriptor));
		existing.erase(descriptor.name);
	}

	for(const auto &remain : existing)
		throw error
		{
			"Failed to describe existing column '%s' (and %zd others...)",
			remain,
			existing.size() - 1
		};

	return ret;
}()}
,d{[this]
{
	std::vector<rocksdb::ColumnFamilyHandle *> handles; // filled by DB::Open()
	std::vector<rocksdb::ColumnFamilyDescriptor> columns(this->column_names.size());
	std::transform(begin(this->column_names), end(this->column_names), begin(columns), []
	(const auto &pair)
	{
		const auto &column(*pair.second);
		return static_cast<const rocksdb::ColumnFamilyDescriptor &>(column);
	});

	// NOTE: rocksdb sez RepairDB is broken; can't use now
	if(fsck && fs::is_dir(path))
	{
		log::notice
		{
			log, "Checking database @ `%s' columns[%zu]", path, columns.size()
		};

		throw_on_error
		{
			rocksdb::RepairDB(path, *opts, columns)
		};

		log::info
		{
			log, "Database @ `%s' check complete", path
		};
	}

	// If the directory does not exist, though rocksdb will create it, we can
	// avoid scaring the user with an error log message if we just do that..
	if(opts->create_if_missing && !fs::is_dir(path) && !ircd::write_avoid)
		fs::mkdir(path);

	// Announce attempt before usual point where exceptions are thrown
	log::info
	{
		log, "Opening database \"%s\" @ `%s' with %zu columns...",
		this->name,
		path,
		columns.size()
	};

	if(read_only)
		log::warning
		{
			log, "Database \"%s\" @ `%s' will be opened in read-only mode.",
			this->name,
			path,
		};

	// Open DB into ptr
	rocksdb::DB *ptr;
	if(read_only)
		throw_on_error
		{
			rocksdb::DB::OpenForReadOnly(*opts, path, columns, &handles, &ptr)
		};
	else
		throw_on_error
		{
			rocksdb::DB::Open(*opts, path, columns, &handles, &ptr)
		};

	std::unique_ptr<rocksdb::DB> ret
	{
		ptr
	};

	// Set the handles. We can't throw here so we just log an error.
	for(const auto &handle : handles) try
	{
		this->column_names.at(handle->GetName())->handle.reset(handle);
	}
	catch(const std::exception &e)
	{
		log::critical
		{
			"'%s': Error finding described handle '%s' which RocksDB opened :%s",
			this->name,
			handle->GetName(),
			e.what()
		};
	}

	return ret;
}()}
,column_index{[this]
{
	size_t size{0};
	for(const auto &p : column_names)
	{
		const auto &column(*p.second);
		if(db::id(column) + 1 > size)
			size = db::id(column) + 1;
	}

	// This may have some gaps containing nullptrs where a CFID is unused.
	decltype(this->column_index) ret(size);
	for(const auto &p : column_names)
	{
		const auto &colptr(p.second);
		ret.at(db::id(*colptr)) = colptr;
	}

	return ret;
}()}
,columns{[this]
{
	// Skip the gaps in the column_index vector to make the columns list
	// only contain active column instances.
	decltype(this->columns) ret;
	for(const auto &ptr : this->column_index)
		if(ptr)
			ret.emplace_back(ptr);

	return ret;
}()}
,uuid{[this]
{
	std::string ret;
	throw_on_error
	{
		d->GetDbIdentity(ret)
	};

	return ret;
}()}
,checkpointer{[this]
{
	rocksdb::Checkpoint *checkpointer{nullptr};
	throw_on_error
	{
		rocksdb::Checkpoint::Create(this->d.get(), &checkpointer)
	};

	return checkpointer;
}()}
{
	// Conduct drops from schema changes. The database must be fully opened
	// as if they were not dropped first, then we conduct the drop operation
	// here. The drop operation has no effects until the database is next
	// closed; the dropped columns will still work during this instance.
	for(const auto &colptr : columns)
		if(describe(*colptr).drop)
			db::drop(*colptr);

	// Database integrity check branch.
	if(bool(open_check))
	{
		log::notice
		{
			log, "'%s': Verifying database integrity. This may take several minutes...",
			this->name
		};

		check(*this);
	}

	log::info
	{
		log, "'%s': Opened database @ `%s' with %zu columns at sequence number %lu.",
		this->name,
		path,
		columns.size(),
		d->GetLatestSequenceNumber()
	};
}
catch(const error &e)
{
	log::error
	{
		"Error opening db '%s': %s",
		this->name,
		e.what()
	};

	throw;
}
catch(const std::exception &e)
{
	log::error
	{
		"Error opening db '%s': %s",
		this->name,
		e.what()
	};

	throw error
	{
		"Failed to open db '%s': %s",
		this->name,
		e.what()
	};
}
ircd::db::database::~database()
noexcept try
{
	const ctx::uninterruptible::nothrow ui;
	const std::unique_lock lock{write_mutex};
	log::info
	{
		log, "'%s': closing database @ `%s'...",
		name,
		path
	};

	bgcancel(*this, true);

	log::debug
	{
		log, "'%s': closing columns...",
		name
	};

	this->checkpointer.reset(nullptr);
	this->column_names.clear();
	this->column_index.clear();
	this->columns.clear();
	log::debug
	{
		log, "'%s': closed columns; flushing...",
		name
	};

	if(!read_only)
		flush(*this);

	log::debug
	{
		log, "'%s': flushed; synchronizing...",
		name
	};

	if(!read_only)
		sync(*this);

	log::debug
	{
		log, "'%s': synchronized with hardware.",
		name
	};

	const auto sequence
	{
		d->GetLatestSequenceNumber()
	};

	throw_on_error
	{
		d->Close()
	};

	env->st.reset(nullptr);

	log::info
	{
		log, "'%s': closed database @ `%s' at sequence number %lu.",
		name,
		path,
		sequence
	};
}
catch(const std::exception &e)
{
	log::error
	{
		log, "'%s': Error closing database(%p) :%s",
		name,
		this,
		e.what()
	};

	return;
}
catch(...)
{
	log::critical
	{
		log, "'%s': Unknown error closing database(%p)",
		name,
		this
	};

	return;
}
void
ircd::db::database::operator()(const delta &delta)
{
	operator()(sopts{}, delta);
}

void
ircd::db::database::operator()(const std::initializer_list<delta> &deltas)
{
	operator()(sopts{}, deltas);
}

void
ircd::db::database::operator()(const delta *const &begin,
                               const delta *const &end)
{
	operator()(sopts{}, begin, end);
}

void
ircd::db::database::operator()(const sopts &sopts,
                               const delta &delta)
{
	operator()(sopts, &delta, &delta + 1);
}

void
ircd::db::database::operator()(const sopts &sopts,
                               const std::initializer_list<delta> &deltas)
{
	operator()(sopts, std::begin(deltas), std::end(deltas));
}

void
ircd::db::database::operator()(const sopts &sopts,
                               const delta *const &begin,
                               const delta *const &end)
{
	rocksdb::WriteBatch batch;
	std::for_each(begin, end, [this, &batch]
	(const delta &delta)
	{
		const auto &op(std::get<op>(delta));
		const auto &col(std::get<1>(delta));
		const auto &key(std::get<2>(delta));
		const auto &val(std::get<3>(delta));
		db::column column(operator[](col));
		append(batch, column, db::column::delta
		{
			op,
			key,
			val
		});
	});

	commit(*this, batch, sopts);
}
ircd::db::database::column &
|
|
|
|
ircd::db::database::operator[](const string_view &name)
|
2017-09-19 04:17:36 +02:00
|
|
|
{
|
2019-01-16 22:21:36 +01:00
|
|
|
return operator[](cfid(name));
|
2017-09-19 04:17:36 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
ircd::db::database::column &
|
|
|
|
ircd::db::database::operator[](const uint32_t &id)
|
2017-04-03 06:02:32 +02:00
|
|
|
try
|
2017-03-31 00:57:08 +02:00
|
|
|
{
|
2018-10-22 16:09:55 +02:00
|
|
|
auto &ret(*column_index.at(id));
|
|
|
|
assert(db::id(ret) == id);
|
|
|
|
return ret;
|
2017-03-31 00:57:08 +02:00
|
|
|
}
|
2017-04-03 06:02:32 +02:00
|
|
|
catch(const std::out_of_range &e)
|
|
|
|
{
|
2018-12-24 22:32:22 +01:00
|
|
|
throw not_found
|
2018-05-29 10:09:15 +02:00
|
|
|
{
|
|
|
|
"'%s': column id[%u] is not available or specified in schema",
|
|
|
|
this->name,
|
|
|
|
id
|
|
|
|
};
|
2017-04-03 06:02:32 +02:00
|
|
|
}
|
2017-03-31 00:57:08 +02:00
|
|
|
|
|
|
|
const ircd::db::database::column &
|
|
|
|
ircd::db::database::operator[](const string_view &name)
|
2017-09-19 04:17:36 +02:00
|
|
|
const
|
|
|
|
{
|
2019-01-16 22:21:36 +01:00
|
|
|
return operator[](cfid(name));
|
2017-09-19 04:17:36 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
const ircd::db::database::column &
|
|
|
|
ircd::db::database::operator[](const uint32_t &id)
|
2017-04-03 06:02:32 +02:00
|
|
|
const try
|
2017-03-31 00:57:08 +02:00
|
|
|
{
|
2018-10-22 16:09:55 +02:00
|
|
|
auto &ret(*column_index.at(id));
|
|
|
|
assert(db::id(ret) == id);
|
|
|
|
return ret;
|
2017-03-31 00:57:08 +02:00
|
|
|
}
|
2017-04-03 06:02:32 +02:00
|
|
|
catch(const std::out_of_range &e)
|
|
|
|
{
|
2018-12-24 22:32:22 +01:00
|
|
|
throw not_found
|
2018-05-29 10:09:15 +02:00
|
|
|
{
|
|
|
|
"'%s': column id[%u] is not available or specified in schema",
|
|
|
|
this->name,
|
|
|
|
id
|
|
|
|
};
|
2017-04-03 06:02:32 +02:00
|
|
|
}
|
2017-03-31 00:57:08 +02:00
|
|
|
|
2019-01-16 22:21:36 +01:00
|
|
|
uint32_t
|
|
|
|
ircd::db::database::cfid(const string_view &name)
|
|
|
|
const
|
|
|
|
{
|
|
|
|
const int32_t id
|
|
|
|
{
|
|
|
|
cfid(std::nothrow, name)
|
|
|
|
};
|
|
|
|
|
|
|
|
if(id < 0)
|
|
|
|
throw not_found
|
|
|
|
{
|
|
|
|
"'%s': column '%s' is not available or specified in schema",
|
|
|
|
this->name,
|
|
|
|
name
|
|
|
|
};
|
|
|
|
|
|
|
|
return id;
|
|
|
|
}
|
|
|
|
|
|
|
|
int32_t
|
|
|
|
ircd::db::database::cfid(const std::nothrow_t,
|
|
|
|
const string_view &name)
|
|
|
|
const
|
|
|
|
{
|
|
|
|
const auto it{column_names.find(name)};
|
|
|
|
return it != std::end(column_names)?
|
|
|
|
db::id(*it->second):
|
|
|
|
-1;
|
|
|
|
}
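// A minimal usage sketch: the throwing cfid() overload above is for columns
// which must exist; the std::nothrow overload returns -1 on a miss. The
// column name "events" here is hypothetical.
//
//   const int32_t id{d.cfid(std::nothrow, "events")};
//   if(id >= 0)
//       database::column &c{d[uint32_t(id)]};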
|
|
|
|
|
2017-03-31 00:57:08 +02:00
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
|
|
|
//
|
|
|
|
// database::column
|
|
|
|
//
|
2017-03-24 02:36:49 +01:00
|
|
|
|
2017-09-19 04:17:36 +02:00
|
|
|
void
|
|
|
|
ircd::db::drop(database::column &c)
|
|
|
|
{
|
|
|
|
if(!c.handle)
|
|
|
|
return;
|
|
|
|
|
2018-10-22 13:30:13 +02:00
|
|
|
database &d(c);
|
|
|
|
log::debug
|
|
|
|
{
|
|
|
|
log, "'%s':'%s' @%lu DROPPING COLUMN",
|
|
|
|
name(d),
|
|
|
|
name(c),
|
|
|
|
sequence(d)
|
|
|
|
};
|
|
|
|
|
2017-09-19 04:17:36 +02:00
|
|
|
throw_on_error
|
|
|
|
{
|
|
|
|
c.d->d->DropColumnFamily(c.handle.get())
|
|
|
|
};
|
2018-10-22 13:30:13 +02:00
|
|
|
|
|
|
|
log::notice
|
|
|
|
{
|
|
|
|
log, "'%s':'%s' @%lu DROPPED COLUMN",
|
|
|
|
name(d),
|
|
|
|
name(c),
|
|
|
|
sequence(d)
|
|
|
|
};
|
2017-09-19 04:17:36 +02:00
|
|
|
}
|
|
|
|
|
2019-01-24 01:00:22 +01:00
|
|
|
bool
|
|
|
|
ircd::db::dropped(const database::column &c)
|
|
|
|
{
|
|
|
|
return c.descriptor?
|
|
|
|
c.descriptor->drop:
|
|
|
|
true;
|
|
|
|
}
|
|
|
|
|
2017-09-19 04:17:36 +02:00
|
|
|
uint32_t
|
|
|
|
ircd::db::id(const database::column &c)
|
|
|
|
{
|
|
|
|
if(!c.handle)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
return c.handle->GetID();
|
|
|
|
}
|
|
|
|
|
|
|
|
const std::string &
|
|
|
|
ircd::db::name(const database::column &c)
|
|
|
|
{
|
|
|
|
return c.name;
|
|
|
|
}
|
|
|
|
|
2018-09-20 00:38:37 +02:00
|
|
|
const ircd::db::descriptor &
|
2017-09-21 05:38:39 +02:00
|
|
|
ircd::db::describe(const database::column &c)
|
|
|
|
{
|
2018-10-22 16:09:16 +02:00
|
|
|
assert(c.descriptor);
|
|
|
|
return *c.descriptor;
|
2017-09-21 05:38:39 +02:00
|
|
|
}
|
|
|
|
|
2017-09-19 04:17:36 +02:00
|
|
|
//
|
|
|
|
// database::column
|
|
|
|
//
|
|
|
|
|
2018-10-22 16:09:16 +02:00
|
|
|
ircd::db::database::column::column(database &d,
|
|
|
|
db::descriptor &descriptor)
|
2017-09-19 04:17:36 +02:00
|
|
|
:rocksdb::ColumnFamilyDescriptor
|
|
|
|
(
|
2018-12-12 18:53:16 +01:00
|
|
|
descriptor.name, db::options{descriptor.options}
|
2017-09-19 04:17:36 +02:00
|
|
|
)
|
2018-10-22 16:09:16 +02:00
|
|
|
,d{&d}
|
|
|
|
,descriptor{&descriptor}
|
|
|
|
,key_type{this->descriptor->type.first}
|
|
|
|
,mapped_type{this->descriptor->type.second}
|
|
|
|
,cmp{this->d, this->descriptor->cmp}
|
|
|
|
,prefix{this->d, this->descriptor->prefix}
|
|
|
|
,cfilter{this, this->descriptor->compactor}
|
|
|
|
,stats{std::make_shared<struct database::stats>(this->d)}
|
2017-04-03 06:02:32 +02:00
|
|
|
,handle
|
2017-03-24 02:36:49 +01:00
|
|
|
{
|
2018-10-22 16:09:16 +02:00
|
|
|
nullptr, [&d](rocksdb::ColumnFamilyHandle *const handle)
|
2017-04-03 06:02:32 +02:00
|
|
|
{
|
2018-10-22 16:09:16 +02:00
|
|
|
assert(d.d);
|
|
|
|
if(handle && d.d)
|
|
|
|
d.d->DestroyColumnFamilyHandle(handle);
|
2017-04-03 06:02:32 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
{
|
2018-04-18 00:28:08 +02:00
|
|
|
// If possible, deduce comparator based on type given in descriptor
|
2018-10-22 16:09:16 +02:00
|
|
|
if(!this->descriptor->cmp.less)
|
2017-03-31 00:57:08 +02:00
|
|
|
{
|
|
|
|
if(key_type == typeid(string_view))
|
2017-09-23 03:48:35 +02:00
|
|
|
this->cmp.user = cmp_string_view{};
|
2017-03-31 00:57:08 +02:00
|
|
|
else if(key_type == typeid(int64_t))
|
2017-09-23 03:48:35 +02:00
|
|
|
this->cmp.user = cmp_int64_t{};
|
2018-04-18 00:28:08 +02:00
|
|
|
else if(key_type == typeid(uint64_t))
|
|
|
|
this->cmp.user = cmp_uint64_t{};
|
2017-03-31 00:57:08 +02:00
|
|
|
else
|
2018-04-18 00:28:08 +02:00
|
|
|
throw error
|
|
|
|
{
|
|
|
|
"column '%s' key type[%s] requires user supplied comparator",
|
|
|
|
this->name,
|
|
|
|
key_type.name()
|
|
|
|
};
|
2017-03-31 00:57:08 +02:00
|
|
|
}
|
|
|
|
|
2017-09-08 10:33:41 +02:00
|
|
|
// Set the key comparator
|
2017-03-31 00:57:08 +02:00
|
|
|
this->options.comparator = &this->cmp;
|
|
|
|
|
2017-08-31 07:12:58 +02:00
|
|
|
// Set the prefix extractor
|
|
|
|
if(this->prefix.user.get && this->prefix.user.has)
|
|
|
|
this->options.prefix_extractor = std::shared_ptr<const rocksdb::SliceTransform>
|
|
|
|
{
|
|
|
|
&this->prefix, [](const rocksdb::SliceTransform *) {}
|
|
|
|
};
|
2017-08-23 22:37:47 +02:00
|
|
|
|
2018-09-25 11:37:29 +02:00
|
|
|
// Set the insert hint prefix extractor
|
|
|
|
if(this->options.prefix_extractor)
|
|
|
|
this->options.memtable_insert_with_hint_prefix_extractor = this->options.prefix_extractor;
|
|
|
|
|
2018-09-19 00:07:09 +02:00
|
|
|
// Set the compaction filter
|
|
|
|
this->options.compaction_filter = &this->cfilter;
|
|
|
|
|
2018-11-30 00:41:42 +01:00
|
|
|
//this->options.paranoid_file_checks = true;
|
|
|
|
|
|
|
|
// More stats reported by the rocksdb.stats property.
|
|
|
|
this->options.report_bg_io_stats = true;
|
|
|
|
|
|
|
|
// Set the compaction style; we don't override this in the descriptor yet.
|
|
|
|
//this->options.compaction_style = rocksdb::kCompactionStyleNone;
|
|
|
|
this->options.compaction_style = rocksdb::kCompactionStyleLevel;
|
|
|
|
|
|
|
|
// Set the compaction priority; this should probably be in the descriptor
|
|
|
|
// but this is currently selected for the general matrix workload.
|
2018-12-12 20:22:22 +01:00
|
|
|
this->options.compaction_pri = rocksdb::CompactionPri::kOldestSmallestSeqFirst;
|
2018-11-30 00:41:42 +01:00
|
|
|
|
|
|
|
// Set filter reductions for this column; this assumes queries usually hit an existing key.

|
|
|
|
this->options.optimize_filters_for_hits = this->descriptor->expect_queries_hit;
|
|
|
|
|
2018-12-02 01:11:32 +01:00
|
|
|
// Compression type
|
2018-11-30 00:41:42 +01:00
|
|
|
this->options.compression = find_supported_compression(this->descriptor->compression);
|
|
|
|
//this->options.compression = rocksdb::kNoCompression;
|
|
|
|
|
2018-12-02 01:11:32 +01:00
|
|
|
// Compression options
|
|
|
|
this->options.compression_opts.enabled = true;
|
|
|
|
this->options.compression_opts.max_dict_bytes = 0;//8_MiB;
|
|
|
|
|
|
|
|
// Mimic the above for bottommost compression.
|
|
|
|
//this->options.bottommost_compression = this->options.compression;
|
|
|
|
//this->options.bottommost_compression_opts = this->options.compression_opts;
|
|
|
|
|
2018-11-30 00:41:42 +01:00
|
|
|
//TODO: descriptor / conf
|
2018-12-12 20:22:22 +01:00
|
|
|
this->options.disable_auto_compactions = false;
|
|
|
|
this->options.level_compaction_dynamic_level_bytes = false;
|
|
|
|
|
2018-11-30 00:41:42 +01:00
|
|
|
this->options.num_levels = 7;
|
2019-02-26 03:07:50 +01:00
|
|
|
this->options.write_buffer_size = 4_MiB;
|
2019-01-12 02:37:51 +01:00
|
|
|
this->options.max_write_buffer_number = 8;
|
|
|
|
this->options.min_write_buffer_number_to_merge = 4;
|
2018-12-19 02:36:51 +01:00
|
|
|
this->options.max_write_buffer_number_to_maintain = 0;
|
2019-01-12 02:37:51 +01:00
|
|
|
this->options.level0_file_num_compaction_trigger = 2;
|
2019-04-11 06:18:27 +02:00
|
|
|
this->options.target_file_size_base = 48_MiB;
|
2019-02-26 03:07:50 +01:00
|
|
|
this->options.target_file_size_multiplier = 16;
|
2019-01-12 02:37:51 +01:00
|
|
|
this->options.max_bytes_for_level_base = 1_MiB;
|
2018-12-12 20:22:22 +01:00
|
|
|
this->options.max_bytes_for_level_multiplier = 2;
|
2018-11-30 00:41:42 +01:00
|
|
|
|
2018-04-18 00:28:08 +02:00
|
|
|
//
|
|
|
|
// Table options
|
|
|
|
//
|
2017-03-31 00:57:08 +02:00
|
|
|
|
2018-10-14 17:53:46 +02:00
|
|
|
// Block based table index type.
|
2018-10-18 07:11:28 +02:00
|
|
|
table_opts.format_version = 3; // RocksDB >= 5.15 compat only; otherwise use 2.
|
2018-10-14 17:53:46 +02:00
|
|
|
table_opts.index_type = rocksdb::BlockBasedTableOptions::kTwoLevelIndexSearch;
|
2019-01-12 02:37:51 +01:00
|
|
|
table_opts.read_amp_bytes_per_bit = 8;
|
2018-10-14 17:53:46 +02:00
|
|
|
table_opts.partition_filters = true;
|
2018-10-18 07:11:28 +02:00
|
|
|
table_opts.use_delta_encoding = true;
|
2018-10-14 17:53:46 +02:00
|
|
|
|
2018-10-16 10:25:02 +02:00
|
|
|
// Specify that index blocks should use the cache. If not, they will be
|
|
|
|
// pre-read into RAM by rocksdb internally. Because of the above
|
|
|
|
// TwoLevelIndex + partition_filters configuration on RocksDB v5.15 it's
|
2018-11-09 07:19:26 +01:00
|
|
|
// better to use pre-read except in the case of a massive database.
|
2018-11-03 02:31:20 +01:00
|
|
|
table_opts.cache_index_and_filter_blocks = true;
|
2019-06-08 09:05:00 +02:00
|
|
|
table_opts.cache_index_and_filter_blocks_with_high_priority = true;
|
2018-10-16 10:25:02 +02:00
|
|
|
table_opts.pin_top_level_index_and_filter = false;
|
|
|
|
table_opts.pin_l0_filter_and_index_blocks_in_cache = false;
|
2018-12-19 02:36:51 +01:00
|
|
|
table_opts.enable_index_compression = false;
|
2019-02-26 03:07:50 +01:00
|
|
|
table_opts.index_block_restart_interval = 64;
|
2018-10-16 10:25:02 +02:00
|
|
|
|
2018-09-18 06:26:55 +02:00
|
|
|
// Setup the block size
|
2018-10-22 16:09:16 +02:00
|
|
|
table_opts.block_size = this->descriptor->block_size;
|
|
|
|
table_opts.metadata_block_size = this->descriptor->meta_block_size;
|
2018-10-22 22:53:02 +02:00
|
|
|
table_opts.block_size_deviation = 50;
|
2019-02-26 03:07:50 +01:00
|
|
|
table_opts.block_restart_interval = 64;
|
2018-11-30 00:41:42 +01:00
|
|
|
|
2018-12-12 20:22:22 +01:00
|
|
|
//table_opts.data_block_index_type = rocksdb::BlockBasedTableOptions::kDataBlockBinaryAndHash;
|
|
|
|
//table_opts.data_block_hash_table_util_ratio = 0.75;
|
|
|
|
|
2018-11-30 00:41:42 +01:00
|
|
|
// Block alignment doesn't work if compression is enabled for this
|
|
|
|
// column. If not, we want block alignment for direct IO.
|
|
|
|
table_opts.block_align = this->options.compression == rocksdb::kNoCompression;
|
2018-09-18 06:26:55 +02:00
|
|
|
|
2018-04-18 00:28:08 +02:00
|
|
|
// Setup the cache for assets.
|
2018-10-22 16:09:16 +02:00
|
|
|
const auto &cache_size(this->descriptor->cache_size);
|
2018-09-03 02:31:09 +02:00
|
|
|
if(cache_size != 0)
|
2019-01-12 01:21:04 +01:00
|
|
|
table_opts.block_cache = std::make_shared<database::cache>(this->d, this->stats, this->name, cache_size);
|
2018-04-15 08:00:56 +02:00
|
|
|
|
2019-01-12 01:35:52 +01:00
|
|
|
// RocksDB will create an 8_MiB block_cache if we don't create our own.
|
|
|
|
// To honor the user's desire for a zero-size cache, this must be set.
|
|
|
|
if(!table_opts.block_cache)
|
|
|
|
{
|
|
|
|
table_opts.no_block_cache = true;
|
|
|
|
table_opts.cache_index_and_filter_blocks = false; // MBZ or error w/o block_cache
|
|
|
|
}
|
|
|
|
|
2018-04-18 00:28:08 +02:00
|
|
|
// Setup the cache for compressed assets.
|
2018-10-22 16:09:16 +02:00
|
|
|
const auto &cache_size_comp(this->descriptor->cache_size_comp);
|
2018-09-03 02:31:09 +02:00
|
|
|
if(cache_size_comp != 0)
|
2019-01-12 01:21:04 +01:00
|
|
|
table_opts.block_cache_compressed = std::make_shared<database::cache>(this->d, this->stats, this->name, cache_size_comp);
|
2018-04-15 08:00:56 +02:00
|
|
|
|
2018-04-18 00:28:08 +02:00
|
|
|
// Setup the bloom filter.
|
2018-10-22 16:09:16 +02:00
|
|
|
const auto &bloom_bits(this->descriptor->bloom_bits);
|
2018-04-15 08:00:56 +02:00
|
|
|
if(bloom_bits)
|
|
|
|
table_opts.filter_policy.reset(rocksdb::NewBloomFilterPolicy(bloom_bits, false));
|
|
|
|
|
2018-04-18 00:28:08 +02:00
|
|
|
// Tickers::READ_AMP_TOTAL_READ_BYTES / Tickers::READ_AMP_ESTIMATE_USEFUL_BYTES
|
|
|
|
//table_opts.read_amp_bytes_per_bit = 8;
|
|
|
|
|
2018-10-14 17:53:46 +02:00
|
|
|
// Finally set the table options in the column options.
|
2018-04-15 08:00:56 +02:00
|
|
|
this->options.table_factory.reset(rocksdb::NewBlockBasedTableFactory(table_opts));
|
|
|
|
|
2018-08-21 09:20:54 +02:00
|
|
|
log::debug
|
|
|
|
{
|
2018-11-30 01:47:17 +01:00
|
|
|
log, "schema '%s' column [%s => %s] cmp[%s] pfx[%s] lru:%s:%s bloom:%zu compression:%d %s",
|
2018-10-22 16:09:16 +02:00
|
|
|
db::name(d),
|
2018-08-21 09:20:54 +02:00
|
|
|
demangle(key_type.name()),
|
|
|
|
demangle(mapped_type.name()),
|
|
|
|
this->cmp.Name(),
|
|
|
|
this->options.prefix_extractor? this->prefix.Name() : "none",
|
2018-09-03 05:42:33 +02:00
|
|
|
cache_size? "YES": "NO",
|
|
|
|
cache_size_comp? "YES": "NO",
|
2018-08-21 09:20:54 +02:00
|
|
|
bloom_bits,
|
2018-11-30 01:47:17 +01:00
|
|
|
int(this->options.compression),
|
2018-10-22 16:09:16 +02:00
|
|
|
this->descriptor->name
|
2018-08-21 09:20:54 +02:00
|
|
|
};
|
2017-03-24 02:36:49 +01:00
|
|
|
}
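// A hypothetical sketch of the db::descriptor knobs consumed by this
// constructor. The member names follow the accesses above; the construction
// style and values are illustrative assumptions, not the project's actual
// schema:
//
//   db::descriptor events_desc;
//   events_desc.name = "events";
//   events_desc.block_size = 16_KiB;        // table_opts.block_size
//   events_desc.cache_size = 16_MiB;        // block_cache capacity
//   events_desc.bloom_bits = 10;            // NewBloomFilterPolicy bits/key
//   events_desc.expect_queries_hit = true;  // optimize_filters_for_hits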
|
|
|
|
|
2017-03-31 00:57:08 +02:00
|
|
|
ircd::db::database::column::~column()
|
|
|
|
noexcept
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
ircd::db::database::column::operator
|
|
|
|
database &()
|
|
|
|
{
|
|
|
|
return *d;
|
|
|
|
}
|
|
|
|
|
|
|
|
ircd::db::database::column::operator
|
|
|
|
rocksdb::ColumnFamilyHandle *()
|
|
|
|
{
|
2017-04-03 06:02:32 +02:00
|
|
|
return handle.get();
|
2017-03-31 00:57:08 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
ircd::db::database::column::operator
|
|
|
|
const database &()
|
|
|
|
const
|
|
|
|
{
|
|
|
|
return *d;
|
|
|
|
}
|
|
|
|
|
|
|
|
ircd::db::database::column::operator
|
|
|
|
const rocksdb::ColumnFamilyHandle *()
|
|
|
|
const
|
|
|
|
{
|
2017-04-03 06:02:32 +02:00
|
|
|
return handle.get();
|
2017-03-31 00:57:08 +02:00
|
|
|
}
|
|
|
|
|
2018-12-01 00:19:00 +01:00
|
|
|
ircd::db::database::column::operator
|
|
|
|
const rocksdb::ColumnFamilyOptions &()
|
|
|
|
const
|
|
|
|
{
|
|
|
|
return options;
|
|
|
|
}
|
|
|
|
|
2018-12-12 17:03:59 +01:00
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
|
|
|
//
|
|
|
|
// database::comparator
|
|
|
|
//
|
|
|
|
|
|
|
|
ircd::db::database::comparator::comparator(database *const &d,
|
|
|
|
db::comparator user)
|
|
|
|
:d{d}
|
|
|
|
,user
|
|
|
|
{
|
|
|
|
std::move(user)
|
|
|
|
}
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
const char *
|
|
|
|
ircd::db::database::comparator::Name()
|
|
|
|
const noexcept
|
|
|
|
{
|
|
|
|
assert(!user.name.empty());
|
|
|
|
return user.name.data();
|
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
|
|
|
ircd::db::database::comparator::Equal(const Slice &a,
|
|
|
|
const Slice &b)
|
|
|
|
const noexcept
|
|
|
|
{
|
|
|
|
return user.equal?
|
|
|
|
user.equal(slice(a), slice(b)):
|
|
|
|
Compare(a, b) == 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
ircd::db::database::comparator::Compare(const Slice &a,
|
|
|
|
const Slice &b)
|
|
|
|
const noexcept
|
|
|
|
{
|
|
|
|
assert(bool(user.less));
|
|
|
|
const auto sa{slice(a)};
|
|
|
|
const auto sb{slice(b)};
|
|
|
|
return user.less(sa, sb)? -1: // less[Y], equal[?], greater[?]
|
|
|
|
user.equal && user.equal(sa, sb)? 0: // less[N], equal[Y], greater[?]
|
|
|
|
user.equal? 1: // less[N], equal[N], greater[Y]
|
|
|
|
user.less(sb, sa)? 1: // less[N], equal[?], greater[Y]
|
|
|
|
0; // less[N], equal[Y], greater[N]
|
|
|
|
}
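// A hypothetical sketch of a user db::comparator consumed above: only
// `less` is required; Compare() derives equality and greater-than by
// calling it in both directions when `equal` is not supplied. (Member
// names follow the usage above; exact types are assumptions.)
//
//   db::comparator reverse;
//   reverse.name = "reverse lexicographic";
//   reverse.less = [](const string_view &a, const string_view &b)
//   {
//       return b < a;
//   };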
|
|
|
|
|
|
|
|
void
|
|
|
|
ircd::db::database::comparator::FindShortestSeparator(std::string *const key,
|
|
|
|
const Slice &limit)
|
|
|
|
const noexcept
|
|
|
|
{
|
|
|
|
assert(key != nullptr);
|
|
|
|
if(user.separator)
|
|
|
|
user.separator(*key, slice(limit));
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
ircd::db::database::comparator::FindShortSuccessor(std::string *const key)
|
|
|
|
const noexcept
|
|
|
|
{
|
|
|
|
assert(key != nullptr);
|
|
|
|
if(user.successor)
|
|
|
|
user.successor(*key);
|
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
|
|
|
ircd::db::database::comparator::IsSameLengthImmediateSuccessor(const Slice &s,
|
|
|
|
const Slice &t)
|
|
|
|
const noexcept
|
|
|
|
{
|
|
|
|
return rocksdb::Comparator::IsSameLengthImmediateSuccessor(s, t);
|
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
|
|
|
ircd::db::database::comparator::CanKeysWithDifferentByteContentsBeEqual()
|
|
|
|
const noexcept
|
|
|
|
{
|
|
|
|
// When keys with different byte contents can be equal the keys are
|
|
|
|
// not hashable.
|
|
|
|
return !user.hashable;
|
|
|
|
}
|
|
|
|
|
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
|
|
|
//
|
|
|
|
// database::prefix_transform
|
|
|
|
//
|
|
|
|
|
|
|
|
const char *
|
|
|
|
ircd::db::database::prefix_transform::Name()
|
|
|
|
const noexcept
|
|
|
|
{
|
|
|
|
assert(!user.name.empty());
|
|
|
|
return user.name.c_str();
|
|
|
|
}
|
|
|
|
|
|
|
|
rocksdb::Slice
|
|
|
|
ircd::db::database::prefix_transform::Transform(const Slice &key)
|
|
|
|
const noexcept
|
|
|
|
{
|
|
|
|
assert(bool(user.get));
|
|
|
|
return slice(user.get(slice(key)));
|
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
|
|
|
ircd::db::database::prefix_transform::InRange(const Slice &key)
|
|
|
|
const noexcept
|
|
|
|
{
|
|
|
|
return InDomain(key);
|
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
|
|
|
ircd::db::database::prefix_transform::InDomain(const Slice &key)
|
|
|
|
const noexcept
|
|
|
|
{
|
|
|
|
assert(bool(user.has));
|
|
|
|
return user.has(slice(key));
|
|
|
|
}
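// A hypothetical sketch of the user prefix functions consumed above:
// `has` answers InDomain() and `get` answers Transform(). (The user type
// name and member types are assumptions; member names follow the usage
// above.)
//
//   db::prefix_transform pfx;
//   pfx.name = "first path component";
//   pfx.has = [](const string_view &key)
//   {
//       return key.find('/') != key.npos;
//   };
//   pfx.get = [](const string_view &key)
//   {
//       return key.substr(0, key.find('/'));
//   };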
|
|
|
|
|
2017-03-31 00:57:08 +02:00
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
|
|
|
//
|
|
|
|
// database::snapshot
|
|
|
|
//
|
|
|
|
|
|
|
|
uint64_t
|
|
|
|
ircd::db::sequence(const database::snapshot &s)
|
|
|
|
{
|
|
|
|
const rocksdb::Snapshot *const rs(s);
|
2017-09-23 11:52:33 +02:00
|
|
|
return sequence(rs);
|
|
|
|
}
|
|
|
|
|
|
|
|
uint64_t
|
|
|
|
ircd::db::sequence(const rocksdb::Snapshot *const &rs)
|
|
|
|
{
|
|
|
|
return likely(rs)? rs->GetSequenceNumber() : 0ULL;
|
2017-03-31 00:57:08 +02:00
|
|
|
}
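// A minimal usage sketch (assuming a database reference `d`): acquire a
// snapshot and read the sequence number it pins; the snapshot is released
// through the custom deleter in the constructor below.
//
//   const database::snapshot snap{d};
//   const uint64_t seq{db::sequence(snap)};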
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
//
|
|
|
|
// snapshot::snapshot
|
|
|
|
//
|
|
|
|
|
2017-03-31 00:57:08 +02:00
|
|
|
ircd::db::database::snapshot::snapshot(database &d)
|
2017-08-23 22:37:47 +02:00
|
|
|
:s
|
2017-03-31 00:57:08 +02:00
|
|
|
{
|
2017-08-23 22:37:47 +02:00
|
|
|
d.d->GetSnapshot(),
|
|
|
|
[dp(weak_from(d))](const rocksdb::Snapshot *const s)
|
|
|
|
{
|
|
|
|
if(!s)
|
|
|
|
return;
|
2017-03-31 00:57:08 +02:00
|
|
|
|
2017-08-23 22:37:47 +02:00
|
|
|
const auto d(dp.lock());
|
|
|
|
d->d->ReleaseSnapshot(s);
|
|
|
|
}
|
|
|
|
}
|
2017-03-31 00:57:08 +02:00
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
ircd::db::database::snapshot::~snapshot()
|
2017-03-24 02:36:49 +01:00
|
|
|
noexcept
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2017-04-03 06:02:32 +02:00
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
|
|
|
//
|
2018-12-10 22:26:46 +01:00
|
|
|
// database::logger
|
2017-04-03 06:02:32 +02:00
|
|
|
//
|
|
|
|
|
2018-12-10 22:26:46 +01:00
|
|
|
ircd::db::database::logger::logger(database *const &d)
|
2018-08-29 00:44:03 +02:00
|
|
|
:rocksdb::Logger{}
|
|
|
|
,d{d}
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2018-12-10 22:26:46 +01:00
|
|
|
ircd::db::database::logger::~logger()
|
2018-08-29 00:44:03 +02:00
|
|
|
noexcept
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
rocksdb::Status
|
2018-12-10 22:26:46 +01:00
|
|
|
ircd::db::database::logger::Close()
|
2018-08-29 00:44:03 +02:00
|
|
|
noexcept
|
|
|
|
{
|
|
|
|
return rocksdb::Status::NotSupported();
|
|
|
|
}
|
|
|
|
|
2017-03-24 02:36:49 +01:00
|
|
|
static
|
2018-12-19 21:52:08 +01:00
|
|
|
ircd::log::level
|
2017-03-24 02:36:49 +01:00
|
|
|
translate(const rocksdb::InfoLogLevel &level)
|
|
|
|
{
|
|
|
|
switch(level)
|
|
|
|
{
|
|
|
|
// Treat all informational messages from rocksdb as debug here for now.
|
|
|
|
// We can clean them up and make better reports for our users eventually.
|
|
|
|
default:
|
2018-12-19 21:52:08 +01:00
|
|
|
case rocksdb::InfoLogLevel::DEBUG_LEVEL: return ircd::log::level::DEBUG;
|
|
|
|
case rocksdb::InfoLogLevel::INFO_LEVEL: return ircd::log::level::DEBUG;
|
2017-03-24 02:36:49 +01:00
|
|
|
|
2018-12-19 21:52:08 +01:00
|
|
|
case rocksdb::InfoLogLevel::WARN_LEVEL: return ircd::log::level::WARNING;
|
|
|
|
case rocksdb::InfoLogLevel::ERROR_LEVEL: return ircd::log::level::ERROR;
|
|
|
|
case rocksdb::InfoLogLevel::FATAL_LEVEL: return ircd::log::level::CRITICAL;
|
|
|
|
case rocksdb::InfoLogLevel::HEADER_LEVEL: return ircd::log::level::NOTICE;
|
2017-03-24 02:36:49 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2018-12-10 22:26:46 +01:00
|
|
|
ircd::db::database::logger::Logv(const char *const fmt,
|
|
|
|
va_list ap)
|
2018-01-18 09:49:23 +01:00
|
|
|
noexcept
|
2017-03-24 02:36:49 +01:00
|
|
|
{
|
|
|
|
Logv(rocksdb::InfoLogLevel::DEBUG_LEVEL, fmt, ap);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2018-12-10 22:26:46 +01:00
|
|
|
ircd::db::database::logger::LogHeader(const char *const fmt,
|
|
|
|
va_list ap)
|
2018-01-18 09:49:23 +01:00
|
|
|
noexcept
|
2017-03-24 02:36:49 +01:00
|
|
|
{
|
|
|
|
Logv(rocksdb::InfoLogLevel::DEBUG_LEVEL, fmt, ap);
|
|
|
|
}
|
|
|
|
|
2017-10-01 12:06:48 +02:00
|
|
|
#pragma GCC diagnostic push
|
|
|
|
#pragma GCC diagnostic ignored "-Wsuggest-attribute=format"
|
2017-03-24 02:36:49 +01:00
|
|
|
void
|
2018-12-23 23:33:22 +01:00
|
|
|
ircd::db::database::logger::Logv(const rocksdb::InfoLogLevel level_,
|
2018-12-10 22:26:46 +01:00
|
|
|
const char *const fmt,
|
|
|
|
va_list ap)
|
2018-01-18 09:49:23 +01:00
|
|
|
noexcept
|
2017-03-24 02:36:49 +01:00
|
|
|
{
|
2018-12-23 23:33:22 +01:00
|
|
|
if(level_ < GetInfoLogLevel())
|
|
|
|
return;
|
|
|
|
|
|
|
|
const log::level level
|
|
|
|
{
|
|
|
|
translate(level_)
|
|
|
|
};
|
|
|
|
|
|
|
|
if(level > RB_LOG_LEVEL)
|
2017-03-24 02:36:49 +01:00
|
|
|
return;
|
|
|
|
|
2018-01-12 07:21:51 +01:00
|
|
|
thread_local char buf[1024]; const auto len
|
2017-03-24 02:36:49 +01:00
|
|
|
{
|
2017-08-23 23:37:06 +02:00
|
|
|
vsnprintf(buf, sizeof(buf), fmt, ap)
|
2017-03-24 02:36:49 +01:00
|
|
|
};
|
|
|
|
|
|
|
|
const auto str
|
|
|
|
{
|
|
|
|
// RocksDB adds annoying leading whitespace to attempt to right-justify things and idc
|
2017-12-26 12:00:52 +01:00
|
|
|
lstrip(string_view{buf, size_t(len)}, ' ')
|
2017-03-24 02:36:49 +01:00
|
|
|
};
|
|
|
|
|
2017-08-23 22:37:47 +02:00
|
|
|
// Skip the options for now
|
|
|
|
if(startswith(str, "Options"))
|
|
|
|
return;
|
|
|
|
|
2018-12-23 23:33:22 +01:00
|
|
|
rog(level, "'%s': %s", d->name, str);
|
2017-03-24 02:36:49 +01:00
|
|
|
}
|
2017-10-01 12:06:48 +02:00
|
|
|
#pragma GCC diagnostic pop
|
2017-03-24 02:36:49 +01:00
|
|
|
|
2017-04-03 06:02:32 +02:00
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
|
|
|
//
|
|
|
|
// database::mergeop
|
|
|
|
//
|
|
|
|
|
2018-08-29 00:44:03 +02:00
|
|
|
ircd::db::database::mergeop::mergeop(database *const &d,
|
|
|
|
merge_closure merger)
|
|
|
|
:d{d}
|
|
|
|
,merger
|
|
|
|
{
|
|
|
|
merger?
|
|
|
|
std::move(merger):
|
|
|
|
ircd::db::merge_operator
|
|
|
|
}
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
ircd::db::database::mergeop::~mergeop()
|
|
|
|
noexcept
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2017-03-24 02:36:49 +01:00
|
|
|
const char *
|
2017-03-31 00:57:08 +02:00
|
|
|
ircd::db::database::mergeop::Name()
|
2018-01-18 09:49:23 +01:00
|
|
|
const noexcept
|
2017-03-24 02:36:49 +01:00
|
|
|
{
|
|
|
|
return "<unnamed>";
|
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
2017-03-31 00:57:08 +02:00
|
|
|
ircd::db::database::mergeop::Merge(const rocksdb::Slice &_key,
|
|
|
|
const rocksdb::Slice *const _exist,
|
|
|
|
const rocksdb::Slice &_update,
|
|
|
|
std::string *const newval,
|
|
|
|
rocksdb::Logger *const)
|
2018-01-18 09:49:23 +01:00
|
|
|
const noexcept try
|
2017-03-24 02:36:49 +01:00
|
|
|
{
|
|
|
|
const string_view key
|
|
|
|
{
|
|
|
|
_key.data(), _key.size()
|
|
|
|
};
|
|
|
|
|
|
|
|
const string_view exist
|
|
|
|
{
|
|
|
|
_exist? string_view { _exist->data(), _exist->size() } : string_view{}
|
|
|
|
};
|
|
|
|
|
|
|
|
const string_view update
|
|
|
|
{
|
|
|
|
_update.data(), _update.size()
|
|
|
|
};
|
|
|
|
|
|
|
|
if(exist.empty())
|
|
|
|
{
|
|
|
|
*newval = std::string(update);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
//XXX caching opportunity?
|
|
|
|
*newval = merger(key, {exist, update}); // call the user
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
catch(const std::bad_function_call &e)
|
|
|
|
{
|
|
|
|
log.critical("merge: missing merge operator (%s)", e);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
catch(const std::exception &e)
|
|
|
|
{
|
|
|
|
log.error("merge: %s", e);
|
|
|
|
return false;
|
|
|
|
}
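// A hypothetical sketch of a user merge_closure as invoked above: it
// receives the key and an {existing, update} pair and returns the merged
// value. (The exact parameter types are assumptions; only the call shape
// `merger(key, {exist, update})` is taken from the code above.)
//
//   const db::merge_closure append_csv
//   {
//       [](const string_view &key, const auto &delta) -> std::string
//       {
//           return std::string{delta.first} + "," + std::string{delta.second};
//       }
//   };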
|
|
|
|
|
2017-04-03 06:02:32 +02:00
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
|
|
|
//
|
2018-05-24 02:03:09 +02:00
|
|
|
// db/stats.h
|
2017-04-03 06:02:32 +02:00
|
|
|
//
|
|
|
|
|
2018-05-24 01:57:49 +02:00
|
|
|
std::string
|
|
|
|
ircd::db::string(const rocksdb::IOStatsContext &ic,
|
|
|
|
const bool &all)
|
|
|
|
{
|
|
|
|
const bool exclude_zeros(!all);
|
|
|
|
return ic.ToString(exclude_zeros);
|
|
|
|
}
|
|
|
|
|
|
|
|
const rocksdb::IOStatsContext &
|
|
|
|
ircd::db::iostats_current()
|
|
|
|
{
|
|
|
|
const auto *const &ret
|
|
|
|
{
|
|
|
|
rocksdb::get_iostats_context()
|
|
|
|
};
|
|
|
|
|
|
|
|
if(unlikely(!ret))
|
|
|
|
throw error
|
|
|
|
{
|
|
|
|
"IO counters are not available on this thread."
|
|
|
|
};
|
|
|
|
|
|
|
|
return *ret;
|
|
|
|
}
|
|
|
|
|
2018-05-24 01:27:44 +02:00
|
|
|
std::string
|
|
|
|
ircd::db::string(const rocksdb::PerfContext &pc,
|
|
|
|
const bool &all)
|
|
|
|
{
|
|
|
|
const bool exclude_zeros(!all);
|
|
|
|
return pc.ToString(exclude_zeros);
|
|
|
|
}
|
|
|
|
|
|
|
|
const rocksdb::PerfContext &
|
|
|
|
ircd::db::perf_current()
|
2017-04-03 06:02:32 +02:00
|
|
|
{
|
2018-05-24 01:27:44 +02:00
|
|
|
const auto *const &ret
|
2018-04-09 21:55:22 +02:00
|
|
|
{
|
|
|
|
rocksdb::get_perf_context()
|
|
|
|
};
|
|
|
|
|
2018-05-24 01:27:44 +02:00
|
|
|
if(unlikely(!ret))
|
|
|
|
throw error
|
|
|
|
{
|
|
|
|
"Performance counters are not available on this thread."
|
|
|
|
};
|
|
|
|
|
|
|
|
return *ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
ircd::db::perf_level(const uint &level)
|
|
|
|
{
|
|
|
|
if(level >= rocksdb::PerfLevel::kOutOfBounds)
|
|
|
|
throw error
|
|
|
|
{
|
|
|
|
"Perf level of '%u' is invalid; maximum is '%u'",
|
|
|
|
level,
|
|
|
|
uint(rocksdb::PerfLevel::kOutOfBounds)
|
|
|
|
};
|
|
|
|
|
|
|
|
rocksdb::SetPerfLevel(rocksdb::PerfLevel(level));
|
|
|
|
}
|
|
|
|
|
|
|
|
uint
|
|
|
|
ircd::db::perf_level()
|
|
|
|
{
|
|
|
|
return rocksdb::GetPerfLevel();
|
2017-04-03 06:02:32 +02:00
|
|
|
}
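// A minimal usage sketch: raise the perf level for this thread, perform
// some reads, then render only the non-zero counters.
//
//   db::perf_level(uint(rocksdb::PerfLevel::kEnableTimeExceptForMutex));
//   // ... perform some db reads ...
//   const std::string report
//   {
//       db::string(db::perf_current(), false) // false: omit zero counters
//   };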
|
|
|
|
|
2018-09-26 07:05:29 +02:00
|
|
|
//
|
|
|
|
// ticker
|
|
|
|
//
|
|
|
|
|
|
|
|
uint64_t
|
|
|
|
ircd::db::ticker(const database &d,
|
|
|
|
const string_view &key)
|
|
|
|
{
|
|
|
|
return ticker(d, ticker_id(key));
|
|
|
|
}
|
|
|
|
|
|
|
|
uint64_t
|
|
|
|
ircd::db::ticker(const database &d,
|
|
|
|
const uint32_t &id)
|
|
|
|
{
|
|
|
|
return d.stats->getTickerCount(id);
|
|
|
|
}
|
|
|
|
|
2018-04-15 13:45:31 +02:00
|
|
|
uint32_t
|
|
|
|
ircd::db::ticker_id(const string_view &key)
|
|
|
|
{
|
|
|
|
for(const auto &pair : rocksdb::TickersNameMap)
|
|
|
|
if(key == pair.second)
|
|
|
|
return pair.first;
|
|
|
|
|
|
|
|
throw std::out_of_range
|
|
|
|
{
|
|
|
|
"No ticker with that key"
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
|
|
|
ircd::string_view
|
|
|
|
ircd::db::ticker_id(const uint32_t &id)
|
|
|
|
{
|
|
|
|
for(const auto &pair : rocksdb::TickersNameMap)
|
|
|
|
if(id == pair.first)
|
|
|
|
return pair.second;
|
|
|
|
|
|
|
|
return {};
|
|
|
|
}
|
|
|
|
|
|
|
|
decltype(ircd::db::ticker_max)
|
|
|
|
ircd::db::ticker_max
|
|
|
|
{
|
|
|
|
rocksdb::TICKER_ENUM_MAX
|
|
|
|
};
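// A minimal usage sketch (assuming a database reference `d`): query a
// cumulative ticker by its rocksdb name.
//
//   const uint64_t cache_hits
//   {
//       db::ticker(d, "rocksdb.block.cache.hit")
//   };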
|
|
|
|
|
2018-09-26 07:05:29 +02:00
|
|
|
//
|
|
|
|
// histogram
|
|
|
|
//
|
|
|
|
|
|
|
|
const struct ircd::db::histogram &
|
|
|
|
ircd::db::histogram(const database &d,
|
|
|
|
const string_view &key)
|
|
|
|
{
|
|
|
|
return histogram(d, histogram_id(key));
|
|
|
|
}
|
|
|
|
|
|
|
|
const struct ircd::db::histogram &
|
|
|
|
ircd::db::histogram(const database &d,
|
|
|
|
const uint32_t &id)
|
|
|
|
{
|
|
|
|
return d.stats->histogram.at(id);
|
|
|
|
}
|
|
|
|
|
2018-04-15 13:45:31 +02:00
|
|
|
uint32_t
|
|
|
|
ircd::db::histogram_id(const string_view &key)
|
|
|
|
{
|
|
|
|
for(const auto &pair : rocksdb::HistogramsNameMap)
|
|
|
|
if(key == pair.second)
|
|
|
|
return pair.first;
|
|
|
|
|
|
|
|
throw std::out_of_range
|
|
|
|
{
|
|
|
|
"No histogram with that key"
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
|
|
|
ircd::string_view
|
|
|
|
ircd::db::histogram_id(const uint32_t &id)
|
|
|
|
{
|
|
|
|
for(const auto &pair : rocksdb::HistogramsNameMap)
|
|
|
|
if(id == pair.first)
|
|
|
|
return pair.second;
|
|
|
|
|
|
|
|
return {};
|
|
|
|
}
|
|
|
|
|
|
|
|
decltype(ircd::db::histogram_max)
|
|
|
|
ircd::db::histogram_max
|
|
|
|
{
|
|
|
|
rocksdb::HISTOGRAM_ENUM_MAX
|
|
|
|
};
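// A minimal usage sketch (assuming a database reference `d`): fetch the
// accumulated get() latency histogram by its rocksdb name; the members
// referenced are those maintained by stats::measureTime() below.
//
//   const auto &h
//   {
//       db::histogram(d, "rocksdb.db.get.micros")
//   };
//   // h.hits, h.avg, h.max ...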
|
|
|
|
|
2018-05-24 02:03:09 +02:00
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
|
|
|
//
|
|
|
|
// database::stats (db/database/stats.h) internal
|
|
|
|
//
|
|
|
|
|
2018-09-27 03:00:18 +02:00
|
|
|
//
|
|
|
|
// stats::stats
|
|
|
|
//
|
|
|
|
|
2018-09-26 06:20:02 +02:00
|
|
|
ircd::db::database::stats::stats(database *const &d)
|
|
|
|
:d{d}
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2018-09-27 03:00:18 +02:00
|
|
|
ircd::db::database::stats::~stats()
|
|
|
|
noexcept
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2018-09-26 06:20:02 +02:00
|
|
|
rocksdb::Status
|
|
|
|
ircd::db::database::stats::Reset()
|
|
|
|
noexcept
|
|
|
|
{
|
|
|
|
ticker.fill(0);
|
|
|
|
histogram.fill({0.0});
|
|
|
|
return rocksdb::Status::OK();
|
|
|
|
}
|
|
|
|
|
2017-07-18 19:11:00 +02:00
|
|
|
uint64_t
|
|
|
|
ircd::db::database::stats::getAndResetTickerCount(const uint32_t type)
|
2018-01-18 09:49:23 +01:00
|
|
|
noexcept
|
2017-07-18 19:11:00 +02:00
|
|
|
{
|
|
|
|
const auto ret(getTickerCount(type));
|
|
|
|
setTickerCount(type, 0);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2017-03-24 02:36:49 +01:00
|
|
|
bool
|
2017-03-31 00:57:08 +02:00
|
|
|
ircd::db::database::stats::HistEnabledForType(const uint32_t type)
|
2018-01-18 09:49:23 +01:00
|
|
|
const noexcept
|
2017-03-24 02:36:49 +01:00
|
|
|
{
|
|
|
|
return type < histogram.size();
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2017-03-31 00:57:08 +02:00
|
|
|
ircd::db::database::stats::measureTime(const uint32_t type,
|
|
|
|
const uint64_t time)
|
2018-01-18 09:49:23 +01:00
|
|
|
noexcept
|
2017-03-24 02:36:49 +01:00
|
|
|
{
|
2018-09-26 07:05:29 +02:00
|
|
|
auto &data(histogram.at(type));
|
|
|
|
|
|
|
|
data.time += time;
|
|
|
|
data.hits++;
|
|
|
|
|
|
|
|
data.max = std::max(data.max, double(time));
|
|
|
|
data.avg = data.time / static_cast<long double>(data.hits);
|
2017-03-24 02:36:49 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2017-03-31 00:57:08 +02:00
|
|
|
ircd::db::database::stats::histogramData(const uint32_t type,
|
|
|
|
rocksdb::HistogramData *const data)
|
2018-01-18 09:49:23 +01:00
|
|
|
const noexcept
|
2017-03-24 02:36:49 +01:00
|
|
|
{
|
|
|
|
assert(data);
|
2018-09-26 07:05:29 +02:00
|
|
|
const auto &h
|
|
|
|
{
|
|
|
|
histogram.at(type)
|
|
|
|
};
|
2017-03-24 02:36:49 +01:00
|
|
|
|
2018-09-26 07:05:29 +02:00
|
|
|
data->median = h.median;
|
|
|
|
data->percentile95 = h.pct95;
|
|
|
|
data->percentile99 = h.pct99;
|
|
|
|
data->average = h.avg;
|
|
|
|
data->standard_deviation = h.stddev;
|
|
|
|
data->max = h.max;
|
2017-03-24 02:36:49 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2017-03-31 00:57:08 +02:00
|
|
|
ircd::db::database::stats::recordTick(const uint32_t type,
|
|
|
|
const uint64_t count)
|
2018-01-18 09:49:23 +01:00
|
|
|
noexcept
|
2017-03-24 02:36:49 +01:00
|
|
|
{
|
|
|
|
ticker.at(type) += count;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2017-03-31 00:57:08 +02:00
|
|
|
ircd::db::database::stats::setTickerCount(const uint32_t type,
|
|
|
|
const uint64_t count)
|
2018-01-18 09:49:23 +01:00
|
|
|
noexcept
|
2017-03-24 02:36:49 +01:00
|
|
|
{
|
|
|
|
ticker.at(type) = count;
|
|
|
|
}
|
|
|
|
|
|
|
|
uint64_t
|
2017-03-31 00:57:08 +02:00
|
|
|
ircd::db::database::stats::getTickerCount(const uint32_t type)
|
2018-01-18 09:49:23 +01:00
|
|
|
const noexcept
|
2017-03-24 02:36:49 +01:00
|
|
|
{
|
|
|
|
return ticker.at(type);
|
|
|
|
}
|
|
|
|
|
2018-09-27 03:00:18 +02:00
|
|
|
//
|
|
|
|
// database::stats::passthru
|
|
|
|
//
|
|
|
|
|
|
|
|
ircd::db::database::stats::passthru::passthru(rocksdb::Statistics *const &a,
|
|
|
|
rocksdb::Statistics *const &b)
|
|
|
|
:pass
|
|
|
|
{
|
|
|
|
{ a, b }
|
|
|
|
}
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
ircd::db::database::stats::passthru::~passthru()
|
|
|
|
noexcept
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
[[noreturn]]
|
|
|
|
rocksdb::Status
|
|
|
|
ircd::db::database::stats::passthru::Reset()
|
|
|
|
noexcept
|
|
|
|
{
|
2019-01-14 00:50:04 +01:00
|
|
|
throw panic {"Unavailable for passthru"};
|
2018-09-27 03:00:18 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
ircd::db::database::stats::passthru::recordTick(const uint32_t tickerType,
|
|
|
|
const uint64_t count)
|
|
|
|
noexcept
|
|
|
|
{
|
|
|
|
for(auto *const &pass : this->pass)
|
|
|
|
pass->recordTick(tickerType, count);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
ircd::db::database::stats::passthru::measureTime(const uint32_t histogramType,
|
|
|
|
const uint64_t time)
|
|
|
|
noexcept
|
|
|
|
{
|
|
|
|
for(auto *const &pass : this->pass)
|
|
|
|
pass->measureTime(histogramType, time);
|
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
|
|
|
ircd::db::database::stats::passthru::HistEnabledForType(const uint32_t type)
|
|
|
|
const noexcept
|
|
|
|
{
|
|
|
|
return std::all_of(begin(pass), end(pass), [&type]
|
|
|
|
(const auto *const &pass)
|
|
|
|
{
|
|
|
|
return pass->HistEnabledForType(type);
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
|
|
|
[[noreturn]]
|
|
|
|
uint64_t
|
|
|
|
ircd::db::database::stats::passthru::getTickerCount(const uint32_t tickerType)
|
|
|
|
const noexcept
|
|
|
|
{
|
2019-01-14 00:50:04 +01:00
|
|
|
throw panic {"Unavailable for passthru"};
|
2018-09-27 03:00:18 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
[[noreturn]]
|
|
|
|
void
|
|
|
|
ircd::db::database::stats::passthru::setTickerCount(const uint32_t tickerType,
|
|
|
|
const uint64_t count)
|
|
|
|
noexcept
|
|
|
|
{
|
2019-01-14 00:50:04 +01:00
|
|
|
throw panic {"Unavailable for passthru"};
|
2018-09-27 03:00:18 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
[[noreturn]]
|
|
|
|
void
|
|
|
|
ircd::db::database::stats::passthru::histogramData(const uint32_t type,
|
|
|
|
rocksdb::HistogramData *const data)
|
|
|
|
const noexcept
|
|
|
|
{
|
2019-01-14 00:50:04 +01:00
|
|
|
throw panic {"Unavailable for passthru"};
|
2018-09-27 03:00:18 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
[[noreturn]]
|
|
|
|
uint64_t
|
|
|
|
ircd::db::database::stats::passthru::getAndResetTickerCount(const uint32_t tickerType)
|
|
|
|
noexcept
|
|
|
|
{
|
2019-01-14 00:50:04 +01:00
|
|
|
throw panic {"Unavailable for passthru"};
|
2018-09-27 03:00:18 +02:00
|
|
|
}
|
|
|
|
|
2018-01-18 03:34:20 +01:00
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
|
|
|
//
|
|
|
|
// database::events
|
|
|
|
//
|
|
|
|
|
2017-03-24 02:36:49 +01:00
|
|
|
void
|
2019-04-23 04:08:01 +02:00
|
|
|
ircd::db::database::events::OnFlushCompleted(rocksdb::DB *const db,
|
|
|
|
const rocksdb::FlushJobInfo &info)
|
2018-01-18 09:49:23 +01:00
|
|
|
noexcept
|
2017-03-24 02:36:49 +01:00
|
|
|
{
|
2018-09-19 03:04:41 +02:00
|
|
|
log::info
|
|
|
|
{
|
2019-04-23 04:08:01 +02:00
|
|
|
log, "'%s': job:%d ctx:%lu flush ended writes[slow:%d stop:%d] seq[%zu -> %zu] %s '%s' `%s'",
|
2018-09-19 03:04:41 +02:00
|
|
|
d->name,
|
2018-12-12 23:15:59 +01:00
|
|
|
info.job_id,
|
2018-09-19 03:04:41 +02:00
|
|
|
info.thread_id,
|
|
|
|
info.triggered_writes_slowdown,
|
|
|
|
info.triggered_writes_stop,
|
|
|
|
info.smallest_seqno,
|
|
|
|
info.largest_seqno,
|
2018-12-19 22:39:06 +01:00
|
|
|
reflect(info.flush_reason),
|
2018-12-19 22:03:40 +01:00
|
|
|
info.cf_name,
|
2019-04-23 04:08:01 +02:00
|
|
|
info.file_path,
|
2018-09-19 03:04:41 +02:00
|
|
|
};
|
2018-12-12 23:52:04 +01:00
|
|
|
|
2019-04-23 04:08:01 +02:00
|
|
|
assert(info.thread_id == ctx::id(*ctx::current));
|
2018-09-19 03:04:41 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2019-04-23 04:08:01 +02:00
|
|
|
ircd::db::database::events::OnFlushBegin(rocksdb::DB *const db,
|
|
|
|
const rocksdb::FlushJobInfo &info)
|
2018-09-19 03:04:41 +02:00
|
|
|
noexcept
|
|
|
|
{
|
|
|
|
log::info
|
|
|
|
{
|
2019-04-23 04:08:01 +02:00
|
|
|
log, "'%s': job:%d ctx:%lu flush start writes[slow:%d stop:%d] seq[%zu -> %zu] %s '%s'",
|
2018-09-19 03:04:41 +02:00
|
|
|
d->name,
|
2018-12-12 23:15:59 +01:00
|
|
|
info.job_id,
|
2018-09-19 03:04:41 +02:00
|
|
|
info.thread_id,
|
|
|
|
info.triggered_writes_slowdown,
|
|
|
|
info.triggered_writes_stop,
|
|
|
|
info.smallest_seqno,
|
|
|
|
info.largest_seqno,
|
2018-12-19 22:39:06 +01:00
|
|
|
reflect(info.flush_reason),
|
2018-12-19 22:03:40 +01:00
|
|
|
info.cf_name,
|
2018-09-19 03:04:41 +02:00
|
|
|
};
|
2019-04-23 04:08:01 +02:00
|
|
|
|
|
|
|
assert(info.thread_id == ctx::id(*ctx::current));
|
2017-03-24 02:36:49 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2017-03-31 00:57:08 +02:00
|
|
|
ircd::db::database::events::OnCompactionCompleted(rocksdb::DB *const db,
|
|
|
|
const rocksdb::CompactionJobInfo &info)
|
2018-01-18 09:49:23 +01:00
|
|
|
noexcept
|
2017-03-24 02:36:49 +01:00
|
|
|
{
|
2018-12-19 21:52:08 +01:00
|
|
|
const log::level level
|
2018-12-17 03:02:52 +01:00
|
|
|
{
|
|
|
|
info.status == rocksdb::Status::OK()?
|
2018-12-19 21:52:08 +01:00
|
|
|
log::level::INFO:
|
|
|
|
log::level::ERROR
|
2018-12-17 03:02:52 +01:00
|
|
|
};
|
|
|
|
|
|
|
|
log::logf
|
2018-09-19 03:04:41 +02:00
|
|
|
{
|
2018-12-19 21:52:08 +01:00
|
|
|
log, level,
|
2018-12-19 22:39:06 +01:00
|
|
|
"'%s': job:%d ctx:%lu compacted level[%d -> %d] files[%zu -> %zu] %s '%s' (%d): %s",
|
2018-09-19 03:04:41 +02:00
|
|
|
d->name,
|
2018-12-12 23:15:59 +01:00
|
|
|
info.job_id,
|
2018-09-19 03:04:41 +02:00
|
|
|
info.thread_id,
|
|
|
|
info.base_input_level,
|
|
|
|
info.output_level,
|
|
|
|
info.input_files.size(),
|
|
|
|
info.output_files.size(),
|
2018-12-19 22:39:06 +01:00
|
|
|
reflect(info.compaction_reason),
|
2018-12-19 22:03:40 +01:00
|
|
|
info.cf_name,
|
2018-12-12 23:15:59 +01:00
|
|
|
int(info.status.code()),
|
|
|
|
info.status.getState()?: "OK",
|
2018-09-19 03:04:41 +02:00
|
|
|
};
|
2018-12-12 23:52:04 +01:00
|
|
|
|
2019-01-01 00:25:53 +01:00
|
|
|
const bool bytes_same
|
|
|
|
{
|
|
|
|
info.stats.total_input_bytes == info.stats.total_output_bytes
|
|
|
|
};
|
|
|
|
|
|
|
|
log::debug
|
|
|
|
{
|
|
|
|
log, "'%s': job:%d keys[in:%zu out:%zu upd:%zu] bytes[%s -> %s] falloc:%s write:%s rsync:%s fsync:%s total:%s",
|
|
|
|
d->name,
|
|
|
|
info.job_id,
|
|
|
|
info.stats.num_input_records,
|
|
|
|
info.stats.num_output_records,
|
|
|
|
info.stats.num_records_replaced,
|
|
|
|
pretty(iec(info.stats.total_input_bytes)),
|
|
|
|
bytes_same? "same": pretty(iec(info.stats.total_output_bytes)),
|
|
|
|
pretty(nanoseconds(info.stats.file_prepare_write_nanos), true),
|
|
|
|
pretty(nanoseconds(info.stats.file_write_nanos), true),
|
|
|
|
pretty(nanoseconds(info.stats.file_range_sync_nanos), true),
|
|
|
|
pretty(nanoseconds(info.stats.file_fsync_nanos), true),
|
|
|
|
pretty(microseconds(info.stats.elapsed_micros), true),
|
|
|
|
};
|
|
|
|
|
|
|
|
if(info.stats.num_corrupt_keys > 0)
|
|
|
|
log::error
|
|
|
|
{
|
|
|
|
log, "'%s': job:%d reported %lu corrupt keys.",
|
|
|
|
d->name,
|
|
|
|
info.job_id,
|
|
|
|
info.stats.num_corrupt_keys
|
|
|
|
};
|
|
|
|
|
2018-12-12 23:52:04 +01:00
|
|
|
assert(info.thread_id == ctx::id(*ctx::current));
|
2017-03-24 02:36:49 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2017-03-31 00:57:08 +02:00
|
|
|
ircd::db::database::events::OnTableFileDeleted(const rocksdb::TableFileDeletionInfo &info)
|
2018-01-18 09:49:23 +01:00
|
|
|
noexcept
|
2017-03-24 02:36:49 +01:00
|
|
|
{
|
2018-12-19 21:52:08 +01:00
|
|
|
const log::level level
|
2018-12-17 03:02:52 +01:00
|
|
|
{
|
|
|
|
info.status == rocksdb::Status::OK()?
|
2018-12-19 21:52:08 +01:00
|
|
|
log::level::DEBUG:
|
|
|
|
log::level::ERROR
|
2018-12-17 03:02:52 +01:00
|
|
|
};
|
|
|
|
|
|
|
|
log::logf
|
2018-09-19 03:04:41 +02:00
|
|
|
{
|
2018-12-19 21:52:08 +01:00
|
|
|
log, level,
|
2018-12-19 22:39:06 +01:00
|
|
|
"'%s': job:%d table file delete [%s][%s] (%d): %s",
|
2018-09-19 03:04:41 +02:00
|
|
|
d->name,
|
2018-12-12 23:15:59 +01:00
|
|
|
info.job_id,
|
2018-09-19 03:04:41 +02:00
|
|
|
info.db_name,
|
2018-12-12 19:39:48 +01:00
|
|
|
lstrip(info.file_path, info.db_name),
|
2018-09-19 03:04:41 +02:00
|
|
|
int(info.status.code()),
|
2018-12-12 23:15:59 +01:00
|
|
|
info.status.getState()?: "OK",
|
2018-09-19 03:04:41 +02:00
|
|
|
};
|
2017-03-24 02:36:49 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2017-03-31 00:57:08 +02:00
|
|
|
ircd::db::database::events::OnTableFileCreated(const rocksdb::TableFileCreationInfo &info)
|
2018-01-18 09:49:23 +01:00
|
|
|
noexcept
|
2017-03-24 02:36:49 +01:00
|
|
|
{
|
2018-12-19 21:52:08 +01:00
|
|
|
const log::level level
|
2018-09-19 03:04:41 +02:00
|
|
|
{
|
2018-12-17 03:02:52 +01:00
|
|
|
info.status == rocksdb::Status::OK()?
|
2018-12-19 21:52:08 +01:00
|
|
|
log::level::DEBUG:
|
|
|
|
log::level::ERROR
|
2018-12-17 03:02:52 +01:00
|
|
|
};
|
|
|
|
|
|
|
|
log::logf
|
|
|
|
{
|
2018-12-19 21:52:08 +01:00
|
|
|
log, level,
|
2019-01-01 01:26:57 +01:00
|
|
|
"'%s': job:%d table file closed [%s][%s] size:%s '%s' (%d): %s",
|
2018-09-19 03:04:41 +02:00
|
|
|
d->name,
|
2018-12-12 23:15:59 +01:00
|
|
|
info.job_id,
|
2018-09-19 03:04:41 +02:00
|
|
|
info.db_name,
|
2018-12-12 19:39:48 +01:00
|
|
|
lstrip(info.file_path, info.db_name),
|
2019-01-01 01:26:57 +01:00
|
|
|
pretty(iec(info.file_size)),
|
2018-12-12 23:15:59 +01:00
|
|
|
info.cf_name,
|
2018-09-19 03:04:41 +02:00
|
|
|
int(info.status.code()),
|
2018-12-12 23:15:59 +01:00
|
|
|
info.status.getState()?: "OK",
|
2018-09-19 03:04:41 +02:00
|
|
|
};
|
2019-01-01 00:52:47 +01:00
|
|
|
|
|
|
|
log::debug
|
|
|
|
{
|
|
|
|
log, "'%s': job:%d head[%s] index[%s] filter[%s] data[%lu %s] keys[%lu %s] vals[%s] %s",
|
|
|
|
d->name,
|
|
|
|
info.job_id,
|
|
|
|
pretty(iec(info.table_properties.top_level_index_size)),
|
|
|
|
pretty(iec(info.table_properties.index_size)),
|
|
|
|
pretty(iec(info.table_properties.filter_size)),
|
|
|
|
info.table_properties.num_data_blocks,
|
|
|
|
pretty(iec(info.table_properties.data_size)),
|
|
|
|
info.table_properties.num_entries,
|
|
|
|
pretty(iec(info.table_properties.raw_key_size)),
|
|
|
|
pretty(iec(info.table_properties.raw_value_size)),
|
|
|
|
info.table_properties.compression_name
|
|
|
|
};
|
2017-03-24 02:36:49 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2017-03-31 00:57:08 +02:00
|
|
|
ircd::db::database::events::OnTableFileCreationStarted(const rocksdb::TableFileCreationBriefInfo &info)
|
2018-01-18 09:49:23 +01:00
|
|
|
noexcept
|
2017-03-24 02:36:49 +01:00
|
|
|
{
|
2018-09-19 03:04:41 +02:00
|
|
|
log::debug
|
|
|
|
{
|
2018-12-19 22:03:40 +01:00
|
|
|
log, "'%s': job:%d table file opened [%s][%s] '%s'",
|
2018-09-19 03:04:41 +02:00
|
|
|
d->name,
|
2018-12-12 23:15:59 +01:00
|
|
|
info.job_id,
|
2018-09-19 03:04:41 +02:00
|
|
|
info.db_name,
|
2018-12-12 19:39:48 +01:00
|
|
|
lstrip(info.file_path, info.db_name),
|
2018-12-12 23:15:59 +01:00
|
|
|
info.cf_name,
|
2018-09-19 03:04:41 +02:00
|
|
|
};
|
2017-03-24 02:36:49 +01:00
|
|
|
}
|
|
|
|
|
2019-04-23 04:08:01 +02:00
|
|
|
void
|
|
|
|
ircd::db::database::events::OnMemTableSealed(const rocksdb::MemTableInfo &info)
|
|
|
|
noexcept
|
|
|
|
{
|
|
|
|
log::debug
|
|
|
|
{
|
|
|
|
log, "'%s': memory table sealed '%s' entries:%lu deletes:%lu",
|
|
|
|
d->name,
|
|
|
|
info.cf_name,
|
|
|
|
info.num_entries,
|
|
|
|
info.num_deletes
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
2017-03-24 02:36:49 +01:00
|
|
|
void
|
2017-03-31 00:57:08 +02:00
|
|
|
ircd::db::database::events::OnColumnFamilyHandleDeletionStarted(rocksdb::ColumnFamilyHandle *const h)
|
2018-01-18 09:49:23 +01:00
|
|
|
noexcept
|
2017-03-24 02:36:49 +01:00
|
|
|
{
|
2018-09-19 03:04:41 +02:00
|
|
|
log::debug
|
|
|
|
{
|
2018-12-12 22:24:47 +01:00
|
|
|
log, "'%s': column[%s] handle closing @ %p",
|
2018-09-19 03:04:41 +02:00
|
|
|
d->name,
|
|
|
|
h->GetName(),
|
|
|
|
h
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
ircd::db::database::events::OnExternalFileIngested(rocksdb::DB *const d,
|
|
|
|
const rocksdb::ExternalFileIngestionInfo &info)
|
|
|
|
noexcept
|
|
|
|
{
|
|
|
|
log::notice
|
|
|
|
{
|
2018-12-12 22:24:47 +01:00
|
|
|
log, "'%s': external file ingested column[%s] external[%s] internal[%s] sequence:%lu",
|
2018-09-19 03:04:41 +02:00
|
|
|
this->d->name,
|
|
|
|
info.cf_name,
|
|
|
|
info.external_file_path,
|
|
|
|
info.internal_file_path,
|
|
|
|
info.global_seqno
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
ircd::db::database::events::OnBackgroundError(rocksdb::BackgroundErrorReason reason,
|
|
|
|
rocksdb::Status *const status)
|
|
|
|
noexcept
|
|
|
|
{
|
2018-10-31 23:29:00 +01:00
|
|
|
assert(d);
|
2018-09-19 03:04:41 +02:00
|
|
|
assert(status);
|
2018-10-31 23:29:00 +01:00
|
|
|
|
|
|
|
thread_local char buf[1024];
|
|
|
|
const string_view str{fmt::sprintf
|
2018-09-19 03:04:41 +02:00
|
|
|
{
|
2018-10-31 23:29:00 +01:00
|
|
|
buf, "%s error in %s :%s",
|
2018-10-31 22:48:14 +01:00
|
|
|
reflect(status->severity()),
|
2018-10-31 22:40:00 +01:00
|
|
|
reflect(reason),
|
2018-09-19 03:04:41 +02:00
|
|
|
status->ToString()
|
2018-10-31 23:29:00 +01:00
|
|
|
}};
|
2018-10-31 23:03:32 +01:00
|
|
|
|
|
|
|
// This is legitimate when we want to use it. If the error is not
|
|
|
|
// suppressed, the DB will enter read-only mode and will require a
|
|
|
|
// call to db::resume() to clear the error (i.e. by an admin at the console).
|
|
|
|
const bool ignore
|
|
|
|
{
|
|
|
|
false
|
|
|
|
};
|
|
|
|
|
2018-12-19 21:52:08 +01:00
|
|
|
const log::level fac
|
2018-10-31 23:29:00 +01:00
|
|
|
{
|
|
|
|
ignore?
|
2018-12-19 21:52:08 +01:00
|
|
|
log::level::DERROR:
|
|
|
|
log::level::ERROR
|
2018-10-31 23:29:00 +01:00
|
|
|
};
|
|
|
|
|
|
|
|
log::logf
|
|
|
|
{
|
|
|
|
log, fac, "'%s': %s", d->name, str
|
|
|
|
};
|
|
|
|
|
2018-10-31 23:03:32 +01:00
|
|
|
if(ignore)
|
2018-10-31 23:29:00 +01:00
|
|
|
{
|
2018-10-31 23:03:32 +01:00
|
|
|
*status = rocksdb::Status::OK();
|
2018-10-31 23:29:00 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2018-11-02 08:05:21 +01:00
|
|
|
// Downgrade select fatal errors to hard errors. If this downgrade
|
|
|
|
// does not occur then it can never be cleared by a db::resume() and
|
|
|
|
// the daemon must be restarted.
|
|
|
|
|
|
|
|
if(reason == rocksdb::BackgroundErrorReason::kCompaction)
|
|
|
|
if(status->severity() == rocksdb::Status::kFatalError)
|
|
|
|
*status = rocksdb::Status(*status, rocksdb::Status::kHardError);
|
|
|
|
|
|
|
|
// Save the error string to the database instance for later examination.
|
2018-10-31 23:29:00 +01:00
|
|
|
d->errors.emplace_back(str);
|
2018-09-19 03:04:41 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
ircd::db::database::events::OnStallConditionsChanged(const rocksdb::WriteStallInfo &info)
|
|
|
|
noexcept
|
|
|
|
{
|
|
|
|
log::warning
|
|
|
|
{
|
2018-12-12 22:24:47 +01:00
|
|
|
log, "'%s' stall condition column[%s] %s -> %s",
|
2018-09-19 03:04:41 +02:00
|
|
|
d->name,
|
|
|
|
info.cf_name,
|
2018-10-31 22:40:00 +01:00
|
|
|
reflect(info.condition.prev),
|
|
|
|
reflect(info.condition.cur)
|
2018-09-19 03:04:41 +02:00
|
|
|
};
|
2017-03-24 02:36:49 +01:00
|
|
|
}
|
|
|
|
|
2018-09-03 02:29:44 +02:00
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
|
|
|
//
|
|
|
|
// database::cache (internal)
|
|
|
|
//
|
|
|
|
|
|
|
|
decltype(ircd::db::database::cache::DEFAULT_SHARD_BITS)
|
|
|
|
ircd::db::database::cache::DEFAULT_SHARD_BITS
|
2018-09-26 11:03:31 +02:00
|
|
|
(
|
2019-04-24 10:52:59 +02:00
|
|
|
std::log2(std::min(size_t(db::request_pool_size), 16UL))
|
2018-09-26 11:03:31 +02:00
|
|
|
);
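// Worked example: with the default ircd.db.request_pool.size of 32 this
// evaluates to log2(min(32, 16)) = log2(16) = 4, i.e. a 16-shard LRU cache.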
|
2018-09-03 02:29:44 +02:00
|
|
|
|
|
|
|
decltype(ircd::db::database::cache::DEFAULT_STRICT)
|
|
|
|
ircd::db::database::cache::DEFAULT_STRICT
|
|
|
|
{
|
2018-09-03 12:24:37 +02:00
|
|
|
false
|
2018-09-03 02:29:44 +02:00
|
|
|
};
|
|
|
|
|
|
|
|
decltype(ircd::db::database::cache::DEFAULT_HI_PRIO)
|
|
|
|
ircd::db::database::cache::DEFAULT_HI_PRIO
|
|
|
|
{
|
2019-06-08 09:05:00 +02:00
|
|
|
0.25
|
2018-09-03 02:29:44 +02:00
|
|
|
};
|
|
|
|
|
|
|
|
//
|
|
|
|
// cache::cache
|
|
|
|
//
|
|
|
|
|
|
|
|
ircd::db::database::cache::cache(database *const &d,
|
2018-10-21 08:30:27 +02:00
|
|
|
std::shared_ptr<struct database::stats> stats,
|
2019-01-12 01:21:04 +01:00
|
|
|
std::string name,
|
2018-09-03 02:29:44 +02:00
|
|
|
const ssize_t &initial_capacity)
|
|
|
|
:d{d}
|
2019-01-12 01:21:04 +01:00
|
|
|
,name{std::move(name)}
|
2018-10-21 08:30:27 +02:00
|
|
|
,stats{std::move(stats)}
|
2018-09-03 02:29:44 +02:00
|
|
|
,c
|
|
|
|
{
|
|
|
|
rocksdb::NewLRUCache
|
|
|
|
(
|
2018-09-27 03:00:18 +02:00
|
|
|
std::max(initial_capacity, ssize_t(0))
|
|
|
|
,DEFAULT_SHARD_BITS
|
|
|
|
,DEFAULT_STRICT
|
|
|
|
,DEFAULT_HI_PRIO
|
2018-09-03 02:29:44 +02:00
|
|
|
)
|
|
|
|
}
|
|
|
|
{
|
2018-09-27 03:00:18 +02:00
|
|
|
assert(bool(c));
|
2018-09-03 02:29:44 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
ircd::db::database::cache::~cache()
|
|
|
|
noexcept
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
const char *
|
|
|
|
ircd::db::database::cache::Name()
|
|
|
|
const noexcept
|
|
|
|
{
|
2019-01-12 01:21:04 +01:00
|
|
|
return !empty(name)?
|
|
|
|
name.c_str():
|
|
|
|
c->Name();
|
2018-09-03 02:29:44 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
rocksdb::Status
|
|
|
|
ircd::db::database::cache::Insert(const Slice &key,
|
|
|
|
void *const value,
|
|
|
|
size_t charge,
|
|
|
|
deleter del,
|
|
|
|
Handle **const handle,
|
|
|
|
Priority priority)
|
|
|
|
noexcept
|
|
|
|
{
|
|
|
|
assert(bool(c));
|
2018-10-21 08:30:27 +02:00
|
|
|
assert(bool(stats));
|
|
|
|
|
2018-09-05 11:56:50 +02:00
|
|
|
const rocksdb::Status &ret
|
|
|
|
{
|
|
|
|
c->Insert(key, value, charge, del, handle, priority)
|
|
|
|
};
|
|
|
|
|
2018-10-21 08:30:27 +02:00
|
|
|
stats->recordTick(rocksdb::Tickers::BLOCK_CACHE_ADD, ret.ok());
|
|
|
|
stats->recordTick(rocksdb::Tickers::BLOCK_CACHE_ADD_FAILURES, !ret.ok());
|
|
|
|
stats->recordTick(rocksdb::Tickers::BLOCK_CACHE_DATA_BYTES_INSERT, ret.ok()? charge : 0UL);
|
2018-09-05 11:56:50 +02:00
|
|
|
return ret;
|
2018-09-03 02:29:44 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
rocksdb::Cache::Handle *
|
|
|
|
ircd::db::database::cache::Lookup(const Slice &key,
|
|
|
|
Statistics *const statistics)
|
|
|
|
noexcept
|
|
|
|
{
|
|
|
|
assert(bool(c));
|
2018-10-21 08:30:27 +02:00
|
|
|
assert(bool(this->stats));
|
2018-09-27 03:00:18 +02:00
|
|
|
|
|
|
|
database::stats::passthru passthru
|
|
|
|
{
|
2018-10-21 08:30:27 +02:00
|
|
|
this->stats.get(), statistics
|
2018-09-27 03:00:18 +02:00
|
|
|
};
|
|
|
|
|
|
|
|
rocksdb::Statistics *const s
|
|
|
|
{
|
|
|
|
statistics?
|
|
|
|
dynamic_cast<rocksdb::Statistics *>(&passthru):
|
2018-10-21 08:30:27 +02:00
|
|
|
dynamic_cast<rocksdb::Statistics *>(this->stats.get())
|
2018-09-27 03:00:18 +02:00
|
|
|
};
|
|
|
|
|
2018-09-05 11:56:50 +02:00
|
|
|
auto *const &ret
|
|
|
|
{
|
2018-09-27 03:00:18 +02:00
|
|
|
c->Lookup(key, s)
|
2018-09-05 11:56:50 +02:00
|
|
|
};
|
|
|
|
|
2018-09-27 03:00:18 +02:00
|
|
|
// Rocksdb's LRUCache stats are broken. The statistics ptr is null and
|
|
|
|
// passing it to Lookup() does nothing internally. We have to do this
|
|
|
|
// here ourselves :/
|
|
|
|
|
2018-10-21 08:30:27 +02:00
|
|
|
this->stats->recordTick(rocksdb::Tickers::BLOCK_CACHE_HIT, bool(ret));
|
|
|
|
this->stats->recordTick(rocksdb::Tickers::BLOCK_CACHE_MISS, !bool(ret));
|
2018-09-05 11:56:50 +02:00
|
|
|
return ret;
|
2018-09-03 02:29:44 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
|
|
|
ircd::db::database::cache::Ref(Handle *const handle)
|
|
|
|
noexcept
|
|
|
|
{
|
|
|
|
assert(bool(c));
|
|
|
|
return c->Ref(handle);
|
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
|
|
|
ircd::db::database::cache::Release(Handle *const handle,
|
|
|
|
bool force_erase)
|
|
|
|
noexcept
|
|
|
|
{
|
|
|
|
assert(bool(c));
|
|
|
|
return c->Release(handle, force_erase);
|
|
|
|
}
|
|
|
|
|
|
|
|
void *
|
|
|
|
ircd::db::database::cache::Value(Handle *const handle)
|
|
|
|
noexcept
|
|
|
|
{
|
|
|
|
assert(bool(c));
|
|
|
|
return c->Value(handle);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
ircd::db::database::cache::Erase(const Slice &key)
|
|
|
|
noexcept
|
|
|
|
{
|
|
|
|
assert(bool(c));
|
|
|
|
return c->Erase(key);
|
|
|
|
}
|
|
|
|
|
|
|
|
uint64_t
|
|
|
|
ircd::db::database::cache::NewId()
|
|
|
|
noexcept
|
|
|
|
{
|
|
|
|
assert(bool(c));
|
|
|
|
return c->NewId();
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
ircd::db::database::cache::SetCapacity(size_t capacity)
|
|
|
|
noexcept
|
|
|
|
{
|
|
|
|
assert(bool(c));
|
|
|
|
return c->SetCapacity(capacity);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
ircd::db::database::cache::SetStrictCapacityLimit(bool strict_capacity_limit)
|
|
|
|
noexcept
|
|
|
|
{
|
|
|
|
assert(bool(c));
|
|
|
|
return c->SetStrictCapacityLimit(strict_capacity_limit);
|
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
|
|
|
ircd::db::database::cache::HasStrictCapacityLimit()
|
|
|
|
const noexcept
|
|
|
|
{
|
|
|
|
assert(bool(c));
|
|
|
|
return c->HasStrictCapacityLimit();
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t
|
|
|
|
ircd::db::database::cache::GetCapacity()
|
|
|
|
const noexcept
|
|
|
|
{
|
|
|
|
assert(bool(c));
|
|
|
|
return c->GetCapacity();
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t
|
|
|
|
ircd::db::database::cache::GetUsage()
|
|
|
|
const noexcept
|
|
|
|
{
|
|
|
|
assert(bool(c));
|
|
|
|
return c->GetUsage();
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t
|
|
|
|
ircd::db::database::cache::GetUsage(Handle *const handle)
|
|
|
|
const noexcept
|
|
|
|
{
|
|
|
|
assert(bool(c));
|
|
|
|
return c->GetUsage(handle);
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t
|
|
|
|
ircd::db::database::cache::GetPinnedUsage()
|
|
|
|
const noexcept
|
|
|
|
{
|
|
|
|
assert(bool(c));
|
|
|
|
return c->GetPinnedUsage();
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
ircd::db::database::cache::DisownData()
|
|
|
|
noexcept
|
|
|
|
{
|
|
|
|
assert(bool(c));
|
|
|
|
return c->DisownData();
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
ircd::db::database::cache::ApplyToAllCacheEntries(callback cb,
|
|
|
|
bool thread_safe)
|
|
|
|
noexcept
|
|
|
|
{
|
|
|
|
assert(bool(c));
|
|
|
|
return c->ApplyToAllCacheEntries(cb, thread_safe);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
ircd::db::database::cache::EraseUnRefEntries()
|
|
|
|
noexcept
|
|
|
|
{
|
|
|
|
assert(bool(c));
|
|
|
|
return c->EraseUnRefEntries();
|
|
|
|
}
|
|
|
|
|
|
|
|
std::string
|
|
|
|
ircd::db::database::cache::GetPrintableOptions()
|
|
|
|
const noexcept
|
|
|
|
{
|
|
|
|
assert(bool(c));
|
|
|
|
return c->GetPrintableOptions();
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
ircd::db::database::cache::TEST_mark_as_data_block(const Slice &key,
|
|
|
|
size_t charge)
|
|
|
|
noexcept
|
|
|
|
{
|
|
|
|
assert(bool(c));
|
|
|
|
return c->TEST_mark_as_data_block(key, charge);
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2018-09-19 00:07:09 +02:00
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
|
|
|
//
|
|
|
|
// database::compaction_filter
|
|
|
|
//
|
|
|
|
|
|
|
|
ircd::db::database::compaction_filter::compaction_filter(column *const &c,
|
|
|
|
db::compactor user)
|
|
|
|
:c{c}
|
|
|
|
,d{c->d}
|
|
|
|
,user{std::move(user)}
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
ircd::db::database::compaction_filter::~compaction_filter()
|
|
|
|
noexcept
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
rocksdb::CompactionFilter::Decision
|
|
|
|
ircd::db::database::compaction_filter::FilterV2(const int level,
|
|
|
|
const Slice &key,
|
|
|
|
const ValueType type,
|
|
|
|
const Slice &oldval,
|
|
|
|
std::string *const newval,
|
|
|
|
std::string *const skip)
|
2018-09-20 01:55:29 +02:00
|
|
|
const noexcept
|
2018-09-19 00:07:09 +02:00
|
|
|
{
|
2018-09-20 01:55:29 +02:00
|
|
|
const ctx::uninterruptible::nothrow ui;
|
|
|
|
|
2018-09-19 00:07:09 +02:00
|
|
|
#ifdef RB_DEBUG_DB_ENV
|
|
|
|
const auto typestr
|
|
|
|
{
|
|
|
|
type == kValue?
|
|
|
|
"VALUE"_sv:
|
|
|
|
type == kMergeOperand?
|
|
|
|
"MERGE"_sv:
|
|
|
|
"BLOB"_sv
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
|
2018-12-13 22:44:37 +01:00
|
|
|
static const compactor::callback empty;
|
2018-09-20 01:35:28 +02:00
|
|
|
const db::compactor::callback &callback
|
2018-09-19 00:07:09 +02:00
|
|
|
{
|
2018-09-20 01:35:28 +02:00
|
|
|
type == ValueType::kValue && user.value?
|
|
|
|
user.value:
|
2018-09-19 00:07:09 +02:00
|
|
|
|
2018-09-20 01:35:28 +02:00
|
|
|
type == ValueType::kMergeOperand && user.merge?
|
|
|
|
user.merge:
|
2018-09-19 00:07:09 +02:00
|
|
|
|
2018-12-13 22:44:37 +01:00
|
|
|
empty
|
2018-09-20 01:35:28 +02:00
|
|
|
};
|
2018-09-19 00:07:09 +02:00
|
|
|
|
2018-09-20 01:35:28 +02:00
|
|
|
if(!callback)
|
|
|
|
return Decision::kKeep;
|
|
|
|
|
2018-12-29 03:18:13 +01:00
|
|
|
#ifdef RB_DEBUG_DB_ENV
|
|
|
|
log::debug
|
|
|
|
{
|
|
|
|
log, "'%s':'%s': compaction level:%d key:%zu@%p type:%s old:%zu@%p new:%p skip:%p",
|
|
|
|
d->name,
|
|
|
|
c->name,
|
|
|
|
level,
|
|
|
|
size(key),
|
|
|
|
data(key),
|
|
|
|
typestr,
|
|
|
|
size(oldval),
|
|
|
|
data(oldval),
|
|
|
|
(const void *)newval,
|
|
|
|
(const void *)skip
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
|
2018-09-20 01:35:28 +02:00
|
|
|
const compactor::args args
|
|
|
|
{
|
|
|
|
level, slice(key), slice(oldval), newval, skip
|
|
|
|
};
|
2018-09-19 00:07:09 +02:00
|
|
|
|
2018-09-20 01:35:28 +02:00
|
|
|
switch(callback(args))
|
2018-09-19 00:07:09 +02:00
|
|
|
{
|
|
|
|
default:
|
|
|
|
case db::op::GET: return Decision::kKeep;
|
|
|
|
case db::op::SET: return Decision::kChangeValue;
|
|
|
|
case db::op::DELETE: return Decision::kRemove;
|
|
|
|
case db::op::DELETE_RANGE: return Decision::kRemoveAndSkipUntil;
|
|
|
|
}
|
|
|
|
}
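// Example (illustrative sketch, not part of the build): a user-supplied
// compactor value-callback. This assumes db::compactor::callback is
// assignable from a lambda taking a const compactor::args & and returning a
// db::op, which is what the dispatch in FilterV2() above implies; the member
// name user.value is taken from the code above.
//
//   db::compactor user;
//   user.value = [](const db::compactor::args &args)
//   {
//       // op::GET maps to Decision::kKeep above; returning op::DELETE
//       // would map to kRemove and drop the record during compaction.
//       return db::op::GET;
//   };
//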
|
|
|
|
|
|
|
|
bool
|
|
|
|
ircd::db::database::compaction_filter::IgnoreSnapshots()
|
2018-09-20 01:55:29 +02:00
|
|
|
const noexcept
|
2018-09-19 00:07:09 +02:00
|
|
|
{
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
const char *
|
|
|
|
ircd::db::database::compaction_filter::Name()
|
2018-09-20 01:55:29 +02:00
|
|
|
const noexcept
|
2018-09-19 00:07:09 +02:00
|
|
|
{
|
|
|
|
assert(c);
|
|
|
|
return db::name(*c).c_str();
|
|
|
|
}
|
|
|
|
|
2019-04-20 23:22:08 +02:00
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
|
|
|
//
|
|
|
|
// database::wal_filter
|
|
|
|
//
|
|
|
|
|
|
|
|
decltype(ircd::db::database::wal_filter::debug)
|
|
|
|
ircd::db::database::wal_filter::debug
|
|
|
|
{
|
|
|
|
{ "name", "ircd.db.wal.debug" },
|
|
|
|
{ "default", false },
|
|
|
|
{ "persist", false },
|
|
|
|
};
|
|
|
|
|
|
|
|
ircd::db::database::wal_filter::wal_filter(database *const &d)
|
|
|
|
:d{d}
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
ircd::db::database::wal_filter::~wal_filter()
|
|
|
|
noexcept
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
ircd::db::database::wal_filter::ColumnFamilyLogNumberMap(const log_number_map &log_number,
|
|
|
|
const name_id_map &name_id)
|
|
|
|
noexcept
|
|
|
|
{
|
|
|
|
assert(d);
|
|
|
|
|
|
|
|
this->log_number = log_number;
|
|
|
|
this->name_id = name_id;
|
|
|
|
|
|
|
|
log::debug
|
|
|
|
{
|
|
|
|
log, "'%s': WAL recovery mapping update: log_number:%zu name_id:%zu",
|
|
|
|
db::name(*d),
|
|
|
|
log_number.size(),
|
|
|
|
name_id.size(),
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
|
|
|
rocksdb::WalFilter::WalProcessingOption
|
|
|
|
ircd::db::database::wal_filter::LogRecordFound(unsigned long long log_nr,
|
|
|
|
const std::string &name,
|
|
|
|
const WriteBatch &wb,
|
|
|
|
WriteBatch *const replace,
|
|
|
|
bool *const replaced)
|
|
|
|
noexcept
|
|
|
|
{
|
|
|
|
assert(d && replace && replaced);
|
|
|
|
|
|
|
|
if(debug) log::debug
|
|
|
|
{
|
|
|
|
log, "'%s': WAL recovery record log:%lu '%s' wb[count:%zu size:%zu]",
|
|
|
|
db::name(*d),
|
|
|
|
log_nr,
|
|
|
|
name,
|
|
|
|
wb.Count(),
|
|
|
|
wb.GetDataSize(),
|
|
|
|
};
|
|
|
|
|
|
|
|
*replaced = false;
|
|
|
|
return WalProcessingOption::kContinueProcessing;
|
|
|
|
}
|
|
|
|
|
|
|
|
rocksdb::WalFilter::WalProcessingOption
|
|
|
|
ircd::db::database::wal_filter::LogRecord(const WriteBatch &wb,
|
|
|
|
WriteBatch *const replace,
|
|
|
|
bool *const replaced)
|
|
|
|
const noexcept
|
|
|
|
{
|
|
|
|
return WalProcessingOption::kContinueProcessing;
|
|
|
|
}
|
|
|
|
|
|
|
|
const char *
|
|
|
|
ircd::db::database::wal_filter::Name()
|
|
|
|
const noexcept
|
|
|
|
{
|
|
|
|
assert(d);
|
|
|
|
return db::name(*d).c_str();
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2018-09-21 00:57:15 +02:00
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
|
|
|
//
|
2018-09-22 22:13:05 +02:00
|
|
|
// database::sst
|
2018-09-21 00:57:15 +02:00
|
|
|
//
|
|
|
|
|
2018-09-22 02:12:50 +02:00
|
|
|
void
|
2018-09-22 23:24:15 +02:00
|
|
|
ircd::db::database::sst::tool(const vector_view<const string_view> &args)
|
2018-09-22 02:12:50 +02:00
|
|
|
{
|
2018-09-23 00:26:41 +02:00
|
|
|
const ctx::uninterruptible::nothrow ui;
|
|
|
|
|
2019-06-04 23:59:05 +02:00
|
|
|
static const size_t arg_max {16};
|
|
|
|
static const size_t arg_max_len {256};
|
|
|
|
thread_local char arg[arg_max][arg_max_len]
|
2018-09-22 02:12:50 +02:00
|
|
|
{
|
|
|
|
"./sst_dump"
|
|
|
|
};
|
|
|
|
|
|
|
|
size_t i(0);
|
2019-06-04 23:59:05 +02:00
|
|
|
char *argv[arg_max] { arg[i++] };
|
|
|
|
for(; i < arg_max - 1 && i - 1 < args.size(); ++i)
|
2018-09-22 02:12:50 +02:00
|
|
|
{
|
|
|
|
strlcpy(arg[i], args.at(i - 1));
|
|
|
|
argv[i] = arg[i];
|
|
|
|
}
|
2018-09-22 22:13:05 +02:00
|
|
|
argv[i++] = nullptr;
|
2019-06-04 23:59:05 +02:00
|
|
|
assert(i <= arg_max);
|
2018-09-22 02:12:50 +02:00
|
|
|
|
|
|
|
rocksdb::SSTDumpTool tool;
|
|
|
|
const int ret
|
|
|
|
{
|
|
|
|
tool.Run(i, argv)
|
|
|
|
};
|
|
|
|
|
|
|
|
if(ret != 0)
|
|
|
|
throw error
|
|
|
|
{
|
|
|
|
"Error from SST dump tool: return value: %d", ret
|
|
|
|
};
|
|
|
|
}
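// Example (illustrative sketch; the file path and flags are shown as typical
// sst_dump-style arguments, not verified against a specific RocksDB version):
// forwarding arguments through this wrapper. The leading "./sst_dump" argv[0]
// is supplied internally above, so callers pass only the options.
//
//   const string_view args[]
//   {
//       "--file=/var/db/ircd/events.sst", "--show_properties"
//   };
//
//   ircd::db::database::sst::tool(args);
//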
|
|
|
|
|
2018-09-22 23:24:15 +02:00
|
|
|
//
|
|
|
|
// sst::dump::dump
|
|
|
|
//
|
|
|
|
|
|
|
|
ircd::db::database::sst::dump::dump(db::column column,
|
|
|
|
const key_range &range,
|
|
|
|
const string_view &path_)
|
|
|
|
{
|
|
|
|
database::column &c(column);
|
2018-09-23 00:26:41 +02:00
|
|
|
const database &d(column);
|
2018-12-30 00:27:58 +01:00
|
|
|
std::string path
|
|
|
|
{
|
|
|
|
path_
|
|
|
|
};
|
2018-09-23 00:26:41 +02:00
|
|
|
|
2018-09-22 23:24:15 +02:00
|
|
|
if(path.empty())
|
|
|
|
{
|
|
|
|
const string_view path_parts[]
|
|
|
|
{
|
2019-01-25 19:35:39 +01:00
|
|
|
fs::path(fs::DB), db::name(d), db::name(c)
|
2018-09-22 23:24:15 +02:00
|
|
|
};
|
|
|
|
|
2019-02-08 05:56:48 +01:00
|
|
|
path = fs::path_string(path_parts);
|
2018-09-22 23:24:15 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
rocksdb::Options opts(d.d->GetOptions(c));
|
|
|
|
rocksdb::EnvOptions eopts(opts);
|
|
|
|
rocksdb::SstFileWriter writer
|
|
|
|
{
|
|
|
|
eopts, opts, c
|
|
|
|
};
|
|
|
|
|
|
|
|
throw_on_error
|
|
|
|
{
|
|
|
|
writer.Open(path)
|
|
|
|
};
|
|
|
|
|
|
|
|
size_t i(0);
|
|
|
|
for(auto it(column.begin()); it != column.end(); ++it, ++i)
|
|
|
|
throw_on_error
|
|
|
|
{
|
|
|
|
writer.Put(slice(it->first), slice(it->second))
|
|
|
|
};
|
|
|
|
|
|
|
|
rocksdb::ExternalSstFileInfo info;
|
|
|
|
if(i)
|
|
|
|
throw_on_error
|
|
|
|
{
|
|
|
|
writer.Finish(&info)
|
|
|
|
};
|
|
|
|
|
|
|
|
this->info.column = db::name(column);
|
|
|
|
this->info.path = std::move(info.file_path);
|
|
|
|
this->info.min_key = std::move(info.smallest_key);
|
|
|
|
this->info.max_key = std::move(info.largest_key);
|
|
|
|
this->info.min_seq = info.sequence_number;
|
|
|
|
this->info.max_seq = info.sequence_number;
|
|
|
|
this->info.size = info.file_size;
|
|
|
|
this->info.entries = info.num_entries;
|
|
|
|
this->info.version = info.version;
|
|
|
|
}
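// Example (illustrative sketch; the database reference d, column name and
// output path are hypothetical, and the default-constructed key_range is
// assumed to mean "whole column"): dumping one column to an SST file and
// reading the summary the constructor above fills in.
//
//   db::column column { d["events"] };
//   const database::sst::dump dump
//   {
//       column, {}, "/tmp/events.sst"
//   };
//
//   log::debug
//   {
//       log, "wrote %zu entries (%zu bytes) to %s",
//       dump.info.entries,
//       dump.info.size,
//       dump.info.path
//   };
//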
|
|
|
|
|
2018-09-21 00:57:15 +02:00
|
|
|
//
|
2018-09-22 22:13:05 +02:00
|
|
|
// sst::info::vector
|
2018-09-21 00:57:15 +02:00
|
|
|
//
|
|
|
|
|
2018-09-22 22:13:05 +02:00
|
|
|
ircd::db::database::sst::info::vector::vector(const database &d)
|
2018-09-21 00:57:15 +02:00
|
|
|
{
|
2018-10-21 18:15:44 +02:00
|
|
|
this->reserve(db::file_count(d));
|
|
|
|
for(const auto &c : d.columns)
|
|
|
|
{
|
|
|
|
db::column column{*c};
|
|
|
|
for(auto &&info : vector(column))
|
|
|
|
this->emplace_back(std::move(info));
|
|
|
|
}
|
2018-09-21 00:57:15 +02:00
|
|
|
}
|
|
|
|
|
2018-09-25 07:26:44 +02:00
|
|
|
ircd::db::database::sst::info::vector::vector(const db::column &column)
|
|
|
|
{
|
|
|
|
database::column &c(const_cast<db::column &>(column));
|
|
|
|
database &d(*c.d);
|
|
|
|
|
|
|
|
rocksdb::ColumnFamilyMetaData cfmd;
|
|
|
|
d.d->GetColumnFamilyMetaData(c, &cfmd);
|
2018-10-21 17:08:12 +02:00
|
|
|
|
2018-10-21 18:15:44 +02:00
|
|
|
rocksdb::TablePropertiesCollection tpc;
|
|
|
|
throw_on_error
|
|
|
|
{
|
|
|
|
d.d->GetPropertiesOfAllTables(c, &tpc)
|
|
|
|
};
|
|
|
|
|
2018-10-21 17:08:12 +02:00
|
|
|
size_t i(0);
|
2018-10-21 18:15:44 +02:00
|
|
|
this->resize(std::max(cfmd.file_count, tpc.size()));
|
2018-09-25 07:26:44 +02:00
|
|
|
for(rocksdb::LevelMetaData &level : cfmd.levels)
|
|
|
|
for(rocksdb::SstFileMetaData md : level.files)
|
|
|
|
{
|
2018-10-21 17:08:12 +02:00
|
|
|
auto &info(this->at(i++));
|
|
|
|
info.operator=(std::move(md));
|
|
|
|
info.level = level.level;
|
2018-10-21 18:15:44 +02:00
|
|
|
|
|
|
|
const auto path(info.path + info.name);
|
|
|
|
auto tp(*tpc.at(path));
|
|
|
|
info.operator=(std::move(tp));
|
|
|
|
tpc.erase(path);
|
2018-09-25 07:26:44 +02:00
|
|
|
}
|
2018-10-21 18:15:44 +02:00
|
|
|
|
|
|
|
for(auto &&kv : tpc)
|
|
|
|
{
|
|
|
|
auto &info(this->at(i++));
|
|
|
|
auto tp(*kv.second);
|
|
|
|
info.operator=(std::move(tp));
|
|
|
|
info.path = kv.first;
|
|
|
|
}
|
|
|
|
|
|
|
|
assert(i == this->size());
|
2018-09-25 07:26:44 +02:00
|
|
|
}
|
|
|
|
|
2018-09-21 00:57:15 +02:00
|
|
|
//
|
2018-09-22 23:24:15 +02:00
|
|
|
// sst::info::info
|
2018-09-21 00:57:15 +02:00
|
|
|
//
|
|
|
|
|
2018-10-21 18:15:44 +02:00
|
|
|
ircd::db::database::sst::info::info(const database &d_,
|
2018-09-22 22:13:05 +02:00
|
|
|
const string_view &filename)
|
2018-09-21 00:57:15 +02:00
|
|
|
{
|
2018-10-21 18:15:44 +02:00
|
|
|
auto &d(const_cast<database &>(d_));
|
2018-09-23 00:26:41 +02:00
|
|
|
const ctx::uninterruptible::nothrow ui;
|
2018-10-21 18:15:44 +02:00
|
|
|
|
2018-09-21 00:57:15 +02:00
|
|
|
std::vector<rocksdb::LiveFileMetaData> v;
|
|
|
|
d.d->GetLiveFilesMetaData(&v);
|
|
|
|
|
|
|
|
for(auto &md : v)
|
|
|
|
if(md.name == filename)
|
|
|
|
{
|
2018-10-21 18:15:44 +02:00
|
|
|
rocksdb::TablePropertiesCollection tpc;
|
|
|
|
throw_on_error
|
|
|
|
{
|
|
|
|
d.d->GetPropertiesOfAllTables(d[md.column_family_name], &tpc)
|
|
|
|
};
|
|
|
|
|
|
|
|
auto tp(*tpc.at(md.db_path + md.name));
|
2018-10-21 17:08:12 +02:00
|
|
|
this->operator=(std::move(md));
|
2018-10-21 18:15:44 +02:00
|
|
|
this->operator=(std::move(tp));
|
2018-09-21 00:57:15 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
throw not_found
|
|
|
|
{
|
|
|
|
"No file named '%s' is live in database '%s'",
|
|
|
|
filename,
|
|
|
|
d.name
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
2018-10-21 17:08:12 +02:00
|
|
|
ircd::db::database::sst::info &
|
|
|
|
ircd::db::database::sst::info::operator=(rocksdb::LiveFileMetaData &&md)
|
|
|
|
{
|
|
|
|
name = std::move(md.name);
|
|
|
|
path = std::move(md.db_path);
|
|
|
|
column = std::move(md.column_family_name);
|
|
|
|
size = std::move(md.size);
|
|
|
|
min_seq = std::move(md.smallest_seqno);
|
|
|
|
max_seq = std::move(md.largest_seqno);
|
|
|
|
min_key = std::move(md.smallestkey);
|
|
|
|
max_key = std::move(md.largestkey);
|
|
|
|
num_reads = std::move(md.num_reads_sampled);
|
|
|
|
level = std::move(md.level);
|
|
|
|
compacting = std::move(md.being_compacted);
|
|
|
|
return *this;
|
|
|
|
}
|
|
|
|
|
|
|
|
ircd::db::database::sst::info &
|
|
|
|
ircd::db::database::sst::info::operator=(rocksdb::SstFileMetaData &&md)
|
|
|
|
{
|
|
|
|
name = std::move(md.name);
|
|
|
|
path = std::move(md.db_path);
|
|
|
|
size = std::move(md.size);
|
|
|
|
min_seq = std::move(md.smallest_seqno);
|
|
|
|
max_seq = std::move(md.largest_seqno);
|
|
|
|
min_key = std::move(md.smallestkey);
|
|
|
|
max_key = std::move(md.largestkey);
|
|
|
|
num_reads = std::move(md.num_reads_sampled);
|
|
|
|
compacting = std::move(md.being_compacted);
|
|
|
|
return *this;
|
2018-09-25 07:26:44 +02:00
|
|
|
}
|
|
|
|
|
2018-10-21 18:15:44 +02:00
|
|
|
ircd::db::database::sst::info &
|
|
|
|
ircd::db::database::sst::info::operator=(rocksdb::TableProperties &&tp)
|
|
|
|
{
|
|
|
|
column = std::move(tp.column_family_name);
|
|
|
|
filter = std::move(tp.filter_policy_name);
|
|
|
|
comparator = std::move(tp.comparator_name);
|
|
|
|
merge_operator = std::move(tp.merge_operator_name);
|
|
|
|
prefix_extractor = std::move(tp.prefix_extractor_name);
|
|
|
|
compression = std::move(tp.compression_name);
|
|
|
|
format = std::move(tp.format_version);
|
|
|
|
cfid = std::move(tp.column_family_id);
|
|
|
|
data_size = std::move(tp.data_size);
|
|
|
|
index_size = std::move(tp.index_size);
|
|
|
|
top_index_size = std::move(tp.top_level_index_size);
|
|
|
|
filter_size = std::move(tp.filter_size);
|
|
|
|
keys_size = std::move(tp.raw_key_size);
|
|
|
|
values_size = std::move(tp.raw_value_size);
|
|
|
|
index_parts = std::move(tp.index_partitions);
|
|
|
|
data_blocks = std::move(tp.num_data_blocks);
|
|
|
|
entries = std::move(tp.num_entries);
|
|
|
|
range_deletes = std::move(tp.num_range_deletions);
|
|
|
|
fixed_key_len = std::move(tp.fixed_key_len);
|
|
|
|
created = std::move(tp.creation_time);
|
|
|
|
oldest_key = std::move(tp.oldest_key_time);
|
|
|
|
return *this;
|
|
|
|
}
|
|
|
|
|
2018-12-10 23:04:30 +01:00
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
|
|
|
//
|
|
|
|
// database::wal
|
|
|
|
//
|
|
|
|
|
|
|
|
//
|
|
|
|
// wal::info::vector
|
|
|
|
//
|
|
|
|
|
|
|
|
ircd::db::database::wal::info::vector::vector(const database &d_)
|
|
|
|
{
|
|
|
|
auto &d{const_cast<database &>(d_)};
|
|
|
|
std::vector<std::unique_ptr<rocksdb::LogFile>> vec;
|
|
|
|
throw_on_error
|
|
|
|
{
|
|
|
|
d.d->GetSortedWalFiles(vec)
|
|
|
|
};
|
|
|
|
|
|
|
|
this->resize(vec.size());
|
|
|
|
for(size_t i(0); i < vec.size(); ++i)
|
|
|
|
this->at(i).operator=(*vec.at(i));
|
|
|
|
}
|
|
|
|
|
|
|
|
//
|
|
|
|
// wal::info::info
|
|
|
|
//
|
|
|
|
|
|
|
|
ircd::db::database::wal::info::info(const database &d_,
|
|
|
|
const string_view &filename)
|
|
|
|
{
|
|
|
|
auto &d{const_cast<database &>(d_)};
|
|
|
|
std::vector<std::unique_ptr<rocksdb::LogFile>> vec;
|
|
|
|
throw_on_error
|
|
|
|
{
|
|
|
|
d.d->GetSortedWalFiles(vec)
|
|
|
|
};
|
|
|
|
|
|
|
|
for(const auto &ptr : vec)
|
|
|
|
if(ptr->PathName() == filename)
|
|
|
|
{
|
|
|
|
this->operator=(*ptr);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
throw not_found
|
|
|
|
{
|
|
|
|
"No file named '%s' is live in database '%s'",
|
|
|
|
filename,
|
|
|
|
d.name
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
|
|
|
ircd::db::database::wal::info &
|
|
|
|
ircd::db::database::wal::info::operator=(const rocksdb::LogFile &lf)
|
|
|
|
{
|
|
|
|
name = lf.PathName();
|
|
|
|
number = lf.LogNumber();
|
|
|
|
seq = lf.StartSequence();
|
|
|
|
size = lf.SizeFileBytes();
|
|
|
|
alive = lf.Type() == rocksdb::WalFileType::kAliveLogFile;
|
|
|
|
|
|
|
|
return *this;
|
|
|
|
}
|
|
|
|
|
2018-05-30 09:14:07 +02:00
|
|
|
|
2017-09-19 04:19:02 +02:00
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
|
|
|
//
|
2018-01-30 18:58:36 +01:00
|
|
|
// db/txn.h
|
2017-09-19 04:19:02 +02:00
|
|
|
//
|
|
|
|
|
2018-03-28 00:21:30 +02:00
|
|
|
void
|
|
|
|
ircd::db::get(database &d,
|
|
|
|
const uint64_t &seq,
|
|
|
|
const seq_closure &closure)
|
|
|
|
{
|
|
|
|
for_each(d, seq, seq_closure_bool{[&closure]
|
|
|
|
(txn &txn, const uint64_t &seq)
|
|
|
|
{
|
|
|
|
closure(txn, seq);
|
|
|
|
return false;
|
|
|
|
}});
|
|
|
|
}
|
|
|
|
|
2018-03-23 04:49:11 +01:00
|
|
|
void
|
|
|
|
ircd::db::for_each(database &d,
|
|
|
|
const uint64_t &seq,
|
|
|
|
const seq_closure &closure)
|
|
|
|
{
|
|
|
|
for_each(d, seq, seq_closure_bool{[&closure]
|
|
|
|
(txn &txn, const uint64_t &seq)
|
|
|
|
{
|
|
|
|
closure(txn, seq);
|
|
|
|
return true;
|
|
|
|
}});
|
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
|
|
|
ircd::db::for_each(database &d,
|
|
|
|
const uint64_t &seq,
|
|
|
|
const seq_closure_bool &closure)
|
|
|
|
{
|
|
|
|
std::unique_ptr<rocksdb::TransactionLogIterator> tit;
|
|
|
|
{
|
2018-08-20 04:30:17 +02:00
|
|
|
const ctx::uninterruptible ui;
|
|
|
|
throw_on_error
|
|
|
|
{
|
|
|
|
d.d->GetUpdatesSince(seq, &tit)
|
|
|
|
};
|
|
|
|
}
|
2018-03-23 04:49:11 +01:00
|
|
|
|
|
|
|
assert(bool(tit));
|
|
|
|
for(; tit->Valid(); tit->Next())
|
|
|
|
{
|
2018-08-20 04:30:17 +02:00
|
|
|
const ctx::uninterruptible ui;
|
|
|
|
|
2018-03-23 04:49:11 +01:00
|
|
|
auto batchres
|
|
|
|
{
|
|
|
|
tit->GetBatch()
|
|
|
|
};
|
|
|
|
|
|
|
|
throw_on_error
|
|
|
|
{
|
|
|
|
tit->status()
|
|
|
|
};
|
|
|
|
|
|
|
|
db::txn txn
|
|
|
|
{
|
|
|
|
d, std::move(batchres.writeBatchPtr)
|
|
|
|
};
|
|
|
|
|
|
|
|
assert(bool(txn.wb));
|
|
|
|
if(!closure(txn, batchres.sequence))
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
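// Example (illustrative sketch; d and since are assumed to be a database
// reference and a starting sequence number): replaying every transaction
// written since `since` with the bool-closure overload above; returning true
// from the closure continues the iteration.
//
//   for_each(d, since, seq_closure_bool{[]
//   (txn &txn, const uint64_t &seq)
//   {
//       log::debug { log, "seq:%lu deltas:%zu bytes:%zu", seq, txn.size(), txn.bytes() };
//       return true;
//   }});
//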
|
|
|
|
|
2017-09-19 04:19:02 +02:00
|
|
|
std::string
|
2018-01-30 18:58:36 +01:00
|
|
|
ircd::db::debug(const txn &t)
|
2017-09-19 04:19:02 +02:00
|
|
|
{
|
|
|
|
const rocksdb::WriteBatch &wb(t);
|
|
|
|
return db::debug(wb);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2018-01-30 18:58:36 +01:00
|
|
|
ircd::db::for_each(const txn &t,
|
2019-05-10 01:28:17 +02:00
|
|
|
const delta_closure &closure)
|
2017-09-19 05:40:13 +02:00
|
|
|
{
|
|
|
|
const auto re{[&closure]
|
|
|
|
(const delta &delta)
|
|
|
|
{
|
|
|
|
closure(delta);
|
|
|
|
return true;
|
|
|
|
}};
|
|
|
|
|
|
|
|
const database &d(t);
|
|
|
|
const rocksdb::WriteBatch &wb{t};
|
2018-01-30 18:58:36 +01:00
|
|
|
txn::handler h{d, re};
|
2017-09-19 05:40:13 +02:00
|
|
|
wb.Iterate(&h);
|
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
2018-04-17 00:16:51 +02:00
|
|
|
ircd::db::for_each(const txn &t,
|
2019-05-10 01:28:17 +02:00
|
|
|
const delta_closure_bool &closure)
|
2017-09-19 04:19:02 +02:00
|
|
|
{
|
|
|
|
const database &d(t);
|
|
|
|
const rocksdb::WriteBatch &wb{t};
|
2018-01-30 18:58:36 +01:00
|
|
|
txn::handler h{d, closure};
|
2017-09-19 05:40:13 +02:00
|
|
|
wb.Iterate(&h);
|
|
|
|
return h._continue;
|
2017-09-19 04:19:02 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
///
|
2018-03-23 04:47:46 +01:00
|
|
|
/// handler (db/database/txn.h)
|
2017-09-19 04:19:02 +02:00
|
|
|
///
|
|
|
|
|
|
|
|
rocksdb::Status
|
2018-01-30 18:58:36 +01:00
|
|
|
ircd::db::txn::handler::PutCF(const uint32_t cfid,
|
2017-09-19 04:19:02 +02:00
|
|
|
const Slice &key,
|
|
|
|
const Slice &val)
|
|
|
|
noexcept
|
|
|
|
{
|
|
|
|
return callback(cfid, op::SET, key, val);
|
|
|
|
}
|
|
|
|
|
|
|
|
rocksdb::Status
|
2018-01-30 18:58:36 +01:00
|
|
|
ircd::db::txn::handler::DeleteCF(const uint32_t cfid,
|
2017-09-19 04:19:02 +02:00
|
|
|
const Slice &key)
|
|
|
|
noexcept
|
|
|
|
{
|
|
|
|
return callback(cfid, op::DELETE, key, {});
|
|
|
|
}
|
|
|
|
|
|
|
|
rocksdb::Status
|
2018-01-30 18:58:36 +01:00
|
|
|
ircd::db::txn::handler::DeleteRangeCF(const uint32_t cfid,
|
2017-09-19 04:19:02 +02:00
|
|
|
const Slice &begin,
|
|
|
|
const Slice &end)
|
|
|
|
noexcept
|
|
|
|
{
|
|
|
|
return callback(cfid, op::DELETE_RANGE, begin, end);
|
|
|
|
}
|
|
|
|
|
|
|
|
rocksdb::Status
|
2018-01-30 18:58:36 +01:00
|
|
|
ircd::db::txn::handler::SingleDeleteCF(const uint32_t cfid,
|
2017-09-19 04:19:02 +02:00
|
|
|
const Slice &key)
|
|
|
|
noexcept
|
|
|
|
{
|
|
|
|
return callback(cfid, op::SINGLE_DELETE, key, {});
|
|
|
|
}
|
|
|
|
|
|
|
|
rocksdb::Status
|
2018-01-30 18:58:36 +01:00
|
|
|
ircd::db::txn::handler::MergeCF(const uint32_t cfid,
|
2017-09-19 04:19:02 +02:00
|
|
|
const Slice &key,
|
|
|
|
const Slice &value)
|
|
|
|
noexcept
|
|
|
|
{
|
|
|
|
return callback(cfid, op::MERGE, key, value);
|
|
|
|
}
|
|
|
|
|
|
|
|
rocksdb::Status
|
2018-10-16 07:11:14 +02:00
|
|
|
ircd::db::txn::handler::MarkBeginPrepare(bool b)
|
2017-09-19 04:19:02 +02:00
|
|
|
noexcept
|
|
|
|
{
|
2019-01-14 00:50:04 +01:00
|
|
|
ircd::not_implemented{};
|
2017-09-19 04:19:02 +02:00
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
|
|
|
rocksdb::Status
|
2018-01-30 18:58:36 +01:00
|
|
|
ircd::db::txn::handler::MarkEndPrepare(const Slice &xid)
|
2017-09-19 04:19:02 +02:00
|
|
|
noexcept
|
|
|
|
{
|
2019-01-14 00:50:04 +01:00
|
|
|
ircd::not_implemented{};
|
2017-09-19 04:19:02 +02:00
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
|
|
|
rocksdb::Status
|
2018-01-30 18:58:36 +01:00
|
|
|
ircd::db::txn::handler::MarkCommit(const Slice &xid)
|
2017-09-19 04:19:02 +02:00
|
|
|
noexcept
|
|
|
|
{
|
2019-01-14 00:50:04 +01:00
|
|
|
ircd::not_implemented{};
|
2017-09-19 04:19:02 +02:00
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
|
|
|
rocksdb::Status
|
2018-01-30 18:58:36 +01:00
|
|
|
ircd::db::txn::handler::MarkRollback(const Slice &xid)
|
2017-09-19 04:19:02 +02:00
|
|
|
noexcept
|
|
|
|
{
|
2019-01-14 00:50:04 +01:00
|
|
|
ircd::not_implemented{};
|
2017-09-19 04:19:02 +02:00
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
|
|
|
rocksdb::Status
|
2018-01-30 18:58:36 +01:00
|
|
|
ircd::db::txn::handler::callback(const uint32_t &cfid,
|
2017-09-19 04:19:02 +02:00
|
|
|
const op &op,
|
|
|
|
const Slice &a,
|
|
|
|
const Slice &b)
|
|
|
|
noexcept try
|
|
|
|
{
|
|
|
|
auto &c{d[cfid]};
|
|
|
|
const delta delta
|
|
|
|
{
|
|
|
|
op,
|
|
|
|
db::name(c),
|
|
|
|
slice(a),
|
|
|
|
slice(b)
|
|
|
|
};
|
|
|
|
|
|
|
|
return callback(delta);
|
|
|
|
}
|
|
|
|
catch(const std::exception &e)
|
|
|
|
{
|
|
|
|
_continue = false;
|
2018-05-29 10:09:15 +02:00
|
|
|
log::critical
|
|
|
|
{
|
|
|
|
"txn::handler: cfid[%u]: %s", cfid, e.what()
|
|
|
|
};
|
|
|
|
|
2017-11-26 01:20:42 +01:00
|
|
|
ircd::terminate();
|
2017-09-19 04:19:02 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
rocksdb::Status
|
2018-01-30 18:58:36 +01:00
|
|
|
ircd::db::txn::handler::callback(const delta &delta)
|
2017-09-19 04:19:02 +02:00
|
|
|
noexcept try
|
|
|
|
{
|
2017-09-19 05:40:13 +02:00
|
|
|
_continue = cb(delta);
|
2017-09-19 04:19:02 +02:00
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
catch(const std::exception &e)
|
|
|
|
{
|
|
|
|
_continue = false;
|
2017-09-19 05:40:13 +02:00
|
|
|
return Status::OK();
|
2017-09-19 04:19:02 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
2018-01-30 18:58:36 +01:00
|
|
|
ircd::db::txn::handler::Continue()
|
2017-09-19 04:19:02 +02:00
|
|
|
noexcept
|
|
|
|
{
|
|
|
|
return _continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
//
|
2018-01-30 18:58:36 +01:00
|
|
|
// txn
|
2017-09-19 04:19:02 +02:00
|
|
|
//
|
|
|
|
|
2018-01-30 18:58:36 +01:00
|
|
|
ircd::db::txn::txn(database &d)
|
2018-03-23 04:48:23 +01:00
|
|
|
:txn{d, opts{}}
|
2017-09-19 04:19:02 +02:00
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2018-01-30 18:58:36 +01:00
|
|
|
ircd::db::txn::txn(database &d,
|
2017-09-19 04:19:02 +02:00
|
|
|
const opts &opts)
|
|
|
|
:d{&d}
|
|
|
|
,wb
|
|
|
|
{
|
|
|
|
std::make_unique<rocksdb::WriteBatch>(opts.reserve_bytes, opts.max_bytes)
|
|
|
|
}
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2018-03-23 04:48:23 +01:00
|
|
|
ircd::db::txn::txn(database &d,
|
|
|
|
std::unique_ptr<rocksdb::WriteBatch> &&wb)
|
|
|
|
:d{&d}
|
|
|
|
,wb{std::move(wb)}
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2018-01-30 18:58:36 +01:00
|
|
|
ircd::db::txn::~txn()
|
2017-09-19 04:19:02 +02:00
|
|
|
noexcept
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2018-01-30 18:58:36 +01:00
|
|
|
ircd::db::txn::operator()(const sopts &opts)
|
2017-09-19 04:19:02 +02:00
|
|
|
{
|
|
|
|
assert(bool(d));
|
|
|
|
operator()(*d, opts);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2018-01-30 18:58:36 +01:00
|
|
|
ircd::db::txn::operator()(database &d,
|
2017-09-19 04:19:02 +02:00
|
|
|
const sopts &opts)
|
|
|
|
{
|
|
|
|
assert(bool(wb));
|
2019-03-19 21:13:16 +01:00
|
|
|
assert(this->state == state::BUILD);
|
|
|
|
this->state = state::COMMIT;
|
2017-09-19 04:19:02 +02:00
|
|
|
commit(d, *wb, opts);
|
2019-03-19 21:13:16 +01:00
|
|
|
this->state = state::COMMITTED;
|
2017-09-19 04:19:02 +02:00
|
|
|
}
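// Example (illustrative sketch; the column, key and value strings are
// hypothetical and sopts is assumed to default-construct to default write
// options): the typical lifecycle — stage deltas with txn::append, then
// commit the whole WriteBatch atomically through the call operator above.
//
//   db::txn txn{d};
//   db::txn::append
//   {
//       txn, db::delta { "events", "event_id", "value" }
//   };
//
//   const db::sopts sopts;
//   txn(sopts);   // state transitions BUILD -> COMMIT -> COMMITTED
//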
|
|
|
|
|
|
|
|
void
|
2018-01-30 18:58:36 +01:00
|
|
|
ircd::db::txn::clear()
|
2017-09-19 04:19:02 +02:00
|
|
|
{
|
|
|
|
assert(bool(wb));
|
|
|
|
wb->Clear();
|
2019-03-19 21:13:16 +01:00
|
|
|
this->state = state::BUILD;
|
2017-09-19 04:19:02 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
size_t
|
2018-01-30 18:58:36 +01:00
|
|
|
ircd::db::txn::size()
|
2017-09-19 04:19:02 +02:00
|
|
|
const
|
|
|
|
{
|
|
|
|
assert(bool(wb));
|
|
|
|
return wb->Count();
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t
|
2018-01-30 18:58:36 +01:00
|
|
|
ircd::db::txn::bytes()
|
2017-09-19 04:19:02 +02:00
|
|
|
const
|
|
|
|
{
|
|
|
|
assert(bool(wb));
|
|
|
|
return wb->GetDataSize();
|
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
2018-01-30 18:58:36 +01:00
|
|
|
ircd::db::txn::has(const op &op)
|
2017-09-19 04:19:02 +02:00
|
|
|
const
|
|
|
|
{
|
|
|
|
assert(bool(wb));
|
|
|
|
switch(op)
|
|
|
|
{
|
|
|
|
case op::GET: assert(0); return false;
|
|
|
|
case op::SET: return wb->HasPut();
|
|
|
|
case op::MERGE: return wb->HasMerge();
|
|
|
|
case op::DELETE: return wb->HasDelete();
|
|
|
|
case op::DELETE_RANGE: return wb->HasDeleteRange();
|
|
|
|
case op::SINGLE_DELETE: return wb->HasSingleDelete();
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2017-09-19 05:40:13 +02:00
|
|
|
bool
|
2018-01-30 18:58:36 +01:00
|
|
|
ircd::db::txn::has(const op &op,
|
2017-09-19 05:40:13 +02:00
|
|
|
const string_view &col)
|
|
|
|
const
|
|
|
|
{
|
2018-04-17 00:16:51 +02:00
|
|
|
return !for_each(*this, delta_closure_bool{[&op, &col]
|
2017-09-19 05:40:13 +02:00
|
|
|
(const auto &delta)
|
|
|
|
{
|
2017-09-28 03:25:39 +02:00
|
|
|
return std::get<delta.OP>(delta) == op &&
|
|
|
|
std::get<delta.COL>(delta) == col;
|
2018-04-17 00:16:51 +02:00
|
|
|
}});
|
2017-09-19 05:40:13 +02:00
|
|
|
}
|
|
|
|
|
2018-04-17 00:16:51 +02:00
|
|
|
void
|
2018-01-30 18:58:36 +01:00
|
|
|
ircd::db::txn::at(const op &op,
|
2018-04-17 00:16:51 +02:00
|
|
|
const string_view &col,
|
|
|
|
const delta_closure &closure)
|
2017-09-19 05:40:13 +02:00
|
|
|
const
|
|
|
|
{
|
2018-04-17 00:16:51 +02:00
|
|
|
if(!get(op, col, closure))
|
|
|
|
throw not_found
|
|
|
|
{
|
|
|
|
"db::txn::at(%s, %s): no matching delta in transaction",
|
|
|
|
reflect(op),
|
|
|
|
col
|
|
|
|
};
|
2017-09-19 05:40:13 +02:00
|
|
|
}
|
|
|
|
|
2018-04-17 00:16:51 +02:00
|
|
|
bool
|
2018-01-30 18:58:36 +01:00
|
|
|
ircd::db::txn::get(const op &op,
|
2018-04-17 00:16:51 +02:00
|
|
|
const string_view &col,
|
|
|
|
const delta_closure &closure)
|
2017-09-19 05:40:13 +02:00
|
|
|
const
|
|
|
|
{
|
2018-04-17 00:16:51 +02:00
|
|
|
return !for_each(*this, delta_closure_bool{[&op, &col, &closure]
|
2017-09-19 05:40:13 +02:00
|
|
|
(const delta &delta)
|
|
|
|
{
|
2017-09-28 03:25:39 +02:00
|
|
|
if(std::get<delta.OP>(delta) == op &&
|
|
|
|
std::get<delta.COL>(delta) == col)
|
2017-09-19 05:40:13 +02:00
|
|
|
{
|
2018-04-17 00:16:51 +02:00
|
|
|
closure(delta);
|
|
|
|
return false;
|
2017-09-19 05:40:13 +02:00
|
|
|
}
|
2018-04-17 00:16:51 +02:00
|
|
|
else return true;
|
|
|
|
}});
|
|
|
|
}
|
2017-09-19 05:40:13 +02:00
|
|
|
|
2018-04-17 00:16:51 +02:00
|
|
|
bool
|
|
|
|
ircd::db::txn::has(const op &op,
|
|
|
|
const string_view &col,
|
|
|
|
const string_view &key)
|
|
|
|
const
|
|
|
|
{
|
|
|
|
return !for_each(*this, delta_closure_bool{[&op, &col, &key]
|
|
|
|
(const auto &delta)
|
|
|
|
{
|
|
|
|
return std::get<delta.OP>(delta) == op &&
|
|
|
|
std::get<delta.COL>(delta) == col &&
|
|
|
|
std::get<delta.KEY>(delta) == key;
|
|
|
|
}});
|
2017-09-19 05:40:13 +02:00
|
|
|
}
|
|
|
|
|
2018-04-17 00:16:51 +02:00
|
|
|
void
|
2018-01-30 18:58:36 +01:00
|
|
|
ircd::db::txn::at(const op &op,
|
2017-09-19 05:40:13 +02:00
|
|
|
const string_view &col,
|
2018-04-17 00:16:51 +02:00
|
|
|
const string_view &key,
|
|
|
|
const value_closure &closure)
|
2017-09-19 05:40:13 +02:00
|
|
|
const
|
|
|
|
{
|
2018-04-17 00:16:51 +02:00
|
|
|
if(!get(op, col, key, closure))
|
|
|
|
throw not_found
|
|
|
|
{
|
|
|
|
"db::txn::at(%s, %s, %s): no matching delta in transaction",
|
|
|
|
reflect(op),
|
|
|
|
col,
|
|
|
|
key
|
|
|
|
};
|
2017-09-19 05:40:13 +02:00
|
|
|
}
|
|
|
|
|
2018-04-17 00:16:51 +02:00
|
|
|
bool
|
2018-01-30 18:58:36 +01:00
|
|
|
ircd::db::txn::get(const op &op,
|
2017-09-19 05:40:13 +02:00
|
|
|
const string_view &col,
|
2018-04-17 00:16:51 +02:00
|
|
|
const string_view &key,
|
|
|
|
const value_closure &closure)
|
2017-09-19 05:40:13 +02:00
|
|
|
const
|
|
|
|
{
|
2018-04-17 00:16:51 +02:00
|
|
|
return !for_each(*this, delta_closure_bool{[&op, &col, &key, &closure]
|
2017-09-19 05:40:13 +02:00
|
|
|
(const delta &delta)
|
|
|
|
{
|
2017-09-28 03:25:39 +02:00
|
|
|
if(std::get<delta.OP>(delta) == op &&
|
|
|
|
std::get<delta.COL>(delta) == col &&
|
|
|
|
std::get<delta.KEY>(delta) == key)
|
2017-09-19 05:40:13 +02:00
|
|
|
{
|
2018-04-17 00:16:51 +02:00
|
|
|
closure(std::get<delta.VAL>(delta));
|
|
|
|
return false;
|
2017-09-19 05:40:13 +02:00
|
|
|
}
|
2018-04-17 00:16:51 +02:00
|
|
|
else return true;
|
|
|
|
}});
|
2017-09-19 05:40:13 +02:00
|
|
|
}
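// Example (illustrative sketch; names are hypothetical): inspecting a staged
// transaction with the query interface above — has() tests for a matching
// delta, get() invokes the closure if found, at() throws not_found otherwise.
//
//   if(txn.has(db::op::SET, "events", "event_id"))
//       txn.at(db::op::SET, "events", "event_id", []
//       (const string_view &val)
//       {
//           log::debug { log, "staged value: %s", val };
//       });
//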
|
|
|
|
|
2018-01-30 18:58:36 +01:00
|
|
|
ircd::db::txn::operator
|
2017-09-19 04:19:02 +02:00
|
|
|
ircd::db::database &()
|
|
|
|
{
|
|
|
|
assert(bool(d));
|
|
|
|
return *d;
|
|
|
|
}
|
|
|
|
|
2018-01-30 18:58:36 +01:00
|
|
|
ircd::db::txn::operator
|
2017-09-19 04:19:02 +02:00
|
|
|
rocksdb::WriteBatch &()
|
|
|
|
{
|
|
|
|
assert(bool(wb));
|
|
|
|
return *wb;
|
|
|
|
}
|
|
|
|
|
2018-01-30 18:58:36 +01:00
|
|
|
ircd::db::txn::operator
|
2017-09-19 04:19:02 +02:00
|
|
|
const ircd::db::database &()
|
|
|
|
const
|
|
|
|
{
|
|
|
|
assert(bool(d));
|
|
|
|
return *d;
|
|
|
|
}
|
|
|
|
|
2018-01-30 18:58:36 +01:00
|
|
|
ircd::db::txn::operator
|
2017-09-19 04:19:02 +02:00
|
|
|
const rocksdb::WriteBatch &()
|
|
|
|
const
|
|
|
|
{
|
|
|
|
assert(bool(wb));
|
|
|
|
return *wb;
|
|
|
|
}
|
|
|
|
|
|
|
|
//
|
2019-01-24 00:05:12 +01:00
|
|
|
// txn::checkpoint
|
2017-09-19 04:19:02 +02:00
|
|
|
//
|
|
|
|
|
2018-01-30 18:58:36 +01:00
|
|
|
ircd::db::txn::checkpoint::checkpoint(txn &t)
|
2017-09-19 04:19:02 +02:00
|
|
|
:t{t}
|
|
|
|
{
|
|
|
|
assert(bool(t.wb));
|
|
|
|
t.wb->SetSavePoint();
|
|
|
|
}
|
|
|
|
|
2018-01-30 18:58:36 +01:00
|
|
|
ircd::db::txn::checkpoint::~checkpoint()
|
2017-09-19 04:19:02 +02:00
|
|
|
noexcept
|
|
|
|
{
|
2018-08-20 04:30:17 +02:00
|
|
|
const ctx::uninterruptible ui;
|
2018-09-13 11:50:01 +02:00
|
|
|
if(likely(!std::uncaught_exceptions()))
|
2017-09-19 04:19:02 +02:00
|
|
|
throw_on_error { t.wb->PopSavePoint() };
|
|
|
|
else
|
|
|
|
throw_on_error { t.wb->RollbackToSavePoint() };
|
|
|
|
}
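// Example (illustrative sketch; may_throw() is a hypothetical operation):
// scoping a group of appends with a checkpoint. If an exception unwinds
// through the scope, the destructor above rolls the WriteBatch back to the
// savepoint; on normal exit the savepoint is popped and the appends remain
// staged in the txn.
//
//   {
//       const db::txn::checkpoint cp{txn};
//       db::txn::append { txn, db::delta { "events", "key", "val" } };
//       may_throw();   // a throw here discards the append above
//   }
//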
|
|
|
|
|
2019-01-24 00:05:12 +01:00
|
|
|
//
|
|
|
|
// txn::append
|
|
|
|
//
|
|
|
|
|
2018-01-30 18:58:36 +01:00
|
|
|
ircd::db::txn::append::append(txn &t,
|
2017-09-19 04:19:02 +02:00
|
|
|
const string_view &key,
|
|
|
|
const json::iov &iov)
|
|
|
|
{
|
|
|
|
std::for_each(std::begin(iov), std::end(iov), [&t, &key]
|
|
|
|
(const auto &member)
|
|
|
|
{
|
|
|
|
append
|
|
|
|
{
|
|
|
|
t, delta
|
|
|
|
{
|
|
|
|
member.first, // col
|
|
|
|
key, // key
|
|
|
|
member.second // val
|
|
|
|
}
|
|
|
|
};
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
2018-01-30 18:58:36 +01:00
|
|
|
ircd::db::txn::append::append(txn &t,
|
2017-09-19 04:19:02 +02:00
|
|
|
const delta &delta)
|
|
|
|
{
|
|
|
|
assert(bool(t.d));
|
|
|
|
append(t, *t.d, delta);
|
|
|
|
}
|
|
|
|
|
2018-01-30 18:58:36 +01:00
|
|
|
ircd::db::txn::append::append(txn &t,
|
2017-09-19 04:19:02 +02:00
|
|
|
const row::delta &delta)
|
|
|
|
{
|
2019-02-16 22:51:55 +01:00
|
|
|
throw ircd::not_implemented
|
|
|
|
{
|
|
|
|
"db::txn::append (row::delta)"
|
|
|
|
};
|
2017-09-19 04:19:02 +02:00
|
|
|
}
|
|
|
|
|
2018-01-30 18:58:36 +01:00
|
|
|
ircd::db::txn::append::append(txn &t,
|
2017-09-19 04:19:02 +02:00
|
|
|
const cell::delta &delta)
|
|
|
|
{
|
|
|
|
db::append(*t.wb, delta);
|
|
|
|
}
|
|
|
|
|
2018-01-30 18:58:36 +01:00
|
|
|
ircd::db::txn::append::append(txn &t,
|
2017-09-19 04:19:02 +02:00
|
|
|
column &c,
|
|
|
|
const column::delta &delta)
|
|
|
|
{
|
|
|
|
db::append(*t.wb, c, delta);
|
|
|
|
}
|
|
|
|
|
2018-01-30 18:58:36 +01:00
|
|
|
ircd::db::txn::append::append(txn &t,
|
2017-09-19 04:19:02 +02:00
|
|
|
database &d,
|
|
|
|
const delta &delta)
|
|
|
|
{
|
2019-01-16 22:46:15 +01:00
|
|
|
db::column c
|
|
|
|
{
|
|
|
|
d[std::get<1>(delta)]
|
|
|
|
};
|
|
|
|
|
2017-09-19 04:19:02 +02:00
|
|
|
db::append(*t.wb, c, db::column::delta
|
|
|
|
{
|
|
|
|
std::get<op>(delta),
|
|
|
|
std::get<2>(delta),
|
|
|
|
std::get<3>(delta)
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
2017-03-24 02:36:49 +01:00
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
|
|
|
//
|
2019-01-29 20:13:58 +01:00
|
|
|
// db/row.h
|
2017-03-24 02:36:49 +01:00
|
|
|
//
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
namespace ircd::db
|
2017-08-23 22:37:47 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
static std::vector<rocksdb::Iterator *>
|
|
|
|
_make_iterators(database &d,
|
|
|
|
database::column *const *const &columns,
|
|
|
|
const size_t &columns_size,
|
|
|
|
const rocksdb::ReadOptions &opts);
|
2017-08-23 22:37:47 +02:00
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
void
|
|
|
|
ircd::db::del(row &row,
|
|
|
|
const sopts &sopts)
|
2017-08-23 22:37:47 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
write(row::delta{op::DELETE, row}, sopts);
|
2017-08-23 22:37:47 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::write(const row::delta &delta,
|
2017-08-23 22:37:47 +02:00
|
|
|
const sopts &sopts)
|
|
|
|
{
|
|
|
|
write(&delta, &delta + 1, sopts);
|
|
|
|
}
|
|
|
|
|
2017-04-03 06:02:32 +02:00
|
|
|
void
|
|
|
|
ircd::db::write(const sopts &sopts,
|
2019-01-29 20:13:58 +01:00
|
|
|
const std::initializer_list<row::delta> &deltas)
|
2017-03-24 02:36:49 +01:00
|
|
|
{
|
2017-04-03 06:02:32 +02:00
|
|
|
write(deltas, sopts);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::write(const std::initializer_list<row::delta> &deltas,
|
2017-04-03 06:02:32 +02:00
|
|
|
const sopts &sopts)
|
2017-03-24 02:36:49 +01:00
|
|
|
{
|
2017-08-23 22:37:47 +02:00
|
|
|
write(std::begin(deltas), std::end(deltas), sopts);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::write(const row::delta *const &begin,
|
|
|
|
const row::delta *const &end,
|
2017-08-23 22:37:47 +02:00
|
|
|
const sopts &sopts)
|
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
// Count the total number of cells for this transaction.
|
|
|
|
const auto cells
|
|
|
|
{
|
|
|
|
std::accumulate(begin, end, size_t(0), []
|
|
|
|
(auto ret, const row::delta &delta)
|
|
|
|
{
|
|
|
|
const auto &row(std::get<row *>(delta));
|
|
|
|
return ret += row->size();
|
|
|
|
})
|
|
|
|
};
|
2017-03-31 00:57:08 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
//TODO: allocator?
|
|
|
|
std::vector<cell::delta> deltas;
|
|
|
|
deltas.reserve(cells);
|
2017-04-03 06:02:32 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
// Compose all of the cells from all of the rows into a single txn
|
|
|
|
std::for_each(begin, end, [&deltas]
|
|
|
|
(const auto &delta)
|
2017-08-23 22:37:47 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
const auto &op(std::get<op>(delta));
|
|
|
|
const auto &row(std::get<row *>(delta));
|
|
|
|
std::for_each(std::begin(*row), std::end(*row), [&deltas, &op]
|
|
|
|
(auto &cell)
|
|
|
|
{
|
|
|
|
// For operations like DELETE which don't require a value in
|
|
|
|
// the delta, we can skip a potentially expensive load of the cell.
|
|
|
|
const auto value
|
|
|
|
{
|
|
|
|
value_required(op)? cell.val() : string_view{}
|
|
|
|
};
|
|
|
|
|
|
|
|
deltas.emplace_back(op, cell, value);
|
|
|
|
});
|
2017-08-23 22:37:47 +02:00
|
|
|
});
|
2017-04-03 06:02:32 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
// Commitment
|
|
|
|
write(&deltas.front(), &deltas.front() + deltas.size(), sopts);
|
2017-04-03 06:02:32 +02:00
|
|
|
}
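// Example (illustrative sketch; r is assumed to be a loaded db::row and
// sopts to default-construct): deleting every cell of the row in one atomic
// batch, either through db::del() above or its equivalent row::delta form.
//
//   const db::sopts sopts;
//   db::del(r, sopts);
//   // ...or, equivalently:
//   db::write(db::row::delta { db::op::DELETE, r }, sopts);
//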
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
// A developer can specifically use RB_DEBUG_DB_SEEK_ROW without RB_DEBUG_DB_SEEK
|
|
|
|
// to only see a report of the row seek as a whole. If RB_DEBUG_DB_SEEK is
|
|
|
|
// enabled that implies RB_DEBUG_DB_SEEK_ROW as well.
|
|
|
|
//
|
2019-03-14 00:11:29 +01:00
|
|
|
#if !defined(RB_DEBUG_DB_SEEK_ROW) && defined(RB_DEBUG_DB_SEEK)
|
2019-01-29 20:13:58 +01:00
|
|
|
#define RB_DEBUG_DB_SEEK_ROW
|
|
|
|
#endif
|
2017-08-30 23:05:15 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
size_t
|
|
|
|
ircd::db::seek(row &r,
|
|
|
|
const string_view &key,
|
|
|
|
const gopts &opts)
|
2017-04-03 06:02:32 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
// The following closure performs the seek() for a single cell in the row.
|
|
|
|
// It may be executed on another ircd::ctx if the data isn't cached and
|
|
|
|
// blocking IO is required. This frame can't be interrupted because it may
|
|
|
|
// have requests pending in the request pool which must synchronize back
|
|
|
|
// here.
|
|
|
|
size_t ret{0};
|
|
|
|
std::exception_ptr eptr;
|
|
|
|
ctx::latch latch{r.size()};
|
|
|
|
const ctx::uninterruptible ui;
|
|
|
|
const auto closure{[&opts, &latch, &ret, &key, &eptr]
|
|
|
|
(auto &cell) noexcept
|
|
|
|
{
|
|
|
|
// If there's a pending error from another cell by the time this
|
|
|
|
// closure is executed we don't perform the seek() unless the user
|
|
|
|
// specifies db::get::NO_THROW to suppress it.
|
|
|
|
if(!eptr || test(opts, get::NO_THROW)) try
|
|
|
|
{
|
|
|
|
if(!seek(cell, key))
|
|
|
|
{
|
|
|
|
// If the cell is not_found that's not a thrown exception here;
|
|
|
|
// the cell will just be !valid(). The user can specify
|
|
|
|
// get::THROW to propagate a not_found from the seek(row);
|
|
|
|
if(test(opts, get::THROW))
|
|
|
|
throw not_found
|
|
|
|
{
|
|
|
|
"column '%s' key '%s'", cell.col(), key
|
|
|
|
};
|
|
|
|
}
|
|
|
|
else ++ret;
|
|
|
|
}
|
|
|
|
catch(const not_found &e)
|
|
|
|
{
|
|
|
|
eptr = std::current_exception();
|
|
|
|
}
|
|
|
|
catch(const std::exception &e)
|
|
|
|
{
|
|
|
|
log::error
|
|
|
|
{
|
|
|
|
log, "row seek: column '%s' key '%s' :%s",
|
|
|
|
cell.col(),
|
|
|
|
key,
|
|
|
|
e.what()
|
|
|
|
};
|
2017-04-03 06:02:32 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
eptr = std::make_exception_ptr(e);
|
|
|
|
}
|
2017-09-14 21:59:20 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
// The latch must always be hit here. No exception should propagate
|
|
|
|
// to prevent this from being reached or beyond.
|
|
|
|
latch.count_down();
|
|
|
|
}};
|
2017-04-03 06:02:32 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
#ifdef RB_DEBUG_DB_SEEK_ROW
|
|
|
|
const ircd::timer timer;
|
|
|
|
size_t submits{0};
|
|
|
|
#endif
|
2017-04-03 06:02:32 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
// Submit all the requests
|
|
|
|
for(auto &cell : r)
|
|
|
|
{
|
|
|
|
db::column &column(cell);
|
|
|
|
const auto reclosure{[&closure, &cell]
|
|
|
|
() noexcept
|
|
|
|
{
|
|
|
|
closure(cell);
|
|
|
|
}};
|
2017-09-23 06:55:31 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
// Whether to submit the request to another ctx or execute it here.
|
|
|
|
// The explicit option to prevent submission must not be set. If there
|
|
|
|
// is a chance the data is already in the cache, we can avoid the
|
|
|
|
// context switching and occupation of the request pool.
|
|
|
|
//TODO: should check a bloom filter on the cache for this branch
|
|
|
|
//TODO: because right now double-querying the cache is gross.
|
|
|
|
const bool submit
|
|
|
|
{
|
|
|
|
r.size() > 1 &&
|
|
|
|
!test(opts, get::NO_PARALLEL) &&
|
|
|
|
!db::cached(column, key, opts)
|
|
|
|
};
|
2017-09-14 21:59:20 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
#ifdef RB_DEBUG_DB_SEEK_ROW
|
|
|
|
submits += submit;
|
|
|
|
#endif
|
2017-04-03 06:02:32 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
if(submit)
|
|
|
|
request(reclosure);
|
|
|
|
else
|
|
|
|
reclosure();
|
|
|
|
}
|
2017-03-31 00:57:08 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
// Wait for responses.
|
|
|
|
latch.wait();
|
|
|
|
assert(ret <= r.size());
|
2017-03-31 00:57:08 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
#ifdef RB_DEBUG_DB_SEEK_ROW
|
|
|
|
if(likely(!r.empty()))
|
|
|
|
{
|
|
|
|
const column &c(r[0]);
|
|
|
|
const database &d(c);
|
|
|
|
thread_local char tmbuf[32];
|
|
|
|
log::debug
|
|
|
|
{
|
|
|
|
log, "'%s' SEEK ROW seq:%lu:%-10lu cnt:%-2zu req:%-2zu ret:%-2zu in %s %s",
|
|
|
|
name(d),
|
|
|
|
sequence(d),
|
|
|
|
sequence(opts.snapshot),
|
|
|
|
r.size(),
|
|
|
|
submits,
|
|
|
|
ret,
|
|
|
|
pretty(tmbuf, timer.at<microseconds>(), true),
|
|
|
|
what(eptr)
|
|
|
|
};
|
|
|
|
}
|
|
|
|
#endif
|
2017-03-31 00:57:08 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
if(eptr && !test(opts, get::NO_THROW))
|
|
|
|
std::rethrow_exception(eptr);
|
|
|
|
|
|
|
|
return ret;
|
2017-03-31 00:57:08 +02:00
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
//
|
|
|
|
// row
|
|
|
|
//
|
|
|
|
#pragma GCC diagnostic push
|
|
|
|
#pragma GCC diagnostic ignored "-Wstack-usage="
|
2019-05-08 08:05:18 +02:00
|
|
|
__attribute__((stack_protect))
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::row::row(database &d,
|
|
|
|
const string_view &key,
|
|
|
|
const vector_view<const string_view> &colnames,
|
|
|
|
const vector_view<cell> &buf,
|
|
|
|
gopts opts)
|
|
|
|
:vector_view<cell>{[&d, &colnames, &buf, &opts]
|
2016-09-24 06:01:57 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
using std::end;
|
|
|
|
using std::begin;
|
2016-09-25 03:18:54 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
if(!opts.snapshot)
|
|
|
|
opts.snapshot = database::snapshot(d);
|
|
|
|
|
|
|
|
const rocksdb::ReadOptions options
|
2017-04-03 06:02:32 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
make_opts(opts)
|
|
|
|
};
|
|
|
|
|
|
|
|
assert(buf.size() >= colnames.size());
|
|
|
|
const size_t request_count
|
|
|
|
{
|
|
|
|
std::min(colnames.size(), buf.size())
|
|
|
|
};
|
|
|
|
|
|
|
|
size_t count(0);
|
|
|
|
database::column *colptr[request_count];
|
|
|
|
for(size_t i(0); i < request_count; ++i)
|
|
|
|
{
|
|
|
|
const auto cfid
|
|
|
|
{
|
|
|
|
d.cfid(std::nothrow, colnames.at(i))
|
|
|
|
};
|
|
|
|
|
|
|
|
if(cfid >= 0)
|
|
|
|
colptr[count++] = &d[cfid];
|
2017-04-03 06:02:32 +02:00
|
|
|
}
|
2017-03-31 00:57:08 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
// All pointers returned by rocksdb in this vector must be free'd.
|
|
|
|
const auto iterators
|
|
|
|
{
|
|
|
|
_make_iterators(d, colptr, count, options)
|
|
|
|
};
|
2019-01-16 04:03:03 +01:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
assert(iterators.size() == count);
|
|
|
|
for(size_t i(0); i < iterators.size(); ++i)
|
|
|
|
{
|
|
|
|
std::unique_ptr<rocksdb::Iterator> it
|
|
|
|
{
|
|
|
|
iterators.at(i)
|
|
|
|
};
|
2017-03-24 02:36:49 +01:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
buf[i] = cell
|
|
|
|
{
|
|
|
|
*colptr[i], std::move(it), opts
|
|
|
|
};
|
|
|
|
}
|
2017-03-31 00:57:08 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
return vector_view<cell>
|
|
|
|
{
|
|
|
|
buf.data(), iterators.size()
|
|
|
|
};
|
|
|
|
}()}
|
2017-08-23 22:37:47 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
if(key)
|
|
|
|
seek(*this, key, opts);
|
2017-08-23 22:37:47 +02:00
|
|
|
}
|
2019-01-29 20:13:58 +01:00
|
|
|
#pragma GCC diagnostic pop
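// Example (illustrative sketch; the database reference d, column names and
// key are hypothetical, and the arrays are assumed to convert to the
// vector_view parameters as elsewhere in ircd): materializing a row of cells
// across several columns for one key, using a caller-provided cell buffer as
// the constructor above requires.
//
//   const string_view colnames[] { "content", "origin", "type" };
//   db::cell buf[3];
//   db::row row
//   {
//       d, "event_id", colnames, buf, db::gopts{}
//   };
//
//   if(row.valid())
//       log::debug { log, "type: %s", string_view{row["type"]} };
//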
|
2017-08-23 22:37:47 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
static std::vector<rocksdb::Iterator *>
|
|
|
|
ircd::db::_make_iterators(database &d,
|
|
|
|
database::column *const *const &column,
|
|
|
|
const size_t &column_count,
|
|
|
|
const rocksdb::ReadOptions &opts)
|
2017-04-03 06:02:32 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
using rocksdb::Iterator;
|
|
|
|
using rocksdb::ColumnFamilyHandle;
|
|
|
|
assert(column_count <= d.columns.size());
|
|
|
|
|
|
|
|
//const ctx::critical_assertion ca;
|
|
|
|
// NewIterators() has been seen to lead to IO and block the ircd::ctx;
|
|
|
|
// specifically when background options are aggressive and shortly
|
|
|
|
// after db opens. It would be nice if we could maintain the
|
|
|
|
// critical_assertion for this function, as we could eliminate the
|
|
|
|
// vector allocation for ColumnFamilyHandle pointers.
|
|
|
|
|
|
|
|
std::vector<ColumnFamilyHandle *> handles(column_count);
|
|
|
|
std::transform(column, column + column_count, begin(handles), []
|
|
|
|
(database::column *const &ptr)
|
|
|
|
{
|
|
|
|
assert(ptr);
|
|
|
|
return ptr->handle.get();
|
|
|
|
});
|
|
|
|
|
|
|
|
std::vector<Iterator *> ret;
|
|
|
|
throw_on_error
|
|
|
|
{
|
|
|
|
d.d->NewIterators(opts, handles, &ret)
|
|
|
|
};
|
|
|
|
|
|
|
|
return ret;
|
2017-03-31 00:57:08 +02:00
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
void
|
|
|
|
ircd::db::row::operator()(const op &op,
|
|
|
|
const string_view &col,
|
|
|
|
const string_view &val,
|
|
|
|
const sopts &sopts)
|
2017-03-14 19:39:26 +01:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
write(cell::delta{op, (*this)[col], val}, sopts);
|
2017-03-14 19:39:26 +01:00
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::cell &
|
|
|
|
ircd::db::row::operator[](const string_view &column)
|
2017-03-14 19:39:26 +01:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
const auto it(find(column));
|
|
|
|
if(unlikely(it == end()))
|
|
|
|
throw not_found
|
|
|
|
{
|
|
|
|
"column '%s' not specified in the descriptor schema", column
|
|
|
|
};
|
2017-03-31 00:57:08 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
return *it;
|
2017-03-14 19:39:26 +01:00
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
const ircd::db::cell &
|
|
|
|
ircd::db::row::operator[](const string_view &column)
|
|
|
|
const
|
2017-03-14 19:39:26 +01:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
const auto it(find(column));
|
|
|
|
if(unlikely(it == end()))
|
|
|
|
throw not_found
|
|
|
|
{
|
|
|
|
"column '%s' not specified in the descriptor schema", column
|
|
|
|
};
|
2017-04-03 06:02:32 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
return *it;
|
2017-03-14 19:39:26 +01:00
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::row::iterator
|
|
|
|
ircd::db::row::find(const string_view &col)
|
2016-09-24 06:01:57 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
return std::find_if(std::begin(*this), std::end(*this), [&col]
|
|
|
|
(const auto &cell)
|
|
|
|
{
|
|
|
|
return name(cell.c) == col;
|
|
|
|
});
|
2017-03-24 02:36:49 +01:00
|
|
|
}
|
2017-03-14 19:39:26 +01:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::row::const_iterator
|
|
|
|
ircd::db::row::find(const string_view &col)
|
2017-04-03 06:02:32 +02:00
|
|
|
const
|
2017-03-31 00:57:08 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
return std::find_if(std::begin(*this), std::end(*this), [&col]
|
|
|
|
(const auto &cell)
|
|
|
|
{
|
|
|
|
return name(cell.c) == col;
|
|
|
|
});
|
2017-09-14 21:59:20 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::row::cached()
|
2017-09-14 21:59:20 +02:00
|
|
|
const
|
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
return std::all_of(std::begin(*this), std::end(*this), []
|
|
|
|
(const auto &cell)
|
|
|
|
{
|
|
|
|
db::column &column(const_cast<db::cell &>(cell));
|
|
|
|
return cell.valid() && db::cached(column, cell.key());
|
|
|
|
});
|
2017-04-03 06:02:32 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::row::cached(const string_view &key)
|
2017-04-03 06:02:32 +02:00
|
|
|
const
|
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
return std::all_of(std::begin(*this), std::end(*this), [&key]
|
|
|
|
(const auto &cell)
|
|
|
|
{
|
|
|
|
db::column &column(const_cast<db::cell &>(cell));
|
|
|
|
return db::cached(column, key);
|
|
|
|
});
|
2017-09-16 20:48:09 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::row::valid()
|
2017-09-16 20:48:09 +02:00
|
|
|
const
|
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
return std::any_of(std::begin(*this), std::end(*this), []
|
|
|
|
(const auto &cell)
|
|
|
|
{
|
|
|
|
return cell.valid();
|
|
|
|
});
|
2017-09-16 20:48:09 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::row::valid(const string_view &s)
|
2017-09-16 20:48:09 +02:00
|
|
|
const
|
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
return std::any_of(std::begin(*this), std::end(*this), [&s]
|
|
|
|
(const auto &cell)
|
|
|
|
{
|
|
|
|
return cell.valid(s);
|
|
|
|
});
|
2017-03-31 00:57:08 +02:00
|
|
|
}
|
|
|
|
|
2017-04-03 06:02:32 +02:00
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
2017-03-31 00:57:08 +02:00
|
|
|
//
|
2019-01-29 20:13:58 +01:00
|
|
|
// db/cell.h
|
2017-03-31 00:57:08 +02:00
|
|
|
//
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
uint64_t
|
|
|
|
ircd::db::sequence(const cell &c)
|
2019-01-24 19:52:05 +01:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
const database::snapshot &ss(c);
|
|
|
|
return sequence(database::snapshot(c));
|
2019-01-24 19:52:05 +01:00
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
const std::string &
|
|
|
|
ircd::db::name(const cell &c)
|
2017-08-23 22:37:47 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
return name(c.c);
|
2017-08-23 22:37:47 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::write(const cell::delta &delta,
|
2017-08-23 22:37:47 +02:00
|
|
|
const sopts &sopts)
|
|
|
|
{
|
|
|
|
write(&delta, &delta + 1, sopts);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
ircd::db::write(const sopts &sopts,
|
2019-01-29 20:13:58 +01:00
|
|
|
const std::initializer_list<cell::delta> &deltas)
|
2017-08-23 22:37:47 +02:00
|
|
|
{
|
|
|
|
write(deltas, sopts);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::write(const std::initializer_list<cell::delta> &deltas,
|
2017-08-23 22:37:47 +02:00
|
|
|
const sopts &sopts)
|
|
|
|
{
|
|
|
|
write(std::begin(deltas), std::end(deltas), sopts);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::write(const cell::delta *const &begin,
|
|
|
|
const cell::delta *const &end,
|
2017-08-23 22:37:47 +02:00
|
|
|
const sopts &sopts)
|
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
if(begin == end)
|
|
|
|
return;
|
2017-08-23 22:37:47 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
// Find the database through one of the cell's columns. cell::deltas
|
|
|
|
// may come from different columns so we do nothing else with this.
|
|
|
|
auto &front(*begin);
|
|
|
|
column &c(std::get<cell *>(front)->c);
|
|
|
|
database &d(c);
|
2017-08-23 22:37:47 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
rocksdb::WriteBatch batch;
|
|
|
|
std::for_each(begin, end, [&batch]
|
|
|
|
(const cell::delta &delta)
|
2017-08-23 22:37:47 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
append(batch, delta);
|
2017-08-23 22:37:47 +02:00
|
|
|
});
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
commit(d, batch, sopts);
|
2017-08-23 22:37:47 +02:00
|
|
|
}
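// Example (illustrative sketch; cell_a and cell_b are assumed to be cells
// already constructed over columns of the same database): batching
// heterogeneous cell writes into a single WriteBatch commit via the range
// overload above.
//
//   const db::cell::delta deltas[]
//   {
//       db::cell::delta { db::op::SET,    cell_a, "new value"   },
//       db::cell::delta { db::op::DELETE, cell_b, string_view{} },
//   };
//
//   db::write(deltas, deltas + 2, db::sopts{});
//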
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
template<class pos>
|
|
|
|
bool
|
|
|
|
ircd::db::seek(cell &c,
|
|
|
|
const pos &p,
|
|
|
|
gopts opts)
|
2018-08-19 05:59:28 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
column &cc(c);
|
|
|
|
database::column &dc(cc);
|
2018-05-29 10:42:48 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
if(!opts.snapshot)
|
|
|
|
opts.snapshot = c.ss;
|
2018-12-24 23:33:35 +01:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
const auto ropts(make_opts(opts));
|
|
|
|
return seek(dc, p, ropts, c.it);
|
2017-08-30 23:05:15 +02:00
|
|
|
}
|
2019-01-29 20:13:58 +01:00
|
|
|
template bool ircd::db::seek<ircd::db::pos>(cell &, const pos &, gopts);
|
|
|
|
template bool ircd::db::seek<ircd::string_view>(cell &, const string_view &, gopts);
|
2017-08-30 23:05:15 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
// Linkage for incomplete rocksdb::Iterator
|
|
|
|
ircd::db::cell::cell()
|
2017-03-24 02:36:49 +01:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
}
|
2017-08-23 22:37:47 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::cell::cell(database &d,
|
|
|
|
const string_view &colname,
|
|
|
|
const gopts &opts)
|
|
|
|
:cell
|
|
|
|
{
|
|
|
|
column(d[colname]), std::unique_ptr<rocksdb::Iterator>{}, opts
|
|
|
|
}
|
|
|
|
{
|
|
|
|
}
|
2017-04-03 06:02:32 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::cell::cell(database &d,
|
|
|
|
const string_view &colname,
|
|
|
|
const string_view &index,
|
|
|
|
const gopts &opts)
|
|
|
|
:cell
|
|
|
|
{
|
|
|
|
column(d[colname]), index, opts
|
|
|
|
}
|
|
|
|
{
|
|
|
|
}
|
2017-04-03 06:02:32 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::cell::cell(column column,
|
|
|
|
const string_view &index,
|
|
|
|
const gopts &opts)
|
|
|
|
:c{std::move(column)}
|
|
|
|
,ss{opts.snapshot}
|
|
|
|
,it
|
|
|
|
{
|
|
|
|
!index.empty()?
|
|
|
|
seek(this->c, index, opts):
|
|
|
|
std::unique_ptr<rocksdb::Iterator>{}
|
|
|
|
}
|
|
|
|
{
|
|
|
|
if(bool(this->it))
|
|
|
|
if(!valid_eq(*this->it, index))
|
|
|
|
this->it.reset();
|
|
|
|
}
|
2019-01-24 19:52:05 +01:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::cell::cell(column column,
|
|
|
|
const string_view &index,
|
|
|
|
std::unique_ptr<rocksdb::Iterator> it,
|
|
|
|
const gopts &opts)
|
|
|
|
:c{std::move(column)}
|
|
|
|
,ss{opts.snapshot}
|
|
|
|
,it{std::move(it)}
|
|
|
|
{
|
|
|
|
if(index.empty())
|
|
|
|
return;
|
2017-04-03 06:02:32 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
seek(*this, index, opts);
|
|
|
|
if(!valid_eq(*this->it, index))
|
|
|
|
this->it.reset();
|
|
|
|
}
|
2019-01-24 19:52:05 +01:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::cell::cell(column column,
|
|
|
|
std::unique_ptr<rocksdb::Iterator> it,
|
|
|
|
const gopts &opts)
|
|
|
|
:c{std::move(column)}
|
|
|
|
,ss{opts.snapshot}
|
|
|
|
,it{std::move(it)}
|
|
|
|
{
|
|
|
|
}
|
2019-01-18 00:33:20 +01:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
// Linkage for incomplete rocksdb::Iterator
|
|
|
|
ircd::db::cell::cell(cell &&o)
|
|
|
|
noexcept
|
|
|
|
:c{std::move(o.c)}
|
|
|
|
,ss{std::move(o.ss)}
|
|
|
|
,it{std::move(o.it)}
|
|
|
|
{
|
|
|
|
}
|
2018-08-19 05:59:28 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
// Linkage for incomplete rocksdb::Iterator
|
|
|
|
ircd::db::cell &
|
|
|
|
ircd::db::cell::operator=(cell &&o)
|
|
|
|
noexcept
|
2019-01-24 19:52:05 +01:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
c = std::move(o.c);
|
|
|
|
ss = std::move(o.ss);
|
|
|
|
it = std::move(o.it);
|
|
|
|
|
|
|
|
return *this;
|
2017-04-03 06:02:32 +02:00
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
// Linkage for incomplete rocksdb::Iterator
|
|
|
|
ircd::db::cell::~cell()
|
|
|
|
noexcept
|
2019-01-24 19:52:05 +01:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
}
|
2019-01-24 19:52:05 +01:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
bool
|
|
|
|
ircd::db::cell::load(const string_view &index,
|
|
|
|
gopts opts)
|
|
|
|
{
|
|
|
|
database &d(c);
|
|
|
|
if(valid(index) && !opts.snapshot && sequence(ss) == sequence(d))
|
|
|
|
return true;
|
2019-01-24 19:52:05 +01:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
if(bool(opts.snapshot))
|
2019-01-24 19:52:05 +01:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
this->it.reset();
|
|
|
|
this->ss = std::move(opts.snapshot);
|
|
|
|
}
|
2019-01-24 19:52:05 +01:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
database::column &c(this->c);
|
|
|
|
if(!seek(c, index, opts, this->it))
|
|
|
|
return false;
|
2019-01-24 19:52:05 +01:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
return valid(index);
|
2019-01-24 19:52:05 +01:00
|
|
|
}
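// Example (illustrative sketch; the database reference d, column and key
// are hypothetical): standalone use of a cell — construct it over a column,
// seek the key, and read the value; an invalid cell after construction or
// load() simply means the key was not found.
//
//   db::cell cell
//   {
//       d, "events", "event_id", db::gopts{}
//   };
//
//   if(cell.valid())
//       log::debug { log, "value: %s", cell.val() };
//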
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::cell &
ircd::db::cell::operator=(const string_view &s)
{
	write(c, key(), s);
	return *this;
}

void
ircd::db::cell::operator()(const op &op,
                           const string_view &val,
                           const sopts &sopts)
{
	write(cell::delta{op, *this, val}, sopts);
}

ircd::db::cell::operator
string_view()
{
	return val();
}

ircd::db::cell::operator
string_view()
const
{
	return val();
}

ircd::string_view
ircd::db::cell::val()
{
	if(!valid())
		load();

	return likely(valid())? db::val(*it) : string_view{};
}

ircd::string_view
ircd::db::cell::key()
{
	if(!valid())
		load();

	return likely(valid())? db::key(*it) : string_view{};
}

ircd::string_view
ircd::db::cell::val()
const
{
	return likely(valid())? db::val(*it) : string_view{};
}

ircd::string_view
ircd::db::cell::key()
const
{
	return likely(valid())? db::key(*it) : string_view{};
}

bool
ircd::db::cell::valid(const string_view &s)
const
{
	return valid() && db::valid_eq(*it, s);
}

bool
ircd::db::cell::valid_gt(const string_view &s)
const
{
	return valid() && db::valid_gt(*it, s);
}

bool
ircd::db::cell::valid_lte(const string_view &s)
const
{
	return valid() && db::valid_lte(*it, s);
}

bool
ircd::db::cell::valid()
const
{
	return bool(it) && db::valid(*it);
}

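// Usage sketch (illustration only, not part of the original source): a
// db::cell pairs a column with one key, lazily loading an iterator for reads
// and issuing a write on assignment. The column name "events" and the key
// "$event_id" below are hypothetical placeholders.
#if 0
static void
example_cell_usage(ircd::db::database &d)
{
	using namespace ircd;

	db::column events
	{
		d, "events"
	};

	db::cell cell
	{
		events, "$event_id", db::gopts{}
	};

	// Read access converts to a string_view of the value (empty if missing).
	const string_view value(cell);

	// Assignment writes a new value for this cell's key.
	cell = "new value";
}
#endif
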
///////////////////////////////////////////////////////////////////////////////
//
// db/index.h
//

const ircd::db::gopts
ircd::db::domain::applied_opts
{
	{ get::PREFIX }
};

bool
ircd::db::seek(domain::const_iterator_base &it,
               const pos &p)
{
	switch(p)
	{
		case pos::BACK:
		{
			// This is inefficient as per RocksDB's prefix impl. unknown why
			// a seek to NEXT is still needed after walking back one.
			while(seek(it, pos::NEXT));
			if(seek(it, pos::PREV))
				seek(it, pos::NEXT);

			return bool(it);
		}

		default:
			break;
	}

	it.opts |= domain::applied_opts;
	return seek(static_cast<column::const_iterator_base &>(it), p);
}

bool
ircd::db::seek(domain::const_iterator_base &it,
               const string_view &p)
{
	it.opts |= domain::applied_opts;
	return seek(static_cast<column::const_iterator_base &>(it), p);
}

ircd::db::domain::const_iterator
ircd::db::domain::begin(const string_view &key,
                        gopts opts)
{
	const_iterator ret
	{
		c, {}, std::move(opts)
	};

	seek(ret, key);
	return ret;
}

ircd::db::domain::const_iterator
ircd::db::domain::end(const string_view &key,
                      gopts opts)
{
	const_iterator ret
	{
		c, {}, std::move(opts)
	};

	if(seek(ret, key))
		seek(ret, pos::END);

	return ret;
}

/// NOTE: RocksDB says they don't support reverse iteration over a prefix range.
/// This means we have to forward scan to the end and then walk back! Reverse
/// iterations of a domain should only be used for debugging and statistics! The
/// domain should be ordered the way it will be primarily accessed using the
/// comparator. If it will be accessed in different directions, make another
/// domain column.
ircd::db::domain::const_reverse_iterator
ircd::db::domain::rbegin(const string_view &key,
                         gopts opts)
{
	const_reverse_iterator ret
	{
		c, {}, std::move(opts)
	};

	if(seek(ret, key))
		seek(ret, pos::BACK);

	return ret;
}

ircd::db::domain::const_reverse_iterator
ircd::db::domain::rend(const string_view &key,
                       gopts opts)
{
	const_reverse_iterator ret
	{
		c, {}, std::move(opts)
	};

	if(seek(ret, key))
		seek(ret, pos::END);

	return ret;
}

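// Usage sketch (illustration only): forward iteration over all keys sharing a
// prefix in a domain column, which is the intended access pattern per the note
// above. The domain handle and the prefix "!room_id:example.org" are
// hypothetical placeholders.
#if 0
static void
example_domain_iteration(ircd::db::domain &room_events)
{
	using namespace ircd;

	for(auto it(room_events.begin("!room_id:example.org", {})); bool(it); ++it)
	{
		// The prefix is already stripped from it->first by
		// domain::const_iterator_base::operator*() further below.
		const string_view &key{it->first};
		const string_view &val{it->second};
	}
}
#endif
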
//
// const_iterator
//

ircd::db::domain::const_iterator &
ircd::db::domain::const_iterator::operator--()
{
	if(likely(bool(*this)))
		seek(*this, pos::PREV);
	else
		seek(*this, pos::BACK);

	return *this;
}

ircd::db::domain::const_iterator &
ircd::db::domain::const_iterator::operator++()
{
	if(likely(bool(*this)))
		seek(*this, pos::NEXT);
	else
		seek(*this, pos::FRONT);

	return *this;
}

ircd::db::domain::const_reverse_iterator &
ircd::db::domain::const_reverse_iterator::operator--()
{
	if(likely(bool(*this)))
		seek(*this, pos::NEXT);
	else
		seek(*this, pos::FRONT);

	return *this;
}

ircd::db::domain::const_reverse_iterator &
ircd::db::domain::const_reverse_iterator::operator++()
{
	if(likely(bool(*this)))
		seek(*this, pos::PREV);
	else
		seek(*this, pos::BACK);

	return *this;
}

|
|
|
const ircd::db::domain::const_iterator_base::value_type &
|
|
|
|
ircd::db::domain::const_iterator_base::operator*()
|
2019-02-07 01:38:42 +01:00
|
|
|
const
|
|
|
|
{
|
|
|
|
const auto &prefix
|
|
|
|
{
|
|
|
|
describe(*c).prefix
|
|
|
|
};
|
|
|
|
|
|
|
|
// Fetch the full value like a standard column first
|
|
|
|
column::const_iterator_base::operator*();
|
|
|
|
string_view &key{val.first};
|
|
|
|
|
2019-06-11 21:55:14 +02:00
|
|
|
// When there's no prefixing this domain column is just
|
2019-02-07 01:38:42 +01:00
|
|
|
// like a normal column. Otherwise, we remove the prefix
|
|
|
|
// from the key the user will end up seeing.
|
|
|
|
if(prefix.has && prefix.has(key))
|
|
|
|
{
|
|
|
|
const auto &first(prefix.get(key));
|
|
|
|
const auto &second(key.substr(first.size()));
|
|
|
|
key = second;
|
|
|
|
}
|
|
|
|
|
|
|
|
return val;
|
|
|
|
}
|
|
|
|
|
2019-06-11 21:55:14 +02:00
|
|
|
const ircd::db::domain::const_iterator_base::value_type *
|
|
|
|
ircd::db::domain::const_iterator_base::operator->()
|
2019-02-07 01:38:42 +01:00
|
|
|
const
|
|
|
|
{
|
|
|
|
return &this->operator*();
|
|
|
|
}
|
|
|
|
|
|
|
|
///////////////////////////////////////////////////////////////////////////////
//
// db/column.h
//

std::string
ircd::db::read(column &column,
               const string_view &key,
               const gopts &gopts)
{
	std::string ret;
	const auto closure([&ret]
	(const string_view &src)
	{
		ret.assign(begin(src), end(src));
	});

	column(key, closure, gopts);
	return ret;
}

ircd::string_view
ircd::db::read(column &column,
               const string_view &key,
               const mutable_buffer &buf,
               const gopts &gopts)
{
	string_view ret;
	const auto closure([&ret, &buf]
	(const string_view &src)
	{
		ret = { data(buf), copy(buf, src) };
	});

	column(key, closure, gopts);
	return ret;
}

std::string
ircd::db::read(column &column,
               const string_view &key,
               bool &found,
               const gopts &gopts)
{
	std::string ret;
	const auto closure([&ret]
	(const string_view &src)
	{
		ret.assign(begin(src), end(src));
	});

	found = column(key, std::nothrow, closure, gopts);
	return ret;
}

ircd::string_view
ircd::db::read(column &column,
               const string_view &key,
               bool &found,
               const mutable_buffer &buf,
               const gopts &gopts)
{
	string_view ret;
	const auto closure([&buf, &ret]
	(const string_view &src)
	{
		ret = { data(buf), copy(buf, src) };
	});

	found = column(key, std::nothrow, closure, gopts);
	return ret;
}

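// Usage sketch (illustration only): the read() overloads above either copy
// the value into a std::string or into a caller-supplied buffer; the
// overloads taking a `found` flag do not throw on a missing key. The column
// handle and key "@alice:example.org" are hypothetical placeholders.
#if 0
static void
example_read(ircd::db::column &accounts)
{
	using namespace ircd;

	// Copying read; throws if the key does not exist.
	const std::string copied
	{
		db::read(accounts, "@alice:example.org", db::gopts{})
	};

	// Read into a caller-supplied buffer; `found` reports presence.
	char buf[512];
	bool found;
	const string_view value
	{
		db::read(accounts, "@alice:example.org", found, mutable_buffer{buf}, db::gopts{})
	};
}
#endif
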
rocksdb::Cache *
ircd::db::cache(column &column)
{
	database::column &c(column);
	return c.table_opts.block_cache.get();
}

rocksdb::Cache *
ircd::db::cache_compressed(column &column)
{
	database::column &c(column);
	return c.table_opts.block_cache_compressed.get();
}

const rocksdb::Cache *
ircd::db::cache(const column &column)
{
	const database::column &c(column);
	return c.table_opts.block_cache.get();
}

const rocksdb::Cache *
ircd::db::cache_compressed(const column &column)
{
	const database::column &c(column);
	return c.table_opts.block_cache_compressed.get();
}

template<>
ircd::db::prop_str
ircd::db::property(const column &column,
                   const string_view &name)
{
	std::string ret;
	database::column &c(const_cast<db::column &>(column));
	database &d(const_cast<db::column &>(column));
	if(!d.d->GetProperty(c, slice(name), &ret))
		throw not_found
		{
			"property '%s' for column '%s' in '%s' not found.",
			name,
			db::name(column),
			db::name(d)
		};

	return ret;
}

template<>
ircd::db::prop_int
ircd::db::property(const column &column,
                   const string_view &name)
{
	uint64_t ret(0);
	database::column &c(const_cast<db::column &>(column));
	database &d(const_cast<db::column &>(column));
	if(!d.d->GetIntProperty(c, slice(name), &ret))
		throw not_found
		{
			"property '%s' for column '%s' in '%s' not found or not an integer.",
			name,
			db::name(column),
			db::name(d)
		};

	return ret;
}

template<>
ircd::db::prop_map
ircd::db::property(const column &column,
                   const string_view &name)
{
	std::map<std::string, std::string> ret;
	database::column &c(const_cast<db::column &>(column));
	database &d(const_cast<db::column &>(column));
	if(!d.d->GetMapProperty(c, slice(name), &ret))
		ret.emplace(std::string{name}, property<std::string>(column, name));

	return ret;
}

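// Usage sketch (illustration only): querying RocksDB properties through the
// specializations above. "rocksdb.estimate-num-keys" and "rocksdb.stats" are
// standard RocksDB property names; the column handle is a hypothetical
// placeholder.
#if 0
static void
example_property(const ircd::db::column &column)
{
	using namespace ircd;

	// Integer-valued property.
	const auto keys
	{
		db::property<db::prop_int>(column, "rocksdb.estimate-num-keys")
	};

	// String-valued property; throws not_found if unknown.
	const auto stats
	{
		db::property<db::prop_str>(column, "rocksdb.stats")
	};
}
#endif
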
ircd::db::options
ircd::db::getopt(const column &column)
{
	database &d(const_cast<db::column &>(column));
	database::column &c(const_cast<db::column &>(column));
	return options
	{
		static_cast<rocksdb::ColumnFamilyOptions>(d.d->GetOptions(c))
	};
}

size_t
ircd::db::bytes(const column &column)
{
	rocksdb::ColumnFamilyMetaData cfm;
	database &d(const_cast<db::column &>(column));
	database::column &c(const_cast<db::column &>(column));
	assert(bool(c.handle));
	d.d->GetColumnFamilyMetaData(c.handle.get(), &cfm);
	return cfm.size;
}

size_t
ircd::db::file_count(const column &column)
{
	rocksdb::ColumnFamilyMetaData cfm;
	database &d(const_cast<db::column &>(column));
	database::column &c(const_cast<db::column &>(column));
	assert(bool(c.handle));
	d.d->GetColumnFamilyMetaData(c.handle.get(), &cfm);
	return cfm.file_count;
}

uint32_t
ircd::db::id(const column &column)
{
	const database::column &c(column);
	return id(c);
}

const std::string &
ircd::db::name(const column &column)
{
	const database::column &c(column);
	return name(c);
}

const ircd::db::descriptor &
ircd::db::describe(const column &column)
{
	const database::column &c(column);
	return describe(c);
}

std::vector<std::string>
ircd::db::files(const column &column)
{
	database::column &c(const_cast<db::column &>(column));
	database &d(*c.d);

	rocksdb::ColumnFamilyMetaData cfmd;
	d.d->GetColumnFamilyMetaData(c, &cfmd);

	size_t count(0);
	for(const auto &level : cfmd.levels)
		count += level.files.size();

	std::vector<std::string> ret;
	ret.reserve(count);
	for(auto &level : cfmd.levels)
		for(auto &file : level.files)
			ret.emplace_back(std::move(file.name));

	return ret;
}

void
ircd::db::drop(column &column)
{
	database::column &c(column);
	drop(c);
}

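// Usage sketch (illustration only): the metadata accessors above summarize a
// column's on-disk footprint; the column handle is a hypothetical placeholder.
#if 0
static void
example_column_stats(const ircd::db::column &column)
{
	const auto total_bytes{ircd::db::bytes(column)};
	const auto sst_files{ircd::db::file_count(column)};
	const auto &column_name{ircd::db::name(column)};
}
#endif
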
void
ircd::db::sort(column &column,
               const bool &blocking,
               const bool &now)
{
	database::column &c(column);
	database &d(*c.d);

	rocksdb::FlushOptions opts;
	opts.wait = blocking;
	opts.allow_write_stall = now;

	const ctx::uninterruptible::nothrow ui;
	const std::lock_guard lock{write_mutex};
	log::debug
	{
		log, "'%s':'%s' @%lu FLUSH (sort) %s %s",
		name(d),
		name(c),
		sequence(d),
		blocking? "blocking"_sv: "non-blocking"_sv,
		now? "now"_sv: "later"_sv
	};

	throw_on_error
	{
		d.d->Flush(opts, c)
	};
}

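// Usage sketch (illustration only): sort() flushes a column's memtable into a
// sorted file; the first argument selects a blocking flush, the second permits
// a write stall. The column handle is a hypothetical placeholder.
#if 0
static void
example_flush(ircd::db::column &column)
{
	// Block until the memtable has been written out, allowing a stall.
	ircd::db::sort(column, true, true);
}
#endif
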
void
ircd::db::compact(column &column,
                  const std::pair<int, int> &level,
                  const compactor &cb)
{
	database::column &c(column);
	database &d(*c.d);

	const auto &dst_level{level.second};
	const auto &src_level{level.first};

	rocksdb::ColumnFamilyMetaData cfmd;
	d.d->GetColumnFamilyMetaData(c, &cfmd);
	for(const auto &level : cfmd.levels)
	{
		if(src_level != -1 && src_level != level.level)
			continue;

		if(level.files.empty())
			continue;

		const ctx::uninterruptible ui;
		const std::lock_guard lock
		{
			write_mutex
		};

		const auto &to_level
		{
			dst_level > -1? dst_level : level.level
		};

		rocksdb::CompactionOptions opts;
		opts.output_file_size_limit = 1_GiB; //TODO: conf

		// RocksDB sez that setting this to Disable means that the column's
		// compression options are read instead. If we don't set this here,
		// rocksdb defaults to "snappy" (which is strange).
		opts.compression = rocksdb::kDisableCompressionOption;

		std::vector<std::string> files(level.files.size());
		std::transform(level.files.begin(), level.files.end(), files.begin(), []
		(auto &metadata)
		{
			return std::move(metadata.name);
		});

		// Save and restore the existing filter callback so we can allow our
		// caller to use theirs. Note that this manual compaction should be
		// exclusive for this column (no background compaction should be
		// occurring, at least not one relying on this filter).
		auto their_filter(std::move(c.cfilter.user));
		const unwind unfilter{[&c, &their_filter]
		{
			c.cfilter.user = std::move(their_filter);
		}};

		c.cfilter.user = cb;

		log::debug
		{
			log, "'%s':'%s' COMPACT L%d -> L%d files:%zu size:%zu",
			name(d),
			name(c),
			level.level,
			to_level,
			level.files.size(),
			level.size
		};

		throw_on_error
		{
			d.d->CompactFiles(opts, c, files, to_level)
		};
	}
}

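// Usage sketch (illustration only): compacting every populated level of a
// column onto itself. A level pair of {-1, -1} selects all source levels and
// keeps each level's own target; a default-constructed compactor runs the
// compaction without a user-supplied filter callback. The column handle is a
// hypothetical placeholder.
#if 0
static void
example_compact(ircd::db::column &column)
{
	ircd::db::compact(column, {-1, -1}, {});
}
#endif
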
void
|
|
|
|
ircd::db::compact(column &column,
|
|
|
|
const std::pair<string_view, string_view> &range,
|
2018-09-21 08:30:00 +02:00
|
|
|
const int &to_level,
|
2018-09-20 01:36:01 +02:00
|
|
|
const compactor &cb)
|
2018-05-11 01:41:34 +02:00
|
|
|
{
|
2018-08-29 06:53:17 +02:00
|
|
|
database &d(column);
|
2018-05-11 01:41:34 +02:00
|
|
|
database::column &c(column);
|
2018-12-29 02:14:19 +01:00
|
|
|
const ctx::uninterruptible ui;
|
2018-05-11 01:41:34 +02:00
|
|
|
|
|
|
|
const auto begin(slice(range.first));
|
|
|
|
const rocksdb::Slice *const b
|
|
|
|
{
|
|
|
|
empty(range.first)? nullptr : &begin
|
|
|
|
};
|
|
|
|
|
|
|
|
const auto end(slice(range.second));
|
|
|
|
const rocksdb::Slice *const e
|
|
|
|
{
|
|
|
|
empty(range.second)? nullptr : &end
|
|
|
|
};
|
|
|
|
|
2018-08-29 06:53:17 +02:00
|
|
|
rocksdb::CompactRangeOptions opts;
|
2018-12-13 00:59:07 +01:00
|
|
|
opts.exclusive_manual_compaction = true;
|
2018-08-29 06:53:17 +02:00
|
|
|
opts.allow_write_stall = true;
|
2018-12-13 00:59:07 +01:00
|
|
|
opts.change_level = true;
|
|
|
|
opts.target_level = std::max(to_level, -1);
|
|
|
|
opts.bottommost_level_compaction = rocksdb::BottommostLevelCompaction::kForce;
|
2018-08-29 06:53:17 +02:00
|
|
|
|
2018-09-20 01:36:01 +02:00
|
|
|
// Save and restore the existing filter callback so we can allow our
|
|
|
|
// caller to use theirs. Note that this manual compaction should be
|
|
|
|
// exclusive for this column (no background compaction should be
|
|
|
|
// occurring, at least one relying on this filter).
|
|
|
|
auto their_filter(std::move(c.cfilter.user));
|
|
|
|
const unwind unfilter{[&c, &their_filter]
|
|
|
|
{
|
|
|
|
c.cfilter.user = std::move(their_filter);
|
|
|
|
}};
|
|
|
|
|
|
|
|
c.cfilter.user = cb;
|
|
|
|
|
2018-05-11 01:41:34 +02:00
|
|
|
log::debug
|
|
|
|
{
|
2018-12-13 01:51:45 +01:00
|
|
|
log, "'%s':'%s' @%lu COMPACT [%s, %s] -> L:%d (Lmax:%d Lbase:%d)",
|
2018-05-11 01:41:34 +02:00
|
|
|
name(d),
|
|
|
|
name(c),
|
|
|
|
sequence(d),
|
|
|
|
range.first,
|
|
|
|
range.second,
|
2018-12-13 01:51:45 +01:00
|
|
|
opts.target_level,
|
|
|
|
d.d->NumberLevels(c),
|
|
|
|
d.d->MaxMemCompactionLevel(c),
|
2018-05-11 01:41:34 +02:00
|
|
|
};
|
|
|
|
|
|
|
|
throw_on_error
|
|
|
|
{
|
|
|
|
d.d->CompactRange(opts, c, b, e)
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
2018-04-20 23:10:59 +02:00
|
|
|
void
|
|
|
|
ircd::db::setopt(column &column,
|
|
|
|
const string_view &key,
|
|
|
|
const string_view &val)
|
|
|
|
{
|
2018-08-29 06:53:17 +02:00
|
|
|
database &d(column);
|
|
|
|
database::column &c(column);
|
2018-04-20 23:10:59 +02:00
|
|
|
const std::unordered_map<std::string, std::string> options
|
|
|
|
{
|
|
|
|
{ std::string{key}, std::string{val} }
|
|
|
|
};
|
|
|
|
|
|
|
|
throw_on_error
|
|
|
|
{
|
|
|
|
d.d->SetOptions(c, options)
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
2018-09-22 00:08:57 +02:00
|
|
|
void
|
|
|
|
ircd::db::ingest(column &column,
|
|
|
|
const string_view &path)
|
|
|
|
{
|
|
|
|
database &d(column);
|
|
|
|
database::column &c(column);
|
|
|
|
|
|
|
|
rocksdb::IngestExternalFileOptions opts;
|
2018-09-22 23:54:04 +02:00
|
|
|
opts.allow_global_seqno = true;
|
2018-09-23 00:16:45 +02:00
|
|
|
opts.allow_blocking_flush = true;
|
2018-09-22 00:08:57 +02:00
|
|
|
|
2018-09-22 23:54:04 +02:00
|
|
|
// Automatically determine if we can avoid issuing new sequence
|
|
|
|
// numbers by considering this ingestion as "backfill" of missing
|
|
|
|
// data which did actually exist but was physically removed.
|
|
|
|
const auto &copts{d.d->GetOptions(c)};
|
|
|
|
opts.ingest_behind = copts.allow_ingest_behind;
|
|
|
|
|
2018-09-22 00:08:57 +02:00
|
|
|
const std::vector<std::string> files
|
|
|
|
{
|
|
|
|
{ std::string{path} }
|
|
|
|
};
|
|
|
|
|
2019-03-02 21:33:32 +01:00
|
|
|
const std::lock_guard lock{write_mutex};
|
2018-09-22 00:08:57 +02:00
|
|
|
const ctx::uninterruptible::nothrow ui;
|
|
|
|
throw_on_error
|
|
|
|
{
|
|
|
|
d.d->IngestExternalFile(c, files, opts)
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
2019-01-25 21:32:04 +01:00
|
|
|
void
|
|
|
|
ircd::db::del(column &column,
|
|
|
|
const std::pair<string_view, string_view> &range,
|
|
|
|
const sopts &sopts)
|
|
|
|
{
|
|
|
|
database &d(column);
|
|
|
|
database::column &c(column);
|
|
|
|
auto opts(make_opts(sopts));
|
|
|
|
|
2019-03-02 21:33:32 +01:00
|
|
|
const std::lock_guard lock{write_mutex};
|
2019-01-25 21:32:04 +01:00
|
|
|
const ctx::uninterruptible::nothrow ui;
|
|
|
|
log::debug
|
|
|
|
{
|
|
|
|
log, "'%s' %lu '%s' RANGE DELETE",
|
|
|
|
name(d),
|
|
|
|
sequence(d),
|
|
|
|
name(c),
|
|
|
|
};
|
|
|
|
|
|
|
|
throw_on_error
|
|
|
|
{
|
|
|
|
d.d->DeleteRange(opts, c, slice(range.first), slice(range.second))
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
2016-09-24 06:01:57 +02:00
|
|
|
void
|
2017-03-31 00:57:08 +02:00
|
|
|
ircd::db::del(column &column,
|
|
|
|
const string_view &key,
|
|
|
|
const sopts &sopts)
|
2016-09-24 06:01:57 +02:00
|
|
|
{
|
2018-08-20 04:30:17 +02:00
|
|
|
database &d(column);
|
2018-08-29 06:53:17 +02:00
|
|
|
database::column &c(column);
|
|
|
|
auto opts(make_opts(sopts));
|
|
|
|
|
2019-03-02 21:33:32 +01:00
|
|
|
const std::lock_guard lock{write_mutex};
|
2018-08-29 06:53:17 +02:00
|
|
|
const ctx::uninterruptible::nothrow ui;
|
2018-08-20 04:30:17 +02:00
|
|
|
log::debug
|
|
|
|
{
|
|
|
|
log, "'%s' %lu '%s' DELETE key(%zu B)",
|
|
|
|
name(d),
|
|
|
|
sequence(d),
|
|
|
|
name(c),
|
|
|
|
key.size()
|
|
|
|
};
|
2016-09-24 06:01:57 +02:00
|
|
|
|
2017-04-03 06:02:32 +02:00
|
|
|
throw_on_error
|
|
|
|
{
|
|
|
|
d.d->Delete(opts, c, slice(key))
|
|
|
|
};
|
2016-09-25 06:12:43 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2017-03-31 00:57:08 +02:00
|
|
|
ircd::db::write(column &column,
|
|
|
|
const string_view &key,
|
2018-04-30 16:18:06 +02:00
|
|
|
const const_buffer &val,
|
2016-09-25 06:12:43 +02:00
|
|
|
const sopts &sopts)
|
|
|
|
{
|
2018-08-20 04:30:17 +02:00
|
|
|
database &d(column);
|
2018-08-29 06:53:17 +02:00
|
|
|
database::column &c(column);
|
|
|
|
auto opts(make_opts(sopts));
|
|
|
|
|
2019-03-02 21:33:32 +01:00
|
|
|
const std::lock_guard lock{write_mutex};
|
2018-08-29 06:53:17 +02:00
|
|
|
const ctx::uninterruptible::nothrow ui;
|
2018-08-20 04:30:17 +02:00
|
|
|
log::debug
|
|
|
|
{
|
|
|
|
log, "'%s' %lu '%s' PUT key(%zu B) val(%zu B)",
|
|
|
|
name(d),
|
|
|
|
sequence(d),
|
|
|
|
name(c),
|
|
|
|
size(key),
|
|
|
|
size(val)
|
|
|
|
};
|
2016-09-25 03:18:54 +02:00
|
|
|
|
2017-04-03 06:02:32 +02:00
|
|
|
throw_on_error
|
|
|
|
{
|
|
|
|
d.d->Put(opts, c, slice(key), slice(val))
|
|
|
|
};
|
2016-09-25 06:12:43 +02:00
|
|
|
}
|
|
|
|
|
2019-02-25 23:46:21 +01:00
|
|
|
size_t
|
|
|
|
ircd::db::bytes_value(column &column,
|
|
|
|
const string_view &key,
|
|
|
|
const gopts &gopts)
|
|
|
|
{
|
|
|
|
size_t ret{0};
|
|
|
|
column(key, std::nothrow, gopts, [&ret]
|
|
|
|
(const string_view &value)
|
|
|
|
{
|
|
|
|
ret = value.size();
|
|
|
|
});
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t
|
|
|
|
ircd::db::bytes(column &column,
|
|
|
|
const std::pair<string_view, string_view> &key,
|
|
|
|
const gopts &gopts)
|
|
|
|
{
|
|
|
|
database &d(column);
|
|
|
|
database::column &c(column);
|
|
|
|
const rocksdb::Range range[1]
|
|
|
|
{
|
|
|
|
{ slice(key.first), slice(key.second) }
|
|
|
|
};
|
|
|
|
|
|
|
|
uint64_t ret[1] {0};
|
|
|
|
d.d->GetApproximateSizes(c, range, 1, ret);
|
|
|
|
return ret[0];
|
|
|
|
}
|
|
|
|
|
2018-09-01 13:38:30 +02:00
|
|
|
void
|
|
|
|
ircd::db::prefetch(column &column,
|
|
|
|
const string_view &key,
|
|
|
|
const gopts &gopts)
|
|
|
|
{
|
2018-12-27 04:44:26 +01:00
|
|
|
if(cached(column, key, gopts))
|
2018-09-01 13:38:30 +02:00
|
|
|
return;
|
|
|
|
|
2018-12-28 01:13:49 +01:00
|
|
|
if(!request.avail())
|
|
|
|
return;
|
|
|
|
|
2018-09-01 13:38:30 +02:00
|
|
|
request([column(column), key(std::string(key)), gopts]
|
|
|
|
() mutable
|
|
|
|
{
|
|
|
|
has(column, key, gopts);
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
2019-01-12 02:36:05 +01:00
|
|
|
#if 0
|
2018-08-19 05:40:23 +02:00
|
|
|
bool
|
|
|
|
ircd::db::cached(column &column,
|
|
|
|
const string_view &key,
|
|
|
|
const gopts &gopts)
|
|
|
|
{
|
2018-12-27 04:44:26 +01:00
|
|
|
return exists(cache(column), key);
|
|
|
|
}
|
2019-01-12 02:36:05 +01:00
|
|
|
#endif
|
2018-08-19 05:40:23 +02:00
|
|
|
|
2018-12-27 04:44:26 +01:00
|
|
|
bool
|
|
|
|
ircd::db::cached(column &column,
|
|
|
|
const string_view &key,
|
|
|
|
const gopts &gopts)
|
|
|
|
{
|
2018-08-19 05:40:23 +02:00
|
|
|
auto opts(make_opts(gopts));
|
|
|
|
opts.read_tier = NON_BLOCKING;
|
|
|
|
opts.fill_cache = false;
|
|
|
|
|
2019-01-12 02:36:05 +01:00
|
|
|
database &d(column);
|
|
|
|
database::column &c(column);
|
|
|
|
|
|
|
|
// Theoretically this can be faster than a seek(), but it's not.
|
|
|
|
//thread_local std::string discard;
|
|
|
|
//if(!d.d->KeyMayExist(opts, c, slice(key), &discard, nullptr))
|
|
|
|
// return false;
|
|
|
|
|
2018-08-19 05:40:23 +02:00
|
|
|
std::unique_ptr<rocksdb::Iterator> it;
|
|
|
|
if(!seek(c, key, opts, it))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
assert(bool(it));
|
|
|
|
return valid_eq(*it, key);
|
|
|
|
}
|
|
|
|
|
2017-03-31 01:20:01 +02:00
|
|
|
bool
|
|
|
|
ircd::db::has(column &column,
|
|
|
|
const string_view &key,
|
|
|
|
const gopts &gopts)
|
|
|
|
{
|
|
|
|
database &d(column);
|
|
|
|
database::column &c(column);
|
|
|
|
|
2018-09-01 13:06:13 +02:00
|
|
|
// Perform a co-RP query to the filtration
|
2018-10-16 11:20:49 +02:00
|
|
|
// NOTE disabled for rocksdb >= v5.15 due to a regression
|
|
|
|
// where rocksdb does not init SuperVersion data in the column
|
|
|
|
// family handle and this codepath triggers null derefs and ub.
|
2018-10-21 13:41:21 +02:00
|
|
|
if(0 && c.table_opts.filter_policy)
|
2018-10-16 10:21:42 +02:00
|
|
|
{
|
|
|
|
const auto k(slice(key));
|
|
|
|
auto opts(make_opts(gopts));
|
|
|
|
opts.read_tier = NON_BLOCKING;
|
|
|
|
thread_local std::string discard;
|
|
|
|
if(!d.d->KeyMayExist(opts, c, k, &discard, nullptr))
|
|
|
|
return false;
|
|
|
|
}
|
2017-10-28 01:48:27 +02:00
|
|
|
|
2018-09-01 13:06:13 +02:00
|
|
|
const auto it
|
2017-03-31 01:20:01 +02:00
|
|
|
{
|
2018-09-01 13:06:13 +02:00
|
|
|
seek(column, key, gopts)
|
|
|
|
};
|
2018-04-10 01:57:13 +02:00
|
|
|
|
2018-09-01 13:06:13 +02:00
|
|
|
return valid_eq(*it, key);
|
2017-03-31 01:20:01 +02:00
|
|
|
}
|
|
|
|
|
2017-08-31 07:13:50 +02:00
|
|
|
//
|
|
|
|
// column
|
|
|
|
//
|
|
|
|
|
2019-01-16 22:21:54 +01:00
|
|
|
ircd::db::column::column(database &d,
|
|
|
|
const string_view &column_name,
|
|
|
|
const std::nothrow_t)
|
|
|
|
:c{[&d, &column_name]
|
|
|
|
{
|
|
|
|
const int32_t cfid
|
|
|
|
{
|
|
|
|
d.cfid(std::nothrow, column_name)
|
|
|
|
};
|
|
|
|
|
|
|
|
return cfid >= 0?
|
|
|
|
&d[cfid]:
|
|
|
|
nullptr;
|
|
|
|
}()}
|
2017-08-31 07:13:50 +02:00
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
ircd::db::column::column(database &d,
|
|
|
|
const string_view &column_name)
|
2019-01-16 22:21:54 +01:00
|
|
|
:column
|
|
|
|
{
|
|
|
|
d[column_name]
|
|
|
|
}
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
ircd::db::column::column(database::column &c)
|
|
|
|
:c{&c}
|
|
|
|
{
|
|
|
|
}
|
2017-08-31 07:13:50 +02:00
|
|
|
|
2017-03-23 22:58:24 +01:00
|
|
|
void
|
2017-08-23 22:37:47 +02:00
|
|
|
ircd::db::column::operator()(const delta &delta,
|
2017-03-31 00:57:08 +02:00
|
|
|
const sopts &sopts)
|
2017-03-23 22:58:24 +01:00
|
|
|
{
|
2017-08-23 22:37:47 +02:00
|
|
|
operator()(&delta, &delta + 1, sopts);
|
2017-03-23 22:58:24 +01:00
|
|
|
}
|
|
|
|
|
2017-04-03 06:02:32 +02:00
|
|
|
void
|
|
|
|
ircd::db::column::operator()(const sopts &sopts,
|
|
|
|
const std::initializer_list<delta> &deltas)
|
|
|
|
{
|
|
|
|
operator()(deltas, sopts);
|
|
|
|
}
|
|
|
|
|
2017-03-21 07:28:20 +01:00
|
|
|
void
|
2017-03-31 00:57:08 +02:00
|
|
|
ircd::db::column::operator()(const std::initializer_list<delta> &deltas,
|
|
|
|
const sopts &sopts)
|
2017-03-21 07:28:20 +01:00
|
|
|
{
|
2017-08-23 22:37:47 +02:00
|
|
|
operator()(std::begin(deltas), std::end(deltas), sopts);
|
2017-03-21 07:28:20 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2017-08-23 22:37:47 +02:00
|
|
|
ircd::db::column::operator()(const delta *const &begin,
|
|
|
|
const delta *const &end,
|
2017-03-31 00:57:08 +02:00
|
|
|
const sopts &sopts)
|
2017-03-21 07:28:20 +01:00
|
|
|
{
|
2017-08-23 22:37:47 +02:00
|
|
|
database &d(*this);
|
|
|
|
|
2017-03-21 07:28:20 +01:00
|
|
|
rocksdb::WriteBatch batch;
|
2017-08-23 22:37:47 +02:00
|
|
|
std::for_each(begin, end, [this, &batch]
|
|
|
|
(const delta &delta)
|
|
|
|
{
|
|
|
|
append(batch, *this, delta);
|
|
|
|
});
|
2017-03-21 07:28:20 +01:00
|
|
|
|
2017-09-08 11:14:17 +02:00
|
|
|
commit(d, batch, sopts);
|
2017-03-21 07:28:20 +01:00
|
|
|
}
|
|
|
|
|
2016-09-24 06:01:57 +02:00
|
|
|
void
|
2017-03-31 00:57:08 +02:00
|
|
|
ircd::db::column::operator()(const string_view &key,
|
|
|
|
const gopts &gopts,
|
|
|
|
const view_closure &func)
|
2016-11-29 16:23:38 +01:00
|
|
|
{
|
|
|
|
return operator()(key, func, gopts);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2017-03-31 00:57:08 +02:00
|
|
|
ircd::db::column::operator()(const string_view &key,
|
|
|
|
const view_closure &func,
|
|
|
|
const gopts &gopts)
|
2016-09-24 06:01:57 +02:00
|
|
|
{
|
2017-03-31 00:57:08 +02:00
|
|
|
const auto it(seek(*this, key, gopts));
|
2017-09-16 20:48:09 +02:00
|
|
|
valid_eq_or_throw(*it, key);
|
2017-04-03 06:02:32 +02:00
|
|
|
func(val(*it));
|
|
|
|
}
|
2016-09-24 06:01:57 +02:00
|
|
|
|
2018-01-27 19:07:08 +01:00
|
|
|
bool
|
|
|
|
ircd::db::column::operator()(const string_view &key,
|
|
|
|
const std::nothrow_t,
|
|
|
|
const gopts &gopts,
|
|
|
|
const view_closure &func)
|
|
|
|
{
|
|
|
|
return operator()(key, std::nothrow, func, gopts);
|
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
|
|
|
ircd::db::column::operator()(const string_view &key,
|
|
|
|
const std::nothrow_t,
|
|
|
|
const view_closure &func,
|
|
|
|
const gopts &gopts)
|
|
|
|
{
|
|
|
|
const auto it(seek(*this, key, gopts));
|
|
|
|
if(!valid_eq(*it, key))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
func(val(*it));
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2017-04-03 06:02:32 +02:00
|
|
|
ircd::db::cell
|
|
|
|
ircd::db::column::operator[](const string_view &key)
|
|
|
|
const
|
|
|
|
{
|
|
|
|
return { *this, key };
|
2016-09-24 06:01:57 +02:00
|
|
|
}
|
|
|
|
|
2019-01-24 01:00:22 +01:00
|
|
|
ircd::db::column::operator
|
|
|
|
bool()
|
|
|
|
const
|
|
|
|
{
|
|
|
|
return c?
|
|
|
|
!dropped(*c):
|
|
|
|
false;
|
|
|
|
}
|
|
|
|
|
2017-09-08 10:33:41 +02:00
|
|
|
ircd::db::column::operator
|
2018-09-20 00:38:37 +02:00
|
|
|
const descriptor &()
|
2017-09-08 10:33:41 +02:00
|
|
|
const
|
|
|
|
{
|
2018-10-22 16:09:16 +02:00
|
|
|
assert(c->descriptor);
|
|
|
|
return *c->descriptor;
|
2017-09-08 10:33:41 +02:00
|
|
|
}
|
2017-04-03 06:02:32 +02:00
|
|
|
|
2017-03-23 22:58:24 +01:00
|
|
|
//
|
2017-03-31 00:57:08 +02:00
|
|
|
// column::const_iterator
|
2017-03-23 22:58:24 +01:00
|
|
|
//
|
|
|
|
|
2017-03-31 00:57:08 +02:00
|
|
|
ircd::db::column::const_iterator
|
2018-05-25 11:34:44 +02:00
|
|
|
ircd::db::column::end(gopts gopts)
|
2016-09-26 06:07:22 +02:00
|
|
|
{
|
2017-09-22 12:31:44 +02:00
|
|
|
const_iterator ret
|
|
|
|
{
|
2018-05-25 11:34:44 +02:00
|
|
|
c, {}, std::move(gopts)
|
2017-09-22 12:31:44 +02:00
|
|
|
};
|
|
|
|
|
2018-05-25 11:34:44 +02:00
|
|
|
seek(ret, pos::END);
|
2017-09-22 12:31:44 +02:00
|
|
|
return ret;
|
2016-09-24 06:01:57 +02:00
|
|
|
}
|
|
|
|
|
2019-02-05 03:41:00 +01:00
|
|
|
ircd::db::column::const_iterator
|
|
|
|
ircd::db::column::last(gopts gopts)
|
|
|
|
{
|
|
|
|
const_iterator ret
|
|
|
|
{
|
|
|
|
c, {}, std::move(gopts)
|
|
|
|
};
|
|
|
|
|
|
|
|
seek(ret, pos::BACK);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2017-03-31 00:57:08 +02:00
|
|
|
ircd::db::column::const_iterator
|
2018-05-25 11:34:44 +02:00
|
|
|
ircd::db::column::begin(gopts gopts)
|
2016-09-24 06:01:57 +02:00
|
|
|
{
|
2016-09-26 06:07:22 +02:00
|
|
|
const_iterator ret
|
2016-09-24 06:01:57 +02:00
|
|
|
{
|
2018-05-25 11:34:44 +02:00
|
|
|
c, {}, std::move(gopts)
|
2016-09-26 06:07:22 +02:00
|
|
|
};
|
2016-09-24 06:01:57 +02:00
|
|
|
|
2018-05-25 11:34:44 +02:00
|
|
|
seek(ret, pos::FRONT);
|
2017-09-22 12:31:44 +02:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
ircd::db::column::const_reverse_iterator
|
2018-05-25 11:34:44 +02:00
|
|
|
ircd::db::column::rend(gopts gopts)
|
2017-09-22 12:31:44 +02:00
|
|
|
{
|
|
|
|
const_reverse_iterator ret
|
|
|
|
{
|
2018-05-25 11:34:44 +02:00
|
|
|
c, {}, std::move(gopts)
|
2017-09-22 12:31:44 +02:00
|
|
|
};
|
|
|
|
|
2018-05-25 11:34:44 +02:00
|
|
|
seek(ret, pos::END);
|
2017-09-22 12:31:44 +02:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
ircd::db::column::const_reverse_iterator
|
2018-05-25 11:34:44 +02:00
|
|
|
ircd::db::column::rbegin(gopts gopts)
|
2017-09-22 12:31:44 +02:00
|
|
|
{
|
|
|
|
const_reverse_iterator ret
|
|
|
|
{
|
2018-05-25 11:34:44 +02:00
|
|
|
c, {}, std::move(gopts)
|
2017-09-22 12:31:44 +02:00
|
|
|
};
|
|
|
|
|
2018-05-25 11:34:44 +02:00
|
|
|
seek(ret, pos::BACK);
|
2017-09-22 12:31:44 +02:00
|
|
|
return ret;
|
2016-09-26 06:07:22 +02:00
|
|
|
}
|
|
|
|
|
2017-03-31 00:57:08 +02:00
|
|
|
ircd::db::column::const_iterator
|
|
|
|
ircd::db::column::upper_bound(const string_view &key,
|
2018-05-25 11:34:44 +02:00
|
|
|
gopts gopts)
|
2016-09-26 06:07:22 +02:00
|
|
|
{
|
2018-05-25 11:34:44 +02:00
|
|
|
auto it(lower_bound(key, std::move(gopts)));
|
2017-03-31 00:57:08 +02:00
|
|
|
if(it && it.it->key().compare(slice(key)) == 0)
|
2016-09-26 06:07:22 +02:00
|
|
|
++it;
|
|
|
|
|
2017-09-22 12:31:44 +02:00
|
|
|
return it;
|
2016-09-26 06:07:22 +02:00
|
|
|
}
|
|
|
|
|
2017-03-31 00:57:08 +02:00
|
|
|
ircd::db::column::const_iterator
|
|
|
|
ircd::db::column::find(const string_view &key,
|
2018-05-25 11:34:44 +02:00
|
|
|
gopts gopts)
|
2016-09-26 06:07:22 +02:00
|
|
|
{
|
2017-03-31 00:57:08 +02:00
|
|
|
auto it(lower_bound(key, gopts));
|
|
|
|
if(!it || it.it->key().compare(slice(key)) != 0)
|
2017-09-22 05:08:11 +02:00
|
|
|
return end(gopts);
|
2017-03-31 00:57:08 +02:00
|
|
|
|
|
|
|
return it;
|
|
|
|
}
|
2016-09-26 06:07:22 +02:00
|
|
|
|
2017-03-31 00:57:08 +02:00
|
|
|
ircd::db::column::const_iterator
|
|
|
|
ircd::db::column::lower_bound(const string_view &key,
|
2018-05-25 11:34:44 +02:00
|
|
|
gopts gopts)
|
2017-03-31 00:57:08 +02:00
|
|
|
{
|
2016-09-26 06:07:22 +02:00
|
|
|
const_iterator ret
|
2016-09-24 06:01:57 +02:00
|
|
|
{
|
2018-05-25 11:34:44 +02:00
|
|
|
c, {}, std::move(gopts)
|
2016-09-26 06:07:22 +02:00
|
|
|
};
|
|
|
|
|
2018-05-25 11:34:44 +02:00
|
|
|
seek(ret, key);
|
2017-09-22 12:31:44 +02:00
|
|
|
return ret;
|
2016-09-26 06:07:22 +02:00
|
|
|
}
|
|
|
|
|
2017-09-22 12:31:44 +02:00
|
|
|
ircd::db::column::const_iterator &
|
|
|
|
ircd::db::column::const_iterator::operator--()
|
|
|
|
{
|
|
|
|
if(likely(bool(*this)))
|
|
|
|
seek(*this, pos::PREV);
|
|
|
|
else
|
|
|
|
seek(*this, pos::BACK);
|
|
|
|
|
|
|
|
return *this;
|
|
|
|
}
|
|
|
|
|
|
|
|
ircd::db::column::const_iterator &
|
|
|
|
ircd::db::column::const_iterator::operator++()
|
|
|
|
{
|
|
|
|
if(likely(bool(*this)))
|
|
|
|
seek(*this, pos::NEXT);
|
|
|
|
else
|
|
|
|
seek(*this, pos::FRONT);
|
|
|
|
|
|
|
|
return *this;
|
|
|
|
}
|
|
|
|
|
|
|
|
ircd::db::column::const_reverse_iterator &
|
|
|
|
ircd::db::column::const_reverse_iterator::operator--()
|
|
|
|
{
|
|
|
|
if(likely(bool(*this)))
|
|
|
|
seek(*this, pos::NEXT);
|
|
|
|
else
|
|
|
|
seek(*this, pos::FRONT);
|
|
|
|
|
|
|
|
return *this;
|
|
|
|
}
|
|
|
|
|
|
|
|
ircd::db::column::const_reverse_iterator &
|
|
|
|
ircd::db::column::const_reverse_iterator::operator++()
|
|
|
|
{
|
|
|
|
if(likely(bool(*this)))
|
|
|
|
seek(*this, pos::PREV);
|
|
|
|
else
|
|
|
|
seek(*this, pos::BACK);
|
|
|
|
|
|
|
|
return *this;
|
|
|
|
}
|
|
|
|
|
|
|
|
ircd::db::column::const_iterator_base::const_iterator_base(const_iterator_base &&o)
|
2017-03-31 00:57:08 +02:00
|
|
|
noexcept
|
2017-09-08 10:33:41 +02:00
|
|
|
:c{std::move(o.c)}
|
2018-05-25 11:34:44 +02:00
|
|
|
,opts{std::move(o.opts)}
|
2017-03-31 00:57:08 +02:00
|
|
|
,it{std::move(o.it)}
|
|
|
|
,val{std::move(o.val)}
|
2016-09-26 06:07:22 +02:00
|
|
|
{
|
|
|
|
}
|
2016-09-24 06:01:57 +02:00
|
|
|
|
2017-09-22 12:31:44 +02:00
|
|
|
ircd::db::column::const_iterator_base &
|
|
|
|
ircd::db::column::const_iterator_base::operator=(const_iterator_base &&o)
|
2017-03-31 00:57:08 +02:00
|
|
|
noexcept
|
2016-09-26 06:07:22 +02:00
|
|
|
{
|
2017-03-31 00:57:08 +02:00
|
|
|
c = std::move(o.c);
|
2018-05-25 11:34:44 +02:00
|
|
|
opts = std::move(o.opts);
|
2017-03-31 00:57:08 +02:00
|
|
|
it = std::move(o.it);
|
|
|
|
val = std::move(o.val);
|
|
|
|
return *this;
|
2016-09-26 06:07:22 +02:00
|
|
|
}
|
|
|
|
|
2017-09-22 12:31:44 +02:00
|
|
|
// linkage for incmplete rocksdb::Iterator
|
|
|
|
ircd::db::column::const_iterator_base::const_iterator_base()
|
2017-04-03 06:02:32 +02:00
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2017-09-22 12:31:44 +02:00
|
|
|
// linkage for incmplete rocksdb::Iterator
|
|
|
|
ircd::db::column::const_iterator_base::~const_iterator_base()
|
|
|
|
noexcept
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
ircd::db::column::const_iterator_base::const_iterator_base(database::column *const &c,
|
2018-05-25 11:34:44 +02:00
|
|
|
std::unique_ptr<rocksdb::Iterator> &&it,
|
2018-11-16 05:54:50 +01:00
|
|
|
gopts opts)
|
2017-09-08 10:33:41 +02:00
|
|
|
:c{c}
|
2018-05-25 11:34:44 +02:00
|
|
|
,opts{std::move(opts)}
|
2017-03-31 00:57:08 +02:00
|
|
|
,it{std::move(it)}
|
2016-09-26 06:07:22 +02:00
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2017-09-22 12:31:44 +02:00
|
|
|
const ircd::db::column::const_iterator_base::value_type &
|
|
|
|
ircd::db::column::const_iterator_base::operator*()
|
2016-09-26 06:07:22 +02:00
|
|
|
const
|
|
|
|
{
|
2017-09-16 20:48:09 +02:00
|
|
|
assert(it && valid(*it));
|
2017-04-03 06:02:32 +02:00
|
|
|
val.first = db::key(*it);
|
|
|
|
val.second = db::val(*it);
|
2016-09-26 06:07:22 +02:00
|
|
|
return val;
|
|
|
|
}
|
|
|
|
|
2017-09-22 12:31:44 +02:00
|
|
|
const ircd::db::column::const_iterator_base::value_type *
|
|
|
|
ircd::db::column::const_iterator_base::operator->()
|
2016-09-26 06:07:22 +02:00
|
|
|
const
|
|
|
|
{
|
|
|
|
return &operator*();
|
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
2017-09-22 12:31:44 +02:00
|
|
|
ircd::db::column::const_iterator_base::operator!()
|
2016-09-26 06:07:22 +02:00
|
|
|
const
|
|
|
|
{
|
2017-09-16 20:48:09 +02:00
|
|
|
return !static_cast<bool>(*this);
|
2016-09-26 06:07:22 +02:00
|
|
|
}
|
|
|
|
|
2017-09-22 12:31:44 +02:00
|
|
|
ircd::db::column::const_iterator_base::operator bool()
|
2016-09-26 06:07:22 +02:00
|
|
|
const
|
|
|
|
{
|
2017-09-16 20:48:09 +02:00
|
|
|
if(!it)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if(!valid(*it))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return true;
|
2016-09-26 06:07:22 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
2017-09-22 12:31:44 +02:00
|
|
|
ircd::db::operator!=(const column::const_iterator_base &a, const column::const_iterator_base &b)
|
2016-09-26 06:07:22 +02:00
|
|
|
{
|
2017-04-03 06:02:32 +02:00
|
|
|
return !(a == b);
|
2016-09-26 06:07:22 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
2017-09-22 12:31:44 +02:00
|
|
|
ircd::db::operator==(const column::const_iterator_base &a, const column::const_iterator_base &b)
|
2016-09-26 06:07:22 +02:00
|
|
|
{
|
2017-04-03 06:02:32 +02:00
|
|
|
if(a && b)
|
2016-09-26 06:07:22 +02:00
|
|
|
{
|
2017-04-03 06:02:32 +02:00
|
|
|
const auto &ak(a.it->key());
|
|
|
|
const auto &bk(b.it->key());
|
|
|
|
return ak.compare(bk) == 0;
|
2016-09-26 06:07:22 +02:00
|
|
|
}
|
|
|
|
|
2017-04-03 06:02:32 +02:00
|
|
|
if(!a && !b)
|
2016-09-26 06:07:22 +02:00
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
2017-09-22 12:31:44 +02:00
|
|
|
ircd::db::operator>(const column::const_iterator_base &a, const column::const_iterator_base &b)
|
2016-09-26 06:07:22 +02:00
|
|
|
{
|
2017-04-03 06:02:32 +02:00
|
|
|
if(a && b)
|
2016-09-26 06:07:22 +02:00
|
|
|
{
|
2017-04-03 06:02:32 +02:00
|
|
|
const auto &ak(a.it->key());
|
|
|
|
const auto &bk(b.it->key());
|
|
|
|
return ak.compare(bk) == 1;
|
2016-09-26 06:07:22 +02:00
|
|
|
}
|
|
|
|
|
2017-04-03 06:02:32 +02:00
|
|
|
if(!a && b)
|
2016-09-26 06:07:22 +02:00
|
|
|
return true;
|
|
|
|
|
2017-04-03 06:02:32 +02:00
|
|
|
if(!a && !b)
|
2016-09-26 06:07:22 +02:00
|
|
|
return false;
|
|
|
|
|
2017-04-03 06:02:32 +02:00
|
|
|
assert(!a && b);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
2017-09-22 12:31:44 +02:00
|
|
|
ircd::db::operator<(const column::const_iterator_base &a, const column::const_iterator_base &b)
|
2017-04-03 06:02:32 +02:00
|
|
|
{
|
|
|
|
if(a && b)
|
|
|
|
{
|
|
|
|
const auto &ak(a.it->key());
|
|
|
|
const auto &bk(b.it->key());
|
|
|
|
return ak.compare(bk) == -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if(!a && b)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if(!a && !b)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
assert(a && !b);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
template<class pos>
|
2017-08-30 23:05:15 +02:00
|
|
|
bool
|
2017-09-22 12:31:44 +02:00
|
|
|
ircd::db::seek(column::const_iterator_base &it,
|
2018-05-25 11:34:44 +02:00
|
|
|
const pos &p)
|
2017-04-03 06:02:32 +02:00
|
|
|
{
|
|
|
|
database::column &c(it);
|
2018-11-16 05:54:50 +01:00
|
|
|
return seek(c, p, it.opts, it.it);
|
2017-04-03 06:02:32 +02:00
|
|
|
}
|
2018-05-25 11:34:44 +02:00
|
|
|
template bool ircd::db::seek<ircd::db::pos>(column::const_iterator_base &, const pos &);
|
|
|
|
template bool ircd::db::seek<ircd::string_view>(column::const_iterator_base &, const string_view &);
|
2017-04-03 06:02:32 +02:00
|
|
|
|
2018-09-25 07:00:21 +02:00
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
|
|
|
//
|
2019-01-29 20:13:58 +01:00
|
|
|
// opts.h
|
2018-09-25 07:00:21 +02:00
|
|
|
//
|
|
|
|
|
|
|
|
//
|
2019-01-29 20:13:58 +01:00
|
|
|
// options
|
2018-09-25 07:00:21 +02:00
|
|
|
//
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::options::options(const database &d)
|
|
|
|
:options{d.d->GetDBOptions()}
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
ircd::db::options::options(const database::column &c)
|
|
|
|
:options
|
|
|
|
{
|
|
|
|
rocksdb::ColumnFamilyOptions
|
|
|
|
{
|
|
|
|
c.d->d->GetOptions(c.handle.get())
|
|
|
|
}
|
|
|
|
}{}
|
|
|
|
|
|
|
|
ircd::db::options::options(const rocksdb::DBOptions &opts)
|
|
|
|
{
|
|
|
|
throw_on_error
|
|
|
|
{
|
|
|
|
rocksdb::GetStringFromDBOptions(this, opts)
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
|
|
|
ircd::db::options::options(const rocksdb::ColumnFamilyOptions &opts)
|
|
|
|
{
|
|
|
|
throw_on_error
|
|
|
|
{
|
|
|
|
rocksdb::GetStringFromColumnFamilyOptions(this, opts)
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
|
|
|
ircd::db::options::operator rocksdb::PlainTableOptions()
|
|
|
|
const
|
|
|
|
{
|
|
|
|
rocksdb::PlainTableOptions ret;
|
|
|
|
throw_on_error
|
|
|
|
{
|
|
|
|
rocksdb::GetPlainTableOptionsFromString(ret, *this, &ret)
|
|
|
|
};
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
ircd::db::options::operator rocksdb::BlockBasedTableOptions()
|
|
|
|
const
|
|
|
|
{
|
|
|
|
rocksdb::BlockBasedTableOptions ret;
|
|
|
|
throw_on_error
|
|
|
|
{
|
|
|
|
rocksdb::GetBlockBasedTableOptionsFromString(ret, *this, &ret)
|
|
|
|
};
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
ircd::db::options::operator rocksdb::ColumnFamilyOptions()
|
|
|
|
const
|
|
|
|
{
|
|
|
|
rocksdb::ColumnFamilyOptions ret;
|
|
|
|
throw_on_error
|
|
|
|
{
|
|
|
|
rocksdb::GetColumnFamilyOptionsFromString(ret, *this, &ret)
|
|
|
|
};
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
ircd::db::options::operator rocksdb::DBOptions()
|
|
|
|
const
|
|
|
|
{
|
|
|
|
rocksdb::DBOptions ret;
|
|
|
|
throw_on_error
|
|
|
|
{
|
|
|
|
rocksdb::GetDBOptionsFromString(ret, *this, &ret)
|
|
|
|
};
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
ircd::db::options::operator rocksdb::Options()
|
|
|
|
const
|
|
|
|
{
|
|
|
|
rocksdb::Options ret;
|
|
|
|
throw_on_error
|
|
|
|
{
|
|
|
|
rocksdb::GetOptionsFromString(ret, *this, &ret)
|
|
|
|
};
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
//
|
|
|
|
// options::map
|
|
|
|
//
|
|
|
|
|
|
|
|
ircd::db::options::map::map(const options &o)
|
2018-09-25 07:00:21 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
throw_on_error
|
|
|
|
{
|
|
|
|
rocksdb::StringToMap(o, this)
|
|
|
|
};
|
2018-09-25 07:00:21 +02:00
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::options::map::operator rocksdb::PlainTableOptions()
|
|
|
|
const
|
2018-09-25 07:00:21 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
rocksdb::PlainTableOptions ret;
|
|
|
|
throw_on_error
|
|
|
|
{
|
|
|
|
rocksdb::GetPlainTableOptionsFromMap(ret, *this, &ret)
|
|
|
|
};
|
2018-09-25 07:00:21 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
return ret;
|
2018-09-25 07:00:21 +02:00
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::options::map::operator rocksdb::BlockBasedTableOptions()
|
|
|
|
const
|
2018-09-25 07:00:21 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
rocksdb::BlockBasedTableOptions ret;
|
|
|
|
throw_on_error
|
|
|
|
{
|
|
|
|
rocksdb::GetBlockBasedTableOptionsFromMap(ret, *this, &ret)
|
|
|
|
};
|
2018-09-25 07:00:21 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
return ret;
|
2018-09-25 07:00:21 +02:00
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::options::map::operator rocksdb::ColumnFamilyOptions()
|
|
|
|
const
|
2018-09-25 07:00:21 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
rocksdb::ColumnFamilyOptions ret;
|
|
|
|
throw_on_error
|
|
|
|
{
|
|
|
|
rocksdb::GetColumnFamilyOptionsFromMap(ret, *this, &ret)
|
|
|
|
};
|
2018-09-25 07:00:21 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
return ret;
|
2018-09-25 07:00:21 +02:00
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::options::map::operator rocksdb::DBOptions()
|
|
|
|
const
|
2018-09-25 07:00:21 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
rocksdb::DBOptions ret;
|
|
|
|
throw_on_error
|
|
|
|
{
|
|
|
|
rocksdb::GetDBOptionsFromMap(ret, *this, &ret)
|
|
|
|
};
|
|
|
|
|
|
|
|
return ret;
|
2018-09-25 07:00:21 +02:00
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
2018-09-25 07:00:21 +02:00
|
|
|
//
|
2019-01-29 20:13:58 +01:00
|
|
|
// cache.h
|
2018-09-25 07:00:21 +02:00
|
|
|
//
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
void
|
|
|
|
ircd::db::clear(rocksdb::Cache *const &cache)
|
2018-09-25 07:00:21 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
if(cache)
|
|
|
|
return clear(*cache);
|
2018-09-25 07:00:21 +02:00
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
void
|
|
|
|
ircd::db::clear(rocksdb::Cache &cache)
|
2018-09-25 07:00:21 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
cache.EraseUnRefEntries();
|
2018-09-25 07:00:21 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::remove(rocksdb::Cache *const &cache,
|
|
|
|
const string_view &key)
|
2018-09-25 07:00:21 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
return cache? remove(*cache, key) : false;
|
2018-09-25 07:00:21 +02:00
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
bool
|
|
|
|
ircd::db::remove(rocksdb::Cache &cache,
|
|
|
|
const string_view &key)
|
2018-09-25 07:00:21 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
cache.Erase(slice(key));
|
|
|
|
return true;
|
2018-09-25 07:00:21 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::insert(rocksdb::Cache *const &cache,
|
|
|
|
const string_view &key,
|
|
|
|
const string_view &value)
|
2018-09-25 07:00:21 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
return cache? insert(*cache, key, value) : false;
|
2018-09-25 07:00:21 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::insert(rocksdb::Cache &cache,
|
|
|
|
const string_view &key,
|
|
|
|
const string_view &value)
|
2018-09-25 07:00:21 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
unique_buffer<const_buffer> buf
|
|
|
|
{
|
|
|
|
const_buffer{value}
|
|
|
|
};
|
2018-09-25 07:00:21 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
return insert(cache, key, std::move(buf));
|
|
|
|
}
|
2017-09-14 20:30:06 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
bool
|
|
|
|
ircd::db::insert(rocksdb::Cache *const &cache,
|
|
|
|
const string_view &key,
|
2019-04-12 18:36:53 +02:00
|
|
|
unique_buffer<const_buffer> &&value)
|
2017-09-14 20:30:06 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
return cache? insert(*cache, key, std::move(value)) : false;
|
2017-09-14 20:30:06 +02:00
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
bool
|
|
|
|
ircd::db::insert(rocksdb::Cache &cache,
|
|
|
|
const string_view &key,
|
2019-04-12 18:36:53 +02:00
|
|
|
unique_buffer<const_buffer> &&value)
|
2017-09-08 11:14:17 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
const size_t value_size
|
2019-01-16 22:46:15 +01:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
size(value)
|
2019-01-16 22:46:15 +01:00
|
|
|
};
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
static const auto deleter{[]
|
|
|
|
(const rocksdb::Slice &key, void *const value)
|
2017-09-08 11:14:17 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
delete[] reinterpret_cast<const char *>(value);
|
|
|
|
}};
|
|
|
|
|
|
|
|
// Note that because of the nullptr handle argument below, rocksdb
|
|
|
|
// will run the deleter if the insert throws; just make sure
|
|
|
|
// the argument execution doesn't throw after release()
|
|
|
|
throw_on_error
|
|
|
|
{
|
|
|
|
cache.Insert(slice(key),
|
|
|
|
const_cast<char *>(data(value.release())),
|
|
|
|
value_size,
|
|
|
|
deleter,
|
|
|
|
nullptr)
|
|
|
|
};
|
|
|
|
|
|
|
|
return true;
|
2017-09-08 11:14:17 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::for_each(const rocksdb::Cache *const &cache,
|
|
|
|
const cache_closure &closure)
|
2017-09-08 11:14:17 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
if(cache)
|
|
|
|
for_each(*cache, closure);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
ircd::db::for_each(const rocksdb::Cache &cache,
|
|
|
|
const cache_closure &closure)
|
|
|
|
{
|
|
|
|
// Due to the use of the global variables which are required when using a
|
|
|
|
// C-style callback for RocksDB, we have to make use of this function
|
|
|
|
// exclusive for different contexts.
|
|
|
|
thread_local ctx::mutex mutex;
|
2019-03-02 21:33:32 +01:00
|
|
|
const std::lock_guard lock{mutex};
|
2019-01-29 20:13:58 +01:00
|
|
|
|
|
|
|
thread_local rocksdb::Cache *_cache;
|
|
|
|
_cache = const_cast<rocksdb::Cache *>(&cache);
|
|
|
|
|
|
|
|
thread_local const cache_closure *_closure;
|
|
|
|
_closure = &closure;
|
|
|
|
|
|
|
|
_cache->ApplyToAllCacheEntries([]
|
|
|
|
(void *const value_buffer, const size_t buffer_size)
|
|
|
|
noexcept
|
2019-01-16 22:46:15 +01:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
assert(_cache);
|
|
|
|
assert(_closure);
|
|
|
|
const const_buffer buf
|
2019-01-16 22:46:15 +01:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
reinterpret_cast<const char *>(value_buffer), buffer_size
|
2019-01-16 22:46:15 +01:00
|
|
|
};
|
2017-09-08 11:14:17 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
(*_closure)(buf);
|
|
|
|
},
|
|
|
|
true);
|
|
|
|
}
|
2019-01-16 22:46:15 +01:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
bool
|
|
|
|
ircd::db::exists(const rocksdb::Cache *const &cache,
|
|
|
|
const string_view &key)
|
|
|
|
{
|
|
|
|
return cache? exists(*cache, key) : false;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
|
|
|
ircd::db::exists(const rocksdb::Cache &cache_,
|
|
|
|
const string_view &key)
|
|
|
|
{
|
|
|
|
auto &cache
|
2017-09-08 11:14:17 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
const_cast<rocksdb::Cache &>(cache_)
|
|
|
|
};
|
|
|
|
|
|
|
|
const custom_ptr<rocksdb::Cache::Handle> handle
|
|
|
|
{
|
|
|
|
cache.Lookup(slice(key)), [&cache](auto *const &handle)
|
|
|
|
{
|
|
|
|
cache.Release(handle);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
return bool(handle);
|
2017-09-08 11:14:17 +02:00
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
size_t
|
|
|
|
ircd::db::pinned(const rocksdb::Cache *const &cache)
|
2017-09-08 11:14:17 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
return cache? pinned(*cache) : 0;
|
2017-09-08 11:14:17 +02:00
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
size_t
|
|
|
|
ircd::db::pinned(const rocksdb::Cache &cache)
|
2017-09-08 11:14:17 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
return cache.GetPinnedUsage();
|
|
|
|
}
|
2018-03-14 01:32:13 +01:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
size_t
|
|
|
|
ircd::db::usage(const rocksdb::Cache *const &cache)
|
|
|
|
{
|
|
|
|
return cache? usage(*cache) : 0;
|
2017-09-08 11:14:17 +02:00
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
size_t
|
|
|
|
ircd::db::usage(const rocksdb::Cache &cache)
|
2017-09-19 04:17:36 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
return cache.GetUsage();
|
2017-09-19 04:17:36 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::capacity(rocksdb::Cache *const &cache,
|
|
|
|
const size_t &cap)
|
2017-09-08 11:14:17 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
if(!cache)
|
|
|
|
return false;
|
2017-09-08 11:14:17 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
capacity(*cache, cap);
|
|
|
|
return true;
|
2017-09-08 11:14:17 +02:00
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
void
|
|
|
|
ircd::db::capacity(rocksdb::Cache &cache,
|
|
|
|
const size_t &cap)
|
|
|
|
{
|
|
|
|
cache.SetCapacity(cap);
|
|
|
|
}
|
2017-04-03 06:02:32 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
size_t
|
|
|
|
ircd::db::capacity(const rocksdb::Cache *const &cache)
|
2017-09-08 11:14:17 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
return cache? capacity(*cache): 0;
|
2017-09-08 11:14:17 +02:00
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
size_t
|
|
|
|
ircd::db::capacity(const rocksdb::Cache &cache)
|
2017-04-03 06:02:32 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
return cache.GetCapacity();
|
|
|
|
}
|
2017-08-23 22:37:47 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
uint64_t
|
|
|
|
ircd::db::ticker(const rocksdb::Cache *const &cache,
|
|
|
|
const uint32_t &ticker_id)
|
|
|
|
{
|
|
|
|
return cache? ticker(*cache, ticker_id) : 0UL;
|
2017-04-03 06:02:32 +02:00
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
const uint64_t &
|
|
|
|
ircd::db::ticker(const rocksdb::Cache &cache,
|
|
|
|
const uint32_t &ticker_id)
|
2017-04-03 06:02:32 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
const auto &c
|
2017-04-03 06:02:32 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
dynamic_cast<const database::cache &>(cache)
|
2017-04-03 06:02:32 +02:00
|
|
|
};
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
static const uint64_t &zero
|
|
|
|
{
|
|
|
|
0ULL
|
|
|
|
};
|
|
|
|
|
|
|
|
return c.stats?
|
|
|
|
c.stats->ticker.at(ticker_id):
|
|
|
|
zero;
|
2017-04-03 06:02:32 +02:00
|
|
|
}
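// A minimal sketch of reading one cache statistic through the accessor
// above; the cache must belong to this database for the dynamic_cast to
// database::cache to succeed, and the ticker id is just an example:
//
//   const auto &hits
//   {
//       db::ticker(cache, rocksdb::BLOCK_CACHE_HIT)
//   };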
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
|
|
|
//
|
|
|
|
// error.h
|
|
|
|
//
|
|
|
|
|
|
|
|
//
|
|
|
|
// error::not_found
|
|
|
|
//
|
|
|
|
|
|
|
|
decltype(ircd::db::error::not_found::_not_found_)
|
|
|
|
ircd::db::error::not_found::_not_found_
|
2017-09-23 10:13:26 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
rocksdb::Status::NotFound()
|
|
|
|
};
|
2018-08-20 04:30:17 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
//
|
|
|
|
// error::not_found::not_found
|
|
|
|
//
|
2017-09-23 10:13:26 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::error::not_found::not_found()
|
|
|
|
:error
|
|
|
|
{
|
|
|
|
generate_skip, _not_found_
|
2017-09-23 10:13:26 +02:00
|
|
|
}
|
2017-04-03 06:02:32 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
strlcpy(buf, "NotFound");
|
|
|
|
}
|
2017-08-19 00:13:15 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
//
|
|
|
|
// error
|
|
|
|
//
|
2017-08-19 00:13:15 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
decltype(ircd::db::error::_no_code_)
|
|
|
|
ircd::db::error::_no_code_
|
|
|
|
{
|
|
|
|
rocksdb::Status::OK()
|
|
|
|
};
|
2018-05-29 10:42:48 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
//
|
|
|
|
// error::error
|
|
|
|
//
|
2017-09-23 10:13:26 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::error::error(internal_t,
|
|
|
|
const rocksdb::Status &s,
|
|
|
|
const string_view &fmt,
|
|
|
|
const va_rtti &ap)
|
|
|
|
:error
|
2017-09-23 10:13:26 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
s
|
|
|
|
}
|
|
|
|
{
|
|
|
|
const string_view &msg{buf};
|
|
|
|
const mutable_buffer remain
|
2017-09-23 10:13:26 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
buf + size(msg), sizeof(buf) - size(msg)
|
2017-09-23 10:13:26 +02:00
|
|
|
};
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
fmt::vsprintf
|
2018-05-29 10:42:48 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
remain, fmt, ap
|
2018-05-29 10:42:48 +02:00
|
|
|
};
|
2019-01-29 20:13:58 +01:00
|
|
|
}
|
2018-05-29 10:42:48 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::error::error(const rocksdb::Status &s)
|
|
|
|
:error
|
|
|
|
{
|
|
|
|
generate_skip, s
|
|
|
|
}
|
|
|
|
{
|
|
|
|
fmt::sprintf
|
|
|
|
{
|
|
|
|
buf, "(%u:%u:%u %s): %s",
|
|
|
|
this->code,
|
|
|
|
this->subcode,
|
|
|
|
this->severity,
|
|
|
|
reflect(s.severity()),
|
|
|
|
s.getState(),
|
|
|
|
};
|
2016-09-26 06:07:22 +02:00
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::error::error(generate_skip_t,
|
|
|
|
const rocksdb::Status &s)
|
|
|
|
:ircd::error
|
2017-09-24 04:47:36 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
generate_skip
|
2017-09-24 04:47:36 +02:00
|
|
|
}
|
2019-01-29 20:13:58 +01:00
|
|
|
,code
|
2017-09-24 04:47:36 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
s.code()
|
2017-09-24 04:47:36 +02:00
|
|
|
}
|
2019-01-29 20:13:58 +01:00
|
|
|
,subcode
|
2017-09-24 04:47:36 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
s.subcode()
|
2017-09-24 04:47:36 +02:00
|
|
|
}
|
2019-01-29 20:13:58 +01:00
|
|
|
,severity
|
2016-09-26 06:07:22 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
s.severity()
|
|
|
|
}
|
|
|
|
{
|
|
|
|
}
|
2016-09-26 06:07:22 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
|
|
|
//
|
|
|
|
// merge.h
|
|
|
|
//
|
2017-09-08 11:14:17 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
std::string
|
2019-02-16 22:51:55 +01:00
|
|
|
__attribute__((noreturn))
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::merge_operator(const string_view &key,
|
|
|
|
const std::pair<string_view, string_view> &delta)
|
|
|
|
{
|
|
|
|
//ircd::json::index index{delta.first};
|
|
|
|
//index += delta.second;
|
|
|
|
//return index;
|
2019-02-16 22:51:55 +01:00
|
|
|
|
|
|
|
throw ircd::not_implemented
|
|
|
|
{
|
|
|
|
"db::merge_operator()"
|
|
|
|
};
|
2016-09-26 06:07:22 +02:00
|
|
|
}
|
|
|
|
|
2018-12-12 18:53:16 +01:00
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
|
|
|
//
|
2019-01-29 20:13:58 +01:00
|
|
|
// comparator.h
|
2018-12-12 18:53:16 +01:00
|
|
|
//
|
|
|
|
|
|
|
|
//
|
2019-01-29 20:13:58 +01:00
|
|
|
// linkage placements for integer comparators so they all have the same addr
|
2018-12-12 18:53:16 +01:00
|
|
|
//
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::cmp_int64_t::cmp_int64_t()
|
2018-12-12 18:53:16 +01:00
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::cmp_int64_t::~cmp_int64_t()
|
|
|
|
noexcept
|
2018-12-12 18:53:16 +01:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
}
|
2018-12-12 18:53:16 +01:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::cmp_uint64_t::cmp_uint64_t()
|
2018-12-12 18:53:16 +01:00
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::cmp_uint64_t::~cmp_uint64_t()
|
|
|
|
noexcept
|
2018-12-12 18:53:16 +01:00
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::reverse_cmp_int64_t::reverse_cmp_int64_t()
|
2018-12-12 18:53:16 +01:00
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::reverse_cmp_int64_t::~reverse_cmp_int64_t()
|
|
|
|
noexcept
|
2018-12-12 18:53:16 +01:00
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::reverse_cmp_uint64_t::reverse_cmp_uint64_t()
|
2018-12-12 18:53:16 +01:00
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::reverse_cmp_uint64_t::~reverse_cmp_uint64_t()
|
|
|
|
noexcept
|
2018-12-12 18:53:16 +01:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
}
|
2018-12-12 18:53:16 +01:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
//
|
|
|
|
// cmp_string_view
|
|
|
|
//
|
|
|
|
|
|
|
|
ircd::db::cmp_string_view::cmp_string_view()
|
|
|
|
:db::comparator{"string_view", &less, &equal}
|
|
|
|
{
|
2018-12-12 18:53:16 +01:00
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
bool
|
|
|
|
ircd::db::cmp_string_view::less(const string_view &a,
|
|
|
|
const string_view &b)
|
2018-12-12 18:53:16 +01:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
return a < b;
|
|
|
|
}
|
2018-12-12 18:53:16 +01:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
bool
|
|
|
|
ircd::db::cmp_string_view::equal(const string_view &a,
|
|
|
|
const string_view &b)
|
|
|
|
{
|
|
|
|
return a == b;
|
2018-12-12 18:53:16 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
//
|
2019-01-29 20:13:58 +01:00
|
|
|
// reverse_cmp_string_view
|
2018-12-12 18:53:16 +01:00
|
|
|
//
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::reverse_cmp_string_view::reverse_cmp_string_view()
|
|
|
|
:db::comparator{"reverse_string_view", &less, &equal}
|
2018-12-12 18:53:16 +01:00
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
bool
|
|
|
|
ircd::db::reverse_cmp_string_view::less(const string_view &a,
|
|
|
|
const string_view &b)
|
2018-12-12 18:53:16 +01:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
/// RocksDB sez things will not work correctly unless a shorter string
/// always compares less than a longer string, even if one intends some
/// reverse ordering.
|
|
|
|
if(a.size() < b.size())
|
|
|
|
return true;
|
2018-12-12 18:53:16 +01:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
/// Furthermore, b.size() < a.size() returning false from this function
/// appears not to be correct. The reversal also has to come in the form
/// of a bytewise forward iteration.
|
|
|
|
return std::memcmp(a.data(), b.data(), std::min(a.size(), b.size())) > 0;
|
2018-12-12 18:53:16 +01:00
|
|
|
}
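// Illustration of the resulting order: for equal lengths the bytewise
// comparison is reversed, while a shorter key still sorts first per the
// length rule above.
//
//   less("b", "a")   -> true    // equal length, reversed
//   less("z", "aa")  -> true    // shorter always less
//   less("aa", "z")  -> false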
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
bool
|
|
|
|
ircd::db::reverse_cmp_string_view::equal(const string_view &a,
|
|
|
|
const string_view &b)
|
2018-12-12 18:53:16 +01:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
return a == b;
|
2018-12-12 18:53:16 +01:00
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
|
|
|
//
|
|
|
|
// delta.h
|
|
|
|
//
|
|
|
|
|
|
|
|
bool
|
|
|
|
ircd::db::value_required(const op &op)
|
2018-12-12 18:53:16 +01:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
switch(op)
|
2018-12-12 18:53:16 +01:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
case op::SET:
|
|
|
|
case op::MERGE:
|
|
|
|
case op::DELETE_RANGE:
|
|
|
|
return true;
|
2018-12-12 18:53:16 +01:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
case op::GET:
|
|
|
|
case op::DELETE:
|
|
|
|
case op::SINGLE_DELETE:
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
assert(0);
|
|
|
|
return false;
|
2018-12-12 18:53:16 +01:00
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
|
|
|
//
|
|
|
|
// db.h (internal)
|
|
|
|
//
|
|
|
|
|
|
|
|
//
|
|
|
|
// throw_on_error
|
|
|
|
//
|
|
|
|
|
|
|
|
ircd::db::throw_on_error::throw_on_error(const rocksdb::Status &status)
|
2018-12-12 18:53:16 +01:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
using rocksdb::Status;
|
|
|
|
|
|
|
|
switch(status.code())
|
2018-12-12 18:53:16 +01:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
case Status::kOk:
|
|
|
|
return;
|
2018-12-12 18:53:16 +01:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
case Status::kNotFound:
|
|
|
|
throw not_found{};
|
|
|
|
|
|
|
|
default:
|
|
|
|
throw error{status};
|
|
|
|
}
|
2018-12-12 18:53:16 +01:00
|
|
|
}
|
|
|
|
|
2018-05-15 01:17:43 +02:00
|
|
|
//
|
2019-01-29 20:13:58 +01:00
|
|
|
// error_to_status
|
2018-05-15 01:17:43 +02:00
|
|
|
//
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::error_to_status::error_to_status(const std::exception &e)
|
|
|
|
:rocksdb::Status
|
2018-08-19 02:59:31 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
Status::Aborted(slice(string_view(e.what())))
|
2018-08-19 02:59:31 +02:00
|
|
|
}
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::error_to_status::error_to_status(const std::system_error &e)
|
|
|
|
:error_to_status{e.code()}
|
2018-08-19 02:59:31 +02:00
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::error_to_status::error_to_status(const std::error_code &e)
|
|
|
|
:rocksdb::Status{[&e]
|
2018-08-19 02:59:31 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
using std::errc;
|
2018-08-19 02:59:31 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
switch(e.value())
|
2018-08-19 02:59:31 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
case 0:
|
|
|
|
return Status::OK();
|
2018-08-19 02:59:31 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
case int(errc::no_such_file_or_directory):
|
|
|
|
return Status::NotFound();
|
2018-08-19 02:59:31 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
case int(errc::not_supported):
|
|
|
|
return Status::NotSupported();
|
|
|
|
|
|
|
|
case int(errc::invalid_argument):
|
|
|
|
return Status::InvalidArgument();
|
|
|
|
|
|
|
|
case int(errc::io_error):
|
|
|
|
return Status::IOError();
|
|
|
|
|
|
|
|
case int(errc::timed_out):
|
|
|
|
return Status::TimedOut();
|
|
|
|
|
|
|
|
case int(errc::device_or_resource_busy):
|
|
|
|
return Status::Busy();
|
|
|
|
|
|
|
|
case int(errc::resource_unavailable_try_again):
|
|
|
|
return Status::TryAgain();
|
|
|
|
|
|
|
|
case int(errc::no_space_on_device):
|
|
|
|
return Status::NoSpace();
|
|
|
|
|
|
|
|
case int(errc::not_enough_memory):
|
|
|
|
return Status::MemoryLimit();
|
|
|
|
|
|
|
|
default:
|
|
|
|
return Status::Aborted(slice(string_view(e.message())));
|
|
|
|
}
|
|
|
|
}()}
|
2018-08-19 02:59:31 +02:00
|
|
|
{
|
|
|
|
}
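// For example, an ENOENT-bearing error_code (hypothetical) maps onto the
// corresponding rocksdb status through the switch above:
//
//   const std::error_code ec{ENOENT, std::generic_category()};
//   const db::error_to_status status{ec};
//   assert(status.IsNotFound());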
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
//
|
|
|
|
// writebatch suite
|
|
|
|
//
|
|
|
|
|
|
|
|
void
|
|
|
|
ircd::db::append(rocksdb::WriteBatch &batch,
|
|
|
|
const cell::delta &delta)
|
2018-08-19 02:59:31 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
auto &column
|
2018-08-21 09:47:05 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
std::get<cell *>(delta)->c
|
2018-08-21 09:47:05 +02:00
|
|
|
};
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
append(batch, column, column::delta
|
2018-08-19 02:59:31 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
std::get<op>(delta),
|
|
|
|
std::get<cell *>(delta)->key(),
|
|
|
|
std::get<string_view>(delta)
|
|
|
|
});
|
|
|
|
}
|
2018-08-19 02:59:31 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
void
|
|
|
|
ircd::db::append(rocksdb::WriteBatch &batch,
|
|
|
|
column &column,
|
|
|
|
const column::delta &delta)
|
|
|
|
{
|
|
|
|
if(unlikely(!column))
|
2018-08-19 02:59:31 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
// Note: It is unknown at this time whether attempts to write to a null
// column should be an error or silently ignored. It's highly likely this
// log message will be removed soon to allow toggling database columns
// for optimization without touching the calling code.
|
|
|
|
log::critical
|
|
|
|
{
|
|
|
|
log, "Attempting to transact a delta for a null column"
|
|
|
|
};
|
2018-08-19 02:59:31 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
database::column &c(column);
|
|
|
|
const auto k(slice(std::get<1>(delta)));
|
|
|
|
const auto v(slice(std::get<2>(delta)));
|
|
|
|
switch(std::get<0>(delta))
|
|
|
|
{
|
|
|
|
case op::GET: assert(0); break;
|
|
|
|
case op::SET: batch.Put(c, k, v); break;
|
|
|
|
case op::MERGE: batch.Merge(c, k, v); break;
|
|
|
|
case op::DELETE: batch.Delete(c, k); break;
|
|
|
|
case op::DELETE_RANGE: batch.DeleteRange(c, k, v); break;
|
|
|
|
case op::SINGLE_DELETE: batch.SingleDelete(c, k); break;
|
|
|
|
}
|
2018-08-19 02:59:31 +02:00
|
|
|
}
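// A minimal sketch of composing a batch from deltas and committing it
// atomically; the database and column handles here are hypothetical and
// db::sopts{} takes the default write options:
//
//   rocksdb::WriteBatch batch;
//   db::append(batch, my_column, db::column::delta
//   {
//       db::op::SET, "some_key", "some_value"
//   });
//   db::commit(my_database, batch, db::sopts{});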
|
|
|
|
|
|
|
|
void
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::commit(database &d,
|
|
|
|
rocksdb::WriteBatch &batch,
|
|
|
|
const sopts &sopts)
|
2018-05-15 01:53:28 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
const auto opts(make_opts(sopts));
|
|
|
|
commit(d, batch, opts);
|
2018-05-15 01:53:28 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::commit(database &d,
|
|
|
|
rocksdb::WriteBatch &batch,
|
|
|
|
const rocksdb::WriteOptions &opts)
|
2018-05-15 01:53:28 +02:00
|
|
|
{
|
2019-04-19 09:15:14 +02:00
|
|
|
#ifdef RB_DEBUG
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::timer timer;
|
|
|
|
#endif
|
2018-05-15 01:53:28 +02:00
|
|
|
|
2019-03-02 21:33:32 +01:00
|
|
|
const std::lock_guard lock{write_mutex};
|
2019-01-29 20:13:58 +01:00
|
|
|
const ctx::uninterruptible ui;
|
|
|
|
throw_on_error
|
2018-05-15 01:53:28 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
d.d->Write(opts, &batch)
|
|
|
|
};
|
2018-05-15 01:53:28 +02:00
|
|
|
|
2019-04-19 09:15:14 +02:00
|
|
|
#ifdef RB_DEBUG
|
2019-01-29 20:13:58 +01:00
|
|
|
log::debug
|
|
|
|
{
|
|
|
|
log, "'%s' %lu COMMIT %s in %ld$us",
|
|
|
|
d.name,
|
|
|
|
sequence(d),
|
|
|
|
debug(batch),
|
|
|
|
timer.at<microseconds>().count()
|
|
|
|
};
|
|
|
|
#endif
|
2018-05-15 01:53:28 +02:00
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
std::string
|
|
|
|
ircd::db::debug(const rocksdb::WriteBatch &batch)
|
2018-05-15 01:36:48 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
return ircd::string(512, [&batch]
|
|
|
|
(const mutable_buffer &ret)
|
|
|
|
{
|
|
|
|
return snprintf(data(ret), size(ret)+1,
|
|
|
|
"%d deltas; size: %zuB :%s%s%s%s%s%s%s%s%s",
|
|
|
|
batch.Count(),
|
|
|
|
batch.GetDataSize(),
|
|
|
|
batch.HasPut()? " PUT" : "",
|
|
|
|
batch.HasDelete()? " DELETE" : "",
|
|
|
|
batch.HasSingleDelete()? " SINGLE_DELETE" : "",
|
|
|
|
batch.HasDeleteRange()? " DELETE_RANGE" : "",
|
|
|
|
batch.HasMerge()? " MERGE" : "",
|
|
|
|
batch.HasBeginPrepare()? " BEGIN_PREPARE" : "",
|
|
|
|
batch.HasEndPrepare()? " END_PREPARE" : "",
|
|
|
|
batch.HasCommit()? " COMMIT" : "",
|
|
|
|
batch.HasRollback()? " ROLLBACK" : "");
|
|
|
|
});
|
2018-05-15 01:36:48 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
bool
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::has(const rocksdb::WriteBatch &wb,
|
|
|
|
const op &op)
|
2018-05-15 01:36:48 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
switch(op)
|
2018-05-15 01:36:48 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
case op::GET: assert(0); return false;
|
|
|
|
case op::SET: return wb.HasPut();
|
|
|
|
case op::MERGE: return wb.HasMerge();
|
|
|
|
case op::DELETE: return wb.HasDelete();
|
|
|
|
case op::DELETE_RANGE: return wb.HasDeleteRange();
|
|
|
|
case op::SINGLE_DELETE: return wb.HasSingleDelete();
|
|
|
|
}
|
2018-05-15 01:36:48 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
return false;
|
2018-09-26 11:34:47 +02:00
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
//
|
|
|
|
// seek suite
|
|
|
|
//
|
2018-09-26 11:34:47 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
namespace ircd::db
|
2018-05-15 01:17:43 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
static rocksdb::Iterator &_seek_(rocksdb::Iterator &, const pos &);
|
|
|
|
static rocksdb::Iterator &_seek_(rocksdb::Iterator &, const string_view &);
|
|
|
|
static rocksdb::Iterator &_seek_lower_(rocksdb::Iterator &, const string_view &);
|
|
|
|
static rocksdb::Iterator &_seek_upper_(rocksdb::Iterator &, const string_view &);
|
|
|
|
static bool _seek(database::column &, const pos &, const rocksdb::ReadOptions &, rocksdb::Iterator &it);
|
|
|
|
static bool _seek(database::column &, const string_view &, const rocksdb::ReadOptions &, rocksdb::Iterator &it);
|
2018-05-15 01:17:43 +02:00
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
std::unique_ptr<rocksdb::Iterator>
|
|
|
|
ircd::db::seek(column &column,
|
|
|
|
const string_view &key,
|
|
|
|
const gopts &opts)
|
2018-05-15 01:17:43 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
database &d(column);
|
|
|
|
database::column &c(column);
|
|
|
|
|
|
|
|
std::unique_ptr<rocksdb::Iterator> ret;
|
|
|
|
seek(c, key, opts, ret);
|
|
|
|
return std::move(ret);
|
2018-05-15 01:17:43 +02:00
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
template<class pos>
|
2018-05-15 01:17:43 +02:00
|
|
|
bool
|
2019-01-29 20:13:58 +01:00
|
|
|
ircd::db::seek(database::column &c,
|
|
|
|
const pos &p,
|
|
|
|
const gopts &gopts,
|
|
|
|
std::unique_ptr<rocksdb::Iterator> &it)
|
2018-05-15 01:17:43 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
const rocksdb::ReadOptions opts
|
|
|
|
{
|
|
|
|
make_opts(gopts)
|
|
|
|
};
|
2018-05-15 01:17:43 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
return seek(c, p, opts, it);
|
2018-05-15 01:17:43 +02:00
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
template<class pos>
|
|
|
|
bool
|
|
|
|
ircd::db::seek(database::column &c,
|
|
|
|
const pos &p,
|
|
|
|
const rocksdb::ReadOptions &opts,
|
|
|
|
std::unique_ptr<rocksdb::Iterator> &it)
|
2018-05-15 01:17:43 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
if(!it)
|
|
|
|
{
|
2019-02-08 05:57:44 +01:00
|
|
|
const ctx::uninterruptible::nothrow ui;
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
database &d(*c.d);
|
|
|
|
rocksdb::ColumnFamilyHandle *const &cf(c);
|
|
|
|
it.reset(d.d->NewIterator(opts, cf));
|
|
|
|
}
|
2018-05-15 01:17:43 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
return _seek(c, p, opts, *it);
|
2018-05-15 01:17:43 +02:00
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
bool
|
|
|
|
ircd::db::_seek(database::column &c,
|
|
|
|
const string_view &p,
|
|
|
|
const rocksdb::ReadOptions &opts,
|
|
|
|
rocksdb::Iterator &it)
|
2018-09-05 11:56:50 +02:00
|
|
|
{
|
2019-02-08 05:57:44 +01:00
|
|
|
const ctx::uninterruptible ui;
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
#ifdef RB_DEBUG_DB_SEEK
|
|
|
|
database &d(*c.d);
|
|
|
|
const ircd::timer timer;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
_seek_(it, p);
|
|
|
|
|
|
|
|
#ifdef RB_DEBUG_DB_SEEK
|
|
|
|
log::debug
|
|
|
|
{
|
|
|
|
log, "'%s' %lu:%lu SEEK %s in %ld$us '%s'",
|
|
|
|
name(d),
|
|
|
|
sequence(d),
|
|
|
|
sequence(opts.snapshot),
|
|
|
|
it.status().ToString(),
|
|
|
|
timer.at<microseconds>().count(),
|
|
|
|
name(c)
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
|
|
|
|
return valid(it);
|
2018-09-05 11:56:50 +02:00
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
bool
|
|
|
|
ircd::db::_seek(database::column &c,
|
|
|
|
const pos &p,
|
|
|
|
const rocksdb::ReadOptions &opts,
|
|
|
|
rocksdb::Iterator &it)
|
2018-09-05 11:56:50 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
#ifdef RB_DEBUG_DB_SEEK
|
|
|
|
database &d(*c.d);
|
|
|
|
const ircd::timer timer;
|
|
|
|
const bool valid_it
|
2018-09-05 11:56:50 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
valid(it)
|
2018-09-05 11:56:50 +02:00
|
|
|
};
|
2019-01-29 20:13:58 +01:00
|
|
|
#endif
|
2018-09-05 11:56:50 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
_seek_(it, p);
|
|
|
|
|
|
|
|
#ifdef RB_DEBUG_DB_SEEK
|
|
|
|
log::debug
|
2018-10-21 08:30:27 +02:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
log, "'%s' %lu:%lu SEEK[%s] %s -> %s in %ld$us '%s'",
|
|
|
|
name(d),
|
|
|
|
sequence(d),
|
|
|
|
sequence(opts.snapshot),
|
|
|
|
reflect(p),
|
|
|
|
valid_it? "VALID" : "INVALID",
|
|
|
|
it.status().ToString(),
|
|
|
|
timer.at<microseconds>().count(),
|
|
|
|
name(c)
|
2018-10-21 08:30:27 +02:00
|
|
|
};
|
2019-01-29 20:13:58 +01:00
|
|
|
#endif
|
2018-10-21 08:30:27 +02:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
return valid(it);
|
2018-09-05 11:56:50 +02:00
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
/// Seek to entry NOT GREATER THAN key. That is, equal to or less than key
|
|
|
|
rocksdb::Iterator &
|
|
|
|
ircd::db::_seek_lower_(rocksdb::Iterator &it,
|
|
|
|
const string_view &sv)
|
2018-12-24 22:32:22 +01:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
it.SeekForPrev(slice(sv));
|
|
|
|
return it;
|
|
|
|
}
|
2018-12-24 22:32:22 +01:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
/// Seek to entry NOT LESS THAN key. That is, equal to or greater than key
|
|
|
|
rocksdb::Iterator &
|
|
|
|
ircd::db::_seek_upper_(rocksdb::Iterator &it,
|
|
|
|
const string_view &sv)
|
2018-12-24 22:32:22 +01:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
it.Seek(slice(sv));
|
|
|
|
return it;
|
2018-12-24 22:32:22 +01:00
|
|
|
}
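// Worked example for the two bound seeks above: with keys {"a", "c"} in the
// column, _seek_lower_(it, "b") lands on "a" (greatest key not greater than
// "b") while _seek_upper_(it, "b") lands on "c" (least key not less than "b").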
|
2019-01-29 20:13:58 +01:00
|
|
|
|
|
|
|
/// Defaults to _seek_upper_ because it has better support from RocksDB.
|
|
|
|
rocksdb::Iterator &
|
|
|
|
ircd::db::_seek_(rocksdb::Iterator &it,
|
|
|
|
const string_view &sv)
|
2018-12-24 22:32:22 +01:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
return _seek_upper_(it, sv);
|
2018-12-24 22:32:22 +01:00
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
rocksdb::Iterator &
|
|
|
|
ircd::db::_seek_(rocksdb::Iterator &it,
|
|
|
|
const pos &p)
|
2018-12-24 22:32:22 +01:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
switch(p)
|
|
|
|
{
|
|
|
|
case pos::NEXT: it.Next(); break;
|
|
|
|
case pos::PREV: it.Prev(); break;
|
|
|
|
case pos::FRONT: it.SeekToFirst(); break;
|
|
|
|
case pos::BACK: it.SeekToLast(); break;
|
|
|
|
default:
|
|
|
|
case pos::END:
|
|
|
|
{
|
|
|
|
it.SeekToLast();
|
|
|
|
if(it.Valid())
|
|
|
|
it.Next();
|
2018-12-24 22:32:22 +01:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2018-12-24 22:32:22 +01:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
return it;
|
2018-12-24 22:32:22 +01:00
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
//
|
|
|
|
// validation suite
|
|
|
|
//
|
2018-12-24 22:32:22 +01:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
void
|
|
|
|
ircd::db::valid_eq_or_throw(const rocksdb::Iterator &it,
|
|
|
|
const string_view &sv)
|
2018-12-24 22:32:22 +01:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
assert(!empty(sv));
|
|
|
|
if(!valid_eq(it, sv))
|
2018-12-24 22:32:22 +01:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
throw_on_error(it.status());
|
|
|
|
throw not_found{};
|
|
|
|
}
|
2018-12-24 22:32:22 +01:00
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
void
|
|
|
|
ircd::db::valid_or_throw(const rocksdb::Iterator &it)
|
2018-12-24 22:32:22 +01:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
if(!valid(it))
|
|
|
|
{
|
|
|
|
throw_on_error(it.status());
|
|
|
|
throw not_found{};
|
|
|
|
//assert(0); // status == ok + !Valid() == ???
|
|
|
|
}
|
2018-12-24 22:32:22 +01:00
|
|
|
}
|
2019-01-29 20:13:58 +01:00
|
|
|
|
|
|
|
bool
|
|
|
|
ircd::db::valid_lte(const rocksdb::Iterator &it,
|
|
|
|
const string_view &sv)
|
2018-12-24 22:32:22 +01:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
return valid(it, [&sv](const auto &it)
|
|
|
|
{
|
|
|
|
return it.key().compare(slice(sv)) <= 0;
|
|
|
|
});
|
2018-12-24 22:32:22 +01:00
|
|
|
}
|
2019-01-29 20:13:58 +01:00
|
|
|
|
|
|
|
bool
|
|
|
|
ircd::db::valid_gt(const rocksdb::Iterator &it,
|
|
|
|
const string_view &sv)
|
2018-12-24 22:32:22 +01:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
return valid(it, [&sv](const auto &it)
|
|
|
|
{
|
|
|
|
return it.key().compare(slice(sv)) > 0;
|
|
|
|
});
|
2018-12-24 22:32:22 +01:00
|
|
|
}
|
2019-01-29 20:13:58 +01:00
|
|
|
|
|
|
|
bool
|
|
|
|
ircd::db::valid_eq(const rocksdb::Iterator &it,
|
|
|
|
const string_view &sv)
|
2018-12-24 22:32:22 +01:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
return valid(it, [&sv](const auto &it)
|
|
|
|
{
|
|
|
|
return it.key().compare(slice(sv)) == 0;
|
|
|
|
});
|
2018-12-24 22:32:22 +01:00
|
|
|
}
|
2019-01-29 20:13:58 +01:00
|
|
|
|
|
|
|
bool
|
|
|
|
ircd::db::valid(const rocksdb::Iterator &it,
|
|
|
|
const valid_proffer &proffer)
|
2018-12-24 22:32:22 +01:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
return valid(it)? proffer(it) : false;
|
2018-12-24 22:32:22 +01:00
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
bool
|
|
|
|
ircd::db::operator!(const rocksdb::Iterator &it)
|
2018-12-24 22:32:22 +01:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
return !valid(it);
|
|
|
|
}
|
2018-12-24 22:32:22 +01:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
bool
|
|
|
|
ircd::db::valid(const rocksdb::Iterator &it)
|
|
|
|
{
|
|
|
|
switch(it.status().code())
|
2018-12-24 22:32:22 +01:00
|
|
|
{
|
2019-01-29 20:13:58 +01:00
|
|
|
using rocksdb::Status;
|
2018-12-24 22:32:22 +01:00
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
case Status::kOk: break;
|
|
|
|
case Status::kNotFound: break;
|
|
|
|
case Status::kIncomplete: break;
|
2018-12-24 22:32:22 +01:00
|
|
|
default:
|
2019-01-29 20:13:58 +01:00
|
|
|
throw_on_error(it.status());
|
|
|
|
__builtin_unreachable();
|
2018-12-24 22:32:22 +01:00
|
|
|
}
|
2019-01-29 20:13:58 +01:00
|
|
|
|
|
|
|
return it.Valid();
|
2018-12-24 22:32:22 +01:00
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
//
|
|
|
|
// column_names
|
|
|
|
//
|
|
|
|
|
2017-03-24 02:36:49 +01:00
|
|
|
std::vector<std::string>
|
|
|
|
ircd::db::column_names(const std::string &path,
|
2017-03-31 00:57:08 +02:00
|
|
|
const std::string &options)
|
2017-03-24 02:36:49 +01:00
|
|
|
{
|
2018-12-12 18:53:16 +01:00
|
|
|
return column_names(path, db::options{options});
|
2017-03-24 02:36:49 +01:00
|
|
|
}
|
|
|
|
|
2018-12-16 03:41:37 +01:00
|
|
|
/// Note that if there is no database found at path we still return a
/// vector containing the column name "default". This function is not
/// to be used as a test for whether the database exists. It returns
/// the columns required to be described at `path`; that will always
/// include the default column (RocksDB sez) even if the database doesn't
/// exist yet.
|
2017-03-24 02:36:49 +01:00
|
|
|
std::vector<std::string>
|
|
|
|
ircd::db::column_names(const std::string &path,
|
|
|
|
const rocksdb::DBOptions &opts)
|
|
|
|
try
|
|
|
|
{
|
|
|
|
std::vector<std::string> ret;
|
2018-12-16 03:41:37 +01:00
|
|
|
|
2017-04-03 06:02:32 +02:00
|
|
|
throw_on_error
|
|
|
|
{
|
|
|
|
rocksdb::DB::ListColumnFamilies(opts, path, &ret)
|
|
|
|
};
|
|
|
|
|
2017-03-24 02:36:49 +01:00
|
|
|
return ret;
|
|
|
|
}
|
2018-12-16 03:41:37 +01:00
|
|
|
catch(const not_found &)
|
2017-03-24 02:36:49 +01:00
|
|
|
{
|
2018-12-16 03:41:37 +01:00
|
|
|
return // No database found at path.
|
2017-03-24 02:36:49 +01:00
|
|
|
{
|
|
|
|
{ rocksdb::kDefaultColumnFamilyName }
|
|
|
|
};
|
|
|
|
}
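// A minimal usage sketch (the path is hypothetical); per the comment above
// the result is never empty, even when no database exists at the path:
//
//   const auto names
//   {
//       db::column_names("/var/db/example", std::string{})
//   };
//
//   assert(!names.empty());  // at least "default"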
|
|
|
|
|
2017-09-16 18:41:10 +02:00
|
|
|
//
|
|
|
|
// Misc
|
|
|
|
//
|
|
|
|
|
2018-11-03 02:08:40 +01:00
|
|
|
rocksdb::CompressionType
|
|
|
|
ircd::db::find_supported_compression(const std::string &list)
|
|
|
|
{
|
|
|
|
rocksdb::CompressionType ret
|
|
|
|
{
|
|
|
|
rocksdb::kNoCompression
|
|
|
|
};
|
|
|
|
|
|
|
|
tokens(list, ';', [&ret]
|
2019-06-02 10:03:28 +02:00
|
|
|
(const string_view &requested)
|
2018-11-03 02:08:40 +01:00
|
|
|
{
|
2018-12-01 00:07:51 +01:00
|
|
|
if(ret != rocksdb::kNoCompression)
|
|
|
|
return;
|
|
|
|
|
2019-06-02 10:03:28 +02:00
|
|
|
for(const auto &[name, type] : db::compressions)
|
|
|
|
if(type != 0L && name == requested)
|
|
|
|
{
|
|
|
|
ret = rocksdb::CompressionType(type);
|
|
|
|
break;
|
|
|
|
}
|
2018-11-03 02:08:40 +01:00
|
|
|
});
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2017-09-19 04:17:36 +02:00
|
|
|
rocksdb::DBOptions
|
2018-04-09 20:51:36 +02:00
|
|
|
ircd::db::make_dbopts(std::string optstr,
|
|
|
|
std::string *const &out,
|
2017-09-19 04:17:36 +02:00
|
|
|
bool *const read_only,
|
|
|
|
bool *const fsck)
|
|
|
|
{
|
|
|
|
// RocksDB doesn't parse a read_only option itself, so we accept one here to
// open the database read-only and then remove it from the string.
|
|
|
|
if(read_only)
|
2019-04-15 20:08:40 +02:00
|
|
|
*read_only |= optstr_find_and_remove(optstr, "read_only=true;"s);
|
2018-04-09 20:51:36 +02:00
|
|
|
else
|
|
|
|
optstr_find_and_remove(optstr, "read_only=true;"s);
|
2017-09-19 04:17:36 +02:00
|
|
|
|
|
|
|
// We also allow the user to specify fsck=true to run a repair operation on
// the db. This may be too expensive to do by default on every startup.
|
|
|
|
if(fsck)
|
2019-04-15 20:08:40 +02:00
|
|
|
*fsck |= optstr_find_and_remove(optstr, "fsck=true;"s);
|
2018-04-09 20:51:36 +02:00
|
|
|
else
|
|
|
|
optstr_find_and_remove(optstr, "fsck=true;"s);
|
2017-09-19 04:17:36 +02:00
|
|
|
|
|
|
|
// Generate RocksDB options from string
|
|
|
|
rocksdb::DBOptions opts
|
|
|
|
{
|
2018-12-12 18:53:16 +01:00
|
|
|
db::options(optstr)
|
2017-09-19 04:17:36 +02:00
|
|
|
};
|
|
|
|
|
2018-04-09 20:51:36 +02:00
|
|
|
if(out)
|
|
|
|
*out = std::move(optstr);
|
|
|
|
|
2017-09-19 04:17:36 +02:00
|
|
|
return opts;
|
|
|
|
}
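// A minimal sketch of the option-string handling above (the option string
// is hypothetical): the read_only and fsck tokens are consumed here and the
// remainder is handed to RocksDB's option parser.
//
//   bool read_only{false}, fsck{false};
//   std::string remain;
//   const auto dbopts
//   {
//       db::make_dbopts("read_only=true;fsck=true;create_if_missing=true;", &remain, &read_only, &fsck)
//   };
//
//   assert(read_only && fsck);
//   assert(remain == "create_if_missing=true;");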
|
|
|
|
|
2017-08-23 22:37:47 +02:00
|
|
|
bool
|
|
|
|
ircd::db::optstr_find_and_remove(std::string &optstr,
|
|
|
|
const std::string &what)
|
|
|
|
{
|
|
|
|
const auto pos(optstr.find(what));
|
|
|
|
if(pos == std::string::npos)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
optstr.erase(pos, what.size());
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2018-11-16 05:54:50 +01:00
|
|
|
/// Convert our options structure into RocksDB's options structure.
|
2016-09-25 03:18:54 +02:00
|
|
|
rocksdb::ReadOptions
|
2017-09-22 05:08:11 +02:00
|
|
|
ircd::db::make_opts(const gopts &opts)
|
2016-09-25 03:18:54 +02:00
|
|
|
{
|
|
|
|
rocksdb::ReadOptions ret;
|
2017-09-23 10:13:26 +02:00
|
|
|
assert(ret.fill_cache);
|
2018-12-28 19:55:57 +01:00
|
|
|
assert(ret.read_tier == BLOCKING);
|
2016-09-25 03:18:54 +02:00
|
|
|
|
2017-09-22 05:08:11 +02:00
|
|
|
// slice* for exclusive upper bound. When prefixes are used this value must
// have the same prefix, because ordering is not guaranteed between prefixes.
|
2018-04-14 07:08:57 +02:00
|
|
|
ret.iterate_lower_bound = opts.lower_bound;
|
|
|
|
ret.iterate_upper_bound = opts.upper_bound;
|
2016-09-25 03:18:54 +02:00
|
|
|
|
2017-09-22 05:08:11 +02:00
|
|
|
ret += opts;
|
|
|
|
return ret;
|
|
|
|
}
|
2017-08-31 07:12:58 +02:00
|
|
|
|
2018-05-24 03:43:47 +02:00
|
|
|
ircd::conf::item<bool>
|
|
|
|
read_checksum
|
|
|
|
{
|
|
|
|
{ "name", "ircd.db.read.checksum" },
|
|
|
|
{ "default", false }
|
|
|
|
};
|
|
|
|
|
2018-11-16 05:54:50 +01:00
|
|
|
/// Update a RocksDB options structure with our options structure. We use
|
|
|
|
/// operator+= for fun here; we can avoid reconstructing and returning a new
|
|
|
|
/// options structure in some cases by breaking out this function from
|
|
|
|
/// make_opts().
|
2017-09-22 05:08:11 +02:00
|
|
|
rocksdb::ReadOptions &
|
|
|
|
ircd::db::operator+=(rocksdb::ReadOptions &ret,
|
|
|
|
const gopts &opts)
|
|
|
|
{
|
|
|
|
ret.pin_data = test(opts, get::PIN);
|
|
|
|
ret.fill_cache |= test(opts, get::CACHE);
|
|
|
|
ret.fill_cache &= !test(opts, get::NO_CACHE);
|
|
|
|
ret.tailing = test(opts, get::NO_SNAPSHOT);
|
|
|
|
ret.prefix_same_as_start = test(opts, get::PREFIX);
|
2017-09-24 04:47:36 +02:00
|
|
|
ret.total_order_seek = test(opts, get::ORDERED);
|
2018-05-24 03:43:47 +02:00
|
|
|
ret.verify_checksums = bool(read_checksum);
|
|
|
|
ret.verify_checksums |= test(opts, get::CHECKSUM);
|
|
|
|
ret.verify_checksums &= !test(opts, get::NO_CHECKSUM);
|
2018-11-16 05:54:50 +01:00
|
|
|
|
|
|
|
ret.readahead_size = opts.readahead;
|
|
|
|
ret.iter_start_seqnum = opts.seqnum;
|
|
|
|
|
2018-12-28 19:55:57 +01:00
|
|
|
ret.read_tier = test(opts, get::NO_BLOCKING)?
|
|
|
|
rocksdb::ReadTier::kBlockCacheTier:
|
|
|
|
rocksdb::ReadTier::kReadAllTier;
|
|
|
|
|
2018-11-16 05:54:50 +01:00
|
|
|
if(opts.snapshot && !test(opts, get::NO_SNAPSHOT))
|
|
|
|
ret.snapshot = opts.snapshot;
|
|
|
|
|
2016-09-25 03:18:54 +02:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
rocksdb::WriteOptions
|
2017-03-31 00:57:08 +02:00
|
|
|
ircd::db::make_opts(const sopts &opts)
|
2016-09-25 03:18:54 +02:00
|
|
|
{
|
|
|
|
rocksdb::WriteOptions ret;
|
2017-11-30 20:04:25 +01:00
|
|
|
//ret.no_slowdown = true; // read_tier = NON_BLOCKING for writes
|
2017-09-22 05:08:11 +02:00
|
|
|
ret += opts;
|
|
|
|
return ret;
|
|
|
|
}
|
2016-09-25 03:18:54 +02:00
|
|
|
|
2017-09-22 05:08:11 +02:00
|
|
|
rocksdb::WriteOptions &
|
|
|
|
ircd::db::operator+=(rocksdb::WriteOptions &ret,
|
|
|
|
const sopts &opts)
|
|
|
|
{
|
|
|
|
ret.sync = test(opts, set::FSYNC);
|
|
|
|
ret.disableWAL = test(opts, set::NO_JOURNAL);
|
2018-12-28 19:55:57 +01:00
|
|
|
ret.ignore_missing_column_families = test(opts, set::NO_COLUMN_ERR);
|
|
|
|
ret.no_slowdown = test(opts, set::NO_BLOCKING);
|
|
|
|
ret.low_pri = test(opts, set::PRIO_LOW);
|
2016-09-25 03:18:54 +02:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2018-08-23 13:02:12 +02:00
|
|
|
//
|
|
|
|
//
|
|
|
|
//
|
|
|
|
|
2017-03-31 00:57:08 +02:00
|
|
|
std::vector<std::string>
|
|
|
|
ircd::db::available()
|
|
|
|
{
|
2018-12-30 00:27:58 +01:00
|
|
|
const auto &prefix
|
2018-05-25 05:01:52 +02:00
|
|
|
{
|
2019-01-25 19:35:39 +01:00
|
|
|
fs::path(fs::DB)
|
2018-05-25 05:01:52 +02:00
|
|
|
};
|
|
|
|
|
|
|
|
const auto dirs
|
|
|
|
{
|
|
|
|
fs::ls(prefix)
|
|
|
|
};
|
|
|
|
|
|
|
|
std::vector<std::string> ret;
|
|
|
|
for(const auto &dir : dirs)
|
|
|
|
{
|
2018-09-04 11:28:04 +02:00
|
|
|
if(!fs::is_dir(dir))
|
|
|
|
continue;
|
|
|
|
|
2018-05-25 05:01:52 +02:00
|
|
|
const auto name
|
|
|
|
{
|
|
|
|
lstrip(dir, prefix)
|
|
|
|
};
|
|
|
|
|
|
|
|
const auto checkpoints
|
|
|
|
{
|
|
|
|
fs::ls(dir)
|
|
|
|
};
|
|
|
|
|
|
|
|
for(const auto cpdir : checkpoints) try
|
|
|
|
{
|
|
|
|
const auto checkpoint
|
|
|
|
{
|
|
|
|
lstrip(lstrip(cpdir, dir), '/') //TODO: x-platform
|
|
|
|
};
|
|
|
|
|
|
|
|
auto path
|
|
|
|
{
|
|
|
|
db::path(name, lex_cast<uint64_t>(checkpoint))
|
|
|
|
};
|
|
|
|
|
|
|
|
ret.emplace_back(std::move(path));
|
|
|
|
}
|
|
|
|
catch(const bad_lex_cast &e)
|
|
|
|
{
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
std::string
|
|
|
|
ircd::db::path(const string_view &name)
|
|
|
|
{
|
|
|
|
const auto pair
|
|
|
|
{
|
|
|
|
namepoint(name)
|
|
|
|
};
|
|
|
|
|
|
|
|
return path(pair.first, pair.second);
|
|
|
|
}
|
|
|
|
|
|
|
|
std::string
|
|
|
|
ircd::db::path(const string_view &name,
|
|
|
|
const uint64_t &checkpoint)
|
|
|
|
{
|
2018-12-30 00:27:58 +01:00
|
|
|
const auto &prefix
|
2018-05-25 05:01:52 +02:00
|
|
|
{
|
2019-01-25 19:35:39 +01:00
|
|
|
fs::path(fs::DB)
|
2018-05-25 05:01:52 +02:00
|
|
|
};
|
|
|
|
|
2018-09-13 14:43:30 +02:00
|
|
|
const string_view parts[]
|
2018-05-25 05:01:52 +02:00
|
|
|
{
|
|
|
|
prefix, name, lex_cast(checkpoint)
|
2018-09-13 14:43:30 +02:00
|
|
|
};
|
|
|
|
|
2019-02-08 05:56:48 +01:00
|
|
|
return fs::path_string(parts);
|
2018-05-25 05:01:52 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
std::pair<ircd::string_view, uint64_t>
|
|
|
|
ircd::db::namepoint(const string_view &name_)
|
|
|
|
{
|
|
|
|
const auto s
|
|
|
|
{
|
|
|
|
split(name_, ':')
|
|
|
|
};
|
|
|
|
|
|
|
|
return
|
|
|
|
{
|
|
|
|
s.first,
|
|
|
|
s.second? lex_cast<uint64_t>(s.second) : uint64_t(-1)
|
|
|
|
};
|
2017-03-31 00:57:08 +02:00
|
|
|
}
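// Illustration of the name:checkpoint convention handled above and below:
//
//   db::namepoint("events:42")   -> { "events", 42 }
//   db::namepoint("events")      -> { "events", uint64_t(-1) }
//   db::namepoint("events", 42)  -> "events:42"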
|
|
|
|
|
2016-09-26 06:07:22 +02:00
|
|
|
std::string
|
2018-05-25 05:01:52 +02:00
|
|
|
ircd::db::namepoint(const string_view &name,
|
|
|
|
const uint64_t &checkpoint)
|
2017-03-31 00:57:08 +02:00
|
|
|
{
|
2018-05-25 05:01:52 +02:00
|
|
|
return std::string{name} + ':' + std::string{lex_cast(checkpoint)};
|
2017-03-31 00:57:08 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
std::pair<ircd::string_view, ircd::string_view>
|
|
|
|
ircd::db::operator*(const rocksdb::Iterator &it)
|
|
|
|
{
|
2017-04-03 06:02:32 +02:00
|
|
|
return { key(it), val(it) };
|
|
|
|
}
|
|
|
|
|
|
|
|
ircd::string_view
|
|
|
|
ircd::db::key(const rocksdb::Iterator &it)
|
|
|
|
{
|
|
|
|
return slice(it.key());
|
|
|
|
}
|
|
|
|
|
|
|
|
ircd::string_view
|
|
|
|
ircd::db::val(const rocksdb::Iterator &it)
|
|
|
|
{
|
|
|
|
return slice(it.value());
|
2017-03-31 00:57:08 +02:00
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
//
|
|
|
|
// slice
|
|
|
|
//
|
|
|
|
|
2018-01-18 05:28:34 +01:00
|
|
|
const char *
|
|
|
|
ircd::db::data(const rocksdb::Slice &slice)
|
|
|
|
{
|
|
|
|
return slice.data();
|
|
|
|
}
|
|
|
|
|
2018-01-18 05:15:13 +01:00
|
|
|
size_t
|
|
|
|
ircd::db::size(const rocksdb::Slice &slice)
|
|
|
|
{
|
|
|
|
return slice.size();
|
|
|
|
}
|
|
|
|
|
2017-03-31 00:57:08 +02:00
|
|
|
rocksdb::Slice
|
|
|
|
ircd::db::slice(const string_view &sv)
|
2016-09-26 06:07:22 +02:00
|
|
|
{
|
2017-03-31 00:57:08 +02:00
|
|
|
return { sv.data(), sv.size() };
|
2016-09-26 06:07:22 +02:00
|
|
|
}
|
2017-03-23 22:58:24 +01:00
|
|
|
|
2017-04-03 06:02:32 +02:00
|
|
|
ircd::string_view
|
|
|
|
ircd::db::slice(const rocksdb::Slice &sk)
|
|
|
|
{
|
|
|
|
return { sk.data(), sk.size() };
|
|
|
|
}
|
|
|
|
|
2019-01-29 20:13:58 +01:00
|
|
|
//
|
|
|
|
// reflect
|
|
|
|
//
|
|
|
|
|
2017-03-23 22:58:24 +01:00
|
|
|
const std::string &
|
|
|
|
ircd::db::reflect(const rocksdb::Tickers &type)
|
|
|
|
{
|
|
|
|
const auto &names(rocksdb::TickersNameMap);
|
|
|
|
const auto it(std::find_if(begin(names), end(names), [&type]
|
|
|
|
(const auto &pair)
|
|
|
|
{
|
|
|
|
return pair.first == type;
|
|
|
|
}));
|
|
|
|
|
|
|
|
static const auto empty{"<ticker>?????"s};
|
|
|
|
return it != end(names)? it->second : empty;
|
|
|
|
}
|
|
|
|
|
|
|
|
const std::string &
|
|
|
|
ircd::db::reflect(const rocksdb::Histograms &type)
|
|
|
|
{
|
|
|
|
const auto &names(rocksdb::HistogramsNameMap);
|
|
|
|
const auto it(std::find_if(begin(names), end(names), [&type]
|
|
|
|
(const auto &pair)
|
|
|
|
{
|
|
|
|
return pair.first == type;
|
|
|
|
}));
|
|
|
|
|
|
|
|
static const auto empty{"<histogram>?????"s};
|
|
|
|
return it != end(names)? it->second : empty;
|
|
|
|
}
|
2017-08-23 22:37:47 +02:00
|
|
|
|
2017-09-16 20:51:52 +02:00
|
|
|
ircd::string_view
|
2017-08-30 23:06:34 +02:00
|
|
|
ircd::db::reflect(const pos &pos)
|
|
|
|
{
|
|
|
|
switch(pos)
|
|
|
|
{
|
2017-09-19 04:17:36 +02:00
|
|
|
case pos::NEXT: return "NEXT";
|
|
|
|
case pos::PREV: return "PREV";
|
|
|
|
case pos::FRONT: return "FRONT";
|
|
|
|
case pos::BACK: return "BACK";
|
|
|
|
case pos::END: return "END";
|
|
|
|
}
|
|
|
|
|
|
|
|
return "?????";
|
|
|
|
}
|
|
|
|
|
|
|
|
ircd::string_view
|
|
|
|
ircd::db::reflect(const op &op)
|
|
|
|
{
|
|
|
|
switch(op)
|
|
|
|
{
|
|
|
|
case op::GET: return "GET";
|
|
|
|
case op::SET: return "SET";
|
|
|
|
case op::MERGE: return "MERGE";
|
|
|
|
case op::DELETE_RANGE: return "DELETE_RANGE";
|
|
|
|
case op::DELETE: return "DELETE";
|
|
|
|
case op::SINGLE_DELETE: return "SINGLE_DELETE";
|
2017-08-30 23:06:34 +02:00
|
|
|
}
|
|
|
|
|
2017-09-19 04:17:36 +02:00
|
|
|
return "?????";
|
2017-08-30 23:06:34 +02:00
|
|
|
}
|
|
|
|
|
2018-10-31 22:40:00 +01:00
|
|
|
ircd::string_view
|
2018-12-19 22:39:06 +01:00
|
|
|
ircd::db::reflect(const rocksdb::FlushReason &r)
|
2018-10-31 22:40:00 +01:00
|
|
|
{
|
2018-12-19 22:39:06 +01:00
|
|
|
using FlushReason = rocksdb::FlushReason;
|
2018-10-31 22:40:00 +01:00
|
|
|
|
2018-12-19 22:39:06 +01:00
|
|
|
switch(r)
|
2018-10-31 22:40:00 +01:00
|
|
|
{
|
2018-12-19 22:39:06 +01:00
|
|
|
case FlushReason::kOthers: return "Others";
|
|
|
|
case FlushReason::kGetLiveFiles: return "GetLiveFiles";
|
|
|
|
case FlushReason::kShutDown: return "ShutDown";
|
|
|
|
case FlushReason::kExternalFileIngestion: return "ExternalFileIngestion";
|
|
|
|
case FlushReason::kManualCompaction: return "ManualCompaction";
|
|
|
|
case FlushReason::kWriteBufferManager: return "WriteBufferManager";
|
|
|
|
case FlushReason::kWriteBufferFull: return "WriteBufferFull";
|
|
|
|
case FlushReason::kTest: return "Test";
|
|
|
|
case FlushReason::kDeleteFiles: return "DeleteFiles";
|
|
|
|
case FlushReason::kAutoCompaction: return "AutoCompaction";
|
|
|
|
case FlushReason::kManualFlush: return "ManualFlush";
|
|
|
|
}
|
|
|
|
|
|
|
|
return "??????";
|
|
|
|
}
|
|
|
|
|
|
|
|
ircd::string_view
|
|
|
|
ircd::db::reflect(const rocksdb::CompactionReason &r)
|
|
|
|
{
|
|
|
|
using CompactionReason = rocksdb::CompactionReason;
|
|
|
|
|
|
|
|
switch(r)
|
|
|
|
{
|
|
|
|
case CompactionReason::kUnknown: return "Unknown";
|
|
|
|
case CompactionReason::kLevelL0FilesNum: return "LevelL0FilesNum";
|
|
|
|
case CompactionReason::kLevelMaxLevelSize: return "LevelMaxLevelSize";
|
|
|
|
case CompactionReason::kUniversalSizeAmplification: return "UniversalSizeAmplification";
|
|
|
|
case CompactionReason::kUniversalSizeRatio: return "UniversalSizeRatio";
|
|
|
|
case CompactionReason::kUniversalSortedRunNum: return "UniversalSortedRunNum";
|
|
|
|
case CompactionReason::kFIFOMaxSize: return "FIFOMaxSize";
|
|
|
|
case CompactionReason::kFIFOReduceNumFiles: return "FIFOReduceNumFiles";
|
|
|
|
case CompactionReason::kFIFOTtl: return "FIFOTtl";
|
|
|
|
case CompactionReason::kManualCompaction: return "ManualCompaction";
|
|
|
|
case CompactionReason::kFilesMarkedForCompaction: return "FilesMarkedForCompaction";
|
|
|
|
case CompactionReason::kBottommostFiles: return "BottommostFiles";
|
|
|
|
case CompactionReason::kTtl: return "Ttl";
|
|
|
|
case CompactionReason::kFlush: return "Flush";
|
|
|
|
case CompactionReason::kExternalSstIngestion: return "ExternalSstIngestion";
|
|
|
|
case CompactionReason::kNumOfReasons:
|
|
|
|
break;
|
2018-10-31 22:40:00 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
return "??????";
|
|
|
|
}
|
|
|
|
|
|
|
|
ircd::string_view
|
|
|
|
ircd::db::reflect(const rocksdb::BackgroundErrorReason &r)
|
|
|
|
{
|
|
|
|
using rocksdb::BackgroundErrorReason;
|
|
|
|
|
|
|
|
switch(r)
|
|
|
|
{
|
|
|
|
case BackgroundErrorReason::kFlush: return "FLUSH";
|
|
|
|
case BackgroundErrorReason::kCompaction: return "COMPACTION";
|
|
|
|
case BackgroundErrorReason::kWriteCallback: return "WRITE";
|
|
|
|
case BackgroundErrorReason::kMemTable: return "MEMTABLE";
|
|
|
|
}
|
|
|
|
|
|
|
|
return "??????";
|
|
|
|
}
|
|
|
|
|
2018-12-19 22:39:06 +01:00
|
|
|
ircd::string_view
|
|
|
|
ircd::db::reflect(const rocksdb::WriteStallCondition &c)
|
|
|
|
{
|
|
|
|
using rocksdb::WriteStallCondition;
|
|
|
|
|
|
|
|
switch(c)
|
|
|
|
{
|
|
|
|
case WriteStallCondition::kNormal: return "NORMAL";
|
|
|
|
case WriteStallCondition::kDelayed: return "DELAYED";
|
|
|
|
case WriteStallCondition::kStopped: return "STOPPED";
|
|
|
|
}
|
|
|
|
|
|
|
|
return "??????";
|
|
|
|
}
|
|
|
|
|
2018-01-18 04:22:35 +01:00
|
|
|
ircd::string_view
|
|
|
|
ircd::db::reflect(const rocksdb::Env::Priority &p)
|
|
|
|
{
|
|
|
|
switch(p)
|
|
|
|
{
|
2018-04-09 21:55:22 +02:00
|
|
|
case rocksdb::Env::Priority::BOTTOM: return "BOTTOM"_sv;
|
2018-01-18 04:22:35 +01:00
|
|
|
case rocksdb::Env::Priority::LOW: return "LOW"_sv;
|
|
|
|
case rocksdb::Env::Priority::HIGH: return "HIGH"_sv;
|
2018-08-16 11:04:16 +02:00
|
|
|
case rocksdb::Env::Priority::TOTAL: assert(0); break;
|
2018-01-18 04:22:35 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
return "????"_sv;
|
|
|
|
}
|
|
|
|
|
|
|
|
ircd::string_view
|
|
|
|
ircd::db::reflect(const rocksdb::Env::IOPriority &p)
|
|
|
|
{
|
|
|
|
switch(p)
|
|
|
|
{
|
|
|
|
case rocksdb::Env::IOPriority::IO_LOW: return "IO_LOW"_sv;
|
|
|
|
case rocksdb::Env::IOPriority::IO_HIGH: return "IO_HIGH"_sv;
|
2018-08-16 11:04:16 +02:00
|
|
|
case rocksdb::Env::IOPriority::IO_TOTAL: assert(0); break;
|
2018-01-18 04:22:35 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
return "IO_????"_sv;
|
|
|
|
}
|
|
|
|
|
2018-11-02 09:07:09 +01:00
|
|
|
ircd::string_view
|
|
|
|
ircd::db::reflect(const rocksdb::Env::WriteLifeTimeHint &h)
|
|
|
|
{
|
|
|
|
using WriteLifeTimeHint = rocksdb::Env::WriteLifeTimeHint;
|
|
|
|
|
|
|
|
switch(h)
|
|
|
|
{
|
|
|
|
case WriteLifeTimeHint::WLTH_NOT_SET: return "NOT_SET";
|
|
|
|
case WriteLifeTimeHint::WLTH_NONE: return "NONE";
|
|
|
|
case WriteLifeTimeHint::WLTH_SHORT: return "SHORT";
|
|
|
|
case WriteLifeTimeHint::WLTH_MEDIUM: return "MEDIUM";
|
|
|
|
case WriteLifeTimeHint::WLTH_LONG: return "LONG";
|
|
|
|
case WriteLifeTimeHint::WLTH_EXTREME: return "EXTREME";
|
|
|
|
}
|
|
|
|
|
|
|
|
return "WLTH_????"_sv;
|
|
|
|
}
|
|
|
|
|
2018-10-31 22:48:14 +01:00
|
|
|
ircd::string_view
|
|
|
|
ircd::db::reflect(const rocksdb::Status::Severity &s)
|
|
|
|
{
|
|
|
|
using Severity = rocksdb::Status::Severity;
|
|
|
|
|
|
|
|
switch(s)
|
|
|
|
{
|
|
|
|
case Severity::kNoError: return "NONE";
|
|
|
|
case Severity::kSoftError: return "SOFT";
|
|
|
|
case Severity::kHardError: return "HARD";
|
|
|
|
case Severity::kFatalError: return "FATAL";
|
|
|
|
case Severity::kUnrecoverableError: return "UNRECOVERABLE";
|
|
|
|
case Severity::kMaxSeverity: break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return "?????";
|
|
|
|
}
|
|
|
|
|
2018-01-18 06:24:32 +01:00
|
|
|
ircd::string_view
|
|
|
|
ircd::db::reflect(const rocksdb::RandomAccessFile::AccessPattern &p)
|
|
|
|
{
|
|
|
|
switch(p)
|
|
|
|
{
|
|
|
|
case rocksdb::RandomAccessFile::AccessPattern::NORMAL: return "NORMAL"_sv;
|
|
|
|
case rocksdb::RandomAccessFile::AccessPattern::RANDOM: return "RANDOM"_sv;
|
|
|
|
case rocksdb::RandomAccessFile::AccessPattern::SEQUENTIAL: return "SEQUENTIAL"_sv;
|
|
|
|
case rocksdb::RandomAccessFile::AccessPattern::WILLNEED: return "WILLNEED"_sv;
|
|
|
|
case rocksdb::RandomAccessFile::AccessPattern::DONTNEED: return "DONTNEED"_sv;
|
|
|
|
}
|
|
|
|
|
|
|
|
return "??????"_sv;
|
|
|
|
}
|