clang format

Author: gabime
Date:   2023-09-25 02:44:07 +03:00
Parent: f24f7fa2fa
Commit: 1a0bfc7a89

92 changed files with 2365 additions and 3537 deletions
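The changes below are purely mechanical: the tree was re-run through clang-format, so opening braces move onto the preceding line, long parameter lists break one parameter per line, trivial getters collapse to a single line, and the guarded #include is indented. For orientation, a .clang-format fragment along the following lines would produce the style visible in the hunks below. This is a sketch inferred from the diff, not the configuration actually committed to the repository (the real settings live in the repo's .clang-format and may differ):

    # Hypothetical sketch of a .clang-format that matches the visible changes.
    BasedOnStyle: Google                     # attached-brace base style
    IndentWidth: 4                           # bodies indented by four spaces
    ColumnLimit: 100                         # long declarations get wrapped
    BreakBeforeBraces: Attach                # "{" stays on the declaration line
    BinPackParameters: false                 # wrapped signatures: one parameter per line
    AllowShortFunctionsOnASingleLine: All    # one-line getters such as queue_size()
    IndentPPDirectives: BeforeHash           # "#include" indented inside "#ifndef"

Running clang-format -i over the sources with such a file is what turns, for example, the Allman-style constructor in the first hunk into the attached-brace version added beside it.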


@@ -4,26 +4,25 @@
 #pragma once
 
 #ifndef SPDLOG_HEADER_ONLY
-#    include <spdlog/details/thread_pool.h>
+    #include <spdlog/details/thread_pool.h>
 #endif
 
-#include <spdlog/common.h>
 #include <cassert>
+#include <spdlog/common.h>
 
 namespace spdlog {
 namespace details {
 
-SPDLOG_INLINE thread_pool::thread_pool(
-    size_t q_max_items, size_t threads_n, std::function<void()> on_thread_start, std::function<void()> on_thread_stop)
-    : q_(q_max_items)
-{
-    if (threads_n == 0 || threads_n > 1000)
-    {
+SPDLOG_INLINE thread_pool::thread_pool(size_t q_max_items,
+                                       size_t threads_n,
+                                       std::function<void()> on_thread_start,
+                                       std::function<void()> on_thread_stop)
+    : q_(q_max_items) {
+    if (threads_n == 0 || threads_n > 1000) {
         throw_spdlog_ex("spdlog::thread_pool(): invalid threads_n param (valid "
                         "range is 1-1000)");
     }
-    for (size_t i = 0; i < threads_n; i++)
-    {
+    for (size_t i = 0; i < threads_n; i++) {
         threads_.emplace_back([this, on_thread_start, on_thread_stop] {
             on_thread_start();
             this->thread_pool::worker_loop_();
@@ -33,100 +32,71 @@ SPDLOG_INLINE thread_pool::thread_pool(
 }
 
 SPDLOG_INLINE thread_pool::thread_pool(size_t q_max_items, size_t threads_n, std::function<void()> on_thread_start)
-    : thread_pool(q_max_items, threads_n, on_thread_start, [] {})
-{}
+    : thread_pool(q_max_items, threads_n, on_thread_start, [] {}) {}
 
 SPDLOG_INLINE thread_pool::thread_pool(size_t q_max_items, size_t threads_n)
     : thread_pool(
-          q_max_items, threads_n, [] {}, [] {})
-{}
+          q_max_items, threads_n, [] {}, [] {}) {}
 
 // message all threads to terminate gracefully join them
-SPDLOG_INLINE thread_pool::~thread_pool()
-{
-    SPDLOG_TRY
-    {
-        for (size_t i = 0; i < threads_.size(); i++)
-        {
+SPDLOG_INLINE thread_pool::~thread_pool() {
+    SPDLOG_TRY {
+        for (size_t i = 0; i < threads_.size(); i++) {
             post_async_msg_(async_msg(async_msg_type::terminate), async_overflow_policy::block);
         }
 
-        for (auto &t : threads_)
-        {
+        for (auto &t : threads_) {
             t.join();
         }
     }
     SPDLOG_CATCH_STD
 }
 
-void SPDLOG_INLINE thread_pool::post_log(async_logger_ptr &&worker_ptr, const details::log_msg &msg, async_overflow_policy overflow_policy)
-{
+void SPDLOG_INLINE thread_pool::post_log(async_logger_ptr &&worker_ptr,
+                                         const details::log_msg &msg,
+                                         async_overflow_policy overflow_policy) {
     async_msg async_m(std::move(worker_ptr), async_msg_type::log, msg);
     post_async_msg_(std::move(async_m), overflow_policy);
 }
 
-void SPDLOG_INLINE thread_pool::post_flush(async_logger_ptr &&worker_ptr, async_overflow_policy overflow_policy)
-{
+void SPDLOG_INLINE thread_pool::post_flush(async_logger_ptr &&worker_ptr, async_overflow_policy overflow_policy) {
     post_async_msg_(async_msg(std::move(worker_ptr), async_msg_type::flush), overflow_policy);
 }
 
-size_t SPDLOG_INLINE thread_pool::overrun_counter()
-{
-    return q_.overrun_counter();
-}
+size_t SPDLOG_INLINE thread_pool::overrun_counter() { return q_.overrun_counter(); }
 
-void SPDLOG_INLINE thread_pool::reset_overrun_counter()
-{
-    q_.reset_overrun_counter();
-}
+void SPDLOG_INLINE thread_pool::reset_overrun_counter() { q_.reset_overrun_counter(); }
 
-size_t SPDLOG_INLINE thread_pool::discard_counter()
-{
-    return q_.discard_counter();
-}
+size_t SPDLOG_INLINE thread_pool::discard_counter() { return q_.discard_counter(); }
 
-void SPDLOG_INLINE thread_pool::reset_discard_counter()
-{
-    q_.reset_discard_counter();
-}
+void SPDLOG_INLINE thread_pool::reset_discard_counter() { q_.reset_discard_counter(); }
 
-size_t SPDLOG_INLINE thread_pool::queue_size()
-{
-    return q_.size();
-}
+size_t SPDLOG_INLINE thread_pool::queue_size() { return q_.size(); }
 
-void SPDLOG_INLINE thread_pool::post_async_msg_(async_msg &&new_msg, async_overflow_policy overflow_policy)
-{
-    if (overflow_policy == async_overflow_policy::block)
-    {
+void SPDLOG_INLINE thread_pool::post_async_msg_(async_msg &&new_msg, async_overflow_policy overflow_policy) {
+    if (overflow_policy == async_overflow_policy::block) {
         q_.enqueue(std::move(new_msg));
-    }
-    else if (overflow_policy == async_overflow_policy::overrun_oldest)
-    {
+    } else if (overflow_policy == async_overflow_policy::overrun_oldest) {
         q_.enqueue_nowait(std::move(new_msg));
-    }
-    else
-    {
+    } else {
         assert(overflow_policy == async_overflow_policy::discard_new);
         q_.enqueue_if_have_room(std::move(new_msg));
     }
 }
 
-void SPDLOG_INLINE thread_pool::worker_loop_()
-{
-    while (process_next_msg_()) {}
+void SPDLOG_INLINE thread_pool::worker_loop_() {
+    while (process_next_msg_()) {
+    }
 }
 
 // process next message in the queue
 // return true if this thread should still be active (while no terminate msg
 // was received)
-bool SPDLOG_INLINE thread_pool::process_next_msg_()
-{
+bool SPDLOG_INLINE thread_pool::process_next_msg_() {
     async_msg incoming_async_msg;
     q_.dequeue(incoming_async_msg);
 
-    switch (incoming_async_msg.msg_type)
-    {
+    switch (incoming_async_msg.msg_type) {
     case async_msg_type::log: {
         incoming_async_msg.worker_ptr->backend_sink_it_(incoming_async_msg);
         return true;