
c++ - boost::asio bug? task_io_service before io_service destruction


I've found this strange bug in my code. Here is the self-contained test case I managed to reduce it to.

#include <cassert>
#include <functional>
#include <iostream>
#include <memory>
#include <stack>
#include <system_error>
#include <thread>
#include <vector>

#include <unistd.h> // for sleep()

#include <boost/asio.hpp>

using boost::asio::io_service;
using std::placeholders::_1;

class async_service
{
public:
    async_service();
    async_service(size_t number_threads);
    ~async_service();

    async_service(const async_service&) = delete;
    void operator=(const async_service&) = delete;

    void spawn();
    void shutdown();

    io_service& get_service();
    const io_service& get_service() const;

private:
    io_service service_;
    io_service::work* work_;
    std::vector<std::thread> threads_;
};

async_service::async_service()
  : work_(nullptr)
{
}

async_service::async_service(size_t number_threads)
  : work_(nullptr)
{
    for (size_t i = 0; i < number_threads; ++i)
        spawn();
}

async_service::~async_service()
{
    std::cout << __PRETTY_FUNCTION__ << std::endl;
    service_.stop();
    for (std::thread& t: threads_)
        t.join();
}

void run_service(io_service* service)
{
    service->run();
}

void async_service::spawn()
{
    // The work object keeps io_service::run() from returning
    // while there are no pending handlers.
    if (!work_)
        work_ = new io_service::work(service_);
    threads_.push_back(std::thread(run_service, &service_));
}

void async_service::shutdown()
{
    delete work_;
    work_ = nullptr;
}

io_service& async_service::get_service()
{
    return service_;
}

const io_service& async_service::get_service() const
{
    return service_;
}

// --------------------------------------------------------------

template <typename... Args>
class subscriber
  : public std::enable_shared_from_this<subscriber<Args...>>
{
public:
    typedef std::function<void (Args...)> handler_type;
    typedef std::shared_ptr<subscriber<Args...>> ptr;

    subscriber(async_service& service)
      : strand_(service.get_service())
    {
    }

    void subscribe(handler_type handle)
    {
        strand_.dispatch(
            std::bind(&subscriber<Args...>::do_subscribe,
                this->shared_from_this(), handle));
    }

    void relay(Args... params)
    {
        strand_.dispatch(
            std::bind(&subscriber<Args...>::do_relay,
                this->shared_from_this(), std::forward<Args>(params)...));
    }

private:
    typedef std::stack<handler_type> registry_stack;

    void do_subscribe(handler_type handle)
    {
        registry_.push(handle);
    }

    void do_relay(Args... params)
    {
        // Handlers are one-shot: move them out, then invoke each once.
        registry_stack notify_copy = std::move(registry_);
        registry_ = registry_stack();
        while (!notify_copy.empty())
        {
            notify_copy.top()(params...);
            notify_copy.pop();
        }
        assert(notify_copy.empty());
    }

    io_service::strand strand_;
    registry_stack registry_;
};

// --------------------------------------------------------

class lala_channel_proxy
  : public std::enable_shared_from_this<lala_channel_proxy>
{
public:
    typedef std::function<void (const std::error_code&)> receive_inventory_handler;

    lala_channel_proxy(async_service& service)
      : strand_(service.get_service())
    {
        inventory_subscriber_ =
            std::make_shared<inventory_subscriber_type>(service);
    }

    void start()
    {
        read_header();
    }

    void subscribe_inventory(receive_inventory_handler handle_receive)
    {
        inventory_subscriber_->subscribe(handle_receive);
    }

    typedef subscriber<const std::error_code&> inventory_subscriber_type;

    void read_header()
    {
        strand_.post(
            std::bind(&lala_channel_proxy::handle_read_header,
                shared_from_this(), boost::system::error_code(), 0));
    }

    void handle_read_header(const boost::system::error_code& ec,
        size_t bytes_transferred)
    {
        std::cout << "inventory ----------" << std::endl;
        inventory_subscriber_->relay(std::error_code());
        sleep(1);
        read_header();
    }

    io_service::strand strand_;
    inventory_subscriber_type::ptr inventory_subscriber_;
};

typedef std::shared_ptr<lala_channel_proxy> lala_channel_proxy_ptr;

class lala_channel
{
public:
    lala_channel(async_service& service)
    {
        lala_channel_proxy_ptr proxy =
            std::make_shared<lala_channel_proxy>(service);
        proxy->start();
        //weak_proxy_ = proxy;
        strong_proxy_ = proxy;
    }

    void subscribe_inventory(
        lala_channel_proxy::receive_inventory_handler handle_receive)
    {
        lala_channel_proxy_ptr proxy = strong_proxy_;
        proxy->subscribe_inventory(handle_receive);
    }

    lala_channel_proxy_ptr strong_proxy_;
    // Normally this has a weak pointer to the channel pimpl to allow
    // it to die, but whether it uses a weak_ptr or shared_ptr makes
    // no difference.
    //std::weak_ptr<channel_proxy> weak_proxy_;
};

typedef std::shared_ptr<lala_channel> lala_channel_ptr;
//typedef lala_channel_proxy_ptr lala_channel_ptr;

class session
  : public std::enable_shared_from_this<session>
{
public:
    typedef std::function<void (const std::error_code&)> completion_handler;

    session(async_service& service, async_service& mempool_service,
        async_service& disk_service)
      : strand_(service.get_service()),
        txpool_strand_(mempool_service.get_service()),
        chain_strand_(disk_service.get_service()), service_(service)
    {
    }

    void start()
    {
        auto this_ptr = shared_from_this();
        lala_channel_ptr node =
            std::make_shared<lala_channel>(service_);
        node->subscribe_inventory(
            std::bind(&session::inventory, this_ptr, _1, node));
        for (size_t i = 0; i < 500; ++i)
        {
            chain_strand_.post(
                []()
                {
                    std::cout << "HERE!" << std::endl;
                    sleep(2);
                });
        }
    }

private:
    void inventory(const std::error_code& ec, lala_channel_ptr node)
    {
        if (ec)
        {
            std::cerr << ec.message() << std::endl;
            return;
        }
        auto this_ptr = shared_from_this();
        txpool_strand_.post([]() {});
        node->subscribe_inventory(
            std::bind(&session::inventory, this_ptr, _1, node));
    }

    async_service& service_;
    io_service::strand txpool_strand_, strand_, chain_strand_;
};

int main()
{
    // First level
    {
        // Bug only happens for this ordering of async_service's.
        // That means it is only triggered when they are destroyed in
        // this reverse order.
        async_service network_service(1), disk_service(1), mempool_service(1);
        //async_service network_service(1), mempool_service(1), disk_service(1);
        //async_service disk_service(1), mempool_service(1), network_service(1);
        //async_service disk_service(1), network_service(1), mempool_service(1);
        //async_service mempool_service(1), disk_service(1), network_service(1);
        //async_service mempool_service(1), network_service(1), disk_service(1);

        // Second level
        {
            // Should be kept alive by io_service
            auto s = std::make_shared<session>(
                network_service, mempool_service, disk_service);
            s->start();
        }
        //network_service.shutdown();
        //disk_service.shutdown();
        //mempool_service.shutdown();
        sleep(3);
        // Never gets past here
    }
    std::cout << "Exiting..." << std::endl;
    return 0;
}

When I run it, I get this:

$ g++ -std=c++0x /tmp/ideone_y6OlI.cpp -lboost_system -pthread -ggdb
$ gdb a.out
GNU gdb (Ubuntu/Linaro 7.4-2012.04-0ubuntu2) 7.4-2012.04
Copyright (C) 2012 Free Software Foundation, Inc.
License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>
This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law. Type "show copying"
and "show warranty" for details.
This GDB was configured as "x86_64-linux-gnu".
For bug reporting instructions, please see:
<http://bugs.launchpad.net/gdb-linaro/>...
Reading symbols from /home/genjix/src/brokenlibbtc/a.out...done.
(gdb) r
Starting program: /home/genjix/src/brokenlibbtc/a.out
[Thread debugging using libthread_db enabled]
Using host libthread_db library "/lib/x86_64-linux-gnu/libthread_db.so.1".
[New Thread 0x7ffff6deb700 (LWP 28098)]
[New Thread 0x7ffff65ea700 (LWP 28099)]
[New Thread 0x7ffff5de9700 (LWP 28100)]
inventory ----------
HERE!
inventory ----------
HERE!
inventory ----------
async_service::~async_service()
async_service::~async_service()
[Thread 0x7ffff5de9700 (LWP 28100) exited]

Program received signal SIGSEGV, Segmentation fault.
[Switching to Thread 0x7ffff6deb700 (LWP 28098)]
0x0000000000405873 in boost::asio::detail::task_io_service::wake_one_idle_thread_and_unlock (this=0x6255e0, lock=...) at /usr/include/boost/asio/detail/impl/task_io_service.ipp:461
461 first_idle_thread_ = idle_thread->next;

The same happens with boost 1.48 and 1.49.

I'd like to know why this happens. It only occurs in this highly specific configuration; if I change anything at all, the bug does not show up.

async_service is a convenience wrapper around io_service. Strangely, if I change the io_service member to an io_service* and never delete the io_service, then the bug does not occur... but surely that shouldn't matter?
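
The pointer variant I mean looks roughly like this (a sketch of the change, not the exact code; the io_service is deliberately leaked so its destructor never runs):

// Sketch of the workaround variant described above: hold the io_service
// by pointer and never delete it, so it is never destroyed.
class async_service
{
public:
    async_service()
      : service_(new io_service), work_(nullptr) // service_ intentionally leaked
    {
    }
    // ... rest of the interface unchanged, using *service_ ...
private:
    io_service* service_;
    io_service::work* work_;
    std::vector<std::thread> threads_;
};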

If you look at the source code in main(), three async_service objects are created. Each of them manages the lifetime of a single io_service.

// Bug only happens for this ordering of async_service's.
// That means it is only triggered when they are destroyed in
// this reverse order.
async_service network_service(1), disk_service(1), mempool_service(1);
//async_service network_service(1), mempool_service(1), disk_service(1);
//async_service disk_service(1), mempool_service(1), network_service(1);
//async_service disk_service(1), network_service(1), mempool_service(1);
//async_service mempool_service(1), disk_service(1), network_service(1);
//async_service mempool_service(1), network_service(1), disk_service(1);

The subscriber class represents subscriptions to... things that get invoked on a specific event. session and channel were adapted from a much bigger program, so they may look muddled/confusing.
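
As a rough illustration (not code from the program), a subscriber is used like this:

// Hypothetical usage of the subscriber template (illustrative only):
async_service service(1);
auto sub = std::make_shared<subscriber<int>>(service);
sub->subscribe([](int value) { std::cout << "got " << value << std::endl; });
sub->relay(42); // fires (and consumes) every registered handler once
sleep(1);       // give the worker thread time to run the handler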

Best Answer

One problem is that session::inventory, when executed from a thread belonging to the constructor's first argument (network_service in the failing case), tries to access a strand that was initialized with the second argument (mempool_service).

void inventory(const std::error_code& ec, lala_channel_ptr node)
{
    if (ec)
    {
        std::cerr << ec.message() << std::endl;
        return;
    }
    auto this_ptr = shared_from_this();
    txpool_strand_.post([]() {}); // <-- one problem is here.
    node->subscribe_inventory(
        std::bind(&session::inventory, this_ptr, _1, node));
}

Given the destruction order, mempool_service has already been destroyed by then, so somewhere during the execution of post the access to it fails.
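
In other words, no io_service may be destroyed while a thread of another service can still post work to it. One way to restructure the teardown is sketched below with a hypothetical stop_and_join helper (not part of the code above): stop and join every service's threads first, and only then let the io_service destructors run.

// Hypothetical two-phase teardown for async_service. Phase 1 stops the
// io_service and joins its threads; phase 2 (the destructor) may then
// destroy the io_service safely, because no thread can post to it anymore.
void async_service::stop_and_join()
{
    service_.stop();
    for (std::thread& t: threads_)
        if (t.joinable())
            t.join();
    threads_.clear();
}

// In main(), before the services go out of scope:
//     network_service.stop_and_join();
//     disk_service.stop_and_join();
//     mempool_service.stop_and_join();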

On "c++ - boost::asio bug? task_io_service before io_service destruction", see the similar question on Stack Overflow: https://stackoverflow.com/questions/10902761/
