diff --git a/src/rpcserver.cpp b/src/rpcserver.cpp
index 3ea5ebd80..3c9e731cc 100644
--- a/src/rpcserver.cpp
+++ b/src/rpcserver.cpp
@@ -595,12 +595,13 @@ void StartRPCThreads()
     asio::ip::address bindAddress = loopback ? asio::ip::address_v6::loopback() : asio::ip::address_v6::any();
     ip::tcp::endpoint endpoint(bindAddress, GetArg("-rpcport", Params().RPCPort()));
     boost::system::error_code v6_only_error;
-    boost::shared_ptr<ip::tcp::acceptor> acceptor(new ip::tcp::acceptor(*rpc_io_service));
 
     bool fListening = false;
     std::string strerr;
     try
     {
+        boost::shared_ptr<ip::tcp::acceptor> acceptor(new ip::tcp::acceptor(*rpc_io_service));
+        rpc_acceptors.push_back(acceptor);
         acceptor->open(endpoint.protocol());
         acceptor->set_option(boost::asio::ip::tcp::acceptor::reuse_address(true));
 
@@ -618,7 +619,6 @@ void StartRPCThreads()
     {
         strerr = strprintf(_("An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s"), endpoint.port(), e.what());
    }
-
     try {
         // If dual IPv6/IPv4 failed (or we're opening loopback interfaces only), open IPv4 separately
         if (!fListening || loopback || v6_only_error)
@@ -626,7 +626,8 @@ void StartRPCThreads()
         {
             bindAddress = loopback ? asio::ip::address_v4::loopback() : asio::ip::address_v4::any();
             endpoint.address(bindAddress);
-            acceptor.reset(new ip::tcp::acceptor(*rpc_io_service));
+            boost::shared_ptr<ip::tcp::acceptor> acceptor(new ip::tcp::acceptor(*rpc_io_service));
+            rpc_acceptors.push_back(acceptor);
             acceptor->open(endpoint.protocol());
             acceptor->set_option(boost::asio::ip::tcp::acceptor::reuse_address(true));
             acceptor->bind(endpoint);
@@ -670,7 +671,16 @@ void StopRPCThreads()
 {
     if (rpc_io_service == NULL) return;
 
+    // First, cancel all timers and acceptors
+    // This is not done automatically by ->stop(), and in some cases the destructor of
+    // asio::io_service can hang if this is skipped.
+    BOOST_FOREACH(const boost::shared_ptr<ip::tcp::acceptor> &acceptor, rpc_acceptors)
+        acceptor->cancel();
+    rpc_acceptors.clear();
+    BOOST_FOREACH(const PAIRTYPE(std::string, boost::shared_ptr<deadline_timer>) &timer, deadlineTimers)
+        timer.second->cancel();
     deadlineTimers.clear();
+
     rpc_io_service->stop();
     if (rpc_worker_group != NULL)
         rpc_worker_group->join_all();
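
Below is a minimal, self-contained sketch (not taken from the patch) of the shutdown ordering the change enforces: cancel every acceptor and deadline timer explicitly, then stop the io_service and join the worker threads. All names in it (g_io_service, g_acceptors, g_timers, StartSketch, StopSketch, the port number) are illustrative, not identifiers from rpcserver.cpp.

```cpp
#include <boost/asio.hpp>
#include <boost/bind.hpp>
#include <boost/foreach.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/thread.hpp>
#include <map>
#include <string>
#include <vector>

using boost::asio::ip::tcp;

static boost::asio::io_service g_io_service;
static std::vector<boost::shared_ptr<tcp::acceptor> > g_acceptors;
static std::map<std::string, boost::shared_ptr<boost::asio::deadline_timer> > g_timers;
static boost::thread_group g_workers;

// Handlers are invoked with operation_aborted once cancel() runs; no-ops here.
static void HandleAccept(boost::shared_ptr<tcp::socket> sock, const boost::system::error_code& err) {}
static void HandleTimer(const boost::system::error_code& err) {}

static void StartSketch()
{
    // One listening acceptor and one long-lived timer, mirroring the roles of
    // rpc_acceptors and deadlineTimers in the patch. Port 18332 is arbitrary.
    boost::shared_ptr<tcp::acceptor> acceptor(new tcp::acceptor(g_io_service,
        tcp::endpoint(boost::asio::ip::address_v4::loopback(), 18332)));
    g_acceptors.push_back(acceptor);
    boost::shared_ptr<tcp::socket> sock(new tcp::socket(g_io_service));
    acceptor->async_accept(*sock,
        boost::bind(&HandleAccept, sock, boost::asio::placeholders::error));

    boost::shared_ptr<boost::asio::deadline_timer> timer(
        new boost::asio::deadline_timer(g_io_service, boost::posix_time::hours(1)));
    g_timers["example"] = timer;
    timer->async_wait(boost::bind(&HandleTimer, boost::asio::placeholders::error));

    g_workers.create_thread(boost::bind(&boost::asio::io_service::run, &g_io_service));
}

static void StopSketch()
{
    // Cancel outstanding asynchronous operations first: io_service::stop() does
    // not cancel them, and (per the patch comment) the io_service destructor can
    // hang in some cases if pending acceptors/timers are left behind.
    BOOST_FOREACH(const boost::shared_ptr<tcp::acceptor>& acceptor, g_acceptors)
        acceptor->cancel();
    g_acceptors.clear();
    for (std::map<std::string, boost::shared_ptr<boost::asio::deadline_timer> >::iterator it = g_timers.begin();
         it != g_timers.end(); ++it)
        it->second->cancel();
    g_timers.clear();

    g_io_service.stop();
    g_workers.join_all();
}

int main()
{
    StartSketch();
    boost::this_thread::sleep(boost::posix_time::milliseconds(100));
    StopSketch(); // returns promptly because all pending work was cancelled first
    return 0;
}
```

The patch follows the same pattern, with the extra detail that each acceptor is now created inside the try block and registered in the module-level rpc_acceptors vector so that StopRPCThreads can reach it later.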