Why does the io_context stop in my Boost Asio coroutine server?

Time: 2019-11-17 11:36:03

Tags: c++ boost-asio boost-coroutine

My server is based on the boost spawn echo server example, improved as discussed in this thread. The real server is complex, so I made a simplified server to demonstrate the problem:

The server listens on port 12345 and receives 0x4000 bytes of data from each new connection.

The client runs 1000 threads, each connecting to the server and sending 0x4000 bytes of data.

The problem: while the client is running, kill the client process with Ctrl-C in the console after about 1 second; the server's io_context then becomes stopped and the server enters an infinite loop consuming 100% CPU. If it doesn't happen the first time, start and kill the client a few more times and it will. It may take several tries because the client exhausts TCP ports; in that case wait a few minutes and retry. On my machine it happens after killing the client 3~15 times.

The boost documentation says io_context.stopped() is used to determine whether the io_context has been stopped,

  "either through an explicit call to stop(), or due to running out of work"

I never call io_context.stop(), and I use make_work_guard(io_context) to keep the io_context from running out of work, so why does it still stop?
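
To be clear about my expectation, here is a minimal standalone sketch (separate from the server below) of what I understand make_work_guard to guarantee: run() blocks while the guard is alive, even with no pending handlers, and only returns after the guard is reset:

#include <boost/asio.hpp>
#include <chrono>
#include <iostream>
#include <thread>

int main() {
    boost::asio::io_context io_context;
    auto guard = boost::asio::make_work_guard(io_context); // keeps run() from returning while alive

    std::thread t([&] {
        io_context.run();   // blocks here although there are no handlers, because of the guard
        std::cout << "run() returned, stopped() = " << io_context.stopped() << std::endl;
    });

    std::this_thread::sleep_for(std::chrono::seconds(1));
    guard.reset();          // drop the guard -> io_context runs out of work and run() returns
    t.join();
}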

My environment: Win10 64-bit, Boost 1.71.0

Server code:

#include <iostream>
using namespace std;

#include <boost/thread/thread.hpp>
#include <boost/asio.hpp>
#include <boost/asio/spawn.hpp>
using namespace boost;
using namespace boost::asio;
using namespace boost::asio::ip;
namespace ba=boost::asio;

#define SERVER_PORT 12345
#define DATA_LEN 0x4000


struct session : public std::enable_shared_from_this<session>
{
    tcp::socket socket_;
    boost::asio::steady_timer timer_;
    boost::asio::strand<boost::asio::io_context::executor_type> strand_;

    explicit session(boost::asio::io_context& io_context, tcp::socket socket)
    : socket_(std::move(socket)),
      timer_(io_context),
      strand_(io_context.get_executor())
    { }

    void go()
    {
        auto self(shared_from_this());
        boost::asio::spawn(strand_, [this, self](boost::asio::yield_context yield)
        {
            spawn(yield, [this, self](ba::yield_context yield) {
                timer_.expires_from_now(10s); // 10 seconds
                while (socket_.is_open()) {
                    boost::system::error_code ec;
                    timer_.async_wait(yield[ec]);
                    // timeout triggered, timer was not canceled
                    if (ba::error::operation_aborted != ec) {
                        socket_.close();
                    }
                }
            });

            try
            {
                // recv data
                string packet;

                // read data
                boost::system::error_code ec;

                ba::async_read(socket_,
                               ba::dynamic_buffer(packet),
                               ba::transfer_exactly(DATA_LEN),
                               yield[ec]);
                if(ec) {
                    throw "read_fail";
                }

            }
            catch (...)
            {
                cout << "exception" << endl;
            }

            timer_.cancel();
            socket_.close();
        });

    }
};
struct my_server {  
    my_server() { }
    ~my_server() { } 

    void start() {
        ba::io_context io_context;
        auto worker = ba::make_work_guard(io_context);

        ba::spawn(io_context, [&](ba::yield_context yield)
        {
            tcp::acceptor acceptor(io_context,
            tcp::endpoint(tcp::v4(), SERVER_PORT));

            for (;;)
            {
                boost::system::error_code ec;

                tcp::socket socket(io_context);
                acceptor.async_accept(socket, yield[ec]);
                if (!ec) {
                    std::make_shared<session>(io_context, std::move(socket))->go();
                } 
            }
        });

        // Run io_context on All CPUs
        auto thread_count = std::thread::hardware_concurrency();
        boost::thread_group tgroup;
        for (auto i = 0; i < thread_count; ++i) 
            tgroup.create_thread([&] {
                for (;;) {
                    try { 
                        if (io_context.stopped()) { // <- this happens after killing Client process several times
                            cout << "io_context STOPPED, now server runs infinit loop with full cpu usage" << endl;
                        }
                        io_context.run(); 
                    }
                    catch(const std::exception& e) { 
                        MessageBox(0, "This never popup", e.what(), 0); 
                    }
                    catch(const boost::exception& e) { 
                        MessageBox(0, "This never popup", boost::diagnostic_information(e).data(), 0); 
                    }
                    catch(...) { MessageBox(0, "This never popup", "", 0); }
                }
            });
        tgroup.join_all();
    }
};  

int main() {
    my_server svr;
    svr.start();
}

Client code:

#include <iostream>
#include <random>
#include <thread>
#include <boost/asio.hpp>
#include <boost/thread.hpp>
using namespace std;

using boost::asio::ip::tcp;
namespace ba=boost::asio;

#define SERVER "127.0.0.1"
#define PORT "12345"

int main() {
    boost::asio::io_context io_context;

    static string data_0x4000(0x4000, 'a');

    boost::thread_group tgroup;
    for (auto i = 0; i < 1000; ++i) 
        tgroup.create_thread([&] {
            for(;;) {

                try {
                    tcp::socket s(io_context);
                    tcp::resolver resolver(io_context);
                    boost::asio::connect(s, resolver.resolve(SERVER, PORT));

                    ba::write(s, ba::buffer(data_0x4000));
                } catch (std::exception const& e) {
                    cout << " exception: " << e.what() << endl;
                } catch (...) {
                    cout << "unknown exception" << endl;
                }
            }
        });

    tgroup.join_all();

    return 0;
}

Update - workaround:

I guessed something goes wrong between the io_context and the coroutines, so I tried replacing the unnecessary spawn with a std::thread, and that worked: the io_context never stops any more. But why does the problem happen in the first place?

Replaced this:

ba::spawn(io_context, [&](ba::yield_context yield)
{
    tcp::acceptor acceptor(io_context,
    tcp::endpoint(tcp::v4(), SERVER_PORT));

    for (;;)
    {
        boost::system::error_code ec;

        tcp::socket socket(io_context);
        acceptor.async_accept(socket, yield[ec]);
        if (!ec) {
            std::make_shared<session>(io_context, std::move(socket))->go();
        } 
    }
});

With this:

std::thread([&]()
{
    tcp::acceptor acceptor(io_context,
    tcp::endpoint(tcp::v4(), SERVER_PORT));

    for (;;)
    {
        boost::system::error_code ec;

        tcp::socket socket(io_context);
        acceptor.accept(socket, ec);
        if (!ec) {
            std::make_shared<session>(io_context, std::move(socket))->go();
        } 
    }
}).detach();

1 Answer:

Answer 0 (score: 0)

Even with (very) extensive stress testing, I cannot reproduce your problem on Linux.

Apart from a number of sessions reaching the "EOF" message, which is to be expected, even hard-killing the client processes had no other observable effect.

There is the issue of running out of available ports, but that is mostly due to the unusually high reconnect rate in your client.

Out-of-the-box ideas:

  • Could it be that you are using std::cout and/or MessageBox² without synchronization, and MSVC's standard library doesn't handle that well?
  • Could it be that the asio run loop throws an exception that your catch handlers don't actually catch? I don't know whether it is relevant, but MSVC does have SEH (structured exceptions)¹
  • There is no need to keep run() in a tight loop. If you actually want to keep looping, you should call io_context.restart(); in between runs (see the sketch right after this list). I don't recommend that, because it makes a regular shutdown impossible.
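
For illustration only, here is a minimal sketch (my assumption of what such a loop would have to look like, not a recommended fix) of calling restart() between runs. After run() returns because the context stopped or ran out of work, restart() must be called before the next run(); without it, run() returns immediately and the thread spins at 100% CPU, which matches what you observe:

// Sketch only: looping around run() correctly requires restart().
for (;;) {
    try {
        io_context.run();     // returns when stopped or out of work
    } catch (const std::exception& e) {
        std::cerr << "handler threw: " << e.what() << std::endl;
        continue;             // re-enter run() to keep processing the remaining handlers
    }
    io_context.restart();     // clear the stopped state before the next run()
}

Note that this loop has no exit condition, which is exactly why I would avoid it: a deliberate stop() no longer shuts the worker threads down.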

In case you're interested, here is the code with some minor adjustments. It adds some visualization of session/connection handling. Note that the client is basically unchanged, but the server has a few changes that might give you some ideas:

server.cpp

#include <iostream>
#include <iomanip>

#include <boost/thread/thread.hpp>
#include <boost/asio.hpp>
#include <boost/asio/spawn.hpp>

namespace ba = boost::asio;
using boost::asio::ip::tcp;
using namespace std::literals;

#define SERVER_PORT 12345
#define DATA_LEN 0x4000

void MessageBox(int, std::string const& caption, std::string const& message, ...) {
    std::cerr << caption << ": " << std::quoted(message) << std::endl;
}

struct session : public std::enable_shared_from_this<session>
{
    tcp::socket socket_;
    ba::steady_timer timer_;
    ba::strand<ba::io_context::executor_type> strand_;

    explicit session(ba::io_context& io_context, tcp::socket socket)
    : socket_(std::move(socket)),
      timer_(io_context),
      strand_(io_context.get_executor())
    { }

    void go()
    {
        auto self(shared_from_this());
        ba::spawn(strand_, [this, self](ba::yield_context yield)
        {
            spawn(yield, [this, self](ba::yield_context yield) {
                while (socket_.is_open()) {
                    timer_.expires_from_now(10s); 
                    boost::system::error_code ec;
                    timer_.async_wait(yield[ec]);
                    // timeout triggered, timer was not canceled
                    if (ba::error::operation_aborted != ec) {
                        socket_.close(ec);
                    }
                }
            });

            try
            {
                // recv data
                std::string packet;

                // read data
                ba::async_read(socket_,
                               ba::dynamic_buffer(packet),
                               ba::transfer_exactly(DATA_LEN),
                               yield);

                std::cout << std::unitbuf << ".";
            }
            catch (std::exception const& e) {
                std::cout << "exception: " << std::quoted(e.what()) << std::endl;
            }
            catch (...) {
                std::cout << "exception" << std::endl;
            }

            boost::system::error_code ec;
            timer_.cancel(ec);
            socket_.close(ec);
        });

    }
};

struct my_server {  
    void start() {
        ba::io_context io_context;
        auto worker = ba::make_work_guard(io_context);

        ba::spawn(io_context, [&](ba::yield_context yield)
        {
            tcp::acceptor acceptor(io_context,
            tcp::endpoint(tcp::v4(), SERVER_PORT));

            for (;;)
            {
                boost::system::error_code ec;

                tcp::socket socket(io_context);
                acceptor.async_accept(socket, yield[ec]);
                if (!ec) {
                    std::make_shared<session>(io_context, std::move(socket))->go();
                } 
            }
        });

        // Run io_context on All CPUs
        auto thread_count = std::thread::hardware_concurrency();
        boost::thread_group tgroup;
        for (auto i = 0u; i < thread_count; ++i) 
            tgroup.create_thread([&] {
                for (;;) {
                    try { 
                        io_context.run(); 
                        break;
                    }
                    catch(const std::exception& e) { 
                        MessageBox(0, "This never popup", e.what(), 0); 
                    }
                    catch(const boost::exception& e) { 
                        MessageBox(0, "This never popup", boost::diagnostic_information(e).data(), 0); 
                    }
                    catch(...) { MessageBox(0, "This never popup", "", 0); }
                }

                std::cout << "stopped: " << io_context.stopped() << std::endl;
            });
        tgroup.join_all();
    }
};  

int main() {
    my_server svr;
    svr.start();
}

client.cpp

#include <iostream>
#include <random>
#include <thread>
#include <boost/asio.hpp>
#include <boost/thread.hpp>

using boost::asio::ip::tcp;
namespace ba=boost::asio;

#define SERVER "127.0.0.1"
#define PORT "12345"

int main() {
    ba::io_context io_context;

    static std::string const data_0x4000(0x4000, 'a');

    boost::thread_group tgroup;
    for (auto i = 0; i < 1000; ++i) 
        tgroup.create_thread([&] {
            for(;;) {

                try {
                    tcp::socket s(io_context);

                    tcp::resolver resolver(io_context);
                    ba::connect(s, resolver.resolve(SERVER, PORT));
                    s.set_option(ba::socket_base::reuse_address(true));

                    ba::write(s, ba::buffer(data_0x4000));
                } catch (std::exception const& e) {
                    std::cout << " exception: " << e.what() << std::endl;
                } catch (...) {
                    std::cout << "unknown exception" << std::endl;
                }
                std::cout << std::unitbuf << ".";
            }
        });

    tgroup.join_all();
}

¹ See e.g. https://docs.microsoft.com/en-us/cpp/build/reference/eh-exception-handling-model?view=vs-2019#remarks

² Perhaps MessageBox is only allowed from the "UI" thread.