C++ work queue with blocking

Date: 2014-09-19 02:06:01

Tags: c++ multithreading threadpool boost-asio boost-thread

This question should be a bit simpler than my previous ones. I have implemented the following work queue in my program:

Pool.h:

// tpool class
// It's always closed. :glasses:
#ifndef __POOL_H
#define __POOL_H
#include <boost/asio.hpp>
#include <boost/thread.hpp>
#include <boost/function.hpp>
#include <boost/bind.hpp>
class tpool {
    public:
        tpool( std::size_t tpool_size );
        ~tpool();
        template< typename Task >
        void run_task( Task task ){
            boost::unique_lock< boost::mutex > lock( mutex_ );
            if( 0 < available_ ) {
                --available_;
                io_service_.post( boost::bind( &tpool::wrap_task, this, boost::function< void() > ( task ) ) );
            }
        }
    private:
        boost::asio::io_service io_service_;
        boost::asio::io_service::work work_;
        boost::thread_group threads_;
        std::size_t available_;
        boost::mutex mutex_;
        void wrap_task( boost::function< void() > task );
};
extern tpool dbpool;
#endif

pool.cpp:

#include <boost/asio/io_service.hpp>
#include <boost/thread/mutex.hpp>
#include <boost/bind.hpp>
#include <boost/thread.hpp>
#include "pool.h"
tpool::tpool( std::size_t tpool_size ) : work_( io_service_ ), available_( tpool_size ) {
    for ( std::size_t i = 0; i < tpool_size; ++i ){
        threads_.create_thread( boost::bind( &boost::asio::io_service::run, &io_service_ ) );
    }
}
tpool::~tpool() {
    io_service_.stop();
    try {
        threads_.join_all();
    }
    catch( ... ) {}
}
void tpool::wrap_task( boost::function< void() > task ) {
    // run the supplied task
    try {
        task();
    } // suppress exceptions
    catch( ... ) {
    }
    boost::unique_lock< boost::mutex > lock( mutex_ );
    ++available_;
}
tpool dbpool( 50 );

The problem is that not all of my calls to run_task() get completed by the worker threads. I'm not sure whether that's because the task never enters the queue, or because the task disappears when the thread that created it exits.

So my questions are: is there something special I have to give boost::thread to make it wait until the queue is unlocked? What is the expected lifetime of a task entered into the queue? Do tasks go out of scope when the thread that created them exits? If so, how do I prevent that from happening?

Edit: I have made the following change to the code:

template< typename Task >
void run_task( Task task ){ // add item to the queue
    io_service_.post( boost::bind( &tpool::wrap_task, this, boost::function< void() > ( task ) ) );
}

and I now see all entries being entered correctly. However, I'm left with one lingering question: what is the lifetime of tasks added to the queue? Do they cease to exist once the thread that created them exits?
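
(A note on that lifetime question: Boost.Asio's post() makes a copy of the handler as required, so the copy lives in the io_service's internal queue until a thread running run() invokes it; it does not depend on the thread that posted it staying alive. Below is a small self-contained sketch illustrating this, separate from the pool above; the say_hello and produce names are only for illustration.)

#include <boost/asio.hpp>
#include <boost/bind.hpp>
#include <boost/chrono.hpp>
#include <boost/thread.hpp>
#include <iostream>

void say_hello() { std::cout << "hello from a pool thread\n"; }

// posts a task and returns immediately; the io_service keeps its own copy
void produce(boost::asio::io_service& io) {
    io.post(&say_hello);
}

int main() {
    boost::asio::io_service io;
    boost::asio::io_service::work work(io);   // keep run() alive while we post

    // one worker thread executing whatever gets posted
    boost::thread worker(boost::bind(&boost::asio::io_service::run, &io));

    {
        // post from a short-lived producer thread, then let that thread die
        boost::thread producer(boost::bind(&produce, boost::ref(io)));
        producer.join();
    }

    // the task still runs: its copy lives in the io_service, not in the producer
    boost::this_thread::sleep_for(boost::chrono::seconds(1));
    io.stop();
    worker.join();
}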

1 answer:

Answer 0 (score: 5):

OK. This is really quite simple: you're refusing to post the tasks!

template< typename Task >
void run_task(Task task){
    boost::unique_lock<boost::mutex> lock( mutex_ );
    if(0 < available_) {
        --available_;
        io_service_.post(boost::bind(&tpool::wrap_task, this, boost::function< void() > ( task )));
    }
}

Note that the lock only "waits" until the mutex is not owned by another thread. That might already be the case, even while available_ is already 0. Now look at the line

if(0 < available_) {

This line is simply a condition. It isn't "magical" just because you hold mutex_ locked. (The program doesn't even know there is any relation between mutex_ and available_.) So, if available_ <= 0, you simply skip the post and the task is silently dropped.
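
If run_task were really meant to block until a worker slot frees up (which is what the question asked about), the mutex alone cannot provide that; you would also need a condition variable that wrap_task signals after incrementing available_. A rough sketch of that idea, assuming a boost::condition_variable member named cond_ is added to the class (the original code has no such member):

// in the header, with a new member: boost::condition_variable cond_;
template< typename Task >
void run_task( Task task ){
    boost::unique_lock< boost::mutex > lock( mutex_ );
    // block until a worker slot frees up instead of silently skipping the post
    while( available_ == 0 )
        cond_.wait( lock );
    --available_;
    io_service_.post( boost::bind( &tpool::wrap_task, this, boost::function< void() >( task ) ) );
}

// in pool.cpp
void tpool::wrap_task( boost::function< void() > task ) {
    try { task(); } catch( ... ) {}   // suppress exceptions, as before
    boost::unique_lock< boost::mutex > lock( mutex_ );
    ++available_;
    cond_.notify_one();               // wake one blocked run_task(), if any
}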


Solution #1

You should use the io_service to do the queueing for you. This is most likely what you wanted to achieve in the first place. Instead of keeping track of "available" threads, the io_service does the work for you. You control how many threads it may use by how many threads you run the io_service on. Simple.

Since the io_service is already thread-safe, you can do without the lock altogether.

#include <boost/asio.hpp>
#include <boost/thread.hpp>
#include <iostream>

// tpool class
// It's always closed. :glasses:
#ifndef __POOL_H
#define __POOL_H
class tpool {
    public:
        tpool( std::size_t tpool_size );
        ~tpool();

        template<typename Task>
        void run_task(Task task){
            io_service_.post(task);
        }
    private:
        // note the order of destruction of members
        boost::asio::io_service io_service_;
        boost::asio::io_service::work work_;

        boost::thread_group threads_;
};

extern tpool dbpool;
#endif

#include <boost/asio/io_service.hpp>
#include <boost/thread/mutex.hpp>
#include <boost/bind.hpp>
#include <boost/thread.hpp>
//#include "pool.h"

tpool::tpool(std::size_t tpool_size) : work_(io_service_) {
    for (std::size_t i = 0; i < tpool_size; ++i)
    {
        threads_.create_thread( 
                boost::bind(&boost::asio::io_service::run, &io_service_) 
            );
    }
}

tpool::~tpool() {
    io_service_.stop();

    try {
        threads_.join_all();
    }
    catch(...) {}
}

void foo() { std::cout << __PRETTY_FUNCTION__ << "\n"; }
void bar() { std::cout << __PRETTY_FUNCTION__ << "\n"; }

int main() {
    tpool dbpool(50);

    dbpool.run_task(foo);
    dbpool.run_task(bar);

    boost::this_thread::sleep_for(boost::chrono::seconds(1));
}

For shutdown purposes, you will want to be able to "clear" the io_service::work object, otherwise your pool will never exit.
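
One common way to make that possible is to hold the work object in a boost::optional and reset it in the destructor, so run() returns once the queued tasks have drained. This is only a sketch of that pattern, assuming the member type is changed to boost::optional<io_service::work> (the listing above keeps a plain work member and calls stop() instead):

#include <boost/asio.hpp>
#include <boost/bind.hpp>
#include <boost/optional.hpp>
#include <boost/thread.hpp>

class tpool {
    public:
        tpool(std::size_t tpool_size) : work_(boost::asio::io_service::work(io_service_)) {
            for (std::size_t i = 0; i < tpool_size; ++i)
                threads_.create_thread(boost::bind(&boost::asio::io_service::run, &io_service_));
        }

        ~tpool() {
            work_.reset();               // let run() return after pending tasks finish
            try { threads_.join_all(); } catch (...) {}
            // calling io_service_.stop() instead would abandon tasks still queued
        }

        template<typename Task>
        void run_task(Task task) { io_service_.post(task); }

    private:
        boost::asio::io_service io_service_;
        boost::optional<boost::asio::io_service::work> work_;   // resettable keep-alive
        boost::thread_group threads_;
};

void hello() { /* work goes here */ }

int main() {
    tpool pool(4);
    pool.run_task(&hello);   // the destructor now waits for hello() to finish
}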


Solution #2

Instead of using the io_service, roll your own queue implementation with a condition variable to notify worker threads of new work being posted. Again, the number of workers is determined by the number of threads in the group.

#include <boost/thread.hpp>
#include <boost/phoenix.hpp>
#include <boost/optional.hpp>
#include <boost/atomic.hpp>
#include <deque>
#include <iostream>

using namespace boost;
using namespace boost::phoenix::arg_names;

class thread_pool
{
  private:
      mutex mx;
      condition_variable cv;

      typedef function<void()> job_t;
      std::deque<job_t> _queue;

      thread_group pool;

      boost::atomic_bool shutdown;
      static void worker_thread(thread_pool& q)
      {
          while (auto job = q.dequeue())
              (*job)();
      }

  public:
      thread_pool() : shutdown(false) {
          for (unsigned i = 0; i < boost::thread::hardware_concurrency(); ++i)
              pool.create_thread(bind(worker_thread, ref(*this)));
      }

      void enqueue(job_t job) 
      {
          lock_guard<mutex> lk(mx);
          _queue.push_back(std::move(job));

          cv.notify_one();
      }

      optional<job_t> dequeue() 
      {
          unique_lock<mutex> lk(mx);
          namespace phx = boost::phoenix;

          // wake up when shutdown is requested or when a new job has been queued
          cv.wait(lk, phx::ref(shutdown) || !phx::empty(phx::ref(_queue)));

          if (_queue.empty())
              return none;

          auto job = std::move(_queue.front());
          _queue.pop_front();

          return std::move(job);
      }

      ~thread_pool()
      {
          shutdown = true;
          {
              lock_guard<mutex> lk(mx);
              cv.notify_all();
          }

          pool.join_all();
      }
};

void the_work(int id)
{
    std::cout << "worker " << id << " entered\n";

    // no more synchronization; the pool size determines max concurrency
    std::cout << "worker " << id << " start work\n";
    this_thread::sleep_for(chrono::seconds(2));
    std::cout << "worker " << id << " done\n";
}

int main()
{
    thread_pool pool; // uses 1 thread per core

    for (int i = 0; i < 10; ++i)
        pool.enqueue(bind(the_work, i));
}