TCLAP makes a multithreaded program slower

Date: 2014-09-05 21:11:12

Tags: c++ multithreading compiler-optimization

TCLAP is a templated, header-only C++ library for parsing command-line arguments.

I am using TCLAP to handle command-line arguments in a multithreaded program: the arguments are read in the main function, and then several threads are launched to work on the task those arguments define (an NLP task).

I started printing the number of words per second processed by the threads, and I found that if I hard-code the parameters into main instead of reading them from the command line with TCLAP, throughput is about 6x higher!

I am compiling with gcc and the -O2 flag, and when TCLAP is not used I see roughly a 10x speedup from it... so using TCLAP somehow seems to negate part of the benefit of the compiler optimizations.
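
To make the kind of effect I have in mind concrete, here is a contrived sketch (not code from the actual program): when a value such as the representation dimension is a compile-time constant, the compiler knows the trip count of the inner loops and can unroll or vectorize them, which it cannot do when the value only arrives from the command line at runtime.

#include <cstdint>

// Contrived illustration only -- not code from the actual program.
// With a compile-time constant dimension the compiler knows the loop's trip
// count and can unroll/vectorize it aggressively under -O2.
float dot_hardcoded(const float* a, const float* b) {
    constexpr uint32_t dim = 50;   // known at compile time
    float s = 0.0f;
    for (uint32_t i = 0; i < dim; ++i)
        s += a[i] * b[i];
    return s;
}

// The same loop when the dimension is parsed at runtime: the trip count is
// unknown at compile time, so some of those optimizations are unavailable.
float dot_runtime(const float* a, const float* b, uint32_t dim) {
    float s = 0.0f;
    for (uint32_t i = 0; i < dim; ++i)
        s += a[i] * b[i];
    return s;
}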

Here is the main function, which is the only place I use TCLAP:

int main(int argc, char** argv)
{
    uint32_t mincount;
    uint32_t dim;
    uint32_t contexthalfwidth;
    uint32_t negsamples;
    uint32_t numthreads;
    uint32_t randomseed;
    string corpus_fname;
    string output_basefname;
    string vocab_fname;

    Eigen::initParallel();

    try {
        TCLAP::CmdLine cmd("Driver for various word embedding models", ' ', "0.1");
        TCLAP::ValueArg<uint32_t> dimArg("d","dimension","dimension of word representations",false,300,"uint32_t");
        TCLAP::ValueArg<uint32_t> mincountArg("m", "mincount", "required minimum occurrence count to be added to vocabulary",false,5,"uint32_t");
        TCLAP::ValueArg<uint32_t> contexthalfwidthArg("c", "contexthalfwidth", "half window size of a context frame",false,15,"uint32_t");
        TCLAP::ValueArg<uint32_t> numthreadsArg("t", "numthreads", "number of threads",false,12,"uint32_t");
        TCLAP::ValueArg<uint32_t> negsamplesArg("n", "negsamples", "number of negative samples for skipgram model",false,15,"uint32_t");
        TCLAP::ValueArg<uint32_t> randomseedArg("s", "randomseed", "seed for random number generator",false,2014,"uint32_t");
        TCLAP::UnlabeledValueArg<string> corpus_fnameArg("corpusfname", "file containing the training corpus, one paragraph or sentence per line", true, "corpus", "corpusfname");
        TCLAP::UnlabeledValueArg<string> output_basefnameArg("outputbasefname", "base filename for the learnt word embeddings", true, "wordreps-", "outputbasefname");
        TCLAP::ValueArg<string> vocab_fnameArg("v", "vocabfname", "filename for the vocabulary and word counts", false, "wordsandcounts.txt", "filename");
        cmd.add(dimArg);
        cmd.add(mincountArg);
        cmd.add(contexthalfwidthArg);
        cmd.add(numthreadsArg);
        cmd.add(randomseedArg);
        cmd.add(corpus_fnameArg);
        cmd.add(output_basefnameArg);
        cmd.add(vocab_fnameArg);
        cmd.parse(argc, argv);

        mincount = mincountArg.getValue();
        dim = dimArg.getValue();
        contexthalfwidth = contexthalfwidthArg.getValue();
        negsamples = negsamplesArg.getValue();
        numthreads = numthreadsArg.getValue();
        randomseed = randomseedArg.getValue();
        corpus_fname = corpus_fnameArg.getValue();
        output_basefname = output_basefnameArg.getValue();
        vocab_fname = vocab_fnameArg.getValue();
    }
    catch (TCLAP::ArgException &e) {};

    /*
    uint32_t mincount = 5;
    uint32_t dim = 50;
    uint32_t contexthalfwidth = 15;
    uint32_t negsamples = 15;
    uint32_t numthreads = 10;
    uint32_t randomseed = 2014;
    string corpus_fname = "imdbtrain.txt";
    string output_basefname = "wordreps-";
    string vocab_fname = "wordsandcounts.txt";
    */

    string test_fname = "imdbtest.txt";
    string output_fname = "parreps.txt";
    string countmat_fname = "counts.hdf5";
    Vocabulary * vocab;

    vocab = determineVocabulary(corpus_fname, mincount);
    vocab->dump(vocab_fname);

    Par2VecModel p2vm = Par2VecModel(corpus_fname, vocab, dim, contexthalfwidth, negsamples, randomseed);
    p2vm.learn(numthreads);
    p2vm.save(output_basefname);
    p2vm.learnparreps(test_fname, output_fname, numthreads);
}

The only place multithreading is used is the Par2VecModel::learn function:

void Par2VecModel::learn(uint32_t numthreads) {
    thread* workers;
    workers = new thread[numthreads];
    uint64_t numwords = 0;
    bool killflag = 0;
    uint32_t randseed;

    ifstream filein(corpus_fname.c_str(), ifstream::ate | ifstream::binary);
    uint64_t filesize = filein.tellg();

    fprintf(stderr, "Total number of in vocab words to train over: %u\n", vocab->gettotalinvocabwords());

    for(uint32_t idx = 0; idx < numthreads; idx++) {
        randseed = eng();
        workers[idx] = thread(skipgram_training_thread, this, numthreads, idx, filesize, randseed, std::ref(numwords));
    }

    thread monitor(monitor_training_thread, this, numthreads, std::ref(numwords), std::ref(killflag));

    for(uint32_t idx = 0; idx < numthreads; idx++)
        workers[idx].join();

    killflag = true;
    monitor.join();
}

That function does not touch TCLAP at all, so what is going on here? (I am also using C++11 features, so I compile with the -std=c++11 flag, in case that makes a difference.)

1 Answer:

Answer 0 (score: 0)

This question has been open for a long time, so the suggestion may no longer be useful, but the first thing I would check is what happens if you replace TCLAP with a "dumb" parser (i.e., just supply the arguments on the command line in a specific fixed order and convert them to the correct types). It is highly unlikely that the problem is actually caused by TCLAP (that is, I can't think of any mechanism that would produce this behavior). It is conceivable, however, that with hard-coded values the compiler can perform some compile-time optimizations that are impossible when those values have to be variables. Still, the magnitude of the performance difference seems somewhat pathological, so I still suspect something else is going on.
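
To make that suggestion concrete, a fixed-order "dumb" parser could look something like the minimal sketch below; the argument order, defaults, and usage string are assumptions for illustration, not taken from the original program:

#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <string>

// Minimal sketch of a fixed-order "dumb" parser: positional arguments only,
// converted directly to the required types. Argument order and defaults are
// illustrative assumptions.
int main(int argc, char** argv) {
    if (argc < 4) {
        std::fprintf(stderr, "usage: %s corpusfname outputbasefname dim [numthreads]\n", argv[0]);
        return 1;
    }
    std::string corpus_fname     = argv[1];
    std::string output_basefname = argv[2];
    uint32_t dim        = static_cast<uint32_t>(std::strtoul(argv[3], nullptr, 10));
    uint32_t numthreads = (argc > 4) ? static_cast<uint32_t>(std::strtoul(argv[4], nullptr, 10)) : 12;

    // ...hand the values to the rest of the program exactly as before...
    (void)corpus_fname; (void)output_basefname; (void)dim; (void)numthreads;
    return 0;
}

If the 6x gap disappears with a parser like this, the difference lies in how the values reach the hot code; if it persists, TCLAP itself is ruled out and something else is going on.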