多处理Queue.get()挂起,多个进程

时间:2018-07-11 14:14:08

标签: python python-3.x keras mysql-python python-multiprocessing

我正在用Keras分析斑马鱼的行为数据。我还使用multiprocessing启动8个进程来获取数据并放入队列。神经网络在主进程中运行。我已经为队列设置了最大大小,因此这8个连续获取数据并尝试放入队列的进程在队列满时必须等待,参见文档。类似地,神经网络尝试使用get()从队列中获取数据;如果队列为空,它应等待直到队列不为空。

问题是这样的:如果我将数据集大小设置为100(即放入队列中的一个元素的大小),一切正常。如果我将数据集大小增加到1000,神经网络会从队列中获取一个数据集并训练,再获取一个数据集并训练,然后就挂起,尽管我知道此时队列大小大于零。大小为1000的数据集由一个1000 x 43的浮点数矩阵(输入)和一个1000 x 1352的浮点数矩阵(输出)组成,所以数据量并不算大。

 
#Method where i get data, this method is started in 8 separate processes
def fetch(q, n):
    """Producer loop: repeatedly build a training set of size *n* and put it on *q*.

    Runs forever inside its own ``multiprocessing.Process``; blocks in
    ``q.put()`` whenever the bounded queue is full.

    :param q: ``multiprocessing.Queue`` shared with the consumer (the NN).
    :param n: dataset size forwarded to ``get50SetsOfParameters``.
    """
    # Imported inside the function so each worker process performs its own import.
    from GetParametersFishAverage import get50SetsOfParameters

    while True:
        q.put(get50SetsOfParameters(n))
        # qsize() is approximate and raises NotImplementedError on macOS
        # (see the multiprocessing docs) -- treat it as best-effort debug output
        # rather than letting it crash the producer.
        try:
            print("The queue size is now: " + str(q.qsize()))
        except NotImplementedError:
            pass
def fetchEva(Eq, v):
    """Producer loop for the *evaluation* queue.

    Identical in structure to ``fetch``: builds a validation set of size *v*
    and puts it on *Eq* forever, blocking whenever the bounded queue is full.

    :param Eq: ``multiprocessing.Queue`` for evaluation data.
    :param v: validation-set size forwarded to ``get50SetsOfParameters``.
    """
    # Imported inside the function so each worker process performs its own import.
    from GetParametersFishAverage import get50SetsOfParameters

    while True:
        Eq.put(get50SetsOfParameters(v))
        # qsize() is approximate and raises NotImplementedError on macOS --
        # best-effort debug output only.
        try:
            print("The evaluation queue size is now: " + str(Eq.qsize()))
        except NotImplementedError:
            pass


if __name__ == "__main__":

    from keras.models import Sequential, Model
    from keras.layers.core import Dense, Dropout, Activation, Flatten
    from keras.layers.convolutional import *
    from keras.optimizers import SGD, RMSprop, Adam
    from keras.models import load_model
    # Renamed alias: the original `backend as np` shadowed the conventional
    # numpy alias; the backend is unused below, but K is the safe convention.
    from keras import backend as K
    from keras.layers import Reshape, LeakyReLU, BatchNormalization, Input, Concatenate
    from keras.initializers import RandomUniform
    from keras.utils import plot_model
    import numpy
    import math
    import matplotlib.pyplot as plt
    import multiprocessing
    import time

    # ---- Hyper-parameters ---------------------------------------------------
    paramSize = 1000   # training-set size requested from each producer
    validSize = 100    # evaluation-set size
    normParam = 1000.  # divisor used to normalise the parameter matrices
    normConc = 100.    # divisor used to normalise the concentration matrices
    batchSize = 100
    epochsNN = 100
    aL = 0.1           # LeakyReLU negative slope
    numpy.random.seed(7)

    # Bounded queues: producers block in put() when full, the consumer blocks
    # in get() when empty.
    q = multiprocessing.Queue(maxsize=10)
    Eq = multiprocessing.Queue(maxsize=2)

    # One evaluation-data producer plus seven training-data producers
    # (same set of processes the original spawned one-by-one).
    producers = [multiprocessing.Process(target=fetchEva, args=(Eq, validSize))]
    for _ in range(7):
        producers.append(multiprocessing.Process(target=fetch, args=(q, paramSize)))
    for p in producers:
        # Daemonic so the interpreter can exit once training finishes;
        # the original non-daemon producers would keep the script alive forever.
        p.daemon = True
        p.start()
        time.sleep(0.1)  # stagger start-up, as in the original

    # ---- Build the network --------------------------------------------------
    def _hidden(units, fan_in, fan_out, previous):
        """Dense -> LeakyReLU -> Dropout(0.2) block with uniform Glorot-style init.

        BUGFIX: the original called `initializers.RandomUniform`, a NameError --
        only `RandomUniform` itself was imported.
        """
        r = math.sqrt(6 / (fan_in + fan_out))
        layer = Dense(units, activation=None,
                      kernel_initializer=RandomUniform(minval=-r, maxval=r))(previous)
        layer = LeakyReLU(alpha=aL)(layer)
        return Dropout(0.2)(layer)

    inn = Input((43,))
    # NOTE(review): the fan values below are reproduced from the original and
    # do not always match the actual layer widths (e.g. sqrt(6/(1000+1000))
    # feeding a Dense(2000)) -- confirm whether that was intentional.
    lay = _hidden(100, 30, 100, inn)
    lay = _hidden(1000, 100, 1000, lay)
    lay = _hidden(2000, 1000, 1000, lay)
    lay = _hidden(2000, 1000, 1000, lay)

    # 104 parallel "heads" of 128 -> 60 -> 13 units, concatenated afterwards.
    heads = []
    for _ in range(104):
        head = Dense(128, activation=None)(lay)
        head = LeakyReLU(alpha=aL)(head)
        head = Dense(60, activation=None)(head)
        head = LeakyReLU(alpha=aL)(head)
        head = Dense(13, activation=None)(head)
        head = LeakyReLU(alpha=aL)(head)
        heads.append(head)

    lay = Concatenate()(heads)
    lay = Dense(1352, activation=None)(lay)
    lay = LeakyReLU(alpha=aL)(lay)

    model = Model(inputs=inn, outputs=lay)
    model.summary()
    model.compile(loss='mean_squared_error', optimizer=Adam(lr=0.001),
                  metrics=['accuracy'])

    # ---- Training loop ------------------------------------------------------
    for i in range(10000):
        print("*****************Fetching data********************")
        # NOTE(review): get() was reported to hang here after two rounds with
        # large datasets even though qsize() > 0. qsize() is only approximate
        # and large items are written to the underlying pipe by a background
        # feeder thread, so "reported non-empty" does not guarantee an item is
        # fully readable yet -- see the multiprocessing.Queue pipes-and-queues
        # caveats in the Python docs.
        parameterNp, concentrationNp = q.get()
        parameterNpEva, concentrationNpEva = Eq.get()

        # Normalise inputs and targets.
        parameterNpEva = parameterNpEva / normParam
        concentrationNpEva = concentrationNpEva / normConc
        parameterNp = parameterNp / normParam
        concentrationNp = concentrationNp / normConc

        model.fit(concentrationNp, parameterNp, epochs=epochsNN, batch_size=batchSize)

        # Evaluate on the held-out set and log the accuracy.
        scores = model.evaluate(concentrationNpEva, parameterNpEva)
        print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))
        print("This was round " + str(i))

        # `with` guarantees the log file is closed even if a write fails.
        with open("10juliNNLog.txt", "a") as logfile:
            logfile.write("Round number " + str(i) + " ")
            logfile.write("\n%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))
            logfile.write("\n")

        # Checkpoint every 10th round (replaces the original's separate
        # `iteration` counter, which tracked exactly this condition).
        if (i + 1) % 10 == 0:
            model.save('10juliFishAverage.h5')

您是否看到任何明显的原因,导致get()在两轮之后挂起?非常感谢您的帮助!

0 个答案:

没有答案