Get the image dimensions after each layer in Torch

Date: 2019-01-18 04:11:30

Tags: lua neural-network torch unity3d-unet

I am trying to implement U-Net and DeconvNet in Lua/Torch; the model code is below. Both models work and produce output when the input size is 224*224, but after I reduced the input image to width = 100 and height = 75, both models fail with the same error. My guess is that at one of the intermediate layers the feature map becomes smaller than the kernel size, which causes the error. However, I don't know how to change the code so that it works, nor how to see the dimensions of the feature map after each intermediate layer. PS: the batch size is 1.
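For inspecting the sizes, here is a minimal sketch (assumptions: net is one of the finished models built below, already moved to CUDA, and input is a 1 x nbChannels x 75 x 100 CudaTensor). After a forward pass, every nn module keeps its last output in module.output, so you can walk the flattened module list and print each output size; even if the forward pass errors part-way, the layers that did run still hold their outputs.

pcall(function() net:forward(input) end)  -- pcall so a failing layer does not abort the inspection
for i, m in ipairs(net:listModules()) do
    -- skip modules whose output is not a populated tensor (e.g. table containers)
    if torch.isTensor(m.output) and m.output:dim() > 0 then
        print(i, torch.type(m), table.concat(m.output:size():totable(), 'x'))
    end
end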

U-Net model:

local function ConvLayers(nIn, nOut, dropout)
    local kW, kH, dW, dH, padW, padH = 3, 3, 1, 1, 1, 1 -- parameters for 'same' conv layers

    local net = nn.Sequential()
    net:add(Convolution(nIn, nOut, kW, kH, dW, dH, padW, padH))
    net:add(BatchNorm(nOut))
    net:add(ReLU(true))
    if dropout then net:add(Dropout(dropout)) end

    net:add(Convolution(nOut, nOut, kW, kH, dW, dH, padW, padH))
    net:add(BatchNorm(nOut))
    net:add(ReLU(true))
    if dropout then net:add(Dropout(dropout)) end

    return net
end

--- Returns model, name which is used for the naming of models generated while training
    -- # of labels
    local input = nn.Identity()()

    local D1 = ConvLayers(nbChannels,32)(input)
    local D2 = ConvLayers(32,64)(MaxPooling(2,2)(D1))
    local D3 = ConvLayers(64,128)(MaxPooling(2,2)(D2))
    local D4 = ConvLayers(128,256)(MaxPooling(2,2)(D3))

    local B = ConvLayers(256,512)(MaxPooling(2,2)(D4))
    --local tt = Join(1,3)({ D4, ReLU(true)(UpConvolution(512,256, 2,2,2,2)(B)) })

    local U4 = ConvLayers(512,256)(Join(1,3)({ D4, ReLU(true)(UpConvolution(512,256, 2,2,2,2)(B)) }))
    local U3 = ConvLayers(256,128)(Join(1,3)({ D3, ReLU(true)(UpConvolution(256,128, 2,2,2,2)(U4)) }))
    local U2 = ConvLayers(128,64)(Join(1,3)({ D2, ReLU(true)(UpConvolution(128,64, 2,2,2,2)(U3)) }))
    local U1 = ConvLayers(64,32)(Join(1,3)({ D1, ReLU(true)(UpConvolution(64,32, 2,2,2,2)(U2))   }))

    local net = nn.Sequential()
    net:add(nn.gModule({input}, {U1}))
    net:add(Convolution(32, nbClasses, 1,1))
    net:cuda()
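As a rough plausibility check (my own back-of-the-envelope calculation, not part of the original code): MaxPooling(2,2) with stride 2 floors odd sizes, so for a height of 75 the encoder path gives 75 -> 37 -> 18 -> 9 (D4) -> 4 (B). UpConvolution(512,256, 2,2,2,2) then turns 4 back into 8, which cannot be concatenated with D4's 9 rows, hence a JoinTable size mismatch. A tiny snippet tracing this:

local h = 75
for level = 1, 4 do
    h = math.floor(h / 2)   -- 2x2 pooling with stride 2 floors odd sizes
    print(('height after pooling %d: %d'):format(level, h))
end
-- SpatialFullConvolution with kW=kH=2, dW=dH=2: out = (in - 1) * 2 + 2
print('height after upconv from bottleneck:', (4 - 1) * 2 + 2)  -- 8, but D4 has height 9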

DeconvNet model:

encoder = nn.Sequential()

pool = {}
-- conv_sizes = {3,64,64,128,128,256,256,256,512,512,512,512,512,512}

encoder:add(cudnn.SpatialConvolution(3,64,3,3,1,1,1,1))
encoder:add(nn.SpatialBatchNormalization(64))
encoder:add(cudnn.ReLU())

encoder:add(cudnn.SpatialConvolution(64,64,3,3,1,1,1,1))
encoder:add(nn.SpatialBatchNormalization(64))
encoder:add(cudnn.ReLU())

pool[1] = nn.SpatialMaxPooling(2,2,2,2)
encoder:add(pool[1])

encoder:add(cudnn.SpatialConvolution(64,128,3,3,1,1,1,1))
encoder:add(nn.SpatialBatchNormalization(128))
encoder:add(cudnn.ReLU())

encoder:add(cudnn.SpatialConvolution(128,128,3,3,1,1,1,1))
encoder:add(nn.SpatialBatchNormalization(128))
encoder:add(cudnn.ReLU())

pool[2] = nn.SpatialMaxPooling(2,2,2,2)
encoder:add(pool[2])

encoder:add(cudnn.SpatialConvolution(128,256,3,3,1,1,1,1))
encoder:add(nn.SpatialBatchNormalization(256))
encoder:add(cudnn.ReLU())

encoder:add(cudnn.SpatialConvolution(256,256,3,3,1,1,1,1))
encoder:add(nn.SpatialBatchNormalization(256))
encoder:add(cudnn.ReLU())

encoder:add(cudnn.SpatialConvolution(256,256,3,3,1,1,1,1))
encoder:add(nn.SpatialBatchNormalization(256))
encoder:add(cudnn.ReLU())

pool[3] = nn.SpatialMaxPooling(2,2,2,2)
encoder:add(pool[3])

encoder:add(cudnn.SpatialConvolution(256,512,3,3,1,1,1,1))
encoder:add(nn.SpatialBatchNormalization(512))
encoder:add(cudnn.ReLU())

encoder:add(cudnn.SpatialConvolution(512,512,3,3,1,1,1,1))
encoder:add(nn.SpatialBatchNormalization(512))
encoder:add(cudnn.ReLU())

encoder:add(cudnn.SpatialConvolution(512,512,3,3,1,1,1,1))
encoder:add(nn.SpatialBatchNormalization(512))
encoder:add(cudnn.ReLU())

pool[4] = nn.SpatialMaxPooling(2,2,2,2)
encoder:add(pool[4])

encoder:add(cudnn.SpatialConvolution(512,512,3,3,1,1,1,1))
encoder:add(nn.SpatialBatchNormalization(512))
encoder:add(cudnn.ReLU())

encoder:add(cudnn.SpatialConvolution(512,512,3,3,1,1,1,1))
encoder:add(nn.SpatialBatchNormalization(512))
encoder:add(cudnn.ReLU())

encoder:add(cudnn.SpatialConvolution(512,512,3,3,1,1,1,1))
encoder:add(nn.SpatialBatchNormalization(512))
encoder:add(cudnn.ReLU())

pool[5] = nn.SpatialMaxPooling(2,2,2,2)
encoder:add(pool[5])
---------------------------------------------------------------------------
encoder:add(cudnn.SpatialConvolution(512,4096,7,7,1,1,0,0))
encoder:add(cudnn.ReLU())

encoder:add(cudnn.SpatialConvolution(4096,4096,1,1,1,1,0,0))
encoder:add(cudnn.ReLU())

-----------------------------------------------------
decoder=nn.Sequential()

decoder:add(cudnn.SpatialFullConvolution(4096,512,7,7,1,1,0,0))
decoder:add(nn.SpatialBatchNormalization(512))
decoder:add(cudnn.ReLU())

decoder:add(nn.SpatialMaxUnpooling(pool[5]))

decoder:add(cudnn.SpatialFullConvolution(512,512,3,3,1,1,1,1))
decoder:add(nn.SpatialBatchNormalization(512))
decoder:add(cudnn.ReLU())

decoder:add(cudnn.SpatialFullConvolution(512,512,3,3,1,1,1,1))
decoder:add(nn.SpatialBatchNormalization(512))
decoder:add(cudnn.ReLU())

decoder:add(cudnn.SpatialFullConvolution(512,512,3,3,1,1,1,1))
decoder:add(nn.SpatialBatchNormalization(512))
decoder:add(cudnn.ReLU())

decoder:add(nn.SpatialMaxUnpooling(pool[4]))

decoder:add(cudnn.SpatialFullConvolution(512,512,3,3,1,1,1,1))
decoder:add(nn.SpatialBatchNormalization(512))
decoder:add(cudnn.ReLU())

decoder:add(cudnn.SpatialFullConvolution(512,512,3,3,1,1,1,1))
decoder:add(nn.SpatialBatchNormalization(512))
decoder:add(cudnn.ReLU())

decoder:add(cudnn.SpatialFullConvolution(512,256,3,3,1,1,1,1))
decoder:add(nn.SpatialBatchNormalization(256))
decoder:add(cudnn.ReLU())

decoder:add(nn.SpatialMaxUnpooling(pool[3]))

decoder:add(cudnn.SpatialFullConvolution(256,256,3,3,1,1,1,1))
decoder:add(nn.SpatialBatchNormalization(256))
decoder:add(cudnn.ReLU())

decoder:add(cudnn.SpatialFullConvolution(256,256,3,3,1,1,1,1))
decoder:add(nn.SpatialBatchNormalization(256))
decoder:add(cudnn.ReLU())

decoder:add(cudnn.SpatialFullConvolution(256,128,3,3,1,1,1,1))
decoder:add(nn.SpatialBatchNormalization(128))
decoder:add(cudnn.ReLU())

decoder:add(nn.SpatialMaxUnpooling(pool[2]))

decoder:add(cudnn.SpatialFullConvolution(128,128,3,3,1,1,1,1))
decoder:add(nn.SpatialBatchNormalization(128))
decoder:add(cudnn.ReLU())

decoder:add(cudnn.SpatialFullConvolution(128,64,3,3,1,1,1,1))
decoder:add(nn.SpatialBatchNormalization(64))
decoder:add(cudnn.ReLU())

decoder:add(nn.SpatialMaxUnpooling(pool[1]))

decoder:add(cudnn.SpatialFullConvolution(64,64,3,3,1,1,1,1))
decoder:add(nn.SpatialBatchNormalization(64))
decoder:add(cudnn.ReLU())

decoder:add(cudnn.SpatialFullConvolution(64,64,3,3,1,1,1,1))
decoder:add(nn.SpatialBatchNormalization(64))
decoder:add(cudnn.ReLU())

decoder:add(cudnn.SpatialConvolution(64,2,1,1,1,1,0,0))
---decoder:add(nn.SpatialBatchNormalization(2))


net = nn.Sequential()
net:add(encoder)
net:add(decoder)
net:cuda()
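Again as a rough check of my own (assumptions: each 2x2/stride-2 pooling floors, and SpatialConvolution output size is floor((in + 2*pad - k)/stride) + 1): a 100x75 input shrinks to 3x2 after the five poolings, which is already smaller than the 7x7 kernel of the first bottleneck convolution, so that layer cannot produce a valid output. This matches the suspicion that the feature map becomes smaller than the kernel. A short sketch of the arithmetic:

local function convOut(size, k, stride, pad)
    return math.floor((size + 2 * pad - k) / stride) + 1
end
local w, h = 100, 75
for i = 1, 5 do
    w, h = math.floor(w / 2), math.floor(h / 2)
end
print(w, h)                                      -- 3  2 after the five 2x2 poolings
print(convOut(w, 7, 1, 0), convOut(h, 7, 1, 0))  -- negative, i.e. the 7x7 conv cannot run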

Error:

/root/torch/install/share/lua/5.1/nn/JoinTable.lua:39: bad argument #1 to 'copy' (sizes do not match at /root/torch/extra/cutorch/lib/THC/THCTensorCopy.cu:31)
stack traceback:
    [C]: in function 'copy'
    /root/torch/install/share/lua/5.1/nn/JoinTable.lua:39: in function 'func'
    /root/torch/install/share/lua/5.1/nngraph/gmodule.lua:345: in function 'neteval'
    /root/torch/install/share/lua/5.1/nngraph/gmodule.lua:380: in function </root/torch/install/share/lua/5.1/nngraph/gmodule.lua:300>
    [C]: in function 'xpcall'
    /root/torch/install/share/lua/5.1/nn/Container.lua:63: in function 'rethrowErrors'
    /root/torch/install/share/lua/5.1/nn/Sequential.lua:44: in function 'forward'
    ./train.lua:68: in function 'opfunc'
    /root/torch/install/share/lua/5.1/optim/sgd.lua:44: in function 'sgd'
    ./train.lua:77: in function 'train'
    run.lua:57: in main chunk
    [C]: in function 'dofile'
    /root/torch/install/lib/luarocks/rocks/trepl/scm-1/bin/th:150: in main chunk
    [C]: at 0x00406670

0 Answers:

No answers yet.