I am trying to run the code from https://github.com/Winfrand/C-MIL, and I get this output: Segmentation fault (core dumped)
When I try to debug it, I find that execution stops at the line
local output = self.model:forward(inputs)
in the function
Optim:optimize(optimMethod, inputs, targets, criterion, scale)
in the file fbnn_Optim.lua, at line 145.
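To narrow down which top-level module triggers the crash, here is a minimal sketch that forwards the children of the nn.Sequential one at a time (this is my own debugging idea, not code from the repo; pcall cannot catch a real segfault, but the last "OK" line printed before the process dies shows which module it happened in):

-- Forward each top-level child of the nn.Sequential separately.
-- Assumes `model` and `inputs` are as shown by the prints below.
local current = inputs
for i = 1, #model.modules do
    local ok, result = pcall(function()
        return model.modules[i]:forward(current)
    end)
    if not ok then
        print(('module %d (%s) raised: %s'):format(
            i, torch.type(model.modules[i]), tostring(result)))
        break
    end
    print(('module %d (%s) OK'):format(i, torch.type(model.modules[i])))
    current = result
end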
When I print the model and the inputs separately, I get:
print(model)
nn.Sequential {[input -> (1) -> (2) -> (3) -> (4) -> (5) -> (6) -> output]
(1): nn.ParallelTable {
input
|`-> (1): nn.Sequential {
| [input -> (1) -> (2) -> (3) -> (4) -> (5) -> (6) -> (7) -> (8) -> (9) -> (10) -> (11) -> (12) -> (13) -> (14) -> output]
| (1): cudnn.SpatialConvolution(3 -> 64, 11x11, 4,4)
| (2): cudnn.ReLU
| (3): cudnn.SpatialCrossMapLRN
| (4): cudnn.SpatialMaxPooling(3x3, 2,2)
| (5): cudnn.SpatialConvolution(64 -> 256, 5x5, 1,1, 2,2)
| (6): cudnn.ReLU
| (7): cudnn.SpatialCrossMapLRN
| (8): cudnn.SpatialMaxPooling(3x3, 2,2)
| (9): cudnn.SpatialConvolution(256 -> 256, 3x3, 1,1, 1,1)
| (10): cudnn.ReLU
| (11): cudnn.SpatialConvolution(256 -> 256, 3x3, 1,1, 1,1)
| (12): cudnn.ReLU
| (13): cudnn.SpatialConvolution(256 -> 256, 3x3, 1,1, 1,1)
| (14): cudnn.ReLU
| }
`-> (2): nn.Identity
... -> output
}
(2): nn.ConcatTable {
input
|`-> (1): nn.Sequential {
| [input -> (1) -> (2) -> (3) -> output]
| (1): RectangularRingRoiPooling
| (2): nn.View(-1)
| (3): nn.Sequential {
| [input -> (1) -> (2) -> (3) -> (4) -> (5) -> (6) -> output]
| (1): nn.Linear(9216 -> 4096)
| (2): cudnn.ReLU
| (3): nn.Dropout(0.500000)
| (4): nn.Linear(4096 -> 4096)
| (5): cudnn.ReLU
| (6): nn.Dropout(0.500000)
| }
| }
|`-> (2): nn.Sequential {
| [input -> (1) -> (2) -> (3) -> output]
| (1): RectangularRingRoiPooling
| (2): nn.View(-1)
| (3): nn.Sequential {
| [input -> (1) -> (2) -> (3) -> (4) -> (5) -> (6) -> output]
| (1): nn.Linear(9216 -> 4096)
| (2): cudnn.ReLU
| (3): nn.Dropout(0.500000)
| (4): nn.Linear(4096 -> 4096)
| (5): cudnn.ReLU
| (6): nn.Dropout(0.500000)
| }
| }
`-> (3): nn.Sequential {
[input -> (1) -> (2) -> (3) -> output]
(1): RectangularRingRoiPooling
(2): nn.View(-1)
(3): nn.Sequential {
[input -> (1) -> (2) -> (3) -> (4) -> (5) -> (6) -> output]
(1): nn.Linear(9216 -> 4096)
(2): cudnn.ReLU
(3): nn.Dropout(0.500000)
(4): nn.Linear(4096 -> 4096)
(5): cudnn.ReLU
(6): nn.Dropout(0.500000)
}
}
... -> output
}
(3): nn.ConcatTable {
input
|`-> (1): nn.Identity
|`-> (2): nn.Sequential {
| [input -> (1) -> (2) -> (3) -> output]
| (1): nn.SelectTable(1)
| (2): nn.Linear(4096 -> 21)
| (3): cudnn.SpatialSoftMax
| }
`-> (3): nn.Sequential {
[input -> (1) -> (2) -> (3) -> output]
(1): nn.SelectTable(1)
(2): nn.Linear(4096 -> 21)
(3): cudnn.SpatialSoftMax
}
... -> output
}
(4): nn.ReWeight
(5): nn.ParallelTable {
input
|`-> (1): nn.Sequential {
| [input -> (1) -> (2) -> output]
| (1): nn.ConcatTable {
| input
| |`-> (1): nn.Sequential {
| | [input -> (1) -> (2) -> (3) -> (4) -> output]
| | (1): nn.SelectTable(1)
| | (2): nn.Linear(4096 -> 20)
| | (3): nn.View(-1)
| | (4): nn.Squeeze
| | }
| `-> (2): nn.Sequential {
| [input -> (1) -> (2) -> (3) -> output]
| (1): nn.ConcatTable {
| input
| |`-> (1): nn.Sequential {
| | [input -> (1) -> (2) -> output]
| | (1): nn.SelectTable(2)
| | (2): nn.Linear(4096 -> 20)
| | }
| `-> (2): nn.Sequential {
| [input -> (1) -> (2) -> (3) -> output]
| (1): nn.SelectTable(3)
| (2): nn.Linear(4096 -> 20)
| (3): nn.MulConstant
| }
| ... -> output
| }
| (2): nn.CAddTable
| (3): nn.View(-1)
| }
| ... -> output
| }
| (2): nn.ContinuationSubset
| }
|`-> (2): nn.Identity
`-> (3): nn.Identity
... -> output
}
(6): nn.ContinuationDetector
}
print(inputs)
{
1 : CudaTensor - size: 1x3x608x586
2 : FloatTensor - size: 1x1963x5
}
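One thing that stands out to me in this output is that inputs[1] is a CudaTensor while inputs[2] (the ROIs) is a FloatTensor. I do not know whether RectangularRingRoiPooling expects its ROIs on the CPU or the GPU, but if it assumes CudaTensors, a type mismatch like this could plausibly crash inside the C/CUDA code. A minimal sketch of the check I could run (moving the ROIs to the GPU is an assumption on my part, not something the repo documents):

require 'cutorch'
-- Assumption: RectangularRingRoiPooling wants CUDA ROIs; the original
-- code may well intend CPU FloatTensor ROIs instead.
local inputs_gpu = { inputs[1], inputs[2]:cuda() }
local ok, out = pcall(function() return model:forward(inputs_gpu) end)
print('forward with CUDA ROIs:', ok)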
Here is the code of this function:
function Optim:optimize(optimMethod, inputs, targets, criterion, scale)
    assert(optimMethod)
    assert(inputs)
    assert(targets)
    assert(criterion)
    assert(self.modulesToOptState)

    self.model:zeroGradParameters()
    local output = self.model:forward(inputs)

    if type(targets) == 'table' then
        if type(batch_box_labels_gpu) == 'table' then
            for i = 1, #batch_box_labels_gpu do
                targets[i+1] = batch_box_labels_gpu[i]
            end
        else
            targets[2] = batch_box_labels_gpu
        end
    end

    print(output)
    print(targets)

    local err = criterion:forward(output, targets)
    if err ~= 0 then
        local df_do = criterion:backward(output, targets)
        self.model:backward(inputs, df_do, scale)

        -- We'll set these in the loop that iterates over each module. Get them
        -- out here to be captured.
        local curGrad
        local curParam
        local function fEvalMod(x)
            return err, curGrad
        end

        for curMod, opt in pairs(self.modulesToOptState) do
            on_device_for_module(curMod, function()
                local curModParams = self.weight_bias_parameters(curMod)
                -- expects either an empty table or 2 element table, one for weights
                -- and one for biases
                assert(pl.tablex.size(curModParams) == 0 or
                       pl.tablex.size(curModParams) == 2)
                if curModParams then
                    for i, tensor in ipairs(curModParams) do
                        if curModParams[i] then
                            -- expect param, gradParam pair
                            curParam, curGrad = table.unpack(curModParams[i])
                            assert(curParam and curGrad)
                            optimMethod(fEvalMod, curParam, opt[i])
                        end
                    end
                end
            end)
        end
    end

    return err, output
end
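To take the training loop out of the picture, I could also try to reproduce the crash in isolation with dummy inputs of the same sizes that print(inputs) reports (1x3x608x586 image, 1x1963x5 ROIs). A sketch, assuming `model` is loaded as in the repo's training script and that each ROI row is {batch_index, x1, y1, x2, y2} (the row layout is my guess):

require 'cunn'
require 'cudnn'

-- Dummy image matching the printed size 1x3x608x586.
local img = torch.CudaTensor(1, 3, 608, 586):normal()

-- Dummy ROIs matching the printed size 1x1963x5, filled with one
-- fixed, in-bounds box; the {batch, x1, y1, x2, y2} layout is assumed.
local rois = torch.FloatTensor(1, 1963, 5)
rois:select(3, 1):fill(1)   -- batch index
rois:select(3, 2):fill(1)   -- x1
rois:select(3, 3):fill(1)   -- y1
rois:select(3, 4):fill(100) -- x2
rois:select(3, 5):fill(100) -- y2

local ok, err = pcall(function() return model:forward({img, rois}) end)
print('isolated forward:', ok, err)

If this forwards fine with dummy boxes but crashes on the real ones, that would point at the ROI contents (e.g. out-of-bounds coordinates) rather than at the model itself.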
So if anyone can guide me toward a solution, I would be forever grateful.
Thanks in advance.