我正在尝试使用增强型生成器类重新实现双边网格示例(例如,使用schedule()
和generate()
。
但是在尝试编译代码时我遇到了错误。
g++ -std=c++11 -I ../../include/ -I ../../tools/ -I ../../apps/support/ -g - fno-rtti bilateral_grid_generator.cpp ../../lib/libHalide.a ../../tools/GenGen.cpp -o bin/bilateral_grid_exec -ldl -lpthread -lz
bin/bilateral_grid_exec -o ./bin target=host
Generator bilateral_grid has base_path ./bin/bilateral_grid
Internal error at /home/xxx/Projects/Halide/src/Generator.cpp:966 triggered by user code at /usr/include/c++/4.8/functional:2057:
Condition failed: generator
make: *** [bin/bilateral_grid.a] Aborted (core dumped)
似乎我没有将RDom
和GeneratorParam
的定义放在正确的位置。由于r.x
和r.y
同时使用schedule()
和generate()
,我认为我应该将其作为类成员使用。应该怎么做才能解决这个问题?
这是我写的代码。
class BilateralGrid : public Halide::Generator<BilateralGrid> {
public:
GeneratorParam<int> s_sigma{"s_sigma", 8};
//ImageParam input{Float(32), 2, "input"};
//Param<float> r_sigma{"r_sigma"};
Input<Buffer<float>> input{"input", 2};
Input<float> r_sigma{"r_sigma"};
Output<Buffer<float>> output{"output", 2};
// Algorithm Description
void generate() {
//int s_sigma = 8;
// Add a boundary condition
clamped(x,y) = BoundaryConditions::repeat_edge(input)(x,y);
// Construct the bilateral grid
Expr val = clamped(x * s_sigma + r.x - s_sigma/2, y * s_sigma + r.y - s_sigma/2);
val = clamp(val, 0.0f, 1.0f);
Expr zi = cast<int>(val * (1.0f/r_sigma) + 0.5f);
// Histogram
histogram(x, y, z, c) = 0.0f;
histogram(x, y, zi, c) += select(c == 0, val, 1.0f);
// Blur the grid using a five-tap filter
blurz(x, y, z, c) = (histogram(x, y, z-2, c) +
histogram(x, y, z-1, c)*4 +
histogram(x, y, z , c)*6 +
histogram(x, y, z+1, c)*4 +
histogram(x, y, z+2, c));
blurx(x, y, z, c) = (blurz(x-2, y, z, c) +
blurz(x-1, y, z, c)*4 +
blurz(x , y, z, c)*6 +
blurz(x+1, y, z, c)*4 +
blurz(x+2, y, z, c));
blury(x, y, z, c) = (blurx(x, y-2, z, c) +
blurx(x, y-1, z, c)*4 +
blurx(x, y , z, c)*6 +
blurx(x, y+1, z, c)*4 +
blurx(x, y+2, z, c));
// Take trilinear samples to compute the output
val = clamp(input(x, y), 0.0f, 1.0f);
Expr zv = val * (1.0f/r_sigma);
zi = cast<int>(zv);
Expr zf = zv - zi;
Expr xf = cast<float>(x % s_sigma) / s_sigma;
Expr yf = cast<float>(y % s_sigma) / s_sigma;
Expr xi = x/s_sigma;
Expr yi = y/s_sigma;
interpolated(x, y, c) =
lerp(lerp(lerp(blury(xi, yi, zi, c), blury(xi+1, yi, zi, c), xf),
lerp(blury(xi, yi+1, zi, c), blury(xi+1, yi+1, zi, c), xf), yf),
lerp(lerp(blury(xi, yi, zi+1, c), blury(xi+1, yi, zi+1, c), xf),
lerp(blury(xi, yi+1, zi+1, c), blury(xi+1, yi+1, zi+1, c), xf), yf), zf);
// Normalize and return the output.
bilateral_grid(x, y) = interpolated(x, y, 0)/interpolated(x, y, 1);
output(x,y) = bilateral_grid(x,y);
}
// Scheduling
void schedule() {
// int s_sigma = 8;
if (get_target().has_gpu_feature()) {
// The GPU schedule
Var xi{"xi"}, yi{"yi"}, zi{"zi"};
// Schedule blurz in 8x8 tiles. This is a tile in
// grid-space, which means it represents something like
// 64x64 pixels in the input (if s_sigma is 8).
blurz.compute_root().reorder(c, z, x, y).gpu_tile(x, y, xi, yi, 8, 8);
// Schedule histogram to happen per-tile of blurz, with
// intermediate results in shared memory. This means histogram
// and blurz makes a three-stage kernel:
// 1) Zero out the 8x8 set of histograms
// 2) Compute those histogram by iterating over lots of the input image
// 3) Blur the set of histograms in z
histogram.reorder(c, z, x, y).compute_at(blurz, x).gpu_threads(x, y);
histogram.update().reorder(c, r.x, r.y, x, y).gpu_threads(x, y).unroll(c);
// An alternative schedule for histogram that doesn't use shared memory:
// histogram.compute_root().reorder(c, z, x, y).gpu_tile(x, y, xi, yi, 8, 8);
// histogram.update().reorder(c, r.x, r.y, x, y).gpu_tile(x, y, xi, yi, 8, 8).unroll(c);
// Schedule the remaining blurs and the sampling at the end similarly.
blurx.compute_root().gpu_tile(x, y, z, xi, yi, zi, 8, 8, 1);
blury.compute_root().gpu_tile(x, y, z, xi, yi, zi, 8, 8, 1);
bilateral_grid.compute_root().gpu_tile(x, y, xi, yi, s_sigma, s_sigma);
} else {
// The CPU schedule.
blurz.compute_root().reorder(c, z, x, y).parallel(y).vectorize(x, 8).unroll(c);
histogram.compute_at(blurz, y);
histogram.update().reorder(c, r.x, r.y, x, y).unroll(c);
blurx.compute_root().reorder(c, x, y, z).parallel(z).vectorize(x, 8).unroll(c);
blury.compute_root().reorder(c, x, y, z).parallel(z).vectorize(x, 8).unroll(c);
bilateral_grid.compute_root().parallel(y).vectorize(x, 8);
}
}
Func clamped{"clamped"}, histogram{"histogram"};
Func bilateral_grid{"bilateral_grid"};
Func blurx{"blurx"}, blury{"blury"}, blurz{"blurz"}, interpolated{"interpolated"};
Var x{"x"}, y{"y"}, z{"z"}, c{"c"};
RDom r{0, s_sigma, 0, s_sigma};
};
//Halide::RegisterGenerator<BilateralGrid> register_me{"bilateral_grid"};
HALIDE_REGISTER_GENERATOR(BilateralGrid, "bilateral_grid");
} // namespace
答案 0 :(得分:4)
这里的错误是微妙的,当前的断言失败消息令人遗憾地无益。
此处的问题是此代码使用GeneratorParam
(s_sigma)初始化成员变量 - RDom
(r),但GeneratorParam
可能没有最终值设定在那一点。一般来说,在调用GeneratorParam
方法之前访问ScheduleParam
(或generate()
)会产生这样的断言。
这是为什么?让我们看看在典型的构建系统中创建和初始化Generators的方式:
bin/bilateral_grid_exec -o ./bin target=host s_sigma=7
调用了Generator,则s_sigma
中存储的默认值(8)将替换为7. generate()
,然后调用schedule()
,然后将结果编译为.o(或.a等)。那你为什么看到断言?此代码中发生的是,在上面的步骤1中,r
的ctor正在步骤1中运行...但r
的ctor的参数读取当前值s_sigma
,其默认值为(8),但不一定是构建文件指定的值。如果我们允许在不声明的情况下进行此读取,则可能会在生成器的不同部分中为s_sigma
获取不一致的值。
您可以通过将RDom的初始化推迟到generate()
方法来解决此问题:
class BilateralGrid : public Halide::Generator<BilateralGrid> {
public:
GeneratorParam<int> s_sigma{"s_sigma", 8};
...
void generate() {
r = RDom(0, s_sigma, 0, s_sigma);
...
}
...
private:
RDom r;
};
(显然,断言失败需要更有用的错误消息;我将修改代码以执行此操作。)