I noticed that the netCDF file created by the following program is corrupted, i.e. running `ncdump -h out.nc` produces an error.
#include <netcdf>

/**
 * This file produces a corrupted nc output file.
 * Compile with `g++ -std=c++11 -o test test.cpp -lnetcdf -lnetcdf_c++4`
 */

// 1048577 is the first chunk size that does not work;
// 1048576 (= 2^20) still produces a valid file.
#define CHUNK_SIZE 1048577
using namespace std;
using namespace netCDF;
using namespace netCDF::exceptions;

int main()
{
    typedef std::vector<size_t> vs;
    typedef std::vector<netCDF::NcDim> vd;

    try
    {
        NcFile outFile = NcFile("out.nc", NcFile::replace);

        // create the dimensions complying to the AMBER specs
        NcDim frameDim = outFile.addDim("frame");
        NcDim atomDim = outFile.addDim("atom");
        NcDim spatialDim = outFile.addDim("spatial", 3);
        NcDim radiusDim = outFile.addDim("radius", 1);

        // create the variables
        NcVar coords = outFile.addVar("coordinates", ncFloat, vd({frameDim, atomDim, spatialDim}));
        NcVar radii = outFile.addVar("radius", ncFloat, vd({frameDim, atomDim}));

        // set up chunking
        vs chunk_coords({1, CHUNK_SIZE, 3});
        vs chunk_radii({1, CHUNK_SIZE, 1});
        coords.setChunking(NcVar::nc_CHUNKED, chunk_coords);
        radii.setChunking(NcVar::nc_CHUNKED, chunk_radii);

        // set up compression
        coords.setCompression(false, true, 1);
        radii.setCompression(false, true, 1);

        return 0;
    }
    catch (NcException& e)
    {
        return -1;
    }
}
With a chunk size of 1048576 the same program produces a valid `out.nc` file. Note that 1048576, the largest working chunk size, is exactly 2^20.

What causes this behavior? It is easy to work around by renaming the `radius` dimension, but I am still curious why this has anything to do with HDF5/netCDF chunking.
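For reference, here is a minimal sketch of the rename workaround I mean. It assumes the relevant change is giving the dimension a name that no longer collides with the `radius` variable; the replacement name `radius_dim` is arbitrary, and everything else is kept exactly as in the reproduction program above.

// Sketch of the rename workaround (assumption: the point is that the dimension
// and the variable no longer share the name "radius"; "radius_dim" is an
// arbitrary replacement name). Compile the same way as the program above.
#include <netcdf>
#include <vector>

// chunk size unchanged from the reproduction program
#define CHUNK_SIZE 1048577

using namespace netCDF;
using namespace netCDF::exceptions;

int main()
{
    typedef std::vector<size_t> vs;
    typedef std::vector<netCDF::NcDim> vd;

    try
    {
        NcFile outFile("out.nc", NcFile::replace);

        NcDim frameDim = outFile.addDim("frame");
        NcDim atomDim = outFile.addDim("atom");
        NcDim spatialDim = outFile.addDim("spatial", 3);
        // only change: the dimension is no longer called "radius"
        NcDim radiusDim = outFile.addDim("radius_dim", 1);

        NcVar coords = outFile.addVar("coordinates", ncFloat, vd({frameDim, atomDim, spatialDim}));
        NcVar radii = outFile.addVar("radius", ncFloat, vd({frameDim, atomDim}));

        // chunking and compression exactly as in the reproduction program
        vs chunk_coords({1, CHUNK_SIZE, 3});
        vs chunk_radii({1, CHUNK_SIZE, 1});
        coords.setChunking(NcVar::nc_CHUNKED, chunk_coords);
        radii.setChunking(NcVar::nc_CHUNKED, chunk_radii);
        coords.setCompression(false, true, 1);
        radii.setCompression(false, true, 1);

        return 0;
    }
    catch (NcException& e)
    {
        return -1;
    }
}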