I have converted a custom-trained TensorFlow image segmentation model to the TF Lite format and am now trying to run inference on it using the C++ API.
I am inspecting the input and output tensors, following this link to do so.
Strangely, the shape of my input tensor appears larger than the image that has to be fed into the model. The image has shape 480x640x3, yet dims->data in the code snippet
below holds far more entries than that (see the console screenshot). How can that be?
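For context, TfLiteIntArray is declared in tensorflow/lite/c/common.h roughly as follows (the exact declaration varies with compiler support for flexible array members):

typedef struct TfLiteIntArray {
  int size;
  int data[];  // flexible array member; only the first `size` entries are valid
} TfLiteIntArray;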
#include <cstdio>
#include <iostream>
#include <iomanip>
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model.h"
#include "tensorflow/lite/optional_debug_tools.h"
#include <ctime>
using namespace tflite;
using namespace std;
#define TFLITE_MINIMAL_CHECK(x)                              \
  if (!(x)) {                                                \
    fprintf(stderr, "Error at %s:%d\n", __FILE__, __LINE__); \
    exit(1);                                                 \
  }
int main(int argc, char* argv[]) {
  if (argc != 2) {
    fprintf(stderr, "minimal <tflite model>\n");
    return 1;
  }
  const char* filename = argv[1];
  clock_t startt, endd;
  startt = clock();
  // Load model
  std::unique_ptr<tflite::FlatBufferModel> model =
      tflite::FlatBufferModel::BuildFromFile(filename);
  TFLITE_MINIMAL_CHECK(model != nullptr);
  // Build the interpreter; the builder fills in the unique_ptr,
  // so no separate `new Interpreter()` is needed
  tflite::ops::builtin::BuiltinOpResolver resolver;
  std::unique_ptr<Interpreter> interpreter;
  InterpreterBuilder builder(*model, resolver);
  builder(&interpreter);
  TFLITE_MINIMAL_CHECK(interpreter != nullptr);
  // SetNumThreads only takes effect once the interpreter has been built
  std::cout << "allocating a number of threads" << std::endl;
  int numthreads = 4;
  interpreter->SetNumThreads(numthreads);
  std::cout << "threads allocated" << std::endl;
  endd = clock();
  // Allocate tensor buffers.
  TFLITE_MINIMAL_CHECK(interpreter->AllocateTensors() == kTfLiteOk);
  printf("=== Pre-invoke Interpreter State ===\n");
  tflite::PrintInterpreterState(interpreter.get());
  double time_taken = double(endd - startt) / double(CLOCKS_PER_SEC);
  // std::setprecision must precede the value it is meant to format
  std::cout << "Time taken to load the model in tflite using the C++ API: "
            << std::fixed << std::setprecision(5) << time_taken << " sec" << std::endl;
  // Inspect the input and output tensors
  std::cout << "inputs : " << interpreter->inputs().size() << "\n";
  std::cout << "inputs(0) name : " << interpreter->GetInputName(0) << "\n";
  std::cout << "tensors size: " << interpreter->tensors_size() << "\n";
  std::cout << "nodes size: " << interpreter->nodes_size() << "\n";
  clock_t startinput = clock();
  int input = interpreter->inputs()[0];
  std::cout << "input.1 : " << input << "\n";
  const std::vector<int> inputs = interpreter->inputs();
  const std::vector<int> outputs = interpreter->outputs();
  std::cout << "number of inputs: " << inputs.size() << "\n";
  std::cout << "number of outputs: " << outputs.size() << "\n";
  // TF Lite image tensors are laid out NHWC: [batch, height, width, channels]
  TfLiteIntArray* dims = interpreter->tensor(input)->dims;
  int test0 = dims->data[0];
  int wanted_height = dims->data[1];
  int wanted_width = dims->data[2];
  int wanted_channels = dims->data[3];
  // extra reads past the fourth entry, to see what else dims->data holds
  int test4 = dims->data[4];
  int test5 = dims->data[5];
  int test6 = dims->data[6];
  std::cout << "type of input tensor: " << interpreter->tensor(input)->type << std::endl;
  std::cout << "height, width, channels of input : " << wanted_height << " " << wanted_width << " "
            << wanted_channels << " " << test0 << " " << test4 << " " << test5 << " " << test6
            << std::endl;
}
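For reference, here is a minimal sketch of how I would read the dimensions while staying within bounds, assuming the interpreter has been built and AllocateTensors() has succeeded as above; dims->data is only meaningful up to dims->size entries:

#include <iostream>
#include "tensorflow/lite/interpreter.h"

// Print only the entries of dims->data that fall within dims->size;
// anything past that index is not part of the tensor's shape.
void PrintInputDims(const tflite::Interpreter& interpreter) {
  int input = interpreter.inputs()[0];
  const TfLiteIntArray* dims = interpreter.tensor(input)->dims;
  std::cout << "dims->size = " << dims->size << std::endl;
  for (int i = 0; i < dims->size; ++i) {
    std::cout << "dims->data[" << i << "] = " << dims->data[i] << std::endl;
  }
}

For a 480x640x3 image in an NHWC model I would expect this to print four entries, e.g. 1, 480, 640, 3.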