Hi, I'm trying to understand how TFLite's input format works, based on the TFLite object-detection example on GitHub, and why this particular format is used in the first place. The reason is that I want to implement this without the Camera2 API, but unlike in Python it isn't that easy to manipulate or inspect the images. Here is the relevant part of the example:
@Override
protected void onPreviewSizeChosen(Size size, int rotation) {
    int cropSize = TF_OD_API_INPUT_SIZE;
    initTFLite(cropSize);
    Log.d("PREVIEW", size.toString());

    previewHeight = size.getHeight();
    previewWidth = size.getWidth();
    sensorOrientation = rotation - getScreenOrientation();

    // Create empty bitmaps
    rgbFrameBitmap = Bitmap.createBitmap(previewWidth, previewHeight, Bitmap.Config.ARGB_8888);
    croppedBitmap = Bitmap.createBitmap(cropSize, cropSize, Bitmap.Config.ARGB_8888);

    // I don't understand exactly what this does (my rough guess is sketched after this method)
    frameToCropTransform = ImageUtils.getTransformationMatrix(
            previewWidth, previewHeight,
            cropSize, cropSize,
            sensorOrientation, MAINTAIN_ASPECT
    );

    cropToFrameTransform = new Matrix();
    frameToCropTransform.invert(cropToFrameTransform);
}
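From reading ImageUtils in the example, my rough understanding of frameToCropTransform is that it is just an android.graphics.Matrix that rotates the preview frame by sensorOrientation and then scales it into the cropSize x cropSize square, with MAINTAIN_ASPECT picking the larger scale factor so the square is filled and the overflow is cropped. The helper below (buildFrameToCrop is my own name) is only my simplified sketch of that, not the example's actual code:

// Sketch only; uses android.graphics.Matrix.
static Matrix buildFrameToCrop(int srcWidth, int srcHeight,
                               int dstWidth, int dstHeight,
                               int rotationDegrees, boolean maintainAspect) {
    Matrix m = new Matrix();

    if (rotationDegrees != 0) {
        // Rotate around the centre of the source frame.
        m.postTranslate(-srcWidth / 2.0f, -srcHeight / 2.0f);
        m.postRotate(rotationDegrees);
    }

    // A 90/270 degree rotation swaps width and height.
    boolean transpose = (Math.abs(rotationDegrees) + 90) % 180 == 0;
    int inWidth = transpose ? srcHeight : srcWidth;
    int inHeight = transpose ? srcWidth : srcHeight;

    // Scale the (possibly rotated) frame into the destination square.
    float scaleX = dstWidth / (float) inWidth;
    float scaleY = dstHeight / (float) inHeight;
    if (maintainAspect) {
        float scale = Math.max(scaleX, scaleY); // fill the square, crop the overflow
        m.postScale(scale, scale);
    } else {
        m.postScale(scaleX, scaleY);
    }

    if (rotationDegrees != 0) {
        // Move the origin back to the top-left corner of the destination.
        m.postTranslate(dstWidth / 2.0f, dstHeight / 2.0f);
    }
    return m;
}

If that is right, then cropToFrameTransform (the inverse) would be what maps the detector's boxes back onto the full preview frame. Then in processImage():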
@Override
protected void processImage() {
    // No mutex needed as this method is not reentrant.
    if (computingDetection) {
        readyForNextImage();
        return;
    }
    computingDetection = true;

    // Prepare image
    // What does this setPixels call actually do?
    rgbFrameBitmap.setPixels(getRgbBytes(), 0, previewWidth, 0, 0, previewWidth, previewHeight);
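    // (My rough understanding: setPixels just copies the int[] of packed ARGB
    //  values returned by getRgbBytes() into rgbFrameBitmap, with a row stride
    //  of previewWidth, so the converted camera frame becomes a Bitmap I can draw from.)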
    readyForNextImage();

    final Canvas canvas = new Canvas(croppedBitmap);
    canvas.drawBitmap(rgbFrameBitmap, frameToCropTransform, null);

    // For examining the actual TF input.
    if (SAVE_PREVIEW_BITMAP) {
        ImageUtils.saveBitmap(croppedBitmap);
    }

    runInBackground(new Runnable() {
        @Override
        public void run() {
            final List<Classifier.Recognition> results = detector.recognizeImage(croppedBitmap);
            float minimumConfidence = MINIMUM_CONFIDENCE_TF_OD_API;
            switch (MODE) {
                case TF_OD_API:
                    minimumConfidence = MINIMUM_CONFIDENCE_TF_OD_API;
                    break;
            }
            Log.d("RESULTS", results.toString());
            computingDetection = false;
        }
    });
}
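What I was hoping to do instead of the Camera2 pipeline is to decode an image from a file and push it through the same detector, roughly like the sketch below. FileDetection, detectFromFile and toInputBuffer are just my own placeholder names; Classifier and TF_OD_API_INPUT_SIZE come from the example, and the ByteBuffer conversion is only my guess at how the demo's quantized model wrapper packs the bitmap (3 bytes per pixel, RGB order). Is this roughly the right idea, and is that the input format the interpreter actually expects?

import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.graphics.Canvas;
import android.graphics.Matrix;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.List;

// Hypothetical helper, not part of the example.
public class FileDetection {

    private static final int TF_OD_API_INPUT_SIZE = 300; // same value as in the demo

    // Replace the whole Camera2 -> YUV -> getRgbBytes() -> setPixels() pipeline
    // with a plain file decode.
    public static List<Classifier.Recognition> detectFromFile(Classifier detector, String path) {
        Bitmap source = BitmapFactory.decodeFile(path);

        // Scale into the square input bitmap, like croppedBitmap above.
        Bitmap cropped = Bitmap.createBitmap(
                TF_OD_API_INPUT_SIZE, TF_OD_API_INPUT_SIZE, Bitmap.Config.ARGB_8888);
        Matrix transform = new Matrix();
        transform.postScale(
                TF_OD_API_INPUT_SIZE / (float) source.getWidth(),
                TF_OD_API_INPUT_SIZE / (float) source.getHeight());
        new Canvas(cropped).drawBitmap(source, transform, null);

        // Hand it to the detector exactly like processImage() does.
        return detector.recognizeImage(cropped);
    }

    // My guess at how the bitmap ends up as the interpreter's input
    // (quantized model: one byte per channel, RGB order, alpha dropped).
    static ByteBuffer toInputBuffer(Bitmap bitmap) {
        int pixelCount = bitmap.getWidth() * bitmap.getHeight();
        int[] pixels = new int[pixelCount];
        bitmap.getPixels(pixels, 0, bitmap.getWidth(), 0, 0,
                bitmap.getWidth(), bitmap.getHeight());

        ByteBuffer buffer = ByteBuffer.allocateDirect(pixelCount * 3);
        buffer.order(ByteOrder.nativeOrder());
        for (int pixel : pixels) {
            buffer.put((byte) ((pixel >> 16) & 0xFF)); // R
            buffer.put((byte) ((pixel >> 8) & 0xFF));  // G
            buffer.put((byte) (pixel & 0xFF));         // B
        }
        buffer.rewind();
        return buffer;
    }
}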