I am facing a problem with CameraView. Do I have to import something, because I get the error "cannot resolve flurgle" for import com.flurgle.camerakit.CameraView;? I have already set the permissions in the manifest file.
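For reference, I think the library also has to be declared as a dependency in build.gradle, roughly like the sketch below (the coordinates are guessed from the package name and the version is a placeholder; it may not match what the blog post uses):

dependencies {
    // coordinates guessed from the package name com.flurgle.camerakit;
    // the version is a placeholder, check the blog post's build.gradle for the exact one
    compile 'com.flurgle:camerakit:0.9.13'
}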
The code using CameraView (taken from https://blog.mindorks.com/android-tensorflow-machine-learning-example-ff0e9b2654cc.html):
package com.example.mujtaba.basicai.Machine_Learning;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.support.v7.app.AppCompatActivity;
import android.os.Bundle;
import android.text.method.ScrollingMovementMethod;
import android.view.View;
import android.widget.Button;
import android.widget.ImageView;
import android.widget.TextView;
import com.example.mujtaba.basicai.R;
import com.flurgle.camerakit.CameraListener;
import com.flurgle.camerakit.CameraView;
import java.util.List;
import java.util.concurrent.Executor;
import java.util.concurrent.Executors;
public class ImageRecognition extends AppCompatActivity {
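// Configuration for the TensorFlow Inception model bundled in the assets folder:
// input image size, normalization constants, tensor names, and file paths.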
private static final int INPUT_SIZE = 224;
private static final int IMAGE_MEAN = 117;
private static final float IMAGE_STD = 1;
private static final String INPUT_NAME = "input";
private static final String OUTPUT_NAME = "output";
private static final String MODEL_FILE = "file:///android_asset/tensorflow_inception_graph.pb";
private static final String LABEL_FILE =
"file:///android_asset/imagenet_comp_graph_label_strings.txt";
private Classifier classifier;
private Executor executor = Executors.newSingleThreadExecutor();
private TextView textViewResult;
private Button btnDetectObject, btnToggleCamera;
private ImageView imageViewResult;
private CameraView cameraView;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_image_recognition);
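// Look up the views declared in activity_image_recognition.xml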
cameraView = (CameraView) findViewById(R.id.cameraView);
imageViewResult = (ImageView) findViewById(R.id.imageViewResult);
textViewResult = (TextView) findViewById(R.id.textViewResult);
textViewResult.setMovementMethod(new ScrollingMovementMethod());
btnToggleCamera = (Button) findViewById(R.id.btnToggleCamera);
btnDetectObject = (Button) findViewById(R.id.btnDetectObject);
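// Delivered after captureImage(): decode the captured bytes, scale the bitmap
// to the model's input size, show it in the ImageView, and run the classifier.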
cameraView.setCameraListener(new CameraListener() {
@Override
public void onPictureTaken(byte[] picture) {
super.onPictureTaken(picture);
Bitmap bitmap = BitmapFactory.decodeByteArray(picture, 0, picture.length);
bitmap = Bitmap.createScaledBitmap(bitmap, INPUT_SIZE, INPUT_SIZE, false);
imageViewResult.setImageBitmap(bitmap);
final List<Classifier.Recognition> results = classifier.recognizeImage(bitmap);
textViewResult.setText(results.toString());
}
});
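// Switch between the front and back cameras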
btnToggleCamera.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
cameraView.toggleFacing();
}
});
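// Take a picture; the result is handled in onPictureTaken() above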
btnDetectObject.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
cameraView.captureImage();
}
});
initTensorFlowAndLoadModel();
}
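// Start and stop the camera preview with the activity lifecycle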
@Override
protected void onResume() {
super.onResume();
cameraView.start();
}
@Override
protected void onPause() {
cameraView.stop();
super.onPause();
}
@Override
protected void onDestroy() {
super.onDestroy();
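// Release the classifier on the same single background thread that created it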
executor.execute(new Runnable() {
@Override
public void run() {
classifier.close();
}
});
}
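// Load the model and label files off the UI thread; the detect button is only
// shown once loading succeeds.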
private void initTensorFlowAndLoadModel() {
executor.execute(new Runnable() {
@Override
public void run() {
try {
classifier = TensorFlowImageClassifier.create(
getAssets(),
MODEL_FILE,
LABEL_FILE,
INPUT_SIZE,
IMAGE_MEAN,
IMAGE_STD,
INPUT_NAME,
OUTPUT_NAME);
makeButtonVisible();
} catch (final Exception e) {
throw new RuntimeException("Error initializing TensorFlow!", e);
}
}
});
}
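// Reveal the detect button on the UI thread once the classifier is ready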
private void makeButtonVisible() {
runOnUiThread(new Runnable() {
@Override
public void run() {
btnDetectObject.setVisibility(View.VISIBLE);
}
});
}
}