My app crashes when I try to launch it. How do I fix this error message?

Time: 2019-04-26 10:44:27

Tags: java android

Context: I'm trying to use TensorFlow Lite for camera capture.

When I launch the app it crashes, and this error message shows up on my phone (there were no errors in the build step):

java.lang.NoClassDefFoundError: Failed resolution of: Lcom/google/android/things/pio/GpioCallback;
    at com.google.android.things.contrib.driver.button.ButtonInputDriver.<init>(ButtonInputDriver.java:44)
    at com.google.android.things.contrib.driver.rainbowhat.RainbowHat.createButtonInputDriver(RainbowHat.java)
    at com.google.android.things.contrib.driver.rainbowhat.RainbowHat.createButtonCInputDriver(RainbowHat.java)
    at com.example.androidthings.imageclassifier.ImageClassifierActivity.initButton(ImageClassifierActivity.java:186)
    at com.example.androidthings.imageclassifier.ImageClassifierActivity.onCreate(ImageClassifierActivity.java:172)
    at android.app.Activity.performCreate(Activity.java)
    at android.app.Activity.performCreate(Activity.java)
    at android.app.Instrumentation.callActivityOnCreate(Instrumentation.java)
    at android.app.ActivityThread.performLaunchActivity(ActivityThread.java)
    at android.app.ActivityThread.handleLaunchActivity(ActivityThread.java)
    at android.app.servertransaction.LaunchActivityItem.execute(LaunchActivityItem.java)
    at android.app.servertransaction.TransactionExecutor.executeCallbacks(TransactionExecutor.java)
    at android.app.servertransaction.TransactionExecutor.execute(TransactionExecutor.java)
    at android.app.ActivityThread$H.handleMessage(ActivityThread.java)
    at android.os.Handler.dispatchMessage(Handler.java:106)
    at android.os.Looper.loop(Looper.java:201)
    at android.app.ActivityThread.main(ActivityThread.java)
    at java.lang.reflect.Method.invoke(Native Method)
    at com.android.internal.os.RuntimeInit$MethodAndArgsCaller.run(RuntimeInit.java)
    at com.android.internal.os.ZygoteInit.main(ZygoteInit.java)
Caused by: java.lang.ClassNotFoundException: Didn't find class "com.google.android.things.pio.GpioCallback" on path: DexPathList[[zip file "/system/framework/org.apache.http.legacy.boot.jar", zip file "/data/app/com.example.androidthings.imageclassifier-n33NGwKNRyCZU-UICaS1Qw==/base.apk"], nativeLibraryDirectories=[/data/app/com.example.androidthings.imageclassifier-n33NGwKNRyCZU-UICaS1Qw==/lib/arm64, /data/app/com.example.androidthings.imageclassifier-n33NGwKNRyCZU-UICaS1Qw==/base.apk!/lib/arm64-v8a, /system/lib64, /vendor/lib64]]
    at dalvik.system.BaseDexClassLoader.findClass(BaseDexClassLoader.java:171)
    at java.lang.ClassLoader.loadClass(ClassLoader.java:379)
    at java.lang.ClassLoader.loadClass(ClassLoader.java:312)

Here is the ImageClassifierActivity:

package com.example.androidthings.imageclassifier;

import android.app.Activity;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.media.ImageReader;
import android.os.Bundle;
import android.util.Log;
import android.view.KeyEvent;
import android.view.WindowManager;
import android.widget.ImageView;
import android.widget.TextView;

import com.example.androidthings.imageclassifier.classifier.Recognition;
import com.example.androidthings.imageclassifier.classifier.TensorFlowHelper;
import com.google.android.things.contrib.driver.button.ButtonInputDriver;
import com.google.android.things.contrib.driver.rainbowhat.RainbowHat;

import org.tensorflow.lite.Interpreter;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;

public class ImageClassifierActivity extends Activity {
    private static final String TAG = "ImageClassifierActivity";

    /** Camera image capture size */
    private static final int PREVIEW_IMAGE_WIDTH = 640;
    private static final int PREVIEW_IMAGE_HEIGHT = 480;
    /** Image dimensions required by TF model */
    private static final int TF_INPUT_IMAGE_WIDTH = 224;
    private static final int TF_INPUT_IMAGE_HEIGHT = 224;
    /** Dimensions of model inputs. */
    private static final int DIM_BATCH_SIZE = 1;
    private static final int DIM_PIXEL_SIZE = 3;
    /** TF model asset files */
    private static final String LABELS_FILE = "labels.txt";
    private static final String MODEL_FILE = "mobilenet_quant_v1_224.tflite";

    private ButtonInputDriver mButtonDriver;
    private boolean mProcessing;

    private ImageView mImage;
    private TextView mResultText;

    private Interpreter mTensorFlowLite;
    private List<String> mLabels;
    private CameraHandler mCameraHandler;
    private ImagePreprocessor mImagePreprocessor;

    /**
     * Initialize the classifier that will be used to process images.
     */
    private void initClassifier() {
        try {
            mTensorFlowLite = new Interpreter(TensorFlowHelper.loadModelFile(this, MODEL_FILE));
            mLabels = TensorFlowHelper.readLabels(this, LABELS_FILE);
        } catch (IOException e) {
            Log.w(TAG, "Unable to initialize TensorFlow Lite.", e);
        }
    }

    /**
     * Clean up the resources used by the classifier.
     */
    private void destroyClassifier() {
        // Guard against the case where initClassifier() failed and left this null.
        if (mTensorFlowLite != null) {
            mTensorFlowLite.close();
        }
    }

    /**
     * Process an image and identify what is in it. When done, the method
     * {@link #onPhotoRecognitionReady(Collection)} must be called with the results of
     * the image recognition process.
     *
     * @param image Bitmap containing the image to be classified. The image can be
     *              of any size, but preprocessing might occur to resize it to the
     *              format expected by the classification process, which can be time
     *              and power consuming.
     */
    private void doRecognize(Bitmap image) {
        // Allocate space for the inference results
        byte[][] confidencePerLabel = new byte[1][mLabels.size()];
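        // (A quantized model reports each label's confidence as one unsigned
        // byte, 0-255, which is why this is byte[][] rather than float[][].)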
        // Allocate buffer for image pixels.
        int[] intValues = new int[TF_INPUT_IMAGE_WIDTH * TF_INPUT_IMAGE_HEIGHT];
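        // The quantized model likewise takes one byte per pixel channel as
        // input, so the buffer below needs 1 * 224 * 224 * 3 bytes, with no
        // 4-byte-per-float factor.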
        ByteBuffer imgData = ByteBuffer.allocateDirect(
                DIM_BATCH_SIZE * TF_INPUT_IMAGE_WIDTH * TF_INPUT_IMAGE_HEIGHT * DIM_PIXEL_SIZE);
        imgData.order(ByteOrder.nativeOrder());

        // Read image data into buffer formatted for the TensorFlow model
        TensorFlowHelper.convertBitmapToByteBuffer(image, intValues, imgData);

        // Run inference on the network with the image bytes in imgData as input,
        // storing results on the confidencePerLabel array.
        mTensorFlowLite.run(imgData, confidencePerLabel);

        // Get the results with the highest confidence and map them to their labels
        Collection<Recognition> results = TensorFlowHelper.getBestResults(confidencePerLabel, mLabels);
        // Report the results with the highest confidence
        onPhotoRecognitionReady(results);
    }

    /**
     * Initialize the camera that will be used to capture images.
     */
    private void initCamera() {
        mImagePreprocessor = new ImagePreprocessor(PREVIEW_IMAGE_WIDTH, PREVIEW_IMAGE_HEIGHT,
                TF_INPUT_IMAGE_WIDTH, TF_INPUT_IMAGE_HEIGHT);
        mCameraHandler = CameraHandler.getInstance();
        mCameraHandler.initializeCamera(this,
                PREVIEW_IMAGE_WIDTH, PREVIEW_IMAGE_HEIGHT, null,
                new ImageReader.OnImageAvailableListener() {
                    @Override
                    public void onImageAvailable(ImageReader imageReader) {
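                        // The Image from acquireNextImage() must be closed when
                        // done, or the ImageReader runs out of buffers;
                        // presumably ImagePreprocessor closes it internally.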
                        Bitmap bitmap = mImagePreprocessor.preprocessImage(imageReader.acquireNextImage());
                        onPhotoReady(bitmap);
                    }
                });
    }

    /**
     * Clean up resources used by the camera.
     */
    private void closeCamera() {
        mCameraHandler.shutDown();
    }

    /**
     * Load the image that will be used in the classification process.
     * When done, the method {@link #onPhotoReady(Bitmap)} must be called with the image.
     */
    private void loadPhoto() {
        mCameraHandler.takePicture();
    }


    // ----------------------------------------------------------------------------------
    // NOTE: The normal codelab flow won't require you to change anything below this line,
    // although you are encouraged to read and understand it.

    @Override
    protected void onCreate(final Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        getWindow().addFlags(WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON);

        setContentView(R.layout.activity_camera);
        mImage = findViewById(R.id.imageView);
        mResultText = findViewById(R.id.resultText);

        updateStatus(getString(R.string.initializing));
        initCamera();
        initClassifier();
        initButton();
        updateStatus(getString(R.string.help_message));
    }

    /**
     * Register a GPIO button that, when clicked, will generate the {@link KeyEvent#KEYCODE_ENTER}
     * key, to be handled by {@link #onKeyUp(int, KeyEvent)} just like any regular keyboard
     * event.
     *
     * If there's no button connected to the board, image recognition can still be triggered
     * by sending key events with a USB keyboard or `adb shell input keyevent 66`.
     */
    private void initButton() {
        try {
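            // This is the call where the stack trace above blows up: the Rainbow
            // HAT button driver needs com.google.android.things classes at runtime.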
            mButtonDriver = RainbowHat.createButtonCInputDriver(KeyEvent.KEYCODE_ENTER);
            mButtonDriver.register();
        } catch (IOException e) {
            Log.w(TAG, "Cannot find button. Ignoring push button. Use a keyboard instead.", e);
        }
    }

    private Bitmap getStaticBitmap() {
        Log.d(TAG, "Using sample photo in res/drawable/sampledog_224x224.png");
        return BitmapFactory.decodeResource(this.getResources(), R.drawable.sampledog_224x224);
    }

    @Override
    public boolean onKeyUp(int keyCode, KeyEvent event) {
        if (keyCode == KeyEvent.KEYCODE_ENTER) {
            if (mProcessing) {
                updateStatus("Still processing, please wait");
                return true;
            }
            updateStatus("Running photo recognition");
            mProcessing = true;
            loadPhoto();
            return true;
        }
        return super.onKeyUp(keyCode, event);
    }

    /**
     * Image capture process complete
     */
    private void onPhotoReady(Bitmap bitmap) {
        mImage.setImageBitmap(bitmap);
        doRecognize(bitmap);
    }

    /**
     * Image classification process complete
     */
    private void onPhotoRecognitionReady(Collection<Recognition> results) {
        updateStatus(formatResults(results));
        mProcessing = false;
    }

    /**
     * Format results list for display
     */
    private String formatResults(Collection<Recognition> results) {
        if (results == null || results.isEmpty()) {
            return getString(R.string.empty_result);
        } else {
            StringBuilder sb = new StringBuilder();
            Iterator<Recognition> it = results.iterator();
            int counter = 0;
            while (it.hasNext()) {
                Recognition r = it.next();
                sb.append(r.getTitle());
                counter++;
                if (counter < results.size() - 1) {
                    sb.append(", ");
                } else if (counter == results.size() - 1) {
                    sb.append(" or ");
                }
            }

            return sb.toString();
        }
    }

    /**
     * Report updates to the display and log output
     */
    private void updateStatus(String status) {
        Log.d(TAG, status);
        mResultText.setText(status);
    }

    @Override
    protected void onDestroy() {
        super.onDestroy();
        try {
            destroyClassifier();
        } catch (Throwable t) {
            // close quietly
        }
        try {
            closeCamera();
        } catch (Throwable t) {
            // close quietly
        }
        try {
            if (mButtonDriver != null) mButtonDriver.close();
        } catch (Throwable t) {
            // close quietly
        }
    }
}

0 Answers:

No answers yet.