Garbled audio when using FFmpeg on Android

Asked: 2013-02-27 14:29:58

Tags: android android-ndk ffmpeg

I am trying to decode an audio file to PCM for use with AudioTrack. The audio comes out squeaky, noisy, just plain garbled; the occasional second sounds the way it should, but it's mostly a complete mess. I'm not sure where my mistake is. Is it in how I pass the array back to playSound?

Thanks in advance. I'd really appreciate any help with this, as it has been kicking my butt for a while now.

Here is my Java code:

public void init() {
    int bufSize = AudioTrack.getMinBufferSize(44100,
            AudioFormat.CHANNEL_OUT_STEREO,
            AudioFormat.ENCODING_PCM_16BIT);

    track = new AudioTrack(AudioManager.STREAM_MUSIC,
            44100,
            AudioFormat.CHANNEL_OUT_STEREO,
            AudioFormat.ENCODING_PCM_16BIT,
            bufSize,
            AudioTrack.MODE_STREAM);
    log("STARTING!!! _________________________ <--");

    byte[] array = new byte[bufSize];

    try {
        fos = new FileOutputStream("/sdcard/acdc.bin");
    } catch (IOException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }

    decoder("/sdcard/acdc.ogg", array);
}

void playSound(byte[] buf, int size) {   

    try {
        fos.write(buf, 0, size);
    } catch (IOException e) {
        e.printStackTrace();
    }
    if(track.getPlayState()!=AudioTrack.PLAYSTATE_PLAYING) {   
        track.play();                       
    }
    int wrote =  track.write(buf, 0, size);
    if (wrote != size)
        log("WRITING: " + wrote  + " but size was: " + size); 
}

Here is my C function:

void Java_com_example_ffmpegsample_MainActivity_decoder(JNIEnv* env, jobject obj,jstring file,jbyteArray array)
    {
        jboolean            isfilenameCopy;
        const char *        filename = (*env)->GetStringUTFChars(env, file, &isfilenameCopy);
        AVCodec *codec;
        AVCodecContext *c= NULL;
        AVFormatContext *pFormatCtx;
        AVCodecContext *pCodecCtx;

        int out_size, len;
        FILE *f, *outfile;
        uint8_t *outbuf;
        uint8_t inbuf[AUDIO_INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
        AVPacket avpkt;
        LOGI("HERE");
        jclass              cls = (*env)->GetObjectClass(env, obj);
        LOGI("got jclass for callback");
        jmethodID           play = (*env)->GetMethodID(env, cls, "playSound", "([BI)V"); // at the beginning of your main function

        av_init_packet(&avpkt);



        av_register_all();
        LOGI("AUDIO DECODER");
        printf("Audio decoding\n");
        int err;
        err = av_open_input_file(&pFormatCtx, filename, NULL, 0, NULL);
        if (err!=0) {
            LOGI("COULD NOT AV_OPEN file");
            return;
        }
        if(av_find_stream_info(pFormatCtx)<0) {
                LOGE("Unable to get stream info");
                return;
        }
        int audioStream = -1;
        int i;

        for (i = 0; i < pFormatCtx->nb_streams; i++) {
            if (pFormatCtx->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO) {
                audioStream = i;
                break;
            }
        }
        if (audioStream == -1) {
            LOGE("Unable to find audio stream");
            return;
        }
        LOGI("Audio stream is [%d]", audioStream);

        pCodecCtx=pFormatCtx->streams[audioStream]->codec;
        codec = avcodec_find_decoder(pCodecCtx->codec_id);
        /* find the mpeg audio decoder */
       // codec = avcodec_find_decoder(CODEC_ID_AAC);
        if (!codec) {
            LOGI("NO CODEC");
            fprintf(stderr, "codec not found\n");
            return;
        }

        //c= avcodec_alloc_context();
        c = pCodecCtx;

        /* open it */
        if (avcodec_open(c, codec) < 0) {
            fprintf(stderr, "could not open codec\n");
            LOGI("NOT LOADING CODEC");
            return;
        }

        outbuf = malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);

        f = fopen(filename, "rb");
        if (!f) {
            fprintf(stderr, "could not open %s\n", filename);
            LOGI("COULD NOT OPEN FILE");
            return;
        }

        /* decode until eof */
        avpkt.data = inbuf;
        avpkt.size = fread(inbuf, 1, AUDIO_INBUF_SIZE, f);

        while (avpkt.size > 0) {
            LOGI("............................." + avpkt.size);
            out_size = AVCODEC_MAX_AUDIO_FRAME_SIZE;
            len = avcodec_decode_audio3(c, (short *)outbuf, &out_size, &avpkt);
            if (len < 0) {
                fprintf(stderr, "Error while decoding\n");
                LOGI("ERROR DECODING, error: %d", len);
                return;
            }
            if (out_size > 0) {
                /* if a frame has been decoded, output it */
                jbyte *bytes = (*env)->GetByteArrayElements(env, array, NULL);
                memcpy(bytes, outbuf, out_size); //
                (*env)->ReleaseByteArrayElements(env, array, bytes, 0);
                (*env)->CallVoidMethod(env, obj, play, array, out_size);

            }
            avpkt.size -= len;
            avpkt.data += len;
            if (avpkt.size < AUDIO_REFILL_THRESH) {
                /* Refill the input buffer, to avoid trying to decode
                 * incomplete frames. Instead of this, one could also use
                 * a parser, or use a proper container format through
                 * libavformat. */
                memmove(inbuf, avpkt.data, avpkt.size);
                avpkt.data = inbuf;
                len = fread(avpkt.data + avpkt.size, 1,
                            AUDIO_INBUF_SIZE - avpkt.size, f);
                if (len > 0)
                    avpkt.size += len;
            }
        }

        fclose(f);
        free(outbuf);

        avcodec_close(c);
        av_free(c);
    }

2 Answers:

Answer 0 (score: 1)

You create the AudioTrack in streaming mode with the minimum-size buffer. It may be that the code converts some data, then writes some data, and the gaps between those writes show up as glitches in the audio playback. This is especially likely if the conversion has to keep reading from the SD card. You could increase the size of the AudioTrack buffer to see whether this is a factor.
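
A minimal sketch of that experiment, reusing the question's parameters. The TrackFactory class name and the 4x multiplier are placeholders, not a recommendation; the point is just to try something larger than the minimum:

import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioTrack;

public class TrackFactory {
    // Sketch: create the AudioTrack with a buffer larger than the minimum,
    // so short stalls while the decoder reads from the SD card are less
    // likely to cause underruns. The 4x multiplier is an arbitrary test value.
    static AudioTrack createTrack() {
        int minBuf = AudioTrack.getMinBufferSize(44100,
                AudioFormat.CHANNEL_OUT_STEREO,
                AudioFormat.ENCODING_PCM_16BIT);

        return new AudioTrack(AudioManager.STREAM_MUSIC,
                44100,
                AudioFormat.CHANNEL_OUT_STEREO,
                AudioFormat.ENCODING_PCM_16BIT,
                minBuf * 4,
                AudioTrack.MODE_STREAM);
    }
}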

Also, I'm not sure, but I think avcodec_decode_audio3 may set &out_size to a size in a different unit, while you then send it to the AudioTrack as a size in bytes. (Just check the API to see whether it really is bytes.)
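
For reference, AudioTrack.write(byte[], int, int) counts in bytes, so whatever unit the decoder reports out_size in has to end up as a byte count by the time it reaches playSound. A hedged sketch of a more defensive playSound follows; the clamp against buf.length is my addition, not part of the question's code, and track/log are the question's existing field and helper:

// Sketch of a defensive playSound(): AudioTrack.write(byte[], offset, size)
// takes its size in bytes, so `size` must be the byte count actually copied
// into `buf`. The clamp against buf.length is an extra safety check that is
// not in the original code.
void playSound(byte[] buf, int size) {
    if (size > buf.length) {
        log("decoder reported " + size + " bytes but the array only holds " + buf.length);
        size = buf.length;
    }
    if (track.getPlayState() != AudioTrack.PLAYSTATE_PLAYING) {
        track.play();
    }
    int wrote = track.write(buf, 0, size);
    if (wrote != size) {
        log("WRITING: " + wrote + " but size was: " + size);
    }
}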

Answer 1 (score: 1)

Have you tried using the plain CLI interface in a project like this one? You could use a simple file-processor interface: IN = OGG, OUT = raw PCM.

See the CLI run below converting .ogg to PCM. Then do whatever you want with the output.

 rob@ Downloads$ ffmpeg -i Example.ogg -f s16le -acodec pcm_s16le output.raw

ffmpeg version N-35901-g27a3415 Copyright (c) 2000-2012 the FFmpeg developers
  built on Oct  7 2012 12:06:43 with gcc 4.6 (Ubuntu/Linaro 4.6.3-1ubuntu5)
  configuration: --enable-gpl --enable-libfaac --enable-libfdk-aac --enable-libmp3lame --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-librtmp --enable-libtheora --enable-libvorbis --enable-libvpx --enable-x11grab --enable-libx264 --enable-nonfree --enable-version3
  libavutil      51. 73.102 / 51. 73.102
  libavcodec     54. 64.100 / 54. 64.100
  libavformat    54. 29.105 / 54. 29.105
  libavdevice    54.  3.100 / 54.  3.100
  libavfilter     3. 19.102 /  3. 19.102
  libswscale      2.  1.101 /  2.  1.101
  libswresample   0. 16.100 /  0. 16.100
  libpostproc    52.  1.100 / 52.  1.100
[NULL @ 0x186b840] Invalid packet
Input #0, ogg, from 'Example.ogg':
  Duration: 00:00:06.10, start: 0.000000, bitrate: 137 kb/s
    Stream #0:0: Audio: vorbis, 44100 Hz, stereo, s16, 160 kb/s
Output #0, s16le, to 'output.raw':
  Metadata:
    encoder         : Lavf54.29.105
    Stream #0:0: Audio: pcm_s16le, 44100 Hz, stereo, s16, 1411 kb/s
Stream mapping:
  Stream #0:0 -> #0:0 (vorbis -> pcm_s16le)
Press [q] to stop, [?] for help
size=    1054kB time=00:00:06.11 bitrate=1411.2kbits/s    
video:0kB audio:1054kB subtitle:0 global headers:0kB muxing overhead 0.000000%

This assumes you can configure/build ffmpeg for Android with the OGG lib dependency, which may be a tricky process...
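
If you do take that route, the output.raw produced above is interleaved signed 16-bit little-endian stereo at 44100 Hz, so playing it back is just a read-and-write loop. A minimal sketch, assuming the raw file has been pushed somewhere readable on the device; the path argument and the RawPcmPlayer class name are placeholders:

import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioTrack;
import java.io.FileInputStream;
import java.io.IOException;

public class RawPcmPlayer {
    // Sketch: stream a raw s16le / stereo / 44100 Hz file (as produced by the
    // ffmpeg command above) straight into an AudioTrack.
    public static void play(String path) throws IOException {
        int bufSize = AudioTrack.getMinBufferSize(44100,
                AudioFormat.CHANNEL_OUT_STEREO,
                AudioFormat.ENCODING_PCM_16BIT);

        AudioTrack track = new AudioTrack(AudioManager.STREAM_MUSIC,
                44100,
                AudioFormat.CHANNEL_OUT_STEREO,
                AudioFormat.ENCODING_PCM_16BIT,
                bufSize,
                AudioTrack.MODE_STREAM);

        byte[] buf = new byte[bufSize];
        FileInputStream in = new FileInputStream(path);
        try {
            track.play();
            int n;
            while ((n = in.read(buf)) > 0) {
                track.write(buf, 0, n); // write() takes a count in bytes
            }
        } finally {
            in.close();
            track.stop();
            track.release();
        }
    }
}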