I have integrated the FFmpeg library into my project, and I can already retrieve media file information. But now I need to use the FFmpeg library to play an MP3 file in Android through the AudioTrack class.
For that I have to pass a byte buffer to AudioTrack, but I don't know how to get the decoded byte buffer out of FFmpeg and use it with AudioTrack. I also want the file to start playing immediately.
Here is my AudioTrack code in Java:
AudioTrack track;
int bufferSize = AudioTrack.getMinBufferSize(44100,
        AudioFormat.CHANNEL_CONFIGURATION_MONO, AudioFormat.ENCODING_PCM_16BIT);
track = new AudioTrack(AudioManager.STREAM_MUSIC, 44100,
        AudioFormat.CHANNEL_CONFIGURATION_MONO, AudioFormat.ENCODING_PCM_16BIT,
        bufferSize, AudioTrack.MODE_STREAM);
// Start playback; data written below is played as it arrives.
track.play();
while (!stream_is_over) {
    // Copy the decoded raw buffer from native code into "buffer" .....
    ............
    track.write(buffer, 0, readBytes);
}
Can anyone please give me working code for playing an MP3 file with AudioTrack? I have searched a lot but have not found a correct answer.
Answer 0 (score: 3)
I managed it by filling a byte buffer with the decoded audio and playing it on the fly with the AudioTrack class. What still does not work is pausing/stopping the file: calling stop or pause on the AudioTrack has no effect (see the Java-side sketch after the native code below).
Here is the code that passes the byte buffer to my Java class:
#include <assert.h>
#include <jni.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <android/log.h>
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#define LOG_TAG "mylib"
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO, LOG_TAG, __VA_ARGS__)
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR, LOG_TAG, __VA_ARGS__)
#define AUDIO_INBUF_SIZE 20480
#define AUDIO_REFILL_THRESH 4096
void Java_ru_dzakhov_ffmpeg_test_MainActivity_createEngine(JNIEnv* env,
		jclass clazz) {
	avcodec_init();
	av_register_all();
}
void Java_ru_dzakhov_ffmpeg_test_MainActivity_loadFile(JNIEnv* env,
		jobject obj, jstring file, jbyteArray array) {
	jboolean isfilenameCopy;
	const char *filename = (*env)->GetStringUTFChars(env, file,
			&isfilenameCopy);
	int audioStreamIndex;
	AVCodec *codec;
	AVFormatContext *pFormatCtx = NULL;
	AVCodecContext *aCodecCtx;
	int out_size, len, lError;
	FILE *f;
	uint8_t *outbuf;
	uint8_t inbuf[AUDIO_INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
	AVPacket avpkt;

	/* Look up the Java callback playSound(byte[], int) once, up front. */
	jclass cls = (*env)->GetObjectClass(env, obj);
	jmethodID play = (*env)->GetMethodID(env, cls, "playSound", "([BI)V");

	LOGE("source file name is %s", filename);
	/* Safe to call again even though createEngine() already did. */
	avcodec_init();
	av_register_all();
	LOGE("Stage 1");

	/* Read the container format and stream information of the source file. */
	if ((lError = av_open_input_file(&pFormatCtx, filename, NULL, 0, NULL)) != 0) {
		LOGE("Error open source file: %d", lError);
		exit(1);
	}
	if ((lError = av_find_stream_info(pFormatCtx)) < 0) {
		LOGE("Error find stream information: %d", lError);
		exit(1);
	}
	LOGE("audio format: %s", pFormatCtx->iformat->name);
	LOGE("audio bitrate: %d", pFormatCtx->bit_rate);

	audioStreamIndex = av_find_best_stream(pFormatCtx, AVMEDIA_TYPE_AUDIO,
			-1, -1, &codec, 0);
	if (audioStreamIndex < 0 || !codec) {
		LOGE("no audio stream or decoder found");
		exit(1);
	}
	LOGE("audio codec: %s", codec->name);

	/* Open the decoder on the audio stream's codec context. */
	aCodecCtx = pFormatCtx->streams[audioStreamIndex]->codec;
	if (avcodec_open(aCodecCtx, codec) < 0) {
		LOGE("cannot open the audio codec!");
		exit(1);
	}
	LOGE("Stage 2");

	outbuf = malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
	f = fopen(filename, "rb");
	if (!f) {
		LOGE("could not open %s", filename);
		exit(1);
	}

	/* Decode until EOF: MP3 frames are self-delimiting, so raw file data
	 * can be fed straight to the decoder packet by packet. */
	av_init_packet(&avpkt);
	avpkt.data = inbuf;
	avpkt.size = fread(inbuf, 1, AUDIO_INBUF_SIZE, f);
	LOGE("Stage 5");
	while (avpkt.size > 0) {
		out_size = (AVCODEC_MAX_AUDIO_FRAME_SIZE / 3) * 2;
		len = avcodec_decode_audio3(aCodecCtx, (int16_t *) outbuf,
				&out_size, &avpkt);
		LOGE("data_size %d len %d", out_size, len);
		if (len < 0) {
			LOGE("DECODING ERROR %d", len);
			exit(1);
		}
		if (out_size > 0) {
			/* A frame has been decoded: copy the PCM samples into the Java
			 * byte array (which must be at least AVCODEC_MAX_AUDIO_FRAME_SIZE
			 * bytes long) and hand them to playSound() for AudioTrack. */
			jbyte *bytes = (*env)->GetByteArrayElements(env, array, NULL);
			memcpy(bytes, outbuf, out_size);
			(*env)->ReleaseByteArrayElements(env, array, bytes, 0);
			(*env)->CallVoidMethod(env, obj, play, array, out_size);
		}
		avpkt.size -= len;
		avpkt.data += len;
		if (avpkt.size < AUDIO_REFILL_THRESH) {
			/* Refill the input buffer, to avoid trying to decode
			 * incomplete frames. Instead of this, one could also use
			 * a parser, or use a proper container format through
			 * libavformat. */
			memmove(inbuf, avpkt.data, avpkt.size);
			avpkt.data = inbuf;
			len = fread(avpkt.data + avpkt.size, 1,
					AUDIO_INBUF_SIZE - avpkt.size, f);
			if (len > 0)
				avpkt.size += len;
		}
	}
	LOGE("Stage 12");

	fclose(f);
	free(outbuf);
	avcodec_close(aCodecCtx);
	av_close_input_file(pFormatCtx);
	(*env)->ReleaseStringUTFChars(env, file, filename);
}
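For reference, here is a minimal sketch of what the Java side receiving these buffers could look like. The field names and the volatile paused flag are assumptions, not code from the question, and the sample rate and channel configuration must match what the decoder actually outputs. Checking a flag before each write is one way to make pause take effect, because the AudioTrack keeps playing as long as the native loop keeps calling playSound():
private AudioTrack track;
private volatile boolean paused = false; // assumed flag, set from the UI

void initTrack() {
    int bufferSize = AudioTrack.getMinBufferSize(44100,
            AudioFormat.CHANNEL_CONFIGURATION_MONO,
            AudioFormat.ENCODING_PCM_16BIT);
    track = new AudioTrack(AudioManager.STREAM_MUSIC, 44100,
            AudioFormat.CHANNEL_CONFIGURATION_MONO,
            AudioFormat.ENCODING_PCM_16BIT,
            bufferSize, AudioTrack.MODE_STREAM);
    track.play();
}

// Called from native code; the signature must match the
// GetMethodID lookup above: "([BI)V".
void playSound(byte[] buf, int size) {
    if (paused) {
        track.pause();
        track.flush(); // drop buffered samples so the pause is audible at once
        return;        // the native decode loop should also be told to stop
    }
    track.write(buf, 0, size);
}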
Answer 1 (score: 0)
I don't have a clue about programming on Android, but Google just introduced a new low-level media API at I/O 2012.
Here is a link to the YouTube video: http://www.youtube.com/watch?v=YmCqJlzIUXs
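That API is MediaCodec (together with MediaExtractor), available from Android 4.1 (API 16). As a rough sketch of how it could replace the FFmpeg path entirely; the wiring to AudioTrack is my assumption, "path" and "track" are placeholders (track set up as in the question), and error handling is omitted:
// Decode an audio file with MediaExtractor/MediaCodec and feed the
// PCM output to a streaming AudioTrack.
MediaExtractor extractor = new MediaExtractor();
extractor.setDataSource(path);                    // "path" is assumed
MediaFormat format = extractor.getTrackFormat(0); // assume track 0 is audio
String mime = format.getString(MediaFormat.KEY_MIME);
extractor.selectTrack(0);

MediaCodec codec = MediaCodec.createDecoderByType(mime);
codec.configure(format, null, null, 0);
codec.start();

ByteBuffer[] inputBuffers = codec.getInputBuffers();
ByteBuffer[] outputBuffers = codec.getOutputBuffers();
MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
boolean inputDone = false;

while (true) {
    if (!inputDone) {
        int inIndex = codec.dequeueInputBuffer(10000);
        if (inIndex >= 0) {
            int sampleSize = extractor.readSampleData(inputBuffers[inIndex], 0);
            if (sampleSize < 0) {
                // No more input: signal end of stream to the decoder.
                codec.queueInputBuffer(inIndex, 0, 0, 0,
                        MediaCodec.BUFFER_FLAG_END_OF_STREAM);
                inputDone = true;
            } else {
                codec.queueInputBuffer(inIndex, 0, sampleSize,
                        extractor.getSampleTime(), 0);
                extractor.advance();
            }
        }
    }
    int outIndex = codec.dequeueOutputBuffer(info, 10000);
    if (outIndex >= 0) {
        byte[] pcm = new byte[info.size];
        outputBuffers[outIndex].get(pcm);
        outputBuffers[outIndex].clear();
        track.write(pcm, 0, pcm.length); // "track" set up as in the question
        codec.releaseOutputBuffer(outIndex, false);
        if ((info.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0) break;
    } else if (outIndex == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED) {
        outputBuffers = codec.getOutputBuffers();
    }
}
codec.stop();
codec.release();
extractor.release();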