Audio playback on Android using ffmpeg and AudioTrack

Date: 2014-12-15 14:47:18

Tags: android ffmpeg audiotrack

I am trying to play MP3 audio on Android using ffmpeg and AudioTrack, but all I get is garbled audio output. Can anyone suggest where the problem lies? The entire player runs as JNI code, and all other ffmpeg and AudioTrack initialization is done before this thread is started.
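For context, here is a minimal sketch of the kind of JNI setup the thread below relies on: caching a global reference to the Java AudioTrack object and the method ID of AudioTrack.write(byte[], int, int). The p_sys struct and its audio_track/aud_write fields are taken from the code below; how p_sys itself is allocated, and the helper's name, are assumptions. The AudioTrack is expected to be created on the Java side with ENCODING_PCM_16BIT and a sample rate and channel count matching the decoded output.

/* Minimal sketch, assuming p_sys is the same per-player struct used in the
 * decode thread and 'audio_track' is a live android.media.AudioTrack. */
static int init_audio_track_refs(JNIEnv *env, jobject audio_track) {
    /* keep a global ref so the decode thread may call into the object */
    p_sys->audio_track = (*env)->NewGlobalRef(env, audio_track);
    if (!p_sys->audio_track)
        return -1;

    jclass cls = (*env)->GetObjectClass(env, audio_track);
    /* int AudioTrack.write(byte[] audioData, int offsetInBytes, int sizeInBytes) */
    p_sys->aud_write = (*env)->GetMethodID(env, cls, "write", "([BII)I");
    (*env)->DeleteLocalRef(env, cls);
    return p_sys->aud_write ? 0 : -1;
}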

void *decode_audio_thread(void *arg){
  AVPacket packet;
  AVFrame *decoded_frame;
  int index, i = 0, ret;
  bool isEOS = false;
  struct timeval tp;
  int startMs;
  JNIEnv* env = NULL;
  jlong timeout = 100000;
  int got_frame, len, data_size, size;
  int write_size = 0;            /* PCM bytes handed to AudioTrack.write() */
  uint8_t **dst_data = NULL;     /* per-frame resampler output buffer */
  int src_rate = 48000, dst_rate = 44100;
  int src_nb_channels = 0, dst_nb_channels = 0;
  int src_linesize, dst_linesize;
  int src_nb_samples = 1024, dst_nb_samples;
  enum AVSampleFormat src_sample_fmt, dst_sample_fmt;
  int64_t src_ch_layout, dst_ch_layout;
  int dst_bufsize;

  (*myVm)->AttachCurrentThread(myVm, &env, NULL);

  LOGV("Allocating avframe");
  decoded_frame = av_frame_alloc();
  if(decoded_frame)
    LOGV("Allocation done");

  av_init_packet(&packet);

  while(stop){

    ret = av_read_frame(mDataSource, &packet);
    if (ret < 0) {
      LOGV("av_read_frame : returned %d ERROR_END_OF_STREAM=%d\n",ret);
      isEOS = true;
      break;
    }
    LOGV("av_read_frame (%d) size=%d index=%d mAudioIndex=%d\n",i++,packet.size,packet.stream_index,mAudioIndex);

    if(packet.stream_index == mAudioIndex) {
      //LOGV("Decoding Frame size=%d",packet.size);

      len = avcodec_decode_audio4(mAudioTrack, decoded_frame, &got_frame, &packet);
      if(len < 0) {
        LOGV("Error decoding frame, skipping");
        av_free_packet(&packet);   /* don't leak the packet when skipping */
        continue;
      }
      LOGV("avcodec_decode_audio4 ret=%d got_frame=%d packet.size=%d format decoded=%d configured=%d",
           len, got_frame, packet.size, decoded_frame->format, AV_SAMPLE_FMT_S16);
      if(got_frame){

      data_size = av_samples_get_buffer_size(NULL,mAudioTrack->channels , decoded_frame->nb_samples,mAudioTrack->sample_fmt, 1);
      LOGV("av_samples_get_buffer_size data_size=%d decode_len=%d avpacket.len=%d ",data_size,len,packet.size);

      jbyteArray samples_byte_array;

      if(decoded_frame->format != AV_SAMPLE_FMT_S16) {
         src_nb_samples = decoded_frame->nb_samples;
         src_linesize = (int) decoded_frame->linesize;
         src_data = decoded_frame->data;
         if (decoded_frame->channel_layout == 0) {
           decoded_frame->channel_layout = av_get_default_channel_layout(decoded_frame->channels);
         }
         /* create resampler context */
         swr_ctx = swr_alloc();
         if (!swr_ctx) {
           LOGV("sw_ctx allocation failed");
           break;
         }
         src_rate = decoded_frame->sample_rate;
         dst_rate = decoded_frame->sample_rate;
         src_ch_layout = decoded_frame->channel_layout;
         dst_ch_layout = decoded_frame->channel_layout;
         av_opt_set_int(swr_ctx, "in_channel_layout", src_ch_layout, 0);
         av_opt_set_int(swr_ctx, "out_channel_layout", dst_ch_layout, 0);
         av_opt_set_int(swr_ctx, "in_sample_rate", src_rate, 0);
         av_opt_set_int(swr_ctx, "out_sample_rate", dst_rate, 0);
         src_sample_fmt = (enum AVSampleFormat) decoded_frame->format;
         dst_sample_fmt = AV_SAMPLE_FMT_S16;
         av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", src_sample_fmt, 0);
         av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", dst_sample_fmt, 0);
         LOGV("swr_init: src_rate=%d dst_rate=%d src_ch_layout=%lld dst_ch_layout=%lld src_sample_fmt=%d dst_sample_fmt=%d",
              src_rate, dst_rate, (long long)src_ch_layout, (long long)dst_ch_layout, src_sample_fmt, dst_sample_fmt);
         /* initialize the resampling context */
         if ((ret = swr_init(swr_ctx)) < 0) {
           LOGV("Failed to initialize the resampling context\n");
           break;
         }
         /* the decoded frame's own buffers are passed to swr_convert() below,
          * so no separate source buffer is allocated here */
         src_nb_channels = av_get_channel_layout_nb_channels(src_ch_layout);
         /* compute the number of converted samples, including any samples
          * already buffered inside the resampler, so the output buffer is
          * large enough for all converted input samples */
         dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, src_rate) +
                                         src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);
         dst_nb_channels = av_get_channel_layout_nb_channels(dst_ch_layout);
         ret = av_samples_alloc_array_and_samples(&dst_data, &dst_linesize, dst_nb_channels,
                                                  dst_nb_samples, dst_sample_fmt, 0);
         if (ret < 0) {
           LOGV("Could not allocate destination samples\n");
           break;
         }
         /* convert to destination format */
         ret = swr_convert(swr_ctx, dst_data, dst_nb_samples, (const uint8_t **)decoded_frame->data, src_nb_samples);
         if (ret < 0) {
           LOGV("Error while converting\n");
           break;
         }
         dst_bufsize = av_samples_get_buffer_size(&dst_linesize, dst_nb_channels,
                                                  ret, dst_sample_fmt, 1);
         if (dst_bufsize < 0) {
           LOGV("Could not get sample buffer size\n");
           break;
         }
         write_size = dst_bufsize;
         samples_byte_array = (*env)->NewByteArray(env, dst_bufsize);
         if (samples_byte_array == NULL) {
           LOGV("Cannot allocate byte array");
           break;
         }
         jbyte *jni_samples = (*env)->GetByteArrayElements(env, samples_byte_array, NULL);
         LOGV("Copying data %d", dst_bufsize);
         memcpy(jni_samples, dst_data[0], dst_bufsize);
         LOGV("Releasing jni_samples");
         (*env)->ReleaseByteArrayElements(env, samples_byte_array, jni_samples, 0);
         /* free the per-frame destination buffer and resampler context */
         if (dst_data)
           av_freep(&dst_data[0]);
         av_freep(&dst_data);
         swr_free(&swr_ctx);

      }  
      else {
          /* frame is already S16: copy data_size PCM bytes, not 'len'
           * ('len' is the number of compressed packet bytes consumed) */
          samples_byte_array = (*env)->NewByteArray(env, data_size);
          if (samples_byte_array == NULL) {
            LOGV("Cannot allocate byte array");
            break;
          }
          jbyte *jni_samples = (*env)->GetByteArrayElements(env, samples_byte_array, NULL);
          LOGV("Copying data %d", data_size);
          memcpy(jni_samples, decoded_frame->data[0], data_size);
          LOGV("Releasing jni_samples");
          (*env)->ReleaseByteArrayElements(env, samples_byte_array, jni_samples, 0);
          write_size = data_size;
      }
      //(*env)->SetByteArrayRegion(env, p_sys->jb_array[0], 0, data_size, audio_frame->data[0]);

      LOGV("Audio: AudioTrack.write size=%d\n",len);
      int ret = (*env)->CallIntMethod(env, p_sys->audio_track, p_sys->aud_write,samples_byte_array,0, len);
      if ((*env)->ExceptionOccurred(env)) {
          LOGV( "Exception in AudioTrack.write failed ret=%d",ret);
          (*env)->ExceptionClear(env);
          p_sys->error_state = true;
          goto error;
      }
      LOGV("Audio: ret=%d Done AudioTrack...Aud deleting localref\n",ret);
     (*env)->DeleteLocalRef(env, samples_byte_array);       
      }    
    }
    av_free_packet(&packet);
  }
error:
  LOGV("Closing");
  avformat_close_input(&mDataSource);   /* also frees mDataSource */
  avcodec_close(mAudioTrack);
  av_frame_free(&decoded_frame);
  LOGV("DetachCurrentThread\n");
  (*myVm)->DetachCurrentThread(myVm);
  return NULL;
}
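As a design note, the resampler does not have to be rebuilt and torn down for every frame: allocating one SwrContext up front (and freeing it once at the end) avoids the per-frame churn and makes the swr_get_delay() accounting meaningful across frames. A hedged one-time-setup sketch, assuming mAudioTrack is the decoder's AVCodecContext as the calls above suggest, and with a hypothetical helper name:

/* Sketch: create the resampler once, before the decode loop, converting
 * whatever the decoder produces to interleaved S16 at the decoder's own
 * sample rate, matching the per-frame settings used in the loop above. */
static struct SwrContext *create_resampler(AVCodecContext *c) {
    int64_t layout = c->channel_layout ?
        c->channel_layout : av_get_default_channel_layout(c->channels);
    struct SwrContext *ctx = swr_alloc_set_opts(NULL,
            layout, AV_SAMPLE_FMT_S16, c->sample_rate,   /* output */
            layout, c->sample_fmt,     c->sample_rate,   /* input  */
            0, NULL);
    if (ctx && swr_init(ctx) < 0)
        swr_free(&ctx);   /* sets ctx to NULL on failure */
    return ctx;
}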

0 Answers:

No answers yet.