iOS:将内存音频数据上传到RTSP服务器

时间:2012-12-12 05:51:31

标签: objective-c ios avfoundation rtsp

在我的应用程序中,我必须从设备的默认麦克风录制声音并将其发送到RTSP服务器以进一步传输。

我的记录器状态结构是:

// Recorder state shared between the setup code and the AudioQueue input callback.
typedef struct AQRecorderState {
    AudioStreamBasicDescription  mDataFormat;                   // PCM format of the captured audio (configured at setup)
    AudioQueueRef                mQueue;                        // recording audio queue created by AudioQueueNewInput
    AudioQueueBufferRef          mBuffers[kNumberRecordBuffers];// buffers cycled between the queue and the callback
    AudioFileID                  mAudioFile;                    // destination file; unused when streaming directly to RTSP
    UInt32                       bufferByteSize;                // size of each queue buffer, computed by DeriveBufferSize
    SInt64                       mCurrentPacket;                // running packet index, advanced by the input callback
    bool                         mIsRunning;                    // true while recording is active
}AQRecorderState;

我的录制设置参数为:

    // 44.1 kHz, mono, 16-bit linear PCM. With one frame per packet,
    // bytes-per-frame == bytes-per-packet == channels * sizeof(SInt16).
    ars.mDataFormat.mSampleRate     = 44100;
    ars.mDataFormat.mChannelsPerFrame   = 1;
    ars.mDataFormat.mFramesPerPacket    = 1;
    ars.mDataFormat.mBitsPerChannel     = 16;
    ars.mDataFormat.mBytesPerFrame      = ars.mDataFormat.mChannelsPerFrame * sizeof (SInt16); // BYTES_PER_FRAME;
    ars.mDataFormat.mBytesPerPacket     = ars.mDataFormat.mChannelsPerFrame * sizeof (SInt16); // BYTES_PER_PACKET;

    /*----------------- FORMAT -------------------*/
    ars.mDataFormat.mFormatID = kAudioFormatLinearPCM;
    // NOTE(review): kLinearPCMFormatFlagIsBigEndian requests big-endian
    // samples on little-endian iOS hardware. If the downstream ffmpeg
    // encoder expects native (little-endian) PCM this flag produces
    // byte-swapped audio — confirm against the encoder's sample format.
    ars.mDataFormat.mFormatFlags = kLinearPCMFormatFlagIsBigEndian | kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;

    // derive the buffer size (fills ars.bufferByteSize for kBufferSeconds of audio)
    DeriveBufferSize(ars.mQueue, ars.mDataFormat, kBufferSeconds, &ars.bufferByteSize);

    // create the queue; AQInputCallback receives the bridged self pointer as
    // its user-data argument. NOTE(review): none of the OSStatus results in
    // this snippet are checked — a failure here leaves ars.mQueue invalid.
    AudioQueueNewInput(&ars.mDataFormat,
                       AQInputCallback,
                       (__bridge void *)(self),
                       NULL,
                       kCFRunLoopCommonModes,
                       0,
                       &ars.mQueue);

// set the magic cookie for the queue
    // NOTE(review): ars.mAudioFile is never opened in the code shown; when
    // streaming directly (no file) this call operates on an uninitialized
    // AudioFileID — verify setMagicCookieForFile tolerates that.
    setMagicCookieForFile(ars.mQueue, ars.mAudioFile);

    // allocate and enque the recording buffers
    for (int i=0; i<kNumberRecordBuffers; i++){
        AudioQueueAllocateBuffer(ars.mQueue, ars.bufferByteSize, &ars.mBuffers[i]);
        AudioQueueEnqueueBuffer(ars.mQueue, ars.mBuffers[i], 0, NULL);
    }

    // set current packet index and run state
    ars.mCurrentPacket = 0;
    ars.mIsRunning = true;

    // start the recording
    AudioQueueStart(ars.mQueue, NULL);

我的输入回调看起来像:

// AudioQueue input callback: encodes each captured PCM buffer with ffmpeg
// and streams the resulting packet to the RTSP output context.
//
// aqRecorderState - bridged AppDelegate owning the AQRecorderState (ars),
//                   the AVFormatContext (oc) and the audio AVStream (audio_st).
// inQ             - the recording queue that produced the buffer.
// inBuffer        - buffer holding mAudioDataByteSize bytes of captured PCM.
// timestamp       - capture time; mSampleTime is in sample frames.
// inNumPackets    - number of audio packets in the buffer (0 => nothing to do).
// mDataFormat     - per-packet descriptions (unused for constant-bitrate PCM).
static void AQInputCallback(void *aqRecorderState,
            AudioQueueRef                        inQ,
            AudioQueueBufferRef                  inBuffer,
            const AudioTimeStamp                 *timestamp,
            unsigned long                        inNumPackets,
            const AudioStreamPacketDescription   *mDataFormat)
{

    NSLog(@"........Callback called");
    AppDelegate *THIS=(__bridge AppDelegate *)aqRecorderState;
    AQRecorderState *pArs = &(THIS->ars);

    if (inNumPackets > 0) {
        write_audio_frame(THIS->oc, THIS->audio_st);
        // Stream audio frame
        uint32_t size = inBuffer->mAudioDataByteSize;

        // Convert the sample-frame timestamp to milliseconds.
        // NOTE(review): the muxer interprets pts/dts in units of
        // audio_st->time_base; if that time base is not 1/1000 these values
        // are misread and packets can appear "out of order" on the server.
        // Rescale with av_rescale_q(..., audio_st->time_base) to be safe.
        long sample_time_ms = (long)(timestamp->mSampleTime * 1000 / SAMPLE_RATE);

        AVCodecContext *codec = THIS->audio_st->codec;

        // Only `size` bytes are ever offered to avcodec_encode_audio as the
        // output limit below, so allocating `size` is sufficient (the
        // original allocated size*100 for no benefit).
        uint8_t *data = av_malloc(size);
        if (!data) {
            fprintf(stderr, "Couldn't allocate data for audio_frame\n");
            exit(1);
        }

        NSLog(@"Size 1 : %d",size);

        AVPacket packet;
        av_init_packet(&packet);
        int encoded = avcodec_encode_audio(codec, data, size, inBuffer->mAudioData);
        if (encoded < 0) {
            // Encoding failed — do not stream a packet with a negative size.
            fprintf(stderr, "avcodec_encode_audio failed\n");
            av_free(data);
            return;
        }
        packet.size = encoded;

        NSLog(@"Size 2: %d",packet.size);

        packet.data = data;
        packet.pts = sample_time_ms;
        packet.dts = sample_time_ms;
        packet.flags |= AV_PKT_FLAG_KEY;
        // NOTE(review): this duration formula mixes bits, bytes and
        // microseconds; for PCM the duration in samples is simply
        // size / mBytesPerFrame — verify against the stream time base.
        packet.duration = (size * 8 * 1024 * 1000000)/BITS_PER_CHANNEL;
        packet.stream_index = THIS->audio_st->index; //audio_st->index

        // Advance the packet index exactly once (the original incremented it
        // both before and after the write, double-counting every buffer).
        pArs->mCurrentPacket += inNumPackets;

        if (av_interleaved_write_frame(THIS->oc, &packet) != 0) {
            av_free(data);  // don't leak the encode buffer on the error path
            exit(1);
        }

        av_free(data);
    }

    // Hand the buffer back to the queue so recording continues; without this
    // the queue starves after the first kNumberRecordBuffers callbacks.
    if (pArs->mIsRunning) {
        AudioQueueEnqueueBuffer(inQ, inBuffer, 0, NULL);
    }
}

我在输入回调中能获取到数据:如果把它写入文件再播放,一切正常;但我的需求不是把它保存到文件,而是直接把数据发送到RTSP服务器。

注意:我正在使用 av_interleaved_write_frame() 向服务器写入帧。我认为问题出在输入回调中构造 AVPacket 的方式上,这就是为什么服务器端会收到"乱序"(out of order)的数据包。

我在互联网上到处搜索但找不到任何有用的东西。如果有人对此有所了解,请提供帮助。

1 个答案:

答案 0 :(得分:0)

你使用的是 TCP 还是 UDP?我认为乱序包和丢包通常是 UDP 传输的问题;必要时 ffmpeg 可以对数据包进行重新排序。