Feeding an input stream from PortAudio to webrtc::AudioProcessing

Asked: 2017-03-05 14:15:46

Tags: c++ cygwin webrtc portaudio

I am using the Cygwin package libwebrtc-audio-processing-devel-0.3-1 to work with webrtc's AudioProcessing class.

I am reading input from my microphone with PortAudio and want to pass it to webrtc for a VAD check, but I don't know how to get my data into the ProcessStream method.

#define SAMPLE_RATE       (32000)
#define FRAMES_PER_BUFFER   (320)
#define PA_SAMPLE_TYPE  paFloat32
#define SAMPLE_SIZE (4)

...

err = Pa_ReadStream( stream, sampleBlock, FRAMES_PER_BUFFER );

// sampleBlock should now point to 320 32-bit floats

....
apm->ProcessStream( <What goes here?> )

Here are the ProcessStream definitions:
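(Roughly, from audio_processing.h in libwebrtc-audio-processing 0.3; abridged, comments mine:)

// First overload: processes a 10 ms frame of 16-bit audio, in place.
virtual int ProcessStream(AudioFrame* frame) = 0;

// Second overload: accepts deinterleaved float audio in the range [-1, 1].
virtual int ProcessStream(const float* const* src,
                          size_t samples_per_channel,
                          int input_sample_rate_hz,
                          ChannelLayout input_layout,
                          int output_sample_rate_hz,
                          ChannelLayout output_layout,
                          float* const* dest) = 0;

// Third overload: as above, with explicit stream configurations.
virtual int ProcessStream(const float* const* src,
                          const StreamConfig& input_config,
                          const StreamConfig& output_config,
                          float* const* dest) = 0;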

When I try to instantiate an AudioFrame for the first overload:

AudioFrame frame;

I get the following error:

main.cpp:161:22: error: aggregate ‘webrtc::AudioFrame frame’ has incomplete type and cannot be defined
   webrtc::AudioFrame frame;

The second and third overloads want the data as "const float* const* src". Does that mean I need a constant pointer to constant float pointers? That has me a bit confused.
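As far as I can tell it just means one pointer per channel of deinterleaved float samples (not the sample buffer itself cast to a pointer type, which is what my commented-out attempt below does). With the mono paFloat32 stream above, I'd expect a call shaped roughly like this unverified sketch:

// Unverified sketch: build an array of per-channel pointers over the
// existing buffer. With one channel there is nothing to deinterleave.
float* channels[1] = { reinterpret_cast<float*>(sampleBlock) };
webrtc::StreamConfig config(SAMPLE_RATE, 1 /* mono */);
apm->ProcessStream(channels, config, config, channels);  // in place

The 320-frame buffer is 10 ms of audio at 32 kHz, which is the chunk size ProcessStream works on.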

The complete example below (available on Pastebin) retrieves input from the default input device and sets webrtc up for the ProcessStream call. My attempted call is included but commented out, since it causes a segfault.

The code requires PortAudio and libwebrtc-audio-processing-devel-0.3-1. I compile it on cygwin with:

g++ main_example.cpp -o main -L./ -lcygportaudio-2 -lrt -lm -pthread -I/usr/include/webrtc_audio_processing/ -DWEBRTC_WIN -std=gnu++11 -L/bin/ -lcygwebrtc_audio_processing-1
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "portaudio.h"
#include <sys/time.h>
#include <windows.h>
#include <windowsx.h>
#include <unistd.h>

#include "webrtc/modules/audio_processing/include/audio_processing.h"
using webrtc::AudioProcessing;
using webrtc::AudioFrame;
using webrtc::GainControl;
using webrtc::NoiseSuppression;
using webrtc::EchoCancellation;
using webrtc::VoiceDetection;


#define SAMPLE_RATE       (32000)
#define FRAMES_PER_BUFFER   (320)
#define DITHER_FLAG           (0)

#define PA_SAMPLE_TYPE  paFloat32
#define SAMPLE_SIZE (4)
#define SAMPLE_SILENCE  (0)
#define PRINTF_S_FORMAT "%8f"

/*******************************************************************/
int main(int argc, char **argv);
/* error handling */
int xrun(PaStream *stream, int err, char* sampleBlock);
void error1(PaStream *stream, char* sampleBlock);
void error2(PaStream *stream, int err);
int main (int argc, char **argv)
{

    PaStreamParameters inputParameters;
    PaStream *stream = NULL;
    PaError err;
    const PaDeviceInfo* inputInfo;
    char *sampleBlock = NULL;
    int i;
    int numBytes;
    int numChannels;

    err = Pa_Initialize();
    if( err != paNoError ) error2(stream, err);

    inputParameters.device = Pa_GetDefaultInputDevice(); /* default input device */
    inputInfo = Pa_GetDeviceInfo( inputParameters.device );
    numChannels = inputInfo->maxInputChannels;
    inputParameters.channelCount = 1;// numChannels;
    inputParameters.sampleFormat = PA_SAMPLE_TYPE;
    inputParameters.suggestedLatency = inputInfo->defaultHighInputLatency ;
    inputParameters.hostApiSpecificStreamInfo = NULL;
    printf( "Input device # %d.\n", inputParameters.device );
    printf( "    Name: %s\n", inputInfo->name );

    /* -- setup -- */

    err = Pa_OpenStream(
              &stream,
              &inputParameters,
              NULL,
              SAMPLE_RATE,
              FRAMES_PER_BUFFER,
              paClipOff,      /* we won't output out of range samples so don't bother clipping them */
              NULL, /* no callback, use blocking API */
              NULL ); /* no callback, so no callback userData */
    if( err != paNoError ) error2(stream, err);

    numBytes = FRAMES_PER_BUFFER * numChannels * SAMPLE_SIZE ;
    sampleBlock = (char *) malloc( numBytes );
    if( sampleBlock == NULL )
    {
        printf("Could not allocate record array.\n");
        error1(stream, sampleBlock);
    }

    err = Pa_StartStream( stream );
    if( err != paNoError ) error1(stream, sampleBlock);

        // Configure webrtc::audioprocessing
        AudioProcessing* apm = AudioProcessing::Create();

        apm->high_pass_filter()->Enable(true);

        apm->echo_cancellation()->enable_drift_compensation(false);
        apm->echo_cancellation()->Enable(true);

        apm->noise_suppression()->set_level(apm->noise_suppression()->kHigh);
        apm->noise_suppression()->Enable(true);

        apm->gain_control()->set_analog_level_limits(0, 255);
        apm->gain_control()->set_mode(apm->gain_control()->kAdaptiveAnalog);
        apm->gain_control()->Enable(true);

        apm->voice_detection()->Enable(true);

        int analog_level = apm->gain_control()->stream_analog_level();
        int delay_ms = 20;
        int voiceDetected = 0;


    long int holdTime = 600; //milliseconds
    int prevVoiceDetected = -1;
    int holding = 0;
    int transmitting = 0;
    int prevTransmitting = -1;
    struct timeval startHoldTime, currentTime;

        while (1) {
                // Read in input frames
        err = Pa_ReadStream( stream, sampleBlock, FRAMES_PER_BUFFER );
        if( err ) xrun(stream, err, sampleBlock);

                // Run webrtc vad
                apm->set_stream_delay_ms(delay_ms);
                apm->gain_control()->set_stream_analog_level(analog_level);

                /*
                // A apm->ProcessStream call is required here. The one I've tried here seg faults, probably due to those casts I don't understand
                webrtc::StreamConfig inputConfig = webrtc::StreamConfig(SAMPLE_RATE, numChannels, false);
                webrtc::StreamConfig outputConfig = webrtc::StreamConfig(SAMPLE_RATE, numChannels, false);
                apm->ProcessStream((const float* const*)sampleBlock, inputConfig, outputConfig, (float* const*)sampleBlock);
                */


                analog_level = apm->gain_control()->stream_analog_level();
                voiceDetected = apm->voice_detection()->stream_has_voice();

                transmitting = 0;
                if (voiceDetected) {
                        transmitting = 1;
                        holding = 0;
                } else if (holding) {
                        gettimeofday (&currentTime, NULL);
                        long elapsedHoldTime =  (((currentTime.tv_sec - startHoldTime.tv_sec)*1000000L+currentTime.tv_usec) - startHoldTime.tv_usec)/1000;
                        //printf("elapsedtime: %d\n", elapsedHoldTime); fflush(stdout);
                        if (elapsedHoldTime > holdTime) {
                                //printf("completedhold\n"); fflush(stdout);
                                holding = 0;
                        } else {
                                //printf("holding\n"); fflush(stdout);
                                transmitting = 1;
                        }
                } else if (prevVoiceDetected) {
                        holding = 1;
                        gettimeofday (&startHoldTime, NULL);
                        transmitting = 1;
                }
                prevVoiceDetected = voiceDetected;

                if (prevTransmitting != transmitting) {
                        printf("Transmitting: %s\n", (transmitting) ? "true" : "false"); fflush(stdout);
                }
                prevTransmitting = transmitting;
    }
    printf("Wire off.\n"); fflush(stdout);

    err = Pa_StopStream( stream );
    if( err != paNoError ) error1(stream, sampleBlock);

    free( sampleBlock );

    Pa_Terminate();
    return 0;

}

int xrun(PaStream *stream, int err, char* sampleBlock) {
    printf("err = %d\n", err); fflush(stdout);
    if( stream ) {
       Pa_AbortStream( stream );
       Pa_CloseStream( stream );
    }
    free( sampleBlock );
    Pa_Terminate();
    if( err & paInputOverflow )
       fprintf( stderr, "Input Overflow.\n" );
    if( err & paOutputUnderflow )
       fprintf( stderr, "Output Underflow.\n" );
    return -2;
}

void error1(PaStream *stream, char* sampleBlock) {
    free( sampleBlock );
    exit(-1);
}
void error2(PaStream *stream, int err) {
    if( stream ) {
       Pa_AbortStream( stream );
       Pa_CloseStream( stream );
    }
    Pa_Terminate();
    fprintf( stderr, "An error occured while using the portaudio stream\n" );
    fprintf( stderr, "Error number: %d\n", err );
    fprintf( stderr, "Error message: %s\n", Pa_GetErrorText( err ) );
    exit(-1);
}

1 Answer:

Answer 0 (score: 1)

I contacted @matzeri privately, and he pointed me in the right direction with a working example in gstreamer. Including module_common_types.h, adding the WEBRTC_AUDIO_PROCESSING_ONLY_BUILD define, and fixing the string-comparison macro definitions for cygwin in webrtc/common_types.h allowed me to define an AudioFrame and then make the corresponding ProcessStream call.

Here is a working example of VAD with PortAudio on cygwin, using libwebrtc-audio-processing-devel-0.3-1!

Note: I had to modify webrtc/common_types.h so that it applies the following definitions rather than the win32 versions:

#define STR_CASE_CMP(s1, s2) ::strcasecmp(s1, s2)
#define STR_NCASE_CMP(s1, s2, n) ::strncasecmp(s1, s2, n)
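(Presumably the stock header selects the win32 ::_stricmp/::_strnicmp variants whenever WEBRTC_WIN is defined, which the compile command below requires, and Cygwin's g++ does not provide those, hence the patch.)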

main.cpp

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "portaudio.h"
#include <sys/time.h>
#include <windows.h>
#include <windowsx.h>
#include <unistd.h>

#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/system_wrappers/include/trace.h"
using webrtc::AudioProcessing;
using webrtc::AudioFrame;
using webrtc::GainControl;
using webrtc::NoiseSuppression;
using webrtc::EchoCancellation;
using webrtc::VoiceDetection;


#define SAMPLE_RATE       (32000)
#define FRAMES_PER_BUFFER   (320)
#define DITHER_FLAG           (0)

#define PA_SAMPLE_TYPE  paInt16
#define SAMPLE_SIZE (2)
#define SAMPLE_SILENCE  (0)
#define PRINTF_S_FORMAT "%d"

/*******************************************************************/
int main(int argc, char **argv);
/* error handling */
int xrun(PaStream *stream, int err, char* sampleBlock);
void error1(PaStream *stream, char* sampleBlock);
void error2(PaStream *stream, int err);
int main (int argc, char **argv)
{

    PaStreamParameters inputParameters;
    PaStream *stream = NULL;
    PaError err;
    const PaDeviceInfo* inputInfo;
    char *sampleBlock = NULL;
    int i;
    int numBytes;
    int numChannels;

    err = Pa_Initialize();
    if( err != paNoError ) error2(stream, err);

    inputParameters.device = Pa_GetDefaultInputDevice(); /* default input device */
    inputInfo = Pa_GetDeviceInfo( inputParameters.device );
    numChannels = inputInfo->maxInputChannels;
    inputParameters.channelCount = 1;// numChannels;
    inputParameters.sampleFormat = PA_SAMPLE_TYPE;
    inputParameters.suggestedLatency = inputInfo->defaultHighInputLatency ;
    inputParameters.hostApiSpecificStreamInfo = NULL;
    printf( "Input device # %d.\n", inputParameters.device );
    printf( "    Name: %s\n", inputInfo->name );

    /* -- setup -- */

    err = Pa_OpenStream(
              &stream,
              &inputParameters,
              NULL,
              SAMPLE_RATE,
              FRAMES_PER_BUFFER,
              paClipOff,      /* we won't output out of range samples so don't bother clipping them */
              NULL, /* no callback, use blocking API */
              NULL ); /* no callback, so no callback userData */
    if( err != paNoError ) error2(stream, err);

    numBytes = FRAMES_PER_BUFFER * numChannels * SAMPLE_SIZE ;
    sampleBlock = (char *) malloc( numBytes );
    if( sampleBlock == NULL )
    {
        printf("Could not allocate record array.\n");
        error1(stream, sampleBlock);
    }

    err = Pa_StartStream( stream );
    if( err != paNoError ) error1(stream, sampleBlock);

        // Configure webrtc::audioprocessing
        AudioProcessing* apm = AudioProcessing::Create();

        apm->high_pass_filter()->Enable(true);

        apm->echo_cancellation()->enable_drift_compensation(false);
        apm->echo_cancellation()->Enable(true);

        apm->noise_suppression()->set_level(apm->noise_suppression()->kHigh);
        apm->noise_suppression()->Enable(true);

        apm->gain_control()->set_analog_level_limits(0, 255);
        apm->gain_control()->set_mode(apm->gain_control()->kAdaptiveAnalog);
        apm->gain_control()->Enable(true);

        apm->voice_detection()->Enable(true);

        int analog_level = apm->gain_control()->stream_analog_level();
        int delay_ms = 20;
        int voiceDetected = 0;


    long int holdTime = 600; //milliseconds
    int prevVoiceDetected = -1;
    int holding = 0;
    int transmitting = 0;
    int prevTransmitting = -1;
    struct timeval startHoldTime, currentTime;
        int webrtcErr = 0;

        while (1) {
                // Read in input frames
        err = Pa_ReadStream( stream, sampleBlock, FRAMES_PER_BUFFER );
        if( err ) xrun(stream, err, sampleBlock);

                // Run webrtc vad
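                // set_stream_delay_ms is required once echo cancellation is
                // enabled: it is the delay between the far-end (render) and
                // near-end (capture) streams. No render stream is fed here,
                // so 20 ms is just a nominal value.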
                apm->set_stream_delay_ms(delay_ms);
                apm->gain_control()->set_stream_analog_level(analog_level);

                webrtc::AudioFrame frame;
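                // AudioFrame carries exactly 10 ms of audio: at 32 kHz that
                // is 320 samples per channel, matching FRAMES_PER_BUFFER.
                // Caveat: num_channels_ should match the channel count the
                // stream was opened with (1 above), otherwise the memcpy
                // below copies more bytes than Pa_ReadStream wrote.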
                frame.num_channels_ = numChannels;
                frame.sample_rate_hz_ = SAMPLE_RATE;
                frame.samples_per_channel_ = FRAMES_PER_BUFFER;
                memcpy(frame.data_, sampleBlock, numBytes);

                if ((webrtcErr = apm->ProcessStream(&frame)) < 0) {
                        printf("Error Code: %d\n", webrtcErr); fflush(stdout);
                        return -1;
                }

                analog_level = apm->gain_control()->stream_analog_level();
                voiceDetected = apm->voice_detection()->stream_has_voice();

                transmitting = 0;
                if (voiceDetected) {
                        transmitting = 1;
                        holding = 0;
                } else if (holding) {
                        gettimeofday (&currentTime, NULL);
                        long elapsedHoldTime =  (((currentTime.tv_sec - startHoldTime.tv_sec)*1000000L+currentTime.tv_usec) - startHoldTime.tv_usec)/1000;
                        //printf("elapsedtime: %d\n", elapsedHoldTime); fflush(stdout);
                        if (elapsedHoldTime > holdTime) {
                                //printf("completedhold\n"); fflush(stdout);
                                holding = 0;
                        } else {
                                //printf("holding\n"); fflush(stdout);
                                transmitting = 1;
                        }
                } else if (prevVoiceDetected) {
                        holding = 1;
                        gettimeofday (&startHoldTime, NULL);
                        transmitting = 1;
                }
                prevVoiceDetected = voiceDetected;

                if (prevTransmitting != transmitting) {
                        printf("Transmitting: %s\n", (transmitting) ? "true" : "false"); fflush(stdout);
                }
                prevTransmitting = transmitting;
    }
    printf("Wire off.\n"); fflush(stdout);

    err = Pa_StopStream( stream );
    if( err != paNoError ) error1(stream, sampleBlock);

    free( sampleBlock );

    Pa_Terminate();
    return 0;

}

int xrun(PaStream *stream, int err, char* sampleBlock) {
    printf("err = %d\n", err); fflush(stdout);
    if( stream ) {
       Pa_AbortStream( stream );
       Pa_CloseStream( stream );
    }
    free( sampleBlock );
    Pa_Terminate();
    if( err & paInputOverflow )
       fprintf( stderr, "Input Overflow.\n" );
    if( err & paOutputUnderflow )
       fprintf( stderr, "Output Underflow.\n" );
    return -2;
}

void error1(PaStream *stream, char* sampleBlock) {
    free( sampleBlock );
    exit(-1);
}
void error2(PaStream *stream, int err) {
    if( stream ) {
       Pa_AbortStream( stream );
       Pa_CloseStream( stream );
    }
    Pa_Terminate();
    fprintf( stderr, "An error occured while using the portaudio stream\n" );
    fprintf( stderr, "Error number: %d\n", err );
    fprintf( stderr, "Error message: %s\n", Pa_GetErrorText( err ) );
    exit(-1);
}

Compiled with:

g++ main.cpp -o main -L./ -lcygportaudio-2 -lrt -lm -pthread -L./cygspeexdsp-1 -I/usr/include/webrtc_audio_processing/ -DWEBRTC_WIN -DWEBRTC_AUDIO_PROCESSING_ONLY_BUILD -std=gnu++11 -L/bin/ -lcygwebrtc_audio_processing-1