How can I play an analog signal continuously with QAudioOutput?

Date: 2019-05-11 03:11:01

Tags: qt, c++11

I have written C++ code in Qt that generates an engine sound signal at a sampling frequency of 16384 samples per second. My timer fires once per second, and each second I receive a new vector of 16384 samples. I am trying to play this signal with QAudioOutput. I do get audio output every second, but the sound is not continuous.

    qreal sampleRate = 16384;
    qreal duration = 1.000;
    qreal frequency = 1000;

    // --- generate a QVector<double> xengine that contains 16384 samples of data ---

    // --- transfer the QVector data to a QByteArray ---
    QByteArray* byteBuffer = new QByteArray();
    int n = xengine.size();
    byteBuffer->resize(n);

    for (int i = 0; i < xengine.size(); i++)
    {
        qreal y = xengine[i];          // transfer data to y
        (*byteBuffer)[i] = (qint16)y;
    }

    // use qint16 (instead of quint16), because our waveform goes above and below zero

    // create and set up a QAudioFormat object
    QAudioFormat audioFormat;
    audioFormat.setSampleRate(static_cast<int>(sampleRate));
    audioFormat.setChannelCount(1);
    audioFormat.setSampleSize(8);
    audioFormat.setCodec("audio/pcm");
    audioFormat.setByteOrder(QAudioFormat::LittleEndian);
    audioFormat.setSampleType(QAudioFormat::SignedInt);

    // create a QAudioDeviceInfo object, to make sure that our audioFormat is supported by the device
    QAudioDeviceInfo deviceInfo(QAudioDeviceInfo::defaultOutputDevice());

    if (!deviceInfo.isFormatSupported(audioFormat))
    {
        qWarning() << "Raw audio format not supported by backend, cannot play audio.";
        return;
    }

    QBuffer* input = new QBuffer(byteBuffer);
    input->open(QIODevice::ReadOnly);
    QAudioOutput* audio = new QAudioOutput(audioFormat, this);

    audio->start(input);

1 Answer:

Answer 0 (score: 0)

In pull mode, the audio data is read until the end of the buffer is reached. You can then restart and play the buffer again, but this can leave a brief moment of silence between the end of the previous playback and the restart of the audio.
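As an illustration of that pull-mode restart, here is a minimal sketch (assuming Qt 5's QAudioOutput/QBuffer API and the audio and input objects from the question's code); the silence appears around the stop/start calls:

    // Restart pull-mode playback whenever the buffer is exhausted:
    // QAudioOutput enters IdleState once the QBuffer has no more data.
    QObject::connect(audio, &QAudioOutput::stateChanged, [=](QAudio::State state){
        if (state == QAudio::IdleState)
        {
            audio->stop();          // stop the drained output
            input->seek(0);         // rewind the QBuffer
            audio->start(input);    // play it again; a short silence can occur here
        }
    });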

Instead, I suggest you use push mode, which gives you finer control over the audio playback.

See the following example:

EDIT: This answer was edited to show a way of appending data on demand for playback. If you already have your own sound generator, ignore the tone generator provided here and replace it with yours.

#include <QtCore>
#include <QtMultimedia>

static inline int timeToSize(int ms, const QAudioFormat &format)
{
    return ((format.channelCount() * (format.sampleSize() / 8) * format.sampleRate()) * ms / 1000);
}

struct AudioContext
{
    QAudioOutput *m_audio_output;
    QIODevice *m_output_device;

    QByteArray m_buffer;

    QAudioDeviceInfo m_output_device_info;
    QAudioFormat m_format;

    int m_time_to_buffer;

    int m_size_to_buffer;

    /*** Tone generator variables ***/
    uint m_frequency;

    uint m_duration_msecs;

    bool m_reverse = false;
    /*** Tone generator variables ***/

    bool m_play_called = false;
};

QByteArray toneGenerator(AudioContext *ctx)
{
    QByteArray data;

    QDataStream write_stream(&data, QIODevice::ReadWrite);
    write_stream.setVersion(QDataStream::Qt_5_0); //Protocol for version 5.0
    write_stream.setByteOrder(QDataStream::LittleEndian);

    //Tone generator from http://www.cplusplus.com/forum/general/129827/

    const uint samplerate = uint(ctx->m_format.sampleRate());
    const uint channels = uint(ctx->m_format.channelCount());

    const double pi = M_PI;
    const qint16 amplitude = INT16_MAX;

    const uint frequency = ctx->m_frequency;
    const uint n_msecs = ctx->m_duration_msecs;

    const int n_samples = int(channels * samplerate * (n_msecs / double(1000)));

    int index = n_samples;

    double freq = frequency;
    double d = (samplerate / freq); //Samples per full cycle of the tone
    int c = 0;

    for (int j = 0; j < index; j++)
    {
        double deg = double(360) / d; //Degrees to advance per sample
        write_stream << qint16(qSin((c++ * double(deg)) * pi / double(180)) * amplitude);
    }

    return data;
}

void play(AudioContext *ctx)
{
    //Mark that the pending async play() call has now run
    ctx->m_play_called = false;

    while (ctx->m_buffer.size() < ctx->m_size_to_buffer)
    {
        QByteArray data = toneGenerator(ctx);

        //APPEND HERE YOUR DATA
        ctx->m_buffer.append(data);

        if (!ctx->m_reverse)
        {
            if (ctx->m_frequency < 20 * 1000)
                ctx->m_frequency += 100;
            else //Reverse the sweep direction
                ctx->m_reverse = !ctx->m_reverse;
        }
        else
        {
            if (ctx->m_frequency > 100)
                ctx->m_frequency -= 100;
            else //Reverse the sweep direction
                ctx->m_reverse = !ctx->m_reverse;
        }
    }

    int readlen = ctx->m_audio_output->periodSize();

    int chunks = ctx->m_audio_output->bytesFree() / readlen;

    //Play data while it's available in the output device
    while (chunks)
    {
        //Get chunk from the buffer
        QByteArray samples = ctx->m_buffer.mid(0, readlen);
        int len = samples.size();
        ctx->m_buffer.remove(0, len);

        //Write the chunk of samples to the output device
        if (len)
        {
            ctx->m_output_device->write(samples);
        }

        //If chunk is smaller than the output chunk size, exit loop
        if (len != readlen)
            break;

        //Decrease the available number of chunks
        chunks--;
    }
}

void preplay(AudioContext *ctx)
{
    //Verify if exists a pending call to play function
    //If not, call the play function async
    if (!ctx->m_play_called)
    {
        ctx->m_play_called = true;
        QTimer::singleShot(0, [=]{play(ctx);});
    }
}

void init(AudioContext *ctx)
{
    /***** INITIALIZE OUTPUT *****/

    //Check if the format is supported by the chosen output device
    if (!ctx->m_output_device_info.isFormatSupported(ctx->m_format))
    {
        qDebug() << "Format not supported by the output device";
        return;
    }

    //Initialize the audio output device
    ctx->m_audio_output = new QAudioOutput(ctx->m_output_device_info, ctx->m_format, qApp);

    //Compute the size in bytes to be buffered based on the current format
    ctx->m_size_to_buffer = int(timeToSize(ctx->m_time_to_buffer, ctx->m_format));

    ctx->m_output_device = ctx->m_audio_output->start();

    if (!ctx->m_output_device)
    {
        qDebug() << "Failed to open output audio device";
        return;
    }

    //Timer that helps to keep playing data while it's available on the internal buffer
    QTimer *timer_play = new QTimer(qApp);
    timer_play->setTimerType(Qt::PreciseTimer);
    QObject::connect(timer_play, &QTimer::timeout, [=]{
        preplay(ctx);
    });
    timer_play->start(10);

    /***** INITIALIZE OUTPUT *****/

    qDebug() << "Playing...";
}

int main(int argc, char *argv[])
{
    QCoreApplication a(argc, argv);

    AudioContext ctx;

    QAudioFormat format;
    format.setCodec("audio/pcm");
    format.setSampleRate(8000);
    format.setChannelCount(1);
    format.setSampleSize(16);
    format.setByteOrder(QAudioFormat::LittleEndian);
    format.setSampleType(QAudioFormat::SignedInt);

    ctx.m_format = format;

    ctx.m_output_device_info = QAudioDeviceInfo::defaultOutputDevice();

    ctx.m_time_to_buffer = 1000; //Needed time in buffer before play(ms)

    ctx.m_duration_msecs = 20; //Duration of each tone(ms)

    ctx.m_frequency = 100; //Initial tone frequency(hz)

    init(&ctx);

    return a.exec();
}
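
As a usage note (a hypothetical helper, not part of the answer's code): the question's per-second QVector<double> of 16384 samples could take the place of toneGenerator() by converting each sample to 16-bit little-endian PCM before appending it to ctx->m_buffer; you would also set the QAudioFormat sample rate to 16384 to match. The scaling below assumes the samples are normalized to [-1, 1]; drop it if your values are already in the qint16 range.

    //Hypothetical helper: convert one second of engine samples (QVector<double>)
    //into 16-bit little-endian PCM bytes matching the format used above.
    QByteArray vectorToPcm(const QVector<double> &xengine)
    {
        QByteArray data;
        QDataStream stream(&data, QIODevice::WriteOnly);
        stream.setByteOrder(QDataStream::LittleEndian);

        //Assumes samples normalized to [-1, 1]
        for (double y : xengine)
            stream << qint16(qBound(-1.0, y, 1.0) * INT16_MAX);

        return data;
    }

    //Inside play(), instead of appending the tone generator's output:
    //    ctx->m_buffer.append(vectorToPcm(xengine));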

For more information on the push and pull modes, see: