How to use espeak_EVENT_TYPE

Date: 2018-04-28 02:07:00

Tags: c++ events compiler-errors espeak

I am looking for a way to know when espeak has finished speaking. Someone told me to use espeakEVENT_MSG_TERMINATED, but when I try to put this part into my code, it gives me the error shown below:

#include <espeak/speak_lib.h>

espeak_EVENT_TYPE;

    if( espeak_EVENT_TYPE == espeakEVENT_MSG_TERMINATED)
    {
    do something;
    }
  

    application.cpp:31:1: error: declaration does not declare anything [-fpermissive]
     espeak_EVENT_TYPE;
     ^
    application.cpp: In function 'void speech(char*)':
    application.cpp:116:27: error: expected primary-expression before '==' token
         if( espeak_EVENT_TYPE == espeakEVENT_MSG_TERMINATED)

EDIT: This is the simple code I use to drive espeak:

#include <string.h>
#include <malloc.h>
#include <espeak/speak_lib.h>


espeak_POSITION_TYPE position_type;
espeak_AUDIO_OUTPUT output;
char *path=NULL;
int Buflength = 1000, Options=0;
void* user_data;
t_espeak_callback *SynthCallback;
espeak_PARAMETER Parm;



char Voice[] = {"English"};


char text[30] = {"this is an english text"};
unsigned int Size,position=0, end_position=0, flags=espeakCHARS_AUTO, *unique_identifier;




int main(int argc, char* argv[] ) 
{
    output = AUDIO_OUTPUT_PLAYBACK;
    espeak_Initialize(output, Buflength, path, AUDIO_OUTPUT_SYNCHRONOUS ); //Options ); 
    espeak_SetVoiceByName(Voice);
    const char *langNativeString = "en";
    espeak_VOICE voice = {0};
  //  memset(&voice, 0, sizeof(espeak_VOICE));
        voice.languages = langNativeString;
        voice.name = "US";
        voice.variant = 2;
        voice.gender = 1;
        espeak_SetVoiceByProperties(&voice);
    Size = strlen(text)+1;    

    espeak_Synth( text, Size, position, position_type, end_position, flags,unique_identifier, user_data );
    espeak_Synchronize( );

    return 0;
}

EDIT2:

#include <stdio.h>
#include <string.h>
#include <assert.h>

#include <sphinxbase/err.h>
#include <sphinxbase/ad.h>

#include <espeak/speak_lib.h>
#include <string>
#include <iostream>
using namespace std;

#include "pocketsphinx.h"
static ps_decoder_t *ps;
static cmd_ln_t *config;
static FILE *rawfd;

ad_rec_t *ad;

espeak_POSITION_TYPE position_type;
espeak_AUDIO_OUTPUT output;
char *path=NULL;
int Buflength = 1000, Options=0;
void* user_data;
char Voice[] = {"English"};
unsigned int Size,position=0, end_position=0, flags=espeakCHARS_AUTO, *unique_identifier;
t_espeak_callback *SynthCallback;
espeak_PARAMETER Parm;

static void initFuncs()
{

    output = AUDIO_OUTPUT_PLAYBACK;
    espeak_Initialize(output, Buflength, path, AUDIO_OUTPUT_SYNCHRONOUS ); 
    espeak_SetVoiceByName(Voice);
    const char *langNativeString = "en";
    espeak_VOICE voice;
    memset(&voice, 0, sizeof(espeak_VOICE));
        voice.languages = langNativeString;
        voice.name = "US";
        voice.variant = 2;
        voice.gender = 1;
        espeak_SetVoiceByProperties(&voice);

}

int receive_espeak_events(short *wav, int numsamples, espeak_EVENT *event)
{
    while (event->type != espeakEVENT_LIST_TERMINATED) {
        if (event->type == espeakEVENT_MSG_TERMINATED) {
            /* do something */
            ad_start_rec(ad);
        }
        ++event; // Examine the next event.
    }
    return 0; // Continue speaking.
}

static void sleep_msec(int32 ms)
{
    struct timeval tmo;

    tmo.tv_sec = 0;
    tmo.tv_usec = ms * 1000;

    select(0, NULL, NULL, NULL, &tmo);

}

static void speech(char* hyp)
{      
    Size = strlen(hyp)+1;
    espeak_SetSynthCallback(receive_espeak_events);
    espeak_Synth( hyp, Size, position, position_type, end_position, flags,unique_identifier, user_data );
    espeak_Synchronize( );    
}


static void recognize_from_microphone()
{

    ad_rec_t *ad;
    int16 adbuf[2048];
    uint8 utt_started, in_speech;
    int32 k;
    char  *hyp=0;

    if ((ad = ad_open_dev(cmd_ln_str_r(config, "-adcdev"),(int) cmd_ln_float32_r(config,"-samprate"))) == NULL)
        E_FATAL("Failed to open audio device\n");
    if (ad_start_rec(ad) < 0)
        E_FATAL("Failed to start recording\n");

    if (ps_start_utt(ps) < 0)
        E_FATAL("Failed to start utterance\n");


    utt_started = FALSE;
    E_INFO("Ready....\n");

    for (;;) 
    {

        if ((k = ad_read(ad, adbuf, 2048)) < 0)
            E_FATAL("Failed to read audio\n");
        ps_process_raw(ps, adbuf, k, FALSE, FALSE);
        in_speech = ps_get_in_speech(ps);
        if (in_speech && !utt_started) 
        {
            utt_started = TRUE;
            E_INFO("Listening...\n");
        }
        if (!in_speech && utt_started) 
        {

            ps_end_utt(ps);
            hyp = (char*)ps_get_hyp(ps, NULL );
            if (hyp != NULL) 
            {
///////////////////I am passing hyp to espeak heere ////////////////////
             ad_stop_rec(ad);
             speech(hyp);

                printf("%s\n",hyp);
                fflush(stdout);
            //    sleep_msec(3000);

            }

            if (ps_start_utt(ps) < 0)
                E_FATAL("Failed to start utterance\n");
            utt_started = FALSE;
            E_INFO("Ready....\n");

        }
    }//for
    ad_close(ad);
}

int main(int argc, char *argv[])
{
    initFuncs();

                 config = cmd_ln_init(NULL, ps_args(), TRUE,
                 "-hmm", "/home/m/myrobot3/robot/model_parameters/robot.cd_cont_1000",
                     "-lm","/home/m/myrobot3/robot/etc/robot.lm.bin",
                     "-dict", "/home/m/myrobot3/robot/etc/robot.dic",
                     NULL);

    ps = ps_init(config);
    recognize_from_microphone();

    ps_free(ps);
    cmd_ln_free_r(config);

    return 0;
}

ERROR:

    FATAL: "application.cpp", line 163: Failed to read audio

1 Answer:

Answer 0 (score: 1)

espeak_EVENT_TYPE;

This line does not make sense to the compiler. espeak_EVENT_TYPE is a data type; it is not a variable that can be compared against a value such as espeakEVENT_MSG_TERMINATED. To declare a variable of that type, the syntax would be:

espeak_EVENT_TYPE an_event_type {};
if (an_event_type == espeakEVENT_MSG_TERMINATED) {
    /* ... */
}

However, if we do that, the variable an_event_type we have just created will not actually contain any information about any real espeak event. It certainly will not tell us whether any actual message has been terminated.

Receiving real event information from espeak

To get information about whether a message has terminated, the program needs to receive variables of type espeak_EVENT_TYPE from the espeak library itself.

Looking through this header, espeak_EVENT_TYPE is used as part of the espeak_EVENT structure. To receive espeak_EVENT notifications, you have to write a function that the espeak library will call (this is known as a "callback" function), and then register that callback with the library by calling espeak_SetSynthCallback.

From the same header file, the prototype of the callback function must be as follows:

int SynthCallback(short *wav, int numsamples, espeak_EVENT *events);

wav: is the speech sound data which has been produced. NULL indicates that the synthesis has been completed.

numsamples: is the number of entries in wav. This number may vary, may be less than the value implied by the buflength parameter given in espeak_Initialize, and may sometimes be zero (which does NOT indicate end of synthesis).

events: an array of espeak_EVENT items which indicate word and sentence events, and also the occurrence of <mark> and <audio> elements within the text. The list of events is terminated by an event of type = 0.

Callback returns: 0 = continue synthesis, 1 = abort synthesis.
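
As a brief aside on that return value: returning 1 from the callback is how a program stops speech that is already in progress. The sketch below only illustrates that contract; the g_stop_requested flag is a hypothetical name, not part of the espeak API.

#include <espeak/speak_lib.h>

/* Hypothetical flag, set elsewhere in the program when speech should stop. */
static volatile int g_stop_requested = 0;

int abort_aware_callback(short *wav, int numsamples, espeak_EVENT *events)
{
    if (g_stop_requested)
        return 1; /* abort synthesis */
    return 0;     /* continue synthesis */
}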

Putting it all together, we need a function that loops through the events parameter as if it were an array, until it encounters an event of type 0 (espeakEVENT_LIST_TERMINATED). The function then needs to return 0 so that speech continues.

int receive_espeak_events(short *wav, int numsamples, espeak_EVENT *event)
{
    while (event->type != espeakEVENT_LIST_TERMINATED) {
        if (event->type == espeakEVENT_MSG_TERMINATED) {
            /* do something */
        }
        ++event; // Examine the next event.
    }
    return 0; // Continue speaking.
}

To tell espeak to call this function, pass it to espeak_SetSynthCallback before starting any synthesis operation:

espeak_SetSynthCallback(receive_espeak_events);
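
To show how these pieces fit together end to end, here is a minimal, self-contained sketch. It is only an outline under a few assumptions: the message_done flag, the on_espeak_event name, and the voice and text values are illustrative choices, not part of the answer above.

#include <string.h>
#include <espeak/speak_lib.h>

static volatile int message_done = 0; /* set by the callback when the message finishes */

static int on_espeak_event(short *wav, int numsamples, espeak_EVENT *events)
{
    for (espeak_EVENT *e = events; e->type != espeakEVENT_LIST_TERMINATED; ++e) {
        if (e->type == espeakEVENT_MSG_TERMINATED)
            message_done = 1; /* the whole message has finished speaking */
    }
    return 0; /* continue synthesis */
}

int main(void)
{
    const char text[] = "this is an english text";
    unsigned int unique_id = 0;

    espeak_Initialize(AUDIO_OUTPUT_PLAYBACK, 1000, NULL, 0);
    espeak_SetSynthCallback(on_espeak_event); /* register before any synthesis */
    espeak_SetVoiceByName("English");

    espeak_Synth(text, strlen(text) + 1, 0, POS_CHARACTER, 0,
                 espeakCHARS_AUTO, &unique_id, NULL);
    espeak_Synchronize(); /* blocks until the message has been spoken */

    /* message_done is now 1 if espeakEVENT_MSG_TERMINATED was delivered. */
    return 0;
}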