I'm having some trouble understanding how the SDL audio callback works. I have this simple piece of code which is supposed to generate a simple square wave:
#include "SDL.h"
#include "SDL_audio.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
SDL_Surface *screen;
SDL_AudioSpec spec;
Uint32 sound_len=512;
Uint8 *sound_buffer;
int sound_pos = 0;
int counter;
unsigned int phase_delta=600;
unsigned int phase;
unsigned char out;
//Initialization
void init_sdl (void)
{
    if (SDL_Init (SDL_INIT_VIDEO|SDL_INIT_AUDIO) < 0)
        exit (-1);
    atexit (SDL_Quit);
    screen = SDL_SetVideoMode (640, 480, 16, SDL_HWSURFACE);
    if (screen == NULL)
        exit (-1);
}
//Generates a new sample and outputs it to the audio card
void Callback (void *userdata, Uint8 *stream, int len)
{
    Uint8 *waveptr;

    //Generates a new sample
    phase += phase_delta;
    if ((phase>>8)<127) out=255; else out=0;
    //End

    //Output the current sample to the audio card
    waveptr = sound_buffer;
    SDL_MixAudio(stream, waveptr, 1, SDL_MIX_MAXVOLUME);
}
void play (void)
{
    sound_buffer = new Uint8[512];
    sound_len = 512;

    spec.freq = 22050;
    spec.format = AUDIO_S16SYS;
    spec.channels = 1;
    spec.silence = 0;
    spec.samples = 512;
    spec.padding = 0;
    spec.size = 0;
    spec.userdata = 0;
    spec.callback = Callback;

    if (SDL_OpenAudio (&spec, NULL) < 0)
    { //Throw an error
        printf ("I don't think you like this: %s\n", SDL_GetError ());
        exit (-1);
    }
    SDL_PauseAudio (0); //Start the audio
}
int main(int argc, char* argv[])
{
    init_sdl ();
    play ();
    SDL_Delay (250);
    return 0;
}
I know the callback isn't finished, because I don't know how to output to the buffer. Each time the callback is called, the first part of the callback code generates a new sample and stores it in the variable out.

Could anyone modify this code so that each new sample goes from out to the correct position in the audio buffer? Also, please don't rework the code in some complicated way just to generate the square wave - I've already dealt with that part. The wave is generated correctly, and each new sample ends up in the variable out. I just need those samples routed into the audio buffer properly.
Answer 0 (score: 0)
You need to cast stream to the datatype appropriate for actual.format, then overwrite the len / sizeof( <format's datatype> ) values in stream with your samples.
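Stripped down to just that step, the callback skeleton looks something like the sketch below (assuming the obtained format really is AUDIO_S16SYS mono; the zero fill is only a placeholder for your own generator's output):

void Callback( void* userdata, Uint8* stream, int len )
{
    // stream is raw bytes; reinterpret it as the sample type matching actual.format
    short* samples = reinterpret_cast< short* >( stream );
    size_t numSamples = len / sizeof( short );

    // write exactly numSamples values - one per 16-bit mono sample SDL asked for
    for( size_t i = 0; i < numSamples; ++i )
        samples[i] = 0; // silence; put your generator's output here
}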
Your square wave will be hard to hear, because the given algorithm only produces a brief high pulse once every ~7.1 million samples (about 5 minutes @ 22050Hz), when phase wraps around.
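Where those figures come from, assuming phase is a 32-bit unsigned int (a quick sanity check you can compile and run on its own):

#include <cstdio>

int main()
{
    const double sample_rate = 22050.0;
    const double phase_delta = 600.0;

    // phase wraps every 2^32 / phase_delta increments: ~7.16 million samples
    const double wrap_period_samples = 4294967296.0 / phase_delta;
    const double wrap_period_seconds = wrap_period_samples / sample_rate; // ~325 s

    // the output is high only while (phase >> 8) < 127, i.e. phase < 127 * 256
    const double high_samples = (127.0 * 256.0) / phase_delta; // ~54 samples

    printf("wrap every %.0f samples (%.1f s), high for %.0f samples (%.1f ms)\n",
           wrap_period_samples, wrap_period_seconds,
           high_samples, 1000.0 * high_samples / sample_rate);
    return 0;
}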
Try something like this:
#include <SDL.h>
#include <SDL_audio.h>
#include <climits>   // SHRT_MAX
#include <iostream>
using namespace std;
//Generates new samples and outputs them to the audio card
void Callback( void* userdata, Uint8* stream, int len )
{
    // the format of stream depends on actual.format in main()
    // we're assuming it's AUDIO_S16SYS
    short* samples = reinterpret_cast< short* >( stream );
    size_t numSamples = len / sizeof( short );

    const unsigned int phase_delta = 600;
    static unsigned int phase = 0;

    // loop over all our samples
    for( size_t i = 0; i < numSamples; ++i )
    {
        phase += phase_delta;
        short out = 0;
        if( (phase >> 8) < 127 ) out = SHRT_MAX; else out = 0;
        samples[i] = out;
    }
}
int main( int argc, char* argv[] )
{
    if( SDL_Init( SDL_INIT_VIDEO | SDL_INIT_AUDIO ) < 0 )
        return -1;
    atexit( SDL_Quit );

    SDL_Surface* screen = SDL_SetVideoMode( 640, 480, 16, SDL_ANYFORMAT );
    if( screen == NULL )
        return -1;

    SDL_AudioSpec spec;
    spec.freq = 22050;
    spec.format = AUDIO_S16SYS;
    spec.channels = 1;
    spec.samples = 4096;
    spec.callback = Callback;
    spec.userdata = NULL;

    SDL_AudioSpec actual;
    if( SDL_OpenAudio( &spec, &actual ) < 0 )
    {
        cerr << "I don't think you like this: " << SDL_GetError() << endl;
        return -1;
    }
    if( spec.format != actual.format )
    {
        cerr << "format mismatch!" << endl;
        return -1;
    }

    SDL_PauseAudio( 0 );

    SDL_Event ev;
    while( SDL_WaitEvent( &ev ) )
    {
        if( ev.type == SDL_QUIT )
            break;
    }

    SDL_CloseAudio();
    SDL_Quit();
    return 0;
}
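If you want something you can hear right away, one minimal variation is to toggle the output on a fixed period instead of waiting for phase to wrap. The sketch below is just an example (the name SquareCallback and the 25-sample half period are arbitrary; 22050 / (2 * 25) ≈ 441 Hz), it keeps the same cast-and-fill structure as the callback above:

// Drop-in alternative callback: a plain ~441 Hz square wave (AUDIO_S16SYS mono assumed)
void SquareCallback( void* userdata, Uint8* stream, int len )
{
    short* samples = reinterpret_cast< short* >( stream );
    size_t numSamples = len / sizeof( short );

    static size_t pos = 0;
    const size_t half_period = 25; // samples per half cycle (example value)

    // alternate between SHRT_MAX and 0 every half_period samples
    for( size_t i = 0; i < numSamples; ++i, ++pos )
        samples[i] = ( (pos / half_period) & 1 ) ? 0 : SHRT_MAX;
}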