I'm learning the Java OpenAL bindings (version 3.1.2) and I want to make a custom (synthesized) sound.
I have searched the web for this, but I haven't found what I'm looking for - all the tutorials show how to play a sound file.
I want to create a sound that I can "program". So far I understand that a sound file's data gets passed into a buffer. In my case, how do I figure out what data has to go into those buffers?
Here is the basis of my application. It is written in Groovy.
package alexiy.openal

import javafx.application.Application
import javafx.event.ActionEvent
import javafx.event.EventHandler
import javafx.scene.Scene
import javafx.scene.control.Button
import javafx.scene.layout.GridPane
import javafx.stage.Stage
import javafx.stage.WindowEvent
import org.lwjgl.openal.*
import org.lwjgl.system.libc.LibCStdlib

import java.nio.ByteBuffer

class OALP extends Application {
    static ALCapabilities alCapabilities
    static ALCCapabilities alcCapabilities

    @Override
    void start(Stage primaryStage) throws Exception {
        // Open the default output device and create a context on it
        String deviceinfo = ALC10.alcGetString(0, EnumerateAllExt.ALC_DEFAULT_ALL_DEVICES_SPECIFIER)
        println('Device: ' + deviceinfo)
        long device = ALC10.alcOpenDevice(deviceinfo)
        if (device == 0) throw new RuntimeException("Couldn't find such device")
        println device
        int[] attributes = [ALC11.ALC_STEREO_SOURCES, 1, 0]
        long context = ALC10.alcCreateContext(device, attributes)
        println context
        ALC10.alcMakeContextCurrent(context)
        alcCapabilities = ALC.createCapabilities(device)
        AL.createCapabilities(alcCapabilities)
        alCapabilities = AL.getCapabilities()

        GridPane gridPane = new GridPane()
        gridPane.setVgap(6)
        gridPane.setHgap(6)
        Scene scene = new Scene(gridPane, 300, 100)
        primaryStage.setScene(scene)
        Button decode = new Button("Decode")
        decode.setOnAction(new EventHandler<ActionEvent>() {
            @Override
            void handle(ActionEvent event) {
                File track = new File("Fields.ogg")
            }
        })
        gridPane.add(decode, 0, 0)
        primaryStage.show()
        primaryStage.setOnCloseRequest(new EventHandler<WindowEvent>() {
            @Override
            void handle(WindowEvent event) {
                // Tear down OpenAL when the window closes
                ALC10.alcMakeContextCurrent(0)
                ALC10.alcDestroyContext(context)
                ALC10.alcCloseDevice(device)
            }
        })

        // Synthesize a frequency sweep as 16-bit mono PCM
        int seconds = 4
        int samplerate = 44100
        int samples = seconds * samplerate
        int buffer = AL10.alGenBuffers()
        float freq = 100
        float incr = 0.1f
        // 16-bit samples need 2 bytes each
        ByteBuffer pcm = LibCStdlib.malloc(samples * 2)
        for (int i = 0; i < samples; i++) {
            short sample = (short) (32760 * Math.sin((2 * Math.PI * freq) / samplerate * i))
            pcm.putShort(i * 2, sample)
            freq += incr
            if (100.0 > freq || freq > 5000.0) {
                incr *= -1.0f
            }
        }
        // The last argument is the sample rate, not the buffer size
        AL10.alBufferData(buffer, AL10.AL_FORMAT_MONO16, pcm, samplerate)
        LibCStdlib.free(pcm)
        int source = AL10.alGenSources()
        AL10.alSourcei(source, AL10.AL_BUFFER, buffer)
        AL10.alSourcePlay(source)
        Thread.sleep(3000)
        printError()
    }

    static void printError() { println AL10.alGetString(AL10.alGetError()) }
}
Answer 0 (score: 0):
Here is some C code I use to render audio with OpenAL, where the audio is synthesized locally by calling the sin function in a loop ... converting it to Java should be straightforward (a rough LWJGL sketch follows after the C listing).
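The key point: the buffer you hand to alBufferData is just raw PCM, i.e. one signed 16-bit integer per sample when using the MONO16 format. For a pure tone of frequency f, sample number i is

    sample[i] = amplitude * sin(2 * pi * f * i / sample_rate)

with the amplitude at most 32767 for 16-bit audio. The loop below evaluates exactly that formula while slowly sweeping f up and down.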
// sudo apt-get install libopenal-dev    # install OpenAL on linux
// gcc -o openal_play_monday openal_play_monday.c -lopenal -lm

#include <stdio.h>
#include <stdlib.h>   // gives malloc
#include <math.h>
#include <unistd.h>   // gives sleep

#ifdef __APPLE__
#include <OpenAL/al.h>
#include <OpenAL/alc.h>
#elif __linux
#include <AL/al.h>
#include <AL/alc.h>
#endif

ALCdevice  * openal_output_device;
ALCcontext * openal_output_context;

ALuint internal_buffer;
ALuint streaming_source[1];

int al_check_error(const char * given_label) {
    ALenum al_error;
    al_error = alGetError();
    if (AL_NO_ERROR != al_error) {
        printf("ERROR - %s (%s)\n", alGetString(al_error), given_label);
        return al_error;
    }
    return 0;
}

void MM_init_al() {
    const char * defname = alcGetString(NULL, ALC_DEFAULT_DEVICE_SPECIFIER);

    openal_output_device  = alcOpenDevice(defname);
    openal_output_context = alcCreateContext(openal_output_device, NULL);
    alcMakeContextCurrent(openal_output_context);

    // setup buffer and source
    alGenBuffers(1, & internal_buffer);
    al_check_error("failed call to alGenBuffers");
}

void MM_exit_al() {
    ALenum errorCode = 0;

    // Stop the sources
    alSourceStopv(1, & streaming_source[0]);

    int ii;
    for (ii = 0; ii < 1; ++ii) {
        alSourcei(streaming_source[ii], AL_BUFFER, 0);
    }

    // Clean-up
    alDeleteSources(1, & streaming_source[0]);
    alDeleteBuffers(1, & internal_buffer);
    errorCode = alGetError();

    alcMakeContextCurrent(NULL);
    errorCode = alGetError();
    alcDestroyContext(openal_output_context);
    alcCloseDevice(openal_output_device);
}

void MM_render_one_buffer() {
    /* Fill buffer with Sine-Wave */
    // float freq = 440.f;
    float freq = 100.f;
    float incr_freq = 0.1f;

    int seconds = 4;
    // unsigned sample_rate = 22050;
    unsigned sample_rate = 44100;
    double my_pi = 3.14159;
    size_t buf_size = seconds * sample_rate;   // number of samples

    // allocate PCM audio buffer
    short * samples = malloc(sizeof(short) * buf_size);

    printf("\nhere is freq %f\n", freq);

    int i = 0;
    for (; i < buf_size; ++i) {
        samples[i] = 32760 * sin( (2.f * my_pi * freq) / sample_rate * i );

        // sweep the frequency up and down between 100 Hz and 5000 Hz
        freq += incr_freq;
        // incr_freq += incr_freq;
        // freq *= factor_freq;
        if (100.0 > freq || freq > 5000.0) {
            incr_freq *= -1.0f;
        }
    }

    /* upload buffer to OpenAL - the size argument is in bytes, not samples */
    alBufferData(internal_buffer, AL_FORMAT_MONO16, samples,
                 buf_size * sizeof(short), sample_rate);
    al_check_error("populating alBufferData");

    free(samples);

    /* Set-up sound source and play buffer */
    // ALuint src = 0;
    // alGenSources(1, &src);
    // alSourcei(src, AL_BUFFER, internal_buffer);
    alGenSources(1, & streaming_source[0]);
    alSourcei(streaming_source[0], AL_BUFFER, internal_buffer);
    // alSourcePlay(src);
    alSourcePlay(streaming_source[0]);

    // ---------------------

    ALint current_playing_state;
    alGetSourcei(streaming_source[0], AL_SOURCE_STATE, & current_playing_state);
    al_check_error("alGetSourcei AL_SOURCE_STATE");

    while (AL_PLAYING == current_playing_state) {
        printf("still playing ... so sleep\n");

        sleep(1);   // should use a thread sleep NOT sleep() for a more responsive finish

        alGetSourcei(streaming_source[0], AL_SOURCE_STATE, & current_playing_state);
        al_check_error("alGetSourcei AL_SOURCE_STATE");
    }

    printf("end of playing\n");

    /* Dealloc OpenAL */
    MM_exit_al();

} // MM_render_one_buffer

int main() {
    MM_init_al();
    MM_render_one_buffer();
}
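Since the question uses LWJGL, here is a minimal, untested Java sketch of the same idea. It is an assumption-laden translation, not the library's canonical example: the class name SineSynth is made up, it plays a plain 440 Hz tone instead of the sweep, and it targets the LWJGL 3 AL10/ALC10 bindings.

import org.lwjgl.BufferUtils;
import org.lwjgl.openal.*;

import java.nio.IntBuffer;
import java.nio.ShortBuffer;

// Hypothetical demo class - plays a fixed 440 Hz tone rather than the sweep above
public class SineSynth {
    public static void main(String[] args) throws InterruptedException {
        // Open the default device and create a context on it
        long device = ALC10.alcOpenDevice((CharSequence) null);
        long context = ALC10.alcCreateContext(device, (IntBuffer) null);
        ALC10.alcMakeContextCurrent(context);
        AL.createCapabilities(ALC.createCapabilities(device));

        // Render 4 seconds of a 440 Hz sine tone as 16-bit mono PCM
        int sampleRate = 44100;
        int sampleCount = 4 * sampleRate;
        double freq = 440.0;
        ShortBuffer pcm = BufferUtils.createShortBuffer(sampleCount);
        for (int i = 0; i < sampleCount; i++) {
            pcm.put(i, (short) (32760 * Math.sin(2.0 * Math.PI * freq * i / sampleRate)));
        }

        // Upload the PCM data and play it on a source
        int buffer = AL10.alGenBuffers();
        AL10.alBufferData(buffer, AL10.AL_FORMAT_MONO16, pcm, sampleRate);
        int source = AL10.alGenSources();
        AL10.alSourcei(source, AL10.AL_BUFFER, buffer);
        AL10.alSourcePlay(source);

        // Poll until the source stops playing
        while (AL10.alGetSourcei(source, AL10.AL_SOURCE_STATE) == AL10.AL_PLAYING) {
            Thread.sleep(100);
        }

        // Clean up
        AL10.alDeleteSources(source);
        AL10.alDeleteBuffers(buffer);
        ALC10.alcMakeContextCurrent(0);
        ALC10.alcDestroyContext(context);
        ALC10.alcCloseDevice(device);
    }
}

For sounds longer than a few seconds you would normally queue several small buffers on the source with alSourceQueueBuffers and refill them as they are consumed, instead of uploading one big buffer.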