I'm trying to use an AudioRecord object, but I can't record voice successfully.
I looked at similar questions and made sure to apply their suggestions, but I still get the same error.
I added the required permission to the manifest file, and since my device runs Android M I also added the following runtime-permission code based on suggestions I came across, but it still doesn't help:
private void requestRecordAudioPermission() {
    // Check the API version; do nothing if the API version is < 23.
    int currentapiVersion = android.os.Build.VERSION.SDK_INT;
    if (currentapiVersion > android.os.Build.VERSION_CODES.LOLLIPOP) {
        if (ContextCompat.checkSelfPermission(this, Manifest.permission.RECORD_AUDIO) != PackageManager.PERMISSION_GRANTED) {
            // Should we show an explanation?
            if (ActivityCompat.shouldShowRequestPermissionRationale(this, Manifest.permission.RECORD_AUDIO)) {
                // Show an explanation to the user *asynchronously* -- don't block
                // this thread waiting for the user's response! After the user
                // sees the explanation, try again to request the permission.
            } else {
                // No explanation needed, we can request the permission.
                ActivityCompat.requestPermissions(this, new String[]{Manifest.permission.RECORD_AUDIO}, 1);
            }
        }
    }
}

@Override
public void onRequestPermissionsResult(int requestCode, String[] permissions, int[] grantResults) {
    switch (requestCode) {
        case 1: {
            // If the request is cancelled, the result arrays are empty.
            if (grantResults.length > 0 && grantResults[0] == PackageManager.PERMISSION_GRANTED) {
                // Permission was granted.
                Log.d("Activity", "Granted!");
            } else {
                // Permission denied; disable the functionality that depends on it.
                Log.d("Activity", "Denied!");
                finish();
            }
            return;
        }
        // Other 'case' lines to check for other permissions this app might request.
    }
}
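For reference, the permission declared in the manifest is the following (as far as I can tell this is the only permission AudioRecord needs):

<uses-permission android:name="android.permission.RECORD_AUDIO" />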
Here is the error and the log output I get:
Could not get audio input for session 35305, record source 1, sample rate 48000, format 0x1, channel mask 0x10, flags 0
E/AudioRecord-JNI: Error creating AudioRecord instance: initialization check failed with status -22.
E/android.media.AudioRecord: Error code -20 when initializing native AudioRecord object.
I/myRecordThread.Run: Successfully created new AudioRecord
I/myRecordThread.Run: AudioRecord NOT_INITIALIZED
I/myRecordThread.Run: Released AudioRecord
Here is the code. This is how I create the recording thread:
public myRecordThread() {
    audioQuality = 60000;

    // Get the highest recording sample rate supported by the device.
    for (final int s : new int[] { 48000, 44100, 22050, 11025, 8000 }) {
        bufferSize = AudioRecord.getMinBufferSize(
                s,
                AudioFormat.CHANNEL_CONFIGURATION_MONO,
                AudioFormat.ENCODING_PCM_16BIT);
        if (bufferSize > 0) {
            recordingSampleRate = s;
            break;
        }
    }

    if (bufferSize < 0) {
        throw new RuntimeException("No recording sample rate found");
    }

    Log.i("mumbleclient", "Selected recording sample rate: " + recordingSampleRate);

    frameSize = recordingSampleRate / 100;
    buffer = new short[frameSize];

    celtMode = Native.celt_mode_create(
            MumbleProtocol.SAMPLE_RATE, /* 48000 */
            MumbleProtocol.FRAME_SIZE); /* 480 */
    celtEncoder = Native.celt_encoder_create(celtMode, 1);
    Native.celt_encoder_ctl(
            celtEncoder,
            celtConstants.CELT_SET_PREDICTION_REQUEST,
            0);
    Native.celt_encoder_ctl(
            celtEncoder,
            celtConstants.CELT_SET_VBR_RATE_REQUEST,
            audioQuality);

    if (recordingSampleRate != TARGET_SAMPLE_RATE) {
        speexResamplerState = Native.speex_resampler_init(
                1,
                recordingSampleRate,
                TARGET_SAMPLE_RATE,
                3);
    } else {
        speexResamplerState = 0;
    }

    Log.i("myRecordThread.Run", "Successfully created voiceRecord thread");
}
And this is the thread's run method:
@Override
public final void run() {
    Log.i("myRecordThread.Run", "Just Started voiceRecord Thread");
    final boolean running = true;
    android.os.Process.setThreadPriority(android.os.Process.THREAD_PRIORITY_URGENT_AUDIO);
    Log.i("myRecordThread.Run", "Thread given Urgent Priority");

    AudioRecord ar = null;
    try {
        ar = new AudioRecord(
                MediaRecorder.AudioSource.MIC,
                recordingSampleRate,
                AudioFormat.CHANNEL_CONFIGURATION_MONO,
                AudioFormat.ENCODING_PCM_16BIT,
                64 * 1024);
        Log.i("myRecordThread.Run", "Successfully created new AudioRecord");

        if (ar.getState() != AudioRecord.STATE_INITIALIZED) {
            Log.i("myRecordThread.Run", "AudioRecord NOT_INITIALIZED");
            return;
        }

        ar.startRecording();
        Log.i("myRecordThread.Run", "Started Recording...");

        while (running && !Thread.interrupted()) {
            final int read = ar.read(buffer, 0, frameSize);
            if (read == AudioRecord.ERROR_BAD_VALUE ||
                    read == AudioRecord.ERROR_INVALID_OPERATION) {
                throw new RuntimeException("" + read);
            }

            short[] out;
            if (speexResamplerState != 0) {
                out = resampleBuffer;
                final int[] in_len = new int[] { buffer.length };
                final int[] out_len = new int[] { out.length };
                Native.speex_resampler_process_int(
                        speexResamplerState,
                        0,
                        buffer,
                        in_len,
                        out,
                        out_len);
            } else {
                out = buffer;
            }

            final int compressedSize = Math.min(audioQuality / (100 * 8), 127);
            final byte[] compressed = new byte[compressedSize];
            synchronized (Native.class) {
                Native.celt_encode(
                        celtEncoder,
                        out,
                        compressed,
                        compressedSize);
            }

            outputQueue.add(compressed);
            if (outputQueue.size() < framesPerPacket) {
                continue;
            }

            final byte[] outputBuffer = new byte[1024];
            final PacketDataStream pds = new PacketDataStream(outputBuffer);

            while (!outputQueue.isEmpty()) {
                int flags = 0;
                flags |= getCodec() << 5;
                outputBuffer[0] = (byte) flags;
                pds.rewind();
                // Skip the flags byte.
                pds.next();
                seq += framesPerPacket;
                pds.writeLong(seq);

                for (int i = 0; i < framesPerPacket; ++i) {
                    final byte[] tmp = outputQueue.poll();
                    if (tmp == null) {
                        break;
                    }
                    int head = (short) tmp.length;
                    if (i < framesPerPacket - 1) {
                        head |= 0x80;
                    }
                    pds.append(head);
                    pds.append(tmp);
                }
                sendUdpMessage(outputBuffer, pds.size(), false);
            }
        }
    } catch (UnknownHostException e) {
        e.printStackTrace();
    } finally {
        if (ar != null) {
            ar.release();
            Log.i("myRecordThread.Run", "Released AudioRecord");
        }
    }
}