Poor audio streaming quality over a DatagramSocket with a fixed datagram packet length in Android

Date: 2016-10-31 09:37:36

Tags: android udp audio-streaming audio-recording datagram

I am streaming audio for a phone call by sending it over UDP in DatagramPackets. I can hear the audio coming from the other end, but the other party cannot hear me.

The packet size is fixed at 340 bytes. The first 20 bytes carry connection-setup values such as the source and destination addresses; the next 320 bytes carry the audio data, so the audio payload is always exactly 320 bytes.
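
For reference, a minimal sketch of how such a 340-byte packet could be laid out with a ByteBuffer (the exact field order produced by WiphoneProp.ToBuffer() is an assumption here; the byte-size comments in the code below simply add up to a 20-byte header plus the 320-byte voice payload):

    import java.nio.ByteBuffer;
    import java.nio.ByteOrder;

    // Hypothetical layout: 4 x 1-byte fields + 4 x 4-byte fields = 20-byte header,
    // followed by a fixed 320-byte audio payload, 340 bytes in total.
    static byte[] buildPacket(byte opCode, byte operand, byte selfState, byte callId,
                              int checkSum, int wiphoneId, int clientId, int sequence,
                              byte[] voice /* exactly 320 bytes */) {
        ByteBuffer bb = ByteBuffer.allocate(340).order(ByteOrder.LITTLE_ENDIAN);
        bb.put(opCode).put(operand).put(selfState).put(callId);                  // 4 x 1 byte
        bb.putInt(checkSum).putInt(wiphoneId).putInt(clientId).putInt(sequence); // 4 x 4 bytes
        bb.put(voice, 0, 320);                                                   // fixed 320-byte audio block
        return bb.array();
    }

My audio transmission thread is below: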

        @Override
        public void run() {


            long seq=0;
            byte[] buffer = new byte[BUF_SIZE];

            WiphoneProp callProp=new WiphoneProp();
            callProp.setOpCode(WiPhonePacket.OpCodes.Call.getValue());//1 byte
            callProp.setOperand(WiPhonePacket.OperandCodes.Talk.getValue());//1 byte
            callProp.setSelfState(0);//1 byte
            callProp.setCallId(1);//1 byte

            callProp.setCheckSum(BitConverter.checkSum(deviceSignature.getSignatureData(),0,deviceSignature.getSignatureData().length));//4 byte

            try {

                int offset=0;
                audioRecorder.startRecording();
                while (mic) {
                    // Capture audio from the mic and transmit it

                    try {
                        callProp.setWiphoneId(ByteBuffer.wrap(getBroadcastQuadIp().getAddress()).order(ByteOrder.LITTLE_ENDIAN).getInt());//4 byte
                        callProp.setClientId(ByteBuffer.wrap(getLocalIp().getAddress()).order(ByteOrder.LITTLE_ENDIAN).getInt());//4 byte
                    } catch (IOException e) {
                        e.printStackTrace();
                    }

                    callProp.setSequence(seq);//4 byte

                    byte[] buff=new byte[320];

                    audioRecorder.read(buffer, offset, 320);

                    System.arraycopy(buffer,offset,buff,0,buff.length);
                    callProp.setVoice(buff);//320 byte

                    byte[] voice = callProp.ToBuffer();

                    DatagramPacket packet = new DatagramPacket(voice, voice.length, InetAddress.getByName(deviceSignature.getDeviceIP()), WIPHONEPORT);
                    commonSoc.send(packet);

                    offset+=320;
                    if(offset>=1600){
                        offset=0;
                    }
                    seq++;
                }
                // Stop recording and release resources
                audioRecorder.stop();
                audioRecorder.release();
                mic = false;
            }
           /* catch(InterruptedException e) {

                Log.e(TAG, "InterruptedException: " + e.toString());
                mic = false;
            }*/
            catch(SocketException e) {

                Log.e(TAG, "SocketException: " + e.toString());
                mic = false;
            }
            catch(UnknownHostException e) {

                Log.e(TAG, "UnknownHostException: " + e.toString());
                mic = false;
            }
            catch(IOException e) {

                Log.e(TAG, "IOException: " + e.toString());
                mic = false;
            }
        }

// Audio transmission thread

    CaptureDevicesCollection captureDeviceCollection = new CaptureDevicesCollection();

    DeviceInformation deviceInfo = captureDeviceCollection[0];

    capture = new Capture(deviceInfo.DriverGuid);

    short channels = 1;          // Mono.
    short bitsPerSample = 16;    // 16-bit; alternatively use 8 bits.
    int samplesPerSecond = 8000; // Default: 22050; for 11 kHz use 11025, 22 kHz use 22050, 44 kHz use 44100, etc.

    // Set up the wave format to be captured.
    waveFormat = new WaveFormat();
    waveFormat.Channels = channels;
    waveFormat.FormatTag = WaveFormatTag.Pcm;
    waveFormat.SamplesPerSecond = samplesPerSecond;
    waveFormat.BitsPerSample = bitsPerSample;
    waveFormat.BlockAlign = (short)(channels * (bitsPerSample / (short)8));
    waveFormat.AverageBytesPerSecond = waveFormat.BlockAlign * samplesPerSecond;

    captureBufferDescription = new CaptureBufferDescription();
    captureBufferDescription.BufferBytes = waveFormat.AverageBytesPerSecond / 5; // Approx. 200 milliseconds of PCM data.
    captureBufferDescription.Format = waveFormat;

    playbackBufferDescription = new BufferDescription();
    playbackBufferDescription.BufferBytes = waveFormat.AverageBytesPerSecond / 5;
    playbackBufferDescription.Format = waveFormat;
    playbackBuffer = new SecondaryBuffer(playbackBufferDescription, device);

    bufferSize = captureBufferDescription.BufferBytes;
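
With these settings AverageBytesPerSecond = 1 channel × 2 bytes × 8000 samples/s = 16,000 bytes/s, so the capture buffer is 16,000 / 5 = 3,200 bytes (about 200 ms) and each of the ten notification blocks used below is 3,200 / 10 = 320 bytes, i.e. 20 ms of audio per UDP packet.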

Now my audio buffer size is 1600 bytes. There is no audio on the other end.

I referred to this.

Here is my C# code for capturing the audio stream in 320-byte chunks:

    try
    {
        //The following lines get audio from microphone and then send them
        //across network.
        captureBuffer = new CaptureBuffer(captureBufferDescription, capture);
        CreateNotifyPositions();
        int blockSize = bufferSize / 10;
        int halfBuffer = bufferSize / 2;
        captureBuffer.Start(true);
        //bool readFirstBufferPart = true;
        int offset = 0;
        MemoryStream memStream = new MemoryStream(halfBuffer);
        bStop = false;
        uint seq = 0;
        while (!bStop)
        {
            autoResetEvent.WaitOne();
            this.Invoke(new Action(() =>
                {
                    listBox1.Items.Add(DateTime.Now.ToString("HH:mm:ss.fff"));
                }));
            offset = (int)(((seq + 5) % 10) * blockSize);
            halfBuffer = blockSize;
            memStream.Seek(0, SeekOrigin.Begin);
            captureBuffer.Read(offset, memStream, halfBuffer, LockFlag.None);
            //readFirstBufferPart = !readFirstBufferPart;
            //offset = readFirstBufferPart ? 0 : halfBuffer;
            byte[] dataToWrite = memStream.GetBuffer();
            var voicePacket = new WiPhonePacket
            {
                OpCode = (byte)WiPhonePacket.OpCodes.Call,
                Operand = (byte)WiPhonePacket.OperandCodes.Talk,
                SelfState = 0,
                CallId = 1,
                WiPhoneId = broadcastIp,
                ClientId = localIp,
                CheckSum = Helpers.General.CalculateChecksum(Configurations.Signature.SignatureData, 0, Configurations.Signature.SignatureData.Length),
                Sequence = seq++
            };
            voicePacket.SetVoice(dataToWrite);
            var voiceBuffer = voicePacket.ToBuffer();
            IPEndPoint ep = new IPEndPoint(IPAddress.Parse(Configurations.Signature.DeviceIP), 2739);
            clientSocket.SendTo(voiceBuffer, ep);
            //udpClient.Send(dataToWrite, dataToWrite.Length, otherPartyIP.Address.ToString(), 2739);
        }
    }
    catch (Exception ex)
    {
        MessageBox.Show(ex.Message, "VoiceChat-Send ()", MessageBoxButtons.OK, MessageBoxIcon.Error);
    }
    finally
    {
        captureBuffer.Stop();
        captureBuffer.Dispose();
        captureBuffer = null;
        autoResetEvent.Dispose();
        autoResetEvent = null;
        notify.Dispose();
        notify = null;
        //Increment flag by one.
        nUdpClientFlag += 1;
        //When flag is two then it means we have got out of loops in Send and Receive.
        //while (nUdpClientFlag != 2)
        //{ }
        //Clear the flag.
        nUdpClientFlag = 0;
        //Close the socket.
        //udpClient.Close();
    }




        private void CreateNotifyPositions()
        {
            try
            {
                autoResetEvent = new AutoResetEvent(false);

                notify = new Notify(captureBuffer);

                BufferPositionNotify[] nots = new BufferPositionNotify[10];
                for (int i = 0; i < 10; i++)
                {
                    var bufferPositionNotify1 = new BufferPositionNotify();
                    bufferPositionNotify1.Offset = (i + 1) * (bufferSize / 10) - 1;
                    bufferPositionNotify1.EventNotifyHandle = autoResetEvent.SafeWaitHandle.DangerousGetHandle();

                    nots[i] = bufferPositionNotify1;
                }

                notify.SetNotificationPositions(nots);

            }
            catch (Exception ex)
            {
                MessageBox.Show(ex.Message, "VoiceChat-CreateNotifyPositions ()", MessageBoxButtons.OK, MessageBoxIcon.Error);
            }
        }

//// Logic for obtaining the 320-byte chunks of audio data
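
Taken together, each of the ten notification positions fires once another 320-byte (20 ms) block has been captured, and the send loop reads the block at offset ((seq + 5) % 10) * blockSize, i.e. 5 × 320 = 1,600 bytes (half of the 3,200-byte circular buffer, about 100 ms) away from the block that just completed, presumably so the read never collides with the capture cursor.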


2 Answers:

Answer 0 (score: 0)

Thanks for adding the missing part of your source code. That is very helpful, because it contains the most obvious bug:

    if(i==0){
        bytes_read   = audioRecorder.read(buffer, 0, 319);
        System.arraycopy(buffer,0,buff,0,buff.length);
        callProp.setVoice(buff);//320 byte
    }else{
        bytes_read   = audioRecorder.read(buffer, 320, 639);
        System.arraycopy(buffer,320,buff,0,buff.length);
        callProp.setVoice(buff);//320 byte
    }

audioRecorder.read() takes the number of bytes to read as its third parameter (not the index of the last element to write in the destination array). So you should change the code to:

    if(i==0){
        bytes_read   = audioRecorder.read(buffer, 0, 320);
        System.arraycopy(buffer,0,buff,0,buff.length);
        callProp.setVoice(buff);//320 byte
    }else{
        bytes_read   = audioRecorder.read(buffer, 320, 320);
        System.arraycopy(buffer,320,buff,0,buff.length);
        callProp.setVoice(buff);//320 byte
    }
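
As an additional safeguard (not part of the original answer), AudioRecord.read() may also return fewer bytes than requested, so a defensive variant keeps reading until the full 320-byte chunk is filled:

    // Sketch: accumulate exactly 320 bytes per packet; read() can return short counts
    // or a negative error code (e.g. ERROR_INVALID_OPERATION).
    int filled = 0;
    while (filled < 320) {
        int n = audioRecorder.read(buffer, offset + filled, 320 - filled);
        if (n < 0) {
            break; // stop on recorder error
        }
        filled += n;
    }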

With that change the sound quality is much better, but don't expect too much: your sampling rate is only 8 kHz, which means the highest possible frequency is below 4 kHz, comparable to old-fashioned telephone quality.
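
For reference, the Nyquist limit at an 8 kHz sampling rate is f_max = f_s / 2 = 8000 / 2 = 4000 Hz, which is why nothing above roughly 4 kHz can be captured or reproduced.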

Answer 1 (score: 0)

I fixed it with the following changes:

    private static final int SAMPLE_INTERVAL = 20; // Milliseconds
    private static final int SAMPLE_SIZE = 2;      // Bytes per sample (16-bit PCM)
    private static final int BUF_SIZE = SAMPLE_INTERVAL * SAMPLE_INTERVAL * SAMPLE_SIZE * 2; // 20 * 20 * 2 * 2 = 1600 bytes

    private static final int RECORDING_RATE = 8000; // Hertz
    private static final int CHANNEL = AudioFormat.CHANNEL_IN_MONO;   // 16
    private static final int FORMAT = AudioFormat.ENCODING_PCM_16BIT; // 2

    audioRecorder = new AudioRecord(MediaRecorder.AudioSource.VOICE_COMMUNICATION,
            RECORDING_RATE, CHANNEL, FORMAT, BUF_SIZE * 10);
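
One extra check worth adding (my suggestion, not something this answer relies on): the buffer size passed to the AudioRecord constructor should be at least AudioRecord.getMinBufferSize() for the chosen format, otherwise initialization can fail. A minimal sketch:

    // Sketch: never pass a recorder buffer smaller than the device minimum.
    int minBuf = AudioRecord.getMinBufferSize(RECORDING_RATE, CHANNEL, FORMAT);
    int recorderBuf = Math.max(BUF_SIZE * 10, minBuf); // BUF_SIZE * 10 = 16000 bytes here
    audioRecorder = new AudioRecord(MediaRecorder.AudioSource.VOICE_COMMUNICATION,
            RECORDING_RATE, CHANNEL, FORMAT, recorderBuf);

The revised capture-and-send thread: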


            @Override
            public void run() {
                // Create an instance of the AudioRecord class
                long seq=0;
                int offset=0;
                byte[] buffer = new byte[BUF_SIZE];

                try {
                    // Create a socket and start recording
                    Log.i(TAG, "Packet destination: " + deviceSignature.getDeviceIP()+ " "+BUF_SIZE);
                    audioRecorder.startRecording();
                    while (mic) {
                        // Capture audio from the mic and transmit it

                        WiphoneProp callProp=new WiphoneProp();
                        callProp.setOpCode(WiPhonePacket.OpCodes.Call.getValue());
                        callProp.setOperand(WiPhonePacket.OperandCodes.Talk.getValue());
                        callProp.setSelfState(0);
                        callProp.setCallId(1);

                        callProp.setCheckSum(BitConverter.checkSum(deviceSignature.getSignatureData(),0,deviceSignature.getSignatureData().length));
                        try {
                            callProp.setWiphoneId(ByteBuffer.wrap(getBroadcastQuadIp().getAddress()).order(ByteOrder.LITTLE_ENDIAN).getInt());//broadcast ip or wiphone ip
                            callProp.setClientId(ByteBuffer.wrap(getLocalIp().getAddress()).order(ByteOrder.LITTLE_ENDIAN).getInt());//device ip or local ip
                        } catch (IOException e) {
                            e.printStackTrace();
                        }
                        callProp.setSequence(seq);

                        byte[] buff=new byte[320];

                        audioRecorder.read(buffer, offset, buff.length);

                        System.arraycopy(buffer,offset,buff,0,buff.length);

                        callProp.setVoice(buff);//320 byte
                        byte[] voice = callProp.ToBuffer();


                        DatagramPacket packet = new DatagramPacket(voice, voice.length, InetAddress.getByName(deviceSignature.getDeviceIP()), WIPHONEPORT);
                        commonSoc.send(packet);

                        seq++;
                        offset+=320;
                        if(offset>=1600){
                            offset=0;
                        }


                    }
                    // Stop recording and release resources
                    audioRecorder.stop();
                    audioRecorder.release();
                    mic = false;
                }
               /* catch(InterruptedException e) {

                    Log.e(TAG, "InterruptedException: " + e.toString());
                    mic = false;
                }*/
                catch(SocketException e) {

                    Log.e(TAG, "SocketException: " + e.toString());
                    mic = false;
                }
                catch(UnknownHostException e) {

                    Log.e(TAG, "UnknownHostException: " + e.toString());
                    mic = false;
                }
                catch(IOException e) {

                    Log.e(TAG, "IOException: " + e.toString());
                    mic = false;
                }
            }