There isn't too much info about apply this codec when we need to streaming audio. Without apply the codec, my code work like a charm establishing a communication between 2 devices but I need encode/decode in that format because I will need streaming with the server and not between two devices (I am testing this code using 2 devices).
I am looking for the chance if anyone of your could see where is the key of my problem. I've tried different configurations of the input parameters. Maybe, the codecs that I am using are wrong (I took them from one project with Apache license.)
This values are set in the recorder-sender as in the player-receiver device:
private int port=50005;
private int sampleRate = 8000 ;//44100;
private int channelConfig = AudioFormat.CHANNEL_OUT_MONO;
private int audioFormat = AudioFormat.ENCODING_PCM_16BIT;
int minBufSize = AudioTrack.getMinBufferSize(sampleRate, channelConfig, audioFormat);
Note: CHANNEL_OUT_MONO in the player and CHANNEL_IN_MONO in the recorder item.
And these are my methods:
public void startStreamingEncoding() {
Thread streamThread = new Thread(new Runnable() {
@Override
public void run() {
try {
android.os.Process.setThreadPriority(android.os.Process.THREAD_PRIORITY_URGENT_AUDIO);
DatagramSocket socket = new DatagramSocket();
short[] buffer = new short[minBufSize];
DatagramPacket packet;
final InetAddress destination = InetAddress.getByName(ip_receiver);
recorder = new AudioRecord(MediaRecorder.AudioSource.MIC,sampleRate,channelConfig,audioFormat,minBufSize*10);
recorder.startRecording();
/////Encoding:
Encoder encoder = new G711UCodec();
byte[] outBuffer = new byte[minBufSize];
while(status == true) {
//reading data from MIC into buffer
minBufSize = recorder.read(buffer, 0, buffer.length);
//Encoding:
encoder.encode(buffer, minBufSize, outBuffer, 0);
//putting buffer in the packet
packet = new DatagramPacket (outBuffer, outBuffer.length, destination,port);
socket.send(packet);
}
} catch(UnknownHostException e) {
Log.e("VS", "UnknownHostException");
} catch (IOException e) {
e.printStackTrace();
Log.e("VS", "IOException");
}
}
});
streamThread.start();
}
And the method to play and decoding the stream:
public void playerAudioDecoding()
{
Thread thrd = new Thread(new Runnable() {
@Override
public void run()
{
android.os.Process.setThreadPriority(android.os.Process.THREAD_PRIORITY_URGENT_AUDIO);
AudioTrack track = new AudioTrack(AudioManager.STREAM_MUSIC,
sampleRate, AudioFormat.CHANNEL_CONFIGURATION_MONO,
AudioFormat.ENCODING_PCM_16BIT, minBufSize,
AudioTrack.MODE_STREAM);
track.play();
Decoder decoder = new G711UCodec();
try
{
DatagramSocket sock = new DatagramSocket(port);
byte[] buf = new byte[minBufSize];
while(true)
{
DatagramPacket pack = new DatagramPacket(buf, minBufSize);
sock.receive(pack);
//Decoding:
int size = pack.getData().length;
short[] shortArray = new short[size];
decoder.decode(shortArray, pack.getData(), minBufSize, 0);
byte[] array = MyShortToByte(shortArray);
track.write(array, 0, array.length);
}
}
catch (SocketException se)
{
Log.e("Error", "SocketException: " + se.toString());
}
catch (IOException ie)
{
Log.e("Error", "IOException" + ie.toString());
}
} // end run
});
thrd.start();
}
And it is the codec class that I am using with Apache license:
public class G711UCodec implements Encoder, Decoder {
// s00000001wxyz...s000wxyz
// s0000001wxyza...s001wxyz
// s000001wxyzab...s010wxyz
// s00001wxyzabc...s011wxyz
// s0001wxyzabcd...s100wxyz
// s001wxyzabcde...s101wxyz
// s01wxyzabcdef...s110wxyz
// s1wxyzabcdefg...s111wxyz
private static byte[] table13to8 = new byte[8192];
private static short[] table8to16 = new short[256];
static {
// b13 --> b8
for (int p = 1, q = 0; p <= 0x80; p <<= 1, q+=0x10) {
for (int i = 0, j = (p << 4) - 0x10; i < 16; i++, j += p) {
int v = (i + q) ^ 0x7F;
byte value1 = (byte) v;
byte value2 = (byte) (v + 128);
for (int m = j, e = j + p; m < e; m++) {
table13to8[m] = value1;
table13to8[8191 - m] = value2;
}
}
}
// b8 --> b16
for (int q = 0; q <= 7; q++) {
for (int i = 0, m = (q << 4); i < 16; i++, m++) {
int v = (((i + 0x10) << q) - 0x10) << 3;
table8to16[m ^ 0x7F] = (short) v;
table8to16[(m ^ 0x7F) + 128] = (short) (65536 - v);
}
}
}
public int decode(short[] b16, byte[] b8, int count, int offset) {
for (int i = 0, j = offset; i < count; i++, j++) {
b16[i] = table8to16[b8[j] & 0xFF];
}
return count;
}
public int encode(short[] b16, int count, byte[] b8, int offset) {
for (int i = 0, j = offset; i < count; i++, j++) {
b8[j] = table13to8[(b16[i] >> 4) & 0x1FFF];
}
return count;
}
public int getSampleCount(int frameSize) {
return frameSize;
}
}
Really, I don't know what it happen; If I change the sampleRate to 4000 I can recognice my voice and some words but there is a lot echo. And i repeat, if disable the encoding/decoding process and make the streaming in PCM, the quality is fantastic. Let see if anybody could help me and thanks in advance.