Sketcher2 source code: VoiceClient.java

package com.jotuntech.sketcher.client.voice;

import java.io.IOException;
import java.net.DatagramSocket;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.channels.DatagramChannel;
import java.util.HashMap;
import java.util.Map;

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.LineUnavailableException;
import javax.sound.sampled.TargetDataLine;

import org.xiph.speex.SpeexEncoder;
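
/**
 * Captures microphone audio, filters and gain-controls it, encodes it with Speex and
 * streams it to the voice server over UDP, while receiving, decoding and mixing the
 * voice channels of other peers. Runs as its own thread.
 *
 * Minimal usage sketch (the host, port and listener below are placeholders, not part
 * of this file):
 *
 *   VoiceClient voice = new VoiceClient(new InetSocketAddress("voice.example.com", 7461), peerKey);
 *   voice.setListener(myVoiceListener);  // optional, for UI feedback
 *   voice.start();
 *   // ...
 *   voice.interrupt();                   // stops capture, playback and the thread
 */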
public class VoiceClient extends Thread {
    private static final int bufferSize = VoiceChannel.SPEEX_FRAME_BYTES * 4;

    private DatagramChannel channel;
    private VoiceMixer voiceMixer;
    private Map<Integer, VoiceChannel> voiceChannels;
    private Integer peerKey;
    private AudioFormat targetFormat;
    private TargetDataLine target;
    private SpeexEncoder encoder;
    private ByteBuffer recordBuffer;
    private InetSocketAddress serverAddress;
    private int packetNumber;
    private boolean bigEndian = false;
    private float gain, gain2;
    private VoiceListener listener;
    private Highpass24 highpass;
    private VoiceDetector detector;

    public VoiceClient(InetSocketAddress serverAddress, Integer peerKey) {
        super("VoiceClient");
        setPriority(Thread.MAX_PRIORITY);
        voiceChannels = new HashMap<Integer, VoiceChannel>();
        this.serverAddress = serverAddress;
        this.peerKey = peerKey;
        packetNumber = 0;
        bigEndian = false;
        /** 16000 Hz, 16-bit, mono, signed, little-endian capture format */
        targetFormat = new AudioFormat(16000, 16, 1, true, false);
        gain = 1f;
        gain2 = 1f;
    }

    public void run() {
        try {
            channel = DatagramChannel.open();
            channel.configureBlocking(true);
            DatagramSocket socket = channel.socket();
            socket.bind(new InetSocketAddress(0));
            System.err.println("Bound to address " + socket.getLocalAddress().toString() + ":" + socket.getLocalPort());
            channel.configureBlocking(false);
            voiceMixer = new VoiceMixer(VoiceChannel.SPEEX_FRAME_BYTES * 16);
            System.err.println("Voice client ready.");
            while(!interrupted()) {
                sleep(10);
                if(target == null) {
                    try {
                        target = AudioSystem.getTargetDataLine(targetFormat);
                    } catch(IllegalArgumentException e) {
                        System.err.println("Mono 16-bit 16000 Hz little-endian target data line not supported.");
                        System.err.println("Java exception:");
                        e.printStackTrace();
                        System.err.println("Attempting to open big-endian target data line instead.");
                        targetFormat = new AudioFormat(16000, 16, 1, true, true);
                        try {
                            target = AudioSystem.getTargetDataLine(targetFormat);
                            bigEndian = true;
                        } catch(IllegalArgumentException e2) {
                            System.err.println("Big-endian target data line not supported either.");
                            System.err.println("Java exception:");
                            e2.printStackTrace();
                            System.err.println("Continuing in playback-only mode!");
                            if(listener != null) {
                                listener.voiceEvent(new VoiceEvent(VoiceEvent.TYPE_WARN, "The sound device does not support the required format."));
                            }
                        }
                    }
                    if(target != null) {
                        target.open(targetFormat, VoiceChannel.SPEEX_FRAME_BYTES * 4);
                        target.start();
                        System.err.println("Target data line successfully opened and started.");
                    }
                }
                if(target != null) {
                    if(recordBuffer == null) {
                        recordBuffer = ByteBuffer.allocate(bufferSize);
                        /** Big-endian capture data is byte-swapped to little-endian in place
                         *  before any further processing, so the buffer is always treated as
                         *  little-endian. */
                        recordBuffer.order(ByteOrder.LITTLE_ENDIAN);
                        if(bigEndian) {
                            System.err.println("Allocated " + bufferSize + " byte recording buffer for big-endian capture.");
                        } else {
                            System.err.println("Allocated " + bufferSize + " byte little-endian recording buffer.");
                        }
                        highpass = new Highpass24();
                        detector = new VoiceDetector(16000f, 0.1f);
                    }
                    recordBuffer.clear();
                    while(target.available() >= VoiceChannel.SPEEX_FRAME_BYTES && recordBuffer.remaining() >= VoiceChannel.SPEEX_FRAME_BYTES) {
                        int bytesRead = target.read(recordBuffer.array(), recordBuffer.arrayOffset() + recordBuffer.position(), VoiceChannel.SPEEX_FRAME_BYTES);
                        recordBuffer.position(recordBuffer.position() + bytesRead);
                    }
                    recordBuffer.flip();
                    if(recordBuffer.remaining() >= VoiceChannel.SPEEX_FRAME_BYTES) {
                        if(bigEndian) {
                            /** Byte-swap big-endian capture data to the little-endian layout
                             *  expected by the processing below and by the Speex encoder. */
                            for(int i = recordBuffer.position(); i < recordBuffer.limit(); i += 2) {
                                byte a = recordBuffer.get(i);
                                byte b = recordBuffer.get(i + 1);
                                recordBuffer.put(i, b);
                                recordBuffer.put(i + 1, a);
                            }
                        }
                        float minimumChange = 0.01f;
                        //float avgChange = 0f;
                        //float numSamples = recordBuffer.remaining() / 2f;
                        int peakSample = 0;
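                        /**
                         * Per-sample processing chain: 100 Hz highpass -> voice activity
                         * detection -> automatic gain (gain, brickwall-limited to 0 dB, max 8x)
                         * -> activity gate (gain2) -> clipping. The attack/release increments
                         * below appear to be derived as 1 / (time * 16000 samples/s), e.g.
                         * 0.00003125 = 1 / (2 s * 16000).
                         */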
                        for(int i = recordBuffer.position(); i < recordBuffer.limit(); i += 2) {
                            int rawSample = recordBuffer.getShort(i);
                            int absSample = Math.abs(rawSample);
                            if(absSample > peakSample) {
                                peakSample = absSample;
                            }
                            /** Get sample and perform 100 Hz highpass */
                            float sample = highpass.process(rawSample / 32767f, 0.0125f);
                            /** Detect voice activity */
                            float change = detector.process(sample);
                            //avgChange += change;
                            /** Apply automatic gain */
                            float gainSample = sample * gain;
                            /** Absolute sample with gain applied */
                            float absGainSample = Math.abs(gainSample);
                            if(gain > 1f && absGainSample >= 1f) {
                                /** Brickwall limit output to 0dB */
                                gain /= absGainSample;
                                gainSample = sample * gain;
                            } else if(gain > 1f && absGainSample > 1f) {
                                /** 2 second release (note: shadowed by the branch above, so never reached) */
                                gain -= gain * 0.00003125f;
                            } else if(gain < 8f && absGainSample > 0 && absGainSample < 1f) {
                                /** 2 second attack */
                                gain += gain * 0.00003125f;
                            }
                            if(gain2 < 1f && change > minimumChange) {
                                /** 1/16th second attack */
                                gain2 += (1f - gain2) * 0.001f;
                            } else if(gain2 > 0.03125f && change < minimumChange) {
                                /** 1 second release */
                                gain2 -= gain2 * 0.000125f;
                            }
                            /** Apply voice activity gain */
                            gainSample *= gain2;
                            /** Clip sample */
                            if(gainSample < -1f) {
                                gainSample = -1f;
                            } else if(gainSample > 1f) {
                                gainSample = 1f;
                            }
                            recordBuffer.putShort(i, (short) (gainSample * 32767f));
                        }
                        //avgChange /= numSamples;
                        if(listener != null) {
                            listener.voiceEvent(new VoiceEvent(VoiceEvent.TYPE_PACKET_VOLUME, peerKey, peakSample));
                        }
                        //System.err.println("Gain = " + gain + ", Gain2 = " + gain2 + ", AvgChange = " + (long) (avgChange * 1000000f));
                        while(recordBuffer.remaining() >= VoiceChannel.SPEEX_FRAME_BYTES) {
                            if(encoder == null) {
                                /** Start encoder */
                                encoder = new SpeexEncoder();
                                /** Wideband mode (1), quality 4, 16000 Hz, mono */
                                encoder.init(1, 4, 16000, 1);
                                if(listener != null) {
                                    listener.voiceEvent(new VoiceEvent(VoiceEvent.TYPE_START, null, target.getBufferSize()));
                                }
                                System.err.println("Speex encoder initialized and started.");
                            }
                            if(encoder.processData(recordBuffer.array(), recordBuffer.arrayOffset() + recordBuffer.position(), VoiceChannel.SPEEX_FRAME_BYTES)) {
                                recordBuffer.position(recordBuffer.position() + VoiceChannel.SPEEX_FRAME_BYTES);
                                int bytesProcessed = encoder.getProcessedDataByteSize();
                                ByteBuffer packetBuffer = ByteBuffer.allocate(bytesProcessed + 8);
                                packetBuffer.putInt(peerKey);
                                packetBuffer.putInt(packetNumber);
                                encoder.getProcessedData(packetBuffer.array(), packetBuffer.arrayOffset() + packetBuffer.position());
                                packetBuffer.position(packetBuffer.position() + bytesProcessed);
                                packetBuffer.flip();
                                int bytesSent = channel.send(packetBuffer, serverAddress);
                                if(bytesSent > 0) {
                                    ++packetNumber;
                                }
                            } else {
                                if(listener != null) {
                                    listener.voiceEvent(new VoiceEvent(VoiceEvent.TYPE_ERROR, "An unknown error occurred while encoding a voice packet."));
                                }
                                System.err.println("An unknown error occurred while encoding a voice packet.");
                                return;
                            }
                        }
                    }
                } /** End of recording specific section */
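                /**
                 * Receive and dispatch incoming voice datagrams. The first int of each packet
                 * identifies the sending peer's channel; the rest of the payload (presumably a
                 * packet number followed by a Speex frame, mirroring the outgoing format) is
                 * handed to that peer's VoiceChannel for decoding.
                 */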
                ByteBuffer packetBuffer = ByteBuffer.allocate(VoiceChannel.SPEEX_FRAME_BYTES + 8);
                for(SocketAddress address = channel.receive(packetBuffer); address != null; address = channel.receive(packetBuffer)) {
                    packetBuffer.flip();
                    if(packetBuffer.remaining() > 4) {
                        Integer voiceChannelKey = packetBuffer.getInt();
                        VoiceChannel voiceChannel = voiceChannels.get(voiceChannelKey);
                        if(voiceChannel == null) {
                            voiceChannel = new VoiceChannel(voiceMixer, voiceChannelKey);
                            voiceChannels.put(voiceChannelKey, voiceChannel);
                            System.err.println("Creating new voice channel #" + voiceChannelKey);
                            if(listener != null) {
                                voiceChannel.setListener(listener);
                                listener.voiceEvent(new VoiceEvent(VoiceEvent.TYPE_CHANNEL_NEW, voiceChannelKey));
                            }
                        }
                        voiceChannel.packet(packetBuffer);
                    }
                    packetBuffer = ByteBuffer.allocate(VoiceChannel.SPEEX_FRAME_BYTES + 8);
                }
                for(VoiceChannel voiceChannel : voiceChannels.values()) {
                    voiceChannel.process();
                }
                /** Remove dead voice channels, one at a time, to avoid ConcurrentModificationException */
                for(Map.Entry<Integer, VoiceChannel> e : voiceChannels.entrySet()) {
                    if(e.getValue().isDead()) {
                        System.err.println("Voice channel #" + e.getKey() + " is dead and was dropped.");
                        voiceMixer.drop(e.getKey());
                        voiceChannels.remove(e.getKey());
                        if(listener != null) {
                            listener.voiceEvent(new VoiceEvent(VoiceEvent.TYPE_CHANNEL_DEAD, e.getKey()));
                        }
                        break;
                    }
                }
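                /** Mix the voice channels for playback; a false return (presumably the output line closing) shuts the client down. */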
                if(!voiceMixer.mix()) {
                    break;
                }
            }
        } catch (IOException e) {
            if(listener != null) {
                listener.voiceEvent(new VoiceEvent(VoiceEvent.TYPE_ERROR, e.getMessage()));
            }
        } catch (InterruptedException e) {
            /** Normal shutdown path: interrupt() was called while sleeping. */
        } catch (LineUnavailableException e) {
            if(listener != null) {
                listener.voiceEvent(new VoiceEvent(VoiceEvent.TYPE_ERROR, "The sound device is busy."));
            }
        }
        if(voiceMixer != null) {
            voiceMixer.close();
        }
        if(target != null) {
            target.stop();
            target.close();
        }
        if(channel != null) {
            try { channel.close(); } catch(IOException e) { }
        }
        if(listener != null) {
            listener.voiceEvent(new VoiceEvent(VoiceEvent.TYPE_STOP));
        }
        System.err.println("Voice client terminated.");
    }

    public void setListener(VoiceListener listener) {
        this.listener = listener;
        for(VoiceChannel channel : voiceChannels.values()) {
            channel.setListener(listener);
        }
    }
}