diff --git a/instrument-core/src/main/java/jomu/instrument/audio/AudioPlayer.java b/instrument-core/src/main/java/jomu/instrument/audio/AudioPlayer.java
new file mode 100644
index 00000000..c6bd7b12
--- /dev/null
+++ b/instrument-core/src/main/java/jomu/instrument/audio/AudioPlayer.java
@@ -0,0 +1,149 @@
+package jomu.instrument.audio;
+
+import java.util.logging.Logger;
+
+import javax.sound.sampled.AudioFormat;
+import javax.sound.sampled.AudioSystem;
+import javax.sound.sampled.DataLine;
+import javax.sound.sampled.LineUnavailableException;
+import javax.sound.sampled.SourceDataLine;
+
+import be.tarsos.dsp.AudioEvent;
+import be.tarsos.dsp.AudioProcessor;
+import be.tarsos.dsp.io.TarsosDSPAudioFormat;
+
+/**
+ * This AudioProcessor can be used to sync events with sound. It uses a pattern
+ * described in JavaFX Special Effects: Taking Java RIA to the Extreme with
+ * Animation, Multimedia, and Game Elements, Chapter 9, page 185:<br>
+ * The field {@code line} is the Java Sound object that actually makes the
+ * sound. Its {@code write} method is interesting because it blocks until the
+ * line is ready for more data.<br>
+ * If this AudioProcessor is chained with other AudioProcessors, the others
+ * should be able to operate in real time or process the signal on a separate
+ * thread.
+ *
+ * @author Joren Six
+ */
+public final class AudioPlayer implements AudioProcessor {
+	private final static Logger LOG = Logger.getLogger(AudioPlayer.class.getName());
+	/**
+	 * The line to send sound to. It is also used to keep everything in sync.
+	 */
+	private SourceDataLine line;
+
+	private final AudioFormat format;
+
+	byte[] lastBuffer = null;
+
+	boolean lastBufferEmpty = true;
+
+	/**
+	 * Creates a new audio player.
+	 *
+	 * @param format
+	 *            The AudioFormat of the buffer.
+	 * @throws LineUnavailableException
+	 *             If no output line is available.
+	 */
+	public AudioPlayer(final AudioFormat format) throws LineUnavailableException {
+		this(format, 1024);
+	}
+
+	public AudioPlayer(final AudioFormat format, int bufferSize) throws LineUnavailableException {
+		final DataLine.Info info = new DataLine.Info(SourceDataLine.class, format, bufferSize);
+		LOG.info("Opening data line " + info.toString());
+		this.format = format;
+		line = (SourceDataLine) AudioSystem.getLine(info);
+
+		line.open(format, bufferSize * 2);
+		line.start();
+	}
+
+	public AudioPlayer(final TarsosDSPAudioFormat format, int bufferSize) throws LineUnavailableException {
+		this(JVMAudioInputStream.toAudioFormat(format), bufferSize);
+	}
+
+	public AudioPlayer(final TarsosDSPAudioFormat format) throws LineUnavailableException {
+		this(JVMAudioInputStream.toAudioFormat(format));
+	}
+
+	public long getMicroSecondPosition() {
+		return line.getMicrosecondPosition();
+	}
+
+	@Override
+	public boolean process(AudioEvent audioEvent) {
+		boolean thisBufferEmpty = true;
+		for (byte entry : audioEvent.getByteBuffer()) {
+			if (entry != 0) {
+				thisBufferEmpty = false;
+				break;
+			}
+		}
+		if (lastBuffer != null) {
+			if (lastBufferEmpty && !thisBufferEmpty) {
+				// Zero the leading non-zero samples when coming out of silence, to suppress a click.
+				for (int i = 0; i < audioEvent.getByteBuffer().length; i++) {
+					if (audioEvent.getByteBuffer()[i] != 0) {
+						audioEvent.getByteBuffer()[i] = 0;
+					} else {
+						break;
+					}
+				}
+			} else if (!lastBufferEmpty && thisBufferEmpty) {
+				// for (int i = 0; i < audioEvent.getByteBuffer().length; i++) {
+				// if (audioEvent.getByteBuffer()[i] != 0) {
+				// audioEvent.getByteBuffer()[i] = 0;
+				// } else {
+				// break;
+				// }
+				// }
+			}
+		}
+		// Remember this buffer (assign the field, not a shadowing local) so the next
+		// call can detect silence transitions.
+		lastBuffer = audioEvent.getByteBuffer();
+		lastBufferEmpty = thisBufferEmpty;
+
+		// overlap in samples * nr of bytes / sample = bytes overlap
+		int byteOverlap = audioEvent.getOverlap() * format.getFrameSize();
+		int byteStepSize = audioEvent.getBufferSize() * format.getFrameSize() - byteOverlap;
+		LOG.severe(">>>AO 1: " + byteOverlap + ", " + byteStepSize
+				+ ", " + (System.currentTimeMillis() / 1000.0)
+				+ ", " + audioEvent.getTimeStamp() + ", " + audioEvent.getSamplesProcessed());
+
+		if (audioEvent.getTimeStamp() == 0) {
+			byteOverlap = 0;
+			byteStepSize = audioEvent.getBufferSize() * format.getFrameSize();
+			LOG.severe(">>>AO 2: " + byteOverlap + ", " + byteStepSize
+					+ ", " + (System.currentTimeMillis() / 1000.0)
+					+ ", " + audioEvent.getTimeStamp() + ", " + audioEvent.getSamplesProcessed());
+		}
+
+		/*
+		 * if(byteStepSize < line.available()){
+		 * System.out.println(line.available() + " Will not block " +
+		 * line.getMicrosecondPosition());
+		 * }else {
+		 * System.out.println("Will block " + line.getMicrosecondPosition());
+		 * }
+		 */
+
+		int bytesWritten = line.write(audioEvent.getByteBuffer(), byteOverlap, byteStepSize);
+		if (bytesWritten != byteStepSize) {
+			System.err.println(
+					String.format("Expected to write %d bytes but only wrote %d bytes", byteStepSize, bytesWritten));
+		}
+		return true;
+	}
+
+	/*
+	 * (non-Javadoc)
+	 *
+	 * @see be.tarsos.util.RealTimeAudioProcessor.AudioProcessor#
+	 * processingFinished()
+	 */
+	@Override
+	public void processingFinished() {
+		// cleanup
+		line.drain(); // drain takes too long..
+		line.stop();
+		line.close();
+	}
+}
diff --git a/instrument-core/src/main/java/jomu/instrument/audio/JVMAudioInputStream.java b/instrument-core/src/main/java/jomu/instrument/audio/JVMAudioInputStream.java
new file mode 100644
index 00000000..6d80bd54
--- /dev/null
+++ b/instrument-core/src/main/java/jomu/instrument/audio/JVMAudioInputStream.java
@@ -0,0 +1,83 @@
+package jomu.instrument.audio;
+
+import java.io.IOException;
+
+import javax.sound.sampled.AudioFormat;
+import javax.sound.sampled.AudioFormat.Encoding;
+import javax.sound.sampled.AudioInputStream;
+
+import be.tarsos.dsp.io.TarsosDSPAudioFormat;
+import be.tarsos.dsp.io.TarsosDSPAudioInputStream;
+
+/**
+ * Encapsulates an {@link AudioInputStream} to make it work with the core
+ * TarsosDSP library.
+ *
+ * @author Joren Six
+ */
+public class JVMAudioInputStream implements TarsosDSPAudioInputStream {
+
+	private final AudioInputStream underlyingStream;
+	private final TarsosDSPAudioFormat tarsosDSPAudioFormat;
+
+	public JVMAudioInputStream(AudioInputStream stream) {
+		this.underlyingStream = stream;
+		this.tarsosDSPAudioFormat = JVMAudioInputStream.toTarsosDSPFormat(stream.getFormat());
+	}
+
+	@Override
+	public long skip(long bytesToSkip) throws IOException {
+		return underlyingStream.skip(bytesToSkip);
+	}
+
+	@Override
+	public int read(byte[] b, int off, int len) throws IOException {
+		return underlyingStream.read(b, off, len);
+	}
+
+	@Override
+	public void close() throws IOException {
+		underlyingStream.close();
+	}
+
+	@Override
+	public long getFrameLength() {
+		return underlyingStream.getFrameLength();
+	}
+
+	@Override
+	public TarsosDSPAudioFormat getFormat() {
+		return tarsosDSPAudioFormat;
+	}
+
+	/**
+	 * Converts an {@link AudioFormat} to a {@link TarsosDSPAudioFormat}.
+	 *
+	 * @param format
+	 *            The {@link AudioFormat}
+	 * @return A {@link TarsosDSPAudioFormat}
+	 */
+	public static TarsosDSPAudioFormat toTarsosDSPFormat(AudioFormat format) {
+		boolean isSigned = format.getEncoding() == Encoding.PCM_SIGNED;
+		TarsosDSPAudioFormat tarsosDSPFormat = new TarsosDSPAudioFormat(format.getSampleRate(),
+				format.getSampleSizeInBits(), format.getChannels(), isSigned, format.isBigEndian());
+		return tarsosDSPFormat;
+	}
+
+	/**
+	 * Converts a {@link TarsosDSPAudioFormat} to an {@link AudioFormat}.
+	 *
+	 * @param format
+	 *            The {@link TarsosDSPAudioFormat}
+	 * @return An {@link AudioFormat}
+	 */
+	public static AudioFormat toAudioFormat(TarsosDSPAudioFormat format) {
+		boolean isSigned = format.getEncoding() == TarsosDSPAudioFormat.Encoding.PCM_SIGNED;
+		AudioFormat audioFormat = new AudioFormat(format.getSampleRate(), format.getSampleSizeInBits(),
+				format.getChannels(), isSigned, format.isBigEndian());
+		return audioFormat;
+	}
+
+}
diff --git a/instrument-core/src/main/java/jomu/instrument/audio/ResynthAudioSynthesizer.java b/instrument-core/src/main/java/jomu/instrument/audio/ResynthAudioSynthesizer.java
index a3642e12..904e541a 100644
--- a/instrument-core/src/main/java/jomu/instrument/audio/ResynthAudioSynthesizer.java
+++ b/instrument-core/src/main/java/jomu/instrument/audio/ResynthAudioSynthesizer.java
@@ -7,7 +7,6 @@
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.TimeUnit;
 import java.util.logging.Logger;
 
 import javax.sound.sampled.AudioFormat;
@@ -17,7 +16,6 @@
 
 import be.tarsos.dsp.AudioEvent;
 import be.tarsos.dsp.AudioProcessor;
-import be.tarsos.dsp.io.jvm.AudioPlayer;
 import jomu.instrument.Instrument;
 import jomu.instrument.audio.features.AudioFeatureFrame;
 import jomu.instrument.audio.features.AudioFeatureProcessor;
@@ -44,6 +42,25 @@ public class ResynthProcessor implements AudioProcessor {
 	@Override
 	public boolean process(AudioEvent audioEvent) {
 		audioEvent.setFloatBuffer(resynthInfo.getSourceBuffer());
+
+		float smin = 0;
+		float smax = 0;
+		for (int i = 0; i < audioEvent.getFloatBuffer().length; i++) {
+			float sample = audioEvent.getFloatBuffer()[i];
+			if (sample < smin) {
+				smin = sample;
+			}
+
+			if (sample > smax) {
+				smax = sample;
+			}
+
+		}
+
+		LOG.severe(">>>RP after: " + (System.currentTimeMillis() / 1000.0)
+				+ ", " + audioEvent.getTimeStamp() + ", " + audioEvent.getSamplesProcessed()
+				+ ", min: " + smin
+				+ ", max: " + smax + ", len: " + audioEvent.getFloatBuffer().length);
 		return true;
 	}
 
@@ -149,7 +166,7 @@ public void playFrameSequence(ToneTimeFrame toneTimeFrame, String streamId, int
 		AudioFeatureFrame aff = afp.getAudioFeatureFrame(sequence);
 		TreeMap features = aff.getResynthFeatures()
 				.getFeatures();
-		
+
 		AudioQueueMessage audioQueueMessage = new AudioQueueMessage(toneTimeFrame, features, sequence);
 		audioStream.bq.add(audioQueueMessage);
 
@@ -161,9 +178,8 @@ private class AudioQueueConsumer implements Runnable {
 
 		private AudioStream audioStream;
 		private BlockingQueue bq;
-		double sampleTime = -1;
-		int counter = 0;
 		boolean running = true;
+		private double lastTime;
 
 		public AudioQueueConsumer(BlockingQueue bq, AudioStream audioStream) {
 			this.bq = bq;
@@ -184,7 +200,6 @@ public void run() {
 				}
 
 				AudioQueueMessage aqm = bq.take();
-				counter++;
 
 				ToneTimeFrame toneTimeFrame = aqm.toneTimeFrame;
 
@@ -193,17 +208,11 @@ public void run() {
 					break;
 				}
 
-				if (sampleTime != -1) {
-					TimeUnit.MILLISECONDS.sleep((long) (sampleTime * 1000));
-				}
-
 				if (audioStream.isClosed()) {
 					running = false;
 					break;
 				}
 
-				double time = toneTimeFrame.getStartTime();
-
 				for (Entry entry : aqm.features.entrySet()) {
 					audioStream.getResynthProcessor()
 							.setResynthInfo(entry.getKey(), entry.getValue());
diff --git a/instrument-core/src/main/java/jomu/instrument/audio/TarsosAudioSynthesizer.java b/instrument-core/src/main/java/jomu/instrument/audio/TarsosAudioSynthesizer.java
index ca492530..d353ab02 100644
--- a/instrument-core/src/main/java/jomu/instrument/audio/TarsosAudioSynthesizer.java
+++ b/instrument-core/src/main/java/jomu/instrument/audio/TarsosAudioSynthesizer.java
@@ -339,11 +339,6 @@ public void run() {
 				}
 
 				double time = toneTimeFrame.getStartTime();
-				if (lastTime > 0) {
-					TimeUnit.MILLISECONDS.sleep((long) (time - lastTime) * 1000);
-				}
-				lastTime = time;
-
 				// System.out.println(">>TIME: " + time);
 				if (frameHistory.size() >= MAP_MAX_COUNT) {
 					frameHistory.removeLast();
diff --git a/instrument-core/src/main/java/jomu/instrument/audio/features/ResynthSource.java b/instrument-core/src/main/java/jomu/instrument/audio/features/ResynthSource.java
index 9e9dbe72..724d927a 100644
--- a/instrument-core/src/main/java/jomu/instrument/audio/features/ResynthSource.java
+++ b/instrument-core/src/main/java/jomu/instrument/audio/features/ResynthSource.java
@@ -47,7 +47,6 @@ public class ResynthSource extends AudioEventSource implements Pitc
 	private double phaseFirst = 0;
 	private double phaseSecond = 0;
 	private double prevFrequency = 0;
-	private float samplerate;
 	private final EnvelopeFollower envelopeFollower;
 	private boolean usePureSine;
 	private boolean followEnvelope;
@@ -64,9 +63,9 @@ public ResynthSource(AudioDispatcher dispatcher) {
 				.getController()
 				.getParameterManager();
 		this.windowSize = parameterManager.getIntParameter(InstrumentParameterNames.PERCEPTION_HEARING_DEFAULT_WINDOW);
-		envelopeFollower = new EnvelopeFollower(samplerate, 0.005, 0.01);
+		envelopeFollower = new EnvelopeFollower(sampleRate, 0.005, 0.01);
 		this.followEnvelope = false;
-		this.usePureSine = false;
+		this.usePureSine = true;
 		previousFrequencies = new double[5];
 		previousFrequencyIndex = 0;
 	}
@@ -172,6 +171,11 @@ public void handlePitch(PitchDetectionResult pitchDetectionResult, AudioEvent au
 			prevFrequency = frequency;
 		}
 
+		if (frequency <= 0) {
+			// audioEvent.clearFloatBuffer();
+			envelopeAudioBuffer = audioEvent.getFloatBuffer().clone();
+			return;
+		}
 		final double twoPiF = 2 * Math.PI * frequency;
 
 		float[] audioBuffer = audioEvent.getFloatBuffer();
@@ -181,7 +185,7 @@ public void handlePitch(PitchDetectionResult pitchDetectionResult, AudioEvent au
 		}
 
 		for (int sample = 0; sample < audioBuffer.length; sample++) {
-			double time = sample / samplerate;
+			double time = sample / sampleRate;
 			double wave = Math.sin(twoPiF * time + phase);
 			if (!usePureSine) {
 				wave += 0.05 * Math.sin(twoPiF * 4 * time + phaseFirst);
@@ -191,14 +195,16 @@ public void handlePitch(PitchDetectionResult pitchDetectionResult, AudioEvent au
 			if (followEnvelope) {
 				audioBuffer[sample] = audioBuffer[sample] * envelopeAudioBuffer[sample];
 			}
+		}
 
-		double timefactor = twoPiF * audioBuffer.length / samplerate;
+		double timefactor = twoPiF * audioBuffer.length / sampleRate;
 		phase = timefactor + phase;
 		if (!usePureSine) {
 			phaseFirst = 4 * timefactor + phaseFirst;
 			phaseSecond = 8 * timefactor + phaseSecond;
 		}
+	}
 
 	@Override
diff --git a/instrument-desktop/src/main/resources/instrument-client.properties b/instrument-desktop/src/main/resources/instrument-client.properties
index 7f817b07..91357e4d 100644
--- a/instrument-desktop/src/main/resources/instrument-client.properties
+++ b/instrument-desktop/src/main/resources/instrument-client.properties
@@ -9,3 +9,4 @@ perception.hearing.audioSourceDirectory=C:\\dev\\projects\\instrument\\instrumen
 perception.hearing.tonemap.persistenceMode=2
 # actuation.voice.midiSoundFonts=FluidR3_GM.sf2
 storage.objectStore.basePath=user.home
+actuation.voice.midiDevicePlaySwitch=true
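
Usage sketch (illustrative, not part of the patch): one way the new jomu.instrument.audio.AudioPlayer might be attached to a TarsosDSP dispatcher chain. The class name AudioPlayerDemo and the sample rate, buffer size and overlap values below are assumed placeholders, and AudioDispatcherFactory is TarsosDSP's stock JVM factory rather than anything introduced by this change.

    import javax.sound.sampled.LineUnavailableException;

    import be.tarsos.dsp.AudioDispatcher;
    import be.tarsos.dsp.io.jvm.AudioDispatcherFactory;

    import jomu.instrument.audio.AudioPlayer;

    public class AudioPlayerDemo {
        public static void main(String[] args) throws LineUnavailableException {
            // Capture from the default microphone: 44.1 kHz, 1024-sample buffers, no overlap (placeholder values).
            AudioDispatcher dispatcher = AudioDispatcherFactory.fromDefaultMicrophone(44100, 1024, 0);
            // The AudioPlayer added by this patch blocks in line.write(), which keeps the chain in sync with playback.
            AudioPlayer player = new AudioPlayer(dispatcher.getFormat(), 1024);
            dispatcher.addAudioProcessor(player);
            // AudioDispatcher.run() blocks while audio flows, so run it on its own thread.
            new Thread(dispatcher, "audio-dispatch").start();
        }
    }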