Commit: resynth2
jimomulloy committed Aug 14, 2024
1 parent 9d71aa4 commit d7a47e2
Showing 6 changed files with 265 additions and 22 deletions.
149 changes: 149 additions & 0 deletions instrument-core/src/main/java/jomu/instrument/audio/AudioPlayer.java
@@ -0,0 +1,149 @@
package jomu.instrument.audio;

import java.util.logging.Logger;

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.DataLine;
import javax.sound.sampled.LineUnavailableException;
import javax.sound.sampled.SourceDataLine;

import be.tarsos.dsp.AudioEvent;
import be.tarsos.dsp.AudioProcessor;
import be.tarsos.dsp.io.TarsosDSPAudioFormat;

/**
 * This AudioProcessor can be used to sync events with sound. It uses a pattern
 * described in JavaFX Special Effects: Taking Java RIA to the Extreme with
 * Animation, Multimedia, and Game Elements, Chapter 9, page 185:
 * <blockquote><i> The variable line is the Java Sound object that actually
 * makes the sound. The write method on line is interesting because it blocks
 * until it is ready for more data. </i></blockquote> If this AudioProcessor is
 * chained with other AudioProcessors, the others should be able to operate in
 * real time or process the signal on a separate thread.
 *
 * @author Joren Six
 */
public final class AudioPlayer implements AudioProcessor {
private static final Logger LOG = Logger.getLogger(AudioPlayer.class.getName());
/**
 * The JavaSound line to send sound to. Is also used to keep everything in sync.
 */
private SourceDataLine line;

private final AudioFormat format;

/** Byte buffer of the previous audio event, used to detect silence transitions. */
byte[] lastBuffer = null;

/** Whether the previous buffer contained only zero samples. */
boolean lastBufferEmpty = true;

/**
 * Creates a new audio player.
 *
 * @param format
 *            The AudioFormat of the buffer.
 * @throws LineUnavailableException
 *             If no output line is available.
 */
public AudioPlayer(final AudioFormat format) throws LineUnavailableException {
this(format, 1024);
}

public AudioPlayer(final AudioFormat format, int bufferSize) throws LineUnavailableException {
final DataLine.Info info = new DataLine.Info(SourceDataLine.class, format, bufferSize);
LOG.info("Opening data line " + info.toString());
this.format = format;
line = (SourceDataLine) AudioSystem.getLine(info);

line.open(format, bufferSize * 2);
line.start();
}

public AudioPlayer(final TarsosDSPAudioFormat format, int bufferSize) throws LineUnavailableException {
this(JVMAudioInputStream.toAudioFormat(format), bufferSize);
}

public AudioPlayer(final TarsosDSPAudioFormat format) throws LineUnavailableException {
this(JVMAudioInputStream.toAudioFormat(format));
}

public long getMicroSecondPosition() {
return line.getMicrosecondPosition();
}

@Override
public boolean process(AudioEvent audioEvent) {
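// Detect whether this buffer contains only zero samples (silence).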
boolean thisBufferEmpty = true;
for (byte entry : audioEvent.getByteBuffer()) {
if (entry != 0) {
thisBufferEmpty = false;
break;
}
}
if (lastBuffer != null) {
if (lastBufferEmpty && !thisBufferEmpty) {
// Transition from silence to sound: zero the leading run of
// non-zero samples to soften the onset.
for (int i = 0; i < audioEvent.getByteBuffer().length; i++) {
if (audioEvent.getByteBuffer()[i] != 0) {
audioEvent.getByteBuffer()[i] = 0;
} else {
break;
}
}
}
// The sound-to-silence transition is left untouched for now.
}
// Remember this buffer and its silence state for the next call.
lastBuffer = audioEvent.getByteBuffer();
lastBufferEmpty = thisBufferEmpty;
// overlap in samples * bytes per sample = overlap in bytes
int byteOverlap = audioEvent.getOverlap() * format.getFrameSize();
int byteStepSize = audioEvent.getBufferSize() * format.getFrameSize() - byteOverlap;
LOG.severe(">>>AO 1: " + byteOverlap + ", " + byteStepSize
+ ", " + (System.currentTimeMillis() / 1000.0)
+ ", " + audioEvent.getTimeStamp() + ", " + audioEvent.getSamplesProcessed());

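// The very first buffer has no predecessor to overlap with, so write it in full.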
if (audioEvent.getTimeStamp() == 0) {
byteOverlap = 0;
byteStepSize = audioEvent.getBufferSize() * format.getFrameSize();
LOG.severe(">>>AO 2: " + byteOverlap + ", " + byteStepSize
+ ", " + (System.currentTimeMillis() / 1000.0)
+ ", " + audioEvent.getTimeStamp() + ", " + audioEvent.getSamplesProcessed());
}

/*
* if(byteStepSize < line.available()){
* System.out.println(line.available() + " Will not block " +
* line.getMicrosecondPosition());
* }else {
* System.out.println("Will block " + line.getMicrosecondPosition());
* }
*/

int bytesWritten = line.write(audioEvent.getByteBuffer(), byteOverlap, byteStepSize);
if (bytesWritten != byteStepSize) {
System.err.println(
String.format("Expected to write %d bytes but only wrote %d bytes", byteStepSize, bytesWritten));
}
return true;
}

/*
 * (non-Javadoc)
 *
 * @see be.tarsos.dsp.AudioProcessor#processingFinished()
 */
@Override
public void processingFinished() {
// cleanup
line.drain(); // drain takes too long..
line.stop();
line.close();
}
}
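
For reference, a minimal usage sketch (not part of this commit, and assuming TarsosDSP's stock AudioDispatcherFactory and default-microphone input) showing how this player typically terminates a dispatcher chain:

import be.tarsos.dsp.AudioDispatcher;
import be.tarsos.dsp.io.jvm.AudioDispatcherFactory;
import jomu.instrument.audio.AudioPlayer;

public class AudioPlayerDemo {
public static void main(String[] args) throws Exception {
// Capture from the default microphone: 44.1 kHz, 1024-sample buffers, no overlap.
AudioDispatcher dispatcher = AudioDispatcherFactory.fromDefaultMicrophone(44100, 1024, 0);
// The player's blocking write() paces the whole chain against playback.
dispatcher.addAudioProcessor(new AudioPlayer(dispatcher.getFormat(), 1024));
dispatcher.run();
}
}
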
83 changes: 83 additions & 0 deletions instrument-core/src/main/java/jomu/instrument/audio/JVMAudioInputStream.java
@@ -0,0 +1,83 @@
package jomu.instrument.audio;

import java.io.IOException;

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioFormat.Encoding;
import javax.sound.sampled.AudioInputStream;

import be.tarsos.dsp.io.TarsosDSPAudioFormat;
import be.tarsos.dsp.io.TarsosDSPAudioInputStream;

/**
* Encapsulates an {@link AudioInputStream} to make it work with the core
* TarsosDSP library.
*
* @author Joren Six
*
*/
public class JVMAudioInputStream implements TarsosDSPAudioInputStream {

private final AudioInputStream underlyingStream;
private final TarsosDSPAudioFormat tarsosDSPAudioFormat;

public JVMAudioInputStream(AudioInputStream stream) {
this.underlyingStream = stream;
this.tarsosDSPAudioFormat = JVMAudioInputStream.toTarsosDSPFormat(stream.getFormat());
}

@Override
public long skip(long bytesToSkip) throws IOException {
return underlyingStream.skip(bytesToSkip);
}

@Override
public int read(byte[] b, int off, int len) throws IOException {
return underlyingStream.read(b, off, len);
}

@Override
public void close() throws IOException {
underlyingStream.close();
}

@Override
public long getFrameLength() {
return underlyingStream.getFrameLength();
}

@Override
public TarsosDSPAudioFormat getFormat() {
return tarsosDSPAudioFormat;
}

/**
 * Converts an {@link AudioFormat} to a {@link TarsosDSPAudioFormat}.
 *
 * @param format
 *            The {@link AudioFormat} to convert.
 * @return The equivalent {@link TarsosDSPAudioFormat}.
 */
public static TarsosDSPAudioFormat toTarsosDSPFormat(AudioFormat format) {
boolean isSigned = format.getEncoding() == Encoding.PCM_SIGNED;
TarsosDSPAudioFormat tarsosDSPFormat = new TarsosDSPAudioFormat(format.getSampleRate(),
format.getSampleSizeInBits(), format.getChannels(), isSigned, format.isBigEndian());
return tarsosDSPFormat;
}

/**
 * Converts a {@link TarsosDSPAudioFormat} to an {@link AudioFormat}.
 *
 * @param format
 *            The {@link TarsosDSPAudioFormat} to convert.
 * @return The equivalent {@link AudioFormat}.
 */
public static AudioFormat toAudioFormat(TarsosDSPAudioFormat format) {
boolean isSigned = format.getEncoding() == TarsosDSPAudioFormat.Encoding.PCM_SIGNED;
AudioFormat audioFormat = new AudioFormat(format.getSampleRate(), format.getSampleSizeInBits(),
format.getChannels(), isSigned, format.isBigEndian());
return audioFormat;
}

}
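
Again for reference, a minimal sketch (not part of this commit) of feeding a WAV file through this wrapper into a TarsosDSP dispatcher; the file name is illustrative:

import java.io.File;

import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;

import be.tarsos.dsp.AudioDispatcher;
import jomu.instrument.audio.JVMAudioInputStream;

public class JVMAudioInputStreamDemo {
public static void main(String[] args) throws Exception {
// Wrap the JVM stream so the pure-Java TarsosDSP core can read it.
AudioInputStream stream = AudioSystem.getAudioInputStream(new File("input.wav"));
AudioDispatcher dispatcher = new AudioDispatcher(new JVMAudioInputStream(stream), 1024, 0);
dispatcher.run();
}
}
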
@@ -7,7 +7,6 @@
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.logging.Logger;

import javax.sound.sampled.AudioFormat;
@@ -17,7 +16,6 @@

import be.tarsos.dsp.AudioEvent;
import be.tarsos.dsp.AudioProcessor;
import be.tarsos.dsp.io.jvm.AudioPlayer;
import jomu.instrument.Instrument;
import jomu.instrument.audio.features.AudioFeatureFrame;
import jomu.instrument.audio.features.AudioFeatureProcessor;
@@ -44,6 +42,25 @@ public class ResynthProcessor implements AudioProcessor {
@Override
public boolean process(AudioEvent audioEvent) {
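// Replace the incoming audio with the resynthesised source buffer for this frame.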
audioEvent.setFloatBuffer(resynthInfo.getSourceBuffer());

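// Track min/max sample values so the log line below shows whether the
// resynthesised frame actually carries signal.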
float smin = 0;
float smax = 0;
for (int i = 0; i < audioEvent.getFloatBuffer().length; i++) {
float sample = audioEvent.getFloatBuffer()[i];
if (sample < smin) {
smin = sample;
}
if (sample > smax) {
smax = sample;
}
}

LOG.severe(">>>RP after: " + (System.currentTimeMillis() / 1000.0)
+ ", " + audioEvent.getTimeStamp() + ", " + audioEvent.getSamplesProcessed()
+ ", min: " + smin +
", max: " + smax + ", len: " + audioEvent.getFloatBuffer().length);
return true;
}

@@ -149,7 +166,7 @@ public void playFrameSequence(ToneTimeFrame toneTimeFrame, String streamId, int
AudioFeatureFrame aff = afp.getAudioFeatureFrame(sequence);
TreeMap<Double, ResynthInfo> features = aff.getResynthFeatures()
.getFeatures();

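// Hand this frame's resynthesis features to the playback queue; the
// AudioQueueConsumer below drains it in order.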
AudioQueueMessage audioQueueMessage = new AudioQueueMessage(toneTimeFrame, features, sequence);

audioStream.bq.add(audioQueueMessage);
@@ -161,9 +178,8 @@ private class AudioQueueConsumer implements Runnable {

private AudioStream audioStream;
private BlockingQueue<AudioQueueMessage> bq;
double sampleTime = -1;
int counter = 0;
boolean running = true;
private double lastTime;

public AudioQueueConsumer(BlockingQueue<AudioQueueMessage> bq, AudioStream audioStream) {
this.bq = bq;
@@ -184,7 +200,6 @@ public void run() {
}

AudioQueueMessage aqm = bq.take();
counter++;

ToneTimeFrame toneTimeFrame = aqm.toneTimeFrame;

@@ -193,17 +208,11 @@
break;
}

if (sampleTime != -1) {
TimeUnit.MILLISECONDS.sleep((long) (sampleTime * 1000));
}

if (audioStream.isClosed()) {
running = false;
break;
}

double time = toneTimeFrame.getStartTime();

for (Entry<Double, ResynthInfo> entry : aqm.features.entrySet()) {
audioStream.getResynthProcessor()
.setResynthInfo(entry.getKey(), entry.getValue());
@@ -339,11 +339,6 @@ public void run() {
}

double time = toneTimeFrame.getStartTime();
if (lastTime > 0) {
TimeUnit.MILLISECONDS.sleep((long) (time - lastTime) * 1000);
}
lastTime = time;
// System.out.println(">>TIME: " + time);

if (frameHistory.size() >= MAP_MAX_COUNT) {
frameHistory.removeLast();
@@ -47,7 +47,6 @@ public class ResynthSource extends AudioEventSource<ResynthInfo> implements Pitc
private double phaseFirst = 0;
private double phaseSecond = 0;
private double prevFrequency = 0;
private float samplerate;
private final EnvelopeFollower envelopeFollower;
private boolean usePureSine;
private boolean followEnvelope;
@@ -64,9 +63,9 @@ public ResynthSource(AudioDispatcher dispatcher) {
.getController()
.getParameterManager();
this.windowSize = parameterManager.getIntParameter(InstrumentParameterNames.PERCEPTION_HEARING_DEFAULT_WINDOW);
envelopeFollower = new EnvelopeFollower(samplerate, 0.005, 0.01);
envelopeFollower = new EnvelopeFollower(sampleRate, 0.005, 0.01);
this.followEnvelope = false;
this.usePureSine = false;
this.usePureSine = true;
previousFrequencies = new double[5];
previousFrequencyIndex = 0;
}
@@ -172,6 +171,11 @@ public void handlePitch(PitchDetectionResult pitchDetectionResult, AudioEvent au

prevFrequency = frequency;
}
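// No pitch detected: capture the envelope buffer from the incoming audio
// and pass the frame through unmodified.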
if (frequency <= 0) {
// audioEvent.clearFloatBuffer();
envelopeAudioBuffer = audioEvent.getFloatBuffer().clone();
return;
}

final double twoPiF = 2 * Math.PI * frequency;
float[] audioBuffer = audioEvent.getFloatBuffer();
@@ -181,7 +185,7 @@
}

for (int sample = 0; sample < audioBuffer.length; sample++) {
double time = sample / samplerate;
double time = sample / sampleRate;
double wave = Math.sin(twoPiF * time + phase);
if (!usePureSine) {
wave += 0.05 * Math.sin(twoPiF * 4 * time + phaseFirst);
@@ -191,14 +195,16 @@
if (followEnvelope) {
audioBuffer[sample] = audioBuffer[sample] * envelopeAudioBuffer[sample];
}

}

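// Advance the running phases by this buffer's duration so the next buffer's
// waveform continues smoothly where this one ended.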
double timefactor = twoPiF * audioBuffer.length / samplerate;
double timefactor = twoPiF * audioBuffer.length / sampleRate;
phase = timefactor + phase;
if (!usePureSine) {
phaseFirst = 4 * timefactor + phaseFirst;
phaseSecond = 8 * timefactor + phaseSecond;
}

}

@Override
@@ -9,3 +9,4 @@ perception.hearing.audioSourceDirectory=C:\\dev\\projects\\instrument\\instrumen
perception.hearing.tonemap.persistenceMode=2
# actuation.voice.midiSoundFonts=FluidR3_GM.sf2
storage.objectStore.basePath=user.home
actuation.voice.midiDevicePlaySwitch=true