Added single column

This commit is contained in:
parent 06600b8341
commit e239610909
@@ -99,14 +99,14 @@ impl AudioData {
         }
     }

     /// Get underlying chunk for post-record processing
-    pub fn get_chunk_for_processing(&self) -> Option<(Arc<AudioChunk>, usize)> {
+    pub fn get_chunk_for_processing(&self) -> Result<(Arc<AudioChunk>, usize)> {
         match self {
             Self::Unconsolidated {
                 chunks,
                 sync_offset,
                 ..
-            } => Some((chunks.clone(), *sync_offset)),
-            _ => None,
+            } => Ok((chunks.clone(), *sync_offset)),
+            _ => Err(LooperError::ChunkOwnership(std::panic::Location::caller())),
         }
     }

@@ -131,6 +131,24 @@ impl AudioData {
         *self = Self::Empty;
         Ok(())
     }
+
+    /// Append silence frames to audio data (RT-safe, uses fixed-size buffer and loops)
+    pub fn append_silence<F: ChunkFactory>(
+        &mut self,
+        frame_count: usize,
+        chunk_factory: &mut F,
+    ) -> Result<()> {
+        const SILENCE_BUFFER_SIZE: usize = 1024;
+        let silence_buffer = [0.0; SILENCE_BUFFER_SIZE];
+
+        let mut remaining_frames = frame_count;
+        while remaining_frames > 0 {
+            let frames_this_iteration = remaining_frames.min(SILENCE_BUFFER_SIZE);
+            self.append_samples(&silence_buffer[..frames_this_iteration], chunk_factory)?;
+            remaining_frames -= frames_this_iteration;
+        }
+        Ok(())
+    }
 }

 impl AudioData {
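Aside (not part of the commit): the new append_silence helper avoids allocating on the RT thread by reusing one fixed 1024-frame zero buffer and appending it in slices until the requested frame count is covered. A self-contained sketch of that chunking loop, with a plain Vec<f32> standing in for the crate's AudioData/ChunkFactory types (illustrative names only; the Vec here allocates, which the real RT code avoids via its chunk factory):

    /// Append `frame_count` zero frames to `sink` in fixed-size slices,
    /// mirroring the loop structure of append_silence above.
    fn append_silence_sketch(sink: &mut Vec<f32>, frame_count: usize) {
        const SILENCE_BUFFER_SIZE: usize = 1024;
        let silence_buffer = [0.0_f32; SILENCE_BUFFER_SIZE];

        let mut remaining = frame_count;
        while remaining > 0 {
            let this_iteration = remaining.min(SILENCE_BUFFER_SIZE);
            sink.extend_from_slice(&silence_buffer[..this_iteration]);
            remaining -= this_iteration;
        }
    }

    fn main() {
        let mut sink = Vec::new();
        append_silence_sketch(&mut sink, 2500); // 1024 + 1024 + 452 frames
        assert_eq!(sink.len(), 2500);
    }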
@@ -408,7 +426,7 @@ mod tests {
         audio_data.append_samples(&samples, &mut factory).unwrap();

         let result = audio_data.get_chunk_for_processing();
-        assert!(result.is_some());
+        assert!(result.is_ok());

         let (chunk, offset) = result.unwrap();
         assert_eq!(offset, 42);
@@ -419,7 +437,7 @@ mod tests {
     fn test_get_chunk_for_processing_wrong_state() {
         let audio_data = AudioData::new_empty();
         let result = audio_data.get_chunk_for_processing();
-        assert!(result.is_none());
+        assert!(result.is_err());
     }

     #[test]
audio_engine/src/column.rs (new file, 144 lines)
@@ -0,0 +1,144 @@
+use crate::*;
+
+pub struct Column<const ROWS: usize> {
+    frames_per_beat: usize,
+    tracks: [Track; ROWS],
+    playback_position: usize,
+}
+
+impl<const ROWS: usize> Column<ROWS> {
+    pub fn new(frames_per_beat: usize) -> Self {
+        Self {
+            frames_per_beat,
+            tracks: core::array::from_fn(|_| Track::new()),
+            playback_position: 0,
+        }
+    }
+
+    pub fn len(&self) -> usize {
+        for track in &self.tracks {
+            if track.is_recording() {
+                continue;
+            }
+            let len = track.len();
+            if len > 0 {
+                return len;
+            }
+        }
+        return 0;
+    }
+
+    pub fn idle(&self) -> bool {
+        for track in &self.tracks {
+            if !track.is_idle() {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    pub fn handle_record_button(&mut self, row: usize) -> Result<()> {
+        let len = self.len();
+        let track = &mut self.tracks[row];
+        if track.is_recording() {
+            if len > 0 {
+                track.clear();
+            } else {
+                track.play();
+            }
+        } else {
+            if len > 0 {
+                let sync_offset = len - self.playback_position;
+                track.record_auto_stop(len, sync_offset);
+            } else {
+                track.record();
+            }
+        }
+        Ok(())
+    }
+
+    pub fn handle_play_button(&mut self, row: usize) -> Result<()> {
+        let track = &mut self.tracks[row];
+        if track.len() > 0 && track.is_idle() {
+            track.play();
+        } else if ! track.is_idle() {
+            track.stop();
+        }
+        Ok(())
+    }
+
+    pub fn handle_clear_button(&mut self, row: usize) -> Result<()> {
+        let track = &mut self.tracks[row];
+        track.clear();
+        Ok(())
+    }
+
+    pub fn set_consolidated_buffer(&mut self, row: usize, buffer: Box<[f32]>) -> Result<()> {
+        self.tracks[row].set_consolidated_buffer(buffer)
+    }
+
+    pub fn handle_xrun<H>(
+        &mut self,
+        timing: &BufferTiming,
+        chunk_factory: &mut impl ChunkFactory,
+        post_record_handler: H,
+    ) -> Result<()>
+    where
+        H: Fn(usize, Arc<AudioChunk>, usize) -> Result<()>,
+    {
+        for (row, track) in self.tracks.iter_mut().enumerate() {
+            track.handle_xrun(
+                timing.beat_in_missed,
+                timing.missed_frames,
+                chunk_factory,
+                |chunk, sync_offset| post_record_handler(row, chunk, sync_offset),
+            )?;
+        }
+        Ok(())
+    }
+
+    pub fn process<H>(
+        &mut self,
+        timing: &BufferTiming,
+        input_buffer: &[f32],
+        output_buffer: &mut [f32],
+        scratch_pad: &mut [f32],
+        chunk_factory: &mut impl ChunkFactory,
+        post_record_handler: H,
+    ) -> Result<()>
+    where
+        H: Fn(usize, Arc<AudioChunk>, usize) -> Result<()>,
+    {
+        let len = self.len();
+        if self.idle() {
+            if let Some(beat_index) = timing.beat_in_buffer {
+                let idle_time = input_buffer.len() - beat_index as usize;
+                if len == 0 {
+                    self.playback_position = self.frames_per_beat - idle_time;
+                } else {
+                    self.playback_position = len - idle_time;
+                }
+            }
+        }
+        for (row, track) in self.tracks.iter_mut().enumerate() {
+            track.process(
+                self.playback_position,
+                timing.beat_in_buffer,
+                input_buffer,
+                scratch_pad,
+                chunk_factory,
+                |chunk, sync_offset| post_record_handler(row, chunk, sync_offset),
+            )?;
+            for (output_val, scratch_pad_val) in output_buffer.iter_mut().zip(scratch_pad.iter()) {
+                *output_val += *scratch_pad_val;
+            }
+        }
+        let len = self.len();
+        if len > 0 {
+            self.playback_position = (self.playback_position + input_buffer.len()) % self.len();
+        } else {
+            self.playback_position = (self.playback_position + input_buffer.len()) % self.frames_per_beat;
+        }
+        Ok(())
+    }
+}
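Note for orientation while reading the new file: Column::process advances one shared playback_position per JACK buffer and wraps it against the loop length when a loop exists, or against frames_per_beat while the column is still empty. A minimal standalone sketch of that wrap rule (the function name and the example numbers are illustrative, not from the crate):

    /// Advance a column playback position by one buffer, wrapping against the
    /// loop length if one exists, otherwise against the beat length.
    fn advance_position(position: usize, buffer_len: usize, loop_len: usize, frames_per_beat: usize) -> usize {
        let modulus = if loop_len > 0 { loop_len } else { frames_per_beat };
        (position + buffer_len) % modulus
    }

    fn main() {
        // Empty column: wrap on the beat grid (e.g. 48_000 frames per beat).
        assert_eq!(advance_position(47_900, 256, 0, 48_000), 156);
        // Column with a 4-beat loop: wrap on the loop length instead.
        assert_eq!(advance_position(191_900, 256, 192_000, 48_000), 156);
    }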
@@ -3,6 +3,7 @@ mod audio_chunk;
 mod audio_data;
 mod beep;
 mod chunk_factory;
+mod column;
 mod connection_manager;
 mod looper_error;
 mod metronome;
@@ -21,9 +22,11 @@ use audio_chunk::AudioChunk;
 use audio_data::AudioData;
 use beep::generate_beep;
 use chunk_factory::ChunkFactory;
+use column::Column;
 use connection_manager::ConnectionManager;
 use looper_error::LooperError;
 use looper_error::Result;
+use metronome::BufferTiming;
 use metronome::Metronome;
 use notification_handler::JackNotification;
 use notification_handler::NotificationHandler;
@@ -33,8 +36,6 @@ use post_record_handler::PostRecordHandler;
 use process_handler::ProcessHandler;
 use state::State;
 use track::Track;
-use track::TrackState;
-use track::TrackTiming;

 pub struct JackPorts {
     pub audio_in: jack::Port<jack::AudioIn>,
@@ -69,7 +70,8 @@ async fn main() {
     let (mut post_record_handler, post_record_controller) =
         PostRecordHandler::new().expect("Could not create post-record handler");

-    let process_handler = ProcessHandler::new(
+    let process_handler = ProcessHandler::<_, 5>::new(
+        &jack_client,
         ports,
         allocator,
         beep_samples,
@@ -6,8 +6,8 @@ pub struct Metronome {
     click_volume: f32,

     // Timing state
-    frames_per_beat: u32,
-    frames_since_last_beat: u32, // Where we are in the current beat cycle
+    frames_per_beat: usize,
+    frames_since_last_beat: usize, // Where we are in the current beat cycle
     last_frame_time: Option<u32>, // For xrun detection
 }

@@ -17,7 +17,7 @@ pub struct BufferTiming {
     pub beat_in_buffer: Option<u32>,

     /// Number of frames missed due to xrun (0 if no xrun)
-    pub missed_frames: u32,
+    pub missed_frames: usize,

     /// Beat index within the missed frames (if any)
     pub beat_in_missed: Option<u32>,
@@ -34,10 +34,6 @@ impl Metronome {
         }
     }

-    pub fn frames_per_beat(&self) -> u32 {
-        self.frames_per_beat
-    }
-
     /// Process audio for current buffer, writing to output slice
     pub fn process(
         &mut self,
@@ -53,66 +49,80 @@ impl Metronome {
         // Get output buffer for click track
         let click_output = ports.click_track_out.as_mut_slice(ps);

-        self.render_click(buffer_size, current_frame_time, &timing, click_output);
+        self.render_click(buffer_size, &timing, click_output);

         Ok(timing)
     }

-    fn render_click(
-        &mut self,
-        buffer_size: u32,
-        current_frame_time: u32,
-        timing: &BufferTiming,
-        click_output: &mut [f32],
-    ) {
-        // Calculate current position within the beep (frames since last beat started)
-        let frames_since_beat_start = current_frame_time - self.frames_since_last_beat;
-        let click_length = self.click_samples.sample_count as u32;
-
-        if let Some(beat_offset) = timing.beat_in_buffer {
-            // Write silence up to beat boundary
-            let silence_end = beat_offset.min(buffer_size);
-            click_output[0..silence_end as _].fill(0.0);
-
-            // Write click samples from boundary onward
-            if beat_offset < buffer_size {
-                let remaining_buffer = buffer_size - beat_offset;
-                let samples_to_write = remaining_buffer.min(click_length);
-
-                // Copy click samples in bulk
-                let dest = &mut click_output
-                    [beat_offset as usize..beat_offset as usize + samples_to_write as usize];
-                if self.click_samples.copy_samples(dest, 0).is_ok() {
-                    // Apply volume scaling with iterators
-                    dest.iter_mut()
-                        .for_each(|sample| *sample *= self.click_volume);
-                }
-
-                // Fill remaining buffer with silence
-                click_output[(beat_offset as usize + samples_to_write as usize)..].fill(0.0);
-            }
-        } else if frames_since_beat_start < click_length {
-            // Continue playing click from previous beat if still within beep duration
-            let click_start_offset = frames_since_beat_start;
-            let remaining_click_samples = click_length - click_start_offset;
-            let samples_to_write = buffer_size.min(remaining_click_samples);
-
-            // Copy remaining beep samples in bulk
-            let dest = &mut click_output[0..samples_to_write as _];
-            if self
-                .click_samples
-                .copy_samples(dest, click_start_offset as _)
-                .is_ok()
-            {
-                // Apply volume scaling with iterators
-                dest.iter_mut()
-                    .for_each(|sample| *sample *= self.click_volume);
-            }
-
-            // Fill remaining buffer with silence
-            click_output[samples_to_write as _..].fill(0.0);
-        } else {
-            click_output.fill(0.0);
+    fn render_click(&mut self, buffer_size: u32, timing: &BufferTiming, click_output: &mut [f32]) {
+        let click_length = self.click_samples.sample_count;
+
+        // Calculate our position at the START of this buffer (before calculate_timing updated it)
+        // We need to go back by: buffer_size + any missed samples
+        let total_advancement = buffer_size as usize + timing.missed_frames;
+        let start_position =
+            (self.frames_since_last_beat + self.frames_per_beat - total_advancement)
+                % self.frames_per_beat;
+
+        if let Some(beat_offset) = timing.beat_in_buffer {
+            let beat_offset = beat_offset as usize;
+
+            // Check if we're still playing a click from before this beat
+            if start_position < click_length {
+                // Continue click until beat offset
+                let click_samples_remaining = click_length as usize - start_position as usize;
+                let samples_to_write = beat_offset.min(click_samples_remaining);
+
+                let dest = &mut click_output[0..samples_to_write];
+                if self
+                    .click_samples
+                    .copy_samples(dest, start_position as usize)
+                    .is_ok()
+                {
+                    dest.iter_mut()
+                        .for_each(|sample| *sample *= self.click_volume);
+                }
+
+                // Fill gap between end of click and new beat with silence
+                click_output[samples_to_write..beat_offset].fill(0.0);
+            } else {
+                // Write silence up to beat
+                click_output[0..beat_offset].fill(0.0);
+            }
+
+            // Start new click at beat offset
+            if beat_offset < buffer_size as usize {
+                let remaining_buffer = buffer_size as usize - beat_offset;
+                let samples_to_write = remaining_buffer.min(click_length as usize);
+
+                let dest = &mut click_output[beat_offset..beat_offset + samples_to_write];
+                if self.click_samples.copy_samples(dest, 0).is_ok() {
+                    dest.iter_mut()
+                        .for_each(|sample| *sample *= self.click_volume);
+                }
+
+                // Fill any remaining buffer with silence
+                click_output[(beat_offset + samples_to_write)..].fill(0.0);
+            }
+        } else {
+            // No beat in this buffer - check if we're continuing a click
+            if start_position < click_length {
+                let click_offset = start_position as usize;
+                let remaining_click = click_length as usize - click_offset;
+                let samples_to_write = (buffer_size as usize).min(remaining_click);
+
+                let dest = &mut click_output[0..samples_to_write];
+                if self.click_samples.copy_samples(dest, click_offset).is_ok() {
+                    dest.iter_mut()
+                        .for_each(|sample| *sample *= self.click_volume);
+                }
+
+                // Fill remaining with silence
+                click_output[samples_to_write..].fill(0.0);
+            } else {
+                // No click playing - all silence
+                click_output.fill(0.0);
+            }
         }
     }

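Note on the rewritten render_click: because calculate_timing has already advanced frames_since_last_beat by the buffer (plus any missed frames), the function recovers where the click was at the start of the buffer by stepping back by that total advancement, modulo the beat length. A small standalone check of that arithmetic (illustrative names and numbers only):

    /// Position within the beat at the *start* of the buffer, recovered from
    /// the already-advanced beat counter.
    fn start_position(frames_since_last_beat: usize, frames_per_beat: usize, total_advancement: usize) -> usize {
        (frames_since_last_beat + frames_per_beat - total_advancement) % frames_per_beat
    }

    fn main() {
        // After a 256-frame buffer with no xrun, a counter that now reads 100
        // must have started the buffer 47_844 frames into a 48_000-frame beat.
        assert_eq!(start_position(100, 48_000, 256), 47_844);
    }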
@@ -122,22 +132,22 @@ impl Metronome {
         buffer_size: u32,
     ) -> Result<BufferTiming> {
         // Detect xrun
-        let (missed_samples, beat_in_missed) = if let Some(last) = self.last_frame_time {
+        let (missed_frames, beat_in_missed) = if let Some(last) = self.last_frame_time {
             let expected = last.wrapping_add(buffer_size); // Handle u32 wrap
             if current_frame_time != expected {
                 // We have a gap
-                let missed = current_frame_time.wrapping_sub(expected);
+                let missed = current_frame_time.wrapping_sub(expected) as usize;

                 // Check if we missed multiple beats
-                let total_samples = self.frames_since_last_beat + missed + buffer_size;
+                let total_samples = self.frames_since_last_beat + missed as usize + buffer_size as usize;
                 if total_samples >= 2 * self.frames_per_beat {
                     return Err(LooperError::Xrun(std::panic::Location::caller()));
                 }

                 // Check if a beat occurred in the missed section
-                let beat_in_missed = if self.frames_since_last_beat + missed >= self.frames_per_beat
+                let beat_in_missed = if self.frames_since_last_beat + missed as usize >= self.frames_per_beat
                 {
-                    Some(self.frames_per_beat - self.frames_since_last_beat)
+                    Some((self.frames_per_beat - self.frames_since_last_beat) as u32)
                 } else {
                     None
                 };
@@ -153,21 +163,21 @@ impl Metronome {

         // Check for beat in current buffer
         // We need to account for any missed samples here too
-        let start_position = (self.frames_since_last_beat + missed_samples) % self.frames_per_beat;
-        let beat_in_buffer = if start_position + buffer_size >= self.frames_per_beat {
-            Some(self.frames_per_beat - start_position)
+        let start_position = (self.frames_since_last_beat + missed_frames) % self.frames_per_beat;
+        let beat_in_buffer = if start_position + buffer_size as usize >= self.frames_per_beat {
+            Some((self.frames_per_beat - start_position) as u32)
         } else {
             None
         };

         // Update state - advance by total samples (missed + buffer)
         self.frames_since_last_beat =
-            (self.frames_since_last_beat + missed_samples + buffer_size) % self.frames_per_beat;
+            (self.frames_since_last_beat + missed_frames + buffer_size as usize) % self.frames_per_beat;
         self.last_frame_time = Some(current_frame_time);

         Ok(BufferTiming {
             beat_in_buffer,
-            missed_frames: missed_samples,
+            missed_frames: missed_frames,
             beat_in_missed,
         })
     }
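Worked example for the timing math above, using round illustrative numbers rather than anything from the repo: with frames_per_beat = 48_000, a start position of 47_900 inside the beat and a 256-frame buffer, start_position + buffer_size = 48_156 >= 48_000, so the beat lands inside this buffer at offset 48_000 - 47_900 = 100:

    fn beat_in_buffer(start_position: usize, buffer_size: usize, frames_per_beat: usize) -> Option<u32> {
        if start_position + buffer_size >= frames_per_beat {
            Some((frames_per_beat - start_position) as u32)
        } else {
            None
        }
    }

    fn main() {
        assert_eq!(beat_in_buffer(47_900, 256, 48_000), Some(100));
        assert_eq!(beat_in_buffer(10_000, 256, 48_000), None);
    }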
@@ -177,7 +187,7 @@ impl Metronome {
 mod tests {
     use super::*;

-    fn create_test_metronome(samples_per_beat: u32) -> Metronome {
+    fn create_test_metronome(frames_per_beat: usize) -> Metronome {
         let beep_samples = Arc::new(AudioChunk {
             samples: vec![1.0; 100].into_boxed_slice(),
             sample_count: 100,
@@ -187,7 +197,7 @@ mod tests {
         Metronome {
             click_samples: beep_samples,
             click_volume: 1.0,
-            frames_per_beat: samples_per_beat,
+            frames_per_beat,
             frames_since_last_beat: 0,
             last_frame_time: None,
         }
@@ -1,8 +1,8 @@
 use crate::*;

 /// Process MIDI events
-pub fn process_events<F: ChunkFactory>(
-    process_handler: &mut ProcessHandler<F>,
+pub fn process_events<F: ChunkFactory, const ROWS: usize>(
+    process_handler: &mut ProcessHandler<F, ROWS>,
     ps: &jack::ProcessScope,
 ) -> Result<()> {
     // First, collect all MIDI events into a fixed-size array
@@ -38,35 +38,36 @@ pub fn process_events<F: ChunkFactory>(
                     if u8::from(value) > 0 {
                         match u8::from(controller) {
                             20 => {
-                                // Button 1: Record/Play toggle
-                                process_handler.record_toggle()?;
+                                process_handler.handle_button_1()?;
                             }
                             21 => {
-                                // Button 2: Play/Mute
-                                process_handler.play_toggle()?;
+                                process_handler.handle_button_2()?;
                             }
                             22 => {
-                                // Button 3: Auto-stop record
-                                process_handler.record_auto_stop()?;
+                                process_handler.handle_button_3()?;
                             }
                             24 => {
-                                // Button 5: Clear track
-                                process_handler.clear_track()?;
+                                process_handler.handle_button_5()?;
+                            }
+                            30 => {
+                                process_handler.handle_button_up()?;
+                            }
+                            31 => {
+                                process_handler.handle_button_down()?;
                             }
                             _ => {
-                                // Other CC messages - ignore for now
+                                // Other CC messages - ignore
                             }
                         }
                     }
                 }
                 _ => {
-                    // Ignore other MIDI messages for now
+                    // Other MIDI messages - ignore
                 }
             }
         }
         Err(_) => {
-            // Skip malformed MIDI messages instead of panicking
-            continue;
+            // Malformed MIDI messages - ignore
         }
     }
 }
@@ -4,14 +4,16 @@ use std::path::PathBuf;
 /// Request to process a recorded chunk chain with sync offset
 #[derive(Debug)]
 pub struct PostRecordRequest {
+    pub row: usize,
     pub chunk_chain: Arc<AudioChunk>,
-    pub sync_offset: u32,
-    pub sample_rate: u32,
+    pub sync_offset: usize,
+    pub sample_rate: usize,
 }

 /// Response containing the consolidated buffer
 #[derive(Debug)]
 pub struct PostRecordResponse {
+    pub row: usize,
     pub consolidated_buffer: Box<[f32]>,
 }

@@ -26,11 +28,13 @@ impl PostRecordController {
     /// Send a post-record processing request (RT-safe)
     pub fn send_request(
         &self,
+        row: usize,
         chunk_chain: Arc<AudioChunk>,
-        sync_offset: u32,
-        sample_rate: u32,
+        sync_offset: usize,
+        sample_rate: usize,
     ) -> Result<()> {
         let request = PostRecordRequest {
+            row,
             chunk_chain,
             sync_offset,
             sample_rate,
@@ -113,6 +117,7 @@ impl PostRecordHandler {

         // Step 2: Send consolidated buffer back to RT thread immediately
         let response = PostRecordResponse {
+            row: request.row,
             consolidated_buffer,
         };

@@ -123,7 +128,7 @@ impl PostRecordHandler {
         // Step 3: Save WAV file in background (I/O intensive)
         // Use original chunk chain for saving (not reordered)
         let consolidated_chunk = AudioChunk::consolidate(&request.chunk_chain);
-        let file_path = self.get_file_path();
+        let file_path = self.get_file_path(request.row);

         match self
             .save_wav_file(&consolidated_chunk, request.sample_rate, &file_path)
@@ -174,7 +179,7 @@ impl PostRecordHandler {
     async fn save_wav_file(
         &self,
         chunk: &AudioChunk,
-        sample_rate: u32,
+        sample_rate: usize,
         file_path: &PathBuf,
     ) -> Result<()> {
         // Run WAV writing in blocking task to avoid blocking async runtime
@@ -184,7 +189,7 @@ impl PostRecordHandler {
         tokio::task::spawn_blocking(move || {
             let spec = hound::WavSpec {
                 channels: 1,
-                sample_rate,
+                sample_rate: sample_rate as _,
                 bits_per_sample: 32,
                 sample_format: hound::SampleFormat::Float,
             };
@@ -221,7 +226,7 @@ impl PostRecordHandler {
     }

     /// Get file path for track recording
-    fn get_file_path(&self) -> PathBuf {
-        self.directory.join("track.wav")
+    fn get_file_path(&self, row: usize) -> PathBuf {
+        self.directory.join(format!("row_{row}.wav"))
     }
 }
@@ -1,20 +1,18 @@
 use crate::*;

-// Testing constants for sync offset functionality
-const SYNC_OFFSET_BEATS: u32 = 2; // Start recording at beat 3 (0-indexed)
-const AUTO_STOP_BEATS: u32 = 4; // Record for 4 beats total
-
-pub struct ProcessHandler<F: ChunkFactory> {
-    track: Track,
-    playback_position: usize,
+pub struct ProcessHandler<F: ChunkFactory, const ROWS: usize> {
     pub ports: JackPorts,
     chunk_factory: F,
     metronome: Metronome,
     post_record_controller: PostRecordController,
+    column: Column<ROWS>,
+    selected_row: usize,
+    scratch_pad: Box<[f32]>,
 }

-impl<F: ChunkFactory> ProcessHandler<F> {
+impl<F: ChunkFactory, const ROWS: usize> ProcessHandler<F, ROWS> {
     pub fn new(
+        client: &jack::Client,
         ports: JackPorts,
         chunk_factory: F,
         beep_samples: Arc<AudioChunk>,
@@ -22,227 +20,113 @@ impl<F: ChunkFactory> ProcessHandler<F> {
         post_record_controller: PostRecordController,
     ) -> Result<Self> {
         Ok(Self {
-            track: Track::new(),
-            playback_position: 0,
             ports,
             chunk_factory,
             metronome: Metronome::new(beep_samples, state),
             post_record_controller,
+            column: Column::new(state.metronome.frames_per_beat),
+            selected_row: 0,
+            scratch_pad: vec![0.0; client.buffer_size() as usize].into_boxed_slice(),
         })
     }

-    /// Handle record/play toggle button (Button 1)
-    pub fn record_toggle(&mut self) -> Result<()> {
-        self.track.queue_record_toggle();
+    pub fn handle_button_1(&mut self) -> Result<()> {
+        self.column.handle_record_button(self.selected_row)
+    }
+
+    pub fn handle_button_2(&mut self) -> Result<()> {
+        self.column.handle_play_button(self.selected_row)
+    }
+
+    pub fn handle_button_3(&mut self) -> Result<()> {
         Ok(())
     }

-    /// Handle play/mute toggle button (Button 2)
-    pub fn play_toggle(&mut self) -> Result<()> {
-        self.track.queue_play_toggle();
+    pub fn handle_button_5(&mut self) -> Result<()> {
+        self.column.handle_clear_button(self.selected_row)
+    }
+
+    pub fn handle_button_up(&mut self) -> Result<()> {
+        if self.selected_row == 0 {
+            self.selected_row = ROWS - 1;
+        } else {
+            self.selected_row -= 1;
+        }
         Ok(())
     }

-    /// Handle auto-stop record button (Button 3)
-    pub fn record_auto_stop(&mut self) -> Result<()> {
-        let samples_per_beat = self.metronome.frames_per_beat();
-        let sync_offset = SYNC_OFFSET_BEATS * samples_per_beat;
-        let target_samples = AUTO_STOP_BEATS * samples_per_beat;
-
-        self.track
-            .queue_record_auto_stop(target_samples as usize, sync_offset as usize);
-        Ok(())
-    }
-
-    /// Handle clear button (Button 5)
-    pub fn clear_track(&mut self) -> Result<()> {
-        self.track.queue_clear();
+    pub fn handle_button_down(&mut self) -> Result<()> {
+        self.selected_row = (self.selected_row + 1) % ROWS;
         Ok(())
     }
 }

-impl<F: ChunkFactory> jack::ProcessHandler for ProcessHandler<F> {
+impl<F: ChunkFactory, const ROWS: usize> jack::ProcessHandler for ProcessHandler<F, ROWS> {
     fn process(&mut self, client: &jack::Client, ps: &jack::ProcessScope) -> jack::Control {
-        // Process MIDI first - this updates next_state on the track
-        if let Err(e) = midi::process_events(self, ps) {
-            log::error!("Error processing MIDI events: {}", e);
-            return jack::Control::Quit;
+        if let Err(e) = self.process_with_error_handling(client, ps) {
+            log::error!("Error processing audio: {}", e);
+            jack::Control::Quit
+        } else {
+            jack::Control::Continue
+        }
+    }
+}
+
+impl<F: ChunkFactory, const ROWS: usize> ProcessHandler<F, ROWS> {
+    fn process_with_error_handling(
+        &mut self,
+        client: &jack::Client,
+        ps: &jack::ProcessScope,
+    ) -> Result<()> {
+        // Check for consolidation response
+        if let Some(response) = self.post_record_controller.try_recv_response() {
+            self.column
+                .set_consolidated_buffer(response.row, response.consolidated_buffer)?;
         }

         // Process metronome and get beat timing information
-        let beat_sample_index = match self.metronome.process(ps, &mut self.ports) {
-            Ok(beat_index) => beat_index,
-            Err(e) => {
-                log::error!("Error processing metronome: {}", e);
-                return jack::Control::Quit;
-            }
-        }
-        .beat_in_buffer;
-
-        let buffer_size = client.buffer_size() as usize;
-        let state_before = self.track.current_state().clone();
-
-        // Calculate timing information for track processing
-        let timing = self.calculate_track_timing(beat_sample_index, &state_before);
-
-        // Process track audio with calculated timing
-        let should_consolidate =
-            match self
-                .track
-                .process(ps, &mut self.ports, timing, &mut self.chunk_factory)
-            {
-                Ok(consolidate) => consolidate,
-                Err(e) => {
-                    log::error!("Error processing track: {}", e);
-                    return jack::Control::Quit;
-                }
-            };
-
-        // Handle post-record processing
-        if let Err(e) =
-            self.handle_post_record_processing(should_consolidate, client.sample_rate() as u32)
-        {
-            log::error!("Error handling post-record processing: {}", e);
-            return jack::Control::Quit;
+        let timing = self.metronome.process(ps, &mut self.ports)?;
+
+        // Handle xruns
+        if timing.missed_frames > 0 {
+            self.column.handle_xrun(
+                &timing,
+                &mut self.chunk_factory,
+                |row, chunk, sync_offset| {
+                    self.post_record_controller.send_request(
+                        row,
+                        chunk,
+                        sync_offset,
+                        client.sample_rate(),
+                    )
+                },
+            )?;
         }

-        // Update playback position based on what happened
-        self.update_playback_position(beat_sample_index, buffer_size, &state_before);
+        // Process MIDI
+        midi::process_events(self, ps)?;

-        jack::Control::Continue
-    }
-}
-
-impl<F: ChunkFactory> ProcessHandler<F> {
-    /// Handle post-record processing: send requests and swap buffers
-    fn handle_post_record_processing(
-        &mut self,
-        should_consolidate: bool,
-        sample_rate: u32,
-    ) -> Result<()> {
-        // Send audio data for processing if track indicates consolidation needed
-        if should_consolidate {
-            if let Some((chunk_chain, sync_offset)) = self.track.get_audio_data_for_processing() {
+        // Process audio
+        let input_buffer = self.ports.audio_in.as_slice(ps);
+        let output_buffer = self.ports.audio_out.as_mut_slice(ps);
+        output_buffer.fill(0.0);
+
+        self.column.process(
+            &timing,
+            input_buffer,
+            output_buffer,
+            &mut self.scratch_pad,
+            &mut self.chunk_factory,
+            |row, chunk, sync_offset| {
                 self.post_record_controller.send_request(
-                    chunk_chain,
-                    sync_offset as u32,
-                    sample_rate,
-                )?;
-            }
-        }
-
-        // Check for consolidation response
-        if let Some(response) = self.post_record_controller.try_recv_response() {
-            self.track
-                .set_consolidated_buffer(response.consolidated_buffer)?;
-        }
+                    row,
+                    chunk,
+                    sync_offset,
+                    client.sample_rate(),
+                )
+            },
+        )?;

         Ok(())
     }
-
-    /// Calculate timing information for track processing
-    fn calculate_track_timing(
-        &self,
-        beat_sample_index: Option<u32>,
-        state_before: &TrackState,
-    ) -> TrackTiming {
-        match beat_sample_index {
-            None => {
-                // No beat in this buffer
-                TrackTiming::NoBeat {
-                    position: self.playback_position,
-                }
-            }
-            Some(beat_index) => {
-                let beat_index = beat_index as usize;
-                let pre_beat_position = self.playback_position;
-                let post_beat_position = self.calculate_post_beat_position(state_before);
-
-                TrackTiming::Beat {
-                    pre_beat_position,
-                    post_beat_position,
-                    beat_sample_index: beat_index,
-                }
-            }
-        }
-    }
-
-    /// Calculate the correct playback position after a beat transition
-    fn calculate_post_beat_position(&self, state_before: &TrackState) -> usize {
-        let state_after = self.track.next_state(); // Use next_state since transition hasn't happened yet
-
-        match (state_before, state_after) {
-            (_, TrackState::Playing) if !matches!(state_before, TrackState::Playing) => {
-                // Just started playing - start from beginning
-                // Note: In future Column implementation, this will be:
-                // column.get_sync_position() to sync with other playing tracks
-                0
-            }
-            (TrackState::Playing, TrackState::Playing) => {
-                // Continue playing - use current position
-                self.playback_position
-            }
-            _ => {
-                // Not playing after transition - position doesn't matter
-                self.playback_position
-            }
-        }
-    }
-
-    /// Update playback position after track processing
-    fn update_playback_position(
-        &mut self,
-        beat_sample_index: Option<u32>,
-        buffer_size: usize,
-        state_before: &TrackState,
-    ) {
-        let state_after = self.track.current_state().clone();
-
-        match beat_sample_index {
-            None => {
-                // No beat - simple position update
-                if *state_before == TrackState::Playing {
-                    self.advance_playback_position(buffer_size);
-                }
-            }
-            Some(beat_index) => {
-                let beat_index = beat_index as usize;
-
-                // Handle position updates around beat boundary
-                if beat_index > 0 && *state_before == TrackState::Playing {
-                    // Advance position for samples before beat
-                    self.advance_playback_position(beat_index);
-                }
-
-                // Check if state transition at beat affects position
-                if state_after == TrackState::Playing
-                    && !matches!(state_before, TrackState::Playing)
-                {
-                    // Started playing at beat - reset position to post-beat calculation
-                    self.playback_position = self.calculate_post_beat_position(state_before);
-                }
-
-                // Advance position for samples after beat if playing
-                if beat_index < buffer_size && state_after == TrackState::Playing {
-                    let samples_after_beat = buffer_size - beat_index;
-                    self.advance_playback_position(samples_after_beat);
-                }
-            }
-        }
-    }
-
-    /// Advance playback position with looping
-    fn advance_playback_position(&mut self, samples: usize) {
-        if self.track.len() == 0 {
-            self.playback_position = 0;
-            return;
-        }
-
-        self.playback_position += samples;
-
-        // Handle looping
-        while self.playback_position >= self.track.len() {
-            self.playback_position -= self.track.len();
-        }
-    }
 }
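Side note on the new row-selection handlers: handle_button_up and handle_button_down cycle selected_row through 0..ROWS with wrap-around. The same behaviour can be written with modular arithmetic; a tiny self-contained sketch (function names are illustrative, not from the crate):

    fn row_up(selected: usize, rows: usize) -> usize {
        // Equivalent to the if/else in handle_button_up: step back with wrap-around.
        (selected + rows - 1) % rows
    }

    fn row_down(selected: usize, rows: usize) -> usize {
        (selected + 1) % rows
    }

    fn main() {
        const ROWS: usize = 5;
        assert_eq!(row_up(0, ROWS), 4);
        assert_eq!(row_down(4, ROWS), 0);
    }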
@@ -16,7 +16,7 @@ pub struct ConnectionState {

 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct MetronomeState {
-    pub frames_per_beat: u32,
+    pub frames_per_beat: usize,
     pub click_volume: f32, // 0.0 to 1.0
 }

@@ -1,29 +1,5 @@
 use crate::*;

-#[derive(Debug, Clone, PartialEq)]
-pub enum TrackState {
-    Empty, // No audio data (---)
-    Idle, // Has data, not playing (READY)
-    Playing, // Currently playing (PLAY)
-    Recording, // Currently recording (REC) - manual stop
-    RecordingAutoStop {
-        target_samples: usize, // Auto-stop when this many samples recorded
-        sync_offset: usize, // Offset in samples from column start
-    },
-}
-
-#[derive(Debug)]
-pub enum TrackTiming {
-    NoBeat {
-        position: usize,
-    },
-    Beat {
-        pre_beat_position: usize,
-        post_beat_position: usize,
-        beat_sample_index: usize,
-    },
-}
-
 pub struct Track {
     audio_data: AudioData,
     current_state: TrackState,
@@ -31,6 +7,18 @@ pub struct Track {
     volume: f32,
 }

+#[derive(Debug, Clone, PartialEq)]
+enum TrackState {
+    Empty,
+    Idle,
+    Playing,
+    Recording,
+    RecordingAutoStop {
+        target_samples: usize,
+        sync_offset: usize,
+    },
+}
+
 impl Track {
     pub fn new() -> Self {
         Self {
@@ -41,130 +29,201 @@ impl Track {
         }
     }

-    /// Main audio processing method called from ProcessHandler
-    /// Returns true if track should be consolidated and saved
-    pub fn process<F: ChunkFactory>(
-        &mut self,
-        ps: &jack::ProcessScope,
-        ports: &mut JackPorts,
-        timing: TrackTiming,
-        chunk_factory: &mut F,
-    ) -> Result<bool> {
-        let input_buffer = ports.audio_in.as_slice(ps);
-        let output_buffer = ports.audio_out.as_mut_slice(ps);
-        let buffer_size = output_buffer.len();
-
-        let should_consolidate = match timing {
-            TrackTiming::NoBeat { position } => {
+    pub fn is_recording(&self) -> bool {
+        matches!(
+            self.current_state,
+            TrackState::Recording | TrackState::RecordingAutoStop { .. }
+        )
+    }
+
+    pub fn is_playing(&self) -> bool {
+        matches!(self.current_state, TrackState::Playing)
+    }
+
+    pub fn is_idle(&self) -> bool {
+        ! self.is_recording() && ! self.is_playing()
+    }
+
+    pub fn len(&self) -> usize {
+        self.audio_data.len()
+    }
+
+    pub fn volume(&self) -> f32 {
+        self.volume
+    }
+
+    pub fn set_volume(&mut self, volume: f32) {
+        self.volume = volume.clamp(0.0, 1.0);
+    }
+
+    pub fn record(&mut self) {
+        self.next_state = TrackState::Recording;
+    }
+
+    pub fn record_auto_stop(&mut self, target_samples: usize, sync_offset: usize) {
+        self.next_state = TrackState::RecordingAutoStop {
+            target_samples,
+            sync_offset,
+        };
+    }
+
+    pub fn play(&mut self) {
+        self.next_state = TrackState::Playing;
+    }
+
+    pub fn stop(&mut self) {
+        self.next_state = TrackState::Idle;
+    }
+
+    pub fn clear(&mut self) {
+        self.next_state = TrackState::Empty;
+    }
+
+    pub fn set_consolidated_buffer(&mut self, buffer: Box<[f32]>) -> Result<()> {
+        self.audio_data.set_consolidated_buffer(buffer)
+    }
+
+    pub fn handle_xrun<H>(
+        &mut self,
+        beat_in_missed: Option<u32>,
+        missed_frames: usize,
+        chunk_factory: &mut impl ChunkFactory,
+        post_record_handler: H,
+    ) -> Result<()>
+    where
+        H: Fn(Arc<AudioChunk>, usize) -> Result<()>,
+    {
+        match beat_in_missed {
+            None => {
+                if self.is_recording() {
+                    self.audio_data
+                        .append_silence(missed_frames as _, chunk_factory)
+                } else {
+                    Ok(())
+                }
+            }
+            Some(beat_offset) => {
+                // Insert silence before beat with current state
+                if beat_offset > 0 && self.is_recording() {
+                    self.audio_data
+                        .append_silence(beat_offset as _, chunk_factory)?;
+                }
+                // Apply state transition at beat boundary
+                self.apply_state_transition(chunk_factory, post_record_handler)?;
+                // Insert silence after beat with new state
+                let frames_after_beat = missed_frames - beat_offset as usize;
+                if frames_after_beat > 0 && self.is_recording() {
+                    self.audio_data
+                        .append_silence(frames_after_beat, chunk_factory)?;
+                }
+                Ok(())
+            }
+        }
+    }
+
+    /// Audio processing
+    pub fn process<H>(
+        &mut self,
+        playback_position: usize,
+        beat_in_buffer: Option<u32>,
+        input_buffer: &[f32],
+        output_buffer: &mut [f32],
+        chunk_factory: &mut impl ChunkFactory,
+        post_record_handler: H,
+    ) -> Result<()>
+    where
+        H: Fn(Arc<AudioChunk>, usize) -> Result<()>,
+    {
+        match beat_in_buffer {
+            None => {
                 // No beat in this buffer - process entire buffer with current state
                 self.process_audio_range(
                     input_buffer,
                     output_buffer,
-                    0,
-                    buffer_size,
-                    position,
+                    playback_position,
                     chunk_factory,
-                )?;
-                false // No state transition possible without beat
+                )
             }
-            TrackTiming::Beat {
-                pre_beat_position,
-                post_beat_position,
-                beat_sample_index,
-            } => {
-                if beat_sample_index > 0 {
-                    // Process samples before beat with current state
+            Some(beat_index_in_buffer) => {
+                // Process samples before beat with current state
+                if beat_index_in_buffer > 0 {
                     self.process_audio_range(
-                        input_buffer,
-                        output_buffer,
-                        0,
-                        beat_sample_index,
-                        pre_beat_position,
+                        &input_buffer[..beat_index_in_buffer as _],
+                        &mut output_buffer[..beat_index_in_buffer as _],
+                        playback_position,
                         chunk_factory,
                     )?;
                 }

-                // Apply state transition at beat boundary and check if consolidation needed
-                let should_consolidate = self.apply_state_transition(chunk_factory)?;
+                // Apply state transition at beat boundary
+                self.apply_state_transition(chunk_factory, post_record_handler)?;

-                if beat_sample_index < buffer_size {
-                    // Process samples after beat with new current state
+                // Process samples after beat with new current state
+                if (beat_index_in_buffer as usize) < output_buffer.len() {
+                    // Calculate position after beat for remaining samples
+                    let mut post_beat_position = playback_position + beat_index_in_buffer as usize;
+                    if self.audio_data.len() > 0 {
+                        post_beat_position %= self.audio_data.len();
+                    }
+
                     self.process_audio_range(
-                        input_buffer,
-                        output_buffer,
-                        beat_sample_index,
-                        buffer_size,
+                        &input_buffer[beat_index_in_buffer as _..],
+                        &mut output_buffer[beat_index_in_buffer as _..],
                         post_beat_position,
                         chunk_factory,
                     )?;
                 }

-                should_consolidate
+                Ok(())
             }
-        };
+        }

-        Ok(should_consolidate)
     }

-    /// Process audio for a specific range within the buffer
     fn process_audio_range<F: ChunkFactory>(
         &mut self,
         input_buffer: &[f32],
         output_buffer: &mut [f32],
-        start_index: usize,
-        end_index: usize,
         playback_position: usize,
         chunk_factory: &mut F,
     ) -> Result<()> {
-        let sample_count = end_index - start_index;
-        if sample_count == 0 {
+        if output_buffer.is_empty() {
             return Ok(());
         }

         match &mut self.current_state {
             TrackState::Empty | TrackState::Idle => {
-                // Output silence for this range
-                output_buffer[start_index..end_index].fill(0.0);
+                output_buffer.fill(0.0);
             }
             TrackState::Recording => {
-                // Record input samples (manual recording)
-                let samples_to_record = &input_buffer[start_index..end_index];
                 self.audio_data
-                    .append_samples(samples_to_record, chunk_factory)?;
-                // Output silence during recording
-                output_buffer[start_index..end_index].fill(0.0);
+                    .append_samples(input_buffer, chunk_factory)?;
+                output_buffer.fill(0.0);
             }
             TrackState::RecordingAutoStop { target_samples, .. } => {
-                // Record input samples with auto-stop logic
-                let samples_to_record = &input_buffer[start_index..end_index];
                 let current_length = self.audio_data.len();

                 if current_length < *target_samples {
-                    // Still recording - determine how many samples to actually record
                     let samples_needed = *target_samples - current_length;
-                    let samples_to_append = samples_to_record.len().min(samples_needed);
+                    let samples_to_append = input_buffer.len().min(samples_needed);

                     if samples_to_append > 0 {
-                        self.audio_data.append_samples(
-                            &samples_to_record[..samples_to_append],
-                            chunk_factory,
-                        )?;
+                        self.audio_data
+                            .append_samples(&input_buffer[..samples_to_append], chunk_factory)?;
                     }

-                    // Check if we've reached target and should auto-transition
-                    if self.audio_data.len() >= *target_samples {
-                        self.next_state = TrackState::Playing;
-                    }
+                    // Assert: auto-stop target should never be exceeded mid-buffer
+                    assert!(
+                        self.audio_data.len() <= *target_samples,
+                        "Auto-stop recording target exceeded mid-buffer - this indicates incorrect usage. \
+                        Target: {}, actual: {}", *target_samples, self.audio_data.len()
+                    );
                 }

-                // Output silence during recording
-                output_buffer[start_index..end_index].fill(0.0);
+                output_buffer.fill(0.0);
             }
             TrackState::Playing => {
-                // Playback with looping
                 self.audio_data.copy_samples_to_output(
-                    &mut output_buffer[start_index..end_index],
+                    output_buffer,
                     playback_position,
                     self.volume,
                 )?;
@@ -176,13 +235,25 @@ impl Track {

     /// Apply state transition from next_state to current_state
     /// Returns true if track should be consolidated and saved
-    fn apply_state_transition<F: ChunkFactory>(&mut self, chunk_factory: &mut F) -> Result<bool> {
-        // Check if this is a recording → playing transition (consolidation trigger)
-        let should_consolidate = matches!(
-            (&self.current_state, &self.next_state),
-            (TrackState::Recording, TrackState::Playing)
-                | (TrackState::RecordingAutoStop { .. }, TrackState::Playing)
-        );
+    fn apply_state_transition<H>(
+        &mut self,
+        chunk_factory: &mut impl ChunkFactory,
+        post_record_handler: H,
+    ) -> Result<()>
+    where
+        H: Fn(Arc<AudioChunk>, usize) -> Result<()>,
+    {
+        // Check for auto-stop recording completion and transition to playing if no other state transition
+        if self.current_state == self.next_state {
+            if let TrackState::RecordingAutoStop { target_samples, .. } = &self.current_state {
+                if self.audio_data.len() >= *target_samples {
+                    self.next_state = TrackState::Playing;
+                }
+            }
+        }
+
+        // remember recording before transition
+        let was_recording = self.is_recording();

         // Handle transitions that require setup
         match (&self.current_state, &self.next_state) {
@@ -217,118 +288,11 @@ impl Track {
         // Apply the state transition
         self.current_state = self.next_state.clone();

-        Ok(should_consolidate)
-    }
-
-    /// Get audio data for post-record processing (returns chunk and sync offset)
-    pub fn get_audio_data_for_processing(&self) -> Option<(Arc<AudioChunk>, usize)> {
-        self.audio_data.get_chunk_for_processing()
-    }
-
-    /// Set consolidated buffer (for swapping in consolidated audio data)
-    pub fn set_consolidated_buffer(&mut self, buffer: Box<[f32]>) -> Result<()> {
-        self.audio_data.set_consolidated_buffer(buffer)
-    }
-
-    // Public accessors and commands for MIDI handling
-    pub fn current_state(&self) -> &TrackState {
-        &self.current_state
-    }
-
-    pub fn next_state(&self) -> &TrackState {
-        &self.next_state
-    }
-
-    pub fn len(&self) -> usize {
-        self.audio_data.len()
-    }
-
-    pub fn volume(&self) -> f32 {
-        self.volume
-    }
-
-    pub fn set_volume(&mut self, volume: f32) {
-        self.volume = volume.clamp(0.0, 1.0);
-    }
-
-    /// Handle record/play toggle command (sets next_state)
-    pub fn queue_record_toggle(&mut self) {
-        match self.current_state {
-            TrackState::Empty | TrackState::Idle => {
-                self.next_state = TrackState::Recording;
-            }
-            TrackState::Recording => {
-                self.next_state = TrackState::Playing;
-            }
-            TrackState::RecordingAutoStop { .. } => {
-                // Auto-stop recording - can't manually stop, wait for auto-transition
-                self.next_state = self.current_state.clone();
-            }
-            TrackState::Playing => {
-                self.next_state = TrackState::Idle;
-            }
-        }
-    }
-
-    /// Handle auto-stop record command (sets next_state)
-    pub fn queue_record_auto_stop(&mut self, target_samples: usize, sync_offset: usize) {
-        match self.current_state {
-            TrackState::Empty | TrackState::Idle => {
-                self.next_state = TrackState::RecordingAutoStop {
-                    target_samples,
-                    sync_offset,
-                };
-            }
-            TrackState::Recording => {
-                // Switch from manual to auto-stop recording
-                self.next_state = TrackState::RecordingAutoStop {
-                    target_samples,
-                    sync_offset,
-                };
-            }
-            TrackState::RecordingAutoStop { .. } => {
-                // Already auto-recording - update parameters
-                self.next_state = TrackState::RecordingAutoStop {
-                    target_samples,
-                    sync_offset,
-                };
-            }
-            TrackState::Playing => {
-                // Stop playing and start auto-recording
-                self.next_state = TrackState::RecordingAutoStop {
-                    target_samples,
-                    sync_offset,
-                };
-            }
-        }
-    }
-
-    /// Handle play/mute toggle command (sets next_state)
-    pub fn queue_play_toggle(&mut self) {
-        match self.current_state {
-            TrackState::Empty => {
-                // Can't play empty track
-                self.next_state = TrackState::Empty;
-            }
-            TrackState::Idle => {
-                if !self.audio_data.is_empty() {
-                    self.next_state = TrackState::Playing;
-                } else {
-                    self.next_state = TrackState::Idle;
-                }
-            }
-            TrackState::Recording | TrackState::RecordingAutoStop { .. } => {
-                // Don't change state while recording
-                self.next_state = self.current_state.clone();
-            }
-            TrackState::Playing => {
-                self.next_state = TrackState::Idle;
-            }
-        }
-    }
-
-    /// Handle clear command (sets next_state)
-    pub fn queue_clear(&mut self) {
-        self.next_state = TrackState::Empty;
+        // Handle post-record processing
+        if was_recording && !self.is_recording() {
+            let (chunk, sync_offset) = self.audio_data.get_chunk_for_processing()?;
+            post_record_handler(chunk, sync_offset)?;
+        }
+        Ok(())
     }
 }
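Reading note on apply_state_transition: consolidation is now triggered generically whenever a transition leaves any recording state, rather than by matching the recording-to-playing pair explicitly as the old code did. A minimal sketch of that trigger condition on a simplified state enum (illustrative, not the crate's types):

    #[derive(Clone, Copy, PartialEq)]
    enum State {
        Empty,
        Idle,
        Playing,
        Recording,
    }

    fn is_recording(s: State) -> bool {
        s == State::Recording
    }

    /// True when a transition should hand the recorded audio to the post-record handler.
    fn should_consolidate(before: State, after: State) -> bool {
        is_recording(before) && !is_recording(after)
    }

    fn main() {
        assert!(should_consolidate(State::Recording, State::Playing));
        assert!(!should_consolidate(State::Playing, State::Idle));
    }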