Improved metronome for better xrun handling

Geens 2025-06-10 19:28:11 +02:00
parent deb6bd8f4c
commit 7a78c6b9e6
11 changed files with 680 additions and 337 deletions
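The heart of the change is a frame-time continuity check in the new Metronome::calculate_timing: the expected start of each JACK buffer is the previous start plus the buffer size, and any difference is counted as frames lost to an xrun. A minimal, self-contained sketch of that arithmetic (the helper name is hypothetical; the real logic lives in src/metronome.rs below):

/// Hypothetical helper mirroring the gap check in Metronome::calculate_timing:
/// frames lost between the previous buffer and the current one.
fn missed_frames(last_frame_time: u32, buffer_size: u32, current_frame_time: u32) -> u32 {
    // Expected start of this buffer, with u32 wrap-around handled explicitly.
    let expected = last_frame_time.wrapping_add(buffer_size);
    // A non-zero difference means an xrun dropped that many frames.
    current_frame_time.wrapping_sub(expected)
}

fn main() {
    // Contiguous buffers: 1128 + 128 = 1256 was expected and delivered.
    assert_eq!(missed_frames(1128, 128, 1256), 0);
    // Xrun: 1428 arrived instead of 1256, so 172 frames were lost.
    assert_eq!(missed_frames(1128, 128, 1428), 172);
}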

View File

@@ -80,23 +80,20 @@ impl AudioData {
                output_slice.fill(0.0);
                Ok(())
            }
            Self::Unconsolidated {
                chunks,
                sync_offset,
                length,
            } => self.copy_unconsolidated_samples(
                chunks,
                *sync_offset,
                *length,
                output_slice,
                logical_position,
                volume,
            ),
            Self::Consolidated { buffer } => {
                self.copy_consolidated_samples(buffer, output_slice, logical_position, volume)
            }
        }
    }
@@ -104,9 +101,11 @@ impl AudioData {
    /// Get underlying chunk for post-record processing
    pub fn get_chunk_for_processing(&self) -> Option<(Arc<AudioChunk>, usize)> {
        match self {
            Self::Unconsolidated {
                chunks,
                sync_offset,
                ..
            } => Some((chunks.clone(), *sync_offset)),
            _ => None,
        }
    }
@@ -114,7 +113,9 @@ impl AudioData {
    /// Replace with consolidated buffer from post-record processing
    pub fn set_consolidated_buffer(&mut self, buffer: Box<[f32]>) -> Result<()> {
        match self {
            Self::Unconsolidated {
                length: old_length, ..
            } => {
                if buffer.len() != *old_length {
                    return Err(LooperError::OutOfBounds(std::panic::Location::caller()));
                }
@@ -248,7 +249,11 @@ mod tests {
        let audio_data = AudioData::new_unconsolidated(&mut factory, sync_offset).unwrap();

        match audio_data {
            AudioData::Unconsolidated {
                sync_offset: offset,
                length,
                ..
            } => {
                assert_eq!(offset, 100);
                assert_eq!(length, 0);
            }
@@ -284,7 +289,9 @@ mod tests {
        let audio_data = AudioData::new_empty();
        let mut output = vec![99.0; 4]; // Fill with non-zero to verify silence

        audio_data
            .copy_samples_to_output(&mut output, 0, 1.0)
            .unwrap();
        assert_eq!(output, vec![0.0, 0.0, 0.0, 0.0]);
    }
@@ -299,7 +306,9 @@ mod tests {
        audio_data.append_samples(&samples, &mut factory).unwrap();

        let mut output = vec![0.0; 4];
        audio_data
            .copy_samples_to_output(&mut output, 0, 1.0)
            .unwrap();
        assert_eq!(output, vec![1.0, 2.0, 3.0, 4.0]);
    }
@@ -316,7 +325,9 @@ mod tests {
        audio_data.append_samples(&samples, &mut factory).unwrap();

        let mut output = vec![0.0; 4];
        audio_data
            .copy_samples_to_output(&mut output, 0, 1.0)
            .unwrap(); // Start at logical beat 1

        // Should get: [C, D, A, B] = [30.0, 40.0, 10.0, 20.0]
        assert_eq!(output, vec![30.0, 40.0, 10.0, 20.0]);
@@ -331,7 +342,9 @@ mod tests {
        audio_data.append_samples(&samples, &mut factory).unwrap();

        let mut output = vec![0.0; 4];
        audio_data
            .copy_samples_to_output(&mut output, 0, 0.5)
            .unwrap(); // 50% volume
        assert_eq!(output, vec![0.5, 1.0, 1.5, 2.0]);
    }
@@ -345,7 +358,9 @@ mod tests {
        audio_data.append_samples(&samples, &mut factory).unwrap();

        let mut output = vec![0.0; 6]; // Request more samples than available
        audio_data
            .copy_samples_to_output(&mut output, 0, 1.0)
            .unwrap();

        // Should loop: [1.0, 2.0, 1.0, 2.0, 1.0, 2.0]
        assert_eq!(output, vec![1.0, 2.0, 1.0, 2.0, 1.0, 2.0]);
@@ -363,7 +378,9 @@ mod tests {
        audio_data.append_samples(&samples, &mut factory).unwrap();

        let mut output = vec![0.0; 6]; // Request 2 full loops
        audio_data
            .copy_samples_to_output(&mut output, 0, 1.0)
            .unwrap();

        // Should get: [B, C, A, B, C, A] = [20.0, 30.0, 10.0, 20.0, 30.0, 10.0]
        assert_eq!(output, vec![20.0, 30.0, 10.0, 20.0, 30.0, 10.0]);
@@ -377,7 +394,9 @@ mod tests {
        let audio_data = AudioData::Consolidated { buffer };

        let mut output = vec![0.0; 4];
        audio_data
            .copy_samples_to_output(&mut output, 0, 1.0)
            .unwrap();
        assert_eq!(output, vec![1.0, 2.0, 3.0, 4.0]);
    }
@@ -417,7 +436,9 @@ mod tests {
        // Create consolidated buffer with same length
        let consolidated_buffer = vec![10.0, 20.0, 30.0, 40.0].into_boxed_slice();
        audio_data
            .set_consolidated_buffer(consolidated_buffer)
            .unwrap();

        // Should now be Consolidated variant
        match audio_data {
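The hunks above keep AudioData's two-variant shape: recording appends into an unconsolidated chunk chain, and post-record processing later swaps in a single consolidated buffer of the same length. A rough sketch of that pattern (type, field, and method names are illustrative assumptions, not the crate's exact definitions):

// Illustrative two-variant shape (names are assumptions, not the crate's exact type).
enum AudioDataSketch {
    // While recording: chunked storage plus the sync offset and total recorded length.
    Unconsolidated { chunks: Vec<Vec<f32>>, sync_offset: usize, length: usize },
    // After post-record processing: one contiguous, loop-aligned buffer.
    Consolidated { buffer: Box<[f32]> },
}

#[derive(Debug, PartialEq)]
struct LengthMismatch;

impl AudioDataSketch {
    /// Mirrors set_consolidated_buffer: the swapped-in buffer must match the recorded length.
    fn set_consolidated(&mut self, buffer: Box<[f32]>) -> Result<(), LengthMismatch> {
        let expected = match self {
            Self::Unconsolidated { length, .. } => *length,
            Self::Consolidated { .. } => return Err(LengthMismatch),
        };
        if buffer.len() != expected {
            return Err(LengthMismatch);
        }
        *self = Self::Consolidated { buffer };
        Ok(())
    }
}

fn main() {
    let mut data = AudioDataSketch::Unconsolidated {
        chunks: vec![vec![1.0, 2.0], vec![3.0, 4.0]],
        sync_offset: 0,
        length: 4,
    };
    // A wrong-length buffer is rejected, mirroring the OutOfBounds check above.
    assert_eq!(data.set_consolidated(vec![0.0; 3].into_boxed_slice()), Err(LengthMismatch));
    assert!(data.set_consolidated(vec![0.0; 4].into_boxed_slice()).is_ok());
}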

View File

@@ -1,16 +1,17 @@
mod allocator;
mod audio_chunk;
mod audio_data;
mod beep;
mod chunk_factory;
mod connection_manager;
mod looper_error;
mod metronome;
mod midi;
mod notification_handler;
mod persistence_manager;
mod post_record_handler;
mod process_handler;
mod state;
mod track;

use std::sync::Arc;
@@ -18,19 +19,19 @@ use std::sync::Arc;
use allocator::Allocator;
use audio_chunk::AudioChunk;
use audio_data::AudioData;
use beep::generate_beep;
use chunk_factory::ChunkFactory;
use connection_manager::ConnectionManager;
use looper_error::LooperError;
use looper_error::Result;
use metronome::Metronome;
use notification_handler::JackNotification;
use notification_handler::NotificationHandler;
use persistence_manager::PersistenceManager;
use post_record_handler::PostRecordController;
use post_record_handler::PostRecordHandler;
use process_handler::ProcessHandler;
use state::State;
use track::Track;
use track::TrackState;
use track::TrackTiming;
@@ -58,14 +59,15 @@ async fn main() {
    let notification_handler = NotificationHandler::new();
    let mut notification_channel = notification_handler.subscribe();

    let (mut persistence_manager, state_watch) =
        PersistenceManager::new(notification_handler.subscribe());

    // Load state values for metronome configuration
    let initial_state = state_watch.borrow().clone();

    // Create post-record handler and get controller for ProcessHandler
    let (mut post_record_handler, post_record_controller) =
        PostRecordHandler::new().expect("Could not create post-record handler");

    let process_handler = ProcessHandler::new(
        ports,
@@ -73,7 +75,8 @@ async fn main() {
        beep_samples,
        &initial_state,
        post_record_controller,
    )
    .expect("Could not create process handler");

    let mut connection_manager = ConnectionManager::new(
        state_watch,

src/beep.rs (new file, 25 lines)
View File

@@ -0,0 +1,25 @@
use crate::*;

/// Generate a 100ms sine wave beep at 1000Hz
pub fn generate_beep<F: ChunkFactory>(
    sample_rate: u32,
    chunk_factory: &mut F,
) -> Result<Arc<AudioChunk>> {
    const FREQUENCY_HZ: f32 = 1000.0;
    const DURATION_MS: f32 = 100.0;

    let sample_count = ((sample_rate as f32) * (DURATION_MS / 1000.0)) as usize;
    let mut samples = Vec::with_capacity(sample_count);

    for i in 0..sample_count {
        let t = i as f32 / sample_rate as f32;
        let sample = (std::f32::consts::TAU * FREQUENCY_HZ * t).sin();
        samples.push(sample);
    }

    // Create AudioChunk and fill it with samples
    let mut chunk = chunk_factory.create_chunk()?;
    chunk.append_samples(&samples, chunk_factory)?;

    Ok(chunk)
}
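For reference, the duration/frequency arithmetic above is straightforward: sample_count = sample_rate * duration, and each sample is sin(TAU * f * t). A standalone sketch with an assumed 48 kHz rate (hypothetical helper, independent of ChunkFactory):

/// Hypothetical standalone version of the same math: 100 ms of a 1 kHz sine.
fn beep(sample_rate: u32) -> Vec<f32> {
    const FREQUENCY_HZ: f32 = 1000.0;
    const DURATION_MS: f32 = 100.0;
    let sample_count = (sample_rate as f32 * DURATION_MS / 1000.0) as usize;
    (0..sample_count)
        .map(|i| {
            let t = i as f32 / sample_rate as f32;
            (std::f32::consts::TAU * FREQUENCY_HZ * t).sin()
        })
        .collect()
}

fn main() {
    // At an assumed 48 kHz sample rate, 100 ms comes out to 4800 samples.
    assert_eq!(beep(48_000).len(), 4_800);
}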

View File

@@ -1,123 +1,368 @@
use crate::*;

pub struct Metronome {
    // Audio playback
    click_samples: Arc<AudioChunk>,
    click_volume: f32,

    // Timing state
    frames_per_beat: u32,
    frames_since_last_beat: u32,  // Where we are in the current beat cycle
    last_frame_time: Option<u32>, // For xrun detection
}

#[derive(Debug, Clone, PartialEq)]
pub struct BufferTiming {
    /// Beat index within the current buffer (if any)
    pub beat_in_buffer: Option<u32>,
    /// Number of frames missed due to xrun (0 if no xrun)
    pub missed_frames: u32,
    /// Beat index within the missed frames (if any)
    pub beat_in_missed: Option<u32>,
}

impl Metronome {
    pub fn new(click_samples: Arc<AudioChunk>, state: &State) -> Self {
        Self {
            click_samples,
            click_volume: state.metronome.click_volume,
            frames_per_beat: state.metronome.frames_per_beat,
            frames_since_last_beat: 0,
            last_frame_time: None,
        }
    }

    pub fn frames_per_beat(&self) -> u32 {
        self.frames_per_beat
    }

    /// Process audio for current buffer, writing to output slice
    pub fn process(
        &mut self,
        ps: &jack::ProcessScope,
        ports: &mut JackPorts,
    ) -> Result<BufferTiming> {
        let buffer_size = ps.n_frames();
        let current_frame_time = ps.last_frame_time();

        // Calculate timing for this buffer
        let timing = self.calculate_timing(current_frame_time, buffer_size)?;

        // Get output buffer for click track
        let click_output = ports.click_track_out.as_mut_slice(ps);
        self.render_click(buffer_size, current_frame_time, &timing, click_output);

        Ok(timing)
    }

    fn render_click(
        &mut self,
        buffer_size: u32,
        current_frame_time: u32,
        timing: &BufferTiming,
        click_output: &mut [f32],
    ) {
        // Calculate current position within the beep (frames since last beat started)
        let frames_since_beat_start = current_frame_time - self.frames_since_last_beat;
        let click_length = self.click_samples.sample_count as u32;

        if let Some(beat_offset) = timing.beat_in_buffer {
            // Write silence up to beat boundary
            let silence_end = beat_offset.min(buffer_size);
            click_output[0..silence_end as _].fill(0.0);

            // Write click samples from boundary onward
            if beat_offset < buffer_size {
                let remaining_buffer = buffer_size - beat_offset;
                let samples_to_write = remaining_buffer.min(click_length);

                // Copy click samples in bulk
                let dest = &mut click_output
                    [beat_offset as usize..beat_offset as usize + samples_to_write as usize];
                if self.click_samples.copy_samples(dest, 0).is_ok() {
                    // Apply volume scaling with iterators
                    dest.iter_mut()
                        .for_each(|sample| *sample *= self.click_volume);
                }

                // Fill remaining buffer with silence
                click_output[(beat_offset as usize + samples_to_write as usize)..].fill(0.0);
            }
        } else if frames_since_beat_start < click_length {
            // Continue playing click from previous beat if still within beep duration
            let click_start_offset = frames_since_beat_start;
            let remaining_click_samples = click_length - click_start_offset;
            let samples_to_write = buffer_size.min(remaining_click_samples);

            // Copy remaining beep samples in bulk
            let dest = &mut click_output[0..samples_to_write as _];
            if self
                .click_samples
                .copy_samples(dest, click_start_offset as _)
                .is_ok()
            {
                // Apply volume scaling with iterators
                dest.iter_mut()
                    .for_each(|sample| *sample *= self.click_volume);
            }

            // Fill remaining buffer with silence
            click_output[samples_to_write as _..].fill(0.0);
        } else {
            click_output.fill(0.0);
        }
    }

    pub fn calculate_timing(
        &mut self,
        current_frame_time: u32,
        buffer_size: u32,
    ) -> Result<BufferTiming> {
        // Detect xrun
        let (missed_samples, beat_in_missed) = if let Some(last) = self.last_frame_time {
            let expected = last.wrapping_add(buffer_size); // Handle u32 wrap
            if current_frame_time != expected {
                // We have a gap
                let missed = current_frame_time.wrapping_sub(expected);

                // Check if we missed multiple beats
                let total_samples = self.frames_since_last_beat + missed + buffer_size;
                if total_samples >= 2 * self.frames_per_beat {
                    return Err(LooperError::Xrun(std::panic::Location::caller()));
                }

                // Check if a beat occurred in the missed section
                let beat_in_missed = if self.frames_since_last_beat + missed >= self.frames_per_beat
                {
                    Some(self.frames_per_beat - self.frames_since_last_beat)
                } else {
                    None
                };

                (missed, beat_in_missed)
            } else {
                (0, None)
            }
        } else {
            // First call
            (0, None)
        };

        // Check for beat in current buffer
        // We need to account for any missed samples here too
        let start_position = (self.frames_since_last_beat + missed_samples) % self.frames_per_beat;
        let beat_in_buffer = if start_position + buffer_size >= self.frames_per_beat {
            Some(self.frames_per_beat - start_position)
        } else {
            None
        };

        // Update state - advance by total samples (missed + buffer)
        self.frames_since_last_beat =
            (self.frames_since_last_beat + missed_samples + buffer_size) % self.frames_per_beat;
        self.last_frame_time = Some(current_frame_time);

        Ok(BufferTiming {
            beat_in_buffer,
            missed_frames: missed_samples,
            beat_in_missed,
        })
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    fn create_test_metronome(samples_per_beat: u32) -> Metronome {
        let beep_samples = Arc::new(AudioChunk {
            samples: vec![1.0; 100].into_boxed_slice(),
            sample_count: 100,
            next: None,
        });

        Metronome {
            click_samples: beep_samples,
            click_volume: 1.0,
            frames_per_beat: samples_per_beat,
            frames_since_last_beat: 0,
            last_frame_time: None,
        }
    }

    #[test]
    fn test_first_call_initialization() {
        let mut metronome = create_test_metronome(1000);
        let result = metronome.calculate_timing(5000, 128).unwrap();

        assert_eq!(metronome.frames_since_last_beat, 128);
        assert_eq!(metronome.last_frame_time, Some(5000));
        assert_eq!(result.missed_frames, 0);
        assert_eq!(result.beat_in_missed, None);
        assert_eq!(result.beat_in_buffer, None);
    }
    #[test]
    fn test_normal_buffer_no_beat() {
        let mut metronome = create_test_metronome(1000);

        // Initialize at time 1000
        metronome.calculate_timing(1000, 128).unwrap();
        assert_eq!(metronome.frames_since_last_beat, 128);

        // Next buffer at 1128 - no beat expected
        let result = metronome.calculate_timing(1128, 128).unwrap();
        assert_eq!(metronome.frames_since_last_beat, 256);
        assert_eq!(result.missed_frames, 0);
        assert_eq!(result.beat_in_missed, None);
        assert_eq!(result.beat_in_buffer, None);
    }

    #[test]
    fn test_beat_in_buffer() {
        let mut metronome = create_test_metronome(1000);

        // Initialize at time 0
        metronome.calculate_timing(0, 512).unwrap();
        assert_eq!(metronome.frames_since_last_beat, 512);

        // Next buffer: 512 -> 1024, beat at 1000 (offset 488)
        let result = metronome.calculate_timing(512, 512).unwrap();
        assert_eq!(result.missed_frames, 0);
        assert_eq!(result.beat_in_missed, None);
        assert_eq!(result.beat_in_buffer, Some(488)); // 1000 - 512
        assert_eq!(metronome.frames_since_last_beat, 24); // (512 + 512) % 1000
    }

    #[test]
    fn test_xrun_no_missed_beat() {
        let mut metronome = create_test_metronome(1000);

        // Initialize at time 1000
        metronome.calculate_timing(1000, 128).unwrap();
        assert_eq!(metronome.frames_since_last_beat, 128);

        // Normal buffer at 1128
        metronome.calculate_timing(1128, 128).unwrap();
        assert_eq!(metronome.frames_since_last_beat, 256);

        // Xrun: expected 1256 but got 1428 (172 samples missed)
        let result = metronome.calculate_timing(1428, 128).unwrap();
        assert_eq!(result.missed_frames, 172);
        assert_eq!(result.beat_in_missed, None);
        assert_eq!(result.beat_in_buffer, None);
        assert_eq!(metronome.frames_since_last_beat, 556); // 256 + 172 + 128
    }

    #[test]
    fn test_xrun_with_missed_beat() {
        let mut metronome = create_test_metronome(1000);

        // Initialize at time 1000
        metronome.calculate_timing(1000, 128).unwrap();
        assert_eq!(metronome.frames_since_last_beat, 128);

        // Normal buffer at 1128
        metronome.calculate_timing(1128, 128).unwrap();
        assert_eq!(metronome.frames_since_last_beat, 256);

        // Xrun: expected 1256 but got 2228 (972 samples missed)
        // We're at position 256, miss 972 samples = 1228 total
        // Beat occurs at position 1000, so beat_in_missed = 1000 - 256 = 744
        let result = metronome.calculate_timing(2228, 128).unwrap();
        assert_eq!(result.missed_frames, 972);
        assert_eq!(result.beat_in_missed, Some(744)); // 1000 - 256
        assert_eq!(result.beat_in_buffer, None);
        assert_eq!(metronome.frames_since_last_beat, 356); // (256 + 972 + 128) % 1000
    }

    #[test]
    fn test_xrun_with_missed_beat_and_upcoming_beat() {
        let mut metronome = create_test_metronome(1000);

        // Initialize at time 1000
        metronome.calculate_timing(1000, 128).unwrap();

        // Normal buffer at 1128
        metronome.calculate_timing(1128, 128).unwrap();
        assert_eq!(metronome.frames_since_last_beat, 256);

        // Xrun: expected 1256 but got 2078 (822 samples missed)
        // We're at position 256, miss 822 samples = 1078 total
        // Beat occurs at position 1000, so beat_in_missed = 1000 - 256 = 744
        // After missed: position = 78, buffer = 128, no beat in buffer
        let result = metronome.calculate_timing(2078, 128).unwrap();
        assert_eq!(result.missed_frames, 822);
        assert_eq!(result.beat_in_missed, Some(744)); // 1000 - 256
        assert_eq!(result.beat_in_buffer, None); // 78 + 128 < 1000
        assert_eq!(metronome.frames_since_last_beat, 206); // (256 + 822 + 128) % 1000
    }

    #[test]
    fn test_xrun_multiple_beats_error() {
        let mut metronome = create_test_metronome(1000);

        // Initialize at time 1000
        metronome.calculate_timing(1000, 128).unwrap();

        // Normal buffer at 1128
        metronome.calculate_timing(1128, 128).unwrap();

        // Xrun: expected 1256 but got 3328 (2072 samples missed)
        // Total advancement would be 256 + 2072 + 128 = 2456 samples
        // That's more than 2 beats (2000 samples), so error
        let result = metronome.calculate_timing(3328, 128);
        assert!(result.is_err());
    }

    #[test]
    fn test_consecutive_buffers_with_beat() {
        let mut metronome = create_test_metronome(1000);

        // First buffer - initialization
        let result1 = metronome.calculate_timing(0, 512).unwrap();
        assert_eq!(result1.beat_in_buffer, None);
        assert_eq!(metronome.frames_since_last_beat, 512);

        // Second buffer - beat should occur at position 1000
        let result2 = metronome.calculate_timing(512, 512).unwrap();
        assert_eq!(result2.beat_in_buffer, Some(488)); // 1000 - 512
        assert_eq!(metronome.frames_since_last_beat, 24); // (512 + 512) % 1000

        // Third buffer - no beat
        let result3 = metronome.calculate_timing(1024, 512).unwrap();
        assert_eq!(result3.beat_in_buffer, None);
        assert_eq!(metronome.frames_since_last_beat, 536);

        // Fourth buffer - next beat at position 1000 again
        let result4 = metronome.calculate_timing(1536, 512).unwrap();
        assert_eq!(result4.beat_in_buffer, Some(464)); // 1000 - 536
        assert_eq!(metronome.frames_since_last_beat, 48); // (536 + 512) % 1000
    }

    #[test]
    fn test_u32_wrapping() {
        let mut metronome = create_test_metronome(1000);

        // Initialize near u32::MAX
        let start_time = u32::MAX - 100;
        metronome.calculate_timing(start_time, 128).unwrap();

        // Next buffer wraps around
        let next_time = start_time.wrapping_add(128);
        let result = metronome.calculate_timing(next_time, 128).unwrap();
        assert_eq!(result.missed_frames, 0);
        assert_eq!(metronome.frames_since_last_beat, 256);
    }
}
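The beat-offset arithmetic these tests exercise reduces to modular phase math: a beat falls inside the buffer when the phase at the buffer start plus the buffer size reaches frames_per_beat. A tiny standalone sketch (hypothetical function, same numbers as the tests above):

/// Hypothetical helper mirroring calculate_timing's beat check: `phase` is the
/// number of frames since the last beat at the start of the buffer (with any
/// missed frames already added in).
fn beat_offset_in_buffer(phase: u32, buffer_size: u32, frames_per_beat: u32) -> Option<u32> {
    if phase + buffer_size >= frames_per_beat {
        Some(frames_per_beat - phase)
    } else {
        None
    }
}

fn main() {
    // Matches test_beat_in_buffer: phase 512, buffer 512, beat every 1000 frames.
    assert_eq!(beat_offset_in_buffer(512, 512, 1000), Some(488));
    // Matches test_normal_buffer_no_beat: phase 128, buffer 128.
    assert_eq!(beat_offset_in_buffer(128, 128, 1000), None);
}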

View File

@@ -63,7 +63,12 @@ impl jack::NotificationHandler for NotificationHandler {
            .expect("Could not send port connection notification");
    }

    fn port_registration(
        &mut self,
        _client: &jack::Client,
        _port_id: jack::PortId,
        register: bool,
    ) {
        if register {
            let notification = JackNotification::PortRegistered {};
            self.channel

View File

@@ -24,8 +24,17 @@ pub struct PostRecordController {
impl PostRecordController {
    /// Send a post-record processing request (RT-safe)
    pub fn send_request(
        &self,
        chunk_chain: Arc<AudioChunk>,
        sync_offset: u32,
        sample_rate: u32,
    ) -> Result<()> {
        let request = PostRecordRequest {
            chunk_chain,
            sync_offset,
            sample_rate,
        };

        match self.request_sender.try_send(request) {
            Ok(true) => Ok(()), // Successfully sent
@@ -86,23 +95,26 @@ impl PostRecordHandler {
    }

    /// Process a single post-record request
    async fn process_request(&self, request: PostRecordRequest) -> Result<()> {
        log::debug!(
            "Processing post-record request for {} samples with sync_offset {}",
            request.chunk_chain.len(),
            request.sync_offset
        );

        // Step 1: Consolidate and reorder chunk chain based on sync offset
        let consolidated_buffer =
            self.consolidate_with_sync_offset(&request.chunk_chain, request.sync_offset as usize)?;

        log::debug!(
            "Consolidated and reordered {} samples",
            consolidated_buffer.len()
        );

        // Step 2: Send consolidated buffer back to RT thread immediately
        let response = PostRecordResponse {
            consolidated_buffer,
        };

        if let Err(_) = self.response_sender.send(response).await {
            log::warn!("Failed to send consolidated buffer to RT thread");
@@ -113,7 +125,10 @@ impl PostRecordHandler {
        let consolidated_chunk = AudioChunk::consolidate(&request.chunk_chain);
        let file_path = self.get_file_path();

        match self
            .save_wav_file(&consolidated_chunk, request.sample_rate, &file_path)
            .await
        {
            Ok(_) => log::info!("Saved recording to {:?}", file_path),
            Err(e) => log::error!("Failed to save recording to {:?}: {}", file_path, e),
        }
@@ -122,7 +137,11 @@ impl PostRecordHandler {
    }

    /// Consolidate chunk chain and reorder samples based on sync offset
    fn consolidate_with_sync_offset(
        &self,
        chunk_chain: &Arc<AudioChunk>,
        sync_offset: usize,
    ) -> Result<Box<[f32]>> {
        let total_length = chunk_chain.len();

        if total_length == 0 {
@@ -175,11 +194,13 @@ impl PostRecordHandler {
            // Write all samples from the chunk
            for sample in chunk_samples {
                writer
                    .write_sample(sample)
                    .map_err(|_| LooperError::StateSave(std::panic::Location::caller()))?;
            }

            writer
                .finalize()
                .map_err(|_| LooperError::StateSave(std::panic::Location::caller()))?;

            Ok::<(), LooperError>(())
@@ -190,8 +211,7 @@ impl PostRecordHandler {
    /// Create save directory and return path
    fn create_directory() -> Result<PathBuf> {
        let mut path = dirs::home_dir().unwrap_or_else(|| PathBuf::from("."));
        path.push(".fcb_looper");

        std::fs::create_dir_all(&path)
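The controller/handler split above keeps the real-time thread non-blocking: the process callback only try-sends a request and polls for a response, while the handler consolidates and writes the WAV file off the RT path. A minimal sketch of that hand-off using std channels (illustrative only; the crate's channel types, payloads, and async runtime differ):

use std::sync::mpsc;
use std::thread;

// Hypothetical stand-ins for the crate's request/response payloads.
struct Request { samples: Vec<f32>, sync_offset: usize }
struct Response { consolidated: Box<[f32]> }

fn main() {
    // Bounded channels so the "RT" side never blocks on send.
    let (req_tx, req_rx) = mpsc::sync_channel::<Request>(1);
    let (resp_tx, resp_rx) = mpsc::sync_channel::<Response>(1);

    // Worker: consolidate off the RT path (the crate does this in an async task).
    let worker = thread::spawn(move || {
        for req in req_rx {
            let mut buf = req.samples;
            let offset = req.sync_offset % buf.len().max(1);
            buf.rotate_left(offset); // reorder so playback starts at the sync offset
            let _ = resp_tx.send(Response { consolidated: buf.into_boxed_slice() });
        }
    });

    // "RT" side: non-blocking send; a full queue would simply be retried next cycle.
    req_tx
        .try_send(Request { samples: vec![1.0, 2.0, 3.0, 4.0], sync_offset: 1 })
        .expect("request queue full");
    drop(req_tx); // let the worker exit once the request is handled

    // "RT" side: poll without blocking until the consolidated buffer arrives.
    let response = loop {
        match resp_rx.try_recv() {
            Ok(r) => break r,
            Err(mpsc::TryRecvError::Empty) => thread::yield_now(),
            Err(e) => panic!("worker disappeared: {e}"),
        }
    };
    assert_eq!(response.consolidated.to_vec(), vec![2.0, 3.0, 4.0, 1.0]);
    worker.join().unwrap();
}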

View File

@@ -45,11 +45,12 @@ impl<F: ChunkFactory> ProcessHandler<F> {
    /// Handle auto-stop record button (Button 3)
    pub fn record_auto_stop(&mut self) -> Result<()> {
        let samples_per_beat = self.metronome.frames_per_beat();
        let sync_offset = SYNC_OFFSET_BEATS * samples_per_beat;
        let target_samples = AUTO_STOP_BEATS * samples_per_beat;

        self.track
            .queue_record_auto_stop(target_samples as usize, sync_offset as usize);

        Ok(())
    }
@@ -75,7 +76,8 @@ impl<F: ChunkFactory> jack::ProcessHandler for ProcessHandler<F> {
                log::error!("Error processing metronome: {}", e);
                return jack::Control::Quit;
            }
        }
        .beat_in_buffer;

        let buffer_size = client.buffer_size() as usize;
        let state_before = self.track.current_state().clone();
@@ -84,12 +86,11 @@ impl<F: ChunkFactory> jack::ProcessHandler for ProcessHandler<F> {
        let timing = self.calculate_track_timing(beat_sample_index, &state_before);

        // Process track audio with calculated timing
        let should_consolidate =
            match self
                .track
                .process(ps, &mut self.ports, timing, &mut self.chunk_factory)
            {
                Ok(consolidate) => consolidate,
                Err(e) => {
                    log::error!("Error processing track: {}", e);
@@ -98,7 +99,9 @@ impl<F: ChunkFactory> jack::ProcessHandler for ProcessHandler<F> {
        };

        // Handle post-record processing
        if let Err(e) =
            self.handle_post_record_processing(should_consolidate, client.sample_rate() as u32)
        {
            log::error!("Error handling post-record processing: {}", e);
            return jack::Control::Quit;
        }
@@ -112,17 +115,26 @@ impl<F: ChunkFactory> ProcessHandler<F> {
impl<F: ChunkFactory> ProcessHandler<F> {
    /// Handle post-record processing: send requests and swap buffers
    fn handle_post_record_processing(
        &mut self,
        should_consolidate: bool,
        sample_rate: u32,
    ) -> Result<()> {
        // Send audio data for processing if track indicates consolidation needed
        if should_consolidate {
            if let Some((chunk_chain, sync_offset)) = self.track.get_audio_data_for_processing() {
                self.post_record_controller.send_request(
                    chunk_chain,
                    sync_offset as u32,
                    sample_rate,
                )?;
            }
        }

        // Check for consolidation response
        if let Some(response) = self.post_record_controller.try_recv_response() {
            self.track
                .set_consolidated_buffer(response.consolidated_buffer)?;
        }

        Ok(())
@@ -203,7 +215,9 @@ impl<F: ChunkFactory> ProcessHandler<F> {
        }

        // Check if state transition at beat affects position
        if state_after == TrackState::Playing
            && !matches!(state_before, TrackState::Playing)
        {
            // Started playing at beat - reset position to post-beat calculation
            self.playback_position = self.calculate_post_beat_position(state_before);
        }

View File

@@ -16,7 +16,7 @@ pub struct ConnectionState {
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MetronomeState {
    pub frames_per_beat: u32,
    pub click_volume: f32, // 0.0 to 1.0
}
@@ -30,12 +30,9 @@ impl Default for State {
                click_track_out: Vec::new(),
            },
            metronome: MetronomeState {
                frames_per_beat: 96000, // 120 BPM at 192kHz sample rate
                click_volume: 0.5,      // Default 50% volume
            },
        }
    }
}
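The default frames_per_beat follows the usual tempo conversion, frames per beat = sample_rate * 60 / BPM; a quick check with a hypothetical helper:

/// frames per beat = sample_rate (frames/s) * 60 (s/min) / tempo (beats/min)
/// (hypothetical helper; the crate stores the value directly in MetronomeState)
fn frames_per_beat(sample_rate: u32, bpm: u32) -> u32 {
    sample_rate * 60 / bpm
}

fn main() {
    // 120 BPM at 192 kHz matches the default of 96000 above.
    assert_eq!(frames_per_beat(192_000, 120), 96_000);
}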

View File

@@ -15,13 +15,13 @@ pub enum TrackState {
#[derive(Debug)]
pub enum TrackTiming {
    NoBeat {
        position: usize,
    },
    Beat {
        pre_beat_position: usize,
        post_beat_position: usize,
        beat_sample_index: usize,
    },
}

pub struct Track {
@@ -70,7 +70,7 @@ impl Track {
            TrackTiming::Beat {
                pre_beat_position,
                post_beat_position,
                beat_sample_index,
            } => {
                if beat_sample_index > 0 {
                    // Process samples before beat with current state
@@ -129,7 +129,8 @@ impl Track {
            TrackState::Recording => {
                // Record input samples (manual recording)
                let samples_to_record = &input_buffer[start_index..end_index];
                self.audio_data
                    .append_samples(samples_to_record, chunk_factory)?;

                // Output silence during recording
                output_buffer[start_index..end_index].fill(0.0);
@@ -147,7 +148,7 @@ impl Track {
                if samples_to_append > 0 {
                    self.audio_data.append_samples(
                        &samples_to_record[..samples_to_append],
                        chunk_factory,
                    )?;
                }
@@ -175,25 +176,25 @@ impl Track {
    /// Apply state transition from next_state to current_state
    /// Returns true if track should be consolidated and saved
    fn apply_state_transition<F: ChunkFactory>(&mut self, chunk_factory: &mut F) -> Result<bool> {
        // Check if this is a recording → playing transition (consolidation trigger)
        let should_consolidate = matches!(
            (&self.current_state, &self.next_state),
            (TrackState::Recording, TrackState::Playing)
                | (TrackState::RecordingAutoStop { .. }, TrackState::Playing)
        );

        // Handle transitions that require setup
        match (&self.current_state, &self.next_state) {
            (current_state, TrackState::Recording)
                if !matches!(current_state, TrackState::Recording) =>
            {
                // Starting manual recording - clear previous data and create new unconsolidated data
                self.audio_data = AudioData::new_unconsolidated(chunk_factory, 0)?;
            }
            (current_state, TrackState::RecordingAutoStop { sync_offset, .. })
                if !matches!(current_state, TrackState::RecordingAutoStop { .. }) =>
            {
                // Starting auto-stop recording - clear previous data and create new unconsolidated data with offset
                self.audio_data = AudioData::new_unconsolidated(chunk_factory, *sync_offset)?;
            }
@@ -273,19 +274,31 @@ impl Track {
    pub fn queue_record_auto_stop(&mut self, target_samples: usize, sync_offset: usize) {
        match self.current_state {
            TrackState::Empty | TrackState::Idle => {
                self.next_state = TrackState::RecordingAutoStop {
                    target_samples,
                    sync_offset,
                };
            }
            TrackState::Recording => {
                // Switch from manual to auto-stop recording
                self.next_state = TrackState::RecordingAutoStop {
                    target_samples,
                    sync_offset,
                };
            }
            TrackState::RecordingAutoStop { .. } => {
                // Already auto-recording - update parameters
                self.next_state = TrackState::RecordingAutoStop {
                    target_samples,
                    sync_offset,
                };
            }
            TrackState::Playing => {
                // Stop playing and start auto-recording
                self.next_state = TrackState::RecordingAutoStop {
                    target_samples,
                    sync_offset,
                };
            }
        }
    }
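TrackTiming lets the track split one JACK buffer at the beat boundary and process the two halves under different states (the samples before the beat with the current state, the samples after it with the post-transition state). A compact sketch of that split (hypothetical names, simplified fields):

// Hypothetical illustration of splitting a buffer at the beat boundary.
enum TimingSketch {
    NoBeat,
    Beat { beat_sample_index: usize },
}

/// Returns the (start, end) sample ranges to process, in order.
fn segments(timing: &TimingSketch, buffer_len: usize) -> Vec<(usize, usize)> {
    match timing {
        // No beat: the whole buffer is handled with the current state.
        TimingSketch::NoBeat => vec![(0, buffer_len)],
        // Beat: samples before the boundary keep the current state, samples after
        // it run under the post-transition state (e.g. Recording -> Playing).
        TimingSketch::Beat { beat_sample_index } => {
            vec![(0, *beat_sample_index), (*beat_sample_index, buffer_len)]
        }
    }
}

fn main() {
    let timing = TimingSketch::Beat { beat_sample_index: 488 };
    assert_eq!(segments(&timing, 512), vec![(0, 488), (488, 512)]);
}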