Added single column

parent 06600b8341
commit e239610909
@@ -99,14 +99,14 @@ impl AudioData {
     }

     /// Get underlying chunk for post-record processing
-    pub fn get_chunk_for_processing(&self) -> Option<(Arc<AudioChunk>, usize)> {
+    pub fn get_chunk_for_processing(&self) -> Result<(Arc<AudioChunk>, usize)> {
         match self {
             Self::Unconsolidated {
                 chunks,
                 sync_offset,
                 ..
-            } => Some((chunks.clone(), *sync_offset)),
-            _ => None,
+            } => Ok((chunks.clone(), *sync_offset)),
+            _ => Err(LooperError::ChunkOwnership(std::panic::Location::caller())),
         }
     }

@@ -131,6 +131,24 @@ impl AudioData {
         *self = Self::Empty;
         Ok(())
     }
+
+    /// Append silence frames to audio data (RT-safe, uses fixed-size buffer and loops)
+    pub fn append_silence<F: ChunkFactory>(
+        &mut self,
+        frame_count: usize,
+        chunk_factory: &mut F,
+    ) -> Result<()> {
+        const SILENCE_BUFFER_SIZE: usize = 1024;
+        let silence_buffer = [0.0; SILENCE_BUFFER_SIZE];
+
+        let mut remaining_frames = frame_count;
+        while remaining_frames > 0 {
+            let frames_this_iteration = remaining_frames.min(SILENCE_BUFFER_SIZE);
+            self.append_samples(&silence_buffer[..frames_this_iteration], chunk_factory)?;
+            remaining_frames -= frames_this_iteration;
+        }
+        Ok(())
+    }
 }

 impl AudioData {
@@ -408,7 +426,7 @@ mod tests {
         audio_data.append_samples(&samples, &mut factory).unwrap();

         let result = audio_data.get_chunk_for_processing();
-        assert!(result.is_some());
+        assert!(result.is_ok());

         let (chunk, offset) = result.unwrap();
         assert_eq!(offset, 42);
@@ -419,7 +437,7 @@ mod tests {
     fn test_get_chunk_for_processing_wrong_state() {
         let audio_data = AudioData::new_empty();
         let result = audio_data.get_chunk_for_processing();
-        assert!(result.is_none());
+        assert!(result.is_err());
     }

     #[test]

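The silence loop in `append_silence` above never touches more than `SILENCE_BUFFER_SIZE` frames at a time, which is what keeps it usable on the RT thread. A minimal sketch of the call pattern, assuming it sits inside this crate; `pad_with_silence`, `audio_data`, and `factory` are illustrative names, not part of the commit:

```rust
// Sketch only: 2_500 frames of silence are appended as 1_024 + 1_024 + 452,
// so every append_samples call stays within the fixed stack buffer.
fn pad_with_silence<F: ChunkFactory>(audio_data: &mut AudioData, factory: &mut F) -> Result<()> {
    let before = audio_data.len();
    audio_data.append_silence(2_500, factory)?;
    assert_eq!(audio_data.len(), before + 2_500); // assumes appending succeeds in the current state
    Ok(())
}
```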
audio_engine/src/column.rs (new file, 144 lines)
@@ -0,0 +1,144 @@
use crate::*;

pub struct Column<const ROWS: usize> {
    frames_per_beat: usize,
    tracks: [Track; ROWS],
    playback_position: usize,
}

impl<const ROWS: usize> Column<ROWS> {
    pub fn new(frames_per_beat: usize) -> Self {
        Self {
            frames_per_beat,
            tracks: core::array::from_fn(|_| Track::new()),
            playback_position: 0,
        }
    }

    pub fn len(&self) -> usize {
        for track in &self.tracks {
            if track.is_recording() {
                continue;
            }
            let len = track.len();
            if len > 0 {
                return len;
            }
        }
        return 0;
    }

    pub fn idle(&self) -> bool {
        for track in &self.tracks {
            if !track.is_idle() {
                return false;
            }
        }
        return true;
    }

    pub fn handle_record_button(&mut self, row: usize) -> Result<()> {
        let len = self.len();
        let track = &mut self.tracks[row];
        if track.is_recording() {
            if len > 0 {
                track.clear();
            } else {
                track.play();
            }
        } else {
            if len > 0 {
                let sync_offset = len - self.playback_position;
                track.record_auto_stop(len, sync_offset);
            } else {
                track.record();
            }
        }
        Ok(())
    }

    pub fn handle_play_button(&mut self, row: usize) -> Result<()> {
        let track = &mut self.tracks[row];
        if track.len() > 0 && track.is_idle() {
            track.play();
        } else if ! track.is_idle() {
            track.stop();
        }
        Ok(())
    }

    pub fn handle_clear_button(&mut self, row: usize) -> Result<()> {
        let track = &mut self.tracks[row];
        track.clear();
        Ok(())
    }

    pub fn set_consolidated_buffer(&mut self, row: usize, buffer: Box<[f32]>) -> Result<()> {
        self.tracks[row].set_consolidated_buffer(buffer)
    }

    pub fn handle_xrun<H>(
        &mut self,
        timing: &BufferTiming,
        chunk_factory: &mut impl ChunkFactory,
        post_record_handler: H,
    ) -> Result<()>
    where
        H: Fn(usize, Arc<AudioChunk>, usize) -> Result<()>,
    {
        for (row, track) in self.tracks.iter_mut().enumerate() {
            track.handle_xrun(
                timing.beat_in_missed,
                timing.missed_frames,
                chunk_factory,
                |chunk, sync_offset| post_record_handler(row, chunk, sync_offset),
            )?;
        }
        Ok(())
    }

    pub fn process<H>(
        &mut self,
        timing: &BufferTiming,
        input_buffer: &[f32],
        output_buffer: &mut [f32],
        scratch_pad: &mut [f32],
        chunk_factory: &mut impl ChunkFactory,
        post_record_handler: H,
    ) -> Result<()>
    where
        H: Fn(usize, Arc<AudioChunk>, usize) -> Result<()>,
    {
        let len = self.len();
        if self.idle() {
            if let Some(beat_index) = timing.beat_in_buffer {
                let idle_time = input_buffer.len() - beat_index as usize;
                if len == 0 {
                    self.playback_position = self.frames_per_beat - idle_time;
                } else {
                    self.playback_position = len - idle_time;
                }
            }
        }
        for (row, track) in self.tracks.iter_mut().enumerate() {
            track.process(
                self.playback_position,
                timing.beat_in_buffer,
                input_buffer,
                scratch_pad,
                chunk_factory,
                |chunk, sync_offset| post_record_handler(row, chunk, sync_offset),
            )?;
            for (output_val, scratch_pad_val) in output_buffer.iter_mut().zip(scratch_pad.iter()) {
                *output_val += *scratch_pad_val;
            }
        }
        let len = self.len();
        if len > 0 {
            self.playback_position = (self.playback_position + input_buffer.len()) % self.len();
        } else {
            self.playback_position = (self.playback_position + input_buffer.len()) % self.frames_per_beat;
        }
        Ok(())
    }
}
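Since `Column` is new in this commit, a small usage sketch may help. It only exercises the button handlers, which queue state changes on the underlying `Track`s; the actual transitions happen at beat boundaries inside `process`. Function name, row count, and frame numbers here are illustrative, not part of the commit:

```rust
// Sketch only: drive a 4-row column from control-surface events.
fn demo_column_buttons() -> Result<()> {
    // One beat = 48_000 frames (1 second at 48 kHz) - hypothetical numbers.
    let mut column: Column<4> = Column::new(48_000);

    column.handle_record_button(0)?; // queues Recording on row 0
    assert!(column.idle());          // nothing has transitioned yet

    column.handle_play_button(1)?;   // row 1 is empty and idle: a no-op
    column.handle_clear_button(0)?;  // queues Empty on row 0
    Ok(())
}
```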
@@ -3,6 +3,7 @@ mod audio_chunk;
 mod audio_data;
 mod beep;
 mod chunk_factory;
+mod column;
 mod connection_manager;
 mod looper_error;
 mod metronome;
@@ -21,9 +22,11 @@ use audio_chunk::AudioChunk;
 use audio_data::AudioData;
 use beep::generate_beep;
 use chunk_factory::ChunkFactory;
+use column::Column;
 use connection_manager::ConnectionManager;
 use looper_error::LooperError;
 use looper_error::Result;
+use metronome::BufferTiming;
 use metronome::Metronome;
 use notification_handler::JackNotification;
 use notification_handler::NotificationHandler;
@@ -33,8 +36,6 @@ use post_record_handler::PostRecordHandler;
 use process_handler::ProcessHandler;
 use state::State;
 use track::Track;
-use track::TrackState;
-use track::TrackTiming;

 pub struct JackPorts {
     pub audio_in: jack::Port<jack::AudioIn>,
@@ -69,7 +70,8 @@ async fn main() {
     let (mut post_record_handler, post_record_controller) =
         PostRecordHandler::new().expect("Could not create post-record handler");

-    let process_handler = ProcessHandler::new(
+    let process_handler = ProcessHandler::<_, 5>::new(
         &jack_client,
         ports,
         allocator,
         beep_samples,

@@ -6,8 +6,8 @@ pub struct Metronome {
     click_volume: f32,

     // Timing state
-    frames_per_beat: u32,
-    frames_since_last_beat: u32, // Where we are in the current beat cycle
+    frames_per_beat: usize,
+    frames_since_last_beat: usize, // Where we are in the current beat cycle
     last_frame_time: Option<u32>, // For xrun detection
 }

@@ -17,7 +17,7 @@ pub struct BufferTiming {
     pub beat_in_buffer: Option<u32>,

     /// Number of frames missed due to xrun (0 if no xrun)
-    pub missed_frames: u32,
+    pub missed_frames: usize,

     /// Beat index within the missed frames (if any)
     pub beat_in_missed: Option<u32>,
@@ -34,10 +34,6 @@ impl Metronome {
         }
     }

-    pub fn frames_per_beat(&self) -> u32 {
-        self.frames_per_beat
-    }
-
     /// Process audio for current buffer, writing to output slice
     pub fn process(
         &mut self,
@@ -53,68 +49,82 @@ impl Metronome {
|
||||
// Get output buffer for click track
|
||||
let click_output = ports.click_track_out.as_mut_slice(ps);
|
||||
|
||||
self.render_click(buffer_size, current_frame_time, &timing, click_output);
|
||||
self.render_click(buffer_size, &timing, click_output);
|
||||
|
||||
Ok(timing)
|
||||
}
|
||||
|
||||
fn render_click(
|
||||
&mut self,
|
||||
buffer_size: u32,
|
||||
current_frame_time: u32,
|
||||
timing: &BufferTiming,
|
||||
click_output: &mut [f32],
|
||||
) {
|
||||
// Calculate current position within the beep (frames since last beat started)
|
||||
let frames_since_beat_start = current_frame_time - self.frames_since_last_beat;
|
||||
let click_length = self.click_samples.sample_count as u32;
|
||||
fn render_click(&mut self, buffer_size: u32, timing: &BufferTiming, click_output: &mut [f32]) {
|
||||
let click_length = self.click_samples.sample_count;
|
||||
|
||||
// Calculate our position at the START of this buffer (before calculate_timing updated it)
|
||||
// We need to go back by: buffer_size + any missed samples
|
||||
let total_advancement = buffer_size as usize + timing.missed_frames;
|
||||
let start_position =
|
||||
(self.frames_since_last_beat + self.frames_per_beat - total_advancement)
|
||||
% self.frames_per_beat;
|
||||
|
||||
if let Some(beat_offset) = timing.beat_in_buffer {
|
||||
// Write silence up to beat boundary
|
||||
let silence_end = beat_offset.min(buffer_size);
|
||||
click_output[0..silence_end as _].fill(0.0);
|
||||
let beat_offset = beat_offset as usize;
|
||||
|
||||
// Write click samples from boundary onward
|
||||
if beat_offset < buffer_size {
|
||||
let remaining_buffer = buffer_size - beat_offset;
|
||||
let samples_to_write = remaining_buffer.min(click_length);
|
||||
// Check if we're still playing a click from before this beat
|
||||
if start_position < click_length {
|
||||
// Continue click until beat offset
|
||||
let click_samples_remaining = click_length as usize - start_position as usize;
|
||||
let samples_to_write = beat_offset.min(click_samples_remaining);
|
||||
|
||||
// Copy click samples in bulk
|
||||
let dest = &mut click_output
|
||||
[beat_offset as usize..beat_offset as usize + samples_to_write as usize];
|
||||
if self.click_samples.copy_samples(dest, 0).is_ok() {
|
||||
// Apply volume scaling with iterators
|
||||
dest.iter_mut()
|
||||
.for_each(|sample| *sample *= self.click_volume);
|
||||
}
|
||||
|
||||
// Fill remaining buffer with silence
|
||||
click_output[(beat_offset as usize + samples_to_write as usize)..].fill(0.0);
|
||||
}
|
||||
} else if frames_since_beat_start < click_length {
|
||||
// Continue playing click from previous beat if still within beep duration
|
||||
let click_start_offset = frames_since_beat_start;
|
||||
let remaining_click_samples = click_length - click_start_offset;
|
||||
let samples_to_write = buffer_size.min(remaining_click_samples);
|
||||
|
||||
// Copy remaining beep samples in bulk
|
||||
let dest = &mut click_output[0..samples_to_write as _];
|
||||
let dest = &mut click_output[0..samples_to_write];
|
||||
if self
|
||||
.click_samples
|
||||
.copy_samples(dest, click_start_offset as _)
|
||||
.copy_samples(dest, start_position as usize)
|
||||
.is_ok()
|
||||
{
|
||||
// Apply volume scaling with iterators
|
||||
dest.iter_mut()
|
||||
.for_each(|sample| *sample *= self.click_volume);
|
||||
}
|
||||
|
||||
// Fill remaining buffer with silence
|
||||
click_output[samples_to_write as _..].fill(0.0);
|
||||
// Fill gap between end of click and new beat with silence
|
||||
click_output[samples_to_write..beat_offset].fill(0.0);
|
||||
} else {
|
||||
// Write silence up to beat
|
||||
click_output[0..beat_offset].fill(0.0);
|
||||
}
|
||||
|
||||
// Start new click at beat offset
|
||||
if beat_offset < buffer_size as usize {
|
||||
let remaining_buffer = buffer_size as usize - beat_offset;
|
||||
let samples_to_write = remaining_buffer.min(click_length as usize);
|
||||
|
||||
let dest = &mut click_output[beat_offset..beat_offset + samples_to_write];
|
||||
if self.click_samples.copy_samples(dest, 0).is_ok() {
|
||||
dest.iter_mut()
|
||||
.for_each(|sample| *sample *= self.click_volume);
|
||||
}
|
||||
|
||||
// Fill any remaining buffer with silence
|
||||
click_output[(beat_offset + samples_to_write)..].fill(0.0);
|
||||
}
|
||||
} else {
|
||||
// No beat in this buffer - check if we're continuing a click
|
||||
if start_position < click_length {
|
||||
let click_offset = start_position as usize;
|
||||
let remaining_click = click_length as usize - click_offset;
|
||||
let samples_to_write = (buffer_size as usize).min(remaining_click);
|
||||
|
||||
let dest = &mut click_output[0..samples_to_write];
|
||||
if self.click_samples.copy_samples(dest, click_offset).is_ok() {
|
||||
dest.iter_mut()
|
||||
.for_each(|sample| *sample *= self.click_volume);
|
||||
}
|
||||
|
||||
// Fill remaining with silence
|
||||
click_output[samples_to_write..].fill(0.0);
|
||||
} else {
|
||||
// No click playing - all silence
|
||||
click_output.fill(0.0);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn calculate_timing(
|
||||
&mut self,
|
||||
@@ -122,22 +132,22 @@ impl Metronome {
|
||||
buffer_size: u32,
|
||||
) -> Result<BufferTiming> {
|
||||
// Detect xrun
|
||||
let (missed_samples, beat_in_missed) = if let Some(last) = self.last_frame_time {
|
||||
let (missed_frames, beat_in_missed) = if let Some(last) = self.last_frame_time {
|
||||
let expected = last.wrapping_add(buffer_size); // Handle u32 wrap
|
||||
if current_frame_time != expected {
|
||||
// We have a gap
|
||||
let missed = current_frame_time.wrapping_sub(expected);
|
||||
let missed = current_frame_time.wrapping_sub(expected) as usize;
|
||||
|
||||
// Check if we missed multiple beats
|
||||
let total_samples = self.frames_since_last_beat + missed + buffer_size;
|
||||
let total_samples = self.frames_since_last_beat + missed as usize + buffer_size as usize;
|
||||
if total_samples >= 2 * self.frames_per_beat {
|
||||
return Err(LooperError::Xrun(std::panic::Location::caller()));
|
||||
}
|
||||
|
||||
// Check if a beat occurred in the missed section
|
||||
let beat_in_missed = if self.frames_since_last_beat + missed >= self.frames_per_beat
|
||||
let beat_in_missed = if self.frames_since_last_beat + missed as usize >= self.frames_per_beat
|
||||
{
|
||||
Some(self.frames_per_beat - self.frames_since_last_beat)
|
||||
Some((self.frames_per_beat - self.frames_since_last_beat) as u32)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
@@ -153,21 +163,21 @@ impl Metronome {
|
||||
|
||||
// Check for beat in current buffer
|
||||
// We need to account for any missed samples here too
|
||||
let start_position = (self.frames_since_last_beat + missed_samples) % self.frames_per_beat;
|
||||
let beat_in_buffer = if start_position + buffer_size >= self.frames_per_beat {
|
||||
Some(self.frames_per_beat - start_position)
|
||||
let start_position = (self.frames_since_last_beat + missed_frames) % self.frames_per_beat;
|
||||
let beat_in_buffer = if start_position + buffer_size as usize >= self.frames_per_beat {
|
||||
Some((self.frames_per_beat - start_position) as u32)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
// Update state - advance by total samples (missed + buffer)
|
||||
self.frames_since_last_beat =
|
||||
(self.frames_since_last_beat + missed_samples + buffer_size) % self.frames_per_beat;
|
||||
(self.frames_since_last_beat + missed_frames + buffer_size as usize) % self.frames_per_beat;
|
||||
self.last_frame_time = Some(current_frame_time);
|
||||
|
||||
Ok(BufferTiming {
|
||||
beat_in_buffer,
|
||||
missed_frames: missed_samples,
|
||||
missed_frames: missed_frames,
|
||||
beat_in_missed,
|
||||
})
|
||||
}
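The beat arithmetic in `calculate_timing` is easiest to see with concrete numbers. A hedged test sketch follows; it assumes the first parameter (truncated by the hunk above) is the current frame time, that the no-xrun branch yields zero missed frames, and it reuses this module's `create_test_metronome` helper:

```rust
#[test]
fn beat_lands_inside_buffer_sketch() {
    // 48_000 frames per beat, 47_500 frames already elapsed, 1_024-frame buffer.
    let mut metronome = create_test_metronome(48_000);
    metronome.frames_since_last_beat = 47_500;
    metronome.last_frame_time = Some(0); // previous buffer started at frame 0

    // current_frame_time == 0 + 1_024, so no xrun is detected.
    let timing = metronome.calculate_timing(1_024, 1_024).unwrap();

    // 47_500 + 1_024 >= 48_000, so the beat falls 500 frames into this buffer,
    // and the new phase becomes 48_524 % 48_000 = 524.
    assert_eq!(timing.beat_in_buffer, Some(500));
    assert_eq!(timing.missed_frames, 0);
}
```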
|
||||
@@ -177,7 +187,7 @@ impl Metronome {
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
fn create_test_metronome(samples_per_beat: u32) -> Metronome {
|
||||
fn create_test_metronome(frames_per_beat: usize) -> Metronome {
|
||||
let beep_samples = Arc::new(AudioChunk {
|
||||
samples: vec![1.0; 100].into_boxed_slice(),
|
||||
sample_count: 100,
|
||||
@@ -187,7 +197,7 @@ mod tests {
|
||||
Metronome {
|
||||
click_samples: beep_samples,
|
||||
click_volume: 1.0,
|
||||
frames_per_beat: samples_per_beat,
|
||||
frames_per_beat,
|
||||
frames_since_last_beat: 0,
|
||||
last_frame_time: None,
|
||||
}
|
||||
|
||||
@ -1,8 +1,8 @@
|
||||
use crate::*;
|
||||
|
||||
/// Process MIDI events
|
||||
pub fn process_events<F: ChunkFactory>(
|
||||
process_handler: &mut ProcessHandler<F>,
|
||||
pub fn process_events<F: ChunkFactory, const ROWS: usize>(
|
||||
process_handler: &mut ProcessHandler<F, ROWS>,
|
||||
ps: &jack::ProcessScope,
|
||||
) -> Result<()> {
|
||||
// First, collect all MIDI events into a fixed-size array
|
||||
@ -38,35 +38,36 @@ pub fn process_events<F: ChunkFactory>(
|
||||
if u8::from(value) > 0 {
|
||||
match u8::from(controller) {
|
||||
20 => {
|
||||
// Button 1: Record/Play toggle
|
||||
process_handler.record_toggle()?;
|
||||
process_handler.handle_button_1()?;
|
||||
}
|
||||
21 => {
|
||||
// Button 2: Play/Mute
|
||||
process_handler.play_toggle()?;
|
||||
process_handler.handle_button_2()?;
|
||||
}
|
||||
22 => {
|
||||
// Button 3: Auto-stop record
|
||||
process_handler.record_auto_stop()?;
|
||||
process_handler.handle_button_3()?;
|
||||
}
|
||||
24 => {
|
||||
// Button 5: Clear track
|
||||
process_handler.clear_track()?;
|
||||
process_handler.handle_button_5()?;
|
||||
}
|
||||
30 => {
|
||||
process_handler.handle_button_up()?;
|
||||
}
|
||||
31 => {
|
||||
process_handler.handle_button_down()?;
|
||||
}
|
||||
_ => {
|
||||
// Other CC messages - ignore for now
|
||||
// Other CC messages - ignore
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
// Ignore other MIDI messages for now
|
||||
// Other MIDI messages - ignore
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(_) => {
|
||||
// Skip malformed MIDI messages instead of panicking
|
||||
continue;
|
||||
// Malformed MIDI messages - ignore
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
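For reference, the controller numbers handled above map onto the looper's buttons as follows. The descriptive strings are editorial (the code only knows `handle_button_N`, and `handle_button_3` is currently a no-op in the process handler), so treat this as a reading aid rather than API:

```rust
// Sketch only: a reading aid for the CC wiring in the match above.
fn describe_cc(controller: u8) -> &'static str {
    match controller {
        20 => "button 1: record on the selected row",
        21 => "button 2: play/stop the selected row",
        22 => "button 3: reserved (handle_button_3 currently does nothing)",
        24 => "button 5: clear the selected row",
        30 => "move row selection up",
        31 => "move row selection down",
        _ => "ignored",
    }
}
```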
@@ -4,14 +4,16 @@ use std::path::PathBuf;
 /// Request to process a recorded chunk chain with sync offset
 #[derive(Debug)]
 pub struct PostRecordRequest {
+    pub row: usize,
     pub chunk_chain: Arc<AudioChunk>,
-    pub sync_offset: u32,
-    pub sample_rate: u32,
+    pub sync_offset: usize,
+    pub sample_rate: usize,
 }

 /// Response containing the consolidated buffer
 #[derive(Debug)]
 pub struct PostRecordResponse {
+    pub row: usize,
     pub consolidated_buffer: Box<[f32]>,
 }

@@ -26,11 +28,13 @@ impl PostRecordController {
     /// Send a post-record processing request (RT-safe)
     pub fn send_request(
         &self,
+        row: usize,
         chunk_chain: Arc<AudioChunk>,
-        sync_offset: u32,
-        sample_rate: u32,
+        sync_offset: usize,
+        sample_rate: usize,
     ) -> Result<()> {
         let request = PostRecordRequest {
+            row,
             chunk_chain,
             sync_offset,
             sample_rate,
@@ -113,6 +117,7 @@ impl PostRecordHandler {

         // Step 2: Send consolidated buffer back to RT thread immediately
         let response = PostRecordResponse {
+            row: request.row,
             consolidated_buffer,
         };

@@ -123,7 +128,7 @@ impl PostRecordHandler {
         // Step 3: Save WAV file in background (I/O intensive)
         // Use original chunk chain for saving (not reordered)
         let consolidated_chunk = AudioChunk::consolidate(&request.chunk_chain);
-        let file_path = self.get_file_path();
+        let file_path = self.get_file_path(request.row);

         match self
             .save_wav_file(&consolidated_chunk, request.sample_rate, &file_path)
@@ -174,7 +179,7 @@ impl PostRecordHandler {
     async fn save_wav_file(
         &self,
         chunk: &AudioChunk,
-        sample_rate: u32,
+        sample_rate: usize,
         file_path: &PathBuf,
     ) -> Result<()> {
         // Run WAV writing in blocking task to avoid blocking async runtime
@@ -184,7 +189,7 @@ impl PostRecordHandler {
         tokio::task::spawn_blocking(move || {
             let spec = hound::WavSpec {
                 channels: 1,
-                sample_rate,
+                sample_rate: sample_rate as _,
                 bits_per_sample: 32,
                 sample_format: hound::SampleFormat::Float,
             };
@@ -221,7 +226,7 @@ impl PostRecordHandler {
     }

     /// Get file path for track recording
-    fn get_file_path(&self) -> PathBuf {
-        self.directory.join("track.wav")
+    fn get_file_path(&self, row: usize) -> PathBuf {
+        self.directory.join(format!("row_{row}.wav"))
     }
 }

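With `get_file_path` now taking the row index, each row's recording is written to its own WAV instead of everything overwriting `track.wav`. A tiny sketch of the naming format (the directory is hypothetical, and `get_file_path` is private, so this only illustrates the string produced by `format!`):

```rust
// Sketch only: row-indexed file names, e.g. <directory>/row_0.wav, <directory>/row_3.wav.
let row: usize = 3;
assert_eq!(format!("row_{row}.wav"), "row_3.wav");
```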
@@ -1,20 +1,18 @@
|
||||
use crate::*;
|
||||
|
||||
// Testing constants for sync offset functionality
|
||||
const SYNC_OFFSET_BEATS: u32 = 2; // Start recording at beat 3 (0-indexed)
|
||||
const AUTO_STOP_BEATS: u32 = 4; // Record for 4 beats total
|
||||
|
||||
pub struct ProcessHandler<F: ChunkFactory> {
|
||||
track: Track,
|
||||
playback_position: usize,
|
||||
pub struct ProcessHandler<F: ChunkFactory, const ROWS: usize> {
|
||||
pub ports: JackPorts,
|
||||
chunk_factory: F,
|
||||
metronome: Metronome,
|
||||
post_record_controller: PostRecordController,
|
||||
column: Column<ROWS>,
|
||||
selected_row: usize,
|
||||
scratch_pad: Box<[f32]>,
|
||||
}
|
||||
|
||||
impl<F: ChunkFactory> ProcessHandler<F> {
|
||||
impl<F: ChunkFactory, const ROWS: usize> ProcessHandler<F, ROWS> {
|
||||
pub fn new(
|
||||
client: &jack::Client,
|
||||
ports: JackPorts,
|
||||
chunk_factory: F,
|
||||
beep_samples: Arc<AudioChunk>,
|
||||
@@ -22,227 +20,113 @@ impl<F: ChunkFactory> ProcessHandler<F> {
|
||||
post_record_controller: PostRecordController,
|
||||
) -> Result<Self> {
|
||||
Ok(Self {
|
||||
track: Track::new(),
|
||||
playback_position: 0,
|
||||
ports,
|
||||
chunk_factory,
|
||||
metronome: Metronome::new(beep_samples, state),
|
||||
post_record_controller,
|
||||
column: Column::new(state.metronome.frames_per_beat),
|
||||
selected_row: 0,
|
||||
scratch_pad: vec![0.0; client.buffer_size() as usize].into_boxed_slice(),
|
||||
})
|
||||
}
|
||||
|
||||
/// Handle record/play toggle button (Button 1)
|
||||
pub fn record_toggle(&mut self) -> Result<()> {
|
||||
self.track.queue_record_toggle();
|
||||
pub fn handle_button_1(&mut self) -> Result<()> {
|
||||
self.column.handle_record_button(self.selected_row)
|
||||
}
|
||||
|
||||
pub fn handle_button_2(&mut self) -> Result<()> {
|
||||
self.column.handle_play_button(self.selected_row)
|
||||
}
|
||||
|
||||
pub fn handle_button_3(&mut self) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Handle play/mute toggle button (Button 2)
|
||||
pub fn play_toggle(&mut self) -> Result<()> {
|
||||
self.track.queue_play_toggle();
|
||||
pub fn handle_button_5(&mut self) -> Result<()> {
|
||||
self.column.handle_clear_button(self.selected_row)
|
||||
}
|
||||
|
||||
pub fn handle_button_up(&mut self) -> Result<()> {
|
||||
if self.selected_row == 0 {
|
||||
self.selected_row = ROWS - 1;
|
||||
} else {
|
||||
self.selected_row -= 1;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Handle auto-stop record button (Button 3)
|
||||
pub fn record_auto_stop(&mut self) -> Result<()> {
|
||||
let samples_per_beat = self.metronome.frames_per_beat();
|
||||
let sync_offset = SYNC_OFFSET_BEATS * samples_per_beat;
|
||||
let target_samples = AUTO_STOP_BEATS * samples_per_beat;
|
||||
|
||||
self.track
|
||||
.queue_record_auto_stop(target_samples as usize, sync_offset as usize);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Handle clear button (Button 5)
|
||||
pub fn clear_track(&mut self) -> Result<()> {
|
||||
self.track.queue_clear();
|
||||
pub fn handle_button_down(&mut self) -> Result<()> {
|
||||
self.selected_row = (self.selected_row + 1) % ROWS;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<F: ChunkFactory> jack::ProcessHandler for ProcessHandler<F> {
|
||||
impl<F: ChunkFactory, const ROWS: usize> jack::ProcessHandler for ProcessHandler<F, ROWS> {
|
||||
fn process(&mut self, client: &jack::Client, ps: &jack::ProcessScope) -> jack::Control {
|
||||
// Process MIDI first - this updates next_state on the track
|
||||
if let Err(e) = midi::process_events(self, ps) {
|
||||
log::error!("Error processing MIDI events: {}", e);
|
||||
return jack::Control::Quit;
|
||||
if let Err(e) = self.process_with_error_handling(client, ps) {
|
||||
log::error!("Error processing audio: {}", e);
|
||||
jack::Control::Quit
|
||||
} else {
|
||||
jack::Control::Continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<F: ChunkFactory, const ROWS: usize> ProcessHandler<F, ROWS> {
|
||||
fn process_with_error_handling(
|
||||
&mut self,
|
||||
client: &jack::Client,
|
||||
ps: &jack::ProcessScope,
|
||||
) -> Result<()> {
|
||||
// Check for consolidation response
|
||||
if let Some(response) = self.post_record_controller.try_recv_response() {
|
||||
self.column
|
||||
.set_consolidated_buffer(response.row, response.consolidated_buffer)?;
|
||||
}
|
||||
|
||||
// Process metronome and get beat timing information
|
||||
let beat_sample_index = match self.metronome.process(ps, &mut self.ports) {
|
||||
Ok(beat_index) => beat_index,
|
||||
Err(e) => {
|
||||
log::error!("Error processing metronome: {}", e);
|
||||
return jack::Control::Quit;
|
||||
}
|
||||
}
|
||||
.beat_in_buffer;
|
||||
let timing = self.metronome.process(ps, &mut self.ports)?;
|
||||
|
||||
let buffer_size = client.buffer_size() as usize;
|
||||
let state_before = self.track.current_state().clone();
|
||||
|
||||
// Calculate timing information for track processing
|
||||
let timing = self.calculate_track_timing(beat_sample_index, &state_before);
|
||||
|
||||
// Process track audio with calculated timing
|
||||
let should_consolidate =
|
||||
match self
|
||||
.track
|
||||
.process(ps, &mut self.ports, timing, &mut self.chunk_factory)
|
||||
{
|
||||
Ok(consolidate) => consolidate,
|
||||
Err(e) => {
|
||||
log::error!("Error processing track: {}", e);
|
||||
return jack::Control::Quit;
|
||||
}
|
||||
};
|
||||
|
||||
// Handle post-record processing
|
||||
if let Err(e) =
|
||||
self.handle_post_record_processing(should_consolidate, client.sample_rate() as u32)
|
||||
{
|
||||
log::error!("Error handling post-record processing: {}", e);
|
||||
return jack::Control::Quit;
|
||||
}
|
||||
|
||||
// Update playback position based on what happened
|
||||
self.update_playback_position(beat_sample_index, buffer_size, &state_before);
|
||||
|
||||
jack::Control::Continue
|
||||
}
|
||||
}
|
||||
|
||||
impl<F: ChunkFactory> ProcessHandler<F> {
|
||||
/// Handle post-record processing: send requests and swap buffers
|
||||
fn handle_post_record_processing(
|
||||
&mut self,
|
||||
should_consolidate: bool,
|
||||
sample_rate: u32,
|
||||
) -> Result<()> {
|
||||
// Send audio data for processing if track indicates consolidation needed
|
||||
if should_consolidate {
|
||||
if let Some((chunk_chain, sync_offset)) = self.track.get_audio_data_for_processing() {
|
||||
// Handle xruns
|
||||
if timing.missed_frames > 0 {
|
||||
self.column.handle_xrun(
|
||||
&timing,
|
||||
&mut self.chunk_factory,
|
||||
|row, chunk, sync_offset| {
|
||||
self.post_record_controller.send_request(
|
||||
chunk_chain,
|
||||
sync_offset as u32,
|
||||
sample_rate,
|
||||
row,
|
||||
chunk,
|
||||
sync_offset,
|
||||
client.sample_rate(),
|
||||
)
|
||||
},
|
||||
)?;
|
||||
}
|
||||
}
|
||||
|
||||
// Check for consolidation response
|
||||
if let Some(response) = self.post_record_controller.try_recv_response() {
|
||||
self.track
|
||||
.set_consolidated_buffer(response.consolidated_buffer)?;
|
||||
}
|
||||
// Process MIDI
|
||||
midi::process_events(self, ps)?;
|
||||
|
||||
// Process audio
|
||||
let input_buffer = self.ports.audio_in.as_slice(ps);
|
||||
let output_buffer = self.ports.audio_out.as_mut_slice(ps);
|
||||
output_buffer.fill(0.0);
|
||||
|
||||
self.column.process(
|
||||
&timing,
|
||||
input_buffer,
|
||||
output_buffer,
|
||||
&mut self.scratch_pad,
|
||||
&mut self.chunk_factory,
|
||||
|row, chunk, sync_offset| {
|
||||
self.post_record_controller.send_request(
|
||||
row,
|
||||
chunk,
|
||||
sync_offset,
|
||||
client.sample_rate(),
|
||||
)
|
||||
},
|
||||
)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Calculate timing information for track processing
|
||||
fn calculate_track_timing(
|
||||
&self,
|
||||
beat_sample_index: Option<u32>,
|
||||
state_before: &TrackState,
|
||||
) -> TrackTiming {
|
||||
match beat_sample_index {
|
||||
None => {
|
||||
// No beat in this buffer
|
||||
TrackTiming::NoBeat {
|
||||
position: self.playback_position,
|
||||
}
|
||||
}
|
||||
Some(beat_index) => {
|
||||
let beat_index = beat_index as usize;
|
||||
let pre_beat_position = self.playback_position;
|
||||
let post_beat_position = self.calculate_post_beat_position(state_before);
|
||||
|
||||
TrackTiming::Beat {
|
||||
pre_beat_position,
|
||||
post_beat_position,
|
||||
beat_sample_index: beat_index,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Calculate the correct playback position after a beat transition
|
||||
fn calculate_post_beat_position(&self, state_before: &TrackState) -> usize {
|
||||
let state_after = self.track.next_state(); // Use next_state since transition hasn't happened yet
|
||||
|
||||
match (state_before, state_after) {
|
||||
(_, TrackState::Playing) if !matches!(state_before, TrackState::Playing) => {
|
||||
// Just started playing - start from beginning
|
||||
// Note: In future Column implementation, this will be:
|
||||
// column.get_sync_position() to sync with other playing tracks
|
||||
0
|
||||
}
|
||||
(TrackState::Playing, TrackState::Playing) => {
|
||||
// Continue playing - use current position
|
||||
self.playback_position
|
||||
}
|
||||
_ => {
|
||||
// Not playing after transition - position doesn't matter
|
||||
self.playback_position
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Update playback position after track processing
|
||||
fn update_playback_position(
|
||||
&mut self,
|
||||
beat_sample_index: Option<u32>,
|
||||
buffer_size: usize,
|
||||
state_before: &TrackState,
|
||||
) {
|
||||
let state_after = self.track.current_state().clone();
|
||||
|
||||
match beat_sample_index {
|
||||
None => {
|
||||
// No beat - simple position update
|
||||
if *state_before == TrackState::Playing {
|
||||
self.advance_playback_position(buffer_size);
|
||||
}
|
||||
}
|
||||
Some(beat_index) => {
|
||||
let beat_index = beat_index as usize;
|
||||
|
||||
// Handle position updates around beat boundary
|
||||
if beat_index > 0 && *state_before == TrackState::Playing {
|
||||
// Advance position for samples before beat
|
||||
self.advance_playback_position(beat_index);
|
||||
}
|
||||
|
||||
// Check if state transition at beat affects position
|
||||
if state_after == TrackState::Playing
|
||||
&& !matches!(state_before, TrackState::Playing)
|
||||
{
|
||||
// Started playing at beat - reset position to post-beat calculation
|
||||
self.playback_position = self.calculate_post_beat_position(state_before);
|
||||
}
|
||||
|
||||
// Advance position for samples after beat if playing
|
||||
if beat_index < buffer_size && state_after == TrackState::Playing {
|
||||
let samples_after_beat = buffer_size - beat_index;
|
||||
self.advance_playback_position(samples_after_beat);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Advance playback position with looping
|
||||
fn advance_playback_position(&mut self, samples: usize) {
|
||||
if self.track.len() == 0 {
|
||||
self.playback_position = 0;
|
||||
return;
|
||||
}
|
||||
|
||||
self.playback_position += samples;
|
||||
|
||||
// Handle looping
|
||||
while self.playback_position >= self.track.len() {
|
||||
self.playback_position -= self.track.len();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -16,7 +16,7 @@ pub struct ConnectionState {

 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct MetronomeState {
-    pub frames_per_beat: u32,
+    pub frames_per_beat: usize,
     pub click_volume: f32, // 0.0 to 1.0
 }

@@ -1,29 +1,5 @@
|
||||
use crate::*;
|
||||
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
pub enum TrackState {
|
||||
Empty, // No audio data (---)
|
||||
Idle, // Has data, not playing (READY)
|
||||
Playing, // Currently playing (PLAY)
|
||||
Recording, // Currently recording (REC) - manual stop
|
||||
RecordingAutoStop {
|
||||
target_samples: usize, // Auto-stop when this many samples recorded
|
||||
sync_offset: usize, // Offset in samples from column start
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum TrackTiming {
|
||||
NoBeat {
|
||||
position: usize,
|
||||
},
|
||||
Beat {
|
||||
pre_beat_position: usize,
|
||||
post_beat_position: usize,
|
||||
beat_sample_index: usize,
|
||||
},
|
||||
}
|
||||
|
||||
pub struct Track {
|
||||
audio_data: AudioData,
|
||||
current_state: TrackState,
|
||||
@@ -31,6 +7,18 @@ pub struct Track {
|
||||
volume: f32,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
enum TrackState {
|
||||
Empty,
|
||||
Idle,
|
||||
Playing,
|
||||
Recording,
|
||||
RecordingAutoStop {
|
||||
target_samples: usize,
|
||||
sync_offset: usize,
|
||||
},
|
||||
}
|
||||
|
||||
impl Track {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
@@ -41,130 +29,201 @@ impl Track {
|
||||
}
|
||||
}
|
||||
|
||||
/// Main audio processing method called from ProcessHandler
|
||||
/// Returns true if track should be consolidated and saved
|
||||
pub fn process<F: ChunkFactory>(
|
||||
&mut self,
|
||||
ps: &jack::ProcessScope,
|
||||
ports: &mut JackPorts,
|
||||
timing: TrackTiming,
|
||||
chunk_factory: &mut F,
|
||||
) -> Result<bool> {
|
||||
let input_buffer = ports.audio_in.as_slice(ps);
|
||||
let output_buffer = ports.audio_out.as_mut_slice(ps);
|
||||
let buffer_size = output_buffer.len();
|
||||
pub fn is_recording(&self) -> bool {
|
||||
matches!(
|
||||
self.current_state,
|
||||
TrackState::Recording | TrackState::RecordingAutoStop { .. }
|
||||
)
|
||||
}
|
||||
|
||||
let should_consolidate = match timing {
|
||||
TrackTiming::NoBeat { position } => {
|
||||
pub fn is_playing(&self) -> bool {
|
||||
matches!(self.current_state, TrackState::Playing)
|
||||
}
|
||||
|
||||
pub fn is_idle(&self) -> bool {
|
||||
! self.is_recording() && ! self.is_playing()
|
||||
}
|
||||
|
||||
pub fn len(&self) -> usize {
|
||||
self.audio_data.len()
|
||||
}
|
||||
|
||||
pub fn volume(&self) -> f32 {
|
||||
self.volume
|
||||
}
|
||||
|
||||
pub fn set_volume(&mut self, volume: f32) {
|
||||
self.volume = volume.clamp(0.0, 1.0);
|
||||
}
|
||||
|
||||
pub fn record(&mut self) {
|
||||
self.next_state = TrackState::Recording;
|
||||
}
|
||||
|
||||
pub fn record_auto_stop(&mut self, target_samples: usize, sync_offset: usize) {
|
||||
self.next_state = TrackState::RecordingAutoStop {
|
||||
target_samples,
|
||||
sync_offset,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn play(&mut self) {
|
||||
self.next_state = TrackState::Playing;
|
||||
}
|
||||
|
||||
pub fn stop(&mut self) {
|
||||
self.next_state = TrackState::Idle;
|
||||
}
|
||||
|
||||
pub fn clear(&mut self) {
|
||||
self.next_state = TrackState::Empty;
|
||||
}
|
||||
|
||||
pub fn set_consolidated_buffer(&mut self, buffer: Box<[f32]>) -> Result<()> {
|
||||
self.audio_data.set_consolidated_buffer(buffer)
|
||||
}
|
||||
|
||||
pub fn handle_xrun<H>(
|
||||
&mut self,
|
||||
beat_in_missed: Option<u32>,
|
||||
missed_frames: usize,
|
||||
chunk_factory: &mut impl ChunkFactory,
|
||||
post_record_handler: H,
|
||||
) -> Result<()>
|
||||
where
|
||||
H: Fn(Arc<AudioChunk>, usize) -> Result<()>,
|
||||
{
|
||||
match beat_in_missed {
|
||||
None => {
|
||||
if self.is_recording() {
|
||||
self.audio_data
|
||||
.append_silence(missed_frames as _, chunk_factory)
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
Some(beat_offset) => {
|
||||
// Insert silence before beat with current state
|
||||
if beat_offset > 0 && self.is_recording() {
|
||||
self.audio_data
|
||||
.append_silence(beat_offset as _, chunk_factory)?;
|
||||
}
|
||||
// Apply state transition at beat boundary
|
||||
self.apply_state_transition(chunk_factory, post_record_handler)?;
|
||||
// Insert silence after beat with new state
|
||||
let frames_after_beat = missed_frames - beat_offset as usize;
|
||||
if frames_after_beat > 0 && self.is_recording() {
|
||||
self.audio_data
|
||||
.append_silence(frames_after_beat, chunk_factory)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Audio processing
|
||||
pub fn process<H>(
|
||||
&mut self,
|
||||
playback_position: usize,
|
||||
beat_in_buffer: Option<u32>,
|
||||
input_buffer: &[f32],
|
||||
output_buffer: &mut [f32],
|
||||
chunk_factory: &mut impl ChunkFactory,
|
||||
post_record_handler: H,
|
||||
) -> Result<()>
|
||||
where
|
||||
H: Fn(Arc<AudioChunk>, usize) -> Result<()>,
|
||||
{
|
||||
match beat_in_buffer {
|
||||
None => {
|
||||
// No beat in this buffer - process entire buffer with current state
|
||||
self.process_audio_range(
|
||||
input_buffer,
|
||||
output_buffer,
|
||||
0,
|
||||
buffer_size,
|
||||
position,
|
||||
playback_position,
|
||||
chunk_factory,
|
||||
)?;
|
||||
false // No state transition possible without beat
|
||||
)
|
||||
}
|
||||
TrackTiming::Beat {
|
||||
pre_beat_position,
|
||||
post_beat_position,
|
||||
beat_sample_index,
|
||||
} => {
|
||||
if beat_sample_index > 0 {
|
||||
Some(beat_index_in_buffer) => {
|
||||
// Process samples before beat with current state
|
||||
if beat_index_in_buffer > 0 {
|
||||
self.process_audio_range(
|
||||
input_buffer,
|
||||
output_buffer,
|
||||
0,
|
||||
beat_sample_index,
|
||||
pre_beat_position,
|
||||
&input_buffer[..beat_index_in_buffer as _],
|
||||
&mut output_buffer[..beat_index_in_buffer as _],
|
||||
playback_position,
|
||||
chunk_factory,
|
||||
)?;
|
||||
}
|
||||
|
||||
// Apply state transition at beat boundary and check if consolidation needed
|
||||
let should_consolidate = self.apply_state_transition(chunk_factory)?;
|
||||
// Apply state transition at beat boundary
|
||||
self.apply_state_transition(chunk_factory, post_record_handler)?;
|
||||
|
||||
if beat_sample_index < buffer_size {
|
||||
// Process samples after beat with new current state
|
||||
if (beat_index_in_buffer as usize) < output_buffer.len() {
|
||||
// Calculate position after beat for remaining samples
|
||||
let mut post_beat_position = playback_position + beat_index_in_buffer as usize;
|
||||
if self.audio_data.len() > 0 {
|
||||
post_beat_position %= self.audio_data.len();
|
||||
}
|
||||
|
||||
self.process_audio_range(
|
||||
input_buffer,
|
||||
output_buffer,
|
||||
beat_sample_index,
|
||||
buffer_size,
|
||||
&input_buffer[beat_index_in_buffer as _..],
|
||||
&mut output_buffer[beat_index_in_buffer as _..],
|
||||
post_beat_position,
|
||||
chunk_factory,
|
||||
)?;
|
||||
}
|
||||
|
||||
should_consolidate
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
Ok(should_consolidate)
|
||||
}
|
||||
|
||||
/// Process audio for a specific range within the buffer
|
||||
fn process_audio_range<F: ChunkFactory>(
|
||||
&mut self,
|
||||
input_buffer: &[f32],
|
||||
output_buffer: &mut [f32],
|
||||
start_index: usize,
|
||||
end_index: usize,
|
||||
playback_position: usize,
|
||||
chunk_factory: &mut F,
|
||||
) -> Result<()> {
|
||||
let sample_count = end_index - start_index;
|
||||
if sample_count == 0 {
|
||||
if output_buffer.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
match &mut self.current_state {
|
||||
TrackState::Empty | TrackState::Idle => {
|
||||
// Output silence for this range
|
||||
output_buffer[start_index..end_index].fill(0.0);
|
||||
output_buffer.fill(0.0);
|
||||
}
|
||||
TrackState::Recording => {
|
||||
// Record input samples (manual recording)
|
||||
let samples_to_record = &input_buffer[start_index..end_index];
|
||||
self.audio_data
|
||||
.append_samples(samples_to_record, chunk_factory)?;
|
||||
|
||||
// Output silence during recording
|
||||
output_buffer[start_index..end_index].fill(0.0);
|
||||
.append_samples(input_buffer, chunk_factory)?;
|
||||
output_buffer.fill(0.0);
|
||||
}
|
||||
TrackState::RecordingAutoStop { target_samples, .. } => {
|
||||
// Record input samples with auto-stop logic
|
||||
let samples_to_record = &input_buffer[start_index..end_index];
|
||||
let current_length = self.audio_data.len();
|
||||
|
||||
if current_length < *target_samples {
|
||||
// Still recording - determine how many samples to actually record
|
||||
let samples_needed = *target_samples - current_length;
|
||||
let samples_to_append = samples_to_record.len().min(samples_needed);
|
||||
let samples_to_append = input_buffer.len().min(samples_needed);
|
||||
|
||||
if samples_to_append > 0 {
|
||||
self.audio_data.append_samples(
|
||||
&samples_to_record[..samples_to_append],
|
||||
chunk_factory,
|
||||
)?;
|
||||
self.audio_data
|
||||
.append_samples(&input_buffer[..samples_to_append], chunk_factory)?;
|
||||
}
|
||||
|
||||
// Check if we've reached target and should auto-transition
|
||||
if self.audio_data.len() >= *target_samples {
|
||||
self.next_state = TrackState::Playing;
|
||||
}
|
||||
// Assert: auto-stop target should never be exceeded mid-buffer
|
||||
assert!(
|
||||
self.audio_data.len() <= *target_samples,
|
||||
"Auto-stop recording target exceeded mid-buffer - this indicates incorrect usage. \
|
||||
Target: {}, actual: {}", *target_samples, self.audio_data.len()
|
||||
);
|
||||
}
|
||||
|
||||
// Output silence during recording
|
||||
output_buffer[start_index..end_index].fill(0.0);
|
||||
output_buffer.fill(0.0);
|
||||
}
|
||||
TrackState::Playing => {
|
||||
// Playback with looping
|
||||
self.audio_data.copy_samples_to_output(
|
||||
&mut output_buffer[start_index..end_index],
|
||||
output_buffer,
|
||||
playback_position,
|
||||
self.volume,
|
||||
)?;
|
||||
@@ -176,13 +235,25 @@ impl Track {
|
||||
|
||||
/// Apply state transition from next_state to current_state
|
||||
/// Returns true if track should be consolidated and saved
|
||||
fn apply_state_transition<F: ChunkFactory>(&mut self, chunk_factory: &mut F) -> Result<bool> {
|
||||
// Check if this is a recording → playing transition (consolidation trigger)
|
||||
let should_consolidate = matches!(
|
||||
(&self.current_state, &self.next_state),
|
||||
(TrackState::Recording, TrackState::Playing)
|
||||
| (TrackState::RecordingAutoStop { .. }, TrackState::Playing)
|
||||
);
|
||||
fn apply_state_transition<H>(
|
||||
&mut self,
|
||||
chunk_factory: &mut impl ChunkFactory,
|
||||
post_record_handler: H,
|
||||
) -> Result<()>
|
||||
where
|
||||
H: Fn(Arc<AudioChunk>, usize) -> Result<()>,
|
||||
{
|
||||
// Check for auto-stop recording completion and transition to playing if no other state transition
|
||||
if self.current_state == self.next_state {
|
||||
if let TrackState::RecordingAutoStop { target_samples, .. } = &self.current_state {
|
||||
if self.audio_data.len() >= *target_samples {
|
||||
self.next_state = TrackState::Playing;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// remember recording before transition
|
||||
let was_recording = self.is_recording();
|
||||
|
||||
// Handle transitions that require setup
|
||||
match (&self.current_state, &self.next_state) {
|
||||
@@ -217,118 +288,11 @@ impl Track {
|
||||
// Apply the state transition
|
||||
self.current_state = self.next_state.clone();
|
||||
|
||||
Ok(should_consolidate)
|
||||
// Handle post-record processing
|
||||
if was_recording && !self.is_recording() {
|
||||
let (chunk, sync_offset) = self.audio_data.get_chunk_for_processing()?;
|
||||
post_record_handler(chunk, sync_offset)?;
|
||||
}
|
||||
|
||||
/// Get audio data for post-record processing (returns chunk and sync offset)
|
||||
pub fn get_audio_data_for_processing(&self) -> Option<(Arc<AudioChunk>, usize)> {
|
||||
self.audio_data.get_chunk_for_processing()
|
||||
}
|
||||
|
||||
/// Set consolidated buffer (for swapping in consolidated audio data)
|
||||
pub fn set_consolidated_buffer(&mut self, buffer: Box<[f32]>) -> Result<()> {
|
||||
self.audio_data.set_consolidated_buffer(buffer)
|
||||
}
|
||||
|
||||
// Public accessors and commands for MIDI handling
|
||||
pub fn current_state(&self) -> &TrackState {
|
||||
&self.current_state
|
||||
}
|
||||
|
||||
pub fn next_state(&self) -> &TrackState {
|
||||
&self.next_state
|
||||
}
|
||||
|
||||
pub fn len(&self) -> usize {
|
||||
self.audio_data.len()
|
||||
}
|
||||
|
||||
pub fn volume(&self) -> f32 {
|
||||
self.volume
|
||||
}
|
||||
|
||||
pub fn set_volume(&mut self, volume: f32) {
|
||||
self.volume = volume.clamp(0.0, 1.0);
|
||||
}
|
||||
|
||||
/// Handle record/play toggle command (sets next_state)
|
||||
pub fn queue_record_toggle(&mut self) {
|
||||
match self.current_state {
|
||||
TrackState::Empty | TrackState::Idle => {
|
||||
self.next_state = TrackState::Recording;
|
||||
}
|
||||
TrackState::Recording => {
|
||||
self.next_state = TrackState::Playing;
|
||||
}
|
||||
TrackState::RecordingAutoStop { .. } => {
|
||||
// Auto-stop recording - can't manually stop, wait for auto-transition
|
||||
self.next_state = self.current_state.clone();
|
||||
}
|
||||
TrackState::Playing => {
|
||||
self.next_state = TrackState::Idle;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Handle auto-stop record command (sets next_state)
|
||||
pub fn queue_record_auto_stop(&mut self, target_samples: usize, sync_offset: usize) {
|
||||
match self.current_state {
|
||||
TrackState::Empty | TrackState::Idle => {
|
||||
self.next_state = TrackState::RecordingAutoStop {
|
||||
target_samples,
|
||||
sync_offset,
|
||||
};
|
||||
}
|
||||
TrackState::Recording => {
|
||||
// Switch from manual to auto-stop recording
|
||||
self.next_state = TrackState::RecordingAutoStop {
|
||||
target_samples,
|
||||
sync_offset,
|
||||
};
|
||||
}
|
||||
TrackState::RecordingAutoStop { .. } => {
|
||||
// Already auto-recording - update parameters
|
||||
self.next_state = TrackState::RecordingAutoStop {
|
||||
target_samples,
|
||||
sync_offset,
|
||||
};
|
||||
}
|
||||
TrackState::Playing => {
|
||||
// Stop playing and start auto-recording
|
||||
self.next_state = TrackState::RecordingAutoStop {
|
||||
target_samples,
|
||||
sync_offset,
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Handle play/mute toggle command (sets next_state)
|
||||
pub fn queue_play_toggle(&mut self) {
|
||||
match self.current_state {
|
||||
TrackState::Empty => {
|
||||
// Can't play empty track
|
||||
self.next_state = TrackState::Empty;
|
||||
}
|
||||
TrackState::Idle => {
|
||||
if !self.audio_data.is_empty() {
|
||||
self.next_state = TrackState::Playing;
|
||||
} else {
|
||||
self.next_state = TrackState::Idle;
|
||||
}
|
||||
}
|
||||
TrackState::Recording | TrackState::RecordingAutoStop { .. } => {
|
||||
// Don't change state while recording
|
||||
self.next_state = self.current_state.clone();
|
||||
}
|
||||
TrackState::Playing => {
|
||||
self.next_state = TrackState::Idle;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Handle clear command (sets next_state)
|
||||
pub fn queue_clear(&mut self) {
|
||||
self.next_state = TrackState::Empty;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||