Use audio engine as source of truth for audio tracks

This commit is contained in:
Skyler Lehmkuhl 2026-03-11 12:37:31 -04:00
parent b8f847e167
commit 3bc980d08d
13 changed files with 379 additions and 230 deletions

View File

@ -10,8 +10,17 @@ use crate::audio::track::{Track, TrackId, TrackNode};
use crate::command::{AudioEvent, Command, Query, QueryResponse}; use crate::command::{AudioEvent, Command, Query, QueryResponse};
use crate::io::MidiInputManager; use crate::io::MidiInputManager;
use petgraph::stable_graph::NodeIndex; use petgraph::stable_graph::NodeIndex;
use std::collections::HashMap;
use std::sync::atomic::{AtomicU32, AtomicU64, Ordering}; use std::sync::atomic::{AtomicU32, AtomicU64, Ordering};
use std::sync::Arc; use std::sync::{Arc, RwLock};
/// Read-only snapshot of all clip instances, updated after every clip mutation.
/// Written by the engine whenever it handles a clip-mutating command (via
/// `refresh_clip_snapshot`) and read by the UI thread through
/// `EngineController::clip_snapshot()`. Shared behind an `Arc<RwLock<_>>`.
#[derive(Default, Clone)]
pub struct AudioClipSnapshot {
    /// Audio clip instances, keyed by the audio track that owns them.
    pub audio: HashMap<TrackId, Vec<AudioClipInstance>>,
    /// MIDI clip instances, keyed by the MIDI track that owns them.
    pub midi: HashMap<TrackId, Vec<MidiClipInstance>>,
}
/// Audio engine for Phase 6: hierarchical tracks with groups /// Audio engine for Phase 6: hierarchical tracks with groups
pub struct Engine { pub struct Engine {
@ -34,12 +43,18 @@ pub struct Engine {
chunk_generation_rx: std::sync::mpsc::Receiver<AudioEvent>, chunk_generation_rx: std::sync::mpsc::Receiver<AudioEvent>,
chunk_generation_tx: std::sync::mpsc::Sender<AudioEvent>, chunk_generation_tx: std::sync::mpsc::Sender<AudioEvent>,
// Shared clip snapshot for UI reads
clip_snapshot: Arc<RwLock<AudioClipSnapshot>>,
// Shared playhead for UI reads // Shared playhead for UI reads
playhead_atomic: Arc<AtomicU64>, playhead_atomic: Arc<AtomicU64>,
// Shared MIDI clip ID counter for synchronous access // Shared MIDI clip ID counter for synchronous access
next_midi_clip_id_atomic: Arc<AtomicU32>, next_midi_clip_id_atomic: Arc<AtomicU32>,
// Shared audio clip ID counter (shared with EngineController for pre-assigned IDs)
next_audio_clip_id_atomic: Arc<AtomicU32>,
// Event counter for periodic position updates // Event counter for periodic position updates
frames_since_last_event: usize, frames_since_last_event: usize,
event_interval_frames: usize, event_interval_frames: usize,
@ -47,8 +62,8 @@ pub struct Engine {
// Mix buffer for output // Mix buffer for output
mix_buffer: Vec<f32>, mix_buffer: Vec<f32>,
// ID counters // ID counters (legacy, unused — kept for potential future use)
next_clip_id: ClipId, // Audio clip IDs are now generated via next_audio_clip_id_atomic
// Recording state // Recording state
recording_state: Option<RecordingState>, recording_state: Option<RecordingState>,
@ -133,12 +148,13 @@ impl Engine {
query_response_tx, query_response_tx,
chunk_generation_rx, chunk_generation_rx,
chunk_generation_tx, chunk_generation_tx,
clip_snapshot: Arc::new(RwLock::new(AudioClipSnapshot::default())),
playhead_atomic, playhead_atomic,
next_midi_clip_id_atomic: Arc::new(AtomicU32::new(0)), next_midi_clip_id_atomic: Arc::new(AtomicU32::new(0)),
next_audio_clip_id_atomic: Arc::new(AtomicU32::new(0)),
frames_since_last_event: 0, frames_since_last_event: 0,
event_interval_frames, event_interval_frames,
mix_buffer: Vec::new(), mix_buffer: Vec::new(),
next_clip_id: 0,
recording_state: None, recording_state: None,
input_rx: None, input_rx: None,
recording_mirror_tx: None, recording_mirror_tx: None,
@ -240,6 +256,25 @@ impl Engine {
&self.audio_pool &self.audio_pool
} }
/// Rebuild the clip snapshot from the current project state.
/// Call this after any command that adds, removes, or modifies clip instances.
fn refresh_clip_snapshot(&self) {
let mut snap = self.clip_snapshot.write().unwrap();
snap.audio.clear();
snap.midi.clear();
for (track_id, node) in self.project.track_iter() {
match node {
crate::audio::track::TrackNode::Audio(t) => {
snap.audio.insert(track_id, t.clips.clone());
}
crate::audio::track::TrackNode::Midi(t) => {
snap.midi.insert(track_id, t.clip_instances.clone());
}
crate::audio::track::TrackNode::Group(_) => {}
}
}
}
/// Get a handle for controlling playback from the UI thread /// Get a handle for controlling playback from the UI thread
pub fn get_controller( pub fn get_controller(
&self, &self,
@ -253,6 +288,8 @@ impl Engine {
query_response_rx, query_response_rx,
playhead: Arc::clone(&self.playhead_atomic), playhead: Arc::clone(&self.playhead_atomic),
next_midi_clip_id: Arc::clone(&self.next_midi_clip_id_atomic), next_midi_clip_id: Arc::clone(&self.next_midi_clip_id_atomic),
next_audio_clip_id: Arc::clone(&self.next_audio_clip_id_atomic),
clip_snapshot: Arc::clone(&self.clip_snapshot),
sample_rate: self.sample_rate, sample_rate: self.sample_rate,
channels: self.channels, channels: self.channels,
cached_export_response: None, cached_export_response: None,
@ -689,6 +726,7 @@ impl Engine {
} }
_ => {} _ => {}
} }
self.refresh_clip_snapshot();
} }
Command::TrimClip(track_id, clip_id, new_internal_start, new_internal_end) => { Command::TrimClip(track_id, clip_id, new_internal_start, new_internal_end) => {
// Trim changes which portion of the source content is used // Trim changes which portion of the source content is used
@ -713,6 +751,7 @@ impl Engine {
} }
_ => {} _ => {}
} }
self.refresh_clip_snapshot();
} }
Command::ExtendClip(track_id, clip_id, new_external_duration) => { Command::ExtendClip(track_id, clip_id, new_external_duration) => {
// Extend changes the external duration (enables looping if > internal duration) // Extend changes the external duration (enables looping if > internal duration)
@ -730,6 +769,7 @@ impl Engine {
} }
_ => {} _ => {}
} }
self.refresh_clip_snapshot();
} }
Command::CreateMetatrack(name, parent_id) => { Command::CreateMetatrack(name, parent_id) => {
let track_id = self.project.add_group_track(name.clone(), parent_id); let track_id = self.project.add_group_track(name.clone(), parent_id);
@ -841,23 +881,8 @@ impl Engine {
// Notify UI about the new audio file // Notify UI about the new audio file
let _ = self.event_tx.push(AudioEvent::AudioFileAdded(pool_index, path)); let _ = self.event_tx.push(AudioEvent::AudioFileAdded(pool_index, path));
} }
Command::AddAudioClip(track_id, pool_index, start_time, duration, offset) => { Command::AddAudioClip(track_id, clip_id, pool_index, start_time, duration, offset) => {
eprintln!("[Engine] AddAudioClip: track_id={}, pool_index={}, start_time={}, duration={}", // Create a new clip instance with the pre-assigned clip_id
track_id, pool_index, start_time, duration);
// Check if pool index is valid
let pool_size = self.audio_pool.len();
if pool_index >= pool_size {
eprintln!("[Engine] ERROR: pool_index {} is out of bounds (pool size: {})",
pool_index, pool_size);
} else {
eprintln!("[Engine] Pool index {} is valid, pool has {} files",
pool_index, pool_size);
}
// Create a new clip instance with unique ID using legacy parameters
let clip_id = self.next_clip_id;
self.next_clip_id += 1;
let clip = AudioClipInstance::from_legacy( let clip = AudioClipInstance::from_legacy(
clip_id, clip_id,
pool_index, pool_index,
@ -869,12 +894,9 @@ impl Engine {
// Add clip to track // Add clip to track
if let Some(crate::audio::track::TrackNode::Audio(track)) = self.project.get_track_mut(track_id) { if let Some(crate::audio::track::TrackNode::Audio(track)) = self.project.get_track_mut(track_id) {
track.clips.push(clip); track.clips.push(clip);
eprintln!("[Engine] Clip {} added to track {} successfully", clip_id, track_id);
// Notify UI about the new clip
let _ = self.event_tx.push(AudioEvent::ClipAdded(track_id, clip_id)); let _ = self.event_tx.push(AudioEvent::ClipAdded(track_id, clip_id));
} else {
eprintln!("[Engine] ERROR: Track {} not found or is not an audio track", track_id);
} }
self.refresh_clip_snapshot();
} }
Command::CreateMidiTrack(name, parent_id) => { Command::CreateMidiTrack(name, parent_id) => {
let track_id = self.project.add_midi_track(name.clone(), parent_id); let track_id = self.project.add_midi_track(name.clone(), parent_id);
@ -903,6 +925,7 @@ impl Engine {
// Notify UI about the new clip with its ID (using clip_id for now) // Notify UI about the new clip with its ID (using clip_id for now)
let _ = self.event_tx.push(AudioEvent::ClipAdded(track_id, clip_id)); let _ = self.event_tx.push(AudioEvent::ClipAdded(track_id, clip_id));
self.refresh_clip_snapshot();
} }
Command::AddMidiNote(track_id, clip_id, time_offset, note, velocity, duration) => { Command::AddMidiNote(track_id, clip_id, time_offset, note, velocity, duration) => {
// Add a MIDI note event to the specified clip in the pool // Add a MIDI note event to the specified clip in the pool
@ -935,6 +958,7 @@ impl Engine {
Command::AddLoadedMidiClip(track_id, clip, start_time) => { Command::AddLoadedMidiClip(track_id, clip, start_time) => {
// Add a pre-loaded MIDI clip to the track with the given start time // Add a pre-loaded MIDI clip to the track with the given start time
let _ = self.project.add_midi_clip_at(track_id, clip, start_time); let _ = self.project.add_midi_clip_at(track_id, clip, start_time);
self.refresh_clip_snapshot();
} }
Command::UpdateMidiClipNotes(_track_id, clip_id, notes) => { Command::UpdateMidiClipNotes(_track_id, clip_id, notes) => {
// Update all notes in a MIDI clip (directly in the pool) // Update all notes in a MIDI clip (directly in the pool)
@ -961,6 +985,7 @@ impl Engine {
Command::RemoveMidiClip(track_id, instance_id) => { Command::RemoveMidiClip(track_id, instance_id) => {
// Remove a MIDI clip instance from a track (for undo/redo support) // Remove a MIDI clip instance from a track (for undo/redo support)
let _ = self.project.remove_midi_clip(track_id, instance_id); let _ = self.project.remove_midi_clip(track_id, instance_id);
self.refresh_clip_snapshot();
} }
Command::RemoveAudioClip(track_id, instance_id) => { Command::RemoveAudioClip(track_id, instance_id) => {
// Deactivate the per-clip disk reader before removing // Deactivate the per-clip disk reader before removing
@ -971,6 +996,7 @@ impl Engine {
} }
// Remove an audio clip instance from a track (for undo/redo support) // Remove an audio clip instance from a track (for undo/redo support)
let _ = self.project.remove_audio_clip(track_id, instance_id); let _ = self.project.remove_audio_clip(track_id, instance_id);
self.refresh_clip_snapshot();
} }
Command::RequestBufferPoolStats => { Command::RequestBufferPoolStats => {
// Send buffer pool statistics back to UI // Send buffer pool statistics back to UI
@ -1153,7 +1179,7 @@ impl Engine {
// Reset ID counters // Reset ID counters
self.next_midi_clip_id_atomic.store(0, Ordering::Relaxed); self.next_midi_clip_id_atomic.store(0, Ordering::Relaxed);
self.next_clip_id = 0; self.next_audio_clip_id_atomic.store(0, Ordering::Relaxed);
// Clear mix buffer // Clear mix buffer
self.mix_buffer.clear(); self.mix_buffer.clear();
@ -2562,10 +2588,12 @@ impl Engine {
} }
Query::AddMidiClipSync(track_id, clip, start_time) => { Query::AddMidiClipSync(track_id, clip, start_time) => {
// Add MIDI clip to track and return the instance ID // Add MIDI clip to track and return the instance ID
match self.project.add_midi_clip_at(track_id, clip, start_time) { let result = match self.project.add_midi_clip_at(track_id, clip, start_time) {
Ok(instance_id) => QueryResponse::MidiClipInstanceAdded(Ok(instance_id)), Ok(instance_id) => QueryResponse::MidiClipInstanceAdded(Ok(instance_id)),
Err(e) => QueryResponse::MidiClipInstanceAdded(Err(e.to_string())), Err(e) => QueryResponse::MidiClipInstanceAdded(Err(e.to_string())),
} };
self.refresh_clip_snapshot();
result
} }
Query::AddMidiClipInstanceSync(track_id, mut instance) => { Query::AddMidiClipInstanceSync(track_id, mut instance) => {
// Add MIDI clip instance to track (clip must already be in pool) // Add MIDI clip instance to track (clip must already be in pool)
@ -2573,54 +2601,12 @@ impl Engine {
let instance_id = self.project.next_midi_clip_instance_id(); let instance_id = self.project.next_midi_clip_instance_id();
instance.id = instance_id; instance.id = instance_id;
match self.project.add_midi_clip_instance(track_id, instance) { let result = match self.project.add_midi_clip_instance(track_id, instance) {
Ok(_) => QueryResponse::MidiClipInstanceAdded(Ok(instance_id)), Ok(_) => QueryResponse::MidiClipInstanceAdded(Ok(instance_id)),
Err(e) => QueryResponse::MidiClipInstanceAdded(Err(e.to_string())), Err(e) => QueryResponse::MidiClipInstanceAdded(Err(e.to_string())),
}
}
Query::AddAudioClipSync(track_id, pool_index, start_time, duration, offset) => {
// Add audio clip to track and return the instance ID
// Create audio clip instance
let instance_id = self.next_clip_id;
self.next_clip_id += 1;
// For compressed files, create a per-clip read-ahead buffer
let read_ahead = if let Some(file) = self.audio_pool.get_file(pool_index) {
if matches!(file.storage, crate::audio::pool::AudioStorage::Compressed { .. }) {
let buffer = crate::audio::disk_reader::DiskReader::create_buffer(
file.sample_rate,
file.channels,
);
if let Some(ref mut dr) = self.disk_reader {
dr.send(crate::audio::disk_reader::DiskReaderCommand::ActivateFile {
reader_id: instance_id as u64,
path: file.path.clone(),
buffer: buffer.clone(),
});
}
Some(buffer)
} else {
None
}
} else {
None
}; };
self.refresh_clip_snapshot();
let clip = AudioClipInstance { result
id: instance_id,
audio_pool_index: pool_index,
internal_start: offset,
internal_end: offset + duration,
external_start: start_time,
external_duration: duration,
gain: 1.0,
read_ahead,
};
match self.project.add_clip(track_id, clip) {
Ok(instance_id) => QueryResponse::AudioClipInstanceAdded(Ok(instance_id)),
Err(e) => QueryResponse::AudioClipInstanceAdded(Err(e.to_string())),
}
} }
Query::AddAudioFileSync(path, data, channels, sample_rate) => { Query::AddAudioFileSync(path, data, channels, sample_rate) => {
// Add audio file to pool and return the pool index // Add audio file to pool and return the pool index
@ -2764,9 +2750,8 @@ impl Engine {
// Create WAV writer // Create WAV writer
match WavWriter::create(&temp_file_path, self.sample_rate, self.channels) { match WavWriter::create(&temp_file_path, self.sample_rate, self.channels) {
Ok(writer) => { Ok(writer) => {
// Create intermediate clip // Create intermediate clip with a unique ID
let clip_id = self.next_clip_id; let clip_id = self.next_audio_clip_id_atomic.fetch_add(1, Ordering::Relaxed);
self.next_clip_id += 1;
let clip = crate::audio::clip::Clip::new( let clip = crate::audio::clip::Clip::new(
clip_id, clip_id,
@ -2780,6 +2765,7 @@ impl Engine {
// Add clip to track // Add clip to track
if let Some(crate::audio::track::TrackNode::Audio(track)) = self.project.get_track_mut(track_id) { if let Some(crate::audio::track::TrackNode::Audio(track)) = self.project.get_track_mut(track_id) {
track.clips.push(clip); track.clips.push(clip);
self.refresh_clip_snapshot();
} }
// Create recording state // Create recording state
@ -2878,6 +2864,7 @@ impl Engine {
eprintln!("[STOP_RECORDING] Updated clip {} with pool_index {}", clip_id, pool_index); eprintln!("[STOP_RECORDING] Updated clip {} with pool_index {}", clip_id, pool_index);
} }
} }
self.refresh_clip_snapshot();
// Delete temp file // Delete temp file
let _ = std::fs::remove_file(&temp_file_path); let _ = std::fs::remove_file(&temp_file_path);
@ -2984,6 +2971,8 @@ impl Engine {
eprintln!("[MIDI_RECORDING] ERROR: Clip {} not found in pool!", clip_id); eprintln!("[MIDI_RECORDING] ERROR: Clip {} not found in pool!", clip_id);
} }
self.refresh_clip_snapshot();
// Send event to UI // Send event to UI
eprintln!("[MIDI_RECORDING] Pushing MidiRecordingStopped event to event_tx..."); eprintln!("[MIDI_RECORDING] Pushing MidiRecordingStopped event to event_tx...");
match self.event_tx.push(AudioEvent::MidiRecordingStopped(track_id, clip_id, note_count)) { match self.event_tx.push(AudioEvent::MidiRecordingStopped(track_id, clip_id, note_count)) {
@ -3018,6 +3007,8 @@ pub struct EngineController {
query_response_rx: rtrb::Consumer<QueryResponse>, query_response_rx: rtrb::Consumer<QueryResponse>,
playhead: Arc<AtomicU64>, playhead: Arc<AtomicU64>,
next_midi_clip_id: Arc<AtomicU32>, next_midi_clip_id: Arc<AtomicU32>,
next_audio_clip_id: Arc<AtomicU32>,
clip_snapshot: Arc<RwLock<AudioClipSnapshot>>,
sample_rate: u32, sample_rate: u32,
#[allow(dead_code)] // Used in public getter method #[allow(dead_code)] // Used in public getter method
channels: u32, channels: u32,
@ -3112,6 +3103,12 @@ impl EngineController {
frames as f64 / self.sample_rate as f64 frames as f64 / self.sample_rate as f64
} }
/// Get the shared clip snapshot. The UI can read this each frame to display
/// the authoritative clip state from the backend.
///
/// Returns a clone of the `Arc`, so the caller holds its own handle to the
/// same `RwLock`-protected snapshot. Keep read locks short: the engine takes
/// the write lock when it refreshes the snapshot after clip mutations.
pub fn clip_snapshot(&self) -> Arc<RwLock<AudioClipSnapshot>> {
    Arc::clone(&self.clip_snapshot)
}
/// Create a new metatrack /// Create a new metatrack
pub fn create_metatrack(&mut self, name: String) { pub fn create_metatrack(&mut self, name: String) {
let _ = self.command_tx.push(Command::CreateMetatrack(name, None)); let _ = self.command_tx.push(Command::CreateMetatrack(name, None));
@ -3199,9 +3196,22 @@ impl EngineController {
} }
} }
/// Add a clip to an audio track /// Generate the next unique audio clip instance ID (atomic, thread-safe)
pub fn add_audio_clip(&mut self, track_id: TrackId, pool_index: usize, start_time: f64, duration: f64, offset: f64) { pub fn next_audio_clip_id(&self) -> AudioClipInstanceId {
let _ = self.command_tx.push(Command::AddAudioClip(track_id, pool_index, start_time, duration, offset)); self.next_audio_clip_id.fetch_add(1, Ordering::Relaxed)
}
/// Add a clip to an audio track (async, fire-and-forget)
/// Returns the pre-assigned clip instance ID so callers can track the clip without a sync round-trip
pub fn add_audio_clip(&mut self, track_id: TrackId, pool_index: usize, start_time: f64, duration: f64, offset: f64) -> AudioClipInstanceId {
let clip_id = self.next_audio_clip_id.fetch_add(1, Ordering::Relaxed);
let _ = self.command_tx.push(Command::AddAudioClip(track_id, clip_id, pool_index, start_time, duration, offset));
clip_id
}
/// Add a clip to an audio track with a pre-assigned ID (for undo/redo, restoring deleted clips)
pub fn add_audio_clip_with_id(&mut self, track_id: TrackId, clip_id: AudioClipInstanceId, pool_index: usize, start_time: f64, duration: f64, offset: f64) {
let _ = self.command_tx.push(Command::AddAudioClip(track_id, clip_id, pool_index, start_time, duration, offset));
} }
/// Create a new MIDI track /// Create a new MIDI track

View File

@ -19,7 +19,7 @@ pub mod waveform_cache;
pub use automation::{AutomationLane, AutomationLaneId, AutomationPoint, CurveType, ParameterId}; pub use automation::{AutomationLane, AutomationLaneId, AutomationPoint, CurveType, ParameterId};
pub use buffer_pool::BufferPool; pub use buffer_pool::BufferPool;
pub use clip::{AudioClipInstance, AudioClipInstanceId, Clip, ClipId}; pub use clip::{AudioClipInstance, AudioClipInstanceId, Clip, ClipId};
pub use engine::{Engine, EngineController}; pub use engine::{AudioClipSnapshot, Engine, EngineController};
pub use export::{export_audio, ExportFormat, ExportSettings}; pub use export::{export_audio, ExportFormat, ExportSettings};
pub use metronome::Metronome; pub use metronome::Metronome;
pub use midi::{MidiClip, MidiClipId, MidiClipInstance, MidiClipInstanceId, MidiEvent}; pub use midi::{MidiClip, MidiClipId, MidiClipInstance, MidiClipInstanceId, MidiEvent};

View File

@ -211,6 +211,11 @@ impl Project {
self.tracks.get_mut(&track_id) self.tracks.get_mut(&track_id)
} }
/// Iterate over all tracks in the project, yielding each track's id together
/// with a shared reference to its node. Iteration order follows the
/// underlying map and is unspecified.
pub fn track_iter(&self) -> impl Iterator<Item = (TrackId, &TrackNode)> {
    self.tracks.iter().map(|(id, node)| (*id, node))
}
/// Get oscilloscope data from a node in a track's graph /// Get oscilloscope data from a node in a track's graph
pub fn get_oscilloscope_data(&self, track_id: TrackId, node_id: u32, sample_count: usize) -> Option<(Vec<f32>, Vec<f32>)> { pub fn get_oscilloscope_data(&self, track_id: TrackId, node_id: u32, sample_count: usize) -> Option<(Vec<f32>, Vec<f32>)> {
if let Some(TrackNode::Midi(track)) = self.tracks.get(&track_id) { if let Some(TrackNode::Midi(track)) = self.tracks.get(&track_id) {

View File

@ -67,8 +67,9 @@ pub enum Command {
/// Add an audio file to the pool (path, data, channels, sample_rate) /// Add an audio file to the pool (path, data, channels, sample_rate)
/// Returns the pool index via an AudioEvent /// Returns the pool index via an AudioEvent
AddAudioFile(String, Vec<f32>, u32, u32), AddAudioFile(String, Vec<f32>, u32, u32),
/// Add a clip to an audio track (track_id, pool_index, start_time, duration, offset) /// Add a clip to an audio track (track_id, clip_id, pool_index, start_time, duration, offset)
AddAudioClip(TrackId, usize, f64, f64, f64), /// The clip_id is pre-assigned by the caller (via EngineController::next_audio_clip_id())
AddAudioClip(TrackId, AudioClipInstanceId, usize, f64, f64, f64),
// MIDI commands // MIDI commands
/// Create a new MIDI track with a name and optional parent group /// Create a new MIDI track with a name and optional parent group
@ -418,8 +419,6 @@ pub enum Query {
/// Add a MIDI clip instance to a track synchronously (track_id, instance) - returns instance ID /// Add a MIDI clip instance to a track synchronously (track_id, instance) - returns instance ID
/// The clip must already exist in the MidiClipPool /// The clip must already exist in the MidiClipPool
AddMidiClipInstanceSync(TrackId, crate::audio::midi::MidiClipInstance), AddMidiClipInstanceSync(TrackId, crate::audio::midi::MidiClipInstance),
/// Add an audio clip to a track synchronously (track_id, pool_index, start_time, duration, offset) - returns instance ID
AddAudioClipSync(TrackId, usize, f64, f64, f64),
/// Add an audio file to the pool synchronously (path, data, channels, sample_rate) - returns pool index /// Add an audio file to the pool synchronously (path, data, channels, sample_rate) - returns pool index
AddAudioFileSync(String, Vec<f32>, u32, u32), AddAudioFileSync(String, Vec<f32>, u32, u32),
/// Import an audio file synchronously (path) - returns pool index. /// Import an audio file synchronously (path) - returns pool index.
@ -501,8 +500,6 @@ pub enum QueryResponse {
AudioExported(Result<(), String>), AudioExported(Result<(), String>),
/// MIDI clip instance added (returns instance ID) /// MIDI clip instance added (returns instance ID)
MidiClipInstanceAdded(Result<MidiClipInstanceId, String>), MidiClipInstanceAdded(Result<MidiClipInstanceId, String>),
/// Audio clip instance added (returns instance ID)
AudioClipInstanceAdded(Result<AudioClipInstanceId, String>),
/// Audio file added to pool (returns pool index) /// Audio file added to pool (returns pool index)
AudioFileAddedSync(Result<usize, String>), AudioFileAddedSync(Result<usize, String>),
/// Audio file imported to pool (returns pool index) /// Audio file imported to pool (returns pool index)

View File

@ -13,7 +13,7 @@ pub mod tui;
// Re-export commonly used types // Re-export commonly used types
pub use audio::{ pub use audio::{
AudioClipInstanceId, AudioPool, AudioTrack, AutomationLane, AutomationLaneId, AutomationPoint, BufferPool, Clip, ClipId, CurveType, Engine, EngineController, AudioClipInstanceId, AudioClipSnapshot, AudioPool, AudioTrack, AutomationLane, AutomationLaneId, AutomationPoint, BufferPool, Clip, ClipId, CurveType, Engine, EngineController,
Metatrack, MidiClip, MidiClipId, MidiClipInstance, MidiClipInstanceId, MidiEvent, MidiTrack, ParameterId, PoolAudioFile, Project, RecordingState, RenderContext, Track, TrackId, Metatrack, MidiClip, MidiClipId, MidiClipInstance, MidiClipInstanceId, MidiEvent, MidiTrack, ParameterId, PoolAudioFile, Project, RecordingState, RenderContext, Track, TrackId,
TrackNode, TrackNode,
}; };

View File

@ -235,34 +235,30 @@ impl Action for AddClipInstanceAction {
} }
} }
AudioClipType::Sampled { audio_pool_index } => { AudioClipType::Sampled { audio_pool_index } => {
// For sampled audio, send AddAudioClipSync query
use daw_backend::command::{Query, QueryResponse};
let internal_start = self.clip_instance.trim_start; let internal_start = self.clip_instance.trim_start;
let internal_end = self.clip_instance.trim_end.unwrap_or(clip.duration); let internal_end = self.clip_instance.trim_end.unwrap_or(clip.duration);
let effective_duration = self.clip_instance.timeline_duration let effective_duration = self.clip_instance.timeline_duration
.unwrap_or(internal_end - internal_start); .unwrap_or(internal_end - internal_start);
let start_time = self.clip_instance.timeline_start; let start_time = self.clip_instance.timeline_start;
let query = let instance_id = controller.add_audio_clip(
Query::AddAudioClipSync(*backend_track_id, *audio_pool_index, start_time, effective_duration, internal_start); *backend_track_id,
*audio_pool_index,
start_time,
effective_duration,
internal_start,
);
match controller.send_query(query)? { self.backend_track_id = Some(*backend_track_id);
QueryResponse::AudioClipInstanceAdded(Ok(instance_id)) => { self.backend_audio_instance_id = Some(instance_id);
self.backend_track_id = Some(*backend_track_id);
self.backend_audio_instance_id = Some(instance_id);
// Add to global clip instance mapping // Add to global clip instance mapping
backend.clip_instance_to_backend_map.insert( backend.clip_instance_to_backend_map.insert(
self.clip_instance.id, self.clip_instance.id,
crate::action::BackendClipInstanceId::Audio(instance_id) crate::action::BackendClipInstanceId::Audio(instance_id)
); );
Ok(()) Ok(())
}
QueryResponse::AudioClipInstanceAdded(Err(e)) => Err(e),
_ => Err("Unexpected query response".to_string()),
}
} }
AudioClipType::Recording => { AudioClipType::Recording => {
// Recording clips are not synced to backend until finalized // Recording clips are not synced to backend until finalized

View File

@ -194,29 +194,23 @@ impl Action for RemoveClipInstancesAction {
} }
} }
AudioClipType::Sampled { audio_pool_index } => { AudioClipType::Sampled { audio_pool_index } => {
use daw_backend::command::{Query, QueryResponse};
let internal_start = instance.trim_start; let internal_start = instance.trim_start;
let internal_end = instance.trim_end.unwrap_or(clip.duration); let internal_end = instance.trim_end.unwrap_or(clip.duration);
let effective_duration = instance.timeline_duration let effective_duration = instance.timeline_duration
.unwrap_or(internal_end - internal_start); .unwrap_or(internal_end - internal_start);
let start_time = instance.timeline_start; let start_time = instance.timeline_start;
let query = Query::AddAudioClipSync( let new_id = controller.add_audio_clip(
track_id, track_id,
*audio_pool_index, *audio_pool_index,
start_time, start_time,
effective_duration, effective_duration,
internal_start, internal_start,
); );
if let Ok(QueryResponse::AudioClipInstanceAdded(Ok(new_id))) = backend.clip_instance_to_backend_map.insert(
controller.send_query(query) instance.id,
{ BackendClipInstanceId::Audio(new_id),
backend.clip_instance_to_backend_map.insert( );
instance.id,
BackendClipInstanceId::Audio(new_id),
);
}
} }
AudioClipType::Recording => {} AudioClipType::Recording => {}
} }

View File

@ -415,8 +415,6 @@ impl Action for SplitClipInstanceAction {
} }
} }
AudioClipType::Sampled { audio_pool_index } => { AudioClipType::Sampled { audio_pool_index } => {
use daw_backend::command::{Query, QueryResponse};
// 1. Trim the original (left) instance // 1. Trim the original (left) instance
let orig_internal_start = original_instance.trim_start; let orig_internal_start = original_instance.trim_start;
let orig_internal_end = original_instance.trim_end.unwrap_or(clip.duration); let orig_internal_end = original_instance.trim_end.unwrap_or(clip.duration);
@ -435,7 +433,7 @@ impl Action for SplitClipInstanceAction {
.unwrap_or(internal_end - internal_start); .unwrap_or(internal_end - internal_start);
let start_time = new_instance.timeline_start; let start_time = new_instance.timeline_start;
let query = Query::AddAudioClipSync( let instance_id = controller.add_audio_clip(
*backend_track_id, *backend_track_id,
*audio_pool_index, *audio_pool_index,
start_time, start_time,
@ -443,21 +441,15 @@ impl Action for SplitClipInstanceAction {
internal_start, internal_start,
); );
match controller.send_query(query)? { self.backend_track_id = Some(*backend_track_id);
QueryResponse::AudioClipInstanceAdded(Ok(instance_id)) => { self.backend_audio_instance_id = Some(instance_id);
self.backend_track_id = Some(*backend_track_id);
self.backend_audio_instance_id = Some(instance_id);
backend.clip_instance_to_backend_map.insert( backend.clip_instance_to_backend_map.insert(
new_instance_id, new_instance_id,
crate::action::BackendClipInstanceId::Audio(instance_id), crate::action::BackendClipInstanceId::Audio(instance_id),
); );
Ok(()) Ok(())
}
QueryResponse::AudioClipInstanceAdded(Err(e)) => Err(e),
_ => Err("Unexpected query response".to_string()),
}
} }
AudioClipType::Recording => { AudioClipType::Recording => {
// Recording clips cannot be split // Recording clips cannot be split

View File

@ -646,6 +646,46 @@ pub struct ClipInstance {
pub loop_before: Option<f64>, pub loop_before: Option<f64>,
} }
/// High 64-bit sentinel used to identify UUIDs that encode a backend audio clip instance ID.
/// The version nibble of this value is `0xB`, which can never occur in a random
/// v4 UUID (whose version nibble is always `4`), so synthetic UUIDs cannot
/// collide with real document UUIDs.
const AUDIO_BACKEND_UUID_HIGH: u64 = 0xDEAD_BEEF_CAFE_BABE;

/// Convert a backend `AudioClipInstanceId` (u32) to a synthetic UUID for use in
/// selection/hit-testing. These UUIDs are distinct from real document UUIDs and
/// round-trip via [`audio_backend_id_from_uuid`].
pub fn audio_backend_uuid(backend_id: u32) -> Uuid {
    Uuid::from_u64_pair(AUDIO_BACKEND_UUID_HIGH, backend_id as u64)
}

/// Extract a backend `AudioClipInstanceId` from a synthetic UUID created by
/// [`audio_backend_uuid`].
///
/// Returns `None` for regular document UUIDs, and also for any UUID whose low
/// half does not fit in a `u32` — a genuine backend id always fits (it was
/// widened from `u32`), so rejecting oversized values avoids silently
/// truncating a foreign UUID into a bogus id.
pub fn audio_backend_id_from_uuid(uuid: Uuid) -> Option<u32> {
    let (high, low) = uuid.as_u64_pair();
    if high != AUDIO_BACKEND_UUID_HIGH {
        return None;
    }
    u32::try_from(low).ok()
}
/// High 64-bit sentinel used to identify UUIDs that encode a backend MIDI clip instance ID.
/// Like the audio sentinel, its version nibble (`0xB`) can never occur in a
/// random v4 UUID, so synthetic UUIDs cannot collide with document UUIDs.
const MIDI_BACKEND_UUID_HIGH: u64 = 0xDEAD_BEEF_CAFE_BEEF;

/// Convert a backend `MidiClipInstanceId` (u32) to a synthetic UUID for use in
/// selection/hit-testing. Round-trips via [`midi_backend_id_from_uuid`].
pub fn midi_backend_uuid(backend_id: u32) -> Uuid {
    Uuid::from_u64_pair(MIDI_BACKEND_UUID_HIGH, backend_id as u64)
}

/// Extract a backend `MidiClipInstanceId` from a synthetic UUID created by
/// [`midi_backend_uuid`].
///
/// Returns `None` for regular document UUIDs, and also for any UUID whose low
/// half does not fit in a `u32` — a genuine backend id always fits (it was
/// widened from `u32`), so rejecting oversized values avoids silently
/// truncating a foreign UUID into a bogus id.
pub fn midi_backend_id_from_uuid(uuid: Uuid) -> Option<u32> {
    let (high, low) = uuid.as_u64_pair();
    if high != MIDI_BACKEND_UUID_HIGH {
        return None;
    }
    u32::try_from(low).ok()
}
impl ClipInstance { impl ClipInstance {
/// Create a new clip instance /// Create a new clip instance
pub fn new(clip_id: Uuid) -> Self { pub fn new(clip_id: Uuid) -> Self {

View File

@ -665,6 +665,20 @@ impl Document {
self.audio_clips.get(id) self.audio_clips.get(id)
} }
/// Find the document audio clip (UUID + ref) that owns the given backend pool index.
/// Performs a linear scan over all audio clips; returns the first match, or
/// `None` if no clip references that pool index.
pub fn audio_clip_by_pool_index(&self, pool_index: usize) -> Option<(Uuid, &AudioClip)> {
    self.audio_clips.iter().find_map(|(&id, clip)| {
        if clip.audio_pool_index() == Some(pool_index) {
            Some((id, clip))
        } else {
            None
        }
    })
}
/// Find the document audio clip (UUID + ref) that owns the given backend MIDI clip ID.
/// Performs a linear scan over all audio clips; returns the first match, or
/// `None` if no clip references that backend MIDI clip.
pub fn audio_clip_by_midi_clip_id(&self, midi_clip_id: u32) -> Option<(Uuid, &AudioClip)> {
    self.audio_clips.iter().find_map(|(&id, clip)| {
        if clip.midi_clip_id() == Some(midi_clip_id) {
            Some((id, clip))
        } else {
            None
        }
    })
}
/// Get a mutable vector clip by ID /// Get a mutable vector clip by ID
pub fn get_vector_clip_mut(&mut self, id: &Uuid) -> Option<&mut VectorClip> { pub fn get_vector_clip_mut(&mut self, id: &Uuid) -> Option<&mut VectorClip> {
self.vector_clips.get_mut(id) self.vector_clips.get_mut(id)

View File

@ -4998,6 +4998,8 @@ impl eframe::App for EditorApp {
let clip_instance = ClipInstance::new(doc_clip_id) let clip_instance = ClipInstance::new(doc_clip_id)
.with_timeline_start(self.recording_start_time); .with_timeline_start(self.recording_start_time);
let clip_instance_id = clip_instance.id;
// Add instance to layer (works for root and inside movie clips) // Add instance to layer (works for root and inside movie clips)
if let Some(layer) = self.action_executor.document_mut().get_layer_mut(&layer_id) { if let Some(layer) = self.action_executor.document_mut().get_layer_mut(&layer_id) {
if let lightningbeam_core::layer::AnyLayer::Audio(audio_layer) = layer { if let lightningbeam_core::layer::AnyLayer::Audio(audio_layer) = layer {
@ -5006,6 +5008,12 @@ impl eframe::App for EditorApp {
} }
} }
// Insert mapping so the snapshot cache assigns the doc UUID to this recording clip
self.clip_instance_to_backend_map.insert(
clip_instance_id,
lightningbeam_core::action::BackendClipInstanceId::Audio(backend_clip_id),
);
// Store mapping for later updates // Store mapping for later updates
self.recording_clips.insert(layer_id, backend_clip_id); self.recording_clips.insert(layer_id, backend_clip_id);
} }
@ -5117,41 +5125,16 @@ impl eframe::App for EditorApp {
} }
} }
// Sync the clip instance to backend for playback // The backend already has the clip at _backend_clip_id (created
if let Some(backend_track_id) = self.layer_to_track_map.get(&layer_id) { // during handle_start_recording and finalized in handle_stop_recording).
if let Some(ref controller_arc) = self.audio_controller { // Map the document instance_id → the existing backend clip so that
let mut controller = controller_arc.lock().unwrap(); // delete/move/trim actions can reference it correctly.
use daw_backend::command::{Query, QueryResponse}; // DO NOT call AddAudioClipSync — that would create a duplicate clip.
self.clip_instance_to_backend_map.insert(
let query = Query::AddAudioClipSync( instance_id,
*backend_track_id, lightningbeam_core::action::BackendClipInstanceId::Audio(_backend_clip_id),
pool_index, );
timeline_start, eprintln!("[AUDIO] Mapped doc instance {} → backend clip {}", instance_id, _backend_clip_id);
duration,
trim_start
);
match controller.send_query(query) {
Ok(QueryResponse::AudioClipInstanceAdded(Ok(backend_instance_id))) => {
// Store the mapping
self.clip_instance_to_backend_map.insert(
instance_id,
lightningbeam_core::action::BackendClipInstanceId::Audio(backend_instance_id)
);
println!("✅ Synced recording to backend: instance_id={}", backend_instance_id);
}
Ok(QueryResponse::AudioClipInstanceAdded(Err(e))) => {
eprintln!("❌ Failed to sync recording to backend: {}", e);
}
Ok(_) => {
eprintln!("❌ Unexpected query response when syncing recording");
}
Err(e) => {
eprintln!("❌ Failed to send query to backend: {}", e);
}
}
}
}
} }
} }
@ -5775,6 +5758,9 @@ impl eframe::App for EditorApp {
schneider_max_error: &mut self.schneider_max_error, schneider_max_error: &mut self.schneider_max_error,
raster_settings: &mut self.raster_settings, raster_settings: &mut self.raster_settings,
audio_controller: self.audio_controller.as_ref(), audio_controller: self.audio_controller.as_ref(),
clip_snapshot: self.audio_controller.as_ref().map(|arc| {
arc.lock().unwrap().clip_snapshot()
}),
audio_input_opener: &mut self.audio_input, audio_input_opener: &mut self.audio_input,
audio_input_stream: &mut self.audio_input_stream, audio_input_stream: &mut self.audio_input_stream,
audio_buffer_size: self.audio_buffer_size, audio_buffer_size: self.audio_buffer_size,
@ -5792,6 +5778,7 @@ impl eframe::App for EditorApp {
paint_bucket_gap_tolerance: &mut self.paint_bucket_gap_tolerance, paint_bucket_gap_tolerance: &mut self.paint_bucket_gap_tolerance,
polygon_sides: &mut self.polygon_sides, polygon_sides: &mut self.polygon_sides,
layer_to_track_map: &self.layer_to_track_map, layer_to_track_map: &self.layer_to_track_map,
clip_instance_to_backend_map: &self.clip_instance_to_backend_map,
midi_event_cache: &mut self.midi_event_cache, midi_event_cache: &mut self.midi_event_cache,
audio_pools_with_new_waveforms: &self.audio_pools_with_new_waveforms, audio_pools_with_new_waveforms: &self.audio_pools_with_new_waveforms,
raw_audio_cache: &self.raw_audio_cache, raw_audio_cache: &self.raw_audio_cache,

View File

@ -192,6 +192,9 @@ pub struct SharedPaneState<'a> {
pub raster_settings: &'a mut crate::tools::RasterToolSettings, pub raster_settings: &'a mut crate::tools::RasterToolSettings,
/// Audio engine controller for playback control (wrapped in Arc<Mutex<>> for thread safety) /// Audio engine controller for playback control (wrapped in Arc<Mutex<>> for thread safety)
pub audio_controller: Option<&'a std::sync::Arc<std::sync::Mutex<daw_backend::EngineController>>>, pub audio_controller: Option<&'a std::sync::Arc<std::sync::Mutex<daw_backend::EngineController>>>,
/// Snapshot of all audio/MIDI clip instances from the backend (for timeline rendering).
/// Updated by the audio thread after each mutation; UI reads it each frame.
pub clip_snapshot: Option<std::sync::Arc<std::sync::RwLock<daw_backend::AudioClipSnapshot>>>,
/// Opener for the microphone/line-in stream — consumed on first use. /// Opener for the microphone/line-in stream — consumed on first use.
pub audio_input_opener: &'a mut Option<daw_backend::InputStreamOpener>, pub audio_input_opener: &'a mut Option<daw_backend::InputStreamOpener>,
/// Live input stream handle; kept alive while recording is active. /// Live input stream handle; kept alive while recording is active.
@ -202,6 +205,8 @@ pub struct SharedPaneState<'a> {
pub video_manager: &'a std::sync::Arc<std::sync::Mutex<lightningbeam_core::video::VideoManager>>, pub video_manager: &'a std::sync::Arc<std::sync::Mutex<lightningbeam_core::video::VideoManager>>,
/// Maps all layer/group/clip UUIDs to backend track IDs (audio, MIDI, and metatracks) /// Maps all layer/group/clip UUIDs to backend track IDs (audio, MIDI, and metatracks)
pub layer_to_track_map: &'a std::collections::HashMap<Uuid, daw_backend::TrackId>, pub layer_to_track_map: &'a std::collections::HashMap<Uuid, daw_backend::TrackId>,
/// Maps document clip instance UUIDs to backend clip instance IDs (for action dispatch)
pub clip_instance_to_backend_map: &'a std::collections::HashMap<Uuid, lightningbeam_core::action::BackendClipInstanceId>,
/// Global playback state /// Global playback state
pub playback_time: &'a mut f64, // Current playback position in seconds pub playback_time: &'a mut f64, // Current playback position in seconds
pub is_playing: &'a mut bool, // Whether playback is currently active pub is_playing: &'a mut bool, // Whether playback is currently active

View File

@ -7,9 +7,12 @@
/// - Basic layer visualization /// - Basic layer visualization
use eframe::egui; use eframe::egui;
use lightningbeam_core::clip::ClipInstance; use lightningbeam_core::clip::{
ClipInstance, audio_backend_uuid, midi_backend_uuid,
};
use lightningbeam_core::layer::{AnyLayer, AudioLayerType, GroupLayer, LayerTrait}; use lightningbeam_core::layer::{AnyLayer, AudioLayerType, GroupLayer, LayerTrait};
use super::{DragClipType, NodePath, PaneRenderer, SharedPaneState}; use super::{DragClipType, NodePath, PaneRenderer, SharedPaneState};
use std::collections::HashMap;
const RULER_HEIGHT: f32 = 30.0; const RULER_HEIGHT: f32 = 30.0;
const LAYER_HEIGHT: f32 = 60.0; const LAYER_HEIGHT: f32 = 60.0;
@ -402,26 +405,147 @@ fn shift_toggle_layer(
*focus = lightningbeam_core::selection::FocusSelection::Layers(vec![layer_id]); *focus = lightningbeam_core::selection::FocusSelection::Layers(vec![layer_id]);
} }
/// Build a per-audio-layer cache of clip instances from the backend snapshot.
///
/// The audio engine is the source of truth for audio-layer clips, so audio layers
/// render from this cache (keyed by layer UUID) instead of `AudioLayer::clip_instances`.
///
/// Each cached instance keeps its document UUID when the reverse lookup through
/// `clip_map` (`clip_instance_to_backend_map`) succeeds; otherwise a synthetic
/// `audio_backend_uuid` / `midi_backend_uuid` value is substituted so selection and
/// hit-testing still have a stable identifier.
fn build_audio_clip_cache(
    snap: &daw_backend::AudioClipSnapshot,
    layer_to_track_map: &HashMap<uuid::Uuid, daw_backend::TrackId>,
    document: &lightningbeam_core::document::Document,
    clip_map: &HashMap<uuid::Uuid, lightningbeam_core::action::BackendClipInstanceId>,
) -> HashMap<uuid::Uuid, Vec<ClipInstance>> {
    use lightningbeam_core::action::BackendClipInstanceId;

    // Reverse maps: backend numeric ID → document instance UUID, split per kind.
    let mut audio_doc_ids: HashMap<u32, uuid::Uuid> = HashMap::new();
    let mut midi_doc_ids: HashMap<u32, uuid::Uuid> = HashMap::new();
    for (&doc_uuid, backend_id) in clip_map {
        match backend_id {
            BackendClipInstanceId::Audio(id) => { audio_doc_ids.insert(*id, doc_uuid); }
            BackendClipInstanceId::Midi(id) => { midi_doc_ids.insert(*id, doc_uuid); }
        }
    }

    let mut cache: HashMap<uuid::Uuid, Vec<ClipInstance>> = HashMap::new();
    for (&layer_id, &track_id) in layer_to_track_map {
        // The snapshot only backs audio layers; skip everything else.
        if !matches!(document.get_layer(&layer_id), Some(AnyLayer::Audio(_))) {
            continue;
        }

        let mut layer_instances = Vec::new();

        // Sampled audio clips on this track.
        if let Some(track_audio) = snap.audio.get(&track_id) {
            for clip in track_audio {
                let Some((clip_id, _)) = document.audio_clip_by_pool_index(clip.audio_pool_index) else {
                    continue;
                };
                // Prefer the document UUID; fall back to a synthetic one.
                let instance_id = audio_doc_ids
                    .get(&clip.id)
                    .copied()
                    .unwrap_or_else(|| audio_backend_uuid(clip.id));
                let mut ci = ClipInstance::new(clip_id);
                ci.id = instance_id;
                ci.timeline_start = clip.external_start;
                ci.trim_start = clip.internal_start;
                ci.trim_end = Some(clip.internal_end);
                // Only record an explicit timeline duration when the external span
                // differs from the trimmed internal span (loop/stretch on timeline).
                let trimmed_len = clip.internal_end - clip.internal_start;
                if (clip.external_duration - trimmed_len).abs() > 1e-9 {
                    ci.timeline_duration = Some(clip.external_duration);
                }
                ci.gain = clip.gain;
                layer_instances.push(ci);
            }
        }

        // MIDI clips on this track.
        if let Some(track_midi) = snap.midi.get(&track_id) {
            for clip in track_midi {
                let Some((clip_id, _)) = document.audio_clip_by_midi_clip_id(clip.clip_id) else {
                    continue;
                };
                let instance_id = midi_doc_ids
                    .get(&clip.id)
                    .copied()
                    .unwrap_or_else(|| midi_backend_uuid(clip.id));
                let mut ci = ClipInstance::new(clip_id);
                ci.id = instance_id;
                ci.timeline_start = clip.external_start;
                ci.trim_start = clip.internal_start;
                ci.trim_end = Some(clip.internal_end);
                let trimmed_len = clip.internal_end - clip.internal_start;
                if (clip.external_duration - trimmed_len).abs() > 1e-9 {
                    ci.timeline_duration = Some(clip.external_duration);
                }
                layer_instances.push(ci);
            }
        }

        // Leave the layer out of the cache when the snapshot had nothing for it, so
        // layer_clips() can fall back to AudioLayer::clip_instances (e.g. while a
        // recording is being set up and not yet reflected in the snapshot).
        if !layer_instances.is_empty() {
            cache.insert(layer_id, layer_instances);
        }
    }
    cache
}
/// Resolve the clip instances to render for a layer.
///
/// Audio layers prefer the snapshot-derived `audio_cache`; when the cache has no
/// entry for the layer — or has an empty one — we fall back to the document's
/// `clip_instances` (this covers e.g. a recording clip not yet in the snapshot).
/// All other layer kinds always read straight from the document.
fn layer_clips<'a>(
    layer: &'a AnyLayer,
    audio_cache: &'a HashMap<uuid::Uuid, Vec<ClipInstance>>,
) -> &'a [ClipInstance] {
    match layer {
        AnyLayer::Audio(al) => audio_cache
            .get(&al.layer.id)
            .filter(|cached| !cached.is_empty())
            .map_or(&al.clip_instances[..], |cached| cached.as_slice()),
        AnyLayer::Vector(l) => &l.clip_instances,
        AnyLayer::Video(l) => &l.clip_instances,
        AnyLayer::Effect(l) => &l.clip_instances,
        AnyLayer::Group(_) | AnyLayer::Raster(_) => &[],
    }
}
/// Collect all (layer_ref, clip_instances) tuples from context_layers, /// Collect all (layer_ref, clip_instances) tuples from context_layers,
/// recursively descending into group children. /// recursively descending into group children.
/// Returns (&AnyLayer, &[ClipInstance]) so callers have access to both layer info and clips. /// Returns (&AnyLayer, &[ClipInstance]) so callers have access to both layer info and clips.
fn all_layer_clip_instances<'a>(context_layers: &[&'a AnyLayer]) -> Vec<(&'a AnyLayer, &'a [ClipInstance])> { fn all_layer_clip_instances<'a>(
context_layers: &[&'a AnyLayer],
audio_cache: &'a HashMap<uuid::Uuid, Vec<ClipInstance>>,
) -> Vec<(&'a AnyLayer, &'a [ClipInstance])> {
let mut result = Vec::new(); let mut result = Vec::new();
for &layer in context_layers { for &layer in context_layers {
collect_clip_instances(layer, &mut result); collect_clip_instances(layer, audio_cache, &mut result);
} }
result result
} }
fn collect_clip_instances<'a>(layer: &'a AnyLayer, result: &mut Vec<(&'a AnyLayer, &'a [ClipInstance])>) { fn collect_clip_instances<'a>(
layer: &'a AnyLayer,
audio_cache: &'a HashMap<uuid::Uuid, Vec<ClipInstance>>,
result: &mut Vec<(&'a AnyLayer, &'a [ClipInstance])>,
) {
match layer { match layer {
AnyLayer::Audio(_) => result.push((layer, layer_clips(layer, audio_cache))),
AnyLayer::Vector(l) => result.push((layer, &l.clip_instances)), AnyLayer::Vector(l) => result.push((layer, &l.clip_instances)),
AnyLayer::Audio(l) => result.push((layer, &l.clip_instances)),
AnyLayer::Video(l) => result.push((layer, &l.clip_instances)), AnyLayer::Video(l) => result.push((layer, &l.clip_instances)),
AnyLayer::Effect(l) => result.push((layer, &l.clip_instances)), AnyLayer::Effect(l) => result.push((layer, &l.clip_instances)),
AnyLayer::Group(g) => { AnyLayer::Group(g) => {
for child in &g.children { for child in &g.children {
collect_clip_instances(child, result); collect_clip_instances(child, audio_cache, result);
} }
} }
AnyLayer::Raster(_) => {} AnyLayer::Raster(_) => {}
@ -798,6 +922,7 @@ impl TimelinePane {
content_rect: egui::Rect, content_rect: egui::Rect,
header_rect: egui::Rect, header_rect: egui::Rect,
editing_clip_id: Option<&uuid::Uuid>, editing_clip_id: Option<&uuid::Uuid>,
audio_cache: &HashMap<uuid::Uuid, Vec<ClipInstance>>,
) -> Option<(ClipDragType, uuid::Uuid)> { ) -> Option<(ClipDragType, uuid::Uuid)> {
let context_layers = document.context_layers(editing_clip_id); let context_layers = document.context_layers(editing_clip_id);
let rows = build_timeline_rows(&context_layers); let rows = build_timeline_rows(&context_layers);
@ -827,14 +952,7 @@ impl TimelinePane {
}; };
let _layer_data = layer.layer(); let _layer_data = layer.layer();
let clip_instances: &[ClipInstance] = match layer { let clip_instances = layer_clips(layer, audio_cache);
lightningbeam_core::layer::AnyLayer::Vector(vl) => &vl.clip_instances,
lightningbeam_core::layer::AnyLayer::Audio(al) => &al.clip_instances,
lightningbeam_core::layer::AnyLayer::Video(vl) => &vl.clip_instances,
lightningbeam_core::layer::AnyLayer::Effect(el) => &el.clip_instances,
lightningbeam_core::layer::AnyLayer::Group(_) => &[],
lightningbeam_core::layer::AnyLayer::Raster(_) => &[],
};
// Check each clip instance // Check each clip instance
let stacking = compute_clip_stacking(document, layer, clip_instances); let stacking = compute_clip_stacking(document, layer, clip_instances);
@ -1947,6 +2065,7 @@ impl TimelinePane {
waveform_stereo: bool, waveform_stereo: bool,
context_layers: &[&lightningbeam_core::layer::AnyLayer], context_layers: &[&lightningbeam_core::layer::AnyLayer],
video_manager: &std::sync::Arc<std::sync::Mutex<lightningbeam_core::video::VideoManager>>, video_manager: &std::sync::Arc<std::sync::Mutex<lightningbeam_core::video::VideoManager>>,
audio_cache: &HashMap<uuid::Uuid, Vec<ClipInstance>>,
) -> Vec<(egui::Rect, uuid::Uuid, f64, f64)> { ) -> Vec<(egui::Rect, uuid::Uuid, f64, f64)> {
let painter = ui.painter(); let painter = ui.painter();
@ -2330,8 +2449,8 @@ impl TimelinePane {
bright_teal.a() as f32 / 255.0, bright_teal.a() as f32 / 255.0,
]; ];
for child in &g.children { for child in &g.children {
if let AnyLayer::Audio(al) = child { if let AnyLayer::Audio(_) = child {
for ci in &al.clip_instances { for ci in layer_clips(child, &audio_cache) {
let audio_clip = match document.get_audio_clip(&ci.clip_id) { let audio_clip = match document.get_audio_clip(&ci.clip_id) {
Some(c) => c, Some(c) => c,
None => continue, None => continue,
@ -2441,14 +2560,7 @@ impl TimelinePane {
}; };
// Draw clip instances for this layer // Draw clip instances for this layer
let clip_instances: &[ClipInstance] = match layer { let clip_instances = layer_clips(layer, &audio_cache);
lightningbeam_core::layer::AnyLayer::Vector(vl) => &vl.clip_instances,
lightningbeam_core::layer::AnyLayer::Audio(al) => &al.clip_instances,
lightningbeam_core::layer::AnyLayer::Video(vl) => &vl.clip_instances,
lightningbeam_core::layer::AnyLayer::Effect(el) => &el.clip_instances,
lightningbeam_core::layer::AnyLayer::Group(_) => &[],
lightningbeam_core::layer::AnyLayer::Raster(_) => &[],
};
// For moves, precompute the clamped offset so all selected clips move uniformly // For moves, precompute the clamped offset so all selected clips move uniformly
let group_move_offset = if self.clip_drag_state == Some(ClipDragType::Move) { let group_move_offset = if self.clip_drag_state == Some(ClipDragType::Move) {
@ -3248,6 +3360,7 @@ impl TimelinePane {
audio_controller: Option<&std::sync::Arc<std::sync::Mutex<daw_backend::EngineController>>>, audio_controller: Option<&std::sync::Arc<std::sync::Mutex<daw_backend::EngineController>>>,
context_layers: &[&lightningbeam_core::layer::AnyLayer], context_layers: &[&lightningbeam_core::layer::AnyLayer],
editing_clip_id: Option<&uuid::Uuid>, editing_clip_id: Option<&uuid::Uuid>,
audio_cache: &HashMap<uuid::Uuid, Vec<ClipInstance>>,
) { ) {
// Only allocate content area (ruler + layers) with click and drag // Only allocate content area (ruler + layers) with click and drag
let content_response = ui.allocate_rect( let content_response = ui.allocate_rect(
@ -3317,14 +3430,7 @@ impl TimelinePane {
let _layer_data = layer.layer(); let _layer_data = layer.layer();
// Get clip instances for this layer // Get clip instances for this layer
let clip_instances: &[ClipInstance] = match layer { let clip_instances = layer_clips(layer, &audio_cache);
lightningbeam_core::layer::AnyLayer::Vector(vl) => &vl.clip_instances,
lightningbeam_core::layer::AnyLayer::Audio(al) => &al.clip_instances,
lightningbeam_core::layer::AnyLayer::Video(vl) => &vl.clip_instances,
lightningbeam_core::layer::AnyLayer::Effect(el) => &el.clip_instances,
lightningbeam_core::layer::AnyLayer::Group(_) => &[],
lightningbeam_core::layer::AnyLayer::Raster(_) => &[],
};
// Check if click is within any clip instance // Check if click is within any clip instance
let click_stacking = compute_clip_stacking(document, layer, clip_instances); let click_stacking = compute_clip_stacking(document, layer, clip_instances);
@ -3610,6 +3716,7 @@ impl TimelinePane {
content_rect, content_rect,
header_rect, header_rect,
editing_clip_id, editing_clip_id,
&audio_cache,
) { ) {
// If this clip is not selected, select it (respecting shift key) // If this clip is not selected, select it (respecting shift key)
if !selection.contains_clip_instance(&clip_id) { if !selection.contains_clip_instance(&clip_id) {
@ -3663,7 +3770,7 @@ impl TimelinePane {
HashMap::new(); HashMap::new();
// Iterate through all layers (including group children) to find selected clip instances // Iterate through all layers (including group children) to find selected clip instances
for (layer, clip_instances) in all_layer_clip_instances(context_layers) { for (layer, clip_instances) in all_layer_clip_instances(context_layers, &audio_cache) {
let layer_id = layer.id(); let layer_id = layer.id();
// Find selected clip instances in this layer // Find selected clip instances in this layer
for clip_instance in clip_instances { for clip_instance in clip_instances {
@ -3705,7 +3812,7 @@ impl TimelinePane {
> = HashMap::new(); > = HashMap::new();
// Iterate through all layers (including group children) to find selected clip instances // Iterate through all layers (including group children) to find selected clip instances
for (layer, clip_instances) in all_layer_clip_instances(context_layers) { for (layer, clip_instances) in all_layer_clip_instances(context_layers, &audio_cache) {
let layer_id = layer.id(); let layer_id = layer.id();
// Find selected clip instances in this layer // Find selected clip instances in this layer
@ -3830,7 +3937,7 @@ impl TimelinePane {
ClipDragType::LoopExtendRight => { ClipDragType::LoopExtendRight => {
let mut layer_loops: HashMap<uuid::Uuid, Vec<lightningbeam_core::actions::loop_clip_instances::LoopEntry>> = HashMap::new(); let mut layer_loops: HashMap<uuid::Uuid, Vec<lightningbeam_core::actions::loop_clip_instances::LoopEntry>> = HashMap::new();
for (layer, clip_instances) in all_layer_clip_instances(context_layers) { for (layer, clip_instances) in all_layer_clip_instances(context_layers, &audio_cache) {
let layer_id = layer.id(); let layer_id = layer.id();
for clip_instance in clip_instances { for clip_instance in clip_instances {
@ -3896,7 +4003,7 @@ impl TimelinePane {
// Extend loop_before (pre-loop region) // Extend loop_before (pre-loop region)
let mut layer_loops: HashMap<uuid::Uuid, Vec<lightningbeam_core::actions::loop_clip_instances::LoopEntry>> = HashMap::new(); let mut layer_loops: HashMap<uuid::Uuid, Vec<lightningbeam_core::actions::loop_clip_instances::LoopEntry>> = HashMap::new();
for (layer, clip_instances) in all_layer_clip_instances(context_layers) { for (layer, clip_instances) in all_layer_clip_instances(context_layers, &audio_cache) {
let layer_id = layer.id(); let layer_id = layer.id();
for clip_instance in clip_instances { for clip_instance in clip_instances {
@ -4140,6 +4247,7 @@ impl TimelinePane {
content_rect, content_rect,
header_rect, header_rect,
editing_clip_id, editing_clip_id,
&audio_cache,
) { ) {
match drag_type { match drag_type {
ClipDragType::TrimLeft | ClipDragType::TrimRight => { ClipDragType::TrimLeft | ClipDragType::TrimRight => {
@ -4420,17 +4528,31 @@ impl PaneRenderer for TimelinePane {
// Use virtual row count (includes expanded group children) for height calculations // Use virtual row count (includes expanded group children) for height calculations
let layer_count = build_timeline_rows(&context_layers).len(); let layer_count = build_timeline_rows(&context_layers).len();
// Build audio clip cache from backend snapshot (backend-as-source-of-truth for audio).
// Uses doc UUIDs via reverse lookup of clip_instance_to_backend_map so that selection
// and action dispatch continue to work with doc UUIDs.
// Falls back to AudioLayer::clip_instances for layers with no snapshot data yet
// (e.g., layers where recording is in progress but not yet finalized).
let audio_cache: HashMap<uuid::Uuid, Vec<ClipInstance>> =
if let Some(snap_arc) = shared.clip_snapshot.as_ref() {
if let Ok(snap) = snap_arc.read() {
build_audio_clip_cache(
&snap,
shared.layer_to_track_map,
document,
shared.clip_instance_to_backend_map,
)
} else {
HashMap::new()
}
} else {
HashMap::new()
};
// Calculate project duration from last clip endpoint across all layers // Calculate project duration from last clip endpoint across all layers
let mut max_endpoint: f64 = 10.0; // Default minimum duration let mut max_endpoint: f64 = 10.0; // Default minimum duration
for &layer in &context_layers { for &layer in &context_layers {
let clip_instances: &[ClipInstance] = match layer { let clip_instances = layer_clips(layer, &audio_cache);
lightningbeam_core::layer::AnyLayer::Vector(vl) => &vl.clip_instances,
lightningbeam_core::layer::AnyLayer::Audio(al) => &al.clip_instances,
lightningbeam_core::layer::AnyLayer::Video(vl) => &vl.clip_instances,
lightningbeam_core::layer::AnyLayer::Effect(el) => &el.clip_instances,
lightningbeam_core::layer::AnyLayer::Group(_) => &[],
lightningbeam_core::layer::AnyLayer::Raster(_) => &[],
};
for clip_instance in clip_instances { for clip_instance in clip_instances {
let clip_duration = effective_clip_duration(document, layer, clip_instance); let clip_duration = effective_clip_duration(document, layer, clip_instance);
@ -4499,7 +4621,7 @@ impl PaneRenderer for TimelinePane {
// Render layer rows with clipping // Render layer rows with clipping
ui.set_clip_rect(content_rect.intersect(original_clip_rect)); ui.set_clip_rect(content_rect.intersect(original_clip_rect));
let video_clip_hovers = self.render_layers(ui, content_rect, shared.theme, document, shared.active_layer_id, shared.focus, shared.selection, shared.midi_event_cache, shared.raw_audio_cache, shared.waveform_gpu_dirty, shared.target_format, shared.waveform_stereo, &context_layers, shared.video_manager); let video_clip_hovers = self.render_layers(ui, content_rect, shared.theme, document, shared.active_layer_id, shared.focus, shared.selection, shared.midi_event_cache, shared.raw_audio_cache, shared.waveform_gpu_dirty, shared.target_format, shared.waveform_stereo, &context_layers, shared.video_manager, &audio_cache);
// Render playhead on top (clip to timeline area) // Render playhead on top (clip to timeline area)
ui.set_clip_rect(timeline_rect.intersect(original_clip_rect)); ui.set_clip_rect(timeline_rect.intersect(original_clip_rect));
@ -4526,6 +4648,7 @@ impl PaneRenderer for TimelinePane {
shared.audio_controller, shared.audio_controller,
&context_layers, &context_layers,
editing_clip_id.as_ref(), editing_clip_id.as_ref(),
&audio_cache,
); );
// Context menu: detect right-click on clips or empty timeline space // Context menu: detect right-click on clips or empty timeline space
@ -4534,7 +4657,7 @@ impl PaneRenderer for TimelinePane {
if secondary_clicked { if secondary_clicked {
if let Some(pos) = ui.input(|i| i.pointer.interact_pos()) { if let Some(pos) = ui.input(|i| i.pointer.interact_pos()) {
if content_rect.contains(pos) { if content_rect.contains(pos) {
if let Some((_drag_type, clip_id)) = self.detect_clip_at_pointer(pos, document, content_rect, layer_headers_rect, editing_clip_id.as_ref()) { if let Some((_drag_type, clip_id)) = self.detect_clip_at_pointer(pos, document, content_rect, layer_headers_rect, editing_clip_id.as_ref(), &audio_cache) {
// Right-clicked on a clip // Right-clicked on a clip
if !shared.selection.contains_clip_instance(&clip_id) { if !shared.selection.contains_clip_instance(&clip_id) {
shared.selection.select_only_clip_instance(clip_id); shared.selection.select_only_clip_instance(clip_id);
@ -4562,14 +4685,7 @@ impl PaneRenderer for TimelinePane {
let mut enabled = false; let mut enabled = false;
if let Some(layer_id) = *shared.active_layer_id { if let Some(layer_id) = *shared.active_layer_id {
if let Some(layer) = document.get_layer(&layer_id) { if let Some(layer) = document.get_layer(&layer_id) {
let instances: &[ClipInstance] = match layer { let instances = layer_clips(layer, &audio_cache);
AnyLayer::Vector(vl) => &vl.clip_instances,
AnyLayer::Audio(al) => &al.clip_instances,
AnyLayer::Video(vl) => &vl.clip_instances,
AnyLayer::Effect(el) => &el.clip_instances,
AnyLayer::Group(_) => &[],
AnyLayer::Raster(_) => &[],
};
for inst in instances { for inst in instances {
if !shared.selection.contains_clip_instance(&inst.id) { continue; } if !shared.selection.contains_clip_instance(&inst.id) { continue; }
if let Some(dur) = document.get_clip_duration(&inst.clip_id) { if let Some(dur) = document.get_clip_duration(&inst.clip_id) {
@ -4593,14 +4709,7 @@ impl PaneRenderer for TimelinePane {
let mut enabled = false; let mut enabled = false;
if let Some(layer_id) = *shared.active_layer_id { if let Some(layer_id) = *shared.active_layer_id {
if let Some(layer) = document.get_layer(&layer_id) { if let Some(layer) = document.get_layer(&layer_id) {
let instances: &[ClipInstance] = match layer { let instances = layer_clips(layer, &audio_cache);
AnyLayer::Vector(vl) => &vl.clip_instances,
AnyLayer::Audio(al) => &al.clip_instances,
AnyLayer::Video(vl) => &vl.clip_instances,
AnyLayer::Effect(el) => &el.clip_instances,
AnyLayer::Group(_) => &[],
AnyLayer::Raster(_) => &[],
};
// Check each selected clip // Check each selected clip
enabled = instances.iter() enabled = instances.iter()
.filter(|ci| shared.selection.contains_clip_instance(&ci.id)) .filter(|ci| shared.selection.contains_clip_instance(&ci.id))