Compare commits


No commits in common. "d7a29ee1dc7eef30047f14449ee0353f0a55dc62" and "b8f847e1671db7282a761ed4dd333a12290b0545" have entirely different histories.

17 changed files with 255 additions and 989 deletions

View File

@ -10,17 +10,8 @@ use crate::audio::track::{Track, TrackId, TrackNode};
use crate::command::{AudioEvent, Command, Query, QueryResponse};
use crate::io::MidiInputManager;
use petgraph::stable_graph::NodeIndex;
use std::collections::HashMap;
use std::sync::atomic::{AtomicU32, AtomicU64, Ordering};
use std::sync::{Arc, RwLock};
/// Read-only snapshot of all clip instances, updated after every clip mutation.
/// Shared between the audio thread (writer) and the UI thread (reader).
#[derive(Default, Clone)]
pub struct AudioClipSnapshot {
pub audio: HashMap<TrackId, Vec<AudioClipInstance>>,
pub midi: HashMap<TrackId, Vec<MidiClipInstance>>,
}
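// A minimal sketch of the writer/reader pattern this snapshot supported
// (illustrative only; the real engine rebuilt both maps in
// refresh_clip_snapshot() after every clip mutation):
fn snapshot_sharing_sketch() {
use std::sync::{Arc, RwLock};
let snapshot = Arc::new(RwLock::new(AudioClipSnapshot::default()));
// Audio thread (writer): take a short-lived write lock after a mutation.
snapshot.write().unwrap().audio.clear();
// UI thread (reader): clone the Arc and read each frame.
let ui_handle = Arc::clone(&snapshot);
let _tracks_with_clips = ui_handle.read().unwrap().audio.len();
}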
use std::sync::Arc;
/// Audio engine for Phase 6: hierarchical tracks with groups
pub struct Engine {
@ -43,18 +34,12 @@ pub struct Engine {
chunk_generation_rx: std::sync::mpsc::Receiver<AudioEvent>,
chunk_generation_tx: std::sync::mpsc::Sender<AudioEvent>,
// Shared clip snapshot for UI reads
clip_snapshot: Arc<RwLock<AudioClipSnapshot>>,
// Shared playhead for UI reads
playhead_atomic: Arc<AtomicU64>,
// Shared MIDI clip ID counter for synchronous access
next_midi_clip_id_atomic: Arc<AtomicU32>,
// Shared audio clip ID counter (shared with EngineController for pre-assigned IDs)
next_audio_clip_id_atomic: Arc<AtomicU32>,
// Event counter for periodic position updates
frames_since_last_event: usize,
event_interval_frames: usize,
@ -62,8 +47,8 @@ pub struct Engine {
// Mix buffer for output
mix_buffer: Vec<f32>,
// ID counters (legacy, unused — kept for potential future use)
// Audio clip IDs are now generated via next_audio_clip_id_atomic
// ID counters
next_clip_id: ClipId,
// Recording state
recording_state: Option<RecordingState>,
@ -148,13 +133,12 @@ impl Engine {
query_response_tx,
chunk_generation_rx,
chunk_generation_tx,
clip_snapshot: Arc::new(RwLock::new(AudioClipSnapshot::default())),
playhead_atomic,
next_midi_clip_id_atomic: Arc::new(AtomicU32::new(0)),
next_audio_clip_id_atomic: Arc::new(AtomicU32::new(0)),
frames_since_last_event: 0,
event_interval_frames,
mix_buffer: Vec::new(),
next_clip_id: 0,
recording_state: None,
input_rx: None,
recording_mirror_tx: None,
@ -256,25 +240,6 @@ impl Engine {
&self.audio_pool
}
/// Rebuild the clip snapshot from the current project state.
/// Call this after any command that adds, removes, or modifies clip instances.
fn refresh_clip_snapshot(&self) {
let mut snap = self.clip_snapshot.write().unwrap();
snap.audio.clear();
snap.midi.clear();
for (track_id, node) in self.project.track_iter() {
match node {
crate::audio::track::TrackNode::Audio(t) => {
snap.audio.insert(track_id, t.clips.clone());
}
crate::audio::track::TrackNode::Midi(t) => {
snap.midi.insert(track_id, t.clip_instances.clone());
}
crate::audio::track::TrackNode::Group(_) => {}
}
}
}
/// Get a handle for controlling playback from the UI thread
pub fn get_controller(
&self,
@ -288,8 +253,6 @@ impl Engine {
query_response_rx,
playhead: Arc::clone(&self.playhead_atomic),
next_midi_clip_id: Arc::clone(&self.next_midi_clip_id_atomic),
next_audio_clip_id: Arc::clone(&self.next_audio_clip_id_atomic),
clip_snapshot: Arc::clone(&self.clip_snapshot),
sample_rate: self.sample_rate,
channels: self.channels,
cached_export_response: None,
@ -416,23 +379,13 @@ impl Engine {
if let Some(recording) = &self.midi_recording_state {
let current_time = self.playhead as f64 / self.sample_rate as f64;
let duration = current_time - recording.start_time;
let notes = recording.get_notes_with_active(current_time);
let notes = recording.get_notes().to_vec();
let _ = self.event_tx.push(AudioEvent::MidiRecordingProgress(
recording.track_id,
recording.clip_id,
duration,
notes,
));
// Keep the snapshot up to date so the UI can display a growing clip bar.
let track_id = recording.track_id;
let clip_id = recording.clip_id;
if let Some(crate::audio::track::TrackNode::Midi(track)) = self.project.get_track_mut(track_id) {
if let Some(instance) = track.clip_instances.iter_mut().find(|i| i.clip_id == clip_id) {
instance.internal_end = duration;
instance.external_duration = duration;
}
}
self.refresh_clip_snapshot();
}
}
} else {
@ -736,7 +689,6 @@ impl Engine {
}
_ => {}
}
self.refresh_clip_snapshot();
}
Command::TrimClip(track_id, clip_id, new_internal_start, new_internal_end) => {
// Trim changes which portion of the source content is used
@ -761,7 +713,6 @@ impl Engine {
}
_ => {}
}
self.refresh_clip_snapshot();
}
Command::ExtendClip(track_id, clip_id, new_external_duration) => {
// Extend changes the external duration (enables looping if > internal duration)
@ -779,7 +730,6 @@ impl Engine {
}
_ => {}
}
self.refresh_clip_snapshot();
}
Command::CreateMetatrack(name, parent_id) => {
let track_id = self.project.add_group_track(name.clone(), parent_id);
@ -891,8 +841,23 @@ impl Engine {
// Notify UI about the new audio file
let _ = self.event_tx.push(AudioEvent::AudioFileAdded(pool_index, path));
}
Command::AddAudioClip(track_id, clip_id, pool_index, start_time, duration, offset) => {
// Create a new clip instance with the pre-assigned clip_id
Command::AddAudioClip(track_id, pool_index, start_time, duration, offset) => {
eprintln!("[Engine] AddAudioClip: track_id={}, pool_index={}, start_time={}, duration={}",
track_id, pool_index, start_time, duration);
// Check if pool index is valid
let pool_size = self.audio_pool.len();
if pool_index >= pool_size {
eprintln!("[Engine] ERROR: pool_index {} is out of bounds (pool size: {})",
pool_index, pool_size);
} else {
eprintln!("[Engine] Pool index {} is valid, pool has {} files",
pool_index, pool_size);
}
// Create a new clip instance with a unique ID using legacy parameters
let clip_id = self.next_clip_id;
self.next_clip_id += 1;
let clip = AudioClipInstance::from_legacy(
clip_id,
pool_index,
@ -904,9 +869,12 @@ impl Engine {
// Add clip to track
if let Some(crate::audio::track::TrackNode::Audio(track)) = self.project.get_track_mut(track_id) {
track.clips.push(clip);
eprintln!("[Engine] Clip {} added to track {} successfully", clip_id, track_id);
// Notify UI about the new clip
let _ = self.event_tx.push(AudioEvent::ClipAdded(track_id, clip_id));
} else {
eprintln!("[Engine] ERROR: Track {} not found or is not an audio track", track_id);
}
self.refresh_clip_snapshot();
}
Command::CreateMidiTrack(name, parent_id) => {
let track_id = self.project.add_midi_track(name.clone(), parent_id);
@ -935,7 +903,6 @@ impl Engine {
// Notify UI about the new clip with its ID (using clip_id for now)
let _ = self.event_tx.push(AudioEvent::ClipAdded(track_id, clip_id));
self.refresh_clip_snapshot();
}
Command::AddMidiNote(track_id, clip_id, time_offset, note, velocity, duration) => {
// Add a MIDI note event to the specified clip in the pool
@ -968,7 +935,6 @@ impl Engine {
Command::AddLoadedMidiClip(track_id, clip, start_time) => {
// Add a pre-loaded MIDI clip to the track with the given start time
let _ = self.project.add_midi_clip_at(track_id, clip, start_time);
self.refresh_clip_snapshot();
}
Command::UpdateMidiClipNotes(_track_id, clip_id, notes) => {
// Update all notes in a MIDI clip (directly in the pool)
@ -995,7 +961,6 @@ impl Engine {
Command::RemoveMidiClip(track_id, instance_id) => {
// Remove a MIDI clip instance from a track (for undo/redo support)
let _ = self.project.remove_midi_clip(track_id, instance_id);
self.refresh_clip_snapshot();
}
Command::RemoveAudioClip(track_id, instance_id) => {
// Deactivate the per-clip disk reader before removing
@ -1006,7 +971,6 @@ impl Engine {
}
// Remove an audio clip instance from a track (for undo/redo support)
let _ = self.project.remove_audio_clip(track_id, instance_id);
self.refresh_clip_snapshot();
}
Command::RequestBufferPoolStats => {
// Send buffer pool statistics back to UI
@ -1189,7 +1153,7 @@ impl Engine {
// Reset ID counters
self.next_midi_clip_id_atomic.store(0, Ordering::Relaxed);
self.next_audio_clip_id_atomic.store(0, Ordering::Relaxed);
self.next_clip_id = 0;
// Clear mix buffer
self.mix_buffer.clear();
@ -2598,12 +2562,10 @@ impl Engine {
}
Query::AddMidiClipSync(track_id, clip, start_time) => {
// Add MIDI clip to track and return the instance ID
let result = match self.project.add_midi_clip_at(track_id, clip, start_time) {
match self.project.add_midi_clip_at(track_id, clip, start_time) {
Ok(instance_id) => QueryResponse::MidiClipInstanceAdded(Ok(instance_id)),
Err(e) => QueryResponse::MidiClipInstanceAdded(Err(e.to_string())),
};
self.refresh_clip_snapshot();
result
}
}
Query::AddMidiClipInstanceSync(track_id, mut instance) => {
// Add MIDI clip instance to track (clip must already be in pool)
@ -2611,12 +2573,54 @@ impl Engine {
let instance_id = self.project.next_midi_clip_instance_id();
instance.id = instance_id;
let result = match self.project.add_midi_clip_instance(track_id, instance) {
match self.project.add_midi_clip_instance(track_id, instance) {
Ok(_) => QueryResponse::MidiClipInstanceAdded(Ok(instance_id)),
Err(e) => QueryResponse::MidiClipInstanceAdded(Err(e.to_string())),
}
}
Query::AddAudioClipSync(track_id, pool_index, start_time, duration, offset) => {
// Add audio clip to track and return the instance ID
// Create audio clip instance
let instance_id = self.next_clip_id;
self.next_clip_id += 1;
// For compressed files, create a per-clip read-ahead buffer
let read_ahead = if let Some(file) = self.audio_pool.get_file(pool_index) {
if matches!(file.storage, crate::audio::pool::AudioStorage::Compressed { .. }) {
let buffer = crate::audio::disk_reader::DiskReader::create_buffer(
file.sample_rate,
file.channels,
);
if let Some(ref mut dr) = self.disk_reader {
dr.send(crate::audio::disk_reader::DiskReaderCommand::ActivateFile {
reader_id: instance_id as u64,
path: file.path.clone(),
buffer: buffer.clone(),
});
}
Some(buffer)
} else {
None
}
} else {
None
};
self.refresh_clip_snapshot();
result
let clip = AudioClipInstance {
id: instance_id,
audio_pool_index: pool_index,
internal_start: offset,
internal_end: offset + duration,
external_start: start_time,
external_duration: duration,
gain: 1.0,
read_ahead,
};
match self.project.add_clip(track_id, clip) {
Ok(instance_id) => QueryResponse::AudioClipInstanceAdded(Ok(instance_id)),
Err(e) => QueryResponse::AudioClipInstanceAdded(Err(e.to_string())),
}
}
Query::AddAudioFileSync(path, data, channels, sample_rate) => {
// Add audio file to pool and return the pool index
@ -2760,8 +2764,9 @@ impl Engine {
// Create WAV writer
match WavWriter::create(&temp_file_path, self.sample_rate, self.channels) {
Ok(writer) => {
// Create intermediate clip with a unique ID
let clip_id = self.next_audio_clip_id_atomic.fetch_add(1, Ordering::Relaxed);
// Create intermediate clip
let clip_id = self.next_clip_id;
self.next_clip_id += 1;
let clip = crate::audio::clip::Clip::new(
clip_id,
@ -2775,7 +2780,6 @@ impl Engine {
// Add clip to track
if let Some(crate::audio::track::TrackNode::Audio(track)) = self.project.get_track_mut(track_id) {
track.clips.push(clip);
self.refresh_clip_snapshot();
}
// Create recording state
@ -2874,7 +2878,6 @@ impl Engine {
eprintln!("[STOP_RECORDING] Updated clip {} with pool_index {}", clip_id, pool_index);
}
}
self.refresh_clip_snapshot();
// Delete temp file
let _ = std::fs::remove_file(&temp_file_path);
@ -2981,8 +2984,6 @@ impl Engine {
eprintln!("[MIDI_RECORDING] ERROR: Clip {} not found in pool!", clip_id);
}
self.refresh_clip_snapshot();
// Send event to UI
eprintln!("[MIDI_RECORDING] Pushing MidiRecordingStopped event to event_tx...");
match self.event_tx.push(AudioEvent::MidiRecordingStopped(track_id, clip_id, note_count)) {
@ -3017,8 +3018,6 @@ pub struct EngineController {
query_response_rx: rtrb::Consumer<QueryResponse>,
playhead: Arc<AtomicU64>,
next_midi_clip_id: Arc<AtomicU32>,
next_audio_clip_id: Arc<AtomicU32>,
clip_snapshot: Arc<RwLock<AudioClipSnapshot>>,
sample_rate: u32,
#[allow(dead_code)] // Used in public getter method
channels: u32,
@ -3113,12 +3112,6 @@ impl EngineController {
frames as f64 / self.sample_rate as f64
}
/// Get the shared clip snapshot. The UI can read this each frame to display
/// the authoritative clip state from the backend.
pub fn clip_snapshot(&self) -> Arc<RwLock<AudioClipSnapshot>> {
Arc::clone(&self.clip_snapshot)
}
/// Create a new metatrack
pub fn create_metatrack(&mut self, name: String) {
let _ = self.command_tx.push(Command::CreateMetatrack(name, None));
@ -3206,22 +3199,9 @@ impl EngineController {
}
}
/// Generate the next unique audio clip instance ID (atomic, thread-safe)
pub fn next_audio_clip_id(&self) -> AudioClipInstanceId {
self.next_audio_clip_id.fetch_add(1, Ordering::Relaxed)
}
/// Add a clip to an audio track (async, fire-and-forget)
/// Returns the pre-assigned clip instance ID so callers can track the clip without a sync round-trip
pub fn add_audio_clip(&mut self, track_id: TrackId, pool_index: usize, start_time: f64, duration: f64, offset: f64) -> AudioClipInstanceId {
let clip_id = self.next_audio_clip_id.fetch_add(1, Ordering::Relaxed);
let _ = self.command_tx.push(Command::AddAudioClip(track_id, clip_id, pool_index, start_time, duration, offset));
clip_id
}
/// Add a clip to an audio track with a pre-assigned ID (for undo/redo, restoring deleted clips)
pub fn add_audio_clip_with_id(&mut self, track_id: TrackId, clip_id: AudioClipInstanceId, pool_index: usize, start_time: f64, duration: f64, offset: f64) {
let _ = self.command_tx.push(Command::AddAudioClip(track_id, clip_id, pool_index, start_time, duration, offset));
/// Add a clip to an audio track
pub fn add_audio_clip(&mut self, track_id: TrackId, pool_index: usize, start_time: f64, duration: f64, offset: f64) {
let _ = self.command_tx.push(Command::AddAudioClip(track_id, pool_index, start_time, duration, offset));
}
/// Create a new MIDI track

View File

@ -19,7 +19,7 @@ pub mod waveform_cache;
pub use automation::{AutomationLane, AutomationLaneId, AutomationPoint, CurveType, ParameterId};
pub use buffer_pool::BufferPool;
pub use clip::{AudioClipInstance, AudioClipInstanceId, Clip, ClipId};
pub use engine::{AudioClipSnapshot, Engine, EngineController};
pub use engine::{Engine, EngineController};
pub use export::{export_audio, ExportFormat, ExportSettings};
pub use metronome::Metronome;
pub use midi::{MidiClip, MidiClipId, MidiClipInstance, MidiClipInstanceId, MidiEvent};

View File

@ -211,11 +211,6 @@ impl Project {
self.tracks.get_mut(&track_id)
}
/// Iterate over all tracks in the project.
pub fn track_iter(&self) -> impl Iterator<Item = (TrackId, &TrackNode)> {
self.tracks.iter().map(|(&id, node)| (id, node))
}
/// Get oscilloscope data from a node in a track's graph
pub fn get_oscilloscope_data(&self, track_id: TrackId, node_id: u32, sample_count: usize) -> Option<(Vec<f32>, Vec<f32>)> {
if let Some(TrackNode::Midi(track)) = self.tracks.get(&track_id) {

View File

@ -253,18 +253,6 @@ impl MidiRecordingState {
self.completed_notes.len()
}
/// Get all completed notes plus currently-held notes with a provisional duration.
/// Used for live preview during recording so held notes appear immediately.
pub fn get_notes_with_active(&self, current_time: f64) -> Vec<(f64, u8, u8, f64)> {
let mut notes = self.completed_notes.clone();
for active in self.active_notes.values() {
let time_offset = active.start_time - self.start_time;
let provisional_dur = (current_time - active.start_time).max(0.0);
notes.push((time_offset, active.note, active.velocity, provisional_dur));
}
notes
}
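// Worked example: with start_time = 0.0, a note pressed at t = 1.0 and still
// held at current_time = 2.5 is reported as (1.0, note, velocity, 1.5).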
/// Get the note numbers of all currently held (active) notes
pub fn active_note_numbers(&self) -> Vec<u8> {
self.active_notes.keys().copied().collect()

View File

@ -67,9 +67,8 @@ pub enum Command {
/// Add an audio file to the pool (path, data, channels, sample_rate)
/// Returns the pool index via an AudioEvent
AddAudioFile(String, Vec<f32>, u32, u32),
/// Add a clip to an audio track (track_id, clip_id, pool_index, start_time, duration, offset)
/// The clip_id is pre-assigned by the caller (via EngineController::next_audio_clip_id())
AddAudioClip(TrackId, AudioClipInstanceId, usize, f64, f64, f64),
/// Add a clip to an audio track (track_id, pool_index, start_time, duration, offset)
AddAudioClip(TrackId, usize, f64, f64, f64),
// MIDI commands
/// Create a new MIDI track with a name and optional parent group
@ -419,6 +418,8 @@ pub enum Query {
/// Add a MIDI clip instance to a track synchronously (track_id, instance) - returns instance ID
/// The clip must already exist in the MidiClipPool
AddMidiClipInstanceSync(TrackId, crate::audio::midi::MidiClipInstance),
/// Add an audio clip to a track synchronously (track_id, pool_index, start_time, duration, offset) - returns instance ID
AddAudioClipSync(TrackId, usize, f64, f64, f64),
/// Add an audio file to the pool synchronously (path, data, channels, sample_rate) - returns pool index
AddAudioFileSync(String, Vec<f32>, u32, u32),
/// Import an audio file synchronously (path) - returns pool index.
@ -500,6 +501,8 @@ pub enum QueryResponse {
AudioExported(Result<(), String>),
/// MIDI clip instance added (returns instance ID)
MidiClipInstanceAdded(Result<MidiClipInstanceId, String>),
/// Audio clip instance added (returns instance ID)
AudioClipInstanceAdded(Result<AudioClipInstanceId, String>),
/// Audio file added to pool (returns pool index)
AudioFileAddedSync(Result<usize, String>),
/// Audio file imported to pool (returns pool index)
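Taken together, the new AddAudioClipSync query and AudioClipInstanceAdded response replace the fire-and-forget command for callers that need the instance ID. A minimal caller-side sketch (the helper name is hypothetical; send_query returning Result<QueryResponse, String> is inferred from the action code later in this diff):
use daw_backend::command::{Query, QueryResponse};
use daw_backend::{AudioClipInstanceId, EngineController, TrackId};
// Hypothetical helper: send the sync query and unwrap the matching response.
fn add_audio_clip_sync(
controller: &mut EngineController,
track_id: TrackId,
pool_index: usize,
start_time: f64,
duration: f64,
offset: f64,
) -> Result<AudioClipInstanceId, String> {
let query = Query::AddAudioClipSync(track_id, pool_index, start_time, duration, offset);
match controller.send_query(query)? {
QueryResponse::AudioClipInstanceAdded(result) => result,
_ => Err("Unexpected query response".to_string()),
}
}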

View File

@ -13,7 +13,7 @@ pub mod tui;
// Re-export commonly used types
pub use audio::{
AudioClipInstanceId, AudioClipSnapshot, AudioPool, AudioTrack, AutomationLane, AutomationLaneId, AutomationPoint, BufferPool, Clip, ClipId, CurveType, Engine, EngineController,
AudioClipInstanceId, AudioPool, AudioTrack, AutomationLane, AutomationLaneId, AutomationPoint, BufferPool, Clip, ClipId, CurveType, Engine, EngineController,
Metatrack, MidiClip, MidiClipId, MidiClipInstance, MidiClipInstanceId, MidiEvent, MidiTrack, ParameterId, PoolAudioFile, Project, RecordingState, RenderContext, Track, TrackId,
TrackNode,
};

View File

@ -66,7 +66,5 @@ windows-sys = { version = "0.60", features = [
"Win32_System_Memory",
] }
[dependencies.tiny-skia]
version = "0.11"
[dev-dependencies]
tiny-skia = "0.11"

View File

@ -235,20 +235,20 @@ impl Action for AddClipInstanceAction {
}
}
AudioClipType::Sampled { audio_pool_index } => {
// For sampled audio, send AddAudioClipSync query
use daw_backend::command::{Query, QueryResponse};
let internal_start = self.clip_instance.trim_start;
let internal_end = self.clip_instance.trim_end.unwrap_or(clip.duration);
let effective_duration = self.clip_instance.timeline_duration
.unwrap_or(internal_end - internal_start);
let start_time = self.clip_instance.timeline_start;
let instance_id = controller.add_audio_clip(
*backend_track_id,
*audio_pool_index,
start_time,
effective_duration,
internal_start,
);
let query =
Query::AddAudioClipSync(*backend_track_id, *audio_pool_index, start_time, effective_duration, internal_start);
match controller.send_query(query)? {
QueryResponse::AudioClipInstanceAdded(Ok(instance_id)) => {
self.backend_track_id = Some(*backend_track_id);
self.backend_audio_instance_id = Some(instance_id);
@ -260,6 +260,10 @@ impl Action for AddClipInstanceAction {
Ok(())
}
QueryResponse::AudioClipInstanceAdded(Err(e)) => Err(e),
_ => Err("Unexpected query response".to_string()),
}
}
AudioClipType::Recording => {
// Recording clips are not synced to backend until finalized
Ok(())

View File

@ -194,24 +194,30 @@ impl Action for RemoveClipInstancesAction {
}
}
AudioClipType::Sampled { audio_pool_index } => {
use daw_backend::command::{Query, QueryResponse};
let internal_start = instance.trim_start;
let internal_end = instance.trim_end.unwrap_or(clip.duration);
let effective_duration = instance.timeline_duration
.unwrap_or(internal_end - internal_start);
let start_time = instance.timeline_start;
let new_id = controller.add_audio_clip(
let query = Query::AddAudioClipSync(
track_id,
*audio_pool_index,
start_time,
effective_duration,
internal_start,
);
if let Ok(QueryResponse::AudioClipInstanceAdded(Ok(new_id))) =
controller.send_query(query)
{
backend.clip_instance_to_backend_map.insert(
instance.id,
BackendClipInstanceId::Audio(new_id),
);
}
}
AudioClipType::Recording => {}
}
}

View File

@ -415,6 +415,8 @@ impl Action for SplitClipInstanceAction {
}
}
AudioClipType::Sampled { audio_pool_index } => {
use daw_backend::command::{Query, QueryResponse};
// 1. Trim the original (left) instance
let orig_internal_start = original_instance.trim_start;
let orig_internal_end = original_instance.trim_end.unwrap_or(clip.duration);
@ -433,7 +435,7 @@ impl Action for SplitClipInstanceAction {
.unwrap_or(internal_end - internal_start);
let start_time = new_instance.timeline_start;
let instance_id = controller.add_audio_clip(
let query = Query::AddAudioClipSync(
*backend_track_id,
*audio_pool_index,
start_time,
@ -441,6 +443,8 @@ impl Action for SplitClipInstanceAction {
internal_start,
);
match controller.send_query(query)? {
QueryResponse::AudioClipInstanceAdded(Ok(instance_id)) => {
self.backend_track_id = Some(*backend_track_id);
self.backend_audio_instance_id = Some(instance_id);
@ -451,6 +455,10 @@ impl Action for SplitClipInstanceAction {
Ok(())
}
QueryResponse::AudioClipInstanceAdded(Err(e)) => Err(e),
_ => Err("Unexpected query response".to_string()),
}
}
AudioClipType::Recording => {
// Recording clips cannot be split
Err("Cannot split a clip that is currently recording".to_string())

View File

@ -646,46 +646,6 @@ pub struct ClipInstance {
pub loop_before: Option<f64>,
}
/// High 64-bit sentinel used to identify UUIDs that encode a backend audio clip instance ID.
/// Using a sentinel that would never appear in a v4 random UUID (which has specific version bits).
const AUDIO_BACKEND_UUID_HIGH: u64 = 0xDEAD_BEEF_CAFE_BABE;
/// Convert a backend `AudioClipInstanceId` (u32) to a synthetic UUID for use in selection/hit-testing.
/// These UUIDs are distinct from real document UUIDs and can be round-tripped via `audio_backend_id_from_uuid`.
pub fn audio_backend_uuid(backend_id: u32) -> Uuid {
Uuid::from_u64_pair(AUDIO_BACKEND_UUID_HIGH, backend_id as u64)
}
/// Extract a backend `AudioClipInstanceId` from a synthetic UUID created by `audio_backend_uuid`.
/// Returns `None` if this is a regular document UUID.
pub fn audio_backend_id_from_uuid(uuid: Uuid) -> Option<u32> {
let (high, low) = uuid.as_u64_pair();
if high == AUDIO_BACKEND_UUID_HIGH {
Some(low as u32)
} else {
None
}
}
/// High 64-bit sentinel used to identify UUIDs that encode a backend MIDI clip instance ID.
const MIDI_BACKEND_UUID_HIGH: u64 = 0xDEAD_BEEF_CAFE_BEEF;
/// Convert a backend `MidiClipInstanceId` (u32) to a synthetic UUID for use in selection/hit-testing.
pub fn midi_backend_uuid(backend_id: u32) -> Uuid {
Uuid::from_u64_pair(MIDI_BACKEND_UUID_HIGH, backend_id as u64)
}
/// Extract a backend `MidiClipInstanceId` from a synthetic UUID created by `midi_backend_uuid`.
/// Returns `None` if this is a regular document UUID.
pub fn midi_backend_id_from_uuid(uuid: Uuid) -> Option<u32> {
let (high, low) = uuid.as_u64_pair();
if high == MIDI_BACKEND_UUID_HIGH {
Some(low as u32)
} else {
None
}
}
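// Round-trip sketch for the sentinel scheme above (illustrative; assumes the
// uuid crate's v4 feature for Uuid::new_v4):
fn sentinel_roundtrip_sketch() {
let synthetic = audio_backend_uuid(42);
assert_eq!(audio_backend_id_from_uuid(synthetic), Some(42));
// Real v4 document UUIDs carry version bits that can never match the
// sentinel high word, so they decode to None.
assert_eq!(audio_backend_id_from_uuid(Uuid::new_v4()), None);
}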
impl ClipInstance {
/// Create a new clip instance
pub fn new(clip_id: Uuid) -> Self {

View File

@ -665,20 +665,6 @@ impl Document {
self.audio_clips.get(id)
}
/// Find the document audio clip (UUID + ref) that owns the given backend pool index.
pub fn audio_clip_by_pool_index(&self, pool_index: usize) -> Option<(Uuid, &AudioClip)> {
self.audio_clips.iter()
.find(|(_, c)| c.audio_pool_index() == Some(pool_index))
.map(|(&id, c)| (id, c))
}
/// Find the document audio clip (UUID + ref) that owns the given backend MIDI clip ID.
pub fn audio_clip_by_midi_clip_id(&self, midi_clip_id: u32) -> Option<(Uuid, &AudioClip)> {
self.audio_clips.iter()
.find(|(_, c)| c.midi_clip_id() == Some(midi_clip_id))
.map(|(&id, c)| (id, c))
}
/// Get a mutable vector clip by ID
pub fn get_vector_clip_mut(&mut self, id: &Uuid) -> Option<&mut VectorClip> {
self.vector_clips.get_mut(id)

View File

@ -143,11 +143,8 @@ pub enum RenderedLayerType {
pub struct RenderedLayer {
/// The layer's unique identifier
pub layer_id: Uuid,
/// Vello scene — only populated for `RenderedLayerType::Vector` in GPU mode.
/// Vello scene — only populated for `RenderedLayerType::Vector`.
pub scene: Scene,
/// CPU-rendered pixmap — `Some` for `RenderedLayerType::Vector` in CPU mode, `None` otherwise.
/// When `Some`, `scene` is empty; the pixmap is uploaded directly to the GPU texture.
pub cpu_pixmap: Option<tiny_skia::Pixmap>,
/// Layer opacity (0.0 to 1.0)
pub opacity: f32,
/// Blend mode for compositing
@ -164,7 +161,6 @@ impl RenderedLayer {
Self {
layer_id,
scene: Scene::new(),
cpu_pixmap: None,
opacity: 1.0,
blend_mode: BlendMode::Normal,
has_content: false,
@ -177,7 +173,6 @@ impl RenderedLayer {
Self {
layer_id,
scene: Scene::new(),
cpu_pixmap: None,
opacity,
blend_mode,
has_content: false,
@ -191,7 +186,6 @@ impl RenderedLayer {
Self {
layer_id,
scene: Scene::new(),
cpu_pixmap: None,
opacity,
blend_mode: BlendMode::Normal,
has_content,
@ -202,10 +196,8 @@ impl RenderedLayer {
/// Result of rendering a document for compositing
pub struct CompositeRenderResult {
/// Background scene — GPU mode only; empty in CPU mode.
/// Background scene (rendered separately for potential optimization)
pub background: Scene,
/// CPU-rendered background pixmap — `Some` in CPU mode, `None` in GPU mode.
pub background_cpu: Option<tiny_skia::Pixmap>,
/// Rendered layers in bottom-to-top order
pub layers: Vec<RenderedLayer>,
/// Document dimensions
@ -279,7 +271,6 @@ pub fn render_document_for_compositing(
let float_entry = RenderedLayer {
layer_id: Uuid::nil(), // sentinel — not a real document layer
scene: Scene::new(),
cpu_pixmap: None,
opacity: 1.0,
blend_mode: crate::gpu::BlendMode::Normal,
has_content: !float_sel.pixels.is_empty(),
@ -299,7 +290,6 @@ pub fn render_document_for_compositing(
CompositeRenderResult {
background,
background_cpu: None,
layers: rendered_layers,
width: document.width,
height: document.height,
@ -1201,480 +1191,6 @@ fn render_vector_layer(
}
}
// ============================================================================
// CPU Render Path (tiny-skia)
// ============================================================================
//
// When Vello's CPU renderer is too slow (fixed per-call overhead), we render
// vector layers to `tiny_skia::Pixmap` and upload via `queue.write_texture`.
// The GPU compositor pipeline (sRGB→linear, blend modes) is unchanged.
/// Convert a kurbo `Affine` to a tiny-skia `Transform`.
///
/// kurbo `as_coeffs()` → `[a, b, c, d, e, f]` where the matrix is:
/// ```text
/// | a c e |
/// | b d f |
/// | 0 0 1 |
/// ```
/// tiny-skia `from_row(sx, ky, kx, sy, tx, ty)` fills the same layout.
fn affine_to_ts(affine: Affine) -> tiny_skia::Transform {
let [a, b, c, d, e, f] = affine.as_coeffs();
tiny_skia::Transform::from_row(a as f32, b as f32, c as f32, d as f32, e as f32, f as f32)
}
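// Coefficient check for the mapping above (illustrative): scale-by-2 applied
// after a (10, 20) translation gives 2x + 20 and 2y + 40 in both forms.
fn affine_to_ts_sketch() {
let a = Affine::scale(2.0) * Affine::translate((10.0, 20.0));
let ts = affine_to_ts(a);
assert_eq!((ts.sx, ts.sy), (2.0, 2.0));
assert_eq!((ts.kx, ts.ky), (0.0, 0.0));
assert_eq!((ts.tx, ts.ty), (20.0, 40.0));
}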
/// Convert a kurbo `BezPath` to a tiny-skia `Path`. Returns `None` if the path
/// produces no segments (tiny-skia requires at least one segment).
fn bezpath_to_ts(path: &kurbo::BezPath) -> Option<tiny_skia::Path> {
use kurbo::PathEl;
let mut pb = tiny_skia::PathBuilder::new();
for el in path.iter() {
match el {
PathEl::MoveTo(p) => pb.move_to(p.x as f32, p.y as f32),
PathEl::LineTo(p) => pb.line_to(p.x as f32, p.y as f32),
PathEl::QuadTo(p1, p2) => {
pb.quad_to(p1.x as f32, p1.y as f32, p2.x as f32, p2.y as f32)
}
PathEl::CurveTo(p1, p2, p3) => pb.cubic_to(
p1.x as f32, p1.y as f32,
p2.x as f32, p2.y as f32,
p3.x as f32, p3.y as f32,
),
PathEl::ClosePath => pb.close(),
}
}
pb.finish()
}
/// Build a tiny-skia `Paint` with a solid colour and optional opacity.
fn solid_paint(r: u8, g: u8, b: u8, a: u8, opacity: f32) -> tiny_skia::Paint<'static> {
let alpha = ((a as f32 / 255.0) * opacity * 255.0).round().clamp(0.0, 255.0) as u8;
let mut paint = tiny_skia::Paint::default();
paint.set_color_rgba8(r, g, b, alpha);
paint.anti_alias = true;
paint
}
/// Build a tiny-skia `Paint` with a gradient shader.
fn gradient_paint<'a>(
grad: &crate::gradient::ShapeGradient,
start: kurbo::Point,
end: kurbo::Point,
opacity: f32,
) -> Option<tiny_skia::Paint<'a>> {
use crate::gradient::GradientType;
use tiny_skia::{Color, GradientStop, SpreadMode};
let spread_mode = match grad.extend {
crate::gradient::GradientExtend::Pad => SpreadMode::Pad,
crate::gradient::GradientExtend::Reflect => SpreadMode::Reflect,
crate::gradient::GradientExtend::Repeat => SpreadMode::Repeat,
};
let stops: Vec<GradientStop> = grad.stops.iter().map(|s| {
let a = ((s.color.a as f32 / 255.0) * opacity * 255.0).round().clamp(0.0, 255.0) as u8;
GradientStop::new(s.position, Color::from_rgba8(s.color.r, s.color.g, s.color.b, a))
}).collect();
let shader = match grad.kind {
GradientType::Linear => {
tiny_skia::LinearGradient::new(
tiny_skia::Point { x: start.x as f32, y: start.y as f32 },
tiny_skia::Point { x: end.x as f32, y: end.y as f32 },
stops,
spread_mode,
tiny_skia::Transform::identity(),
)?
}
GradientType::Radial => {
let mid = kurbo::Point::new((start.x + end.x) * 0.5, (start.y + end.y) * 0.5);
let dx = end.x - start.x;
let dy = end.y - start.y;
let radius = ((dx * dx + dy * dy).sqrt() * 0.5) as f32;
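// e.g. start (0, 0), end (10, 0): mid = (5, 0), radius = 5.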
tiny_skia::RadialGradient::new(
tiny_skia::Point { x: mid.x as f32, y: mid.y as f32 },
tiny_skia::Point { x: mid.x as f32, y: mid.y as f32 },
radius,
stops,
spread_mode,
tiny_skia::Transform::identity(),
)?
}
};
let mut paint = tiny_skia::Paint::default();
paint.shader = shader;
paint.anti_alias = true;
Some(paint)
}
/// Render the document background to a CPU pixmap.
fn render_background_cpu(
document: &Document,
pixmap: &mut tiny_skia::PixmapMut<'_>,
base_transform: Affine,
draw_checkerboard: bool,
) {
let ts_transform = affine_to_ts(base_transform);
let bg_rect = tiny_skia::Rect::from_xywh(
0.0, 0.0, document.width as f32, document.height as f32,
);
let Some(bg_rect) = bg_rect else { return };
let bg = &document.background_color;
// Draw checkerboard behind transparent backgrounds
if draw_checkerboard && bg.a < 255 {
// Build a 32×32 checkerboard pixmap (16×16 px light/dark squares)
// in document space — each square = 16 document units.
if let Some(mut checker) = tiny_skia::Pixmap::new(32, 32) {
let light = tiny_skia::Color::from_rgba8(204, 204, 204, 255);
let dark = tiny_skia::Color::from_rgba8(170, 170, 170, 255);
for py in 0u32..32 {
for px in 0u32..32 {
let is_light = ((px / 16) + (py / 16)) % 2 == 0;
let color = if is_light { light } else { dark };
checker.pixels_mut()[(py * 32 + px) as usize] =
tiny_skia::PremultipliedColorU8::from_rgba(
(color.red() * 255.0) as u8,
(color.green() * 255.0) as u8,
(color.blue() * 255.0) as u8,
(color.alpha() * 255.0) as u8,
).unwrap();
}
}
let pattern = tiny_skia::Pattern::new(
checker.as_ref(),
tiny_skia::SpreadMode::Repeat,
tiny_skia::FilterQuality::Nearest,
1.0,
tiny_skia::Transform::identity(),
);
let mut paint = tiny_skia::Paint::default();
paint.shader = pattern;
pixmap.fill_rect(bg_rect, &paint, ts_transform, None);
}
}
// Draw the background colour
let alpha = bg.a;
let paint = solid_paint(bg.r, bg.g, bg.b, alpha, 1.0);
pixmap.fill_rect(bg_rect, &paint, ts_transform, None);
}
/// Render a DCEL to a CPU pixmap.
fn render_dcel_cpu(
dcel: &crate::dcel::Dcel,
pixmap: &mut tiny_skia::PixmapMut<'_>,
transform: tiny_skia::Transform,
opacity: f32,
_document: &Document,
_image_cache: &mut ImageCache,
) {
// 1. Faces (fills)
for (i, face) in dcel.faces.iter().enumerate() {
if face.deleted || i == 0 {
continue;
}
if face.fill_color.is_none() && face.image_fill.is_none() && face.gradient_fill.is_none() {
continue;
}
let face_id = crate::dcel::FaceId(i as u32);
let path = dcel.face_to_bezpath_with_holes(face_id);
let Some(ts_path) = bezpath_to_ts(&path) else { continue };
let fill_type = match face.fill_rule {
crate::shape::FillRule::NonZero => tiny_skia::FillRule::Winding,
crate::shape::FillRule::EvenOdd => tiny_skia::FillRule::EvenOdd,
};
let mut filled = false;
// Gradient fill (takes priority over solid)
if let Some(ref grad) = face.gradient_fill {
let bbox: kurbo::Rect = vello::kurbo::Shape::bounding_box(&path);
let (start, end) = match (grad.start_world, grad.end_world) {
(Some((sx, sy)), Some((ex, ey))) => match grad.kind {
crate::gradient::GradientType::Linear => {
(kurbo::Point::new(sx, sy), kurbo::Point::new(ex, ey))
}
crate::gradient::GradientType::Radial => {
let opp = kurbo::Point::new(2.0 * sx - ex, 2.0 * sy - ey);
(opp, kurbo::Point::new(ex, ey))
}
},
_ => gradient_bbox_endpoints(grad.angle, bbox),
};
if let Some(paint) = gradient_paint(grad, start, end, opacity) {
pixmap.fill_path(&ts_path, &paint, fill_type, transform, None);
filled = true;
}
}
// Image fill — not yet implemented for CPU renderer; fall through to solid or skip
// TODO: decode image to Pixmap and use as Pattern shader
// Solid colour fill
if !filled {
if let Some(fc) = &face.fill_color {
let paint = solid_paint(fc.r, fc.g, fc.b, fc.a, opacity);
pixmap.fill_path(&ts_path, &paint, fill_type, transform, None);
}
}
}
// 2. Edges (strokes)
for edge in &dcel.edges {
if edge.deleted {
continue;
}
if let (Some(stroke_color), Some(stroke_style)) = (&edge.stroke_color, &edge.stroke_style) {
let mut path = kurbo::BezPath::new();
path.move_to(edge.curve.p0);
path.curve_to(edge.curve.p1, edge.curve.p2, edge.curve.p3);
let Some(ts_path) = bezpath_to_ts(&path) else { continue };
let paint = solid_paint(stroke_color.r, stroke_color.g, stroke_color.b, stroke_color.a, opacity);
let stroke = tiny_skia::Stroke {
width: stroke_style.width as f32,
line_cap: match stroke_style.cap {
crate::shape::Cap::Butt => tiny_skia::LineCap::Butt,
crate::shape::Cap::Round => tiny_skia::LineCap::Round,
crate::shape::Cap::Square => tiny_skia::LineCap::Square,
},
line_join: match stroke_style.join {
crate::shape::Join::Miter => tiny_skia::LineJoin::Miter,
crate::shape::Join::Round => tiny_skia::LineJoin::Round,
crate::shape::Join::Bevel => tiny_skia::LineJoin::Bevel,
},
miter_limit: stroke_style.miter_limit as f32,
..Default::default()
};
pixmap.stroke_path(&ts_path, &paint, &stroke, transform, None);
}
}
}
/// Render a vector layer to a CPU pixmap.
fn render_vector_layer_cpu(
document: &Document,
time: f64,
layer: &crate::layer::VectorLayer,
pixmap: &mut tiny_skia::PixmapMut<'_>,
base_transform: Affine,
parent_opacity: f64,
image_cache: &mut ImageCache,
) {
let layer_opacity = parent_opacity * layer.layer.opacity;
for clip_instance in &layer.clip_instances {
let group_end_time = document.vector_clips.get(&clip_instance.clip_id)
.filter(|vc| vc.is_group)
.map(|_| {
let frame_duration = 1.0 / document.framerate;
layer.group_visibility_end(&clip_instance.id, clip_instance.timeline_start, frame_duration)
});
render_clip_instance_cpu(
document, time, clip_instance, layer_opacity, pixmap, base_transform,
&layer.layer.animation_data, image_cache, group_end_time,
);
}
if let Some(dcel) = layer.dcel_at_time(time) {
render_dcel_cpu(dcel, pixmap, affine_to_ts(base_transform), layer_opacity as f32, document, image_cache);
}
}
/// Render a clip instance (and its nested layers) to a CPU pixmap.
fn render_clip_instance_cpu(
document: &Document,
time: f64,
clip_instance: &crate::clip::ClipInstance,
parent_opacity: f64,
pixmap: &mut tiny_skia::PixmapMut<'_>,
base_transform: Affine,
animation_data: &crate::animation::AnimationData,
image_cache: &mut ImageCache,
group_end_time: Option<f64>,
) {
let Some(vector_clip) = document.vector_clips.get(&clip_instance.clip_id) else { return };
let clip_time = if vector_clip.is_group {
let end = group_end_time.unwrap_or(clip_instance.timeline_start);
if time < clip_instance.timeline_start || time >= end { return; }
0.0
} else {
let clip_dur = document.get_clip_duration(&vector_clip.id).unwrap_or(vector_clip.duration);
let Some(t) = clip_instance.remap_time(time, clip_dur) else { return };
t
};
let transform = &clip_instance.transform;
let x = animation_data.eval(&crate::animation::AnimationTarget::Object { id: clip_instance.id, property: TransformProperty::X }, time, transform.x);
let y = animation_data.eval(&crate::animation::AnimationTarget::Object { id: clip_instance.id, property: TransformProperty::Y }, time, transform.y);
let rotation = animation_data.eval(&crate::animation::AnimationTarget::Object { id: clip_instance.id, property: TransformProperty::Rotation }, time, transform.rotation);
let scale_x = animation_data.eval(&crate::animation::AnimationTarget::Object { id: clip_instance.id, property: TransformProperty::ScaleX }, time, transform.scale_x);
let scale_y = animation_data.eval(&crate::animation::AnimationTarget::Object { id: clip_instance.id, property: TransformProperty::ScaleY }, time, transform.scale_y);
let skew_x = animation_data.eval(&crate::animation::AnimationTarget::Object { id: clip_instance.id, property: TransformProperty::SkewX }, time, transform.skew_x);
let skew_y = animation_data.eval(&crate::animation::AnimationTarget::Object { id: clip_instance.id, property: TransformProperty::SkewY }, time, transform.skew_y);
let opacity = animation_data.eval(&crate::animation::AnimationTarget::Object { id: clip_instance.id, property: TransformProperty::Opacity }, time, clip_instance.opacity);
let center_x = vector_clip.width / 2.0;
let center_y = vector_clip.height / 2.0;
let skew_transform = if skew_x != 0.0 || skew_y != 0.0 {
let sx = if skew_x != 0.0 { Affine::new([1.0, 0.0, skew_x.to_radians().tan(), 1.0, 0.0, 0.0]) } else { Affine::IDENTITY };
let sy = if skew_y != 0.0 { Affine::new([1.0, skew_y.to_radians().tan(), 0.0, 1.0, 0.0, 0.0]) } else { Affine::IDENTITY };
Affine::translate((center_x, center_y)) * sx * sy * Affine::translate((-center_x, -center_y))
} else { Affine::IDENTITY };
let clip_transform = Affine::translate((x, y)) * Affine::rotate(rotation.to_radians()) * Affine::scale_non_uniform(scale_x, scale_y) * skew_transform;
let instance_transform = base_transform * clip_transform;
let clip_opacity = parent_opacity * opacity;
for layer_node in vector_clip.layers.iter() {
if !layer_node.data.visible() { continue; }
render_vector_content_cpu(document, clip_time, &layer_node.data, pixmap, instance_transform, clip_opacity, image_cache);
}
}
/// Render only vector/group content from a layer to a CPU pixmap.
/// Video, Audio, Effect, and Raster variants are intentionally skipped —
/// they are handled by the compositor via other paths.
fn render_vector_content_cpu(
document: &Document,
time: f64,
layer: &AnyLayer,
pixmap: &mut tiny_skia::PixmapMut<'_>,
base_transform: Affine,
parent_opacity: f64,
image_cache: &mut ImageCache,
) {
match layer {
AnyLayer::Vector(vector_layer) => {
render_vector_layer_cpu(document, time, vector_layer, pixmap, base_transform, parent_opacity, image_cache);
}
AnyLayer::Group(group_layer) => {
for child in &group_layer.children {
render_vector_content_cpu(document, time, child, pixmap, base_transform, parent_opacity, image_cache);
}
}
AnyLayer::Audio(_) | AnyLayer::Video(_) | AnyLayer::Effect(_) | AnyLayer::Raster(_) => {}
}
}
/// Render a single layer to its own isolated CPU pixmap.
fn render_layer_isolated_cpu(
document: &Document,
time: f64,
layer: &AnyLayer,
base_transform: Affine,
width: u32,
height: u32,
image_cache: &mut ImageCache,
video_manager: &std::sync::Arc<std::sync::Mutex<crate::video::VideoManager>>,
camera_frame: Option<&crate::webcam::CaptureFrame>,
) -> RenderedLayer {
// Reuse the GPU path for non-vector layer types (they don't use the Vello scene anyway)
let mut rendered = render_layer_isolated(document, time, layer, base_transform, image_cache, video_manager, camera_frame);
// For vector layers, replace the empty scene with a CPU pixmap
if matches!(rendered.layer_type, RenderedLayerType::Vector) {
let opacity = layer.opacity() as f64;
if let Some(mut pixmap) = tiny_skia::Pixmap::new(width.max(1), height.max(1)) {
{
let mut pm = pixmap.as_mut();
render_vector_content_cpu(document, time, layer, &mut pm, base_transform, opacity, image_cache);
}
rendered.has_content = true;
rendered.cpu_pixmap = Some(pixmap);
}
}
rendered
}
/// Render a document for compositing using the CPU (tiny-skia) path.
///
/// Produces the same `CompositeRenderResult` shape as `render_document_for_compositing`,
/// but vector layers are rendered to `Pixmap`s instead of Vello `Scene`s.
/// `viewport_width` / `viewport_height` set the pixmap dimensions (should match
/// the wgpu render buffer size).
pub fn render_document_for_compositing_cpu(
document: &Document,
base_transform: Affine,
viewport_width: u32,
viewport_height: u32,
image_cache: &mut ImageCache,
video_manager: &std::sync::Arc<std::sync::Mutex<crate::video::VideoManager>>,
camera_frame: Option<&crate::webcam::CaptureFrame>,
floating_selection: Option<&crate::selection::RasterFloatingSelection>,
draw_checkerboard: bool,
) -> CompositeRenderResult {
let time = document.current_time;
let w = viewport_width.max(1);
let h = viewport_height.max(1);
// Render background
let background_cpu = tiny_skia::Pixmap::new(w, h).map(|mut pixmap| {
render_background_cpu(document, &mut pixmap.as_mut(), base_transform, draw_checkerboard);
pixmap
});
// Solo check
let any_soloed = document.visible_layers().any(|layer| layer.soloed());
let layers_to_render: Vec<_> = document
.visible_layers()
.filter(|layer| if any_soloed { layer.soloed() } else { true })
.collect();
let mut rendered_layers = Vec::with_capacity(layers_to_render.len());
for layer in layers_to_render {
let rendered = render_layer_isolated_cpu(
document, time, layer, base_transform, w, h,
image_cache, video_manager, camera_frame,
);
rendered_layers.push(rendered);
}
// Insert floating raster selection at the correct z-position (same logic as GPU path)
if let Some(float_sel) = floating_selection {
if let Some(pos) = rendered_layers.iter().position(|l| l.layer_id == float_sel.layer_id) {
let parent_transform = match &rendered_layers[pos].layer_type {
RenderedLayerType::Raster { transform, .. } => *transform,
_ => Affine::IDENTITY,
};
let float_entry = RenderedLayer {
layer_id: Uuid::nil(),
scene: Scene::new(),
cpu_pixmap: None,
opacity: 1.0,
blend_mode: crate::gpu::BlendMode::Normal,
has_content: !float_sel.pixels.is_empty(),
layer_type: RenderedLayerType::Float {
canvas_id: float_sel.canvas_id,
x: float_sel.x,
y: float_sel.y,
width: float_sel.width,
height: float_sel.height,
transform: parent_transform,
pixels: std::sync::Arc::clone(&float_sel.pixels),
},
};
rendered_layers.insert(pos + 1, float_entry);
}
}
CompositeRenderResult {
background: Scene::new(),
background_cpu,
layers: rendered_layers,
width: document.width,
height: document.height,
}
}
#[cfg(test)]
mod tests {
use super::*;

View File

@ -4998,8 +4998,6 @@ impl eframe::App for EditorApp {
let clip_instance = ClipInstance::new(doc_clip_id)
.with_timeline_start(self.recording_start_time);
let clip_instance_id = clip_instance.id;
// Add instance to layer (works for root and inside movie clips)
if let Some(layer) = self.action_executor.document_mut().get_layer_mut(&layer_id) {
if let lightningbeam_core::layer::AnyLayer::Audio(audio_layer) = layer {
@ -5008,12 +5006,6 @@ impl eframe::App for EditorApp {
}
}
// Insert mapping so the snapshot cache assigns the doc UUID to this recording clip
self.clip_instance_to_backend_map.insert(
clip_instance_id,
lightningbeam_core::action::BackendClipInstanceId::Audio(backend_clip_id),
);
// Store mapping for later updates
self.recording_clips.insert(layer_id, backend_clip_id);
}
@ -5125,16 +5117,41 @@ impl eframe::App for EditorApp {
}
}
// The backend already has the clip at _backend_clip_id (created
// during handle_start_recording and finalized in handle_stop_recording).
// Map the document instance_id → the existing backend clip so that
// delete/move/trim actions can reference it correctly.
// DO NOT call AddAudioClipSync — that would create a duplicate clip.
// Sync the clip instance to backend for playback
if let Some(backend_track_id) = self.layer_to_track_map.get(&layer_id) {
if let Some(ref controller_arc) = self.audio_controller {
let mut controller = controller_arc.lock().unwrap();
use daw_backend::command::{Query, QueryResponse};
let query = Query::AddAudioClipSync(
*backend_track_id,
pool_index,
timeline_start,
duration,
trim_start
);
match controller.send_query(query) {
Ok(QueryResponse::AudioClipInstanceAdded(Ok(backend_instance_id))) => {
// Store the mapping
self.clip_instance_to_backend_map.insert(
instance_id,
lightningbeam_core::action::BackendClipInstanceId::Audio(_backend_clip_id),
lightningbeam_core::action::BackendClipInstanceId::Audio(backend_instance_id)
);
eprintln!("[AUDIO] Mapped doc instance {} → backend clip {}", instance_id, _backend_clip_id);
println!("✅ Synced recording to backend: instance_id={}", backend_instance_id);
}
Ok(QueryResponse::AudioClipInstanceAdded(Err(e))) => {
eprintln!("❌ Failed to sync recording to backend: {}", e);
}
Ok(_) => {
eprintln!("❌ Unexpected query response when syncing recording");
}
Err(e) => {
eprintln!("❌ Failed to send query to backend: {}", e);
}
}
}
}
}
}
@ -5164,29 +5181,6 @@ impl eframe::App for EditorApp {
.filter(|lid| self.recording_layer_ids.contains(lid))
.copied();
if let Some(layer_id) = midi_layer_id {
// Lazily create the doc clip + instance on the first progress event
// (there is no MidiRecordingStarted event from the backend).
let already_exists = self.clip_instance_to_backend_map.values().any(|v| {
matches!(v, lightningbeam_core::action::BackendClipInstanceId::Midi(id) if *id == clip_id)
});
if !already_exists {
use lightningbeam_core::clip::{AudioClip, ClipInstance};
let clip = AudioClip::new_recording("Recording...");
let doc_clip_id = self.action_executor.document_mut().add_audio_clip(clip);
let clip_instance = ClipInstance::new(doc_clip_id)
.with_timeline_start(self.recording_start_time);
let clip_instance_id = clip_instance.id;
if let Some(layer) = self.action_executor.document_mut().get_layer_mut(&layer_id) {
if let lightningbeam_core::layer::AnyLayer::Audio(audio_layer) = layer {
audio_layer.clip_instances.push(clip_instance);
}
}
self.clip_instance_to_backend_map.insert(
clip_instance_id,
lightningbeam_core::action::BackendClipInstanceId::Midi(clip_id),
);
}
let doc_clip_id = {
let document = self.action_executor.document();
document.get_layer(&layer_id)
@ -5781,9 +5775,6 @@ impl eframe::App for EditorApp {
schneider_max_error: &mut self.schneider_max_error,
raster_settings: &mut self.raster_settings,
audio_controller: self.audio_controller.as_ref(),
clip_snapshot: self.audio_controller.as_ref().map(|arc| {
arc.lock().unwrap().clip_snapshot()
}),
audio_input_opener: &mut self.audio_input,
audio_input_stream: &mut self.audio_input_stream,
audio_buffer_size: self.audio_buffer_size,
@ -5801,7 +5792,6 @@ impl eframe::App for EditorApp {
paint_bucket_gap_tolerance: &mut self.paint_bucket_gap_tolerance,
polygon_sides: &mut self.polygon_sides,
layer_to_track_map: &self.layer_to_track_map,
clip_instance_to_backend_map: &self.clip_instance_to_backend_map,
midi_event_cache: &mut self.midi_event_cache,
audio_pools_with_new_waveforms: &self.audio_pools_with_new_waveforms,
raw_audio_cache: &self.raw_audio_cache,

View File

@ -192,9 +192,6 @@ pub struct SharedPaneState<'a> {
pub raster_settings: &'a mut crate::tools::RasterToolSettings,
/// Audio engine controller for playback control (wrapped in Arc<Mutex<>> for thread safety)
pub audio_controller: Option<&'a std::sync::Arc<std::sync::Mutex<daw_backend::EngineController>>>,
/// Snapshot of all audio/MIDI clip instances from the backend (for timeline rendering).
/// Updated by the audio thread after each mutation; UI reads it each frame.
pub clip_snapshot: Option<std::sync::Arc<std::sync::RwLock<daw_backend::AudioClipSnapshot>>>,
/// Opener for the microphone/line-in stream — consumed on first use.
pub audio_input_opener: &'a mut Option<daw_backend::InputStreamOpener>,
/// Live input stream handle; kept alive while recording is active.
@ -205,8 +202,6 @@ pub struct SharedPaneState<'a> {
pub video_manager: &'a std::sync::Arc<std::sync::Mutex<lightningbeam_core::video::VideoManager>>,
/// Maps all layer/group/clip UUIDs to backend track IDs (audio, MIDI, and metatracks)
pub layer_to_track_map: &'a std::collections::HashMap<Uuid, daw_backend::TrackId>,
/// Maps document clip instance UUIDs to backend clip instance IDs (for action dispatch)
pub clip_instance_to_backend_map: &'a std::collections::HashMap<Uuid, lightningbeam_core::action::BackendClipInstanceId>,
/// Global playback state
pub playback_time: &'a mut f64, // Current playback position in seconds
pub is_playing: &'a mut bool, // Whether playback is currently active

View File

@ -17,30 +17,6 @@ use std::sync::atomic::{AtomicBool, Ordering};
/// rendering path regardless of GPU capability.
pub static FORCE_CPU_RENDERER: AtomicBool = AtomicBool::new(false);
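// Usage sketch (illustrative): set the flag before the first renderer is
// built, e.g. from a CLI option, then read it wherever a path is chosen:
//
//     FORCE_CPU_RENDERER.store(true, Ordering::SeqCst);
//     let use_cpu = FORCE_CPU_RENDERER.load(Ordering::SeqCst);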
/// Upload a tiny-skia `Pixmap` directly to a wgpu texture (no Vello involved).
/// Used by the CPU render path to bypass `render_to_texture` overhead.
fn upload_pixmap_to_texture(queue: &wgpu::Queue, texture: &wgpu::Texture, pixmap: &tiny_skia::Pixmap) {
queue.write_texture(
wgpu::TexelCopyTextureInfo {
texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
aspect: wgpu::TextureAspect::All,
},
pixmap.data(),
wgpu::TexelCopyBufferLayout {
offset: 0,
bytes_per_row: Some(4 * pixmap.width()),
rows_per_image: None,
},
wgpu::Extent3d {
width: pixmap.width(),
height: pixmap.height(),
depth_or_array_layers: 1,
},
);
}
/// Enable HDR compositing pipeline (per-layer rendering with proper opacity)
/// Set to true to use the new pipeline, false for legacy single-scene rendering
const USE_HDR_COMPOSITING: bool = true; // Enabled for testing
@ -69,9 +45,6 @@ struct SharedVelloResources {
gpu_brush: Mutex<crate::gpu_brush::GpuBrushEngine>,
/// Canvas blit pipeline (renders GPU canvas to layer sRGB buffer)
canvas_blit: crate::gpu_brush::CanvasBlitPipeline,
/// True when Vello is running its CPU software renderer (either forced or GPU fallback).
/// Used to select cheaper antialiasing — Msaa16 on CPU costs 16× as much as Area.
is_cpu_renderer: bool,
}
/// Per-instance Vello resources (created for each Stage pane)
@ -119,8 +92,8 @@ impl SharedVelloResources {
)
}))
};
let (renderer, is_cpu_renderer) = match gpu_result {
Ok(Ok(r)) => (r, false),
let renderer = match gpu_result {
Ok(Ok(r)) => r,
Ok(Err(e)) => return Err(format!("Failed to create Vello renderer: {e}")),
Err(_) => {
if !use_cpu {
@ -129,7 +102,7 @@ impl SharedVelloResources {
capability). Falling back to CPU renderer; performance may be reduced."
);
}
let r = vello::Renderer::new(
vello::Renderer::new(
device,
vello::RendererOptions {
use_cpu: true,
@ -137,8 +110,7 @@ impl SharedVelloResources {
num_init_threads: std::num::NonZeroUsize::new(1),
pipeline_cache: None,
},
).map_err(|e| format!("CPU fallback renderer also failed: {e}"))?;
(r, true)
).map_err(|e| format!("CPU fallback renderer also failed: {e}"))?
}
};
@ -299,7 +271,6 @@ impl SharedVelloResources {
srgb_to_linear,
gpu_brush: Mutex::new(gpu_brush),
canvas_blit,
is_cpu_renderer: use_cpu || is_cpu_renderer,
})
}
}
@ -599,9 +570,6 @@ impl egui_wgpu::CallbackTrait for VelloCallback {
// Prints to stderr when any section exceeds 2 ms, or total > 8 ms.
let _t_prepare_start = std::time::Instant::now();
// On the CPU renderer Msaa16 runs the rasterizer 16× per frame; use Area instead.
let aa_method = if shared.is_cpu_renderer { vello::AaConfig::Area } else { vello::AaConfig::Msaa16 };
// Choose rendering path based on HDR compositing flag
let mut scene = if USE_HDR_COMPOSITING {
// HDR Compositing Pipeline: render each layer separately for proper opacity
@ -966,20 +934,7 @@ impl egui_wgpu::CallbackTrait for VelloCallback {
let mut image_cache = shared.image_cache.lock().unwrap();
let composite_result = if shared.is_cpu_renderer {
lightningbeam_core::renderer::render_document_for_compositing_cpu(
&self.ctx.document,
camera_transform,
width,
height,
&mut image_cache,
&shared.video_manager,
self.ctx.webcam_frame.as_ref(),
self.ctx.selection.raster_floating.as_ref(),
true,
)
} else {
lightningbeam_core::renderer::render_document_for_compositing(
let composite_result = lightningbeam_core::renderer::render_document_for_compositing(
&self.ctx.document,
camera_transform,
&mut image_cache,
@ -987,8 +942,7 @@ impl egui_wgpu::CallbackTrait for VelloCallback {
self.ctx.webcam_frame.as_ref(),
self.ctx.selection.raster_floating.as_ref(),
true, // Draw checkerboard for transparent backgrounds in the UI
)
};
);
drop(image_cache);
let _t_after_scene_build = std::time::Instant::now();
@ -1007,7 +961,7 @@ impl egui_wgpu::CallbackTrait for VelloCallback {
base_color: vello::peniko::Color::TRANSPARENT,
width,
height,
antialiasing_method: aa_method,
antialiasing_method: vello::AaConfig::Msaa16,
};
// HDR buffer spec for linear buffers
@ -1028,14 +982,10 @@ impl egui_wgpu::CallbackTrait for VelloCallback {
base_color: vello::peniko::Color::TRANSPARENT,
width,
height,
antialiasing_method: aa_method,
antialiasing_method: vello::AaConfig::Msaa16,
};
if let Some(pixmap) = &composite_result.background_cpu {
if let Some(tex) = buffer_pool.get_texture(bg_srgb_handle) {
upload_pixmap_to_texture(queue, tex, pixmap);
}
} else if let Ok(mut renderer) = shared.renderer.lock() {
if let Ok(mut renderer) = shared.renderer.lock() {
renderer.render_to_texture(device, queue, &composite_result.background, bg_srgb_view, &bg_render_params).ok();
}
@ -1234,11 +1184,7 @@ impl egui_wgpu::CallbackTrait for VelloCallback {
buffer_pool.get_view(hdr_layer_handle),
&instance_resources.hdr_texture_view,
) {
if let Some(pixmap) = &rendered_layer.cpu_pixmap {
if let Some(tex) = buffer_pool.get_texture(srgb_handle) {
upload_pixmap_to_texture(queue, tex, pixmap);
}
} else if let Ok(mut renderer) = shared.renderer.lock() {
if let Ok(mut renderer) = shared.renderer.lock() {
renderer.render_to_texture(device, queue, &rendered_layer.scene, srgb_view, &layer_render_params).ok();
}
let mut convert_encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
@ -1527,7 +1473,7 @@ impl egui_wgpu::CallbackTrait for VelloCallback {
let dim_params = vello::RenderParams {
base_color: vello::peniko::Color::TRANSPARENT,
width, height,
antialiasing_method: aa_method,
antialiasing_method: vello::AaConfig::Msaa16,
};
if let Ok(mut renderer) = shared.renderer.lock() {
renderer.render_to_texture(device, queue, &dim_scene, dim_srgb_view, &dim_params).ok();
@ -1568,7 +1514,7 @@ impl egui_wgpu::CallbackTrait for VelloCallback {
let clip_params = vello::RenderParams {
base_color: vello::peniko::Color::TRANSPARENT,
width, height,
antialiasing_method: aa_method,
antialiasing_method: vello::AaConfig::Msaa16,
};
if let Ok(mut renderer) = shared.renderer.lock() {
renderer.render_to_texture(device, queue, &clip_scene, clip_srgb_view, &clip_params).ok();
@ -2574,7 +2520,7 @@ impl egui_wgpu::CallbackTrait for VelloCallback {
base_color: vello::peniko::Color::TRANSPARENT,
width,
height,
antialiasing_method: aa_method,
antialiasing_method: vello::AaConfig::Msaa16,
};
if let Ok(mut renderer) = shared.renderer.lock() {
@ -2646,7 +2592,7 @@ impl egui_wgpu::CallbackTrait for VelloCallback {
base_color: vello::peniko::Color::from_rgb8(45, 45, 48), // Dark background
width,
height,
antialiasing_method: aa_method,
antialiasing_method: vello::AaConfig::Msaa16,
};
if let Ok(mut renderer) = shared.renderer.lock() {

View File

@ -7,12 +7,9 @@
/// - Basic layer visualization
use eframe::egui;
use lightningbeam_core::clip::{
ClipInstance, audio_backend_uuid, midi_backend_uuid,
};
use lightningbeam_core::clip::ClipInstance;
use lightningbeam_core::layer::{AnyLayer, AudioLayerType, GroupLayer, LayerTrait};
use super::{DragClipType, NodePath, PaneRenderer, SharedPaneState};
use std::collections::HashMap;
const RULER_HEIGHT: f32 = 30.0;
const LAYER_HEIGHT: f32 = 60.0;
@ -405,147 +402,26 @@ fn shift_toggle_layer(
*focus = lightningbeam_core::selection::FocusSelection::Layers(vec![layer_id]);
}
/// Build a per-audio-layer clip instance cache from the backend snapshot.
///
/// Audio layers read clip instances from the backend snapshot (source of truth) rather
/// than from `AudioLayer::clip_instances`. The cache maps layer_id → Vec<ClipInstance>.
///
/// Clip instance UUIDs in the cache are doc UUIDs when available (via reverse lookup of
/// `clip_instance_to_backend_map`), falling back to synthetic `audio_backend_uuid` /
/// `midi_backend_uuid` values for clips not yet in the map.
fn build_audio_clip_cache(
snap: &daw_backend::AudioClipSnapshot,
layer_to_track_map: &HashMap<uuid::Uuid, daw_backend::TrackId>,
document: &lightningbeam_core::document::Document,
clip_map: &HashMap<uuid::Uuid, lightningbeam_core::action::BackendClipInstanceId>,
) -> HashMap<uuid::Uuid, Vec<ClipInstance>> {
use lightningbeam_core::action::BackendClipInstanceId;
// Build reverse maps: backend_id → doc_instance_uuid
let mut audio_id_to_doc: HashMap<u32, uuid::Uuid> = HashMap::new();
let mut midi_id_to_doc: HashMap<u32, uuid::Uuid> = HashMap::new();
for (&doc_uuid, backend_id) in clip_map {
match backend_id {
BackendClipInstanceId::Audio(id) => { audio_id_to_doc.insert(*id, doc_uuid); }
BackendClipInstanceId::Midi(id) => { midi_id_to_doc.insert(*id, doc_uuid); }
}
}
let mut cache: HashMap<uuid::Uuid, Vec<ClipInstance>> = HashMap::new();
for (&layer_id, &track_id) in layer_to_track_map {
// Only process audio layers
match document.get_layer(&layer_id) {
Some(AnyLayer::Audio(_)) => {}
_ => continue,
}
let mut instances = Vec::new();
// Sampled audio clips
if let Some(audio_clips) = snap.audio.get(&track_id) {
for ac in audio_clips {
if let Some((clip_id, _)) = document.audio_clip_by_pool_index(ac.audio_pool_index) {
// Use doc UUID if we have it; otherwise fall back to synthetic UUID
let instance_id = audio_id_to_doc.get(&ac.id)
.copied()
.unwrap_or_else(|| audio_backend_uuid(ac.id));
let mut ci = ClipInstance::new(clip_id);
ci.id = instance_id;
ci.timeline_start = ac.external_start;
ci.trim_start = ac.internal_start;
ci.trim_end = Some(ac.internal_end);
let internal_dur = ac.internal_end - ac.internal_start;
if (ac.external_duration - internal_dur).abs() > 1e-9 {
ci.timeline_duration = Some(ac.external_duration);
}
ci.gain = ac.gain;
instances.push(ci);
}
}
}
// MIDI clips
if let Some(midi_clips) = snap.midi.get(&track_id) {
for mc in midi_clips {
if let Some((clip_id, _)) = document.audio_clip_by_midi_clip_id(mc.clip_id) {
let instance_id = midi_id_to_doc.get(&mc.id)
.copied()
.unwrap_or_else(|| midi_backend_uuid(mc.id));
let mut ci = ClipInstance::new(clip_id);
ci.id = instance_id;
ci.timeline_start = mc.external_start;
ci.trim_start = mc.internal_start;
ci.trim_end = Some(mc.internal_end);
let internal_dur = mc.internal_end - mc.internal_start;
if (mc.external_duration - internal_dur).abs() > 1e-9 {
ci.timeline_duration = Some(mc.external_duration);
}
instances.push(ci);
}
}
}
// Only insert if we found clips (so layer_clips() can fall back to al.clip_instances
// for layers where the snapshot has no clips yet, e.g. during recording setup)
if !instances.is_empty() {
cache.insert(layer_id, instances);
}
}
cache
}
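// Worked sketch of the loop/stretch rule applied to both clip kinds above:
// timeline_duration is recorded only when the instance spans a different
// amount of timeline than its trimmed source region (beyond a 1e-9 epsilon).
fn timeline_duration(internal_start: f64, internal_end: f64, external_duration: f64) -> Option<f64> {
    let internal = internal_end - internal_start;
    if (external_duration - internal).abs() > 1e-9 {
        Some(external_duration) // looped or stretched: duration must be explicit
    } else {
        None // plays exactly once: duration is implied by the trim points
    }
}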
/// Get clip instances for a layer. Audio layers read from the snapshot-based cache,
/// falling back to the doc's `clip_instances` whenever the cache entry is missing or
/// empty (e.g., a recording clip not yet reflected in the snapshot).
fn layer_clips<'a>(
layer: &'a AnyLayer,
audio_cache: &'a HashMap<uuid::Uuid, Vec<ClipInstance>>,
) -> &'a [ClipInstance] {
match layer {
AnyLayer::Audio(al) => {
match audio_cache.get(&al.layer.id) {
Some(cached) if !cached.is_empty() => cached.as_slice(),
// Cache empty or missing: fall back to doc (covers recording-in-progress)
_ => &al.clip_instances,
}
}
AnyLayer::Vector(l) => &l.clip_instances,
AnyLayer::Video(l) => &l.clip_instances,
AnyLayer::Effect(l) => &l.clip_instances,
AnyLayer::Group(_) => &[],
AnyLayer::Raster(_) => &[],
}
}
/// Collect all (layer_ref, clip_instances) tuples from context_layers,
/// recursively descending into group children.
/// Returns (&AnyLayer, &[ClipInstance]) so callers have access to both layer info and clips.
fn all_layer_clip_instances<'a>(
context_layers: &[&'a AnyLayer],
audio_cache: &'a HashMap<uuid::Uuid, Vec<ClipInstance>>,
) -> Vec<(&'a AnyLayer, &'a [ClipInstance])> {
fn all_layer_clip_instances<'a>(context_layers: &[&'a AnyLayer]) -> Vec<(&'a AnyLayer, &'a [ClipInstance])> {
let mut result = Vec::new();
for &layer in context_layers {
collect_clip_instances(layer, audio_cache, &mut result);
collect_clip_instances(layer, &mut result);
}
result
}
fn collect_clip_instances<'a>(
layer: &'a AnyLayer,
audio_cache: &'a HashMap<uuid::Uuid, Vec<ClipInstance>>,
result: &mut Vec<(&'a AnyLayer, &'a [ClipInstance])>,
) {
fn collect_clip_instances<'a>(layer: &'a AnyLayer, result: &mut Vec<(&'a AnyLayer, &'a [ClipInstance])>) {
match layer {
AnyLayer::Audio(_) => result.push((layer, layer_clips(layer, audio_cache))),
AnyLayer::Vector(l) => result.push((layer, &l.clip_instances)),
AnyLayer::Audio(l) => result.push((layer, &l.clip_instances)),
AnyLayer::Video(l) => result.push((layer, &l.clip_instances)),
AnyLayer::Effect(l) => result.push((layer, &l.clip_instances)),
AnyLayer::Group(g) => {
for child in &g.children {
collect_clip_instances(child, audio_cache, result);
collect_clip_instances(child, result);
}
}
AnyLayer::Raster(_) => {}
@ -922,7 +798,6 @@ impl TimelinePane {
content_rect: egui::Rect,
header_rect: egui::Rect,
editing_clip_id: Option<&uuid::Uuid>,
audio_cache: &HashMap<uuid::Uuid, Vec<ClipInstance>>,
) -> Option<(ClipDragType, uuid::Uuid)> {
let context_layers = document.context_layers(editing_clip_id);
let rows = build_timeline_rows(&context_layers);
@ -952,7 +827,14 @@ impl TimelinePane {
};
let _layer_data = layer.layer();
let clip_instances = layer_clips(layer, audio_cache);
let clip_instances: &[ClipInstance] = match layer {
lightningbeam_core::layer::AnyLayer::Vector(vl) => &vl.clip_instances,
lightningbeam_core::layer::AnyLayer::Audio(al) => &al.clip_instances,
lightningbeam_core::layer::AnyLayer::Video(vl) => &vl.clip_instances,
lightningbeam_core::layer::AnyLayer::Effect(el) => &el.clip_instances,
lightningbeam_core::layer::AnyLayer::Group(_) => &[],
lightningbeam_core::layer::AnyLayer::Raster(_) => &[],
};
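// Sketch: the reverted code repeats this six-arm match at every call site; a
// cache-free variant of the removed layer_clips helper would collapse the
// duplication (assuming only the AnyLayer variants visible in this diff):
fn doc_layer_clips(layer: &AnyLayer) -> &[ClipInstance] {
    match layer {
        AnyLayer::Vector(l) => &l.clip_instances,
        AnyLayer::Audio(l) => &l.clip_instances,
        AnyLayer::Video(l) => &l.clip_instances,
        AnyLayer::Effect(l) => &l.clip_instances,
        AnyLayer::Group(_) | AnyLayer::Raster(_) => &[],
    }
}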
// Check each clip instance
let stacking = compute_clip_stacking(document, layer, clip_instances);
@ -2065,7 +1947,6 @@ impl TimelinePane {
waveform_stereo: bool,
context_layers: &[&lightningbeam_core::layer::AnyLayer],
video_manager: &std::sync::Arc<std::sync::Mutex<lightningbeam_core::video::VideoManager>>,
audio_cache: &HashMap<uuid::Uuid, Vec<ClipInstance>>,
) -> Vec<(egui::Rect, uuid::Uuid, f64, f64)> {
let painter = ui.painter();
@ -2449,8 +2330,8 @@ impl TimelinePane {
bright_teal.a() as f32 / 255.0,
];
for child in &g.children {
if let AnyLayer::Audio(_) = child {
for ci in layer_clips(child, &audio_cache) {
if let AnyLayer::Audio(al) = child {
for ci in &al.clip_instances {
let audio_clip = match document.get_audio_clip(&ci.clip_id) {
Some(c) => c,
None => continue,
@ -2560,7 +2441,14 @@ impl TimelinePane {
};
// Draw clip instances for this layer
let clip_instances = layer_clips(layer, &audio_cache);
let clip_instances: &[ClipInstance] = match layer {
lightningbeam_core::layer::AnyLayer::Vector(vl) => &vl.clip_instances,
lightningbeam_core::layer::AnyLayer::Audio(al) => &al.clip_instances,
lightningbeam_core::layer::AnyLayer::Video(vl) => &vl.clip_instances,
lightningbeam_core::layer::AnyLayer::Effect(el) => &el.clip_instances,
lightningbeam_core::layer::AnyLayer::Group(_) => &[],
lightningbeam_core::layer::AnyLayer::Raster(_) => &[],
};
// For moves, precompute the clamped offset so all selected clips move uniformly
let group_move_offset = if self.clip_drag_state == Some(ClipDragType::Move) {
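// Sketch of the clamped uniform move described above: clamp the pointer delta
// once against every selected clip's start so no clip crosses t = 0, then
// apply the same offset to all of them. Names and the t >= 0 bound are
// assumptions, not taken from this diff.
fn group_move_offset(raw_delta: f64, selected_starts: &[f64]) -> f64 {
    selected_starts
        .iter()
        .fold(raw_delta, |d, &start| d.max(-start)) // earliest clip stops at zero
}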
@ -3360,7 +3248,6 @@ impl TimelinePane {
audio_controller: Option<&std::sync::Arc<std::sync::Mutex<daw_backend::EngineController>>>,
context_layers: &[&lightningbeam_core::layer::AnyLayer],
editing_clip_id: Option<&uuid::Uuid>,
audio_cache: &HashMap<uuid::Uuid, Vec<ClipInstance>>,
) {
// Only allocate content area (ruler + layers) with click and drag
let content_response = ui.allocate_rect(
@ -3430,7 +3317,14 @@ impl TimelinePane {
let _layer_data = layer.layer();
// Get clip instances for this layer
let clip_instances = layer_clips(layer, &audio_cache);
let clip_instances: &[ClipInstance] = match layer {
lightningbeam_core::layer::AnyLayer::Vector(vl) => &vl.clip_instances,
lightningbeam_core::layer::AnyLayer::Audio(al) => &al.clip_instances,
lightningbeam_core::layer::AnyLayer::Video(vl) => &vl.clip_instances,
lightningbeam_core::layer::AnyLayer::Effect(el) => &el.clip_instances,
lightningbeam_core::layer::AnyLayer::Group(_) => &[],
lightningbeam_core::layer::AnyLayer::Raster(_) => &[],
};
// Check if click is within any clip instance
let click_stacking = compute_clip_stacking(document, layer, clip_instances);
@ -3716,7 +3610,6 @@ impl TimelinePane {
content_rect,
header_rect,
editing_clip_id,
&audio_cache,
) {
// If this clip is not selected, select it (respecting shift key)
if !selection.contains_clip_instance(&clip_id) {
@ -3770,7 +3663,7 @@ impl TimelinePane {
HashMap::new();
// Iterate through all layers (including group children) to find selected clip instances
for (layer, clip_instances) in all_layer_clip_instances(context_layers, &audio_cache) {
for (layer, clip_instances) in all_layer_clip_instances(context_layers) {
let layer_id = layer.id();
// Find selected clip instances in this layer
for clip_instance in clip_instances {
@ -3812,7 +3705,7 @@ impl TimelinePane {
> = HashMap::new();
// Iterate through all layers (including group children) to find selected clip instances
for (layer, clip_instances) in all_layer_clip_instances(context_layers, &audio_cache) {
for (layer, clip_instances) in all_layer_clip_instances(context_layers) {
let layer_id = layer.id();
// Find selected clip instances in this layer
@ -3937,7 +3830,7 @@ impl TimelinePane {
ClipDragType::LoopExtendRight => {
let mut layer_loops: HashMap<uuid::Uuid, Vec<lightningbeam_core::actions::loop_clip_instances::LoopEntry>> = HashMap::new();
for (layer, clip_instances) in all_layer_clip_instances(context_layers, &audio_cache) {
for (layer, clip_instances) in all_layer_clip_instances(context_layers) {
let layer_id = layer.id();
for clip_instance in clip_instances {
@ -4003,7 +3896,7 @@ impl TimelinePane {
// Extend loop_before (pre-loop region)
let mut layer_loops: HashMap<uuid::Uuid, Vec<lightningbeam_core::actions::loop_clip_instances::LoopEntry>> = HashMap::new();
for (layer, clip_instances) in all_layer_clip_instances(context_layers, &audio_cache) {
for (layer, clip_instances) in all_layer_clip_instances(context_layers) {
let layer_id = layer.id();
for clip_instance in clip_instances {
@ -4247,7 +4140,6 @@ impl TimelinePane {
content_rect,
header_rect,
editing_clip_id,
&audio_cache,
) {
match drag_type {
ClipDragType::TrimLeft | ClipDragType::TrimRight => {
@ -4528,31 +4420,17 @@ impl PaneRenderer for TimelinePane {
// Use virtual row count (includes expanded group children) for height calculations
let layer_count = build_timeline_rows(&context_layers).len();
// Build audio clip cache from backend snapshot (backend-as-source-of-truth for audio).
// Uses doc UUIDs via reverse lookup of clip_instance_to_backend_map so that selection
// and action dispatch continue to work with doc UUIDs.
// Falls back to AudioLayer::clip_instances for layers with no snapshot data yet
// (e.g., layers where recording is in progress but not yet finalized).
let audio_cache: HashMap<uuid::Uuid, Vec<ClipInstance>> =
if let Some(snap_arc) = shared.clip_snapshot.as_ref() {
if let Ok(snap) = snap_arc.read() {
build_audio_clip_cache(
&snap,
shared.layer_to_track_map,
document,
shared.clip_instance_to_backend_map,
)
} else {
HashMap::new()
}
} else {
HashMap::new()
};
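// Sketch of the same read with a non-blocking variant: RwLock::read fails only
// on poisoning, while try_read also declines to wait behind the audio-thread
// writer, which can matter on a UI thread (an assumed trade-off, not part of
// this change):
let audio_cache: HashMap<uuid::Uuid, Vec<ClipInstance>> = shared
    .clip_snapshot
    .as_ref()
    .and_then(|arc| arc.try_read().ok())
    .map(|snap| build_audio_clip_cache(&snap, shared.layer_to_track_map, document, shared.clip_instance_to_backend_map))
    .unwrap_or_default();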
// Calculate project duration from last clip endpoint across all layers
let mut max_endpoint: f64 = 10.0; // Default minimum duration
for &layer in &context_layers {
let clip_instances = layer_clips(layer, &audio_cache);
let clip_instances: &[ClipInstance] = match layer {
lightningbeam_core::layer::AnyLayer::Vector(vl) => &vl.clip_instances,
lightningbeam_core::layer::AnyLayer::Audio(al) => &al.clip_instances,
lightningbeam_core::layer::AnyLayer::Video(vl) => &vl.clip_instances,
lightningbeam_core::layer::AnyLayer::Effect(el) => &el.clip_instances,
lightningbeam_core::layer::AnyLayer::Group(_) => &[],
lightningbeam_core::layer::AnyLayer::Raster(_) => &[],
};
for clip_instance in clip_instances {
let clip_duration = effective_clip_duration(document, layer, clip_instance);
@ -4621,7 +4499,7 @@ impl PaneRenderer for TimelinePane {
// Render layer rows with clipping
ui.set_clip_rect(content_rect.intersect(original_clip_rect));
let video_clip_hovers = self.render_layers(ui, content_rect, shared.theme, document, shared.active_layer_id, shared.focus, shared.selection, shared.midi_event_cache, shared.raw_audio_cache, shared.waveform_gpu_dirty, shared.target_format, shared.waveform_stereo, &context_layers, shared.video_manager, &audio_cache);
let video_clip_hovers = self.render_layers(ui, content_rect, shared.theme, document, shared.active_layer_id, shared.focus, shared.selection, shared.midi_event_cache, shared.raw_audio_cache, shared.waveform_gpu_dirty, shared.target_format, shared.waveform_stereo, &context_layers, shared.video_manager);
// Render playhead on top (clip to timeline area)
ui.set_clip_rect(timeline_rect.intersect(original_clip_rect));
@ -4648,7 +4526,6 @@ impl PaneRenderer for TimelinePane {
shared.audio_controller,
&context_layers,
editing_clip_id.as_ref(),
&audio_cache,
);
// Context menu: detect right-click on clips or empty timeline space
@ -4657,7 +4534,7 @@ impl PaneRenderer for TimelinePane {
if secondary_clicked {
if let Some(pos) = ui.input(|i| i.pointer.interact_pos()) {
if content_rect.contains(pos) {
if let Some((_drag_type, clip_id)) = self.detect_clip_at_pointer(pos, document, content_rect, layer_headers_rect, editing_clip_id.as_ref(), &audio_cache) {
if let Some((_drag_type, clip_id)) = self.detect_clip_at_pointer(pos, document, content_rect, layer_headers_rect, editing_clip_id.as_ref()) {
// Right-clicked on a clip
if !shared.selection.contains_clip_instance(&clip_id) {
shared.selection.select_only_clip_instance(clip_id);
@ -4685,7 +4562,14 @@ impl PaneRenderer for TimelinePane {
let mut enabled = false;
if let Some(layer_id) = *shared.active_layer_id {
if let Some(layer) = document.get_layer(&layer_id) {
let instances = layer_clips(layer, &audio_cache);
let instances: &[ClipInstance] = match layer {
AnyLayer::Vector(vl) => &vl.clip_instances,
AnyLayer::Audio(al) => &al.clip_instances,
AnyLayer::Video(vl) => &vl.clip_instances,
AnyLayer::Effect(el) => &el.clip_instances,
AnyLayer::Group(_) => &[],
AnyLayer::Raster(_) => &[],
};
for inst in instances {
if !shared.selection.contains_clip_instance(&inst.id) { continue; }
if let Some(dur) = document.get_clip_duration(&inst.clip_id) {
@ -4709,7 +4593,14 @@ impl PaneRenderer for TimelinePane {
let mut enabled = false;
if let Some(layer_id) = *shared.active_layer_id {
if let Some(layer) = document.get_layer(&layer_id) {
let instances = layer_clips(layer, &audio_cache);
let instances: &[ClipInstance] = match layer {
AnyLayer::Vector(vl) => &vl.clip_instances,
AnyLayer::Audio(al) => &al.clip_instances,
AnyLayer::Video(vl) => &vl.clip_instances,
AnyLayer::Effect(el) => &el.clip_instances,
AnyLayer::Group(_) => &[],
AnyLayer::Raster(_) => &[],
};
// Check each selected clip
enabled = instances.iter()
.filter(|ci| shared.selection.contains_clip_instance(&ci.id))