From 516960062a53ba961bf924ed479af33a66467cce Mon Sep 17 00:00:00 2001 From: Skyler Lehmkuhl Date: Sun, 1 Mar 2026 11:54:41 -0500 Subject: [PATCH 1/6] Drag layers to reorder --- .../lightningbeam-core/src/actions/mod.rs | 2 + .../src/actions/move_layer.rs | 137 ++++++ .../lightningbeam-editor/src/main.rs | 2 +- .../src/panes/timeline.rs | 397 ++++++++++++++++-- 4 files changed, 509 insertions(+), 29 deletions(-) create mode 100644 lightningbeam-ui/lightningbeam-core/src/actions/move_layer.rs diff --git a/lightningbeam-ui/lightningbeam-core/src/actions/mod.rs b/lightningbeam-ui/lightningbeam-core/src/actions/mod.rs index 66cb4ff..1d840ae 100644 --- a/lightningbeam-ui/lightningbeam-core/src/actions/mod.rs +++ b/lightningbeam-ui/lightningbeam-core/src/actions/mod.rs @@ -31,6 +31,7 @@ pub mod convert_to_movie_clip; pub mod region_split; pub mod toggle_group_expansion; pub mod group_layers; +pub mod move_layer; pub use add_clip_instance::AddClipInstanceAction; pub use add_effect::AddEffectAction; @@ -60,3 +61,4 @@ pub use convert_to_movie_clip::ConvertToMovieClipAction; pub use region_split::RegionSplitAction; pub use toggle_group_expansion::ToggleGroupExpansionAction; pub use group_layers::GroupLayersAction; +pub use move_layer::MoveLayerAction; diff --git a/lightningbeam-ui/lightningbeam-core/src/actions/move_layer.rs b/lightningbeam-ui/lightningbeam-core/src/actions/move_layer.rs new file mode 100644 index 0000000..f210182 --- /dev/null +++ b/lightningbeam-ui/lightningbeam-core/src/actions/move_layer.rs @@ -0,0 +1,137 @@ +use crate::action::Action; +use crate::document::Document; +use crate::layer::AnyLayer; +use uuid::Uuid; + +/// Action that moves one or more layers to a new position, possibly changing their parent group. +/// All layers are inserted contiguously into the same target parent. +/// Handles batch moves atomically: removes all, then inserts all, so indices stay consistent. +pub struct MoveLayerAction { + /// (layer_id, old_parent_id) for each layer to move, in visual order (top to bottom) + layers: Vec<(Uuid, Option)>, + new_parent_id: Option, + /// Insertion index in the new parent's children vec AFTER all dragged layers have been removed + new_base_index: usize, + /// Stored during execute for rollback: (layer, old_parent_id, old_index_in_parent) + removed: Vec<(AnyLayer, Option, usize)>, +} + +impl MoveLayerAction { + pub fn new( + layers: Vec<(Uuid, Option)>, + new_parent_id: Option, + new_base_index: usize, + ) -> Self { + Self { + layers, + new_parent_id, + new_base_index, + removed: Vec::new(), + } + } +} + +fn get_parent_children( + document: &mut Document, + parent_id: Option, +) -> Result<&mut Vec, String> { + match parent_id { + None => Ok(&mut document.root.children), + Some(id) => { + let layer = document + .root + .get_child_mut(&id) + .ok_or_else(|| format!("Parent group {} not found", id))?; + match layer { + AnyLayer::Group(g) => Ok(&mut g.children), + _ => Err(format!("Layer {} is not a group", id)), + } + } + } +} + +impl Action for MoveLayerAction { + fn execute(&mut self, document: &mut Document) -> Result<(), String> { + self.removed.clear(); + + // Phase 1: Remove all layers from their old parents. + // Group removals by parent, then remove back-to-front within each parent. + // Collect (layer_id, old_parent_id) with their current index. 
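        // A worked example of the back-to-front rule, with hypothetical indices
        // rather than values taken from this patch: if two dragged layers sit at
        // indices 1 and 3 of the same parent, index 3 must be removed first.
        // Removing index 1 first would shift the old index 3 down to 2, so the
        // saved index would no longer point at the intended layer.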
+        let mut removals: Vec<(Uuid, Option<Uuid>, usize)> = Vec::new();
+        for (layer_id, old_parent_id) in &self.layers {
+            let children = get_parent_children(document, *old_parent_id)?;
+            let idx = children.iter().position(|l| l.id() == *layer_id)
+                .ok_or_else(|| format!("Layer {} not found in parent", layer_id))?;
+            removals.push((*layer_id, *old_parent_id, idx));
+        }
+
+        // Sort by parent, then by index descending within each parent, so removals
+        // happen back-to-front and earlier removals never shift later indices.
+        removals.sort_by(|a, b| {
+            a.1.cmp(&b.1).then(b.2.cmp(&a.2))
+        });
+
+        let mut removed_layers: Vec<(Uuid, AnyLayer, Option<Uuid>, usize)> = Vec::new();
+        for (layer_id, old_parent_id, idx) in &removals {
+            let children = get_parent_children(document, *old_parent_id)?;
+            let layer = children.remove(*idx);
+            removed_layers.push((*layer_id, layer, *old_parent_id, *idx));
+        }
+
+        // Phase 2: Insert all at new parent, in visual order (self.layers order).
+        // self.new_base_index is the index in the post-removal children vec.
+        let new_children = get_parent_children(document, self.new_parent_id)?;
+        let base = self.new_base_index.min(new_children.len());
+
+        // Insert in forward visual order, all at `base`. Each insert pushes the previous
+        // one to a higher children index. Since the timeline displays children in reverse,
+        // a higher children index = visually higher. So the first visual layer (layers[0])
+        // ends up at the highest children index = visually topmost. Correct.
+        for (layer_id, _) in self.layers.iter() {
+            // Find this layer in removed_layers
+            let pos = removed_layers.iter().position(|(id, _, _, _)| id == layer_id)
+                .ok_or_else(|| format!("Layer {} missing from removed set", layer_id))?;
+            let (_, layer, old_parent_id, old_idx) = removed_layers.remove(pos);
+            self.removed.push((layer.clone(), old_parent_id, old_idx));
+
+            let new_children = get_parent_children(document, self.new_parent_id)?;
+            let insert_at = base.min(new_children.len());
+            new_children.insert(insert_at, layer);
+        }
+
+        Ok(())
+    }
+
+    fn rollback(&mut self, document: &mut Document) -> Result<(), String> {
+        if self.removed.is_empty() {
+            return Err("Cannot rollback: action was not executed".to_string());
+        }
+
+        // Phase 1: Remove all layers from new parent (back-to-front by insertion order).
+        for (layer_id, _) in self.layers.iter().rev() {
+            let new_children = get_parent_children(document, self.new_parent_id)?;
+            let pos = new_children.iter().position(|l| l.id() == *layer_id)
+                .ok_or_else(|| format!("Layer {} not found in new parent for rollback", layer_id))?;
+            new_children.remove(pos);
+        }
+
+        // Phase 2: Re-insert at old positions, sorted by (parent, index) ascending.
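        // A matching sketch for the ascending re-insert, again with hypothetical
        // indices: restoring layers whose saved positions were 1 and 3 in the same
        // parent must put index 1 back first; once it is in place, inserting at
        // index 3 lands the second layer exactly where it started.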
+ let mut restore: Vec<(AnyLayer, Option, usize)> = self.removed.drain(..).collect(); + restore.sort_by(|a, b| a.1.cmp(&b.1).then(a.2.cmp(&b.2))); + + for (layer, old_parent_id, old_idx) in restore { + let children = get_parent_children(document, old_parent_id)?; + let idx = old_idx.min(children.len()); + children.insert(idx, layer); + } + + Ok(()) + } + + fn description(&self) -> String { + if self.layers.len() == 1 { + "Move layer".to_string() + } else { + format!("Move {} layers", self.layers.len()) + } + } +} diff --git a/lightningbeam-ui/lightningbeam-editor/src/main.rs b/lightningbeam-ui/lightningbeam-editor/src/main.rs index 80819ac..ddc8020 100644 --- a/lightningbeam-ui/lightningbeam-editor/src/main.rs +++ b/lightningbeam-ui/lightningbeam-editor/src/main.rs @@ -6016,7 +6016,7 @@ fn render_pane( } // Icon - if let Some(icon) = ctx.icon_cache.get_or_load(tab_type, ui.ctx()) { + if let Some(icon) = ctx.shared.icon_cache.get_or_load(tab_type, ui.ctx()) { let icon_texture_id = icon.id(); ui.painter().image( icon_texture_id, diff --git a/lightningbeam-ui/lightningbeam-editor/src/panes/timeline.rs b/lightningbeam-ui/lightningbeam-editor/src/panes/timeline.rs index fd143d9..c9cde83 100644 --- a/lightningbeam-ui/lightningbeam-editor/src/panes/timeline.rs +++ b/lightningbeam-ui/lightningbeam-editor/src/panes/timeline.rs @@ -145,6 +145,20 @@ enum RecordingType { Webcam, } +/// State for an in-progress layer header drag-to-reorder operation. +struct LayerDragState { + /// IDs of the layers being dragged (in visual order, top to bottom) + layer_ids: Vec, + /// Original parent group IDs for each dragged layer (parallel to layer_ids) + source_parent_ids: Vec>, + /// Current gap position in the filtered (dragged-layers-removed) row list + gap_row_index: usize, + /// Current mouse Y in screen coordinates (for floating header rendering) + current_mouse_y: f32, + /// Y offset from the top of the topmost dragged row to the mousedown point + grab_offset_y: f32, +} + pub struct TimelinePane { /// Horizontal zoom level (pixels per second) pixels_per_second: f32, @@ -189,6 +203,12 @@ pub struct TimelinePane { /// Cached egui textures for video thumbnail strip rendering. 
/// Key: (clip_id, thumbnail_timestamp_millis) → TextureHandle video_thumbnail_textures: std::collections::HashMap<(uuid::Uuid, i64), egui::TextureHandle>, + + /// Layer header drag-to-reorder state (None if not dragging a layer) + layer_drag: Option, + + /// Cached mousedown position in header area (for drag threshold detection) + header_mousedown_pos: Option, } /// Check if a clip type can be dropped on a layer type @@ -428,6 +448,8 @@ impl TimelinePane { time_display_format: TimeDisplayFormat::Seconds, waveform_upload_progress: std::collections::HashMap::new(), video_thumbnail_textures: std::collections::HashMap::new(), + layer_drag: None, + header_mousedown_pos: None, } } @@ -1160,11 +1182,27 @@ impl TimelinePane { let secondary_text_color = egui::Color32::from_gray(150); // Build virtual row list (accounts for group expansion) - let rows = build_timeline_rows(context_layers); + let all_rows = build_timeline_rows(context_layers); + + // When dragging layers, filter them out and compute gap-adjusted positions + let drag_layer_ids: Vec = self.layer_drag.as_ref() + .map(|d| d.layer_ids.clone()).unwrap_or_default(); + let drag_count = drag_layer_ids.len(); + let gap_row_index = self.layer_drag.as_ref().map(|d| d.gap_row_index); + + // Build filtered row list (excluding dragged layers) + let rows: Vec<&TimelineRow> = all_rows.iter() + .filter(|r| !drag_layer_ids.contains(&r.layer_id())) + .collect(); // Draw layer headers from virtual row list - for (i, row) in rows.iter().enumerate() { - let y = rect.min.y + i as f32 * LAYER_HEIGHT - self.viewport_scroll_y; + for (filtered_i, row) in rows.iter().enumerate() { + // Compute Y with gap offset: rows at or after the gap shift down by drag_count * LAYER_HEIGHT + let visual_i = match gap_row_index { + Some(gap) if filtered_i >= gap => filtered_i + drag_count, + _ => filtered_i, + }; + let y = rect.min.y + visual_i as f32 * LAYER_HEIGHT - self.viewport_scroll_y; // Skip if layer is outside visible area if y + LAYER_HEIGHT < rect.min.y || y > rect.max.y { @@ -1550,6 +1588,101 @@ impl TimelinePane { ); } + // Draw floating dragged layer headers at mouse position with drop shadow + if let Some(ref drag_state) = self.layer_drag { + // Collect the dragged rows in order + let dragged_rows: Vec<&TimelineRow> = drag_state.layer_ids.iter() + .filter_map(|did| all_rows.iter().find(|r| r.layer_id() == *did)) + .collect(); + + let float_top_y = drag_state.current_mouse_y - drag_state.grab_offset_y; + + for (di, dragged_row) in dragged_rows.iter().enumerate() { + let float_y = float_top_y + di as f32 * LAYER_HEIGHT; + let float_rect = egui::Rect::from_min_size( + egui::pos2(rect.min.x, float_y), + egui::vec2(LAYER_HEADER_WIDTH, LAYER_HEIGHT), + ); + + // Drop shadow (offset down-right, semi-transparent black) + let shadow_rect = float_rect.translate(egui::vec2(3.0, 4.0)); + ui.painter().rect_filled(shadow_rect, 2.0, egui::Color32::from_black_alpha(80)); + + // Background (active/selected color) + ui.painter().rect_filled(float_rect, 0.0, active_color); + + // Layer info + let drag_indent = match dragged_row { + TimelineRow::GroupChild { depth, .. } => *depth as f32 * 16.0, + TimelineRow::CollapsedGroup { depth, .. 
} => *depth as f32 * 16.0, + _ => 0.0, + }; + let (drag_name, drag_type_str, drag_type_color) = match dragged_row { + TimelineRow::Normal(layer) => { + let (lt, tc) = match layer { + AnyLayer::Vector(_) => ("Vector", egui::Color32::from_rgb(255, 180, 100)), + AnyLayer::Audio(al) => match al.audio_layer_type { + AudioLayerType::Midi => ("MIDI", egui::Color32::from_rgb(100, 255, 150)), + AudioLayerType::Sampled => ("Audio", egui::Color32::from_rgb(100, 180, 255)), + }, + AnyLayer::Video(_) => ("Video", egui::Color32::from_rgb(180, 100, 255)), + AnyLayer::Effect(_) => ("Effect", egui::Color32::from_rgb(255, 100, 180)), + AnyLayer::Group(_) => ("Group", egui::Color32::from_rgb(0, 180, 180)), + }; + (layer.layer().name.clone(), lt, tc) + } + TimelineRow::CollapsedGroup { group, .. } => { + (group.layer.name.clone(), "Group", egui::Color32::from_rgb(0, 180, 180)) + } + TimelineRow::GroupChild { child, .. } => { + let (lt, tc) = match child { + AnyLayer::Vector(_) => ("Vector", egui::Color32::from_rgb(255, 180, 100)), + AnyLayer::Audio(al) => match al.audio_layer_type { + AudioLayerType::Midi => ("MIDI", egui::Color32::from_rgb(100, 255, 150)), + AudioLayerType::Sampled => ("Audio", egui::Color32::from_rgb(100, 180, 255)), + }, + AnyLayer::Video(_) => ("Video", egui::Color32::from_rgb(180, 100, 255)), + AnyLayer::Effect(_) => ("Effect", egui::Color32::from_rgb(255, 100, 180)), + AnyLayer::Group(_) => ("Group", egui::Color32::from_rgb(0, 180, 180)), + }; + (child.layer().name.clone(), lt, tc) + } + }; + + // Color indicator bar + let indicator_rect = egui::Rect::from_min_size( + float_rect.min + egui::vec2(drag_indent, 0.0), + egui::vec2(4.0, LAYER_HEIGHT), + ); + ui.painter().rect_filled(indicator_rect, 0.0, drag_type_color); + + // Layer name + let name_x = 10.0 + drag_indent; + ui.painter().text( + float_rect.min + egui::vec2(name_x, 10.0), + egui::Align2::LEFT_TOP, + &drag_name, + egui::FontId::proportional(14.0), + text_color, + ); + + // Type label + ui.painter().text( + float_rect.min + egui::vec2(name_x, 28.0), + egui::Align2::LEFT_TOP, + drag_type_str, + egui::FontId::proportional(11.0), + secondary_text_color, + ); + + // Separator line at bottom + ui.painter().line_segment( + [egui::pos2(float_rect.min.x, float_rect.max.y), egui::pos2(float_rect.max.x, float_rect.max.y)], + egui::Stroke::new(1.0, egui::Color32::from_gray(20)), + ); + } + } + // Right border for header column ui.painter().line_segment( [ @@ -1602,11 +1735,25 @@ impl TimelinePane { } // Build virtual row list (accounts for group expansion) - let rows = build_timeline_rows(context_layers); + let all_rows = build_timeline_rows(context_layers); + + // When dragging layers, filter them out and compute gap-adjusted positions + let drag_layer_ids_content: Vec = self.layer_drag.as_ref() + .map(|d| d.layer_ids.clone()).unwrap_or_default(); + let drag_count_content = drag_layer_ids_content.len(); + let gap_row_index_content = self.layer_drag.as_ref().map(|d| d.gap_row_index); + + let rows: Vec<&TimelineRow> = all_rows.iter() + .filter(|r| !drag_layer_ids_content.contains(&r.layer_id())) + .collect(); // Draw layer rows from virtual row list - for (i, row) in rows.iter().enumerate() { - let y = rect.min.y + i as f32 * LAYER_HEIGHT - self.viewport_scroll_y; + for (filtered_i, row) in rows.iter().enumerate() { + let visual_i = match gap_row_index_content { + Some(gap) if filtered_i >= gap => filtered_i + drag_count_content, + _ => filtered_i, + }; + let y = rect.min.y + visual_i as f32 * LAYER_HEIGHT - self.viewport_scroll_y; // Skip 
if layer is outside visible area if y + LAYER_HEIGHT < rect.min.y || y > rect.max.y { @@ -2766,6 +2913,18 @@ impl TimelinePane { ); } + // Draw gap slots in content area for layer drag (matching active row style) + if let Some(gap) = gap_row_index_content { + for di in 0..drag_count_content { + let gap_y = rect.min.y + (gap + di) as f32 * LAYER_HEIGHT - self.viewport_scroll_y; + let gap_rect = egui::Rect::from_min_size( + egui::pos2(rect.min.x, gap_y), + egui::vec2(rect.width(), LAYER_HEIGHT), + ); + painter.rect_filled(gap_rect, 0.0, active_color); + } + } + // Clean up stale video thumbnail textures for clips no longer visible self.video_thumbnail_textures.retain(|&(clip_id, _), _| visible_video_clip_ids.contains(&clip_id)); @@ -2793,7 +2952,6 @@ impl TimelinePane { context_layers: &[&lightningbeam_core::layer::AnyLayer], editing_clip_id: Option<&uuid::Uuid>, ) { - // Don't allocate the header area for input - let widgets handle it directly // Only allocate content area (ruler + layers) with click and drag let content_response = ui.allocate_rect( egui::Rect::from_min_size( @@ -2913,33 +3071,216 @@ impl TimelinePane { } } - // Handle layer header selection (only if no control widget was clicked) - // Check for clicks in header area using direct input query - let header_clicked = ui.input(|i| { - i.pointer.button_clicked(egui::PointerButton::Primary) && - i.pointer.interact_pos().map_or(false, |pos| header_rect.contains(pos)) - }); + // Layer header drag-to-reorder (manual pointer tracking, no allocate_rect) + let pointer_pos = ui.input(|i| i.pointer.hover_pos()); + let primary_down = ui.input(|i| i.pointer.button_down(egui::PointerButton::Primary)); + let primary_pressed = ui.input(|i| i.pointer.button_pressed(egui::PointerButton::Primary)); + let primary_released = ui.input(|i| i.pointer.button_released(egui::PointerButton::Primary)); - if header_clicked && !alt_held && !clicked_clip_instance && !self.layer_control_clicked { - if let Some(pos) = ui.input(|i| i.pointer.interact_pos()) { - let relative_y = pos.y - header_rect.min.y + self.viewport_scroll_y; - let clicked_layer_index = (relative_y / LAYER_HEIGHT) as usize; - - // Get the layer at this index (using virtual rows for group support) - let header_rows = build_timeline_rows(context_layers); - if clicked_layer_index < header_rows.len() { - let layer_id = header_rows[clicked_layer_index].layer_id(); - let clicked_parent = header_rows[clicked_layer_index].parent_id(); - *active_layer_id = Some(layer_id); - if shift_held { - shift_toggle_layer(focus, layer_id, clicked_parent, &header_rows); - } else { - *focus = lightningbeam_core::selection::FocusSelection::Layers(vec![layer_id]); + // Handle layer header selection on mousedown (immediate, not on release) + if primary_pressed && !alt_held && !self.layer_control_clicked { + if let Some(pos) = pointer_pos { + if header_rect.contains(pos) { + let relative_y = pos.y - header_rect.min.y + self.viewport_scroll_y; + let clicked_layer_index = (relative_y / LAYER_HEIGHT) as usize; + let header_rows = build_timeline_rows(context_layers); + if clicked_layer_index < header_rows.len() { + let layer_id = header_rows[clicked_layer_index].layer_id(); + let clicked_parent = header_rows[clicked_layer_index].parent_id(); + *active_layer_id = Some(layer_id); + if shift_held { + shift_toggle_layer(focus, layer_id, clicked_parent, &header_rows); + } else { + // Only change selection if the clicked layer isn't already selected + let already_selected = match focus { + 
lightningbeam_core::selection::FocusSelection::Layers(ids) => ids.contains(&layer_id), + _ => false, + }; + if !already_selected { + *focus = lightningbeam_core::selection::FocusSelection::Layers(vec![layer_id]); + } + } } + // Also record for potential drag + self.header_mousedown_pos = Some(pos); } } } + // Start drag after movement threshold (4px) + const LAYER_DRAG_THRESHOLD: f32 = 4.0; + if self.layer_drag.is_none() && !self.layer_control_clicked { + if let (Some(down_pos), Some(cur_pos)) = (self.header_mousedown_pos, pointer_pos) { + if primary_down && (cur_pos - down_pos).length() > LAYER_DRAG_THRESHOLD { + let relative_y = down_pos.y - header_rect.min.y + self.viewport_scroll_y; + let clicked_index = (relative_y / LAYER_HEIGHT) as usize; + let drag_rows = build_timeline_rows(context_layers); + if clicked_index < drag_rows.len() { + // Collect all selected layer IDs (in visual order) + let selected_ids: Vec = match focus { + lightningbeam_core::selection::FocusSelection::Layers(ids) => { + // Filter to only IDs present in the row list, in visual order + drag_rows.iter() + .filter(|r| ids.contains(&r.layer_id())) + .map(|r| r.layer_id()) + .collect() + } + _ => vec![drag_rows[clicked_index].layer_id()], + }; + // If clicked layer isn't in selection, just drag that one + let clicked_id = drag_rows[clicked_index].layer_id(); + let layer_ids = if selected_ids.contains(&clicked_id) { + selected_ids + } else { + vec![clicked_id] + }; + + // Find source parent IDs for each dragged layer + let source_parent_ids: Vec> = layer_ids.iter() + .map(|lid| drag_rows.iter().find(|r| r.layer_id() == *lid).and_then(|r| r.parent_id())) + .collect(); + + // Find the visual index of the first dragged layer + let first_drag_visual_idx = drag_rows.iter() + .position(|r| r.layer_id() == layer_ids[0]) + .unwrap_or(0); + + // Compute gap index in the filtered list + let gap_index = drag_rows.iter() + .take(first_drag_visual_idx) + .filter(|r| !layer_ids.contains(&r.layer_id())) + .count(); + + // Grab offset: ensure the clicked layer stays under the cursor + // in the stacked floating header view + let clicked_row_y = header_rect.min.y + clicked_index as f32 * LAYER_HEIGHT - self.viewport_scroll_y; + let clicked_within_drag = layer_ids.iter().position(|id| *id == clicked_id).unwrap_or(0); + let grab_offset = down_pos.y - clicked_row_y + clicked_within_drag as f32 * LAYER_HEIGHT; + + self.layer_drag = Some(LayerDragState { + layer_ids, + source_parent_ids, + gap_row_index: gap_index, + current_mouse_y: cur_pos.y, + grab_offset_y: grab_offset, + }); + } + self.header_mousedown_pos = None; // consumed + } + } + } + + // Update gap position and mouse Y during layer drag + if let Some(ref mut drag) = self.layer_drag { + if primary_down { + if let Some(pos) = pointer_pos { + drag.current_mouse_y = pos.y; + let relative_y = pos.y - header_rect.min.y + self.viewport_scroll_y; + let all_rows = build_timeline_rows(context_layers); + let filtered_count = all_rows.iter() + .filter(|r| !drag.layer_ids.contains(&r.layer_id())) + .count(); + let target = ((relative_y / LAYER_HEIGHT) as usize).min(filtered_count); + drag.gap_row_index = target; + } + ui.ctx().request_repaint(); + } + } + + // Drop layers on mouse release + if self.layer_drag.is_some() && primary_released { + let drag = self.layer_drag.take().unwrap(); + + // Build the row list to determine where the gap lands + let drop_rows = build_timeline_rows(context_layers); + let filtered_rows: Vec<&TimelineRow> = drop_rows.iter() + .filter(|r| 
!drag.layer_ids.contains(&r.layer_id())) + .collect(); + + // Determine target parent from the row above the gap + let new_parent_id = if drag.gap_row_index == 0 { + None // top of list = root + } else { + let row_above = &filtered_rows[drag.gap_row_index.min(filtered_rows.len()) - 1]; + row_above.parent_id() + }; + + // Compute insertion index in new parent's children vec AFTER dragged layers are removed. + // Get the new parent's children, filter out all dragged layers, find where the + // row-above falls in that filtered list. + let new_children: Vec = match new_parent_id { + None => context_layers.iter().map(|l| l.id()).collect(), + Some(pid) => { + if let Some(AnyLayer::Group(g)) = document.root.get_child(&pid) { + g.children.iter().map(|l| l.id()).collect() + } else { + vec![] + } + } + }; + let new_children_filtered: Vec = new_children.iter() + .filter(|id| !drag.layer_ids.contains(id)) + .copied() + .collect(); + + let new_base_index = if drag.gap_row_index == 0 { + // Gap at top = visually topmost position. + // Since timeline reverses children, this is the end of the children vec. + new_children_filtered.len() + } else { + let row_above = &filtered_rows[drag.gap_row_index.min(filtered_rows.len()) - 1]; + let above_id = row_above.layer_id(); + if let Some(pos) = new_children_filtered.iter().position(|&id| id == above_id) { + // Insert before it in children vec (visually below = lower children index) + pos + } else { + new_children_filtered.len() + } + }; + + // Build layer list: (layer_id, old_parent_id) in visual order + let layers: Vec<(uuid::Uuid, Option)> = drag.layer_ids.iter() + .zip(drag.source_parent_ids.iter()) + .map(|(id, pid)| (*id, *pid)) + .collect(); + + // Only create action if something actually changed + let anything_changed = layers.iter().enumerate().any(|(i, (lid, old_pid))| { + if *old_pid != new_parent_id { + return true; + } + // Check if position changed within same parent + let old_idx = new_children.iter().position(|id| id == lid); + let target_idx_in_original = if new_base_index < new_children_filtered.len() { + // Find where new_children_filtered[new_base_index] sits in original + new_children.iter().position(|id| *id == new_children_filtered[new_base_index]) + .map(|p| p + i) + } else { + Some(0 + i) // inserting at start of children (end of filtered = start of original) + }; + old_idx != target_idx_in_original + }); + + if anything_changed { + pending_actions.push(Box::new( + lightningbeam_core::actions::MoveLayerAction::new( + layers, + new_parent_id, + new_base_index, + ), + )); + } + } + + // Clear header mousedown if released without starting a drag + if primary_released { + self.header_mousedown_pos = None; + } + // Cancel layer drag if pointer is no longer down + if self.layer_drag.is_some() && !primary_down { + self.layer_drag = None; + } + // Cache mouse position on mousedown (before any dragging) if response.hovered() && ui.input(|i| i.pointer.button_pressed(egui::PointerButton::Primary)) { if let Some(pos) = response.hover_pos() { From 8d8f94a547868f79289cd1f362ec79deb82c423c Mon Sep 17 00:00:00 2001 From: Skyler Lehmkuhl Date: Sun, 1 Mar 2026 12:09:41 -0500 Subject: [PATCH 2/6] Make layer dragging graphics nicer --- .../src/panes/timeline.rs | 122 ++++++++++++++---- 1 file changed, 95 insertions(+), 27 deletions(-) diff --git a/lightningbeam-ui/lightningbeam-editor/src/panes/timeline.rs b/lightningbeam-ui/lightningbeam-editor/src/panes/timeline.rs index c9cde83..e8775c1 100644 --- 
a/lightningbeam-ui/lightningbeam-editor/src/panes/timeline.rs +++ b/lightningbeam-ui/lightningbeam-editor/src/panes/timeline.rs @@ -327,6 +327,43 @@ fn flatten_layer<'a>( } } +/// Paint a soft drop shadow around a rect using gradient meshes (bottom + right + corner). +/// Three non-overlapping quads so alpha doesn't double up. +fn paint_drop_shadow(painter: &egui::Painter, rect: egui::Rect, shadow_size: f32, alpha: u8) { + let c = egui::Color32::from_black_alpha(alpha); + let t = egui::Color32::TRANSPARENT; + let mut mesh = egui::Mesh::default(); + + // Bottom edge: straight down, stops at right edge + let idx = mesh.vertices.len() as u32; + mesh.colored_vertex(rect.left_bottom(), c); // 0 + mesh.colored_vertex(rect.right_bottom(), c); // 1 + mesh.colored_vertex(egui::pos2(rect.right(), rect.bottom() + shadow_size), t); // 2 + mesh.colored_vertex(egui::pos2(rect.left(), rect.bottom() + shadow_size), t); // 3 + mesh.add_triangle(idx, idx + 1, idx + 2); + mesh.add_triangle(idx, idx + 2, idx + 3); + + // Right edge: rightward, stops at bottom edge + let idx = mesh.vertices.len() as u32; + mesh.colored_vertex(rect.right_top(), c); // 0 + mesh.colored_vertex(egui::pos2(rect.right() + shadow_size, rect.top()), t); // 1 + mesh.colored_vertex(egui::pos2(rect.right() + shadow_size, rect.bottom()), t); // 2 + mesh.colored_vertex(rect.right_bottom(), c); // 3 + mesh.add_triangle(idx, idx + 1, idx + 2); + mesh.add_triangle(idx, idx + 2, idx + 3); + + // Bottom-right corner: dark at inner corner, transparent at other three + let idx = mesh.vertices.len() as u32; + mesh.colored_vertex(rect.right_bottom(), c); // 0 + mesh.colored_vertex(egui::pos2(rect.right() + shadow_size, rect.bottom()), t); // 1 + mesh.colored_vertex(egui::pos2(rect.right() + shadow_size, rect.bottom() + shadow_size), t); // 2 + mesh.colored_vertex(egui::pos2(rect.right(), rect.bottom() + shadow_size), t); // 3 + mesh.add_triangle(idx, idx + 1, idx + 2); + mesh.add_triangle(idx, idx + 2, idx + 3); + + painter.add(egui::Shape::mesh(mesh)); +} + /// Shift+click layer selection: toggle a layer in/out of the focus selection, /// enforcing the sibling constraint (all selected layers must share the same parent). fn shift_toggle_layer( @@ -1604,9 +1641,8 @@ impl TimelinePane { egui::vec2(LAYER_HEADER_WIDTH, LAYER_HEIGHT), ); - // Drop shadow (offset down-right, semi-transparent black) - let shadow_rect = float_rect.translate(egui::vec2(3.0, 4.0)); - ui.painter().rect_filled(shadow_rect, 2.0, egui::Color32::from_black_alpha(80)); + // Gradient drop shadow + paint_drop_shadow(ui.painter(), float_rect, 8.0, 60); // Background (active/selected color) ui.painter().rect_filled(float_rect, 0.0, active_color); @@ -1737,23 +1773,62 @@ impl TimelinePane { // Build virtual row list (accounts for group expansion) let all_rows = build_timeline_rows(context_layers); - // When dragging layers, filter them out and compute gap-adjusted positions + // When dragging layers, compute remapped Y positions: + // - Dragged rows render at the gap position + // - Non-dragged rows shift around the gap let drag_layer_ids_content: Vec = self.layer_drag.as_ref() .map(|d| d.layer_ids.clone()).unwrap_or_default(); let drag_count_content = drag_layer_ids_content.len(); let gap_row_index_content = self.layer_drag.as_ref().map(|d| d.gap_row_index); - let rows: Vec<&TimelineRow> = all_rows.iter() - .filter(|r| !drag_layer_ids_content.contains(&r.layer_id())) - .collect(); + // Pre-compute Y position for each row. 
+ // Dragged rows follow the mouse continuously (matching the floating header); + // non-dragged rows snap to discrete positions shifted around the gap. + let drag_float_top_y: Option = self.layer_drag.as_ref() + .map(|d| d.current_mouse_y - d.grab_offset_y); - // Draw layer rows from virtual row list - for (filtered_i, row) in rows.iter().enumerate() { - let visual_i = match gap_row_index_content { - Some(gap) if filtered_i >= gap => filtered_i + drag_count_content, - _ => filtered_i, - }; - let y = rect.min.y + visual_i as f32 * LAYER_HEIGHT - self.viewport_scroll_y; + let row_y_positions: Vec = { + let mut positions = Vec::with_capacity(all_rows.len()); + let mut filtered_i = 0usize; + let mut drag_offset = 0usize; + for row in all_rows.iter() { + if drag_layer_ids_content.contains(&row.layer_id()) { + // Dragged row: continuous Y from mouse position + let base_y = drag_float_top_y.unwrap_or(0.0); + positions.push(base_y + drag_offset as f32 * LAYER_HEIGHT); + drag_offset += 1; + } else { + // Non-dragged row: discrete position, shifted around gap + let visual = match gap_row_index_content { + Some(gap) if filtered_i >= gap => filtered_i + drag_count_content, + _ => filtered_i, + }; + positions.push(rect.min.y + visual as f32 * LAYER_HEIGHT - self.viewport_scroll_y); + filtered_i += 1; + } + } + positions + }; + + // Draw non-dragged rows first, then dragged rows on top (so shadow/content overlaps correctly) + let draw_order: Vec = { + let mut non_dragged: Vec = Vec::new(); + let mut dragged: Vec = Vec::new(); + for (i, row) in all_rows.iter().enumerate() { + if drag_layer_ids_content.contains(&row.layer_id()) { + dragged.push(i); + } else { + non_dragged.push(i); + } + } + non_dragged.extend(dragged); + non_dragged + }; + + for &i in &draw_order { + let row = &all_rows[i]; + let y = row_y_positions[i]; + let is_being_dragged = drag_layer_ids_content.contains(&row.layer_id()); // Skip if layer is outside visible area if y + LAYER_HEIGHT < rect.min.y || y > rect.max.y { @@ -1765,6 +1840,11 @@ impl TimelinePane { egui::vec2(rect.width(), LAYER_HEIGHT), ); + // Drop shadow for dragged rows + if is_being_dragged { + paint_drop_shadow(painter, layer_rect, 8.0, 60); + } + let row_layer_id = row.layer_id(); // Active vs inactive background colors @@ -2913,18 +2993,6 @@ impl TimelinePane { ); } - // Draw gap slots in content area for layer drag (matching active row style) - if let Some(gap) = gap_row_index_content { - for di in 0..drag_count_content { - let gap_y = rect.min.y + (gap + di) as f32 * LAYER_HEIGHT - self.viewport_scroll_y; - let gap_rect = egui::Rect::from_min_size( - egui::pos2(rect.min.x, gap_y), - egui::vec2(rect.width(), LAYER_HEIGHT), - ); - painter.rect_filled(gap_rect, 0.0, active_color); - } - } - // Clean up stale video thumbnail textures for clips no longer visible self.video_thumbnail_textures.retain(|&(clip_id, _), _| visible_video_clip_ids.contains(&clip_id)); @@ -3175,7 +3243,7 @@ impl TimelinePane { if primary_down { if let Some(pos) = pointer_pos { drag.current_mouse_y = pos.y; - let relative_y = pos.y - header_rect.min.y + self.viewport_scroll_y; + let relative_y = pos.y - drag.grab_offset_y - header_rect.min.y + self.viewport_scroll_y + LAYER_HEIGHT * 0.5; let all_rows = build_timeline_rows(context_layers); let filtered_count = all_rows.iter() .filter(|r| !drag.layer_ids.contains(&r.layer_id())) From 83736ec9e310ec4708896d2c3c1a49fedf6aa7b4 Mon Sep 17 00:00:00 2001 From: Skyler Lehmkuhl Date: Sun, 1 Mar 2026 13:48:43 -0500 Subject: [PATCH 3/6] Record to multiple 
layers --- .../lightningbeam-core/src/webcam.rs | 94 ++++- .../lightningbeam-editor/src/main.rs | 149 ++++--- .../lightningbeam-editor/src/panes/mod.rs | 4 +- .../src/panes/timeline.rs | 394 +++++++++++------- 4 files changed, 420 insertions(+), 221 deletions(-) diff --git a/lightningbeam-ui/lightningbeam-core/src/webcam.rs b/lightningbeam-ui/lightningbeam-core/src/webcam.rs index 3338ad3..c275ead 100644 --- a/lightningbeam-ui/lightningbeam-core/src/webcam.rs +++ b/lightningbeam-ui/lightningbeam-core/src/webcam.rs @@ -359,12 +359,19 @@ fn capture_thread_main( let mut decoded_frame = ffmpeg::frame::Video::empty(); let mut rgba_frame = ffmpeg::frame::Video::empty(); + // Helper closure: decode current packet, scale, send preview frame, and + // optionally encode into the active recorder. Returns updated frame_count. + let row_bytes = (width * 4) as usize; + + let mut stop_result_tx: Option>> = None; + 'outer: for (stream_ref, packet) in input.packets() { if stream_ref.index() != stream_index { continue; } - // Check for commands (non-blocking). + // Check for commands BEFORE decoding so that StartRecording takes effect + // on the current packet (no lost frame at the start). while let Ok(cmd) = cmd_rx.try_recv() { match cmd { CaptureCommand::StartRecording { @@ -384,20 +391,19 @@ fn capture_thread_main( } } CaptureCommand::StopRecording { result_tx } => { - if let Some(rec) = recorder.take() { - let _ = result_tx.send(rec.finish()); - } else { - let _ = result_tx.send(Err("Not recording".into())); - } + eprintln!("[WEBCAM stop] StopRecording command received on capture thread"); + // Defer stop until AFTER we decode this packet, so the + // current frame is captured before we finalize. + stop_result_tx = Some(result_tx); } CaptureCommand::Shutdown => break 'outer, } } + // Decode current packet and process frames. decoder.send_packet(&packet).ok(); while decoder.receive_frame(&mut decoded_frame).is_ok() { - // Skip initial corrupt frames from v4l2 if frame_count < SKIP_INITIAL_FRAMES { frame_count += 1; continue; @@ -407,10 +413,8 @@ fn capture_thread_main( let timestamp = start_time.elapsed().as_secs_f64(); - // Build tightly-packed RGBA data (remove stride padding). let data = rgba_frame.data(0); let stride = rgba_frame.stride(0); - let row_bytes = (width * 4) as usize; let rgba_data = if stride == row_bytes { data[..row_bytes * height as usize].to_vec() @@ -433,13 +437,52 @@ fn capture_thread_main( let _ = frame_tx.try_send(frame); if let Some(ref mut rec) = recorder { - if let Err(e) = rec.encode_rgba(&rgba_arc, width, height, frame_count) { + if let Err(e) = rec.encode_rgba(&rgba_arc, width, height, timestamp) { eprintln!("[webcam] recording encode error: {e}"); } } frame_count += 1; } + + // Now handle deferred StopRecording (after the current packet is decoded). + if let Some(result_tx) = stop_result_tx.take() { + if let Some(mut rec) = recorder.take() { + // Flush any frames still buffered in the decoder. 
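                // As a reading aid (this describes the usual ffmpeg-next decoder
                // pattern, not something stated in this patch): after send_eof()
                // the decoder hands out whatever frames it was still buffering via
                // receive_frame(), and the flush() further down resets it so the
                // preview loop can keep feeding it packets afterwards.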
+ let pre_drain_count = frame_count; + decoder.send_eof().ok(); + while decoder.receive_frame(&mut decoded_frame).is_ok() { + if frame_count < SKIP_INITIAL_FRAMES { + frame_count += 1; + continue; + } + scaler.run(&decoded_frame, &mut rgba_frame).ok(); + let timestamp = start_time.elapsed().as_secs_f64(); + let data = rgba_frame.data(0); + let stride = rgba_frame.stride(0); + let rgba_data = if stride == row_bytes { + data[..row_bytes * height as usize].to_vec() + } else { + let mut buf = Vec::with_capacity(row_bytes * height as usize); + for y in 0..height as usize { + buf.extend_from_slice(&data[y * stride..y * stride + row_bytes]); + } + buf + }; + let _ = rec.encode_rgba(&rgba_data, width, height, timestamp); + frame_count += 1; + } + eprintln!( + "[WEBCAM stop] drained {} extra frames from decoder (total frames={})", + frame_count - pre_drain_count, frame_count + ); + // Reset the decoder so it can accept new packets for preview. + decoder.flush(); + let _ = result_tx.send(rec.finish()); + } else { + let _ = result_tx.send(Err("Not recording".into())); + } + } } // Clean up: if still recording when shutting down, finalize. @@ -463,6 +506,10 @@ struct FrameRecorder { path: PathBuf, frame_count: u64, fps: f64, + /// Timestamp of the first recorded frame (for offsetting PTS to start at 0) + first_timestamp: Option, + /// Timestamp of the most recent frame (for computing actual duration) + last_timestamp: f64, } impl FrameRecorder { @@ -510,7 +557,10 @@ impl FrameRecorder { encoder.set_width(aligned_width); encoder.set_height(aligned_height); encoder.set_format(pixel_format); - encoder.set_time_base(ffmpeg::Rational(1, fps as i32)); + // Use microsecond time base for precise timestamp-based PTS. + // This avoids speedup artifacts when the camera delivers frames + // at irregular intervals (common under CPU load or with USB cameras). + encoder.set_time_base(ffmpeg::Rational(1, 1_000_000)); encoder.set_frame_rate(Some(ffmpeg::Rational(fps as i32, 1))); if codec_id == ffmpeg::codec::Id::H264 { @@ -549,6 +599,8 @@ impl FrameRecorder { path: path.clone(), frame_count: 0, fps, + first_timestamp: None, + last_timestamp: 0.0, }) } @@ -557,7 +609,7 @@ impl FrameRecorder { rgba_data: &[u8], width: u32, height: u32, - _global_frame: u64, + timestamp: f64, ) -> Result<(), String> { let mut src_frame = ffmpeg::frame::Video::new(ffmpeg::format::Pixel::RGBA, width, height); @@ -576,8 +628,15 @@ impl FrameRecorder { .run(&src_frame, &mut dst_frame) .map_err(|e| format!("Scale: {e}"))?; - dst_frame.set_pts(Some(self.frame_count as i64)); + // PTS in microseconds from actual capture timestamps. + // Time base is 1/1000000, so PTS = elapsed_seconds * 1000000. + // This ensures correct playback timing even when the camera delivers + // frames at irregular intervals (e.g. under CPU load). 
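        // Worked example with assumed numbers (not taken from the patch): a frame
        // captured 0.5 s after the first recorded frame gets pts = 500_000, and
        // with the 1/1_000_000 time base above the muxer maps that back to 0.5 s
        // of presentation time, however unevenly the frames actually arrived.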
+ let first_ts = *self.first_timestamp.get_or_insert(timestamp); + let elapsed_us = ((timestamp - first_ts).max(0.0) * 1_000_000.0) as i64; + dst_frame.set_pts(Some(elapsed_us)); self.frame_count += 1; + self.last_timestamp = timestamp; self.encoder .send_frame(&dst_frame) @@ -616,7 +675,14 @@ impl FrameRecorder { .write_trailer() .map_err(|e| format!("Write trailer: {e}"))?; - let duration = self.frame_count as f64 / self.fps; + let duration = match self.first_timestamp { + Some(first_ts) => self.last_timestamp - first_ts, + None => self.frame_count as f64 / self.fps, + }; + eprintln!( + "[WEBCAM finish] frames={}, first_ts={:?}, last_ts={:.4}, duration={:.4}s, fps={}", + self.frame_count, self.first_timestamp, self.last_timestamp, duration, self.fps, + ); Ok(RecordingResult { file_path: self.path, duration, diff --git a/lightningbeam-ui/lightningbeam-editor/src/main.rs b/lightningbeam-ui/lightningbeam-editor/src/main.rs index ddc8020..a45164d 100644 --- a/lightningbeam-ui/lightningbeam-editor/src/main.rs +++ b/lightningbeam-ui/lightningbeam-editor/src/main.rs @@ -771,8 +771,6 @@ struct EditorApp { webcam_frame: Option, /// Pending webcam recording command (set by timeline, processed in update) webcam_record_command: Option, - /// Layer being recorded to via webcam - webcam_recording_layer_id: Option, // Track ID mapping (Document layer UUIDs <-> daw-backend TrackIds) layer_to_track_map: HashMap, track_to_layer_map: HashMap, @@ -793,7 +791,7 @@ struct EditorApp { is_recording: bool, // Whether recording is currently active recording_clips: HashMap, // layer_id -> backend clip_id during recording recording_start_time: f64, // Playback time when recording started - recording_layer_id: Option, // Layer being recorded to (for creating clips) + recording_layer_ids: Vec, // Layers being recorded to (for creating clips) // Asset drag-and-drop state dragging_asset: Option, // Asset being dragged from Asset Library // Clipboard @@ -1032,7 +1030,6 @@ impl EditorApp { webcam: None, webcam_frame: None, webcam_record_command: None, - webcam_recording_layer_id: None, layer_to_track_map: HashMap::new(), track_to_layer_map: HashMap::new(), clip_to_metatrack_map: HashMap::new(), @@ -1045,7 +1042,7 @@ impl EditorApp { is_recording: false, // Not recording initially recording_clips: HashMap::new(), // No active recording clips recording_start_time: 0.0, // Will be set when recording starts - recording_layer_id: None, // Will be set when recording starts + recording_layer_ids: Vec::new(), // Will be populated when recording starts dragging_asset: None, // No asset being dragged initially clipboard_manager: lightningbeam_core::clipboard::ClipboardManager::new(), effect_to_load: None, @@ -4333,28 +4330,30 @@ impl eframe::App for EditorApp { AudioEvent::RecordingStarted(track_id, backend_clip_id, rec_sample_rate, rec_channels) => { println!("🎤 Recording started on track {:?}, backend_clip_id={}", track_id, backend_clip_id); - // Create clip in document and add instance to layer - if let Some(layer_id) = self.recording_layer_id { - use lightningbeam_core::clip::{AudioClip, ClipInstance}; + // Create clip in document and add instance to the layer for this track + if let Some(&layer_id) = self.track_to_layer_map.get(&track_id) { + if self.recording_layer_ids.contains(&layer_id) { + use lightningbeam_core::clip::{AudioClip, ClipInstance}; - // Create a recording-in-progress clip (no pool index yet) - let clip = AudioClip::new_recording("Recording..."); - let doc_clip_id = 
self.action_executor.document_mut().add_audio_clip(clip); + // Create a recording-in-progress clip (no pool index yet) + let clip = AudioClip::new_recording("Recording..."); + let doc_clip_id = self.action_executor.document_mut().add_audio_clip(clip); - // Create clip instance on the layer - let clip_instance = ClipInstance::new(doc_clip_id) - .with_timeline_start(self.recording_start_time); + // Create clip instance on the layer + let clip_instance = ClipInstance::new(doc_clip_id) + .with_timeline_start(self.recording_start_time); - // Add instance to layer (works for root and inside movie clips) - if let Some(layer) = self.action_executor.document_mut().get_layer_mut(&layer_id) { - if let lightningbeam_core::layer::AnyLayer::Audio(audio_layer) = layer { - audio_layer.clip_instances.push(clip_instance); - println!("✅ Created recording clip instance on layer {}", layer_id); + // Add instance to layer (works for root and inside movie clips) + if let Some(layer) = self.action_executor.document_mut().get_layer_mut(&layer_id) { + if let lightningbeam_core::layer::AnyLayer::Audio(audio_layer) = layer { + audio_layer.clip_instances.push(clip_instance); + println!("✅ Created recording clip instance on layer {}", layer_id); + } } - } - // Store mapping for later updates - self.recording_clips.insert(layer_id, backend_clip_id); + // Store mapping for later updates + self.recording_clips.insert(layer_id, backend_clip_id); + } } // Initialize live waveform cache for recording @@ -4362,11 +4361,15 @@ impl eframe::App for EditorApp { ctx.request_repaint(); } - AudioEvent::RecordingProgress(_clip_id, duration) => { + AudioEvent::RecordingProgress(_backend_clip_id, duration) => { // Update clip duration as recording progresses - if let Some(layer_id) = self.recording_layer_id { - // First, find the clip_id from the layer (read-only borrow) - let clip_id = { + // Find which layer this backend clip belongs to via recording_clips + let layer_id = self.recording_clips.iter() + .find(|(_, &cid)| cid == _backend_clip_id) + .map(|(&lid, _)| lid); + if let Some(layer_id) = layer_id { + // First, find the doc clip_id from the layer (read-only borrow) + let doc_clip_id = { let document = self.action_executor.document(); document.get_layer(&layer_id) .and_then(|layer| { @@ -4379,8 +4382,8 @@ impl eframe::App for EditorApp { }; // Then update the clip duration (mutable borrow) - if let Some(clip_id) = clip_id { - if let Some(clip) = self.action_executor.document_mut().audio_clips.get_mut(&clip_id) { + if let Some(doc_clip_id) = doc_clip_id { + if let Some(clip) = self.action_executor.document_mut().audio_clips.get_mut(&doc_clip_id) { if clip.is_recording() { clip.duration = duration; } @@ -4390,7 +4393,7 @@ impl eframe::App for EditorApp { ctx.request_repaint(); } AudioEvent::RecordingStopped(_backend_clip_id, pool_index, _waveform) => { - println!("🎤 Recording stopped: pool_index={}", pool_index); + eprintln!("[STOP] AudioEvent::RecordingStopped received (pool_index={})", pool_index); // Clean up live recording waveform cache self.raw_audio_cache.remove(&usize::MAX); @@ -4414,7 +4417,7 @@ impl eframe::App for EditorApp { let mut controller = controller_arc.lock().unwrap(); match controller.get_pool_file_info(pool_index) { Ok((dur, _, _)) => { - println!("✅ Got duration from backend: {:.2}s", dur); + eprintln!("[AUDIO] Got duration from backend: {:.4}s", dur); self.audio_duration_cache.insert(pool_index, dur); dur } @@ -4429,7 +4432,11 @@ impl eframe::App for EditorApp { // Finalize the recording clip with real 
pool_index and duration // and sync to backend for playback - if let Some(layer_id) = self.recording_layer_id { + // Find which layer this recording belongs to via recording_clips + let recording_layer = self.recording_clips.iter() + .find(|(_, &cid)| cid == _backend_clip_id) + .map(|(&lid, _)| lid); + if let Some(layer_id) = recording_layer { // First, find the clip instance and clip id let (clip_id, instance_id, timeline_start, trim_start) = { let document = self.action_executor.document(); @@ -4451,7 +4458,7 @@ impl eframe::App for EditorApp { if let Some(clip) = self.action_executor.document_mut().audio_clips.get_mut(&clip_id) { if clip.finalize_recording(pool_index, duration) { clip.name = format!("Recording {}", pool_index); - println!("✅ Finalized recording clip: pool={}, duration={:.2}s", pool_index, duration); + eprintln!("[AUDIO] Finalized recording clip: pool={}, duration={:.4}s", pool_index, duration); } } @@ -4493,22 +4500,32 @@ impl eframe::App for EditorApp { } } - // Clear recording state - self.is_recording = false; - self.recording_clips.clear(); - self.recording_layer_id = None; + // Remove this layer from active recordings + if let Some(layer_id) = recording_layer { + self.recording_layer_ids.retain(|id| *id != layer_id); + self.recording_clips.remove(&layer_id); + } + // Clear global recording state only when all recordings are done + if self.recording_layer_ids.is_empty() { + self.is_recording = false; + self.recording_clips.clear(); + } ctx.request_repaint(); } AudioEvent::RecordingError(message) => { eprintln!("❌ Recording error: {}", message); self.is_recording = false; self.recording_clips.clear(); - self.recording_layer_id = None; + self.recording_layer_ids.clear(); ctx.request_repaint(); } AudioEvent::MidiRecordingProgress(_track_id, clip_id, duration, notes) => { // Update clip duration in document (so timeline bar grows) - if let Some(layer_id) = self.recording_layer_id { + // Find layer for this track via track_to_layer_map + let midi_layer_id = self.track_to_layer_map.get(&_track_id) + .filter(|lid| self.recording_layer_ids.contains(lid)) + .copied(); + if let Some(layer_id) = midi_layer_id { let doc_clip_id = { let document = self.action_executor.document(); document.get_layer(&layer_id) @@ -4567,7 +4584,10 @@ impl eframe::App for EditorApp { self.midi_event_cache.insert(clip_id, cache_events); // Update document clip with final duration and name - if let Some(layer_id) = self.recording_layer_id { + let midi_layer_id = self.track_to_layer_map.get(&track_id) + .filter(|lid| self.recording_layer_ids.contains(lid)) + .copied(); + if let Some(layer_id) = midi_layer_id { let doc_clip_id = { let document = self.action_executor.document(); document.get_layer(&layer_id) @@ -4601,10 +4621,15 @@ impl eframe::App for EditorApp { // The backend created the instance in create_midi_clip(), but doesn't // report the instance_id back. Needed for move/trim operations later. 
- // Clear recording state - self.is_recording = false; - self.recording_clips.clear(); - self.recording_layer_id = None; + // Remove this MIDI layer from active recordings + if let Some(&layer_id) = self.track_to_layer_map.get(&track_id) { + self.recording_layer_ids.retain(|id| *id != layer_id); + self.recording_clips.remove(&layer_id); + } + if self.recording_layer_ids.is_empty() { + self.is_recording = false; + self.recording_clips.clear(); + } ctx.request_repaint(); } AudioEvent::AudioFileReady { pool_index, path, channels, sample_rate, duration, format } => { @@ -5031,7 +5056,7 @@ impl eframe::App for EditorApp { is_recording: &mut self.is_recording, recording_clips: &mut self.recording_clips, recording_start_time: &mut self.recording_start_time, - recording_layer_id: &mut self.recording_layer_id, + recording_layer_ids: &mut self.recording_layer_ids, dragging_asset: &mut self.dragging_asset, stroke_width: &mut self.stroke_width, fill_enabled: &mut self.fill_enabled, @@ -5157,7 +5182,7 @@ impl eframe::App for EditorApp { // Process webcam recording commands from timeline if let Some(cmd) = self.webcam_record_command.take() { match cmd { - panes::WebcamRecordCommand::Start { layer_id } => { + panes::WebcamRecordCommand::Start { .. } => { // Ensure webcam is open if self.webcam.is_none() { if let Some(device) = lightningbeam_core::webcam::default_camera() { @@ -5191,7 +5216,6 @@ impl eframe::App for EditorApp { let recording_path = recording_dir.join(format!("webcam_recording_{}.{}", timestamp, ext)); match webcam.start_recording(recording_path, codec) { Ok(()) => { - self.webcam_recording_layer_id = Some(layer_id); eprintln!("[WEBCAM] Recording started"); } Err(e) => { @@ -5201,13 +5225,25 @@ impl eframe::App for EditorApp { } } panes::WebcamRecordCommand::Stop => { + eprintln!("[STOP] Webcam stop command processed (main.rs handler)"); + // Find the webcam recording layer before stopping (need it for cleanup) + let webcam_layer_id = { + let document = self.action_executor.document(); + self.recording_layer_ids.iter().copied().find(|lid| { + document.get_layer(lid).map_or(false, |l| { + matches!(l, lightningbeam_core::layer::AnyLayer::Video(v) if v.camera_enabled) + }) + }) + }; if let Some(webcam) = &mut self.webcam { + let stop_t = std::time::Instant::now(); match webcam.stop_recording() { Ok(result) => { + eprintln!("[STOP] webcam.stop_recording() returned in {:.1}ms", stop_t.elapsed().as_secs_f64() * 1000.0); let file_path_str = result.file_path.to_string_lossy().to_string(); - eprintln!("[WEBCAM] Recording saved to: {}", file_path_str); + eprintln!("[WEBCAM] Recording saved to: {} (recorder duration={:.4}s)", file_path_str, result.duration); // Create VideoClip + ClipInstance from recorded file - if let Some(layer_id) = self.webcam_recording_layer_id.take() { + if let Some(layer_id) = webcam_layer_id { match lightningbeam_core::video::probe_video(&file_path_str) { Ok(info) => { use lightningbeam_core::clip::{VideoClip, ClipInstance}; @@ -5285,7 +5321,10 @@ impl eframe::App for EditorApp { } }); - eprintln!("[WEBCAM] Created video clip: {:.1}s @ {:.1}fps", duration, info.fps); + eprintln!( + "[WEBCAM] probe_video: duration={:.4}s, fps={:.1}, {}x{}. 
Using probe duration for clip.", + info.duration, info.fps, info.width, info.height, + ); } Err(e) => { eprintln!("[WEBCAM] Failed to probe recorded video: {}", e); @@ -5295,12 +5334,18 @@ impl eframe::App for EditorApp { } Err(e) => { eprintln!("[WEBCAM] Failed to stop recording: {}", e); - self.webcam_recording_layer_id = None; + // webcam layer cleanup handled by recording_layer_ids.clear() below } } } - self.is_recording = false; - self.recording_layer_id = None; + // Remove webcam layer from active recordings + if let Some(wid) = webcam_layer_id { + self.recording_layer_ids.retain(|id| *id != wid); + } + if self.recording_layer_ids.is_empty() { + self.is_recording = false; + self.recording_clips.clear(); + } } } } diff --git a/lightningbeam-ui/lightningbeam-editor/src/panes/mod.rs b/lightningbeam-ui/lightningbeam-editor/src/panes/mod.rs index 3faa6d5..81d3910 100644 --- a/lightningbeam-ui/lightningbeam-editor/src/panes/mod.rs +++ b/lightningbeam-ui/lightningbeam-editor/src/panes/mod.rs @@ -57,8 +57,10 @@ pub struct DraggingAsset { /// Command for webcam recording (issued by timeline, processed by main) #[derive(Debug)] +#[allow(dead_code)] pub enum WebcamRecordCommand { /// Start recording on the given video layer + // TODO: remove layer_id — recording_layer_ids now tracks which layers are recording Start { layer_id: uuid::Uuid }, /// Stop current webcam recording Stop, @@ -198,7 +200,7 @@ pub struct SharedPaneState<'a> { pub is_recording: &'a mut bool, // Whether recording is currently active pub recording_clips: &'a mut std::collections::HashMap, // layer_id -> clip_id pub recording_start_time: &'a mut f64, // Playback time when recording started - pub recording_layer_id: &'a mut Option, // Layer being recorded to + pub recording_layer_ids: &'a mut Vec, // Layers being recorded to /// Asset being dragged from Asset Library (for cross-pane drag-and-drop) pub dragging_asset: &'a mut Option, // Tool-specific options for infopanel diff --git a/lightningbeam-ui/lightningbeam-editor/src/panes/timeline.rs b/lightningbeam-ui/lightningbeam-editor/src/panes/timeline.rs index e8775c1..3ed9fc0 100644 --- a/lightningbeam-ui/lightningbeam-editor/src/panes/timeline.rs +++ b/lightningbeam-ui/lightningbeam-editor/src/panes/timeline.rs @@ -138,13 +138,6 @@ enum TimeDisplayFormat { Measures, } -/// Type of recording in progress (for stop logic dispatch) -enum RecordingType { - Audio, - Midi, - Webcam, -} - /// State for an in-progress layer header drag-to-reorder operation. struct LayerDragState { /// IDs of the layers being dragged (in visual order, top to bottom) @@ -516,184 +509,254 @@ impl TimelinePane { } } - /// Start recording on the active layer (audio or video with camera) + /// Start recording on all selected recordable layers (or the active layer as fallback). + /// Groups are recursed into. At most one layer per recording type is recorded to + /// (topmost in visual order wins). 
fn start_recording(&mut self, shared: &mut SharedPaneState) { use lightningbeam_core::clip::{AudioClip, ClipInstance}; - let Some(active_layer_id) = *shared.active_layer_id else { - println!("⚠️ No active layer selected for recording"); - return; - }; - - // Check if this is a video layer with camera enabled - let is_video_camera = { - let document = shared.action_executor.document(); - let context_layers = document.context_layers(shared.editing_clip_id.as_ref()); - context_layers.iter().copied() - .find(|l| l.id() == active_layer_id) - .map(|layer| { - if let AnyLayer::Video(v) = layer { - v.camera_enabled - } else { - false - } - }) - .unwrap_or(false) - }; - - if is_video_camera { - // Issue webcam recording start command (processed by main.rs) - *shared.webcam_record_command = Some(super::WebcamRecordCommand::Start { - layer_id: active_layer_id, - }); - *shared.is_recording = true; - *shared.recording_start_time = *shared.playback_time; - *shared.recording_layer_id = Some(active_layer_id); - - // Auto-start playback for recording - if !*shared.is_playing { - if let Some(controller_arc) = shared.audio_controller { - let mut controller = controller_arc.lock().unwrap(); - controller.play(); - *shared.is_playing = true; - println!("▶ Auto-started playback for webcam recording"); + // Step 1: Collect candidate layer IDs from focus selection, falling back to active layer + let candidate_ids: Vec = match shared.focus { + lightningbeam_core::selection::FocusSelection::Layers(ref ids) if !ids.is_empty() => { + ids.clone() + } + _ => { + if let Some(id) = *shared.active_layer_id { + vec![id] + } else { + println!("⚠️ No active layer selected for recording"); + return; } } - println!("📹 Started webcam recording on layer {}", active_layer_id); + }; + + // Step 2: Resolve layers, recursing into groups to collect recordable leaves. + // Categorize by recording type. Use visual ordering (build_timeline_rows) to pick topmost. 
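        // An illustrative, made-up selection (not taken from the patch): selecting
        // a group that contains a MIDI layer and two sampled-audio layers, plus a
        // camera-enabled video layer outside the group, ends up recording to the
        // MIDI layer, the visually topmost of the two audio layers, and the camera
        // layer, i.e. one target per recording type.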
+ #[derive(Debug, Clone, Copy, PartialEq, Eq)] + enum RecordCandidate { + AudioSampled, + AudioMidi, + VideoCamera, + } + + let mut candidates: Vec<(uuid::Uuid, RecordCandidate, usize)> = Vec::new(); // (layer_id, type, visual_row_index) + + { + let document = shared.action_executor.document(); + let context_layers = document.context_layers(shared.editing_clip_id.as_ref()); + let rows = build_timeline_rows(&context_layers); + + // Helper: collect recordable leaf layer IDs from a layer (recurse into groups) + fn collect_recordable_leaves(layer: &AnyLayer, out: &mut Vec) { + match layer { + AnyLayer::Audio(_) => out.push(layer.id()), + AnyLayer::Video(v) if v.camera_enabled => out.push(layer.id()), + AnyLayer::Group(g) => { + for child in &g.children { + collect_recordable_leaves(child, out); + } + } + _ => {} + } + } + + let mut leaf_ids: Vec = Vec::new(); + for cid in &candidate_ids { + if let Some(layer) = context_layers.iter().copied().find(|l| l.id() == *cid) { + collect_recordable_leaves(layer, &mut leaf_ids); + } else { + // Try deeper in the tree (for layers inside groups) + if let Some(layer) = document.root.get_child(cid) { + collect_recordable_leaves(layer, &mut leaf_ids); + } + } + } + + // Deduplicate + leaf_ids.sort(); + leaf_ids.dedup(); + + // Categorize and find visual row index for ordering + for leaf_id in &leaf_ids { + let visual_idx = rows.iter().position(|r| r.layer_id() == *leaf_id).unwrap_or(usize::MAX); + if let Some(layer) = document.root.get_child(leaf_id).or_else(|| { + context_layers.iter().copied().find(|l| l.id() == *leaf_id) + }) { + let cat = match layer { + AnyLayer::Audio(a) => match a.audio_layer_type { + AudioLayerType::Sampled => Some(RecordCandidate::AudioSampled), + AudioLayerType::Midi => Some(RecordCandidate::AudioMidi), + }, + AnyLayer::Video(v) if v.camera_enabled => Some(RecordCandidate::VideoCamera), + _ => None, + }; + if let Some(cat) = cat { + candidates.push((*leaf_id, cat, visual_idx)); + } + } + } + } + + if candidates.is_empty() { + println!("⚠️ No recordable layers in selection"); return; } - // Get layer type (copy it so we can drop the document borrow before mutating) - let layer_type = { - let document = shared.action_executor.document(); - let context_layers = document.context_layers(shared.editing_clip_id.as_ref()); - let Some(layer) = context_layers.iter().copied().find(|l| l.id() == active_layer_id) else { - println!("⚠️ Active layer not found in document"); - return; - }; - let AnyLayer::Audio(audio_layer) = layer else { - println!("⚠️ Active layer is not an audio layer - cannot record"); - return; - }; - audio_layer.audio_layer_type - }; - - // Get the backend track ID for this layer - let Some(&track_id) = shared.layer_to_track_map.get(&active_layer_id) else { - println!("⚠️ No backend track mapped for layer {}", active_layer_id); - return; - }; - - let start_time = *shared.playback_time; - - // Start recording based on layer type - if let Some(controller_arc) = shared.audio_controller { - let mut controller = controller_arc.lock().unwrap(); - - match layer_type { - AudioLayerType::Midi => { - // Create backend MIDI clip and start recording - let clip_id = controller.create_midi_clip(track_id, start_time, 0.0); - controller.start_midi_recording(track_id, clip_id, start_time); - shared.recording_clips.insert(active_layer_id, clip_id); - println!("🎹 Started MIDI recording on track {:?} at {:.2}s, clip_id={}", - track_id, start_time, clip_id); - - // Drop controller lock before document mutation - drop(controller); - - // Create 
document clip + clip instance immediately (clip_id is known synchronously) - let doc_clip = AudioClip::new_midi("Recording...", clip_id, 0.0); - let doc_clip_id = shared.action_executor.document_mut().add_audio_clip(doc_clip); - - let clip_instance = ClipInstance::new(doc_clip_id) - .with_timeline_start(start_time); - - if let Some(layer) = shared.action_executor.document_mut().get_layer_mut(&active_layer_id) { - if let lightningbeam_core::layer::AnyLayer::Audio(audio_layer) = layer { - audio_layer.clip_instances.push(clip_instance); - } - } - - // Initialize empty cache entry for this clip - shared.midi_event_cache.insert(clip_id, Vec::new()); + // Step 3: Sort by visual position (topmost first) and deduplicate by type + candidates.sort_by_key(|c| c.2); + let mut seen_sampled = false; + let mut seen_midi = false; + let mut seen_webcam = false; + candidates.retain(|c| { + match c.1 { + RecordCandidate::AudioSampled => { + if seen_sampled { return false; } + seen_sampled = true; } - AudioLayerType::Sampled => { - // For audio recording, backend creates the clip - controller.start_recording(track_id, start_time); - println!("🎤 Started audio recording on track {:?} at {:.2}s", track_id, start_time); - drop(controller); + RecordCandidate::AudioMidi => { + if seen_midi { return false; } + seen_midi = true; + } + RecordCandidate::VideoCamera => { + if seen_webcam { return false; } + seen_webcam = true; } } + true + }); - // Re-acquire lock for playback start - if !*shared.is_playing { + let start_time = *shared.playback_time; + shared.recording_layer_ids.clear(); + + // Step 4: Dispatch recording for each candidate + for &(layer_id, ref cat, _) in &candidates { + match cat { + RecordCandidate::VideoCamera => { + *shared.webcam_record_command = Some(super::WebcamRecordCommand::Start { + layer_id, + }); + shared.recording_layer_ids.push(layer_id); + println!("📹 Started webcam recording on layer {}", layer_id); + } + RecordCandidate::AudioSampled => { + if let Some(&track_id) = shared.layer_to_track_map.get(&layer_id) { + if let Some(controller_arc) = shared.audio_controller { + let mut controller = controller_arc.lock().unwrap(); + controller.start_recording(track_id, start_time); + println!("🎤 Started audio recording on track {:?} at {:.2}s", track_id, start_time); + } + shared.recording_layer_ids.push(layer_id); + } else { + println!("⚠️ No backend track mapped for layer {}", layer_id); + } + } + RecordCandidate::AudioMidi => { + if let Some(&track_id) = shared.layer_to_track_map.get(&layer_id) { + if let Some(controller_arc) = shared.audio_controller { + let mut controller = controller_arc.lock().unwrap(); + let clip_id = controller.create_midi_clip(track_id, start_time, 0.0); + controller.start_midi_recording(track_id, clip_id, start_time); + shared.recording_clips.insert(layer_id, clip_id); + println!("🎹 Started MIDI recording on track {:?} at {:.2}s, clip_id={}", + track_id, start_time, clip_id); + } + + // Create document clip + clip instance immediately + let doc_clip = AudioClip::new_midi("Recording...", + *shared.recording_clips.get(&layer_id).unwrap_or(&0), 0.0); + let doc_clip_id = shared.action_executor.document_mut().add_audio_clip(doc_clip); + + let clip_instance = ClipInstance::new(doc_clip_id) + .with_timeline_start(start_time); + + if let Some(layer) = shared.action_executor.document_mut().get_layer_mut(&layer_id) { + if let lightningbeam_core::layer::AnyLayer::Audio(audio_layer) = layer { + audio_layer.clip_instances.push(clip_instance); + } + } + + // Initialize empty cache 
entry + if let Some(&clip_id) = shared.recording_clips.get(&layer_id) { + shared.midi_event_cache.insert(clip_id, Vec::new()); + } + + shared.recording_layer_ids.push(layer_id); + } else { + println!("⚠️ No backend track mapped for layer {}", layer_id); + } + } + } + } + + if shared.recording_layer_ids.is_empty() { + println!("⚠️ Failed to start recording on any layer"); + return; + } + + // Auto-start playback if needed + if !*shared.is_playing { + if let Some(controller_arc) = shared.audio_controller { let mut controller = controller_arc.lock().unwrap(); controller.play(); *shared.is_playing = true; println!("▶ Auto-started playback for recording"); } - - // Store recording state - *shared.is_recording = true; - *shared.recording_start_time = start_time; - *shared.recording_layer_id = Some(active_layer_id); - } else { - println!("⚠️ No audio controller available"); } + + *shared.is_recording = true; + *shared.recording_start_time = start_time; } - /// Stop the current recording + /// Stop all active recordings fn stop_recording(&mut self, shared: &mut SharedPaneState) { - // Determine recording type by checking the layer - let recording_type = if let Some(layer_id) = *shared.recording_layer_id { - let context_layers = shared.action_executor.document().context_layers(shared.editing_clip_id.as_ref()); - context_layers.iter().copied() - .find(|l| l.id() == layer_id) - .map(|layer| { + let stop_wall = std::time::Instant::now(); + eprintln!("[STOP] stop_recording called at {:?}", stop_wall); + + // Determine which recording types are active by checking recording_layer_ids + let mut has_audio = false; + let mut has_midi = false; + let mut has_webcam = false; + + { + let document = shared.action_executor.document(); + for layer_id in shared.recording_layer_ids.iter() { + if let Some(layer) = document.root.get_child(layer_id) { match layer { - lightningbeam_core::layer::AnyLayer::Audio(audio_layer) => { - if matches!(audio_layer.audio_layer_type, lightningbeam_core::layer::AudioLayerType::Midi) { - RecordingType::Midi - } else { - RecordingType::Audio + lightningbeam_core::layer::AnyLayer::Audio(a) => { + match a.audio_layer_type { + lightningbeam_core::layer::AudioLayerType::Sampled => has_audio = true, + lightningbeam_core::layer::AudioLayerType::Midi => has_midi = true, } } lightningbeam_core::layer::AnyLayer::Video(v) if v.camera_enabled => { - RecordingType::Webcam + has_webcam = true; } - _ => RecordingType::Audio, - } - }) - .unwrap_or(RecordingType::Audio) - } else { - RecordingType::Audio - }; - - match recording_type { - RecordingType::Webcam => { - // Issue webcam stop command (processed by main.rs) - *shared.webcam_record_command = Some(super::WebcamRecordCommand::Stop); - println!("📹 Stopped webcam recording"); - } - _ => { - if let Some(controller_arc) = shared.audio_controller { - let mut controller = controller_arc.lock().unwrap(); - - if matches!(recording_type, RecordingType::Midi) { - controller.stop_midi_recording(); - println!("🎹 Stopped MIDI recording"); - } else { - controller.stop_recording(); - println!("🎤 Stopped audio recording"); + _ => {} } } } } - // Note: Don't clear recording_layer_id here! - // The RecordingStopped/MidiRecordingStopped event handler in main.rs - // needs it to finalize the clip. It will clear the state after processing. 
+ if has_webcam { + *shared.webcam_record_command = Some(super::WebcamRecordCommand::Stop); + eprintln!("[STOP] Webcam stop command queued at +{:.1}ms", stop_wall.elapsed().as_secs_f64() * 1000.0); + } + + if let Some(controller_arc) = shared.audio_controller { + let mut controller = controller_arc.lock().unwrap(); + if has_midi { + controller.stop_midi_recording(); + eprintln!("[STOP] MIDI stop command sent at +{:.1}ms", stop_wall.elapsed().as_secs_f64() * 1000.0); + } + if has_audio { + controller.stop_recording(); + eprintln!("[STOP] Audio stop command sent at +{:.1}ms", stop_wall.elapsed().as_secs_f64() * 1000.0); + } + } + + // Note: Don't clear recording_layer_ids here! + // The RecordingStopped/MidiRecordingStopped event handlers in main.rs + // need them to finalize clips. They will clear the state after processing. // Only clear is_recording to update UI state immediately. *shared.is_recording = false; } @@ -3155,8 +3218,21 @@ impl TimelinePane { if clicked_layer_index < header_rows.len() { let layer_id = header_rows[clicked_layer_index].layer_id(); let clicked_parent = header_rows[clicked_layer_index].parent_id(); + let prev_active = *active_layer_id; *active_layer_id = Some(layer_id); if shift_held { + // If focus doesn't already contain the previously active layer + // (e.g. it was set by creating a layer rather than clicking), + // seed the selection with it so shift-click extends from it. + if let Some(prev) = prev_active.filter(|id| *id != layer_id) { + let active_in_focus = matches!( + &focus, + lightningbeam_core::selection::FocusSelection::Layers(ids) if ids.contains(&prev) + ); + if !active_in_focus { + *focus = lightningbeam_core::selection::FocusSelection::Layers(vec![prev]); + } + } shift_toggle_layer(focus, layer_id, clicked_parent, &header_rows); } else { // Only change selection if the clicked layer isn't already selected @@ -3742,8 +3818,18 @@ impl TimelinePane { if clicked_layer_index < empty_click_rows.len() { let layer_id = empty_click_rows[clicked_layer_index].layer_id(); let clicked_parent = empty_click_rows[clicked_layer_index].parent_id(); + let prev_active = *active_layer_id; *active_layer_id = Some(layer_id); if shift_held { + if let Some(prev) = prev_active.filter(|id| *id != layer_id) { + let active_in_focus = matches!( + &focus, + lightningbeam_core::selection::FocusSelection::Layers(ids) if ids.contains(&prev) + ); + if !active_in_focus { + *focus = lightningbeam_core::selection::FocusSelection::Layers(vec![prev]); + } + } shift_toggle_layer(focus, layer_id, clicked_parent, &empty_click_rows); } else { selection.clear_clip_instances(); From 8e9d90ed9286ff776fd12dae288b183a04811a7b Mon Sep 17 00:00:00 2001 From: Skyler Lehmkuhl Date: Sun, 1 Mar 2026 13:51:42 -0500 Subject: [PATCH 4/6] Fix recording to layers inside groups --- lightningbeam-ui/lightningbeam-editor/src/main.rs | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/lightningbeam-ui/lightningbeam-editor/src/main.rs b/lightningbeam-ui/lightningbeam-editor/src/main.rs index a45164d..b027a3f 100644 --- a/lightningbeam-ui/lightningbeam-editor/src/main.rs +++ b/lightningbeam-ui/lightningbeam-editor/src/main.rs @@ -4041,12 +4041,8 @@ impl eframe::App for EditorApp { // Webcam management: open/close based on camera_enabled layers, poll frames { - let any_camera_enabled = self.action_executor.document().root.children.iter().any(|layer| { - if let lightningbeam_core::layer::AnyLayer::Video(v) = layer { - v.camera_enabled - } else { - false - } + let any_camera_enabled = 
self.action_executor.document().all_layers().iter().any(|layer| { + matches!(layer, lightningbeam_core::layer::AnyLayer::Video(v) if v.camera_enabled) }); if any_camera_enabled && self.webcam.is_none() { From a6e04ae89befc8dcfbe9d06e5bbfd95fd1ac27fa Mon Sep 17 00:00:00 2001 From: Skyler Lehmkuhl Date: Sun, 1 Mar 2026 14:49:49 -0500 Subject: [PATCH 5/6] Add VU meters --- daw-backend/src/audio/engine.rs | 163 ++++++++++++------ daw-backend/src/audio/project.rs | 50 +++++- daw-backend/src/audio/track.rs | 14 +- daw-backend/src/command/types.rs | 13 ++ .../src/actions/set_layer_properties.rs | 8 + .../lightningbeam-core/src/layer.rs | 8 + .../lightningbeam-editor/src/main.rs | 77 +++++++++ .../lightningbeam-editor/src/panes/mod.rs | 7 + .../src/panes/timeline.rs | 96 ++++++++++- 9 files changed, 380 insertions(+), 56 deletions(-) diff --git a/daw-backend/src/audio/engine.rs b/daw-backend/src/audio/engine.rs index b73adcb..d6f4713 100644 --- a/daw-backend/src/audio/engine.rs +++ b/daw-backend/src/audio/engine.rs @@ -71,6 +71,15 @@ pub struct Engine { // Disk reader for streaming playback of compressed files disk_reader: Option, + // Input monitoring and metering + input_monitoring: bool, + input_gain: f32, + input_level_peak: f32, + input_level_counter: usize, + output_level_peak: f32, + output_level_counter: usize, + track_level_counter: usize, + // Callback timing diagnostics (enabled by DAW_AUDIO_DEBUG=1) debug_audio: bool, callback_count: u64, @@ -138,6 +147,13 @@ impl Engine { metronome: Metronome::new(sample_rate), recording_sample_buffer: Vec::with_capacity(4096), disk_reader: Some(disk_reader), + input_monitoring: false, + input_gain: 1.0, + input_level_peak: 0.0, + input_level_counter: 0, + output_level_peak: 0.0, + output_level_counter: 0, + track_level_counter: 0, debug_audio: std::env::var("DAW_AUDIO_DEBUG").map_or(false, |v| v == "1"), callback_count: 0, timing_worst_total_us: 0, @@ -345,6 +361,25 @@ impl Engine { self.channels, ); + // Compute output peak for master VU meter + let output_peak = output.iter().map(|s| s.abs()).fold(0.0f32, f32::max); + self.output_level_peak = self.output_level_peak.max(output_peak); + self.output_level_counter += output.len(); + let meter_interval = self.sample_rate as usize / 20; // ~50ms + if self.output_level_counter >= meter_interval { + let _ = self.event_tx.push(AudioEvent::OutputLevel(self.output_level_peak)); + self.output_level_peak = 0.0; + self.output_level_counter = 0; + } + + // Send per-track peak levels periodically (~50ms) + self.track_level_counter += output.len(); + if self.track_level_counter >= meter_interval { + let levels = self.project.collect_track_peaks(); + let _ = self.event_tx.push(AudioEvent::TrackLevels(levels)); + self.track_level_counter = 0; + } + // Update playhead (convert total samples to frames) self.playhead += (output.len() / self.channels as usize) as u64; @@ -380,73 +415,85 @@ impl Engine { self.process_live_midi(output); } - // Process recording if active (independent of playback state) - if let Some(recording) = &mut self.recording_state { + // Process input monitoring and/or recording (independent of playback state) + let is_recording = self.recording_state.is_some(); + if is_recording || self.input_monitoring { if let Some(input_rx) = &mut self.input_rx { - // Phase 1: Discard stale samples by popping without storing - // (fast — no Vec push, no add_samples overhead) - while recording.samples_to_skip > 0 { - match input_rx.pop() { - Ok(_) => recording.samples_to_skip -= 1, - Err(_) => break, + // Phase 
1: Discard stale samples during recording skip phase + if let Some(recording) = &mut self.recording_state { + while recording.samples_to_skip > 0 { + match input_rx.pop() { + Ok(_) => recording.samples_to_skip -= 1, + Err(_) => break, + } } } - // Phase 2: Pull fresh samples for actual recording + // Phase 2: Pull fresh samples self.recording_sample_buffer.clear(); while let Ok(sample) = input_rx.pop() { - self.recording_sample_buffer.push(sample); + // Apply input gain + self.recording_sample_buffer.push(sample * self.input_gain); } - // Add samples to recording if !self.recording_sample_buffer.is_empty() { - // Calculate how many samples will be skipped (stale buffer data) - let skip = if recording.paused { - self.recording_sample_buffer.len() - } else { - recording.samples_to_skip.min(self.recording_sample_buffer.len()) - }; + // Compute input peak for VU metering + let input_peak = self.recording_sample_buffer.iter().map(|s| s.abs()).fold(0.0f32, f32::max); + self.input_level_peak = self.input_level_peak.max(input_peak); + self.input_level_counter += self.recording_sample_buffer.len(); + let meter_interval = self.sample_rate as usize / 20; // ~50ms + if self.input_level_counter >= meter_interval { + let _ = self.event_tx.push(AudioEvent::InputLevel(self.input_level_peak)); + self.input_level_peak = 0.0; + self.input_level_counter = 0; + } - match recording.add_samples(&self.recording_sample_buffer) { - Ok(_flushed) => { - // Mirror non-skipped samples to UI for live waveform display - if skip < self.recording_sample_buffer.len() { - if let Some(ref mut mirror_tx) = self.recording_mirror_tx { - for &sample in &self.recording_sample_buffer[skip..] { - let _ = mirror_tx.push(sample); + // Feed samples to recording if active + if let Some(recording) = &mut self.recording_state { + let skip = if recording.paused { + self.recording_sample_buffer.len() + } else { + recording.samples_to_skip.min(self.recording_sample_buffer.len()) + }; + + match recording.add_samples(&self.recording_sample_buffer) { + Ok(_flushed) => { + // Mirror non-skipped samples to UI for live waveform display + if skip < self.recording_sample_buffer.len() { + if let Some(ref mut mirror_tx) = self.recording_mirror_tx { + for &sample in &self.recording_sample_buffer[skip..] 
{ + let _ = mirror_tx.push(sample); + } } } - } - // Update clip duration every callback for sample-accurate timing - let duration = recording.duration(); - let clip_id = recording.clip_id; - let track_id = recording.track_id; + // Update clip duration every callback for sample-accurate timing + let duration = recording.duration(); + let clip_id = recording.clip_id; + let track_id = recording.track_id; - // Update clip duration in project as recording progresses - if let Some(crate::audio::track::TrackNode::Audio(track)) = self.project.get_track_mut(track_id) { - if let Some(clip) = track.clips.iter_mut().find(|c| c.id == clip_id) { - // Update both internal_end and external_duration as recording progresses - clip.internal_end = clip.internal_start + duration; - clip.external_duration = duration; + // Update clip duration in project as recording progresses + if let Some(crate::audio::track::TrackNode::Audio(track)) = self.project.get_track_mut(track_id) { + if let Some(clip) = track.clips.iter_mut().find(|c| c.id == clip_id) { + clip.internal_end = clip.internal_start + duration; + clip.external_duration = duration; + } + } + + // Send progress event periodically (every ~0.1 seconds) + self.recording_progress_counter += self.recording_sample_buffer.len(); + if self.recording_progress_counter >= (self.sample_rate as usize / 10) { + let _ = self.event_tx.push(AudioEvent::RecordingProgress(clip_id, duration)); + self.recording_progress_counter = 0; } } - - // Send progress event periodically (every ~0.1 seconds) - self.recording_progress_counter += self.recording_sample_buffer.len(); - if self.recording_progress_counter >= (self.sample_rate as usize / 10) { - let _ = self.event_tx.push(AudioEvent::RecordingProgress(clip_id, duration)); - self.recording_progress_counter = 0; + Err(e) => { + let _ = self.event_tx.push(AudioEvent::RecordingError( + format!("Recording write error: {}", e) + )); + self.recording_state = None; } } - Err(e) => { - // Recording error occurred - let _ = self.event_tx.push(AudioEvent::RecordingError( - format!("Recording write error: {}", e) - )); - // Stop recording on error - self.recording_state = None; - } } } } @@ -1136,6 +1183,14 @@ impl Engine { self.metronome.set_enabled(enabled); } + Command::SetInputMonitoring(enabled) => { + self.input_monitoring = enabled; + } + + Command::SetInputGain(gain) => { + self.input_gain = gain; + } + Command::SetTempo(bpm, time_sig) => { self.metronome.update_timing(bpm, time_sig); self.project.set_tempo(bpm, time_sig.0); @@ -2851,6 +2906,16 @@ impl EngineController { let _ = self.command_tx.push(Command::SetTrackSolo(track_id, solo)); } + /// Enable or disable input monitoring (mic level metering) + pub fn set_input_monitoring(&mut self, enabled: bool) { + let _ = self.command_tx.push(Command::SetInputMonitoring(enabled)); + } + + /// Set the input gain multiplier (applied before recording) + pub fn set_input_gain(&mut self, gain: f32) { + let _ = self.command_tx.push(Command::SetInputGain(gain)); + } + /// Move a clip to a new timeline position (changes external_start) pub fn move_clip(&mut self, track_id: TrackId, clip_id: ClipId, new_start_time: f64) { let _ = self.command_tx.push(Command::MoveClip(track_id, clip_id, new_start_time)); diff --git a/daw-backend/src/audio/project.rs b/daw-backend/src/audio/project.rs index f838c64..8fd580e 100644 --- a/daw-backend/src/audio/project.rs +++ b/daw-backend/src/audio/project.rs @@ -441,13 +441,34 @@ impl Project { // Handle audio track vs MIDI track vs group track match 
self.tracks.get_mut(&track_id) { Some(TrackNode::Audio(track)) => { - // Render audio track directly into output - track.render(output, audio_pool, ctx.playhead_seconds, ctx.sample_rate, ctx.channels); + // Render audio track into a temp buffer for peak measurement + let mut track_buffer = buffer_pool.acquire(); + track_buffer.resize(output.len(), 0.0); + track_buffer.fill(0.0); + track.render(&mut track_buffer, audio_pool, ctx.playhead_seconds, ctx.sample_rate, ctx.channels); + // Accumulate peak level for VU metering (max over meter interval) + let buffer_peak = track_buffer.iter().map(|s| s.abs()).fold(0.0f32, f32::max); + track.peak_level = track.peak_level.max(buffer_peak); + // Mix into output + for (out, src) in output.iter_mut().zip(track_buffer.iter()) { + *out += src; + } + buffer_pool.release(track_buffer); } Some(TrackNode::Midi(track)) => { - // Render MIDI track directly into output - // Access midi_clip_pool from self - safe because we only need immutable access - track.render(output, &self.midi_clip_pool, ctx.playhead_seconds, ctx.sample_rate, ctx.channels); + // Render MIDI track into a temp buffer for peak measurement + let mut track_buffer = buffer_pool.acquire(); + track_buffer.resize(output.len(), 0.0); + track_buffer.fill(0.0); + track.render(&mut track_buffer, &self.midi_clip_pool, ctx.playhead_seconds, ctx.sample_rate, ctx.channels); + // Accumulate peak level for VU metering (max over meter interval) + let buffer_peak = track_buffer.iter().map(|s| s.abs()).fold(0.0f32, f32::max); + track.peak_level = track.peak_level.max(buffer_peak); + // Mix into output + for (out, src) in output.iter_mut().zip(track_buffer.iter()) { + *out += src; + } + buffer_pool.release(track_buffer); } Some(TrackNode::Group(group)) => { // Skip rendering if playhead is outside the metatrack's trim window @@ -534,6 +555,25 @@ impl Project { } } + /// Collect per-track peak levels for VU metering and reset accumulators + pub fn collect_track_peaks(&mut self) -> Vec<(TrackId, f32)> { + let mut levels = Vec::new(); + for (id, track) in &mut self.tracks { + match track { + TrackNode::Audio(t) => { + levels.push((*id, t.peak_level)); + t.peak_level = 0.0; + } + TrackNode::Midi(t) => { + levels.push((*id, t.peak_level)); + t.peak_level = 0.0; + } + TrackNode::Group(_) => {} + } + } + levels + } + /// Stop all notes on all MIDI tracks pub fn stop_all_notes(&mut self) { for track in self.tracks.values_mut() { diff --git a/daw-backend/src/audio/track.rs b/daw-backend/src/audio/track.rs index bedaa13..4f9aa5f 100644 --- a/daw-backend/src/audio/track.rs +++ b/daw-backend/src/audio/track.rs @@ -435,6 +435,10 @@ pub struct MidiTrack { /// Used to detect when the playhead exits a clip, so we can send all-notes-off. 
#[serde(skip)] prev_active_instances: HashSet, + + /// Peak level of last render() call (for VU metering) + #[serde(skip, default)] + pub peak_level: f32, } impl Clone for MidiTrack { @@ -452,6 +456,7 @@ impl Clone for MidiTrack { next_automation_id: self.next_automation_id, live_midi_queue: Vec::new(), // Don't clone live MIDI queue prev_active_instances: HashSet::new(), + peak_level: 0.0, } } } @@ -479,6 +484,7 @@ impl MidiTrack { next_automation_id: 0, live_midi_queue: Vec::new(), prev_active_instances: HashSet::new(), + peak_level: 0.0, } } @@ -705,6 +711,10 @@ pub struct AudioTrack { /// Pre-allocated buffer for clip rendering (avoids heap allocation per callback) #[serde(skip, default)] clip_render_buffer: Vec, + + /// Peak level of last render() call (for VU metering) + #[serde(skip, default)] + pub peak_level: f32, } impl Clone for AudioTrack { @@ -721,6 +731,7 @@ impl Clone for AudioTrack { effects_graph_preset: self.effects_graph_preset.clone(), effects_graph: default_audio_graph(), // Create fresh graph, not cloned clip_render_buffer: Vec::new(), + peak_level: 0.0, } } } @@ -764,6 +775,7 @@ impl AudioTrack { effects_graph_preset: None, effects_graph, clip_render_buffer: Vec::new(), + peak_level: 0.0, } } @@ -987,7 +999,7 @@ impl AudioTrack { } // Calculate combined gain - let combined_gain = clip.gain * self.volume; + let combined_gain = clip.gain; let mut total_rendered = 0; diff --git a/daw-backend/src/command/types.rs b/daw-backend/src/command/types.rs index 855f0bb..75a1560 100644 --- a/daw-backend/src/command/types.rs +++ b/daw-backend/src/command/types.rs @@ -226,6 +226,12 @@ pub enum Command { priority: u8, // 0=Low, 1=Medium, 2=High }, + // Input monitoring/gain commands + /// Enable or disable input monitoring (mic level metering) + SetInputMonitoring(bool), + /// Set the input gain multiplier (applied before recording) + SetInputGain(f32), + // Async audio import /// Import an audio file asynchronously. The engine probes the file format /// and either memory-maps it (WAV/AIFF) or sets up stream decode @@ -333,6 +339,13 @@ pub enum AudioEvent { channels: u32, }, + /// Peak amplitude of mic input (for input monitoring meter) + InputLevel(f32), + /// Peak amplitude of mix output (for master meter) + OutputLevel(f32), + /// Per-track playback peak levels + TrackLevels(Vec<(TrackId, f32)>), + /// Background waveform decode progress/completion for a compressed audio file. /// Internal event — consumed by the engine to update the pool, not forwarded to UI. /// `decoded_frames` < `total_frames` means partial; equal means complete. 
diff --git a/lightningbeam-ui/lightningbeam-core/src/actions/set_layer_properties.rs b/lightningbeam-ui/lightningbeam-core/src/actions/set_layer_properties.rs index dd8a251..c4a8c00 100644 --- a/lightningbeam-ui/lightningbeam-core/src/actions/set_layer_properties.rs +++ b/lightningbeam-ui/lightningbeam-core/src/actions/set_layer_properties.rs @@ -12,6 +12,7 @@ use uuid::Uuid; #[derive(Clone, Debug)] pub enum LayerProperty { Volume(f64), + InputGain(f64), Muted(bool), Soloed(bool), Locked(bool), @@ -25,6 +26,7 @@ pub enum LayerProperty { #[derive(Clone, Debug)] enum OldValue { Volume(f64), + InputGain(f64), Muted(bool), Soloed(bool), Locked(bool), @@ -85,6 +87,7 @@ impl Action for SetLayerPropertiesAction { if self.old_values[i].is_none() { self.old_values[i] = Some(match &self.property { LayerProperty::Volume(_) => OldValue::Volume(layer.volume()), + LayerProperty::InputGain(_) => OldValue::InputGain(layer.layer().input_gain), LayerProperty::Muted(_) => OldValue::Muted(layer.muted()), LayerProperty::Soloed(_) => OldValue::Soloed(layer.soloed()), LayerProperty::Locked(_) => OldValue::Locked(layer.locked()), @@ -104,6 +107,7 @@ impl Action for SetLayerPropertiesAction { // Set new value match &self.property { LayerProperty::Volume(v) => layer.set_volume(*v), + LayerProperty::InputGain(g) => layer.layer_mut().input_gain = *g, LayerProperty::Muted(m) => layer.set_muted(*m), LayerProperty::Soloed(s) => layer.set_soloed(*s), LayerProperty::Locked(l) => layer.set_locked(*l), @@ -128,6 +132,7 @@ impl Action for SetLayerPropertiesAction { if let Some(old_value) = &self.old_values[i] { match old_value { OldValue::Volume(v) => layer.set_volume(*v), + OldValue::InputGain(g) => layer.layer_mut().input_gain = *g, OldValue::Muted(m) => layer.set_muted(*m), OldValue::Soloed(s) => layer.set_soloed(*s), OldValue::Locked(l) => layer.set_locked(*l), @@ -159,6 +164,7 @@ impl Action for SetLayerPropertiesAction { if let Some(&track_id) = backend.layer_to_track_map.get(&layer_id) { match &self.property { LayerProperty::Volume(v) => controller.set_track_volume(track_id, *v as f32), + LayerProperty::InputGain(g) => controller.set_input_gain(*g as f32), LayerProperty::Muted(m) => controller.set_track_mute(track_id, *m), LayerProperty::Soloed(s) => controller.set_track_solo(track_id, *s), _ => {} // Locked/Opacity/Visible/CameraEnabled are UI-only @@ -183,6 +189,7 @@ impl Action for SetLayerPropertiesAction { if let Some(old_value) = &self.old_values[i] { match old_value { OldValue::Volume(v) => controller.set_track_volume(track_id, *v as f32), + OldValue::InputGain(g) => controller.set_input_gain(*g as f32), OldValue::Muted(m) => controller.set_track_mute(track_id, *m), OldValue::Soloed(s) => controller.set_track_solo(track_id, *s), _ => {} // Locked/Opacity/Visible are UI-only @@ -196,6 +203,7 @@ impl Action for SetLayerPropertiesAction { fn description(&self) -> String { let property_name = match &self.property { LayerProperty::Volume(_) => "volume", + LayerProperty::InputGain(_) => "input gain", LayerProperty::Muted(_) => "mute", LayerProperty::Soloed(_) => "solo", LayerProperty::Locked(_) => "lock", diff --git a/lightningbeam-ui/lightningbeam-core/src/layer.rs b/lightningbeam-ui/lightningbeam-core/src/layer.rs index 157132f..3ecd6ef 100644 --- a/lightningbeam-ui/lightningbeam-core/src/layer.rs +++ b/lightningbeam-ui/lightningbeam-core/src/layer.rs @@ -60,6 +60,8 @@ pub trait LayerTrait { fn set_locked(&mut self, locked: bool); } +fn default_input_gain() -> f64 { 1.0 } + /// Base layer structure #[derive(Clone, 
Debug, Serialize, Deserialize)] pub struct Layer { @@ -84,6 +86,10 @@ pub struct Layer { /// Audio volume (1.0 = 100%, affects nested audio layers/clips) pub volume: f64, + /// Input gain for recording (1.0 = unity, range 0.0–4.0) + #[serde(default = "default_input_gain")] + pub input_gain: f64, + /// Audio mute state pub muted: bool, @@ -108,6 +114,7 @@ impl Layer { visible: true, opacity: 1.0, volume: 1.0, // 100% volume + input_gain: 1.0, muted: false, soloed: false, locked: false, @@ -125,6 +132,7 @@ impl Layer { visible: true, opacity: 1.0, volume: 1.0, + input_gain: 1.0, muted: false, soloed: false, locked: false, diff --git a/lightningbeam-ui/lightningbeam-editor/src/main.rs b/lightningbeam-ui/lightningbeam-editor/src/main.rs index b027a3f..a418258 100644 --- a/lightningbeam-ui/lightningbeam-editor/src/main.rs +++ b/lightningbeam-ui/lightningbeam-editor/src/main.rs @@ -814,6 +814,11 @@ struct EditorApp { region_selection: Option, region_select_mode: lightningbeam_core::tool::RegionSelectMode, + // VU meter levels + input_level: f32, + output_level: f32, + track_levels: HashMap, + /// Cache for MIDI event data (keyed by backend midi_clip_id) /// Prevents repeated backend queries for the same MIDI clip /// Format: (timestamp, note_number, velocity, is_note_on) @@ -1057,6 +1062,9 @@ impl EditorApp { polygon_sides: 5, // Default to pentagon region_selection: None, region_select_mode: lightningbeam_core::tool::RegionSelectMode::default(), + input_level: 0.0, + output_level: 0.0, + track_levels: HashMap::new(), midi_event_cache: HashMap::new(), // Initialize empty MIDI event cache audio_duration_cache: HashMap::new(), // Initialize empty audio duration cache audio_pools_with_new_waveforms: HashSet::new(), // Track pool indices with new raw audio @@ -4672,6 +4680,18 @@ impl eframe::App for EditorApp { ); ctx.request_repaint(); } + AudioEvent::InputLevel(peak) => { + self.input_level = self.input_level.max(peak); + } + AudioEvent::OutputLevel(peak) => { + self.output_level = self.output_level.max(peak); + } + AudioEvent::TrackLevels(levels) => { + for (track_id, peak) in levels { + let entry = self.track_levels.entry(track_id).or_insert(0.0); + *entry = entry.max(peak); + } + } _ => {} // Ignore other events for now } } @@ -4686,6 +4706,38 @@ impl eframe::App for EditorApp { } } + // Update input monitoring based on active layer + if let Some(controller) = &self.audio_controller { + let should_monitor = self.active_layer_id.map_or(false, |layer_id| { + let doc = self.action_executor.document(); + if let Some(layer) = doc.get_layer(&layer_id) { + matches!(layer, lightningbeam_core::layer::AnyLayer::Audio(a) if a.audio_layer_type == lightningbeam_core::layer::AudioLayerType::Sampled) + } else { + false + } + }); + if let Ok(mut ctrl) = controller.try_lock() { + ctrl.set_input_monitoring(should_monitor); + } + } + + // Decay VU meter levels (~1.5s full fall at 60fps) + { + let decay = 0.97f32; + self.input_level *= decay; + self.output_level *= decay; + for level in self.track_levels.values_mut() { + *level *= decay; + } + // Request repaint while any level is visible + let any_active = self.input_level > 0.001 + || self.output_level > 0.001 + || self.track_levels.values().any(|&v| v > 0.001); + if any_active { + ctx.request_repaint(); + } + } + let _post_events_ms = _frame_start.elapsed().as_secs_f64() * 1000.0; // Request continuous repaints when playing to update time display @@ -4925,6 +4977,27 @@ impl eframe::App for EditorApp { } }); + // Mix output VU meter (thin bar below menu) + if 
self.app_mode != AppMode::StartScreen && self.output_level > 0.001 { + egui::TopBottomPanel::top("mix_meter").exact_height(4.0).show(ctx, |ui| { + let rect = ui.available_rect_before_wrap(); + let level = self.output_level.min(1.0); + let filled_width = rect.width() * level; + let color = if level > 0.9 { + egui::Color32::from_rgb(220, 50, 50) + } else if level > 0.7 { + egui::Color32::from_rgb(220, 200, 50) + } else { + egui::Color32::from_rgb(50, 200, 80) + }; + let filled_rect = egui::Rect::from_min_size( + rect.left_top(), + egui::vec2(filled_width, rect.height()), + ); + ui.painter().rect_filled(filled_rect, 0.0, color); + }); + } + // Render start screen or editor based on app mode if self.app_mode == AppMode::StartScreen { self.render_start_screen(ctx); @@ -5075,6 +5148,10 @@ impl eframe::App for EditorApp { target_format: self.target_format, pending_menu_actions: &mut pending_menu_actions, clipboard_manager: &mut self.clipboard_manager, + input_level: self.input_level, + output_level: self.output_level, + track_levels: &self.track_levels, + track_to_layer_map: &self.track_to_layer_map, waveform_stereo: self.config.waveform_stereo, project_generation: &mut self.project_generation, script_to_edit: &mut self.script_to_edit, diff --git a/lightningbeam-ui/lightningbeam-editor/src/panes/mod.rs b/lightningbeam-ui/lightningbeam-editor/src/panes/mod.rs index 81d3910..5983c55 100644 --- a/lightningbeam-ui/lightningbeam-editor/src/panes/mod.rs +++ b/lightningbeam-ui/lightningbeam-editor/src/panes/mod.rs @@ -244,6 +244,13 @@ pub struct SharedPaneState<'a> { pub pending_menu_actions: &'a mut Vec, /// Clipboard manager for cut/copy/paste operations pub clipboard_manager: &'a mut lightningbeam_core::clipboard::ClipboardManager, + // VU meter levels + pub input_level: f32, + #[allow(dead_code)] // Used by mix meter in main.rs, available to panes + pub output_level: f32, + pub track_levels: &'a std::collections::HashMap, + #[allow(dead_code)] // Available for panes that need reverse track->layer lookup + pub track_to_layer_map: &'a std::collections::HashMap, /// Whether to show waveforms as stacked stereo (true) or combined mono (false) pub waveform_stereo: bool, /// Generation counter - incremented on project load to force reloads diff --git a/lightningbeam-ui/lightningbeam-editor/src/panes/timeline.rs b/lightningbeam-ui/lightningbeam-editor/src/panes/timeline.rs index 3ed9fc0..dd4b44a 100644 --- a/lightningbeam-ui/lightningbeam-editor/src/panes/timeline.rs +++ b/lightningbeam-ui/lightningbeam-editor/src/panes/timeline.rs @@ -1260,6 +1260,9 @@ impl TimelinePane { pending_actions: &mut Vec>, _document: &lightningbeam_core::document::Document, context_layers: &[&lightningbeam_core::layer::AnyLayer], + layer_to_track_map: &std::collections::HashMap, + track_levels: &std::collections::HashMap, + input_level: f32, ) { // Background for header column let header_style = theme.style(".timeline-header", ui.ctx()); @@ -1659,6 +1662,10 @@ impl TimelinePane { (response, temp_slider_value) }).inner; + // Block layer drag while interacting with the slider + if volume_response.0.dragged() || volume_response.0.has_focus() { + self.layer_control_clicked = true; + } if volume_response.0.changed() { self.layer_control_clicked = true; // Map slider position (0.0-1.0) back to volume (0.0-2.0) @@ -1678,6 +1685,93 @@ impl TimelinePane { )); } + // Input gain slider for sampled audio layers (below volume slider) + if let lightningbeam_core::layer::AnyLayer::Audio(audio_layer) = layer_for_controls { + if 
audio_layer.audio_layer_type == lightningbeam_core::layer::AudioLayerType::Sampled { + let gain_slider_rect = egui::Rect::from_min_size( + egui::pos2(controls_right - slider_width, controls_top + 22.0), + egui::vec2(slider_width, 16.0), + ); + let current_gain = audio_layer.layer.input_gain; + + // Map gain (0.0-4.0) to slider (0.0-1.0): linear + let mut slider_val = (current_gain / 4.0) as f32; + let gain_response = ui.scope_builder(egui::UiBuilder::new().max_rect(gain_slider_rect), |ui| { + let slider = egui::Slider::new(&mut slider_val, 0.0..=1.0f32) + .show_value(false); + ui.add(slider) + }).inner; + + // Block layer drag while interacting with the slider + if gain_response.dragged() || gain_response.has_focus() { + self.layer_control_clicked = true; + } + if gain_response.changed() { + self.layer_control_clicked = true; + let new_gain = (slider_val * 4.0) as f64; + pending_actions.push(Box::new( + lightningbeam_core::actions::SetLayerPropertiesAction::new( + layer_id, + lightningbeam_core::actions::LayerProperty::InputGain(new_gain), + ) + )); + } + + // Label + let label_rect = egui::Rect::from_min_size( + egui::pos2(gain_slider_rect.min.x - 26.0, controls_top + 22.0), + egui::vec2(24.0, 16.0), + ); + ui.painter().text( + label_rect.center(), + egui::Align2::CENTER_CENTER, + "Gain", + egui::FontId::proportional(9.0), + egui::Color32::from_gray(140), + ); + } + } + + // Per-layer VU meter bar (4px tall at bottom of header) + { + // Look up the track level for this layer + let mut level = 0.0f32; + if let Some(&track_id) = layer_to_track_map.get(&layer_id) { + if let Some(&track_level) = track_levels.get(&track_id) { + level = track_level; + } + } + + // For active sampled audio layer, show max of track level and input level + let is_active_sampled_audio = active_layer_id.map_or(false, |id| id == layer_id) + && matches!(layer_for_controls, lightningbeam_core::layer::AnyLayer::Audio(a) if a.audio_layer_type == lightningbeam_core::layer::AudioLayerType::Sampled); + if is_active_sampled_audio { + level = level.max(input_level); + } + + if level > 0.001 { + let meter_height = 4.0; + let meter_rect = egui::Rect::from_min_size( + egui::pos2(header_rect.min.x, header_rect.max.y - meter_height - 1.0), + egui::vec2(header_rect.width(), meter_height), + ); + let clamped = level.min(1.0); + let filled_width = meter_rect.width() * clamped; + let color = if clamped > 0.9 { + egui::Color32::from_rgb(220, 50, 50) + } else if clamped > 0.7 { + egui::Color32::from_rgb(220, 200, 50) + } else { + egui::Color32::from_rgb(50, 200, 80) + }; + let filled = egui::Rect::from_min_size( + meter_rect.left_top(), + egui::vec2(filled_width, meter_rect.height()), + ); + ui.painter().rect_filled(filled, 0.0, color); + } + } + // Separator line at bottom ui.painter().line_segment( [ @@ -4278,7 +4372,7 @@ impl PaneRenderer for TimelinePane { // Render layer header column with clipping ui.set_clip_rect(layer_headers_rect.intersect(original_clip_rect)); - self.render_layer_headers(ui, layer_headers_rect, shared.theme, shared.active_layer_id, shared.focus, &mut shared.pending_actions, document, &context_layers); + self.render_layer_headers(ui, layer_headers_rect, shared.theme, shared.active_layer_id, shared.focus, &mut shared.pending_actions, document, &context_layers, shared.layer_to_track_map, shared.track_levels, shared.input_level); // Render time ruler (clip to ruler rect) ui.set_clip_rect(ruler_rect.intersect(original_clip_rect)); From 49b822da8c133fd876abeef902c8b1b06d20fd1c Mon Sep 17 00:00:00 2001 From: Skyler 
Lehmkuhl Date: Sun, 1 Mar 2026 15:04:58 -0500 Subject: [PATCH 6/6] Add final mix VU meters --- daw-backend/src/audio/engine.rs | 56 ++++++++++++------- daw-backend/src/command/types.rs | 4 +- .../lightningbeam-editor/src/main.rs | 35 +++--------- .../lightningbeam-editor/src/panes/mod.rs | 3 +- .../src/panes/timeline.rs | 36 ++++++++++++ 5 files changed, 82 insertions(+), 52 deletions(-) diff --git a/daw-backend/src/audio/engine.rs b/daw-backend/src/audio/engine.rs index d6f4713..b07f4ac 100644 --- a/daw-backend/src/audio/engine.rs +++ b/daw-backend/src/audio/engine.rs @@ -76,7 +76,8 @@ pub struct Engine { input_gain: f32, input_level_peak: f32, input_level_counter: usize, - output_level_peak: f32, + output_level_peak_l: f32, + output_level_peak_r: f32, output_level_counter: usize, track_level_counter: usize, @@ -151,7 +152,8 @@ impl Engine { input_gain: 1.0, input_level_peak: 0.0, input_level_counter: 0, - output_level_peak: 0.0, + output_level_peak_l: 0.0, + output_level_peak_r: 0.0, output_level_counter: 0, track_level_counter: 0, debug_audio: std::env::var("DAW_AUDIO_DEBUG").map_or(false, |v| v == "1"), @@ -361,25 +363,6 @@ impl Engine { self.channels, ); - // Compute output peak for master VU meter - let output_peak = output.iter().map(|s| s.abs()).fold(0.0f32, f32::max); - self.output_level_peak = self.output_level_peak.max(output_peak); - self.output_level_counter += output.len(); - let meter_interval = self.sample_rate as usize / 20; // ~50ms - if self.output_level_counter >= meter_interval { - let _ = self.event_tx.push(AudioEvent::OutputLevel(self.output_level_peak)); - self.output_level_peak = 0.0; - self.output_level_counter = 0; - } - - // Send per-track peak levels periodically (~50ms) - self.track_level_counter += output.len(); - if self.track_level_counter >= meter_interval { - let levels = self.project.collect_track_peaks(); - let _ = self.event_tx.push(AudioEvent::TrackLevels(levels)); - self.track_level_counter = 0; - } - // Update playhead (convert total samples to frames) self.playhead += (output.len() / self.channels as usize) as u64; @@ -415,6 +398,37 @@ impl Engine { self.process_live_midi(output); } + // Compute stereo output peaks for master VU meter (independent of playback state) + { + let channels = self.channels as usize; + for frame in output.chunks(channels) { + if channels >= 2 { + self.output_level_peak_l = self.output_level_peak_l.max(frame[0].abs()); + self.output_level_peak_r = self.output_level_peak_r.max(frame[1].abs()); + } else { + let v = frame[0].abs(); + self.output_level_peak_l = self.output_level_peak_l.max(v); + self.output_level_peak_r = self.output_level_peak_r.max(v); + } + } + self.output_level_counter += output.len(); + let meter_interval = self.sample_rate as usize / 20; // ~50ms + if self.output_level_counter >= meter_interval { + let _ = self.event_tx.push(AudioEvent::OutputLevel(self.output_level_peak_l, self.output_level_peak_r)); + self.output_level_peak_l = 0.0; + self.output_level_peak_r = 0.0; + self.output_level_counter = 0; + } + + // Send per-track peak levels periodically + self.track_level_counter += output.len(); + if self.track_level_counter >= meter_interval { + let levels = self.project.collect_track_peaks(); + let _ = self.event_tx.push(AudioEvent::TrackLevels(levels)); + self.track_level_counter = 0; + } + } + // Process input monitoring and/or recording (independent of playback state) let is_recording = self.recording_state.is_some(); if is_recording || self.input_monitoring { diff --git 
a/daw-backend/src/command/types.rs b/daw-backend/src/command/types.rs index 75a1560..a5611c9 100644 --- a/daw-backend/src/command/types.rs +++ b/daw-backend/src/command/types.rs @@ -341,8 +341,8 @@ pub enum AudioEvent { /// Peak amplitude of mic input (for input monitoring meter) InputLevel(f32), - /// Peak amplitude of mix output (for master meter) - OutputLevel(f32), + /// Peak amplitude of mix output (for master meter), stereo (left, right) + OutputLevel(f32, f32), /// Per-track playback peak levels TrackLevels(Vec<(TrackId, f32)>), diff --git a/lightningbeam-ui/lightningbeam-editor/src/main.rs b/lightningbeam-ui/lightningbeam-editor/src/main.rs index a418258..cbe5393 100644 --- a/lightningbeam-ui/lightningbeam-editor/src/main.rs +++ b/lightningbeam-ui/lightningbeam-editor/src/main.rs @@ -816,7 +816,7 @@ struct EditorApp { // VU meter levels input_level: f32, - output_level: f32, + output_level: (f32, f32), track_levels: HashMap, /// Cache for MIDI event data (keyed by backend midi_clip_id) @@ -1063,7 +1063,7 @@ impl EditorApp { region_selection: None, region_select_mode: lightningbeam_core::tool::RegionSelectMode::default(), input_level: 0.0, - output_level: 0.0, + output_level: (0.0, 0.0), track_levels: HashMap::new(), midi_event_cache: HashMap::new(), // Initialize empty MIDI event cache audio_duration_cache: HashMap::new(), // Initialize empty audio duration cache @@ -4683,8 +4683,9 @@ impl eframe::App for EditorApp { AudioEvent::InputLevel(peak) => { self.input_level = self.input_level.max(peak); } - AudioEvent::OutputLevel(peak) => { - self.output_level = self.output_level.max(peak); + AudioEvent::OutputLevel(peak_l, peak_r) => { + self.output_level.0 = self.output_level.0.max(peak_l); + self.output_level.1 = self.output_level.1.max(peak_r); } AudioEvent::TrackLevels(levels) => { for (track_id, peak) in levels { @@ -4725,13 +4726,14 @@ impl eframe::App for EditorApp { { let decay = 0.97f32; self.input_level *= decay; - self.output_level *= decay; + self.output_level.0 *= decay; + self.output_level.1 *= decay; for level in self.track_levels.values_mut() { *level *= decay; } // Request repaint while any level is visible let any_active = self.input_level > 0.001 - || self.output_level > 0.001 + || self.output_level.0 > 0.001 || self.output_level.1 > 0.001 || self.track_levels.values().any(|&v| v > 0.001); if any_active { ctx.request_repaint(); @@ -4977,27 +4979,6 @@ impl eframe::App for EditorApp { } }); - // Mix output VU meter (thin bar below menu) - if self.app_mode != AppMode::StartScreen && self.output_level > 0.001 { - egui::TopBottomPanel::top("mix_meter").exact_height(4.0).show(ctx, |ui| { - let rect = ui.available_rect_before_wrap(); - let level = self.output_level.min(1.0); - let filled_width = rect.width() * level; - let color = if level > 0.9 { - egui::Color32::from_rgb(220, 50, 50) - } else if level > 0.7 { - egui::Color32::from_rgb(220, 200, 50) - } else { - egui::Color32::from_rgb(50, 200, 80) - }; - let filled_rect = egui::Rect::from_min_size( - rect.left_top(), - egui::vec2(filled_width, rect.height()), - ); - ui.painter().rect_filled(filled_rect, 0.0, color); - }); - } - // Render start screen or editor based on app mode if self.app_mode == AppMode::StartScreen { self.render_start_screen(ctx); diff --git a/lightningbeam-ui/lightningbeam-editor/src/panes/mod.rs b/lightningbeam-ui/lightningbeam-editor/src/panes/mod.rs index 5983c55..eee3aaf 100644 --- a/lightningbeam-ui/lightningbeam-editor/src/panes/mod.rs +++ b/lightningbeam-ui/lightningbeam-editor/src/panes/mod.rs 
@@ -246,8 +246,7 @@ pub struct SharedPaneState<'a> {
     pub clipboard_manager: &'a mut lightningbeam_core::clipboard::ClipboardManager,
     // VU meter levels
     pub input_level: f32,
-    #[allow(dead_code)] // Used by mix meter in main.rs, available to panes
-    pub output_level: f32,
+    pub output_level: (f32, f32),
     pub track_levels: &'a std::collections::HashMap,
     #[allow(dead_code)] // Available for panes that need reverse track->layer lookup
     pub track_to_layer_map: &'a std::collections::HashMap,
diff --git a/lightningbeam-ui/lightningbeam-editor/src/panes/timeline.rs b/lightningbeam-ui/lightningbeam-editor/src/panes/timeline.rs
index dd4b44a..dc6322a 100644
--- a/lightningbeam-ui/lightningbeam-editor/src/panes/timeline.rs
+++ b/lightningbeam-ui/lightningbeam-editor/src/panes/timeline.rs
@@ -4234,6 +4234,42 @@ impl PaneRenderer for TimelinePane {
         ui.separator();
+        // Stereo mix output VU meter (two stacked bars: L on top, R on bottom)
+        {
+            let meter_width = 80.0;
+            let meter_height = 14.0; // total height for both bars + gap
+            let bar_height = 6.0;
+            let gap = 2.0;
+            let (meter_rect, _) = ui.allocate_exact_size(
+                egui::vec2(meter_width, meter_height),
+                egui::Sense::hover(),
+            );
+            // Background
+            ui.painter().rect_filled(meter_rect, 2.0, egui::Color32::from_gray(30));
+
+            let levels = [shared.output_level.0.min(1.0), shared.output_level.1.min(1.0)];
+            for (i, &level) in levels.iter().enumerate() {
+                let bar_y = meter_rect.min.y + i as f32 * (bar_height + gap);
+                if level > 0.001 {
+                    let filled_width = meter_rect.width() * level;
+                    let color = if level > 0.9 {
+                        egui::Color32::from_rgb(220, 50, 50)
+                    } else if level > 0.7 {
+                        egui::Color32::from_rgb(220, 200, 50)
+                    } else {
+                        egui::Color32::from_rgb(50, 200, 80)
+                    };
+                    let filled_rect = egui::Rect::from_min_size(
+                        egui::pos2(meter_rect.min.x, bar_y),
+                        egui::vec2(filled_width, bar_height),
+                    );
+                    ui.painter().rect_filled(filled_rect, 1.0, color);
+                }
+            }
+        }
+
+        ui.separator();
+
         // BPM control
         let mut bpm_val = bpm;
         ui.label("BPM:");
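
Note: the meters added in these commits work on linear peak amplitudes (0.0–1.0), decayed by roughly 0.97 per UI frame. If a dB readout were ever wanted alongside the bar meters, the conversion is the standard dBFS formula 20 * log10(peak). The sketch below is illustrative only and is not part of the patches above; the MeterReadout type and its method names are hypothetical.

/// Hypothetical helper, not present in the patches above: holds a linear peak
/// fed from AudioEvent::InputLevel / OutputLevel / TrackLevels and exposes a
/// dBFS readout, reusing the same ~0.97-per-frame decay the editor applies.
pub struct MeterReadout {
    peak: f32, // linear peak amplitude, 0.0..=1.0
}

impl MeterReadout {
    pub fn new() -> Self {
        Self { peak: 0.0 }
    }

    /// Feed a new peak from an audio event; keep the louder of held and new.
    pub fn push(&mut self, linear_peak: f32) {
        self.peak = self.peak.max(linear_peak);
    }

    /// Call once per UI frame (mirrors the decay loop in main.rs).
    pub fn decay(&mut self) {
        self.peak *= 0.97;
    }

    /// Linear amplitude -> dBFS; full scale (1.0) is 0 dBFS, silence is floored.
    pub fn dbfs(&self) -> f32 {
        if self.peak <= 1e-6 { -120.0 } else { 20.0 * self.peak.log10() }
    }
}

fn main() {
    let mut meter = MeterReadout::new();
    meter.push(0.5);                      // a 0.5 peak is about -6 dBFS
    println!("{:.1} dBFS", meter.dbfs()); // prints "-6.0 dBFS"
    meter.decay();                        // per-frame falloff
}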