Add layer controls, fix dragging for clips

This commit is contained in:
Skyler Lehmkuhl 2025-11-29 12:33:17 -05:00
parent 5fbb2c078b
commit f9761b8af3
7 changed files with 972 additions and 109 deletions

View File

@ -8,6 +8,7 @@ pub mod add_shape;
pub mod move_clip_instances;
pub mod move_objects;
pub mod paint_bucket;
pub mod set_layer_properties;
pub mod transform_clip_instances;
pub mod transform_objects;
pub mod trim_clip_instances;
@ -17,6 +18,7 @@ pub use add_shape::AddShapeAction;
pub use move_clip_instances::MoveClipInstancesAction;
pub use move_objects::MoveShapeInstancesAction;
pub use paint_bucket::PaintBucketAction;
pub use set_layer_properties::{LayerProperty, SetLayerPropertiesAction};
pub use transform_clip_instances::TransformClipInstancesAction;
pub use transform_objects::TransformShapeInstancesAction;
pub use trim_clip_instances::{TrimClipInstancesAction, TrimData, TrimType};

View File

@ -0,0 +1,224 @@
//! Set layer properties action
//!
//! Handles changing layer properties (volume, mute, solo, lock, opacity, visible)
//! with undo/redo support.
use crate::action::Action;
use crate::document::Document;
use crate::layer::LayerTrait;
use uuid::Uuid;
/// Property that can be set on a layer
///
/// Each variant carries the new value to apply to the target layer(s).
/// Covers both audio-oriented properties (volume, mute, solo) and
/// visual/editing properties (opacity, visible, lock).
#[derive(Clone, Debug)]
pub enum LayerProperty {
    /// Audio volume (tests show the initial value is 1.0 — presumably unity gain)
    Volume(f64),
    /// Whether the layer's audio is muted
    Muted(bool),
    /// Whether the layer is soloed; when any layer is soloed, rendering
    /// draws only soloed layers
    Soloed(bool),
    /// Whether the layer is locked (NOTE(review): lock's effect on editing
    /// is not visible here — confirm against the editor code)
    Locked(bool),
    /// Layer opacity, cascaded multiplicatively during rendering
    Opacity(f64),
    /// Whether the layer is visible
    Visible(bool),
}
/// Stored old value for rollback
///
/// Mirrors [`LayerProperty`] variant-for-variant; kept as a separate,
/// module-private type so the undo snapshot cannot leak into the public API.
#[derive(Clone, Debug)]
enum OldValue {
    /// Previous volume before the action ran
    Volume(f64),
    /// Previous muted flag
    Muted(bool),
    /// Previous soloed flag
    Soloed(bool),
    /// Previous locked flag
    Locked(bool),
    /// Previous opacity
    Opacity(f64),
    /// Previous visible flag
    Visible(bool),
}
/// Action that sets a property on one or more layers
///
/// Supports undo/redo: the first `execute` snapshots each layer's previous
/// value into `old_values`, which `rollback` later restores.
pub struct SetLayerPropertiesAction {
    /// IDs of layers to modify
    layer_ids: Vec<Uuid>,
    /// Property to set
    property: LayerProperty,
    /// Old values for rollback (stored after first execution).
    /// Index-parallel with `layer_ids`: `old_values[i]` is the snapshot
    /// for `layer_ids[i]`; `None` until that layer is first modified.
    old_values: Vec<Option<OldValue>>,
}
impl SetLayerPropertiesAction {
    /// Create a new action to set a property on a single layer
    ///
    /// # Arguments
    ///
    /// * `layer_id` - ID of the layer to modify
    /// * `property` - Property to set
    pub fn new(layer_id: Uuid, property: LayerProperty) -> Self {
        // Delegate to the batch constructor so the single- and multi-layer
        // initialization paths cannot drift apart.
        Self::new_batch(vec![layer_id], property)
    }

    /// Create a new action to set a property on multiple layers
    ///
    /// # Arguments
    ///
    /// * `layer_ids` - IDs of layers to modify
    /// * `property` - Property to set on all layers
    pub fn new_batch(layer_ids: Vec<Uuid>, property: LayerProperty) -> Self {
        // One rollback slot per layer, index-parallel with `layer_ids`;
        // populated lazily on first execute().
        let old_values = vec![None; layer_ids.len()];
        Self {
            layer_ids,
            property,
            old_values,
        }
    }
}
impl Action for SetLayerPropertiesAction {
    /// Apply the property to every target layer, snapshotting previous
    /// values on first execution so the action can be rolled back.
    fn execute(&mut self, document: &mut Document) {
        // Walk the id list alongside its parallel rollback-slot list.
        for (id, slot) in self.layer_ids.iter().zip(self.old_values.iter_mut()) {
            if let Some(layer) = document.root_mut().get_child_mut(id) {
                // Snapshot the current value exactly once, so a redo after
                // an undo does not overwrite the original state.
                if slot.is_none() {
                    let snapshot = match &self.property {
                        LayerProperty::Volume(_) => OldValue::Volume(layer.volume()),
                        LayerProperty::Muted(_) => OldValue::Muted(layer.muted()),
                        LayerProperty::Soloed(_) => OldValue::Soloed(layer.soloed()),
                        LayerProperty::Locked(_) => OldValue::Locked(layer.locked()),
                        LayerProperty::Opacity(_) => OldValue::Opacity(layer.opacity()),
                        LayerProperty::Visible(_) => OldValue::Visible(layer.visible()),
                    };
                    *slot = Some(snapshot);
                }
                // Apply the requested value.
                match &self.property {
                    LayerProperty::Volume(value) => layer.set_volume(*value),
                    LayerProperty::Muted(value) => layer.set_muted(*value),
                    LayerProperty::Soloed(value) => layer.set_soloed(*value),
                    LayerProperty::Locked(value) => layer.set_locked(*value),
                    LayerProperty::Opacity(value) => layer.set_opacity(*value),
                    LayerProperty::Visible(value) => layer.set_visible(*value),
                }
            }
        }
    }

    /// Restore every layer's snapshotted value, undoing `execute`.
    fn rollback(&mut self, document: &mut Document) {
        for (id, slot) in self.layer_ids.iter().zip(self.old_values.iter()) {
            if let Some(layer) = document.root_mut().get_child_mut(id) {
                // Only layers that were actually modified carry a snapshot.
                if let Some(previous) = slot {
                    match previous {
                        OldValue::Volume(value) => layer.set_volume(*value),
                        OldValue::Muted(value) => layer.set_muted(*value),
                        OldValue::Soloed(value) => layer.set_soloed(*value),
                        OldValue::Locked(value) => layer.set_locked(*value),
                        OldValue::Opacity(value) => layer.set_opacity(*value),
                        OldValue::Visible(value) => layer.set_visible(*value),
                    }
                }
            }
        }
    }

    /// Human-readable summary for undo-history UI.
    fn description(&self) -> String {
        let property_name = match &self.property {
            LayerProperty::Volume(_) => "volume",
            LayerProperty::Muted(_) => "mute",
            LayerProperty::Soloed(_) => "solo",
            LayerProperty::Locked(_) => "lock",
            LayerProperty::Opacity(_) => "opacity",
            LayerProperty::Visible(_) => "visibility",
        };
        match self.layer_ids.len() {
            1 => format!("Set layer {}", property_name),
            n => format!("Set layer {} on {} layers", property_name, n),
        }
    }
}
// Unit tests: exercise execute/rollback round-trips for a scalar property
// (volume), a boolean property (mute), and the batch constructor (solo).
#[cfg(test)]
mod tests {
    use super::*;
    use crate::layer::{AnyLayer, VectorLayer};

    #[test]
    fn test_set_volume() {
        let mut document = Document::new("Test");
        let layer = VectorLayer::new("Test Layer");
        let layer_id = document.root_mut().add_child(AnyLayer::Vector(layer));
        // Initial volume should be 1.0
        let layer_ref = document.root().find_child(&layer_id).unwrap();
        assert_eq!(layer_ref.volume(), 1.0);
        // Create and execute action
        let mut action = SetLayerPropertiesAction::new(layer_id, LayerProperty::Volume(0.5));
        action.execute(&mut document);
        // Verify volume changed
        let layer_ref = document.root().find_child(&layer_id).unwrap();
        assert_eq!(layer_ref.volume(), 0.5);
        // Rollback
        action.rollback(&mut document);
        // Verify volume restored to the snapshotted original
        let layer_ref = document.root().find_child(&layer_id).unwrap();
        assert_eq!(layer_ref.volume(), 1.0);
    }

    #[test]
    fn test_toggle_mute() {
        let mut document = Document::new("Test");
        let layer = VectorLayer::new("Test Layer");
        let layer_id = document.root_mut().add_child(AnyLayer::Vector(layer));
        // Initial state should be unmuted
        let layer_ref = document.root().find_child(&layer_id).unwrap();
        assert_eq!(layer_ref.muted(), false);
        // Mute
        let mut action = SetLayerPropertiesAction::new(layer_id, LayerProperty::Muted(true));
        action.execute(&mut document);
        let layer_ref = document.root().find_child(&layer_id).unwrap();
        assert_eq!(layer_ref.muted(), true);
        // Unmute via rollback
        action.rollback(&mut document);
        let layer_ref = document.root().find_child(&layer_id).unwrap();
        assert_eq!(layer_ref.muted(), false);
    }

    #[test]
    fn test_batch_solo() {
        let mut document = Document::new("Test");
        let layer1 = VectorLayer::new("Layer 1");
        let layer2 = VectorLayer::new("Layer 2");
        let id1 = document.root_mut().add_child(AnyLayer::Vector(layer1));
        let id2 = document.root_mut().add_child(AnyLayer::Vector(layer2));
        // Solo both layers with a single batch action
        let mut action = SetLayerPropertiesAction::new_batch(
            vec![id1, id2],
            LayerProperty::Soloed(true),
        );
        action.execute(&mut document);
        // Verify both soloed
        assert_eq!(document.root().find_child(&id1).unwrap().soloed(), true);
        assert_eq!(document.root().find_child(&id2).unwrap().soloed(), true);
        // Rollback restores each layer's own snapshot
        action.rollback(&mut document);
        // Verify both unsoloed
        assert_eq!(document.root().find_child(&id1).unwrap().soloed(), false);
        assert_eq!(document.root().find_child(&id2).unwrap().soloed(), false);
    }
}

View File

@ -16,6 +16,7 @@ use crate::layer_tree::LayerTree;
use crate::object::Transform;
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use vello::kurbo::{Rect, Affine, Shape as KurboShape};
/// Vector clip containing nested layers
///
@ -73,6 +74,88 @@ impl VectorClip {
layers: LayerTree::new(),
}
}
/// Calculate the bounding box of all content in this clip at a specific time
///
/// Walks every layer, unioning the transformed bounding boxes of all shape
/// instances and (recursively) all nested clip instances, evaluated at the
/// given clip-local time.
///
/// # Arguments
/// * `document` - The document containing all clip definitions (for resolving nested clips)
/// * `clip_time` - The time within this clip (already converted from timeline time)
///
/// # Returns
/// The bounding box of all visible content at the specified time
pub fn calculate_content_bounds(&self, document: &crate::document::Document, clip_time: f64) -> Rect {
    let mut total: Option<Rect> = None;
    for node in self.layers.iter() {
        // Non-vector layers (e.g. audio) contribute no spatial content.
        let AnyLayer::Vector(layer) = &node.data else {
            continue;
        };
        // Union in every shape instance's transformed path bounds.
        for instance in &layer.shape_instances {
            if let Some(shape) = layer.shapes.get(&instance.shape_id) {
                // Local path bbox, mapped through the instance transform
                // (TODO: evaluate animations at clip_time).
                let bbox = instance
                    .to_affine()
                    .transform_rect_bbox(shape.path().bounding_box());
                total = Some(total.map_or(bbox, |acc| acc.union(bbox)));
            }
        }
        // Union in every nested clip instance, recursing at its local time.
        for instance in &layer.clip_instances {
            // Remap parent clip time into the nested clip's timeline:
            // offset by the instance start, scale by speed, add trim.
            let nested_time = (clip_time - instance.timeline_start) * instance.playback_speed
                + instance.trim_start;
            let inner = if let Some(nested) = document.get_vector_clip(&instance.clip_id) {
                nested.calculate_content_bounds(document, nested_time)
            } else if let Some(video) = document.get_video_clip(&instance.clip_id) {
                // Video clips have fixed dimensions.
                Rect::new(0.0, 0.0, video.width, video.height)
            } else {
                // Clip not found or is audio (no spatial representation).
                continue;
            };
            let bbox = instance.transform.to_affine().transform_rect_bbox(inner);
            total = Some(total.map_or(bbox, |acc| acc.union(bbox)));
        }
    }
    // Empty clips report a small unit rect at the origin.
    total.unwrap_or_else(|| Rect::new(0.0, 0.0, 1.0, 1.0))
}
/// Get the width of the content bounds at a specific time
pub fn content_width(&self, document: &crate::document::Document, clip_time: f64) -> f64 {
    let bounds = self.calculate_content_bounds(document, clip_time);
    bounds.width()
}
/// Get the height of the content bounds at a specific time
pub fn content_height(&self, document: &crate::document::Document, clip_time: f64) -> f64 {
    let bounds = self.calculate_content_bounds(document, clip_time);
    bounds.height()
}
}
/// Video clip referencing an external video file

View File

@ -203,7 +203,7 @@ pub fn hit_test_clip_instance(
point: Point,
parent_transform: Affine,
) -> bool {
// Create bounding rectangle for the clip
// Create bounding rectangle for the clip (top-left origin)
let clip_rect = Rect::new(0.0, 0.0, clip_width, clip_height);
// Combine parent transform with clip instance transform
@ -242,39 +242,48 @@ pub fn get_clip_instance_bounds(
/// Hit test clip instances at a specific point
///
/// Tests clip instances in reverse order (front to back) and returns the first hit.
/// This function requires the clip libraries to look up clip dimensions.
/// Uses dynamic bounds calculation based on clip content and current time.
///
/// # Arguments
///
/// * `clip_instances` - The clip instances to test
/// * `vector_clips` - HashMap of vector clips for looking up dimensions
/// * `video_clips` - HashMap of video clips for looking up dimensions
/// * `document` - Document containing all clip definitions
/// * `point` - The point to test in screen/canvas space
/// * `parent_transform` - Transform from parent layer/clip
/// * `timeline_time` - Current timeline time for evaluating animations
///
/// # Returns
///
/// The UUID of the first clip instance hit, or None if no hit
pub fn hit_test_clip_instances(
clip_instances: &[ClipInstance],
vector_clips: &std::collections::HashMap<Uuid, VectorClip>,
video_clips: &std::collections::HashMap<Uuid, VideoClip>,
document: &crate::document::Document,
point: Point,
parent_transform: Affine,
timeline_time: f64,
) -> Option<Uuid> {
// Test in reverse order (front to back)
for clip_instance in clip_instances.iter().rev() {
// Try to get clip dimensions from either vector or video clips
let (width, height) = if let Some(vector_clip) = vector_clips.get(&clip_instance.clip_id) {
(vector_clip.width, vector_clip.height)
} else if let Some(video_clip) = video_clips.get(&clip_instance.clip_id) {
(video_clip.width, video_clip.height)
// Calculate clip-local time from timeline time
// Apply timeline offset and playback speed, then add trim offset
let clip_time = ((timeline_time - clip_instance.timeline_start) * clip_instance.playback_speed) + clip_instance.trim_start;
// Get dynamic clip bounds from content at this time
let content_bounds = if let Some(vector_clip) = document.get_vector_clip(&clip_instance.clip_id) {
vector_clip.calculate_content_bounds(document, clip_time)
} else if let Some(video_clip) = document.get_video_clip(&clip_instance.clip_id) {
Rect::new(0.0, 0.0, video_clip.width, video_clip.height)
} else {
// Clip not found or is audio (no spatial representation)
continue;
};
if hit_test_clip_instance(clip_instance, width, height, point, parent_transform) {
// Transform content bounds to screen space
let clip_transform = parent_transform * clip_instance.transform.to_affine();
let clip_bbox = clip_transform.transform_rect_bbox(content_bounds);
// Test if point is inside the transformed rectangle
if clip_bbox.contains(point) {
return Some(clip_instance.id);
}
}
@ -285,40 +294,46 @@ pub fn hit_test_clip_instances(
/// Hit test clip instances within a rectangle (for marquee selection)
///
/// Returns all clip instances whose bounding boxes intersect with the given rectangle.
/// Uses dynamic bounds calculation based on clip content and current time.
///
/// # Arguments
///
/// * `clip_instances` - The clip instances to test
/// * `vector_clips` - HashMap of vector clips for looking up dimensions
/// * `video_clips` - HashMap of video clips for looking up dimensions
/// * `document` - Document containing all clip definitions
/// * `rect` - The selection rectangle in screen/canvas space
/// * `parent_transform` - Transform from parent layer/clip
/// * `timeline_time` - Current timeline time for evaluating animations
///
/// # Returns
///
/// Vector of UUIDs for all clip instances that intersect the rectangle
pub fn hit_test_clip_instances_in_rect(
clip_instances: &[ClipInstance],
vector_clips: &std::collections::HashMap<Uuid, VectorClip>,
video_clips: &std::collections::HashMap<Uuid, VideoClip>,
document: &crate::document::Document,
rect: Rect,
parent_transform: Affine,
timeline_time: f64,
) -> Vec<Uuid> {
let mut hits = Vec::new();
for clip_instance in clip_instances {
// Try to get clip dimensions from either vector or video clips
let (width, height) = if let Some(vector_clip) = vector_clips.get(&clip_instance.clip_id) {
(vector_clip.width, vector_clip.height)
} else if let Some(video_clip) = video_clips.get(&clip_instance.clip_id) {
(video_clip.width, video_clip.height)
// Calculate clip-local time from timeline time
// Apply timeline offset and playback speed, then add trim offset
let clip_time = ((timeline_time - clip_instance.timeline_start) * clip_instance.playback_speed) + clip_instance.trim_start;
// Get dynamic clip bounds from content at this time
let content_bounds = if let Some(vector_clip) = document.get_vector_clip(&clip_instance.clip_id) {
vector_clip.calculate_content_bounds(document, clip_time)
} else if let Some(video_clip) = document.get_video_clip(&clip_instance.clip_id) {
Rect::new(0.0, 0.0, video_clip.width, video_clip.height)
} else {
// Clip not found or is audio (no spatial representation)
continue;
};
// Get clip instance bounding box in screen space
let clip_bbox = get_clip_instance_bounds(clip_instance, width, height, parent_transform);
// Transform content bounds to screen space
let clip_transform = parent_transform * clip_instance.transform.to_affine();
let clip_bbox = clip_transform.transform_rect_bbox(content_bounds);
// Check if rectangles intersect
if rect.intersect(clip_bbox).area() > 0.0 {

View File

@ -4,7 +4,7 @@
use crate::animation::TransformProperty;
use crate::document::Document;
use crate::layer::{AnyLayer, VectorLayer};
use crate::layer::{AnyLayer, LayerTrait, VectorLayer};
use crate::object::ShapeInstance;
use kurbo::{Affine, Shape};
use vello::kurbo::Rect;
@ -45,16 +45,30 @@ fn render_background(document: &Document, scene: &mut Scene, base_transform: Aff
/// Recursively render the root graphics object and its children
fn render_graphics_object(document: &Document, time: f64, scene: &mut Scene, base_transform: Affine) {
    // Solo handling: if any visible layer is soloed, only soloed layers draw;
    // otherwise every visible layer draws.
    let solo_active = document.visible_layers().any(|layer| layer.soloed());
    // Top-level layers start the opacity cascade at full opacity (1.0).
    for layer in document.visible_layers() {
        if !solo_active || layer.soloed() {
            render_layer(document, time, layer, scene, base_transform, 1.0);
        }
    }
}
/// Render a single layer
fn render_layer(document: &Document, time: f64, layer: &AnyLayer, scene: &mut Scene, base_transform: Affine) {
fn render_layer(document: &Document, time: f64, layer: &AnyLayer, scene: &mut Scene, base_transform: Affine, parent_opacity: f64) {
match layer {
AnyLayer::Vector(vector_layer) => render_vector_layer(document, time, vector_layer, scene, base_transform),
AnyLayer::Vector(vector_layer) => render_vector_layer(document, time, vector_layer, scene, base_transform, parent_opacity),
AnyLayer::Audio(_) => {
// Audio layers don't render visually
}
@ -69,9 +83,10 @@ fn render_clip_instance(
document: &Document,
time: f64,
clip_instance: &crate::clip::ClipInstance,
_parent_opacity: f64,
parent_opacity: f64,
scene: &mut Scene,
base_transform: Affine,
animation_data: &crate::animation::AnimationData,
) {
// Try to find the clip in the document's clip libraries
// For now, only handle VectorClips (VideoClip and AudioClip rendering not yet implemented)
@ -84,28 +99,134 @@ fn render_clip_instance(
return; // Clip instance not active at this time
};
// Build transform for this clip instance
let instance_transform = base_transform * clip_instance.to_affine();
// Evaluate animated transform properties
let transform = &clip_instance.transform;
let x = animation_data.eval(
&crate::animation::AnimationTarget::Object {
id: clip_instance.id,
property: TransformProperty::X,
},
time,
transform.x,
);
let y = animation_data.eval(
&crate::animation::AnimationTarget::Object {
id: clip_instance.id,
property: TransformProperty::Y,
},
time,
transform.y,
);
let rotation = animation_data.eval(
&crate::animation::AnimationTarget::Object {
id: clip_instance.id,
property: TransformProperty::Rotation,
},
time,
transform.rotation,
);
let scale_x = animation_data.eval(
&crate::animation::AnimationTarget::Object {
id: clip_instance.id,
property: TransformProperty::ScaleX,
},
time,
transform.scale_x,
);
let scale_y = animation_data.eval(
&crate::animation::AnimationTarget::Object {
id: clip_instance.id,
property: TransformProperty::ScaleY,
},
time,
transform.scale_y,
);
let skew_x = animation_data.eval(
&crate::animation::AnimationTarget::Object {
id: clip_instance.id,
property: TransformProperty::SkewX,
},
time,
transform.skew_x,
);
let skew_y = animation_data.eval(
&crate::animation::AnimationTarget::Object {
id: clip_instance.id,
property: TransformProperty::SkewY,
},
time,
transform.skew_y,
);
// TODO: Properly handle clip instance opacity by threading opacity through rendering pipeline
// Currently clip_instance.opacity is not being applied to nested layers
// Build transform matrix (similar to shape instances)
// For clip instances, we don't have a path to calculate center from,
// so we use the clip's center point (width/2, height/2)
let center_x = vector_clip.width / 2.0;
let center_y = vector_clip.height / 2.0;
// Build skew transforms (applied around clip center)
let skew_transform = if skew_x != 0.0 || skew_y != 0.0 {
let skew_x_affine = if skew_x != 0.0 {
let tan_skew = skew_x.to_radians().tan();
Affine::new([1.0, 0.0, tan_skew, 1.0, 0.0, 0.0])
} else {
Affine::IDENTITY
};
let skew_y_affine = if skew_y != 0.0 {
let tan_skew = skew_y.to_radians().tan();
Affine::new([1.0, tan_skew, 0.0, 1.0, 0.0, 0.0])
} else {
Affine::IDENTITY
};
// Skew around center: translate to origin, skew, translate back
Affine::translate((center_x, center_y))
* skew_x_affine
* skew_y_affine
* Affine::translate((-center_x, -center_y))
} else {
Affine::IDENTITY
};
let clip_transform = Affine::translate((x, y))
* Affine::rotate(rotation.to_radians())
* Affine::scale_non_uniform(scale_x, scale_y)
* skew_transform;
let instance_transform = base_transform * clip_transform;
// Evaluate animated opacity
let opacity = animation_data.eval(
&crate::animation::AnimationTarget::Object {
id: clip_instance.id,
property: TransformProperty::Opacity,
},
time,
clip_instance.opacity,
);
// Cascade opacity: parent_opacity × animated opacity
let clip_opacity = parent_opacity * opacity;
// Recursively render all root layers in the clip at the remapped time
for layer_node in vector_clip.layers.iter() {
// TODO: Filter by visibility and time range once LayerNode exposes that data
render_layer(document, clip_time, &layer_node.data, scene, instance_transform);
// Skip invisible layers for performance
if !layer_node.data.visible() {
continue;
}
render_layer(document, clip_time, &layer_node.data, scene, instance_transform, clip_opacity);
}
}
/// Render a vector layer with all its clip instances and shape instances
fn render_vector_layer(document: &Document, time: f64, layer: &VectorLayer, scene: &mut Scene, base_transform: Affine) {
fn render_vector_layer(document: &Document, time: f64, layer: &VectorLayer, scene: &mut Scene, base_transform: Affine, parent_opacity: f64) {
// Get layer-level opacity
let layer_opacity = layer.layer.opacity;
// Cascade opacity: parent_opacity × layer.opacity
let layer_opacity = parent_opacity * layer.layer.opacity;
// Render clip instances first (they appear under shape instances)
for clip_instance in &layer.clip_instances {
render_clip_instance(document, time, clip_instance, layer_opacity, scene, base_transform);
render_clip_instance(document, time, clip_instance, layer_opacity, scene, base_transform, &layer.layer.animation_data);
}
// Render each shape instance in the layer
@ -259,7 +380,8 @@ fn render_vector_layer(document: &Document, time: f64, layer: &VectorLayer, scen
* skew_transform;
let affine = base_transform * object_transform;
// Calculate final opacity (layer * object)
// Calculate final opacity (cascaded from parent → layer → shape instance)
// layer_opacity already includes parent_opacity from render_vector_layer
let final_opacity = (layer_opacity * opacity) as f32;
// Render fill if present

View File

@ -206,6 +206,7 @@ struct VelloCallback {
stroke_color: egui::Color32, // Current stroke color for previews
selected_tool: lightningbeam_core::tool::Tool, // Current tool for rendering mode-specific UI
eyedropper_request: Option<(egui::Pos2, super::ColorMode)>, // Pending eyedropper sample
playback_time: f64, // Current playback time for animation evaluation
}
impl VelloCallback {
@ -223,8 +224,9 @@ impl VelloCallback {
stroke_color: egui::Color32,
selected_tool: lightningbeam_core::tool::Tool,
eyedropper_request: Option<(egui::Pos2, super::ColorMode)>,
playback_time: f64,
) -> Self {
Self { rect, pan_offset, zoom, instance_id, document, tool_state, active_layer_id, drag_delta, selection, fill_color, stroke_color, selected_tool, eyedropper_request }
Self { rect, pan_offset, zoom, instance_id, document, tool_state, active_layer_id, drag_delta, selection, fill_color, stroke_color, selected_tool, eyedropper_request, playback_time }
}
}
@ -292,6 +294,7 @@ impl egui_wgpu::CallbackTrait for VelloCallback {
// Render each object at its preview position (original + delta)
for (object_id, original_pos) in original_positions {
// Try shape instance first
if let Some(_object) = vector_layer.get_object(object_id) {
if let Some(shape) = vector_layer.get_shape(&_object.shape_id) {
// New position = original + delta
@ -313,6 +316,39 @@ impl egui_wgpu::CallbackTrait for VelloCallback {
);
}
}
// Try clip instance if not a shape instance
else if let Some(clip_inst) = vector_layer.clip_instances.iter().find(|ci| ci.id == *object_id) {
// Render clip at preview position
// For now, just render the bounding box outline in semi-transparent blue
let new_x = original_pos.x + delta.x;
let new_y = original_pos.y + delta.y;
use vello::kurbo::Stroke;
let clip_transform = Affine::translate((new_x, new_y));
let combined_transform = camera_transform * clip_transform;
// Calculate clip bounds for preview
let clip_time = ((self.playback_time - clip_inst.timeline_start) * clip_inst.playback_speed) + clip_inst.trim_start;
let content_bounds = if let Some(vector_clip) = self.document.get_vector_clip(&clip_inst.clip_id) {
vector_clip.calculate_content_bounds(&self.document, clip_time)
} else if let Some(video_clip) = self.document.get_video_clip(&clip_inst.clip_id) {
use vello::kurbo::Rect as KurboRect;
KurboRect::new(0.0, 0.0, video_clip.width, video_clip.height)
} else {
continue;
};
// Draw preview outline
let alpha_color = Color::rgba8(255, 150, 100, 150); // Orange, semi-transparent
let stroke_width = 2.0 / self.zoom.max(0.5) as f64;
scene.stroke(
&Stroke::new(stroke_width),
combined_transform,
alpha_color,
None,
&content_bounds,
);
}
}
}
}
@ -387,19 +423,21 @@ impl egui_wgpu::CallbackTrait for VelloCallback {
}
// Also draw selection outlines for clip instances
let clip_instance_count = self.selection.clip_instances().len();
for &clip_id in self.selection.clip_instances() {
if let Some(clip_instance) = vector_layer.clip_instances.iter().find(|ci| ci.id == clip_id) {
// Get clip dimensions from document
let (width, height) = if let Some(vector_clip) = self.document.get_vector_clip(&clip_instance.clip_id) {
(vector_clip.width, vector_clip.height)
// Calculate clip-local time
let clip_time = ((self.playback_time - clip_instance.timeline_start) * clip_instance.playback_speed) + clip_instance.trim_start;
// Get dynamic clip bounds from content at current time
let bbox = if let Some(vector_clip) = self.document.get_vector_clip(&clip_instance.clip_id) {
vector_clip.calculate_content_bounds(&self.document, clip_time)
} else if let Some(video_clip) = self.document.get_video_clip(&clip_instance.clip_id) {
(video_clip.width, video_clip.height)
KurboRect::new(0.0, 0.0, video_clip.width, video_clip.height)
} else {
continue; // Clip not found or is audio
};
// Create bounding box from clip dimensions
let bbox = KurboRect::new(0.0, 0.0, width, height);
// Apply clip instance transform and camera transform
let clip_transform = clip_instance.transform.to_affine();
@ -733,9 +771,14 @@ impl egui_wgpu::CallbackTrait for VelloCallback {
// For single object: use object-aligned (rotated) bounding box
// For multiple objects: use axis-aligned bounding box (simpler for now)
if self.selection.shape_instances().len() == 1 {
let total_selected = self.selection.shape_instances().len() + self.selection.clip_instances().len();
if total_selected == 1 {
// Single object - draw rotated bounding box
let object_id = *self.selection.shape_instances().iter().next().unwrap();
let object_id = if let Some(&id) = self.selection.shape_instances().iter().next() {
id
} else {
*self.selection.clip_instances().iter().next().unwrap()
};
if let Some(object) = vector_layer.get_object(&object_id) {
if let Some(shape) = vector_layer.get_shape(&object.shape_id) {
@ -1258,10 +1301,10 @@ impl StagePane {
let document = shared.action_executor.document();
let clip_hit = hit_test::hit_test_clip_instances(
&vector_layer.clip_instances,
&document.vector_clips,
&document.video_clips,
document,
point,
Affine::IDENTITY,
*shared.playback_time,
);
let hit_result = if let Some(clip_id) = clip_hit {
@ -1373,7 +1416,11 @@ impl StagePane {
}
// Mouse up: finish interaction
if response.drag_stopped() || (ui.input(|i| i.pointer.any_released()) && matches!(shared.tool_state, ToolState::DraggingSelection { .. } | ToolState::MarqueeSelecting { .. })) {
let drag_stopped = response.drag_stopped();
let pointer_released = ui.input(|i| i.pointer.any_released());
let is_drag_or_marquee = matches!(shared.tool_state, ToolState::DraggingSelection { .. } | ToolState::MarqueeSelecting { .. });
if drag_stopped || (pointer_released && is_drag_or_marquee) {
match shared.tool_state.clone() {
ToolState::DraggingSelection { start_mouse, original_positions, .. } => {
// Calculate total delta
@ -1446,10 +1493,10 @@ impl StagePane {
let document = shared.action_executor.document();
let clip_hits = hit_test::hit_test_clip_instances_in_rect(
&vector_layer.clip_instances,
&document.vector_clips,
&document.video_clips,
document,
selection_rect,
Affine::IDENTITY,
*shared.playback_time,
);
// Hit test shape instances in rectangle
@ -2333,13 +2380,14 @@ impl StagePane {
println!("Scale world: ({:.3}, {:.3})", scale_x_world, scale_y_world);
// Apply scale to all selected objects
// Apply scale to all selected objects (both shape instances and clip instances)
for (object_id, original_transform) in original_transforms {
println!("\nObject {:?}:", object_id);
println!(" Original pos: ({:.1}, {:.1})", original_transform.x, original_transform.y);
println!(" Original rotation: {:.1}°", original_transform.rotation);
println!(" Original scale: ({:.3}, {:.3})", original_transform.scale_x, original_transform.scale_y);
// Try to apply to shape instance
vector_layer.modify_object_internal(object_id, |obj| {
// Get object's rotation in radians
let rotation_rad = original_transform.rotation.to_radians();
@ -2347,21 +2395,12 @@ impl StagePane {
let sin_r = rotation_rad.sin();
// Transform scale from world space to object's local space
// The object's local axes are rotated by rotation_rad from world axes
// We need to figure out how much to scale along each local axis
// to achieve the world-space scaling
// For a rotated object, world-space scale affects local-space scale as:
// local_x axis aligns with (cos(r), sin(r)) in world space
// local_y axis aligns with (-sin(r), cos(r)) in world space
// When we scale by (sx, sy) in world, the local scale changes by:
let cos_r_sq = cos_r * cos_r;
let sin_r_sq = sin_r * sin_r;
let sx_abs = scale_x_world.abs();
let sy_abs = scale_y_world.abs();
// Compute how much the object grows along its local axes
// when the world-space bbox is scaled
let local_scale_x = (cos_r_sq * sx_abs * sx_abs + sin_r_sq * sy_abs * sy_abs).sqrt();
let local_scale_y = (sin_r_sq * sx_abs * sx_abs + cos_r_sq * sy_abs * sy_abs).sqrt();
@ -2387,6 +2426,27 @@ impl StagePane {
// Keep rotation unchanged
obj.transform.rotation = original_transform.rotation;
});
// Also try to apply to clip instance
if let Some(clip_instance) = vector_layer.clip_instances.iter_mut().find(|ci| ci.id == *object_id) {
let rotation_rad = original_transform.rotation.to_radians();
let cos_r = rotation_rad.cos();
let sin_r = rotation_rad.sin();
let cos_r_sq = cos_r * cos_r;
let sin_r_sq = sin_r * sin_r;
let sx_abs = scale_x_world.abs();
let sy_abs = scale_y_world.abs();
let local_scale_x = (cos_r_sq * sx_abs * sx_abs + sin_r_sq * sy_abs * sy_abs).sqrt();
let local_scale_y = (sin_r_sq * sx_abs * sx_abs + cos_r_sq * sy_abs * sy_abs).sqrt();
let rel_x = original_transform.x - origin.x;
let rel_y = original_transform.y - origin.y;
clip_instance.transform.x = origin.x + rel_x * scale_x_world;
clip_instance.transform.y = origin.y + rel_y * scale_y_world;
clip_instance.transform.scale_x = original_transform.scale_x * local_scale_x;
clip_instance.transform.scale_y = original_transform.scale_y * local_scale_y;
clip_instance.transform.rotation = original_transform.rotation;
}
}
}
@ -2766,7 +2826,8 @@ impl StagePane {
// For single object: use rotated bounding box
// For multiple objects: use axis-aligned bounding box
if shared.selection.shape_instances().len() == 1 {
let total_selected = shared.selection.shape_instances().len() + shared.selection.clip_instances().len();
if total_selected == 1 {
// Single object - rotated bounding box
self.handle_transform_single_object(ui, response, point, &active_layer_id, shared);
} else {
@ -2776,6 +2837,7 @@ impl StagePane {
// Get immutable reference just for bbox calculation
if let Some(AnyLayer::Vector(vector_layer)) = shared.action_executor.document().get_layer(&active_layer_id) {
// Calculate bounding box for shape instances
for &object_id in shared.selection.shape_instances() {
if let Some(object) = vector_layer.get_object(&object_id) {
if let Some(shape) = vector_layer.get_shape(&object.shape_id) {
@ -2798,6 +2860,42 @@ impl StagePane {
}
}
}
// Calculate bounding box for clip instances
for &clip_id in shared.selection.clip_instances() {
if let Some(clip_instance) = vector_layer.clip_instances.iter().find(|ci| ci.id == clip_id) {
// Calculate clip-local time
let clip_time = ((*shared.playback_time - clip_instance.timeline_start) * clip_instance.playback_speed) + clip_instance.trim_start;
// Get dynamic clip bounds from content at current time
use vello::kurbo::Rect as KurboRect;
let clip_bbox = if let Some(vector_clip) = shared.action_executor.document().get_vector_clip(&clip_instance.clip_id) {
vector_clip.calculate_content_bounds(shared.action_executor.document(), clip_time)
} else if let Some(video_clip) = shared.action_executor.document().get_video_clip(&clip_instance.clip_id) {
KurboRect::new(0.0, 0.0, video_clip.width, video_clip.height)
} else {
continue; // Clip not found or is audio
};
println!("Multi-object clip bbox: clip_id={}, bbox=({:.1}, {:.1}, {:.1}, {:.1}), size={:.1}x{:.1}",
clip_instance.clip_id, clip_bbox.x0, clip_bbox.y0, clip_bbox.x1, clip_bbox.y1,
clip_bbox.width(), clip_bbox.height());
// Apply clip instance transform
let clip_transform = clip_instance.transform.to_affine();
println!(" Transform: x={}, y={}, scale_x={}, scale_y={}, rotation={}",
clip_instance.transform.x, clip_instance.transform.y,
clip_instance.transform.scale_x, clip_instance.transform.scale_y,
clip_instance.transform.rotation);
let transformed_bbox = clip_transform.transform_rect_bbox(clip_bbox);
combined_bbox = Some(match combined_bbox {
None => transformed_bbox,
Some(existing) => existing.union(transformed_bbox),
});
}
}
}
let bbox = match combined_bbox {
@ -2847,16 +2945,24 @@ impl StagePane {
let tolerance = 10.0; // Click tolerance in world space
if let Some(mode) = Self::hit_test_transform_handle(point, bbox, tolerance) {
// Store original transforms of all selected objects
// Store original transforms of all selected objects (shape instances and clip instances)
use std::collections::HashMap;
let mut original_transforms = HashMap::new();
if let Some(AnyLayer::Vector(vector_layer)) = shared.action_executor.document().get_layer(&active_layer_id) {
// Store shape instance transforms
for &object_id in shared.selection.shape_instances() {
if let Some(object) = vector_layer.get_object(&object_id) {
original_transforms.insert(object_id, object.transform.clone());
}
}
// Store clip instance transforms
for &clip_id in shared.selection.clip_instances() {
if let Some(clip_instance) = vector_layer.clip_instances.iter().find(|ci| ci.id == clip_id) {
original_transforms.insert(clip_id, clip_instance.transform.clone());
}
}
}
println!("=== TRANSFORM START ===");
@ -2909,22 +3015,36 @@ impl StagePane {
if response.drag_stopped() || (ui.input(|i| i.pointer.any_released()) && matches!(shared.tool_state, ToolState::Transforming { .. })) {
if let ToolState::Transforming { original_transforms, .. } = shared.tool_state.clone() {
use std::collections::HashMap;
use lightningbeam_core::actions::TransformShapeInstancesAction;
use lightningbeam_core::actions::{TransformShapeInstancesAction, TransformClipInstancesAction};
let mut object_transforms = HashMap::new();
let mut shape_instance_transforms = HashMap::new();
let mut clip_instance_transforms = HashMap::new();
// Get current transforms and pair with originals
if let Some(AnyLayer::Vector(vector_layer)) = shared.action_executor.document().get_layer(&active_layer_id) {
for (object_id, original) in original_transforms {
// Try shape instance first
if let Some(object) = vector_layer.get_object(&object_id) {
let new_transform = object.transform.clone();
object_transforms.insert(object_id, (original, new_transform));
shape_instance_transforms.insert(object_id, (original, new_transform));
}
// Try clip instance if not found as shape instance
else if let Some(clip_instance) = vector_layer.clip_instances.iter().find(|ci| ci.id == object_id) {
let new_transform = clip_instance.transform.clone();
clip_instance_transforms.insert(object_id, (original, new_transform));
}
}
}
if !object_transforms.is_empty() {
let action = TransformShapeInstancesAction::new(active_layer_id, object_transforms);
// Create action for shape instances
if !shape_instance_transforms.is_empty() {
let action = TransformShapeInstancesAction::new(active_layer_id, shape_instance_transforms);
shared.pending_actions.push(Box::new(action));
}
// Create action for clip instances
if !clip_instance_transforms.is_empty() {
let action = TransformClipInstancesAction::new(active_layer_id, clip_instance_transforms);
shared.pending_actions.push(Box::new(action));
}
@ -2947,11 +3067,19 @@ impl StagePane {
use lightningbeam_core::layer::AnyLayer;
use vello::kurbo::Affine;
let object_id = *shared.selection.shape_instances().iter().next().unwrap();
// Get the single selected object (either shape instance or clip instance)
let object_id = if let Some(&id) = shared.selection.shape_instances().iter().next() {
id
} else if let Some(&id) = shared.selection.clip_instances().iter().next() {
id
} else {
return; // No selection, shouldn't happen
};
// Calculate rotated bounding box corners
let (local_bbox, world_corners, obj_transform, object) = {
let (local_bbox, world_corners, obj_transform, transform) = {
if let Some(AnyLayer::Vector(vector_layer)) = shared.action_executor.document().get_layer(&active_layer_id) {
// Try shape instance first
if let Some(object) = vector_layer.get_object(&object_id) {
if let Some(shape) = vector_layer.get_shape(&object.shape_id) {
let local_bbox = shape.path().bounding_box();
@ -3000,10 +3128,47 @@ impl StagePane {
.map(|&p| obj_transform * p)
.collect();
(local_bbox, world_corners, obj_transform, object.clone())
(local_bbox, world_corners, obj_transform, object.transform.clone())
} else {
return;
}
}
// Try clip instance if not a shape instance
else if let Some(clip_instance) = vector_layer.clip_instances.iter().find(|ci| ci.id == object_id) {
// Calculate clip-local time
let clip_time = ((*shared.playback_time - clip_instance.timeline_start) * clip_instance.playback_speed) + clip_instance.trim_start;
// Get dynamic clip bounds from content at current time
let local_bbox = if let Some(vector_clip) = shared.action_executor.document().get_vector_clip(&clip_instance.clip_id) {
vector_clip.calculate_content_bounds(shared.action_executor.document(), clip_time)
} else if let Some(video_clip) = shared.action_executor.document().get_video_clip(&clip_instance.clip_id) {
vello::kurbo::Rect::new(0.0, 0.0, video_clip.width, video_clip.height)
} else {
return; // Clip not found or is audio
};
println!("Single-object clip bbox: clip_id={}, bbox=({:.1}, {:.1}, {:.1}, {:.1}), size={:.1}x{:.1}",
clip_instance.clip_id, local_bbox.x0, local_bbox.y0, local_bbox.x1, local_bbox.y1,
local_bbox.width(), local_bbox.height());
let local_corners = [
vello::kurbo::Point::new(local_bbox.x0, local_bbox.y0),
vello::kurbo::Point::new(local_bbox.x1, local_bbox.y0),
vello::kurbo::Point::new(local_bbox.x1, local_bbox.y1),
vello::kurbo::Point::new(local_bbox.x0, local_bbox.y1),
];
// Clip instances don't have skew, so transform is simpler
let obj_transform = Affine::translate((clip_instance.transform.x, clip_instance.transform.y))
* Affine::rotate(clip_instance.transform.rotation.to_radians())
* Affine::scale_non_uniform(clip_instance.transform.scale_x, clip_instance.transform.scale_y);
let world_corners: Vec<vello::kurbo::Point> = local_corners
.iter()
.map(|&p| obj_transform * p)
.collect();
(local_bbox, world_corners, obj_transform, clip_instance.transform.clone())
} else {
return;
}
@ -3024,7 +3189,7 @@ impl StagePane {
];
// Rotation handle position
let rotation_rad = object.transform.rotation.to_radians();
let rotation_rad = transform.rotation.to_radians();
let cos_r = rotation_rad.cos();
let sin_r = rotation_rad.sin();
let rotation_handle_offset = 20.0;
@ -3134,7 +3299,7 @@ impl StagePane {
use std::collections::HashMap;
let mut original_transforms = HashMap::new();
original_transforms.insert(object_id, object.transform.clone());
original_transforms.insert(object_id, transform.clone());
*shared.tool_state = ToolState::Transforming {
mode: lightningbeam_core::tool::TransformMode::Rotate { center: visual_center },
@ -3155,7 +3320,7 @@ impl StagePane {
use std::collections::HashMap;
let mut original_transforms = HashMap::new();
original_transforms.insert(object_id, object.transform.clone());
original_transforms.insert(object_id, transform.clone());
*shared.tool_state = ToolState::Transforming {
mode: lightningbeam_core::tool::TransformMode::ScaleCorner {
@ -3178,7 +3343,7 @@ impl StagePane {
use lightningbeam_core::tool::Axis;
let mut original_transforms = HashMap::new();
original_transforms.insert(object_id, object.transform.clone());
original_transforms.insert(object_id, transform.clone());
// Determine axis and opposite edge
let (axis, opposite_edge) = match idx {
@ -3231,7 +3396,7 @@ impl StagePane {
use lightningbeam_core::tool::Axis;
let mut original_transforms = HashMap::new();
original_transforms.insert(object_id, object.transform.clone());
original_transforms.insert(object_id, transform.clone());
// Determine skew axis and origin
let (axis, opposite_edge) = match i {
@ -3551,20 +3716,33 @@ impl StagePane {
if response.drag_stopped() || (ui.input(|i| i.pointer.any_released()) && matches!(shared.tool_state, ToolState::Transforming { .. })) {
if let ToolState::Transforming { original_transforms, .. } = shared.tool_state.clone() {
use std::collections::HashMap;
use lightningbeam_core::actions::TransformShapeInstancesAction;
use lightningbeam_core::actions::{TransformShapeInstancesAction, TransformClipInstancesAction};
let mut object_transforms = HashMap::new();
let mut shape_instance_transforms = HashMap::new();
let mut clip_instance_transforms = HashMap::new();
if let Some(AnyLayer::Vector(vector_layer)) = shared.action_executor.document().get_layer(&active_layer_id) {
for (obj_id, original) in original_transforms {
// Try shape instance first
if let Some(object) = vector_layer.get_object(&obj_id) {
object_transforms.insert(obj_id, (original, object.transform.clone()));
shape_instance_transforms.insert(obj_id, (original, object.transform.clone()));
}
// Try clip instance if not found as shape instance
else if let Some(clip_instance) = vector_layer.clip_instances.iter().find(|ci| ci.id == obj_id) {
clip_instance_transforms.insert(obj_id, (original, clip_instance.transform.clone()));
}
}
}
if !object_transforms.is_empty() {
let action = TransformShapeInstancesAction::new(*active_layer_id, object_transforms);
// Create action for shape instances
if !shape_instance_transforms.is_empty() {
let action = TransformShapeInstancesAction::new(*active_layer_id, shape_instance_transforms);
shared.pending_actions.push(Box::new(action));
}
// Create action for clip instances
if !clip_instance_transforms.is_empty() {
let action = TransformClipInstancesAction::new(*active_layer_id, clip_instance_transforms);
shared.pending_actions.push(Box::new(action));
}
@ -3594,19 +3772,53 @@ impl StagePane {
if delta.x.abs() > 0.01 || delta.y.abs() > 0.01 {
if let Some(active_layer_id) = shared.active_layer_id {
use std::collections::HashMap;
let mut object_positions = HashMap::new();
use lightningbeam_core::object::Transform;
let mut shape_instance_positions = HashMap::new();
let mut clip_instance_transforms = HashMap::new();
// Separate shape instances from clip instances
for (object_id, original_pos) in original_positions {
let new_pos = Point::new(
original_pos.x + delta.x,
original_pos.y + delta.y,
);
object_positions.insert(object_id, (original_pos, new_pos));
if shared.selection.contains_shape_instance(&object_id) {
shape_instance_positions.insert(object_id, (original_pos, new_pos));
} else if shared.selection.contains_clip_instance(&object_id) {
// For clip instances, get the full transform
if let Some(layer) = shared.action_executor.document().get_layer(active_layer_id) {
if let lightningbeam_core::layer::AnyLayer::Vector(vector_layer) = layer {
if let Some(clip_inst) = vector_layer.clip_instances.iter().find(|ci| ci.id == object_id) {
let mut old_transform = clip_inst.transform.clone();
old_transform.x = original_pos.x;
old_transform.y = original_pos.y;
let mut new_transform = clip_inst.transform.clone();
new_transform.x = new_pos.x;
new_transform.y = new_pos.y;
clip_instance_transforms.insert(object_id, (old_transform, new_transform));
}
}
}
}
}
use lightningbeam_core::actions::MoveShapeInstancesAction;
let action = MoveShapeInstancesAction::new(*active_layer_id, object_positions);
shared.pending_actions.push(Box::new(action));
// Create action for shape instances
if !shape_instance_positions.is_empty() {
use lightningbeam_core::actions::MoveShapeInstancesAction;
let action = MoveShapeInstancesAction::new(*active_layer_id, shape_instance_positions);
shared.pending_actions.push(Box::new(action));
}
// Create action for clip instances
if !clip_instance_transforms.is_empty() {
use lightningbeam_core::actions::TransformClipInstancesAction;
let action = TransformClipInstancesAction::new(*active_layer_id, clip_instance_transforms);
shared.pending_actions.push(Box::new(action));
}
}
}
}
@ -3629,15 +3841,30 @@ impl StagePane {
let selection_rect = KurboRect::new(min_x, min_y, max_x, max_y);
// Hit test all objects in rectangle
let hits = hit_test::hit_test_objects_in_rect(
// Hit test clip instances in rectangle
let document = shared.action_executor.document();
let clip_hits = hit_test::hit_test_clip_instances_in_rect(
&vector_layer.clip_instances,
document,
selection_rect,
Affine::IDENTITY,
*shared.playback_time,
);
// Hit test shape instances in rectangle
let shape_hits = hit_test::hit_test_objects_in_rect(
vector_layer,
selection_rect,
Affine::IDENTITY,
);
// Add to selection
for obj_id in hits {
// Add clip instances to selection
for clip_id in clip_hits {
shared.selection.add_clip_instance(clip_id);
}
// Add shape instances to selection
for obj_id in shape_hits {
shared.selection.add_shape_instance(obj_id);
}
}
@ -3880,6 +4107,7 @@ impl PaneRenderer for StagePane {
*shared.stroke_color,
*shared.selected_tool,
self.pending_eyedropper_sample,
*shared.playback_time,
);
let cb = egui_wgpu::Callback::new_paint_callback(

View File

@ -7,6 +7,7 @@
/// - Basic layer visualization
use eframe::egui;
use lightningbeam_core::layer::LayerTrait;
use super::{NodePath, PaneRenderer, SharedPaneState};
const RULER_HEIGHT: f32 = 30.0;
@ -50,6 +51,9 @@ pub struct TimelinePane {
/// Cached mouse position from mousedown (used for edge detection when drag starts)
mousedown_pos: Option<egui::Pos2>,
/// Track if a layer control widget was clicked this frame
layer_control_clicked: bool,
}
impl TimelinePane {
@ -65,6 +69,7 @@ impl TimelinePane {
clip_drag_state: None,
drag_offset: 0.0,
mousedown_pos: None,
layer_control_clicked: false,
}
}
@ -328,19 +333,18 @@ impl TimelinePane {
/// Render layer header column (left side with track names and controls)
fn render_layer_headers(
&self,
&mut self,
ui: &mut egui::Ui,
rect: egui::Rect,
theme: &crate::theme::Theme,
document: &lightningbeam_core::document::Document,
active_layer_id: &Option<uuid::Uuid>,
pending_actions: &mut Vec<Box<dyn lightningbeam_core::action::Action>>,
document: &lightningbeam_core::document::Document,
) {
let painter = ui.painter();
// Background for header column
let header_style = theme.style(".timeline-header", ui.ctx());
let header_bg = header_style.background_color.unwrap_or(egui::Color32::from_rgb(17, 17, 17));
painter.rect_filled(
ui.painter().rect_filled(
rect,
0.0,
header_bg,
@ -379,7 +383,7 @@ impl TimelinePane {
inactive_color
};
painter.rect_filled(header_rect, 0.0, bg_color);
ui.painter().rect_filled(header_rect, 0.0, bg_color);
// Get layer info
let layer_data = layer.layer();
@ -395,10 +399,10 @@ impl TimelinePane {
header_rect.min,
egui::vec2(4.0, LAYER_HEIGHT),
);
painter.rect_filled(indicator_rect, 0.0, type_color);
ui.painter().rect_filled(indicator_rect, 0.0, type_color);
// Layer name
painter.text(
ui.painter().text(
header_rect.min + egui::vec2(10.0, 10.0),
egui::Align2::LEFT_TOP,
layer_name,
@ -408,7 +412,7 @@ impl TimelinePane {
// Layer type (smaller text below name with colored background)
let type_text_pos = header_rect.min + egui::vec2(10.0, 28.0);
let type_text_galley = painter.layout_no_wrap(
let type_text_galley = ui.painter().layout_no_wrap(
layer_type.to_string(),
egui::FontId::proportional(11.0),
secondary_text_color,
@ -419,13 +423,13 @@ impl TimelinePane {
type_text_pos + egui::vec2(-2.0, -1.0),
egui::vec2(type_text_galley.size().x + 4.0, type_text_galley.size().y + 2.0),
);
painter.rect_filled(
ui.painter().rect_filled(
type_bg_rect,
2.0,
egui::Color32::from_rgba_unmultiplied(type_color.r(), type_color.g(), type_color.b(), 60),
);
painter.text(
ui.painter().text(
type_text_pos,
egui::Align2::LEFT_TOP,
layer_type,
@ -433,8 +437,151 @@ impl TimelinePane {
secondary_text_color,
);
// Layer controls (mute, solo, lock, volume)
let controls_top = header_rect.min.y + 4.0;
let controls_right = header_rect.max.x - 8.0;
let button_size = egui::vec2(20.0, 20.0);
let slider_width = 60.0;
// Position controls from right to left
let volume_slider_rect = egui::Rect::from_min_size(
egui::pos2(controls_right - slider_width, controls_top),
egui::vec2(slider_width, 20.0),
);
let lock_button_rect = egui::Rect::from_min_size(
egui::pos2(volume_slider_rect.min.x - button_size.x - 4.0, controls_top),
button_size,
);
let solo_button_rect = egui::Rect::from_min_size(
egui::pos2(lock_button_rect.min.x - button_size.x - 4.0, controls_top),
button_size,
);
let mute_button_rect = egui::Rect::from_min_size(
egui::pos2(solo_button_rect.min.x - button_size.x - 4.0, controls_top),
button_size,
);
// Get layer ID and current property values from the layer we already have
let layer_id = layer.id();
let current_volume = layer.volume();
let is_muted = layer.muted();
let is_soloed = layer.soloed();
let is_locked = layer.locked();
// Mute button
// TODO: Replace with SVG icon (volume-up-fill.svg / volume-mute.svg)
let mute_response = ui.allocate_ui_at_rect(mute_button_rect, |ui| {
let mute_text = if is_muted { "🔇" } else { "🔊" };
let button = egui::Button::new(mute_text)
.fill(if is_muted {
egui::Color32::from_rgba_unmultiplied(255, 100, 100, 100)
} else {
egui::Color32::from_gray(40)
})
.stroke(egui::Stroke::NONE);
ui.add(button)
}).inner;
if mute_response.clicked() {
self.layer_control_clicked = true;
pending_actions.push(Box::new(
lightningbeam_core::actions::SetLayerPropertiesAction::new(
layer_id,
lightningbeam_core::actions::LayerProperty::Muted(!is_muted),
)
));
}
// Solo button
// TODO: Replace with SVG headphones icon
let solo_response = ui.allocate_ui_at_rect(solo_button_rect, |ui| {
let button = egui::Button::new("🎧")
.fill(if is_soloed {
egui::Color32::from_rgba_unmultiplied(100, 200, 100, 100)
} else {
egui::Color32::from_gray(40)
})
.stroke(egui::Stroke::NONE);
ui.add(button)
}).inner;
if solo_response.clicked() {
self.layer_control_clicked = true;
pending_actions.push(Box::new(
lightningbeam_core::actions::SetLayerPropertiesAction::new(
layer_id,
lightningbeam_core::actions::LayerProperty::Soloed(!is_soloed),
)
));
}
// Lock button
// TODO: Replace with SVG lock/lock-open icons
let lock_response = ui.allocate_ui_at_rect(lock_button_rect, |ui| {
let lock_text = if is_locked { "🔒" } else { "🔓" };
let button = egui::Button::new(lock_text)
.fill(if is_locked {
egui::Color32::from_rgba_unmultiplied(200, 150, 100, 100)
} else {
egui::Color32::from_gray(40)
})
.stroke(egui::Stroke::NONE);
ui.add(button)
}).inner;
if lock_response.clicked() {
self.layer_control_clicked = true;
pending_actions.push(Box::new(
lightningbeam_core::actions::SetLayerPropertiesAction::new(
layer_id,
lightningbeam_core::actions::LayerProperty::Locked(!is_locked),
)
));
}
// Volume slider (nonlinear: 0-70% slider = 0-100% volume, 70-100% slider = 100-200% volume)
let volume_response = ui.allocate_ui_at_rect(volume_slider_rect, |ui| {
// Map volume (0.0-2.0) to slider position (0.0-1.0)
let slider_value = if current_volume <= 1.0 {
// 0.0-1.0 volume maps to 0.0-0.7 slider (70%)
current_volume * 0.7
} else {
// 1.0-2.0 volume maps to 0.7-1.0 slider (30%)
0.7 + (current_volume - 1.0) * 0.3
};
let mut temp_slider_value = slider_value;
let slider = egui::Slider::new(&mut temp_slider_value, 0.0..=1.0)
.show_value(false);
let response = ui.add(slider);
(response, temp_slider_value)
}).inner;
if volume_response.0.changed() {
self.layer_control_clicked = true;
// Map slider position (0.0-1.0) back to volume (0.0-2.0)
let new_volume = if volume_response.1 <= 0.7 {
// 0.0-0.7 slider maps to 0.0-1.0 volume
volume_response.1 / 0.7
} else {
// 0.7-1.0 slider maps to 1.0-2.0 volume
1.0 + (volume_response.1 - 0.7) / 0.3
};
pending_actions.push(Box::new(
lightningbeam_core::actions::SetLayerPropertiesAction::new(
layer_id,
lightningbeam_core::actions::LayerProperty::Volume(new_volume),
)
));
}
// Separator line at bottom
painter.line_segment(
ui.painter().line_segment(
[
egui::pos2(header_rect.min.x, header_rect.max.y),
egui::pos2(header_rect.max.x, header_rect.max.y),
@ -444,7 +591,7 @@ impl TimelinePane {
}
// Right border for header column
painter.line_segment(
ui.painter().line_segment(
[
egui::pos2(rect.max.x, rect.min.y),
egui::pos2(rect.max.x, rect.max.y),
@ -680,10 +827,27 @@ impl TimelinePane {
is_playing: &mut bool,
audio_controller: Option<&mut daw_backend::EngineController>,
) {
let response = ui.allocate_rect(full_timeline_rect, egui::Sense::click_and_drag());
// Don't allocate the header area for input - let widgets handle it directly
// Only allocate content area (ruler + layers) with click and drag
let content_response = ui.allocate_rect(
egui::Rect::from_min_size(
egui::pos2(content_rect.min.x, ruler_rect.min.y),
egui::vec2(
content_rect.width(),
ruler_rect.height() + content_rect.height()
)
),
egui::Sense::click_and_drag()
);
let response = content_response;
// Check if mouse is over either area
let header_hovered = ui.rect_contains_pointer(header_rect);
let any_hovered = response.hovered() || header_hovered;
// Only process input if mouse is over the timeline pane
if !response.hovered() {
if !any_hovered {
self.is_panning = false;
self.last_pan_pos = None;
self.is_scrubbing = false;
@ -761,6 +925,28 @@ impl TimelinePane {
}
}
// Handle layer header selection (only if no control widget was clicked)
// Check for clicks in header area using direct input query
let header_clicked = ui.input(|i| {
i.pointer.button_clicked(egui::PointerButton::Primary) &&
i.pointer.interact_pos().map_or(false, |pos| header_rect.contains(pos))
});
if header_clicked && !alt_held && !clicked_clip_instance && !self.layer_control_clicked {
if let Some(pos) = ui.input(|i| i.pointer.interact_pos()) {
let relative_y = pos.y - header_rect.min.y + self.viewport_scroll_y;
let clicked_layer_index = (relative_y / LAYER_HEIGHT) as usize;
// Get the layer at this index (accounting for reversed display order)
if clicked_layer_index < layer_count {
let layers: Vec<_> = document.root.children.iter().rev().collect();
if let Some(layer) = layers.get(clicked_layer_index) {
*active_layer_id = Some(layer.id());
}
}
}
}
// Cache mouse position on mousedown (before any dragging)
if response.hovered() && ui.input(|i| i.pointer.button_pressed(egui::PointerButton::Primary)) {
if let Some(pos) = response.hover_pos() {
@ -1231,6 +1417,9 @@ impl PaneRenderer for TimelinePane {
_path: &NodePath,
shared: &mut SharedPaneState,
) {
// Reset layer control click flag at start of frame
self.layer_control_clicked = false;
// Sync playback_time to document
shared.action_executor.document_mut().current_time = *shared.playback_time;
@ -1320,7 +1509,7 @@ impl PaneRenderer for TimelinePane {
// Render layer header column with clipping
ui.set_clip_rect(layer_headers_rect.intersect(original_clip_rect));
self.render_layer_headers(ui, layer_headers_rect, shared.theme, document, shared.active_layer_id);
self.render_layer_headers(ui, layer_headers_rect, shared.theme, shared.active_layer_id, &mut shared.pending_actions, document);
// Render time ruler (clip to ruler rect)
ui.set_clip_rect(ruler_rect.intersect(original_clip_rect));