Merge remote and fix color space

Skyler Lehmkuhl 2026-03-01 15:50:53 -05:00
commit 1c3f794958
13 changed files with 1422 additions and 319 deletions

View File

@ -71,6 +71,16 @@ pub struct Engine {
// Disk reader for streaming playback of compressed files // Disk reader for streaming playback of compressed files
disk_reader: Option<crate::audio::disk_reader::DiskReader>, disk_reader: Option<crate::audio::disk_reader::DiskReader>,
// Input monitoring and metering
input_monitoring: bool,
input_gain: f32,
input_level_peak: f32,
input_level_counter: usize,
output_level_peak_l: f32,
output_level_peak_r: f32,
output_level_counter: usize,
track_level_counter: usize,
// Callback timing diagnostics (enabled by DAW_AUDIO_DEBUG=1) // Callback timing diagnostics (enabled by DAW_AUDIO_DEBUG=1)
debug_audio: bool, debug_audio: bool,
callback_count: u64, callback_count: u64,
@ -138,6 +148,14 @@ impl Engine {
metronome: Metronome::new(sample_rate), metronome: Metronome::new(sample_rate),
recording_sample_buffer: Vec::with_capacity(4096), recording_sample_buffer: Vec::with_capacity(4096),
disk_reader: Some(disk_reader), disk_reader: Some(disk_reader),
input_monitoring: false,
input_gain: 1.0,
input_level_peak: 0.0,
input_level_counter: 0,
output_level_peak_l: 0.0,
output_level_peak_r: 0.0,
output_level_counter: 0,
track_level_counter: 0,
debug_audio: std::env::var("DAW_AUDIO_DEBUG").map_or(false, |v| v == "1"), debug_audio: std::env::var("DAW_AUDIO_DEBUG").map_or(false, |v| v == "1"),
callback_count: 0, callback_count: 0,
timing_worst_total_us: 0, timing_worst_total_us: 0,
@ -380,27 +398,72 @@ impl Engine {
self.process_live_midi(output); self.process_live_midi(output);
} }
// Process recording if active (independent of playback state) // Compute stereo output peaks for master VU meter (independent of playback state)
if let Some(recording) = &mut self.recording_state { {
let channels = self.channels as usize;
for frame in output.chunks(channels) {
if channels >= 2 {
self.output_level_peak_l = self.output_level_peak_l.max(frame[0].abs());
self.output_level_peak_r = self.output_level_peak_r.max(frame[1].abs());
} else {
let v = frame[0].abs();
self.output_level_peak_l = self.output_level_peak_l.max(v);
self.output_level_peak_r = self.output_level_peak_r.max(v);
}
}
self.output_level_counter += output.len();
let meter_interval = self.sample_rate as usize / 20; // ~50ms
if self.output_level_counter >= meter_interval {
let _ = self.event_tx.push(AudioEvent::OutputLevel(self.output_level_peak_l, self.output_level_peak_r));
self.output_level_peak_l = 0.0;
self.output_level_peak_r = 0.0;
self.output_level_counter = 0;
}
// Send per-track peak levels periodically
self.track_level_counter += output.len();
if self.track_level_counter >= meter_interval {
let levels = self.project.collect_track_peaks();
let _ = self.event_tx.push(AudioEvent::TrackLevels(levels));
self.track_level_counter = 0;
}
}
// Process input monitoring and/or recording (independent of playback state)
let is_recording = self.recording_state.is_some();
if is_recording || self.input_monitoring {
if let Some(input_rx) = &mut self.input_rx { if let Some(input_rx) = &mut self.input_rx {
// Phase 1: Discard stale samples by popping without storing // Phase 1: Discard stale samples during recording skip phase
// (fast — no Vec push, no add_samples overhead) if let Some(recording) = &mut self.recording_state {
while recording.samples_to_skip > 0 { while recording.samples_to_skip > 0 {
match input_rx.pop() { match input_rx.pop() {
Ok(_) => recording.samples_to_skip -= 1, Ok(_) => recording.samples_to_skip -= 1,
Err(_) => break, Err(_) => break,
} }
} }
// Phase 2: Pull fresh samples for actual recording
self.recording_sample_buffer.clear();
while let Ok(sample) = input_rx.pop() {
self.recording_sample_buffer.push(sample);
} }
// Add samples to recording // Phase 2: Pull fresh samples
self.recording_sample_buffer.clear();
while let Ok(sample) = input_rx.pop() {
// Apply input gain
self.recording_sample_buffer.push(sample * self.input_gain);
}
if !self.recording_sample_buffer.is_empty() { if !self.recording_sample_buffer.is_empty() {
// Calculate how many samples will be skipped (stale buffer data) // Compute input peak for VU metering
let input_peak = self.recording_sample_buffer.iter().map(|s| s.abs()).fold(0.0f32, f32::max);
self.input_level_peak = self.input_level_peak.max(input_peak);
self.input_level_counter += self.recording_sample_buffer.len();
let meter_interval = self.sample_rate as usize / 20; // ~50ms
if self.input_level_counter >= meter_interval {
let _ = self.event_tx.push(AudioEvent::InputLevel(self.input_level_peak));
self.input_level_peak = 0.0;
self.input_level_counter = 0;
}
// Feed samples to recording if active
if let Some(recording) = &mut self.recording_state {
let skip = if recording.paused { let skip = if recording.paused {
self.recording_sample_buffer.len() self.recording_sample_buffer.len()
} else { } else {
@ -426,7 +489,6 @@ impl Engine {
// Update clip duration in project as recording progresses // Update clip duration in project as recording progresses
if let Some(crate::audio::track::TrackNode::Audio(track)) = self.project.get_track_mut(track_id) { if let Some(crate::audio::track::TrackNode::Audio(track)) = self.project.get_track_mut(track_id) {
if let Some(clip) = track.clips.iter_mut().find(|c| c.id == clip_id) { if let Some(clip) = track.clips.iter_mut().find(|c| c.id == clip_id) {
// Update both internal_end and external_duration as recording progresses
clip.internal_end = clip.internal_start + duration; clip.internal_end = clip.internal_start + duration;
clip.external_duration = duration; clip.external_duration = duration;
} }
@ -440,17 +502,16 @@ impl Engine {
} }
} }
Err(e) => { Err(e) => {
// Recording error occurred
let _ = self.event_tx.push(AudioEvent::RecordingError( let _ = self.event_tx.push(AudioEvent::RecordingError(
format!("Recording write error: {}", e) format!("Recording write error: {}", e)
)); ));
// Stop recording on error
self.recording_state = None; self.recording_state = None;
} }
} }
} }
} }
} }
}
// Timing diagnostics (DAW_AUDIO_DEBUG=1) // Timing diagnostics (DAW_AUDIO_DEBUG=1)
if let (true, Some(t_start), Some(t_commands)) = (self.debug_audio, t_start, t_commands) { if let (true, Some(t_start), Some(t_commands)) = (self.debug_audio, t_start, t_commands) {
@ -1136,6 +1197,14 @@ impl Engine {
self.metronome.set_enabled(enabled); self.metronome.set_enabled(enabled);
} }
Command::SetInputMonitoring(enabled) => {
self.input_monitoring = enabled;
}
Command::SetInputGain(gain) => {
self.input_gain = gain;
}
Command::SetTempo(bpm, time_sig) => { Command::SetTempo(bpm, time_sig) => {
self.metronome.update_timing(bpm, time_sig); self.metronome.update_timing(bpm, time_sig);
self.project.set_tempo(bpm, time_sig.0); self.project.set_tempo(bpm, time_sig.0);
@ -2851,6 +2920,16 @@ impl EngineController {
let _ = self.command_tx.push(Command::SetTrackSolo(track_id, solo)); let _ = self.command_tx.push(Command::SetTrackSolo(track_id, solo));
} }
/// Enable or disable input monitoring (mic level metering)
pub fn set_input_monitoring(&mut self, enabled: bool) {
let _ = self.command_tx.push(Command::SetInputMonitoring(enabled));
}
/// Set the input gain multiplier (applied before recording)
pub fn set_input_gain(&mut self, gain: f32) {
let _ = self.command_tx.push(Command::SetInputGain(gain));
}
/// Move a clip to a new timeline position (changes external_start) /// Move a clip to a new timeline position (changes external_start)
pub fn move_clip(&mut self, track_id: TrackId, clip_id: ClipId, new_start_time: f64) { pub fn move_clip(&mut self, track_id: TrackId, clip_id: ClipId, new_start_time: f64) {
let _ = self.command_tx.push(Command::MoveClip(track_id, clip_id, new_start_time)); let _ = self.command_tx.push(Command::MoveClip(track_id, clip_id, new_start_time));
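The metering code above repeats one small pattern for input, output, and per-track levels: keep a running absolute peak, and once roughly every 50 ms of audio push a single event and reset the accumulator. Below is a minimal standalone sketch of that pattern; PeakMeter and feed are hypothetical names, not the engine's own types.

struct PeakMeter {
    peak: f32,
    samples_seen: usize,
    interval: usize, // samples per report (~50 ms)
}

impl PeakMeter {
    fn new(sample_rate: u32) -> Self {
        Self { peak: 0.0, samples_seen: 0, interval: sample_rate as usize / 20 }
    }

    // Feed one interleaved buffer; returns Some(peak) when a report is due.
    fn feed(&mut self, buffer: &[f32]) -> Option<f32> {
        for &s in buffer {
            self.peak = self.peak.max(s.abs());
        }
        self.samples_seen += buffer.len();
        if self.samples_seen >= self.interval {
            let report = self.peak;
            self.peak = 0.0;
            self.samples_seen = 0;
            Some(report)
        } else {
            None
        }
    }
}

fn main() {
    let mut meter = PeakMeter::new(48_000);
    let loud = vec![0.5_f32; 512];
    let quiet = vec![0.05_f32; 512];
    for i in 0..10 {
        let buf = if i % 2 == 0 { &loud } else { &quiet };
        if let Some(peak) = meter.feed(buf) {
            println!("meter report: {peak}");
        }
    }
}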

View File

@ -441,13 +441,34 @@ impl Project {
// Handle audio track vs MIDI track vs group track // Handle audio track vs MIDI track vs group track
match self.tracks.get_mut(&track_id) { match self.tracks.get_mut(&track_id) {
Some(TrackNode::Audio(track)) => { Some(TrackNode::Audio(track)) => {
// Render audio track directly into output // Render audio track into a temp buffer for peak measurement
track.render(output, audio_pool, ctx.playhead_seconds, ctx.sample_rate, ctx.channels); let mut track_buffer = buffer_pool.acquire();
track_buffer.resize(output.len(), 0.0);
track_buffer.fill(0.0);
track.render(&mut track_buffer, audio_pool, ctx.playhead_seconds, ctx.sample_rate, ctx.channels);
// Accumulate peak level for VU metering (max over meter interval)
let buffer_peak = track_buffer.iter().map(|s| s.abs()).fold(0.0f32, f32::max);
track.peak_level = track.peak_level.max(buffer_peak);
// Mix into output
for (out, src) in output.iter_mut().zip(track_buffer.iter()) {
*out += src;
}
buffer_pool.release(track_buffer);
} }
Some(TrackNode::Midi(track)) => { Some(TrackNode::Midi(track)) => {
// Render MIDI track directly into output // Render MIDI track into a temp buffer for peak measurement
// Access midi_clip_pool from self - safe because we only need immutable access let mut track_buffer = buffer_pool.acquire();
track.render(output, &self.midi_clip_pool, ctx.playhead_seconds, ctx.sample_rate, ctx.channels); track_buffer.resize(output.len(), 0.0);
track_buffer.fill(0.0);
track.render(&mut track_buffer, &self.midi_clip_pool, ctx.playhead_seconds, ctx.sample_rate, ctx.channels);
// Accumulate peak level for VU metering (max over meter interval)
let buffer_peak = track_buffer.iter().map(|s| s.abs()).fold(0.0f32, f32::max);
track.peak_level = track.peak_level.max(buffer_peak);
// Mix into output
for (out, src) in output.iter_mut().zip(track_buffer.iter()) {
*out += src;
}
buffer_pool.release(track_buffer);
} }
Some(TrackNode::Group(group)) => { Some(TrackNode::Group(group)) => {
// Skip rendering if playhead is outside the metatrack's trim window // Skip rendering if playhead is outside the metatrack's trim window
@ -534,6 +555,25 @@ impl Project {
} }
} }
/// Collect per-track peak levels for VU metering and reset accumulators
pub fn collect_track_peaks(&mut self) -> Vec<(TrackId, f32)> {
let mut levels = Vec::new();
for (id, track) in &mut self.tracks {
match track {
TrackNode::Audio(t) => {
levels.push((*id, t.peak_level));
t.peak_level = 0.0;
}
TrackNode::Midi(t) => {
levels.push((*id, t.peak_level));
t.peak_level = 0.0;
}
TrackNode::Group(_) => {}
}
}
levels
}
/// Stop all notes on all MIDI tracks /// Stop all notes on all MIDI tracks
pub fn stop_all_notes(&mut self) { pub fn stop_all_notes(&mut self) {
for track in self.tracks.values_mut() { for track in self.tracks.values_mut() {
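The per-track render path above (scratch buffer acquired from a pool, peak measured on that buffer, then mixed into the shared output) can be sketched in isolation. BufferPool and mix_track below are hypothetical stand-ins under those assumptions, not the project's real types.

struct BufferPool {
    free: Vec<Vec<f32>>,
}

impl BufferPool {
    fn acquire(&mut self) -> Vec<f32> {
        self.free.pop().unwrap_or_default()
    }
    fn release(&mut self, buf: Vec<f32>) {
        self.free.push(buf);
    }
}

// Render one track into a pooled scratch buffer, measure its peak,
// mix it into `output`, and hand the buffer back to the pool.
fn mix_track(output: &mut [f32], pool: &mut BufferPool, render: impl Fn(&mut [f32])) -> f32 {
    let mut scratch = pool.acquire();
    scratch.resize(output.len(), 0.0);
    scratch.fill(0.0);
    render(&mut scratch);

    let peak = scratch.iter().map(|s| s.abs()).fold(0.0_f32, f32::max);

    for (out, src) in output.iter_mut().zip(scratch.iter()) {
        *out += src;
    }
    pool.release(scratch);
    peak
}

fn main() {
    let mut pool = BufferPool { free: Vec::new() };
    let mut output = vec![0.0_f32; 8];
    let peak = mix_track(&mut output, &mut pool, |buf| {
        for (i, s) in buf.iter_mut().enumerate() {
            *s = if i % 2 == 0 { 0.25 } else { -0.5 };
        }
    });
    println!("peak = {peak}, output = {output:?}");
}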

View File

@ -435,6 +435,10 @@ pub struct MidiTrack {
/// Used to detect when the playhead exits a clip, so we can send all-notes-off. /// Used to detect when the playhead exits a clip, so we can send all-notes-off.
#[serde(skip)] #[serde(skip)]
prev_active_instances: HashSet<MidiClipInstanceId>, prev_active_instances: HashSet<MidiClipInstanceId>,
/// Peak level of last render() call (for VU metering)
#[serde(skip, default)]
pub peak_level: f32,
} }
impl Clone for MidiTrack { impl Clone for MidiTrack {
@ -452,6 +456,7 @@ impl Clone for MidiTrack {
next_automation_id: self.next_automation_id, next_automation_id: self.next_automation_id,
live_midi_queue: Vec::new(), // Don't clone live MIDI queue live_midi_queue: Vec::new(), // Don't clone live MIDI queue
prev_active_instances: HashSet::new(), prev_active_instances: HashSet::new(),
peak_level: 0.0,
} }
} }
} }
@ -479,6 +484,7 @@ impl MidiTrack {
next_automation_id: 0, next_automation_id: 0,
live_midi_queue: Vec::new(), live_midi_queue: Vec::new(),
prev_active_instances: HashSet::new(), prev_active_instances: HashSet::new(),
peak_level: 0.0,
} }
} }
@ -705,6 +711,10 @@ pub struct AudioTrack {
/// Pre-allocated buffer for clip rendering (avoids heap allocation per callback) /// Pre-allocated buffer for clip rendering (avoids heap allocation per callback)
#[serde(skip, default)] #[serde(skip, default)]
clip_render_buffer: Vec<f32>, clip_render_buffer: Vec<f32>,
/// Peak level of last render() call (for VU metering)
#[serde(skip, default)]
pub peak_level: f32,
} }
impl Clone for AudioTrack { impl Clone for AudioTrack {
@ -721,6 +731,7 @@ impl Clone for AudioTrack {
effects_graph_preset: self.effects_graph_preset.clone(), effects_graph_preset: self.effects_graph_preset.clone(),
effects_graph: default_audio_graph(), // Create fresh graph, not cloned effects_graph: default_audio_graph(), // Create fresh graph, not cloned
clip_render_buffer: Vec::new(), clip_render_buffer: Vec::new(),
peak_level: 0.0,
} }
} }
} }
@ -764,6 +775,7 @@ impl AudioTrack {
effects_graph_preset: None, effects_graph_preset: None,
effects_graph, effects_graph,
clip_render_buffer: Vec::new(), clip_render_buffer: Vec::new(),
peak_level: 0.0,
} }
} }
@ -987,7 +999,7 @@ impl AudioTrack {
} }
// Calculate combined gain // Calculate combined gain
let combined_gain = clip.gain * self.volume; let combined_gain = clip.gain;
let mut total_rendered = 0; let mut total_rendered = 0;

View File

@ -226,6 +226,12 @@ pub enum Command {
priority: u8, // 0=Low, 1=Medium, 2=High priority: u8, // 0=Low, 1=Medium, 2=High
}, },
// Input monitoring/gain commands
/// Enable or disable input monitoring (mic level metering)
SetInputMonitoring(bool),
/// Set the input gain multiplier (applied before recording)
SetInputGain(f32),
// Async audio import // Async audio import
/// Import an audio file asynchronously. The engine probes the file format /// Import an audio file asynchronously. The engine probes the file format
/// and either memory-maps it (WAV/AIFF) or sets up stream decode /// and either memory-maps it (WAV/AIFF) or sets up stream decode
@ -333,6 +339,13 @@ pub enum AudioEvent {
channels: u32, channels: u32,
}, },
/// Peak amplitude of mic input (for input monitoring meter)
InputLevel(f32),
/// Peak amplitude of mix output (for master meter), stereo (left, right)
OutputLevel(f32, f32),
/// Per-track playback peak levels
TrackLevels(Vec<(TrackId, f32)>),
/// Background waveform decode progress/completion for a compressed audio file. /// Background waveform decode progress/completion for a compressed audio file.
/// Internal event — consumed by the engine to update the pool, not forwarded to UI. /// Internal event — consumed by the engine to update the pool, not forwarded to UI.
/// `decoded_frames` < `total_frames` means partial; equal means complete. /// `decoded_frames` < `total_frames` means partial; equal means complete.

View File

@ -32,6 +32,7 @@ pub mod region_split;
pub mod toggle_group_expansion; pub mod toggle_group_expansion;
pub mod group_layers; pub mod group_layers;
pub mod raster_stroke; pub mod raster_stroke;
pub mod move_layer;
pub use add_clip_instance::AddClipInstanceAction; pub use add_clip_instance::AddClipInstanceAction;
pub use add_effect::AddEffectAction; pub use add_effect::AddEffectAction;
@ -62,3 +63,4 @@ pub use region_split::RegionSplitAction;
pub use toggle_group_expansion::ToggleGroupExpansionAction; pub use toggle_group_expansion::ToggleGroupExpansionAction;
pub use group_layers::GroupLayersAction; pub use group_layers::GroupLayersAction;
pub use raster_stroke::RasterStrokeAction; pub use raster_stroke::RasterStrokeAction;
pub use move_layer::MoveLayerAction;

View File

@ -0,0 +1,137 @@
use crate::action::Action;
use crate::document::Document;
use crate::layer::AnyLayer;
use uuid::Uuid;
/// Action that moves one or more layers to a new position, possibly changing their parent group.
/// All layers are inserted contiguously into the same target parent.
/// Handles batch moves atomically: removes all, then inserts all, so indices stay consistent.
pub struct MoveLayerAction {
/// (layer_id, old_parent_id) for each layer to move, in visual order (top to bottom)
layers: Vec<(Uuid, Option<Uuid>)>,
new_parent_id: Option<Uuid>,
/// Insertion index in the new parent's children vec AFTER all dragged layers have been removed
new_base_index: usize,
/// Stored during execute for rollback: (layer, old_parent_id, old_index_in_parent)
removed: Vec<(AnyLayer, Option<Uuid>, usize)>,
}
impl MoveLayerAction {
pub fn new(
layers: Vec<(Uuid, Option<Uuid>)>,
new_parent_id: Option<Uuid>,
new_base_index: usize,
) -> Self {
Self {
layers,
new_parent_id,
new_base_index,
removed: Vec::new(),
}
}
}
fn get_parent_children(
document: &mut Document,
parent_id: Option<Uuid>,
) -> Result<&mut Vec<AnyLayer>, String> {
match parent_id {
None => Ok(&mut document.root.children),
Some(id) => {
let layer = document
.root
.get_child_mut(&id)
.ok_or_else(|| format!("Parent group {} not found", id))?;
match layer {
AnyLayer::Group(g) => Ok(&mut g.children),
_ => Err(format!("Layer {} is not a group", id)),
}
}
}
}
impl Action for MoveLayerAction {
fn execute(&mut self, document: &mut Document) -> Result<(), String> {
self.removed.clear();
// Phase 1: Remove all layers from their old parents.
// Group removals by parent, then remove back-to-front within each parent.
// Collect (layer_id, old_parent_id) with their current index.
let mut removals: Vec<(Uuid, Option<Uuid>, usize)> = Vec::new();
for (layer_id, old_parent_id) in &self.layers {
let children = get_parent_children(document, *old_parent_id)?;
let idx = children.iter().position(|l| l.id() == *layer_id)
.ok_or_else(|| format!("Layer {} not found in parent", layer_id))?;
removals.push((*layer_id, *old_parent_id, idx));
}
// Sort by parent, then by index descending, so removals go back-to-front within each parent
removals.sort_by(|a, b| {
a.1.cmp(&b.1).then(b.2.cmp(&a.2))
});
let mut removed_layers: Vec<(Uuid, AnyLayer, Option<Uuid>, usize)> = Vec::new();
for (layer_id, old_parent_id, idx) in &removals {
let children = get_parent_children(document, *old_parent_id)?;
let layer = children.remove(*idx);
removed_layers.push((*layer_id, layer, *old_parent_id, *idx));
}
// Phase 2: Insert all at new parent, in visual order (self.layers order).
// self.new_base_index is the index in the post-removal children vec.
let new_children = get_parent_children(document, self.new_parent_id)?;
let base = self.new_base_index.min(new_children.len());
// Insert in forward visual order, all at `base`. Each insert pushes the previous
// one to a higher children index. Since the timeline displays children in reverse,
// a higher children index = visually higher. So the first visual layer (layers[0])
// ends up at the highest children index = visually topmost. Correct.
for (layer_id, _) in self.layers.iter() {
// Find this layer in removed_layers
let pos = removed_layers.iter().position(|(id, _, _, _)| id == layer_id)
.ok_or_else(|| format!("Layer {} missing from removed set", layer_id))?;
let (_, layer, old_parent_id, old_idx) = removed_layers.remove(pos);
self.removed.push((layer.clone(), old_parent_id, old_idx));
let new_children = get_parent_children(document, self.new_parent_id)?;
let insert_at = base.min(new_children.len());
new_children.insert(insert_at, layer);
}
Ok(())
}
fn rollback(&mut self, document: &mut Document) -> Result<(), String> {
if self.removed.is_empty() {
return Err("Cannot rollback: action was not executed".to_string());
}
// Phase 1: Remove all layers from new parent (back-to-front by insertion order).
for (layer_id, _) in self.layers.iter().rev() {
let new_children = get_parent_children(document, self.new_parent_id)?;
let pos = new_children.iter().position(|l| l.id() == *layer_id)
.ok_or_else(|| format!("Layer {} not found in new parent for rollback", layer_id))?;
new_children.remove(pos);
}
// Phase 2: Re-insert at old positions, sorted by (parent, index) ascending.
let mut restore: Vec<(AnyLayer, Option<Uuid>, usize)> = self.removed.drain(..).collect();
restore.sort_by(|a, b| a.1.cmp(&b.1).then(a.2.cmp(&b.2)));
for (layer, old_parent_id, old_idx) in restore {
let children = get_parent_children(document, old_parent_id)?;
let idx = old_idx.min(children.len());
children.insert(idx, layer);
}
Ok(())
}
fn description(&self) -> String {
if self.layers.len() == 1 {
"Move layer".to_string()
} else {
format!("Move {} layers", self.layers.len())
}
}
}
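The index bookkeeping in execute() is easiest to see with plain strings instead of layers. This is a hedged, self-contained illustration (no Document or AnyLayer involved) of the remove-back-to-front then insert-at-a-fixed-base scheme and the resulting visual order.

fn main() {
    // Children as stored (the timeline displays them in reverse order).
    let mut children: Vec<String> = ["a", "b", "c", "d", "e"].map(String::from).to_vec();

    // Phase 1: remove the dragged layers ("b" at index 1, "d" at index 3)
    // back-to-front so the earlier index stays valid after the first removal.
    let mut removed: Vec<String> = Vec::new();
    for idx in [3usize, 1] {
        removed.push(children.remove(idx));
    }
    // removed = ["d", "b"]; children = ["a", "c", "e"]

    // Phase 2: insert in visual order ("b" above "d"), all at the same
    // post-removal base index. Later inserts push earlier ones to higher
    // indices, which the reversed timeline shows as visually higher.
    let base = 1usize.min(children.len());
    for name in ["b", "d"] {
        let pos = removed.iter().position(|l| l.as_str() == name).unwrap();
        let layer = removed.remove(pos);
        children.insert(base, layer);
    }

    // "b" ends up at a higher index than "d", i.e. visually above it.
    assert_eq!(children, ["a", "d", "b", "c", "e"]);
    println!("{children:?}");
}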

View File

@ -12,6 +12,7 @@ use uuid::Uuid;
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub enum LayerProperty { pub enum LayerProperty {
Volume(f64), Volume(f64),
InputGain(f64),
Muted(bool), Muted(bool),
Soloed(bool), Soloed(bool),
Locked(bool), Locked(bool),
@ -25,6 +26,7 @@ pub enum LayerProperty {
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
enum OldValue { enum OldValue {
Volume(f64), Volume(f64),
InputGain(f64),
Muted(bool), Muted(bool),
Soloed(bool), Soloed(bool),
Locked(bool), Locked(bool),
@ -85,6 +87,7 @@ impl Action for SetLayerPropertiesAction {
if self.old_values[i].is_none() { if self.old_values[i].is_none() {
self.old_values[i] = Some(match &self.property { self.old_values[i] = Some(match &self.property {
LayerProperty::Volume(_) => OldValue::Volume(layer.volume()), LayerProperty::Volume(_) => OldValue::Volume(layer.volume()),
LayerProperty::InputGain(_) => OldValue::InputGain(layer.layer().input_gain),
LayerProperty::Muted(_) => OldValue::Muted(layer.muted()), LayerProperty::Muted(_) => OldValue::Muted(layer.muted()),
LayerProperty::Soloed(_) => OldValue::Soloed(layer.soloed()), LayerProperty::Soloed(_) => OldValue::Soloed(layer.soloed()),
LayerProperty::Locked(_) => OldValue::Locked(layer.locked()), LayerProperty::Locked(_) => OldValue::Locked(layer.locked()),
@ -104,6 +107,7 @@ impl Action for SetLayerPropertiesAction {
// Set new value // Set new value
match &self.property { match &self.property {
LayerProperty::Volume(v) => layer.set_volume(*v), LayerProperty::Volume(v) => layer.set_volume(*v),
LayerProperty::InputGain(g) => layer.layer_mut().input_gain = *g,
LayerProperty::Muted(m) => layer.set_muted(*m), LayerProperty::Muted(m) => layer.set_muted(*m),
LayerProperty::Soloed(s) => layer.set_soloed(*s), LayerProperty::Soloed(s) => layer.set_soloed(*s),
LayerProperty::Locked(l) => layer.set_locked(*l), LayerProperty::Locked(l) => layer.set_locked(*l),
@ -128,6 +132,7 @@ impl Action for SetLayerPropertiesAction {
if let Some(old_value) = &self.old_values[i] { if let Some(old_value) = &self.old_values[i] {
match old_value { match old_value {
OldValue::Volume(v) => layer.set_volume(*v), OldValue::Volume(v) => layer.set_volume(*v),
OldValue::InputGain(g) => layer.layer_mut().input_gain = *g,
OldValue::Muted(m) => layer.set_muted(*m), OldValue::Muted(m) => layer.set_muted(*m),
OldValue::Soloed(s) => layer.set_soloed(*s), OldValue::Soloed(s) => layer.set_soloed(*s),
OldValue::Locked(l) => layer.set_locked(*l), OldValue::Locked(l) => layer.set_locked(*l),
@ -159,6 +164,7 @@ impl Action for SetLayerPropertiesAction {
if let Some(&track_id) = backend.layer_to_track_map.get(&layer_id) { if let Some(&track_id) = backend.layer_to_track_map.get(&layer_id) {
match &self.property { match &self.property {
LayerProperty::Volume(v) => controller.set_track_volume(track_id, *v as f32), LayerProperty::Volume(v) => controller.set_track_volume(track_id, *v as f32),
LayerProperty::InputGain(g) => controller.set_input_gain(*g as f32),
LayerProperty::Muted(m) => controller.set_track_mute(track_id, *m), LayerProperty::Muted(m) => controller.set_track_mute(track_id, *m),
LayerProperty::Soloed(s) => controller.set_track_solo(track_id, *s), LayerProperty::Soloed(s) => controller.set_track_solo(track_id, *s),
_ => {} // Locked/Opacity/Visible/CameraEnabled are UI-only _ => {} // Locked/Opacity/Visible/CameraEnabled are UI-only
@ -183,6 +189,7 @@ impl Action for SetLayerPropertiesAction {
if let Some(old_value) = &self.old_values[i] { if let Some(old_value) = &self.old_values[i] {
match old_value { match old_value {
OldValue::Volume(v) => controller.set_track_volume(track_id, *v as f32), OldValue::Volume(v) => controller.set_track_volume(track_id, *v as f32),
OldValue::InputGain(g) => controller.set_input_gain(*g as f32),
OldValue::Muted(m) => controller.set_track_mute(track_id, *m), OldValue::Muted(m) => controller.set_track_mute(track_id, *m),
OldValue::Soloed(s) => controller.set_track_solo(track_id, *s), OldValue::Soloed(s) => controller.set_track_solo(track_id, *s),
_ => {} // Locked/Opacity/Visible are UI-only _ => {} // Locked/Opacity/Visible are UI-only
@ -196,6 +203,7 @@ impl Action for SetLayerPropertiesAction {
fn description(&self) -> String { fn description(&self) -> String {
let property_name = match &self.property { let property_name = match &self.property {
LayerProperty::Volume(_) => "volume", LayerProperty::Volume(_) => "volume",
LayerProperty::InputGain(_) => "input gain",
LayerProperty::Muted(_) => "mute", LayerProperty::Muted(_) => "mute",
LayerProperty::Soloed(_) => "solo", LayerProperty::Soloed(_) => "solo",
LayerProperty::Locked(_) => "lock", LayerProperty::Locked(_) => "lock",

View File

@ -63,6 +63,8 @@ pub trait LayerTrait {
fn set_locked(&mut self, locked: bool); fn set_locked(&mut self, locked: bool);
} }
fn default_input_gain() -> f64 { 1.0 }
/// Base layer structure /// Base layer structure
#[derive(Clone, Debug, Serialize, Deserialize)] #[derive(Clone, Debug, Serialize, Deserialize)]
pub struct Layer { pub struct Layer {
@ -87,6 +89,10 @@ pub struct Layer {
/// Audio volume (1.0 = 100%, affects nested audio layers/clips) /// Audio volume (1.0 = 100%, affects nested audio layers/clips)
pub volume: f64, pub volume: f64,
/// Input gain for recording (1.0 = unity, range 0.0–4.0)
#[serde(default = "default_input_gain")]
pub input_gain: f64,
/// Audio mute state /// Audio mute state
pub muted: bool, pub muted: bool,
@ -111,6 +117,7 @@ impl Layer {
visible: true, visible: true,
opacity: 1.0, opacity: 1.0,
volume: 1.0, // 100% volume volume: 1.0, // 100% volume
input_gain: 1.0,
muted: false, muted: false,
soloed: false, soloed: false,
locked: false, locked: false,
@ -128,6 +135,7 @@ impl Layer {
visible: true, visible: true,
opacity: 1.0, opacity: 1.0,
volume: 1.0, volume: 1.0,
input_gain: 1.0,
muted: false, muted: false,
soloed: false, soloed: false,
locked: false, locked: false,
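The #[serde(default = "default_input_gain")] attribute is what keeps older project files loadable after the field is added: a document serialized before input_gain existed simply deserializes with 1.0. A minimal sketch of that behavior; serde_json is used here purely for illustration and may differ from the project's actual file format.

use serde::{Deserialize, Serialize};

fn default_input_gain() -> f64 { 1.0 }

#[derive(Serialize, Deserialize, Debug)]
struct Layer {
    volume: f64,
    #[serde(default = "default_input_gain")]
    input_gain: f64,
}

fn main() {
    // A layer saved by an older version, with no input_gain field.
    let old_json = r#"{ "volume": 0.8 }"#;
    let layer: Layer = serde_json::from_str(old_json).unwrap();
    assert_eq!(layer.input_gain, 1.0);
    println!("{layer:?}");
}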

View File

@ -359,12 +359,19 @@ fn capture_thread_main(
let mut decoded_frame = ffmpeg::frame::Video::empty(); let mut decoded_frame = ffmpeg::frame::Video::empty();
let mut rgba_frame = ffmpeg::frame::Video::empty(); let mut rgba_frame = ffmpeg::frame::Video::empty();
// Helper closure: decode current packet, scale, send preview frame, and
// optionally encode into the active recorder. Returns updated frame_count.
let row_bytes = (width * 4) as usize;
let mut stop_result_tx: Option<std::sync::mpsc::Sender<Result<RecordingResult, String>>> = None;
'outer: for (stream_ref, packet) in input.packets() { 'outer: for (stream_ref, packet) in input.packets() {
if stream_ref.index() != stream_index { if stream_ref.index() != stream_index {
continue; continue;
} }
// Check for commands (non-blocking). // Check for commands BEFORE decoding so that StartRecording takes effect
// on the current packet (no lost frame at the start).
while let Ok(cmd) = cmd_rx.try_recv() { while let Ok(cmd) = cmd_rx.try_recv() {
match cmd { match cmd {
CaptureCommand::StartRecording { CaptureCommand::StartRecording {
@ -384,20 +391,19 @@ fn capture_thread_main(
} }
} }
CaptureCommand::StopRecording { result_tx } => { CaptureCommand::StopRecording { result_tx } => {
if let Some(rec) = recorder.take() { eprintln!("[WEBCAM stop] StopRecording command received on capture thread");
let _ = result_tx.send(rec.finish()); // Defer stop until AFTER we decode this packet, so the
} else { // current frame is captured before we finalize.
let _ = result_tx.send(Err("Not recording".into())); stop_result_tx = Some(result_tx);
}
} }
CaptureCommand::Shutdown => break 'outer, CaptureCommand::Shutdown => break 'outer,
} }
} }
// Decode current packet and process frames.
decoder.send_packet(&packet).ok(); decoder.send_packet(&packet).ok();
while decoder.receive_frame(&mut decoded_frame).is_ok() { while decoder.receive_frame(&mut decoded_frame).is_ok() {
// Skip initial corrupt frames from v4l2
if frame_count < SKIP_INITIAL_FRAMES { if frame_count < SKIP_INITIAL_FRAMES {
frame_count += 1; frame_count += 1;
continue; continue;
@ -407,10 +413,8 @@ fn capture_thread_main(
let timestamp = start_time.elapsed().as_secs_f64(); let timestamp = start_time.elapsed().as_secs_f64();
// Build tightly-packed RGBA data (remove stride padding).
let data = rgba_frame.data(0); let data = rgba_frame.data(0);
let stride = rgba_frame.stride(0); let stride = rgba_frame.stride(0);
let row_bytes = (width * 4) as usize;
let rgba_data = if stride == row_bytes { let rgba_data = if stride == row_bytes {
data[..row_bytes * height as usize].to_vec() data[..row_bytes * height as usize].to_vec()
@ -433,13 +437,52 @@ fn capture_thread_main(
let _ = frame_tx.try_send(frame); let _ = frame_tx.try_send(frame);
if let Some(ref mut rec) = recorder { if let Some(ref mut rec) = recorder {
if let Err(e) = rec.encode_rgba(&rgba_arc, width, height, frame_count) { if let Err(e) = rec.encode_rgba(&rgba_arc, width, height, timestamp) {
eprintln!("[webcam] recording encode error: {e}"); eprintln!("[webcam] recording encode error: {e}");
} }
} }
frame_count += 1; frame_count += 1;
} }
// Now handle deferred StopRecording (after the current packet is decoded).
if let Some(result_tx) = stop_result_tx.take() {
if let Some(mut rec) = recorder.take() {
// Flush any frames still buffered in the decoder.
let pre_drain_count = frame_count;
decoder.send_eof().ok();
while decoder.receive_frame(&mut decoded_frame).is_ok() {
if frame_count < SKIP_INITIAL_FRAMES {
frame_count += 1;
continue;
}
scaler.run(&decoded_frame, &mut rgba_frame).ok();
let timestamp = start_time.elapsed().as_secs_f64();
let data = rgba_frame.data(0);
let stride = rgba_frame.stride(0);
let rgba_data = if stride == row_bytes {
data[..row_bytes * height as usize].to_vec()
} else {
let mut buf = Vec::with_capacity(row_bytes * height as usize);
for y in 0..height as usize {
buf.extend_from_slice(&data[y * stride..y * stride + row_bytes]);
}
buf
};
let _ = rec.encode_rgba(&rgba_data, width, height, timestamp);
frame_count += 1;
}
eprintln!(
"[WEBCAM stop] drained {} extra frames from decoder (total frames={})",
frame_count - pre_drain_count, frame_count
);
// Reset the decoder so it can accept new packets for preview.
decoder.flush();
let _ = result_tx.send(rec.finish());
} else {
let _ = result_tx.send(Err("Not recording".into()));
}
}
} }
// Clean up: if still recording when shutting down, finalize. // Clean up: if still recording when shutting down, finalize.
@ -463,6 +506,10 @@ struct FrameRecorder {
path: PathBuf, path: PathBuf,
frame_count: u64, frame_count: u64,
fps: f64, fps: f64,
/// Timestamp of the first recorded frame (for offsetting PTS to start at 0)
first_timestamp: Option<f64>,
/// Timestamp of the most recent frame (for computing actual duration)
last_timestamp: f64,
} }
impl FrameRecorder { impl FrameRecorder {
@ -510,7 +557,10 @@ impl FrameRecorder {
encoder.set_width(aligned_width); encoder.set_width(aligned_width);
encoder.set_height(aligned_height); encoder.set_height(aligned_height);
encoder.set_format(pixel_format); encoder.set_format(pixel_format);
encoder.set_time_base(ffmpeg::Rational(1, fps as i32)); // Use microsecond time base for precise timestamp-based PTS.
// This avoids speedup artifacts when the camera delivers frames
// at irregular intervals (common under CPU load or with USB cameras).
encoder.set_time_base(ffmpeg::Rational(1, 1_000_000));
encoder.set_frame_rate(Some(ffmpeg::Rational(fps as i32, 1))); encoder.set_frame_rate(Some(ffmpeg::Rational(fps as i32, 1)));
if codec_id == ffmpeg::codec::Id::H264 { if codec_id == ffmpeg::codec::Id::H264 {
@ -549,6 +599,8 @@ impl FrameRecorder {
path: path.clone(), path: path.clone(),
frame_count: 0, frame_count: 0,
fps, fps,
first_timestamp: None,
last_timestamp: 0.0,
}) })
} }
@ -557,7 +609,7 @@ impl FrameRecorder {
rgba_data: &[u8], rgba_data: &[u8],
width: u32, width: u32,
height: u32, height: u32,
_global_frame: u64, timestamp: f64,
) -> Result<(), String> { ) -> Result<(), String> {
let mut src_frame = let mut src_frame =
ffmpeg::frame::Video::new(ffmpeg::format::Pixel::RGBA, width, height); ffmpeg::frame::Video::new(ffmpeg::format::Pixel::RGBA, width, height);
@ -576,8 +628,15 @@ impl FrameRecorder {
.run(&src_frame, &mut dst_frame) .run(&src_frame, &mut dst_frame)
.map_err(|e| format!("Scale: {e}"))?; .map_err(|e| format!("Scale: {e}"))?;
dst_frame.set_pts(Some(self.frame_count as i64)); // PTS in microseconds from actual capture timestamps.
// Time base is 1/1000000, so PTS = elapsed_seconds * 1000000.
// This ensures correct playback timing even when the camera delivers
// frames at irregular intervals (e.g. under CPU load).
let first_ts = *self.first_timestamp.get_or_insert(timestamp);
let elapsed_us = ((timestamp - first_ts).max(0.0) * 1_000_000.0) as i64;
dst_frame.set_pts(Some(elapsed_us));
self.frame_count += 1; self.frame_count += 1;
self.last_timestamp = timestamp;
self.encoder self.encoder
.send_frame(&dst_frame) .send_frame(&dst_frame)
@ -616,7 +675,14 @@ impl FrameRecorder {
.write_trailer() .write_trailer()
.map_err(|e| format!("Write trailer: {e}"))?; .map_err(|e| format!("Write trailer: {e}"))?;
let duration = self.frame_count as f64 / self.fps; let duration = match self.first_timestamp {
Some(first_ts) => self.last_timestamp - first_ts,
None => self.frame_count as f64 / self.fps,
};
eprintln!(
"[WEBCAM finish] frames={}, first_ts={:?}, last_ts={:.4}, duration={:.4}s, fps={}",
self.frame_count, self.first_timestamp, self.last_timestamp, duration, self.fps,
);
Ok(RecordingResult { Ok(RecordingResult {
file_path: self.path, file_path: self.path,
duration, duration,
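The timestamp-based PTS scheme above reduces to one line of arithmetic: with a 1/1_000_000 time base, each frame's PTS is its elapsed capture time in microseconds relative to the first recorded frame, so irregular frame spacing is preserved instead of being forced onto a fixed 1/fps grid. A small numeric sketch under those assumptions (the values are made up):

fn pts_us(first_timestamp: f64, timestamp: f64) -> i64 {
    ((timestamp - first_timestamp).max(0.0) * 1_000_000.0) as i64
}

fn main() {
    let first = 2.500; // seconds since capture start when recording began
    // A nominally 30 fps camera delivering frames with jitter under load.
    let captures = [2.500, 2.534, 2.566, 2.651, 2.684];
    for t in captures {
        println!("frame at {t:.3}s -> pts {} us", pts_us(first, t));
    }
    // Duration comes from real timestamps, not frame_count / fps.
    let duration = captures[captures.len() - 1] - first;
    println!("duration {:.3}s", duration);
}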

View File

@ -786,8 +786,6 @@ struct EditorApp {
webcam_frame: Option<lightningbeam_core::webcam::CaptureFrame>, webcam_frame: Option<lightningbeam_core::webcam::CaptureFrame>,
/// Pending webcam recording command (set by timeline, processed in update) /// Pending webcam recording command (set by timeline, processed in update)
webcam_record_command: Option<panes::WebcamRecordCommand>, webcam_record_command: Option<panes::WebcamRecordCommand>,
/// Layer being recorded to via webcam
webcam_recording_layer_id: Option<Uuid>,
// Track ID mapping (Document layer UUIDs <-> daw-backend TrackIds) // Track ID mapping (Document layer UUIDs <-> daw-backend TrackIds)
layer_to_track_map: HashMap<Uuid, daw_backend::TrackId>, layer_to_track_map: HashMap<Uuid, daw_backend::TrackId>,
track_to_layer_map: HashMap<daw_backend::TrackId, Uuid>, track_to_layer_map: HashMap<daw_backend::TrackId, Uuid>,
@ -808,7 +806,7 @@ struct EditorApp {
is_recording: bool, // Whether recording is currently active is_recording: bool, // Whether recording is currently active
recording_clips: HashMap<Uuid, u32>, // layer_id -> backend clip_id during recording recording_clips: HashMap<Uuid, u32>, // layer_id -> backend clip_id during recording
recording_start_time: f64, // Playback time when recording started recording_start_time: f64, // Playback time when recording started
recording_layer_id: Option<Uuid>, // Layer being recorded to (for creating clips) recording_layer_ids: Vec<Uuid>, // Layers being recorded to (for creating clips)
// Asset drag-and-drop state // Asset drag-and-drop state
dragging_asset: Option<panes::DraggingAsset>, // Asset being dragged from Asset Library dragging_asset: Option<panes::DraggingAsset>, // Asset being dragged from Asset Library
// Clipboard // Clipboard
@ -831,6 +829,11 @@ struct EditorApp {
region_selection: Option<lightningbeam_core::selection::RegionSelection>, region_selection: Option<lightningbeam_core::selection::RegionSelection>,
region_select_mode: lightningbeam_core::tool::RegionSelectMode, region_select_mode: lightningbeam_core::tool::RegionSelectMode,
// VU meter levels
input_level: f32,
output_level: (f32, f32),
track_levels: HashMap<daw_backend::TrackId, f32>,
/// Cache for MIDI event data (keyed by backend midi_clip_id) /// Cache for MIDI event data (keyed by backend midi_clip_id)
/// Prevents repeated backend queries for the same MIDI clip /// Prevents repeated backend queries for the same MIDI clip
/// Format: (timestamp, note_number, velocity, is_note_on) /// Format: (timestamp, note_number, velocity, is_note_on)
@ -1051,7 +1054,6 @@ impl EditorApp {
webcam: None, webcam: None,
webcam_frame: None, webcam_frame: None,
webcam_record_command: None, webcam_record_command: None,
webcam_recording_layer_id: None,
layer_to_track_map: HashMap::new(), layer_to_track_map: HashMap::new(),
track_to_layer_map: HashMap::new(), track_to_layer_map: HashMap::new(),
clip_to_metatrack_map: HashMap::new(), clip_to_metatrack_map: HashMap::new(),
@ -1064,7 +1066,7 @@ impl EditorApp {
is_recording: false, // Not recording initially is_recording: false, // Not recording initially
recording_clips: HashMap::new(), // No active recording clips recording_clips: HashMap::new(), // No active recording clips
recording_start_time: 0.0, // Will be set when recording starts recording_start_time: 0.0, // Will be set when recording starts
recording_layer_id: None, // Will be set when recording starts recording_layer_ids: Vec::new(), // Will be populated when recording starts
dragging_asset: None, // No asset being dragged initially dragging_asset: None, // No asset being dragged initially
clipboard_manager: lightningbeam_core::clipboard::ClipboardManager::new(), clipboard_manager: lightningbeam_core::clipboard::ClipboardManager::new(),
effect_to_load: None, effect_to_load: None,
@ -1079,6 +1081,9 @@ impl EditorApp {
polygon_sides: 5, // Default to pentagon polygon_sides: 5, // Default to pentagon
region_selection: None, region_selection: None,
region_select_mode: lightningbeam_core::tool::RegionSelectMode::default(), region_select_mode: lightningbeam_core::tool::RegionSelectMode::default(),
input_level: 0.0,
output_level: (0.0, 0.0),
track_levels: HashMap::new(),
midi_event_cache: HashMap::new(), // Initialize empty MIDI event cache midi_event_cache: HashMap::new(), // Initialize empty MIDI event cache
audio_duration_cache: HashMap::new(), // Initialize empty audio duration cache audio_duration_cache: HashMap::new(), // Initialize empty audio duration cache
audio_pools_with_new_waveforms: HashSet::new(), // Track pool indices with new raw audio audio_pools_with_new_waveforms: HashSet::new(), // Track pool indices with new raw audio
@ -4099,12 +4104,8 @@ impl eframe::App for EditorApp {
// Webcam management: open/close based on camera_enabled layers, poll frames // Webcam management: open/close based on camera_enabled layers, poll frames
{ {
let any_camera_enabled = self.action_executor.document().root.children.iter().any(|layer| { let any_camera_enabled = self.action_executor.document().all_layers().iter().any(|layer| {
if let lightningbeam_core::layer::AnyLayer::Video(v) = layer { matches!(layer, lightningbeam_core::layer::AnyLayer::Video(v) if v.camera_enabled)
v.camera_enabled
} else {
false
}
}); });
if any_camera_enabled && self.webcam.is_none() { if any_camera_enabled && self.webcam.is_none() {
@ -4388,8 +4389,9 @@ impl eframe::App for EditorApp {
AudioEvent::RecordingStarted(track_id, backend_clip_id, rec_sample_rate, rec_channels) => { AudioEvent::RecordingStarted(track_id, backend_clip_id, rec_sample_rate, rec_channels) => {
println!("🎤 Recording started on track {:?}, backend_clip_id={}", track_id, backend_clip_id); println!("🎤 Recording started on track {:?}, backend_clip_id={}", track_id, backend_clip_id);
// Create clip in document and add instance to layer // Create clip in document and add instance to the layer for this track
if let Some(layer_id) = self.recording_layer_id { if let Some(&layer_id) = self.track_to_layer_map.get(&track_id) {
if self.recording_layer_ids.contains(&layer_id) {
use lightningbeam_core::clip::{AudioClip, ClipInstance}; use lightningbeam_core::clip::{AudioClip, ClipInstance};
// Create a recording-in-progress clip (no pool index yet) // Create a recording-in-progress clip (no pool index yet)
@ -4411,17 +4413,22 @@ impl eframe::App for EditorApp {
// Store mapping for later updates // Store mapping for later updates
self.recording_clips.insert(layer_id, backend_clip_id); self.recording_clips.insert(layer_id, backend_clip_id);
} }
}
// Initialize live waveform cache for recording // Initialize live waveform cache for recording
self.raw_audio_cache.insert(usize::MAX, (Arc::new(Vec::new()), rec_sample_rate, rec_channels)); self.raw_audio_cache.insert(usize::MAX, (Arc::new(Vec::new()), rec_sample_rate, rec_channels));
ctx.request_repaint(); ctx.request_repaint();
} }
AudioEvent::RecordingProgress(_clip_id, duration) => { AudioEvent::RecordingProgress(_backend_clip_id, duration) => {
// Update clip duration as recording progresses // Update clip duration as recording progresses
if let Some(layer_id) = self.recording_layer_id { // Find which layer this backend clip belongs to via recording_clips
// First, find the clip_id from the layer (read-only borrow) let layer_id = self.recording_clips.iter()
let clip_id = { .find(|(_, &cid)| cid == _backend_clip_id)
.map(|(&lid, _)| lid);
if let Some(layer_id) = layer_id {
// First, find the doc clip_id from the layer (read-only borrow)
let doc_clip_id = {
let document = self.action_executor.document(); let document = self.action_executor.document();
document.get_layer(&layer_id) document.get_layer(&layer_id)
.and_then(|layer| { .and_then(|layer| {
@ -4434,8 +4441,8 @@ impl eframe::App for EditorApp {
}; };
// Then update the clip duration (mutable borrow) // Then update the clip duration (mutable borrow)
if let Some(clip_id) = clip_id { if let Some(doc_clip_id) = doc_clip_id {
if let Some(clip) = self.action_executor.document_mut().audio_clips.get_mut(&clip_id) { if let Some(clip) = self.action_executor.document_mut().audio_clips.get_mut(&doc_clip_id) {
if clip.is_recording() { if clip.is_recording() {
clip.duration = duration; clip.duration = duration;
} }
@ -4445,7 +4452,7 @@ impl eframe::App for EditorApp {
ctx.request_repaint(); ctx.request_repaint();
} }
AudioEvent::RecordingStopped(_backend_clip_id, pool_index, _waveform) => { AudioEvent::RecordingStopped(_backend_clip_id, pool_index, _waveform) => {
println!("🎤 Recording stopped: pool_index={}", pool_index); eprintln!("[STOP] AudioEvent::RecordingStopped received (pool_index={})", pool_index);
// Clean up live recording waveform cache // Clean up live recording waveform cache
self.raw_audio_cache.remove(&usize::MAX); self.raw_audio_cache.remove(&usize::MAX);
@ -4469,7 +4476,7 @@ impl eframe::App for EditorApp {
let mut controller = controller_arc.lock().unwrap(); let mut controller = controller_arc.lock().unwrap();
match controller.get_pool_file_info(pool_index) { match controller.get_pool_file_info(pool_index) {
Ok((dur, _, _)) => { Ok((dur, _, _)) => {
println!("✅ Got duration from backend: {:.2}s", dur); eprintln!("[AUDIO] Got duration from backend: {:.4}s", dur);
self.audio_duration_cache.insert(pool_index, dur); self.audio_duration_cache.insert(pool_index, dur);
dur dur
} }
@ -4484,7 +4491,11 @@ impl eframe::App for EditorApp {
// Finalize the recording clip with real pool_index and duration // Finalize the recording clip with real pool_index and duration
// and sync to backend for playback // and sync to backend for playback
if let Some(layer_id) = self.recording_layer_id { // Find which layer this recording belongs to via recording_clips
let recording_layer = self.recording_clips.iter()
.find(|(_, &cid)| cid == _backend_clip_id)
.map(|(&lid, _)| lid);
if let Some(layer_id) = recording_layer {
// First, find the clip instance and clip id // First, find the clip instance and clip id
let (clip_id, instance_id, timeline_start, trim_start) = { let (clip_id, instance_id, timeline_start, trim_start) = {
let document = self.action_executor.document(); let document = self.action_executor.document();
@ -4506,7 +4517,7 @@ impl eframe::App for EditorApp {
if let Some(clip) = self.action_executor.document_mut().audio_clips.get_mut(&clip_id) { if let Some(clip) = self.action_executor.document_mut().audio_clips.get_mut(&clip_id) {
if clip.finalize_recording(pool_index, duration) { if clip.finalize_recording(pool_index, duration) {
clip.name = format!("Recording {}", pool_index); clip.name = format!("Recording {}", pool_index);
println!("✅ Finalized recording clip: pool={}, duration={:.2}s", pool_index, duration); eprintln!("[AUDIO] Finalized recording clip: pool={}, duration={:.4}s", pool_index, duration);
} }
} }
@ -4548,22 +4559,32 @@ impl eframe::App for EditorApp {
} }
} }
// Clear recording state // Remove this layer from active recordings
if let Some(layer_id) = recording_layer {
self.recording_layer_ids.retain(|id| *id != layer_id);
self.recording_clips.remove(&layer_id);
}
// Clear global recording state only when all recordings are done
if self.recording_layer_ids.is_empty() {
self.is_recording = false; self.is_recording = false;
self.recording_clips.clear(); self.recording_clips.clear();
self.recording_layer_id = None; }
ctx.request_repaint(); ctx.request_repaint();
} }
AudioEvent::RecordingError(message) => { AudioEvent::RecordingError(message) => {
eprintln!("❌ Recording error: {}", message); eprintln!("❌ Recording error: {}", message);
self.is_recording = false; self.is_recording = false;
self.recording_clips.clear(); self.recording_clips.clear();
self.recording_layer_id = None; self.recording_layer_ids.clear();
ctx.request_repaint(); ctx.request_repaint();
} }
AudioEvent::MidiRecordingProgress(_track_id, clip_id, duration, notes) => { AudioEvent::MidiRecordingProgress(_track_id, clip_id, duration, notes) => {
// Update clip duration in document (so timeline bar grows) // Update clip duration in document (so timeline bar grows)
if let Some(layer_id) = self.recording_layer_id { // Find layer for this track via track_to_layer_map
let midi_layer_id = self.track_to_layer_map.get(&_track_id)
.filter(|lid| self.recording_layer_ids.contains(lid))
.copied();
if let Some(layer_id) = midi_layer_id {
let doc_clip_id = { let doc_clip_id = {
let document = self.action_executor.document(); let document = self.action_executor.document();
document.get_layer(&layer_id) document.get_layer(&layer_id)
@ -4622,7 +4643,10 @@ impl eframe::App for EditorApp {
self.midi_event_cache.insert(clip_id, cache_events); self.midi_event_cache.insert(clip_id, cache_events);
// Update document clip with final duration and name // Update document clip with final duration and name
if let Some(layer_id) = self.recording_layer_id { let midi_layer_id = self.track_to_layer_map.get(&track_id)
.filter(|lid| self.recording_layer_ids.contains(lid))
.copied();
if let Some(layer_id) = midi_layer_id {
let doc_clip_id = { let doc_clip_id = {
let document = self.action_executor.document(); let document = self.action_executor.document();
document.get_layer(&layer_id) document.get_layer(&layer_id)
@ -4656,10 +4680,15 @@ impl eframe::App for EditorApp {
// The backend created the instance in create_midi_clip(), but doesn't // The backend created the instance in create_midi_clip(), but doesn't
// report the instance_id back. Needed for move/trim operations later. // report the instance_id back. Needed for move/trim operations later.
// Clear recording state // Remove this MIDI layer from active recordings
if let Some(&layer_id) = self.track_to_layer_map.get(&track_id) {
self.recording_layer_ids.retain(|id| *id != layer_id);
self.recording_clips.remove(&layer_id);
}
if self.recording_layer_ids.is_empty() {
self.is_recording = false; self.is_recording = false;
self.recording_clips.clear(); self.recording_clips.clear();
self.recording_layer_id = None; }
ctx.request_repaint(); ctx.request_repaint();
} }
AudioEvent::AudioFileReady { pool_index, path, channels, sample_rate, duration, format } => { AudioEvent::AudioFileReady { pool_index, path, channels, sample_rate, duration, format } => {
@ -4706,6 +4735,19 @@ impl eframe::App for EditorApp {
); );
ctx.request_repaint(); ctx.request_repaint();
} }
AudioEvent::InputLevel(peak) => {
self.input_level = self.input_level.max(peak);
}
AudioEvent::OutputLevel(peak_l, peak_r) => {
self.output_level.0 = self.output_level.0.max(peak_l);
self.output_level.1 = self.output_level.1.max(peak_r);
}
AudioEvent::TrackLevels(levels) => {
for (track_id, peak) in levels {
let entry = self.track_levels.entry(track_id).or_insert(0.0);
*entry = entry.max(peak);
}
}
_ => {} // Ignore other events for now _ => {} // Ignore other events for now
} }
} }
@ -4720,6 +4762,39 @@ impl eframe::App for EditorApp {
} }
} }
// Update input monitoring based on active layer
if let Some(controller) = &self.audio_controller {
let should_monitor = self.active_layer_id.map_or(false, |layer_id| {
let doc = self.action_executor.document();
if let Some(layer) = doc.get_layer(&layer_id) {
matches!(layer, lightningbeam_core::layer::AnyLayer::Audio(a) if a.audio_layer_type == lightningbeam_core::layer::AudioLayerType::Sampled)
} else {
false
}
});
if let Ok(mut ctrl) = controller.try_lock() {
ctrl.set_input_monitoring(should_monitor);
}
}
// Decay VU meter levels (~1.5s full fall at 60fps)
{
let decay = 0.97f32;
self.input_level *= decay;
self.output_level.0 *= decay;
self.output_level.1 *= decay;
for level in self.track_levels.values_mut() {
*level *= decay;
}
// Request repaint while any level is visible
let any_active = self.input_level > 0.001
|| self.output_level.0 > 0.001 || self.output_level.1 > 0.001
|| self.track_levels.values().any(|&v| v > 0.001);
if any_active {
ctx.request_repaint();
}
}
let _post_events_ms = _frame_start.elapsed().as_secs_f64() * 1000.0; let _post_events_ms = _frame_start.elapsed().as_secs_f64() * 1000.0;
// Request continuous repaints when playing to update time display // Request continuous repaints when playing to update time display
@ -5090,7 +5165,7 @@ impl eframe::App for EditorApp {
is_recording: &mut self.is_recording, is_recording: &mut self.is_recording,
recording_clips: &mut self.recording_clips, recording_clips: &mut self.recording_clips,
recording_start_time: &mut self.recording_start_time, recording_start_time: &mut self.recording_start_time,
recording_layer_id: &mut self.recording_layer_id, recording_layer_ids: &mut self.recording_layer_ids,
dragging_asset: &mut self.dragging_asset, dragging_asset: &mut self.dragging_asset,
stroke_width: &mut self.stroke_width, stroke_width: &mut self.stroke_width,
fill_enabled: &mut self.fill_enabled, fill_enabled: &mut self.fill_enabled,
@ -5113,6 +5188,10 @@ impl eframe::App for EditorApp {
target_format: self.target_format, target_format: self.target_format,
pending_menu_actions: &mut pending_menu_actions, pending_menu_actions: &mut pending_menu_actions,
clipboard_manager: &mut self.clipboard_manager, clipboard_manager: &mut self.clipboard_manager,
input_level: self.input_level,
output_level: self.output_level,
track_levels: &self.track_levels,
track_to_layer_map: &self.track_to_layer_map,
waveform_stereo: self.config.waveform_stereo, waveform_stereo: self.config.waveform_stereo,
project_generation: &mut self.project_generation, project_generation: &mut self.project_generation,
script_to_edit: &mut self.script_to_edit, script_to_edit: &mut self.script_to_edit,
@ -5216,7 +5295,7 @@ impl eframe::App for EditorApp {
// Process webcam recording commands from timeline // Process webcam recording commands from timeline
if let Some(cmd) = self.webcam_record_command.take() { if let Some(cmd) = self.webcam_record_command.take() {
match cmd { match cmd {
panes::WebcamRecordCommand::Start { layer_id } => { panes::WebcamRecordCommand::Start { .. } => {
// Ensure webcam is open // Ensure webcam is open
if self.webcam.is_none() { if self.webcam.is_none() {
if let Some(device) = lightningbeam_core::webcam::default_camera() { if let Some(device) = lightningbeam_core::webcam::default_camera() {
@ -5250,7 +5329,6 @@ impl eframe::App for EditorApp {
let recording_path = recording_dir.join(format!("webcam_recording_{}.{}", timestamp, ext)); let recording_path = recording_dir.join(format!("webcam_recording_{}.{}", timestamp, ext));
match webcam.start_recording(recording_path, codec) { match webcam.start_recording(recording_path, codec) {
Ok(()) => { Ok(()) => {
self.webcam_recording_layer_id = Some(layer_id);
eprintln!("[WEBCAM] Recording started"); eprintln!("[WEBCAM] Recording started");
} }
Err(e) => { Err(e) => {
@ -5260,13 +5338,25 @@ impl eframe::App for EditorApp {
} }
} }
panes::WebcamRecordCommand::Stop => { panes::WebcamRecordCommand::Stop => {
eprintln!("[STOP] Webcam stop command processed (main.rs handler)");
// Find the webcam recording layer before stopping (need it for cleanup)
let webcam_layer_id = {
let document = self.action_executor.document();
self.recording_layer_ids.iter().copied().find(|lid| {
document.get_layer(lid).map_or(false, |l| {
matches!(l, lightningbeam_core::layer::AnyLayer::Video(v) if v.camera_enabled)
})
})
};
if let Some(webcam) = &mut self.webcam { if let Some(webcam) = &mut self.webcam {
let stop_t = std::time::Instant::now();
match webcam.stop_recording() { match webcam.stop_recording() {
Ok(result) => { Ok(result) => {
eprintln!("[STOP] webcam.stop_recording() returned in {:.1}ms", stop_t.elapsed().as_secs_f64() * 1000.0);
let file_path_str = result.file_path.to_string_lossy().to_string(); let file_path_str = result.file_path.to_string_lossy().to_string();
eprintln!("[WEBCAM] Recording saved to: {}", file_path_str); eprintln!("[WEBCAM] Recording saved to: {} (recorder duration={:.4}s)", file_path_str, result.duration);
// Create VideoClip + ClipInstance from recorded file // Create VideoClip + ClipInstance from recorded file
if let Some(layer_id) = self.webcam_recording_layer_id.take() { if let Some(layer_id) = webcam_layer_id {
match lightningbeam_core::video::probe_video(&file_path_str) { match lightningbeam_core::video::probe_video(&file_path_str) {
Ok(info) => { Ok(info) => {
use lightningbeam_core::clip::{VideoClip, ClipInstance}; use lightningbeam_core::clip::{VideoClip, ClipInstance};
@ -5344,7 +5434,10 @@ impl eframe::App for EditorApp {
} }
}); });
eprintln!("[WEBCAM] Created video clip: {:.1}s @ {:.1}fps", duration, info.fps); eprintln!(
"[WEBCAM] probe_video: duration={:.4}s, fps={:.1}, {}x{}. Using probe duration for clip.",
info.duration, info.fps, info.width, info.height,
);
} }
Err(e) => { Err(e) => {
eprintln!("[WEBCAM] Failed to probe recorded video: {}", e); eprintln!("[WEBCAM] Failed to probe recorded video: {}", e);
@ -5354,12 +5447,18 @@ impl eframe::App for EditorApp {
} }
Err(e) => { Err(e) => {
eprintln!("[WEBCAM] Failed to stop recording: {}", e); eprintln!("[WEBCAM] Failed to stop recording: {}", e);
self.webcam_recording_layer_id = None; // webcam layer cleanup handled by recording_layer_ids.clear() below
} }
} }
} }
// Remove webcam layer from active recordings
if let Some(wid) = webcam_layer_id {
self.recording_layer_ids.retain(|id| *id != wid);
}
if self.recording_layer_ids.is_empty() {
self.is_recording = false; self.is_recording = false;
self.recording_layer_id = None; self.recording_clips.clear();
}
} }
} }
} }

View File

@ -57,8 +57,10 @@ pub struct DraggingAsset {
/// Command for webcam recording (issued by timeline, processed by main) /// Command for webcam recording (issued by timeline, processed by main)
#[derive(Debug)] #[derive(Debug)]
#[allow(dead_code)]
pub enum WebcamRecordCommand { pub enum WebcamRecordCommand {
/// Start recording on the given video layer /// Start recording on the given video layer
// TODO: remove layer_id — recording_layer_ids now tracks which layers are recording
Start { layer_id: uuid::Uuid }, Start { layer_id: uuid::Uuid },
/// Stop current webcam recording /// Stop current webcam recording
Stop, Stop,
@ -203,7 +205,7 @@ pub struct SharedPaneState<'a> {
pub is_recording: &'a mut bool, // Whether recording is currently active pub is_recording: &'a mut bool, // Whether recording is currently active
pub recording_clips: &'a mut std::collections::HashMap<uuid::Uuid, u32>, // layer_id -> clip_id pub recording_clips: &'a mut std::collections::HashMap<uuid::Uuid, u32>, // layer_id -> clip_id
pub recording_start_time: &'a mut f64, // Playback time when recording started pub recording_start_time: &'a mut f64, // Playback time when recording started
pub recording_layer_id: &'a mut Option<uuid::Uuid>, // Layer being recorded to pub recording_layer_ids: &'a mut Vec<uuid::Uuid>, // Layers being recorded to
/// Asset being dragged from Asset Library (for cross-pane drag-and-drop) /// Asset being dragged from Asset Library (for cross-pane drag-and-drop)
pub dragging_asset: &'a mut Option<DraggingAsset>, pub dragging_asset: &'a mut Option<DraggingAsset>,
// Tool-specific options for infopanel // Tool-specific options for infopanel
@ -247,6 +249,12 @@ pub struct SharedPaneState<'a> {
pub pending_menu_actions: &'a mut Vec<crate::menu::MenuAction>, pub pending_menu_actions: &'a mut Vec<crate::menu::MenuAction>,
/// Clipboard manager for cut/copy/paste operations /// Clipboard manager for cut/copy/paste operations
pub clipboard_manager: &'a mut lightningbeam_core::clipboard::ClipboardManager, pub clipboard_manager: &'a mut lightningbeam_core::clipboard::ClipboardManager,
// VU meter levels
pub input_level: f32,
pub output_level: (f32, f32),
pub track_levels: &'a std::collections::HashMap<daw_backend::TrackId, f32>,
#[allow(dead_code)] // Available for panes that need reverse track->layer lookup
pub track_to_layer_map: &'a std::collections::HashMap<daw_backend::TrackId, Uuid>,
/// Whether to show waveforms as stacked stereo (true) or combined mono (false) /// Whether to show waveforms as stacked stereo (true) or combined mono (false)
pub waveform_stereo: bool, pub waveform_stereo: bool,
/// Generation counter - incremented on project load to force reloads /// Generation counter - incremented on project load to force reloads

View File

@ -67,17 +67,21 @@ fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
} }
// The canvas stores premultiplied linear RGBA. // The canvas stores premultiplied linear RGBA.
// The srgb_to_linear converter downstream applies the sRGB gamma formula // The downstream pipeline (srgb_to_linear compositor) expects the sRGB
// channel-by-channel without alpha awareness. To make the round-trip // buffer to contain straight-alpha sRGB, i.e. the same format Vello outputs:
// transparent we pre-encode with linear_to_srgb here: // sRGB buffer: srgb(r_straight), srgb(g_straight), srgb(b_straight), a
// canvas (linear premul) → sRGB buffer → srgb_to_linear → linear premul // srgb_to_linear: r_straight, g_straight, b_straight, a (linear straight)
// Without this, srgb_to_linear darkens small premultiplied values // compositor: r_straight * a * opacity (premultiplied, correct)
// (e.g. white at 10% opacity: 0.1 → 0.01), producing a grey halo. //
// Without unpremultiplying, the compositor would double-premultiply:
// src = (premul_r, premul_g, premul_b, a) → output = premul_r * a = r * a²
// which produces a dark halo over transparent regions.
let c = textureSample(canvas_tex, canvas_sampler, canvas_uv); let c = textureSample(canvas_tex, canvas_sampler, canvas_uv);
let inv_a = select(0.0, 1.0 / c.a, c.a > 1e-6);
return vec4<f32>( return vec4<f32>(
linear_to_srgb(c.r), linear_to_srgb(c.r * inv_a),
linear_to_srgb(c.g), linear_to_srgb(c.g * inv_a),
linear_to_srgb(c.b), linear_to_srgb(c.b * inv_a),
c.a, c.a,
); );
} }
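The double-premultiply described in the shader comments can be checked numerically outside WGSL. The Rust sketch below uses the standard sRGB transfer functions; it is an illustration of the halo, not the project's compositor code.

fn linear_to_srgb(x: f32) -> f32 {
    if x <= 0.003_130_8 { 12.92 * x } else { 1.055 * x.powf(1.0 / 2.4) - 0.055 }
}
fn srgb_to_linear(x: f32) -> f32 {
    if x <= 0.040_45 { x / 12.92 } else { ((x + 0.055) / 1.055).powf(2.4) }
}

fn main() {
    let (r, a) = (1.0_f32, 0.1_f32); // white at 10% opacity
    let premul = r * a;

    // Old path: encode the premultiplied value; the compositor multiplies
    // by alpha again, so the result is roughly r * a^2 = 0.01 (dark halo).
    let wrong = srgb_to_linear(linear_to_srgb(premul)) * a;

    // New path: unpremultiply before encoding; the compositor premultiplies
    // exactly once, giving roughly r * a = 0.1.
    let right = srgb_to_linear(linear_to_srgb(premul / a)) * a;

    println!("double-premultiplied: {wrong:.4}, correct: {right:.4}");
}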

File diff suppressed because it is too large