diff --git a/daw-backend/src/audio/export.rs b/daw-backend/src/audio/export.rs index fdceb8b..0f17b28 100644 --- a/daw-backend/src/audio/export.rs +++ b/daw-backend/src/audio/export.rs @@ -4,6 +4,10 @@ use super::project::Project; use crate::command::AudioEvent; use std::path::Path; +/// Render chunk size for offline export. Matches the real-time playback buffer size +/// so that MIDI events are processed at the same granularity, avoiding timing jitter. +const EXPORT_CHUNK_FRAMES: usize = 256; + /// Supported export formats #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum ExportFormat { @@ -73,6 +77,21 @@ pub fn export_audio<P: AsRef<Path>>( mut event_tx: Option<&mut rtrb::Producer<AudioEvent>>, ) -> Result<(), String> { + // Validate duration + let duration = settings.end_time - settings.start_time; + if duration <= 0.0 { + return Err(format!( + "Export duration is zero or negative (start={:.3}s, end={:.3}s). \ Check that the timeline has content.", + settings.start_time, settings.end_time + )); + } + + let total_frames = (duration * settings.sample_rate as f64).round() as usize; + if total_frames == 0 { + return Err("Export would produce zero audio frames".to_string()); + } + // Reset all node graphs to clear stale effect buffers (echo, reverb, etc.) 
project.reset_all_graphs(); @@ -135,9 +154,7 @@ pub fn render_to_memory( println!("Export: duration={:.3}s, total_frames={}, total_samples={}, channels={}", duration, total_frames, total_samples, settings.channels); - // Render in chunks to avoid memory issues - const CHUNK_FRAMES: usize = 4096; - let chunk_samples = CHUNK_FRAMES * settings.channels as usize; + let chunk_samples = EXPORT_CHUNK_FRAMES * settings.channels as usize; // Create buffer for rendering let mut render_buffer = vec![0.0f32; chunk_samples]; @@ -147,7 +164,7 @@ let mut all_samples = Vec::with_capacity(total_samples); let mut playhead = settings.start_time; - let chunk_duration = CHUNK_FRAMES as f64 / settings.sample_rate as f64; + let chunk_duration = EXPORT_CHUNK_FRAMES as f64 / settings.sample_rate as f64; let mut frames_rendered = 0; // Render the entire timeline in chunks @@ -345,9 +362,8 @@ fn export_mp3<P: AsRef<Path>>( let duration = settings.end_time - settings.start_time; let total_frames = (duration * settings.sample_rate as f64).round() as usize; - const CHUNK_FRAMES: usize = 4096; - let chunk_samples = CHUNK_FRAMES * settings.channels as usize; - let chunk_duration = CHUNK_FRAMES as f64 / settings.sample_rate as f64; + let chunk_samples = EXPORT_CHUNK_FRAMES * settings.channels as usize; + let chunk_duration = EXPORT_CHUNK_FRAMES as f64 / settings.sample_rate as f64; // Create buffers for rendering let mut render_buffer = vec![0.0f32; chunk_samples]; @@ -513,9 +529,8 @@ fn export_aac<P: AsRef<Path>>( let duration = settings.end_time - settings.start_time; let total_frames = (duration * settings.sample_rate as f64).round() as usize; - const CHUNK_FRAMES: usize = 4096; - let chunk_samples = CHUNK_FRAMES * settings.channels as usize; - let chunk_duration = CHUNK_FRAMES as f64 / settings.sample_rate as f64; + let chunk_samples = EXPORT_CHUNK_FRAMES * settings.channels as usize; + let chunk_duration = EXPORT_CHUNK_FRAMES as f64 / settings.sample_rate as f64; // Create buffers for rendering 
let mut render_buffer = vec![0.0f32; chunk_samples]; @@ -669,9 +684,13 @@ fn encode_complete_frame_mp3( channel_layout: ffmpeg_next::channel_layout::ChannelLayout, pts: i64, ) -> Result<(), String> { + if num_frames == 0 { + return Ok(()); + } + let channels = planar_samples.len(); - // Create audio frame with exact size + // Create audio frame let mut frame = ffmpeg_next::frame::Audio::new( ffmpeg_next::format::Sample::I16(ffmpeg_next::format::sample::Type::Planar), num_frames, @@ -680,33 +699,23 @@ frame.set_rate(sample_rate); frame.set_pts(Some(pts)); - // Copy all planar samples to frame - for ch in 0..channels { - let plane = frame.data_mut(ch); - let src = &planar_samples[ch]; - - // Verify buffer size - let byte_size = num_frames * std::mem::size_of::<i16>(); - if plane.len() < byte_size { - return Err(format!( - "FFmpeg frame buffer too small: {} bytes, need {} bytes", - plane.len(), byte_size - )); - } - - // Safe byte-level copy - for (i, &sample) in src.iter().enumerate() { - let bytes = sample.to_ne_bytes(); - let offset = i * 2; - plane[offset..offset + 2].copy_from_slice(&bytes); - } + // Verify frame was allocated (check linesize[0] via planes()) + if frame.planes() == 0 { + return Err("FFmpeg failed to allocate audio frame. Try exporting as WAV instead.".to_string()); + } + + // Copy all planar samples to frame + // Use plane_mut::<i16> instead of data_mut — data_mut(ch) is buggy for planar audio: + // FFmpeg only sets linesize[0], so data_mut returns 0-length slices for ch > 0. + // plane_mut uses self.samples() for the length, which is correct for all planes. 
+ for ch in 0..channels { + let plane = frame.plane_mut::<i16>(ch); + plane.copy_from_slice(&planar_samples[ch]); } - // Send frame to encoder encoder.send_frame(&frame) .map_err(|e| format!("Failed to send frame: {}", e))?; - // Receive and write packets receive_and_write_packets(encoder, output)?; Ok(()) @@ -722,9 +731,13 @@ fn encode_complete_frame_aac( channel_layout: ffmpeg_next::channel_layout::ChannelLayout, pts: i64, ) -> Result<(), String> { + if num_frames == 0 { + return Ok(()); + } + let channels = planar_samples.len(); - // Create audio frame with exact size + // Create audio frame let mut frame = ffmpeg_next::frame::Audio::new( ffmpeg_next::format::Sample::F32(ffmpeg_next::format::sample::Type::Planar), num_frames, @@ -733,33 +746,23 @@ frame.set_rate(sample_rate); frame.set_pts(Some(pts)); - // Copy all planar samples to frame - for ch in 0..channels { - let plane = frame.data_mut(ch); - let src = &planar_samples[ch]; - - // Verify buffer size - let byte_size = num_frames * std::mem::size_of::<f32>(); - if plane.len() < byte_size { - return Err(format!( - "FFmpeg frame buffer too small: {} bytes, need {} bytes", - plane.len(), byte_size - )); - } - - // Safe byte-level copy - for (i, &sample) in src.iter().enumerate() { - let bytes = sample.to_ne_bytes(); - let offset = i * 4; - plane[offset..offset + 4].copy_from_slice(&bytes); - } + // Verify frame was allocated + if frame.planes() == 0 { + return Err("FFmpeg failed to allocate audio frame. Try exporting as WAV instead.".to_string()); + } + + // Copy all planar samples to frame + // Use plane_mut::<f32> instead of data_mut — data_mut(ch) is buggy for planar audio: + // FFmpeg only sets linesize[0], so data_mut returns 0-length slices for ch > 0. + // plane_mut uses self.samples() for the length, which is correct for all planes. 
+ for ch in 0..channels { + let plane = frame.plane_mut::<f32>(ch); + plane.copy_from_slice(&planar_samples[ch]); } - // Send frame to encoder encoder.send_frame(&frame) .map_err(|e| format!("Failed to send frame: {}", e))?; - // Receive and write packets receive_and_write_packets(encoder, output)?; Ok(()) diff --git a/lightningbeam-ui/lightningbeam-core/src/actions/set_layer_properties.rs b/lightningbeam-ui/lightningbeam-core/src/actions/set_layer_properties.rs index 9a8753d..5cca3e6 100644 --- a/lightningbeam-ui/lightningbeam-core/src/actions/set_layer_properties.rs +++ b/lightningbeam-ui/lightningbeam-core/src/actions/set_layer_properties.rs @@ -124,6 +124,54 @@ impl Action for SetLayerPropertiesAction { Ok(()) } + fn execute_backend( + &mut self, + backend: &mut crate::action::BackendContext, + _document: &crate::document::Document, + ) -> Result<(), String> { + let controller = match backend.audio_controller.as_mut() { + Some(c) => c, + None => return Ok(()), + }; + + for &layer_id in &self.layer_ids { + if let Some(&track_id) = backend.layer_to_track_map.get(&layer_id) { + match &self.property { + LayerProperty::Volume(v) => controller.set_track_volume(track_id, *v as f32), + LayerProperty::Muted(m) => controller.set_track_mute(track_id, *m), + LayerProperty::Soloed(s) => controller.set_track_solo(track_id, *s), + _ => {} // Locked/Opacity/Visible are UI-only + } + } + } + Ok(()) + } + + fn rollback_backend( + &mut self, + backend: &mut crate::action::BackendContext, + _document: &crate::document::Document, + ) -> Result<(), String> { + let controller = match backend.audio_controller.as_mut() { + Some(c) => c, + None => return Ok(()), + }; + + for (i, &layer_id) in self.layer_ids.iter().enumerate() { + if let Some(&track_id) = backend.layer_to_track_map.get(&layer_id) { + if let Some(old_value) = &self.old_values[i] { + match old_value { + OldValue::Volume(v) => controller.set_track_volume(track_id, *v as f32), + OldValue::Muted(m) => 
controller.set_track_mute(track_id, *m), + OldValue::Soloed(s) => controller.set_track_solo(track_id, *s), + _ => {} // Locked/Opacity/Visible are UI-only + } + } + } + } + Ok(()) + } + fn description(&self) -> String { let property_name = match &self.property { LayerProperty::Volume(_) => "volume", diff --git a/lightningbeam-ui/lightningbeam-editor/src/export/audio_exporter.rs b/lightningbeam-ui/lightningbeam-editor/src/export/audio_exporter.rs index 6e917c2..4d2e6e4 100644 --- a/lightningbeam-ui/lightningbeam-editor/src/export/audio_exporter.rs +++ b/lightningbeam-ui/lightningbeam-editor/src/export/audio_exporter.rs @@ -196,26 +196,13 @@ fn export_audio_ffmpeg_mp3<P: AsRef<Path>>( frame.set_rate(settings.sample_rate); // Copy planar samples to frame + // Use plane_mut::<i16> instead of data_mut — data_mut(ch) is buggy for planar audio: + // FFmpeg only sets linesize[0], so data_mut returns 0-length slices for ch > 0. + // plane_mut uses self.samples() for the length, which is correct for all planes. for ch in 0..settings.channels as usize { - let plane = frame.data_mut(ch); + let plane = frame.plane_mut::<i16>(ch); let offset = samples_encoded; - let src = &planar_samples[ch][offset..offset + chunk_size]; - - // Convert i16 samples to bytes and copy - let byte_size = chunk_size * std::mem::size_of::<i16>(); - if plane.len() < byte_size { - return Err(format!( - "FFmpeg frame buffer too small: {} bytes, need {} bytes", - plane.len(), byte_size - )); - } - - // Safe byte-level copy using slice operations - for (i, &sample) in src.iter().enumerate() { - let bytes = sample.to_ne_bytes(); - let offset = i * 2; - plane[offset..offset + 2].copy_from_slice(&bytes); - } + plane.copy_from_slice(&planar_samples[ch][offset..offset + chunk_size]); } // Send frame to encoder diff --git a/lightningbeam-ui/lightningbeam-editor/src/export/dialog.rs b/lightningbeam-ui/lightningbeam-editor/src/export/dialog.rs index 5317be4..0449d65 100644 --- a/lightningbeam-ui/lightningbeam-editor/src/export/dialog.rs +++ 
b/lightningbeam-ui/lightningbeam-editor/src/export/dialog.rs @@ -88,21 +88,19 @@ impl ExportDialog { let mut should_export = false; let mut should_close = false; - let mut open = self.open; let window_title = match self.export_type { ExportType::Audio => "Export Audio", ExportType::Video => "Export Video", }; - egui::Window::new(window_title) - .open(&mut open) - .resizable(false) - .collapsible(false) - .anchor(egui::Align2::CENTER_CENTER, egui::Vec2::ZERO) + let modal_response = egui::Modal::new(egui::Id::new("export_dialog_modal")) .show(ctx, |ui| { ui.set_width(500.0); + ui.heading(window_title); + ui.add_space(8.0); + // Error message (if any) if let Some(error) = &self.error_message { ui.colored_label(egui::Color32::RED, error); @@ -151,8 +149,10 @@ impl ExportDialog { }); }); - // Update open state (in case user clicked X button) - self.open = open; + // Close on backdrop click or escape + if modal_response.backdrop_response.clicked() { + should_close = true; + } if should_close { self.close(); @@ -529,14 +529,13 @@ impl ExportProgressDialog { let mut should_cancel = false; - egui::Window::new("Exporting...") - .open(&mut self.open) - .resizable(false) - .collapsible(false) - .anchor(egui::Align2::CENTER_CENTER, egui::Vec2::ZERO) + egui::Modal::new(egui::Id::new("export_progress_modal")) .show(ctx, |ui| { ui.set_width(400.0); + ui.heading("Exporting..."); + ui.add_space(8.0); + // Status message ui.label(&self.message); ui.add_space(8.0); diff --git a/lightningbeam-ui/lightningbeam-editor/src/export/mod.rs b/lightningbeam-ui/lightningbeam-editor/src/export/mod.rs index 6364f76..273ed64 100644 --- a/lightningbeam-ui/lightningbeam-editor/src/export/mod.rs +++ b/lightningbeam-ui/lightningbeam-editor/src/export/mod.rs @@ -479,9 +479,11 @@ impl ExportOrchestrator { ) { println!("๐Ÿงต [EXPORT THREAD] run_audio_export started"); - // Send start notification + // Send start notification with calculated total frames + let duration = settings.end_time - 
settings.start_time; + let total_frames = (duration * settings.sample_rate as f64).round() as usize; progress_tx - .send(ExportProgress::Started { total_frames: 0 }) + .send(ExportProgress::Started { total_frames }) .ok(); println!("๐Ÿงต [EXPORT THREAD] Sent Started progress"); diff --git a/lightningbeam-ui/lightningbeam-editor/src/main.rs b/lightningbeam-ui/lightningbeam-editor/src/main.rs index de2e6e5..c861788 100644 --- a/lightningbeam-ui/lightningbeam-editor/src/main.rs +++ b/lightningbeam-ui/lightningbeam-editor/src/main.rs @@ -2751,6 +2751,41 @@ impl EditorApp { } eprintln!("๐Ÿ“Š [APPLY] Step 7: Fetched {} raw audio samples in {:.2}ms", raw_fetched, step7_start.elapsed().as_secs_f64() * 1000.0); + // Rebuild MIDI event cache for all MIDI clips (needed for timeline/piano roll rendering) + let step8_start = std::time::Instant::now(); + self.midi_event_cache.clear(); + let midi_clip_ids: Vec = self.action_executor.document() + .audio_clips.values() + .filter_map(|clip| clip.midi_clip_id()) + .collect(); + + let mut midi_fetched = 0; + if let Some(ref controller_arc) = self.audio_controller { + let mut controller = controller_arc.lock().unwrap(); + for clip_id in midi_clip_ids { + // track_id is unused by the query, pass 0 + match controller.query_midi_clip(0, clip_id) { + Ok(clip_data) => { + let processed_events: Vec<(f64, u8, u8, bool)> = clip_data.events.iter() + .filter_map(|event| { + let status_type = event.status & 0xF0; + if status_type == 0x90 || status_type == 0x80 { + let is_note_on = status_type == 0x90 && event.data2 > 0; + Some((event.timestamp, event.data1, event.data2, is_note_on)) + } else { + None + } + }) + .collect(); + self.midi_event_cache.insert(clip_id, processed_events); + midi_fetched += 1; + } + Err(e) => eprintln!("Failed to fetch MIDI clip {}: {}", clip_id, e), + } + } + } + eprintln!("๐Ÿ“Š [APPLY] Step 8: Rebuilt MIDI event cache for {} clips in {:.2}ms", midi_fetched, step8_start.elapsed().as_secs_f64() * 1000.0); + // Reset 
playback state self.playback_time = 0.0; self.is_playing = false; diff --git a/lightningbeam-ui/lightningbeam-editor/src/panes/virtual_piano.rs b/lightningbeam-ui/lightningbeam-editor/src/panes/virtual_piano.rs index 789416b..feee43a 100644 --- a/lightningbeam-ui/lightningbeam-editor/src/panes/virtual_piano.rs +++ b/lightningbeam-ui/lightningbeam-editor/src/panes/virtual_piano.rs @@ -148,6 +148,70 @@ impl VirtualPianoPane { (start_note as u8, end_note as u8, white_key_width, 0.0) } + /// Render keys visually without any input handling (used when a modal is active) + fn render_keyboard_visual_only( + &self, + ui: &mut egui::Ui, + rect: egui::Rect, + visible_start: u8, + visible_end: u8, + white_key_width: f32, + offset_x: f32, + white_key_height: f32, + black_key_width: f32, + black_key_height: f32, + ) { + // Draw white keys + let mut white_pos = 0f32; + for note in visible_start..=visible_end { + if !Self::is_white_key(note) { + continue; + } + let x = rect.min.x + offset_x + (white_pos * white_key_width); + let key_rect = egui::Rect::from_min_size( + egui::pos2(x, rect.min.y), + egui::vec2(white_key_width - 1.0, white_key_height), + ); + let color = if self.pressed_notes.contains(&note) { + egui::Color32::from_rgb(100, 150, 255) + } else { + egui::Color32::WHITE + }; + ui.painter().rect_filled(key_rect, 2.0, color); + ui.painter().rect_stroke( + key_rect, + 2.0, + egui::Stroke::new(1.0, egui::Color32::BLACK), + egui::StrokeKind::Middle, + ); + white_pos += 1.0; + } + + // Draw black keys + for note in visible_start..=visible_end { + if !Self::is_black_key(note) { + continue; + } + let mut white_keys_before = 0; + for n in visible_start..note { + if Self::is_white_key(n) { + white_keys_before += 1; + } + } + let x = rect.min.x + offset_x + (white_keys_before as f32 * white_key_width) - (black_key_width / 2.0); + let key_rect = egui::Rect::from_min_size( + egui::pos2(x, rect.min.y), + egui::vec2(black_key_width, black_key_height), + ); + let color = if 
self.pressed_notes.contains(&note) { + egui::Color32::from_rgb(50, 100, 200) + } else { + egui::Color32::BLACK + }; + ui.painter().rect_filled(key_rect, 2.0, color); + } + } + /// Render the piano keyboard fn render_keyboard(&mut self, ui: &mut egui::Ui, rect: egui::Rect, shared: &mut SharedPaneState) { // Calculate visible range and key dimensions based on pane size @@ -158,6 +222,20 @@ impl VirtualPianoPane { let black_key_width = white_key_width * self.black_key_width_ratio; let black_key_height = white_key_height * self.black_key_height_ratio; + // If a modal dialog is open, don't process mouse input — just render keys visually. + // We read raw input (ui.input) which bypasses egui's modal blocking, so we must check manually. + let modal_active = ui.ctx().memory(|m| m.top_modal_layer().is_some()); + if modal_active { + // Release any held notes so they don't get stuck + if self.dragging_note.is_some() { + if let Some(note) = self.dragging_note.take() { + self.send_note_off(note, shared); + } + } + self.render_keyboard_visual_only(ui, rect, visible_start, visible_end, white_key_width, offset_x, white_key_height, black_key_width, black_key_height); + return; + } + // Count white keys before each note for positioning let mut white_key_positions: std::collections::HashMap = std::collections::HashMap::new(); let mut white_count = 0;