diff --git a/daw-backend/src/audio/pool.rs b/daw-backend/src/audio/pool.rs
index d8db76b..73b7225 100644
--- a/daw-backend/src/audio/pool.rs
+++ b/daw-backend/src/audio/pool.rs
@@ -136,8 +136,8 @@ impl AudioFile {
             let peak_start = start_frame + peak_idx * frames_per_peak;
             let peak_end = (start_frame + (peak_idx + 1) * frames_per_peak).min(end_frame);

-            let mut min = 0.0f32;
-            let mut max = 0.0f32;
+            let mut min = f32::MAX;
+            let mut max = f32::MIN;

             // Scan all samples in this window
             for frame_idx in peak_start..peak_end {
@@ -152,6 +152,14 @@ impl AudioFile {
                 }
             }

+            // If no samples were found, clamp to safe defaults
+            if min == f32::MAX {
+                min = 0.0;
+            }
+            if max == f32::MIN {
+                max = 0.0;
+            }
+
             peaks.push(crate::io::WaveformPeak { min, max });
         }
@@ -549,13 +557,18 @@ impl AudioClipPool {
         entries: Vec,
         project_path: &Path,
     ) -> Result<Vec<usize>, String> {
+        let fn_start = std::time::Instant::now();
+        eprintln!("šŸ“Š [LOAD_SERIALIZED] Starting load_from_serialized with {} entries...", entries.len());
+
         let project_dir = project_path.parent()
             .ok_or_else(|| "Project path has no parent directory".to_string())?;

         let mut missing_indices = Vec::new();

         // Clear existing pool
+        let clear_start = std::time::Instant::now();
         self.files.clear();
+        eprintln!("šŸ“Š [LOAD_SERIALIZED] Clear pool took {:.2}ms", clear_start.elapsed().as_secs_f64() * 1000.0);

         // Find the maximum pool index to determine required size
         let max_index = entries.iter()
@@ -564,12 +577,18 @@ impl AudioClipPool {
             .unwrap_or(0);

         // Ensure we have space for all entries
+        let resize_start = std::time::Instant::now();
         self.files.resize(max_index + 1, AudioFile::new(PathBuf::new(), Vec::new(), 2, 44100));
+        eprintln!("šŸ“Š [LOAD_SERIALIZED] Resize pool to {} took {:.2}ms", max_index + 1, resize_start.elapsed().as_secs_f64() * 1000.0);

-        for entry in entries {
-            let success = if let Some(embedded) = entry.embedded_data {
+        for (i, entry) in entries.iter().enumerate() {
+            let entry_start = std::time::Instant::now();
+            eprintln!("šŸ“Š [LOAD_SERIALIZED] Processing entry {}/{}: '{}'", i + 1, entries.len(), entry.name);
+
+            let success = if let Some(ref embedded) = entry.embedded_data {
                 // Load from embedded data
-                match Self::load_from_embedded_into_pool(self, entry.pool_index, embedded, &entry.name) {
+                eprintln!("šŸ“Š [LOAD_SERIALIZED] Entry has embedded data (format: {})", embedded.format);
+                match Self::load_from_embedded_into_pool(self, entry.pool_index, embedded.clone(), &entry.name) {
                     Ok(_) => {
                         eprintln!("[AudioPool] Successfully loaded embedded audio: {}", entry.name);
                         true
@@ -579,8 +598,9 @@ impl AudioClipPool {
                         false
                     }
                 }
-            } else if let Some(rel_path) = entry.relative_path {
+            } else if let Some(ref rel_path) = entry.relative_path {
                 // Load from file path
+                eprintln!("šŸ“Š [LOAD_SERIALIZED] Entry has file path: {:?}", rel_path);
                 let full_path = project_dir.join(&rel_path);

                 if full_path.exists() {
@@ -597,8 +617,12 @@ impl AudioClipPool {
             if !success {
                 missing_indices.push(entry.pool_index);
             }
+
+            eprintln!("šŸ“Š [LOAD_SERIALIZED] Entry {} took {:.2}ms (success: {})", i + 1, entry_start.elapsed().as_secs_f64() * 1000.0, success);
         }

+        eprintln!("šŸ“Š [LOAD_SERIALIZED] āœ… Total load_from_serialized time: {:.2}ms", fn_start.elapsed().as_secs_f64() * 1000.0);
+
         Ok(missing_indices)
     }
@@ -611,20 +635,29 @@ impl AudioClipPool {
     ) -> Result<(), String> {
         use base64::{Engine as _, engine::general_purpose};

+        let fn_start = std::time::Instant::now();
+        eprintln!("šŸ“Š [POOL] Loading embedded audio '{}'...", name);
+
        // Decode base64
+        let step1_start = std::time::Instant::now();
        let data = general_purpose::STANDARD
            .decode(&embedded.data_base64)
            .map_err(|e| format!("Failed to decode base64: {}", e))?;
+        eprintln!("šŸ“Š [POOL] Step 1: Decode base64 ({} bytes) took {:.2}ms", data.len(), step1_start.elapsed().as_secs_f64() * 1000.0);

        // Write to temporary file for symphonia to decode
+        let step2_start = std::time::Instant::now();
        let temp_dir = std::env::temp_dir();
        let temp_path = temp_dir.join(format!("lightningbeam_embedded_{}.{}", pool_index, embedded.format));
        std::fs::write(&temp_path, &data)
            .map_err(|e| format!("Failed to write temporary file: {}", e))?;
+        eprintln!("šŸ“Š [POOL] Step 2: Write temp file took {:.2}ms", step2_start.elapsed().as_secs_f64() * 1000.0);

        // Load the temporary file using existing infrastructure
+        let step3_start = std::time::Instant::now();
        let result = Self::load_file_into_pool(self, pool_index, &temp_path);
+        eprintln!("šŸ“Š [POOL] Step 3: Decode audio with Symphonia took {:.2}ms", step3_start.elapsed().as_secs_f64() * 1000.0);

        // Clean up temporary file
        let _ = std::fs::remove_file(&temp_path);
@@ -634,6 +667,8 @@ impl AudioClipPool {
            self.files[pool_index].path = PathBuf::from(format!("<embedded: {}>", name));
        }

+        eprintln!("šŸ“Š [POOL] āœ… Total load_from_embedded time: {:.2}ms", fn_start.elapsed().as_secs_f64() * 1000.0);
+
        result
    }
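Annotation: the sentinel fix in the first hunk matters because initializing min/max to 0.0 silently biases peaks toward zero (a window of all-negative samples would report max = 0.0). A self-contained sketch of the corrected scan — `WaveformPeak` and `compute_peaks` here are illustrative stand-ins, not the crate's actual API:

```rust
/// Illustrative stand-in for crate::io::WaveformPeak.
#[derive(Debug, Clone, Copy)]
struct WaveformPeak {
    min: f32,
    max: f32,
}

/// Compute `num_peaks` min/max pairs over interleaved f32 samples.
/// Sentinels start at f32::MAX / f32::MIN so quiet or negative-only
/// windows cannot bias the result; empty windows are clamped afterwards.
fn compute_peaks(samples: &[f32], channels: usize, num_peaks: usize) -> Vec<WaveformPeak> {
    if channels == 0 || num_peaks == 0 {
        return Vec::new();
    }
    let frames = samples.len() / channels;
    let frames_per_peak = (frames / num_peaks).max(1);
    let mut peaks = Vec::with_capacity(num_peaks);
    for peak_idx in 0..num_peaks {
        let start = peak_idx * frames_per_peak;
        let end = ((peak_idx + 1) * frames_per_peak).min(frames);
        let (mut min, mut max) = (f32::MAX, f32::MIN);
        for frame in start..end {
            for ch in 0..channels {
                let s = samples[frame * channels + ch];
                min = min.min(s);
                max = max.max(s);
            }
        }
        // If no samples were found, clamp to safe defaults (as in the patch).
        if min == f32::MAX { min = 0.0; }
        if max == f32::MIN { max = 0.0; }
        peaks.push(WaveformPeak { min, max });
    }
    peaks
}
```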
diff --git a/lightningbeam-ui/lightningbeam-core/src/file_io.rs b/lightningbeam-ui/lightningbeam-core/src/file_io.rs
index a1644e6..3e188a1 100644
--- a/lightningbeam-ui/lightningbeam-core/src/file_io.rs
+++ b/lightningbeam-ui/lightningbeam-core/src/file_io.rs
@@ -139,104 +139,200 @@ pub fn save_beam(
     audio_pool_entries: Vec,
     _settings: &SaveSettings,
 ) -> Result<(), String> {
-    // 1. Create backup if file exists
-    if path.exists() {
+    let fn_start = std::time::Instant::now();
+    eprintln!("šŸ“Š [SAVE_BEAM] Starting save_beam()...");
+
+    // 1. Create backup if file exists and open it for reading old audio files
+    let step1_start = std::time::Instant::now();
+    let mut old_zip = if path.exists() {
         let backup_path = path.with_extension("beam.backup");
         std::fs::copy(path, &backup_path)
             .map_err(|e| format!("Failed to create backup: {}", e))?;
-    }
+
+        // Open the backup as a ZIP archive for reading
+        match File::open(&backup_path) {
+            Ok(file) => match ZipArchive::new(file) {
+                Ok(archive) => {
+                    eprintln!("šŸ“Š [SAVE_BEAM] Step 1: Create backup and open for reading took {:.2}ms", step1_start.elapsed().as_secs_f64() * 1000.0);
+                    Some(archive)
+                }
+                Err(e) => {
+                    eprintln!("āš ļø [SAVE_BEAM] Failed to open backup as ZIP: {}, will not copy old audio files", e);
+                    eprintln!("šŸ“Š [SAVE_BEAM] Step 1: Create backup took {:.2}ms", step1_start.elapsed().as_secs_f64() * 1000.0);
+                    None
+                }
+            },
+            Err(e) => {
+                eprintln!("āš ļø [SAVE_BEAM] Failed to open backup: {}, will not copy old audio files", e);
+                eprintln!("šŸ“Š [SAVE_BEAM] Step 1: Create backup took {:.2}ms", step1_start.elapsed().as_secs_f64() * 1000.0);
+                None
+            }
+        }
+    } else {
+        eprintln!("šŸ“Š [SAVE_BEAM] Step 1: No backup needed (new file)");
+        None
+    };

     // 2. Prepare audio project for serialization (save AudioGraph presets)
+    let step2_start = std::time::Instant::now();
     audio_project.prepare_for_save();
+    eprintln!("šŸ“Š [SAVE_BEAM] Step 2: Prepare audio project took {:.2}ms", step2_start.elapsed().as_secs_f64() * 1000.0);

     // 3. Create ZIP writer
+    let step3_start = std::time::Instant::now();
     let file = File::create(path)
         .map_err(|e| format!("Failed to create file: {}", e))?;
     let mut zip = ZipWriter::new(file);
+    eprintln!("šŸ“Š [SAVE_BEAM] Step 3: Create ZIP writer took {:.2}ms", step3_start.elapsed().as_secs_f64() * 1000.0);

     // 4. Process audio pool entries and write embedded audio files to ZIP
-    // Smart compression: lossy formats (mp3, ogg) stored as-is, lossless data as FLAC
+    // Priority: old ZIP file > external file > encode PCM as FLAC
+    let step4_start = std::time::Instant::now();
     let mut modified_entries = Vec::new();
+    let mut flac_encode_time = 0.0;
+    let mut zip_write_time = 0.0;
+    let project_dir = path.parent().unwrap_or_else(|| Path::new("."));
+
     for entry in &audio_pool_entries {
         let mut modified_entry = entry.clone();

-        if let Some(ref embedded_data) = entry.embedded_data {
-            // Decode base64 audio data
-            let audio_bytes = base64::decode(&embedded_data.data_base64)
-                .map_err(|e| format!("Failed to decode base64 audio data for pool index {}: {}", entry.pool_index, e))?;
+        // Try to get audio data from various sources (in priority order)
+        let audio_source: Option<(Vec<u8>, String)> = if let Some(ref rel_path) = entry.relative_path {
+            // Priority 1: Check if file is in the old ZIP
+            if rel_path.starts_with("media/audio/") {
+                if let Some(ref mut old_zip_archive) = old_zip {
+                    match old_zip_archive.by_name(rel_path) {
+                        Ok(mut file) => {
+                            let mut bytes = Vec::new();
+                            if file.read_to_end(&mut bytes).is_ok() {
+                                let extension = rel_path.split('.').last().unwrap_or("bin").to_string();
+                                eprintln!("šŸ“Š [SAVE_BEAM] Copying from old ZIP: {}", rel_path);
+                                Some((bytes, extension))
+                            } else {
+                                eprintln!("āš ļø [SAVE_BEAM] Failed to read {} from old ZIP", rel_path);
+                                None
+                            }
+                        }
+                        Err(_) => {
+                            eprintln!("āš ļø [SAVE_BEAM] File {} not found in old ZIP", rel_path);
+                            None
+                        }
+                    }
+                } else {
+                    None
+                }
+            }
+            // Priority 2: Check external filesystem
+            else {
+                let full_path = project_dir.join(rel_path);
+                if full_path.exists() {
+                    match std::fs::read(&full_path) {
+                        Ok(bytes) => {
+                            let extension = full_path.extension()
+                                .and_then(|e| e.to_str())
+                                .unwrap_or("bin")
+                                .to_string();
+                            eprintln!("šŸ“Š [SAVE_BEAM] Using external file: {:?}", full_path);
+                            Some((bytes, extension))
+                        }
+                        Err(e) => {
+                            eprintln!("āš ļø [SAVE_BEAM] Failed to read {:?}: {}", full_path, e);
+                            None
+                        }
+                    }
+                } else {
+                    eprintln!("āš ļø [SAVE_BEAM] External file not found: {:?}", full_path);
+                    None
+                }
+            }
+        } else {
+            None
+        };

-            let format_lower = embedded_data.format.to_lowercase();
-            let is_lossy = format_lower == "mp3" || format_lower == "ogg"
-                || format_lower == "aac" || format_lower == "m4a"
-                || format_lower == "opus";
+        if let Some((audio_bytes, extension)) = audio_source {
+            // We have the original file - copy it directly
+            let zip_filename = format!("media/audio/{}.{}", entry.pool_index, extension);

-            let zip_filename = if is_lossy {
-                // Store lossy formats directly (no transcoding)
-                format!("media/audio/{}.{}", entry.pool_index, embedded_data.format)
-            } else {
-                // Store lossless data as FLAC
-                format!("media/audio/{}.flac", entry.pool_index)
-            };
-
-            // Write to ZIP (uncompressed - audio is already compressed)
             let file_options = FileOptions::default()
                 .compression_method(CompressionMethod::Stored);

             zip.start_file(&zip_filename, file_options)
                 .map_err(|e| format!("Failed to create {} in ZIP: {}", zip_filename, e))?;

-            if is_lossy {
-                // Write lossy file directly
-                zip.write_all(&audio_bytes)
-                    .map_err(|e| format!("Failed to write {}: {}", zip_filename, e))?;
-            } else {
-                // Decode PCM samples and encode to FLAC
-                // The audio_bytes are raw PCM samples (interleaved f32 little-endian)
-                let samples: Vec<f32> = audio_bytes
-                    .chunks_exact(4)
-                    .map(|chunk| f32::from_le_bytes([chunk[0], chunk[1], chunk[2], chunk[3]]))
-                    .collect();
+            let write_start = std::time::Instant::now();
+            zip.write_all(&audio_bytes)
+                .map_err(|e| format!("Failed to write {}: {}", zip_filename, e))?;
+            zip_write_time += write_start.elapsed().as_secs_f64() * 1000.0;

-                // Convert f32 samples to i32 for FLAC encoding (FLAC doesn't support f32)
-                // FLAC supports up to 24-bit samples: range [-8388608, 8388607]
-                let samples_i32: Vec<i32> = samples
-                    .iter()
-                    .map(|&s| {
-                        // Clamp to [-1.0, 1.0] first, then scale to 24-bit range
-                        let clamped = s.clamp(-1.0, 1.0);
-                        (clamped * 8388607.0) as i32
-                    })
-                    .collect();
+            // Update entry to point to ZIP file
+            modified_entry.embedded_data = None;
+            modified_entry.relative_path = Some(zip_filename);

-                // Configure FLAC encoder
-                let config = flacenc::config::Encoder::default()
-                    .into_verified()
-                    .map_err(|(_, e)| format!("FLAC encoder config error: {:?}", e))?;
+        } else if let Some(ref embedded_data) = entry.embedded_data {
+            // Priority 3: No original file - encode PCM as FLAC
+            eprintln!("šŸ“Š [SAVE_BEAM] Encoding PCM to FLAC for pool {} (no original file)", entry.pool_index);
+            // Embedded data is always PCM - encode as FLAC
+            let audio_bytes = base64::decode(&embedded_data.data_base64)
+                .map_err(|e| format!("Failed to decode base64 audio data for pool index {}: {}", entry.pool_index, e))?;

-                let source = flacenc::source::MemSource::from_samples(
-                    &samples_i32,
-                    entry.channels as usize,
-                    24, // bits per sample (FLAC max is 24-bit)
-                    entry.sample_rate as usize,
-                );
+            let zip_filename = format!("media/audio/{}.flac", entry.pool_index);

+            let file_options = FileOptions::default()
+                .compression_method(CompressionMethod::Stored);

-                // Encode to FLAC
-                let flac_stream = flacenc::encode_with_fixed_block_size(
-                    &config,
-                    source,
-                    config.block_size,
-                ).map_err(|e| format!("FLAC encoding failed: {:?}", e))?;
+            zip.start_file(&zip_filename, file_options)
+                .map_err(|e| format!("Failed to create {} in ZIP: {}", zip_filename, e))?;

-                // Convert stream to bytes
-                use flacenc::component::BitRepr;
-                let mut sink = flacenc::bitsink::ByteSink::new();
-                flac_stream.write(&mut sink)
-                    .map_err(|e| format!("Failed to write FLAC stream: {:?}", e))?;
-                let flac_bytes = sink.as_slice();
+            // Encode PCM samples to FLAC
+            let flac_start = std::time::Instant::now();
+
+            // The audio_bytes are raw PCM samples (interleaved f32 little-endian)
+            let samples: Vec<f32> = audio_bytes
+                .chunks_exact(4)
+                .map(|chunk| f32::from_le_bytes([chunk[0], chunk[1], chunk[2], chunk[3]]))
+                .collect();
+
+            // Convert f32 samples to i32 for FLAC encoding
+            let samples_i32: Vec<i32> = samples
+                .iter()
+                .map(|&s| {
+                    let clamped = s.clamp(-1.0, 1.0);
+                    (clamped * 8388607.0) as i32
+                })
+                .collect();
+
+            // Configure FLAC encoder
+            let config = flacenc::config::Encoder::default()
+                .into_verified()
+                .map_err(|(_, e)| format!("FLAC encoder config error: {:?}", e))?;
+
+            let source = flacenc::source::MemSource::from_samples(
+                &samples_i32,
+                entry.channels as usize,
+                24,
+                entry.sample_rate as usize,
+            );
+
+            // Encode to FLAC
+            let flac_stream = flacenc::encode_with_fixed_block_size(
+                &config,
+                source,
+                config.block_size,
+            ).map_err(|e| format!("FLAC encoding failed: {:?}", e))?;
+
+            // Convert stream to bytes
+            use flacenc::component::BitRepr;
+            let mut sink = flacenc::bitsink::ByteSink::new();
+            flac_stream.write(&mut sink)
+                .map_err(|e| format!("Failed to write FLAC stream: {:?}", e))?;
+            let flac_bytes = sink.as_slice();
+
+            flac_encode_time += flac_start.elapsed().as_secs_f64() * 1000.0;
+
+            let write_start = std::time::Instant::now();
+            zip.write_all(flac_bytes)
+                .map_err(|e| format!("Failed to write {}: {}", zip_filename, e))?;
+            zip_write_time += write_start.elapsed().as_secs_f64() * 1000.0;

-                zip.write_all(flac_bytes)
-                    .map_err(|e| format!("Failed to write {}: {}", zip_filename, e))?;
-            }
             // Update entry to point to ZIP file instead of embedding data
             modified_entry.embedded_data = None;
@@ -245,8 +341,17 @@ pub fn save_beam(
         modified_entries.push(modified_entry);
     }

+    eprintln!("šŸ“Š [SAVE_BEAM] Step 4: Process audio pool ({} entries) took {:.2}ms",
+        audio_pool_entries.len(), step4_start.elapsed().as_secs_f64() * 1000.0);
+    if flac_encode_time > 0.0 {
+        eprintln!("šŸ“Š [SAVE_BEAM]   - FLAC encoding: {:.2}ms", flac_encode_time);
+    }
+    if zip_write_time > 0.0 {
+        eprintln!("šŸ“Š [SAVE_BEAM]   - ZIP writing: {:.2}ms", zip_write_time);
+    }

     // 5. Build BeamProject structure with modified entries
+    let step5_start = std::time::Instant::now();
     let now = chrono::Utc::now().to_rfc3339();
     let beam_project = BeamProject {
         version: BEAM_VERSION.to_string(),
@@ -259,8 +364,10 @@ pub fn save_beam(
             audio_pool_entries: modified_entries,
         },
     };
+    eprintln!("šŸ“Š [SAVE_BEAM] Step 5: Build BeamProject structure took {:.2}ms", step5_start.elapsed().as_secs_f64() * 1000.0);

     // 6. Write project.json (compressed with DEFLATE)
+    let step6_start = std::time::Instant::now();
     let json_options = FileOptions::default()
         .compression_method(CompressionMethod::Deflated)
         .compression_level(Some(6));
@@ -273,10 +380,15 @@ pub fn save_beam(
     zip.write_all(json.as_bytes())
         .map_err(|e| format!("Failed to write project.json: {}", e))?;
+    eprintln!("šŸ“Š [SAVE_BEAM] Step 6: Write project.json ({} bytes) took {:.2}ms", json.len(), step6_start.elapsed().as_secs_f64() * 1000.0);

     // 7. Finalize ZIP
+    let step7_start = std::time::Instant::now();
     zip.finish()
         .map_err(|e| format!("Failed to finalize ZIP: {}", e))?;
+    eprintln!("šŸ“Š [SAVE_BEAM] Step 7: Finalize ZIP took {:.2}ms", step7_start.elapsed().as_secs_f64() * 1000.0);
+
+    eprintln!("šŸ“Š [SAVE_BEAM] āœ… Total save_beam() time: {:.2}ms", fn_start.elapsed().as_secs_f64() * 1000.0);

     Ok(())
 }
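Annotation: the FLAC branch converts f32 PCM to 24-bit integers because the flacenc encoder takes integer samples. The scaling, isolated for clarity (8388607 is 2^23 āˆ’ 1; the inverse mapping shown is an assumption about the matching decode step that load_beam below performs with claxon):

```rust
/// f32 sample in [-1.0, 1.0] -> 24-bit signed integer for FLAC encoding.
/// 8_388_607 = 2^23 - 1, the largest positive 24-bit sample value.
fn f32_to_i24(s: f32) -> i32 {
    (s.clamp(-1.0, 1.0) * 8_388_607.0) as i32
}

/// Inverse mapping for the decode path (assumes 24-bit samples; a robust
/// decoder should scale by the stream's actual bits_per_sample).
fn i24_to_f32(s: i32) -> f32 {
    s as f32 / 8_388_607.0
}
```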
@@ -296,23 +408,32 @@ pub fn save_beam(
 /// # Returns
 /// LoadedProject on success (with missing_files list), or error message
 pub fn load_beam(path: &Path) -> Result<LoadedProject, String> {
+    let fn_start = std::time::Instant::now();
+    eprintln!("šŸ“Š [LOAD_BEAM] Starting load_beam()...");
+
     // 1. Open ZIP archive
+    let step1_start = std::time::Instant::now();
     let file = File::open(path)
         .map_err(|e| format!("Failed to open file: {}", e))?;
     let mut zip = ZipArchive::new(file)
         .map_err(|e| format!("Failed to open ZIP archive: {}", e))?;
+    eprintln!("šŸ“Š [LOAD_BEAM] Step 1: Open ZIP archive took {:.2}ms", step1_start.elapsed().as_secs_f64() * 1000.0);

     // 2. Read project.json
+    let step2_start = std::time::Instant::now();
     let mut project_file = zip.by_name("project.json")
         .map_err(|e| format!("Failed to find project.json in archive: {}", e))?;
     let mut json_data = String::new();
     project_file.read_to_string(&mut json_data)
         .map_err(|e| format!("Failed to read project.json: {}", e))?;
+    eprintln!("šŸ“Š [LOAD_BEAM] Step 2: Read project.json ({} bytes) took {:.2}ms", json_data.len(), step2_start.elapsed().as_secs_f64() * 1000.0);

     // 3. Deserialize BeamProject
+    let step3_start = std::time::Instant::now();
     let beam_project: BeamProject = serde_json::from_str(&json_data)
         .map_err(|e| format!("Failed to deserialize project.json: {}", e))?;
+    eprintln!("šŸ“Š [LOAD_BEAM] Step 3: Deserialize BeamProject took {:.2}ms", step3_start.elapsed().as_secs_f64() * 1000.0);

     // 4. Check version compatibility
     if beam_project.version != BEAM_VERSION {
@@ -323,17 +444,23 @@ pub fn load_beam(path: &Path) -> Result<LoadedProject, String> {
     }

     // 5. Extract document and audio backend state
+    let step5_start = std::time::Instant::now();
     let document = beam_project.ui_state;
     let mut audio_project = beam_project.audio_backend.project;
     let audio_pool_entries = beam_project.audio_backend.audio_pool_entries;
+    eprintln!("šŸ“Š [LOAD_BEAM] Step 5: Extract document and audio state took {:.2}ms", step5_start.elapsed().as_secs_f64() * 1000.0);

     // 6. Rebuild AudioGraphs from presets
+    let step6_start = std::time::Instant::now();
     audio_project.rebuild_audio_graphs(DEFAULT_BUFFER_SIZE)
         .map_err(|e| format!("Failed to rebuild audio graphs: {}", e))?;
+    eprintln!("šŸ“Š [LOAD_BEAM] Step 6: Rebuild AudioGraphs took {:.2}ms", step6_start.elapsed().as_secs_f64() * 1000.0);

     // 7. Extract embedded audio files from ZIP and restore to entries
+    let step7_start = std::time::Instant::now();
     drop(project_file); // Close project.json file handle

     let mut restored_entries = Vec::new();
+    let mut flac_decode_time = 0.0;

     for entry in &audio_pool_entries {
         let mut restored_entry = entry.clone();
@@ -357,6 +484,8 @@ pub fn load_beam(path: &Path) -> Result<LoadedProject, String> {
             // For lossy formats, store the original bytes
             let embedded_data = if format == "flac" {
                 // Decode FLAC to PCM f32 samples
+                let flac_decode_start = std::time::Instant::now();
+
                 let cursor = std::io::Cursor::new(&audio_bytes);
                 let mut reader = claxon::FlacReader::new(cursor)
                     .map_err(|e| format!("Failed to create FLAC reader: {:?}", e))?;
@@ -379,6 +508,8 @@ pub fn load_beam(path: &Path) -> Result<LoadedProject, String> {
                     pcm_bytes.extend_from_slice(&sample.to_le_bytes());
                 }

+                flac_decode_time += flac_decode_start.elapsed().as_secs_f64() * 1000.0;
+
                 Some(daw_backend::audio::pool::EmbeddedAudioData {
                     data_base64: base64::encode(&pcm_bytes),
                     format: "wav".to_string(), // Mark as WAV since it's now PCM
@@ -403,10 +534,16 @@ pub fn load_beam(path: &Path) -> Result<LoadedProject, String> {
         restored_entries.push(restored_entry);
     }

+    eprintln!("šŸ“Š [LOAD_BEAM] Step 7: Extract embedded audio ({} entries) took {:.2}ms",
+        audio_pool_entries.len(), step7_start.elapsed().as_secs_f64() * 1000.0);
+    if flac_decode_time > 0.0 {
+        eprintln!("šŸ“Š [LOAD_BEAM]   - FLAC decoding: {:.2}ms", flac_decode_time);
+    }

     // 8. Check for missing external files
     // An entry is missing if it has a relative_path (external reference)
     // but no embedded_data and the file doesn't exist
+    let step8_start = std::time::Instant::now();
     let project_dir = path.parent().unwrap_or_else(|| Path::new("."));
     let missing_files: Vec<String> = restored_entries
         .iter()
         .filter_map(|entry| {
@@ -428,6 +565,9 @@ pub fn load_beam(path: &Path) -> Result<LoadedProject, String> {
             None
         })
         .collect();
+    eprintln!("šŸ“Š [LOAD_BEAM] Step 8: Check missing files took {:.2}ms", step8_start.elapsed().as_secs_f64() * 1000.0);
+
+    eprintln!("šŸ“Š [LOAD_BEAM] āœ… Total load_beam() time: {:.2}ms", fn_start.elapsed().as_secs_f64() * 1000.0);

     Ok(LoadedProject {
         document,
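Annotation: the FLAC restore path in step 7 is only partially visible between these hunks. A minimal sketch of decoding FLAC bytes back to interleaved f32 with claxon, assuming load_beam normalizes by the stream's bit depth (the function name and error strings are illustrative):

```rust
use std::io::Cursor;

/// Decode FLAC bytes to interleaved f32 PCM plus (channels, sample_rate).
fn flac_to_f32(flac_bytes: &[u8]) -> Result<(Vec<f32>, u32, u32), String> {
    let mut reader = claxon::FlacReader::new(Cursor::new(flac_bytes))
        .map_err(|e| format!("Failed to create FLAC reader: {:?}", e))?;
    let info = reader.streaminfo();
    // Scale factor for the stream's bit depth, e.g. 2^23 - 1 for 24-bit.
    let scale = ((1i64 << (info.bits_per_sample - 1)) - 1) as f32;
    let samples = reader
        .samples()
        .map(|s| s.map(|v| v as f32 / scale))
        .collect::<Result<Vec<f32>, _>>()
        .map_err(|e| format!("FLAC decode failed: {:?}", e))?;
    Ok((samples, info.channels, info.sample_rate))
}
```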
diff --git a/lightningbeam-ui/lightningbeam-editor/src/main.rs b/lightningbeam-ui/lightningbeam-editor/src/main.rs
index a6049a2..a4b76c9 100644
--- a/lightningbeam-ui/lightningbeam-editor/src/main.rs
+++ b/lightningbeam-ui/lightningbeam-editor/src/main.rs
@@ -330,8 +330,12 @@ impl FileOperationsWorker {
     ) {
         use lightningbeam_core::file_io::{save_beam, SaveSettings};

+        let save_start = std::time::Instant::now();
+        eprintln!("šŸ“Š [SAVE] Starting save operation...");
+
         // Step 1: Serialize audio pool
         let _ = progress_tx.send(FileProgress::SerializingAudioPool);
+        let step1_start = std::time::Instant::now();

         let audio_pool_entries = {
             let mut controller = self.audio_controller.lock().unwrap();
@@ -343,8 +347,10 @@ impl FileOperationsWorker {
                 }
             }
         };
+        eprintln!("šŸ“Š [SAVE] Step 1: Serialize audio pool took {:.2}ms", step1_start.elapsed().as_secs_f64() * 1000.0);

         // Step 2: Get project
+        let step2_start = std::time::Instant::now();
         let mut audio_project = {
             let mut controller = self.audio_controller.lock().unwrap();
             match controller.get_project() {
@@ -355,13 +361,17 @@ impl FileOperationsWorker {
                 }
             }
         };
+        eprintln!("šŸ“Š [SAVE] Step 2: Get audio project took {:.2}ms", step2_start.elapsed().as_secs_f64() * 1000.0);

         // Step 3: Save to file
         let _ = progress_tx.send(FileProgress::WritingZip);
+        let step3_start = std::time::Instant::now();
         let settings = SaveSettings::default();

         match save_beam(&path, &document, &mut audio_project, audio_pool_entries, &settings) {
             Ok(()) => {
+                eprintln!("šŸ“Š [SAVE] Step 3: save_beam() took {:.2}ms", step3_start.elapsed().as_secs_f64() * 1000.0);
+                eprintln!("šŸ“Š [SAVE] āœ… Total save time: {:.2}ms", save_start.elapsed().as_secs_f64() * 1000.0);
                 println!("āœ… Saved to: {}", path.display());
                 let _ = progress_tx.send(FileProgress::Done);
             }
@@ -379,8 +389,12 @@ impl FileOperationsWorker {
     ) {
         use lightningbeam_core::file_io::load_beam;

+        let load_start = std::time::Instant::now();
+        eprintln!("šŸ“Š [LOAD] Starting load operation...");
+
         // Step 1: Load from file
         let _ = progress_tx.send(FileProgress::LoadingProject);
+        let step1_start = std::time::Instant::now();

         let loaded_project = match load_beam(&path) {
             Ok(p) => p,
@@ -389,6 +403,7 @@ impl FileOperationsWorker {
                 return;
             }
         };
+        eprintln!("šŸ“Š [LOAD] Step 1: load_beam() took {:.2}ms", step1_start.elapsed().as_secs_f64() * 1000.0);

         // Check for missing files
         if !loaded_project.missing_files.is_empty() {
@@ -398,6 +413,8 @@ impl FileOperationsWorker {
             }
         }

+        eprintln!("šŸ“Š [LOAD] āœ… Total load time: {:.2}ms", load_start.elapsed().as_secs_f64() * 1000.0);
+
         // Send the loaded project back to UI thread for processing
         let _ = progress_tx.send(FileProgress::Complete(Ok(loaded_project)));
     }
@@ -455,6 +472,10 @@ struct EditorApp {
     /// Prevents repeated backend queries for the same MIDI clip
     /// Format: (timestamp, note_number, is_note_on)
     midi_event_cache: HashMap>,
+    /// Cache for audio waveform data (keyed by audio_pool_index)
+    /// Prevents repeated backend queries for the same audio file
+    /// Format: Vec of WaveformPeak (min/max pairs)
+    waveform_cache: HashMap<usize, Vec<daw_backend::WaveformPeak>>,

     /// Current file path (None if not yet saved)
     current_file_path: Option<std::path::PathBuf>,
@@ -578,6 +599,7 @@ impl EditorApp {
             paint_bucket_gap_tolerance: 5.0, // Default gap tolerance
             polygon_sides: 5, // Default to pentagon
             midi_event_cache: HashMap::new(), // Initialize empty MIDI event cache
+            waveform_cache: HashMap::new(), // Initialize empty waveform cache
             current_file_path: None, // No file loaded initially
             file_command_tx,
             file_operation: None, // No file operation in progress initially
@@ -590,48 +612,71 @@ impl EditorApp {
     /// - After loading a document from file
     /// - After creating a new document with pre-existing MIDI layers
     ///
-    /// For each MIDI audio layer:
-    /// 1. Creates a daw-backend MIDI track
-    /// 2. Loads the default instrument
+    /// For each audio layer (MIDI or Sampled):
+    /// 1. Creates a daw-backend track (MIDI or Audio)
+    /// 2. For MIDI: Loads the default instrument
     /// 3. Stores the bidirectional mapping
     /// 4. Syncs any existing clips on the layer
-    fn sync_midi_layers_to_backend(&mut self) {
+    fn sync_audio_layers_to_backend(&mut self) {
         use lightningbeam_core::layer::{AnyLayer, AudioLayerType};

         // Iterate through all layers in the document
         for layer in &self.action_executor.document().root.children {
-            // Only process Audio layers with MIDI type
+            // Only process Audio layers
             if let AnyLayer::Audio(audio_layer) = layer {
-                if audio_layer.audio_layer_type == AudioLayerType::Midi {
-                    let layer_id = audio_layer.layer.id;
-                    let layer_name = &audio_layer.layer.name;
+                let layer_id = audio_layer.layer.id;
+                let layer_name = &audio_layer.layer.name;

-                    // Skip if already mapped (shouldn't happen, but be defensive)
-                    if self.layer_to_track_map.contains_key(&layer_id) {
-                        continue;
-                    }
+                // Skip if already mapped (shouldn't happen, but be defensive)
+                if self.layer_to_track_map.contains_key(&layer_id) {
+                    continue;
+                }

-                    // Create daw-backend MIDI track
-                    if let Some(ref controller_arc) = self.audio_controller {
-                        let mut controller = controller_arc.lock().unwrap();
-                        match controller.create_midi_track_sync(layer_name.clone()) {
-                            Ok(track_id) => {
-                                // Store bidirectional mapping
-                                self.layer_to_track_map.insert(layer_id, track_id);
-                                self.track_to_layer_map.insert(track_id, layer_id);
+                // Handle both MIDI and Sampled audio tracks
+                match audio_layer.audio_layer_type {
+                    AudioLayerType::Midi => {
+                        // Create daw-backend MIDI track
+                        if let Some(ref controller_arc) = self.audio_controller {
+                            let mut controller = controller_arc.lock().unwrap();
+                            match controller.create_midi_track_sync(layer_name.clone()) {
+                                Ok(track_id) => {
+                                    // Store bidirectional mapping
+                                    self.layer_to_track_map.insert(layer_id, track_id);
+                                    self.track_to_layer_map.insert(track_id, layer_id);

-                                // Load default instrument
-                                if let Err(e) = default_instrument::load_default_instrument(&mut *controller, track_id) {
-                                    eprintln!("āš ļø Failed to load default instrument for {}: {}", layer_name, e);
-                                } else {
-                                    println!("āœ… Synced MIDI layer '{}' to backend (TrackId: {})", layer_name, track_id);
+                                    // Load default instrument
+                                    if let Err(e) = default_instrument::load_default_instrument(&mut *controller, track_id) {
+                                        eprintln!("āš ļø Failed to load default instrument for {}: {}", layer_name, e);
+                                    } else {
+                                        println!("āœ… Synced MIDI layer '{}' to backend (TrackId: {})", layer_name, track_id);
+                                    }
+
+                                    // TODO: Sync any existing clips on this layer to the backend
+                                    // This will be implemented when we add clip synchronization
+                                }
+                                Err(e) => {
+                                    eprintln!("āš ļø Failed to create daw-backend track for MIDI layer '{}': {}", layer_name, e);
                                 }
-
-                                // TODO: Sync any existing clips on this layer to the backend
-                                // This will be implemented when we add clip synchronization
                             }
-                            Err(e) => {
-                                eprintln!("āš ļø Failed to create daw-backend track for MIDI layer '{}': {}", layer_name, e);
+                        }
+                    }
+                    AudioLayerType::Sampled => {
+                        // Create daw-backend Audio track
+                        if let Some(ref controller_arc) = self.audio_controller {
+                            let mut controller = controller_arc.lock().unwrap();
+                            match controller.create_audio_track_sync(layer_name.clone()) {
+                                Ok(track_id) => {
+                                    // Store bidirectional mapping
+                                    self.layer_to_track_map.insert(layer_id, track_id);
+                                    self.track_to_layer_map.insert(track_id, layer_id);
+                                    println!("āœ… Synced Audio layer '{}' to backend (TrackId: {})", layer_name, track_id);
+
+                                    // TODO: Sync any existing clips on this layer to the backend
+                                    // This will be implemented when we add clip synchronization
+                                }
+                                Err(e) => {
+                                    eprintln!("āš ļø Failed to create daw-backend audio track for '{}': {}", layer_name, e);
+                                }
                             }
                         }
                     }
@@ -640,6 +685,34 @@ impl EditorApp {
         }
     }

+    /// Fetch waveform data from backend for a specific audio pool index
+    /// Returns cached data if available, otherwise queries backend
+    fn fetch_waveform(&mut self, pool_index: usize) -> Option<Vec<daw_backend::WaveformPeak>> {
+        // Check if already cached
+        if let Some(waveform) = self.waveform_cache.get(&pool_index) {
+            return Some(waveform.clone());
+        }
+
+        // Fetch from backend
+        // Request 20,000 peaks for high-detail waveform visualization
+        // For a 200s file, this gives ~100 peaks/second, providing smooth visualization at all zoom levels
+        if let Some(ref controller_arc) = self.audio_controller {
+            let mut controller = controller_arc.lock().unwrap();
+            match controller.get_pool_waveform(pool_index, 20000) {
+                Ok(waveform) => {
+                    self.waveform_cache.insert(pool_index, waveform.clone());
+                    Some(waveform)
+                }
+                Err(e) => {
+                    eprintln!("āš ļø Failed to fetch waveform for pool index {}: {}", pool_index, e);
+                    None
+                }
+            }
+        } else {
+            None
+        }
+    }
+
     fn switch_layout(&mut self, index: usize) {
         self.current_layout_index = index;
         self.current_layout = self.layouts[index].layout.clone();
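Annotation: fetch_waveform above is an instance of a fetch-or-memoize pattern. The shape, factored out as a generic sketch only — the real method must also route through the controller mutex, so this is illustrative rather than a drop-in helper:

```rust
use std::collections::HashMap;
use std::fmt::Debug;
use std::hash::Hash;

/// Return a cached value if present; otherwise run `query` and memoize.
/// `query` stands in for the backend controller call and may fail.
fn fetch_cached<K, V, F>(cache: &mut HashMap<K, V>, key: K, query: F) -> Option<V>
where
    K: Hash + Eq + Copy + Debug,
    V: Clone,
    F: FnOnce(K) -> Result<V, String>,
{
    if let Some(v) = cache.get(&key) {
        return Some(v.clone());
    }
    match query(key) {
        Ok(v) => {
            cache.insert(key, v.clone());
            Some(v)
        }
        Err(e) => {
            eprintln!("āš ļø query for {:?} failed: {}", key, e);
            None
        }
    }
}
```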
@@ -969,8 +1042,37 @@ impl EditorApp {
                 // TODO: Implement add video layer
             }
             MenuAction::AddAudioTrack => {
-                println!("Menu: Add Audio Track");
-                // TODO: Implement add audio track
+                // Create a new sampled audio layer with a default name
+                let layer_count = self.action_executor.document().root.children.len();
+                let layer_name = format!("Audio Track {}", layer_count + 1);
+
+                // Create audio layer in document
+                let audio_layer = AudioLayer::new_sampled(layer_name.clone());
+                let action = lightningbeam_core::actions::AddLayerAction::new(AnyLayer::Audio(audio_layer));
+                self.action_executor.execute(Box::new(action));
+
+                // Get the newly created layer ID
+                if let Some(last_layer) = self.action_executor.document().root.children.last() {
+                    let layer_id = last_layer.id();
+                    self.active_layer_id = Some(layer_id);
+
+                    // Create corresponding daw-backend audio track
+                    if let Some(ref controller_arc) = self.audio_controller {
+                        let mut controller = controller_arc.lock().unwrap();
+                        match controller.create_audio_track_sync(layer_name.clone()) {
+                            Ok(track_id) => {
+                                // Store bidirectional mapping
+                                self.layer_to_track_map.insert(layer_id, track_id);
+                                self.track_to_layer_map.insert(track_id, layer_id);
+                                println!("āœ… Created {} (backend TrackId: {})", layer_name, track_id);
+                            }
+                            Err(e) => {
+                                eprintln!("āš ļø Failed to create daw-backend audio track for {}: {}", layer_name, e);
+                                eprintln!("   Layer created but will be silent until backend track is available");
+                            }
+                        }
+                    }
+                }
             }
             MenuAction::AddMidiTrack => {
                 // Create a new MIDI audio layer with a default name
@@ -1270,6 +1372,9 @@ impl EditorApp {
     fn apply_loaded_project(&mut self, loaded_project: lightningbeam_core::file_io::LoadedProject, path: std::path::PathBuf) {
         use lightningbeam_core::action::ActionExecutor;

+        let apply_start = std::time::Instant::now();
+        eprintln!("šŸ“Š [APPLY] Starting apply_loaded_project() on UI thread...");
+
         // Check for missing files
         if !loaded_project.missing_files.is_empty() {
             eprintln!("āš ļø {} missing files", loaded_project.missing_files.len());
@@ -1280,33 +1385,78 @@ impl EditorApp {
         }

         // Replace document
+        let step1_start = std::time::Instant::now();
         self.action_executor = ActionExecutor::new(loaded_project.document);
+        eprintln!("šŸ“Š [APPLY] Step 1: Replace document took {:.2}ms", step1_start.elapsed().as_secs_f64() * 1000.0);

         // Restore UI layout from loaded document
+        let step2_start = std::time::Instant::now();
         self.restore_layout_from_document();
+        eprintln!("šŸ“Š [APPLY] Step 2: Restore UI layout took {:.2}ms", step2_start.elapsed().as_secs_f64() * 1000.0);

         // Set project in audio engine via query
+        let step3_start = std::time::Instant::now();
         if let Some(ref controller_arc) = self.audio_controller {
             let mut controller = controller_arc.lock().unwrap();
             if let Err(e) = controller.set_project(loaded_project.audio_project) {
                 eprintln!("āŒ Failed to set project: {}", e);
                 return;
             }
+            eprintln!("šŸ“Š [APPLY] Step 3: Set audio project took {:.2}ms", step3_start.elapsed().as_secs_f64() * 1000.0);

-            // Load audio pool
-            if let Err(e) = controller.load_audio_pool(
-                loaded_project.audio_pool_entries,
-                &path,
-            ) {
-                eprintln!("āŒ Failed to load audio pool: {}", e);
-                return;
-            }
+            // Load audio pool asynchronously to avoid blocking UI
+            let step4_start = std::time::Instant::now();
+            let controller_clone = controller_arc.clone();
+            let path_clone = path.clone();
+            let audio_pool_entries = loaded_project.audio_pool_entries;
+
+            std::thread::spawn(move || {
+                eprintln!("šŸ“Š [APPLY] Step 4: Starting async audio pool load...");
+                let load_start = std::time::Instant::now();
+                let mut controller = controller_clone.lock().unwrap();
+                if let Err(e) = controller.load_audio_pool(audio_pool_entries, &path_clone) {
+                    eprintln!("āŒ Failed to load audio pool: {}", e);
+                } else {
+                    eprintln!("šŸ“Š [APPLY] Step 4: Async audio pool load completed in {:.2}ms", load_start.elapsed().as_secs_f64() * 1000.0);
+                }
+            });
+            eprintln!("šŸ“Š [APPLY] Step 4: Spawned async audio pool load in {:.2}ms", step4_start.elapsed().as_secs_f64() * 1000.0);
         }

         // Reset state
+        let step5_start = std::time::Instant::now();
         self.layer_to_track_map.clear();
         self.track_to_layer_map.clear();
-        self.sync_midi_layers_to_backend();
+        eprintln!("šŸ“Š [APPLY] Step 5: Clear track maps took {:.2}ms", step5_start.elapsed().as_secs_f64() * 1000.0);
+
+        // Sync audio layers (MIDI and Sampled)
+        let step6_start = std::time::Instant::now();
+        self.sync_audio_layers_to_backend();
+        eprintln!("šŸ“Š [APPLY] Step 6: Sync audio layers took {:.2}ms", step6_start.elapsed().as_secs_f64() * 1000.0);
+
+        // Fetch waveforms for all audio clips in the loaded project
+        let step7_start = std::time::Instant::now();
+        // Collect pool indices first to avoid borrowing issues
+        let pool_indices: Vec<usize> = self.action_executor.document()
+            .audio_clips.values()
+            .filter_map(|clip| {
+                if let lightningbeam_core::clip::AudioClipType::Sampled { audio_pool_index } = &clip.clip_type {
+                    Some(*audio_pool_index)
+                } else {
+                    None
+                }
+            })
+            .collect();
+
+        let mut waveforms_fetched = 0;
+        for pool_index in pool_indices {
+            if self.fetch_waveform(pool_index).is_some() {
+                waveforms_fetched += 1;
+            }
+        }
+        eprintln!("šŸ“Š [APPLY] Step 7: Fetched {} waveforms in {:.2}ms", waveforms_fetched, step7_start.elapsed().as_secs_f64() * 1000.0);
+
+        // Reset playback state
         self.playback_time = 0.0;
         self.is_playing = false;
         self.current_file_path = Some(path.clone());
@@ -1316,6 +1466,7 @@ impl EditorApp {
             self.active_layer_id = Some(first.id());
         }

+        eprintln!("šŸ“Š [APPLY] āœ… Total apply_loaded_project() time: {:.2}ms", apply_start.elapsed().as_secs_f64() * 1000.0);
         println!("āœ… Loaded from: {}", path.display());
     }
@@ -1376,23 +1527,31 @@ impl EditorApp {
         // Add to audio engine pool if available
         if let Some(ref controller_arc) = self.audio_controller {
-            let mut controller = controller_arc.lock().unwrap();
-            // Send audio data to the engine
-            let path_str = path.to_string_lossy().to_string();
-            controller.add_audio_file(
-                path_str.clone(),
-                audio_file.data,
-                channels,
-                sample_rate,
-            );
+            let pool_index = {
+                let mut controller = controller_arc.lock().unwrap();
+                // Send audio data to the engine
+                let path_str = path.to_string_lossy().to_string();
+                controller.add_audio_file(
+                    path_str.clone(),
+                    audio_file.data,
+                    channels,
+                    sample_rate,
+                );

-            // For now, use a placeholder pool index (the engine will assign the real one)
-            // In a full implementation, we'd wait for the AudioFileAdded event
-            let pool_index = self.action_executor.document().audio_clips.len();
+                // For now, use a placeholder pool index (the engine will assign the real one)
+                // In a full implementation, we'd wait for the AudioFileAdded event
+                self.action_executor.document().audio_clips.len()
+            }; // Controller lock is dropped here

             // Create audio clip in document
             let clip = AudioClip::new_sampled(&name, pool_index, duration);
             let clip_id = self.action_executor.document_mut().add_audio_clip(clip);
+
+            // Fetch waveform from backend and cache it for rendering
+            if let Some(waveform) = self.fetch_waveform(pool_index) {
+                println!("āœ… Cached waveform with {} peaks", waveform.len());
+            }
+
             println!("Imported audio '{}' ({:.1}s, {}ch, {}Hz) - ID: {}",
                 name, duration, channels, sample_rate, clip_id);
         } else {
@@ -1504,6 +1663,29 @@ impl eframe::App for EditorApp {
             }
         }

+        // Fetch missing waveforms on-demand (for lazy loading after project load)
+        // Collect pool indices that need waveforms
+        let missing_waveforms: Vec<usize> = self.action_executor.document()
+            .audio_clips.values()
+            .filter_map(|clip| {
+                if let lightningbeam_core::clip::AudioClipType::Sampled { audio_pool_index } = &clip.clip_type {
+                    // Check if not already cached
+                    if !self.waveform_cache.contains_key(audio_pool_index) {
+                        Some(*audio_pool_index)
+                    } else {
+                        None
+                    }
+                } else {
+                    None
+                }
+            })
+            .collect();
+
+        // Fetch missing waveforms
+        for pool_index in missing_waveforms {
+            self.fetch_waveform(pool_index);
+        }
+
         // Handle file operation progress
         if let Some(ref mut operation) = self.file_operation {
             // Set wait cursor
@@ -1675,6 +1857,7 @@ impl eframe::App for EditorApp {
                 polygon_sides: &mut self.polygon_sides,
                 layer_to_track_map: &self.layer_to_track_map,
                 midi_event_cache: &self.midi_event_cache,
+                waveform_cache: &self.waveform_cache,
             };

             render_layout_node(
@@ -1843,6 +2026,8 @@ struct RenderContext<'a> {
     layer_to_track_map: &'a std::collections::HashMap,
     /// Cache of MIDI events for rendering (keyed by backend midi_clip_id)
     midi_event_cache: &'a HashMap>,
+    /// Cache of waveform data for rendering (keyed by audio_pool_index)
+    waveform_cache: &'a HashMap<usize, Vec<daw_backend::WaveformPeak>>,
 }

 /// Recursively render a layout node with drag support
@@ -2314,6 +2499,7 @@ fn render_pane(
                 paint_bucket_gap_tolerance: ctx.paint_bucket_gap_tolerance,
                 polygon_sides: ctx.polygon_sides,
                 midi_event_cache: ctx.midi_event_cache,
+                waveform_cache: ctx.waveform_cache,
             };
             pane_instance.render_header(&mut header_ui, &mut shared);
         }
@@ -2368,6 +2554,7 @@ fn render_pane(
                 paint_bucket_gap_tolerance: ctx.paint_bucket_gap_tolerance,
                 polygon_sides: ctx.polygon_sides,
                 midi_event_cache: ctx.midi_event_cache,
+                waveform_cache: ctx.waveform_cache,
             };

             // Render pane content (header was already rendered above)
diff --git a/lightningbeam-ui/lightningbeam-editor/src/panes/mod.rs b/lightningbeam-ui/lightningbeam-editor/src/panes/mod.rs
index 95e8142..3d9e5c5 100644
--- a/lightningbeam-ui/lightningbeam-editor/src/panes/mod.rs
+++ b/lightningbeam-ui/lightningbeam-editor/src/panes/mod.rs
@@ -127,6 +127,8 @@ pub struct SharedPaneState<'a> {
     pub polygon_sides: &'a mut u32,
     /// Cache of MIDI events for rendering (keyed by backend midi_clip_id)
     pub midi_event_cache: &'a std::collections::HashMap>,
+    /// Cache of waveform data for rendering (keyed by audio_pool_index)
+    pub waveform_cache: &'a std::collections::HashMap<usize, Vec<daw_backend::WaveformPeak>>,
 }

 /// Trait for pane rendering
diff --git a/lightningbeam-ui/lightningbeam-editor/src/panes/timeline.rs b/lightningbeam-ui/lightningbeam-editor/src/panes/timeline.rs
index a4581a1..2f9929d 100644
--- a/lightningbeam-ui/lightningbeam-editor/src/panes/timeline.rs
+++ b/lightningbeam-ui/lightningbeam-editor/src/panes/timeline.rs
@@ -14,7 +14,7 @@ use super::{DragClipType, NodePath, PaneRenderer, SharedPaneState};
 const RULER_HEIGHT: f32 = 30.0;
 const LAYER_HEIGHT: f32 = 60.0;
 const LAYER_HEADER_WIDTH: f32 = 200.0;
-const MIN_PIXELS_PER_SECOND: f32 = 20.0;
+const MIN_PIXELS_PER_SECOND: f32 = 1.0; // Allow zooming out to see 10+ minutes
 const MAX_PIXELS_PER_SECOND: f32 = 500.0;
 const EDGE_DETECTION_PIXELS: f32 = 8.0; // Distance from edge to detect trim handles
@@ -456,6 +456,135 @@ impl TimelinePane {
         }
     }

+    /// Render waveform visualization for audio clips on timeline
+    /// Uses peak-based rendering: each waveform sample has a fixed pixel width that scales with zoom
+    #[allow(clippy::too_many_arguments)]
+    fn render_audio_waveform(
+        painter: &egui::Painter,
+        clip_rect: egui::Rect,
+        clip_start_x: f32,            // Absolute screen x where clip starts (can be offscreen)
+        clip_bg_color: egui::Color32, // Background color of the clip
+        waveform: &[daw_backend::WaveformPeak],
+        clip_duration: f64,
+        pixels_per_second: f32,
+        trim_start: f64,
+        theme: &crate::theme::Theme,
+        ctx: &egui::Context,
+    ) {
+        if waveform.is_empty() {
+            return;
+        }
+
+        let clip_height = clip_rect.height();
+        let center_y = clip_rect.center().y;
+
+        // Calculate waveform color: lighten the clip background color
+        // Blend clip background with white (70% white + 30% clip color) for subtle tint
+        // Use full opacity to prevent overlapping lines from blending lighter when zoomed out
+        let r = ((255.0 * 0.7) + (clip_bg_color.r() as f32 * 0.3)) as u8;
+        let g = ((255.0 * 0.7) + (clip_bg_color.g() as f32 * 0.3)) as u8;
+        let b = ((255.0 * 0.7) + (clip_bg_color.b() as f32 * 0.3)) as u8;
+        let waveform_color = egui::Color32::from_rgb(r, g, b);
+
+        // Calculate how wide each peak should be at current zoom (mirrors JavaScript)
+        // fullSourceWidth = sourceDuration * pixelsPerSecond
+        // pixelsPerPeak = fullSourceWidth / waveformData.length
+        let full_source_width = clip_duration * pixels_per_second as f64;
+        let pixels_per_peak = full_source_width / waveform.len() as f64;
+
+        // Calculate which peak corresponds to the clip's offset (trimmed left edge)
+        let offset_peak_index = ((trim_start / clip_duration) * waveform.len() as f64).floor() as usize;
+        let offset_peak_index = offset_peak_index.min(waveform.len().saturating_sub(1));
+
+        // Calculate visible peak range
+        // firstVisiblePeak = max(offsetPeakIndex, floor((visibleStart - startX) / pixelsPerPeak) + offsetPeakIndex)
+        let visible_start = clip_rect.min.x;
+        let visible_end = clip_rect.max.x;
+
+        let first_visible_peak_from_viewport = if pixels_per_peak > 0.0 {
+            (((visible_start - clip_start_x) as f64 / pixels_per_peak).floor() as isize + offset_peak_index as isize).max(0)
+        } else {
+            offset_peak_index as isize
+        };
+        let first_visible_peak = (first_visible_peak_from_viewport as usize).max(offset_peak_index);
+
+        let last_visible_peak_from_viewport = if pixels_per_peak > 0.0 {
+            ((visible_end - clip_start_x) as f64 / pixels_per_peak).ceil() as isize + offset_peak_index as isize
+        } else {
+            offset_peak_index as isize
+        };
+        let last_visible_peak = (last_visible_peak_from_viewport as usize)
+            .min(waveform.len().saturating_sub(1));
+
+        if first_visible_peak > last_visible_peak || first_visible_peak >= waveform.len() {
+            return;
+        }
+
+        println!("\nšŸŽµ WAVEFORM RENDER:");
+        println!("   Waveform total peaks: {}", waveform.len());
+        println!("   Clip duration: {:.2}s", clip_duration);
+        println!("   Pixels per second: {}", pixels_per_second);
+        println!("   Pixels per peak: {:.4}", pixels_per_peak);
+        println!("   Trim start: {:.2}s", trim_start);
+        println!("   Offset peak index: {}", offset_peak_index);
+        println!("   Clip start X: {:.1}", clip_start_x);
+        println!("   Clip rect: x=[{:.1}, {:.1}], y=[{:.1}, {:.1}]",
+            clip_rect.min.x, clip_rect.max.x, clip_rect.min.y, clip_rect.max.y);
+        println!("   Visible start: {:.1}, end: {:.1}", visible_start, visible_end);
+        println!("   First visible peak: {} (time: {:.2}s)",
+            first_visible_peak, first_visible_peak as f64 * clip_duration / waveform.len() as f64);
+        println!("   Last visible peak: {} (time: {:.2}s)",
+            last_visible_peak, last_visible_peak as f64 * clip_duration / waveform.len() as f64);
+        println!("   Peak range size: {}", last_visible_peak - first_visible_peak + 1);
+
+        // Draw waveform as vertical lines from min to max
+        // Line width scales with zoom to avoid gaps between peaks
+        let line_width = if pixels_per_peak > 1.0 {
+            pixels_per_peak.ceil() as f32
+        } else {
+            1.0
+        };
+
+        let mut peaks_drawn = 0;
+        let mut lines = Vec::new();
+
+        for i in first_visible_peak..=last_visible_peak {
+            if i >= waveform.len() {
+                break;
+            }
+
+            let peak_x = clip_start_x + ((i as isize - offset_peak_index as isize) as f64 * pixels_per_peak) as f32;
+            let peak = &waveform[i];
+
+            // Calculate Y positions for min and max
+            let max_y = center_y + (peak.max * clip_height * 0.45);
+            let min_y = center_y + (peak.min * clip_height * 0.45);
+
+            if peaks_drawn < 3 {
+                println!("   PEAK[{}]: x={:.1}, min={:.3} (y={:.1}), max={:.3} (y={:.1})",
+                    i, peak_x, peak.min, min_y, peak.max, max_y);
+            }
+
+            // Draw vertical line from min to max
+            lines.push((
+                egui::pos2(peak_x, max_y),
+                egui::pos2(peak_x, min_y),
+            ));
+
+            peaks_drawn += 1;
+        }
+
+        println!("   Peaks drawn: {}, line width: {:.1}px", peaks_drawn, line_width);
+
+        // Draw all lines with clipping
+        for (start, end) in lines {
+            painter.with_clip_rect(clip_rect).line_segment(
+                [start, end],
+                egui::Stroke::new(line_width, waveform_color),
+            );
+        }
+    }
+
     /// Render layer header column (left side with track names and controls)
     fn render_layer_headers(
         &mut self,
@@ -514,9 +643,14 @@ impl TimelinePane {
         let layer_data = layer.layer();
         let layer_name = &layer_data.name;
         let (layer_type, type_color) = match layer {
-            lightningbeam_core::layer::AnyLayer::Vector(_) => ("Vector", egui::Color32::from_rgb(100, 150, 255)), // Blue
-            lightningbeam_core::layer::AnyLayer::Audio(_) => ("Audio", egui::Color32::from_rgb(100, 255, 150)), // Green
-            lightningbeam_core::layer::AnyLayer::Video(_) => ("Video", egui::Color32::from_rgb(255, 150, 100)), // Orange
+            lightningbeam_core::layer::AnyLayer::Vector(_) => ("Vector", egui::Color32::from_rgb(255, 180, 100)), // Orange
+            lightningbeam_core::layer::AnyLayer::Audio(audio_layer) => {
+                match audio_layer.audio_layer_type {
+                    lightningbeam_core::layer::AudioLayerType::Midi => ("MIDI", egui::Color32::from_rgb(100, 255, 150)), // Green
+                    lightningbeam_core::layer::AudioLayerType::Sampled => ("Audio", egui::Color32::from_rgb(100, 180, 255)), // Blue
+                }
+            }
+            lightningbeam_core::layer::AnyLayer::Video(_) => ("Video", egui::Color32::from_rgb(255, 150, 100)), // Orange/Red
         };

         // Color indicator bar on the left edge
@@ -735,6 +869,7 @@ impl TimelinePane {
         active_layer_id: &Option,
         selection: &lightningbeam_core::selection::Selection,
         midi_event_cache: &std::collections::HashMap>,
+        waveform_cache: &std::collections::HashMap<usize, Vec<daw_backend::WaveformPeak>>,
     ) {
         let painter = ui.painter();
@@ -875,16 +1010,24 @@ impl TimelinePane {
         // Choose color based on layer type
         let (clip_color, bright_color) = match layer {
             lightningbeam_core::layer::AnyLayer::Vector(_) => (
-                egui::Color32::from_rgb(100, 150, 255), // Blue
-                egui::Color32::from_rgb(150, 200, 255), // Bright blue
-            ),
-            lightningbeam_core::layer::AnyLayer::Audio(_) => (
-                egui::Color32::from_rgb(100, 255, 150), // Green
-                egui::Color32::from_rgb(150, 255, 200), // Bright green
+                egui::Color32::from_rgb(220, 150, 80), // Orange
+                egui::Color32::from_rgb(255, 210, 150), // Bright orange
             ),
+            lightningbeam_core::layer::AnyLayer::Audio(audio_layer) => {
+                match audio_layer.audio_layer_type {
+                    lightningbeam_core::layer::AudioLayerType::Midi => (
+                        egui::Color32::from_rgb(100, 200, 150), // Green
+                        egui::Color32::from_rgb(150, 255, 200), // Bright green
+                    ),
+                    lightningbeam_core::layer::AudioLayerType::Sampled => (
+                        egui::Color32::from_rgb(80, 150, 220), // Blue
+                        egui::Color32::from_rgb(150, 210, 255), // Bright blue
+                    ),
+                }
+            }
             lightningbeam_core::layer::AnyLayer::Video(_) => (
-                egui::Color32::from_rgb(255, 150, 100), // Orange
-                egui::Color32::from_rgb(255, 200, 150), // Bright orange
+                egui::Color32::from_rgb(255, 150, 100), // Orange/Red
+                egui::Color32::from_rgb(255, 200, 150), // Bright orange/red
             ),
         };
@@ -900,24 +1043,47 @@ impl TimelinePane {
             clip_color,
         );

-        // MIDI VISUALIZATION: Draw piano roll overlay for MIDI clips
+        // AUDIO VISUALIZATION: Draw piano roll or waveform overlay
         if let lightningbeam_core::layer::AnyLayer::Audio(_) = layer {
             if let Some(clip) = document.get_audio_clip(&clip_instance.clip_id) {
-                if let lightningbeam_core::clip::AudioClipType::Midi { midi_clip_id } = &clip.clip_type {
-                    if let Some(events) = midi_event_cache.get(midi_clip_id) {
-                        Self::render_midi_piano_roll(
-                            painter,
-                            clip_rect,
-                            rect.min.x, // Pass timeline panel left edge for proper positioning
-                            events,
-                            clip_instance.trim_start,
-                            instance_duration,
-                            instance_start,
-                            self.viewport_start_time,
-                            self.pixels_per_second,
-                            theme,
-                            ui.ctx(),
-                        );
+                match &clip.clip_type {
+                    // MIDI: Draw piano roll
+                    lightningbeam_core::clip::AudioClipType::Midi { midi_clip_id } => {
+                        if let Some(events) = midi_event_cache.get(midi_clip_id) {
+                            Self::render_midi_piano_roll(
+                                painter,
+                                clip_rect,
+                                rect.min.x, // Pass timeline panel left edge for proper positioning
+                                events,
+                                clip_instance.trim_start,
+                                instance_duration,
+                                instance_start,
+                                self.viewport_start_time,
+                                self.pixels_per_second,
+                                theme,
+                                ui.ctx(),
+                            );
+                        }
+                    }
+                    // Sampled Audio: Draw waveform
+                    lightningbeam_core::clip::AudioClipType::Sampled { audio_pool_index } => {
+                        if let Some(waveform) = waveform_cache.get(audio_pool_index) {
+                            // Calculate absolute screen x where clip starts (can be offscreen)
+                            let clip_start_x = rect.min.x + start_x;
+
+                            Self::render_audio_waveform(
+                                painter,
+                                clip_rect,
+                                clip_start_x,
+                                clip_color, // Pass clip background color for tinting
+                                waveform,
+                                clip.duration,
+                                self.pixels_per_second,
+                                clip_instance.trim_start,
+                                theme,
+                                ui.ctx(),
+                            );
+                        }
                     }
                 }
             }
@@ -1673,7 +1839,7 @@ impl PaneRenderer for TimelinePane {
         // Render layer rows with clipping
         ui.set_clip_rect(content_rect.intersect(original_clip_rect));
-        self.render_layers(ui, content_rect, shared.theme, document, shared.active_layer_id, shared.selection, shared.midi_event_cache);
+        self.render_layers(ui, content_rect, shared.theme, document, shared.active_layer_id, shared.selection, shared.midi_event_cache, shared.waveform_cache);

         // Render playhead on top (clip to timeline area)
         ui.set_clip_rect(timeline_rect.intersect(original_clip_rect));
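Annotation: the x-coordinate math in render_audio_waveform reduces to one mapping worth keeping in view. A sketch of it in isolation, using the same formulas as the diff (the standalone function itself is illustrative):

```rust
/// Map a peak index to its on-screen x position, mirroring the timeline math:
///   full_source_width = clip_duration * pixels_per_second
///   pixels_per_peak   = full_source_width / total_peaks
/// Peaks before `offset_peak_index` (the trimmed-away region) land left of
/// `clip_start_x` and are culled by the visible-range clamp in the renderer.
fn peak_x(
    peak_index: usize,
    offset_peak_index: usize,
    clip_start_x: f32,
    clip_duration: f64,
    pixels_per_second: f32,
    total_peaks: usize,
) -> f32 {
    let pixels_per_peak = clip_duration * pixels_per_second as f64 / total_peaks as f64;
    clip_start_x + ((peak_index as isize - offset_peak_index as isize) as f64 * pixels_per_peak) as f32
}
```

With 20,000 peaks requested per file, a 200 s clip at MIN_PIXELS_PER_SECOND = 1.0 yields pixels_per_peak of 0.01, so many peaks collapse onto each pixel; the full-opacity waveform color and ceil-based line width above are what keep that case from rendering as gaps or washed-out blends.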