initial audio export

This commit is contained in:
Skyler Lehmkuhl 2025-12-03 17:54:15 -05:00
parent 2a7c26df57
commit 727d782190
21 changed files with 2657 additions and 78 deletions

BIN
daw-backend/audio.flac Normal file

Binary file not shown.

View File

@ -30,6 +30,10 @@ pub struct Engine {
query_rx: rtrb::Consumer<Query>,
query_response_tx: rtrb::Producer<QueryResponse>,
// Background chunk generation channel
chunk_generation_rx: std::sync::mpsc::Receiver<AudioEvent>,
chunk_generation_tx: std::sync::mpsc::Sender<AudioEvent>,
// Shared playhead for UI reads
playhead_atomic: Arc<AtomicU64>,
@ -76,6 +80,9 @@ impl Engine {
// Calculate a reasonable buffer size for the pool (typical audio callback size * channels)
let buffer_size = 512 * channels as usize;
// Create channel for background chunk generation
let (chunk_generation_tx, chunk_generation_rx) = std::sync::mpsc::channel();
Self {
project: Project::new(sample_rate),
audio_pool: AudioClipPool::new(),
@ -89,6 +96,8 @@ impl Engine {
event_tx,
query_rx,
query_response_tx,
chunk_generation_rx,
chunk_generation_tx,
playhead_atomic: Arc::new(AtomicU64::new(0)),
next_midi_clip_id_atomic: Arc::new(AtomicU32::new(0)),
frames_since_last_event: 0,
@ -188,6 +197,7 @@ impl Engine {
next_midi_clip_id: Arc::clone(&self.next_midi_clip_id_atomic),
sample_rate: self.sample_rate,
channels: self.channels,
cached_export_response: None,
}
}
@ -224,6 +234,14 @@ impl Engine {
self.handle_query(query);
}
// Forward chunk generation events from background threads
while let Ok(event) = self.chunk_generation_rx.try_recv() {
if let AudioEvent::WaveformChunksReady { pool_index, detail_level, ref chunks } = event {
println!("📬 [AUDIO THREAD] Received {} chunks for pool {} level {}, forwarding to UI", chunks.len(), pool_index, detail_level);
}
let _ = self.event_tx.push(event);
}
if self.playing {
// Ensure mix buffer is sized correctly
if self.mix_buffer.len() != output.len() {
@ -483,6 +501,7 @@ impl Engine {
let _ = self.event_tx.push(AudioEvent::TrackCreated(track_id, false, name));
}
Command::AddAudioFile(path, data, channels, sample_rate) => {
println!("🎵 [ENGINE] Received AddAudioFile command for: {}", path);
// Detect original format from file extension
let path_buf = std::path::PathBuf::from(path.clone());
let original_format = path_buf.extension()
@ -491,13 +510,60 @@ impl Engine {
// Create AudioFile and add to pool
let audio_file = crate::audio::pool::AudioFile::with_format(
path_buf,
data,
path_buf.clone(),
data.clone(), // Clone data for background thread
channels,
sample_rate,
original_format,
);
let pool_index = self.audio_pool.add_file(audio_file);
println!("📦 [ENGINE] Added to pool at index {}", pool_index);
// Generate Level 0 (overview) waveform chunks asynchronously in background thread
let chunk_tx = self.chunk_generation_tx.clone();
let duration = data.len() as f64 / (sample_rate as f64 * channels as f64);
println!("🔄 [ENGINE] Spawning background thread to generate Level 0 chunks for pool {}", pool_index);
std::thread::spawn(move || {
// Create temporary AudioFile for chunk generation
let temp_audio_file = crate::audio::pool::AudioFile::with_format(
path_buf,
data,
channels,
sample_rate,
None,
);
// Generate Level 0 chunks
let chunk_count = crate::audio::waveform_cache::WaveformCache::calculate_chunk_count(duration, 0);
println!("🔄 [BACKGROUND] Generating {} Level 0 chunks for pool {}", chunk_count, pool_index);
let chunks = crate::audio::waveform_cache::WaveformCache::generate_chunks(
&temp_audio_file,
pool_index,
0, // Level 0 (overview)
&(0..chunk_count).collect::<Vec<_>>(),
);
// Send chunks via MPSC channel (will be forwarded by audio thread)
if !chunks.is_empty() {
println!("📤 [BACKGROUND] Generated {} chunks, sending to audio thread (pool {})", chunks.len(), pool_index);
let event_chunks: Vec<(u32, (f64, f64), Vec<crate::io::WaveformPeak>)> = chunks
.into_iter()
.map(|chunk| (chunk.chunk_index, chunk.time_range, chunk.peaks))
.collect();
match chunk_tx.send(AudioEvent::WaveformChunksReady {
pool_index,
detail_level: 0,
chunks: event_chunks,
}) {
Ok(_) => println!("✅ [BACKGROUND] Chunks sent successfully for pool {}", pool_index),
Err(e) => eprintln!("❌ [BACKGROUND] Failed to send chunks: {}", e),
}
} else {
eprintln!("⚠️ [BACKGROUND] No chunks generated for pool {}", pool_index);
}
});
// Notify UI about the new audio file
let _ = self.event_tx.push(AudioEvent::AudioFileAdded(pool_index, path));
}
@ -1446,6 +1512,62 @@ impl Engine {
}
}
}
Command::GenerateWaveformChunks {
pool_index,
detail_level,
chunk_indices,
priority: _priority, // TODO: Use priority for scheduling
} => {
println!("🔧 [ENGINE] Received GenerateWaveformChunks command: pool={}, level={}, chunks={:?}",
pool_index, detail_level, chunk_indices);
// Get audio file data from pool
if let Some(audio_file) = self.audio_pool.get_file(pool_index) {
println!("✅ [ENGINE] Found audio file in pool, spawning background thread");
// Clone necessary data for background thread
let data = audio_file.data.clone();
let channels = audio_file.channels;
let sample_rate = audio_file.sample_rate;
let path = audio_file.path.clone();
let chunk_tx = self.chunk_generation_tx.clone();
// Generate chunks in background thread to avoid blocking audio thread
std::thread::spawn(move || {
// Create temporary AudioFile for chunk generation
let temp_audio_file = crate::audio::pool::AudioFile::with_format(
path,
data,
channels,
sample_rate,
None,
);
// Generate requested chunks
let chunks = crate::audio::waveform_cache::WaveformCache::generate_chunks(
&temp_audio_file,
pool_index,
detail_level,
&chunk_indices,
);
// Send chunks via MPSC channel (will be forwarded by audio thread)
if !chunks.is_empty() {
let event_chunks: Vec<(u32, (f64, f64), Vec<crate::io::WaveformPeak>)> = chunks
.into_iter()
.map(|chunk| (chunk.chunk_index, chunk.time_range, chunk.peaks))
.collect();
let _ = chunk_tx.send(AudioEvent::WaveformChunksReady {
pool_index,
detail_level,
chunks: event_chunks,
});
}
});
} else {
eprintln!("❌ [ENGINE] Pool index {} not found for waveform generation", pool_index);
}
}
}
}
@ -1693,7 +1815,16 @@ impl Engine {
// Use raw pointer to get midi_pool reference before mutable borrow of project
let midi_pool_ptr: *const _ = &self.project.midi_clip_pool;
let midi_pool_ref = unsafe { &*midi_pool_ptr };
match crate::audio::export_audio(&mut self.project, &self.audio_pool, midi_pool_ref, &settings, &output_path) {
// Pass event_tx directly - Rust allows borrowing different fields simultaneously
match crate::audio::export_audio(
&mut self.project,
&self.audio_pool,
midi_pool_ref,
&settings,
&output_path,
Some(&mut self.event_tx),
) {
Ok(()) => QueryResponse::AudioExported(Ok(())),
Err(e) => QueryResponse::AudioExported(Err(e)),
}
@ -1747,14 +1878,59 @@ impl Engine {
// Create AudioFile and add to pool
let audio_file = crate::audio::pool::AudioFile::with_format(
path_buf,
data,
path_buf.clone(),
data.clone(), // Clone data for background thread
channels,
sample_rate,
original_format,
);
let pool_index = self.audio_pool.add_file(audio_file);
// Generate Level 0 (overview) waveform chunks asynchronously in background thread
let chunk_tx = self.chunk_generation_tx.clone();
let duration = data.len() as f64 / (sample_rate as f64 * channels as f64);
println!("🔄 [ENGINE] Spawning background thread to generate Level 0 chunks for pool {}", pool_index);
std::thread::spawn(move || {
// Create temporary AudioFile for chunk generation
let temp_audio_file = crate::audio::pool::AudioFile::with_format(
path_buf,
data,
channels,
sample_rate,
None,
);
// Generate Level 0 chunks
let chunk_count = crate::audio::waveform_cache::WaveformCache::calculate_chunk_count(duration, 0);
println!("🔄 [BACKGROUND] Generating {} Level 0 chunks for pool {}", chunk_count, pool_index);
let chunks = crate::audio::waveform_cache::WaveformCache::generate_chunks(
&temp_audio_file,
pool_index,
0, // Level 0 (overview)
&(0..chunk_count).collect::<Vec<_>>(),
);
// Send chunks via MPSC channel (will be forwarded by audio thread)
if !chunks.is_empty() {
println!("📤 [BACKGROUND] Generated {} chunks, sending to audio thread (pool {})", chunks.len(), pool_index);
let event_chunks: Vec<(u32, (f64, f64), Vec<crate::io::WaveformPeak>)> = chunks
.into_iter()
.map(|chunk| (chunk.chunk_index, chunk.time_range, chunk.peaks))
.collect();
match chunk_tx.send(AudioEvent::WaveformChunksReady {
pool_index,
detail_level: 0,
chunks: event_chunks,
}) {
Ok(_) => println!("✅ [BACKGROUND] Chunks sent successfully for pool {}", pool_index),
Err(e) => eprintln!("❌ [BACKGROUND] Failed to send chunks: {}", e),
}
} else {
eprintln!("⚠️ [BACKGROUND] No chunks generated for pool {}", pool_index);
}
});
// Notify UI about the new audio file (for event listeners)
let _ = self.event_tx.push(AudioEvent::AudioFileAdded(pool_index, path));
@ -1779,7 +1955,10 @@ impl Engine {
};
// Send response back
let _ = self.query_response_tx.push(response);
match self.query_response_tx.push(response) {
Ok(_) => {},
Err(_) => eprintln!("❌ [ENGINE] FAILED to send query response - queue full!"),
}
}
/// Handle starting a recording
@ -2051,6 +2230,8 @@ pub struct EngineController {
sample_rate: u32,
#[allow(dead_code)] // Used in public getter method
channels: u32,
/// Cached export response found by other query methods
cached_export_response: Option<Result<(), String>>,
}
// Safety: EngineController is safe to Send across threads because:
@ -2169,7 +2350,10 @@ impl EngineController {
/// Add an audio file to the pool (must be called from non-audio thread with pre-loaded data)
pub fn add_audio_file(&mut self, path: String, data: Vec<f32>, channels: u32, sample_rate: u32) {
let _ = self.command_tx.push(Command::AddAudioFile(path, data, channels, sample_rate));
match self.command_tx.push(Command::AddAudioFile(path.clone(), data, channels, sample_rate)) {
Ok(_) => println!("✅ [CONTROLLER] AddAudioFile command queued successfully: {}", path),
Err(_) => eprintln!("❌ [CONTROLLER] Failed to queue AddAudioFile command (buffer full): {}", path),
}
}
/// Add an audio file to the pool synchronously and get the pool index
@ -2660,13 +2844,21 @@ impl EngineController {
return Err("Failed to send query - queue full".to_string());
}
// Wait for response (with timeout)
// Wait for response (with shorter timeout to avoid blocking UI during export)
let start = std::time::Instant::now();
let timeout = std::time::Duration::from_secs(2);
let timeout = std::time::Duration::from_millis(50);
while start.elapsed() < timeout {
if let Ok(QueryResponse::PoolWaveform(result)) = self.query_response_rx.pop() {
return result;
if let Ok(response) = self.query_response_rx.pop() {
match response {
QueryResponse::PoolWaveform(result) => return result,
QueryResponse::AudioExported(result) => {
// Cache for poll_export_completion()
println!("💾 [CONTROLLER] Caching AudioExported response from get_pool_waveform");
self.cached_export_response = Some(result);
}
_ => {} // Discard other responses
}
}
std::thread::sleep(std::time::Duration::from_millis(1));
}
@ -2681,13 +2873,21 @@ impl EngineController {
return Err("Failed to send query - queue full".to_string());
}
// Wait for response (with timeout)
// Wait for response (with shorter timeout to avoid blocking UI during export)
let start = std::time::Instant::now();
let timeout = std::time::Duration::from_secs(2);
let timeout = std::time::Duration::from_millis(50);
while start.elapsed() < timeout {
if let Ok(QueryResponse::PoolFileInfo(result)) = self.query_response_rx.pop() {
return result;
if let Ok(response) = self.query_response_rx.pop() {
match response {
QueryResponse::PoolFileInfo(result) => return result,
QueryResponse::AudioExported(result) => {
// Cache for poll_export_completion()
println!("💾 [CONTROLLER] Caching AudioExported response from get_pool_file_info");
self.cached_export_response = Some(result);
}
_ => {} // Discard other responses
}
}
std::thread::sleep(std::time::Duration::from_millis(1));
}
@ -2695,6 +2895,29 @@ impl EngineController {
Err("Query timeout".to_string())
}
/// Request waveform chunks to be generated
///
/// This is an asynchronous command - chunks will be returned via WaveformChunksReady events
///
/// # Errors
/// Returns an error if the command ring buffer is full.
pub fn generate_waveform_chunks(
    &mut self,
    pool_index: usize,
    detail_level: u8,
    chunk_indices: Vec<u32>,
    priority: u8,
) -> Result<(), String> {
    let command = Command::GenerateWaveformChunks {
        pool_index,
        detail_level,
        chunk_indices,
        priority,
    };
    // `push` fails only when the SPSC ring buffer is full; surface that to the caller
    // instead of the non-idiomatic `if let Err(_)` pattern.
    self.command_tx
        .push(command)
        .map_err(|_| "Failed to send command - queue full".to_string())
}
/// Load audio pool from serialized entries
pub fn load_audio_pool(&mut self, entries: Vec<crate::audio::pool::AudioPoolEntry>, project_path: &std::path::Path) -> Result<Vec<usize>, String> {
// Send command via query mechanism
@ -2779,19 +3002,57 @@ impl EngineController {
Err("Query timeout".to_string())
}
/// Export audio to a file
pub fn export_audio<P: AsRef<std::path::Path>>(&mut self, settings: &crate::audio::ExportSettings, output_path: P) -> Result<(), String> {
/// Start an audio export (non-blocking)
///
/// Sends the export query to the audio thread and returns immediately.
/// Use `poll_export_completion()` to check for completion.
///
/// # Errors
/// Returns an error if the query ring buffer is full.
pub fn start_export_audio<P: AsRef<std::path::Path>>(&mut self, settings: &crate::audio::ExportSettings, output_path: P) -> Result<(), String> {
    // `push` fails only when the SPSC ring buffer is full; report that as a String
    // error instead of the non-idiomatic `if let Err(_)` pattern.
    self.query_tx
        .push(Query::ExportAudio(settings.clone(), output_path.as_ref().to_path_buf()))
        .map_err(|_| "Failed to send export query - queue full".to_string())
}
/// Poll for export completion (non-blocking)
///
/// Returns:
/// - `Ok(Some(result))` if export completed (result may be Ok or Err)
/// - `Ok(None)` if export is still in progress
/// - `Err` should not happen in normal operation
pub fn poll_export_completion(&mut self) -> Result<Option<Result<(), String>>, String> {
    // A synchronous query method may already have captured the export response
    // while draining the shared response queue; consume that first.
    if let Some(cached) = self.cached_export_response.take() {
        println!("✅ [CONTROLLER] Found cached AudioExported response!");
        return Ok(Some(cached));
    }
    // Drain the response queue looking for the export result.
    loop {
        let response = match self.query_response_rx.pop() {
            Ok(r) => r,
            Err(_) => break, // queue empty — export still in progress
        };
        println!("📥 [CONTROLLER] Received response: {:?}", std::mem::discriminant(&response));
        match response {
            QueryResponse::AudioExported(result) => {
                println!("✅ [CONTROLLER] Found AudioExported response!");
                return Ok(Some(result));
            }
            // Other responses belong to synchronous queries; drop them.
            _ => println!("⏭️ [CONTROLLER] Skipping non-export response"),
        }
    }
    Ok(None)
}
/// Export audio to a file (blocking)
///
/// This is a convenience method that calls start_export_audio and waits for completion.
/// For non-blocking export with progress updates, use start_export_audio() and poll_export_completion().
pub fn export_audio<P: AsRef<std::path::Path>>(&mut self, settings: &crate::audio::ExportSettings, output_path: P) -> Result<(), String> {
self.start_export_audio(settings, &output_path)?;
// Wait for response (with longer timeout since export can take a while)
let start = std::time::Instant::now();
let timeout = std::time::Duration::from_secs(300); // 5 minute timeout for export
while start.elapsed() < timeout {
if let Ok(QueryResponse::AudioExported(result)) = self.query_response_rx.pop() {
if let Some(result) = self.poll_export_completion()? {
return result;
}
std::thread::sleep(std::time::Duration::from_millis(100));

View File

@ -2,6 +2,7 @@ use super::buffer_pool::BufferPool;
use super::midi_pool::MidiClipPool;
use super::pool::AudioPool;
use super::project::Project;
use crate::command::AudioEvent;
use std::path::Path;
/// Supported export formats
@ -59,15 +60,20 @@ impl Default for ExportSettings {
///
/// This performs offline rendering, processing the entire timeline
/// in chunks to generate the final audio file.
///
/// If an event producer is provided, progress events will be sent
/// after each chunk with (frames_rendered, total_frames).
pub fn export_audio<P: AsRef<Path>>(
project: &mut Project,
pool: &AudioPool,
midi_pool: &MidiClipPool,
settings: &ExportSettings,
output_path: P,
) -> Result<(), String> {
mut event_tx: Option<&mut rtrb::Producer<AudioEvent>>,
) -> Result<(), String>
{
// Render the project to memory
let samples = render_to_memory(project, pool, midi_pool, settings)?;
let samples = render_to_memory(project, pool, midi_pool, settings, event_tx)?;
// Write to file based on format
match settings.format {
@ -79,12 +85,23 @@ pub fn export_audio<P: AsRef<Path>>(
}
/// Render the project to memory
fn render_to_memory(
///
/// This function renders the project's audio to an in-memory buffer
/// of interleaved f32 samples. This is useful for custom export formats
/// or for passing audio to external encoders (e.g., FFmpeg for MP3/AAC).
///
/// The returned samples are interleaved (L,R,L,R,... for stereo).
///
/// If an event producer is provided, progress events will be sent
/// after each chunk with (frames_rendered, total_frames).
pub fn render_to_memory(
project: &mut Project,
pool: &AudioPool,
midi_pool: &MidiClipPool,
settings: &ExportSettings,
) -> Result<Vec<f32>, String> {
mut event_tx: Option<&mut rtrb::Producer<AudioEvent>>,
) -> Result<Vec<f32>, String>
{
// Calculate total number of frames
let duration = settings.end_time - settings.start_time;
let total_frames = (duration * settings.sample_rate as f64).round() as usize;
@ -106,6 +123,7 @@ fn render_to_memory(
let mut playhead = settings.start_time;
let chunk_duration = CHUNK_FRAMES as f64 / settings.sample_rate as f64;
let mut frames_rendered = 0;
// Render the entire timeline in chunks
while playhead < settings.end_time {
@ -138,6 +156,15 @@ fn render_to_memory(
// Append to output
all_samples.extend_from_slice(&render_buffer[..samples_needed]);
// Update progress
frames_rendered += samples_needed / settings.channels as usize;
if let Some(event_tx) = event_tx.as_mut() {
let _ = event_tx.push(AudioEvent::ExportProgress {
frames_rendered,
total_frames,
});
}
playhead += chunk_duration;
}

View File

@ -13,6 +13,7 @@ pub mod project;
pub mod recording;
pub mod sample_loader;
pub mod track;
pub mod waveform_cache;
pub use automation::{AutomationLane, AutomationLaneId, AutomationPoint, CurveType, ParameterId};
pub use buffer_pool::BufferPool;
@ -27,3 +28,4 @@ pub use project::Project;
pub use recording::RecordingState;
pub use sample_loader::{load_audio_file, SampleData};
pub use track::{AudioTrack, Metatrack, MidiTrack, RenderContext, Track, TrackId, TrackNode};
pub use waveform_cache::{ChunkPriority, DetailLevel, WaveformCache};

View File

@ -170,6 +170,8 @@ impl AudioFile {
/// Pool of shared audio files (audio clip content)
pub struct AudioClipPool {
files: Vec<AudioFile>,
/// Waveform chunk cache for multi-resolution waveform generation
waveform_cache: crate::audio::waveform_cache::WaveformCache,
}
/// Type alias for backwards compatibility
@ -180,6 +182,7 @@ impl AudioClipPool {
/// Create an empty clip pool with a default 100 MB waveform chunk cache.
pub fn new() -> Self {
Self {
files: Vec::new(),
waveform_cache: crate::audio::waveform_cache::WaveformCache::new(100), // 100MB cache
}
}
@ -369,6 +372,97 @@ impl AudioClipPool {
rendered_frames * dst_channels
}
/// Generate waveform chunks for a file in the pool
///
/// This generates chunks at a specific detail level and caches them.
/// Returns the generated chunks (empty if `pool_index` is out of range).
pub fn generate_waveform_chunks(
    &mut self,
    pool_index: usize,
    detail_level: u8,
    chunk_indices: &[u32],
) -> Vec<crate::io::WaveformChunk> {
    let file = if let Some(found) = self.files.get(pool_index) {
        found
    } else {
        return Vec::new();
    };
    let generated = crate::audio::waveform_cache::WaveformCache::generate_chunks(
        file,
        pool_index,
        detail_level,
        chunk_indices,
    );
    // Cache each freshly generated chunk so later lookups can reuse it.
    for chunk in &generated {
        self.waveform_cache.store_chunk(
            crate::io::WaveformChunkKey {
                pool_index,
                detail_level: chunk.detail_level,
                chunk_index: chunk.chunk_index,
            },
            chunk.peaks.clone(),
        );
    }
    generated
}
/// Generate Level 0 (overview) chunks for a file
///
/// This should be called immediately when a file is imported.
/// Returns the generated chunks (empty if `pool_index` is out of range).
pub fn generate_overview_chunks(
    &mut self,
    pool_index: usize,
) -> Vec<crate::io::WaveformChunk> {
    match self.files.get(pool_index) {
        // Disjoint field borrows: `files` (shared) and `waveform_cache` (mutable).
        Some(file) => self.waveform_cache.generate_overview_chunks(file, pool_index),
        None => Vec::new(),
    }
}
/// Get a cached waveform chunk
///
/// Returns `None` when the chunk has not been generated yet.
pub fn get_waveform_chunk(
    &self,
    pool_index: usize,
    detail_level: u8,
    chunk_index: u32,
) -> Option<&Vec<crate::io::WaveformPeak>> {
    self.waveform_cache.get_chunk(&crate::io::WaveformChunkKey {
        pool_index,
        detail_level,
        chunk_index,
    })
}
/// Check if a waveform chunk is cached
pub fn has_waveform_chunk(
    &self,
    pool_index: usize,
    detail_level: u8,
    chunk_index: u32,
) -> bool {
    self.waveform_cache.has_chunk(&crate::io::WaveformChunkKey {
        pool_index,
        detail_level,
        chunk_index,
    })
}
/// Get waveform cache memory usage in MB
///
/// This is an estimate derived from the cached peak counts
/// (see `WaveformCache::store_chunk`), not a measured allocation size.
pub fn waveform_cache_memory_mb(&self) -> f64 {
self.waveform_cache.memory_usage_mb()
}
/// Get number of cached waveform chunks
pub fn waveform_chunk_count(&self) -> usize {
self.waveform_cache.chunk_count()
}
}
impl Default for AudioClipPool {

View File

@ -0,0 +1,290 @@
//! Waveform chunk cache for scalable multi-resolution waveform generation
//!
//! This module provides a chunk-based waveform caching system that generates
//! waveform data progressively at multiple detail levels, avoiding the limitations
//! of the old fixed 20,000-peak approach.
use crate::io::{WaveformChunk, WaveformChunkKey, WaveformPeak};
use crate::audio::pool::AudioFile;
use std::collections::HashMap;
/// Detail levels for multi-resolution waveform storage
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DetailLevel {
    Overview = 0, // 1 peak per second
    Low = 1,      // 10 peaks per second
    Medium = 2,   // 100 peaks per second
    High = 3,     // 1000 peaks per second
    Max = 4,      // Full resolution (sample-accurate)
}

impl DetailLevel {
    /// Get peaks per second for this detail level
    pub fn peaks_per_second(self) -> usize {
        match self {
            Self::Overview => 1,
            Self::Low => 10,
            Self::Medium => 100,
            Self::High => 1000,
            Self::Max => 48000, // Approximate max for sample-accurate
        }
    }

    /// Create from u8 value
    ///
    /// Returns `None` for values outside `0..=4`.
    pub fn from_u8(value: u8) -> Option<Self> {
        // The discriminants are dense, so a table lookup maps value -> variant.
        const LEVELS: [DetailLevel; 5] = [
            DetailLevel::Overview,
            DetailLevel::Low,
            DetailLevel::Medium,
            DetailLevel::High,
            DetailLevel::Max,
        ];
        LEVELS.get(value as usize).copied()
    }
}
/// Priority for chunk generation
///
/// Ordered so that `High > Medium > Low` (derived `Ord` compares discriminants),
/// allowing pending requests to be sorted by urgency.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum ChunkPriority {
Low = 0, // Background generation
Medium = 1, // Precache adjacent to viewport
High = 2, // Visible in current viewport
}
/// Chunk generation request
///
/// Pairs the chunk to generate with how urgently it is needed.
#[derive(Debug, Clone)]
pub struct ChunkGenerationRequest {
pub key: WaveformChunkKey,
pub priority: ChunkPriority,
}
/// Waveform chunk cache with multi-resolution support
pub struct WaveformCache {
/// Cached chunks indexed by key
chunks: HashMap<WaveformChunkKey, Vec<WaveformPeak>>,
/// Maximum memory usage in MB (for future LRU eviction)
// NOTE(review): not yet enforced — `store_chunk` only tracks usage; eviction is a TODO.
max_memory_mb: usize,
/// Current memory usage estimate in bytes
current_memory_bytes: usize,
}
impl WaveformCache {
/// Create a new waveform cache with the specified memory limit
pub fn new(max_memory_mb: usize) -> Self {
Self {
chunks: HashMap::new(),
max_memory_mb,
current_memory_bytes: 0,
}
}
/// Get a chunk from the cache
pub fn get_chunk(&self, key: &WaveformChunkKey) -> Option<&Vec<WaveformPeak>> {
self.chunks.get(key)
}
/// Store a chunk in the cache
pub fn store_chunk(&mut self, key: WaveformChunkKey, peaks: Vec<WaveformPeak>) {
let chunk_size = peaks.len() * std::mem::size_of::<WaveformPeak>();
self.current_memory_bytes += chunk_size;
self.chunks.insert(key, peaks);
// TODO: Implement LRU eviction if memory exceeds limit
}
/// Check if a chunk exists in the cache
pub fn has_chunk(&self, key: &WaveformChunkKey) -> bool {
self.chunks.contains_key(key)
}
/// Clear all chunks for a specific pool index (when file is unloaded)
pub fn clear_pool(&mut self, pool_index: usize) {
self.chunks.retain(|key, peaks| {
if key.pool_index == pool_index {
let chunk_size = peaks.len() * std::mem::size_of::<WaveformPeak>();
self.current_memory_bytes = self.current_memory_bytes.saturating_sub(chunk_size);
false
} else {
true
}
});
}
/// Generate a single waveform chunk for an audio file
///
/// This generates peaks for a specific time range at a specific detail level.
/// The chunk covers a time range based on the detail level and chunk index.
pub fn generate_chunk(
audio_file: &AudioFile,
detail_level: u8,
chunk_index: u32,
) -> Option<WaveformChunk> {
let level = DetailLevel::from_u8(detail_level)?;
let peaks_per_second = level.peaks_per_second();
// Calculate time range for this chunk based on detail level
// Each chunk covers a varying amount of time depending on detail level
let chunk_duration_seconds = match level {
DetailLevel::Overview => 60.0, // 60 seconds per chunk (60 peaks)
DetailLevel::Low => 30.0, // 30 seconds per chunk (300 peaks)
DetailLevel::Medium => 10.0, // 10 seconds per chunk (1000 peaks)
DetailLevel::High => 5.0, // 5 seconds per chunk (5000 peaks)
DetailLevel::Max => 1.0, // 1 second per chunk (48000 peaks)
};
let start_time = chunk_index as f64 * chunk_duration_seconds;
let end_time = start_time + chunk_duration_seconds;
// Check if this chunk is within the audio file duration
let audio_duration = audio_file.duration_seconds();
if start_time >= audio_duration {
return None; // Chunk is completely beyond file end
}
// Clamp end_time to file duration
let end_time = end_time.min(audio_duration);
// Calculate frame range
let start_frame = (start_time * audio_file.sample_rate as f64) as usize;
let end_frame = (end_time * audio_file.sample_rate as f64) as usize;
// Calculate number of peaks for this time range
let duration = end_time - start_time;
let target_peaks = (duration * peaks_per_second as f64).ceil() as usize;
if target_peaks == 0 {
return None;
}
// Generate peaks using the existing method
let peaks = audio_file.generate_waveform_overview_range(
start_frame,
end_frame,
target_peaks,
);
Some(WaveformChunk {
audio_pool_index: 0, // Will be set by caller
detail_level,
chunk_index,
time_range: (start_time, end_time),
peaks,
})
}
/// Generate multiple chunks for an audio file
///
/// This is a convenience method for generating several chunks at once.
pub fn generate_chunks(
audio_file: &AudioFile,
pool_index: usize,
detail_level: u8,
chunk_indices: &[u32],
) -> Vec<WaveformChunk> {
chunk_indices
.iter()
.filter_map(|&chunk_index| {
let mut chunk = Self::generate_chunk(audio_file, detail_level, chunk_index)?;
chunk.audio_pool_index = pool_index;
Some(chunk)
})
.collect()
}
/// Calculate how many chunks are needed for a file at a given detail level
pub fn calculate_chunk_count(duration_seconds: f64, detail_level: u8) -> u32 {
let level = match DetailLevel::from_u8(detail_level) {
Some(l) => l,
None => return 0,
};
let chunk_duration_seconds = match level {
DetailLevel::Overview => 60.0,
DetailLevel::Low => 30.0,
DetailLevel::Medium => 10.0,
DetailLevel::High => 5.0,
DetailLevel::Max => 1.0,
};
((duration_seconds / chunk_duration_seconds).ceil() as u32).max(1)
}
/// Generate all Level 0 (overview) chunks for a file
///
/// This should be called immediately when a file is imported to provide
/// instant thumbnail display.
pub fn generate_overview_chunks(
&mut self,
audio_file: &AudioFile,
pool_index: usize,
) -> Vec<WaveformChunk> {
let duration = audio_file.duration_seconds();
let chunk_count = Self::calculate_chunk_count(duration, 0);
let chunk_indices: Vec<u32> = (0..chunk_count).collect();
let chunks = Self::generate_chunks(audio_file, pool_index, 0, &chunk_indices);
// Store chunks in cache
for chunk in &chunks {
let key = WaveformChunkKey {
pool_index,
detail_level: chunk.detail_level,
chunk_index: chunk.chunk_index,
};
self.store_chunk(key, chunk.peaks.clone());
}
chunks
}
/// Get current memory usage in bytes
pub fn memory_usage_bytes(&self) -> usize {
self.current_memory_bytes
}
/// Get current memory usage in megabytes
pub fn memory_usage_mb(&self) -> f64 {
self.current_memory_bytes as f64 / 1_000_000.0
}
/// Get number of cached chunks
pub fn chunk_count(&self) -> usize {
self.chunks.len()
}
}
impl Default for WaveformCache {
/// Equivalent to `WaveformCache::new(100)` — a 100 MB cache.
fn default() -> Self {
Self::new(100) // Default 100MB cache
}
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_detail_level_peaks_per_second() {
        assert_eq!(DetailLevel::Overview.peaks_per_second(), 1);
        assert_eq!(DetailLevel::Low.peaks_per_second(), 10);
        assert_eq!(DetailLevel::Medium.peaks_per_second(), 100);
        assert_eq!(DetailLevel::High.peaks_per_second(), 1000);
        // Previously untested variant
        assert_eq!(DetailLevel::Max.peaks_per_second(), 48000);
    }

    #[test]
    fn test_detail_level_from_u8_round_trip() {
        // Every valid discriminant maps back to itself
        for value in 0u8..=4 {
            let level = DetailLevel::from_u8(value).expect("levels 0-4 are valid");
            assert_eq!(level as u8, value);
        }
        // Out-of-range values are rejected
        assert_eq!(DetailLevel::from_u8(5), None);
    }

    #[test]
    fn test_chunk_count_calculation() {
        // 60 second file, Overview level (60s chunks) = 1 chunk
        assert_eq!(WaveformCache::calculate_chunk_count(60.0, 0), 1);
        // 120 second file, Overview level (60s chunks) = 2 chunks
        assert_eq!(WaveformCache::calculate_chunk_count(120.0, 0), 2);
        // 10 second file, Medium level (10s chunks) = 1 chunk
        assert_eq!(WaveformCache::calculate_chunk_count(10.0, 2), 1);
        // 25 second file, Medium level (10s chunks) = 3 chunks
        assert_eq!(WaveformCache::calculate_chunk_count(25.0, 2), 3);
        // Invalid detail level yields 0 chunks
        assert_eq!(WaveformCache::calculate_chunk_count(60.0, 99), 0);
        // Zero-length file still gets at least one chunk (`.max(1)`)
        assert_eq!(WaveformCache::calculate_chunk_count(0.0, 0), 1);
    }
}

View File

@ -176,6 +176,16 @@ pub enum Command {
AutomationRemoveKeyframe(TrackId, u32, f64),
/// Set the display name of an AutomationInput node (track_id, node_id, name)
AutomationSetName(TrackId, u32, String),
// Waveform chunk generation commands
/// Generate waveform chunks for an audio file
/// (pool_index, detail_level, chunk_indices, priority)
GenerateWaveformChunks {
pool_index: usize,
detail_level: u8,
chunk_indices: Vec<u32>,
priority: u8, // 0=Low, 1=Medium, 2=High
},
}
/// Events sent from audio thread back to UI/control thread
@ -228,6 +238,21 @@ pub enum AudioEvent {
GraphPresetLoaded(TrackId),
/// Preset has been saved to file (track_id, preset_path)
GraphPresetSaved(TrackId, String),
/// Export progress (frames_rendered, total_frames)
ExportProgress {
frames_rendered: usize,
total_frames: usize,
},
/// Waveform generated for audio pool file (pool_index, waveform)
WaveformGenerated(usize, Vec<WaveformPeak>),
/// Waveform chunks ready for retrieval
/// (pool_index, detail_level, chunks: Vec<(chunk_index, time_range, peaks)>)
WaveformChunksReady {
pool_index: usize,
detail_level: u8,
chunks: Vec<(u32, (f64, f64), Vec<WaveformPeak>)>,
},
}
/// Synchronous queries sent from UI thread to audio thread

View File

@ -13,6 +13,24 @@ pub struct WaveformPeak {
pub max: f32,
}
/// Uniquely identifies a waveform chunk
///
/// Derives `Hash`/`Eq` so it can serve as the waveform cache's map key.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct WaveformChunkKey {
pub pool_index: usize,
pub detail_level: u8, // 0-4
pub chunk_index: u32, // Sequential chunk number
}
/// A chunk of waveform data at a specific detail level
#[derive(Debug, Clone)]
pub struct WaveformChunk {
pub audio_pool_index: usize,
pub detail_level: u8, // 0-4 (overview to max detail)
pub chunk_index: u32, // Sequential chunk number
pub time_range: (f64, f64), // Start and end time in seconds
pub peaks: Vec<WaveformPeak>, // Variable length based on level
}
pub struct AudioFile {
pub data: Vec<f32>,
pub channels: u32,

View File

@ -3,7 +3,7 @@ pub mod midi_file;
pub mod midi_input;
pub mod wav_writer;
pub use audio_file::{AudioFile, WaveformPeak};
pub use audio_file::{AudioFile, WaveformChunk, WaveformChunkKey, WaveformPeak};
pub use midi_file::load_midi_file;
pub use midi_input::MidiInputManager;
pub use wav_writer::WavWriter;

View File

@ -20,7 +20,7 @@ pub use audio::{
pub use audio::node_graph::{GraphPreset, AudioGraph, PresetMetadata, SerializedConnection, SerializedNode};
pub use command::{AudioEvent, Command, OscilloscopeData};
pub use command::types::AutomationKeyframeData;
pub use io::{load_midi_file, AudioFile, WaveformPeak, WavWriter};
pub use io::{load_midi_file, AudioFile, WaveformChunk, WaveformChunkKey, WaveformPeak, WavWriter};
use cpal::traits::{DeviceTrait, HostTrait, StreamTrait};

View File

@ -3,7 +3,7 @@
//! The Document represents a complete animation project with settings
//! and a root graphics object containing the scene graph.
use crate::clip::{AudioClip, ImageAsset, VideoClip, VectorClip};
use crate::clip::{AudioClip, ClipInstance, ImageAsset, VideoClip, VectorClip};
use crate::layer::AnyLayer;
use crate::layout::LayoutNode;
use crate::shape::ShapeColor;
@ -188,6 +188,65 @@ impl Document {
self.width / self.height
}
/// Calculate the actual timeline endpoint based on the last clip
///
/// Returns the end time of the last clip instance across all layers,
/// or the document's duration if no clips are found.
pub fn calculate_timeline_endpoint(&self) -> f64 {
    // End time of one placed instance: an explicit timeline duration wins;
    // otherwise derive it from the trim window, scaled by playback speed.
    let instance_end = |instance: &ClipInstance, clip_duration: f64| -> f64 {
        let effective = match instance.timeline_duration {
            Some(timeline_duration) => timeline_duration,
            None => {
                let trim_end = instance.trim_end.unwrap_or(clip_duration);
                (trim_end - instance.trim_start) / instance.playback_speed
            }
        };
        instance.timeline_start + effective
    };

    // Scan every layer kind and keep the latest instance end time seen.
    let mut latest: f64 = 0.0;
    for layer in &self.root.children {
        match layer {
            crate::layer::AnyLayer::Vector(layer) => {
                for instance in &layer.clip_instances {
                    if let Some(clip) = self.vector_clips.get(&instance.clip_id) {
                        latest = latest.max(instance_end(instance, clip.duration));
                    }
                }
            }
            crate::layer::AnyLayer::Audio(layer) => {
                for instance in &layer.clip_instances {
                    if let Some(clip) = self.audio_clips.get(&instance.clip_id) {
                        latest = latest.max(instance_end(instance, clip.duration));
                    }
                }
            }
            crate::layer::AnyLayer::Video(layer) => {
                for instance in &layer.clip_instances {
                    if let Some(clip) = self.video_clips.get(&instance.clip_id) {
                        latest = latest.max(instance_end(instance, clip.duration));
                    }
                }
            }
        }
    }

    // Fall back to the document duration when no clips were found.
    if latest > 0.0 { latest } else { self.duration }
}
/// Set the current playback time
pub fn set_time(&mut self, time: f64) {
self.current_time = time.max(0.0).min(self.duration);

View File

@ -0,0 +1,588 @@
//! Export settings and types for audio and video export
//!
//! This module contains platform-agnostic export settings that can be used
//! across different frontends (native, web, etc.). The actual export implementation
//! is in the platform-specific code (e.g., lightningbeam-editor).
use serde::{Deserialize, Serialize};
/// Audio export formats
///
/// The chosen format determines which settings fields apply:
/// lossless formats use `bit_depth`, lossy formats use `bitrate_kbps`
/// (see [`AudioFormat::supports_bit_depth`] / [`AudioFormat::uses_bitrate`]).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum AudioFormat {
    /// WAV - Uncompressed audio (large files, best quality)
    Wav,
    /// FLAC - Lossless compressed audio (smaller than WAV, same quality)
    Flac,
    /// MP3 - Lossy compressed audio (widely compatible)
    Mp3,
    /// AAC - Lossy compressed audio (better quality than MP3 at same bitrate)
    Aac,
}
impl AudioFormat {
    /// File extension (without the dot) conventionally used for this format.
    pub fn extension(&self) -> &'static str {
        match *self {
            AudioFormat::Wav => "wav",
            AudioFormat::Flac => "flac",
            AudioFormat::Mp3 => "mp3",
            AudioFormat::Aac => "m4a",
        }
    }

    /// Human-readable label for this format, suitable for UI display.
    pub fn name(&self) -> &'static str {
        match *self {
            AudioFormat::Wav => "WAV (Uncompressed)",
            AudioFormat::Flac => "FLAC (Lossless)",
            AudioFormat::Mp3 => "MP3",
            AudioFormat::Aac => "AAC",
        }
    }

    /// True for lossless formats (WAV/FLAC), which honor an explicit bit depth.
    pub fn supports_bit_depth(&self) -> bool {
        match self {
            AudioFormat::Wav | AudioFormat::Flac => true,
            AudioFormat::Mp3 | AudioFormat::Aac => false,
        }
    }

    /// True for lossy formats (MP3/AAC), which are configured via bitrate.
    /// Exactly the complement of [`AudioFormat::supports_bit_depth`].
    pub fn uses_bitrate(&self) -> bool {
        !self.supports_bit_depth()
    }
}
/// Audio export settings
///
/// A complete description of one audio export job. Which fields are
/// honored depends on `format`: `bit_depth` for WAV/FLAC,
/// `bitrate_kbps` for MP3/AAC. Validate with `validate()` before use.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AudioExportSettings {
    /// Output format
    pub format: AudioFormat,
    /// Sample rate in Hz (e.g., 44100, 48000)
    pub sample_rate: u32,
    /// Number of channels (1 = mono, 2 = stereo)
    pub channels: u32,
    /// Bit depth for lossless formats (16 or 24)
    /// Only used for WAV and FLAC
    pub bit_depth: u16,
    /// Bitrate in kbps for lossy formats (e.g., 128, 192, 256, 320)
    /// Only used for MP3 and AAC
    pub bitrate_kbps: u32,
    /// Start time in seconds (timeline-relative)
    pub start_time: f64,
    /// End time in seconds (timeline-relative; must exceed `start_time`)
    pub end_time: f64,
}
impl Default for AudioExportSettings {
fn default() -> Self {
Self {
format: AudioFormat::Wav,
sample_rate: 48000,
channels: 2,
bit_depth: 24,
bitrate_kbps: 320,
start_time: 0.0,
end_time: 60.0,
}
}
}
impl AudioExportSettings {
    /// Create high quality WAV export settings (48 kHz / 24-bit / stereo)
    pub fn high_quality_wav() -> Self {
        Self {
            format: AudioFormat::Wav,
            sample_rate: 48000,
            channels: 2,
            bit_depth: 24,
            ..Default::default()
        }
    }

    /// Create high quality FLAC export settings (48 kHz / 24-bit / stereo)
    pub fn high_quality_flac() -> Self {
        Self {
            format: AudioFormat::Flac,
            sample_rate: 48000,
            channels: 2,
            bit_depth: 24,
            ..Default::default()
        }
    }

    /// Create high quality AAC export settings (48 kHz / 320 kbps / stereo)
    pub fn high_quality_aac() -> Self {
        Self {
            format: AudioFormat::Aac,
            sample_rate: 48000,
            channels: 2,
            bitrate_kbps: 320,
            ..Default::default()
        }
    }

    /// Create high quality MP3 export settings (44.1 kHz / 320 kbps / stereo)
    pub fn high_quality_mp3() -> Self {
        Self {
            format: AudioFormat::Mp3,
            sample_rate: 44100,
            channels: 2,
            bitrate_kbps: 320,
            ..Default::default()
        }
    }

    /// Create standard quality AAC export settings (44.1 kHz / 256 kbps / stereo)
    pub fn standard_aac() -> Self {
        Self {
            format: AudioFormat::Aac,
            sample_rate: 44100,
            channels: 2,
            bitrate_kbps: 256,
            ..Default::default()
        }
    }

    /// Create standard quality MP3 export settings (44.1 kHz / 192 kbps / stereo)
    pub fn standard_mp3() -> Self {
        Self {
            format: AudioFormat::Mp3,
            sample_rate: 44100,
            channels: 2,
            bitrate_kbps: 192,
            ..Default::default()
        }
    }

    /// Create podcast-optimized AAC settings (mono, lower bitrate)
    pub fn podcast_aac() -> Self {
        Self {
            format: AudioFormat::Aac,
            sample_rate: 44100,
            channels: 1,
            bitrate_kbps: 128,
            ..Default::default()
        }
    }

    /// Create podcast-optimized MP3 settings (mono, lower bitrate)
    pub fn podcast_mp3() -> Self {
        Self {
            format: AudioFormat::Mp3,
            sample_rate: 44100,
            channels: 1,
            bitrate_kbps: 128,
            ..Default::default()
        }
    }

    /// Validate the settings
    ///
    /// # Errors
    /// Returns a human-readable message when any field is out of range
    /// for the selected format.
    pub fn validate(&self) -> Result<(), String> {
        if self.sample_rate == 0 {
            return Err("Sample rate must be greater than 0".to_string());
        }
        if self.channels == 0 || self.channels > 2 {
            return Err("Channels must be 1 (mono) or 2 (stereo)".to_string());
        }
        // Bit depth only applies to lossless formats (WAV/FLAC)
        if self.format.supports_bit_depth() && self.bit_depth != 16 && self.bit_depth != 24 {
            return Err("Bit depth must be 16 or 24".to_string());
        }
        // Bitrate only applies to lossy formats (MP3/AAC)
        if self.format.uses_bitrate() && self.bitrate_kbps == 0 {
            return Err("Bitrate must be greater than 0".to_string());
        }
        if self.start_time < 0.0 {
            return Err("Start time cannot be negative".to_string());
        }
        if self.end_time <= self.start_time {
            return Err("End time must be greater than start time".to_string());
        }
        Ok(())
    }

    /// Get the duration in seconds
    pub fn duration(&self) -> f64 {
        self.end_time - self.start_time
    }
}
/// Video codec types
///
/// Each codec implies a conventional container format
/// (see [`VideoCodec::container_format`]).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum VideoCodec {
    /// H.264 (AVC) - Most widely compatible
    H264,
    /// H.265 (HEVC) - Better compression than H.264
    H265,
    /// VP8 - WebM codec
    VP8,
    /// VP9 - Improved WebM codec
    VP9,
    /// ProRes 422 - Professional editing codec
    ProRes422,
}
impl VideoCodec {
    /// Get the typical container format (file extension) for this codec
    pub fn container_format(&self) -> &'static str {
        match *self {
            VideoCodec::VP8 | VideoCodec::VP9 => "webm",
            VideoCodec::ProRes422 => "mov",
            VideoCodec::H264 | VideoCodec::H265 => "mp4",
        }
    }

    /// Get a human-readable name for this codec, including its container
    pub fn name(&self) -> &'static str {
        match *self {
            VideoCodec::H264 => "H.264 (MP4)",
            VideoCodec::H265 => "H.265 (MP4)",
            VideoCodec::VP8 => "VP8 (WebM)",
            VideoCodec::VP9 => "VP9 (WebM)",
            VideoCodec::ProRes422 => "ProRes 422 (MOV)",
        }
    }
}
/// Video quality presets
///
/// Each preset maps to a target bitrate via [`VideoQuality::bitrate_kbps`];
/// `Custom` carries an explicit bitrate in kbps.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum VideoQuality {
    /// Low quality - ~2 Mbps
    Low,
    /// Medium quality - ~5 Mbps
    Medium,
    /// High quality - ~10 Mbps
    High,
    /// Very high quality - ~20 Mbps
    VeryHigh,
    /// Custom bitrate in kbps
    Custom(u32),
}
impl VideoQuality {
    /// Get the bitrate in kbps for this quality preset
    pub fn bitrate_kbps(&self) -> u32 {
        match *self {
            VideoQuality::Custom(bitrate) => bitrate,
            VideoQuality::Low => 2000,
            VideoQuality::Medium => 5000,
            VideoQuality::High => 10000,
            VideoQuality::VeryHigh => 20000,
        }
    }

    /// Get a human-readable name for UI display
    pub fn name(&self) -> String {
        match *self {
            VideoQuality::Custom(bitrate) => format!("Custom ({} kbps)", bitrate),
            VideoQuality::Low => String::from("Low (2 Mbps)"),
            VideoQuality::Medium => String::from("Medium (5 Mbps)"),
            VideoQuality::High => String::from("High (10 Mbps)"),
            VideoQuality::VeryHigh => String::from("Very High (20 Mbps)"),
        }
    }
}
/// Video export settings
///
/// A complete description of one video export job. `None` for
/// `width`/`height` means "use the document's dimensions".
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VideoExportSettings {
    /// Video codec
    pub codec: VideoCodec,
    /// Output width in pixels (None = use document width)
    pub width: Option<u32>,
    /// Output height in pixels (None = use document height)
    pub height: Option<u32>,
    /// Frame rate (fps)
    pub framerate: f64,
    /// Video quality
    pub quality: VideoQuality,
    /// Audio settings (None = no audio)
    pub audio: Option<AudioExportSettings>,
    /// Start time in seconds
    pub start_time: f64,
    /// End time in seconds (must exceed `start_time`)
    pub end_time: f64,
}
impl Default for VideoExportSettings {
fn default() -> Self {
Self {
codec: VideoCodec::H264,
width: None,
height: None,
framerate: 60.0,
quality: VideoQuality::High,
audio: Some(AudioExportSettings::high_quality_aac()),
start_time: 0.0,
end_time: 60.0,
}
}
}
impl VideoExportSettings {
    /// Validate the settings
    ///
    /// # Errors
    /// Returns a human-readable message when dimensions, framerate,
    /// the time range, or the nested audio settings are invalid.
    pub fn validate(&self) -> Result<(), String> {
        // None means "use document size", so only an explicit 0 is invalid
        if self.width == Some(0) {
            return Err("Width must be greater than 0".to_string());
        }
        if self.height == Some(0) {
            return Err("Height must be greater than 0".to_string());
        }
        if self.framerate <= 0.0 {
            return Err("Framerate must be greater than 0".to_string());
        }
        if self.start_time < 0.0 {
            return Err("Start time cannot be negative".to_string());
        }
        if self.end_time <= self.start_time {
            return Err("End time must be greater than start time".to_string());
        }
        // Audio is optional; validate only when present
        if let Some(audio) = &self.audio {
            audio.validate()?;
        }
        Ok(())
    }

    /// Get the duration in seconds
    pub fn duration(&self) -> f64 {
        self.end_time - self.start_time
    }

    /// Calculate the total number of frames (rounded up so the whole
    /// time range is covered)
    pub fn total_frames(&self) -> usize {
        (self.duration() * self.framerate).ceil() as usize
    }
}
/// Progress updates during export
///
/// Sent from the export thread to the UI over a channel; the UI maps
/// these to a status string and a progress-bar fraction.
#[derive(Debug, Clone)]
pub enum ExportProgress {
    /// Export started
    Started {
        /// Total number of frames (0 for audio-only)
        total_frames: usize,
    },
    /// A frame was rendered (video only)
    FrameRendered {
        /// Current frame number
        frame: usize,
        /// Total frames
        total: usize,
    },
    /// Audio rendering completed
    AudioRendered,
    /// Finalizing the export (writing file, cleanup)
    Finalizing,
    /// Export completed successfully
    Complete {
        /// Path to the exported file
        output_path: std::path::PathBuf,
    },
    /// Export failed
    Error {
        /// Error message
        message: String,
    },
}

impl ExportProgress {
    /// Get a human-readable status message
    pub fn status_message(&self) -> String {
        match self {
            ExportProgress::Started { total_frames } => {
                if *total_frames > 0 {
                    format!("Starting export ({} frames)...", total_frames)
                } else {
                    "Starting audio export...".to_string()
                }
            }
            ExportProgress::FrameRendered { frame, total } => {
                format!("Rendering frame {} of {}...", frame, total)
            }
            ExportProgress::AudioRendered => "Audio rendered successfully".to_string(),
            ExportProgress::Finalizing => "Finalizing export...".to_string(),
            ExportProgress::Complete { output_path } => {
                format!("Export complete: {}", output_path.display())
            }
            ExportProgress::Error { message } => {
                format!("Export failed: {}", message)
            }
        }
    }

    /// Get progress as a percentage (0.0 to 1.0)
    ///
    /// Returns `None` for `Error`, since a failed export has no
    /// meaningful completion fraction.
    pub fn progress_percentage(&self) -> Option<f32> {
        match self {
            ExportProgress::Started { .. } => Some(0.0),
            ExportProgress::FrameRendered { frame, total } => {
                // Guard against total == 0: the unguarded division yields
                // NaN, which poisons downstream clamps and progress bars.
                if *total == 0 {
                    Some(0.0)
                } else {
                    Some(*frame as f32 / *total as f32)
                }
            }
            ExportProgress::AudioRendered => Some(0.9),
            ExportProgress::Finalizing => Some(0.95),
            ExportProgress::Complete { .. } => Some(1.0),
            ExportProgress::Error { .. } => None,
        }
    }
}
// Unit tests for the export-settings types above.
#[cfg(test)]
mod tests {
    use super::*;

    // Each format maps to its conventional file extension (AAC uses .m4a).
    #[test]
    fn test_audio_format_extension() {
        assert_eq!(AudioFormat::Wav.extension(), "wav");
        assert_eq!(AudioFormat::Flac.extension(), "flac");
        assert_eq!(AudioFormat::Mp3.extension(), "mp3");
        assert_eq!(AudioFormat::Aac.extension(), "m4a");
    }

    // bit-depth vs bitrate capability flags partition the formats.
    #[test]
    fn test_audio_format_capabilities() {
        assert!(AudioFormat::Wav.supports_bit_depth());
        assert!(AudioFormat::Flac.supports_bit_depth());
        assert!(!AudioFormat::Mp3.supports_bit_depth());
        assert!(!AudioFormat::Aac.supports_bit_depth());
        assert!(AudioFormat::Mp3.uses_bitrate());
        assert!(AudioFormat::Aac.uses_bitrate());
        assert!(!AudioFormat::Wav.uses_bitrate());
        assert!(!AudioFormat::Flac.uses_bitrate());
    }

    // Walks validate() through each failure case, restoring a valid
    // value after each check.
    #[test]
    fn test_audio_export_settings_validation() {
        let mut settings = AudioExportSettings::default();
        assert!(settings.validate().is_ok());
        // Test invalid sample rate
        settings.sample_rate = 0;
        assert!(settings.validate().is_err());
        settings.sample_rate = 48000;
        // Test invalid channels
        settings.channels = 0;
        assert!(settings.validate().is_err());
        settings.channels = 3;
        assert!(settings.validate().is_err());
        settings.channels = 2;
        // Test invalid bit depth for WAV
        settings.format = AudioFormat::Wav;
        settings.bit_depth = 32;
        assert!(settings.validate().is_err());
        settings.bit_depth = 24;
        assert!(settings.validate().is_ok());
        // Test invalid time range
        settings.start_time = -1.0;
        assert!(settings.validate().is_err());
        settings.start_time = 0.0;
        settings.end_time = 0.0;
        assert!(settings.validate().is_err());
        settings.end_time = 60.0;
        assert!(settings.validate().is_ok());
    }

    // Spot-checks the preset constructors' key fields.
    #[test]
    fn test_audio_presets() {
        let wav = AudioExportSettings::high_quality_wav();
        assert_eq!(wav.format, AudioFormat::Wav);
        assert_eq!(wav.sample_rate, 48000);
        assert_eq!(wav.bit_depth, 24);
        assert_eq!(wav.channels, 2);
        let flac = AudioExportSettings::high_quality_flac();
        assert_eq!(flac.format, AudioFormat::Flac);
        assert_eq!(flac.sample_rate, 48000);
        assert_eq!(flac.bit_depth, 24);
        let aac = AudioExportSettings::high_quality_aac();
        assert_eq!(aac.format, AudioFormat::Aac);
        assert_eq!(aac.bitrate_kbps, 320);
        let mp3 = AudioExportSettings::podcast_mp3();
        assert_eq!(mp3.format, AudioFormat::Mp3);
        assert_eq!(mp3.channels, 1);
        assert_eq!(mp3.bitrate_kbps, 128);
    }

    // Codec-to-container mapping.
    #[test]
    fn test_video_codec_container() {
        assert_eq!(VideoCodec::H264.container_format(), "mp4");
        assert_eq!(VideoCodec::VP9.container_format(), "webm");
        assert_eq!(VideoCodec::ProRes422.container_format(), "mov");
    }

    // Preset and custom bitrates.
    #[test]
    fn test_video_quality_bitrate() {
        assert_eq!(VideoQuality::Low.bitrate_kbps(), 2000);
        assert_eq!(VideoQuality::High.bitrate_kbps(), 10000);
        assert_eq!(VideoQuality::Custom(15000).bitrate_kbps(), 15000);
    }

    // 10 s at 30 fps is exactly 300 frames.
    #[test]
    fn test_video_export_total_frames() {
        let settings = VideoExportSettings {
            framerate: 30.0,
            start_time: 0.0,
            end_time: 10.0,
            ..Default::default()
        };
        assert_eq!(settings.total_frames(), 300);
    }

    // Progress fraction for mid-render and completed exports.
    #[test]
    fn test_export_progress_percentage() {
        let progress = ExportProgress::FrameRendered { frame: 50, total: 100 };
        assert_eq!(progress.progress_percentage(), Some(0.5));
        let complete = ExportProgress::Complete {
            output_path: std::path::PathBuf::from("test.mp4"),
        };
        assert_eq!(complete.progress_percentage(), Some(1.0));
    }
}

View File

@ -32,3 +32,4 @@ pub mod segment_builder;
pub mod planar_graph;
pub mod file_types;
pub mod file_io;
pub mod export;

View File

@ -8,6 +8,7 @@ lightningbeam-core = { path = "../lightningbeam-core" }
daw-backend = { path = "../../daw-backend" }
rtrb = "0.3"
cpal = "0.15"
ffmpeg-next = "7.0"
# UI Framework
eframe = { workspace = true }

View File

@ -0,0 +1,164 @@
//! Audio export functionality
//!
//! Exports audio from the timeline to various formats:
//! - WAV and FLAC: Use existing DAW backend export
//! - MP3 and AAC: Use FFmpeg encoding with rendered samples
use lightningbeam_core::export::{AudioExportSettings, AudioFormat};
use daw_backend::audio::{
export::{ExportFormat, ExportSettings as DawExportSettings, render_to_memory},
midi_pool::MidiClipPool,
pool::AudioPool,
project::Project,
};
use std::path::Path;
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
/// Export audio to a file
///
/// Validates the settings, honors a pre-set cancel flag, then dispatches
/// on format: WAV/FLAC go through the DAW backend, MP3/AAC through the
/// FFmpeg paths (currently stubs).
///
/// # Errors
/// Returns a message when validation fails, the export was cancelled,
/// or the chosen format's encoder reports an error.
pub fn export_audio<P: AsRef<Path>>(
    project: &mut Project,
    pool: &AudioPool,
    midi_pool: &MidiClipPool,
    settings: &AudioExportSettings,
    output_path: P,
    cancel_flag: &Arc<AtomicBool>,
) -> Result<(), String> {
    // Reject malformed settings up front.
    settings.validate()?;

    // Bail out early if the user already cancelled.
    if cancel_flag.load(Ordering::Relaxed) {
        return Err("Export cancelled by user".to_string());
    }

    // Route to the encoder that handles this format.
    match settings.format {
        AudioFormat::Wav | AudioFormat::Flac => {
            export_audio_daw_backend(project, pool, midi_pool, settings, output_path)
        }
        AudioFormat::Mp3 => {
            export_audio_ffmpeg_mp3(project, pool, midi_pool, settings, output_path, cancel_flag)
        }
        AudioFormat::Aac => {
            export_audio_ffmpeg_aac(project, pool, midi_pool, settings, output_path, cancel_flag)
        }
    }
}
/// Export audio using the DAW backend (WAV/FLAC)
///
/// Translates the frontend settings into the backend's settings struct
/// and delegates the actual rendering/encoding to the backend.
fn export_audio_daw_backend<P: AsRef<Path>>(
    project: &mut Project,
    pool: &AudioPool,
    midi_pool: &MidiClipPool,
    settings: &AudioExportSettings,
    output_path: P,
) -> Result<(), String> {
    // Map our format onto the backend's; other formats never reach this
    // function (export_audio routes them to the FFmpeg paths).
    let format = match settings.format {
        AudioFormat::Wav => ExportFormat::Wav,
        AudioFormat::Flac => ExportFormat::Flac,
        _ => unreachable!(),
    };

    let daw_settings = DawExportSettings {
        format,
        sample_rate: settings.sample_rate,
        channels: settings.channels,
        bit_depth: settings.bit_depth,
        mp3_bitrate: 320, // Not used for WAV/FLAC
        start_time: settings.start_time,
        end_time: settings.end_time,
    };

    // Delegate to the backend; `None` = no progress callback for this
    // direct export path.
    daw_backend::audio::export::export_audio(
        project,
        pool,
        midi_pool,
        &daw_settings,
        output_path,
        None,
    )
}
/// Export audio as MP3 using FFmpeg
///
/// Stub: always returns an error directing the user to WAV/FLAC.
/// TODO: wire up an FFmpeg-based MP3 encoder.
fn export_audio_ffmpeg_mp3<P: AsRef<Path>>(
    _project: &mut Project,
    _pool: &AudioPool,
    _midi_pool: &MidiClipPool,
    _settings: &AudioExportSettings,
    _output_path: P,
    _cancel_flag: &Arc<AtomicBool>,
) -> Result<(), String> {
    Err(String::from("MP3 export is not yet implemented. Please use WAV or FLAC format for now, or export as WAV and convert using an external tool."))
}
/// Export audio as AAC using FFmpeg
///
/// Stub: always returns an error directing the user to WAV/FLAC.
/// TODO: wire up an FFmpeg-based AAC encoder.
fn export_audio_ffmpeg_aac<P: AsRef<Path>>(
    _project: &mut Project,
    _pool: &AudioPool,
    _midi_pool: &MidiClipPool,
    _settings: &AudioExportSettings,
    _output_path: P,
    _cancel_flag: &Arc<AtomicBool>,
) -> Result<(), String> {
    Err(String::from("AAC export is not yet implemented. Please use WAV or FLAC format for now, or export as WAV and convert using an external tool."))
}
// Unit tests for the export entry point's validation and cancellation paths.
// NOTE(review): `Project::new()` is called with no arguments here, while the
// engine code elsewhere calls `Project::new(sample_rate)` — confirm these are
// different `Project` types or that the arity matches.
#[cfg(test)]
mod tests {
    use super::*;

    // Invalid settings must be rejected before any rendering starts.
    #[test]
    fn test_export_audio_validation() {
        let mut settings = AudioExportSettings::default();
        settings.sample_rate = 0; // Invalid
        let project = Project::new();
        let pool = AudioPool::new();
        let midi_pool = MidiClipPool::new();
        let cancel_flag = Arc::new(AtomicBool::new(false));
        let result = export_audio(
            &mut project.clone(),
            &pool,
            &midi_pool,
            &settings,
            "/tmp/test.wav",
            &cancel_flag,
        );
        assert!(result.is_err());
        assert!(result.unwrap_err().contains("Sample rate"));
    }

    // A pre-set cancel flag must abort before any file is written.
    #[test]
    fn test_export_audio_cancellation() {
        let settings = AudioExportSettings::default();
        let mut project = Project::new();
        let pool = AudioPool::new();
        let midi_pool = MidiClipPool::new();
        let cancel_flag = Arc::new(AtomicBool::new(true)); // Pre-cancelled
        let result = export_audio(
            &mut project,
            &pool,
            &midi_pool,
            &settings,
            "/tmp/test.wav",
            &cancel_flag,
        );
        assert!(result.is_err());
        assert!(result.unwrap_err().contains("cancelled"));
    }
}

View File

@ -0,0 +1,402 @@
//! Export dialog UI
//!
//! Provides a user interface for configuring and starting audio/video exports.
use eframe::egui;
use lightningbeam_core::export::{AudioExportSettings, AudioFormat};
use std::path::PathBuf;
/// Export dialog state
///
/// Holds the settings being edited plus UI bookkeeping; `render()`
/// draws the dialog and returns the settings when Export is clicked.
pub struct ExportDialog {
    /// Is the dialog open?
    pub open: bool,
    /// Export settings being edited
    pub settings: AudioExportSettings,
    /// Output file path (None until the user picks one via Browse)
    pub output_path: Option<PathBuf>,
    /// Selected preset index (for UI)
    pub selected_preset: usize,
    /// Error message (if any), shown at the top of the dialog
    pub error_message: Option<String>,
}
impl Default for ExportDialog {
fn default() -> Self {
Self {
open: false,
settings: AudioExportSettings::default(),
output_path: None,
selected_preset: 0,
error_message: None,
}
}
}
impl ExportDialog {
    /// Open the dialog with default settings
    ///
    /// Seeds the export end time from the timeline's duration so the
    /// default range covers the whole project.
    pub fn open(&mut self, timeline_duration: f64) {
        self.open = true;
        self.settings.end_time = timeline_duration;
        self.error_message = None;
    }

    /// Close the dialog and clear any displayed error
    pub fn close(&mut self) {
        self.open = false;
        self.error_message = None;
    }

    /// Render the export dialog
    ///
    /// Returns Some(settings, output_path) if the user clicked Export,
    /// None otherwise.
    pub fn render(&mut self, ctx: &egui::Context) -> Option<(AudioExportSettings, PathBuf)> {
        if !self.open {
            return None;
        }
        // Button clicks are latched into flags because the window closure
        // borrows `self.open` mutably; actions are applied after `show`.
        let mut should_export = false;
        let mut should_close = false;
        egui::Window::new("Export Audio")
            .open(&mut self.open)
            .resizable(false)
            .collapsible(false)
            .anchor(egui::Align2::CENTER_CENTER, egui::Vec2::ZERO)
            .show(ctx, |ui| {
                ui.set_width(500.0);
                // Error message (if any)
                if let Some(error) = &self.error_message {
                    ui.colored_label(egui::Color32::RED, error);
                    ui.add_space(8.0);
                }
                // Preset selection: picking one replaces every field of
                // `settings` except the time range, which is preserved.
                ui.heading("Preset");
                ui.horizontal(|ui| {
                    let presets = [
                        ("High Quality WAV", AudioExportSettings::high_quality_wav()),
                        ("High Quality FLAC", AudioExportSettings::high_quality_flac()),
                        ("Standard MP3", AudioExportSettings::standard_mp3()),
                        ("Standard AAC", AudioExportSettings::standard_aac()),
                        ("High Quality MP3", AudioExportSettings::high_quality_mp3()),
                        ("High Quality AAC", AudioExportSettings::high_quality_aac()),
                        ("Podcast MP3", AudioExportSettings::podcast_mp3()),
                        ("Podcast AAC", AudioExportSettings::podcast_aac()),
                    ];
                    egui::ComboBox::from_id_source("export_preset")
                        .selected_text(presets[self.selected_preset].0)
                        .show_ui(ui, |ui| {
                            for (i, (name, _)) in presets.iter().enumerate() {
                                if ui.selectable_value(&mut self.selected_preset, i, *name).clicked() {
                                    // Save current time range before applying preset
                                    let saved_start = self.settings.start_time;
                                    let saved_end = self.settings.end_time;
                                    self.settings = presets[i].1.clone();
                                    // Restore time range
                                    self.settings.start_time = saved_start;
                                    self.settings.end_time = saved_end;
                                }
                            }
                        });
                });
                ui.add_space(12.0);
                // Format settings
                ui.heading("Format");
                ui.horizontal(|ui| {
                    ui.label("Format:");
                    egui::ComboBox::from_id_source("audio_format")
                        .selected_text(self.settings.format.name())
                        .show_ui(ui, |ui| {
                            ui.selectable_value(&mut self.settings.format, AudioFormat::Wav, "WAV (Uncompressed)");
                            ui.selectable_value(&mut self.settings.format, AudioFormat::Flac, "FLAC (Lossless)");
                            ui.selectable_value(&mut self.settings.format, AudioFormat::Mp3, "MP3");
                            ui.selectable_value(&mut self.settings.format, AudioFormat::Aac, "AAC");
                        });
                });
                ui.add_space(8.0);
                // Audio settings
                ui.horizontal(|ui| {
                    ui.label("Sample Rate:");
                    egui::ComboBox::from_id_source("sample_rate")
                        .selected_text(format!("{} Hz", self.settings.sample_rate))
                        .show_ui(ui, |ui| {
                            ui.selectable_value(&mut self.settings.sample_rate, 44100, "44100 Hz");
                            ui.selectable_value(&mut self.settings.sample_rate, 48000, "48000 Hz");
                            ui.selectable_value(&mut self.settings.sample_rate, 96000, "96000 Hz");
                        });
                });
                ui.horizontal(|ui| {
                    ui.label("Channels:");
                    ui.radio_value(&mut self.settings.channels, 1, "Mono");
                    ui.radio_value(&mut self.settings.channels, 2, "Stereo");
                });
                ui.add_space(8.0);
                // Format-specific settings: bit depth for lossless,
                // bitrate for lossy (mutually exclusive by format).
                if self.settings.format.supports_bit_depth() {
                    ui.horizontal(|ui| {
                        ui.label("Bit Depth:");
                        ui.radio_value(&mut self.settings.bit_depth, 16, "16-bit");
                        ui.radio_value(&mut self.settings.bit_depth, 24, "24-bit");
                    });
                }
                if self.settings.format.uses_bitrate() {
                    ui.horizontal(|ui| {
                        ui.label("Bitrate:");
                        egui::ComboBox::from_id_source("bitrate")
                            .selected_text(format!("{} kbps", self.settings.bitrate_kbps))
                            .show_ui(ui, |ui| {
                                ui.selectable_value(&mut self.settings.bitrate_kbps, 128, "128 kbps");
                                ui.selectable_value(&mut self.settings.bitrate_kbps, 192, "192 kbps");
                                ui.selectable_value(&mut self.settings.bitrate_kbps, 256, "256 kbps");
                                ui.selectable_value(&mut self.settings.bitrate_kbps, 320, "320 kbps");
                            });
                    });
                }
                ui.add_space(12.0);
                // Time range: drag values are clamped so start <= end always holds.
                ui.heading("Time Range");
                ui.horizontal(|ui| {
                    ui.label("Start:");
                    ui.add(egui::DragValue::new(&mut self.settings.start_time)
                        .speed(0.1)
                        .clamp_range(0.0..=self.settings.end_time)
                        .suffix(" s"));
                    ui.label("End:");
                    ui.add(egui::DragValue::new(&mut self.settings.end_time)
                        .speed(0.1)
                        .clamp_range(self.settings.start_time..=f64::MAX)
                        .suffix(" s"));
                });
                let duration = self.settings.duration();
                ui.label(format!("Duration: {:.2} seconds", duration));
                ui.add_space(12.0);
                // Output file path
                ui.heading("Output");
                ui.horizontal(|ui| {
                    let path_text = self.output_path.as_ref()
                        .map(|p| p.display().to_string())
                        .unwrap_or_else(|| "No file selected".to_string());
                    ui.label("File:");
                    // NOTE(review): this edits a temporary clone, so any user
                    // typing here is silently discarded — confirm this is
                    // intended as a read-only display of the chosen path.
                    ui.text_edit_singleline(&mut path_text.clone());
                    if ui.button("Browse...").clicked() {
                        // Open file dialog with a default name matching the format
                        let default_name = format!("audio.{}", self.settings.format.extension());
                        if let Some(path) = rfd::FileDialog::new()
                            .set_file_name(&default_name)
                            .add_filter("Audio", &[self.settings.format.extension()])
                            .save_file()
                        {
                            self.output_path = Some(path);
                        }
                    }
                });
                ui.add_space(12.0);
                // Estimated file size: bitrate-based for lossy formats,
                // raw PCM size (with a rough 0.6x factor for FLAC) otherwise.
                if duration > 0.0 {
                    let estimated_mb = if self.settings.format.uses_bitrate() {
                        // Lossy: bitrate * duration / 8 / 1024
                        (self.settings.bitrate_kbps as f64 * duration) / 8.0 / 1024.0
                    } else {
                        // Lossless: sample_rate * channels * bit_depth * duration / 8 / 1024 / 1024
                        let compression_factor = if self.settings.format == AudioFormat::Flac { 0.6 } else { 1.0 };
                        (self.settings.sample_rate as f64 * self.settings.channels as f64 *
                            self.settings.bit_depth as f64 * duration * compression_factor) / 8.0 / 1024.0 / 1024.0
                    };
                    ui.label(format!("Estimated size: ~{:.1} MB", estimated_mb));
                }
                ui.add_space(16.0);
                // Buttons
                ui.horizontal(|ui| {
                    if ui.button("Cancel").clicked() {
                        should_close = true;
                    }
                    ui.with_layout(egui::Layout::right_to_left(egui::Align::Center), |ui| {
                        if ui.button("Export").clicked() {
                            should_export = true;
                        }
                    });
                });
            });
        if should_close {
            self.close();
            return None;
        }
        if should_export {
            // Validate settings; on failure keep the dialog open and show why.
            if let Err(err) = self.settings.validate() {
                self.error_message = Some(err);
                return None;
            }
            // Check if output path is set
            if self.output_path.is_none() {
                self.error_message = Some("Please select an output file".to_string());
                return None;
            }
            // Return settings and path (unwrap is safe: checked just above)
            let result = Some((self.settings.clone(), self.output_path.clone().unwrap()));
            self.close();
            return result;
        }
        None
    }
}
/// Export progress dialog state
///
/// Modal shown while an export runs; fed by `update_progress()` from
/// polled [`ExportProgress`] events.
pub struct ExportProgressDialog {
    /// Is the dialog open?
    pub open: bool,
    /// Current progress message shown above the bar
    pub message: String,
    /// Progress (0.0 to 1.0)
    pub progress: f32,
    /// Start time for elapsed time calculation (set when opened)
    pub start_time: Option<std::time::Instant>,
    /// Was cancel requested by the user?
    pub cancel_requested: bool,
}
impl Default for ExportProgressDialog {
fn default() -> Self {
Self {
open: false,
message: String::new(),
progress: 0.0,
start_time: None,
cancel_requested: false,
}
}
}
impl ExportProgressDialog {
    /// Open the progress dialog, resetting message, progress, and timer
    pub fn open(&mut self) {
        self.open = true;
        self.message = "Starting export...".to_string();
        self.progress = 0.0;
        self.start_time = Some(std::time::Instant::now());
        self.cancel_requested = false;
    }

    /// Close the dialog and clear the timer and cancel state
    pub fn close(&mut self) {
        self.open = false;
        self.start_time = None;
        self.cancel_requested = false;
    }

    /// Update progress
    ///
    /// `progress` is clamped into [0, 1] before display.
    pub fn update_progress(&mut self, message: String, progress: f32) {
        self.message = message;
        self.progress = progress.clamp(0.0, 1.0);
    }

    /// Render the export progress dialog
    ///
    /// Returns true if the user clicked Cancel
    pub fn render(&mut self, ctx: &egui::Context) -> bool {
        if !self.open {
            return false;
        }
        // Latched inside the window closure; applied after `show`.
        let mut should_cancel = false;
        egui::Window::new("Exporting...")
            .open(&mut self.open)
            .resizable(false)
            .collapsible(false)
            .anchor(egui::Align2::CENTER_CENTER, egui::Vec2::ZERO)
            .show(ctx, |ui| {
                ui.set_width(400.0);
                // Status message
                ui.label(&self.message);
                ui.add_space(8.0);
                // Progress bar
                let progress_text = format!("{:.0}%", self.progress * 100.0);
                ui.add(egui::ProgressBar::new(self.progress).text(progress_text));
                ui.add_space(8.0);
                // Elapsed time and linear-extrapolation remaining estimate
                if let Some(start_time) = self.start_time {
                    let elapsed = start_time.elapsed();
                    let elapsed_secs = elapsed.as_secs();
                    ui.horizontal(|ui| {
                        ui.label(format!(
                            "Elapsed: {}:{:02}",
                            elapsed_secs / 60,
                            elapsed_secs % 60
                        ));
                        // Estimate remaining time if we have progress
                        // (threshold avoids dividing by near-zero progress)
                        if self.progress > 0.01 {
                            let total_estimated = elapsed.as_secs_f32() / self.progress;
                            let remaining = total_estimated - elapsed.as_secs_f32();
                            if remaining > 0.0 {
                                ui.label(format!(
                                    " | Remaining: ~{}:{:02}",
                                    (remaining as u64) / 60,
                                    (remaining as u64) % 60
                                ));
                            }
                        }
                    });
                }
                ui.add_space(12.0);
                // Cancel button
                ui.horizontal(|ui| {
                    ui.with_layout(egui::Layout::right_to_left(egui::Align::Center), |ui| {
                        if ui.button("Cancel").clicked() {
                            should_cancel = true;
                        }
                    });
                });
            });
        if should_cancel {
            self.cancel_requested = true;
        }
        should_cancel
    }
}

View File

@ -0,0 +1,241 @@
//! Export functionality for audio and video
//!
//! This module provides the export orchestrator and progress tracking
//! for exporting audio and video from the timeline.
pub mod audio_exporter;
pub mod dialog;
use lightningbeam_core::export::{AudioExportSettings, ExportProgress};
use std::path::PathBuf;
use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
/// Export orchestrator that manages the export process
///
/// Owns the background export thread and the channel used to stream
/// progress updates back to the UI; cancellation is cooperative via a
/// shared atomic flag.
pub struct ExportOrchestrator {
    /// Channel for receiving progress updates (None until an export starts)
    progress_rx: Option<Receiver<ExportProgress>>,
    /// Handle to the export thread (None before the first export)
    thread_handle: Option<std::thread::JoinHandle<()>>,
    /// Cancel flag shared with the export thread
    cancel_flag: Arc<AtomicBool>,
}
impl ExportOrchestrator {
/// Create a new export orchestrator
pub fn new() -> Self {
Self {
progress_rx: None,
thread_handle: None,
cancel_flag: Arc::new(AtomicBool::new(false)),
}
}
/// Start an audio export in the background
///
/// Returns immediately, spawning a background thread for the export.
/// Use `poll_progress()` to check the export progress.
///
/// NOTE(review): any previous export's JoinHandle and receiver are
/// overwritten here without being joined/drained — confirm callers
/// prevent starting a second export while one is still running.
pub fn start_audio_export(
    &mut self,
    settings: AudioExportSettings,
    output_path: PathBuf,
    audio_controller: Arc<std::sync::Mutex<daw_backend::EngineController>>,
) {
    println!("🔄 [ORCHESTRATOR] start_audio_export called");
    // Create progress channel; the sender moves into the worker thread
    let (tx, rx) = channel();
    self.progress_rx = Some(rx);
    // Reset cancel flag so a cancel from a previous run doesn't leak in
    self.cancel_flag.store(false, Ordering::Relaxed);
    let cancel_flag = Arc::clone(&self.cancel_flag);
    println!("🔄 [ORCHESTRATOR] Spawning background thread...");
    // Spawn background thread that performs the whole export
    let handle = std::thread::spawn(move || {
        println!("🧵 [EXPORT THREAD] Background thread started!");
        Self::run_audio_export(
            settings,
            output_path,
            audio_controller,
            tx,
            cancel_flag,
        );
        println!("🧵 [EXPORT THREAD] Background thread finished!");
    });
    self.thread_handle = Some(handle);
    println!("🔄 [ORCHESTRATOR] Thread spawned, returning");
}
/// Poll for progress updates
///
/// Returns None if no updates are available.
/// Returns Some(progress) if an update is available.
pub fn poll_progress(&mut self) -> Option<ExportProgress> {
if let Some(rx) = &self.progress_rx {
match rx.try_recv() {
Ok(progress) => {
println!("📨 [ORCHESTRATOR] Received progress: {:?}", std::mem::discriminant(&progress));
Some(progress)
}
Err(e) => {
// Only log occasionally to avoid spam
None
}
}
} else {
None
}
}
/// Cancel the current export
pub fn cancel(&mut self) {
self.cancel_flag.store(true, Ordering::Relaxed);
}
/// Check if an export is in progress
pub fn is_exporting(&self) -> bool {
if let Some(handle) = &self.thread_handle {
!handle.is_finished()
} else {
false
}
}
/// Wait for the export to complete
///
/// This blocks until the export thread finishes.
pub fn wait_for_completion(&mut self) {
if let Some(handle) = self.thread_handle.take() {
handle.join().ok();
}
}
/// Run audio export in background thread
fn run_audio_export(
settings: AudioExportSettings,
output_path: PathBuf,
audio_controller: Arc<std::sync::Mutex<daw_backend::EngineController>>,
progress_tx: Sender<ExportProgress>,
cancel_flag: Arc<AtomicBool>,
) {
println!("🧵 [EXPORT THREAD] run_audio_export started");
// Send start notification
progress_tx
.send(ExportProgress::Started { total_frames: 0 })
.ok();
println!("🧵 [EXPORT THREAD] Sent Started progress");
// Check for cancellation
if cancel_flag.load(Ordering::Relaxed) {
progress_tx
.send(ExportProgress::Error {
message: "Export cancelled by user".to_string(),
})
.ok();
return;
}
// Convert settings to DAW backend format
let daw_settings = daw_backend::audio::ExportSettings {
format: match settings.format {
lightningbeam_core::export::AudioFormat::Wav => daw_backend::audio::ExportFormat::Wav,
lightningbeam_core::export::AudioFormat::Flac => daw_backend::audio::ExportFormat::Flac,
lightningbeam_core::export::AudioFormat::Mp3 |
lightningbeam_core::export::AudioFormat::Aac => {
// MP3/AAC not supported yet
progress_tx
.send(ExportProgress::Error {
message: format!("{} export not yet implemented. Please use WAV or FLAC format.", settings.format.name()),
})
.ok();
return;
}
},
sample_rate: settings.sample_rate,
channels: settings.channels,
bit_depth: settings.bit_depth,
mp3_bitrate: 320, // Not used for WAV/FLAC
start_time: settings.start_time,
end_time: settings.end_time,
};
println!("🧵 [EXPORT THREAD] Starting non-blocking export...");
// Start the export (non-blocking - just sends the query)
{
let mut controller = audio_controller.lock().unwrap();
println!("🧵 [EXPORT THREAD] Sending export query...");
if let Err(e) = controller.start_export_audio(&daw_settings, &output_path) {
println!("🧵 [EXPORT THREAD] Failed to start export: {}", e);
progress_tx.send(ExportProgress::Error { message: e }).ok();
return;
}
println!("🧵 [EXPORT THREAD] Export query sent, lock released");
}
// Poll for completion without holding the lock for extended periods
let duration = settings.end_time - settings.start_time;
let start_time = std::time::Instant::now();
let result = loop {
if cancel_flag.load(Ordering::Relaxed) {
break Err("Export cancelled by user".to_string());
}
// Sleep before polling to avoid spinning
std::thread::sleep(std::time::Duration::from_millis(100));
// Brief lock to poll for completion
let poll_result = {
let mut controller = audio_controller.lock().unwrap();
controller.poll_export_completion()
};
match poll_result {
Ok(Some(result)) => {
// Export completed
println!("🧵 [EXPORT THREAD] Export completed: {:?}", result.is_ok());
break result;
}
Ok(None) => {
// Still in progress - actual progress comes via AudioEvent::ExportProgress
// No need to send progress here
}
Err(e) => {
// Polling error (shouldn't happen)
println!("🧵 [EXPORT THREAD] Poll error: {}", e);
break Err(e);
}
}
};
println!("🧵 [EXPORT THREAD] Export loop finished");
// Send completion or error
match result {
Ok(_) => {
println!("📤 [EXPORT THREAD] Sending Complete event");
let send_result = progress_tx.send(ExportProgress::Complete {
output_path: output_path.clone(),
});
println!("📤 [EXPORT THREAD] Complete event sent: {:?}", send_result.is_ok());
}
Err(err) => {
println!("📤 [EXPORT THREAD] Sending Error event: {}", err);
let send_result = progress_tx.send(ExportProgress::Error { message: err });
println!("📤 [EXPORT THREAD] Error event sent: {:?}", send_result.is_ok());
}
}
}
}
impl Default for ExportOrchestrator {
fn default() -> Self {
Self::new()
}
}

View File

@ -3,7 +3,7 @@ use lightningbeam_core::layer::{AnyLayer, AudioLayer};
use lightningbeam_core::layout::{LayoutDefinition, LayoutNode};
use lightningbeam_core::pane::PaneType;
use lightningbeam_core::tool::Tool;
use std::collections::HashMap;
use std::collections::{HashMap, HashSet};
use std::sync::Arc;
use clap::Parser;
use uuid::Uuid;
@ -26,6 +26,8 @@ use config::AppConfig;
mod default_instrument;
mod export;
/// Lightningbeam Editor - Animation and video editing software
#[derive(Parser, Debug)]
#[command(name = "Lightningbeam Editor")]
@ -507,6 +509,7 @@ struct EditorApp {
audio_stream: Option<cpal::Stream>, // Audio stream (must be kept alive)
audio_controller: Option<std::sync::Arc<std::sync::Mutex<daw_backend::EngineController>>>, // Shared audio controller
audio_event_rx: Option<rtrb::Consumer<daw_backend::AudioEvent>>, // Audio event receiver
audio_events_pending: std::sync::Arc<std::sync::atomic::AtomicBool>, // Flag set when audio events arrive
audio_sample_rate: u32, // Audio sample rate
audio_channels: u32, // Audio channel count
// Video decoding and management
@ -537,6 +540,14 @@ struct EditorApp {
/// Prevents repeated backend queries for the same audio file
/// Format: Vec of WaveformPeak (min/max pairs)
waveform_cache: HashMap<usize, Vec<daw_backend::WaveformPeak>>,
/// Chunk-based waveform cache for multi-resolution waveforms
/// Format: (pool_index, detail_level, chunk_index) -> Vec<WaveformPeak>
waveform_chunk_cache: HashMap<(usize, u8, u32), Vec<daw_backend::WaveformPeak>>,
/// Cache for audio file durations to avoid repeated queries
/// Format: pool_index -> duration in seconds
audio_duration_cache: HashMap<usize, f64>,
/// Track which audio pool indices got new waveform data this frame (for thumbnail invalidation)
audio_pools_with_new_waveforms: HashSet<usize>,
/// Cache for rendered waveform images (GPU textures)
/// Stores pre-rendered waveform tiles at various zoom levels for fast blitting
waveform_image_cache: waveform_image_cache::WaveformImageCache,
@ -553,6 +564,13 @@ struct EditorApp {
/// Audio extraction channel for background thread communication
audio_extraction_tx: std::sync::mpsc::Sender<AudioExtractionResult>,
audio_extraction_rx: std::sync::mpsc::Receiver<AudioExtractionResult>,
/// Export dialog state
export_dialog: export::dialog::ExportDialog,
/// Export progress dialog
export_progress_dialog: export::dialog::ExportProgressDialog,
/// Export orchestrator for background exports
export_orchestrator: Option<export::ExportOrchestrator>,
}
/// Import filter types for the file dialog
@ -668,6 +686,7 @@ impl EditorApp {
audio_stream,
audio_controller,
audio_event_rx,
audio_events_pending: std::sync::Arc::new(std::sync::atomic::AtomicBool::new(false)),
audio_sample_rate,
audio_channels,
video_manager: std::sync::Arc::new(std::sync::Mutex::new(
@ -686,6 +705,9 @@ impl EditorApp {
polygon_sides: 5, // Default to pentagon
midi_event_cache: HashMap::new(), // Initialize empty MIDI event cache
waveform_cache: HashMap::new(), // Initialize empty waveform cache
waveform_chunk_cache: HashMap::new(), // Initialize empty chunk-based waveform cache
audio_duration_cache: HashMap::new(), // Initialize empty audio duration cache
audio_pools_with_new_waveforms: HashSet::new(), // Track pool indices with new waveforms
waveform_image_cache: waveform_image_cache::WaveformImageCache::new(), // Initialize waveform image cache
current_file_path: None, // No file loaded initially
config,
@ -693,6 +715,9 @@ impl EditorApp {
file_operation: None, // No file operation in progress initially
audio_extraction_tx,
audio_extraction_rx,
export_dialog: export::dialog::ExportDialog::default(),
export_progress_dialog: export::dialog::ExportProgressDialog::default(),
export_orchestrator: None,
}
}
@ -776,31 +801,75 @@ impl EditorApp {
}
/// Fetch waveform data from backend for a specific audio pool index
/// Returns cached data if available, otherwise queries backend
/// Returns cached data if available, otherwise tries to assemble from chunks
/// For thumbnails, uses Level 0 (overview) chunks which are fast to generate
fn fetch_waveform(&mut self, pool_index: usize) -> Option<Vec<daw_backend::WaveformPeak>> {
// Check if already cached
// Check if already cached in old waveform cache
if let Some(waveform) = self.waveform_cache.get(&pool_index) {
return Some(waveform.clone());
}
// Fetch from backend
// Request 20,000 peaks for high-detail waveform visualization
// For a 200s file, this gives ~100 peaks/second, providing smooth visualization at all zoom levels
if let Some(ref controller_arc) = self.audio_controller {
let mut controller = controller_arc.lock().unwrap();
match controller.get_pool_waveform(pool_index, 20000) {
Ok(waveform) => {
self.waveform_cache.insert(pool_index, waveform.clone());
Some(waveform)
}
Err(e) => {
eprintln!("⚠️ Failed to fetch waveform for pool index {}: {}", pool_index, e);
None
}
}
// Try to assemble from Level 0 (overview) chunks - perfect for thumbnails
// Level 0 = 1 peak/sec, so a 200s file only needs 200 peaks (very fast)
// Get audio file duration (use cached value to avoid repeated queries)
let audio_file_duration = if let Some(&duration) = self.audio_duration_cache.get(&pool_index) {
duration
} else {
None
// Duration not cached - query it once and cache
if let Some(ref controller_arc) = self.audio_controller {
let mut controller = controller_arc.lock().unwrap();
match controller.get_pool_file_info(pool_index) {
Ok((duration, _, _)) => {
self.audio_duration_cache.insert(pool_index, duration);
duration
}
Err(_) => return None,
}
} else {
return None;
}
};
// Assemble Level 0 chunks for the entire file
let detail_level = 0; // Level 0 (overview)
let chunk_time_span = 60.0; // 60 seconds per chunk
let total_chunks = (audio_file_duration / chunk_time_span).ceil() as u32;
let mut assembled_peaks = Vec::new();
let mut missing_chunks = Vec::new();
// Check if all required chunks are available
for chunk_idx in 0..total_chunks {
let key = (pool_index, detail_level, chunk_idx);
if let Some(chunk_peaks) = self.waveform_chunk_cache.get(&key) {
assembled_peaks.extend_from_slice(chunk_peaks);
} else {
missing_chunks.push(chunk_idx);
}
}
// If any chunks are missing, request them (but only if we have a controller)
if !missing_chunks.is_empty() {
if let Some(ref controller_arc) = self.audio_controller {
let mut controller = controller_arc.lock().unwrap();
let _ = controller.generate_waveform_chunks(
pool_index,
detail_level,
missing_chunks,
2, // High priority for thumbnails
);
}
return None; // Will retry next frame when chunks arrive
}
// All chunks available - cache and return
if !assembled_peaks.is_empty() {
self.waveform_cache.insert(pool_index, assembled_peaks.clone());
return Some(assembled_peaks);
}
None
}
fn switch_layout(&mut self, index: usize) {
@ -1032,7 +1101,9 @@ impl EditorApp {
}
MenuAction::Export => {
println!("Menu: Export");
// TODO: Implement export
// Open export dialog with calculated timeline endpoint
let timeline_endpoint = self.action_executor.document().calculate_timeline_endpoint();
self.export_dialog.open(timeline_endpoint);
}
MenuAction::Quit => {
println!("Menu: Quit");
@ -1653,6 +1724,7 @@ impl EditorApp {
let mut controller = controller_arc.lock().unwrap();
// Send audio data to the engine
let path_str = path.to_string_lossy().to_string();
println!("📤 [UI] Sending AddAudioFile command to engine: {}", path_str);
controller.add_audio_file(
path_str.clone(),
audio_file.data,
@ -1950,6 +2022,13 @@ impl eframe::App for EditorApp {
o.zoom_with_keyboard = false;
});
// Force continuous repaint if we have pending waveform updates
// This ensures thumbnails update immediately when waveform data arrives
if !self.audio_pools_with_new_waveforms.is_empty() {
println!("🔄 [UPDATE] Pending waveform updates for pools: {:?}", self.audio_pools_with_new_waveforms);
ctx.request_repaint();
}
// Poll audio extraction results from background threads
while let Ok(result) = self.audio_extraction_rx.try_recv() {
self.handle_audio_extraction_result(result);
@ -2091,9 +2170,16 @@ impl eframe::App for EditorApp {
ctx.request_repaint();
}
// Check if audio events are pending and request repaint if needed
if self.audio_events_pending.load(std::sync::atomic::Ordering::Relaxed) {
ctx.request_repaint();
}
// Poll audio events from the audio engine
if let Some(event_rx) = &mut self.audio_event_rx {
let mut polled_events = false;
while let Ok(event) = event_rx.pop() {
polled_events = true;
use daw_backend::AudioEvent;
match event {
AudioEvent::PlaybackPosition(time) => {
@ -2102,9 +2188,52 @@ impl eframe::App for EditorApp {
AudioEvent::PlaybackStopped => {
self.is_playing = false;
}
AudioEvent::ExportProgress { frames_rendered, total_frames } => {
// Update export progress dialog with actual render progress
let progress = frames_rendered as f32 / total_frames as f32;
self.export_progress_dialog.update_progress(
format!("Rendering: {} / {} frames", frames_rendered, total_frames),
progress,
);
ctx.request_repaint();
}
AudioEvent::WaveformChunksReady { pool_index, detail_level, chunks } => {
// Store waveform chunks in the cache
let mut all_peaks = Vec::new();
for (chunk_index, _time_range, peaks) in chunks {
let key = (pool_index, detail_level, chunk_index);
self.waveform_chunk_cache.insert(key, peaks.clone());
all_peaks.extend(peaks);
}
// If this is Level 0 (overview), also populate the old waveform_cache
// so asset library thumbnails can use it immediately
if detail_level == 0 && !all_peaks.is_empty() {
println!("💾 [EVENT] Storing {} Level 0 peaks for pool {} in waveform_cache", all_peaks.len(), pool_index);
self.waveform_cache.insert(pool_index, all_peaks);
// Mark this pool index as having new waveform data (for thumbnail invalidation)
self.audio_pools_with_new_waveforms.insert(pool_index);
println!("🔔 [EVENT] Marked pool {} for thumbnail invalidation", pool_index);
}
// Invalidate image cache for this pool index
// (The waveform tiles will be regenerated with new chunk data)
self.waveform_image_cache.invalidate_audio(pool_index);
ctx.request_repaint();
}
_ => {} // Ignore other events for now
}
}
// If we polled events, set the flag to trigger another update
// (in case more events arrive before the next frame)
if polled_events {
self.audio_events_pending.store(true, std::sync::atomic::Ordering::Relaxed);
} else {
// No events this frame, clear the flag
self.audio_events_pending.store(false, std::sync::atomic::Ordering::Relaxed);
}
}
// Request continuous repaints when playing to update time display
@ -2112,6 +2241,120 @@ impl eframe::App for EditorApp {
ctx.request_repaint();
}
// Handle export dialog
if let Some((settings, output_path)) = self.export_dialog.render(ctx) {
// User clicked Export - start the export
println!("🎬 [MAIN] Export button clicked: {}", output_path.display());
if let Some(audio_controller) = &self.audio_controller {
println!("🎬 [MAIN] Audio controller available");
// Create orchestrator if needed
if self.export_orchestrator.is_none() {
println!("🎬 [MAIN] Creating new orchestrator");
self.export_orchestrator = Some(export::ExportOrchestrator::new());
}
// Start export
if let Some(orchestrator) = &mut self.export_orchestrator {
println!("🎬 [MAIN] Calling start_audio_export...");
orchestrator.start_audio_export(
settings,
output_path,
Arc::clone(audio_controller),
);
println!("🎬 [MAIN] start_audio_export returned, opening progress dialog");
// Open progress dialog
self.export_progress_dialog.open();
println!("🎬 [MAIN] Progress dialog opened");
}
} else {
eprintln!("❌ Cannot export: Audio controller not available");
}
}
// Render export progress dialog and handle cancel
if self.export_progress_dialog.render(ctx) {
// User clicked Cancel
if let Some(orchestrator) = &mut self.export_orchestrator {
orchestrator.cancel();
}
}
// Keep requesting repaints while export progress dialog is open
if self.export_progress_dialog.open {
ctx.request_repaint();
}
// Poll export orchestrator for progress
if let Some(orchestrator) = &mut self.export_orchestrator {
// Only log occasionally to avoid spam
static mut POLL_COUNT: u32 = 0;
unsafe {
POLL_COUNT += 1;
if POLL_COUNT % 60 == 0 {
println!("🔍 [MAIN] Polling orchestrator (poll #{})...", POLL_COUNT);
}
}
if let Some(progress) = orchestrator.poll_progress() {
println!("📨 [MAIN] Received progress from orchestrator!");
match progress {
lightningbeam_core::export::ExportProgress::Started { total_frames } => {
println!("Export started: {} frames", total_frames);
self.export_progress_dialog.update_progress(
"Starting export...".to_string(),
0.0,
);
ctx.request_repaint(); // Keep repainting during export
}
lightningbeam_core::export::ExportProgress::FrameRendered { frame, total } => {
let progress = frame as f32 / total as f32;
self.export_progress_dialog.update_progress(
format!("Rendering frame {} of {}", frame, total),
progress,
);
ctx.request_repaint();
}
lightningbeam_core::export::ExportProgress::AudioRendered => {
self.export_progress_dialog.update_progress(
"Rendering audio...".to_string(),
0.5,
);
ctx.request_repaint();
}
lightningbeam_core::export::ExportProgress::Finalizing => {
self.export_progress_dialog.update_progress(
"Finalizing export...".to_string(),
0.9,
);
ctx.request_repaint();
}
lightningbeam_core::export::ExportProgress::Complete { ref output_path } => {
println!("✅ Export complete: {}", output_path.display());
self.export_progress_dialog.update_progress(
format!("Export complete: {}", output_path.display()),
1.0,
);
// Close the progress dialog after a brief delay
self.export_progress_dialog.close();
}
lightningbeam_core::export::ExportProgress::Error { ref message } => {
eprintln!("❌ Export error: {}", message);
self.export_progress_dialog.update_progress(
format!("Error: {}", message),
0.0,
);
// Keep the dialog open to show the error
}
}
}
// Request repaint while exporting to update progress
if orchestrator.is_exporting() {
ctx.request_repaint();
}
}
// Top menu bar (egui-rendered on all platforms)
egui::TopBottomPanel::top("menu_bar").show(ctx, |ui| {
if let Some(menu_system) = &self.menu_system {
@ -2172,7 +2415,9 @@ impl eframe::App for EditorApp {
layer_to_track_map: &self.layer_to_track_map,
midi_event_cache: &self.midi_event_cache,
waveform_cache: &self.waveform_cache,
waveform_chunk_cache: &self.waveform_chunk_cache,
waveform_image_cache: &mut self.waveform_image_cache,
audio_pools_with_new_waveforms: &self.audio_pools_with_new_waveforms,
};
render_layout_node(
@ -2308,6 +2553,13 @@ impl eframe::App for EditorApp {
}
}
});
// Clear the set of audio pools with new waveforms at the end of the frame
// (Thumbnails have been invalidated above, so this can be cleared for next frame)
if !self.audio_pools_with_new_waveforms.is_empty() {
println!("🧹 [UPDATE] Clearing waveform update set: {:?}", self.audio_pools_with_new_waveforms);
}
self.audio_pools_with_new_waveforms.clear();
}
}
@ -2350,8 +2602,12 @@ struct RenderContext<'a> {
midi_event_cache: &'a HashMap<u32, Vec<(f64, u8, bool)>>,
/// Cache of waveform data for rendering (keyed by audio_pool_index)
waveform_cache: &'a HashMap<usize, Vec<daw_backend::WaveformPeak>>,
/// Chunk-based waveform cache for multi-resolution waveforms
waveform_chunk_cache: &'a HashMap<(usize, u8, u32), Vec<daw_backend::WaveformPeak>>,
/// Cache of rendered waveform images (GPU textures)
waveform_image_cache: &'a mut waveform_image_cache::WaveformImageCache,
/// Audio pool indices with new waveform data this frame (for thumbnail invalidation)
audio_pools_with_new_waveforms: &'a HashSet<usize>,
}
/// Recursively render a layout node with drag support
@ -2825,7 +3081,9 @@ fn render_pane(
polygon_sides: ctx.polygon_sides,
midi_event_cache: ctx.midi_event_cache,
waveform_cache: ctx.waveform_cache,
waveform_chunk_cache: ctx.waveform_chunk_cache,
waveform_image_cache: ctx.waveform_image_cache,
audio_pools_with_new_waveforms: ctx.audio_pools_with_new_waveforms,
};
pane_instance.render_header(&mut header_ui, &mut shared);
}
@ -2882,7 +3140,9 @@ fn render_pane(
polygon_sides: ctx.polygon_sides,
midi_event_cache: ctx.midi_event_cache,
waveform_cache: ctx.waveform_cache,
waveform_chunk_cache: ctx.waveform_chunk_cache,
waveform_image_cache: ctx.waveform_image_cache,
audio_pools_with_new_waveforms: ctx.audio_pools_with_new_waveforms,
};
// Render pane content (header was already rendered above)

View File

@ -1197,20 +1197,20 @@ impl AssetLibraryPane {
let asset_category = asset.category;
let ctx = ui.ctx().clone();
// Only pre-fetch waveform data if thumbnail not already cached
// (get_pool_waveform is expensive - it blocks waiting for audio thread)
// Get waveform data from cache if thumbnail not already cached
let prefetched_waveform: Option<Vec<(f32, f32)>> =
if asset_category == AssetCategory::Audio && !self.thumbnail_cache.has(&asset_id) {
if let Some(clip) = document.audio_clips.get(&asset_id) {
if let AudioClipType::Sampled { audio_pool_index } = &clip.clip_type {
if let Some(controller_arc) = shared.audio_controller {
let mut controller = controller_arc.lock().unwrap();
controller.get_pool_waveform(*audio_pool_index, THUMBNAIL_SIZE as usize)
.ok()
.map(|peaks| peaks.iter().map(|p| (p.min, p.max)).collect())
// Use cached waveform data (populated by fetch_waveform in main.rs)
let waveform = shared.waveform_cache.get(audio_pool_index)
.map(|peaks| peaks.iter().map(|p| (p.min, p.max)).collect());
if waveform.is_some() {
println!("🎵 Found waveform for pool {} (asset {})", audio_pool_index, asset_id);
} else {
None
println!("⚠️ No waveform yet for pool {} (asset {})", audio_pool_index, asset_id);
}
waveform
} else {
None
}
@ -1246,8 +1246,10 @@ impl AssetLibraryPane {
AudioClipType::Sampled { .. } => {
let wave_color = egui::Color32::from_rgb(100, 200, 100);
if let Some(ref peaks) = prefetched_waveform {
println!("✅ Generating waveform thumbnail with {} peaks for asset {}", peaks.len(), asset_id);
Some(generate_waveform_thumbnail(peaks, bg_color, wave_color))
} else {
println!("📦 Generating placeholder thumbnail for asset {}", asset_id);
Some(generate_placeholder_thumbnail(AssetCategory::Audio, 200))
}
}
@ -1483,20 +1485,20 @@ impl AssetLibraryPane {
let asset_category = asset.category;
let ctx = ui.ctx().clone();
// Only pre-fetch waveform data if thumbnail not already cached
// (get_pool_waveform is expensive - it blocks waiting for audio thread)
// Get waveform data from cache if thumbnail not already cached
let prefetched_waveform: Option<Vec<(f32, f32)>> =
if asset_category == AssetCategory::Audio && !self.thumbnail_cache.has(&asset_id) {
if let Some(clip) = document.audio_clips.get(&asset_id) {
if let AudioClipType::Sampled { audio_pool_index } = &clip.clip_type {
if let Some(controller_arc) = shared.audio_controller {
let mut controller = controller_arc.lock().unwrap();
controller.get_pool_waveform(*audio_pool_index, THUMBNAIL_SIZE as usize)
.ok()
.map(|peaks| peaks.iter().map(|p| (p.min, p.max)).collect())
// Use cached waveform data (populated by fetch_waveform in main.rs)
let waveform = shared.waveform_cache.get(audio_pool_index)
.map(|peaks| peaks.iter().map(|p| (p.min, p.max)).collect());
if waveform.is_some() {
println!("🎵 Found waveform for pool {} (asset {})", audio_pool_index, asset_id);
} else {
None
println!("⚠️ No waveform yet for pool {} (asset {})", audio_pool_index, asset_id);
}
waveform
} else {
None
}
@ -1531,8 +1533,10 @@ impl AssetLibraryPane {
AudioClipType::Sampled { .. } => {
let wave_color = egui::Color32::from_rgb(100, 200, 100);
if let Some(ref peaks) = prefetched_waveform {
println!("✅ Generating waveform thumbnail with {} peaks for asset {}", peaks.len(), asset_id);
Some(generate_waveform_thumbnail(peaks, bg_color, wave_color))
} else {
println!("📦 Generating placeholder thumbnail for asset {}", asset_id);
Some(generate_placeholder_thumbnail(AssetCategory::Audio, 200))
}
}
@ -1683,6 +1687,26 @@ impl PaneRenderer for AssetLibraryPane {
// This allows us to pass &mut shared to render functions while still accessing document
let document_arc = shared.action_executor.document_arc();
// Invalidate thumbnails for audio clips that got new waveform data
if !shared.audio_pools_with_new_waveforms.is_empty() {
println!("🎨 [ASSET_LIB] Checking for thumbnails to invalidate (pools: {:?})", shared.audio_pools_with_new_waveforms);
let mut invalidated_any = false;
for (asset_id, clip) in &document_arc.audio_clips {
if let lightningbeam_core::clip::AudioClipType::Sampled { audio_pool_index } = &clip.clip_type {
if shared.audio_pools_with_new_waveforms.contains(audio_pool_index) {
println!("❌ [ASSET_LIB] Invalidating thumbnail for asset {} (pool {})", asset_id, audio_pool_index);
self.thumbnail_cache.invalidate(asset_id);
invalidated_any = true;
}
}
}
// Force a repaint if we invalidated any thumbnails
if invalidated_any {
println!("🔄 [ASSET_LIB] Requesting repaint after invalidating thumbnails");
ui.ctx().request_repaint();
}
}
// Collect and filter assets
let all_assets = self.collect_assets(&document_arc);
let filtered_assets = self.filter_assets(&all_assets);

View File

@ -133,8 +133,13 @@ pub struct SharedPaneState<'a> {
pub midi_event_cache: &'a std::collections::HashMap<u32, Vec<(f64, u8, bool)>>,
/// Cache of waveform data for rendering (keyed by audio_pool_index)
pub waveform_cache: &'a std::collections::HashMap<usize, Vec<daw_backend::WaveformPeak>>,
/// Chunk-based waveform cache for multi-resolution waveforms
/// Format: (pool_index, detail_level, chunk_index) -> Vec<WaveformPeak>
pub waveform_chunk_cache: &'a std::collections::HashMap<(usize, u8, u32), Vec<daw_backend::WaveformPeak>>,
/// Cache of rendered waveform images (GPU textures) for fast blitting
pub waveform_image_cache: &'a mut crate::waveform_image_cache::WaveformImageCache,
/// Audio pool indices that got new waveform data this frame (for thumbnail invalidation)
pub audio_pools_with_new_waveforms: &'a std::collections::HashSet<usize>,
}
/// Trait for pane rendering

View File

@ -595,6 +595,83 @@ impl TimelinePane {
precache
}
/// Select appropriate detail level based on zoom (pixels per second)
///
/// Detail levels:
/// - Level 0 (Overview): 1 peak/sec - for extreme zoom out (0-2 pps)
/// - Level 1 (Low): 10 peaks/sec - for zoomed out view (2-20 pps)
/// - Level 2 (Medium): 100 peaks/sec - for normal view (20-200 pps)
/// - Level 3 (High): 1000 peaks/sec - for zoomed in (200-2000 pps)
/// - Level 4 (Max): Full resolution - for maximum zoom (>2000 pps)
fn select_detail_level(pixels_per_second: f64) -> u8 {
    // Upper bounds (exclusive) for levels 0..=3, in ascending order. The
    // first bound the zoom falls below is the level; anything at or above
    // 2000 pps (including NaN, which fails every `<` comparison) is level 4.
    const LEVEL_UPPER_BOUNDS: [f64; 4] = [2.0, 20.0, 200.0, 2000.0];
    LEVEL_UPPER_BOUNDS
        .iter()
        .position(|&bound| pixels_per_second < bound)
        .map(|level| level as u8)
        .unwrap_or(4)
}
/// Assemble waveform peaks from chunks for the ENTIRE audio file
///
/// Returns peaks for the entire audio file, or None if chunks are not available.
/// When chunks are missing, they are requested from the backend (medium
/// priority) and the caller is expected to retry on a later frame.
fn assemble_peaks_from_chunks(
    waveform_chunk_cache: &std::collections::HashMap<(usize, u8, u32), Vec<daw_backend::WaveformPeak>>,
    audio_pool_index: usize,
    detail_level: u8,
    audio_file_duration: f64,
    audio_controller: Option<&std::sync::Arc<std::sync::Mutex<daw_backend::EngineController>>>,
) -> Option<Vec<daw_backend::WaveformPeak>> {
    // Calculate chunk time span based on detail level
    let chunk_time_span = match detail_level {
        0 => 60.0, // Level 0: 60 seconds per chunk
        1 => 30.0, // Level 1: 30 seconds per chunk
        2 => 10.0, // Level 2: 10 seconds per chunk
        3 => 5.0,  // Level 3: 5 seconds per chunk
        4 => 1.0,  // Level 4: 1 second per chunk
        _ => 10.0, // Default
    };

    // Total number of chunks needed to cover the entire audio file
    let total_chunks = (audio_file_duration / chunk_time_span).ceil() as u32;

    // First pass: find missing chunks WITHOUT copying any peak data yet.
    // (The previous implementation eagerly assembled peaks and then threw
    // the partial result away whenever a chunk was missing.)
    let missing_chunks: Vec<u32> = (0..total_chunks)
        .filter(|&chunk_idx| {
            !waveform_chunk_cache.contains_key(&(audio_pool_index, detail_level, chunk_idx))
        })
        .collect();

    // If any chunks are missing, request them in the background and bail out;
    // the caller will retry once the chunks arrive.
    if !missing_chunks.is_empty() {
        if let Some(controller_arc) = audio_controller {
            let mut controller = controller_arc.lock().unwrap();
            let _ = controller.generate_waveform_chunks(
                audio_pool_index,
                detail_level,
                missing_chunks,
                1, // Medium priority
            );
        }
        return None;
    }

    // Second pass: every chunk is present, so concatenate them in order.
    let mut assembled_peaks = Vec::new();
    for chunk_idx in 0..total_chunks {
        let key = (audio_pool_index, detail_level, chunk_idx);
        if let Some(chunk_peaks) = waveform_chunk_cache.get(&key) {
            assembled_peaks.extend_from_slice(chunk_peaks);
        }
    }
    Some(assembled_peaks)
}
/// Render waveform visualization using cached texture tiles
/// This is much faster than line-based rendering for many clips
#[allow(clippy::too_many_arguments)]
@ -674,21 +751,37 @@ impl TimelinePane {
let tile_screen_x = timeline_left_edge + ((tile_timeline_start - viewport_start_time) * pixels_per_second) as f32;
let tile_screen_width = ((tile_timeline_end - tile_timeline_start) * pixels_per_second) as f32;
// Clip to the visible clip rectangle
let tile_rect = egui::Rect::from_min_size(
// Create unclipped tile rect
let unclipped_tile_rect = egui::Rect::from_min_size(
egui::pos2(tile_screen_x, clip_rect.min.y),
egui::vec2(tile_screen_width, clip_rect.height()),
).intersect(clip_rect);
);
// Clip to the visible clip rectangle
let tile_rect = unclipped_tile_rect.intersect(clip_rect);
if tile_rect.width() <= 0.0 || tile_rect.height() <= 0.0 {
continue; // Nothing visible
}
// Adjust UV coordinates based on how much the tile was clipped
let uv_span = uv_max_x - uv_min_x;
let adjusted_uv_min_x = if unclipped_tile_rect.width() > 0.0 {
uv_min_x + ((tile_rect.min.x - unclipped_tile_rect.min.x) / unclipped_tile_rect.width()) * uv_span
} else {
uv_min_x
};
let adjusted_uv_max_x = if unclipped_tile_rect.width() > 0.0 {
uv_min_x + ((tile_rect.max.x - unclipped_tile_rect.min.x) / unclipped_tile_rect.width()) * uv_span
} else {
uv_max_x
};
// Blit texture with adjusted UV coordinates
painter.image(
texture.id(),
tile_rect,
egui::Rect::from_min_max(egui::pos2(uv_min_x, 0.0), egui::pos2(uv_max_x, 1.0)),
egui::Rect::from_min_max(egui::pos2(adjusted_uv_min_x, 0.0), egui::pos2(adjusted_uv_max_x, 1.0)),
tint_color,
);
}
@ -987,6 +1080,7 @@ impl TimelinePane {
selection: &lightningbeam_core::selection::Selection,
midi_event_cache: &std::collections::HashMap<u32, Vec<(f64, u8, bool)>>,
waveform_cache: &std::collections::HashMap<usize, Vec<daw_backend::WaveformPeak>>,
waveform_chunk_cache: &std::collections::HashMap<(usize, u8, u32), Vec<daw_backend::WaveformPeak>>,
waveform_image_cache: &mut crate::waveform_image_cache::WaveformImageCache,
audio_controller: Option<&std::sync::Arc<std::sync::Mutex<daw_backend::EngineController>>>,
) -> Vec<(egui::Rect, uuid::Uuid, f64, f64)> {
@ -1273,18 +1367,41 @@ impl TimelinePane {
}
// Sampled Audio: Draw waveform
lightningbeam_core::clip::AudioClipType::Sampled { audio_pool_index } => {
if let Some(waveform) = waveform_cache.get(audio_pool_index) {
// Get audio file duration from backend
let audio_file_duration = if let Some(ref controller_arc) = audio_controller {
let mut controller = controller_arc.lock().unwrap();
controller.get_pool_file_info(*audio_pool_index)
.ok()
.map(|(duration, _, _)| duration)
.unwrap_or(clip.duration) // Fallback to clip duration
} else {
clip.duration // Fallback if no controller
};
// Get audio file duration from backend
let audio_file_duration = if let Some(ref controller_arc) = audio_controller {
let mut controller = controller_arc.lock().unwrap();
controller.get_pool_file_info(*audio_pool_index)
.ok()
.map(|(duration, _, _)| duration)
.unwrap_or(clip.duration) // Fallback to clip duration
} else {
clip.duration // Fallback if no controller
};
// Select detail level based on zoom
let requested_level = Self::select_detail_level(self.pixels_per_second as f64);
// Try to assemble peaks from chunks with progressive fallback to lower detail levels
let mut peaks_to_render = None;
for level in (0..=requested_level).rev() {
if let Some(peaks) = Self::assemble_peaks_from_chunks(
waveform_chunk_cache,
*audio_pool_index,
level,
audio_file_duration,
audio_controller,
) {
peaks_to_render = Some(peaks);
break;
}
}
// Final fallback to old waveform_cache if no chunks available at any level
let peaks_to_render = peaks_to_render
.or_else(|| waveform_cache.get(audio_pool_index).cloned())
.unwrap_or_default();
if !peaks_to_render.is_empty() {
Self::render_audio_waveform(
painter,
clip_rect,
@ -1297,7 +1414,7 @@ impl TimelinePane {
self.viewport_start_time,
self.pixels_per_second as f64,
waveform_image_cache,
waveform,
&peaks_to_render,
ui.ctx(),
bright_color, // Use bright color for waveform (lighter than background)
);
@ -2079,7 +2196,7 @@ impl PaneRenderer for TimelinePane {
// Render layer rows with clipping
ui.set_clip_rect(content_rect.intersect(original_clip_rect));
let video_clip_hovers = self.render_layers(ui, content_rect, shared.theme, document, shared.active_layer_id, shared.selection, shared.midi_event_cache, shared.waveform_cache, shared.waveform_image_cache, shared.audio_controller);
let video_clip_hovers = self.render_layers(ui, content_rect, shared.theme, document, shared.active_layer_id, shared.selection, shared.midi_event_cache, shared.waveform_cache, shared.waveform_chunk_cache, shared.waveform_image_cache, shared.audio_controller);
// Render playhead on top (clip to timeline area)
ui.set_clip_rect(timeline_rect.intersect(original_clip_rect));