File save/load for audio projects

Skyler Lehmkuhl 2025-11-03 02:46:43 -05:00
parent 9702a501bd
commit 1ee86af94d
13 changed files with 1292 additions and 42 deletions

View File

@@ -416,6 +416,7 @@ dependencies = [
"dasp_sample",
"dasp_signal",
"midly",
"pathdiff",
"petgraph 0.6.5",
"rand",
"ratatui",
@@ -847,6 +848,12 @@ version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a"
[[package]]
name = "pathdiff"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df94ce210e5bc13cb6651479fa48d14f601d9858cfe0467f43ae157023b938d3"
[[package]]
name = "petgraph"
version = "0.5.1"

View File

@@ -13,6 +13,7 @@ ratatui = "0.26"
crossterm = "0.27"
rand = "0.8"
base64 = "0.22"
pathdiff = "0.2"
# Node-based audio graph dependencies
dasp_graph = "0.11"
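
For context, pathdiff is the new dependency backing the relative-path storage in the audio pool serializer below: diff_paths(target, base) computes target relative to base purely lexically, returning None when no relative path can be built (e.g. across Windows drive letters). A minimal sketch, with hypothetical paths:

use std::path::{Path, PathBuf};

fn pathdiff_example() {
    // "How do I get from the project file's directory to the sample?"
    let rel: Option<PathBuf> = pathdiff::diff_paths(
        Path::new("/home/user/samples/kick.wav"), // file in the pool
        Path::new("/home/user/projects/song"),    // project file's directory
    );
    assert_eq!(rel, Some(PathBuf::from("../../samples/kick.wav")));
}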

View File

@@ -338,10 +338,18 @@ impl Engine {
}
}
Command::MoveClip(track_id, clip_id, new_start_time) => {
-if let Some(crate::audio::track::TrackNode::Audio(track)) = self.project.get_track_mut(track_id) {
-if let Some(clip) = track.clips.iter_mut().find(|c| c.id == clip_id) {
-clip.start_time = new_start_time;
+match self.project.get_track_mut(track_id) {
+Some(crate::audio::track::TrackNode::Audio(track)) => {
+if let Some(clip) = track.clips.iter_mut().find(|c| c.id == clip_id) {
+clip.start_time = new_start_time;
+}
+}
+Some(crate::audio::track::TrackNode::Midi(track)) => {
+if let Some(clip) = track.clips.iter_mut().find(|c| c.id == clip_id) {
+clip.start_time = new_start_time;
+}
+}
+_ => {}
+}
}
Command::CreateMetatrack(name) => {
@@ -390,6 +398,19 @@ impl Engine {
let _ = self.event_tx.push(AudioEvent::AudioFileAdded(pool_index, path));
}
Command::AddAudioClip(track_id, pool_index, start_time, duration, offset) => {
eprintln!("[Engine] AddAudioClip: track_id={}, pool_index={}, start_time={}, duration={}",
track_id, pool_index, start_time, duration);
// Check if pool index is valid
let pool_size = self.audio_pool.len();
if pool_index >= pool_size {
eprintln!("[Engine] ERROR: pool_index {} is out of bounds (pool size: {})",
pool_index, pool_size);
} else {
eprintln!("[Engine] Pool index {} is valid, pool has {} files",
pool_index, pool_size);
}
// Create a new clip with unique ID
let clip_id = self.next_clip_id;
self.next_clip_id += 1;
@@ -404,8 +425,11 @@ impl Engine {
// Add clip to track
if let Some(crate::audio::track::TrackNode::Audio(track)) = self.project.get_track_mut(track_id) {
track.clips.push(clip);
eprintln!("[Engine] Clip {} added to track {} successfully", clip_id, track_id);
// Notify UI about the new clip
let _ = self.event_tx.push(AudioEvent::ClipAdded(track_id, clip_id));
} else {
eprintln!("[Engine] ERROR: Track {} not found or is not an audio track", track_id);
}
}
Command::CreateMidiTrack(name) => {
@@ -1405,6 +1429,106 @@ impl Engine {
QueryResponse::AutomationName(Err(format!("Track {} not found or is not a MIDI track", track_id)))
}
}
Query::SerializeAudioPool(project_path) => {
QueryResponse::AudioPoolSerialized(self.audio_pool.serialize(&project_path))
}
Query::LoadAudioPool(entries, project_path) => {
QueryResponse::AudioPoolLoaded(self.audio_pool.load_from_serialized(entries, &project_path))
}
Query::ResolveMissingAudioFile(pool_index, new_path) => {
QueryResponse::AudioFileResolved(self.audio_pool.resolve_missing_file(pool_index, &new_path))
}
Query::SerializeTrackGraph(track_id, _project_path) => {
// Get the track and serialize its graph
if let Some(track_node) = self.project.get_track(track_id) {
let preset_json = match track_node {
TrackNode::Audio(track) => {
// Serialize effects graph
let preset = track.effects_graph.to_preset(format!("track_{}_effects", track_id));
serde_json::to_string_pretty(&preset)
.map_err(|e| format!("Failed to serialize effects graph: {}", e))
}
TrackNode::Midi(track) => {
// Serialize instrument graph
let preset = track.instrument_graph.to_preset(format!("track_{}_instrument", track_id));
serde_json::to_string_pretty(&preset)
.map_err(|e| format!("Failed to serialize instrument graph: {}", e))
}
TrackNode::Group(_) => {
// TODO: Add graph serialization when we add graphs to group tracks
Err("Group tracks don't have graphs to serialize yet".to_string())
}
};
QueryResponse::TrackGraphSerialized(preset_json)
} else {
QueryResponse::TrackGraphSerialized(Err(format!("Track {} not found", track_id)))
}
}
Query::LoadTrackGraph(track_id, preset_json, project_path) => {
// Parse preset and load into track's graph
use crate::audio::node_graph::preset::GraphPreset;
let result = (|| -> Result<(), String> {
let preset: GraphPreset = serde_json::from_str(&preset_json)
.map_err(|e| format!("Failed to parse preset JSON: {}", e))?;
let preset_base_path = project_path.parent();
if let Some(track_node) = self.project.get_track_mut(track_id) {
match track_node {
TrackNode::Audio(track) => {
// Load into effects graph with proper buffer size (8192 to handle any callback size)
track.effects_graph = AudioGraph::from_preset(&preset, self.sample_rate, 8192, preset_base_path)?;
Ok(())
}
TrackNode::Midi(track) => {
// Load into instrument graph with proper buffer size (8192 to handle any callback size)
track.instrument_graph = AudioGraph::from_preset(&preset, self.sample_rate, 8192, preset_base_path)?;
Ok(())
}
TrackNode::Group(_) => {
// TODO: Add graph loading when we add graphs to group tracks
Err("Group tracks don't have graphs to load yet".to_string())
}
}
} else {
Err(format!("Track {} not found", track_id))
}
})();
QueryResponse::TrackGraphLoaded(result)
}
Query::CreateAudioTrackSync(name) => {
let track_id = self.project.add_audio_track(name.clone(), None);
eprintln!("[Engine] Created audio track '{}' with ID {}", name, track_id);
// Notify UI about the new audio track
let _ = self.event_tx.push(AudioEvent::TrackCreated(track_id, false, name));
QueryResponse::TrackCreated(Ok(track_id))
}
Query::CreateMidiTrackSync(name) => {
let track_id = self.project.add_midi_track(name.clone(), None);
eprintln!("[Engine] Created MIDI track '{}' with ID {}", name, track_id);
// Notify UI about the new MIDI track
let _ = self.event_tx.push(AudioEvent::TrackCreated(track_id, false, name));
QueryResponse::TrackCreated(Ok(track_id))
}
Query::GetPoolWaveform(pool_index, target_peaks) => {
match self.audio_pool.generate_waveform(pool_index, target_peaks) {
Some(waveform) => QueryResponse::PoolWaveform(Ok(waveform)),
None => QueryResponse::PoolWaveform(Err(format!("Pool index {} not found", pool_index))),
}
}
Query::GetPoolFileInfo(pool_index) => {
match self.audio_pool.get_file_info(pool_index) {
Some(info) => QueryResponse::PoolFileInfo(Ok(info)),
None => QueryResponse::PoolFileInfo(Err(format!("Pool index {} not found", pool_index))),
}
}
};
// Send response back
@@ -1792,6 +1916,46 @@ impl EngineController {
let _ = self.command_tx.push(Command::CreateMidiTrack(name));
}
/// Create a new audio track synchronously (waits for creation to complete)
pub fn create_audio_track_sync(&mut self, name: String) -> Result<TrackId, String> {
if let Err(_) = self.query_tx.push(Query::CreateAudioTrackSync(name)) {
return Err("Failed to send track creation query".to_string());
}
// Wait for response (with timeout)
let start = std::time::Instant::now();
let timeout = std::time::Duration::from_secs(2);
while start.elapsed() < timeout {
if let Ok(QueryResponse::TrackCreated(result)) = self.query_response_rx.pop() {
return result;
}
std::thread::sleep(std::time::Duration::from_millis(1));
}
Err("Track creation timeout".to_string())
}
/// Create a new MIDI track synchronously (waits for creation to complete)
pub fn create_midi_track_sync(&mut self, name: String) -> Result<TrackId, String> {
if let Err(_) = self.query_tx.push(Query::CreateMidiTrackSync(name)) {
return Err("Failed to send track creation query".to_string());
}
// Wait for response (with timeout)
let start = std::time::Instant::now();
let timeout = std::time::Duration::from_secs(2);
while start.elapsed() < timeout {
if let Ok(QueryResponse::TrackCreated(result)) = self.query_response_rx.pop() {
return result;
}
std::thread::sleep(std::time::Duration::from_millis(1));
}
Err("Track creation timeout".to_string())
}
/// Create a new MIDI clip on a track
pub fn create_midi_clip(&mut self, track_id: TrackId, start_time: f64, duration: f64) -> MidiClipId {
// Peek at the next clip ID that will be used
@@ -2141,4 +2305,151 @@ impl EngineController {
Err("Query timeout".to_string())
}
/// Serialize the audio pool for project saving
pub fn serialize_audio_pool(&mut self, project_path: &std::path::Path) -> Result<Vec<crate::audio::pool::AudioPoolEntry>, String> {
// Send query
if let Err(_) = self.query_tx.push(Query::SerializeAudioPool(project_path.to_path_buf())) {
return Err("Failed to send query - queue full".to_string());
}
// Wait for response (with timeout)
let start = std::time::Instant::now();
let timeout = std::time::Duration::from_secs(5); // Longer timeout for file operations
while start.elapsed() < timeout {
if let Ok(QueryResponse::AudioPoolSerialized(result)) = self.query_response_rx.pop() {
return result;
}
std::thread::sleep(std::time::Duration::from_millis(10));
}
Err("Query timeout".to_string())
}
/// Get waveform for a pool index
pub fn get_pool_waveform(&mut self, pool_index: usize, target_peaks: usize) -> Result<Vec<crate::io::WaveformPeak>, String> {
// Send query
if let Err(_) = self.query_tx.push(Query::GetPoolWaveform(pool_index, target_peaks)) {
return Err("Failed to send query - queue full".to_string());
}
// Wait for response (with timeout)
let start = std::time::Instant::now();
let timeout = std::time::Duration::from_secs(2);
while start.elapsed() < timeout {
if let Ok(QueryResponse::PoolWaveform(result)) = self.query_response_rx.pop() {
return result;
}
std::thread::sleep(std::time::Duration::from_millis(1));
}
Err("Query timeout".to_string())
}
/// Get file info from pool (duration, sample_rate, channels)
pub fn get_pool_file_info(&mut self, pool_index: usize) -> Result<(f64, u32, u32), String> {
// Send query
if let Err(_) = self.query_tx.push(Query::GetPoolFileInfo(pool_index)) {
return Err("Failed to send query - queue full".to_string());
}
// Wait for response (with timeout)
let start = std::time::Instant::now();
let timeout = std::time::Duration::from_secs(2);
while start.elapsed() < timeout {
if let Ok(QueryResponse::PoolFileInfo(result)) = self.query_response_rx.pop() {
return result;
}
std::thread::sleep(std::time::Duration::from_millis(1));
}
Err("Query timeout".to_string())
}
/// Load audio pool from serialized entries
pub fn load_audio_pool(&mut self, entries: Vec<crate::audio::pool::AudioPoolEntry>, project_path: &std::path::Path) -> Result<Vec<usize>, String> {
// Send command via query mechanism
if let Err(_) = self.query_tx.push(Query::LoadAudioPool(entries, project_path.to_path_buf())) {
return Err("Failed to send query - queue full".to_string());
}
// Wait for response (with timeout)
let start = std::time::Instant::now();
let timeout = std::time::Duration::from_secs(10); // Long timeout for loading multiple files
while start.elapsed() < timeout {
if let Ok(QueryResponse::AudioPoolLoaded(result)) = self.query_response_rx.pop() {
return result;
}
std::thread::sleep(std::time::Duration::from_millis(10));
}
Err("Query timeout".to_string())
}
/// Resolve a missing audio file by loading from a new path
pub fn resolve_missing_audio_file(&mut self, pool_index: usize, new_path: &std::path::Path) -> Result<(), String> {
// Send command via query mechanism
if let Err(_) = self.query_tx.push(Query::ResolveMissingAudioFile(pool_index, new_path.to_path_buf())) {
return Err("Failed to send query - queue full".to_string());
}
// Wait for response (with timeout)
let start = std::time::Instant::now();
let timeout = std::time::Duration::from_secs(5);
while start.elapsed() < timeout {
if let Ok(QueryResponse::AudioFileResolved(result)) = self.query_response_rx.pop() {
return result;
}
std::thread::sleep(std::time::Duration::from_millis(10));
}
Err("Query timeout".to_string())
}
/// Serialize a track's effects/instrument graph to JSON
pub fn serialize_track_graph(&mut self, track_id: TrackId, project_path: &std::path::Path) -> Result<String, String> {
// Send query
if let Err(_) = self.query_tx.push(Query::SerializeTrackGraph(track_id, project_path.to_path_buf())) {
return Err("Failed to send query - queue full".to_string());
}
// Wait for response (with timeout)
let start = std::time::Instant::now();
let timeout = std::time::Duration::from_secs(5);
while start.elapsed() < timeout {
if let Ok(QueryResponse::TrackGraphSerialized(result)) = self.query_response_rx.pop() {
return result;
}
std::thread::sleep(std::time::Duration::from_millis(10));
}
Err("Query timeout".to_string())
}
/// Load a track's effects/instrument graph from JSON
pub fn load_track_graph(&mut self, track_id: TrackId, preset_json: &str, project_path: &std::path::Path) -> Result<(), String> {
// Send query
if let Err(_) = self.query_tx.push(Query::LoadTrackGraph(track_id, preset_json.to_string(), project_path.to_path_buf())) {
return Err("Failed to send query - queue full".to_string());
}
// Wait for response (with timeout)
let start = std::time::Instant::now();
let timeout = std::time::Duration::from_secs(10); // Longer timeout for loading presets
while start.elapsed() < timeout {
if let Ok(QueryResponse::TrackGraphLoaded(result)) = self.query_response_rx.pop() {
return result;
}
std::thread::sleep(std::time::Duration::from_millis(10));
}
Err("Query timeout".to_string())
}
}
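
Every controller method above repeats the same push-then-poll pattern, differing only in the expected response variant, the timeout, and the poll interval; note that any non-matching response popped while waiting is silently dropped. A possible consolidation, offered as a sketch only (wait_for is hypothetical, not part of this commit):

impl EngineController {
    /// Hypothetical helper: push a query, then poll until `extract`
    /// recognizes the response or `timeout` elapses.
    fn wait_for<T>(
        &mut self,
        query: Query,
        timeout: std::time::Duration,
        mut extract: impl FnMut(QueryResponse) -> Option<Result<T, String>>,
    ) -> Result<T, String> {
        if self.query_tx.push(query).is_err() {
            return Err("Failed to send query - queue full".to_string());
        }
        let start = std::time::Instant::now();
        while start.elapsed() < timeout {
            // Like the hand-written loops, this drops responses of any
            // other variant that happen to arrive while waiting.
            if let Ok(response) = self.query_response_rx.pop() {
                if let Some(result) = extract(response) {
                    return result;
                }
            }
            std::thread::sleep(std::time::Duration::from_millis(1));
        }
        Err("Query timeout".to_string())
    }
}

With that in place, serialize_audio_pool would collapse to a single wait_for call whose closure maps QueryResponse::AudioPoolSerialized(r) to Some(r).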

View File

@@ -494,12 +494,14 @@ impl AudioGraph {
node.node.process(&input_slices, &mut output_slices, &midi_input_slices, &mut midi_output_refs, self.sample_rate);
}
-// Copy output node's first output to the provided buffer
+// Mix output node's first output into the provided buffer
if let Some(output_idx) = self.output_node {
if let Some(output_node) = self.graph.node_weight(output_idx) {
if !output_node.output_buffers.is_empty() {
let len = output_buffer.len().min(output_node.output_buffers[0].len());
-output_buffer[..len].copy_from_slice(&output_node.output_buffers[0][..len]);
+for i in 0..len {
+output_buffer[i] += output_node.output_buffers[0][i];
+}
}
}
}
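
The behavioral change here: the graph's output used to overwrite the caller's buffer and now accumulates into it, which pairs with the removal of output.fill(0.0) in AudioTrack later in this commit. Reduced to its essentials (hypothetical helper name):

// Before: the graph replaced whatever the caller had in the buffer.
//     out[..len].copy_from_slice(&graph_out[..len]);
// After: the graph sums into it, so audio already present survives —
// a caller that wants a clean render must now clear the buffer itself.
fn mix_into(out: &mut [f32], graph_out: &[f32]) {
    let len = out.len().min(graph_out.len());
    for i in 0..len {
        out[i] += graph_out[i];
    }
}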
@@ -837,6 +839,7 @@ impl AudioGraph {
"MidiInput" => Box::new(MidiInputNode::new("MIDI Input")),
"MidiToCV" => Box::new(MidiToCVNode::new("MIDI→CV")),
"AudioToCV" => Box::new(AudioToCVNode::new("Audio→CV")),
"AudioInput" => Box::new(AudioInputNode::new("Audio Input")),
"AutomationInput" => Box::new(AutomationInputNode::new("Automation")),
"Oscilloscope" => Box::new(OscilloscopeNode::new("Oscilloscope")),
"TemplateInput" => Box::new(TemplateInputNode::new("Template Input")),

View File

@@ -1,5 +1,6 @@
-use std::path::PathBuf;
+use std::path::{Path, PathBuf};
use std::f32::consts::PI;
use serde::{Deserialize, Serialize};
/// Windowed sinc interpolation for high-quality time stretching
/// This is stateless and can handle arbitrary fractional positions
@@ -77,6 +78,45 @@ impl AudioFile {
pub fn duration_seconds(&self) -> f64 {
self.frames as f64 / self.sample_rate as f64
}
/// Generate a waveform overview with the specified number of peaks
/// This creates a downsampled representation suitable for timeline visualization
pub fn generate_waveform_overview(&self, target_peaks: usize) -> Vec<crate::io::WaveformPeak> {
if self.frames == 0 || target_peaks == 0 {
return Vec::new();
}
let total_frames = self.frames as usize;
let frames_per_peak = (total_frames / target_peaks).max(1);
let actual_peaks = (total_frames + frames_per_peak - 1) / frames_per_peak;
let mut peaks = Vec::with_capacity(actual_peaks);
for peak_idx in 0..actual_peaks {
let start_frame = peak_idx * frames_per_peak;
let end_frame = ((peak_idx + 1) * frames_per_peak).min(total_frames);
let mut min = 0.0f32;
let mut max = 0.0f32;
// Scan all samples in this window
for frame_idx in start_frame..end_frame {
// For multi-channel audio, combine all channels
for ch in 0..self.channels as usize {
let sample_idx = frame_idx * self.channels as usize + ch;
if sample_idx < self.data.len() {
let sample = self.data[sample_idx];
min = min.min(sample);
max = max.max(sample);
}
}
}
peaks.push(crate::io::WaveformPeak { min, max });
}
peaks
}
}
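
To make the bucketing arithmetic concrete, a worked example with hypothetical numbers — a 3-second stereo file at 44.1 kHz reduced to roughly 300 peaks:

let total_frames = 132_300usize;                  // 3 s * 44_100 frames/s
let target_peaks = 300usize;
let frames_per_peak = (total_frames / target_peaks).max(1);
assert_eq!(frames_per_peak, 441);
// Ceiling division, so a partial trailing window still yields a peak:
let actual_peaks = (total_frames + frames_per_peak - 1) / frames_per_peak;
assert_eq!(actual_peaks, 300);
// Each peak condenses 441 frames x 2 channels = 882 samples into one
// (min, max) pair for the timeline to draw.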
/// Pool of shared audio files
@@ -92,6 +132,30 @@ impl AudioPool {
}
}
/// Get the number of files in the pool
pub fn len(&self) -> usize {
self.files.len()
}
/// Check if the pool is empty
pub fn is_empty(&self) -> bool {
self.files.is_empty()
}
/// Get file info for waveform generation (duration, sample_rate, channels)
pub fn get_file_info(&self, pool_index: usize) -> Option<(f64, u32, u32)> {
self.files.get(pool_index).map(|file| {
(file.duration_seconds(), file.sample_rate, file.channels)
})
}
/// Generate waveform overview for a file in the pool
pub fn generate_waveform(&self, pool_index: usize, target_peaks: usize) -> Option<Vec<crate::io::WaveformPeak>> {
self.files.get(pool_index).map(|file| {
file.generate_waveform_overview(target_peaks)
})
}
/// Add an audio file to the pool and return its index
pub fn add_file(&mut self, file: AudioFile) -> usize {
let index = self.files.len();
@@ -242,3 +306,345 @@ impl Default for AudioPool {
Self::new()
}
}
/// Embedded audio data stored as base64 in the project file
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EmbeddedAudioData {
/// Base64-encoded audio data
pub data_base64: String,
/// Original file format (wav, mp3, etc.)
pub format: String,
}
/// Serializable audio pool entry for project save/load
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AudioPoolEntry {
/// Index in the audio pool
pub pool_index: usize,
/// Original filename
pub name: String,
/// Path relative to project file (None if embedded)
pub relative_path: Option<String>,
/// Duration in seconds
pub duration: f64,
/// Sample rate
pub sample_rate: u32,
/// Number of channels
pub channels: u32,
/// Embedded audio data (for files < 10MB)
pub embedded_data: Option<EmbeddedAudioData>,
}
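
For illustration, here is roughly what two entries look like in memory (all values hypothetical): an external file stored by relative path, and a temp-dir recording that gets embedded:

// A file living near the project, referenced by relative path:
let external = AudioPoolEntry {
    pool_index: 0,
    name: "kick.wav".to_string(),
    relative_path: Some("samples/kick.wav".to_string()),
    duration: 1.5,
    sample_rate: 44100,
    channels: 2,
    embedded_data: None,
};

// A recording from the temp dir: no stable path exists, so it's embedded:
let recorded = AudioPoolEntry {
    pool_index: 1,
    name: "recording_01.wav".to_string(),
    relative_path: None,
    duration: 4.2,
    sample_rate: 48000,
    channels: 1,
    embedded_data: Some(EmbeddedAudioData {
        data_base64: "UklGRi4A...".to_string(), // truncated for illustration
        format: "wav".to_string(),
    }),
};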
impl AudioPool {
/// Serialize the audio pool for project saving
///
/// Files smaller than 10MB are embedded as base64.
/// Larger files are stored as relative paths to the project file.
pub fn serialize(&self, project_path: &Path) -> Result<Vec<AudioPoolEntry>, String> {
let project_dir = project_path.parent()
.ok_or_else(|| "Project path has no parent directory".to_string())?;
let mut entries = Vec::new();
for (index, file) in self.files.iter().enumerate() {
let file_path = &file.path;
let file_path_str = file_path.to_string_lossy();
// Check if this is a temp file (from recording) or previously embedded audio
// Always embed these
let is_temp_file = file_path.starts_with(std::env::temp_dir());
let is_embedded = file_path_str.starts_with("<embedded:");
// Try to get relative path (unless it's a temp/embedded file)
let relative_path = if is_temp_file || is_embedded {
None // Don't store path for temp/embedded files, they'll be embedded
} else if let Some(rel) = pathdiff::diff_paths(file_path, project_dir) {
Some(rel.to_string_lossy().to_string())
} else {
// Fall back to absolute path if relative path fails
Some(file_path.to_string_lossy().to_string())
};
// Check if we should embed this file
// Always embed temp files (recordings) and previously embedded audio,
// otherwise use size threshold
let embedded_data = if is_temp_file || is_embedded || Self::should_embed(file_path) {
// Embed from memory - we already have the audio data loaded
Some(Self::embed_from_memory(file))
} else {
None
};
let entry = AudioPoolEntry {
pool_index: index,
name: file_path
.file_name()
.map(|n| n.to_string_lossy().to_string())
.unwrap_or_else(|| format!("file_{}", index)),
relative_path,
duration: file.duration_seconds(),
sample_rate: file.sample_rate,
channels: file.channels,
embedded_data,
};
entries.push(entry);
}
Ok(entries)
}
/// Check if a file should be embedded (< 10MB)
fn should_embed(file_path: &Path) -> bool {
const TEN_MB: u64 = 10_000_000;
std::fs::metadata(file_path)
.map(|m| m.len() < TEN_MB)
.unwrap_or(false)
}
/// Embed audio from memory (already loaded in the pool)
fn embed_from_memory(audio_file: &AudioFile) -> EmbeddedAudioData {
use base64::{Engine as _, engine::general_purpose};
// Convert the f32 interleaved samples to WAV format bytes
let wav_data = Self::encode_wav(
&audio_file.data,
audio_file.channels,
audio_file.sample_rate
);
let data_base64 = general_purpose::STANDARD.encode(&wav_data);
EmbeddedAudioData {
data_base64,
format: "wav".to_string(),
}
}
/// Encode f32 interleaved samples as WAV file bytes
fn encode_wav(samples: &[f32], channels: u32, sample_rate: u32) -> Vec<u8> {
let num_samples = samples.len();
let bytes_per_sample = 4; // 32-bit float
let data_size = num_samples * bytes_per_sample;
let file_size = 36 + data_size;
let mut wav_data = Vec::with_capacity(44 + data_size);
// RIFF header
wav_data.extend_from_slice(b"RIFF");
wav_data.extend_from_slice(&(file_size as u32).to_le_bytes());
wav_data.extend_from_slice(b"WAVE");
// fmt chunk
wav_data.extend_from_slice(b"fmt ");
wav_data.extend_from_slice(&16u32.to_le_bytes()); // chunk size
wav_data.extend_from_slice(&3u16.to_le_bytes()); // format code (3 = IEEE float)
wav_data.extend_from_slice(&(channels as u16).to_le_bytes());
wav_data.extend_from_slice(&sample_rate.to_le_bytes());
wav_data.extend_from_slice(&(sample_rate * channels * bytes_per_sample as u32).to_le_bytes()); // byte rate
wav_data.extend_from_slice(&((channels * bytes_per_sample as u32) as u16).to_le_bytes()); // block align
wav_data.extend_from_slice(&32u16.to_le_bytes()); // bits per sample
// data chunk
wav_data.extend_from_slice(b"data");
wav_data.extend_from_slice(&(data_size as u32).to_le_bytes());
// Write samples as little-endian f32
for &sample in samples {
wav_data.extend_from_slice(&sample.to_le_bytes());
}
wav_data
}
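
A quick sanity check of the header arithmetic, using a hypothetical one-second stereo file at 44.1 kHz:

// 88_200 interleaved f32 samples (44_100 frames x 2 channels):
//   data_size   = 88_200 * 4     = 352_800 bytes
//   file_size   = 36 + data_size = 352_836  (the RIFF size field is the
//                                  total file length minus 8 bytes)
//   byte rate   = 44_100 * 2 * 4 = 352_800 bytes/s
//   block align = 2 * 4          = 8 bytes per frame
// Format code 3 declares 32-bit IEEE float samples, so the embed/extract
// round trip involves no requantization.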
/// Load audio pool from serialized entries
///
/// Returns a list of pool indices that failed to load (missing files).
/// The caller should present these to the user for resolution.
pub fn load_from_serialized(
&mut self,
entries: Vec<AudioPoolEntry>,
project_path: &Path,
) -> Result<Vec<usize>, String> {
let project_dir = project_path.parent()
.ok_or_else(|| "Project path has no parent directory".to_string())?;
let mut missing_indices = Vec::new();
// Clear existing pool
self.files.clear();
// Find the maximum pool index to determine required size
let max_index = entries.iter()
.map(|e| e.pool_index)
.max()
.unwrap_or(0);
// Ensure we have space for all entries
self.files.resize(max_index + 1, AudioFile::new(PathBuf::new(), Vec::new(), 2, 44100));
for entry in entries {
let success = if let Some(embedded) = entry.embedded_data {
// Load from embedded data
match Self::load_from_embedded_into_pool(self, entry.pool_index, embedded, &entry.name) {
Ok(_) => {
eprintln!("[AudioPool] Successfully loaded embedded audio: {}", entry.name);
true
}
Err(e) => {
eprintln!("[AudioPool] Failed to load embedded audio {}: {}", entry.name, e);
false
}
}
} else if let Some(rel_path) = entry.relative_path {
// Load from file path
let full_path = project_dir.join(&rel_path);
if full_path.exists() {
Self::load_file_into_pool(self, entry.pool_index, &full_path).is_ok()
} else {
eprintln!("[AudioPool] File not found: {:?}", full_path);
false
}
} else {
eprintln!("[AudioPool] Entry has neither embedded data nor path: {}", entry.name);
false
};
if !success {
missing_indices.push(entry.pool_index);
}
}
Ok(missing_indices)
}
/// Load audio from embedded base64 data
fn load_from_embedded_into_pool(
&mut self,
pool_index: usize,
embedded: EmbeddedAudioData,
name: &str,
) -> Result<(), String> {
use base64::{Engine as _, engine::general_purpose};
// Decode base64
let data = general_purpose::STANDARD
.decode(&embedded.data_base64)
.map_err(|e| format!("Failed to decode base64: {}", e))?;
// Write to temporary file for symphonia to decode
let temp_dir = std::env::temp_dir();
let temp_path = temp_dir.join(format!("lightningbeam_embedded_{}.{}", pool_index, embedded.format));
std::fs::write(&temp_path, &data)
.map_err(|e| format!("Failed to write temporary file: {}", e))?;
// Load the temporary file using existing infrastructure
let result = Self::load_file_into_pool(self, pool_index, &temp_path);
// Clean up temporary file
let _ = std::fs::remove_file(&temp_path);
// Update the path to reflect it was embedded
if result.is_ok() && pool_index < self.files.len() {
self.files[pool_index].path = PathBuf::from(format!("<embedded: {}>", name));
}
result
}
/// Load an audio file into a specific pool index
fn load_file_into_pool(&mut self, pool_index: usize, file_path: &Path) -> Result<(), String> {
use symphonia::core::audio::SampleBuffer;
use symphonia::core::codecs::{DecoderOptions, CODEC_TYPE_NULL};
use symphonia::core::formats::FormatOptions;
use symphonia::core::io::MediaSourceStream;
use symphonia::core::meta::MetadataOptions;
use symphonia::core::probe::Hint;
let file = std::fs::File::open(file_path)
.map_err(|e| format!("Failed to open audio file: {}", e))?;
let mss = MediaSourceStream::new(Box::new(file), Default::default());
let mut hint = Hint::new();
if let Some(ext) = file_path.extension() {
hint.with_extension(&ext.to_string_lossy());
}
let format_opts = FormatOptions::default();
let metadata_opts = MetadataOptions::default();
let decoder_opts = DecoderOptions::default();
let probed = symphonia::default::get_probe()
.format(&hint, mss, &format_opts, &metadata_opts)
.map_err(|e| format!("Failed to probe audio file: {}", e))?;
let mut format = probed.format;
let track = format
.tracks()
.iter()
.find(|t| t.codec_params.codec != CODEC_TYPE_NULL)
.ok_or_else(|| "No audio track found".to_string())?;
let mut decoder = symphonia::default::get_codecs()
.make(&track.codec_params, &decoder_opts)
.map_err(|e| format!("Failed to create decoder: {}", e))?;
let track_id = track.id;
let sample_rate = track.codec_params.sample_rate.unwrap_or(44100);
let channels = track.codec_params.channels.map(|c| c.count()).unwrap_or(2) as u32;
let mut samples = Vec::new();
let mut sample_buf = None;
loop {
let packet = match format.next_packet() {
Ok(packet) => packet,
Err(_) => break,
};
if packet.track_id() != track_id {
continue;
}
match decoder.decode(&packet) {
Ok(decoded) => {
if sample_buf.is_none() {
let spec = *decoded.spec();
let duration = decoded.capacity() as u64;
sample_buf = Some(SampleBuffer::<f32>::new(duration, spec));
}
if let Some(ref mut buf) = sample_buf {
buf.copy_interleaved_ref(decoded);
samples.extend_from_slice(buf.samples());
}
}
Err(_) => continue,
}
}
let audio_file = AudioFile::new(
file_path.to_path_buf(),
samples,
channels,
sample_rate,
);
if pool_index >= self.files.len() {
return Err(format!("Pool index {} out of bounds", pool_index));
}
self.files[pool_index] = audio_file;
Ok(())
}
/// Resolve a missing audio file by loading from a new path
/// This is called from the UI when the user manually locates a missing file
pub fn resolve_missing_file(&mut self, pool_index: usize, new_path: &Path) -> Result<(), String> {
Self::load_file_into_pool(self, pool_index, new_path)
}
}
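
Putting the pieces together, a save/load round trip looks roughly like this (a sketch; pool, the paths, and the missing-file handling are illustrative — in the app the UI prompts the user instead):

use std::path::Path;

fn round_trip(pool: &mut AudioPool) -> Result<(), String> {
    let save_path = Path::new("/projects/song/song.beam"); // hypothetical
    // Save: every file becomes an entry, embedded or path-referenced.
    let entries = pool.serialize(save_path)?;
    // ...entries are written into the project JSON alongside the rest...
    // Load: entries that can't be resolved come back as missing indices.
    let missing = pool.load_from_serialized(entries, save_path)?;
    for pool_index in missing {
        let relocated = Path::new("/new/location/kick.wav"); // hypothetical
        pool.resolve_missing_file(pool_index, relocated)?;
    }
    Ok(())
}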

View File

@@ -617,9 +617,6 @@ impl AudioTrack {
}
}
-// Clear output buffer before graph processing to ensure clean output
-output.fill(0.0);
// Find and inject audio into the AudioInputNode
let node_indices: Vec<_> = self.effects_graph.node_indices().collect();
for node_idx in node_indices {

View File

@@ -224,6 +224,24 @@ pub enum Query {
GetAutomationKeyframes(TrackId, u32),
/// Get the display name of an AutomationInput node (track_id, node_id)
GetAutomationName(TrackId, u32),
/// Serialize audio pool for project saving (project_path)
SerializeAudioPool(std::path::PathBuf),
/// Load audio pool from serialized entries (entries, project_path)
LoadAudioPool(Vec<crate::audio::pool::AudioPoolEntry>, std::path::PathBuf),
/// Resolve a missing audio file (pool_index, new_path)
ResolveMissingAudioFile(usize, std::path::PathBuf),
/// Serialize a track's effects/instrument graph (track_id, project_path)
SerializeTrackGraph(TrackId, std::path::PathBuf),
/// Load a track's effects/instrument graph (track_id, preset_json, project_path)
LoadTrackGraph(TrackId, String, std::path::PathBuf),
/// Create a new audio track (name) - returns track ID synchronously
CreateAudioTrackSync(String),
/// Create a new MIDI track (name) - returns track ID synchronously
CreateMidiTrackSync(String),
/// Get waveform data from audio pool (pool_index, target_peaks)
GetPoolWaveform(usize, usize),
/// Get file info from audio pool (pool_index) - returns (duration, sample_rate, channels)
GetPoolFileInfo(usize),
}
/// Oscilloscope data from a node
@@ -265,4 +283,20 @@ pub enum QueryResponse {
AutomationKeyframes(Result<Vec<AutomationKeyframeData>, String>),
/// Automation node name
AutomationName(Result<String, String>),
/// Serialized audio pool entries
AudioPoolSerialized(Result<Vec<crate::audio::pool::AudioPoolEntry>, String>),
/// Audio pool loaded (returns list of missing pool indices)
AudioPoolLoaded(Result<Vec<usize>, String>),
/// Audio file resolved
AudioFileResolved(Result<(), String>),
/// Track graph serialized as JSON
TrackGraphSerialized(Result<String, String>),
/// Track graph loaded
TrackGraphLoaded(Result<(), String>),
/// Track created (returns track ID)
TrackCreated(Result<TrackId, String>),
/// Pool waveform data
PoolWaveform(Result<Vec<crate::io::WaveformPeak>, String>),
/// Pool file info (duration, sample_rate, channels)
PoolFileInfo(Result<(f64, u32, u32), String>),
}

src-tauri/Cargo.lock generated
View File

@@ -1025,6 +1025,7 @@ dependencies = [
"dasp_sample",
"dasp_signal",
"midly",
"pathdiff",
"petgraph 0.6.5",
"rand 0.8.5",
"ratatui",

View File

@@ -1,6 +1,8 @@
use daw_backend::{AudioEvent, AudioSystem, EngineController, EventEmitter, WaveformPeak};
use daw_backend::audio::pool::AudioPoolEntry;
use std::sync::{Arc, Mutex};
use std::collections::HashMap;
use std::path::Path;
use tauri::{Emitter, Manager};
#[derive(serde::Serialize)]
@@ -154,6 +156,17 @@ pub async fn audio_init(
Ok(info)
}
#[tauri::command]
pub async fn audio_reset(state: tauri::State<'_, Arc<Mutex<AudioState>>>) -> Result<(), String> {
let mut audio_state = state.lock().unwrap();
if let Some(controller) = &mut audio_state.controller {
controller.reset();
Ok(())
} else {
Err("Audio not initialized".to_string())
}
}
#[tauri::command]
pub async fn audio_play(state: tauri::State<'_, Arc<Mutex<AudioState>>>) -> Result<(), String> {
let mut audio_state = state.lock().unwrap();
@@ -239,17 +252,12 @@ pub async fn audio_create_track(
) -> Result<u32, String> {
let mut audio_state = state.lock().unwrap();
-// Get track ID and increment counter before borrowing controller
-let track_id = audio_state.next_track_id;
-audio_state.next_track_id += 1;
if let Some(controller) = &mut audio_state.controller {
match track_type.as_str() {
-"audio" => controller.create_audio_track(name),
-"midi" => controller.create_midi_track(name),
+"audio" => controller.create_audio_track_sync(name),
+"midi" => controller.create_midi_track_sync(name),
_ => return Err(format!("Unknown track type: {}", track_type)),
}
-Ok(track_id)
} else {
Err("Audio not initialized".to_string())
}
@@ -619,6 +627,35 @@ pub async fn audio_update_midi_clip_notes(
}
}
#[tauri::command]
pub async fn audio_get_pool_file_info(
state: tauri::State<'_, Arc<Mutex<AudioState>>>,
pool_index: usize,
) -> Result<(f64, u32, u32), String> {
let mut audio_state = state.lock().unwrap();
if let Some(controller) = &mut audio_state.controller {
controller.get_pool_file_info(pool_index)
} else {
Err("Audio not initialized".to_string())
}
}
#[tauri::command]
pub async fn audio_get_pool_waveform(
state: tauri::State<'_, Arc<Mutex<AudioState>>>,
pool_index: usize,
target_peaks: usize,
) -> Result<Vec<daw_backend::io::WaveformPeak>, String> {
let mut audio_state = state.lock().unwrap();
if let Some(controller) = &mut audio_state.controller {
controller.get_pool_waveform(pool_index, target_peaks)
} else {
Err("Audio not initialized".to_string())
}
}
// Node graph commands
#[tauri::command]
@@ -1340,3 +1377,84 @@ pub enum SerializedAudioEvent {
}
// audio_get_events command removed - events are now pushed via Tauri event system
/// Serialize the audio pool for project saving
#[tauri::command]
pub async fn audio_serialize_pool(
state: tauri::State<'_, Arc<Mutex<AudioState>>>,
project_path: String,
) -> Result<Vec<AudioPoolEntry>, String> {
let mut audio_state = state.lock().unwrap();
if let Some(controller) = &mut audio_state.controller {
controller.serialize_audio_pool(Path::new(&project_path))
} else {
Err("Audio not initialized".to_string())
}
}
/// Load audio pool from serialized entries
/// Returns a list of pool indices that failed to load (missing files)
#[tauri::command]
pub async fn audio_load_pool(
state: tauri::State<'_, Arc<Mutex<AudioState>>>,
entries: Vec<AudioPoolEntry>,
project_path: String,
) -> Result<Vec<usize>, String> {
let mut audio_state = state.lock().unwrap();
if let Some(controller) = &mut audio_state.controller {
controller.load_audio_pool(entries, Path::new(&project_path))
} else {
Err("Audio not initialized".to_string())
}
}
/// Resolve a missing audio file by loading from a new path
#[tauri::command]
pub async fn audio_resolve_missing_file(
state: tauri::State<'_, Arc<Mutex<AudioState>>>,
pool_index: usize,
new_path: String,
) -> Result<(), String> {
let mut audio_state = state.lock().unwrap();
if let Some(controller) = &mut audio_state.controller {
controller.resolve_missing_audio_file(pool_index, Path::new(&new_path))
} else {
Err("Audio not initialized".to_string())
}
}
/// Serialize a track's effects/instrument graph to JSON
#[tauri::command]
pub async fn audio_serialize_track_graph(
state: tauri::State<'_, Arc<Mutex<AudioState>>>,
track_id: u32,
project_path: String,
) -> Result<String, String> {
let mut audio_state = state.lock().unwrap();
if let Some(controller) = &mut audio_state.controller {
controller.serialize_track_graph(track_id, Path::new(&project_path))
} else {
Err("Audio not initialized".to_string())
}
}
/// Load a track's effects/instrument graph from JSON
#[tauri::command]
pub async fn audio_load_track_graph(
state: tauri::State<'_, Arc<Mutex<AudioState>>>,
track_id: u32,
preset_json: String,
project_path: String,
) -> Result<(), String> {
let mut audio_state = state.lock().unwrap();
if let Some(controller) = &mut audio_state.controller {
controller.load_track_graph(track_id, &preset_json, Path::new(&project_path))
} else {
Err("Audio not initialized".to_string())
}
}

View File

@@ -194,6 +194,7 @@ pub fn run() {
.invoke_handler(tauri::generate_handler![
greet, trace, debug, info, warn, error, create_window,
audio::audio_init,
audio::audio_reset,
audio::audio_play,
audio::audio_stop,
audio::audio_seek,
@@ -216,6 +217,8 @@ pub fn run() {
audio::audio_update_midi_clip_notes,
audio::audio_send_midi_note_on,
audio::audio_send_midi_note_off,
audio::audio_get_pool_file_info,
audio::audio_get_pool_waveform,
audio::graph_add_node,
audio::graph_add_node_to_template,
audio::graph_remove_node,
@@ -241,6 +244,11 @@ pub fn run() {
audio::automation_get_keyframes,
audio::automation_set_name,
audio::automation_get_name,
audio::audio_serialize_pool,
audio::audio_load_pool,
audio::audio_resolve_missing_file,
audio::audio_serialize_track_graph,
audio::audio_load_track_graph,
])
// .manage(window_counter)
.build(tauri::generate_context!())

View File

@@ -211,7 +211,7 @@ let layoutElements = [];
// 1.6: object coordinates are created relative to their location
let minFileVersion = "1.3";
let maxFileVersion = "2.0";
let maxFileVersion = "2.1";
let filePath = undefined;
let fileExportPath = undefined;
@@ -1138,6 +1138,7 @@ async function handleAudioEvent(event) {
console.log('[FRONTEND] Creating clip object for clip', event.clip_id, 'on track', event.track_id, 'at time', startTime);
recordingTrack.clips.push({
clipId: event.clip_id,
name: recordingTrack.name,
poolIndex: null, // Will be set when recording stops
startTime: startTime,
duration: 0, // Will grow as recording progresses
@@ -1537,13 +1538,20 @@ function newWindow(path) {
invoke("create_window", {app: window.__TAURI__.app, path: path})
}
-function _newFile(width, height, fps, layoutKey) {
+async function _newFile(width, height, fps, layoutKey) {
console.log('[_newFile] REPLACING ROOT - Creating new file with fps:', fps, 'layout:', layoutKey);
console.trace('[_newFile] Stack trace for root replacement:');
const oldRoot = root;
console.log('[_newFile] Old root:', oldRoot, 'frameRate:', oldRoot?.frameRate);
// Reset audio engine to clear any previous session data
try {
await invoke('audio_reset');
} catch (error) {
console.warn('Failed to reset audio engine:', error);
}
// Determine initial child type based on layout
const initialChildType = layoutKey === 'audioDaw' ? 'midi' : 'layer';
root = new GraphicsObject("root", initialChildType);
@@ -1598,6 +1606,12 @@ function _newFile(width, height, fps, layoutKey) {
undoStack.length = 0; // Clear without breaking reference
redoStack.length = 0; // Clear without breaking reference
console.log('[_newFile] Before updateUI - root.frameRate:', root.frameRate);
// Ensure there's an active layer - set to first layer if none is active
if (!context.activeObject.activeLayer && context.activeObject.layers.length > 0) {
context.activeObject.activeLayer = context.activeObject.layers[0];
}
updateUI();
console.log('[_newFile] After updateUI - root.frameRate:', root.frameRate);
updateLayers();
@@ -1629,13 +1643,43 @@ async function _save(path) {
// for (let action of undoStack) {
// console.log(action.name);
// }
// Serialize audio pool (files < 10MB embedded, larger files saved as relative paths)
let audioPool = [];
try {
audioPool = await invoke('audio_serialize_pool', { projectPath: path });
} catch (error) {
console.warn('Failed to serialize audio pool:', error);
// Continue saving without audio pool - user may not have audio initialized
}
// Serialize track graphs (node graphs for each track)
const trackGraphs = {};
for (const track of root.audioTracks) {
if (track.audioTrackId !== null) {
try {
const graphJson = await invoke('audio_serialize_track_graph', {
trackId: track.audioTrackId,
projectPath: path
});
trackGraphs[track.idx] = graphJson;
} catch (error) {
console.warn(`Failed to serialize graph for track ${track.name}:`, error);
}
}
}
const fileData = {
version: "1.7.7",
version: "2.0.0",
width: config.fileWidth,
height: config.fileHeight,
fps: config.framerate,
actions: undoStack,
json: root.toJSON(),
// Audio pool at the end for human readability
audioPool: audioPool,
// Track graphs for instruments/effects
trackGraphs: trackGraphs,
};
if (config.debug) {
// Pretty print file structure when debugging
@@ -1677,6 +1721,58 @@ async function saveAs() {
if (path != undefined) _save(path);
}
/**
* Handle missing audio files by prompting the user to locate them
* @param {number[]} missingIndices - Array of pool indices that failed to load
* @param {Object[]} audioPool - The audio pool entries from the project file
* @param {string} projectPath - Path to the project file
*/
async function handleMissingAudioFiles(missingIndices, audioPool, projectPath) {
const { open } = window.__TAURI__.dialog;
for (const poolIndex of missingIndices) {
// Look up by pool_index, not array position - validEntries may be filtered
const entry = audioPool.find(e => e.pool_index === poolIndex);
if (!entry) continue;
const message = `Cannot find audio file:\n${entry.name}\n\nExpected location: ${entry.relative_path || 'embedded'}\n\nWould you like to locate this file?`;
const result = await window.__TAURI__.dialog.confirm(message, {
title: 'Missing Audio File',
kind: 'warning',
okLabel: 'Locate File',
cancelLabel: 'Skip'
});
if (result) {
// Let user browse for the file
const selected = await open({
title: `Locate ${entry.name}`,
multiple: false,
filters: [{
name: 'Audio Files',
extensions: audioExtensions
}]
});
if (selected) {
try {
await invoke('audio_resolve_missing_file', {
poolIndex: poolIndex,
newPath: selected
});
console.log(`Successfully loaded ${entry.name} from ${selected}`);
} catch (error) {
console.error(`Failed to load ${entry.name}:`, error);
await messageDialog(
`Failed to load file: ${error}`,
{ title: "Load Error", kind: "error" }
);
}
}
}
}
}
async function _open(path, returnJson = false) {
document.body.style.cursor = "wait"
closeDialog();
@@ -1702,7 +1798,7 @@ async function _open(path, returnJson = false) {
document.body.style.cursor = "default"
return file.json;
} else {
-_newFile(file.width, file.height, file.fps);
+await _newFile(file.width, file.height, file.fps);
if (file.actions == undefined) {
await messageDialog("File has no content!", {
title: "Parse error",
@@ -1902,12 +1998,207 @@ async function _open(path, returnJson = false) {
context.objectStack = [root]
}
// Reset audio engine to clear any previous session data
try {
await invoke('audio_reset');
} catch (error) {
console.warn('Failed to reset audio engine:', error);
}
// Load audio pool if present
if (file.audioPool && file.audioPool.length > 0) {
console.log('[JS] Loading audio pool with', file.audioPool.length, 'entries');
// Validate audioPool entries - skip if they don't have the expected structure
const validEntries = file.audioPool.filter(entry => {
// Check basic structure
if (!entry || typeof entry.name !== 'string' || typeof entry.pool_index !== 'number') {
console.warn('[JS] Skipping invalid audio pool entry (bad structure):', entry);
return false;
}
// Log the full entry structure for debugging
console.log('[JS] Validating entry:', JSON.stringify({
name: entry.name,
pool_index: entry.pool_index,
has_embedded_data: !!entry.embedded_data,
embedded_data_keys: entry.embedded_data ? Object.keys(entry.embedded_data) : [],
relative_path: entry.relative_path,
all_keys: Object.keys(entry)
}, null, 2));
// Check if it has either embedded data or a valid file path
const hasEmbedded = entry.embedded_data &&
entry.embedded_data.data_base64 &&
entry.embedded_data.format;
const hasValidPath = entry.relative_path &&
entry.relative_path.length > 0 &&
!entry.relative_path.startsWith('<embedded:');
if (!hasEmbedded && !hasValidPath) {
console.warn('[JS] Skipping invalid audio pool entry (no valid data or path):', {
name: entry.name,
pool_index: entry.pool_index,
hasEmbedded: !!entry.embedded_data,
relativePath: entry.relative_path
});
return false;
}
return true;
});
if (validEntries.length === 0) {
console.warn('[JS] No valid audio pool entries found, skipping audio pool load');
} else {
validEntries.forEach((entry, i) => {
console.log(`[JS] Entry ${i}:`, JSON.stringify({
pool_index: entry.pool_index,
name: entry.name,
hasEmbedded: !!entry.embedded_data,
hasPath: !!entry.relative_path,
relativePath: entry.relative_path,
embeddedFormat: entry.embedded_data?.format,
embeddedSize: entry.embedded_data?.data_base64?.length
}, null, 2));
});
try {
const missingIndices = await invoke('audio_load_pool', {
entries: validEntries,
projectPath: path
});
// If there are missing files, show a dialog to help user locate them
if (missingIndices.length > 0) {
await handleMissingAudioFiles(missingIndices, validEntries, path);
}
} catch (error) {
console.error('Failed to load audio pool:', error);
await messageDialog(
`Failed to load audio files: ${error}`,
{ title: "Audio Load Error", kind: "warning" }
);
}
}
}
lastSaveIndex = undoStack.length;
filePath = path;
// Tauri reports success setting the title here, but the window title doesn't actually update
await getCurrentWindow().setTitle(await basename(filePath));
addRecentFile(path);
// Ensure there's an active layer - set to first layer if none is active
if (!context.activeObject.activeLayer && context.activeObject.layers.length > 0) {
context.activeObject.activeLayer = context.activeObject.layers[0];
}
// Restore audio tracks and clips to the Rust backend
// The fromJSON method only creates JavaScript objects,
// but doesn't initialize them in the audio engine
for (const audioTrack of context.activeObject.audioTracks) {
// First, initialize the track in the Rust backend
if (audioTrack.audioTrackId === null) {
console.log(`[JS] Initializing track ${audioTrack.name} in audio engine`);
try {
await audioTrack.initializeTrack();
} catch (error) {
console.error(`[JS] Failed to initialize track ${audioTrack.name}:`, error);
continue;
}
}
// Then restore clips if any
if (audioTrack.clips && audioTrack.clips.length > 0) {
console.log(`[JS] Restoring ${audioTrack.clips.length} clips for track ${audioTrack.name}`);
for (const clip of audioTrack.clips) {
try {
// Handle MIDI clips differently from audio clips
if (audioTrack.type === 'midi') {
// For MIDI clips, restore the notes
if (clip.notes && clip.notes.length > 0) {
// Create the clip first
await invoke('audio_create_midi_clip', {
trackId: audioTrack.audioTrackId,
startTime: clip.startTime,
duration: clip.duration
});
// Update with notes
const noteData = clip.notes.map(note => [
note.startTime ?? note.start_time, // ?? rather than ||, so a start time of 0 isn't dropped
note.note,
note.velocity,
note.duration
]);
await invoke('audio_update_midi_clip_notes', {
trackId: audioTrack.audioTrackId,
clipId: clip.clipId,
notes: noteData
});
console.log(`[JS] Restored MIDI clip ${clip.name} with ${clip.notes.length} notes`);
}
} else {
// For audio clips, restore from pool
await invoke('audio_add_clip', {
trackId: audioTrack.audioTrackId,
poolIndex: clip.poolIndex,
startTime: clip.startTime,
duration: clip.duration,
offset: clip.offset || 0.0
});
console.log(`[JS] Restored clip ${clip.name} at poolIndex ${clip.poolIndex}`);
// Generate waveform for the restored clip
try {
const fileInfo = await invoke('audio_get_pool_file_info', {
poolIndex: clip.poolIndex
});
const duration = fileInfo[0];
const targetPeaks = Math.floor(duration * 300);
const clampedPeaks = Math.max(1000, Math.min(20000, targetPeaks));
const waveform = await invoke('audio_get_pool_waveform', {
poolIndex: clip.poolIndex,
targetPeaks: clampedPeaks
});
clip.waveform = waveform;
console.log(`[JS] Generated waveform for clip ${clip.name} (${waveform.length} peaks)`);
} catch (waveformError) {
console.error(`[JS] Failed to generate waveform for clip ${clip.name}:`, waveformError);
}
}
} catch (error) {
console.error(`[JS] Failed to restore clip ${clip.name}:`, error);
}
}
}
// Restore track graph (node graph for instruments/effects)
if (file.trackGraphs && file.trackGraphs[audioTrack.idx]) {
try {
await invoke('audio_load_track_graph', {
trackId: audioTrack.audioTrackId,
presetJson: file.trackGraphs[audioTrack.idx],
projectPath: path
});
console.log(`[JS] Restored graph for track ${audioTrack.name}`);
} catch (error) {
console.error(`[JS] Failed to restore graph for track ${audioTrack.name}:`, error);
}
}
}
// Trigger UI and timeline redraw after all waveforms are loaded
updateUI();
updateLayers();
if (context.timelineWidget) {
context.timelineWidget.requestRedraw();
}
}
} else {
await messageDialog(
@@ -4691,7 +4982,7 @@ async function startup() {
if (options.type === 'new') {
// Create new project with selected focus
-_newFile(
+await _newFile(
options.width || 800,
options.height || 600,
options.fps || 24,
@@ -6205,12 +6496,12 @@ async function renderMenu() {
action: actions.deleteLayer.create,
},
{
-text: context.activeObject.activeLayer.visible
+text: context.activeObject.activeLayer?.visible
? "Hide Layer"
: "Show Layer",
-enabled: true,
+enabled: !!context.activeObject.activeLayer,
action: () => {
-context.activeObject.activeLayer.toggleVisibility();
+context.activeObject.activeLayer?.toggleVisibility();
},
},
],

View File

@@ -1163,7 +1163,7 @@ class AudioTrack {
}
static fromJSON(json) {
-const audioTrack = new AudioTrack(json.idx, json.name);
+const audioTrack = new AudioTrack(json.idx, json.name, json.trackType || 'audio');
// Load AnimationData if present
if (json.animationData) {
@@ -1172,14 +1172,27 @@ class AudioTrack {
// Load clips if present
if (json.clips) {
-audioTrack.clips = json.clips.map(clip => ({
-clipId: clip.clipId,
-poolIndex: clip.poolIndex,
-name: clip.name,
-startTime: clip.startTime,
-duration: clip.duration,
-offset: clip.offset
-}));
+audioTrack.clips = json.clips.map(clip => {
+const clipData = {
+clipId: clip.clipId,
+name: clip.name,
+startTime: clip.startTime,
+duration: clip.duration,
+};
+// Restore audio-specific fields
+if (clip.poolIndex !== undefined) {
+clipData.poolIndex = clip.poolIndex;
+clipData.offset = clip.offset;
+}
+// Restore MIDI-specific fields
+if (clip.notes) {
+clipData.notes = clip.notes;
+}
+return clipData;
+});
}
audioTrack.audible = json.audible;
@@ -1191,20 +1204,34 @@ class AudioTrack {
type: "AudioTrack",
idx: randomizeUuid ? uuidv4() : this.idx,
name: randomizeUuid ? this.name + " copy" : this.name,
trackType: this.type, // 'audio' or 'midi'
audible: this.audible,
// AnimationData (includes automation curves)
animationData: this.animationData.toJSON(),
// Clips
-clips: this.clips.map(clip => ({
-clipId: clip.clipId,
-poolIndex: clip.poolIndex,
-name: clip.name,
-startTime: clip.startTime,
-duration: clip.duration,
-offset: clip.offset
-}))
+clips: this.clips.map(clip => {
+const clipData = {
+clipId: clip.clipId,
+name: clip.name,
+startTime: clip.startTime,
+duration: clip.duration,
+};
+// Add audio-specific fields
+if (clip.poolIndex !== undefined) {
+clipData.poolIndex = clip.poolIndex;
+clipData.offset = clip.offset;
+}
+// Add MIDI-specific fields
+if (clip.notes) {
+clipData.notes = clip.notes;
+}
+return clipData;
+})
};
return json;

View File

@@ -2104,6 +2104,11 @@ class TimelineWindowV2 extends Widget {
// Check if clicking on audio clip to start dragging
const audioClipInfo = this.getAudioClipAtPoint(track, adjustedX, adjustedY)
if (audioClipInfo) {
// Skip drag if right-clicking (button 2)
if (this.lastClickEvent?.button === 2) {
return false
}
// Select the track
this.selectTrack(track)
@@ -3674,13 +3679,22 @@ class TimelineWindowV2 extends Widget {
* Shift+right-click for quick delete
*/
contextmenu(x, y, event) {
-// Check if right-clicking in timeline area with curves
+// Check if right-clicking in timeline area
const trackY = y - this.ruler.height
if (trackY >= 0 && x >= this.trackHeaderWidth) {
const adjustedY = trackY - this.trackScrollOffset
const adjustedX = x - this.trackHeaderWidth
const track = this.trackHierarchy.getTrackAtY(adjustedY)
// First check if clicking on a clip (audio or MIDI)
if (track && (track.type === 'audio')) {
const clipInfo = this.getAudioClipAtPoint(track, adjustedX, adjustedY)
if (clipInfo) {
this.showClipContextMenu(clipInfo.clip, clipInfo.audioTrack)
return true
}
}
if (track && (track.type === 'object' || track.type === 'shape') && track.object.curvesMode === 'curve') {
// Use similar logic to handleCurveClick to find if we're clicking on a keyframe
const trackIndex = this.trackHierarchy.tracks.indexOf(track)
@@ -3943,6 +3957,38 @@ class TimelineWindowV2 extends Widget {
await menu.popup(position)
}
/**
* Show context menu for audio/MIDI clips
* Currently supports: Rename
*/
async showClipContextMenu(clip, audioTrack) {
const { Menu, MenuItem } = window.__TAURI__.menu
const { PhysicalPosition } = window.__TAURI__.dpi
const items = []
// Rename option
items.push(await MenuItem.new({
text: 'Rename',
action: async () => {
const newName = prompt('Enter new name for clip:', clip.name || '')
if (newName !== null && newName.trim() !== '') {
clip.name = newName.trim()
console.log(`Renamed clip to "${clip.name}"`)
if (this.requestRedraw) this.requestRedraw()
}
}
}))
const menu = await Menu.new({ items })
// Show menu at mouse position
const clientX = this.lastEvent?.clientX || 0
const clientY = this.lastEvent?.clientY || 0
const position = new PhysicalPosition(clientX, clientY)
await menu.popup(position)
}
/**
* Copy selected keyframes to clipboard (Phase 6)
*/