Compare commits

6 Commits

Author SHA1 Message Date
Skyler Lehmkuhl ffe7799b6a rewrite unsafe code in midi handling 2026-02-16 00:34:59 -05:00
Skyler Lehmkuhl 9db34daf85 make default voice polyphonic 2026-02-16 00:19:15 -05:00
Skyler Lehmkuhl 6c4cc62098 rewrite unsafe code in ffmpeg ffi 2026-02-15 23:35:30 -05:00
Skyler Lehmkuhl a16c14a6a8 Keep voices around while notes are releasing 2026-02-15 23:27:15 -05:00
Skyler Lehmkuhl 06c5342724 rewrite unsafe code in voice allocator 2026-02-15 23:22:36 -05:00
Skyler Lehmkuhl 72f10db64d add voice allocator node 2026-02-15 23:10:00 -05:00
19 changed files with 1156 additions and 542 deletions

View File

@ -317,15 +317,9 @@ impl Engine {
self.project.reset_read_ahead_targets(); self.project.reset_read_ahead_targets();
// Render the entire project hierarchy into the mix buffer // Render the entire project hierarchy into the mix buffer
// Note: We need to use a raw pointer to avoid borrow checker issues
// The midi_clip_pool is part of project, so we extract a reference before mutable borrow
let midi_pool_ptr = &self.project.midi_clip_pool as *const _;
// SAFETY: The midi_clip_pool is not mutated during render, only read
let midi_pool_ref = unsafe { &*midi_pool_ptr };
self.project.render( self.project.render(
&mut self.mix_buffer, &mut self.mix_buffer,
&self.audio_pool, &self.audio_pool,
midi_pool_ref,
&mut self.buffer_pool, &mut self.buffer_pool,
playhead_seconds, playhead_seconds,
self.sample_rate, self.sample_rate,
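The raw-pointer workaround removed above existed only because calling `self.project.render(..., &self.project.midi_clip_pool, ...)` borrows `self.project` mutably and immutably in the same expression. Moving the pool lookup inside `Project::render` lets the borrow checker split the borrow across disjoint fields. A minimal sketch of that idea, with the struct layout assumed and only the shape of the fix mirroring the diff:

```rust
// Sketch only: field names mirror the diff, everything else is illustrative.
struct MidiClipPool {
    clips: Vec<Vec<u8>>,
}

struct MidiTrack {
    gain: f32,
}

struct Project {
    midi_clip_pool: MidiClipPool,
    tracks: Vec<MidiTrack>,
}

impl Project {
    fn render(&mut self, output: &mut [f32]) {
        // Splitting the borrow *inside* the method is fine: `pool` borrows
        // self.midi_clip_pool immutably while self.tracks is borrowed mutably.
        let pool = &self.midi_clip_pool;
        let clip_count = pool.clips.len() as f32;
        for track in &mut self.tracks {
            track.gain = track.gain.min(1.0);
            for sample in output.iter_mut() {
                *sample += track.gain * clip_count * 1e-3;
            }
        }
    }
}

fn main() {
    let mut project = Project {
        midi_clip_pool: MidiClipPool { clips: vec![vec![0x90, 60, 100]] },
        tracks: vec![MidiTrack { gain: 1.0 }],
    };
    let mut buffer = vec![0.0f32; 64];
    // The old call site had to pass &project.midi_clip_pool alongside
    // &mut project, which the borrow checker rejects without raw pointers.
    project.render(&mut buffer);
}
```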
@ -1131,8 +1125,10 @@ impl Engine {
// Save position // Save position
graph.set_node_position(node_idx, x, y); graph.set_node_position(node_idx, x, y);
// Automatically set MIDI-receiving nodes as MIDI targets // Automatically set MIDI source nodes as MIDI targets
if node_type == "MidiInput" || node_type == "VoiceAllocator" { // VoiceAllocator receives MIDI through its input port via connections,
// not directly — it needs a MidiInput node connected to its MIDI In
if node_type == "MidiInput" {
graph.set_midi_target(node_idx, true); graph.set_midi_target(node_idx, true);
} }
@ -1149,7 +1145,7 @@ impl Engine {
} }
} }
Command::GraphAddNodeToTemplate(track_id, voice_allocator_id, node_type, _x, _y) => { Command::GraphAddNodeToTemplate(track_id, voice_allocator_id, node_type, x, y) => {
if let Some(TrackNode::Midi(track)) = self.project.get_track_mut(track_id) { if let Some(TrackNode::Midi(track)) = self.project.get_track_mut(track_id) {
let graph = &mut track.instrument_graph; let graph = &mut track.instrument_graph;
{ {
@ -1209,7 +1205,9 @@ impl Engine {
// Add node to VoiceAllocator's template graph // Add node to VoiceAllocator's template graph
match graph.add_node_to_voice_allocator_template(va_idx, node) { match graph.add_node_to_voice_allocator_template(va_idx, node) {
Ok(node_id) => { Ok(node_id) => {
println!("Added node {} (ID: {}) to VoiceAllocator {} template", node_type, node_id, voice_allocator_id); // Set node position in the template graph
graph.set_position_in_voice_allocator_template(va_idx, node_id, x, y);
println!("Added node {} (ID: {}) to VoiceAllocator {} template at ({}, {})", node_type, node_id, voice_allocator_id, x, y);
let _ = self.event_tx.push(AudioEvent::GraphNodeAdded(track_id, node_id, node_type.clone())); let _ = self.event_tx.push(AudioEvent::GraphNodeAdded(track_id, node_id, node_type.clone()));
} }
Err(e) => { Err(e) => {
@ -1298,6 +1296,58 @@ impl Engine {
} }
} }
Command::GraphDisconnectInTemplate(track_id, voice_allocator_id, from, from_port, to, to_port) => {
if let Some(TrackNode::Midi(track)) = self.project.get_track_mut(track_id) {
let graph = &mut track.instrument_graph;
let va_idx = NodeIndex::new(voice_allocator_id as usize);
match graph.disconnect_in_voice_allocator_template(va_idx, from, from_port, to, to_port) {
Ok(()) => {
let _ = self.event_tx.push(AudioEvent::GraphStateChanged(track_id));
}
Err(e) => {
let _ = self.event_tx.push(AudioEvent::GraphConnectionError(
track_id,
format!("Failed to disconnect in template: {}", e)
));
}
}
}
}
Command::GraphRemoveNodeFromTemplate(track_id, voice_allocator_id, node_index) => {
if let Some(TrackNode::Midi(track)) = self.project.get_track_mut(track_id) {
let graph = &mut track.instrument_graph;
let va_idx = NodeIndex::new(voice_allocator_id as usize);
match graph.remove_node_from_voice_allocator_template(va_idx, node_index) {
Ok(()) => {
let _ = self.event_tx.push(AudioEvent::GraphStateChanged(track_id));
}
Err(e) => {
let _ = self.event_tx.push(AudioEvent::GraphConnectionError(
track_id,
format!("Failed to remove node from template: {}", e)
));
}
}
}
}
Command::GraphSetParameterInTemplate(track_id, voice_allocator_id, node_index, param_id, value) => {
if let Some(TrackNode::Midi(track)) = self.project.get_track_mut(track_id) {
let graph = &mut track.instrument_graph;
let va_idx = NodeIndex::new(voice_allocator_id as usize);
if let Err(e) = graph.set_parameter_in_voice_allocator_template(va_idx, node_index, param_id, value) {
let _ = self.event_tx.push(AudioEvent::GraphConnectionError(
track_id,
format!("Failed to set parameter in template: {}", e)
));
}
}
}
Command::GraphDisconnect(track_id, from, from_port, to, to_port) => { Command::GraphDisconnect(track_id, from, from_port, to, to_port) => {
eprintln!("[AUDIO ENGINE] GraphDisconnect: track={}, from={}, from_port={}, to={}, to_port={}", track_id, from, from_port, to, to_port); eprintln!("[AUDIO ENGINE] GraphDisconnect: track={}, from={}, from_port={}, to={}, to_port={}", track_id, from, from_port, to, to_port);
let graph = match self.project.get_track_mut(track_id) { let graph = match self.project.get_track_mut(track_id) {
@ -1346,6 +1396,14 @@ impl Engine {
} }
} }
Command::GraphSetNodePositionInTemplate(track_id, voice_allocator_id, node_index, x, y) => {
if let Some(TrackNode::Midi(track)) = self.project.get_track_mut(track_id) {
let graph = &mut track.instrument_graph;
let va_idx = NodeIndex::new(voice_allocator_id as usize);
graph.set_position_in_voice_allocator_template(va_idx, node_index, x, y);
}
}
Command::GraphSetMidiTarget(track_id, node_index, enabled) => { Command::GraphSetMidiTarget(track_id, node_index, enabled) => {
if let Some(TrackNode::Midi(track)) = self.project.get_track_mut(track_id) { if let Some(TrackNode::Midi(track)) = self.project.get_track_mut(track_id) {
let graph = &mut track.instrument_graph; let graph = &mut track.instrument_graph;
@ -1467,12 +1525,8 @@ impl Engine {
// Get the VoiceAllocator node and serialize its template // Get the VoiceAllocator node and serialize its template
if let Some(node) = graph.get_node(va_idx) { if let Some(node) = graph.get_node(va_idx) {
// Downcast to VoiceAllocatorNode // Downcast to VoiceAllocatorNode using safe Any trait
let node_ptr = node as *const dyn crate::audio::node_graph::AudioNode; if let Some(va_node) = node.as_any().downcast_ref::<VoiceAllocatorNode>() {
let node_ptr = node_ptr as *const VoiceAllocatorNode;
unsafe {
let va_node = &*node_ptr;
let template_preset = va_node.template_graph().to_preset(&preset_name); let template_preset = va_node.template_graph().to_preset(&preset_name);
// Write to file // Write to file
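The hunks above and below replace raw-pointer casts of `dyn AudioNode` with checked `as_any()` / `as_any_mut()` downcasts. The trait definition itself is not part of this diff; the sketch below shows one common way such hooks are provided (the `AudioNode` shape and `VoiceAllocatorNode` fields are assumptions, only the downcast pattern mirrors the diff):

```rust
use std::any::Any;

// Assumed trait shape: each node exposes itself as &dyn Any so callers can
// downcast without the old node_type() check plus unsafe pointer cast.
trait AudioNode {
    fn node_type(&self) -> &'static str;
    fn as_any(&self) -> &dyn Any;
    fn as_any_mut(&mut self) -> &mut dyn Any;
}

struct VoiceAllocatorNode {
    voices: usize,
}

impl AudioNode for VoiceAllocatorNode {
    fn node_type(&self) -> &'static str { "VoiceAllocator" }
    fn as_any(&self) -> &dyn Any { self }
    fn as_any_mut(&mut self) -> &mut dyn Any { self }
}

fn main() {
    let node: Box<dyn AudioNode> = Box::new(VoiceAllocatorNode { voices: 8 });

    // Checked downcast: returns None for any other node type, so a wrong
    // type guess becomes a handled error instead of undefined behavior.
    if let Some(va) = node.as_any().downcast_ref::<VoiceAllocatorNode>() {
        println!("voice allocator with {} voices", va.voices);
    }
}
```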
@ -1494,12 +1548,8 @@ impl Engine {
let node_idx = NodeIndex::new(node_id as usize); let node_idx = NodeIndex::new(node_id as usize);
if let Some(graph_node) = graph.get_graph_node_mut(node_idx) { if let Some(graph_node) = graph.get_graph_node_mut(node_idx) {
// Downcast to SimpleSamplerNode // Downcast to SimpleSamplerNode using safe Any trait
let node_ptr = &mut *graph_node.node as *mut dyn crate::audio::node_graph::AudioNode; if let Some(sampler_node) = graph_node.node.as_any_mut().downcast_mut::<SimpleSamplerNode>() {
let node_ptr = node_ptr as *mut SimpleSamplerNode;
unsafe {
let sampler_node = &mut *node_ptr;
if let Err(e) = sampler_node.load_sample_from_file(&file_path) { if let Err(e) = sampler_node.load_sample_from_file(&file_path) {
eprintln!("Failed to load sample: {}", e); eprintln!("Failed to load sample: {}", e);
} }
@ -1516,12 +1566,8 @@ impl Engine {
let node_idx = NodeIndex::new(node_id as usize); let node_idx = NodeIndex::new(node_id as usize);
if let Some(graph_node) = graph.get_graph_node_mut(node_idx) { if let Some(graph_node) = graph.get_graph_node_mut(node_idx) {
// Downcast to MultiSamplerNode // Downcast to MultiSamplerNode using safe Any trait
let node_ptr = &mut *graph_node.node as *mut dyn crate::audio::node_graph::AudioNode; if let Some(multi_sampler_node) = graph_node.node.as_any_mut().downcast_mut::<MultiSamplerNode>() {
let node_ptr = node_ptr as *mut MultiSamplerNode;
unsafe {
let multi_sampler_node = &mut *node_ptr;
if let Err(e) = multi_sampler_node.load_layer_from_file(&file_path, key_min, key_max, root_key, velocity_min, velocity_max, loop_start, loop_end, loop_mode) { if let Err(e) = multi_sampler_node.load_layer_from_file(&file_path, key_min, key_max, root_key, velocity_min, velocity_max, loop_start, loop_end, loop_mode) {
eprintln!("Failed to add sample layer: {}", e); eprintln!("Failed to add sample layer: {}", e);
} }
@ -1538,12 +1584,8 @@ impl Engine {
let node_idx = NodeIndex::new(node_id as usize); let node_idx = NodeIndex::new(node_id as usize);
if let Some(graph_node) = graph.get_graph_node_mut(node_idx) { if let Some(graph_node) = graph.get_graph_node_mut(node_idx) {
// Downcast to MultiSamplerNode // Downcast to MultiSamplerNode using safe Any trait
let node_ptr = &mut *graph_node.node as *mut dyn crate::audio::node_graph::AudioNode; if let Some(multi_sampler_node) = graph_node.node.as_any_mut().downcast_mut::<MultiSamplerNode>() {
let node_ptr = node_ptr as *mut MultiSamplerNode;
unsafe {
let multi_sampler_node = &mut *node_ptr;
if let Err(e) = multi_sampler_node.update_layer(layer_index, key_min, key_max, root_key, velocity_min, velocity_max, loop_start, loop_end, loop_mode) { if let Err(e) = multi_sampler_node.update_layer(layer_index, key_min, key_max, root_key, velocity_min, velocity_max, loop_start, loop_end, loop_mode) {
eprintln!("Failed to update sample layer: {}", e); eprintln!("Failed to update sample layer: {}", e);
} }
@ -1560,12 +1602,8 @@ impl Engine {
let node_idx = NodeIndex::new(node_id as usize); let node_idx = NodeIndex::new(node_id as usize);
if let Some(graph_node) = graph.get_graph_node_mut(node_idx) { if let Some(graph_node) = graph.get_graph_node_mut(node_idx) {
// Downcast to MultiSamplerNode // Downcast to MultiSamplerNode using safe Any trait
let node_ptr = &mut *graph_node.node as *mut dyn crate::audio::node_graph::AudioNode; if let Some(multi_sampler_node) = graph_node.node.as_any_mut().downcast_mut::<MultiSamplerNode>() {
let node_ptr = node_ptr as *mut MultiSamplerNode;
unsafe {
let multi_sampler_node = &mut *node_ptr;
if let Err(e) = multi_sampler_node.remove_layer(layer_index) { if let Err(e) = multi_sampler_node.remove_layer(layer_index) {
eprintln!("Failed to remove sample layer: {}", e); eprintln!("Failed to remove sample layer: {}", e);
} }
@ -1875,16 +1913,15 @@ impl Engine {
let graph = &mut track.instrument_graph; let graph = &mut track.instrument_graph;
let node_idx = NodeIndex::new(voice_allocator_id as usize); let node_idx = NodeIndex::new(voice_allocator_id as usize);
if let Some(graph_node) = graph.get_graph_node_mut(node_idx) { if let Some(graph_node) = graph.get_graph_node_mut(node_idx) {
// Downcast to VoiceAllocatorNode // Downcast to VoiceAllocatorNode using safe Any trait
let node_ptr = &*graph_node.node as *const dyn crate::audio::node_graph::AudioNode; if let Some(va_node) = graph_node.node.as_any().downcast_ref::<VoiceAllocatorNode>() {
let node_ptr = node_ptr as *const VoiceAllocatorNode;
unsafe {
let va_node = &*node_ptr;
let template_preset = va_node.template_graph().to_preset("template"); let template_preset = va_node.template_graph().to_preset("template");
match template_preset.to_json() { match template_preset.to_json() {
Ok(json) => QueryResponse::GraphState(Ok(json)), Ok(json) => QueryResponse::GraphState(Ok(json)),
Err(e) => QueryResponse::GraphState(Err(format!("Failed to serialize template: {:?}", e))), Err(e) => QueryResponse::GraphState(Err(format!("Failed to serialize template: {:?}", e))),
} }
} else {
QueryResponse::GraphState(Err("Node is not a VoiceAllocatorNode".to_string()))
} }
} else { } else {
QueryResponse::GraphState(Err("Voice allocator node not found".to_string())) QueryResponse::GraphState(Err("Voice allocator node not found".to_string()))
@ -2106,15 +2143,11 @@ impl Engine {
Query::ExportAudio(settings, output_path) => { Query::ExportAudio(settings, output_path) => {
// Perform export directly - this will block the audio thread but that's okay // Perform export directly - this will block the audio thread but that's okay
// since we're exporting and not playing back anyway // since we're exporting and not playing back anyway
// Use raw pointer to get midi_pool reference before mutable borrow of project
let midi_pool_ptr: *const _ = &self.project.midi_clip_pool;
let midi_pool_ref = unsafe { &*midi_pool_ptr };
// Pass event_tx directly - Rust allows borrowing different fields simultaneously // Pass event_tx directly - Rust allows borrowing different fields simultaneously
match crate::audio::export_audio( match crate::audio::export_audio(
&mut self.project, &mut self.project,
&self.audio_pool, &self.audio_pool,
midi_pool_ref,
&settings, &settings,
&output_path, &output_path,
Some(&mut self.event_tx), Some(&mut self.event_tx),
@ -2945,6 +2978,18 @@ impl EngineController {
let _ = self.command_tx.push(Command::GraphConnectInTemplate(track_id, voice_allocator_id, from_node, from_port, to_node, to_port)); let _ = self.command_tx.push(Command::GraphConnectInTemplate(track_id, voice_allocator_id, from_node, from_port, to_node, to_port));
} }
pub fn graph_disconnect_in_template(&mut self, track_id: TrackId, voice_allocator_id: u32, from_node: u32, from_port: usize, to_node: u32, to_port: usize) {
let _ = self.command_tx.push(Command::GraphDisconnectInTemplate(track_id, voice_allocator_id, from_node, from_port, to_node, to_port));
}
pub fn graph_remove_node_from_template(&mut self, track_id: TrackId, voice_allocator_id: u32, node_id: u32) {
let _ = self.command_tx.push(Command::GraphRemoveNodeFromTemplate(track_id, voice_allocator_id, node_id));
}
pub fn graph_set_parameter_in_template(&mut self, track_id: TrackId, voice_allocator_id: u32, node_id: u32, param_id: u32, value: f32) {
let _ = self.command_tx.push(Command::GraphSetParameterInTemplate(track_id, voice_allocator_id, node_id, param_id, value));
}
/// Remove a node from a track's instrument graph /// Remove a node from a track's instrument graph
pub fn graph_remove_node(&mut self, track_id: TrackId, node_id: u32) { pub fn graph_remove_node(&mut self, track_id: TrackId, node_id: u32) {
let _ = self.command_tx.push(Command::GraphRemoveNode(track_id, node_id)); let _ = self.command_tx.push(Command::GraphRemoveNode(track_id, node_id));
@ -2970,6 +3015,10 @@ impl EngineController {
let _ = self.command_tx.push(Command::GraphSetNodePosition(track_id, node_id, x, y)); let _ = self.command_tx.push(Command::GraphSetNodePosition(track_id, node_id, x, y));
} }
pub fn graph_set_node_position_in_template(&mut self, track_id: TrackId, voice_allocator_id: u32, node_id: u32, x: f32, y: f32) {
let _ = self.command_tx.push(Command::GraphSetNodePositionInTemplate(track_id, voice_allocator_id, node_id, x, y));
}
/// Set which node receives MIDI events in a track's instrument graph /// Set which node receives MIDI events in a track's instrument graph
pub fn graph_set_midi_target(&mut self, track_id: TrackId, node_id: u32, enabled: bool) { pub fn graph_set_midi_target(&mut self, track_id: TrackId, node_id: u32, enabled: bool) {
let _ = self.command_tx.push(Command::GraphSetMidiTarget(track_id, node_id, enabled)); let _ = self.command_tx.push(Command::GraphSetMidiTarget(track_id, node_id, enabled));
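The new controller methods above only enqueue commands; the engine thread drains the queue and applies the template edits. The real code pushes through rtrb's lock-free ring buffer (`command_tx.push(...)`); the sketch below uses a std channel as a stand-in just to show the flow, with the command set reduced to one variant:

```rust
use std::sync::mpsc;

// Stand-in for the Command enum; the real project uses an rtrb ring buffer
// so the audio thread never blocks on a lock.
enum Command {
    GraphSetParameterInTemplate { track_id: u64, voice_allocator_id: u32, node_id: u32, param_id: u32, value: f32 },
}

struct EngineController {
    command_tx: mpsc::Sender<Command>,
}

impl EngineController {
    fn graph_set_parameter_in_template(&self, track_id: u64, va: u32, node: u32, param: u32, value: f32) {
        // Mirrors the controller methods in the diff: fire-and-forget enqueue.
        let _ = self.command_tx.send(Command::GraphSetParameterInTemplate {
            track_id, voice_allocator_id: va, node_id: node, param_id: param, value,
        });
    }
}

fn main() {
    let (tx, rx) = mpsc::channel();
    let controller = EngineController { command_tx: tx };
    controller.graph_set_parameter_in_template(1, 3, 7, 0, 0.5);

    // Engine side: drain pending commands once per audio callback.
    while let Ok(cmd) = rx.try_recv() {
        match cmd {
            Command::GraphSetParameterInTemplate { node_id, value, .. } => {
                println!("set param on template node {node_id} to {value}");
            }
        }
    }
}
```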

View File

@ -1,5 +1,4 @@
use super::buffer_pool::BufferPool; use super::buffer_pool::BufferPool;
use super::midi_pool::MidiClipPool;
use super::pool::AudioPool; use super::pool::AudioPool;
use super::project::Project; use super::project::Project;
use crate::command::AudioEvent; use crate::command::AudioEvent;
@ -69,7 +68,6 @@ impl Default for ExportSettings {
pub fn export_audio<P: AsRef<Path>>( pub fn export_audio<P: AsRef<Path>>(
project: &mut Project, project: &mut Project,
pool: &AudioPool, pool: &AudioPool,
midi_pool: &MidiClipPool,
settings: &ExportSettings, settings: &ExportSettings,
output_path: P, output_path: P,
mut event_tx: Option<&mut rtrb::Producer<AudioEvent>>, mut event_tx: Option<&mut rtrb::Producer<AudioEvent>>,
@ -87,7 +85,7 @@ pub fn export_audio<P: AsRef<Path>>(
// Ensure export mode is disabled even if an error occurs. // Ensure export mode is disabled even if an error occurs.
let result = match settings.format { let result = match settings.format {
ExportFormat::Wav | ExportFormat::Flac => { ExportFormat::Wav | ExportFormat::Flac => {
let samples = render_to_memory(project, pool, midi_pool, settings, event_tx.as_mut().map(|tx| &mut **tx))?; let samples = render_to_memory(project, pool, settings, event_tx.as_mut().map(|tx| &mut **tx))?;
// Signal that rendering is done and we're now writing the file // Signal that rendering is done and we're now writing the file
if let Some(ref mut tx) = event_tx { if let Some(ref mut tx) = event_tx {
let _ = tx.push(AudioEvent::ExportFinalizing); let _ = tx.push(AudioEvent::ExportFinalizing);
@ -99,10 +97,10 @@ pub fn export_audio<P: AsRef<Path>>(
} }
} }
ExportFormat::Mp3 => { ExportFormat::Mp3 => {
export_mp3(project, pool, midi_pool, settings, output_path, event_tx) export_mp3(project, pool, settings, output_path, event_tx)
} }
ExportFormat::Aac => { ExportFormat::Aac => {
export_aac(project, pool, midi_pool, settings, output_path, event_tx) export_aac(project, pool, settings, output_path, event_tx)
} }
}; };
@ -125,7 +123,6 @@ pub fn export_audio<P: AsRef<Path>>(
pub fn render_to_memory( pub fn render_to_memory(
project: &mut Project, project: &mut Project,
pool: &AudioPool, pool: &AudioPool,
midi_pool: &MidiClipPool,
settings: &ExportSettings, settings: &ExportSettings,
mut event_tx: Option<&mut rtrb::Producer<AudioEvent>>, mut event_tx: Option<&mut rtrb::Producer<AudioEvent>>,
) -> Result<Vec<f32>, String> ) -> Result<Vec<f32>, String>
@ -162,7 +159,6 @@ pub fn render_to_memory(
project.render( project.render(
&mut render_buffer, &mut render_buffer,
pool, pool,
midi_pool,
&mut buffer_pool, &mut buffer_pool,
playhead, playhead,
settings.sample_rate, settings.sample_rate,
@ -302,7 +298,6 @@ fn write_flac<P: AsRef<Path>>(
fn export_mp3<P: AsRef<Path>>( fn export_mp3<P: AsRef<Path>>(
project: &mut Project, project: &mut Project,
pool: &AudioPool, pool: &AudioPool,
midi_pool: &MidiClipPool,
settings: &ExportSettings, settings: &ExportSettings,
output_path: P, output_path: P,
mut event_tx: Option<&mut rtrb::Producer<AudioEvent>>, mut event_tx: Option<&mut rtrb::Producer<AudioEvent>>,
@ -382,7 +377,6 @@ fn export_mp3<P: AsRef<Path>>(
project.render( project.render(
&mut render_buffer, &mut render_buffer,
pool, pool,
midi_pool,
&mut buffer_pool, &mut buffer_pool,
playhead, playhead,
settings.sample_rate, settings.sample_rate,
@ -472,7 +466,6 @@ fn export_mp3<P: AsRef<Path>>(
fn export_aac<P: AsRef<Path>>( fn export_aac<P: AsRef<Path>>(
project: &mut Project, project: &mut Project,
pool: &AudioPool, pool: &AudioPool,
midi_pool: &MidiClipPool,
settings: &ExportSettings, settings: &ExportSettings,
output_path: P, output_path: P,
mut event_tx: Option<&mut rtrb::Producer<AudioEvent>>, mut event_tx: Option<&mut rtrb::Producer<AudioEvent>>,
@ -552,7 +545,6 @@ fn export_aac<P: AsRef<Path>>(
project.render( project.render(
&mut render_buffer, &mut render_buffer,
pool, pool,
midi_pool,
&mut buffer_pool, &mut buffer_pool,
playhead, playhead,
settings.sample_rate, settings.sample_rate,
@ -689,16 +681,24 @@ fn encode_complete_frame_mp3(
frame.set_pts(Some(pts)); frame.set_pts(Some(pts));
// Copy all planar samples to frame // Copy all planar samples to frame
unsafe { for ch in 0..channels {
for ch in 0..channels { let plane = frame.data_mut(ch);
let plane = frame.data_mut(ch); let src = &planar_samples[ch];
let src = &planar_samples[ch];
std::ptr::copy_nonoverlapping( // Verify buffer size
src.as_ptr() as *const u8, let byte_size = num_frames * std::mem::size_of::<i16>();
plane.as_mut_ptr(), if plane.len() < byte_size {
num_frames * std::mem::size_of::<i16>(), return Err(format!(
); "FFmpeg frame buffer too small: {} bytes, need {} bytes",
plane.len(), byte_size
));
}
// Safe byte-level copy
for (i, &sample) in src.iter().enumerate() {
let bytes = sample.to_ne_bytes();
let offset = i * 2;
plane[offset..offset + 2].copy_from_slice(&bytes);
} }
} }
@ -734,16 +734,24 @@ fn encode_complete_frame_aac(
frame.set_pts(Some(pts)); frame.set_pts(Some(pts));
// Copy all planar samples to frame // Copy all planar samples to frame
unsafe { for ch in 0..channels {
for ch in 0..channels { let plane = frame.data_mut(ch);
let plane = frame.data_mut(ch); let src = &planar_samples[ch];
let src = &planar_samples[ch];
std::ptr::copy_nonoverlapping( // Verify buffer size
src.as_ptr() as *const u8, let byte_size = num_frames * std::mem::size_of::<f32>();
plane.as_mut_ptr(), if plane.len() < byte_size {
num_frames * std::mem::size_of::<f32>(), return Err(format!(
); "FFmpeg frame buffer too small: {} bytes, need {} bytes",
plane.len(), byte_size
));
}
// Safe byte-level copy
for (i, &sample) in src.iter().enumerate() {
let bytes = sample.to_ne_bytes();
let offset = i * 4;
plane[offset..offset + 4].copy_from_slice(&bytes);
} }
} }
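The per-sample `to_ne_bytes` plus `copy_from_slice` loop replaces `ptr::copy_nonoverlapping` into the encoder frame. A standalone sketch of the same idea with the bounds check up front; `plane` here is a plain byte buffer standing in for what `frame.data_mut(ch)` returns:

```rust
// Write f32 samples into a raw byte plane without unsafe code.
fn write_f32_plane(plane: &mut [u8], samples: &[f32]) -> Result<(), String> {
    let byte_size = samples.len() * std::mem::size_of::<f32>();
    if plane.len() < byte_size {
        return Err(format!(
            "frame buffer too small: {} bytes, need {} bytes",
            plane.len(),
            byte_size
        ));
    }
    for (i, &sample) in samples.iter().enumerate() {
        let bytes = sample.to_ne_bytes();
        let offset = i * 4;
        plane[offset..offset + 4].copy_from_slice(&bytes);
    }
    Ok(())
}

fn main() {
    let samples = [0.0f32, 0.5, -0.5, 1.0];
    let mut plane = vec![0u8; 64];
    write_f32_plane(&mut plane, &samples).unwrap();
    assert_eq!(&plane[..4], &0.0f32.to_ne_bytes()[..]);
}
```

In release builds this kind of fixed-width copy loop is typically lowered to a memcpy, so there is no meaningful cost over the raw-pointer version; a crate such as bytemuck would be another safe option when the byte order is native.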

View File

@ -281,29 +281,19 @@ impl AudioGraph {
// This is tricky with trait objects, so we'll need to use Any // This is tricky with trait objects, so we'll need to use Any
// For now, let's use a different approach - store the node pointer temporarily // For now, let's use a different approach - store the node pointer temporarily
// Check node type first // Downcast to VoiceAllocatorNode using safe Any trait
if graph_node.node.node_type() != "VoiceAllocator" { let va = graph_node.node.as_any_mut()
return Err("Node is not a VoiceAllocator".to_string()); .downcast_mut::<VoiceAllocatorNode>()
} .ok_or_else(|| "Node is not a VoiceAllocator".to_string())?;
// Get mutable reference and downcast using raw pointers // Add node to template graph
let node_ptr = &mut *graph_node.node as *mut dyn AudioNode; let node_idx = va.template_graph_mut().add_node(node);
let node_id = node_idx.index() as u32;
// SAFETY: We just checked that this is a VoiceAllocator // Rebuild voice instances from template
// This is safe because we know the concrete type va.rebuild_voices();
unsafe {
let va_ptr = node_ptr as *mut VoiceAllocatorNode;
let va = &mut *va_ptr;
// Add node to template graph return Ok(node_id);
let node_idx = va.template_graph_mut().add_node(node);
let node_id = node_idx.index() as u32;
// Rebuild voice instances from template
va.rebuild_voices();
return Ok(node_id);
}
} }
Err("VoiceAllocator node not found".to_string()) Err("VoiceAllocator node not found".to_string())
@ -322,36 +312,130 @@ impl AudioGraph {
// Get the VoiceAllocator node // Get the VoiceAllocator node
if let Some(graph_node) = self.graph.node_weight_mut(voice_allocator_idx) { if let Some(graph_node) = self.graph.node_weight_mut(voice_allocator_idx) {
// Check node type first // Downcast to VoiceAllocatorNode using safe Any trait
if graph_node.node.node_type() != "VoiceAllocator" { let va = graph_node.node.as_any_mut()
return Err("Node is not a VoiceAllocator".to_string()); .downcast_mut::<VoiceAllocatorNode>()
} .ok_or_else(|| "Node is not a VoiceAllocator".to_string())?;
// Get mutable reference and downcast using raw pointers // Connect in template graph
let node_ptr = &mut *graph_node.node as *mut dyn AudioNode; let from_idx = NodeIndex::new(from_node as usize);
let to_idx = NodeIndex::new(to_node as usize);
// SAFETY: We just checked that this is a VoiceAllocator va.template_graph_mut().connect(from_idx, from_port, to_idx, to_port)
unsafe { .map_err(|e| format!("{:?}", e))?;
let va_ptr = node_ptr as *mut VoiceAllocatorNode;
let va = &mut *va_ptr;
// Connect in template graph // Rebuild voice instances from template
let from_idx = NodeIndex::new(from_node as usize); va.rebuild_voices();
let to_idx = NodeIndex::new(to_node as usize);
va.template_graph_mut().connect(from_idx, from_port, to_idx, to_port) return Ok(());
.map_err(|e| format!("{:?}", e))?;
// Rebuild voice instances from template
va.rebuild_voices();
return Ok(());
}
} }
Err("VoiceAllocator node not found".to_string()) Err("VoiceAllocator node not found".to_string())
} }
/// Disconnect two nodes in a VoiceAllocator's template graph
pub fn disconnect_in_voice_allocator_template(
&mut self,
voice_allocator_idx: NodeIndex,
from_node: u32,
from_port: usize,
to_node: u32,
to_port: usize,
) -> Result<(), String> {
use crate::audio::node_graph::nodes::VoiceAllocatorNode;
if let Some(graph_node) = self.graph.node_weight_mut(voice_allocator_idx) {
// Downcast to VoiceAllocatorNode using safe Any trait
let va = graph_node.node.as_any_mut()
.downcast_mut::<VoiceAllocatorNode>()
.ok_or_else(|| "Node is not a VoiceAllocator".to_string())?;
let from_idx = NodeIndex::new(from_node as usize);
let to_idx = NodeIndex::new(to_node as usize);
va.template_graph_mut().disconnect(from_idx, from_port, to_idx, to_port);
va.rebuild_voices();
return Ok(());
}
Err("VoiceAllocator node not found".to_string())
}
/// Remove a node from a VoiceAllocator's template graph
pub fn remove_node_from_voice_allocator_template(
&mut self,
voice_allocator_idx: NodeIndex,
node_id: u32,
) -> Result<(), String> {
use crate::audio::node_graph::nodes::VoiceAllocatorNode;
if let Some(graph_node) = self.graph.node_weight_mut(voice_allocator_idx) {
// Downcast to VoiceAllocatorNode using safe Any trait
let va = graph_node.node.as_any_mut()
.downcast_mut::<VoiceAllocatorNode>()
.ok_or_else(|| "Node is not a VoiceAllocator".to_string())?;
let node_idx = NodeIndex::new(node_id as usize);
va.template_graph_mut().remove_node(node_idx);
va.rebuild_voices();
return Ok(());
}
Err("VoiceAllocator node not found".to_string())
}
/// Set a parameter on a node in a VoiceAllocator's template graph
pub fn set_parameter_in_voice_allocator_template(
&mut self,
voice_allocator_idx: NodeIndex,
node_id: u32,
param_id: u32,
value: f32,
) -> Result<(), String> {
use crate::audio::node_graph::nodes::VoiceAllocatorNode;
if let Some(graph_node) = self.graph.node_weight_mut(voice_allocator_idx) {
// Downcast to VoiceAllocatorNode using safe Any trait
let va = graph_node.node.as_any_mut()
.downcast_mut::<VoiceAllocatorNode>()
.ok_or_else(|| "Node is not a VoiceAllocator".to_string())?;
let node_idx = NodeIndex::new(node_id as usize);
if let Some(template_node) = va.template_graph_mut().get_graph_node_mut(node_idx) {
template_node.node.set_parameter(param_id, value);
} else {
return Err("Node not found in template".to_string());
}
va.rebuild_voices();
return Ok(());
}
Err("VoiceAllocator node not found".to_string())
}
/// Set the position of a node in a VoiceAllocator's template graph
pub fn set_position_in_voice_allocator_template(
&mut self,
voice_allocator_idx: NodeIndex,
node_id: u32,
x: f32,
y: f32,
) {
use crate::audio::node_graph::nodes::VoiceAllocatorNode;
if let Some(graph_node) = self.graph.node_weight_mut(voice_allocator_idx) {
// Downcast to VoiceAllocatorNode using safe Any trait
if let Some(va) = graph_node.node.as_any_mut().downcast_mut::<VoiceAllocatorNode>() {
let node_idx = NodeIndex::new(node_id as usize);
va.template_graph_mut().set_node_position(node_idx, x, y);
}
}
}
/// Process the graph and produce audio output /// Process the graph and produce audio output
pub fn process(&mut self, output_buffer: &mut [f32], midi_events: &[MidiEvent], playback_time: f64) { pub fn process(&mut self, output_buffer: &mut [f32], midi_events: &[MidiEvent], playback_time: f64) {
// Update playback time // Update playback time
@ -523,30 +607,21 @@ impl AudioGraph {
let num_midi_outputs = outputs.iter().filter(|p| p.signal_type == SignalType::Midi).count(); let num_midi_outputs = outputs.iter().filter(|p| p.signal_type == SignalType::Midi).count();
// Create mutable slices for audio/CV outputs // Create mutable slices for audio/CV outputs
let mut output_slices: Vec<&mut [f32]> = Vec::with_capacity(num_audio_cv_outputs); // Each buffer is independent, so this is safe
for i in 0..num_audio_cv_outputs { let mut output_slices: Vec<&mut [f32]> = node.output_buffers
if i < node.output_buffers.len() { .iter_mut()
// Safety: We need to work around borrowing rules here .take(num_audio_cv_outputs)
// This is safe because each output buffer is independent .map(|buf| {
let buffer = &mut node.output_buffers[i] as *mut Vec<f32>; let len = buf.len();
unsafe { &mut buf[..process_size.min(len)]
let slice = &mut (&mut *buffer)[..process_size.min((*buffer).len())]; })
output_slices.push(slice); .collect();
}
}
}
// Create mutable references for MIDI outputs // Create mutable references for MIDI outputs
let mut midi_output_refs: Vec<&mut Vec<MidiEvent>> = Vec::with_capacity(num_midi_outputs); let mut midi_output_refs: Vec<&mut Vec<MidiEvent>> = node.midi_output_buffers
for i in 0..num_midi_outputs { .iter_mut()
if i < node.midi_output_buffers.len() { .take(num_midi_outputs)
// Safety: Similar to above .collect();
let buffer = &mut node.midi_output_buffers[i] as *mut Vec<MidiEvent>;
unsafe {
midi_output_refs.push(&mut *buffer);
}
}
}
// Process the node with both audio/CV and MIDI // Process the node with both audio/CV and MIDI
node.node.process(&input_slices, &mut output_slices, &midi_input_slices, &mut midi_output_refs, self.sample_rate); node.node.process(&input_slices, &mut output_slices, &midi_input_slices, &mut midi_output_refs, self.sample_rate);
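The old code built mutable slices into each output buffer through raw pointers "to work around borrowing rules"; the rewrite leans on `iter_mut()`, which already hands out non-overlapping mutable references the borrow checker accepts. A reduced sketch of the pattern (buffer names and sizes are illustrative):

```rust
fn main() {
    let process_size = 4;
    let mut output_buffers: Vec<Vec<f32>> = vec![vec![0.0; 8], vec![0.0; 8], vec![0.0; 2]];
    let num_audio_cv_outputs = 2;

    // iter_mut() yields one &mut Vec<f32> per buffer; each element is distinct,
    // so collecting them into a Vec of mutable slices needs no unsafe.
    let mut output_slices: Vec<&mut [f32]> = output_buffers
        .iter_mut()
        .take(num_audio_cv_outputs)
        .map(|buf| {
            let len = buf.len();
            &mut buf[..process_size.min(len)]
        })
        .collect();

    // A node's process() can now fill each slice independently.
    for (port, slice) in output_slices.iter_mut().enumerate() {
        for sample in slice.iter_mut() {
            *sample = port as f32;
        }
    }
    assert_eq!(output_buffers[1][0], 1.0);
}
```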
@ -679,14 +754,9 @@ impl AudioGraph {
} }
// For VoiceAllocator nodes, serialize the template graph // For VoiceAllocator nodes, serialize the template graph
// We need to downcast to access template_graph()
// This is safe because we know the node type
if node.node_type() == "VoiceAllocator" { if node.node_type() == "VoiceAllocator" {
// Use Any to downcast // Downcast using safe Any trait
let node_ptr = &**node as *const dyn crate::audio::node_graph::AudioNode; if let Some(va_node) = node.as_any().downcast_ref::<VoiceAllocatorNode>() {
let node_ptr = node_ptr as *const VoiceAllocatorNode;
unsafe {
let va_node = &*node_ptr;
let template_preset = va_node.template_graph().to_preset("template"); let template_preset = va_node.template_graph().to_preset("template");
serialized.template_graph = Some(Box::new(template_preset)); serialized.template_graph = Some(Box::new(template_preset));
} }
@ -698,10 +768,8 @@ impl AudioGraph {
use crate::audio::node_graph::preset::{EmbeddedSampleData, SampleData}; use crate::audio::node_graph::preset::{EmbeddedSampleData, SampleData};
use base64::{Engine as _, engine::general_purpose}; use base64::{Engine as _, engine::general_purpose};
let node_ptr = &**node as *const dyn crate::audio::node_graph::AudioNode; // Downcast using safe Any trait
let node_ptr = node_ptr as *const SimpleSamplerNode; if let Some(sampler_node) = node.as_any().downcast_ref::<SimpleSamplerNode>() {
unsafe {
let sampler_node = &*node_ptr;
if let Some(sample_path) = sampler_node.get_sample_path() { if let Some(sample_path) = sampler_node.get_sample_path() {
// Check file size // Check file size
let should_embed = std::fs::metadata(sample_path) let should_embed = std::fs::metadata(sample_path)
@ -745,10 +813,8 @@ impl AudioGraph {
use crate::audio::node_graph::preset::{EmbeddedSampleData, LayerData, SampleData}; use crate::audio::node_graph::preset::{EmbeddedSampleData, LayerData, SampleData};
use base64::{Engine as _, engine::general_purpose}; use base64::{Engine as _, engine::general_purpose};
let node_ptr = &**node as *const dyn crate::audio::node_graph::AudioNode; // Downcast using safe Any trait
let node_ptr = node_ptr as *const MultiSamplerNode; if let Some(multi_sampler_node) = node.as_any().downcast_ref::<MultiSamplerNode>() {
unsafe {
let multi_sampler_node = &*node_ptr;
let layers_info = multi_sampler_node.get_layers_info(); let layers_info = multi_sampler_node.get_layers_info();
if !layers_info.is_empty() { if !layers_info.is_empty() {
let layers: Vec<LayerData> = layers_info let layers: Vec<LayerData> = layers_info
@ -911,7 +977,6 @@ impl AudioGraph {
// If there's a template graph, deserialize and set it // If there's a template graph, deserialize and set it
if let Some(ref template_preset) = serialized_node.template_graph { if let Some(ref template_preset) = serialized_node.template_graph {
let template_graph = Self::from_preset(template_preset, sample_rate, buffer_size, preset_base_path)?; let template_graph = Self::from_preset(template_preset, sample_rate, buffer_size, preset_base_path)?;
// Set the template graph (we'll need to add this method to VoiceAllocator)
*va.template_graph_mut() = template_graph; *va.template_graph_mut() = template_graph;
va.rebuild_voices(); va.rebuild_voices();
} }
@ -938,10 +1003,8 @@ impl AudioGraph {
crate::audio::node_graph::preset::SampleData::SimpleSampler { file_path, embedded_data } => { crate::audio::node_graph::preset::SampleData::SimpleSampler { file_path, embedded_data } => {
// Load sample into SimpleSampler // Load sample into SimpleSampler
if let Some(graph_node) = graph.graph.node_weight_mut(node_idx) { if let Some(graph_node) = graph.graph.node_weight_mut(node_idx) {
let node_ptr = &mut *graph_node.node as *mut dyn crate::audio::node_graph::AudioNode; // Downcast using safe Any trait
let node_ptr = node_ptr as *mut SimpleSamplerNode; if let Some(sampler_node) = graph_node.node.as_any_mut().downcast_mut::<SimpleSamplerNode>() {
unsafe {
let sampler_node = &mut *node_ptr;
// Try embedded data first, then fall back to file path // Try embedded data first, then fall back to file path
if let Some(ref embedded) = embedded_data { if let Some(ref embedded) = embedded_data {
@ -972,10 +1035,8 @@ impl AudioGraph {
crate::audio::node_graph::preset::SampleData::MultiSampler { layers } => { crate::audio::node_graph::preset::SampleData::MultiSampler { layers } => {
// Load layers into MultiSampler // Load layers into MultiSampler
if let Some(graph_node) = graph.graph.node_weight_mut(node_idx) { if let Some(graph_node) = graph.graph.node_weight_mut(node_idx) {
let node_ptr = &mut *graph_node.node as *mut dyn crate::audio::node_graph::AudioNode; // Downcast using safe Any trait
let node_ptr = node_ptr as *mut MultiSamplerNode; if let Some(multi_sampler_node) = graph_node.node.as_any_mut().downcast_mut::<MultiSamplerNode>() {
unsafe {
let multi_sampler_node = &mut *node_ptr;
for layer in layers { for layer in layers {
// Try embedded data first, then fall back to file path // Try embedded data first, then fall back to file path
if let Some(ref embedded) = layer.embedded_data { if let Some(ref embedded) = layer.embedded_data {

View File

@ -188,10 +188,10 @@ impl AudioNode for FilterNode {
// Set filter to match current type // Set filter to match current type
match self.filter_type { match self.filter_type {
FilterType::Lowpass => { FilterType::Lowpass => {
new_filter.set_lowpass(self.sample_rate as f32, self.cutoff, self.resonance); new_filter.set_lowpass(self.cutoff, self.resonance, self.sample_rate as f32);
} }
FilterType::Highpass => { FilterType::Highpass => {
new_filter.set_highpass(self.sample_rate as f32, self.cutoff, self.resonance); new_filter.set_highpass(self.cutoff, self.resonance, self.sample_rate as f32);
} }
} }

View File

@ -9,6 +9,7 @@ const DEFAULT_VOICES: usize = 8;
#[derive(Clone)] #[derive(Clone)]
struct VoiceState { struct VoiceState {
active: bool, active: bool,
releasing: bool, // Note-off received, still processing (e.g. ADSR release)
note: u8, note: u8,
age: u32, // For voice stealing age: u32, // For voice stealing
pending_events: Vec<MidiEvent>, // MIDI events to send to this voice pending_events: Vec<MidiEvent>, // MIDI events to send to this voice
@ -18,6 +19,7 @@ impl VoiceState {
fn new() -> Self { fn new() -> Self {
Self { Self {
active: false, active: false,
releasing: false,
note: 0, note: 0,
age: 0, age: 0,
pending_events: Vec::new(), pending_events: Vec::new(),
@ -72,8 +74,19 @@ impl VoiceAllocatorNode {
Parameter::new(PARAM_VOICE_COUNT, "Voices", 1.0, MAX_VOICES as f32, DEFAULT_VOICES as f32, ParameterUnit::Generic), Parameter::new(PARAM_VOICE_COUNT, "Voices", 1.0, MAX_VOICES as f32, DEFAULT_VOICES as f32, ParameterUnit::Generic),
]; ];
// Create empty template graph // Create template graph with default TemplateInput and TemplateOutput nodes
let template_graph = AudioGraph::new(sample_rate, buffer_size); let mut template_graph = AudioGraph::new(sample_rate, buffer_size);
{
use super::template_io::{TemplateInputNode, TemplateOutputNode};
let input_node = Box::new(TemplateInputNode::new("Template Input"));
let output_node = Box::new(TemplateOutputNode::new("Template Output"));
let input_idx = template_graph.add_node(input_node);
let output_idx = template_graph.add_node(output_node);
template_graph.set_node_position(input_idx, -200.0, 0.0);
template_graph.set_node_position(output_idx, 200.0, 0.0);
template_graph.set_midi_target(input_idx, true);
template_graph.set_output_node(Some(output_idx));
}
// Create voice instances (initially empty clones of template) // Create voice instances (initially empty clones of template)
let voice_instances: Vec<AudioGraph> = (0..MAX_VOICES) let voice_instances: Vec<AudioGraph> = (0..MAX_VOICES)
@ -134,9 +147,9 @@ impl VoiceAllocatorNode {
} }
} }
/// Find a free voice, or steal the oldest one /// Find a free voice, or steal one
/// Priority: inactive → oldest releasing → oldest held
fn find_voice_for_note_on(&mut self) -> usize { fn find_voice_for_note_on(&mut self) -> usize {
// Only search within active voice_count
// First, look for an inactive voice // First, look for an inactive voice
for (i, voice) in self.voices[..self.voice_count].iter().enumerate() { for (i, voice) in self.voices[..self.voice_count].iter().enumerate() {
if !voice.active { if !voice.active {
@ -144,7 +157,17 @@ impl VoiceAllocatorNode {
} }
} }
// No free voices, steal the oldest one within voice_count // No inactive voices — steal the oldest releasing voice
if let Some((i, _)) = self.voices[..self.voice_count]
.iter()
.enumerate()
.filter(|(_, v)| v.releasing)
.max_by_key(|(_, v)| v.age)
{
return i;
}
// No releasing voices either — steal the oldest held voice
self.voices[..self.voice_count] self.voices[..self.voice_count]
.iter() .iter()
.enumerate() .enumerate()
@ -153,13 +176,13 @@ impl VoiceAllocatorNode {
.unwrap_or(0) .unwrap_or(0)
} }
/// Find all voices playing a specific note /// Find all voices playing a specific note (held, not yet releasing)
fn find_voices_for_note_off(&self, note: u8) -> Vec<usize> { fn find_voices_for_note_off(&self, note: u8) -> Vec<usize> {
self.voices[..self.voice_count] self.voices[..self.voice_count]
.iter() .iter()
.enumerate() .enumerate()
.filter_map(|(i, v)| { .filter_map(|(i, v)| {
if v.active && v.note == note { if v.active && !v.releasing && v.note == note {
Some(i) Some(i)
} else { } else {
None None
@ -195,6 +218,7 @@ impl AudioNode for VoiceAllocatorNode {
// Stop voices beyond the new count // Stop voices beyond the new count
for voice in &mut self.voices[new_count..] { for voice in &mut self.voices[new_count..] {
voice.active = false; voice.active = false;
voice.releasing = false;
} }
} }
} }
@ -218,25 +242,26 @@ impl AudioNode for VoiceAllocatorNode {
if event.data2 > 0 { if event.data2 > 0 {
let voice_idx = self.find_voice_for_note_on(); let voice_idx = self.find_voice_for_note_on();
self.voices[voice_idx].active = true; self.voices[voice_idx].active = true;
self.voices[voice_idx].releasing = false;
self.voices[voice_idx].note = event.data1; self.voices[voice_idx].note = event.data1;
self.voices[voice_idx].age = 0; self.voices[voice_idx].age = 0;
// Store MIDI event for this voice to process // Store MIDI event for this voice to process
self.voices[voice_idx].pending_events.push(*event); self.voices[voice_idx].pending_events.push(*event);
} else { } else {
// Velocity = 0 means note off - send to ALL voices playing this note // Velocity = 0 means note off — mark releasing, keep active for ADSR release
let voice_indices = self.find_voices_for_note_off(event.data1); let voice_indices = self.find_voices_for_note_off(event.data1);
for voice_idx in voice_indices { for voice_idx in voice_indices {
self.voices[voice_idx].active = false; self.voices[voice_idx].releasing = true;
self.voices[voice_idx].pending_events.push(*event); self.voices[voice_idx].pending_events.push(*event);
} }
} }
} }
0x80 => { 0x80 => {
// Note off - send to ALL voices playing this note // Note off — mark releasing, keep active for ADSR release
let voice_indices = self.find_voices_for_note_off(event.data1); let voice_indices = self.find_voices_for_note_off(event.data1);
for voice_idx in voice_indices { for voice_idx in voice_indices {
self.voices[voice_idx].active = false; self.voices[voice_idx].releasing = true;
self.voices[voice_idx].pending_events.push(*event); self.voices[voice_idx].pending_events.push(*event);
} }
} }
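With the new `releasing` flag, note-off no longer frees a voice immediately; allocation prefers inactive voices, then voices already in their release phase, then the oldest held note. A compilable reduction of that priority order (the `VoiceState` fields mirror the diff, everything else is trimmed):

```rust
#[derive(Clone)]
struct VoiceState {
    active: bool,
    releasing: bool, // note-off received, ADSR release still sounding
    note: u8,
    age: u32,
}

fn find_voice_for_note_on(voices: &[VoiceState]) -> usize {
    // 1. Any fully inactive voice wins.
    if let Some(i) = voices.iter().position(|v| !v.active) {
        return i;
    }
    // 2. Otherwise steal the oldest voice that is already releasing.
    if let Some((i, _)) = voices
        .iter()
        .enumerate()
        .filter(|(_, v)| v.releasing)
        .max_by_key(|(_, v)| v.age)
    {
        return i;
    }
    // 3. Last resort: steal the oldest held note.
    voices
        .iter()
        .enumerate()
        .max_by_key(|(_, v)| v.age)
        .map(|(i, _)| i)
        .unwrap_or(0)
}

fn main() {
    let voices = vec![
        VoiceState { active: true, releasing: false, note: 60, age: 10 },
        VoiceState { active: true, releasing: true, note: 64, age: 5 },
        VoiceState { active: true, releasing: false, note: 67, age: 20 },
    ];
    // The releasing voice is stolen even though a held voice is older.
    assert_eq!(find_voice_for_note_on(&voices), 1);
}
```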
@ -291,26 +316,28 @@ impl AudioNode for VoiceAllocatorNode {
// Note: playback_time is 0.0 since voice allocator doesn't track time // Note: playback_time is 0.0 since voice allocator doesn't track time
self.voice_instances[voice_idx].process(mix_slice, &midi_events, 0.0); self.voice_instances[voice_idx].process(mix_slice, &midi_events, 0.0);
// Auto-deactivate releasing voices that have gone silent
if voice_state.releasing {
let peak = mix_slice.iter().fold(0.0f32, |max, &s| max.max(s.abs()));
if peak < 1e-6 {
voice_state.active = false;
voice_state.releasing = false;
continue; // Don't mix silent output
}
}
// Mix into output (accumulate) // Mix into output (accumulate)
for (i, sample) in mix_slice.iter().enumerate() { for (i, sample) in mix_slice.iter().enumerate() {
output[i] += sample; output[i] += sample;
} }
} }
} }
// Apply normalization to prevent clipping (divide by active voice count)
let active_count = self.voices[..self.voice_count].iter().filter(|v| v.active).count();
if active_count > 1 {
let scale = 1.0 / (active_count as f32).sqrt(); // Use sqrt for better loudness perception
for sample in output.iter_mut() {
*sample *= scale;
}
}
} }
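Because releasing voices stay active, something has to return them to the pool once the release tail fades out; the hunk above does that with a per-block peak check against a small threshold. A minimal sketch of that test (the 1e-6 threshold comes from the diff; the buffer stands in for one voice's mixed output block):

```rust
/// Returns true when a releasing voice's output block is effectively silent
/// and the voice can be reclaimed.
fn block_is_silent(mix_slice: &[f32]) -> bool {
    let peak = mix_slice.iter().fold(0.0f32, |max, &s| max.max(s.abs()));
    peak < 1e-6
}

fn main() {
    let fading_tail = vec![0.000_000_3_f32; 128];
    let audible = vec![0.01f32; 128];
    assert!(block_is_silent(&fading_tail));
    assert!(!block_is_silent(&audible));
}
```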
fn reset(&mut self) { fn reset(&mut self) {
for voice in &mut self.voices { for voice in &mut self.voices {
voice.active = false; voice.active = false;
voice.releasing = false;
voice.pending_events.clear(); voice.pending_events.clear();
} }
for graph in &mut self.voice_instances { for graph in &mut self.voice_instances {

View File

@ -349,7 +349,6 @@ impl Project {
&mut self, &mut self,
output: &mut [f32], output: &mut [f32],
audio_pool: &AudioClipPool, audio_pool: &AudioClipPool,
midi_pool: &MidiClipPool,
buffer_pool: &mut BufferPool, buffer_pool: &mut BufferPool,
playhead_seconds: f64, playhead_seconds: f64,
sample_rate: u32, sample_rate: u32,
@ -374,7 +373,6 @@ impl Project {
track_id, track_id,
output, output,
audio_pool, audio_pool,
midi_pool,
buffer_pool, buffer_pool,
ctx, ctx,
any_solo, any_solo,
@ -389,7 +387,6 @@ impl Project {
track_id: TrackId, track_id: TrackId,
output: &mut [f32], output: &mut [f32],
audio_pool: &AudioClipPool, audio_pool: &AudioClipPool,
midi_pool: &MidiClipPool,
buffer_pool: &mut BufferPool, buffer_pool: &mut BufferPool,
ctx: RenderContext, ctx: RenderContext,
any_solo: bool, any_solo: bool,
@ -437,7 +434,8 @@ impl Project {
} }
Some(TrackNode::Midi(track)) => { Some(TrackNode::Midi(track)) => {
// Render MIDI track directly into output // Render MIDI track directly into output
track.render(output, midi_pool, ctx.playhead_seconds, ctx.sample_rate, ctx.channels); // Access midi_clip_pool from self - safe because we only need immutable access
track.render(output, &self.midi_clip_pool, ctx.playhead_seconds, ctx.sample_rate, ctx.channels);
} }
Some(TrackNode::Group(group)) => { Some(TrackNode::Group(group)) => {
// Read group properties and transform context (index-based child iteration to avoid clone) // Read group properties and transform context (index-based child iteration to avoid clone)
@ -462,7 +460,6 @@ impl Project {
child_id, child_id,
&mut group_buffer, &mut group_buffer,
audio_pool, audio_pool,
midi_pool,
buffer_pool, buffer_pool,
child_ctx, child_ctx,
any_solo, any_solo,

View File

@ -365,12 +365,16 @@ impl MidiTrack {
// Use a large buffer size that can accommodate any callback // Use a large buffer size that can accommodate any callback
let default_buffer_size = 8192; let default_buffer_size = 8192;
// Start with empty graph — the frontend loads a default instrument preset
// (bass.json) via graph_load_preset which replaces the entire graph
let instrument_graph = AudioGraph::new(sample_rate, default_buffer_size);
Self { Self {
id, id,
name, name,
clip_instances: Vec::new(), clip_instances: Vec::new(),
instrument_graph_preset: None, instrument_graph_preset: None,
instrument_graph: AudioGraph::new(sample_rate, default_buffer_size), instrument_graph,
volume: 1.0, volume: 1.0,
muted: false, muted: false,
solo: false, solo: false,

View File

@ -146,10 +146,18 @@ pub enum Command {
GraphConnectInTemplate(TrackId, u32, u32, usize, u32, usize), GraphConnectInTemplate(TrackId, u32, u32, usize, u32, usize),
/// Disconnect two nodes in a track's graph (track_id, from_node, from_port, to_node, to_port) /// Disconnect two nodes in a track's graph (track_id, from_node, from_port, to_node, to_port)
GraphDisconnect(TrackId, u32, usize, u32, usize), GraphDisconnect(TrackId, u32, usize, u32, usize),
/// Disconnect nodes in a VoiceAllocator template (track_id, voice_allocator_node_id, from_node, from_port, to_node, to_port)
GraphDisconnectInTemplate(TrackId, u32, u32, usize, u32, usize),
/// Remove a node from a VoiceAllocator's template graph (track_id, voice_allocator_node_id, node_index)
GraphRemoveNodeFromTemplate(TrackId, u32, u32),
/// Set a parameter on a node (track_id, node_index, param_id, value) /// Set a parameter on a node (track_id, node_index, param_id, value)
GraphSetParameter(TrackId, u32, u32, f32), GraphSetParameter(TrackId, u32, u32, f32),
/// Set a parameter on a node in a VoiceAllocator's template graph (track_id, voice_allocator_node_id, node_index, param_id, value)
GraphSetParameterInTemplate(TrackId, u32, u32, u32, f32),
/// Set the UI position of a node (track_id, node_index, x, y) /// Set the UI position of a node (track_id, node_index, x, y)
GraphSetNodePosition(TrackId, u32, f32, f32), GraphSetNodePosition(TrackId, u32, f32, f32),
/// Set the UI position of a node in a VoiceAllocator's template (track_id, voice_allocator_id, node_index, x, y)
GraphSetNodePositionInTemplate(TrackId, u32, u32, f32, f32),
/// Set which node receives MIDI events (track_id, node_index, enabled) /// Set which node receives MIDI events (track_id, node_index, enabled)
GraphSetMidiTarget(TrackId, u32, bool), GraphSetMidiTarget(TrackId, u32, bool),
/// Set which node is the audio output (track_id, node_index) /// Set which node is the audio output (track_id, node_index)

View File

@ -62,6 +62,8 @@ pub enum NodeResponse<UserResponse: UserResponseTrait, NodeData: NodeDataTrait>
node: NodeId, node: NodeId,
drag_delta: Vec2, drag_delta: Vec2,
}, },
/// Emitted when a node's title bar is double-clicked.
DoubleClick(NodeId),
User(UserResponse), User(UserResponse),
} }
@ -479,6 +481,9 @@ where
} }
} }
} }
NodeResponse::DoubleClick(_) => {
// Handled by user code.
}
NodeResponse::User(_) => { NodeResponse::User(_) => {
// These are handled by the user code. // These are handled by the user code.
} }
@ -1172,6 +1177,11 @@ where
responses.push(NodeResponse::RaiseNode(self.node_id)); responses.push(NodeResponse::RaiseNode(self.node_id));
} }
// Double-click detection (emitted alongside other responses)
if window_response.double_clicked() {
responses.push(NodeResponse::DoubleClick(self.node_id));
}
responses responses
} }
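The editor change above adds one new response variant, emitted when egui reports a double click on a node's title bar window; user code decides what that means (for example, opening a VoiceAllocator's template graph). A reduced model of the dispatch, with a plain bool standing in for egui's `Response::double_clicked()`:

```rust
// Reduced model of the node editor's response plumbing.
#[derive(Debug, PartialEq)]
enum NodeResponse {
    RaiseNode(u32),
    DoubleClick(u32),
}

fn collect_responses(node_id: u32, clicked: bool, double_clicked: bool) -> Vec<NodeResponse> {
    let mut responses = Vec::new();
    if clicked {
        responses.push(NodeResponse::RaiseNode(node_id));
    }
    // Emitted alongside the other responses, as in the diff; the library
    // itself ignores it and leaves handling to user code.
    if double_clicked {
        responses.push(NodeResponse::DoubleClick(node_id));
    }
    responses
}

fn main() {
    let responses = collect_responses(7, true, true);
    assert_eq!(responses, vec![NodeResponse::RaiseNode(7), NodeResponse::DoubleClick(7)]);
}
```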

View File

@ -769,6 +769,26 @@ pub fn extract_audio_from_video(path: &str) -> Result<Option<ExtractedAudio>, St
// Extract f32 samples (interleaved format) // Extract f32 samples (interleaved format)
let data_ptr = resampled_frame.data(0).as_ptr() as *const f32; let data_ptr = resampled_frame.data(0).as_ptr() as *const f32;
let total_samples = resampled_frame.samples() * frame_channels; let total_samples = resampled_frame.samples() * frame_channels;
// Safety checks before creating slice from FFmpeg data
// 1. Verify f32 alignment (required: 4 bytes)
if data_ptr.align_offset(std::mem::align_of::<f32>()) != 0 {
return Err("FFmpeg audio data is not properly aligned for f32".to_string());
}
// 2. Verify the frame actually has enough data
let byte_size = resampled_frame.data(0).len();
let expected_bytes = total_samples * std::mem::size_of::<f32>();
if byte_size < expected_bytes {
return Err(format!(
"FFmpeg frame buffer too small: {} bytes, need {} bytes",
byte_size, expected_bytes
));
}
// SAFETY: We verified alignment and bounds above.
// The slice lifetime is tied to resampled_frame which lives until
// after extend_from_slice completes.
let samples_slice = unsafe { let samples_slice = unsafe {
std::slice::from_raw_parts(data_ptr, total_samples) std::slice::from_raw_parts(data_ptr, total_samples)
}; };
@ -800,6 +820,26 @@ pub fn extract_audio_from_video(path: &str) -> Result<Option<ExtractedAudio>, St
let data_ptr = resampled_frame.data(0).as_ptr() as *const f32; let data_ptr = resampled_frame.data(0).as_ptr() as *const f32;
let total_samples = resampled_frame.samples() * frame_channels; let total_samples = resampled_frame.samples() * frame_channels;
// Safety checks before creating slice from FFmpeg data
// 1. Verify f32 alignment (required: 4 bytes)
if data_ptr.align_offset(std::mem::align_of::<f32>()) != 0 {
return Err("FFmpeg audio data is not properly aligned for f32".to_string());
}
// 2. Verify the frame actually has enough data
let byte_size = resampled_frame.data(0).len();
let expected_bytes = total_samples * std::mem::size_of::<f32>();
if byte_size < expected_bytes {
return Err(format!(
"FFmpeg frame buffer too small: {} bytes, need {} bytes",
byte_size, expected_bytes
));
}
// SAFETY: We verified alignment and bounds above.
// The slice lifetime is tied to resampled_frame which lives until
// after extend_from_slice completes.
let samples_slice = unsafe { let samples_slice = unsafe {
std::slice::from_raw_parts(data_ptr, total_samples) std::slice::from_raw_parts(data_ptr, total_samples)
}; };
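The remaining `from_raw_parts` call is now guarded by alignment and length checks. The same checks can be factored into a small helper that views a byte buffer as `&[f32]`; the helper name below is ours, not from the diff, and the checks mirror the ones added above:

```rust
/// Reinterpret a byte buffer as f32 samples after verifying alignment and size.
fn bytes_as_f32(bytes: &[u8], total_samples: usize) -> Result<&[f32], String> {
    let data_ptr = bytes.as_ptr() as *const f32;
    if data_ptr.align_offset(std::mem::align_of::<f32>()) != 0 {
        return Err("audio data is not properly aligned for f32".to_string());
    }
    let expected_bytes = total_samples * std::mem::size_of::<f32>();
    if bytes.len() < expected_bytes {
        return Err(format!(
            "frame buffer too small: {} bytes, need {} bytes",
            bytes.len(),
            expected_bytes
        ));
    }
    // SAFETY: alignment and length were checked above, and the returned slice
    // borrows `bytes`, so it cannot outlive the underlying buffer.
    Ok(unsafe { std::slice::from_raw_parts(data_ptr, total_samples) })
}

fn main() {
    let samples = [1.0f32, 2.0, 3.0, 4.0];
    let bytes: Vec<u8> = samples.iter().flat_map(|s| s.to_ne_bytes()).collect();
    match bytes_as_f32(&bytes, samples.len()) {
        Ok(view) => assert_eq!(view, &samples[..]),
        // A Vec<u8> allocation is not guaranteed f32-aligned, so the helper
        // may legitimately refuse the buffer.
        Err(e) => println!("rejected: {e}"),
    }
}
```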

View File

@ -195,17 +195,16 @@ fn encode_pcm_to_mp3(
frame.set_rate(sample_rate); frame.set_rate(sample_rate);
// Copy planar samples to frame // Copy planar samples to frame
unsafe { for ch in 0..channels as usize {
for ch in 0..channels as usize { let plane = frame.data_mut(ch);
let plane = frame.data_mut(ch); let offset = samples_encoded;
let offset = samples_encoded; let src = &planar_samples[ch][offset..offset + chunk_size];
let src = &planar_samples[ch][offset..offset + chunk_size];
std::ptr::copy_nonoverlapping( // Safe byte-level copy
src.as_ptr() as *const u8, for (i, &sample) in src.iter().enumerate() {
plane.as_mut_ptr(), let bytes = sample.to_ne_bytes();
chunk_size * std::mem::size_of::<i16>(), let byte_offset = i * 2;
); plane[byte_offset..byte_offset + 2].copy_from_slice(&bytes);
} }
} }
@ -360,17 +359,16 @@ fn encode_pcm_to_aac(
frame.set_rate(sample_rate); frame.set_rate(sample_rate);
// Copy planar samples to frame // Copy planar samples to frame
unsafe { for ch in 0..channels as usize {
for ch in 0..channels as usize { let plane = frame.data_mut(ch);
let plane = frame.data_mut(ch); let offset = samples_encoded;
let offset = samples_encoded; let src = &planar_samples[ch][offset..offset + chunk_size];
let src = &planar_samples[ch][offset..offset + chunk_size];
std::ptr::copy_nonoverlapping( // Safe byte-level copy
src.as_ptr() as *const u8, for (i, &sample) in src.iter().enumerate() {
plane.as_mut_ptr(), let bytes = sample.to_ne_bytes();
chunk_size * std::mem::size_of::<f32>(), let byte_offset = i * 4;
); plane[byte_offset..byte_offset + 4].copy_from_slice(&bytes);
} }
} }

View File

@ -115,17 +115,18 @@ fn main() -> Result<(), String> {
height, height,
); );
// Copy YUV planes // Copy YUV planes (safe slice copy)
unsafe { let y_plane = video_frame.data_mut(0);
let y_plane = video_frame.data_mut(0); let y_len = y.len().min(y_plane.len());
std::ptr::copy_nonoverlapping(y.as_ptr(), y_plane.as_mut_ptr(), y.len()); y_plane[..y_len].copy_from_slice(&y[..y_len]);
let u_plane = video_frame.data_mut(1); let u_plane = video_frame.data_mut(1);
std::ptr::copy_nonoverlapping(u.as_ptr(), u_plane.as_mut_ptr(), u.len()); let u_len = u.len().min(u_plane.len());
u_plane[..u_len].copy_from_slice(&u[..u_len]);
let v_plane = video_frame.data_mut(2); let v_plane = video_frame.data_mut(2);
std::ptr::copy_nonoverlapping(v.as_ptr(), v_plane.as_mut_ptr(), v.len()); let v_len = v.len().min(v_plane.len());
} v_plane[..v_len].copy_from_slice(&v[..v_len]);
// Set PTS // Set PTS
let timestamp = frame_num as f64 / framerate; let timestamp = frame_num as f64 / framerate;

View File

@ -54,7 +54,7 @@ pub fn export_audio<P: AsRef<Path>>(
fn export_audio_daw_backend<P: AsRef<Path>>( fn export_audio_daw_backend<P: AsRef<Path>>(
project: &mut Project, project: &mut Project,
pool: &AudioPool, pool: &AudioPool,
midi_pool: &MidiClipPool, _midi_pool: &MidiClipPool,
settings: &AudioExportSettings, settings: &AudioExportSettings,
output_path: P, output_path: P,
) -> Result<(), String> { ) -> Result<(), String> {
@ -78,7 +78,6 @@ fn export_audio_daw_backend<P: AsRef<Path>>(
daw_backend::audio::export::export_audio( daw_backend::audio::export::export_audio(
project, project,
pool, pool,
midi_pool,
&daw_settings, &daw_settings,
output_path, output_path,
None, None,
@ -89,7 +88,7 @@ fn export_audio_daw_backend<P: AsRef<Path>>(
fn export_audio_ffmpeg_mp3<P: AsRef<Path>>( fn export_audio_ffmpeg_mp3<P: AsRef<Path>>(
project: &mut Project, project: &mut Project,
pool: &AudioPool, pool: &AudioPool,
midi_pool: &MidiClipPool, _midi_pool: &MidiClipPool,
settings: &AudioExportSettings, settings: &AudioExportSettings,
output_path: P, output_path: P,
cancel_flag: &Arc<AtomicBool>, cancel_flag: &Arc<AtomicBool>,
@ -114,7 +113,6 @@ fn export_audio_ffmpeg_mp3<P: AsRef<Path>>(
let pcm_samples = render_to_memory( let pcm_samples = render_to_memory(
project, project,
pool, pool,
midi_pool,
&daw_settings, &daw_settings,
None, // No progress events for now None, // No progress events for now
)?; )?;
@ -198,17 +196,25 @@ fn export_audio_ffmpeg_mp3<P: AsRef<Path>>(
frame.set_rate(settings.sample_rate); frame.set_rate(settings.sample_rate);
// Copy planar samples to frame // Copy planar samples to frame
unsafe { for ch in 0..settings.channels as usize {
for ch in 0..settings.channels as usize { let plane = frame.data_mut(ch);
let plane = frame.data_mut(ch); let offset = samples_encoded;
let offset = samples_encoded; let src = &planar_samples[ch][offset..offset + chunk_size];
let src = &planar_samples[ch][offset..offset + chunk_size];
std::ptr::copy_nonoverlapping( // Convert i16 samples to bytes and copy
src.as_ptr() as *const u8, let byte_size = chunk_size * std::mem::size_of::<i16>();
plane.as_mut_ptr(), if plane.len() < byte_size {
chunk_size * std::mem::size_of::<i16>(), return Err(format!(
); "FFmpeg frame buffer too small: {} bytes, need {} bytes",
plane.len(), byte_size
));
}
// Safe byte-level copy using slice operations
for (i, &sample) in src.iter().enumerate() {
let bytes = sample.to_ne_bytes();
let offset = i * 2;
plane[offset..offset + 2].copy_from_slice(&bytes);
} }
} }
@ -284,7 +290,7 @@ fn receive_and_write_packets(
fn export_audio_ffmpeg_aac<P: AsRef<Path>>( fn export_audio_ffmpeg_aac<P: AsRef<Path>>(
project: &mut Project, project: &mut Project,
pool: &AudioPool, pool: &AudioPool,
midi_pool: &MidiClipPool, _midi_pool: &MidiClipPool,
settings: &AudioExportSettings, settings: &AudioExportSettings,
output_path: P, output_path: P,
cancel_flag: &Arc<AtomicBool>, cancel_flag: &Arc<AtomicBool>,
@ -309,7 +315,6 @@ fn export_audio_ffmpeg_aac<P: AsRef<Path>>(
let pcm_samples = render_to_memory( let pcm_samples = render_to_memory(
project, project,
pool, pool,
midi_pool,
&daw_settings, &daw_settings,
None, // No progress events for now None, // No progress events for now
)?; )?;

View File

@ -1182,16 +1182,18 @@ impl ExportOrchestrator {
); );
// Copy YUV planes to frame // Copy YUV planes to frame
unsafe { // Use safe slice copy - LLVM optimizes this to memcpy, same performance as copy_nonoverlapping
let y_dest = video_frame.data_mut(0); let y_dest = video_frame.data_mut(0);
std::ptr::copy_nonoverlapping(y_plane.as_ptr(), y_dest.as_mut_ptr(), y_plane.len()); let y_len = y_plane.len().min(y_dest.len());
y_dest[..y_len].copy_from_slice(&y_plane[..y_len]);
let u_dest = video_frame.data_mut(1); let u_dest = video_frame.data_mut(1);
std::ptr::copy_nonoverlapping(u_plane.as_ptr(), u_dest.as_mut_ptr(), u_plane.len()); let u_len = u_plane.len().min(u_dest.len());
u_dest[..u_len].copy_from_slice(&u_plane[..u_len]);
let v_dest = video_frame.data_mut(2); let v_dest = video_frame.data_mut(2);
std::ptr::copy_nonoverlapping(v_plane.as_ptr(), v_dest.as_mut_ptr(), v_plane.len()); let v_len = v_plane.len().min(v_dest.len());
} v_dest[..v_len].copy_from_slice(&v_plane[..v_len]);
// Set PTS (presentation timestamp) in encoder's time base // Set PTS (presentation timestamp) in encoder's time base
// Encoder time base is 1/(framerate * 1000), so PTS = timestamp * (framerate * 1000) // Encoder time base is 1/(framerate * 1000), so PTS = timestamp * (framerate * 1000)

View File

@ -125,9 +125,13 @@ impl GraphBackend for AudioGraphBackend {
Ok(()) Ok(())
} }
fn get_state(&self) -> Result<GraphState, String> { fn get_state_json(&self) -> Result<String, String> {
let mut controller = self.audio_controller.lock().unwrap(); let mut controller = self.audio_controller.lock().unwrap();
let json = controller.query_graph_state(self.track_id)?; controller.query_graph_state(self.track_id)
}
fn get_state(&self) -> Result<GraphState, String> {
let json = self.get_state_json()?;
// Parse the GraphPreset JSON from backend // Parse the GraphPreset JSON from backend
let preset: daw_backend::audio::node_graph::GraphPreset = let preset: daw_backend::audio::node_graph::GraphPreset =

View File

@ -55,6 +55,9 @@ pub trait GraphBackend: Send {
/// Get current graph state (for serialization) /// Get current graph state (for serialization)
fn get_state(&self) -> Result<GraphState, String>; fn get_state(&self) -> Result<GraphState, String>;
/// Get current graph state as raw JSON (GraphPreset format from backend)
fn get_state_json(&self) -> Result<String, String>;
/// Load graph state (for presets) /// Load graph state (for presets)
fn load_state(&mut self, state: &GraphState) -> Result<(), String>; fn load_state(&mut self, state: &GraphState) -> Result<(), String>;
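A sketch of a caller that only needs the raw preset and can skip the GraphState conversion; it assumes GraphPreset implements serde::Deserialize (which the AudioGraphBackend impl above already relies on) and that GraphBackend is object-safe.

    // Sketch: fetch and parse the raw preset JSON from any GraphBackend.
    fn load_preset(backend: &dyn GraphBackend)
        -> Result<daw_backend::audio::node_graph::GraphPreset, String>
    {
        let json = backend.get_state_json()?;
        serde_json::from_str(&json).map_err(|e| format!("Failed to parse graph preset: {}", e))
    }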

View File

@ -68,6 +68,11 @@ pub enum NodeTemplate {
// Advanced // Advanced
VoiceAllocator, VoiceAllocator,
Group,
// Subgraph I/O (only visible when editing inside a container node)
TemplateInput,
TemplateOutput,
// Outputs // Outputs
AudioOutput, AudioOutput,
@ -117,6 +122,9 @@ impl NodeTemplate {
NodeTemplate::Mod => "Mod", NodeTemplate::Mod => "Mod",
NodeTemplate::Oscilloscope => "Oscilloscope", NodeTemplate::Oscilloscope => "Oscilloscope",
NodeTemplate::VoiceAllocator => "VoiceAllocator", NodeTemplate::VoiceAllocator => "VoiceAllocator",
NodeTemplate::Group => "Group",
NodeTemplate::TemplateInput => "TemplateInput",
NodeTemplate::TemplateOutput => "TemplateOutput",
NodeTemplate::AudioOutput => "AudioOutput", NodeTemplate::AudioOutput => "AudioOutput",
} }
} }
@ -282,6 +290,10 @@ impl NodeTemplateTrait for NodeTemplate {
NodeTemplate::Oscilloscope => "Oscilloscope".into(), NodeTemplate::Oscilloscope => "Oscilloscope".into(),
// Advanced // Advanced
NodeTemplate::VoiceAllocator => "Voice Allocator".into(), NodeTemplate::VoiceAllocator => "Voice Allocator".into(),
NodeTemplate::Group => "Group".into(),
// Subgraph I/O
NodeTemplate::TemplateInput => "Template Input".into(),
NodeTemplate::TemplateOutput => "Template Output".into(),
// Outputs // Outputs
NodeTemplate::AudioOutput => "Audio Output".into(), NodeTemplate::AudioOutput => "Audio Output".into(),
} }
@ -301,7 +313,8 @@ impl NodeTemplateTrait for NodeTemplate {
| NodeTemplate::SampleHold | NodeTemplate::SlewLimiter | NodeTemplate::Quantizer | NodeTemplate::SampleHold | NodeTemplate::SlewLimiter | NodeTemplate::Quantizer
| NodeTemplate::EnvelopeFollower | NodeTemplate::BpmDetector | NodeTemplate::Mod => vec!["Utilities"], | NodeTemplate::EnvelopeFollower | NodeTemplate::BpmDetector | NodeTemplate::Mod => vec!["Utilities"],
NodeTemplate::Oscilloscope => vec!["Analysis"], NodeTemplate::Oscilloscope => vec!["Analysis"],
NodeTemplate::VoiceAllocator => vec!["Advanced"], NodeTemplate::VoiceAllocator | NodeTemplate::Group => vec!["Advanced"],
NodeTemplate::TemplateInput | NodeTemplate::TemplateOutput => vec!["Subgraph I/O"],
NodeTemplate::AudioOutput => vec!["Outputs"], NodeTemplate::AudioOutput => vec!["Outputs"],
} }
} }
@ -667,8 +680,24 @@ impl NodeTemplateTrait for NodeTemplate {
} }
NodeTemplate::VoiceAllocator => { NodeTemplate::VoiceAllocator => {
graph.add_input_param(node_id, "MIDI In".into(), DataType::Midi, ValueType::float(0.0), InputParamKind::ConnectionOnly, true); graph.add_input_param(node_id, "MIDI In".into(), DataType::Midi, ValueType::float(0.0), InputParamKind::ConnectionOnly, true);
graph.add_input_param(node_id, "Voices".into(), DataType::CV,
ValueType::float_param(8.0, 1.0, 16.0, "", 0, None), InputParamKind::ConstantOnly, true);
graph.add_output_param(node_id, "Audio Out".into(), DataType::Audio); graph.add_output_param(node_id, "Audio Out".into(), DataType::Audio);
} }
NodeTemplate::Group => {
// Ports are dynamic based on subgraph TemplateInput/Output nodes.
// Start with one audio pass-through by default.
graph.add_input_param(node_id, "Audio In".into(), DataType::Audio, ValueType::float(0.0), InputParamKind::ConnectionOnly, true);
graph.add_output_param(node_id, "Audio Out".into(), DataType::Audio);
}
NodeTemplate::TemplateInput => {
// Inside a VA template: provides MIDI from the allocator
graph.add_output_param(node_id, "MIDI Out".into(), DataType::Midi);
}
NodeTemplate::TemplateOutput => {
// Inside a VA template: sends audio back to the allocator
graph.add_input_param(node_id, "Audio In".into(), DataType::Audio, ValueType::float(0.0), InputParamKind::ConnectionOnly, true);
}
} }
} }
} }
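Since Group ports are intended to be derived from the subgraph's TemplateInput/TemplateOutput nodes rather than fixed, a sketch of how that derivation might look once Group containers are backed by the engine. This is an assumption, not current behaviour; only counts are computed, and a real version would also read each node's data type.

    // Sketch: count a Group's would-be inputs/outputs from its subgraph preset.
    fn group_port_counts(subgraph: &daw_backend::audio::node_graph::GraphPreset) -> (usize, usize) {
        let inputs = subgraph.nodes.iter().filter(|n| n.node_type == "TemplateInput").count();
        let outputs = subgraph.nodes.iter().filter(|n| n.node_type == "TemplateOutput").count();
        (inputs, outputs)
    }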
@ -766,9 +795,23 @@ impl NodeDataTrait for NodeData {
} }
} }
// Iterator for all node templates // Iterator for all node templates (track-level graph)
pub struct AllNodeTemplates; pub struct AllNodeTemplates;
/// Iterator for subgraph node templates (includes TemplateInput/Output)
pub struct SubgraphNodeTemplates;
impl NodeTemplateIter for SubgraphNodeTemplates {
type Item = NodeTemplate;
fn all_kinds(&self) -> Vec<Self::Item> {
let mut templates = AllNodeTemplates.all_kinds();
templates.push(NodeTemplate::TemplateInput);
templates.push(NodeTemplate::TemplateOutput);
templates
}
}
impl NodeTemplateIter for AllNodeTemplates { impl NodeTemplateIter for AllNodeTemplates {
type Item = NodeTemplate; type Item = NodeTemplate;
@ -820,6 +863,9 @@ impl NodeTemplateIter for AllNodeTemplates {
NodeTemplate::Oscilloscope, NodeTemplate::Oscilloscope,
// Advanced // Advanced
NodeTemplate::VoiceAllocator, NodeTemplate::VoiceAllocator,
NodeTemplate::Group,
// Note: TemplateInput/TemplateOutput are excluded from the default finder.
// They are added dynamically when editing inside a subgraph.
// Outputs // Outputs
NodeTemplate::AudioOutput, NodeTemplate::AudioOutput,
] ]

View File

@ -9,13 +9,35 @@ pub mod graph_data;
pub mod node_types; pub mod node_types;
use backend::{BackendNodeId, GraphBackend}; use backend::{BackendNodeId, GraphBackend};
use graph_data::{AllNodeTemplates, DataType, GraphState, NodeData, NodeTemplate, ValueType}; use graph_data::{AllNodeTemplates, SubgraphNodeTemplates, DataType, GraphState, NodeData, NodeTemplate, ValueType};
use super::NodePath; use super::NodePath;
use eframe::egui; use eframe::egui;
use egui_node_graph2::*; use egui_node_graph2::*;
use std::collections::HashMap; use std::collections::HashMap;
use uuid::Uuid; use uuid::Uuid;
/// What kind of container we've entered for subgraph editing
#[derive(Clone, Debug)]
enum SubgraphContext {
VoiceAllocator { frontend_id: NodeId, backend_id: BackendNodeId },
Group { frontend_id: NodeId, backend_id: BackendNodeId, name: String },
}
/// One level of subgraph editing — stores the parent state we'll restore on exit
struct SubgraphFrame {
context: SubgraphContext,
saved_state: SavedGraphState,
}
/// Saved graph editor state for restoring when exiting a subgraph
struct SavedGraphState {
state: GraphEditorState<NodeData, DataType, ValueType, NodeTemplate, GraphState>,
user_state: GraphState,
node_id_map: HashMap<NodeId, BackendNodeId>,
backend_to_frontend_map: HashMap<BackendNodeId, NodeId>,
parameter_values: HashMap<InputId, f32>,
}
/// Node graph pane with egui_node_graph2 integration /// Node graph pane with egui_node_graph2 integration
pub struct NodeGraphPane { pub struct NodeGraphPane {
/// The graph editor state /// The graph editor state
@ -56,6 +78,10 @@ pub struct NodeGraphPane {
dragging_node: Option<NodeId>, dragging_node: Option<NodeId>,
/// Connection that would be targeted for insertion (highlighted during drag) /// Connection that would be targeted for insertion (highlighted during drag)
insert_target: Option<(InputId, OutputId)>, insert_target: Option<(InputId, OutputId)>,
/// Stack of subgraph contexts — empty = editing track-level graph,
/// non-empty = editing nested subgraph(s). Supports arbitrary nesting depth.
subgraph_stack: Vec<SubgraphFrame>,
} }
impl NodeGraphPane { impl NodeGraphPane {
@ -74,8 +100,8 @@ impl NodeGraphPane {
parameter_values: HashMap::new(), parameter_values: HashMap::new(),
last_project_generation: 0, last_project_generation: 0,
dragging_node: None, dragging_node: None,
insert_target: None, insert_target: None,
subgraph_stack: Vec::new(),
} }
} }
@ -102,8 +128,8 @@ impl NodeGraphPane {
parameter_values: HashMap::new(), parameter_values: HashMap::new(),
last_project_generation: 0, last_project_generation: 0,
dragging_node: None, dragging_node: None,
insert_target: None, insert_target: None,
subgraph_stack: Vec::new(),
}; };
// Load existing graph from backend // Load existing graph from backend
@ -116,166 +142,13 @@ impl NodeGraphPane {
/// Load the graph state from the backend and populate the frontend /// Load the graph state from the backend and populate the frontend
fn load_graph_from_backend(&mut self) -> Result<(), String> { fn load_graph_from_backend(&mut self) -> Result<(), String> {
let graph_state = if let Some(backend) = &self.backend { let json = if let Some(backend) = &self.backend {
backend.get_state()? backend.get_state_json()?
} else { } else {
return Err("No backend available".to_string()); return Err("No backend available".to_string());
}; };
// Clear existing graph self.load_graph_from_json(&json)
self.state.graph.nodes.clear();
self.state.graph.inputs.clear();
self.state.graph.outputs.clear();
self.state.graph.connections.clear();
self.state.node_order.clear();
self.state.node_positions.clear();
self.state.selected_nodes.clear();
self.state.connection_in_progress = None;
self.state.ongoing_box_selection = None;
self.node_id_map.clear();
self.backend_to_frontend_map.clear();
// Create nodes in frontend
for node in &graph_state.nodes {
// Parse node type from string (e.g., "Oscillator" -> NodeTemplate::Oscillator)
let node_template = match node.node_type.as_str() {
// Inputs
"MidiInput" => graph_data::NodeTemplate::MidiInput,
"AudioInput" => graph_data::NodeTemplate::AudioInput,
"AutomationInput" => graph_data::NodeTemplate::AutomationInput,
// Generators
"Oscillator" => graph_data::NodeTemplate::Oscillator,
"WavetableOscillator" => graph_data::NodeTemplate::WavetableOscillator,
"FMSynth" => graph_data::NodeTemplate::FmSynth,
"NoiseGenerator" => graph_data::NodeTemplate::Noise,
"SimpleSampler" => graph_data::NodeTemplate::SimpleSampler,
"MultiSampler" => graph_data::NodeTemplate::MultiSampler,
// Effects
"Filter" => graph_data::NodeTemplate::Filter,
"Gain" => graph_data::NodeTemplate::Gain,
"Echo" | "Delay" => graph_data::NodeTemplate::Echo,
"Reverb" => graph_data::NodeTemplate::Reverb,
"Chorus" => graph_data::NodeTemplate::Chorus,
"Flanger" => graph_data::NodeTemplate::Flanger,
"Phaser" => graph_data::NodeTemplate::Phaser,
"Distortion" => graph_data::NodeTemplate::Distortion,
"BitCrusher" => graph_data::NodeTemplate::BitCrusher,
"Compressor" => graph_data::NodeTemplate::Compressor,
"Limiter" => graph_data::NodeTemplate::Limiter,
"EQ" => graph_data::NodeTemplate::Eq,
"Pan" => graph_data::NodeTemplate::Pan,
"RingModulator" => graph_data::NodeTemplate::RingModulator,
"Vocoder" => graph_data::NodeTemplate::Vocoder,
// Utilities
"ADSR" => graph_data::NodeTemplate::Adsr,
"LFO" => graph_data::NodeTemplate::Lfo,
"Mixer" => graph_data::NodeTemplate::Mixer,
"Splitter" => graph_data::NodeTemplate::Splitter,
"Constant" => graph_data::NodeTemplate::Constant,
"MidiToCV" => graph_data::NodeTemplate::MidiToCv,
"AudioToCV" => graph_data::NodeTemplate::AudioToCv,
"Math" => graph_data::NodeTemplate::Math,
"SampleHold" => graph_data::NodeTemplate::SampleHold,
"SlewLimiter" => graph_data::NodeTemplate::SlewLimiter,
"Quantizer" => graph_data::NodeTemplate::Quantizer,
"EnvelopeFollower" => graph_data::NodeTemplate::EnvelopeFollower,
"BPMDetector" => graph_data::NodeTemplate::BpmDetector,
"Mod" => graph_data::NodeTemplate::Mod,
// Analysis
"Oscilloscope" => graph_data::NodeTemplate::Oscilloscope,
// Advanced
"VoiceAllocator" => graph_data::NodeTemplate::VoiceAllocator,
// Outputs
"AudioOutput" => graph_data::NodeTemplate::AudioOutput,
_ => {
eprintln!("Unknown node type: {}", node.node_type);
continue;
}
};
// Create node directly in the graph
use egui_node_graph2::Node;
let frontend_id = self.state.graph.nodes.insert(Node {
id: egui_node_graph2::NodeId::default(), // Will be replaced by insert
label: node.node_type.clone(),
inputs: vec![],
outputs: vec![],
user_data: graph_data::NodeData { template: node_template },
});
// Build the node's inputs and outputs (this adds them to graph.inputs and graph.outputs)
// build_node() automatically populates the node's inputs/outputs vectors with correct names and order
node_template.build_node(&mut self.state.graph, &mut self.user_state, frontend_id);
// Set position
self.state.node_positions.insert(
frontend_id,
egui::pos2(node.position.0, node.position.1),
);
// Add to node order for rendering
self.state.node_order.push(frontend_id);
// Map frontend ID to backend ID
let backend_id = BackendNodeId::Audio(petgraph::stable_graph::NodeIndex::new(node.id as usize));
self.node_id_map.insert(frontend_id, backend_id);
self.backend_to_frontend_map.insert(backend_id, frontend_id);
// Set parameter values from backend
if let Some(node_data) = self.state.graph.nodes.get(frontend_id) {
let input_ids: Vec<InputId> = node_data.inputs.iter().map(|(_, id)| *id).collect();
for input_id in input_ids {
if let Some(input_param) = self.state.graph.inputs.get_mut(input_id) {
if let ValueType::Float { value, backend_param_id: Some(pid), .. } = &mut input_param.value {
if let Some(&backend_value) = node.parameters.get(pid) {
*value = backend_value as f32;
}
}
}
}
}
}
// Create connections in frontend
for conn in &graph_state.connections {
let from_backend = BackendNodeId::Audio(petgraph::stable_graph::NodeIndex::new(conn.from_node as usize));
let to_backend = BackendNodeId::Audio(petgraph::stable_graph::NodeIndex::new(conn.to_node as usize));
if let (Some(&from_id), Some(&to_id)) = (
self.backend_to_frontend_map.get(&from_backend),
self.backend_to_frontend_map.get(&to_backend),
) {
// Find output param on from_node
if let Some(from_node) = self.state.graph.nodes.get(from_id) {
if let Some((_name, output_id)) = from_node.outputs.get(conn.from_port) {
// Find input param on to_node
if let Some(to_node) = self.state.graph.nodes.get(to_id) {
if let Some((_name, input_id)) = to_node.inputs.get(conn.to_port) {
// Check max_connections to avoid panic in egui_node_graph2 rendering
let max_conns = self.state.graph.inputs.get(*input_id)
.and_then(|p| p.max_connections)
.map(|n| n.get() as usize)
.unwrap_or(usize::MAX);
let current_count = self.state.graph.connections.get(*input_id)
.map(|c| c.len())
.unwrap_or(0);
if current_count < max_conns {
if let Some(connections) = self.state.graph.connections.get_mut(*input_id) {
connections.push(*output_id);
} else {
self.state.graph.connections.insert(*input_id, vec![*output_id]);
}
}
}
}
}
}
}
}
Ok(())
} }
fn handle_graph_response( fn handle_graph_response(
@ -305,12 +178,43 @@ impl NodeGraphPane {
let position = (center_graph.x, center_graph.y); let position = (center_graph.x, center_graph.y);
if let Some(track_id) = self.track_id { if let Some(track_id) = self.track_id {
let action = Box::new(actions::NodeGraphAction::AddNode( if let Some(va_id) = self.va_context() {
actions::AddNodeAction::new(track_id, node_type.clone(), position) // Inside VA template — call template command directly
)); if let Some(&backend_track_id) = shared.layer_to_track_map.get(&track_id) {
self.pending_action = Some(action); if let Some(audio_controller) = &shared.audio_controller {
// Track this addition so we can update ID mappings after execution let mut controller = audio_controller.lock().unwrap();
self.pending_node_addition = Some((node_id, node_type, position)); controller.graph_add_node_to_template(
backend_track_id, va_id, node_type.clone(),
position.0, position.1,
);
// Query template state to get the new node's backend ID
// Give the audio thread a brief moment to apply the add command before querying
std::thread::sleep(std::time::Duration::from_millis(10));
if let Ok(json) = controller.query_template_state(backend_track_id, va_id) {
if let Ok(state) = serde_json::from_str::<daw_backend::audio::node_graph::GraphPreset>(&json) {
// Find the new node by type and position
if let Some(backend_node) = state.nodes.iter().find(|n| {
n.node_type == node_type &&
(n.position.0 - position.0).abs() < 1.0 &&
(n.position.1 - position.1).abs() < 1.0
}) {
let backend_id = BackendNodeId::Audio(
petgraph::stable_graph::NodeIndex::new(backend_node.id as usize)
);
self.node_id_map.insert(node_id, backend_id);
self.backend_to_frontend_map.insert(backend_id, node_id);
}
}
}
}
}
} else {
// Normal track graph — use action system
let action = Box::new(actions::NodeGraphAction::AddNode(
actions::AddNodeAction::new(track_id, node_type.clone(), position)
));
self.pending_action = Some(action);
self.pending_node_addition = Some((node_id, node_type, position));
}
} }
} }
} }
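The type-and-position lookup above is a heuristic for recovering the backend id of the node that was just added to the template; a sketch of it factored into a helper (hypothetical name), keeping the same 1.0-unit tolerance.

    // Sketch: locate a just-added node in a template preset by type and position.
    fn find_backend_node_index(
        state: &daw_backend::audio::node_graph::GraphPreset,
        node_type: &str,
        position: (f32, f32),
    ) -> Option<usize> {
        state.nodes.iter()
            .find(|n| {
                n.node_type == node_type
                    && (n.position.0 - position.0).abs() < 1.0
                    && (n.position.1 - position.1).abs() < 1.0
            })
            .map(|n| n.id as usize)
    }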
@ -335,16 +239,29 @@ impl NodeGraphPane {
let to_backend = self.node_id_map.get(&to_node_id); let to_backend = self.node_id_map.get(&to_node_id);
if let (Some(&from_id), Some(&to_id)) = (from_backend, to_backend) { if let (Some(&from_id), Some(&to_id)) = (from_backend, to_backend) {
let action = Box::new(actions::NodeGraphAction::Connect( let BackendNodeId::Audio(from_idx) = from_id;
actions::ConnectAction::new( let BackendNodeId::Audio(to_idx) = to_id;
track_id,
from_id, if let Some(va_id) = self.va_context() {
from_port, // Inside VA template
to_id, if let Some(&backend_track_id) = shared.layer_to_track_map.get(&track_id) {
to_port, if let Some(audio_controller) = &shared.audio_controller {
) let mut controller = audio_controller.lock().unwrap();
)); controller.graph_connect_in_template(
self.pending_action = Some(action); backend_track_id, va_id,
from_idx.index() as u32, from_port,
to_idx.index() as u32, to_port,
);
}
}
} else {
let action = Box::new(actions::NodeGraphAction::Connect(
actions::ConnectAction::new(
track_id, from_id, from_port, to_id, to_port,
)
));
self.pending_action = Some(action);
}
} }
} }
} }
@ -352,12 +269,10 @@ impl NodeGraphPane {
NodeResponse::DisconnectEvent { output, input } => { NodeResponse::DisconnectEvent { output, input } => {
// Connection was removed // Connection was removed
if let Some(track_id) = self.track_id { if let Some(track_id) = self.track_id {
// Get the nodes that own these params
let from_node = self.state.graph.outputs.get(output).map(|o| o.node); let from_node = self.state.graph.outputs.get(output).map(|o| o.node);
let to_node = self.state.graph.inputs.get(input).map(|i| i.node); let to_node = self.state.graph.inputs.get(input).map(|i| i.node);
if let (Some(from_node_id), Some(to_node_id)) = (from_node, to_node) { if let (Some(from_node_id), Some(to_node_id)) = (from_node, to_node) {
// Find port indices
let from_port = self.state.graph.nodes.get(from_node_id) let from_port = self.state.graph.nodes.get(from_node_id)
.and_then(|n| n.outputs.iter().position(|(_, id)| *id == output)) .and_then(|n| n.outputs.iter().position(|(_, id)| *id == output))
.unwrap_or(0); .unwrap_or(0);
@ -365,21 +280,33 @@ impl NodeGraphPane {
.and_then(|n| n.inputs.iter().position(|(_, id)| *id == input)) .and_then(|n| n.inputs.iter().position(|(_, id)| *id == input))
.unwrap_or(0); .unwrap_or(0);
// Map frontend IDs to backend IDs
let from_backend = self.node_id_map.get(&from_node_id); let from_backend = self.node_id_map.get(&from_node_id);
let to_backend = self.node_id_map.get(&to_node_id); let to_backend = self.node_id_map.get(&to_node_id);
if let (Some(&from_id), Some(&to_id)) = (from_backend, to_backend) { if let (Some(&from_id), Some(&to_id)) = (from_backend, to_backend) {
let action = Box::new(actions::NodeGraphAction::Disconnect( let BackendNodeId::Audio(from_idx) = from_id;
actions::DisconnectAction::new( let BackendNodeId::Audio(to_idx) = to_id;
track_id,
from_id, if let Some(va_id) = self.va_context() {
from_port, // Inside VA template
to_id, if let Some(&backend_track_id) = shared.layer_to_track_map.get(&track_id) {
to_port, if let Some(audio_controller) = &shared.audio_controller {
) let mut controller = audio_controller.lock().unwrap();
)); controller.graph_disconnect_in_template(
self.pending_action = Some(action); backend_track_id, va_id,
from_idx.index() as u32, from_port,
to_idx.index() as u32, to_port,
);
}
}
} else {
let action = Box::new(actions::NodeGraphAction::Disconnect(
actions::DisconnectAction::new(
track_id, from_id, from_port, to_id, to_port,
)
));
self.pending_action = Some(action);
}
} }
} }
} }
@ -388,10 +315,24 @@ impl NodeGraphPane {
// Node was deleted // Node was deleted
if let Some(track_id) = self.track_id { if let Some(track_id) = self.track_id {
if let Some(&backend_id) = self.node_id_map.get(&node_id) { if let Some(&backend_id) = self.node_id_map.get(&node_id) {
let action = Box::new(actions::NodeGraphAction::RemoveNode( let BackendNodeId::Audio(node_idx) = backend_id;
actions::RemoveNodeAction::new(track_id, backend_id)
)); if let Some(va_id) = self.va_context() {
self.pending_action = Some(action); // Inside VA template
if let Some(&backend_track_id) = shared.layer_to_track_map.get(&track_id) {
if let Some(audio_controller) = &shared.audio_controller {
let mut controller = audio_controller.lock().unwrap();
controller.graph_remove_node_from_template(
backend_track_id, va_id, node_idx.index() as u32,
);
}
}
} else {
let action = Box::new(actions::NodeGraphAction::RemoveNode(
actions::RemoveNodeAction::new(track_id, backend_id)
));
self.pending_action = Some(action);
}
// Remove from ID map // Remove from ID map
self.node_id_map.remove(&node_id); self.node_id_map.remove(&node_id);
@ -412,14 +353,60 @@ impl NodeGraphPane {
if let Some(audio_controller) = &shared.audio_controller { if let Some(audio_controller) = &shared.audio_controller {
if let Some(&backend_track_id) = self.track_id.and_then(|tid| shared.layer_to_track_map.get(&tid)) { if let Some(&backend_track_id) = self.track_id.and_then(|tid| shared.layer_to_track_map.get(&tid)) {
let mut controller = audio_controller.lock().unwrap(); let mut controller = audio_controller.lock().unwrap();
controller.graph_set_node_position( if let Some(va_id) = self.va_context() {
backend_track_id, controller.graph_set_node_position_in_template(
node_index, backend_track_id,
pos.x, va_id,
pos.y, node_index,
pos.x,
pos.y,
);
} else {
controller.graph_set_node_position(
backend_track_id,
node_index,
pos.x,
pos.y,
);
}
}
}
}
}
}
NodeResponse::DoubleClick(node_id) => {
// Check if this is a container node we can enter
if let Some(node) = self.state.graph.nodes.get(node_id) {
match node.user_data.template {
NodeTemplate::VoiceAllocator => {
// VA can only be entered at track level (depth 0)
if !self.in_subgraph() {
if let Some(&backend_id) = self.node_id_map.get(&node_id) {
self.enter_subgraph(
SubgraphContext::VoiceAllocator {
frontend_id: node_id,
backend_id,
},
shared,
);
}
}
}
NodeTemplate::Group => {
// Groups can nest arbitrarily deep
if let Some(&backend_id) = self.node_id_map.get(&node_id) {
let name = node.label.clone();
self.enter_subgraph(
SubgraphContext::Group {
frontend_id: node_id,
backend_id,
name,
},
shared,
); );
} }
} }
_ => {}
} }
} }
} }
@ -481,7 +468,7 @@ impl NodeGraphPane {
} }
} }
fn check_parameter_changes(&mut self) { fn check_parameter_changes(&mut self, shared: &mut crate::panes::SharedPaneState) {
// Check all input parameters for value changes // Check all input parameters for value changes
let mut _checked_count = 0; let mut _checked_count = 0;
let mut _connection_only_count = 0; let mut _connection_only_count = 0;
@ -518,24 +505,36 @@ impl NodeGraphPane {
}; };
if has_changed { if has_changed {
// Value has changed, create SetParameterAction // Value has changed — send update to backend
if let Some(track_id) = self.track_id { if let Some(track_id) = self.track_id {
let node_id = input_param.node; let node_id = input_param.node;
// Get backend node ID and use stored param ID
if let Some(&backend_id) = self.node_id_map.get(&node_id) { if let Some(&backend_id) = self.node_id_map.get(&node_id) {
if let Some(param_id) = backend_param_id { if let Some(param_id) = backend_param_id {
eprintln!("[DEBUG] Parameter changed: node {:?} param {} from {:?} to {}", let BackendNodeId::Audio(node_idx) = backend_id;
backend_id, param_id, previous_value, current_value);
let action = Box::new(actions::NodeGraphAction::SetParameter( if let Some(va_id) = self.va_context() {
actions::SetParameterAction::new( // Inside VA template — call template command directly
track_id, if let Some(&backend_track_id) = shared.layer_to_track_map.get(&track_id) {
backend_id, if let Some(audio_controller) = &shared.audio_controller {
param_id, let mut controller = audio_controller.lock().unwrap();
current_value as f64, controller.graph_set_parameter_in_template(
) backend_track_id, va_id,
)); node_idx.index() as u32, param_id, current_value,
self.pending_action = Some(action); );
}
}
} else {
let action = Box::new(actions::NodeGraphAction::SetParameter(
actions::SetParameterAction::new(
track_id,
backend_id,
param_id,
current_value as f64,
)
));
self.pending_action = Some(action);
}
} }
} }
} }
@ -897,6 +896,252 @@ impl NodeGraphPane {
self.state.graph.connections.insert(target_input, vec![drag_output_id]); self.state.graph.connections.insert(target_input, vec![drag_output_id]);
} }
} }
/// Enter a subgraph for editing (VA template or Group internals)
fn enter_subgraph(
&mut self,
context: SubgraphContext,
shared: &mut crate::panes::SharedPaneState,
) {
// Save current state
let saved = SavedGraphState {
state: std::mem::replace(&mut self.state, GraphEditorState::new(1.0)),
user_state: std::mem::replace(&mut self.user_state, GraphState::default()),
node_id_map: std::mem::take(&mut self.node_id_map),
backend_to_frontend_map: std::mem::take(&mut self.backend_to_frontend_map),
parameter_values: std::mem::take(&mut self.parameter_values),
};
self.subgraph_stack.push(SubgraphFrame {
context: context.clone(),
saved_state: saved,
});
// Load the subgraph state from backend
match &context {
SubgraphContext::VoiceAllocator { backend_id, .. } => {
let BackendNodeId::Audio(va_idx) = *backend_id;
if let Some(track_id) = self.track_id {
if let Some(&backend_track_id) = shared.layer_to_track_map.get(&track_id) {
if let Some(audio_controller) = &shared.audio_controller {
let mut controller = audio_controller.lock().unwrap();
match controller.query_template_state(backend_track_id, va_idx.index() as u32) {
Ok(json) => {
if let Err(e) = self.load_graph_from_json(&json) {
eprintln!("Failed to load template state: {}", e);
}
}
Err(e) => {
eprintln!("Failed to query template state: {}", e);
}
}
}
}
}
}
SubgraphContext::Group { .. } => {
// TODO: query_subgraph_state when group backend is implemented
}
}
}
/// Exit the current subgraph level, restoring parent state
fn exit_subgraph(&mut self) {
if let Some(frame) = self.subgraph_stack.pop() {
self.state = frame.saved_state.state;
self.user_state = frame.saved_state.user_state;
self.node_id_map = frame.saved_state.node_id_map;
self.backend_to_frontend_map = frame.saved_state.backend_to_frontend_map;
self.parameter_values = frame.saved_state.parameter_values;
}
}
/// Exit to a specific depth in the subgraph stack (0 = track level)
fn exit_to_level(&mut self, target_depth: usize) {
while self.subgraph_stack.len() > target_depth {
self.exit_subgraph();
}
}
/// Load graph state from a JSON string (used for both track graphs and subgraphs)
fn load_graph_from_json(&mut self, json: &str) -> Result<(), String> {
let graph_state: daw_backend::audio::node_graph::GraphPreset =
serde_json::from_str(json).map_err(|e| format!("Failed to parse graph state: {}", e))?;
// Clear existing graph
self.state.graph.nodes.clear();
self.state.graph.inputs.clear();
self.state.graph.outputs.clear();
self.state.graph.connections.clear();
self.state.node_order.clear();
self.state.node_positions.clear();
self.state.selected_nodes.clear();
self.state.connection_in_progress = None;
self.state.ongoing_box_selection = None;
self.node_id_map.clear();
self.backend_to_frontend_map.clear();
// Create nodes in frontend
for node in &graph_state.nodes {
let node_template = match node.node_type.as_str() {
"MidiInput" => NodeTemplate::MidiInput,
"AudioInput" => NodeTemplate::AudioInput,
"AutomationInput" => NodeTemplate::AutomationInput,
"Oscillator" => NodeTemplate::Oscillator,
"WavetableOscillator" => NodeTemplate::WavetableOscillator,
"FMSynth" => NodeTemplate::FmSynth,
"NoiseGenerator" => NodeTemplate::Noise,
"SimpleSampler" => NodeTemplate::SimpleSampler,
"MultiSampler" => NodeTemplate::MultiSampler,
"Filter" => NodeTemplate::Filter,
"Gain" => NodeTemplate::Gain,
"Echo" | "Delay" => NodeTemplate::Echo,
"Reverb" => NodeTemplate::Reverb,
"Chorus" => NodeTemplate::Chorus,
"Flanger" => NodeTemplate::Flanger,
"Phaser" => NodeTemplate::Phaser,
"Distortion" => NodeTemplate::Distortion,
"BitCrusher" => NodeTemplate::BitCrusher,
"Compressor" => NodeTemplate::Compressor,
"Limiter" => NodeTemplate::Limiter,
"EQ" => NodeTemplate::Eq,
"Pan" => NodeTemplate::Pan,
"RingModulator" => NodeTemplate::RingModulator,
"Vocoder" => NodeTemplate::Vocoder,
"ADSR" => NodeTemplate::Adsr,
"LFO" => NodeTemplate::Lfo,
"Mixer" => NodeTemplate::Mixer,
"Splitter" => NodeTemplate::Splitter,
"Constant" => NodeTemplate::Constant,
"MidiToCV" => NodeTemplate::MidiToCv,
"AudioToCV" => NodeTemplate::AudioToCv,
"Math" => NodeTemplate::Math,
"SampleHold" => NodeTemplate::SampleHold,
"SlewLimiter" => NodeTemplate::SlewLimiter,
"Quantizer" => NodeTemplate::Quantizer,
"EnvelopeFollower" => NodeTemplate::EnvelopeFollower,
"BPMDetector" => NodeTemplate::BpmDetector,
"Mod" => NodeTemplate::Mod,
"Oscilloscope" => NodeTemplate::Oscilloscope,
"VoiceAllocator" => NodeTemplate::VoiceAllocator,
"Group" => NodeTemplate::Group,
"TemplateInput" => NodeTemplate::TemplateInput,
"TemplateOutput" => NodeTemplate::TemplateOutput,
"AudioOutput" => NodeTemplate::AudioOutput,
_ => {
eprintln!("Unknown node type: {}", node.node_type);
continue;
}
};
use egui_node_graph2::Node;
let frontend_id = self.state.graph.nodes.insert(Node {
id: NodeId::default(),
label: node.node_type.clone(),
inputs: vec![],
outputs: vec![],
user_data: NodeData { template: node_template },
});
node_template.build_node(&mut self.state.graph, &mut self.user_state, frontend_id);
self.state.node_positions.insert(
frontend_id,
egui::pos2(node.position.0, node.position.1),
);
self.state.node_order.push(frontend_id);
let backend_id = BackendNodeId::Audio(petgraph::stable_graph::NodeIndex::new(node.id as usize));
self.node_id_map.insert(frontend_id, backend_id);
self.backend_to_frontend_map.insert(backend_id, frontend_id);
// Set parameter values from backend
if let Some(node_data) = self.state.graph.nodes.get(frontend_id) {
let input_ids: Vec<InputId> = node_data.inputs.iter().map(|(_, id)| *id).collect();
for input_id in input_ids {
if let Some(input_param) = self.state.graph.inputs.get_mut(input_id) {
if let ValueType::Float { value, backend_param_id: Some(pid), .. } = &mut input_param.value {
if let Some(&backend_value) = node.parameters.get(pid) {
*value = backend_value as f32;
}
}
}
}
}
}
// Create connections in frontend
for conn in &graph_state.connections {
let from_backend = BackendNodeId::Audio(petgraph::stable_graph::NodeIndex::new(conn.from_node as usize));
let to_backend = BackendNodeId::Audio(petgraph::stable_graph::NodeIndex::new(conn.to_node as usize));
if let (Some(&from_id), Some(&to_id)) = (
self.backend_to_frontend_map.get(&from_backend),
self.backend_to_frontend_map.get(&to_backend),
) {
if let Some(from_node) = self.state.graph.nodes.get(from_id) {
if let Some((_name, output_id)) = from_node.outputs.get(conn.from_port) {
if let Some(to_node) = self.state.graph.nodes.get(to_id) {
if let Some((_name, input_id)) = to_node.inputs.get(conn.to_port) {
let max_conns = self.state.graph.inputs.get(*input_id)
.and_then(|p| p.max_connections)
.map(|n| n.get() as usize)
.unwrap_or(usize::MAX);
let current_count = self.state.graph.connections.get(*input_id)
.map(|c| c.len())
.unwrap_or(0);
if current_count < max_conns {
if let Some(connections) = self.state.graph.connections.get_mut(*input_id) {
connections.push(*output_id);
} else {
self.state.graph.connections.insert(*input_id, vec![*output_id]);
}
}
}
}
}
}
}
}
Ok(())
}
/// Get the VA backend node ID if we're editing inside a VoiceAllocator template
fn va_context(&self) -> Option<u32> {
match self.current_subgraph()? {
SubgraphContext::VoiceAllocator { backend_id, .. } => {
let BackendNodeId::Audio(idx) = *backend_id;
Some(idx.index() as u32)
}
_ => None,
}
}
/// Whether we're currently editing inside a subgraph
fn in_subgraph(&self) -> bool {
!self.subgraph_stack.is_empty()
}
/// Get the current subgraph context (top of stack)
fn current_subgraph(&self) -> Option<&SubgraphContext> {
self.subgraph_stack.last().map(|f| &f.context)
}
/// Build breadcrumb segments for the current subgraph stack
fn breadcrumb_segments(&self) -> Vec<String> {
let mut segments = vec!["Track Graph".to_string()];
for frame in &self.subgraph_stack {
match &frame.context {
SubgraphContext::VoiceAllocator { .. } => segments.push("Voice Allocator".to_string()),
SubgraphContext::Group { name, .. } => segments.push(format!("Group '{}'", name)),
}
}
segments
}
} }
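As a concrete illustration of the breadcrumb output (the group name is hypothetical): entering a Voice Allocator and then a Group named "Sub Osc" produces three segments.

    // subgraph_stack = [VoiceAllocator { .. }, Group { name: "Sub Osc", .. }]
    // breadcrumb_segments() -> ["Track Graph", "Voice Allocator", "Group 'Sub Osc'"]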
impl crate::panes::PaneRenderer for NodeGraphPane { impl crate::panes::PaneRenderer for NodeGraphPane {
@ -929,7 +1174,8 @@ impl crate::panes::PaneRenderer for NodeGraphPane {
}; };
if is_valid_track { if is_valid_track {
// Reload graph for new track // Reload graph for new track — exit any subgraph editing
self.subgraph_stack.clear();
self.track_id = Some(new_track_id); self.track_id = Some(new_track_id);
// Recreate backend // Recreate backend
@ -973,8 +1219,75 @@ impl crate::panes::PaneRenderer for NodeGraphPane {
let bg_color = bg_style.background_color.unwrap_or(egui::Color32::from_gray(45)); let bg_color = bg_style.background_color.unwrap_or(egui::Color32::from_gray(45));
let grid_color = grid_style.background_color.unwrap_or(egui::Color32::from_gray(55)); let grid_color = grid_style.background_color.unwrap_or(egui::Color32::from_gray(55));
// Draw breadcrumb bar when editing a subgraph
let breadcrumb_height = if self.in_subgraph() { 28.0 } else { 0.0 };
let graph_rect = if self.in_subgraph() {
// Draw breadcrumb bar at top
let breadcrumb_rect = egui::Rect::from_min_size(
rect.min,
egui::vec2(rect.width(), breadcrumb_height),
);
let painter = ui.painter();
painter.rect_filled(breadcrumb_rect, 0.0, egui::Color32::from_gray(35));
painter.line_segment(
[breadcrumb_rect.left_bottom(), breadcrumb_rect.right_bottom()],
egui::Stroke::new(1.0, egui::Color32::from_gray(60)),
);
// Draw clickable breadcrumb segments
let segments = self.breadcrumb_segments();
let mut x = rect.min.x + 8.0;
let y = rect.min.y + 6.0;
let mut clicked_level: Option<usize> = None;
for (i, segment) in segments.iter().enumerate() {
let is_last = i == segments.len() - 1;
let text_color = if is_last {
egui::Color32::from_gray(220)
} else {
egui::Color32::from_rgb(100, 180, 255)
};
let font_id = egui::FontId::proportional(13.0);
let galley = painter.layout_no_wrap(segment.clone(), font_id, text_color);
let text_rect = egui::Rect::from_min_size(egui::pos2(x, y), galley.size());
if !is_last {
let response = ui.interact(text_rect, ui.id().with(("breadcrumb", i)), egui::Sense::click());
if response.clicked() {
clicked_level = Some(i);
}
if response.hovered() {
painter.rect_stroke(text_rect.expand(2.0), 2.0, egui::Stroke::new(1.0, egui::Color32::from_gray(80)), egui::StrokeKind::Outside);
}
}
painter.galley(egui::pos2(x, y), galley, text_color);
x += text_rect.width();
if !is_last {
let sep = " > ";
let sep_galley = painter.layout_no_wrap(sep.to_string(), egui::FontId::proportional(13.0), egui::Color32::from_gray(100));
painter.galley(egui::pos2(x, y), sep_galley, egui::Color32::from_gray(100));
x += 20.0;
}
}
if let Some(level) = clicked_level {
self.exit_to_level(level);
}
// Shrink graph rect to below breadcrumb
egui::Rect::from_min_max(
egui::pos2(rect.min.x, rect.min.y + breadcrumb_height),
rect.max,
)
} else {
rect
};
// Allocate the rect and render the graph editor within it // Allocate the rect and render the graph editor within it
ui.scope_builder(egui::UiBuilder::new().max_rect(rect), |ui| { ui.scope_builder(egui::UiBuilder::new().max_rect(graph_rect), |ui| {
// Check for scroll input to override library's default zoom behavior // Check for scroll input to override library's default zoom behavior
// Only handle scroll when mouse is over the node graph area // Only handle scroll when mouse is over the node graph area
let pointer_over_graph = ui.rect_contains_pointer(rect); let pointer_over_graph = ui.rect_contains_pointer(rect);
@ -1007,21 +1320,30 @@ impl crate::panes::PaneRenderer for NodeGraphPane {
// Draw dot grid background with pan/zoom // Draw dot grid background with pan/zoom
let pan_zoom = &self.state.pan_zoom; let pan_zoom = &self.state.pan_zoom;
Self::draw_dot_grid_background(ui, rect, bg_color, grid_color, pan_zoom); Self::draw_dot_grid_background(ui, graph_rect, bg_color, grid_color, pan_zoom);
// Draw the graph editor (library will process scroll as zoom by default) // Draw the graph editor with context-aware node templates
let graph_response = self.state.draw_graph_editor( let graph_response = if self.in_subgraph() {
ui, self.state.draw_graph_editor(
AllNodeTemplates, ui,
&mut self.user_state, SubgraphNodeTemplates,
Vec::default(), &mut self.user_state,
); Vec::default(),
)
} else {
self.state.draw_graph_editor(
ui,
AllNodeTemplates,
&mut self.user_state,
Vec::default(),
)
};
// Handle graph events and create actions // Handle graph events and create actions
self.handle_graph_response(graph_response, shared, rect); self.handle_graph_response(graph_response, shared, graph_rect);
// Check for parameter value changes and send updates to backend // Check for parameter value changes and send updates to backend
self.check_parameter_changes(); self.check_parameter_changes(shared);
// Execute any parameter change actions // Execute any parameter change actions
self.execute_pending_action(shared); self.execute_pending_action(shared);

View File

@ -1,97 +1,126 @@
{ {
"metadata": { "metadata": {
"name": "Deep Bass", "name": "Deep Bass",
"description": "Thick sub bass with sawtooth oscillator", "description": "Thick sub bass with sawtooth oscillator (polyphonic)",
"author": "Lightningbeam", "author": "Lightningbeam",
"version": 1, "version": 2,
"tags": ["bass", "sub", "synth"] "tags": ["bass", "sub", "synth"]
}, },
"midi_targets": [0], "midi_targets": [0],
"output_node": 7, "output_node": 2,
"nodes": [ "nodes": [
{ {
"id": 0, "id": 0,
"node_type": "MidiInput", "node_type": "MidiInput",
"name": "MIDI In", "name": "MIDI In",
"parameters": {}, "parameters": {},
"position": [100.0, 100.0] "position": [100.0, 150.0]
}, },
{ {
"id": 1, "id": 1,
"node_type": "MidiToCV", "node_type": "VoiceAllocator",
"name": "MIDI→CV", "name": "Voice Allocator",
"parameters": {}, "parameters": {
"position": [400.0, 100.0] "0": 8.0
},
"position": [400.0, 150.0],
"template_graph": {
"metadata": {
"name": "Voice Template",
"description": "Per-voice synth patch",
"author": "Lightningbeam",
"version": 1,
"tags": []
},
"midi_targets": [0],
"output_node": 6,
"nodes": [
{
"id": 0,
"node_type": "TemplateInput",
"name": "Template Input",
"parameters": {},
"position": [-200.0, 0.0]
},
{
"id": 1,
"node_type": "MidiToCV",
"name": "MIDI→CV",
"parameters": {},
"position": [100.0, 0.0]
},
{
"id": 2,
"node_type": "Oscillator",
"name": "Osc",
"parameters": {
"0": 110.0,
"1": 0.7,
"2": 1.0
},
"position": [400.0, -100.0]
},
{
"id": 3,
"node_type": "ADSR",
"name": "Amp Env",
"parameters": {
"0": 0.005,
"1": 0.2,
"2": 0.8,
"3": 0.3
},
"position": [400.0, 200.0]
},
{
"id": 4,
"node_type": "Gain",
"name": "VCA",
"parameters": {
"0": 1.0
},
"position": [700.0, 0.0]
},
{
"id": 5,
"node_type": "Filter",
"name": "LP Filter",
"parameters": {
"0": 800.0,
"1": 1.5,
"2": 0.0
},
"position": [900.0, 0.0]
},
{
"id": 6,
"node_type": "TemplateOutput",
"name": "Template Output",
"parameters": {},
"position": [1100.0, 0.0]
}
],
"connections": [
{ "from_node": 0, "from_port": 0, "to_node": 1, "to_port": 0 },
{ "from_node": 1, "from_port": 0, "to_node": 2, "to_port": 0 },
{ "from_node": 1, "from_port": 1, "to_node": 3, "to_port": 0 },
{ "from_node": 2, "from_port": 0, "to_node": 4, "to_port": 0 },
{ "from_node": 3, "from_port": 0, "to_node": 4, "to_port": 1 },
{ "from_node": 4, "from_port": 0, "to_node": 5, "to_port": 0 },
{ "from_node": 5, "from_port": 0, "to_node": 6, "to_port": 0 }
]
}
}, },
{ {
"id": 2, "id": 2,
"node_type": "Oscillator",
"name": "Osc",
"parameters": {
"0": 110.0,
"1": 0.7,
"2": 1.0
},
"position": [700.0, -100.0]
},
{
"id": 3,
"node_type": "ADSR",
"name": "Amp Env",
"parameters": {
"0": 0.005,
"1": 0.2,
"2": 0.8,
"3": 0.3
},
"position": [700.0, 200.0]
},
{
"id": 4,
"node_type": "Gain",
"name": "VCA",
"parameters": {
"0": 1.0
},
"position": [1000.0, 100.0]
},
{
"id": 5,
"node_type": "Gain",
"name": "Velocity",
"parameters": {
"0": 1.0
},
"position": [1150.0, 100.0]
},
{
"id": 6,
"node_type": "Filter",
"name": "LP Filter",
"parameters": {
"0": 800.0,
"1": 1.5,
"2": 0.0
},
"position": [1300.0, 100.0]
},
{
"id": 7,
"node_type": "AudioOutput", "node_type": "AudioOutput",
"name": "Out", "name": "Out",
"parameters": {}, "parameters": {},
"position": [1600.0, 100.0] "position": [700.0, 150.0]
} }
], ],
"connections": [ "connections": [
{ "from_node": 0, "from_port": 0, "to_node": 1, "to_port": 0 }, { "from_node": 0, "from_port": 0, "to_node": 1, "to_port": 0 },
{ "from_node": 1, "from_port": 0, "to_node": 2, "to_port": 0 }, { "from_node": 1, "from_port": 0, "to_node": 2, "to_port": 0 }
{ "from_node": 1, "from_port": 1, "to_node": 3, "to_port": 0 },
{ "from_node": 1, "from_port": 2, "to_node": 5, "to_port": 1 },
{ "from_node": 2, "from_port": 0, "to_node": 4, "to_port": 0 },
{ "from_node": 3, "from_port": 0, "to_node": 4, "to_port": 1 },
{ "from_node": 4, "from_port": 0, "to_node": 5, "to_port": 0 },
{ "from_node": 5, "from_port": 0, "to_node": 6, "to_port": 0 },
{ "from_node": 6, "from_port": 0, "to_node": 7, "to_port": 0 }
] ]
} }
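A hedged sketch of a sanity check that could be run over a preset like the one above; the struct field names follow the JSON keys, the helper itself is hypothetical, and it checks a single level only (a nested template_graph would need the same check recursively).

    use std::collections::HashSet;

    // Sketch: verify all connection endpoints reference declared node ids.
    fn validate_connections(preset: &daw_backend::audio::node_graph::GraphPreset) -> Result<(), String> {
        let ids: HashSet<usize> = preset.nodes.iter().map(|n| n.id as usize).collect();
        for c in &preset.connections {
            if !ids.contains(&(c.from_node as usize)) || !ids.contains(&(c.to_node as usize)) {
                return Err(format!("dangling connection {} -> {}", c.from_node, c.to_node));
            }
        }
        Ok(())
    }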