Add sampler nodes and startup screen

This commit is contained in:
Skyler Lehmkuhl 2025-10-28 01:32:51 -04:00
parent e57ae51397
commit 2e9699b524
30 changed files with 5012 additions and 215 deletions

View File

@ -51,6 +51,12 @@ version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8"
[[package]]
name = "base64"
version = "0.22.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6"
[[package]] [[package]]
name = "bindgen" name = "bindgen"
version = "0.72.1" version = "0.72.1"
@ -398,6 +404,7 @@ dependencies = [
name = "daw-backend" name = "daw-backend"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"base64",
"cpal", "cpal",
"crossterm", "crossterm",
"dasp_envelope", "dasp_envelope",

View File

@ -12,6 +12,7 @@ serde = { version = "1.0", features = ["derive"] }
ratatui = "0.26" ratatui = "0.26"
crossterm = "0.27" crossterm = "0.27"
rand = "0.8" rand = "0.8"
base64 = "0.22"
# Node-based audio graph dependencies # Node-based audio graph dependencies
dasp_graph = "0.11" dasp_graph = "0.11"

View File

@ -743,6 +743,14 @@ impl Engine {
"NoiseGenerator" => Box::new(NoiseGeneratorNode::new("Noise".to_string())), "NoiseGenerator" => Box::new(NoiseGeneratorNode::new("Noise".to_string())),
"Splitter" => Box::new(SplitterNode::new("Splitter".to_string())), "Splitter" => Box::new(SplitterNode::new("Splitter".to_string())),
"Pan" => Box::new(PanNode::new("Pan".to_string())), "Pan" => Box::new(PanNode::new("Pan".to_string())),
"Delay" => Box::new(DelayNode::new("Delay".to_string())),
"Reverb" => Box::new(ReverbNode::new("Reverb".to_string())),
"Chorus" => Box::new(ChorusNode::new("Chorus".to_string())),
"Flanger" => Box::new(FlangerNode::new("Flanger".to_string())),
"FMSynth" => Box::new(FMSynthNode::new("FM Synth".to_string())),
"WavetableOscillator" => Box::new(WavetableOscillatorNode::new("Wavetable".to_string())),
"SimpleSampler" => Box::new(SimpleSamplerNode::new("Sampler".to_string())),
"MultiSampler" => Box::new(MultiSamplerNode::new("Multi Sampler".to_string())),
"MidiInput" => Box::new(MidiInputNode::new("MIDI Input".to_string())), "MidiInput" => Box::new(MidiInputNode::new("MIDI Input".to_string())),
"MidiToCV" => Box::new(MidiToCVNode::new("MIDI→CV".to_string())), "MidiToCV" => Box::new(MidiToCVNode::new("MIDI→CV".to_string())),
"AudioToCV" => Box::new(AudioToCVNode::new("Audio→CV".to_string())), "AudioToCV" => Box::new(AudioToCVNode::new("Audio→CV".to_string())),
@ -794,6 +802,14 @@ impl Engine {
"NoiseGenerator" => Box::new(NoiseGeneratorNode::new("Noise".to_string())), "NoiseGenerator" => Box::new(NoiseGeneratorNode::new("Noise".to_string())),
"Splitter" => Box::new(SplitterNode::new("Splitter".to_string())), "Splitter" => Box::new(SplitterNode::new("Splitter".to_string())),
"Pan" => Box::new(PanNode::new("Pan".to_string())), "Pan" => Box::new(PanNode::new("Pan".to_string())),
"Delay" => Box::new(DelayNode::new("Delay".to_string())),
"Reverb" => Box::new(ReverbNode::new("Reverb".to_string())),
"Chorus" => Box::new(ChorusNode::new("Chorus".to_string())),
"Flanger" => Box::new(FlangerNode::new("Flanger".to_string())),
"FMSynth" => Box::new(FMSynthNode::new("FM Synth".to_string())),
"WavetableOscillator" => Box::new(WavetableOscillatorNode::new("Wavetable".to_string())),
"SimpleSampler" => Box::new(SimpleSamplerNode::new("Sampler".to_string())),
"MultiSampler" => Box::new(MultiSamplerNode::new("Multi Sampler".to_string())),
"MidiInput" => Box::new(MidiInputNode::new("MIDI Input".to_string())), "MidiInput" => Box::new(MidiInputNode::new("MIDI Input".to_string())),
"MidiToCV" => Box::new(MidiToCVNode::new("MIDI→CV".to_string())), "MidiToCV" => Box::new(MidiToCVNode::new("MIDI→CV".to_string())),
"AudioToCV" => Box::new(AudioToCVNode::new("Audio→CV".to_string())), "AudioToCV" => Box::new(AudioToCVNode::new("Audio→CV".to_string())),
@ -1013,6 +1029,98 @@ impl Engine {
} }
} }
} }
Command::SamplerLoadSample(track_id, node_id, file_path) => {
use crate::audio::node_graph::nodes::SimpleSamplerNode;
if let Some(TrackNode::Midi(track)) = self.project.get_track_mut(track_id) {
if let Some(ref mut graph) = track.instrument_graph {
let node_idx = NodeIndex::new(node_id as usize);
if let Some(graph_node) = graph.get_graph_node_mut(node_idx) {
// Downcast to SimpleSamplerNode
let node_ptr = &mut *graph_node.node as *mut dyn crate::audio::node_graph::AudioNode;
let node_ptr = node_ptr as *mut SimpleSamplerNode;
unsafe {
let sampler_node = &mut *node_ptr;
if let Err(e) = sampler_node.load_sample_from_file(&file_path) {
eprintln!("Failed to load sample: {}", e);
}
}
}
}
}
}
Command::MultiSamplerAddLayer(track_id, node_id, file_path, key_min, key_max, root_key, velocity_min, velocity_max) => {
use crate::audio::node_graph::nodes::MultiSamplerNode;
if let Some(TrackNode::Midi(track)) = self.project.get_track_mut(track_id) {
if let Some(ref mut graph) = track.instrument_graph {
let node_idx = NodeIndex::new(node_id as usize);
if let Some(graph_node) = graph.get_graph_node_mut(node_idx) {
// Downcast to MultiSamplerNode
let node_ptr = &mut *graph_node.node as *mut dyn crate::audio::node_graph::AudioNode;
let node_ptr = node_ptr as *mut MultiSamplerNode;
unsafe {
let multi_sampler_node = &mut *node_ptr;
if let Err(e) = multi_sampler_node.load_layer_from_file(&file_path, key_min, key_max, root_key, velocity_min, velocity_max) {
eprintln!("Failed to add sample layer: {}", e);
}
}
}
}
}
}
Command::MultiSamplerUpdateLayer(track_id, node_id, layer_index, key_min, key_max, root_key, velocity_min, velocity_max) => {
use crate::audio::node_graph::nodes::MultiSamplerNode;
if let Some(TrackNode::Midi(track)) = self.project.get_track_mut(track_id) {
if let Some(ref mut graph) = track.instrument_graph {
let node_idx = NodeIndex::new(node_id as usize);
if let Some(graph_node) = graph.get_graph_node_mut(node_idx) {
// Downcast to MultiSamplerNode
let node_ptr = &mut *graph_node.node as *mut dyn crate::audio::node_graph::AudioNode;
let node_ptr = node_ptr as *mut MultiSamplerNode;
unsafe {
let multi_sampler_node = &mut *node_ptr;
if let Err(e) = multi_sampler_node.update_layer(layer_index, key_min, key_max, root_key, velocity_min, velocity_max) {
eprintln!("Failed to update sample layer: {}", e);
}
}
}
}
}
}
Command::MultiSamplerRemoveLayer(track_id, node_id, layer_index) => {
use crate::audio::node_graph::nodes::MultiSamplerNode;
if let Some(TrackNode::Midi(track)) = self.project.get_track_mut(track_id) {
if let Some(ref mut graph) = track.instrument_graph {
let node_idx = NodeIndex::new(node_id as usize);
if let Some(graph_node) = graph.get_graph_node_mut(node_idx) {
// Downcast to MultiSamplerNode
let node_ptr = &mut *graph_node.node as *mut dyn crate::audio::node_graph::AudioNode;
let node_ptr = node_ptr as *mut MultiSamplerNode;
unsafe {
let multi_sampler_node = &mut *node_ptr;
if let Err(e) = multi_sampler_node.remove_layer(layer_index) {
eprintln!("Failed to remove sample layer: {}", e);
}
}
}
}
}
}
} }
} }
@ -1498,4 +1606,24 @@ impl EngineController {
pub fn graph_save_template_preset(&mut self, track_id: TrackId, voice_allocator_id: u32, preset_path: String, preset_name: String) { pub fn graph_save_template_preset(&mut self, track_id: TrackId, voice_allocator_id: u32, preset_path: String, preset_name: String) {
let _ = self.command_tx.push(Command::GraphSaveTemplatePreset(track_id, voice_allocator_id, preset_path, preset_name)); let _ = self.command_tx.push(Command::GraphSaveTemplatePreset(track_id, voice_allocator_id, preset_path, preset_name));
} }
/// Load a sample into a SimpleSampler node
pub fn sampler_load_sample(&mut self, track_id: TrackId, node_id: u32, file_path: String) {
let _ = self.command_tx.push(Command::SamplerLoadSample(track_id, node_id, file_path));
}
/// Add a sample layer to a MultiSampler node
pub fn multi_sampler_add_layer(&mut self, track_id: TrackId, node_id: u32, file_path: String, key_min: u8, key_max: u8, root_key: u8, velocity_min: u8, velocity_max: u8) {
let _ = self.command_tx.push(Command::MultiSamplerAddLayer(track_id, node_id, file_path, key_min, key_max, root_key, velocity_min, velocity_max));
}
/// Update a MultiSampler layer's configuration
pub fn multi_sampler_update_layer(&mut self, track_id: TrackId, node_id: u32, layer_index: usize, key_min: u8, key_max: u8, root_key: u8, velocity_min: u8, velocity_max: u8) {
let _ = self.command_tx.push(Command::MultiSamplerUpdateLayer(track_id, node_id, layer_index, key_min, key_max, root_key, velocity_min, velocity_max));
}
/// Remove a layer from a MultiSampler node
pub fn multi_sampler_remove_layer(&mut self, track_id: TrackId, node_id: u32, layer_index: usize) {
let _ = self.command_tx.push(Command::MultiSamplerRemoveLayer(track_id, node_id, layer_index));
}
} }

View File

@ -7,6 +7,7 @@ pub mod node_graph;
pub mod pool; pub mod pool;
pub mod project; pub mod project;
pub mod recording; pub mod recording;
pub mod sample_loader;
pub mod track; pub mod track;
pub use automation::{AutomationLane, AutomationLaneId, AutomationPoint, CurveType, ParameterId}; pub use automation::{AutomationLane, AutomationLaneId, AutomationPoint, CurveType, ParameterId};
@ -17,4 +18,5 @@ pub use midi::{MidiClip, MidiClipId, MidiEvent};
pub use pool::{AudioFile as PoolAudioFile, AudioPool}; pub use pool::{AudioFile as PoolAudioFile, AudioPool};
pub use project::Project; pub use project::Project;
pub use recording::RecordingState; pub use recording::RecordingState;
pub use sample_loader::{load_audio_file, SampleData};
pub use track::{AudioTrack, Metatrack, MidiTrack, RenderContext, Track, TrackId, TrackNode}; pub use track::{AudioTrack, Metatrack, MidiTrack, RenderContext, Track, TrackId, TrackNode};

View File

@ -107,6 +107,11 @@ impl InstrumentGraph {
self.graph.add_node(graph_node) self.graph.add_node(graph_node)
} }
/// Get the number of nodes in the graph
pub fn node_count(&self) -> usize {
self.graph.node_count()
}
/// Set the UI position for a node /// Set the UI position for a node
pub fn set_node_position(&mut self, node: NodeIndex, x: f32, y: f32) { pub fn set_node_position(&mut self, node: NodeIndex, x: f32, y: f32) {
self.node_positions.insert(node.index() as u32, (x, y)); self.node_positions.insert(node.index() as u32, (x, y));
@ -125,13 +130,10 @@ impl InstrumentGraph {
to: NodeIndex, to: NodeIndex,
to_port: usize, to_port: usize,
) -> Result<(), ConnectionError> { ) -> Result<(), ConnectionError> {
eprintln!("[GRAPH] connect() called: {:?} port {} -> {:?} port {}", from, from_port, to, to_port);
// Check if this exact connection already exists // Check if this exact connection already exists
if let Some(edge_idx) = self.graph.find_edge(from, to) { if let Some(edge_idx) = self.graph.find_edge(from, to) {
let existing_conn = &self.graph[edge_idx]; let existing_conn = &self.graph[edge_idx];
if existing_conn.from_port == from_port && existing_conn.to_port == to_port { if existing_conn.from_port == from_port && existing_conn.to_port == to_port {
eprintln!("[GRAPH] Connection already exists, skipping duplicate");
return Ok(()); // Connection already exists, don't create duplicate return Ok(()); // Connection already exists, don't create duplicate
} }
} }
@ -321,11 +323,6 @@ impl InstrumentGraph {
// Use the requested output buffer size for processing // Use the requested output buffer size for processing
let process_size = output_buffer.len(); let process_size = output_buffer.len();
if process_size > self.buffer_size * 2 {
eprintln!("[GRAPH] WARNING: process_size {} > allocated buffer_size {} * 2",
process_size, self.buffer_size);
}
// Clear all output buffers (audio/CV and MIDI) // Clear all output buffers (audio/CV and MIDI)
for node in self.graph.node_weights_mut() { for node in self.graph.node_weights_mut() {
for buffer in &mut node.output_buffers { for buffer in &mut node.output_buffers {
@ -609,6 +606,113 @@ impl InstrumentGraph {
} }
} }
// For SimpleSampler nodes, serialize the loaded sample
if node.node_type() == "SimpleSampler" {
use crate::audio::node_graph::nodes::SimpleSamplerNode;
use crate::audio::node_graph::preset::{EmbeddedSampleData, SampleData};
use base64::{Engine as _, engine::general_purpose};
let node_ptr = &**node as *const dyn crate::audio::node_graph::AudioNode;
let node_ptr = node_ptr as *const SimpleSamplerNode;
unsafe {
let sampler_node = &*node_ptr;
if let Some(sample_path) = sampler_node.get_sample_path() {
// Check file size
let should_embed = std::fs::metadata(sample_path)
.map(|m| m.len() < 100_000) // < 100KB
.unwrap_or(false);
if should_embed {
// Embed the sample data
let (sample_data, sample_rate) = sampler_node.get_sample_data_for_embedding();
// Convert f32 samples to bytes
let bytes: Vec<u8> = sample_data
.iter()
.flat_map(|&f| f.to_le_bytes())
.collect();
// Encode to base64
let data_base64 = general_purpose::STANDARD.encode(&bytes);
serialized.sample_data = Some(SampleData::SimpleSampler {
file_path: Some(sample_path.to_string()),
embedded_data: Some(EmbeddedSampleData {
data_base64,
sample_rate: sample_rate as u32,
}),
});
} else {
// Just save the file path
serialized.sample_data = Some(SampleData::SimpleSampler {
file_path: Some(sample_path.to_string()),
embedded_data: None,
});
}
}
}
}
// For MultiSampler nodes, serialize all loaded layers
if node.node_type() == "MultiSampler" {
use crate::audio::node_graph::nodes::MultiSamplerNode;
use crate::audio::node_graph::preset::{EmbeddedSampleData, LayerData, SampleData};
use base64::{Engine as _, engine::general_purpose};
let node_ptr = &**node as *const dyn crate::audio::node_graph::AudioNode;
let node_ptr = node_ptr as *const MultiSamplerNode;
unsafe {
let multi_sampler_node = &*node_ptr;
let layers_info = multi_sampler_node.get_layers_info();
if !layers_info.is_empty() {
let layers: Vec<LayerData> = layers_info
.iter()
.enumerate()
.map(|(layer_index, info)| {
// Check if we should embed this layer
let should_embed = std::fs::metadata(&info.file_path)
.map(|m| m.len() < 100_000) // < 100KB
.unwrap_or(false);
let embedded_data = if should_embed {
// Get the sample data for this layer
if let Some((sample_data, sample_rate)) = multi_sampler_node.get_layer_data(layer_index) {
// Convert f32 samples to bytes
let bytes: Vec<u8> = sample_data
.iter()
.flat_map(|&f| f.to_le_bytes())
.collect();
// Encode to base64
let data_base64 = general_purpose::STANDARD.encode(&bytes);
Some(EmbeddedSampleData {
data_base64,
sample_rate: sample_rate as u32,
})
} else {
None
}
} else {
None
};
LayerData {
file_path: Some(info.file_path.clone()),
embedded_data,
key_min: info.key_min,
key_max: info.key_max,
root_key: info.root_key,
velocity_min: info.velocity_min,
velocity_max: info.velocity_max,
}
})
.collect();
serialized.sample_data = Some(SampleData::MultiSampler { layers });
}
}
}
// Save position if available // Save position if available
if let Some(pos) = self.get_node_position(node_idx) { if let Some(pos) = self.get_node_position(node_idx) {
serialized.set_position(pos.0, pos.1); serialized.set_position(pos.0, pos.1);
@ -659,6 +763,18 @@ impl InstrumentGraph {
"Mixer" => Box::new(MixerNode::new("Mixer")), "Mixer" => Box::new(MixerNode::new("Mixer")),
"Filter" => Box::new(FilterNode::new("Filter")), "Filter" => Box::new(FilterNode::new("Filter")),
"ADSR" => Box::new(ADSRNode::new("ADSR")), "ADSR" => Box::new(ADSRNode::new("ADSR")),
"LFO" => Box::new(LFONode::new("LFO")),
"NoiseGenerator" => Box::new(NoiseGeneratorNode::new("Noise")),
"Splitter" => Box::new(SplitterNode::new("Splitter")),
"Pan" => Box::new(PanNode::new("Pan")),
"Delay" => Box::new(DelayNode::new("Delay")),
"Reverb" => Box::new(ReverbNode::new("Reverb")),
"Chorus" => Box::new(ChorusNode::new("Chorus")),
"Flanger" => Box::new(FlangerNode::new("Flanger")),
"FMSynth" => Box::new(FMSynthNode::new("FM Synth")),
"WavetableOscillator" => Box::new(WavetableOscillatorNode::new("Wavetable")),
"SimpleSampler" => Box::new(SimpleSamplerNode::new("Sampler")),
"MultiSampler" => Box::new(MultiSamplerNode::new("Multi Sampler")),
"MidiInput" => Box::new(MidiInputNode::new("MIDI Input")), "MidiInput" => Box::new(MidiInputNode::new("MIDI Input")),
"MidiToCV" => Box::new(MidiToCVNode::new("MIDI→CV")), "MidiToCV" => Box::new(MidiToCVNode::new("MIDI→CV")),
"AudioToCV" => Box::new(AudioToCVNode::new("Audio→CV")), "AudioToCV" => Box::new(AudioToCVNode::new("Audio→CV")),
@ -686,45 +802,121 @@ impl InstrumentGraph {
index_map.insert(serialized_node.id, node_idx); index_map.insert(serialized_node.id, node_idx);
// Set parameters // Set parameters
eprintln!("[PRESET] Node {}: type={}, params={:?}", serialized_node.id, serialized_node.node_type, serialized_node.parameters);
for (&param_id, &value) in &serialized_node.parameters { for (&param_id, &value) in &serialized_node.parameters {
if let Some(graph_node) = graph.graph.node_weight_mut(node_idx) { if let Some(graph_node) = graph.graph.node_weight_mut(node_idx) {
eprintln!("[PRESET] Setting param {} = {}", param_id, value);
graph_node.node.set_parameter(param_id, value); graph_node.node.set_parameter(param_id, value);
} }
} }
// Restore sample data for sampler nodes
if let Some(ref sample_data) = serialized_node.sample_data {
match sample_data {
crate::audio::node_graph::preset::SampleData::SimpleSampler { file_path, embedded_data } => {
// Load sample into SimpleSampler
if let Some(graph_node) = graph.graph.node_weight_mut(node_idx) {
let node_ptr = &mut *graph_node.node as *mut dyn crate::audio::node_graph::AudioNode;
let node_ptr = node_ptr as *mut SimpleSamplerNode;
unsafe {
let sampler_node = &mut *node_ptr;
// Try embedded data first, then fall back to file path
if let Some(ref embedded) = embedded_data {
use base64::{Engine as _, engine::general_purpose};
// Decode base64
if let Ok(bytes) = general_purpose::STANDARD.decode(&embedded.data_base64) {
// Convert bytes back to f32 samples
let samples: Vec<f32> = bytes
.chunks_exact(4)
.map(|chunk| {
f32::from_le_bytes([chunk[0], chunk[1], chunk[2], chunk[3]])
})
.collect();
sampler_node.set_sample(samples, embedded.sample_rate as f32);
}
} else if let Some(ref path) = file_path {
// Fall back to loading from file
let _ = sampler_node.load_sample_from_file(path);
}
}
}
}
crate::audio::node_graph::preset::SampleData::MultiSampler { layers } => {
// Load layers into MultiSampler
if let Some(graph_node) = graph.graph.node_weight_mut(node_idx) {
let node_ptr = &mut *graph_node.node as *mut dyn crate::audio::node_graph::AudioNode;
let node_ptr = node_ptr as *mut MultiSamplerNode;
unsafe {
let multi_sampler_node = &mut *node_ptr;
for layer in layers {
// Try embedded data first, then fall back to file path
if let Some(ref embedded) = layer.embedded_data {
use base64::{Engine as _, engine::general_purpose};
// Decode base64
if let Ok(bytes) = general_purpose::STANDARD.decode(&embedded.data_base64) {
// Convert bytes back to f32 samples
let samples: Vec<f32> = bytes
.chunks_exact(4)
.map(|chunk| {
f32::from_le_bytes([chunk[0], chunk[1], chunk[2], chunk[3]])
})
.collect();
multi_sampler_node.add_layer(
samples,
embedded.sample_rate as f32,
layer.key_min,
layer.key_max,
layer.root_key,
layer.velocity_min,
layer.velocity_max,
);
}
} else if let Some(ref path) = layer.file_path {
// Fall back to loading from file
let _ = multi_sampler_node.load_layer_from_file(
path,
layer.key_min,
layer.key_max,
layer.root_key,
layer.velocity_min,
layer.velocity_max,
);
}
}
}
}
}
}
}
// Restore position // Restore position
graph.set_node_position(node_idx, serialized_node.position.0, serialized_node.position.1); graph.set_node_position(node_idx, serialized_node.position.0, serialized_node.position.1);
} }
// Create connections // Create connections
eprintln!("[PRESET] Creating {} connections", preset.connections.len());
for conn in &preset.connections { for conn in &preset.connections {
let from_idx = index_map.get(&conn.from_node) let from_idx = index_map.get(&conn.from_node)
.ok_or_else(|| format!("Connection from unknown node {}", conn.from_node))?; .ok_or_else(|| format!("Connection from unknown node {}", conn.from_node))?;
let to_idx = index_map.get(&conn.to_node) let to_idx = index_map.get(&conn.to_node)
.ok_or_else(|| format!("Connection to unknown node {}", conn.to_node))?; .ok_or_else(|| format!("Connection to unknown node {}", conn.to_node))?;
eprintln!("[PRESET] Connecting: node {} port {} -> node {} port {}", conn.from_node, conn.from_port, conn.to_node, conn.to_port);
graph.connect(*from_idx, conn.from_port, *to_idx, conn.to_port) graph.connect(*from_idx, conn.from_port, *to_idx, conn.to_port)
.map_err(|e| format!("Failed to connect nodes: {:?}", e))?; .map_err(|e| format!("Failed to connect nodes: {:?}", e))?;
} }
// Set MIDI targets // Set MIDI targets
eprintln!("[PRESET] Setting MIDI targets: {:?}", preset.midi_targets);
for &target_id in &preset.midi_targets { for &target_id in &preset.midi_targets {
if let Some(&target_idx) = index_map.get(&target_id) { if let Some(&target_idx) = index_map.get(&target_id) {
eprintln!("[PRESET] MIDI target: node {} -> index {:?}", target_id, target_idx);
graph.set_midi_target(target_idx, true); graph.set_midi_target(target_idx, true);
} }
} }
// Set output node // Set output node
eprintln!("[PRESET] Setting output node: {:?}", preset.output_node);
if let Some(output_id) = preset.output_node { if let Some(output_id) = preset.output_node {
if let Some(&output_idx) = index_map.get(&output_id) { if let Some(&output_idx) = index_map.get(&output_id) {
eprintln!("[PRESET] Output node: {} -> index {:?}", output_id, output_idx);
graph.output_node = Some(output_idx); graph.output_node = Some(output_idx);
} }
} }

View File

@ -0,0 +1,234 @@
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType};
use crate::audio::midi::MidiEvent;
use std::f32::consts::PI;
const PARAM_RATE: u32 = 0;
const PARAM_DEPTH: u32 = 1;
const PARAM_WET_DRY: u32 = 2;
const MAX_DELAY_MS: f32 = 50.0;
const BASE_DELAY_MS: f32 = 15.0;
/// Chorus effect using modulated delay lines
///
/// A sine LFO modulates the read position of a short (15–50 ms) stereo
/// delay line; the delayed signal is mixed with the dry input in
/// `process()`. No feedback path (that distinguishes it from the flanger).
pub struct ChorusNode {
    name: String,
    rate: f32,      // LFO rate in Hz (0.1 to 5 Hz)
    depth: f32,     // Modulation depth 0.0 to 1.0
    wet_dry: f32,   // 0.0 = dry only, 1.0 = wet only
    // Delay buffers for left and right channels (circular)
    delay_buffer_left: Vec<f32>,
    delay_buffer_right: Vec<f32>,
    // Index of the next slot to be written in both delay buffers
    write_position: usize,
    // Capacity of each delay buffer (MAX_DELAY_MS worth of samples)
    max_delay_samples: usize,
    // Last sample rate seen by process(); buffers are resized on change
    sample_rate: u32,
    // LFO state: normalized phase in [0, 1)
    lfo_phase: f32,
    inputs: Vec<NodePort>,
    outputs: Vec<NodePort>,
    parameters: Vec<Parameter>,
}
impl ChorusNode {
    /// Construct a chorus with default settings (rate 1 Hz, depth 0.5, 50/50 mix).
    pub fn new(name: impl Into<String>) -> Self {
        // Size the delay lines for the worst case at an assumed 48 kHz;
        // process() re-allocates if the real sample rate turns out to differ.
        let capacity = ((MAX_DELAY_MS / 1000.0) * 48000.0) as usize;

        Self {
            name: name.into(),
            rate: 1.0,
            depth: 0.5,
            wet_dry: 0.5,
            delay_buffer_left: vec![0.0; capacity],
            delay_buffer_right: vec![0.0; capacity],
            write_position: 0,
            max_delay_samples: capacity,
            sample_rate: 48000,
            lfo_phase: 0.0,
            inputs: vec![NodePort::new("Audio In", SignalType::Audio, 0)],
            outputs: vec![NodePort::new("Audio Out", SignalType::Audio, 0)],
            parameters: vec![
                Parameter::new(PARAM_RATE, "Rate", 0.1, 5.0, 1.0, ParameterUnit::Frequency),
                Parameter::new(PARAM_DEPTH, "Depth", 0.0, 1.0, 0.5, ParameterUnit::Generic),
                Parameter::new(PARAM_WET_DRY, "Wet/Dry", 0.0, 1.0, 0.5, ParameterUnit::Generic),
            ],
        }
    }

    /// Read the sample `delay_samples` behind the write head, linearly
    /// interpolating between the two nearest stored samples so that the
    /// modulated (fractional) delay stays click-free.
    fn read_interpolated_sample(&self, buffer: &[f32], delay_samples: f32) -> f32 {
        let len = self.max_delay_samples;

        // Keep the requested delay inside the buffer.
        let clamped = delay_samples.clamp(0.0, (len - 1) as f32);

        // Fractional read position, wrapped into [0, len).
        let mut pos = self.write_position as f32 - clamped;
        if pos < 0.0 {
            pos += len as f32;
        }

        let idx = pos.floor() as usize;
        let frac = pos - idx as f32;

        let sample1 = buffer[idx % len];
        let sample2 = buffer[(idx + 1) % len];

        sample1 * (1.0 - frac) + sample2 * frac
    }
}
impl AudioNode for ChorusNode {
    fn category(&self) -> NodeCategory {
        NodeCategory::Effect
    }

    fn inputs(&self) -> &[NodePort] {
        &self.inputs
    }

    fn outputs(&self) -> &[NodePort] {
        &self.outputs
    }

    fn parameters(&self) -> &[Parameter] {
        &self.parameters
    }

    /// Clamp and store a parameter value; unknown ids are ignored.
    fn set_parameter(&mut self, id: u32, value: f32) {
        match id {
            PARAM_RATE => self.rate = value.clamp(0.1, 5.0),
            PARAM_DEPTH => self.depth = value.clamp(0.0, 1.0),
            PARAM_WET_DRY => self.wet_dry = value.clamp(0.0, 1.0),
            _ => {}
        }
    }

    fn get_parameter(&self, id: u32) -> f32 {
        match id {
            PARAM_RATE => self.rate,
            PARAM_DEPTH => self.depth,
            PARAM_WET_DRY => self.wet_dry,
            _ => 0.0,
        }
    }

    /// Process interleaved stereo audio: mix the dry input with a reading of
    /// the delay line whose length is swept by a unipolar sine LFO.
    fn process(
        &mut self,
        inputs: &[&[f32]],
        outputs: &mut [&mut [f32]],
        _midi_inputs: &[&[MidiEvent]],
        _midi_outputs: &mut [&mut Vec<MidiEvent>],
        sample_rate: u32,
    ) {
        if inputs.is_empty() || outputs.is_empty() {
            return;
        }

        // Re-size the delay lines if the host sample rate changed.
        if self.sample_rate != sample_rate {
            self.sample_rate = sample_rate;
            self.max_delay_samples = ((MAX_DELAY_MS / 1000.0) * sample_rate as f32) as usize;
            self.delay_buffer_left.resize(self.max_delay_samples, 0.0);
            self.delay_buffer_right.resize(self.max_delay_samples, 0.0);
            self.write_position = 0;
        }

        let input = inputs[0];
        let output = &mut outputs[0];

        // Audio signals are stereo (interleaved L/R); process the overlap.
        let frame_count = (input.len() / 2).min(output.len() / 2);

        let wet_gain = self.wet_dry;
        let dry_gain = 1.0 - wet_gain;
        let sr = self.sample_rate as f32;
        let base_delay = (BASE_DELAY_MS / 1000.0) * sr;
        let mod_span = (MAX_DELAY_MS - BASE_DELAY_MS) / 1000.0 * sr;

        for i in 0..frame_count {
            let left_in = input[i * 2];
            let right_in = input[i * 2 + 1];

            // Unipolar sine LFO (0..1) scaled by depth selects the delay length.
            let lfo = ((self.lfo_phase * 2.0 * PI).sin() * 0.5 + 0.5) * self.depth;
            let delay = base_delay + lfo * mod_span;

            // Read the delayed (wet) samples with interpolation — before this
            // frame's input is written, so the delay length stays exact.
            let wet_l = self.read_interpolated_sample(&self.delay_buffer_left, delay);
            let wet_r = self.read_interpolated_sample(&self.delay_buffer_right, delay);

            output[i * 2] = left_in * dry_gain + wet_l * wet_gain;
            output[i * 2 + 1] = right_in * dry_gain + wet_r * wet_gain;

            // Feed the dry signal into the delay line (a chorus has no feedback).
            self.delay_buffer_left[self.write_position] = left_in;
            self.delay_buffer_right[self.write_position] = right_in;
            self.write_position = (self.write_position + 1) % self.max_delay_samples;

            // Advance and wrap the LFO phase.
            self.lfo_phase += self.rate / self.sample_rate as f32;
            if self.lfo_phase >= 1.0 {
                self.lfo_phase -= 1.0;
            }
        }
    }

    /// Clear all runtime state so playback restarts silent.
    fn reset(&mut self) {
        self.delay_buffer_left.fill(0.0);
        self.delay_buffer_right.fill(0.0);
        self.write_position = 0;
        self.lfo_phase = 0.0;
    }

    fn node_type(&self) -> &str {
        "Chorus"
    }

    fn name(&self) -> &str {
        &self.name
    }

    /// Clone the configuration; runtime state (buffers, phase) starts fresh.
    fn clone_node(&self) -> Box<dyn AudioNode> {
        Box::new(Self {
            name: self.name.clone(),
            rate: self.rate,
            depth: self.depth,
            wet_dry: self.wet_dry,
            delay_buffer_left: vec![0.0; self.max_delay_samples],
            delay_buffer_right: vec![0.0; self.max_delay_samples],
            write_position: 0,
            max_delay_samples: self.max_delay_samples,
            sample_rate: self.sample_rate,
            lfo_phase: 0.0,
            inputs: self.inputs.clone(),
            outputs: self.outputs.clone(),
            parameters: self.parameters.clone(),
        })
    }
}

View File

@ -0,0 +1,211 @@
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType};
use crate::audio::midi::MidiEvent;
const PARAM_DELAY_TIME: u32 = 0;
const PARAM_FEEDBACK: u32 = 1;
const PARAM_WET_DRY: u32 = 2;
const MAX_DELAY_SECONDS: f32 = 2.0;
/// Stereo delay node with feedback
///
/// Writes the input plus scaled feedback into a circular buffer and mixes a
/// delayed reading of that buffer back with the dry signal in `process()`.
pub struct DelayNode {
    name: String,
    delay_time: f32, // seconds (0.001 to MAX_DELAY_SECONDS)
    feedback: f32,   // 0.0 to 0.95
    wet_dry: f32,    // 0.0 = dry only, 1.0 = wet only
    // Delay buffers for left and right channels (circular)
    delay_buffer_left: Vec<f32>,
    delay_buffer_right: Vec<f32>,
    // Index of the next slot to be written in both delay buffers
    write_position: usize,
    // Capacity of each buffer (MAX_DELAY_SECONDS at the current sample rate)
    max_delay_samples: usize,
    // Last sample rate seen by process(); buffers are resized on change
    sample_rate: u32,
    inputs: Vec<NodePort>,
    outputs: Vec<NodePort>,
    parameters: Vec<Parameter>,
}
impl DelayNode {
    /// Construct a delay with defaults: 0.5 s delay, 50% feedback, 50/50 mix.
    pub fn new(name: impl Into<String>) -> Self {
        let name = name.into();
        let inputs = vec![
            NodePort::new("Audio In", SignalType::Audio, 0),
        ];
        let outputs = vec![
            NodePort::new("Audio Out", SignalType::Audio, 0),
        ];
        let parameters = vec![
            Parameter::new(PARAM_DELAY_TIME, "Delay Time", 0.001, MAX_DELAY_SECONDS, 0.5, ParameterUnit::Time),
            Parameter::new(PARAM_FEEDBACK, "Feedback", 0.0, 0.95, 0.5, ParameterUnit::Generic),
            Parameter::new(PARAM_WET_DRY, "Wet/Dry", 0.0, 1.0, 0.5, ParameterUnit::Generic),
        ];

        // Allocate for the worst case at an assumed 48 kHz; process() resizes
        // the buffers if the real sample rate turns out to differ.
        let max_delay_samples = (MAX_DELAY_SECONDS * 48000.0) as usize;

        Self {
            name,
            delay_time: 0.5,
            feedback: 0.5,
            wet_dry: 0.5,
            delay_buffer_left: vec![0.0; max_delay_samples],
            delay_buffer_right: vec![0.0; max_delay_samples],
            write_position: 0,
            max_delay_samples,
            sample_rate: 48000,
            inputs,
            outputs,
            parameters,
        }
    }

    /// Current delay length in whole samples at the current sample rate.
    fn get_delay_samples(&self) -> usize {
        (self.delay_time * self.sample_rate as f32) as usize
    }

    /// Read the sample `delay_samples` behind the write head.
    ///
    /// Wraparound-safe: the requested delay is clamped to the buffer length
    /// first, so the index arithmetic below can never underflow `usize`
    /// (the previous formulation panicked in debug builds if a caller ever
    /// passed delay_samples > max_delay_samples + write_position).
    fn read_delayed_sample(&self, buffer: &[f32], delay_samples: usize) -> f32 {
        let delay = delay_samples.min(self.max_delay_samples);
        let read_pos =
            (self.write_position + self.max_delay_samples - delay) % self.max_delay_samples;
        buffer[read_pos]
    }
}
impl AudioNode for DelayNode {
fn category(&self) -> NodeCategory {
NodeCategory::Effect
}
fn inputs(&self) -> &[NodePort] {
&self.inputs
}
fn outputs(&self) -> &[NodePort] {
&self.outputs
}
fn parameters(&self) -> &[Parameter] {
&self.parameters
}
fn set_parameter(&mut self, id: u32, value: f32) {
match id {
PARAM_DELAY_TIME => {
self.delay_time = value.clamp(0.001, MAX_DELAY_SECONDS);
}
PARAM_FEEDBACK => {
self.feedback = value.clamp(0.0, 0.95);
}
PARAM_WET_DRY => {
self.wet_dry = value.clamp(0.0, 1.0);
}
_ => {}
}
}
fn get_parameter(&self, id: u32) -> f32 {
match id {
PARAM_DELAY_TIME => self.delay_time,
PARAM_FEEDBACK => self.feedback,
PARAM_WET_DRY => self.wet_dry,
_ => 0.0,
}
}
/// Render one block of audio: mixes the dry input with the delayed signal
/// and writes the input (plus feedback) into the circular delay line.
fn process(
    &mut self,
    inputs: &[&[f32]],
    outputs: &mut [&mut [f32]],
    _midi_inputs: &[&[MidiEvent]],
    _midi_outputs: &mut [&mut Vec<MidiEvent>],
    sample_rate: u32,
) {
    // Nothing to do without at least one audio input and output buffer.
    if inputs.is_empty() || outputs.is_empty() {
        return;
    }
    // Update sample rate if changed: resize the delay lines to hold
    // MAX_DELAY_SECONDS at the new rate and rewind the write head.
    if self.sample_rate != sample_rate {
        self.sample_rate = sample_rate;
        self.max_delay_samples = (MAX_DELAY_SECONDS * sample_rate as f32) as usize;
        self.delay_buffer_left.resize(self.max_delay_samples, 0.0);
        self.delay_buffer_right.resize(self.max_delay_samples, 0.0);
        self.write_position = 0;
    }
    let input = inputs[0];
    let output = &mut outputs[0];
    // Audio signals are stereo (interleaved L/R)
    let frames = input.len() / 2;
    let output_frames = output.len() / 2;
    let frames_to_process = frames.min(output_frames);
    // Delay length in whole samples, clamped to [1, max_delay_samples - 1]
    // so the read head never coincides with the write head.
    let delay_samples = self.get_delay_samples().max(1).min(self.max_delay_samples - 1);
    for frame in 0..frames_to_process {
        let left_in = input[frame * 2];
        let right_in = input[frame * 2 + 1];
        // Read delayed samples (read happens before this frame's write)
        let left_delayed = self.read_delayed_sample(&self.delay_buffer_left, delay_samples);
        let right_delayed = self.read_delayed_sample(&self.delay_buffer_right, delay_samples);
        // Mix dry and wet signals (gains sum to 1.0)
        let dry_gain = 1.0 - self.wet_dry;
        let wet_gain = self.wet_dry;
        let left_out = left_in * dry_gain + left_delayed * wet_gain;
        let right_out = right_in * dry_gain + right_delayed * wet_gain;
        output[frame * 2] = left_out;
        output[frame * 2 + 1] = right_out;
        // Write to delay buffer with feedback (feedback is clamped to 0.95
        // in set_parameter, keeping the loop stable)
        self.delay_buffer_left[self.write_position] = left_in + left_delayed * self.feedback;
        self.delay_buffer_right[self.write_position] = right_in + right_delayed * self.feedback;
        // Advance write position around the circular buffer
        self.write_position = (self.write_position + 1) % self.max_delay_samples;
    }
}
/// Clear all delay memory and rewind the write head to the start.
fn reset(&mut self) {
    for sample in self.delay_buffer_left.iter_mut() {
        *sample = 0.0;
    }
    for sample in self.delay_buffer_right.iter_mut() {
        *sample = 0.0;
    }
    self.write_position = 0;
}
// Stable type identifier used by the engine's node factory ("Delay").
fn node_type(&self) -> &str {
    "Delay"
}
// User-visible display name of this node instance.
fn name(&self) -> &str {
    &self.name
}
// Duplicate this node's configuration. Parameter values are copied, but
// the delay buffers are deliberately allocated fresh (all zeros) and the
// write head rewound, so the clone starts from silence.
fn clone_node(&self) -> Box<dyn AudioNode> {
    Box::new(Self {
        name: self.name.clone(),
        delay_time: self.delay_time,
        feedback: self.feedback,
        wet_dry: self.wet_dry,
        delay_buffer_left: vec![0.0; self.max_delay_samples],
        delay_buffer_right: vec![0.0; self.max_delay_samples],
        write_position: 0,
        max_delay_samples: self.max_delay_samples,
        sample_rate: self.sample_rate,
        inputs: self.inputs.clone(),
        outputs: self.outputs.clone(),
        parameters: self.parameters.clone(),
    })
}
}

View File

@ -0,0 +1,243 @@
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType};
use crate::audio::midi::MidiEvent;
use std::f32::consts::PI;
// Parameter ids exposed by the flanger node.
const PARAM_RATE: u32 = 0;
const PARAM_DEPTH: u32 = 1;
const PARAM_FEEDBACK: u32 = 2;
const PARAM_WET_DRY: u32 = 3;
// Delay-line bounds: the LFO sweeps the delay between BASE_DELAY_MS and
// MAX_DELAY_MS (the classic short-delay flanger range).
const MAX_DELAY_MS: f32 = 10.0;
const BASE_DELAY_MS: f32 = 1.0;
/// Flanger effect using modulated delay lines with feedback
pub struct FlangerNode {
    name: String,
    rate: f32,     // LFO rate in Hz (0.1 to 10 Hz)
    depth: f32,    // Modulation depth 0.0 to 1.0
    feedback: f32, // Feedback amount -0.95 to 0.95 (may invert polarity)
    wet_dry: f32,  // 0.0 = dry only, 1.0 = wet only
    // Circular delay buffers for left and right channels; each holds
    // max_delay_samples entries (MAX_DELAY_MS at the current sample rate)
    delay_buffer_left: Vec<f32>,
    delay_buffer_right: Vec<f32>,
    write_position: usize,    // shared write head for both buffers
    max_delay_samples: usize, // capacity of each delay buffer
    sample_rate: u32,         // last seen rate; buffers resize on change
    // LFO state: normalized phase in [0, 1)
    lfo_phase: f32,
    inputs: Vec<NodePort>,      // one stereo audio input port
    outputs: Vec<NodePort>,     // one stereo audio output port
    parameters: Vec<Parameter>, // parameter descriptors
}
impl FlangerNode {
    /// Build a flanger with default settings: 0.5 Hz sweep, 70% depth,
    /// 50% feedback and an equal wet/dry mix. Delay lines are sized for
    /// 48 kHz; `process()` reallocates if the real rate differs.
    pub fn new(name: impl Into<String>) -> Self {
        let name = name.into();
        // Single stereo audio in, single stereo audio out.
        let inputs = vec![NodePort::new("Audio In", SignalType::Audio, 0)];
        let outputs = vec![NodePort::new("Audio Out", SignalType::Audio, 0)];
        let parameters = vec![
            Parameter::new(PARAM_RATE, "Rate", 0.1, 10.0, 0.5, ParameterUnit::Frequency),
            Parameter::new(PARAM_DEPTH, "Depth", 0.0, 1.0, 0.7, ParameterUnit::Generic),
            Parameter::new(PARAM_FEEDBACK, "Feedback", -0.95, 0.95, 0.5, ParameterUnit::Generic),
            Parameter::new(PARAM_WET_DRY, "Wet/Dry", 0.0, 1.0, 0.5, ParameterUnit::Generic),
        ];
        // Enough room for the maximum sweep at the assumed 48 kHz rate.
        let max_delay_samples = ((MAX_DELAY_MS / 1000.0) * 48000.0) as usize;
        Self {
            name,
            rate: 0.5,
            depth: 0.7,
            feedback: 0.5,
            wet_dry: 0.5,
            delay_buffer_left: vec![0.0; max_delay_samples],
            delay_buffer_right: vec![0.0; max_delay_samples],
            write_position: 0,
            max_delay_samples,
            sample_rate: 48000,
            lfo_phase: 0.0,
            inputs,
            outputs,
            parameters,
        }
    }

    /// Fetch a fractionally-delayed sample from `buffer`, blending the two
    /// nearest stored samples with linear interpolation so the modulated
    /// delay changes smoothly instead of zippering.
    fn read_interpolated_sample(&self, buffer: &[f32], delay_samples: f32) -> f32 {
        let clamped = delay_samples.clamp(0.0, (self.max_delay_samples - 1) as f32);
        // Walk backwards from the write head, wrapping into the buffer.
        let mut position = self.write_position as f32 - clamped;
        if position < 0.0 {
            position += self.max_delay_samples as f32;
        }
        let base = position.floor() as usize;
        let blend = position - base as f32;
        let lower = buffer[base % self.max_delay_samples];
        let upper = buffer[(base + 1) % self.max_delay_samples];
        lower * (1.0 - blend) + upper * blend
    }
}
impl AudioNode for FlangerNode {
    // The flanger transforms incoming audio, so it is an effect.
    fn category(&self) -> NodeCategory {
        NodeCategory::Effect
    }
    fn inputs(&self) -> &[NodePort] {
        &self.inputs
    }
    fn outputs(&self) -> &[NodePort] {
        &self.outputs
    }
    fn parameters(&self) -> &[Parameter] {
        &self.parameters
    }
    /// Set a parameter, clamped to the range advertised in `new()`.
    /// Feedback may be negative, inverting the feedback polarity.
    fn set_parameter(&mut self, id: u32, value: f32) {
        match id {
            PARAM_RATE => {
                self.rate = value.clamp(0.1, 10.0);
            }
            PARAM_DEPTH => {
                self.depth = value.clamp(0.0, 1.0);
            }
            PARAM_FEEDBACK => {
                self.feedback = value.clamp(-0.95, 0.95);
            }
            PARAM_WET_DRY => {
                self.wet_dry = value.clamp(0.0, 1.0);
            }
            _ => {}
        }
    }
    /// Read back a parameter; unknown ids yield 0.0.
    fn get_parameter(&self, id: u32) -> f32 {
        match id {
            PARAM_RATE => self.rate,
            PARAM_DEPTH => self.depth,
            PARAM_FEEDBACK => self.feedback,
            PARAM_WET_DRY => self.wet_dry,
            _ => 0.0,
        }
    }
    /// Render one block: an LFO sweeps the delay between BASE_DELAY_MS and
    /// MAX_DELAY_MS; the interpolated delayed signal is mixed with the dry
    /// input and fed back into the delay line.
    fn process(
        &mut self,
        inputs: &[&[f32]],
        outputs: &mut [&mut [f32]],
        _midi_inputs: &[&[MidiEvent]],
        _midi_outputs: &mut [&mut Vec<MidiEvent>],
        sample_rate: u32,
    ) {
        if inputs.is_empty() || outputs.is_empty() {
            return;
        }
        // Update sample rate if changed: resize the delay lines to hold
        // MAX_DELAY_MS at the new rate and rewind the write head.
        if self.sample_rate != sample_rate {
            self.sample_rate = sample_rate;
            self.max_delay_samples = ((MAX_DELAY_MS / 1000.0) * sample_rate as f32) as usize;
            self.delay_buffer_left.resize(self.max_delay_samples, 0.0);
            self.delay_buffer_right.resize(self.max_delay_samples, 0.0);
            self.write_position = 0;
        }
        let input = inputs[0];
        let output = &mut outputs[0];
        // Audio signals are stereo (interleaved L/R)
        let frames = input.len() / 2;
        let output_frames = output.len() / 2;
        let frames_to_process = frames.min(output_frames);
        let dry_gain = 1.0 - self.wet_dry;
        let wet_gain = self.wet_dry;
        // Loop-invariant sweep bounds, in samples at the current rate.
        let base_delay_samples = (BASE_DELAY_MS / 1000.0) * self.sample_rate as f32;
        let max_modulation_samples = (MAX_DELAY_MS - BASE_DELAY_MS) / 1000.0 * self.sample_rate as f32;
        for frame in 0..frames_to_process {
            let left_in = input[frame * 2];
            let right_in = input[frame * 2 + 1];
            // Generate LFO value (unipolar sine scaled by depth, 0..depth)
            let lfo_value = ((self.lfo_phase * 2.0 * PI).sin() * 0.5 + 0.5) * self.depth;
            // Calculate modulated delay time (fractional samples)
            let delay_samples = base_delay_samples + lfo_value * max_modulation_samples;
            // Read delayed samples with interpolation
            let left_delayed = self.read_interpolated_sample(&self.delay_buffer_left, delay_samples);
            let right_delayed = self.read_interpolated_sample(&self.delay_buffer_right, delay_samples);
            // Mix dry and wet signals
            output[frame * 2] = left_in * dry_gain + left_delayed * wet_gain;
            output[frame * 2 + 1] = right_in * dry_gain + right_delayed * wet_gain;
            // Write to delay buffer with feedback (|feedback| <= 0.95 keeps
            // the loop stable)
            self.delay_buffer_left[self.write_position] = left_in + left_delayed * self.feedback;
            self.delay_buffer_right[self.write_position] = right_in + right_delayed * self.feedback;
            // Advance write position around the circular buffer
            self.write_position = (self.write_position + 1) % self.max_delay_samples;
            // Advance LFO phase (normalized, one unit per cycle)
            self.lfo_phase += self.rate / self.sample_rate as f32;
            if self.lfo_phase >= 1.0 {
                self.lfo_phase -= 1.0;
            }
        }
    }
    /// Clear delay memory and restart the LFO from phase zero.
    fn reset(&mut self) {
        self.delay_buffer_left.fill(0.0);
        self.delay_buffer_right.fill(0.0);
        self.write_position = 0;
        self.lfo_phase = 0.0;
    }
    // Stable type identifier used by the engine's node factory.
    fn node_type(&self) -> &str {
        "Flanger"
    }
    fn name(&self) -> &str {
        &self.name
    }
    /// Clone configuration; delay buffers and LFO phase start fresh so the
    /// copy begins from silence.
    fn clone_node(&self) -> Box<dyn AudioNode> {
        Box::new(Self {
            name: self.name.clone(),
            rate: self.rate,
            depth: self.depth,
            feedback: self.feedback,
            wet_dry: self.wet_dry,
            delay_buffer_left: vec![0.0; self.max_delay_samples],
            delay_buffer_right: vec![0.0; self.max_delay_samples],
            write_position: 0,
            max_delay_samples: self.max_delay_samples,
            sample_rate: self.sample_rate,
            lfo_phase: 0.0,
            inputs: self.inputs.clone(),
            outputs: self.outputs.clone(),
            parameters: self.parameters.clone(),
        })
    }
}

View File

@ -0,0 +1,303 @@
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType};
use crate::audio::midi::MidiEvent;
use std::f32::consts::PI;
// Parameters for the FM synth
const PARAM_ALGORITHM: u32 = 0; // operator routing, 0-3 (see FMAlgorithm)
// Per-operator frequency ratio and output level for operators 1-4.
const PARAM_OP1_RATIO: u32 = 1;
const PARAM_OP1_LEVEL: u32 = 2;
const PARAM_OP2_RATIO: u32 = 3;
const PARAM_OP2_LEVEL: u32 = 4;
const PARAM_OP3_RATIO: u32 = 5;
const PARAM_OP3_LEVEL: u32 = 6;
const PARAM_OP4_RATIO: u32 = 7;
const PARAM_OP4_LEVEL: u32 = 8;
/// FM Algorithm types (inspired by DX7)
/// Algorithm determines how operators modulate each other
// Discriminants match the Algorithm parameter values 0-3 (see from_u32).
#[derive(Debug, Clone, Copy, PartialEq)]
enum FMAlgorithm {
    /// Stack: 1->2->3->4 (most harmonic)
    Stack = 0,
    /// Parallel: All operators to output (organ-like)
    Parallel = 1,
    /// Bell: 1->2, 3->4, both to output
    Bell = 2,
    /// Dual: 1->2->output, 3->4->output
    Dual = 3,
}
impl FMAlgorithm {
    /// Decode a parameter value into an algorithm; any out-of-range value
    /// falls back to the Stack routing.
    fn from_u32(value: u32) -> Self {
        const TABLE: [FMAlgorithm; 4] = [
            FMAlgorithm::Stack,
            FMAlgorithm::Parallel,
            FMAlgorithm::Bell,
            FMAlgorithm::Dual,
        ];
        TABLE.get(value as usize).copied().unwrap_or(FMAlgorithm::Stack)
    }
}
/// Single FM operator (oscillator)
struct FMOperator {
    phase: f32,           // normalized phase in [0, 1)
    frequency_ratio: f32, // Multiplier of base frequency (e.g., 1.0, 2.0, 0.5)
    level: f32,           // Output amplitude 0.0-1.0
}
impl FMOperator {
    /// An operator at unity frequency ratio and full level, phase zero.
    fn new() -> Self {
        Self {
            phase: 0.0,
            frequency_ratio: 1.0,
            level: 1.0,
        }
    }

    /// Produce one sample. `modulation` is added to the instantaneous
    /// phase (phase modulation, which sounds like FM); afterwards the
    /// phase advances by freq / sample_rate and wraps back into [0, 1).
    fn process(&mut self, base_freq: f32, modulation: f32, sample_rate: f32) -> f32 {
        let angle = self.phase * 2.0 * PI + modulation;
        let sample = angle.sin() * self.level;
        let freq = base_freq * self.frequency_ratio;
        self.phase += freq / sample_rate;
        if self.phase >= 1.0 {
            self.phase -= 1.0;
        }
        sample
    }

    /// Restart the oscillator from phase zero.
    fn reset(&mut self) {
        self.phase = 0.0;
    }
}
/// 4-operator FM synthesizer node
pub struct FMSynthNode {
    name: String,
    algorithm: FMAlgorithm, // current operator routing
    // Four operators
    operators: [FMOperator; 4],
    // Current frequency from V/oct input (Hz)
    current_frequency: f32,
    gate_active: bool, // true while the Gate CV is above 0.5
    sample_rate: u32,  // last seen sample rate
    inputs: Vec<NodePort>,      // V/Oct and Gate CV inputs
    outputs: Vec<NodePort>,     // one stereo audio output
    parameters: Vec<Parameter>, // parameter descriptors
}
impl FMSynthNode {
    /// Build a synth with the Stack algorithm and defaults of operator
    /// ratios 1/2/3/4 with tapering levels 1.0/0.8/0.6/0.4.
    pub fn new(name: impl Into<String>) -> Self {
        let name = name.into();
        // Pitch and gate arrive as CV; audio leaves as stereo.
        let inputs = vec![
            NodePort::new("V/Oct", SignalType::CV, 0),
            NodePort::new("Gate", SignalType::CV, 1),
        ];
        let outputs = vec![
            NodePort::new("Audio Out", SignalType::Audio, 0),
        ];
        let parameters = vec![
            Parameter::new(PARAM_ALGORITHM, "Algorithm", 0.0, 3.0, 0.0, ParameterUnit::Generic),
            Parameter::new(PARAM_OP1_RATIO, "Op1 Ratio", 0.25, 16.0, 1.0, ParameterUnit::Generic),
            Parameter::new(PARAM_OP1_LEVEL, "Op1 Level", 0.0, 1.0, 1.0, ParameterUnit::Generic),
            Parameter::new(PARAM_OP2_RATIO, "Op2 Ratio", 0.25, 16.0, 2.0, ParameterUnit::Generic),
            Parameter::new(PARAM_OP2_LEVEL, "Op2 Level", 0.0, 1.0, 0.8, ParameterUnit::Generic),
            Parameter::new(PARAM_OP3_RATIO, "Op3 Ratio", 0.25, 16.0, 3.0, ParameterUnit::Generic),
            Parameter::new(PARAM_OP3_LEVEL, "Op3 Level", 0.0, 1.0, 0.6, ParameterUnit::Generic),
            Parameter::new(PARAM_OP4_RATIO, "Op4 Ratio", 0.25, 16.0, 4.0, ParameterUnit::Generic),
            Parameter::new(PARAM_OP4_LEVEL, "Op4 Level", 0.0, 1.0, 0.4, ParameterUnit::Generic),
        ];
        Self {
            name,
            algorithm: FMAlgorithm::Stack,
            operators: [
                FMOperator::new(),
                FMOperator::new(),
                FMOperator::new(),
                FMOperator::new(),
            ],
            current_frequency: 440.0,
            gate_active: false,
            sample_rate: 48000,
            inputs,
            outputs,
            parameters,
        }
    }
    /// Convert V/oct CV to frequency
    /// (0 V = 440 Hz / A4; each volt doubles or halves the frequency).
    fn voct_to_freq(voct: f32) -> f32 {
        440.0 * 2.0_f32.powf(voct)
    }
    /// Process FM synthesis based on current algorithm
    ///
    /// Produces one mono sample; silent while the gate is low. Modulator
    /// outputs are scaled by 2.0 before use as phase modulation.
    /// NOTE(review): Bell and Dual currently compute the identical routing
    /// (2->1 and 4->3, summed); the Dual comment describes serial
    /// 1->2 / 3->4 chains instead — confirm which is intended.
    fn process_algorithm(&mut self) -> f32 {
        if !self.gate_active {
            return 0.0;
        }
        let base_freq = self.current_frequency;
        let sr = self.sample_rate as f32;
        match self.algorithm {
            FMAlgorithm::Stack => {
                // 1 -> 2 -> 3 -> 4 -> output
                let op4_out = self.operators[3].process(base_freq, 0.0, sr);
                let op3_out = self.operators[2].process(base_freq, op4_out * 2.0, sr);
                let op2_out = self.operators[1].process(base_freq, op3_out * 2.0, sr);
                let op1_out = self.operators[0].process(base_freq, op2_out * 2.0, sr);
                op1_out
            }
            FMAlgorithm::Parallel => {
                // All operators output directly (no modulation)
                let op1_out = self.operators[0].process(base_freq, 0.0, sr);
                let op2_out = self.operators[1].process(base_freq, 0.0, sr);
                let op3_out = self.operators[2].process(base_freq, 0.0, sr);
                let op4_out = self.operators[3].process(base_freq, 0.0, sr);
                (op1_out + op2_out + op3_out + op4_out) * 0.25
            }
            FMAlgorithm::Bell => {
                // 1 -> 2, 3 -> 4, both to output
                let op2_out = self.operators[1].process(base_freq, 0.0, sr);
                let op1_out = self.operators[0].process(base_freq, op2_out * 2.0, sr);
                let op4_out = self.operators[3].process(base_freq, 0.0, sr);
                let op3_out = self.operators[2].process(base_freq, op4_out * 2.0, sr);
                (op1_out + op3_out) * 0.5
            }
            FMAlgorithm::Dual => {
                // 1 -> 2 -> output, 3 -> 4 -> output
                let op2_out = self.operators[1].process(base_freq, 0.0, sr);
                let op1_out = self.operators[0].process(base_freq, op2_out * 2.0, sr);
                let op4_out = self.operators[3].process(base_freq, 0.0, sr);
                let op3_out = self.operators[2].process(base_freq, op4_out * 2.0, sr);
                (op1_out + op3_out) * 0.5
            }
        }
    }
}
impl AudioNode for FMSynthNode {
    // The FM synth generates audio from CV inputs.
    fn category(&self) -> NodeCategory {
        NodeCategory::Generator
    }
    fn inputs(&self) -> &[NodePort] {
        &self.inputs
    }
    fn outputs(&self) -> &[NodePort] {
        &self.outputs
    }
    fn parameters(&self) -> &[Parameter] {
        &self.parameters
    }
    /// Set a parameter: ratios are clamped to 0.25-16, levels to 0-1,
    /// and the algorithm decoded via `FMAlgorithm::from_u32`.
    fn set_parameter(&mut self, id: u32, value: f32) {
        match id {
            PARAM_ALGORITHM => {
                self.algorithm = FMAlgorithm::from_u32(value as u32);
            }
            PARAM_OP1_RATIO => self.operators[0].frequency_ratio = value.clamp(0.25, 16.0),
            PARAM_OP1_LEVEL => self.operators[0].level = value.clamp(0.0, 1.0),
            PARAM_OP2_RATIO => self.operators[1].frequency_ratio = value.clamp(0.25, 16.0),
            PARAM_OP2_LEVEL => self.operators[1].level = value.clamp(0.0, 1.0),
            PARAM_OP3_RATIO => self.operators[2].frequency_ratio = value.clamp(0.25, 16.0),
            PARAM_OP3_LEVEL => self.operators[2].level = value.clamp(0.0, 1.0),
            PARAM_OP4_RATIO => self.operators[3].frequency_ratio = value.clamp(0.25, 16.0),
            PARAM_OP4_LEVEL => self.operators[3].level = value.clamp(0.0, 1.0),
            _ => {}
        }
    }
    /// Read back a parameter; unknown ids yield 0.0.
    fn get_parameter(&self, id: u32) -> f32 {
        match id {
            PARAM_ALGORITHM => self.algorithm as u32 as f32,
            PARAM_OP1_RATIO => self.operators[0].frequency_ratio,
            PARAM_OP1_LEVEL => self.operators[0].level,
            PARAM_OP2_RATIO => self.operators[1].frequency_ratio,
            PARAM_OP2_LEVEL => self.operators[1].level,
            PARAM_OP3_RATIO => self.operators[2].frequency_ratio,
            PARAM_OP3_LEVEL => self.operators[2].level,
            PARAM_OP4_RATIO => self.operators[3].frequency_ratio,
            PARAM_OP4_LEVEL => self.operators[3].level,
            _ => 0.0,
        }
    }
    /// Render one block: per frame, read the V/Oct and Gate CVs, update
    /// pitch and gate state, then synthesize via the current algorithm.
    fn process(
        &mut self,
        inputs: &[&[f32]],
        outputs: &mut [&mut [f32]],
        _midi_inputs: &[&[MidiEvent]],
        _midi_outputs: &mut [&mut Vec<MidiEvent>],
        sample_rate: u32,
    ) {
        if outputs.is_empty() {
            return;
        }
        self.sample_rate = sample_rate;
        // Safely read the left channel of a stereo-interleaved CV buffer at
        // `frame`, clamping to the buffer's last complete frame. Returns 0.0
        // when the buffer is missing or shorter than one frame.
        // (Fixes the original `len() / 2 - 1`, which underflowed usize and
        // indexed out of bounds for a 1-sample buffer.)
        fn cv_at(buffers: &[&[f32]], index: usize, frame: usize) -> f32 {
            match buffers.get(index) {
                Some(buf) if buf.len() >= 2 => {
                    let last_frame = buf.len() / 2 - 1;
                    buf[frame.min(last_frame) * 2]
                }
                _ => 0.0,
            }
        }
        let output = &mut outputs[0];
        let frames = output.len() / 2;
        for frame in 0..frames {
            // Read CV inputs
            let voct = cv_at(inputs, 0, frame);
            let gate = cv_at(inputs, 1, frame);
            // Update state
            self.current_frequency = Self::voct_to_freq(voct);
            self.gate_active = gate > 0.5;
            // Generate sample, scaled down to prevent clipping
            let sample = self.process_algorithm() * 0.3;
            // Output stereo (same signal to both channels)
            output[frame * 2] = sample;
            output[frame * 2 + 1] = sample;
        }
    }
    /// Reset all operator phases and drop the gate.
    fn reset(&mut self) {
        for op in &mut self.operators {
            op.reset();
        }
        self.gate_active = false;
    }
    // Stable type identifier used by the engine's node factory.
    fn node_type(&self) -> &str {
        "FMSynth"
    }
    fn name(&self) -> &str {
        &self.name
    }
    /// Clone this node. Unlike the previous implementation (which rebuilt a
    /// default instance), this preserves the algorithm and every operator's
    /// ratio and level; transient state (phases, gate) starts fresh.
    fn clone_node(&self) -> Box<dyn AudioNode> {
        let mut node = Self::new(self.name.clone());
        node.algorithm = self.algorithm;
        for (dst, src) in node.operators.iter_mut().zip(self.operators.iter()) {
            dst.frequency_ratio = src.frequency_ratio;
            dst.level = src.level;
        }
        Box::new(node)
    }
}

View File

@ -8,7 +8,7 @@ pub struct MidiToCVNode {
note: u8, // Current MIDI note number note: u8, // Current MIDI note number
gate: f32, // Gate CV (1.0 when note on, 0.0 when off) gate: f32, // Gate CV (1.0 when note on, 0.0 when off)
velocity: f32, // Velocity CV (0.0-1.0) velocity: f32, // Velocity CV (0.0-1.0)
pitch_cv: f32, // Pitch CV (0.0-1.0 V/oct) pitch_cv: f32, // Pitch CV (V/Oct: 0V = A4, ±1V per octave)
inputs: Vec<NodePort>, inputs: Vec<NodePort>,
outputs: Vec<NodePort>, outputs: Vec<NodePort>,
parameters: Vec<Parameter>, parameters: Vec<Parameter>,
@ -24,7 +24,7 @@ impl MidiToCVNode {
]; ];
let outputs = vec![ let outputs = vec![
NodePort::new("V/Oct", SignalType::CV, 0), // 0.0-1.0 pitch CV NodePort::new("V/Oct", SignalType::CV, 0), // V/Oct: 0V = A4, ±1V per octave
NodePort::new("Gate", SignalType::CV, 1), // 1.0 = on, 0.0 = off NodePort::new("Gate", SignalType::CV, 1), // 1.0 = on, 0.0 = off
NodePort::new("Velocity", SignalType::CV, 2), // 0.0-1.0 NodePort::new("Velocity", SignalType::CV, 2), // 0.0-1.0
]; ];
@ -41,11 +41,12 @@ impl MidiToCVNode {
} }
} }
/// Convert MIDI note to V/oct CV (0-1 range representing pitch) /// Convert MIDI note to V/oct CV (proper V/Oct standard)
/// Maps MIDI notes 0-127 to CV 0.0-1.0 for pitch tracking /// 0V = A4 (MIDI 69), ±1V per octave
/// Middle C (MIDI 60) = -0.75V, A5 (MIDI 81) = +1.0V
fn midi_note_to_voct(note: u8) -> f32 { fn midi_note_to_voct(note: u8) -> f32 {
// Simple linear mapping: each semitone is 1/127 of the CV range // Standard V/Oct: 0V at A4, 1V per octave (12 semitones)
note as f32 / 127.0 (note as f32 - 69.0) / 12.0
} }
} }

View File

@ -1,33 +1,49 @@
mod adsr; mod adsr;
mod audio_to_cv; mod audio_to_cv;
mod chorus;
mod delay;
mod filter; mod filter;
mod flanger;
mod fm_synth;
mod gain; mod gain;
mod lfo; mod lfo;
mod midi_input; mod midi_input;
mod midi_to_cv; mod midi_to_cv;
mod mixer; mod mixer;
mod multi_sampler;
mod noise; mod noise;
mod oscillator; mod oscillator;
mod oscilloscope; mod oscilloscope;
mod output; mod output;
mod pan; mod pan;
mod reverb;
mod simple_sampler;
mod splitter; mod splitter;
mod template_io; mod template_io;
mod voice_allocator; mod voice_allocator;
mod wavetable_oscillator;
pub use adsr::ADSRNode; pub use adsr::ADSRNode;
pub use audio_to_cv::AudioToCVNode; pub use audio_to_cv::AudioToCVNode;
pub use chorus::ChorusNode;
pub use delay::DelayNode;
pub use filter::FilterNode; pub use filter::FilterNode;
pub use flanger::FlangerNode;
pub use fm_synth::FMSynthNode;
pub use gain::GainNode; pub use gain::GainNode;
pub use lfo::LFONode; pub use lfo::LFONode;
pub use midi_input::MidiInputNode; pub use midi_input::MidiInputNode;
pub use midi_to_cv::MidiToCVNode; pub use midi_to_cv::MidiToCVNode;
pub use mixer::MixerNode; pub use mixer::MixerNode;
pub use multi_sampler::MultiSamplerNode;
pub use noise::NoiseGeneratorNode; pub use noise::NoiseGeneratorNode;
pub use oscillator::OscillatorNode; pub use oscillator::OscillatorNode;
pub use oscilloscope::OscilloscopeNode; pub use oscilloscope::OscilloscopeNode;
pub use output::AudioOutputNode; pub use output::AudioOutputNode;
pub use pan::PanNode; pub use pan::PanNode;
pub use reverb::ReverbNode;
pub use simple_sampler::SimpleSamplerNode;
pub use splitter::SplitterNode; pub use splitter::SplitterNode;
pub use template_io::{TemplateInputNode, TemplateOutputNode}; pub use template_io::{TemplateInputNode, TemplateOutputNode};
pub use voice_allocator::VoiceAllocatorNode; pub use voice_allocator::VoiceAllocatorNode;
pub use wavetable_oscillator::WavetableOscillatorNode;

View File

@ -0,0 +1,566 @@
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType};
use crate::audio::midi::MidiEvent;
// Parameters
const PARAM_GAIN: u32 = 0;      // output gain, 0.0-2.0
const PARAM_ATTACK: u32 = 1;    // envelope attack time in seconds
const PARAM_RELEASE: u32 = 2;   // envelope release time in seconds
const PARAM_TRANSPOSE: u32 = 3; // transpose in semitones, -24..24
/// Metadata about a loaded sample layer (for preset serialization)
#[derive(Clone, Debug)]
pub struct LayerInfo {
    pub file_path: String,   // source file the layer was loaded from
    pub key_min: u8,         // lowest MIDI note of the zone (inclusive)
    pub key_max: u8,         // highest MIDI note of the zone (inclusive)
    pub root_key: u8,        // note at which the sample plays unpitched
    pub velocity_min: u8,    // lowest matching velocity (inclusive)
    pub velocity_max: u8,    // highest matching velocity (inclusive)
}
/// Single sample with velocity range and key range
#[derive(Clone)]
struct SampleLayer {
    sample_data: Vec<f32>, // raw sample frames
    sample_rate: f32,      // native rate of the sample data
    // Key range: C-1 = 0, C0 = 12, middle C (C4) = 60, C9 = 120
    key_min: u8,
    key_max: u8,
    root_key: u8, // The original pitch of the sample
    // Velocity range: 0-127
    velocity_min: u8,
    velocity_max: u8,
}
impl SampleLayer {
    /// Bundle raw sample data with its key and velocity zone.
    fn new(
        sample_data: Vec<f32>,
        sample_rate: f32,
        key_min: u8,
        key_max: u8,
        root_key: u8,
        velocity_min: u8,
        velocity_max: u8,
    ) -> Self {
        Self {
            sample_data,
            sample_rate,
            key_min,
            key_max,
            root_key,
            velocity_min,
            velocity_max,
        }
    }

    /// True when both the note and the velocity fall inside this layer's
    /// inclusive key and velocity ranges.
    fn matches(&self, key: u8, velocity: u8) -> bool {
        let key_ok = (self.key_min..=self.key_max).contains(&key);
        let vel_ok = (self.velocity_min..=self.velocity_max).contains(&velocity);
        key_ok && vel_ok
    }
}
/// Active voice playing a sample
struct Voice {
    layer_index: usize, // index into MultiSamplerNode::layers
    playhead: f32,      // fractional read position in the layer's sample
    note: u8,           // note this voice was triggered with
    velocity: u8,       // trigger velocity (0-127), scales amplitude
    is_active: bool,    // false once released and fully faded out
    // Envelope
    envelope_phase: EnvelopePhase,
    envelope_value: f32, // current envelope amplitude, 0.0-1.0
}
// Simple attack/sustain/release envelope stages (no decay stage).
#[derive(Debug, Clone, Copy, PartialEq)]
enum EnvelopePhase {
    Attack,  // ramping up toward 1.0
    Sustain, // held at 1.0 until note-off
    Release, // ramping down; voice deactivates at 0.0
}
impl Voice {
    /// Start a voice on the given layer: playback begins at the sample
    /// start, in the attack phase, with the envelope at zero.
    fn new(layer_index: usize, note: u8, velocity: u8) -> Self {
        Self {
            layer_index,
            note,
            velocity,
            playhead: 0.0,
            is_active: true,
            envelope_phase: EnvelopePhase::Attack,
            envelope_value: 0.0,
        }
    }
}
/// Multi-sample instrument with velocity layers and key zones
pub struct MultiSamplerNode {
    name: String,
    // Sample layers
    layers: Vec<SampleLayer>,
    layer_infos: Vec<LayerInfo>, // Metadata about loaded layers
    // Voice management
    voices: Vec<Voice>, // voice pool; entries are reused once inactive
    max_voices: usize,  // polyphony cap
    // Parameters
    gain: f32,
    attack_time: f32,  // seconds
    release_time: f32, // seconds
    transpose: i8,     // semitones
    inputs: Vec<NodePort>,      // one MIDI input port
    outputs: Vec<NodePort>,     // one stereo audio output port
    parameters: Vec<Parameter>, // parameter descriptors
}
impl MultiSamplerNode {
    /// Create an empty sampler: no layers loaded, 16-voice polyphony,
    /// unity gain, 10 ms attack, 100 ms release, no transpose.
    pub fn new(name: impl Into<String>) -> Self {
        let name = name.into();
        let inputs = vec![
            NodePort::new("MIDI In", SignalType::Midi, 0),
        ];
        let outputs = vec![
            NodePort::new("Audio Out", SignalType::Audio, 0),
        ];
        let parameters = vec![
            Parameter::new(PARAM_GAIN, "Gain", 0.0, 2.0, 1.0, ParameterUnit::Generic),
            Parameter::new(PARAM_ATTACK, "Attack", 0.001, 1.0, 0.01, ParameterUnit::Time),
            Parameter::new(PARAM_RELEASE, "Release", 0.01, 5.0, 0.1, ParameterUnit::Time),
            Parameter::new(PARAM_TRANSPOSE, "Transpose", -24.0, 24.0, 0.0, ParameterUnit::Generic),
        ];
        Self {
            name,
            layers: Vec::new(),
            layer_infos: Vec::new(),
            voices: Vec::new(),
            max_voices: 16,
            gain: 1.0,
            attack_time: 0.01,
            release_time: 0.1,
            transpose: 0,
            inputs,
            outputs,
            parameters,
        }
    }
    /// Add a sample layer from raw data (no file metadata is recorded).
    pub fn add_layer(
        &mut self,
        sample_data: Vec<f32>,
        sample_rate: f32,
        key_min: u8,
        key_max: u8,
        root_key: u8,
        velocity_min: u8,
        velocity_max: u8,
    ) {
        let layer = SampleLayer::new(
            sample_data,
            sample_rate,
            key_min,
            key_max,
            root_key,
            velocity_min,
            velocity_max,
        );
        self.layers.push(layer);
    }
    /// Load a sample layer from a file path and record its metadata so the
    /// layer can be re-created when a preset is saved/loaded.
    pub fn load_layer_from_file(
        &mut self,
        path: &str,
        key_min: u8,
        key_max: u8,
        root_key: u8,
        velocity_min: u8,
        velocity_max: u8,
    ) -> Result<(), String> {
        use crate::audio::sample_loader::load_audio_file;
        let sample_data = load_audio_file(path)?;
        self.add_layer(
            sample_data.samples,
            sample_data.sample_rate as f32,
            key_min,
            key_max,
            root_key,
            velocity_min,
            velocity_max,
        );
        // Store layer metadata for preset serialization
        self.layer_infos.push(LayerInfo {
            file_path: path.to_string(),
            key_min,
            key_max,
            root_key,
            velocity_min,
            velocity_max,
        });
        Ok(())
    }
    /// Get information about all loaded layers
    pub fn get_layers_info(&self) -> &[LayerInfo] {
        &self.layer_infos
    }
    /// Get sample data for a specific layer (for preset embedding);
    /// returns `None` when the index is out of range.
    pub fn get_layer_data(&self, layer_index: usize) -> Option<(Vec<f32>, f32)> {
        self.layers.get(layer_index).map(|layer| {
            (layer.sample_data.clone(), layer.sample_rate)
        })
    }
    /// Update a layer's key/velocity zone configuration (and keep its
    /// serialization metadata in sync).
    pub fn update_layer(
        &mut self,
        layer_index: usize,
        key_min: u8,
        key_max: u8,
        root_key: u8,
        velocity_min: u8,
        velocity_max: u8,
    ) -> Result<(), String> {
        if layer_index >= self.layers.len() {
            return Err("Layer index out of bounds".to_string());
        }
        // Update the layer data
        self.layers[layer_index].key_min = key_min;
        self.layers[layer_index].key_max = key_max;
        self.layers[layer_index].root_key = root_key;
        self.layers[layer_index].velocity_min = velocity_min;
        self.layers[layer_index].velocity_max = velocity_max;
        // Update the layer info
        if layer_index < self.layer_infos.len() {
            self.layer_infos[layer_index].key_min = key_min;
            self.layer_infos[layer_index].key_max = key_max;
            self.layer_infos[layer_index].root_key = root_key;
            self.layer_infos[layer_index].velocity_min = velocity_min;
            self.layer_infos[layer_index].velocity_max = velocity_max;
        }
        Ok(())
    }
    /// Remove a layer, stop voices that were playing it, and renumber the
    /// layer indices of the remaining voices.
    pub fn remove_layer(&mut self, layer_index: usize) -> Result<(), String> {
        if layer_index >= self.layers.len() {
            return Err("Layer index out of bounds".to_string());
        }
        self.layers.remove(layer_index);
        if layer_index < self.layer_infos.len() {
            self.layer_infos.remove(layer_index);
        }
        // Stop any voices playing this layer
        for voice in &mut self.voices {
            if voice.layer_index == layer_index {
                voice.is_active = false;
            } else if voice.layer_index > layer_index {
                // Adjust indices for layers that were shifted down
                voice.layer_index -= 1;
            }
        }
        Ok(())
    }
    /// Find the first layer whose key and velocity zone match.
    fn find_layer(&self, note: u8, velocity: u8) -> Option<usize> {
        self.layers
            .iter()
            .enumerate()
            .find(|(_, layer)| layer.matches(note, velocity))
            .map(|(index, _)| index)
    }
    /// Apply the Transpose parameter to a MIDI note, clamped to 0-127.
    fn apply_transpose(&self, note: u8) -> u8 {
        (note as i16 + self.transpose as i16).clamp(0, 127) as u8
    }
    /// Trigger a note: pick a matching layer and start (or steal) a voice.
    fn note_on(&mut self, note: u8, velocity: u8) {
        let transposed_note = self.apply_transpose(note);
        if let Some(layer_index) = self.find_layer(transposed_note, velocity) {
            // Find an inactive voice; otherwise grow the pool up to
            // max_voices, then steal slot 0.
            let voice_index = self
                .voices
                .iter()
                .position(|v| !v.is_active)
                .unwrap_or_else(|| {
                    if self.voices.len() < self.max_voices {
                        self.voices.len()
                    } else {
                        0
                    }
                });
            // BUG FIX: store the transposed note. Playback pitch is derived
            // from voice.note vs the layer's root key, so storing the raw
            // note meant Transpose changed zone selection but never pitch.
            let voice = Voice::new(layer_index, transposed_note, velocity);
            if voice_index < self.voices.len() {
                self.voices[voice_index] = voice;
            } else {
                self.voices.push(voice);
            }
        }
    }
    /// Release a note: voices hold transposed notes, so transpose the
    /// incoming note the same way before matching. (If Transpose changes
    /// while a note is held the release may not match — same limitation
    /// the zone lookup already had.)
    fn note_off(&mut self, note: u8) {
        let transposed_note = self.apply_transpose(note);
        for voice in &mut self.voices {
            if voice.note == transposed_note && voice.is_active {
                voice.envelope_phase = EnvelopePhase::Release;
            }
        }
    }
    /// Calculate playback speed from the pitch difference between the
    /// played note and the layer's root key (2^(semitones/12)).
    fn calculate_speed(&self, layer: &SampleLayer, note: u8) -> f32 {
        let semitone_diff = note as i16 - layer.root_key as i16;
        2.0_f32.powf(semitone_diff as f32 / 12.0)
    }
    /// Read sample at playhead with linear interpolation; out-of-range
    /// positions read as silence.
    fn read_sample(&self, playhead: f32, sample: &[f32]) -> f32 {
        if sample.is_empty() || playhead < 0.0 {
            return 0.0;
        }
        let index = playhead.floor() as usize;
        if index >= sample.len() {
            return 0.0;
        }
        let frac = playhead - playhead.floor();
        let sample1 = sample[index];
        let sample2 = if index + 1 < sample.len() {
            sample[index + 1]
        } else {
            0.0
        };
        sample1 + (sample2 - sample1) * frac
    }
    /// Advance a voice's envelope by one sample and return its amplitude.
    /// Release past zero deactivates the voice.
    fn process_envelope(&self, voice: &mut Voice, sample_rate: f32) -> f32 {
        match voice.envelope_phase {
            EnvelopePhase::Attack => {
                let attack_samples = self.attack_time * sample_rate;
                voice.envelope_value += 1.0 / attack_samples;
                if voice.envelope_value >= 1.0 {
                    voice.envelope_value = 1.0;
                    voice.envelope_phase = EnvelopePhase::Sustain;
                }
            }
            EnvelopePhase::Sustain => {
                voice.envelope_value = 1.0;
            }
            EnvelopePhase::Release => {
                let release_samples = self.release_time * sample_rate;
                voice.envelope_value -= 1.0 / release_samples;
                if voice.envelope_value <= 0.0 {
                    voice.envelope_value = 0.0;
                    voice.is_active = false;
                }
            }
        }
        voice.envelope_value.clamp(0.0, 1.0)
    }
}
impl AudioNode for MultiSamplerNode {
    // The sampler generates audio from MIDI input.
    fn category(&self) -> NodeCategory {
        NodeCategory::Generator
    }
    fn inputs(&self) -> &[NodePort] {
        &self.inputs
    }
    fn outputs(&self) -> &[NodePort] {
        &self.outputs
    }
    fn parameters(&self) -> &[Parameter] {
        &self.parameters
    }
    /// Set a parameter, clamped to the range advertised in `new()`.
    fn set_parameter(&mut self, id: u32, value: f32) {
        match id {
            PARAM_GAIN => {
                self.gain = value.clamp(0.0, 2.0);
            }
            PARAM_ATTACK => {
                self.attack_time = value.clamp(0.001, 1.0);
            }
            PARAM_RELEASE => {
                self.release_time = value.clamp(0.01, 5.0);
            }
            PARAM_TRANSPOSE => {
                self.transpose = value.clamp(-24.0, 24.0) as i8;
            }
            _ => {}
        }
    }
    /// Read back a parameter; unknown ids yield 0.0.
    fn get_parameter(&self, id: u32) -> f32 {
        match id {
            PARAM_GAIN => self.gain,
            PARAM_ATTACK => self.attack_time,
            PARAM_RELEASE => self.release_time,
            PARAM_TRANSPOSE => self.transpose as f32,
            _ => 0.0,
        }
    }
    /// Render one block: dispatch incoming MIDI to note_on/note_off, then
    /// mix each active voice (pitch-shifted sample playback through an
    /// attack/sustain/release envelope) into the stereo output.
    fn process(
        &mut self,
        _inputs: &[&[f32]],
        outputs: &mut [&mut [f32]],
        midi_inputs: &[&[MidiEvent]],
        _midi_outputs: &mut [&mut Vec<MidiEvent>],
        sample_rate: u32,
    ) {
        if outputs.is_empty() {
            return;
        }
        let output = &mut outputs[0];
        let frames = output.len() / 2;
        // Clear output — voices are mixed in additively below.
        output.fill(0.0);
        // Process MIDI events
        if !midi_inputs.is_empty() {
            for event in midi_inputs[0].iter() {
                if event.is_note_on() {
                    self.note_on(event.data1, event.data2);
                } else if event.is_note_off() {
                    self.note_off(event.data1);
                }
            }
        }
        // Extract parameters so the per-voice loop can borrow them while
        // `self.voices` is mutably borrowed.
        let gain = self.gain;
        let attack_time = self.attack_time;
        let release_time = self.release_time;
        // Process all active voices
        for voice in &mut self.voices {
            if !voice.is_active {
                continue;
            }
            if voice.layer_index >= self.layers.len() {
                continue;
            }
            let layer = &self.layers[voice.layer_index];
            // Playback speed: pitch shift relative to the root key, times
            // the ratio of the sample's native rate to the engine rate.
            let semitone_diff = voice.note as i16 - layer.root_key as i16;
            let speed = 2.0_f32.powf(semitone_diff as f32 / 12.0);
            let speed_adjusted = speed * (layer.sample_rate / sample_rate as f32);
            for frame in 0..frames {
                // Read sample with linear interpolation
                let playhead = voice.playhead;
                let sample = if !layer.sample_data.is_empty() && playhead >= 0.0 {
                    let index = playhead.floor() as usize;
                    if index < layer.sample_data.len() {
                        let frac = playhead - playhead.floor();
                        let sample1 = layer.sample_data[index];
                        let sample2 = if index + 1 < layer.sample_data.len() {
                            layer.sample_data[index + 1]
                        } else {
                            0.0
                        };
                        sample1 + (sample2 - sample1) * frac
                    } else {
                        0.0
                    }
                } else {
                    0.0
                };
                // Advance the envelope one sample (inlined because `self`
                // helpers cannot be called while `voice` is borrowed).
                match voice.envelope_phase {
                    EnvelopePhase::Attack => {
                        let attack_samples = attack_time * sample_rate as f32;
                        voice.envelope_value += 1.0 / attack_samples;
                        if voice.envelope_value >= 1.0 {
                            voice.envelope_value = 1.0;
                            voice.envelope_phase = EnvelopePhase::Sustain;
                        }
                    }
                    EnvelopePhase::Sustain => {
                        voice.envelope_value = 1.0;
                    }
                    EnvelopePhase::Release => {
                        let release_samples = release_time * sample_rate as f32;
                        voice.envelope_value -= 1.0 / release_samples;
                        if voice.envelope_value <= 0.0 {
                            voice.envelope_value = 0.0;
                            voice.is_active = false;
                        }
                    }
                }
                let envelope = voice.envelope_value.clamp(0.0, 1.0);
                // Apply velocity scaling (0-127 -> 0-1)
                let velocity_scale = voice.velocity as f32 / 127.0;
                // Mix into output (same mono signal to both channels)
                let final_sample = sample * envelope * velocity_scale * gain;
                output[frame * 2] += final_sample;
                output[frame * 2 + 1] += final_sample;
                // Advance playhead
                voice.playhead += speed_adjusted;
                // Stop if we've reached the end of the sample
                if voice.playhead >= layer.sample_data.len() as f32 {
                    voice.is_active = false;
                    break;
                }
            }
        }
    }
    /// Drop all active voices immediately.
    fn reset(&mut self) {
        self.voices.clear();
    }
    // Stable type identifier used by the engine's node factory.
    fn node_type(&self) -> &str {
        "MultiSampler"
    }
    fn name(&self) -> &str {
        &self.name
    }
    /// Clone this node. Unlike the previous implementation (which rebuilt
    /// an empty instance via `Self::new`), this preserves the loaded sample
    /// layers, their metadata, and all parameter values; active voices are
    /// not carried over.
    fn clone_node(&self) -> Box<dyn AudioNode> {
        Box::new(Self {
            name: self.name.clone(),
            layers: self.layers.clone(),
            layer_infos: self.layer_infos.clone(),
            voices: Vec::new(),
            max_voices: self.max_voices,
            gain: self.gain,
            attack_time: self.attack_time,
            release_time: self.release_time,
            transpose: self.transpose,
            inputs: self.inputs.clone(),
            outputs: self.outputs.clone(),
            parameters: self.parameters.clone(),
        })
    }
}

View File

@ -127,15 +127,14 @@ impl AudioNode for OscillatorNode {
// Start with base frequency // Start with base frequency
let mut frequency = self.frequency; let mut frequency = self.frequency;
// V/Oct input: 0.0-1.0 maps to MIDI notes 0-127 // V/Oct input: Standard V/Oct (0V = A4 440Hz, ±1V per octave)
if !inputs.is_empty() && frame < inputs[0].len() { if !inputs.is_empty() && frame < inputs[0].len() {
let voct = inputs[0][frame]; // Read V/Oct CV (mono) let voct = inputs[0][frame]; // Read V/Oct CV (mono)
if voct > 0.001 { // Convert V/Oct to frequency: f = 440 * 2^(voct)
// Convert CV to MIDI note number (0-1 -> 0-127) // voct = 0.0 -> 440 Hz (A4)
let midi_note = voct * 127.0; // voct = 1.0 -> 880 Hz (A5)
// Convert MIDI note to frequency: f = 440 * 2^((n-69)/12) // voct = -0.75 -> 261.6 Hz (C4, middle C)
frequency = 440.0 * 2.0_f32.powf((midi_note - 69.0) / 12.0); frequency = 440.0 * 2.0_f32.powf(voct);
}
} }
// FM input: modulates the frequency // FM input: modulates the frequency

View File

@ -0,0 +1,313 @@
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType};
use crate::audio::midi::MidiEvent;
// Parameter ids exposed by the reverb node.
const PARAM_ROOM_SIZE: u32 = 0;
const PARAM_DAMPING: u32 = 1;
const PARAM_WET_DRY: u32 = 2;
// Schroeder reverb uses a parallel bank of comb filters followed by series all-pass filters
// Comb filter delays (in samples at 48kHz); mutually detuned lengths avoid
// coincident resonances. NOTE(review): values are fixed in samples, so at
// other sample rates the tail time scales with the rate — confirm intent.
const COMB_DELAYS: [usize; 8] = [1557, 1617, 1491, 1422, 1277, 1356, 1188, 1116];
// All-pass filter delays (in samples at 48kHz)
const ALLPASS_DELAYS: [usize; 4] = [225, 556, 441, 341];
/// Push one input sample through the parallel comb bank and then the
/// series all-pass chain, returning the reverberated sample.
fn process_channel(
    input: f32,
    comb_filters: &mut [CombFilter],
    allpass_filters: &mut [AllPassFilter],
) -> f32 {
    // Parallel comb filters all see the same input; their outputs are
    // summed and scaled hard (x0.015) so eight resonators in parallel
    // don't produce excessive gain.
    let summed: f32 = comb_filters.iter_mut().map(|comb| comb.process(input)).sum();
    // Diffuse the combined tail through the series all-pass filters.
    allpass_filters
        .iter_mut()
        .fold(summed * 0.015, |signal, allpass| allpass.process(signal))
}
/// One feedback comb filter with a one-pole lowpass in the feedback
/// path — the classic Schroeder/Freeverb building block.
struct CombFilter {
    buffer: Vec<f32>,   // circular delay line
    buffer_size: usize, // cached delay-line length
    filter_store: f32,  // one-pole lowpass state (damping memory)
    write_pos: usize,   // current read/write position in the delay line
    damp: f32,          // lowpass coefficient: 0 = no damping, 1 = full
    feedback: f32,      // feedback gain; controls decay time
}

impl CombFilter {
    /// Create a comb filter whose delay line holds `size` samples.
    fn new(size: usize) -> Self {
        Self {
            buffer: vec![0.0; size],
            buffer_size: size,
            filter_store: 0.0,
            write_pos: 0,
            damp: 0.5,
            feedback: 0.5,
        }
    }

    /// Push one sample through the filter; returns the delayed output.
    fn process(&mut self, input: f32) -> f32 {
        let delayed = self.buffer[self.write_pos];
        // One-pole lowpass on the fed-back signal (high-frequency damping).
        self.filter_store = delayed * (1.0 - self.damp) + self.filter_store * self.damp;
        self.buffer[self.write_pos] = input + self.filter_store * self.feedback;
        self.write_pos = (self.write_pos + 1) % self.buffer_size;
        delayed
    }

    /// Clear the delay line and the lowpass memory.
    fn mute(&mut self) {
        self.buffer.fill(0.0);
        self.filter_store = 0.0;
    }

    fn set_damp(&mut self, val: f32) {
        self.damp = val;
    }

    fn set_feedback(&mut self, val: f32) {
        self.feedback = val;
    }
}
/// Schroeder all-pass diffusion stage with a fixed 0.5 coefficient.
struct AllPassFilter {
    buffer: Vec<f32>,   // circular delay line
    buffer_size: usize, // cached delay-line length
    write_pos: usize,   // current read/write position in the delay line
}

impl AllPassFilter {
    /// Create an all-pass filter whose delay line holds `size` samples.
    fn new(size: usize) -> Self {
        Self {
            buffer: vec![0.0; size],
            buffer_size: size,
            write_pos: 0,
        }
    }

    /// Push one sample through the filter; returns the diffused output.
    fn process(&mut self, input: f32) -> f32 {
        let delayed = self.buffer[self.write_pos];
        // Feed the input plus half the delayed signal back into the line.
        self.buffer[self.write_pos] = input + delayed * 0.5;
        self.write_pos = (self.write_pos + 1) % self.buffer_size;
        delayed - input
    }

    /// Clear the delay line.
    fn mute(&mut self) {
        self.buffer.fill(0.0);
    }
}
/// Schroeder reverb node with room size and damping controls
///
/// Keeps an independent comb/all-pass network per stereo channel; the
/// right-channel filters are built with slightly longer delays (see
/// `ReverbNode::new`) so the two channels decorrelate.
pub struct ReverbNode {
    name: String,
    room_size: f32, // 0.0 to 1.0; mapped onto comb feedback in update_filters()
    damping: f32, // 0.0 to 1.0; lowpass amount in the comb feedback path
    wet_dry: f32, // 0.0 = dry only, 1.0 = wet only
    // Left channel filters
    comb_filters_left: Vec<CombFilter>,
    allpass_filters_left: Vec<AllPassFilter>,
    // Right channel filters
    comb_filters_right: Vec<CombFilter>,
    allpass_filters_right: Vec<AllPassFilter>,
    inputs: Vec<NodePort>, // one stereo audio input port
    outputs: Vec<NodePort>, // one stereo audio output port
    parameters: Vec<Parameter>, // room size, damping, wet/dry
}
impl ReverbNode {
    /// Build a reverb with default settings (room 0.5, damping 0.5, 30%
    /// wet) and freshly initialized filter banks for both channels.
    pub fn new(name: impl Into<String>) -> Self {
        let parameters = vec![
            Parameter::new(PARAM_ROOM_SIZE, "Room Size", 0.0, 1.0, 0.5, ParameterUnit::Generic),
            Parameter::new(PARAM_DAMPING, "Damping", 0.0, 1.0, 0.5, ParameterUnit::Generic),
            Parameter::new(PARAM_WET_DRY, "Wet/Dry", 0.0, 1.0, 0.3, ParameterUnit::Generic),
        ];

        // The right channel's delay lines are offset by 23 samples so the
        // two channels decorrelate, widening the stereo image.
        let mut node = Self {
            name: name.into(),
            room_size: 0.5,
            damping: 0.5,
            wet_dry: 0.3,
            comb_filters_left: COMB_DELAYS.iter().map(|&d| CombFilter::new(d)).collect(),
            allpass_filters_left: ALLPASS_DELAYS.iter().map(|&d| AllPassFilter::new(d)).collect(),
            comb_filters_right: COMB_DELAYS.iter().map(|&d| CombFilter::new(d + 23)).collect(),
            allpass_filters_right: ALLPASS_DELAYS.iter().map(|&d| AllPassFilter::new(d + 23)).collect(),
            inputs: vec![NodePort::new("Audio In", SignalType::Audio, 0)],
            outputs: vec![NodePort::new("Audio Out", SignalType::Audio, 0)],
            parameters,
        };
        // Push the default room size / damping into the filter banks.
        node.update_filters();
        node
    }

    /// Re-derive comb feedback and damping coefficients from the current
    /// `room_size` and `damping` values.
    fn update_filters(&mut self) {
        // Larger rooms decay more slowly: map room size onto feedback in
        // the range 0.28..=0.98.
        let feedback = 0.28 + self.room_size * 0.7;
        let combs = self
            .comb_filters_left
            .iter_mut()
            .chain(self.comb_filters_right.iter_mut());
        for comb in combs {
            comb.set_feedback(feedback);
            comb.set_damp(self.damping);
        }
    }
}
impl AudioNode for ReverbNode {
fn category(&self) -> NodeCategory {
NodeCategory::Effect
}
fn inputs(&self) -> &[NodePort] {
&self.inputs
}
fn outputs(&self) -> &[NodePort] {
&self.outputs
}
fn parameters(&self) -> &[Parameter] {
&self.parameters
}
fn set_parameter(&mut self, id: u32, value: f32) {
match id {
PARAM_ROOM_SIZE => {
self.room_size = value.clamp(0.0, 1.0);
self.update_filters();
}
PARAM_DAMPING => {
self.damping = value.clamp(0.0, 1.0);
self.update_filters();
}
PARAM_WET_DRY => {
self.wet_dry = value.clamp(0.0, 1.0);
}
_ => {}
}
}
fn get_parameter(&self, id: u32) -> f32 {
match id {
PARAM_ROOM_SIZE => self.room_size,
PARAM_DAMPING => self.damping,
PARAM_WET_DRY => self.wet_dry,
_ => 0.0,
}
}
fn process(
&mut self,
inputs: &[&[f32]],
outputs: &mut [&mut [f32]],
_midi_inputs: &[&[MidiEvent]],
_midi_outputs: &mut [&mut Vec<MidiEvent>],
_sample_rate: u32,
) {
if inputs.is_empty() || outputs.is_empty() {
return;
}
let input = inputs[0];
let output = &mut outputs[0];
// Audio signals are stereo (interleaved L/R)
let frames = input.len() / 2;
let output_frames = output.len() / 2;
let frames_to_process = frames.min(output_frames);
let dry_gain = 1.0 - self.wet_dry;
let wet_gain = self.wet_dry;
for frame in 0..frames_to_process {
let left_in = input[frame * 2];
let right_in = input[frame * 2 + 1];
// Process both channels
let left_wet = process_channel(
left_in,
&mut self.comb_filters_left,
&mut self.allpass_filters_left,
);
let right_wet = process_channel(
right_in,
&mut self.comb_filters_right,
&mut self.allpass_filters_right,
);
// Mix dry and wet signals
output[frame * 2] = left_in * dry_gain + left_wet * wet_gain;
output[frame * 2 + 1] = right_in * dry_gain + right_wet * wet_gain;
}
}
fn reset(&mut self) {
for comb in &mut self.comb_filters_left {
comb.mute();
}
for comb in &mut self.comb_filters_right {
comb.mute();
}
for allpass in &mut self.allpass_filters_left {
allpass.mute();
}
for allpass in &mut self.allpass_filters_right {
allpass.mute();
}
}
fn node_type(&self) -> &str {
"Reverb"
}
fn name(&self) -> &str {
&self.name
}
fn clone_node(&self) -> Box<dyn AudioNode> {
Box::new(Self::new(self.name.clone()))
}
}

View File

@ -0,0 +1,278 @@
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType};
use crate::audio::midi::MidiEvent;
use std::sync::{Arc, Mutex};
// Parameters
const PARAM_GAIN: u32 = 0;
const PARAM_LOOP: u32 = 1;
const PARAM_PITCH_SHIFT: u32 = 2;
/// Simple single-sample playback node with pitch shifting
///
/// Plays one mono sample, retriggered on each gate rising edge; playback
/// speed is driven by the V/Oct input plus the pitch-shift parameter.
pub struct SimpleSamplerNode {
    name: String,
    // Sample data (shared, can be set externally)
    sample_data: Arc<Mutex<Vec<f32>>>,
    sample_rate_original: f32, // native sample rate of the loaded audio
    sample_path: Option<String>, // Path to loaded sample file
    // Playback state
    playhead: f32, // Fractional position in sample
    is_playing: bool,
    gate_prev: bool, // previous gate state, for rising-edge detection
    // Parameters
    gain: f32, // output gain, 0.0..=2.0
    loop_enabled: bool, // wrap playback at the end of the sample
    pitch_shift: f32, // Additional pitch shift in semitones
    inputs: Vec<NodePort>,
    outputs: Vec<NodePort>,
    parameters: Vec<Parameter>,
}
impl SimpleSamplerNode {
    /// Create an empty sampler: no sample loaded, gain 1.0, looping off.
    pub fn new(name: impl Into<String>) -> Self {
        Self {
            name: name.into(),
            sample_data: Arc::new(Mutex::new(Vec::new())),
            sample_rate_original: 48000.0,
            sample_path: None,
            playhead: 0.0,
            is_playing: false,
            gate_prev: false,
            gain: 1.0,
            loop_enabled: false,
            pitch_shift: 0.0,
            inputs: vec![
                NodePort::new("V/Oct", SignalType::CV, 0),
                NodePort::new("Gate", SignalType::CV, 1),
            ],
            outputs: vec![NodePort::new("Audio Out", SignalType::Audio, 0)],
            parameters: vec![
                Parameter::new(PARAM_GAIN, "Gain", 0.0, 2.0, 1.0, ParameterUnit::Generic),
                Parameter::new(PARAM_LOOP, "Loop", 0.0, 1.0, 0.0, ParameterUnit::Generic),
                Parameter::new(PARAM_PITCH_SHIFT, "Pitch Shift", -12.0, 12.0, 0.0, ParameterUnit::Generic),
            ],
        }
    }

    /// Install new mono sample data and remember its native sample rate.
    pub fn set_sample(&mut self, data: Vec<f32>, sample_rate: f32) {
        *self.sample_data.lock().unwrap() = data;
        self.sample_rate_original = sample_rate;
    }

    /// Handle to the shared sample buffer (for external loading).
    pub fn get_sample_data(&self) -> Arc<Mutex<Vec<f32>>> {
        Arc::clone(&self.sample_data)
    }

    /// Decode an audio file from disk and install it as the active sample.
    pub fn load_sample_from_file(&mut self, path: &str) -> Result<(), String> {
        use crate::audio::sample_loader::load_audio_file;

        let loaded = load_audio_file(path)?;
        self.set_sample(loaded.samples, loaded.sample_rate as f32);
        self.sample_path = Some(path.to_string());
        Ok(())
    }

    /// Path of the currently loaded sample file, if any.
    pub fn get_sample_path(&self) -> Option<&str> {
        self.sample_path.as_deref()
    }

    /// Snapshot of the sample buffer and its native rate (for preset embedding).
    pub fn get_sample_data_for_embedding(&self) -> (Vec<f32>, f32) {
        (
            self.sample_data.lock().unwrap().clone(),
            self.sample_rate_original,
        )
    }

    /// Map V/Oct CV plus the pitch-shift parameter to a playback-speed
    /// multiplier: 0V = 1.0x, +1V = 2.0x (octave up), -1V = 0.5x.
    fn voct_to_speed(&self, voct: f32) -> f32 {
        2.0_f32.powf((voct * 12.0 + self.pitch_shift) / 12.0)
    }

    /// Linearly interpolated read at a fractional `playhead` position.
    /// Returns 0.0 outside the sample; when looping, the final sample
    /// interpolates back toward the first one.
    fn read_sample(&self, playhead: f32, sample: &[f32]) -> f32 {
        let base = playhead.floor();
        let index = base as usize;
        let first = match sample.get(index) {
            Some(&s) => s,
            None => return 0.0, // empty buffer or playhead past the end
        };
        let second = match sample.get(index + 1) {
            Some(&s) => s,
            None if self.loop_enabled => sample[0], // wrap toward the start
            None => 0.0,
        };
        first + (second - first) * (playhead - base)
    }
}
impl AudioNode for SimpleSamplerNode {
    fn category(&self) -> NodeCategory {
        NodeCategory::Generator
    }

    fn inputs(&self) -> &[NodePort] {
        &self.inputs
    }

    fn outputs(&self) -> &[NodePort] {
        &self.outputs
    }

    fn parameters(&self) -> &[Parameter] {
        &self.parameters
    }

    fn set_parameter(&mut self, id: u32, value: f32) {
        match id {
            PARAM_GAIN => {
                self.gain = value.clamp(0.0, 2.0);
            }
            PARAM_LOOP => {
                self.loop_enabled = value > 0.5;
            }
            PARAM_PITCH_SHIFT => {
                self.pitch_shift = value.clamp(-12.0, 12.0);
            }
            _ => {}
        }
    }

    fn get_parameter(&self, id: u32) -> f32 {
        match id {
            PARAM_GAIN => self.gain,
            PARAM_LOOP => if self.loop_enabled { 1.0 } else { 0.0 },
            PARAM_PITCH_SHIFT => self.pitch_shift,
            _ => 0.0,
        }
    }

    /// Play the loaded sample, retriggered on each gate rising edge and
    /// resampled according to the V/Oct input and pitch-shift parameter.
    fn process(
        &mut self,
        inputs: &[&[f32]],
        outputs: &mut [&mut [f32]],
        _midi_inputs: &[&[MidiEvent]],
        _midi_outputs: &mut [&mut Vec<MidiEvent>],
        sample_rate: u32,
    ) {
        if outputs.is_empty() {
            return;
        }

        // Lock the shared sample buffer for the whole block.
        let sample_data = self.sample_data.lock().unwrap();
        if sample_data.is_empty() {
            // No sample loaded: output silence.
            for output in outputs.iter_mut() {
                output.fill(0.0);
            }
            return;
        }

        let output = &mut outputs[0];
        let frames = output.len() / 2;

        for frame in 0..frames {
            // CV inputs are read as interleaved stereo (left channel),
            // clamped to the last frame when the CV buffer is shorter than
            // ours. saturating_sub avoids a usize underflow (panic) when a
            // connected buffer holds fewer than two samples.
            let voct = if !inputs.is_empty() && !inputs[0].is_empty() {
                let last = (inputs[0].len() / 2).saturating_sub(1);
                inputs[0][frame.min(last) * 2]
            } else {
                0.0 // Default to original pitch
            };
            let gate = if inputs.len() > 1 && !inputs[1].is_empty() {
                let last = (inputs[1].len() / 2).saturating_sub(1);
                inputs[1][frame.min(last) * 2]
            } else {
                0.0
            };

            // Retrigger playback from the start on a gate rising edge.
            let gate_active = gate > 0.5;
            if gate_active && !self.gate_prev {
                self.playhead = 0.0;
                self.is_playing = true;
            }
            self.gate_prev = gate_active;

            let sample = if self.is_playing {
                let s = self.read_sample(self.playhead, &sample_data);

                // Advance the playhead: V/Oct speed times the ratio of the
                // sample's native rate to the engine rate (resampling).
                let speed = self.voct_to_speed(voct);
                self.playhead += speed * (self.sample_rate_original / sample_rate as f32);

                // End of sample: wrap around when looping, otherwise stop.
                if self.playhead >= sample_data.len() as f32 {
                    if self.loop_enabled {
                        self.playhead %= sample_data.len() as f32;
                    } else {
                        self.is_playing = false;
                        self.playhead = 0.0;
                    }
                }

                s * self.gain
            } else {
                0.0
            };

            // Duplicate the mono sampler output to both stereo channels.
            output[frame * 2] = sample;
            output[frame * 2 + 1] = sample;
        }
    }

    fn reset(&mut self) {
        self.playhead = 0.0;
        self.is_playing = false;
        self.gate_prev = false;
    }

    fn node_type(&self) -> &str {
        "SimpleSampler"
    }

    fn name(&self) -> &str {
        &self.name
    }

    /// Clone this node, preserving the loaded sample, its file path, and
    /// all parameter values. (Previously only the name was copied, so
    /// clones lost the sample.) Playback state starts fresh.
    fn clone_node(&self) -> Box<dyn AudioNode> {
        let mut node = Self::new(self.name.clone());
        let data = self.sample_data.lock().unwrap().clone();
        node.set_sample(data, self.sample_rate_original);
        node.sample_path = self.sample_path.clone();
        node.gain = self.gain;
        node.loop_enabled = self.loop_enabled;
        node.pitch_shift = self.pitch_shift;
        Box::new(node)
    }
}

View File

@ -0,0 +1,286 @@
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType};
use crate::audio::midi::MidiEvent;
use std::f32::consts::PI;
const WAVETABLE_SIZE: usize = 256;
// Parameters
const PARAM_WAVETABLE: u32 = 0;
const PARAM_FINE_TUNE: u32 = 1;
const PARAM_POSITION: u32 = 2;
/// Types of preset wavetables
#[derive(Debug, Clone, Copy, PartialEq)]
enum WavetableType {
    Sine = 0,
    Saw = 1,
    Square = 2,
    Triangle = 3,
    PWM = 4,        // Pulse Width Modulated
    Harmonic = 5,   // Rich harmonics
    Inharmonic = 6, // Metallic/bell-like
    Digital = 7,    // Stepped/digital artifacts
}

impl WavetableType {
    /// Map a raw parameter value to a wavetable type; any out-of-range
    /// value falls back to `Sine`.
    fn from_u32(value: u32) -> Self {
        match value {
            1 => WavetableType::Saw,
            2 => WavetableType::Square,
            3 => WavetableType::Triangle,
            4 => WavetableType::PWM,
            5 => WavetableType::Harmonic,
            6 => WavetableType::Inharmonic,
            7 => WavetableType::Digital,
            // 0 and anything unknown both map to the default sine table.
            _ => WavetableType::Sine,
        }
    }
}
/// Generate one cycle (WAVETABLE_SIZE samples) of the given wavetable type.
fn generate_wavetable(wave_type: WavetableType) -> Vec<f32> {
    let n = WAVETABLE_SIZE as f32;
    (0..WAVETABLE_SIZE)
        .map(|i| {
            let t = i as f32 / n; // normalized position in [0, 1)
            let phase = t * 2.0 * PI; // radian phase for sine-based tables
            match wave_type {
                WavetableType::Sine => phase.sin(),
                WavetableType::Saw => 2.0 * t - 1.0,
                WavetableType::Square => {
                    if i < WAVETABLE_SIZE / 2 { 1.0 } else { -1.0 }
                }
                WavetableType::Triangle => {
                    if t < 0.5 { 4.0 * t - 1.0 } else { -4.0 * t + 3.0 }
                }
                // Fixed 25% duty-cycle pulse.
                WavetableType::PWM => {
                    if t < 0.25 { 1.0 } else { -1.0 }
                }
                // Decaying integer harmonics for a rich tone.
                WavetableType::Harmonic => {
                    phase.sin() * 0.5
                        + (phase * 2.0).sin() * 0.25
                        + (phase * 3.0).sin() * 0.125
                        + (phase * 4.0).sin() * 0.0625
                }
                // Non-integer partials give a metallic, bell-like timbre.
                WavetableType::Inharmonic => {
                    phase.sin() * 0.4
                        + (phase * 2.13).sin() * 0.3
                        + (phase * 3.76).sin() * 0.2
                        + (phase * 5.41).sin() * 0.1
                }
                // Quantize a ramp to 8 levels for digital artifacts.
                WavetableType::Digital => {
                    let steps = 8;
                    (i * steps / WAVETABLE_SIZE) as f32 / steps as f32 * 2.0 - 1.0
                }
            }
        })
        .collect()
}
/// Wavetable oscillator node
///
/// Scans one of several preset single-cycle wavetables with linear
/// interpolation; pitch comes from the V/Oct input plus fine tune.
pub struct WavetableOscillatorNode {
    name: String,
    // Current wavetable
    wavetable_type: WavetableType,
    wavetable: Vec<f32>, // one cycle, WAVETABLE_SIZE samples
    // Oscillator state
    phase: f32, // normalized phase in [0, 1)
    fine_tune: f32, // -1.0 to 1.0 semitones
    position: f32, // 0.0 to 1.0 (for future multi-cycle wavetables)
    inputs: Vec<NodePort>,
    outputs: Vec<NodePort>,
    parameters: Vec<Parameter>,
}
impl WavetableOscillatorNode {
    /// Create an oscillator with a sine wavetable and neutral tuning.
    pub fn new(name: impl Into<String>) -> Self {
        let wavetable_type = WavetableType::Sine;
        Self {
            name: name.into(),
            wavetable: generate_wavetable(wavetable_type),
            wavetable_type,
            phase: 0.0,
            fine_tune: 0.0,
            position: 0.0,
            inputs: vec![NodePort::new("V/Oct", SignalType::CV, 0)],
            outputs: vec![NodePort::new("Audio Out", SignalType::Audio, 0)],
            parameters: vec![
                Parameter::new(PARAM_WAVETABLE, "Wavetable", 0.0, 7.0, 0.0, ParameterUnit::Generic),
                Parameter::new(PARAM_FINE_TUNE, "Fine Tune", -1.0, 1.0, 0.0, ParameterUnit::Generic),
                Parameter::new(PARAM_POSITION, "Position", 0.0, 1.0, 0.0, ParameterUnit::Generic),
            ],
        }
    }

    /// V/Oct to frequency with fine tune applied: 0V maps to A4 (440 Hz),
    /// one volt per octave, fine tune in semitones.
    fn voct_to_freq(&self, voct: f32) -> f32 {
        440.0 * 2.0_f32.powf((voct * 12.0 + self.fine_tune) / 12.0)
    }

    /// Linearly interpolated wavetable lookup for a phase in [0, 1).
    fn read_wavetable(&self, phase: f32) -> f32 {
        let pos = phase * WAVETABLE_SIZE as f32;
        let frac = pos - pos.floor();
        // Wrap both taps so interpolation is continuous across the seam.
        let i0 = pos.floor() as usize % WAVETABLE_SIZE;
        let i1 = (i0 + 1) % WAVETABLE_SIZE;
        let (a, b) = (self.wavetable[i0], self.wavetable[i1]);
        a + (b - a) * frac
    }
}
impl AudioNode for WavetableOscillatorNode {
fn category(&self) -> NodeCategory {
NodeCategory::Generator
}
fn inputs(&self) -> &[NodePort] {
&self.inputs
}
fn outputs(&self) -> &[NodePort] {
&self.outputs
}
fn parameters(&self) -> &[Parameter] {
&self.parameters
}
fn set_parameter(&mut self, id: u32, value: f32) {
match id {
PARAM_WAVETABLE => {
let new_type = WavetableType::from_u32(value as u32);
if new_type != self.wavetable_type {
self.wavetable_type = new_type;
self.wavetable = generate_wavetable(new_type);
}
}
PARAM_FINE_TUNE => {
self.fine_tune = value.clamp(-1.0, 1.0);
}
PARAM_POSITION => {
self.position = value.clamp(0.0, 1.0);
}
_ => {}
}
}
fn get_parameter(&self, id: u32) -> f32 {
match id {
PARAM_WAVETABLE => self.wavetable_type as u32 as f32,
PARAM_FINE_TUNE => self.fine_tune,
PARAM_POSITION => self.position,
_ => 0.0,
}
}
fn process(
&mut self,
inputs: &[&[f32]],
outputs: &mut [&mut [f32]],
_midi_inputs: &[&[MidiEvent]],
_midi_outputs: &mut [&mut Vec<MidiEvent>],
sample_rate: u32,
) {
if outputs.is_empty() {
return;
}
let output = &mut outputs[0];
let frames = output.len() / 2;
for frame in 0..frames {
// Read V/Oct input
let voct = if !inputs.is_empty() && !inputs[0].is_empty() {
inputs[0][frame.min(inputs[0].len() / 2 - 1) * 2]
} else {
0.0 // Default to A4 (440 Hz)
};
// Calculate frequency
let freq = self.voct_to_freq(voct);
// Read from wavetable
let sample = self.read_wavetable(self.phase);
// Advance phase
self.phase += freq / sample_rate as f32;
if self.phase >= 1.0 {
self.phase -= 1.0;
}
// Output stereo (same signal to both channels)
output[frame * 2] = sample * 0.5; // Scale down to prevent clipping
output[frame * 2 + 1] = sample * 0.5;
}
}
fn reset(&mut self) {
self.phase = 0.0;
}
fn node_type(&self) -> &str {
"WavetableOscillator"
}
fn name(&self) -> &str {
&self.name
}
fn clone_node(&self) -> Box<dyn AudioNode> {
Box::new(Self::new(self.name.clone()))
}
}

View File

@ -1,6 +1,44 @@
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::collections::HashMap; use std::collections::HashMap;
/// Sample data for preset serialization
///
/// Serialized with an internal `type` tag so presets stay readable JSON.
/// A sample may be referenced by file path, embedded inline, or both.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "type")]
pub enum SampleData {
    #[serde(rename = "simple_sampler")]
    SimpleSampler {
        // Path the sample was loaded from, when known
        #[serde(skip_serializing_if = "Option::is_none")]
        file_path: Option<String>,
        // Inline copy of the audio, for presets portable across machines
        #[serde(skip_serializing_if = "Option::is_none")]
        embedded_data: Option<EmbeddedSampleData>,
    },
    #[serde(rename = "multi_sampler")]
    MultiSampler { layers: Vec<LayerData> },
}
/// Embedded sample data (base64-encoded for JSON compatibility)
///
/// The payload is the raw f32 little-endian sample stream, base64-encoded
/// so it can be carried inside a JSON preset.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EmbeddedSampleData {
    /// Base64-encoded audio samples (f32 little-endian)
    pub data_base64: String,
    /// Original sample rate
    pub sample_rate: u32,
}
/// Layer data for MultiSampler
///
/// Each layer maps one sample onto a MIDI key range and velocity range.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LayerData {
    // Path the layer's sample was loaded from, when known
    #[serde(skip_serializing_if = "Option::is_none")]
    pub file_path: Option<String>,
    // Inline copy of the audio, for presets portable across machines
    #[serde(skip_serializing_if = "Option::is_none")]
    pub embedded_data: Option<EmbeddedSampleData>,
    pub key_min: u8, // lowest MIDI note this layer covers
    pub key_max: u8, // highest MIDI note this layer covers
    pub root_key: u8, // reference MIDI note for pitch mapping (assumed; confirm in MultiSampler)
    pub velocity_min: u8, // lowest velocity this layer covers
    pub velocity_max: u8, // highest velocity this layer covers
}
/// Serializable representation of a node graph preset /// Serializable representation of a node graph preset
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GraphPreset { pub struct GraphPreset {
@ -66,6 +104,10 @@ pub struct SerializedNode {
/// For VoiceAllocator nodes: the nested template graph /// For VoiceAllocator nodes: the nested template graph
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub template_graph: Option<Box<GraphPreset>>, pub template_graph: Option<Box<GraphPreset>>,
/// For sampler nodes: loaded sample data
#[serde(skip_serializing_if = "Option::is_none")]
pub sample_data: Option<SampleData>,
} }
/// Serialized connection between nodes /// Serialized connection between nodes
@ -132,6 +174,7 @@ impl SerializedNode {
parameters: HashMap::new(), parameters: HashMap::new(),
position: (0.0, 0.0), position: (0.0, 0.0),
template_graph: None, template_graph: None,
sample_data: None,
} }
} }

View File

@ -0,0 +1,316 @@
use symphonia::core::audio::{AudioBufferRef, Signal};
use symphonia::core::codecs::{DecoderOptions, CODEC_TYPE_NULL};
use symphonia::core::errors::Error as SymphoniaError;
use symphonia::core::formats::FormatOptions;
use symphonia::core::io::MediaSourceStream;
use symphonia::core::meta::MetadataOptions;
use symphonia::core::probe::Hint;
use std::fs::File;
use std::path::Path;
/// Loaded audio sample data
#[derive(Debug, Clone)]
pub struct SampleData {
/// Audio samples (mono, f32 format)
pub samples: Vec<f32>,
/// Original sample rate
pub sample_rate: u32,
}
/// Load an audio file and decode it to mono f32 samples
///
/// Probes the container with symphonia, decodes the first audio track,
/// and downmixes every packet to mono. Corrupt packets are skipped
/// (symphonia treats `DecodeError` as recoverable) instead of aborting
/// the whole load.
pub fn load_audio_file(path: impl AsRef<Path>) -> Result<SampleData, String> {
    let path = path.as_ref();

    // Open the file and wrap it in a media source stream for symphonia.
    let file = File::open(path)
        .map_err(|e| format!("Failed to open file: {}", e))?;
    let mss = MediaSourceStream::new(Box::new(file), Default::default());

    // Give the probe a hint from the file extension, if there is one.
    let mut hint = Hint::new();
    if let Some(extension) = path.extension() {
        if let Some(ext_str) = extension.to_str() {
            hint.with_extension(ext_str);
        }
    }

    // Probe the media source for a container format.
    let format_opts = FormatOptions::default();
    let metadata_opts = MetadataOptions::default();
    let probed = symphonia::default::get_probe()
        .format(&hint, mss, &format_opts, &metadata_opts)
        .map_err(|e| format!("Failed to probe format: {}", e))?;

    let mut format = probed.format;

    // Pick the first decodable audio track.
    let track = format
        .tracks()
        .iter()
        .find(|t| t.codec_params.codec != CODEC_TYPE_NULL)
        .ok_or_else(|| "No audio tracks found".to_string())?;

    let track_id = track.id;
    // NOTE(review): falls back to 48 kHz when the container does not
    // declare a rate — confirm this matches the engine's default.
    let sample_rate = track.codec_params.sample_rate.unwrap_or(48000);

    // Create a decoder for the selected track.
    let dec_opts = DecoderOptions::default();
    let mut decoder = symphonia::default::get_codecs()
        .make(&track.codec_params, &dec_opts)
        .map_err(|e| format!("Failed to create decoder: {}", e))?;

    // Decode every packet of the selected track, mixing down to mono.
    let mut all_samples = Vec::new();
    loop {
        let packet = match format.next_packet() {
            Ok(packet) => packet,
            Err(SymphoniaError::IoError(e)) if e.kind() == std::io::ErrorKind::UnexpectedEof => {
                // End of stream
                break;
            }
            Err(e) => {
                return Err(format!("Error reading packet: {}", e));
            }
        };

        // Skip packets that don't belong to the selected track
        if packet.track_id() != track_id {
            continue;
        }

        match decoder.decode(&packet) {
            Ok(decoded) => {
                let samples = convert_to_mono_f32(&decoded);
                all_samples.extend_from_slice(&samples);
            }
            // Decode errors are recoverable per symphonia's API contract:
            // skip the corrupt packet and keep decoding.
            Err(SymphoniaError::DecodeError(_)) => continue,
            Err(e) => return Err(format!("Failed to decode packet: {}", e)),
        }
    }

    Ok(SampleData {
        samples: all_samples,
        sample_rate,
    })
}
/// Convert an audio buffer to mono f32 samples
fn convert_to_mono_f32(buf: &AudioBufferRef) -> Vec<f32> {
match buf {
AudioBufferRef::F32(buf) => {
let channels = buf.spec().channels.count();
let frames = buf.frames();
let mut mono = Vec::with_capacity(frames);
if channels == 1 {
// Already mono
mono.extend_from_slice(buf.chan(0));
} else {
// Mix down to mono by averaging all channels
for frame in 0..frames {
let mut sum = 0.0;
for ch in 0..channels {
sum += buf.chan(ch)[frame];
}
mono.push(sum / channels as f32);
}
}
mono
}
AudioBufferRef::U8(buf) => {
let channels = buf.spec().channels.count();
let frames = buf.frames();
let mut mono = Vec::with_capacity(frames);
if channels == 1 {
for &sample in buf.chan(0) {
mono.push((sample as f32 - 128.0) / 128.0);
}
} else {
for frame in 0..frames {
let mut sum = 0.0;
for ch in 0..channels {
sum += (buf.chan(ch)[frame] as f32 - 128.0) / 128.0;
}
mono.push(sum / channels as f32);
}
}
mono
}
AudioBufferRef::U16(buf) => {
let channels = buf.spec().channels.count();
let frames = buf.frames();
let mut mono = Vec::with_capacity(frames);
if channels == 1 {
for &sample in buf.chan(0) {
mono.push((sample as f32 - 32768.0) / 32768.0);
}
} else {
for frame in 0..frames {
let mut sum = 0.0;
for ch in 0..channels {
sum += (buf.chan(ch)[frame] as f32 - 32768.0) / 32768.0;
}
mono.push(sum / channels as f32);
}
}
mono
}
AudioBufferRef::U24(buf) => {
let channels = buf.spec().channels.count();
let frames = buf.frames();
let mut mono = Vec::with_capacity(frames);
if channels == 1 {
for &sample in buf.chan(0) {
mono.push((sample.inner() as f32 - 8388608.0) / 8388608.0);
}
} else {
for frame in 0..frames {
let mut sum = 0.0;
for ch in 0..channels {
sum += (buf.chan(ch)[frame].inner() as f32 - 8388608.0) / 8388608.0;
}
mono.push(sum / channels as f32);
}
}
mono
}
AudioBufferRef::U32(buf) => {
let channels = buf.spec().channels.count();
let frames = buf.frames();
let mut mono = Vec::with_capacity(frames);
if channels == 1 {
for &sample in buf.chan(0) {
mono.push((sample as f32 - 2147483648.0) / 2147483648.0);
}
} else {
for frame in 0..frames {
let mut sum = 0.0;
for ch in 0..channels {
sum += (buf.chan(ch)[frame] as f32 - 2147483648.0) / 2147483648.0;
}
mono.push(sum / channels as f32);
}
}
mono
}
AudioBufferRef::S8(buf) => {
let channels = buf.spec().channels.count();
let frames = buf.frames();
let mut mono = Vec::with_capacity(frames);
if channels == 1 {
for &sample in buf.chan(0) {
mono.push(sample as f32 / 128.0);
}
} else {
for frame in 0..frames {
let mut sum = 0.0;
for ch in 0..channels {
sum += buf.chan(ch)[frame] as f32 / 128.0;
}
mono.push(sum / channels as f32);
}
}
mono
}
AudioBufferRef::S16(buf) => {
let channels = buf.spec().channels.count();
let frames = buf.frames();
let mut mono = Vec::with_capacity(frames);
if channels == 1 {
for &sample in buf.chan(0) {
mono.push(sample as f32 / 32768.0);
}
} else {
for frame in 0..frames {
let mut sum = 0.0;
for ch in 0..channels {
sum += buf.chan(ch)[frame] as f32 / 32768.0;
}
mono.push(sum / channels as f32);
}
}
mono
}
AudioBufferRef::S24(buf) => {
let channels = buf.spec().channels.count();
let frames = buf.frames();
let mut mono = Vec::with_capacity(frames);
if channels == 1 {
for &sample in buf.chan(0) {
mono.push(sample.inner() as f32 / 8388608.0);
}
} else {
for frame in 0..frames {
let mut sum = 0.0;
for ch in 0..channels {
sum += buf.chan(ch)[frame].inner() as f32 / 8388608.0;
}
mono.push(sum / channels as f32);
}
}
mono
}
AudioBufferRef::S32(buf) => {
let channels = buf.spec().channels.count();
let frames = buf.frames();
let mut mono = Vec::with_capacity(frames);
if channels == 1 {
for &sample in buf.chan(0) {
mono.push(sample as f32 / 2147483648.0);
}
} else {
for frame in 0..frames {
let mut sum = 0.0;
for ch in 0..channels {
sum += buf.chan(ch)[frame] as f32 / 2147483648.0;
}
mono.push(sum / channels as f32);
}
}
mono
}
AudioBufferRef::F64(buf) => {
let channels = buf.spec().channels.count();
let frames = buf.frames();
let mut mono = Vec::with_capacity(frames);
if channels == 1 {
for &sample in buf.chan(0) {
mono.push(sample as f32);
}
} else {
for frame in 0..frames {
let mut sum = 0.0;
for ch in 0..channels {
sum += buf.chan(ch)[frame] as f32;
}
mono.push(sum / channels as f32);
}
}
mono
}
}
}

View File

@ -144,6 +144,15 @@ pub enum Command {
GraphLoadPreset(TrackId, String), GraphLoadPreset(TrackId, String),
/// Save a VoiceAllocator's template graph as a preset (track_id, voice_allocator_id, preset_path, preset_name) /// Save a VoiceAllocator's template graph as a preset (track_id, voice_allocator_id, preset_path, preset_name)
GraphSaveTemplatePreset(TrackId, u32, String, String), GraphSaveTemplatePreset(TrackId, u32, String, String),
/// Load a sample into a SimpleSampler node (track_id, node_id, file_path)
SamplerLoadSample(TrackId, u32, String),
/// Add a sample layer to a MultiSampler node (track_id, node_id, file_path, key_min, key_max, root_key, velocity_min, velocity_max)
MultiSamplerAddLayer(TrackId, u32, String, u8, u8, u8, u8, u8),
/// Update a MultiSampler layer's configuration (track_id, node_id, layer_index, key_min, key_max, root_key, velocity_min, velocity_max)
MultiSamplerUpdateLayer(TrackId, u32, usize, u8, u8, u8, u8, u8),
/// Remove a layer from a MultiSampler node (track_id, node_id, layer_index)
MultiSamplerRemoveLayer(TrackId, u32, usize),
} }
/// Events sent from audio thread back to UI/control thread /// Events sent from audio thread back to UI/control thread

1
src-tauri/Cargo.lock generated
View File

@ -1013,6 +1013,7 @@ dependencies = [
name = "daw-backend" name = "daw-backend"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"base64 0.22.1",
"cpal", "cpal",
"crossterm", "crossterm",
"dasp_envelope", "dasp_envelope",

View File

@ -743,7 +743,20 @@ pub async fn graph_load_preset(
track_id: u32, track_id: u32,
preset_path: String, preset_path: String,
) -> Result<(), String> { ) -> Result<(), String> {
use daw_backend::GraphPreset;
let mut audio_state = state.lock().unwrap(); let mut audio_state = state.lock().unwrap();
// Load the preset JSON to count nodes
let json = std::fs::read_to_string(&preset_path)
.map_err(|e| format!("Failed to read preset file: {}", e))?;
let preset = GraphPreset::from_json(&json)
.map_err(|e| format!("Failed to parse preset: {}", e))?;
// Update the node ID counter to account for nodes in the preset
let node_count = preset.nodes.len() as u32;
audio_state.next_graph_node_id = node_count;
if let Some(controller) = &mut audio_state.controller { if let Some(controller) = &mut audio_state.controller {
// Send command to load preset // Send command to load preset
controller.graph_load_preset(track_id, preset_path); controller.graph_load_preset(track_id, preset_path);
@ -936,6 +949,180 @@ pub async fn graph_get_template_state(
} }
} }
/// Tauri command: ask the audio thread to load a sample file into a
/// SimpleSampler node on the given track.
#[tauri::command]
pub async fn sampler_load_sample(
    state: tauri::State<'_, Arc<Mutex<AudioState>>>,
    track_id: u32,
    node_id: u32,
    file_path: String,
) -> Result<(), String> {
    let mut audio_state = state.lock().unwrap();
    match audio_state.controller.as_mut() {
        Some(controller) => {
            // Forward the load request to the audio thread.
            controller.sampler_load_sample(track_id, node_id, file_path);
            Ok(())
        }
        None => Err("Audio not initialized".to_string()),
    }
}
/// Tauri command: ask the audio thread to add a sample layer (key range,
/// root key, velocity range) to a MultiSampler node.
#[tauri::command]
pub async fn multi_sampler_add_layer(
    state: tauri::State<'_, Arc<Mutex<AudioState>>>,
    track_id: u32,
    node_id: u32,
    file_path: String,
    key_min: u8,
    key_max: u8,
    root_key: u8,
    velocity_min: u8,
    velocity_max: u8,
) -> Result<(), String> {
    let mut audio_state = state.lock().unwrap();
    let controller = audio_state
        .controller
        .as_mut()
        .ok_or_else(|| "Audio not initialized".to_string())?;
    // Forward the new layer definition to the audio thread.
    controller.multi_sampler_add_layer(
        track_id, node_id, file_path, key_min, key_max, root_key, velocity_min, velocity_max,
    );
    Ok(())
}
/// Layer description returned to the frontend by `multi_sampler_get_layers`.
#[derive(serde::Serialize)]
pub struct LayerInfo {
    pub file_path: String, // empty string when the layer has no recorded path
    pub key_min: u8,
    pub key_max: u8,
    pub root_key: u8,
    pub velocity_min: u8,
    pub velocity_max: u8,
}
/// Tauri command: fetch the layer list of a MultiSampler node.
///
/// There is no direct query channel to the audio thread, so this asks the
/// engine to serialize the whole graph to a temp preset file, waits
/// briefly, then parses the node's layer data back out of the JSON.
/// NOTE(review): the fixed 50 ms sleep is a race — if the audio thread
/// has not written the file yet, this silently returns an empty list.
#[tauri::command]
pub async fn multi_sampler_get_layers(
    state: tauri::State<'_, Arc<Mutex<AudioState>>>,
    track_id: u32,
    node_id: u32,
) -> Result<Vec<LayerInfo>, String> {
    use daw_backend::GraphPreset;

    let mut audio_state = state.lock().unwrap();
    if let Some(controller) = &mut audio_state.controller {
        // Use preset serialization to get node data including layers.
        // The nanosecond timestamp makes the temp file unique per query so
        // concurrent queries cannot clobber each other's files.
        let timestamp = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap()
            .as_nanos();
        let temp_path = std::env::temp_dir().join(format!("temp_layers_query_{}_{}_{}.json", track_id, node_id, timestamp));
        let temp_path_str = temp_path.to_string_lossy().to_string();

        controller.graph_save_preset(
            track_id,
            temp_path_str.clone(),
            "temp".to_string(),
            "".to_string(),
            vec![]
        );

        // Give the audio thread time to process the save command.
        std::thread::sleep(std::time::Duration::from_millis(50));

        // Read the temp file and parse it
        match std::fs::read_to_string(&temp_path) {
            Ok(json) => {
                // Clean up temp file before parsing; the JSON is in memory.
                let _ = std::fs::remove_file(&temp_path);

                // Parse the preset JSON
                let preset: GraphPreset = match serde_json::from_str(&json) {
                    Ok(p) => p,
                    Err(e) => return Err(format!("Failed to parse preset: {}", e)),
                };

                // Find the node with the matching ID
                if let Some(node) = preset.nodes.iter().find(|n| n.id == node_id) {
                    if let Some(ref sample_data) = node.sample_data {
                        // Only MultiSampler nodes carry layer lists.
                        if let daw_backend::audio::node_graph::preset::SampleData::MultiSampler { layers } = sample_data {
                            return Ok(layers.iter().map(|layer| LayerInfo {
                                file_path: layer.file_path.clone().unwrap_or_default(),
                                key_min: layer.key_min,
                                key_max: layer.key_max,
                                root_key: layer.root_key,
                                velocity_min: layer.velocity_min,
                                velocity_max: layer.velocity_max,
                            }).collect());
                        }
                    }
                }
                // Node not found or not a MultiSampler: report no layers.
                Ok(Vec::new())
            }
            Err(_) => Ok(Vec::new()), // Return empty list if file doesn't exist
        }
    } else {
        Err("Audio not initialized".to_string())
    }
}
/// Tauri command: update the zone mapping of an existing MultiSampler layer.
///
/// `layer_index` identifies the layer within the node; the remaining
/// parameters replace its key range, root key, and velocity range.
/// Returns `Err` if the audio engine has not been initialized yet.
#[tauri::command]
pub async fn multi_sampler_update_layer(
    state: tauri::State<'_, Arc<Mutex<AudioState>>>,
    track_id: u32,
    node_id: u32,
    layer_index: usize,
    key_min: u8,
    key_max: u8,
    root_key: u8,
    velocity_min: u8,
    velocity_max: u8,
) -> Result<(), String> {
    let mut audio_state = state.lock().unwrap();
    match audio_state.controller.as_mut() {
        Some(controller) => {
            controller.multi_sampler_update_layer(
                track_id,
                node_id,
                layer_index,
                key_min,
                key_max,
                root_key,
                velocity_min,
                velocity_max,
            );
            Ok(())
        }
        None => Err("Audio not initialized".to_string()),
    }
}
/// Tauri command: remove a layer from a MultiSampler node by index.
///
/// Returns `Err` if the audio engine has not been initialized yet.
#[tauri::command]
pub async fn multi_sampler_remove_layer(
    state: tauri::State<'_, Arc<Mutex<AudioState>>>,
    track_id: u32,
    node_id: u32,
    layer_index: usize,
) -> Result<(), String> {
    let mut audio_state = state.lock().unwrap();
    match audio_state.controller.as_mut() {
        Some(controller) => {
            controller.multi_sampler_remove_layer(track_id, node_id, layer_index);
            Ok(())
        }
        None => Err("Audio not initialized".to_string()),
    }
}
#[derive(serde::Serialize, Clone)] #[derive(serde::Serialize, Clone)]
#[serde(tag = "type")] #[serde(tag = "type")]
pub enum SerializedAudioEvent { pub enum SerializedAudioEvent {

View File

@ -228,6 +228,11 @@ pub fn run() {
audio::graph_delete_preset, audio::graph_delete_preset,
audio::graph_get_state, audio::graph_get_state,
audio::graph_get_template_state, audio::graph_get_template_state,
audio::sampler_load_sample,
audio::multi_sampler_add_layer,
audio::multi_sampler_get_layers,
audio::multi_sampler_update_layer,
audio::multi_sampler_remove_layer,
]) ])
// .manage(window_counter) // .manage(window_counter)
.build(tauri::generate_context!()) .build(tauri::generate_context!())

View File

@ -0,0 +1,5 @@
<svg width="100" height="100" viewBox="0 0 100 100" xmlns="http://www.w3.org/2000/svg">
<!-- Placeholder animation/drawing icon -->
<path d="M20,80 Q30,60 50,50 T80,20" stroke="currentColor" stroke-width="4" fill="none" stroke-linecap="round"/>
<circle cx="30" cy="70" r="8" fill="currentColor"/>
</svg>

After

Width:  |  Height:  |  Size: 310 B

View File

@ -0,0 +1,10 @@
<svg width="100" height="100" viewBox="0 0 100 100" xmlns="http://www.w3.org/2000/svg">
<!-- Placeholder music/piano icon -->
<rect x="10" y="20" width="18" height="60" fill="none" stroke="currentColor" stroke-width="3"/>
<rect x="30" y="20" width="18" height="60" fill="none" stroke="currentColor" stroke-width="3"/>
<rect x="50" y="20" width="18" height="60" fill="none" stroke="currentColor" stroke-width="3"/>
<rect x="70" y="20" width="18" height="60" fill="none" stroke="currentColor" stroke-width="3"/>
<rect x="24" y="20" width="12" height="35" fill="currentColor"/>
<rect x="44" y="20" width="12" height="35" fill="currentColor"/>
<rect x="74" y="20" width="12" height="35" fill="currentColor"/>
</svg>

After

Width:  |  Height:  |  Size: 728 B

View File

@ -0,0 +1,9 @@
<svg width="100" height="100" viewBox="0 0 100 100" xmlns="http://www.w3.org/2000/svg">
<!-- Placeholder video/clapperboard icon -->
<rect x="15" y="40" width="70" height="45" fill="currentColor" rx="4"/>
<rect x="15" y="25" width="70" height="15" fill="none" stroke="currentColor" stroke-width="3" rx="4"/>
<rect x="20" y="25" width="7" height="15" fill="currentColor"/>
<rect x="35" y="25" width="7" height="15" fill="currentColor"/>
<rect x="50" y="25" width="7" height="15" fill="currentColor"/>
<rect x="65" y="25" width="7" height="15" fill="currentColor"/>
</svg>

After

Width:  |  Height:  |  Size: 585 B

View File

@ -8,6 +8,12 @@ import {
showNewFileDialog, showNewFileDialog,
closeDialog, closeDialog,
} from "./newfile.js"; } from "./newfile.js";
import {
createStartScreen,
updateStartScreen,
showStartScreen,
hideStartScreen,
} from "./startscreen.js";
import { import {
titleCase, titleCase,
getMousePositionFraction, getMousePositionFraction,
@ -125,6 +131,12 @@ const { PhysicalPosition, LogicalPosition } = window.__TAURI__.dpi;
const { getCurrentWindow } = window.__TAURI__.window; const { getCurrentWindow } = window.__TAURI__.window;
const { getVersion } = window.__TAURI__.app; const { getVersion } = window.__TAURI__.app;
// Supported file extensions
const imageExtensions = ["png", "gif", "avif", "jpg", "jpeg"];
const audioExtensions = ["mp3", "wav", "aiff", "ogg", "flac"];
const midiExtensions = ["mid", "midi"];
const beamExtensions = ["beam"];
// import init, { CoreInterface } from './pkg/lightningbeam_core.js'; // import init, { CoreInterface } from './pkg/lightningbeam_core.js';
window.onerror = (message, source, lineno, colno, error) => { window.onerror = (message, source, lineno, colno, error) => {
@ -1369,7 +1381,9 @@ function _newFile(width, height, fps, layoutKey) {
const oldRoot = root; const oldRoot = root;
console.log('[_newFile] Old root:', oldRoot, 'frameRate:', oldRoot?.frameRate); console.log('[_newFile] Old root:', oldRoot, 'frameRate:', oldRoot?.frameRate);
root = new GraphicsObject("root"); // Determine initial child type based on layout
const initialChildType = layoutKey === 'audioDaw' ? 'midi' : 'layer';
root = new GraphicsObject("root", initialChildType);
// Switch to the selected layout if provided // Switch to the selected layout if provided
if (layoutKey) { if (layoutKey) {
@ -1791,12 +1805,6 @@ function revert() {
} }
async function importFile() { async function importFile() {
// Define supported extensions
const imageExtensions = ["png", "gif", "avif", "jpg", "jpeg"];
const audioExtensions = ["mp3", "wav", "aiff", "ogg", "flac"];
const midiExtensions = ["mid", "midi"];
const beamExtensions = ["beam"];
// Define filters in consistent order // Define filters in consistent order
const allFilters = [ const allFilters = [
{ {
@ -4363,12 +4371,33 @@ function outliner(object = undefined) {
async function startup() { async function startup() {
await loadConfig(); await loadConfig();
createNewFileDialog(_newFile, _open, config); createNewFileDialog(_newFile, _open, config);
// Create start screen with callback
createStartScreen(async (options) => {
hideStartScreen();
if (options.type === 'new') {
// Create new project with selected focus
_newFile(
options.width || 800,
options.height || 600,
options.fps || 24,
options.projectFocus
);
} else if (options.type === 'reopen' || options.type === 'recent') {
// Open existing file
await _open(options.filePath);
}
});
if (!window.openedFiles?.length) { if (!window.openedFiles?.length) {
if (config.reopenLastSession && config.recentFiles?.length) { if (config.reopenLastSession && config.recentFiles?.length) {
document.body.style.cursor = "wait" document.body.style.cursor = "wait"
setTimeout(()=>_open(config.recentFiles[0]), 10) setTimeout(()=>_open(config.recentFiles[0]), 10)
} else { } else {
showNewFileDialog(config); // Show start screen instead of new file dialog
await updateStartScreen(config);
showStartScreen();
} }
} }
} }
@ -6074,9 +6103,11 @@ function nodeEditor() {
// Create the Drawflow canvas // Create the Drawflow canvas
const editorDiv = document.createElement("div"); const editorDiv = document.createElement("div");
editorDiv.id = "drawflow"; editorDiv.id = "drawflow";
editorDiv.style.width = "100%"; editorDiv.style.position = "absolute";
editorDiv.style.height = "calc(100% - 40px)"; // Account for header editorDiv.style.top = "40px"; // Start below header
editorDiv.style.position = "relative"; editorDiv.style.left = "0";
editorDiv.style.right = "0";
editorDiv.style.bottom = "0";
container.appendChild(editorDiv); container.appendChild(editorDiv);
// Create node palette // Create node palette
@ -6611,7 +6642,7 @@ function nodeEditor() {
const nodeElement = document.getElementById(`node-${nodeId}`); const nodeElement = document.getElementById(`node-${nodeId}`);
if (!nodeElement) return; if (!nodeElement) return;
const sliders = nodeElement.querySelectorAll(".node-slider"); const sliders = nodeElement.querySelectorAll('input[type="range"]');
sliders.forEach(slider => { sliders.forEach(slider => {
// Prevent node dragging when interacting with slider // Prevent node dragging when interacting with slider
slider.addEventListener("mousedown", (e) => { slider.addEventListener("mousedown", (e) => {
@ -6654,6 +6685,115 @@ function nodeEditor() {
} }
}); });
}); });
// Handle Load Sample button for SimpleSampler
const loadSampleBtn = nodeElement.querySelector(".load-sample-btn");
if (loadSampleBtn) {
loadSampleBtn.addEventListener("mousedown", (e) => e.stopPropagation());
loadSampleBtn.addEventListener("pointerdown", (e) => e.stopPropagation());
loadSampleBtn.addEventListener("click", async (e) => {
e.stopPropagation();
const nodeData = editor.getNodeFromId(nodeId);
if (!nodeData || nodeData.data.backendId === null) {
showError("Node not yet created on backend");
return;
}
const currentTrackId = getCurrentMidiTrack();
if (currentTrackId === null) {
showError("No MIDI track selected");
return;
}
try {
const filePath = await openFileDialog({
title: "Load Audio Sample",
filters: [{
name: "Audio Files",
extensions: audioExtensions
}]
});
if (filePath) {
await invoke("sampler_load_sample", {
trackId: currentTrackId,
nodeId: nodeData.data.backendId,
filePath: filePath
});
// Update UI to show filename
const sampleInfo = nodeElement.querySelector(`#sample-info-${nodeId}`);
if (sampleInfo) {
const filename = filePath.split('/').pop().split('\\').pop();
sampleInfo.textContent = filename;
}
}
} catch (err) {
console.error("Failed to load sample:", err);
showError(`Failed to load sample: ${err}`);
}
});
}
// Handle Add Layer button for MultiSampler
const addLayerBtn = nodeElement.querySelector(".add-layer-btn");
if (addLayerBtn) {
addLayerBtn.addEventListener("mousedown", (e) => e.stopPropagation());
addLayerBtn.addEventListener("pointerdown", (e) => e.stopPropagation());
addLayerBtn.addEventListener("click", async (e) => {
e.stopPropagation();
const nodeData = editor.getNodeFromId(nodeId);
if (!nodeData || nodeData.data.backendId === null) {
showError("Node not yet created on backend");
return;
}
const currentTrackId = getCurrentMidiTrack();
if (currentTrackId === null) {
showError("No MIDI track selected");
return;
}
try {
const filePath = await openFileDialog({
title: "Add Sample Layer",
filters: [{
name: "Audio Files",
extensions: audioExtensions
}]
});
if (filePath) {
// Show dialog to configure layer mapping
const layerConfig = await showLayerConfigDialog(filePath);
if (layerConfig) {
await invoke("multi_sampler_add_layer", {
trackId: currentTrackId,
nodeId: nodeData.data.backendId,
filePath: filePath,
keyMin: layerConfig.keyMin,
keyMax: layerConfig.keyMax,
rootKey: layerConfig.rootKey,
velocityMin: layerConfig.velocityMin,
velocityMax: layerConfig.velocityMax
});
// Wait a bit for the audio thread to process the add command
await new Promise(resolve => setTimeout(resolve, 100));
// Refresh the layers list
await refreshSampleLayersList(nodeId);
}
}
} catch (err) {
console.error("Failed to add layer:", err);
showError(`Failed to add layer: ${err}`);
}
});
}
}, 100); }, 100);
} }
@ -6682,6 +6822,134 @@ function nodeEditor() {
enterTemplate(node.data.backendId, nodeName); enterTemplate(node.data.backendId, nodeName);
} }
// Refresh the layers list for a MultiSampler node.
// Queries the backend for the node's current layers, rebuilds the table body,
// and wires up the per-row Edit/Delete buttons. Silently does nothing if the
// node has no backend id or no MIDI track is selected.
async function refreshSampleLayersList(nodeId) {
  const nodeData = editor.getNodeFromId(nodeId);
  if (!nodeData || nodeData.data.backendId === null) {
    return;
  }
  const currentTrackId = getCurrentMidiTrack();
  if (currentTrackId === null) {
    return;
  }
  try {
    const layers = await invoke("multi_sampler_get_layers", {
      trackId: currentTrackId,
      nodeId: nodeData.data.backendId
    });
    const layersList = document.querySelector(`#sample-layers-list-${nodeId}`);
    const layersContainer = document.querySelector(`#sample-layers-container-${nodeId}`);
    if (!layersList) return;
    // Prevent scroll events from bubbling to canvas (attach only once per container)
    if (layersContainer && !layersContainer.dataset.scrollListenerAdded) {
      layersContainer.addEventListener('wheel', (e) => {
        e.stopPropagation();
      }, { passive: false });
      layersContainer.dataset.scrollListenerAdded = 'true';
    }
    if (layers.length === 0) {
      layersList.innerHTML = '<tr><td colspan="5" class="sample-layers-empty">No layers loaded</td></tr>';
    } else {
      layersList.innerHTML = layers.map((layer, index) => {
        // Show just the basename in the cell; keep the full path in the tooltip.
        const filename = layer.file_path.split('/').pop().split('\\').pop();
        const keyRange = `${midiToNoteName(layer.key_min)}-${midiToNoteName(layer.key_max)}`;
        const rootNote = midiToNoteName(layer.root_key);
        const velRange = `${layer.velocity_min}-${layer.velocity_max}`;
        return `
          <tr data-index="${index}">
            <td class="sample-layer-filename" title="${layer.file_path}">${filename}</td>
            <td>${keyRange}</td>
            <td>${rootNote}</td>
            <td>${velRange}</td>
            <td>
              <div class="sample-layer-actions">
                <button class="btn-edit-layer" data-node="${nodeId}" data-index="${index}">Edit</button>
                <button class="btn-delete-layer" data-node="${nodeId}" data-index="${index}">Del</button>
              </div>
            </td>
          </tr>
        `;
      }).join('');
      // Add event listeners for edit buttons
      const editButtons = layersList.querySelectorAll('.btn-edit-layer');
      editButtons.forEach(btn => {
        btn.addEventListener('click', async (e) => {
          e.stopPropagation();
          const index = parseInt(btn.dataset.index);
          const layer = layers[index];
          // Show edit dialog pre-filled with the layer's current mapping
          const layerConfig = await showLayerConfigDialog(layer.file_path, {
            keyMin: layer.key_min,
            keyMax: layer.key_max,
            rootKey: layer.root_key,
            velocityMin: layer.velocity_min,
            velocityMax: layer.velocity_max
          });
          if (layerConfig) {
            try {
              await invoke("multi_sampler_update_layer", {
                trackId: currentTrackId,
                nodeId: nodeData.data.backendId,
                layerIndex: index,
                keyMin: layerConfig.keyMin,
                keyMax: layerConfig.keyMax,
                rootKey: layerConfig.rootKey,
                velocityMin: layerConfig.velocityMin,
                velocityMax: layerConfig.velocityMax
              });
              // Refresh the list
              await refreshSampleLayersList(nodeId);
            } catch (err) {
              console.error("Failed to update layer:", err);
              showError(`Failed to update layer: ${err}`);
            }
          }
        });
      });
      // Add event listeners for delete buttons
      const deleteButtons = layersList.querySelectorAll('.btn-delete-layer');
      deleteButtons.forEach(btn => {
        btn.addEventListener('click', async (e) => {
          e.stopPropagation();
          const index = parseInt(btn.dataset.index);
          const layer = layers[index];
          const filename = layer.file_path.split('/').pop().split('\\').pop();
          if (confirm(`Delete layer "${filename}"?`)) {
            try {
              await invoke("multi_sampler_remove_layer", {
                trackId: currentTrackId,
                nodeId: nodeData.data.backendId,
                layerIndex: index
              });
              // Refresh the list
              await refreshSampleLayersList(nodeId);
            } catch (err) {
              console.error("Failed to remove layer:", err);
              showError(`Failed to remove layer: ${err}`);
            }
          }
        });
      });
    }
  } catch (err) {
    console.error("Failed to get layers:", err);
  }
}
// Handle connection creation // Handle connection creation
function handleConnectionCreated(connection) { function handleConnectionCreated(connection) {
console.log("handleConnectionCreated called:", connection); console.log("handleConnectionCreated called:", connection);
@ -7508,6 +7776,161 @@ function showSavePresetDialog(container) {
}); });
} }
// Convert a MIDI note number to a pitch name, e.g. 60 -> "C4", 61 -> "C#4".
// Octave numbering follows the convention where MIDI 0 is C-1.
function midiToNoteName(midiNote) {
  const NAMES = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B'];
  const octave = Math.floor(midiNote / 12) - 1;
  return NAMES[midiNote % 12] + octave;
}
// Show dialog to configure MultiSampler layer zones.
// Displays a modal form for the key range, root key, and velocity range of a
// sample layer. When `existingConfig` is given, the form is pre-filled and the
// dialog acts as an editor; otherwise defaults cover the full ranges.
// Resolves with {keyMin, keyMax, rootKey, velocityMin, velocityMax} on submit,
// or null when the user cancels or clicks outside the dialog.
function showLayerConfigDialog(filePath, existingConfig = null) {
  return new Promise((resolve) => {
    const filename = filePath.split('/').pop().split('\\').pop();
    const isEdit = existingConfig !== null;
    // Use existing values or defaults
    const keyMin = existingConfig?.keyMin ?? 0;
    const keyMax = existingConfig?.keyMax ?? 127;
    const rootKey = existingConfig?.rootKey ?? 60;
    const velocityMin = existingConfig?.velocityMin ?? 0;
    const velocityMax = existingConfig?.velocityMax ?? 127;
    // Create modal dialog
    const dialog = document.createElement('div');
    dialog.className = 'modal-overlay';
    dialog.innerHTML = `
      <div class="modal-dialog">
        <h3>${isEdit ? 'Edit' : 'Configure'} Sample Layer</h3>
        <p style="font-size: 12px; color: #666; margin-bottom: 16px;">
          File: <strong>${filename}</strong>
        </p>
        <form id="layer-config-form">
          <div class="form-group">
            <label>Key Range</label>
            <div class="form-group-inline">
              <div>
                <label style="font-size: 11px; color: #888;">Min</label>
                <input type="number" id="key-min" min="0" max="127" value="${keyMin}" required />
                <div id="key-min-name" class="form-note-name">${midiToNoteName(keyMin)}</div>
              </div>
              <span>-</span>
              <div>
                <label style="font-size: 11px; color: #888;">Max</label>
                <input type="number" id="key-max" min="0" max="127" value="${keyMax}" required />
                <div id="key-max-name" class="form-note-name">${midiToNoteName(keyMax)}</div>
              </div>
            </div>
          </div>
          <div class="form-group">
            <label>Root Key (original pitch)</label>
            <input type="number" id="root-key" min="0" max="127" value="${rootKey}" required />
            <div id="root-key-name" class="form-note-name">${midiToNoteName(rootKey)}</div>
          </div>
          <div class="form-group">
            <label>Velocity Range</label>
            <div class="form-group-inline">
              <div>
                <label style="font-size: 11px; color: #888;">Min</label>
                <input type="number" id="velocity-min" min="0" max="127" value="${velocityMin}" required />
              </div>
              <span>-</span>
              <div>
                <label style="font-size: 11px; color: #888;">Max</label>
                <input type="number" id="velocity-max" min="0" max="127" value="${velocityMax}" required />
              </div>
            </div>
          </div>
          <div class="form-actions">
            <button type="button" class="btn-cancel">Cancel</button>
            <button type="submit" class="btn-primary">${isEdit ? 'Update' : 'Add'} Layer</button>
          </div>
        </form>
      </div>
    `;
    document.body.appendChild(dialog);
    // Live-update the note-name labels as the key inputs change
    const keyMinInput = dialog.querySelector('#key-min');
    const keyMaxInput = dialog.querySelector('#key-max');
    const rootKeyInput = dialog.querySelector('#root-key');
    const updateKeyMinName = () => {
      const note = parseInt(keyMinInput.value, 10) || 0;
      dialog.querySelector('#key-min-name').textContent = midiToNoteName(note);
    };
    const updateKeyMaxName = () => {
      const note = parseInt(keyMaxInput.value, 10) || 127;
      dialog.querySelector('#key-max-name').textContent = midiToNoteName(note);
    };
    const updateRootKeyName = () => {
      const note = parseInt(rootKeyInput.value, 10) || 60;
      dialog.querySelector('#root-key-name').textContent = midiToNoteName(note);
    };
    keyMinInput.addEventListener('input', updateKeyMinName);
    keyMaxInput.addEventListener('input', updateKeyMaxName);
    rootKeyInput.addEventListener('input', updateRootKeyName);
    // Focus first input
    setTimeout(() => dialog.querySelector('#key-min')?.focus(), 100);
    // Handle cancel
    dialog.querySelector('.btn-cancel').addEventListener('click', () => {
      dialog.remove();
      resolve(null);
    });
    // Handle submit
    dialog.querySelector('#layer-config-form').addEventListener('submit', (e) => {
      e.preventDefault();
      const keyMin = parseInt(keyMinInput.value, 10);
      const keyMax = parseInt(keyMaxInput.value, 10);
      const rootKey = parseInt(rootKeyInput.value, 10);
      const velocityMin = parseInt(dialog.querySelector('#velocity-min').value, 10);
      const velocityMax = parseInt(dialog.querySelector('#velocity-max').value, 10);
      // Validate ranges before resolving
      if (keyMin > keyMax) {
        alert('Key Min must be less than or equal to Key Max');
        return;
      }
      if (velocityMin > velocityMax) {
        alert('Velocity Min must be less than or equal to Velocity Max');
        return;
      }
      if (rootKey < keyMin || rootKey > keyMax) {
        alert('Root Key must be within the key range');
        return;
      }
      dialog.remove();
      resolve({
        keyMin,
        keyMax,
        rootKey,
        velocityMin,
        velocityMax
      });
    });
    // Close on background click (clicks inside the dialog box don't match)
    dialog.addEventListener('click', (e) => {
      if (e.target === dialog) {
        dialog.remove();
        resolve(null);
      }
    });
  });
}
function filterPresets(container) { function filterPresets(container) {
const searchTerm = container.querySelector('#preset-search')?.value.toLowerCase() || ''; const searchTerm = container.querySelector('#preset-search')?.value.toLowerCase() || '';
const selectedTag = container.querySelector('#preset-tag-filter')?.value || ''; const selectedTag = container.querySelector('#preset-tag-filter')?.value || '';

View File

@ -31,7 +31,7 @@ export function initializeGraphicsObjectDependencies(deps) {
} }
class GraphicsObject extends Widget { class GraphicsObject extends Widget {
constructor(uuid) { constructor(uuid, initialChildType = 'layer') {
super(0, 0) super(0, 0)
this.rotation = 0; // in radians this.rotation = 0; // in radians
this.scale_x = 1; this.scale_x = 1;
@ -48,10 +48,31 @@ class GraphicsObject extends Widget {
this.currentTime = 0; // New: continuous time for AnimationData curves this.currentTime = 0; // New: continuous time for AnimationData curves
this.currentLayer = 0; this.currentLayer = 0;
this._activeAudioTrack = null; // Reference to active audio track (if any) this._activeAudioTrack = null; // Reference to active audio track (if any)
this.children = [new Layer(uuid + "-L1", this)];
// this.layers = [new Layer(uuid + "-L1")]; // Initialize children and audioTracks based on initialChildType
this.children = [];
this.audioTracks = []; this.audioTracks = [];
// this.children = []
if (initialChildType === 'layer') {
this.children = [new Layer(uuid + "-L1", this)];
this.currentLayer = 0; // Set first layer as active
} else if (initialChildType === 'midi') {
const midiTrack = new AudioTrack(uuid + "-M1", "MIDI 1", 'midi');
this.audioTracks.push(midiTrack);
this._activeAudioTrack = midiTrack; // Set MIDI track as active (the object, not index)
// Initialize the MIDI track in the audio backend
midiTrack.initializeTrack().catch(err => {
console.error('Failed to initialize MIDI track:', err);
});
} else if (initialChildType === 'audio') {
const audioTrack = new AudioTrack(uuid + "-A1", "Audio 1", 'audio');
this.audioTracks.push(audioTrack);
this._activeAudioTrack = audioTrack; // Set audio track as active (the object, not index)
audioTrack.initializeTrack().catch(err => {
console.error('Failed to initialize audio track:', err);
});
}
// If initialChildType is 'none' or anything else, leave both arrays empty
this.shapes = []; this.shapes = [];

View File

@ -56,7 +56,7 @@ export const nodeTypes = {
<div class="node-param"> <div class="node-param">
<label>Waveform: <span id="wave-${nodeId}">Sine</span></label> <label>Waveform: <span id="wave-${nodeId}">Sine</span></label>
<input type="range" <input type="range"
class="node-slider"
data-node="${nodeId}" data-node="${nodeId}"
data-param="2" data-param="2"
min="0" min="0"
@ -67,7 +67,7 @@ export const nodeTypes = {
<div class="node-param"> <div class="node-param">
<label>Frequency: <span id="freq-${nodeId}">440</span> Hz</label> <label>Frequency: <span id="freq-${nodeId}">440</span> Hz</label>
<input type="range" <input type="range"
class="node-slider"
data-node="${nodeId}" data-node="${nodeId}"
data-param="0" data-param="0"
min="20" min="20"
@ -78,7 +78,7 @@ export const nodeTypes = {
<div class="node-param"> <div class="node-param">
<label>Amplitude: <span id="amp-${nodeId}">0.5</span></label> <label>Amplitude: <span id="amp-${nodeId}">0.5</span></label>
<input type="range" <input type="range"
class="node-slider"
data-node="${nodeId}" data-node="${nodeId}"
data-param="1" data-param="1"
min="0" min="0"
@ -110,7 +110,7 @@ export const nodeTypes = {
<div class="node-param"> <div class="node-param">
<label>Gain: <span id="gain-${nodeId}">1.0</span>x</label> <label>Gain: <span id="gain-${nodeId}">1.0</span>x</label>
<input type="range" <input type="range"
class="node-slider"
data-node="${nodeId}" data-node="${nodeId}"
data-param="0" data-param="0"
min="0" min="0"
@ -146,19 +146,19 @@ export const nodeTypes = {
<div class="node-title">Mixer</div> <div class="node-title">Mixer</div>
<div class="node-param"> <div class="node-param">
<label>Gain 1: <span id="g1-${nodeId}">1.0</span>x</label> <label>Gain 1: <span id="g1-${nodeId}">1.0</span>x</label>
<input type="range" class="node-slider" data-node="${nodeId}" data-param="0" min="0" max="2" value="1" step="0.01"> <input type="range" data-node="${nodeId}" data-param="0" min="0" max="2" value="1" step="0.01">
</div> </div>
<div class="node-param"> <div class="node-param">
<label>Gain 2: <span id="g2-${nodeId}">1.0</span>x</label> <label>Gain 2: <span id="g2-${nodeId}">1.0</span>x</label>
<input type="range" class="node-slider" data-node="${nodeId}" data-param="1" min="0" max="2" value="1" step="0.01"> <input type="range" data-node="${nodeId}" data-param="1" min="0" max="2" value="1" step="0.01">
</div> </div>
<div class="node-param"> <div class="node-param">
<label>Gain 3: <span id="g3-${nodeId}">1.0</span>x</label> <label>Gain 3: <span id="g3-${nodeId}">1.0</span>x</label>
<input type="range" class="node-slider" data-node="${nodeId}" data-param="2" min="0" max="2" value="1" step="0.01"> <input type="range" data-node="${nodeId}" data-param="2" min="0" max="2" value="1" step="0.01">
</div> </div>
<div class="node-param"> <div class="node-param">
<label>Gain 4: <span id="g4-${nodeId}">1.0</span>x</label> <label>Gain 4: <span id="g4-${nodeId}">1.0</span>x</label>
<input type="range" class="node-slider" data-node="${nodeId}" data-param="3" min="0" max="2" value="1" step="0.01"> <input type="range" data-node="${nodeId}" data-param="3" min="0" max="2" value="1" step="0.01">
</div> </div>
</div> </div>
` `
@ -185,15 +185,15 @@ export const nodeTypes = {
<div class="node-title">Filter</div> <div class="node-title">Filter</div>
<div class="node-param"> <div class="node-param">
<label>Cutoff: <span id="cutoff-${nodeId}">1000</span> Hz</label> <label>Cutoff: <span id="cutoff-${nodeId}">1000</span> Hz</label>
<input type="range" class="node-slider" data-node="${nodeId}" data-param="0" min="20" max="20000" value="1000" step="1"> <input type="range" data-node="${nodeId}" data-param="0" min="20" max="20000" value="1000" step="1">
</div> </div>
<div class="node-param"> <div class="node-param">
<label>Resonance: <span id="res-${nodeId}">0.707</span></label> <label>Resonance: <span id="res-${nodeId}">0.707</span></label>
<input type="range" class="node-slider" data-node="${nodeId}" data-param="1" min="0.1" max="10" value="0.707" step="0.01"> <input type="range" data-node="${nodeId}" data-param="1" min="0.1" max="10" value="0.707" step="0.01">
</div> </div>
<div class="node-param"> <div class="node-param">
<label>Type: <span id="ftype-${nodeId}">LP</span></label> <label>Type: <span id="ftype-${nodeId}">LP</span></label>
<input type="range" class="node-slider" data-node="${nodeId}" data-param="2" min="0" max="1" value="0" step="1"> <input type="range" data-node="${nodeId}" data-param="2" min="0" max="1" value="0" step="1">
</div> </div>
</div> </div>
` `
@ -220,19 +220,19 @@ export const nodeTypes = {
<div class="node-title">ADSR</div> <div class="node-title">ADSR</div>
<div class="node-param"> <div class="node-param">
<label>A: <span id="a-${nodeId}">0.01</span>s</label> <label>A: <span id="a-${nodeId}">0.01</span>s</label>
<input type="range" class="node-slider" data-node="${nodeId}" data-param="0" min="0.001" max="5" value="0.01" step="0.001"> <input type="range" data-node="${nodeId}" data-param="0" min="0.001" max="5" value="0.01" step="0.001">
</div> </div>
<div class="node-param"> <div class="node-param">
<label>D: <span id="d-${nodeId}">0.1</span>s</label> <label>D: <span id="d-${nodeId}">0.1</span>s</label>
<input type="range" class="node-slider" data-node="${nodeId}" data-param="1" min="0.001" max="5" value="0.1" step="0.001"> <input type="range" data-node="${nodeId}" data-param="1" min="0.001" max="5" value="0.1" step="0.001">
</div> </div>
<div class="node-param"> <div class="node-param">
<label>S: <span id="s-${nodeId}">0.7</span></label> <label>S: <span id="s-${nodeId}">0.7</span></label>
<input type="range" class="node-slider" data-node="${nodeId}" data-param="2" min="0" max="1" value="0.7" step="0.01"> <input type="range" data-node="${nodeId}" data-param="2" min="0" max="1" value="0.7" step="0.01">
</div> </div>
<div class="node-param"> <div class="node-param">
<label>R: <span id="r-${nodeId}">0.2</span>s</label> <label>R: <span id="r-${nodeId}">0.2</span>s</label>
<input type="range" class="node-slider" data-node="${nodeId}" data-param="3" min="0.001" max="5" value="0.2" step="0.001"> <input type="range" data-node="${nodeId}" data-param="3" min="0.001" max="5" value="0.2" step="0.001">
</div> </div>
</div> </div>
` `
@ -295,11 +295,11 @@ export const nodeTypes = {
<div class="node-title">AudioCV</div> <div class="node-title">AudioCV</div>
<div class="node-param"> <div class="node-param">
<label>Attack: <span id="att-${nodeId}">0.01</span>s</label> <label>Attack: <span id="att-${nodeId}">0.01</span>s</label>
<input type="range" class="node-slider" data-node="${nodeId}" data-param="0" min="0.001" max="1.0" value="0.01" step="0.001"> <input type="range" data-node="${nodeId}" data-param="0" min="0.001" max="1.0" value="0.01" step="0.001">
</div> </div>
<div class="node-param"> <div class="node-param">
<label>Release: <span id="rel-${nodeId}">0.1</span>s</label> <label>Release: <span id="rel-${nodeId}">0.1</span>s</label>
<input type="range" class="node-slider" data-node="${nodeId}" data-param="1" min="0.001" max="1.0" value="0.1" step="0.001"> <input type="range" data-node="${nodeId}" data-param="1" min="0.001" max="1.0" value="0.1" step="0.001">
</div> </div>
</div> </div>
` `
@ -325,11 +325,11 @@ export const nodeTypes = {
<div class="node-title">Oscilloscope</div> <div class="node-title">Oscilloscope</div>
<div class="node-param"> <div class="node-param">
<label>Time: <span id="time-${nodeId}">100</span>ms</label> <label>Time: <span id="time-${nodeId}">100</span>ms</label>
<input type="range" class="node-slider" data-node="${nodeId}" data-param="0" min="10" max="1000" value="100" step="10"> <input type="range" data-node="${nodeId}" data-param="0" min="10" max="1000" value="100" step="10">
</div> </div>
<div class="node-param"> <div class="node-param">
<label>Trigger: <span id="trig-${nodeId}">Free</span></label> <label>Trigger: <span id="trig-${nodeId}">Free</span></label>
<input type="range" class="node-slider" data-node="${nodeId}" data-param="1" min="0" max="2" value="0" step="1"> <input type="range" data-node="${nodeId}" data-param="1" min="0" max="2" value="0" step="1">
</div> </div>
<div class="node-info" style="margin-top: 4px; font-size: 10px;">Pass-through monitor</div> <div class="node-info" style="margin-top: 4px; font-size: 10px;">Pass-through monitor</div>
</div> </div>
@ -355,7 +355,7 @@ export const nodeTypes = {
<div class="node-title">Voice Allocator</div> <div class="node-title">Voice Allocator</div>
<div class="node-param"> <div class="node-param">
<label>Voices: <span id="voices-${nodeId}">8</span></label> <label>Voices: <span id="voices-${nodeId}">8</span></label>
<input type="range" class="node-slider" data-node="${nodeId}" data-param="0" min="1" max="16" value="8" step="1"> <input type="range" data-node="${nodeId}" data-param="0" min="1" max="16" value="8" step="1">
</div> </div>
<div class="node-info" style="margin-top: 4px; font-size: 10px;">Double-click to edit</div> <div class="node-info" style="margin-top: 4px; font-size: 10px;">Double-click to edit</div>
</div> </div>
@ -443,15 +443,15 @@ export const nodeTypes = {
<div class="node-title">LFO</div> <div class="node-title">LFO</div>
<div class="node-param"> <div class="node-param">
<label>Wave: <span id="lfowave-${nodeId}">Sine</span></label> <label>Wave: <span id="lfowave-${nodeId}">Sine</span></label>
<input type="range" class="node-slider" data-node="${nodeId}" data-param="2" min="0" max="4" value="0" step="1"> <input type="range" data-node="${nodeId}" data-param="2" min="0" max="4" value="0" step="1">
</div> </div>
<div class="node-param"> <div class="node-param">
<label>Freq: <span id="lfofreq-${nodeId}">1.0</span> Hz</label> <label>Freq: <span id="lfofreq-${nodeId}">1.0</span> Hz</label>
<input type="range" class="node-slider" data-node="${nodeId}" data-param="0" min="0.01" max="20" value="1.0" step="0.01"> <input type="range" data-node="${nodeId}" data-param="0" min="0.01" max="20" value="1.0" step="0.01">
</div> </div>
<div class="node-param"> <div class="node-param">
<label>Depth: <span id="lfoamp-${nodeId}">1.0</span></label> <label>Depth: <span id="lfoamp-${nodeId}">1.0</span></label>
<input type="range" class="node-slider" data-node="${nodeId}" data-param="1" min="0" max="1" value="1.0" step="0.01"> <input type="range" data-node="${nodeId}" data-param="1" min="0" max="1" value="1.0" step="0.01">
</div> </div>
</div> </div>
` `
@ -474,11 +474,11 @@ export const nodeTypes = {
<div class="node-title">Noise</div> <div class="node-title">Noise</div>
<div class="node-param"> <div class="node-param">
<label>Color: <span id="noisecolor-${nodeId}">White</span></label> <label>Color: <span id="noisecolor-${nodeId}">White</span></label>
<input type="range" class="node-slider" data-node="${nodeId}" data-param="1" min="0" max="1" value="0" step="1"> <input type="range" data-node="${nodeId}" data-param="1" min="0" max="1" value="0" step="1">
</div> </div>
<div class="node-param"> <div class="node-param">
<label>Level: <span id="noiselevel-${nodeId}">0.5</span></label> <label>Level: <span id="noiselevel-${nodeId}">0.5</span></label>
<input type="range" class="node-slider" data-node="${nodeId}" data-param="0" min="0" max="1" value="0.5" step="0.01"> <input type="range" data-node="${nodeId}" data-param="0" min="0" max="1" value="0.5" step="0.01">
</div> </div>
</div> </div>
` `
@ -525,7 +525,362 @@ export const nodeTypes = {
<div class="node-title">Pan</div> <div class="node-title">Pan</div>
<div class="node-param"> <div class="node-param">
<label>Position: <span id="panpos-${nodeId}">0.0</span></label> <label>Position: <span id="panpos-${nodeId}">0.0</span></label>
<input type="range" class="node-slider" data-node="${nodeId}" data-param="0" min="-1" max="1" value="0" step="0.01"> <input type="range" data-node="${nodeId}" data-param="0" min="-1" max="1" value="0" step="0.01">
</div>
</div>
`
},
// Delay: delay-line effect node with feedback and wet/dry mix.
// UI descriptor only — the DSP lives in the backend's DelayNode.
Delay: {
name: 'Delay',
category: NodeCategory.EFFECT,
description: 'Stereo delay with feedback',
// Single audio input / single audio output port.
inputs: [
{ name: 'Audio In', type: SignalType.AUDIO, index: 0 }
],
outputs: [
{ name: 'Audio Out', type: SignalType.AUDIO, index: 0 }
],
// Parameter metadata: `id` is the numeric index matched by the sliders'
// data-param attributes below and sent to the engine on change.
parameters: [
{ id: 0, name: 'delay_time', label: 'Delay Time', min: 0.001, max: 2.0, default: 0.5, unit: 's' },
{ id: 1, name: 'feedback', label: 'Feedback', min: 0, max: 0.95, default: 0.5, unit: '' },
{ id: 2, name: 'wet_dry', label: 'Wet/Dry', min: 0, max: 1, default: 0.5, unit: '' }
],
// Renders the node body; span ids (e.g. "delaytime-<id>") presumably get
// updated by the shared slider handler — confirm against the editor code.
getHTML: (nodeId) => `
<div class="node-content">
<div class="node-title">Delay</div>
<div class="node-param">
<label>Time: <span id="delaytime-${nodeId}">0.5</span>s</label>
<input type="range" data-node="${nodeId}" data-param="0" min="0.001" max="2" value="0.5" step="0.001">
</div>
<div class="node-param">
<label>Feedback: <span id="feedback-${nodeId}">0.5</span></label>
<input type="range" data-node="${nodeId}" data-param="1" min="0" max="0.95" value="0.5" step="0.01">
</div>
<div class="node-param">
<label>Wet/Dry: <span id="wetdry-${nodeId}">0.5</span></label>
<input type="range" data-node="${nodeId}" data-param="2" min="0" max="1" value="0.5" step="0.01">
</div>
</div>
`
},
// Reverb: Schroeder-style reverb node descriptor (room size, damping, mix).
// UI descriptor only — the DSP lives in the backend's ReverbNode.
Reverb: {
name: 'Reverb',
category: NodeCategory.EFFECT,
description: 'Schroeder reverb with room size and damping',
inputs: [
{ name: 'Audio In', type: SignalType.AUDIO, index: 0 }
],
outputs: [
{ name: 'Audio Out', type: SignalType.AUDIO, index: 0 }
],
// `id` maps to the sliders' data-param attributes and the engine parameter index.
parameters: [
{ id: 0, name: 'room_size', label: 'Room Size', min: 0, max: 1, default: 0.5, unit: '' },
{ id: 1, name: 'damping', label: 'Damping', min: 0, max: 1, default: 0.5, unit: '' },
{ id: 2, name: 'wet_dry', label: 'Wet/Dry', min: 0, max: 1, default: 0.3, unit: '' }
],
// Node body markup; slider `value` attributes mirror the parameter defaults above.
getHTML: (nodeId) => `
<div class="node-content">
<div class="node-title">Reverb</div>
<div class="node-param">
<label>Room Size: <span id="roomsize-${nodeId}">0.5</span></label>
<input type="range" data-node="${nodeId}" data-param="0" min="0" max="1" value="0.5" step="0.01">
</div>
<div class="node-param">
<label>Damping: <span id="damping-${nodeId}">0.5</span></label>
<input type="range" data-node="${nodeId}" data-param="1" min="0" max="1" value="0.5" step="0.01">
</div>
<div class="node-param">
<label>Wet/Dry: <span id="wetdry-${nodeId}">0.3</span></label>
<input type="range" data-node="${nodeId}" data-param="2" min="0" max="1" value="0.3" step="0.01">
</div>
</div>
`
},
// Chorus: modulated-delay chorus effect node descriptor.
// UI descriptor only — the DSP lives in the backend's ChorusNode.
Chorus: {
name: 'Chorus',
category: NodeCategory.EFFECT,
description: 'Chorus effect with modulated delay',
inputs: [
{ name: 'Audio In', type: SignalType.AUDIO, index: 0 }
],
outputs: [
{name: 'Audio Out', type: SignalType.AUDIO, index: 0 }
],
// `id` maps to the sliders' data-param attributes and the engine parameter index.
parameters: [
{ id: 0, name: 'rate', label: 'Rate', min: 0.1, max: 5.0, default: 1.0, unit: 'Hz' },
{ id: 1, name: 'depth', label: 'Depth', min: 0, max: 1, default: 0.5, unit: '' },
{ id: 2, name: 'wet_dry', label: 'Wet/Dry', min: 0, max: 1, default: 0.5, unit: '' }
],
// Node body markup; slider `value` attributes mirror the parameter defaults above.
getHTML: (nodeId) => `
<div class="node-content">
<div class="node-title">Chorus</div>
<div class="node-param">
<label>Rate: <span id="chorusrate-${nodeId}">1.0</span>Hz</label>
<input type="range" data-node="${nodeId}" data-param="0" min="0.1" max="5" value="1.0" step="0.1">
</div>
<div class="node-param">
<label>Depth: <span id="chorusdepth-${nodeId}">0.5</span></label>
<input type="range" data-node="${nodeId}" data-param="1" min="0" max="1" value="0.5" step="0.01">
</div>
<div class="node-param">
<label>Wet/Dry: <span id="choruswetdry-${nodeId}">0.5</span></label>
<input type="range" data-node="${nodeId}" data-param="2" min="0" max="1" value="0.5" step="0.01">
</div>
</div>
`
},
// Flanger: short modulated delay with (bipolar) feedback.
// UI descriptor only — the DSP lives in the backend's FlangerNode.
Flanger: {
name: 'Flanger',
category: NodeCategory.EFFECT,
description: 'Flanger effect with feedback',
inputs: [
{ name: 'Audio In', type: SignalType.AUDIO, index: 0 }
],
outputs: [
{ name: 'Audio Out', type: SignalType.AUDIO, index: 0 }
],
// `id` maps to the sliders' data-param attributes and the engine parameter
// index. Feedback is signed (-0.95..0.95) to allow inverted regeneration.
parameters: [
{ id: 0, name: 'rate', label: 'Rate', min: 0.1, max: 10.0, default: 0.5, unit: 'Hz' },
{ id: 1, name: 'depth', label: 'Depth', min: 0, max: 1, default: 0.7, unit: '' },
{ id: 2, name: 'feedback', label: 'Feedback', min: -0.95, max: 0.95, default: 0.5, unit: '' },
{ id: 3, name: 'wet_dry', label: 'Wet/Dry', min: 0, max: 1, default: 0.5, unit: '' }
],
// Node body markup; slider `value` attributes mirror the parameter defaults above.
getHTML: (nodeId) => `
<div class="node-content">
<div class="node-title">Flanger</div>
<div class="node-param">
<label>Rate: <span id="flangerrate-${nodeId}">0.5</span>Hz</label>
<input type="range" data-node="${nodeId}" data-param="0" min="0.1" max="10" value="0.5" step="0.1">
</div>
<div class="node-param">
<label>Depth: <span id="flangerdepth-${nodeId}">0.7</span></label>
<input type="range" data-node="${nodeId}" data-param="1" min="0" max="1" value="0.7" step="0.01">
</div>
<div class="node-param">
<label>Feedback: <span id="flangerfeedback-${nodeId}">0.5</span></label>
<input type="range" data-node="${nodeId}" data-param="2" min="-0.95" max="0.95" value="0.5" step="0.01">
</div>
<div class="node-param">
<label>Wet/Dry: <span id="flangerwetdry-${nodeId}">0.5</span></label>
<input type="range" data-node="${nodeId}" data-param="3" min="0" max="1" value="0.5" step="0.01">
</div>
</div>
`
},
// FMSynth: 4-operator FM synthesizer node descriptor.
// Driven by V/Oct + Gate CV inputs; algorithm selects operator routing.
FMSynth: {
name: 'FM Synth',
category: NodeCategory.GENERATOR,
description: '4-operator FM synthesizer',
// CV inputs: pitch (volt-per-octave convention implied by the name) and gate.
inputs: [
{ name: 'V/Oct', type: SignalType.CV, index: 0 },
{ name: 'Gate', type: SignalType.CV, index: 1 }
],
outputs: [
{ name: 'Audio Out', type: SignalType.AUDIO, index: 0 }
],
// Param 0 selects one of 4 routing algorithms; params 1-8 are per-operator
// frequency ratio / output level pairs (op1..op4).
parameters: [
{ id: 0, name: 'algorithm', label: 'Algorithm', min: 0, max: 3, default: 0, unit: '' },
{ id: 1, name: 'op1_ratio', label: 'Op1 Ratio', min: 0.25, max: 16, default: 1.0, unit: '' },
{ id: 2, name: 'op1_level', label: 'Op1 Level', min: 0, max: 1, default: 1.0, unit: '' },
{ id: 3, name: 'op2_ratio', label: 'Op2 Ratio', min: 0.25, max: 16, default: 2.0, unit: '' },
{ id: 4, name: 'op2_level', label: 'Op2 Level', min: 0, max: 1, default: 0.8, unit: '' },
{ id: 5, name: 'op3_ratio', label: 'Op3 Ratio', min: 0.25, max: 16, default: 3.0, unit: '' },
{ id: 6, name: 'op3_level', label: 'Op3 Level', min: 0, max: 1, default: 0.6, unit: '' },
{ id: 7, name: 'op4_ratio', label: 'Op4 Ratio', min: 0.25, max: 16, default: 4.0, unit: '' },
{ id: 8, name: 'op4_level', label: 'Op4 Level', min: 0, max: 1, default: 0.4, unit: '' }
],
// Algorithm uses a <select> (not a slider); the option values match param 0's range.
getHTML: (nodeId) => `
<div class="node-content">
<div class="node-title">FM Synth</div>
<div class="node-param">
<label>Algorithm: <span id="fmalgo-${nodeId}">0</span></label>
<select data-node="${nodeId}" data-param="0" style="width: 100%; padding: 2px;">
<option value="0">Stack (1234)</option>
<option value="1">Parallel</option>
<option value="2">Bell (12, 34)</option>
<option value="3">Dual (12, 34)</option>
</select>
</div>
<div style="font-size: 10px; margin-top: 4px; font-weight: bold;">Operator 1</div>
<div class="node-param">
<label>Ratio: <span id="op1ratio-${nodeId}">1.0</span></label>
<input type="range" data-node="${nodeId}" data-param="1" min="0.25" max="16" value="1.0" step="0.25">
</div>
<div class="node-param">
<label>Level: <span id="op1level-${nodeId}">1.0</span></label>
<input type="range" data-node="${nodeId}" data-param="2" min="0" max="1" value="1.0" step="0.01">
</div>
<div style="font-size: 10px; margin-top: 4px; font-weight: bold;">Operator 2</div>
<div class="node-param">
<label>Ratio: <span id="op2ratio-${nodeId}">2.0</span></label>
<input type="range" data-node="${nodeId}" data-param="3" min="0.25" max="16" value="2.0" step="0.25">
</div>
<div class="node-param">
<label>Level: <span id="op2level-${nodeId}">0.8</span></label>
<input type="range" data-node="${nodeId}" data-param="4" min="0" max="1" value="0.8" step="0.01">
</div>
<div style="font-size: 10px; margin-top: 4px; font-weight: bold;">Operator 3</div>
<div class="node-param">
<label>Ratio: <span id="op3ratio-${nodeId}">3.0</span></label>
<input type="range" data-node="${nodeId}" data-param="5" min="0.25" max="16" value="3.0" step="0.25">
</div>
<div class="node-param">
<label>Level: <span id="op3level-${nodeId}">0.6</span></label>
<input type="range" data-node="${nodeId}" data-param="6" min="0" max="1" value="0.6" step="0.01">
</div>
<div style="font-size: 10px; margin-top: 4px; font-weight: bold;">Operator 4</div>
<div class="node-param">
<label>Ratio: <span id="op4ratio-${nodeId}">4.0</span></label>
<input type="range" data-node="${nodeId}" data-param="7" min="0.25" max="16" value="4.0" step="0.25">
</div>
<div class="node-param">
<label>Level: <span id="op4level-${nodeId}">0.4</span></label>
<input type="range" data-node="${nodeId}" data-param="8" min="0" max="1" value="0.4" step="0.01">
</div>
</div>
`
},
// WavetableOscillator: oscillator node with 8 preset wavetables.
// Pitch comes from the V/Oct CV input; fine tune and position refine the sound.
WavetableOscillator: {
name: 'Wavetable',
category: NodeCategory.GENERATOR,
description: 'Wavetable oscillator with preset waveforms',
inputs: [
{ name: 'V/Oct', type: SignalType.CV, index: 0 }
],
outputs: [
{ name: 'Audio Out', type: SignalType.AUDIO, index: 0 }
],
// Param 0 selects the table (0-7, matching the <option> values below);
// param 1 is fine tune, param 2 a position control (semantics are backend-defined).
parameters: [
{ id: 0, name: 'wavetable', label: 'Wavetable', min: 0, max: 7, default: 0, unit: '' },
{ id: 1, name: 'fine_tune', label: 'Fine Tune', min: -1, max: 1, default: 0, unit: '' },
{ id: 2, name: 'position', label: 'Position', min: 0, max: 1, default: 0, unit: '' }
],
getHTML: (nodeId) => `
<div class="node-content">
<div class="node-title">Wavetable</div>
<div class="node-param">
<label>Waveform: <span id="wavetable-${nodeId}">Sine</span></label>
<select data-node="${nodeId}" data-param="0" style="width: 100%; padding: 2px;">
<option value="0">Sine</option>
<option value="1">Saw</option>
<option value="2">Square</option>
<option value="3">Triangle</option>
<option value="4">PWM</option>
<option value="5">Harmonic</option>
<option value="6">Inharmonic</option>
<option value="7">Digital</option>
</select>
</div>
<div class="node-param">
<label>Fine: <span id="finetune-${nodeId}">0.00</span></label>
<input type="range" data-node="${nodeId}" data-param="1" min="-1" max="1" value="0" step="0.01">
</div>
<div class="node-param">
<label>Position: <span id="position-${nodeId}">0.00</span></label>
<input type="range" data-node="${nodeId}" data-param="2" min="0" max="1" value="0" step="0.01">
</div>
</div>
`
},
// SimpleSampler: single-sample playback node, pitched via V/Oct and
// triggered via Gate. Samples are loaded through the "Load Sample" button
// (presumably wired up by a click handler elsewhere — confirm).
SimpleSampler: {
name: 'Sampler',
category: NodeCategory.GENERATOR,
description: 'Simple sample playback with pitch shifting',
inputs: [
{ name: 'V/Oct', type: SignalType.CV, index: 0 },
{ name: 'Gate', type: SignalType.CV, index: 1 }
],
outputs: [
{ name: 'Audio Out', type: SignalType.AUDIO, index: 0 }
],
// Loop (param 1) is a 0/1 toggle, rendered as a checkbox rather than a slider.
parameters: [
{ id: 0, name: 'gain', label: 'Gain', min: 0, max: 2, default: 1.0, unit: '' },
{ id: 1, name: 'loop', label: 'Loop', min: 0, max: 1, default: 0, unit: '' },
{ id: 2, name: 'pitch_shift', label: 'Pitch Shift', min: -12, max: 12, default: 0, unit: 'semi' }
],
getHTML: (nodeId) => `
<div class="node-content">
<div class="node-title">Sampler</div>
<div class="node-param">
<label>Gain: <span id="gain-${nodeId}">1.00</span></label>
<input type="range" data-node="${nodeId}" data-param="0" min="0" max="2" value="1.0" step="0.01">
</div>
<div class="node-param">
<label>Loop: <span id="loop-${nodeId}">Off</span></label>
<input type="checkbox" class="node-checkbox" data-node="${nodeId}" data-param="1">
</div>
<div class="node-param">
<label>Pitch: <span id="pitch-${nodeId}">0</span> semi</label>
<input type="range" data-node="${nodeId}" data-param="2" min="-12" max="12" value="0" step="1">
</div>
<div class="node-param" style="margin-top: 4px;">
<button class="load-sample-btn" data-node="${nodeId}" style="width: 100%; padding: 4px; font-size: 10px;">Load Sample</button>
</div>
<div id="sample-info-${nodeId}" style="font-size: 9px; color: #888; margin-top: 2px; text-align: center;">No sample loaded</div>
</div>
`
},
// MultiSampler: multi-layer sample instrument driven by MIDI (not CV).
// Layers are added via the "Add Sample Layer" button and listed in the table
// below (presumably populated by handlers elsewhere — confirm).
MultiSampler: {
name: 'Multi Sampler',
category: NodeCategory.GENERATOR,
description: 'Multi-sample instrument with velocity layers and key zones',
// Unlike the other generators here, input is a MIDI stream.
inputs: [
{ name: 'MIDI In', type: SignalType.MIDI, index: 0 }
],
outputs: [
{ name: 'Audio Out', type: SignalType.AUDIO, index: 0 }
],
// Global controls only; per-layer key/velocity zones live in the layer table.
parameters: [
{ id: 0, name: 'gain', label: 'Gain', min: 0, max: 2, default: 1.0, unit: '' },
{ id: 1, name: 'attack', label: 'Attack', min: 0.001, max: 1, default: 0.01, unit: 's' },
{ id: 2, name: 'release', label: 'Release', min: 0.01, max: 5, default: 0.1, unit: 's' },
{ id: 3, name: 'transpose', label: 'Transpose', min: -24, max: 24, default: 0, unit: 'semi' }
],
getHTML: (nodeId) => `
<div class="node-content">
<div class="node-title">Multi Sampler</div>
<div class="node-param">
<label>Gain: <span id="gain-${nodeId}">1.00</span></label>
<input type="range" data-node="${nodeId}" data-param="0" min="0" max="2" value="1.0" step="0.01">
</div>
<div class="node-param">
<label>Attack: <span id="attack-${nodeId}">0.01</span>s</label>
<input type="range" data-node="${nodeId}" data-param="1" min="0.001" max="1" value="0.01" step="0.001">
</div>
<div class="node-param">
<label>Release: <span id="release-${nodeId}">0.10</span>s</label>
<input type="range" data-node="${nodeId}" data-param="2" min="0.01" max="5" value="0.1" step="0.01">
</div>
<div class="node-param">
<label>Transpose: <span id="transpose-${nodeId}">0</span> semi</label>
<input type="range" data-node="${nodeId}" data-param="3" min="-24" max="24" value="0" step="1">
</div>
<div class="node-param" style="margin-top: 4px;">
<button class="add-layer-btn" data-node="${nodeId}" style="width: 100%; padding: 4px; font-size: 10px;">Add Sample Layer</button>
</div>
<div id="sample-layers-container-${nodeId}" class="sample-layers-container">
<table id="sample-layers-table-${nodeId}" class="sample-layers-table">
<thead>
<tr>
<th>File</th>
<th>Range</th>
<th>Root</th>
<th>Vel</th>
<th></th>
</tr>
</thead>
<tbody id="sample-layers-list-${nodeId}">
<tr><td colspan="5" class="sample-layers-empty">No layers loaded</td></tr>
</tbody>
</table>
</div> </div>
</div> </div>
` `

229
src/startscreen.js Normal file
View File

@ -0,0 +1,229 @@
// Tauri path utilities; basename() derives display names for recent files.
// NOTE(review): dirname/join are not used in the visible code — confirm they
// are needed, otherwise drop them from the destructuring.
const { basename, dirname, join } = window.__TAURI__.path;
// Root element of the start-screen overlay (created by createStartScreen).
let startScreenContainer;
// Callback supplied by createStartScreen(); invoked when the user picks an action.
let onProjectStartCallback;
/**
 * Builds and mounts the start-screen UI onto document.body.
 * @param {Function} callback - Invoked when the user selects a project type or
 *   opens a file. Receives: { type: 'new'|'reopen'|'recent', projectFocus?:
 *   string, filePath?: string, width?: number, height?: number, fps?: number }
 */
export function createStartScreen(callback) {
    onProjectStartCallback = callback;

    const screen = document.createElement('div');
    screen.id = 'startScreen';
    screen.className = 'start-screen';

    // Welcome heading at the top of the overlay.
    const heading = document.createElement('h1');
    heading.className = 'start-screen-title';
    heading.textContent = 'Welcome to Lightningbeam!';
    screen.appendChild(heading);

    // Two-column layout: recent files (left) and new-project picker (right).
    const content = document.createElement('div');
    content.className = 'start-screen-content';
    content.appendChild(createLeftPanel());
    content.appendChild(createRightPanel());
    screen.appendChild(content);

    startScreenContainer = screen;
    document.body.appendChild(screen);
}
// Builds the left panel: a "reopen last session" entry plus an (initially
// empty) recent-projects list; both are filled in later by updateStartScreen().
function createLeftPanel() {
    const panel = document.createElement('div');
    panel.className = 'start-screen-left-panel';

    // --- Reopen-last-session section ---
    const reopenSection = document.createElement('div');
    reopenSection.className = 'start-screen-section';

    const reopenHeading = document.createElement('h3');
    reopenHeading.className = 'start-screen-section-title';
    reopenHeading.textContent = 'Reopen last session';
    reopenSection.appendChild(reopenHeading);

    const lastSession = document.createElement('div');
    lastSession.id = 'lastSessionFile';
    lastSession.className = 'start-screen-file-item';
    lastSession.textContent = 'No recent session';
    reopenSection.appendChild(lastSession);

    panel.appendChild(reopenSection);

    // --- Recent-projects section ---
    const recentSection = document.createElement('div');
    recentSection.className = 'start-screen-section';

    const recentHeading = document.createElement('h3');
    recentHeading.className = 'start-screen-section-title';
    recentHeading.textContent = 'Recent projects';
    recentSection.appendChild(recentHeading);

    const recentList = document.createElement('ul');
    recentList.id = 'recentProjectsList';
    recentList.className = 'start-screen-recent-list';
    recentSection.appendChild(recentList);

    panel.appendChild(recentSection);
    return panel;
}
// Builds the right panel: heading plus a grid of selectable project-focus cards.
function createRightPanel() {
    const panel = document.createElement('div');
    panel.className = 'start-screen-right-panel';

    const heading = document.createElement('h2');
    heading.className = 'start-screen-heading';
    heading.textContent = 'Create a new project';
    panel.appendChild(heading);

    // Descriptor per focus: display name, value passed to the callback,
    // inline SVG icon, and a short description.
    const focusTypes = [
        {
            name: 'Animation',
            value: 'animation',
            iconSvg: '<svg width="80" height="80" viewBox="0 0 100 100" xmlns="http://www.w3.org/2000/svg"><path d="M20,80 Q30,60 50,50 T80,20" stroke="currentColor" stroke-width="4" fill="none" stroke-linecap="round"/><circle cx="30" cy="70" r="8" fill="currentColor"/></svg>',
            description: 'Drawing tools and timeline'
        },
        {
            name: 'Music',
            value: 'audioDaw',
            iconSvg: '<svg width="80" height="80" viewBox="0 0 100 100" xmlns="http://www.w3.org/2000/svg"><rect x="10" y="20" width="18" height="60" fill="none" stroke="currentColor" stroke-width="3"/><rect x="30" y="20" width="18" height="60" fill="none" stroke="currentColor" stroke-width="3"/><rect x="50" y="20" width="18" height="60" fill="none" stroke="currentColor" stroke-width="3"/><rect x="70" y="20" width="18" height="60" fill="none" stroke="currentColor" stroke-width="3"/><rect x="24" y="20" width="12" height="35" fill="currentColor"/><rect x="44" y="20" width="12" height="35" fill="currentColor"/><rect x="74" y="20" width="12" height="35" fill="currentColor"/></svg>',
            description: 'Audio tracks and mixer'
        },
        {
            name: 'Video editing',
            value: 'videoEditing',
            iconSvg: '<svg width="80" height="80" viewBox="0 0 100 100" xmlns="http://www.w3.org/2000/svg"><rect x="15" y="40" width="70" height="45" fill="currentColor" rx="4"/><rect x="15" y="25" width="70" height="15" fill="none" stroke="currentColor" stroke-width="3" rx="4"/><rect x="20" y="25" width="7" height="15" fill="currentColor"/><rect x="35" y="25" width="7" height="15" fill="currentColor"/><rect x="50" y="25" width="7" height="15" fill="currentColor"/><rect x="65" y="25" width="7" height="15" fill="currentColor"/></svg>',
            description: 'Clip timeline and effects'
        }
    ];

    const grid = document.createElement('div');
    grid.className = 'start-screen-focus-grid';
    for (const focus of focusTypes) {
        grid.appendChild(createFocusCard(focus));
    }
    panel.appendChild(grid);

    return panel;
}
// Builds one clickable project-focus card (icon + label). Clicking it starts
// a new project of that focus with default canvas settings (800x600 @ 24fps).
function createFocusCard(focus) {
    const card = document.createElement('div');
    card.className = 'focus-card';

    // Icon: inline SVG markup supplied by the focus descriptor.
    const iconContainer = document.createElement('div');
    iconContainer.className = 'focus-card-icon-container';
    const icon = document.createElement('div');
    icon.className = 'focus-card-icon';
    icon.innerHTML = focus.iconSvg;
    iconContainer.appendChild(icon);
    card.appendChild(iconContainer);

    // Focus name shown beneath the icon.
    const label = document.createElement('div');
    label.className = 'focus-card-label';
    label.textContent = focus.name;
    card.appendChild(label);

    card.addEventListener('click', () => {
        onProjectStartCallback({
            type: 'new',
            projectFocus: focus.value,
            width: 800,
            height: 600,
            fps: 24
        });
    });

    return card;
}
/**
 * Refreshes the start screen from the user's recent-file history.
 *
 * The most recent file becomes the "last session" entry; the next four files
 * populate the recent-projects list.
 *
 * @param {Object} config - App config; `config.recentFiles` is an ordered
 *   array of file paths, most recent first. A missing/empty config clears
 *   the lists instead of throwing.
 */
export async function updateStartScreen(config) {
    if (!startScreenContainer) return;
    // Robustness: tolerate a null/partial config (original threw on
    // `config.recentFiles` when config was undefined).
    const recentFiles = (config && config.recentFiles) || [];

    // Update the "last session" entry.
    const lastSessionDiv = document.getElementById('lastSessionFile');
    if (lastSessionDiv) {
        if (recentFiles.length > 0) {
            const lastFile = recentFiles[0];
            lastSessionDiv.textContent = await basename(lastFile);
            lastSessionDiv.onclick = () => {
                onProjectStartCallback({
                    type: 'reopen',
                    filePath: lastFile
                });
            };
            lastSessionDiv.classList.add('clickable');
        } else {
            lastSessionDiv.textContent = 'No recent session';
            lastSessionDiv.classList.remove('clickable');
            lastSessionDiv.onclick = null;
        }
    }

    // Update the recent-projects list: up to 4 files, excluding the most
    // recent one (already shown as the last session).
    const recentList = document.getElementById('recentProjectsList');
    if (recentList) {
        recentList.innerHTML = '';
        const rest = recentFiles.slice(1, 5);
        // Resolve all display names concurrently instead of one await per file.
        const names = await Promise.all(rest.map((p) => basename(p)));
        rest.forEach((filePath, i) => {
            const listItem = document.createElement('li');
            listItem.textContent = names[i];
            listItem.className = 'start-screen-file-item clickable';
            listItem.onclick = () => {
                onProjectStartCallback({
                    type: 'recent',
                    filePath: filePath
                });
            };
            recentList.appendChild(listItem);
        });
    }
}
/**
 * Makes the start screen visible (restores its flex layout).
 */
export function showStartScreen() {
    if (!startScreenContainer) return;
    startScreenContainer.style.display = 'flex';
}
/**
 * Hides the start screen (e.g. once a project is opened or created).
 */
export function hideStartScreen() {
    if (!startScreenContainer) return;
    startScreenContainer.style.display = 'none';
}

File diff suppressed because it is too large Load Diff