From 2e9699b524ede8bd48f413f102dd794a9d126c76 Mon Sep 17 00:00:00 2001 From: Skyler Lehmkuhl Date: Tue, 28 Oct 2025 01:32:51 -0400 Subject: [PATCH] Add sampler nodes and startup screen --- daw-backend/Cargo.lock | 7 + daw-backend/Cargo.toml | 1 + daw-backend/src/audio/engine.rs | 128 ++++ daw-backend/src/audio/mod.rs | 2 + daw-backend/src/audio/node_graph/graph.rs | 224 +++++- .../src/audio/node_graph/nodes/chorus.rs | 234 ++++++ .../src/audio/node_graph/nodes/delay.rs | 211 ++++++ .../src/audio/node_graph/nodes/flanger.rs | 243 ++++++ .../src/audio/node_graph/nodes/fm_synth.rs | 303 ++++++++ .../src/audio/node_graph/nodes/midi_to_cv.rs | 13 +- daw-backend/src/audio/node_graph/nodes/mod.rs | 16 + .../audio/node_graph/nodes/multi_sampler.rs | 566 ++++++++++++++ .../src/audio/node_graph/nodes/oscillator.rs | 13 +- .../src/audio/node_graph/nodes/reverb.rs | 313 ++++++++ .../audio/node_graph/nodes/simple_sampler.rs | 278 +++++++ .../node_graph/nodes/wavetable_oscillator.rs | 286 ++++++++ daw-backend/src/audio/node_graph/preset.rs | 43 ++ daw-backend/src/audio/sample_loader.rs | 316 ++++++++ daw-backend/src/command/types.rs | 9 + src-tauri/Cargo.lock | 1 + src-tauri/src/audio.rs | 187 +++++ src-tauri/src/lib.rs | 5 + src/assets/focus-animation.svg | 5 + src/assets/focus-music.svg | 10 + src/assets/focus-video.svg | 9 + src/main.js | 447 ++++++++++- src/models/graphics-object.js | 29 +- src/nodeTypes.js | 407 +++++++++- src/startscreen.js | 229 ++++++ src/styles.css | 692 ++++++++++++++---- 30 files changed, 5012 insertions(+), 215 deletions(-) create mode 100644 daw-backend/src/audio/node_graph/nodes/chorus.rs create mode 100644 daw-backend/src/audio/node_graph/nodes/delay.rs create mode 100644 daw-backend/src/audio/node_graph/nodes/flanger.rs create mode 100644 daw-backend/src/audio/node_graph/nodes/fm_synth.rs create mode 100644 daw-backend/src/audio/node_graph/nodes/multi_sampler.rs create mode 100644 daw-backend/src/audio/node_graph/nodes/reverb.rs create mode 
100644 daw-backend/src/audio/node_graph/nodes/simple_sampler.rs create mode 100644 daw-backend/src/audio/node_graph/nodes/wavetable_oscillator.rs create mode 100644 daw-backend/src/audio/sample_loader.rs create mode 100644 src/assets/focus-animation.svg create mode 100644 src/assets/focus-music.svg create mode 100644 src/assets/focus-video.svg create mode 100644 src/startscreen.js diff --git a/daw-backend/Cargo.lock b/daw-backend/Cargo.lock index 85561ce..df5b73b 100644 --- a/daw-backend/Cargo.lock +++ b/daw-backend/Cargo.lock @@ -51,6 +51,12 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + [[package]] name = "bindgen" version = "0.72.1" @@ -398,6 +404,7 @@ dependencies = [ name = "daw-backend" version = "0.1.0" dependencies = [ + "base64", "cpal", "crossterm", "dasp_envelope", diff --git a/daw-backend/Cargo.toml b/daw-backend/Cargo.toml index 6e33395..7d4a1a5 100644 --- a/daw-backend/Cargo.toml +++ b/daw-backend/Cargo.toml @@ -12,6 +12,7 @@ serde = { version = "1.0", features = ["derive"] } ratatui = "0.26" crossterm = "0.27" rand = "0.8" +base64 = "0.22" # Node-based audio graph dependencies dasp_graph = "0.11" diff --git a/daw-backend/src/audio/engine.rs b/daw-backend/src/audio/engine.rs index 0201f68..4eab893 100644 --- a/daw-backend/src/audio/engine.rs +++ b/daw-backend/src/audio/engine.rs @@ -743,6 +743,14 @@ impl Engine { "NoiseGenerator" => Box::new(NoiseGeneratorNode::new("Noise".to_string())), "Splitter" => Box::new(SplitterNode::new("Splitter".to_string())), "Pan" => Box::new(PanNode::new("Pan".to_string())), + "Delay" => Box::new(DelayNode::new("Delay".to_string())), + "Reverb" => Box::new(ReverbNode::new("Reverb".to_string())), + "Chorus" => 
Box::new(ChorusNode::new("Chorus".to_string())), + "Flanger" => Box::new(FlangerNode::new("Flanger".to_string())), + "FMSynth" => Box::new(FMSynthNode::new("FM Synth".to_string())), + "WavetableOscillator" => Box::new(WavetableOscillatorNode::new("Wavetable".to_string())), + "SimpleSampler" => Box::new(SimpleSamplerNode::new("Sampler".to_string())), + "MultiSampler" => Box::new(MultiSamplerNode::new("Multi Sampler".to_string())), "MidiInput" => Box::new(MidiInputNode::new("MIDI Input".to_string())), "MidiToCV" => Box::new(MidiToCVNode::new("MIDI→CV".to_string())), "AudioToCV" => Box::new(AudioToCVNode::new("Audio→CV".to_string())), @@ -794,6 +802,14 @@ impl Engine { "NoiseGenerator" => Box::new(NoiseGeneratorNode::new("Noise".to_string())), "Splitter" => Box::new(SplitterNode::new("Splitter".to_string())), "Pan" => Box::new(PanNode::new("Pan".to_string())), + "Delay" => Box::new(DelayNode::new("Delay".to_string())), + "Reverb" => Box::new(ReverbNode::new("Reverb".to_string())), + "Chorus" => Box::new(ChorusNode::new("Chorus".to_string())), + "Flanger" => Box::new(FlangerNode::new("Flanger".to_string())), + "FMSynth" => Box::new(FMSynthNode::new("FM Synth".to_string())), + "WavetableOscillator" => Box::new(WavetableOscillatorNode::new("Wavetable".to_string())), + "SimpleSampler" => Box::new(SimpleSamplerNode::new("Sampler".to_string())), + "MultiSampler" => Box::new(MultiSamplerNode::new("Multi Sampler".to_string())), "MidiInput" => Box::new(MidiInputNode::new("MIDI Input".to_string())), "MidiToCV" => Box::new(MidiToCVNode::new("MIDI→CV".to_string())), "AudioToCV" => Box::new(AudioToCVNode::new("Audio→CV".to_string())), @@ -1013,6 +1029,98 @@ impl Engine { } } } + + Command::SamplerLoadSample(track_id, node_id, file_path) => { + use crate::audio::node_graph::nodes::SimpleSamplerNode; + + if let Some(TrackNode::Midi(track)) = self.project.get_track_mut(track_id) { + if let Some(ref mut graph) = track.instrument_graph { + let node_idx = NodeIndex::new(node_id as 
usize); + + if let Some(graph_node) = graph.get_graph_node_mut(node_idx) { + // Downcast to SimpleSamplerNode + let node_ptr = &mut *graph_node.node as *mut dyn crate::audio::node_graph::AudioNode; + let node_ptr = node_ptr as *mut SimpleSamplerNode; + + unsafe { + let sampler_node = &mut *node_ptr; + if let Err(e) = sampler_node.load_sample_from_file(&file_path) { + eprintln!("Failed to load sample: {}", e); + } + } + } + } + } + } + + Command::MultiSamplerAddLayer(track_id, node_id, file_path, key_min, key_max, root_key, velocity_min, velocity_max) => { + use crate::audio::node_graph::nodes::MultiSamplerNode; + + if let Some(TrackNode::Midi(track)) = self.project.get_track_mut(track_id) { + if let Some(ref mut graph) = track.instrument_graph { + let node_idx = NodeIndex::new(node_id as usize); + + if let Some(graph_node) = graph.get_graph_node_mut(node_idx) { + // Downcast to MultiSamplerNode + let node_ptr = &mut *graph_node.node as *mut dyn crate::audio::node_graph::AudioNode; + let node_ptr = node_ptr as *mut MultiSamplerNode; + + unsafe { + let multi_sampler_node = &mut *node_ptr; + if let Err(e) = multi_sampler_node.load_layer_from_file(&file_path, key_min, key_max, root_key, velocity_min, velocity_max) { + eprintln!("Failed to add sample layer: {}", e); + } + } + } + } + } + } + + Command::MultiSamplerUpdateLayer(track_id, node_id, layer_index, key_min, key_max, root_key, velocity_min, velocity_max) => { + use crate::audio::node_graph::nodes::MultiSamplerNode; + + if let Some(TrackNode::Midi(track)) = self.project.get_track_mut(track_id) { + if let Some(ref mut graph) = track.instrument_graph { + let node_idx = NodeIndex::new(node_id as usize); + + if let Some(graph_node) = graph.get_graph_node_mut(node_idx) { + // Downcast to MultiSamplerNode + let node_ptr = &mut *graph_node.node as *mut dyn crate::audio::node_graph::AudioNode; + let node_ptr = node_ptr as *mut MultiSamplerNode; + + unsafe { + let multi_sampler_node = &mut *node_ptr; + if let Err(e) = 
multi_sampler_node.update_layer(layer_index, key_min, key_max, root_key, velocity_min, velocity_max) { + eprintln!("Failed to update sample layer: {}", e); + } + } + } + } + } + } + + Command::MultiSamplerRemoveLayer(track_id, node_id, layer_index) => { + use crate::audio::node_graph::nodes::MultiSamplerNode; + + if let Some(TrackNode::Midi(track)) = self.project.get_track_mut(track_id) { + if let Some(ref mut graph) = track.instrument_graph { + let node_idx = NodeIndex::new(node_id as usize); + + if let Some(graph_node) = graph.get_graph_node_mut(node_idx) { + // Downcast to MultiSamplerNode + let node_ptr = &mut *graph_node.node as *mut dyn crate::audio::node_graph::AudioNode; + let node_ptr = node_ptr as *mut MultiSamplerNode; + + unsafe { + let multi_sampler_node = &mut *node_ptr; + if let Err(e) = multi_sampler_node.remove_layer(layer_index) { + eprintln!("Failed to remove sample layer: {}", e); + } + } + } + } + } + } } } @@ -1498,4 +1606,24 @@ impl EngineController { pub fn graph_save_template_preset(&mut self, track_id: TrackId, voice_allocator_id: u32, preset_path: String, preset_name: String) { let _ = self.command_tx.push(Command::GraphSaveTemplatePreset(track_id, voice_allocator_id, preset_path, preset_name)); } + + /// Load a sample into a SimpleSampler node + pub fn sampler_load_sample(&mut self, track_id: TrackId, node_id: u32, file_path: String) { + let _ = self.command_tx.push(Command::SamplerLoadSample(track_id, node_id, file_path)); + } + + /// Add a sample layer to a MultiSampler node + pub fn multi_sampler_add_layer(&mut self, track_id: TrackId, node_id: u32, file_path: String, key_min: u8, key_max: u8, root_key: u8, velocity_min: u8, velocity_max: u8) { + let _ = self.command_tx.push(Command::MultiSamplerAddLayer(track_id, node_id, file_path, key_min, key_max, root_key, velocity_min, velocity_max)); + } + + /// Update a MultiSampler layer's configuration + pub fn multi_sampler_update_layer(&mut self, track_id: TrackId, node_id: u32, 
layer_index: usize, key_min: u8, key_max: u8, root_key: u8, velocity_min: u8, velocity_max: u8) { + let _ = self.command_tx.push(Command::MultiSamplerUpdateLayer(track_id, node_id, layer_index, key_min, key_max, root_key, velocity_min, velocity_max)); + } + + /// Remove a layer from a MultiSampler node + pub fn multi_sampler_remove_layer(&mut self, track_id: TrackId, node_id: u32, layer_index: usize) { + let _ = self.command_tx.push(Command::MultiSamplerRemoveLayer(track_id, node_id, layer_index)); + } } diff --git a/daw-backend/src/audio/mod.rs b/daw-backend/src/audio/mod.rs index 8c7b6bc..a39620b 100644 --- a/daw-backend/src/audio/mod.rs +++ b/daw-backend/src/audio/mod.rs @@ -7,6 +7,7 @@ pub mod node_graph; pub mod pool; pub mod project; pub mod recording; +pub mod sample_loader; pub mod track; pub use automation::{AutomationLane, AutomationLaneId, AutomationPoint, CurveType, ParameterId}; @@ -17,4 +18,5 @@ pub use midi::{MidiClip, MidiClipId, MidiEvent}; pub use pool::{AudioFile as PoolAudioFile, AudioPool}; pub use project::Project; pub use recording::RecordingState; +pub use sample_loader::{load_audio_file, SampleData}; pub use track::{AudioTrack, Metatrack, MidiTrack, RenderContext, Track, TrackId, TrackNode}; diff --git a/daw-backend/src/audio/node_graph/graph.rs b/daw-backend/src/audio/node_graph/graph.rs index 031244d..866053f 100644 --- a/daw-backend/src/audio/node_graph/graph.rs +++ b/daw-backend/src/audio/node_graph/graph.rs @@ -107,6 +107,11 @@ impl InstrumentGraph { self.graph.add_node(graph_node) } + /// Get the number of nodes in the graph + pub fn node_count(&self) -> usize { + self.graph.node_count() + } + /// Set the UI position for a node pub fn set_node_position(&mut self, node: NodeIndex, x: f32, y: f32) { self.node_positions.insert(node.index() as u32, (x, y)); @@ -125,13 +130,10 @@ impl InstrumentGraph { to: NodeIndex, to_port: usize, ) -> Result<(), ConnectionError> { - eprintln!("[GRAPH] connect() called: {:?} port {} -> {:?} port {}", 
from, from_port, to, to_port); - // Check if this exact connection already exists if let Some(edge_idx) = self.graph.find_edge(from, to) { let existing_conn = &self.graph[edge_idx]; if existing_conn.from_port == from_port && existing_conn.to_port == to_port { - eprintln!("[GRAPH] Connection already exists, skipping duplicate"); return Ok(()); // Connection already exists, don't create duplicate } } @@ -321,11 +323,6 @@ impl InstrumentGraph { // Use the requested output buffer size for processing let process_size = output_buffer.len(); - if process_size > self.buffer_size * 2 { - eprintln!("[GRAPH] WARNING: process_size {} > allocated buffer_size {} * 2", - process_size, self.buffer_size); - } - // Clear all output buffers (audio/CV and MIDI) for node in self.graph.node_weights_mut() { for buffer in &mut node.output_buffers { @@ -609,6 +606,113 @@ impl InstrumentGraph { } } + // For SimpleSampler nodes, serialize the loaded sample + if node.node_type() == "SimpleSampler" { + use crate::audio::node_graph::nodes::SimpleSamplerNode; + use crate::audio::node_graph::preset::{EmbeddedSampleData, SampleData}; + use base64::{Engine as _, engine::general_purpose}; + + let node_ptr = &**node as *const dyn crate::audio::node_graph::AudioNode; + let node_ptr = node_ptr as *const SimpleSamplerNode; + unsafe { + let sampler_node = &*node_ptr; + if let Some(sample_path) = sampler_node.get_sample_path() { + // Check file size + let should_embed = std::fs::metadata(sample_path) + .map(|m| m.len() < 100_000) // < 100KB + .unwrap_or(false); + + if should_embed { + // Embed the sample data + let (sample_data, sample_rate) = sampler_node.get_sample_data_for_embedding(); + + // Convert f32 samples to bytes + let bytes: Vec = sample_data + .iter() + .flat_map(|&f| f.to_le_bytes()) + .collect(); + + // Encode to base64 + let data_base64 = general_purpose::STANDARD.encode(&bytes); + + serialized.sample_data = Some(SampleData::SimpleSampler { + file_path: Some(sample_path.to_string()), + 
embedded_data: Some(EmbeddedSampleData { + data_base64, + sample_rate: sample_rate as u32, + }), + }); + } else { + // Just save the file path + serialized.sample_data = Some(SampleData::SimpleSampler { + file_path: Some(sample_path.to_string()), + embedded_data: None, + }); + } + } + } + } + + // For MultiSampler nodes, serialize all loaded layers + if node.node_type() == "MultiSampler" { + use crate::audio::node_graph::nodes::MultiSamplerNode; + use crate::audio::node_graph::preset::{EmbeddedSampleData, LayerData, SampleData}; + use base64::{Engine as _, engine::general_purpose}; + + let node_ptr = &**node as *const dyn crate::audio::node_graph::AudioNode; + let node_ptr = node_ptr as *const MultiSamplerNode; + unsafe { + let multi_sampler_node = &*node_ptr; + let layers_info = multi_sampler_node.get_layers_info(); + if !layers_info.is_empty() { + let layers: Vec = layers_info + .iter() + .enumerate() + .map(|(layer_index, info)| { + // Check if we should embed this layer + let should_embed = std::fs::metadata(&info.file_path) + .map(|m| m.len() < 100_000) // < 100KB + .unwrap_or(false); + + let embedded_data = if should_embed { + // Get the sample data for this layer + if let Some((sample_data, sample_rate)) = multi_sampler_node.get_layer_data(layer_index) { + // Convert f32 samples to bytes + let bytes: Vec = sample_data + .iter() + .flat_map(|&f| f.to_le_bytes()) + .collect(); + + // Encode to base64 + let data_base64 = general_purpose::STANDARD.encode(&bytes); + + Some(EmbeddedSampleData { + data_base64, + sample_rate: sample_rate as u32, + }) + } else { + None + } + } else { + None + }; + + LayerData { + file_path: Some(info.file_path.clone()), + embedded_data, + key_min: info.key_min, + key_max: info.key_max, + root_key: info.root_key, + velocity_min: info.velocity_min, + velocity_max: info.velocity_max, + } + }) + .collect(); + serialized.sample_data = Some(SampleData::MultiSampler { layers }); + } + } + } + // Save position if available if let Some(pos) = 
self.get_node_position(node_idx) { serialized.set_position(pos.0, pos.1); @@ -659,6 +763,18 @@ impl InstrumentGraph { "Mixer" => Box::new(MixerNode::new("Mixer")), "Filter" => Box::new(FilterNode::new("Filter")), "ADSR" => Box::new(ADSRNode::new("ADSR")), + "LFO" => Box::new(LFONode::new("LFO")), + "NoiseGenerator" => Box::new(NoiseGeneratorNode::new("Noise")), + "Splitter" => Box::new(SplitterNode::new("Splitter")), + "Pan" => Box::new(PanNode::new("Pan")), + "Delay" => Box::new(DelayNode::new("Delay")), + "Reverb" => Box::new(ReverbNode::new("Reverb")), + "Chorus" => Box::new(ChorusNode::new("Chorus")), + "Flanger" => Box::new(FlangerNode::new("Flanger")), + "FMSynth" => Box::new(FMSynthNode::new("FM Synth")), + "WavetableOscillator" => Box::new(WavetableOscillatorNode::new("Wavetable")), + "SimpleSampler" => Box::new(SimpleSamplerNode::new("Sampler")), + "MultiSampler" => Box::new(MultiSamplerNode::new("Multi Sampler")), "MidiInput" => Box::new(MidiInputNode::new("MIDI Input")), "MidiToCV" => Box::new(MidiToCVNode::new("MIDI→CV")), "AudioToCV" => Box::new(AudioToCVNode::new("Audio→CV")), @@ -686,45 +802,121 @@ impl InstrumentGraph { index_map.insert(serialized_node.id, node_idx); // Set parameters - eprintln!("[PRESET] Node {}: type={}, params={:?}", serialized_node.id, serialized_node.node_type, serialized_node.parameters); for (¶m_id, &value) in &serialized_node.parameters { if let Some(graph_node) = graph.graph.node_weight_mut(node_idx) { - eprintln!("[PRESET] Setting param {} = {}", param_id, value); graph_node.node.set_parameter(param_id, value); } } + // Restore sample data for sampler nodes + if let Some(ref sample_data) = serialized_node.sample_data { + match sample_data { + crate::audio::node_graph::preset::SampleData::SimpleSampler { file_path, embedded_data } => { + // Load sample into SimpleSampler + if let Some(graph_node) = graph.graph.node_weight_mut(node_idx) { + let node_ptr = &mut *graph_node.node as *mut dyn 
crate::audio::node_graph::AudioNode; + let node_ptr = node_ptr as *mut SimpleSamplerNode; + unsafe { + let sampler_node = &mut *node_ptr; + + // Try embedded data first, then fall back to file path + if let Some(ref embedded) = embedded_data { + use base64::{Engine as _, engine::general_purpose}; + + // Decode base64 + if let Ok(bytes) = general_purpose::STANDARD.decode(&embedded.data_base64) { + // Convert bytes back to f32 samples + let samples: Vec = bytes + .chunks_exact(4) + .map(|chunk| { + f32::from_le_bytes([chunk[0], chunk[1], chunk[2], chunk[3]]) + }) + .collect(); + + sampler_node.set_sample(samples, embedded.sample_rate as f32); + } + } else if let Some(ref path) = file_path { + // Fall back to loading from file + let _ = sampler_node.load_sample_from_file(path); + } + } + } + } + crate::audio::node_graph::preset::SampleData::MultiSampler { layers } => { + // Load layers into MultiSampler + if let Some(graph_node) = graph.graph.node_weight_mut(node_idx) { + let node_ptr = &mut *graph_node.node as *mut dyn crate::audio::node_graph::AudioNode; + let node_ptr = node_ptr as *mut MultiSamplerNode; + unsafe { + let multi_sampler_node = &mut *node_ptr; + for layer in layers { + // Try embedded data first, then fall back to file path + if let Some(ref embedded) = layer.embedded_data { + use base64::{Engine as _, engine::general_purpose}; + + // Decode base64 + if let Ok(bytes) = general_purpose::STANDARD.decode(&embedded.data_base64) { + // Convert bytes back to f32 samples + let samples: Vec = bytes + .chunks_exact(4) + .map(|chunk| { + f32::from_le_bytes([chunk[0], chunk[1], chunk[2], chunk[3]]) + }) + .collect(); + + multi_sampler_node.add_layer( + samples, + embedded.sample_rate as f32, + layer.key_min, + layer.key_max, + layer.root_key, + layer.velocity_min, + layer.velocity_max, + ); + } + } else if let Some(ref path) = layer.file_path { + // Fall back to loading from file + let _ = multi_sampler_node.load_layer_from_file( + path, + layer.key_min, + 
layer.key_max, + layer.root_key, + layer.velocity_min, + layer.velocity_max, + ); + } + } + } + } + } + } + } + // Restore position graph.set_node_position(node_idx, serialized_node.position.0, serialized_node.position.1); } // Create connections - eprintln!("[PRESET] Creating {} connections", preset.connections.len()); for conn in &preset.connections { let from_idx = index_map.get(&conn.from_node) .ok_or_else(|| format!("Connection from unknown node {}", conn.from_node))?; let to_idx = index_map.get(&conn.to_node) .ok_or_else(|| format!("Connection to unknown node {}", conn.to_node))?; - eprintln!("[PRESET] Connecting: node {} port {} -> node {} port {}", conn.from_node, conn.from_port, conn.to_node, conn.to_port); graph.connect(*from_idx, conn.from_port, *to_idx, conn.to_port) .map_err(|e| format!("Failed to connect nodes: {:?}", e))?; } // Set MIDI targets - eprintln!("[PRESET] Setting MIDI targets: {:?}", preset.midi_targets); for &target_id in &preset.midi_targets { if let Some(&target_idx) = index_map.get(&target_id) { - eprintln!("[PRESET] MIDI target: node {} -> index {:?}", target_id, target_idx); graph.set_midi_target(target_idx, true); } } // Set output node - eprintln!("[PRESET] Setting output node: {:?}", preset.output_node); if let Some(output_id) = preset.output_node { if let Some(&output_idx) = index_map.get(&output_id) { - eprintln!("[PRESET] Output node: {} -> index {:?}", output_id, output_idx); graph.output_node = Some(output_idx); } } diff --git a/daw-backend/src/audio/node_graph/nodes/chorus.rs b/daw-backend/src/audio/node_graph/nodes/chorus.rs new file mode 100644 index 0000000..f86f915 --- /dev/null +++ b/daw-backend/src/audio/node_graph/nodes/chorus.rs @@ -0,0 +1,234 @@ +use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType}; +use crate::audio::midi::MidiEvent; +use std::f32::consts::PI; + +const PARAM_RATE: u32 = 0; +const PARAM_DEPTH: u32 = 1; +const PARAM_WET_DRY: u32 = 2; + +const 
MAX_DELAY_MS: f32 = 50.0; +const BASE_DELAY_MS: f32 = 15.0; + +/// Chorus effect using modulated delay lines +pub struct ChorusNode { + name: String, + rate: f32, // LFO rate in Hz (0.1 to 5 Hz) + depth: f32, // Modulation depth 0.0 to 1.0 + wet_dry: f32, // 0.0 = dry only, 1.0 = wet only + + // Delay buffers for left and right channels + delay_buffer_left: Vec, + delay_buffer_right: Vec, + write_position: usize, + max_delay_samples: usize, + sample_rate: u32, + + // LFO state + lfo_phase: f32, + + inputs: Vec, + outputs: Vec, + parameters: Vec, +} + +impl ChorusNode { + pub fn new(name: impl Into) -> Self { + let name = name.into(); + + let inputs = vec![ + NodePort::new("Audio In", SignalType::Audio, 0), + ]; + + let outputs = vec![ + NodePort::new("Audio Out", SignalType::Audio, 0), + ]; + + let parameters = vec![ + Parameter::new(PARAM_RATE, "Rate", 0.1, 5.0, 1.0, ParameterUnit::Frequency), + Parameter::new(PARAM_DEPTH, "Depth", 0.0, 1.0, 0.5, ParameterUnit::Generic), + Parameter::new(PARAM_WET_DRY, "Wet/Dry", 0.0, 1.0, 0.5, ParameterUnit::Generic), + ]; + + // Allocate max delay buffer size + let max_delay_samples = ((MAX_DELAY_MS / 1000.0) * 48000.0) as usize; + + Self { + name, + rate: 1.0, + depth: 0.5, + wet_dry: 0.5, + delay_buffer_left: vec![0.0; max_delay_samples], + delay_buffer_right: vec![0.0; max_delay_samples], + write_position: 0, + max_delay_samples, + sample_rate: 48000, + lfo_phase: 0.0, + inputs, + outputs, + parameters, + } + } + + fn read_interpolated_sample(&self, buffer: &[f32], delay_samples: f32) -> f32 { + // Linear interpolation for smooth delay modulation + let delay_samples = delay_samples.clamp(0.0, (self.max_delay_samples - 1) as f32); + + let read_pos_float = self.write_position as f32 - delay_samples; + let read_pos_float = if read_pos_float < 0.0 { + read_pos_float + self.max_delay_samples as f32 + } else { + read_pos_float + }; + + let read_pos_int = read_pos_float.floor() as usize; + let frac = read_pos_float - read_pos_int as 
f32; + + let sample1 = buffer[read_pos_int % self.max_delay_samples]; + let sample2 = buffer[(read_pos_int + 1) % self.max_delay_samples]; + + sample1 * (1.0 - frac) + sample2 * frac + } +} + +impl AudioNode for ChorusNode { + fn category(&self) -> NodeCategory { + NodeCategory::Effect + } + + fn inputs(&self) -> &[NodePort] { + &self.inputs + } + + fn outputs(&self) -> &[NodePort] { + &self.outputs + } + + fn parameters(&self) -> &[Parameter] { + &self.parameters + } + + fn set_parameter(&mut self, id: u32, value: f32) { + match id { + PARAM_RATE => { + self.rate = value.clamp(0.1, 5.0); + } + PARAM_DEPTH => { + self.depth = value.clamp(0.0, 1.0); + } + PARAM_WET_DRY => { + self.wet_dry = value.clamp(0.0, 1.0); + } + _ => {} + } + } + + fn get_parameter(&self, id: u32) -> f32 { + match id { + PARAM_RATE => self.rate, + PARAM_DEPTH => self.depth, + PARAM_WET_DRY => self.wet_dry, + _ => 0.0, + } + } + + fn process( + &mut self, + inputs: &[&[f32]], + outputs: &mut [&mut [f32]], + _midi_inputs: &[&[MidiEvent]], + _midi_outputs: &mut [&mut Vec], + sample_rate: u32, + ) { + if inputs.is_empty() || outputs.is_empty() { + return; + } + + // Update sample rate if changed + if self.sample_rate != sample_rate { + self.sample_rate = sample_rate; + self.max_delay_samples = ((MAX_DELAY_MS / 1000.0) * sample_rate as f32) as usize; + self.delay_buffer_left.resize(self.max_delay_samples, 0.0); + self.delay_buffer_right.resize(self.max_delay_samples, 0.0); + self.write_position = 0; + } + + let input = inputs[0]; + let output = &mut outputs[0]; + + // Audio signals are stereo (interleaved L/R) + let frames = input.len() / 2; + let output_frames = output.len() / 2; + let frames_to_process = frames.min(output_frames); + + let dry_gain = 1.0 - self.wet_dry; + let wet_gain = self.wet_dry; + + let base_delay_samples = (BASE_DELAY_MS / 1000.0) * self.sample_rate as f32; + let max_modulation_samples = (MAX_DELAY_MS - BASE_DELAY_MS) / 1000.0 * self.sample_rate as f32; + + for frame in 
0..frames_to_process { + let left_in = input[frame * 2]; + let right_in = input[frame * 2 + 1]; + + // Generate LFO value (sine wave, 0 to 1) + let lfo_value = ((self.lfo_phase * 2.0 * PI).sin() * 0.5 + 0.5) * self.depth; + + // Calculate modulated delay time + let delay_samples = base_delay_samples + lfo_value * max_modulation_samples; + + // Read delayed samples with interpolation + let left_delayed = self.read_interpolated_sample(&self.delay_buffer_left, delay_samples); + let right_delayed = self.read_interpolated_sample(&self.delay_buffer_right, delay_samples); + + // Mix dry and wet signals + output[frame * 2] = left_in * dry_gain + left_delayed * wet_gain; + output[frame * 2 + 1] = right_in * dry_gain + right_delayed * wet_gain; + + // Write to delay buffer + self.delay_buffer_left[self.write_position] = left_in; + self.delay_buffer_right[self.write_position] = right_in; + + // Advance write position + self.write_position = (self.write_position + 1) % self.max_delay_samples; + + // Advance LFO phase + self.lfo_phase += self.rate / self.sample_rate as f32; + if self.lfo_phase >= 1.0 { + self.lfo_phase -= 1.0; + } + } + } + + fn reset(&mut self) { + self.delay_buffer_left.fill(0.0); + self.delay_buffer_right.fill(0.0); + self.write_position = 0; + self.lfo_phase = 0.0; + } + + fn node_type(&self) -> &str { + "Chorus" + } + + fn name(&self) -> &str { + &self.name + } + + fn clone_node(&self) -> Box { + Box::new(Self { + name: self.name.clone(), + rate: self.rate, + depth: self.depth, + wet_dry: self.wet_dry, + delay_buffer_left: vec![0.0; self.max_delay_samples], + delay_buffer_right: vec![0.0; self.max_delay_samples], + write_position: 0, + max_delay_samples: self.max_delay_samples, + sample_rate: self.sample_rate, + lfo_phase: 0.0, + inputs: self.inputs.clone(), + outputs: self.outputs.clone(), + parameters: self.parameters.clone(), + }) + } +} diff --git a/daw-backend/src/audio/node_graph/nodes/delay.rs b/daw-backend/src/audio/node_graph/nodes/delay.rs new 
file mode 100644 index 0000000..0787d82 --- /dev/null +++ b/daw-backend/src/audio/node_graph/nodes/delay.rs @@ -0,0 +1,211 @@ +use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType}; +use crate::audio::midi::MidiEvent; + +const PARAM_DELAY_TIME: u32 = 0; +const PARAM_FEEDBACK: u32 = 1; +const PARAM_WET_DRY: u32 = 2; + +const MAX_DELAY_SECONDS: f32 = 2.0; + +/// Stereo delay node with feedback +pub struct DelayNode { + name: String, + delay_time: f32, // seconds + feedback: f32, // 0.0 to 0.95 + wet_dry: f32, // 0.0 = dry only, 1.0 = wet only + + // Delay buffers for left and right channels + delay_buffer_left: Vec, + delay_buffer_right: Vec, + write_position: usize, + max_delay_samples: usize, + sample_rate: u32, + + inputs: Vec, + outputs: Vec, + parameters: Vec, +} + +impl DelayNode { + pub fn new(name: impl Into) -> Self { + let name = name.into(); + + let inputs = vec![ + NodePort::new("Audio In", SignalType::Audio, 0), + ]; + + let outputs = vec![ + NodePort::new("Audio Out", SignalType::Audio, 0), + ]; + + let parameters = vec![ + Parameter::new(PARAM_DELAY_TIME, "Delay Time", 0.001, MAX_DELAY_SECONDS, 0.5, ParameterUnit::Time), + Parameter::new(PARAM_FEEDBACK, "Feedback", 0.0, 0.95, 0.5, ParameterUnit::Generic), + Parameter::new(PARAM_WET_DRY, "Wet/Dry", 0.0, 1.0, 0.5, ParameterUnit::Generic), + ]; + + // Allocate max delay buffer size (will be initialized properly when we get sample rate) + let max_delay_samples = (MAX_DELAY_SECONDS * 48000.0) as usize; // Assume max 48kHz + + Self { + name, + delay_time: 0.5, + feedback: 0.5, + wet_dry: 0.5, + delay_buffer_left: vec![0.0; max_delay_samples], + delay_buffer_right: vec![0.0; max_delay_samples], + write_position: 0, + max_delay_samples, + sample_rate: 48000, + inputs, + outputs, + parameters, + } + } + + fn get_delay_samples(&self) -> usize { + (self.delay_time * self.sample_rate as f32) as usize + } + + fn read_delayed_sample(&self, buffer: &[f32], 
delay_samples: usize) -> f32 { + // Calculate read position (wrap around) + let read_pos = if self.write_position >= delay_samples { + self.write_position - delay_samples + } else { + self.max_delay_samples + self.write_position - delay_samples + }; + + buffer[read_pos % self.max_delay_samples] + } +} + +impl AudioNode for DelayNode { + fn category(&self) -> NodeCategory { + NodeCategory::Effect + } + + fn inputs(&self) -> &[NodePort] { + &self.inputs + } + + fn outputs(&self) -> &[NodePort] { + &self.outputs + } + + fn parameters(&self) -> &[Parameter] { + &self.parameters + } + + fn set_parameter(&mut self, id: u32, value: f32) { + match id { + PARAM_DELAY_TIME => { + self.delay_time = value.clamp(0.001, MAX_DELAY_SECONDS); + } + PARAM_FEEDBACK => { + self.feedback = value.clamp(0.0, 0.95); + } + PARAM_WET_DRY => { + self.wet_dry = value.clamp(0.0, 1.0); + } + _ => {} + } + } + + fn get_parameter(&self, id: u32) -> f32 { + match id { + PARAM_DELAY_TIME => self.delay_time, + PARAM_FEEDBACK => self.feedback, + PARAM_WET_DRY => self.wet_dry, + _ => 0.0, + } + } + + fn process( + &mut self, + inputs: &[&[f32]], + outputs: &mut [&mut [f32]], + _midi_inputs: &[&[MidiEvent]], + _midi_outputs: &mut [&mut Vec], + sample_rate: u32, + ) { + if inputs.is_empty() || outputs.is_empty() { + return; + } + + // Update sample rate if changed + if self.sample_rate != sample_rate { + self.sample_rate = sample_rate; + self.max_delay_samples = (MAX_DELAY_SECONDS * sample_rate as f32) as usize; + self.delay_buffer_left.resize(self.max_delay_samples, 0.0); + self.delay_buffer_right.resize(self.max_delay_samples, 0.0); + self.write_position = 0; + } + + let input = inputs[0]; + let output = &mut outputs[0]; + + // Audio signals are stereo (interleaved L/R) + let frames = input.len() / 2; + let output_frames = output.len() / 2; + let frames_to_process = frames.min(output_frames); + + let delay_samples = self.get_delay_samples().max(1).min(self.max_delay_samples - 1); + + for frame in 
0..frames_to_process { + let left_in = input[frame * 2]; + let right_in = input[frame * 2 + 1]; + + // Read delayed samples + let left_delayed = self.read_delayed_sample(&self.delay_buffer_left, delay_samples); + let right_delayed = self.read_delayed_sample(&self.delay_buffer_right, delay_samples); + + // Mix dry and wet signals + let dry_gain = 1.0 - self.wet_dry; + let wet_gain = self.wet_dry; + + let left_out = left_in * dry_gain + left_delayed * wet_gain; + let right_out = right_in * dry_gain + right_delayed * wet_gain; + + output[frame * 2] = left_out; + output[frame * 2 + 1] = right_out; + + // Write to delay buffer with feedback + self.delay_buffer_left[self.write_position] = left_in + left_delayed * self.feedback; + self.delay_buffer_right[self.write_position] = right_in + right_delayed * self.feedback; + + // Advance write position + self.write_position = (self.write_position + 1) % self.max_delay_samples; + } + } + + fn reset(&mut self) { + self.delay_buffer_left.fill(0.0); + self.delay_buffer_right.fill(0.0); + self.write_position = 0; + } + + fn node_type(&self) -> &str { + "Delay" + } + + fn name(&self) -> &str { + &self.name + } + + fn clone_node(&self) -> Box { + Box::new(Self { + name: self.name.clone(), + delay_time: self.delay_time, + feedback: self.feedback, + wet_dry: self.wet_dry, + delay_buffer_left: vec![0.0; self.max_delay_samples], + delay_buffer_right: vec![0.0; self.max_delay_samples], + write_position: 0, + max_delay_samples: self.max_delay_samples, + sample_rate: self.sample_rate, + inputs: self.inputs.clone(), + outputs: self.outputs.clone(), + parameters: self.parameters.clone(), + }) + } +} diff --git a/daw-backend/src/audio/node_graph/nodes/flanger.rs b/daw-backend/src/audio/node_graph/nodes/flanger.rs new file mode 100644 index 0000000..26c2dde --- /dev/null +++ b/daw-backend/src/audio/node_graph/nodes/flanger.rs @@ -0,0 +1,243 @@ +use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, 
SignalType}; +use crate::audio::midi::MidiEvent; +use std::f32::consts::PI; + +const PARAM_RATE: u32 = 0; +const PARAM_DEPTH: u32 = 1; +const PARAM_FEEDBACK: u32 = 2; +const PARAM_WET_DRY: u32 = 3; + +const MAX_DELAY_MS: f32 = 10.0; +const BASE_DELAY_MS: f32 = 1.0; + +/// Flanger effect using modulated delay lines with feedback +pub struct FlangerNode { + name: String, + rate: f32, // LFO rate in Hz (0.1 to 10 Hz) + depth: f32, // Modulation depth 0.0 to 1.0 + feedback: f32, // Feedback amount -0.95 to 0.95 + wet_dry: f32, // 0.0 = dry only, 1.0 = wet only + + // Delay buffers for left and right channels + delay_buffer_left: Vec, + delay_buffer_right: Vec, + write_position: usize, + max_delay_samples: usize, + sample_rate: u32, + + // LFO state + lfo_phase: f32, + + inputs: Vec, + outputs: Vec, + parameters: Vec, +} + +impl FlangerNode { + pub fn new(name: impl Into) -> Self { + let name = name.into(); + + let inputs = vec![ + NodePort::new("Audio In", SignalType::Audio, 0), + ]; + + let outputs = vec![ + NodePort::new("Audio Out", SignalType::Audio, 0), + ]; + + let parameters = vec![ + Parameter::new(PARAM_RATE, "Rate", 0.1, 10.0, 0.5, ParameterUnit::Frequency), + Parameter::new(PARAM_DEPTH, "Depth", 0.0, 1.0, 0.7, ParameterUnit::Generic), + Parameter::new(PARAM_FEEDBACK, "Feedback", -0.95, 0.95, 0.5, ParameterUnit::Generic), + Parameter::new(PARAM_WET_DRY, "Wet/Dry", 0.0, 1.0, 0.5, ParameterUnit::Generic), + ]; + + // Allocate max delay buffer size + let max_delay_samples = ((MAX_DELAY_MS / 1000.0) * 48000.0) as usize; + + Self { + name, + rate: 0.5, + depth: 0.7, + feedback: 0.5, + wet_dry: 0.5, + delay_buffer_left: vec![0.0; max_delay_samples], + delay_buffer_right: vec![0.0; max_delay_samples], + write_position: 0, + max_delay_samples, + sample_rate: 48000, + lfo_phase: 0.0, + inputs, + outputs, + parameters, + } + } + + fn read_interpolated_sample(&self, buffer: &[f32], delay_samples: f32) -> f32 { + // Linear interpolation for smooth delay modulation + let 
delay_samples = delay_samples.clamp(0.0, (self.max_delay_samples - 1) as f32); + + let read_pos_float = self.write_position as f32 - delay_samples; + let read_pos_float = if read_pos_float < 0.0 { + read_pos_float + self.max_delay_samples as f32 + } else { + read_pos_float + }; + + let read_pos_int = read_pos_float.floor() as usize; + let frac = read_pos_float - read_pos_int as f32; + + let sample1 = buffer[read_pos_int % self.max_delay_samples]; + let sample2 = buffer[(read_pos_int + 1) % self.max_delay_samples]; + + sample1 * (1.0 - frac) + sample2 * frac + } +} + +impl AudioNode for FlangerNode { + fn category(&self) -> NodeCategory { + NodeCategory::Effect + } + + fn inputs(&self) -> &[NodePort] { + &self.inputs + } + + fn outputs(&self) -> &[NodePort] { + &self.outputs + } + + fn parameters(&self) -> &[Parameter] { + &self.parameters + } + + fn set_parameter(&mut self, id: u32, value: f32) { + match id { + PARAM_RATE => { + self.rate = value.clamp(0.1, 10.0); + } + PARAM_DEPTH => { + self.depth = value.clamp(0.0, 1.0); + } + PARAM_FEEDBACK => { + self.feedback = value.clamp(-0.95, 0.95); + } + PARAM_WET_DRY => { + self.wet_dry = value.clamp(0.0, 1.0); + } + _ => {} + } + } + + fn get_parameter(&self, id: u32) -> f32 { + match id { + PARAM_RATE => self.rate, + PARAM_DEPTH => self.depth, + PARAM_FEEDBACK => self.feedback, + PARAM_WET_DRY => self.wet_dry, + _ => 0.0, + } + } + + fn process( + &mut self, + inputs: &[&[f32]], + outputs: &mut [&mut [f32]], + _midi_inputs: &[&[MidiEvent]], + _midi_outputs: &mut [&mut Vec], + sample_rate: u32, + ) { + if inputs.is_empty() || outputs.is_empty() { + return; + } + + // Update sample rate if changed + if self.sample_rate != sample_rate { + self.sample_rate = sample_rate; + self.max_delay_samples = ((MAX_DELAY_MS / 1000.0) * sample_rate as f32) as usize; + self.delay_buffer_left.resize(self.max_delay_samples, 0.0); + self.delay_buffer_right.resize(self.max_delay_samples, 0.0); + self.write_position = 0; + } + + let input = 
inputs[0]; + let output = &mut outputs[0]; + + // Audio signals are stereo (interleaved L/R) + let frames = input.len() / 2; + let output_frames = output.len() / 2; + let frames_to_process = frames.min(output_frames); + + let dry_gain = 1.0 - self.wet_dry; + let wet_gain = self.wet_dry; + + let base_delay_samples = (BASE_DELAY_MS / 1000.0) * self.sample_rate as f32; + let max_modulation_samples = (MAX_DELAY_MS - BASE_DELAY_MS) / 1000.0 * self.sample_rate as f32; + + for frame in 0..frames_to_process { + let left_in = input[frame * 2]; + let right_in = input[frame * 2 + 1]; + + // Generate LFO value (sine wave, 0 to 1) + let lfo_value = ((self.lfo_phase * 2.0 * PI).sin() * 0.5 + 0.5) * self.depth; + + // Calculate modulated delay time + let delay_samples = base_delay_samples + lfo_value * max_modulation_samples; + + // Read delayed samples with interpolation + let left_delayed = self.read_interpolated_sample(&self.delay_buffer_left, delay_samples); + let right_delayed = self.read_interpolated_sample(&self.delay_buffer_right, delay_samples); + + // Mix dry and wet signals + output[frame * 2] = left_in * dry_gain + left_delayed * wet_gain; + output[frame * 2 + 1] = right_in * dry_gain + right_delayed * wet_gain; + + // Write to delay buffer with feedback + self.delay_buffer_left[self.write_position] = left_in + left_delayed * self.feedback; + self.delay_buffer_right[self.write_position] = right_in + right_delayed * self.feedback; + + // Advance write position + self.write_position = (self.write_position + 1) % self.max_delay_samples; + + // Advance LFO phase + self.lfo_phase += self.rate / self.sample_rate as f32; + if self.lfo_phase >= 1.0 { + self.lfo_phase -= 1.0; + } + } + } + + fn reset(&mut self) { + self.delay_buffer_left.fill(0.0); + self.delay_buffer_right.fill(0.0); + self.write_position = 0; + self.lfo_phase = 0.0; + } + + fn node_type(&self) -> &str { + "Flanger" + } + + fn name(&self) -> &str { + &self.name + } + + fn clone_node(&self) -> Box { + 
Box::new(Self { + name: self.name.clone(), + rate: self.rate, + depth: self.depth, + feedback: self.feedback, + wet_dry: self.wet_dry, + delay_buffer_left: vec![0.0; self.max_delay_samples], + delay_buffer_right: vec![0.0; self.max_delay_samples], + write_position: 0, + max_delay_samples: self.max_delay_samples, + sample_rate: self.sample_rate, + lfo_phase: 0.0, + inputs: self.inputs.clone(), + outputs: self.outputs.clone(), + parameters: self.parameters.clone(), + }) + } +} diff --git a/daw-backend/src/audio/node_graph/nodes/fm_synth.rs b/daw-backend/src/audio/node_graph/nodes/fm_synth.rs new file mode 100644 index 0000000..6940e16 --- /dev/null +++ b/daw-backend/src/audio/node_graph/nodes/fm_synth.rs @@ -0,0 +1,303 @@ +use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType}; +use crate::audio::midi::MidiEvent; +use std::f32::consts::PI; + +// Parameters for the FM synth +const PARAM_ALGORITHM: u32 = 0; +const PARAM_OP1_RATIO: u32 = 1; +const PARAM_OP1_LEVEL: u32 = 2; +const PARAM_OP2_RATIO: u32 = 3; +const PARAM_OP2_LEVEL: u32 = 4; +const PARAM_OP3_RATIO: u32 = 5; +const PARAM_OP3_LEVEL: u32 = 6; +const PARAM_OP4_RATIO: u32 = 7; +const PARAM_OP4_LEVEL: u32 = 8; + +/// FM Algorithm types (inspired by DX7) +/// Algorithm determines how operators modulate each other +#[derive(Debug, Clone, Copy, PartialEq)] +enum FMAlgorithm { + /// Stack: 1->2->3->4 (most harmonic) + Stack = 0, + /// Parallel: All operators to output (organ-like) + Parallel = 1, + /// Bell: 1->2, 3->4, both to output + Bell = 2, + /// Dual: 1->2->output, 3->4->output + Dual = 3, +} + +impl FMAlgorithm { + fn from_u32(value: u32) -> Self { + match value { + 0 => FMAlgorithm::Stack, + 1 => FMAlgorithm::Parallel, + 2 => FMAlgorithm::Bell, + 3 => FMAlgorithm::Dual, + _ => FMAlgorithm::Stack, + } + } +} + +/// Single FM operator (oscillator) +struct FMOperator { + phase: f32, + frequency_ratio: f32, // Multiplier of base frequency (e.g., 1.0, 2.0, 0.5) + 
level: f32, // Output amplitude 0.0-1.0 +} + +impl FMOperator { + fn new() -> Self { + Self { + phase: 0.0, + frequency_ratio: 1.0, + level: 1.0, + } + } + + /// Process one sample with optional frequency modulation + fn process(&mut self, base_freq: f32, modulation: f32, sample_rate: f32) -> f32 { + let freq = base_freq * self.frequency_ratio; + + // Phase modulation (PM, which sounds like FM) + let output = (self.phase * 2.0 * PI + modulation).sin() * self.level; + + // Advance phase + self.phase += freq / sample_rate; + if self.phase >= 1.0 { + self.phase -= 1.0; + } + + output + } + + fn reset(&mut self) { + self.phase = 0.0; + } +} + +/// 4-operator FM synthesizer node +pub struct FMSynthNode { + name: String, + algorithm: FMAlgorithm, + + // Four operators + operators: [FMOperator; 4], + + // Current frequency from V/oct input + current_frequency: f32, + gate_active: bool, + + sample_rate: u32, + + inputs: Vec, + outputs: Vec, + parameters: Vec, +} + +impl FMSynthNode { + pub fn new(name: impl Into) -> Self { + let name = name.into(); + + let inputs = vec![ + NodePort::new("V/Oct", SignalType::CV, 0), + NodePort::new("Gate", SignalType::CV, 1), + ]; + + let outputs = vec![ + NodePort::new("Audio Out", SignalType::Audio, 0), + ]; + + let parameters = vec![ + Parameter::new(PARAM_ALGORITHM, "Algorithm", 0.0, 3.0, 0.0, ParameterUnit::Generic), + Parameter::new(PARAM_OP1_RATIO, "Op1 Ratio", 0.25, 16.0, 1.0, ParameterUnit::Generic), + Parameter::new(PARAM_OP1_LEVEL, "Op1 Level", 0.0, 1.0, 1.0, ParameterUnit::Generic), + Parameter::new(PARAM_OP2_RATIO, "Op2 Ratio", 0.25, 16.0, 2.0, ParameterUnit::Generic), + Parameter::new(PARAM_OP2_LEVEL, "Op2 Level", 0.0, 1.0, 0.8, ParameterUnit::Generic), + Parameter::new(PARAM_OP3_RATIO, "Op3 Ratio", 0.25, 16.0, 3.0, ParameterUnit::Generic), + Parameter::new(PARAM_OP3_LEVEL, "Op3 Level", 0.0, 1.0, 0.6, ParameterUnit::Generic), + Parameter::new(PARAM_OP4_RATIO, "Op4 Ratio", 0.25, 16.0, 4.0, ParameterUnit::Generic), + 
Parameter::new(PARAM_OP4_LEVEL, "Op4 Level", 0.0, 1.0, 0.4, ParameterUnit::Generic), + ]; + + Self { + name, + algorithm: FMAlgorithm::Stack, + operators: [ + FMOperator::new(), + FMOperator::new(), + FMOperator::new(), + FMOperator::new(), + ], + current_frequency: 440.0, + gate_active: false, + sample_rate: 48000, + inputs, + outputs, + parameters, + } + } + + /// Convert V/oct CV to frequency + fn voct_to_freq(voct: f32) -> f32 { + 440.0 * 2.0_f32.powf(voct) + } + + /// Process FM synthesis based on current algorithm + fn process_algorithm(&mut self) -> f32 { + if !self.gate_active { + return 0.0; + } + + let base_freq = self.current_frequency; + let sr = self.sample_rate as f32; + + match self.algorithm { + FMAlgorithm::Stack => { + // 1 -> 2 -> 3 -> 4 -> output + let op4_out = self.operators[3].process(base_freq, 0.0, sr); + let op3_out = self.operators[2].process(base_freq, op4_out * 2.0, sr); + let op2_out = self.operators[1].process(base_freq, op3_out * 2.0, sr); + let op1_out = self.operators[0].process(base_freq, op2_out * 2.0, sr); + op1_out + } + FMAlgorithm::Parallel => { + // All operators output directly (no modulation) + let op1_out = self.operators[0].process(base_freq, 0.0, sr); + let op2_out = self.operators[1].process(base_freq, 0.0, sr); + let op3_out = self.operators[2].process(base_freq, 0.0, sr); + let op4_out = self.operators[3].process(base_freq, 0.0, sr); + (op1_out + op2_out + op3_out + op4_out) * 0.25 + } + FMAlgorithm::Bell => { + // 1 -> 2, 3 -> 4, both to output + let op2_out = self.operators[1].process(base_freq, 0.0, sr); + let op1_out = self.operators[0].process(base_freq, op2_out * 2.0, sr); + let op4_out = self.operators[3].process(base_freq, 0.0, sr); + let op3_out = self.operators[2].process(base_freq, op4_out * 2.0, sr); + (op1_out + op3_out) * 0.5 + } + FMAlgorithm::Dual => { + // 1 -> 2 -> output, 3 -> 4 -> output + let op2_out = self.operators[1].process(base_freq, 0.0, sr); + let op1_out = 
self.operators[0].process(base_freq, op2_out * 2.0, sr); + let op4_out = self.operators[3].process(base_freq, 0.0, sr); + let op3_out = self.operators[2].process(base_freq, op4_out * 2.0, sr); + (op1_out + op3_out) * 0.5 + } + } + } +} + +impl AudioNode for FMSynthNode { + fn category(&self) -> NodeCategory { + NodeCategory::Generator + } + + fn inputs(&self) -> &[NodePort] { + &self.inputs + } + + fn outputs(&self) -> &[NodePort] { + &self.outputs + } + + fn parameters(&self) -> &[Parameter] { + &self.parameters + } + + fn set_parameter(&mut self, id: u32, value: f32) { + match id { + PARAM_ALGORITHM => { + self.algorithm = FMAlgorithm::from_u32(value as u32); + } + PARAM_OP1_RATIO => self.operators[0].frequency_ratio = value.clamp(0.25, 16.0), + PARAM_OP1_LEVEL => self.operators[0].level = value.clamp(0.0, 1.0), + PARAM_OP2_RATIO => self.operators[1].frequency_ratio = value.clamp(0.25, 16.0), + PARAM_OP2_LEVEL => self.operators[1].level = value.clamp(0.0, 1.0), + PARAM_OP3_RATIO => self.operators[2].frequency_ratio = value.clamp(0.25, 16.0), + PARAM_OP3_LEVEL => self.operators[2].level = value.clamp(0.0, 1.0), + PARAM_OP4_RATIO => self.operators[3].frequency_ratio = value.clamp(0.25, 16.0), + PARAM_OP4_LEVEL => self.operators[3].level = value.clamp(0.0, 1.0), + _ => {} + } + } + + fn get_parameter(&self, id: u32) -> f32 { + match id { + PARAM_ALGORITHM => self.algorithm as u32 as f32, + PARAM_OP1_RATIO => self.operators[0].frequency_ratio, + PARAM_OP1_LEVEL => self.operators[0].level, + PARAM_OP2_RATIO => self.operators[1].frequency_ratio, + PARAM_OP2_LEVEL => self.operators[1].level, + PARAM_OP3_RATIO => self.operators[2].frequency_ratio, + PARAM_OP3_LEVEL => self.operators[2].level, + PARAM_OP4_RATIO => self.operators[3].frequency_ratio, + PARAM_OP4_LEVEL => self.operators[3].level, + _ => 0.0, + } + } + + fn process( + &mut self, + inputs: &[&[f32]], + outputs: &mut [&mut [f32]], + _midi_inputs: &[&[MidiEvent]], + _midi_outputs: &mut [&mut Vec], + sample_rate: 
u32, + ) { + if outputs.is_empty() { + return; + } + + self.sample_rate = sample_rate; + + let output = &mut outputs[0]; + let frames = output.len() / 2; + + for frame in 0..frames { + // Read CV inputs + let voct = if inputs.len() > 0 && !inputs[0].is_empty() { + inputs[0][frame.min(inputs[0].len() / 2 - 1) * 2] + } else { + 0.0 + }; + + let gate = if inputs.len() > 1 && !inputs[1].is_empty() { + inputs[1][frame.min(inputs[1].len() / 2 - 1) * 2] + } else { + 0.0 + }; + + // Update state + self.current_frequency = Self::voct_to_freq(voct); + self.gate_active = gate > 0.5; + + // Generate sample + let sample = self.process_algorithm() * 0.3; // Scale down to prevent clipping + + // Output stereo (same signal to both channels) + output[frame * 2] = sample; + output[frame * 2 + 1] = sample; + } + } + + fn reset(&mut self) { + for op in &mut self.operators { + op.reset(); + } + self.gate_active = false; + } + + fn node_type(&self) -> &str { + "FMSynth" + } + + fn name(&self) -> &str { + &self.name + } + + fn clone_node(&self) -> Box { + Box::new(Self::new(self.name.clone())) + } +} diff --git a/daw-backend/src/audio/node_graph/nodes/midi_to_cv.rs b/daw-backend/src/audio/node_graph/nodes/midi_to_cv.rs index 9ba62f0..f2e26cc 100644 --- a/daw-backend/src/audio/node_graph/nodes/midi_to_cv.rs +++ b/daw-backend/src/audio/node_graph/nodes/midi_to_cv.rs @@ -8,7 +8,7 @@ pub struct MidiToCVNode { note: u8, // Current MIDI note number gate: f32, // Gate CV (1.0 when note on, 0.0 when off) velocity: f32, // Velocity CV (0.0-1.0) - pitch_cv: f32, // Pitch CV (0.0-1.0 V/oct) + pitch_cv: f32, // Pitch CV (V/Oct: 0V = A4, ±1V per octave) inputs: Vec, outputs: Vec, parameters: Vec, @@ -24,7 +24,7 @@ impl MidiToCVNode { ]; let outputs = vec![ - NodePort::new("V/Oct", SignalType::CV, 0), // 0.0-1.0 pitch CV + NodePort::new("V/Oct", SignalType::CV, 0), // V/Oct: 0V = A4, ±1V per octave NodePort::new("Gate", SignalType::CV, 1), // 1.0 = on, 0.0 = off NodePort::new("Velocity", 
SignalType::CV, 2), // 0.0-1.0 ]; @@ -41,11 +41,12 @@ impl MidiToCVNode { } } - /// Convert MIDI note to V/oct CV (0-1 range representing pitch) - /// Maps MIDI notes 0-127 to CV 0.0-1.0 for pitch tracking + /// Convert MIDI note to V/oct CV (proper V/Oct standard) + /// 0V = A4 (MIDI 69), ±1V per octave + /// Middle C (MIDI 60) = -0.75V, A5 (MIDI 81) = +1.0V fn midi_note_to_voct(note: u8) -> f32 { - // Simple linear mapping: each semitone is 1/127 of the CV range - note as f32 / 127.0 + // Standard V/Oct: 0V at A4, 1V per octave (12 semitones) + (note as f32 - 69.0) / 12.0 } } diff --git a/daw-backend/src/audio/node_graph/nodes/mod.rs b/daw-backend/src/audio/node_graph/nodes/mod.rs index 42e6b1b..6b05851 100644 --- a/daw-backend/src/audio/node_graph/nodes/mod.rs +++ b/daw-backend/src/audio/node_graph/nodes/mod.rs @@ -1,33 +1,49 @@ mod adsr; mod audio_to_cv; +mod chorus; +mod delay; mod filter; +mod flanger; +mod fm_synth; mod gain; mod lfo; mod midi_input; mod midi_to_cv; mod mixer; +mod multi_sampler; mod noise; mod oscillator; mod oscilloscope; mod output; mod pan; +mod reverb; +mod simple_sampler; mod splitter; mod template_io; mod voice_allocator; +mod wavetable_oscillator; pub use adsr::ADSRNode; pub use audio_to_cv::AudioToCVNode; +pub use chorus::ChorusNode; +pub use delay::DelayNode; pub use filter::FilterNode; +pub use flanger::FlangerNode; +pub use fm_synth::FMSynthNode; pub use gain::GainNode; pub use lfo::LFONode; pub use midi_input::MidiInputNode; pub use midi_to_cv::MidiToCVNode; pub use mixer::MixerNode; +pub use multi_sampler::MultiSamplerNode; pub use noise::NoiseGeneratorNode; pub use oscillator::OscillatorNode; pub use oscilloscope::OscilloscopeNode; pub use output::AudioOutputNode; pub use pan::PanNode; +pub use reverb::ReverbNode; +pub use simple_sampler::SimpleSamplerNode; pub use splitter::SplitterNode; pub use template_io::{TemplateInputNode, TemplateOutputNode}; pub use voice_allocator::VoiceAllocatorNode; +pub use 
wavetable_oscillator::WavetableOscillatorNode; diff --git a/daw-backend/src/audio/node_graph/nodes/multi_sampler.rs b/daw-backend/src/audio/node_graph/nodes/multi_sampler.rs new file mode 100644 index 0000000..77ea06b --- /dev/null +++ b/daw-backend/src/audio/node_graph/nodes/multi_sampler.rs @@ -0,0 +1,566 @@ +use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType}; +use crate::audio::midi::MidiEvent; + +// Parameters +const PARAM_GAIN: u32 = 0; +const PARAM_ATTACK: u32 = 1; +const PARAM_RELEASE: u32 = 2; +const PARAM_TRANSPOSE: u32 = 3; + +/// Metadata about a loaded sample layer (for preset serialization) +#[derive(Clone, Debug)] +pub struct LayerInfo { + pub file_path: String, + pub key_min: u8, + pub key_max: u8, + pub root_key: u8, + pub velocity_min: u8, + pub velocity_max: u8, +} + +/// Single sample with velocity range and key range +#[derive(Clone)] +struct SampleLayer { + sample_data: Vec, + sample_rate: f32, + + // Key range: C-1 = 0, C0 = 12, middle C (C4) = 60, C9 = 120 + key_min: u8, + key_max: u8, + root_key: u8, // The original pitch of the sample + + // Velocity range: 0-127 + velocity_min: u8, + velocity_max: u8, +} + +impl SampleLayer { + fn new( + sample_data: Vec, + sample_rate: f32, + key_min: u8, + key_max: u8, + root_key: u8, + velocity_min: u8, + velocity_max: u8, + ) -> Self { + Self { + sample_data, + sample_rate, + key_min, + key_max, + root_key, + velocity_min, + velocity_max, + } + } + + /// Check if this layer matches the given key and velocity + fn matches(&self, key: u8, velocity: u8) -> bool { + key >= self.key_min + && key <= self.key_max + && velocity >= self.velocity_min + && velocity <= self.velocity_max + } +} + +/// Active voice playing a sample +struct Voice { + layer_index: usize, + playhead: f32, + note: u8, + velocity: u8, + is_active: bool, + + // Envelope + envelope_phase: EnvelopePhase, + envelope_value: f32, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +enum 
EnvelopePhase { + Attack, + Sustain, + Release, +} + +impl Voice { + fn new(layer_index: usize, note: u8, velocity: u8) -> Self { + Self { + layer_index, + playhead: 0.0, + note, + velocity, + is_active: true, + envelope_phase: EnvelopePhase::Attack, + envelope_value: 0.0, + } + } +} + +/// Multi-sample instrument with velocity layers and key zones +pub struct MultiSamplerNode { + name: String, + + // Sample layers + layers: Vec, + layer_infos: Vec, // Metadata about loaded layers + + // Voice management + voices: Vec, + max_voices: usize, + + // Parameters + gain: f32, + attack_time: f32, // seconds + release_time: f32, // seconds + transpose: i8, // semitones + + inputs: Vec, + outputs: Vec, + parameters: Vec, +} + +impl MultiSamplerNode { + pub fn new(name: impl Into) -> Self { + let name = name.into(); + + let inputs = vec![ + NodePort::new("MIDI In", SignalType::Midi, 0), + ]; + + let outputs = vec![ + NodePort::new("Audio Out", SignalType::Audio, 0), + ]; + + let parameters = vec![ + Parameter::new(PARAM_GAIN, "Gain", 0.0, 2.0, 1.0, ParameterUnit::Generic), + Parameter::new(PARAM_ATTACK, "Attack", 0.001, 1.0, 0.01, ParameterUnit::Time), + Parameter::new(PARAM_RELEASE, "Release", 0.01, 5.0, 0.1, ParameterUnit::Time), + Parameter::new(PARAM_TRANSPOSE, "Transpose", -24.0, 24.0, 0.0, ParameterUnit::Generic), + ]; + + Self { + name, + layers: Vec::new(), + layer_infos: Vec::new(), + voices: Vec::new(), + max_voices: 16, + gain: 1.0, + attack_time: 0.01, + release_time: 0.1, + transpose: 0, + inputs, + outputs, + parameters, + } + } + + /// Add a sample layer + pub fn add_layer( + &mut self, + sample_data: Vec, + sample_rate: f32, + key_min: u8, + key_max: u8, + root_key: u8, + velocity_min: u8, + velocity_max: u8, + ) { + let layer = SampleLayer::new( + sample_data, + sample_rate, + key_min, + key_max, + root_key, + velocity_min, + velocity_max, + ); + self.layers.push(layer); + } + + /// Load a sample layer from a file path + pub fn load_layer_from_file( + &mut 
self, + path: &str, + key_min: u8, + key_max: u8, + root_key: u8, + velocity_min: u8, + velocity_max: u8, + ) -> Result<(), String> { + use crate::audio::sample_loader::load_audio_file; + + let sample_data = load_audio_file(path)?; + self.add_layer( + sample_data.samples, + sample_data.sample_rate as f32, + key_min, + key_max, + root_key, + velocity_min, + velocity_max, + ); + + // Store layer metadata for preset serialization + self.layer_infos.push(LayerInfo { + file_path: path.to_string(), + key_min, + key_max, + root_key, + velocity_min, + velocity_max, + }); + + Ok(()) + } + + /// Get information about all loaded layers + pub fn get_layers_info(&self) -> &[LayerInfo] { + &self.layer_infos + } + + /// Get sample data for a specific layer (for preset embedding) + pub fn get_layer_data(&self, layer_index: usize) -> Option<(Vec, f32)> { + self.layers.get(layer_index).map(|layer| { + (layer.sample_data.clone(), layer.sample_rate) + }) + } + + /// Update a layer's configuration + pub fn update_layer( + &mut self, + layer_index: usize, + key_min: u8, + key_max: u8, + root_key: u8, + velocity_min: u8, + velocity_max: u8, + ) -> Result<(), String> { + if layer_index >= self.layers.len() { + return Err("Layer index out of bounds".to_string()); + } + + // Update the layer data + self.layers[layer_index].key_min = key_min; + self.layers[layer_index].key_max = key_max; + self.layers[layer_index].root_key = root_key; + self.layers[layer_index].velocity_min = velocity_min; + self.layers[layer_index].velocity_max = velocity_max; + + // Update the layer info + if layer_index < self.layer_infos.len() { + self.layer_infos[layer_index].key_min = key_min; + self.layer_infos[layer_index].key_max = key_max; + self.layer_infos[layer_index].root_key = root_key; + self.layer_infos[layer_index].velocity_min = velocity_min; + self.layer_infos[layer_index].velocity_max = velocity_max; + } + + Ok(()) + } + + /// Remove a layer + pub fn remove_layer(&mut self, layer_index: usize) -> 
Result<(), String> { + if layer_index >= self.layers.len() { + return Err("Layer index out of bounds".to_string()); + } + + self.layers.remove(layer_index); + if layer_index < self.layer_infos.len() { + self.layer_infos.remove(layer_index); + } + + // Stop any voices playing this layer + for voice in &mut self.voices { + if voice.layer_index == layer_index { + voice.is_active = false; + } else if voice.layer_index > layer_index { + // Adjust indices for layers that were shifted down + voice.layer_index -= 1; + } + } + + Ok(()) + } + + /// Find the best matching layer for a given note and velocity + fn find_layer(&self, note: u8, velocity: u8) -> Option { + self.layers + .iter() + .enumerate() + .find(|(_, layer)| layer.matches(note, velocity)) + .map(|(index, _)| index) + } + + /// Trigger a note + fn note_on(&mut self, note: u8, velocity: u8) { + let transposed_note = (note as i16 + self.transpose as i16).clamp(0, 127) as u8; + + if let Some(layer_index) = self.find_layer(transposed_note, velocity) { + // Find an inactive voice or reuse the oldest one + let voice_index = self + .voices + .iter() + .position(|v| !v.is_active) + .unwrap_or_else(|| { + // All voices active, reuse the first one + if self.voices.len() < self.max_voices { + self.voices.len() + } else { + 0 + } + }); + + let voice = Voice::new(layer_index, note, velocity); + + if voice_index < self.voices.len() { + self.voices[voice_index] = voice; + } else { + self.voices.push(voice); + } + } + } + + /// Release a note + fn note_off(&mut self, note: u8) { + for voice in &mut self.voices { + if voice.note == note && voice.is_active { + voice.envelope_phase = EnvelopePhase::Release; + } + } + } + + /// Calculate playback speed from pitch difference + fn calculate_speed(&self, layer: &SampleLayer, note: u8) -> f32 { + let semitone_diff = note as i16 - layer.root_key as i16; + 2.0_f32.powf(semitone_diff as f32 / 12.0) + } + + /// Read sample at playhead with linear interpolation + fn read_sample(&self, 
playhead: f32, sample: &[f32]) -> f32 { + if sample.is_empty() || playhead < 0.0 { + return 0.0; + } + + let index = playhead.floor() as usize; + if index >= sample.len() { + return 0.0; + } + + let frac = playhead - playhead.floor(); + let sample1 = sample[index]; + let sample2 = if index + 1 < sample.len() { + sample[index + 1] + } else { + 0.0 + }; + + sample1 + (sample2 - sample1) * frac + } + + /// Process envelope for a voice + fn process_envelope(&self, voice: &mut Voice, sample_rate: f32) -> f32 { + match voice.envelope_phase { + EnvelopePhase::Attack => { + let attack_samples = self.attack_time * sample_rate; + voice.envelope_value += 1.0 / attack_samples; + if voice.envelope_value >= 1.0 { + voice.envelope_value = 1.0; + voice.envelope_phase = EnvelopePhase::Sustain; + } + } + EnvelopePhase::Sustain => { + voice.envelope_value = 1.0; + } + EnvelopePhase::Release => { + let release_samples = self.release_time * sample_rate; + voice.envelope_value -= 1.0 / release_samples; + if voice.envelope_value <= 0.0 { + voice.envelope_value = 0.0; + voice.is_active = false; + } + } + } + + voice.envelope_value.clamp(0.0, 1.0) + } +} + +impl AudioNode for MultiSamplerNode { + fn category(&self) -> NodeCategory { + NodeCategory::Generator + } + + fn inputs(&self) -> &[NodePort] { + &self.inputs + } + + fn outputs(&self) -> &[NodePort] { + &self.outputs + } + + fn parameters(&self) -> &[Parameter] { + &self.parameters + } + + fn set_parameter(&mut self, id: u32, value: f32) { + match id { + PARAM_GAIN => { + self.gain = value.clamp(0.0, 2.0); + } + PARAM_ATTACK => { + self.attack_time = value.clamp(0.001, 1.0); + } + PARAM_RELEASE => { + self.release_time = value.clamp(0.01, 5.0); + } + PARAM_TRANSPOSE => { + self.transpose = value.clamp(-24.0, 24.0) as i8; + } + _ => {} + } + } + + fn get_parameter(&self, id: u32) -> f32 { + match id { + PARAM_GAIN => self.gain, + PARAM_ATTACK => self.attack_time, + PARAM_RELEASE => self.release_time, + PARAM_TRANSPOSE => self.transpose 
as f32, + _ => 0.0, + } + } + + fn process( + &mut self, + _inputs: &[&[f32]], + outputs: &mut [&mut [f32]], + midi_inputs: &[&[MidiEvent]], + _midi_outputs: &mut [&mut Vec], + sample_rate: u32, + ) { + if outputs.is_empty() { + return; + } + + let output = &mut outputs[0]; + let frames = output.len() / 2; + + // Clear output + output.fill(0.0); + + // Process MIDI events + if !midi_inputs.is_empty() { + for event in midi_inputs[0].iter() { + if event.is_note_on() { + self.note_on(event.data1, event.data2); + } else if event.is_note_off() { + self.note_off(event.data1); + } + } + } + + // Extract parameters needed for processing + let gain = self.gain; + let attack_time = self.attack_time; + let release_time = self.release_time; + + // Process all active voices + for voice in &mut self.voices { + if !voice.is_active { + continue; + } + + if voice.layer_index >= self.layers.len() { + continue; + } + + let layer = &self.layers[voice.layer_index]; + + // Calculate playback speed + let semitone_diff = voice.note as i16 - layer.root_key as i16; + let speed = 2.0_f32.powf(semitone_diff as f32 / 12.0); + let speed_adjusted = speed * (layer.sample_rate / sample_rate as f32); + + for frame in 0..frames { + // Read sample with linear interpolation + let playhead = voice.playhead; + let sample = if !layer.sample_data.is_empty() && playhead >= 0.0 { + let index = playhead.floor() as usize; + if index < layer.sample_data.len() { + let frac = playhead - playhead.floor(); + let sample1 = layer.sample_data[index]; + let sample2 = if index + 1 < layer.sample_data.len() { + layer.sample_data[index + 1] + } else { + 0.0 + }; + sample1 + (sample2 - sample1) * frac + } else { + 0.0 + } + } else { + 0.0 + }; + + // Process envelope + match voice.envelope_phase { + EnvelopePhase::Attack => { + let attack_samples = attack_time * sample_rate as f32; + voice.envelope_value += 1.0 / attack_samples; + if voice.envelope_value >= 1.0 { + voice.envelope_value = 1.0; + voice.envelope_phase = 
EnvelopePhase::Sustain; + } + } + EnvelopePhase::Sustain => { + voice.envelope_value = 1.0; + } + EnvelopePhase::Release => { + let release_samples = release_time * sample_rate as f32; + voice.envelope_value -= 1.0 / release_samples; + if voice.envelope_value <= 0.0 { + voice.envelope_value = 0.0; + voice.is_active = false; + } + } + } + let envelope = voice.envelope_value.clamp(0.0, 1.0); + + // Apply velocity scaling (0-127 -> 0-1) + let velocity_scale = voice.velocity as f32 / 127.0; + + // Mix into output + let final_sample = sample * envelope * velocity_scale * gain; + output[frame * 2] += final_sample; + output[frame * 2 + 1] += final_sample; + + // Advance playhead + voice.playhead += speed_adjusted; + + // Stop if we've reached the end + if voice.playhead >= layer.sample_data.len() as f32 { + voice.is_active = false; + break; + } + } + } + } + + fn reset(&mut self) { + self.voices.clear(); + } + + fn node_type(&self) -> &str { + "MultiSampler" + } + + fn name(&self) -> &str { + &self.name + } + + fn clone_node(&self) -> Box { + Box::new(Self::new(self.name.clone())) + } +} diff --git a/daw-backend/src/audio/node_graph/nodes/oscillator.rs b/daw-backend/src/audio/node_graph/nodes/oscillator.rs index 34ff0fa..3d88a11 100644 --- a/daw-backend/src/audio/node_graph/nodes/oscillator.rs +++ b/daw-backend/src/audio/node_graph/nodes/oscillator.rs @@ -127,15 +127,14 @@ impl AudioNode for OscillatorNode { // Start with base frequency let mut frequency = self.frequency; - // V/Oct input: 0.0-1.0 maps to MIDI notes 0-127 + // V/Oct input: Standard V/Oct (0V = A4 440Hz, ±1V per octave) if !inputs.is_empty() && frame < inputs[0].len() { let voct = inputs[0][frame]; // Read V/Oct CV (mono) - if voct > 0.001 { - // Convert CV to MIDI note number (0-1 -> 0-127) - let midi_note = voct * 127.0; - // Convert MIDI note to frequency: f = 440 * 2^((n-69)/12) - frequency = 440.0 * 2.0_f32.powf((midi_note - 69.0) / 12.0); - } + // Convert V/Oct to frequency: f = 440 * 2^(voct) + // 
voct = 0.0 -> 440 Hz (A4) + // voct = 1.0 -> 880 Hz (A5) + // voct = -0.75 -> 261.6 Hz (C4, middle C) + frequency = 440.0 * 2.0_f32.powf(voct); } // FM input: modulates the frequency diff --git a/daw-backend/src/audio/node_graph/nodes/reverb.rs b/daw-backend/src/audio/node_graph/nodes/reverb.rs new file mode 100644 index 0000000..723d5f8 --- /dev/null +++ b/daw-backend/src/audio/node_graph/nodes/reverb.rs @@ -0,0 +1,313 @@ +use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType}; +use crate::audio::midi::MidiEvent; + +const PARAM_ROOM_SIZE: u32 = 0; +const PARAM_DAMPING: u32 = 1; +const PARAM_WET_DRY: u32 = 2; + +// Schroeder reverb uses a parallel bank of comb filters followed by series all-pass filters +// Comb filter delays (in samples at 48kHz) +const COMB_DELAYS: [usize; 8] = [1557, 1617, 1491, 1422, 1277, 1356, 1188, 1116]; +// All-pass filter delays (in samples at 48kHz) +const ALLPASS_DELAYS: [usize; 4] = [225, 556, 441, 341]; + +/// Process a single channel through comb and all-pass filters +fn process_channel( + input: f32, + comb_filters: &mut [CombFilter], + allpass_filters: &mut [AllPassFilter], +) -> f32 { + // Sum parallel comb filters and scale down to prevent excessive gain + // With 8 comb filters, we need to scale the output significantly + let mut output = 0.0; + for comb in comb_filters.iter_mut() { + output += comb.process(input); + } + output *= 0.015; // Scale down the summed comb output + + // Series all-pass filters + for allpass in allpass_filters.iter_mut() { + output = allpass.process(output); + } + + output +} + +/// Single comb filter for reverb +struct CombFilter { + buffer: Vec, + buffer_size: usize, + filter_store: f32, + write_pos: usize, + damp: f32, + feedback: f32, +} + +impl CombFilter { + fn new(size: usize) -> Self { + Self { + buffer: vec![0.0; size], + buffer_size: size, + filter_store: 0.0, + write_pos: 0, + damp: 0.5, + feedback: 0.5, + } + } + + fn process(&mut self, 
input: f32) -> f32 { + let output = self.buffer[self.write_pos]; + + // One-pole lowpass filter + self.filter_store = output * (1.0 - self.damp) + self.filter_store * self.damp; + + self.buffer[self.write_pos] = input + self.filter_store * self.feedback; + + self.write_pos = (self.write_pos + 1) % self.buffer_size; + + output + } + + fn mute(&mut self) { + self.buffer.fill(0.0); + self.filter_store = 0.0; + } + + fn set_damp(&mut self, val: f32) { + self.damp = val; + } + + fn set_feedback(&mut self, val: f32) { + self.feedback = val; + } +} + +/// Single all-pass filter for reverb +struct AllPassFilter { + buffer: Vec, + buffer_size: usize, + write_pos: usize, +} + +impl AllPassFilter { + fn new(size: usize) -> Self { + Self { + buffer: vec![0.0; size], + buffer_size: size, + write_pos: 0, + } + } + + fn process(&mut self, input: f32) -> f32 { + let delayed = self.buffer[self.write_pos]; + let output = -input + delayed; + + self.buffer[self.write_pos] = input + delayed * 0.5; + + self.write_pos = (self.write_pos + 1) % self.buffer_size; + + output + } + + fn mute(&mut self) { + self.buffer.fill(0.0); + } +} + +/// Schroeder reverb node with room size and damping controls +pub struct ReverbNode { + name: String, + room_size: f32, // 0.0 to 1.0 + damping: f32, // 0.0 to 1.0 + wet_dry: f32, // 0.0 = dry only, 1.0 = wet only + + // Left channel filters + comb_filters_left: Vec, + allpass_filters_left: Vec, + + // Right channel filters + comb_filters_right: Vec, + allpass_filters_right: Vec, + + inputs: Vec, + outputs: Vec, + parameters: Vec, +} + +impl ReverbNode { + pub fn new(name: impl Into) -> Self { + let name = name.into(); + + let inputs = vec![ + NodePort::new("Audio In", SignalType::Audio, 0), + ]; + + let outputs = vec![ + NodePort::new("Audio Out", SignalType::Audio, 0), + ]; + + let parameters = vec![ + Parameter::new(PARAM_ROOM_SIZE, "Room Size", 0.0, 1.0, 0.5, ParameterUnit::Generic), + Parameter::new(PARAM_DAMPING, "Damping", 0.0, 1.0, 0.5, 
ParameterUnit::Generic), + Parameter::new(PARAM_WET_DRY, "Wet/Dry", 0.0, 1.0, 0.3, ParameterUnit::Generic), + ]; + + // Create comb filters for both channels + // Right channel has slightly different delays to create stereo effect + let comb_filters_left: Vec = COMB_DELAYS.iter().map(|&d| CombFilter::new(d)).collect(); + let comb_filters_right: Vec = COMB_DELAYS.iter().map(|&d| CombFilter::new(d + 23)).collect(); + + // Create all-pass filters for both channels + let allpass_filters_left: Vec = ALLPASS_DELAYS.iter().map(|&d| AllPassFilter::new(d)).collect(); + let allpass_filters_right: Vec = ALLPASS_DELAYS.iter().map(|&d| AllPassFilter::new(d + 23)).collect(); + + let mut node = Self { + name, + room_size: 0.5, + damping: 0.5, + wet_dry: 0.3, + comb_filters_left, + allpass_filters_left, + comb_filters_right, + allpass_filters_right, + inputs, + outputs, + parameters, + }; + + node.update_filters(); + node + } + + fn update_filters(&mut self) { + // Room size affects feedback (larger room = more feedback) + let feedback = 0.28 + self.room_size * 0.7; + + // Update all comb filters + for comb in &mut self.comb_filters_left { + comb.set_feedback(feedback); + comb.set_damp(self.damping); + } + for comb in &mut self.comb_filters_right { + comb.set_feedback(feedback); + comb.set_damp(self.damping); + } + } + +} + +impl AudioNode for ReverbNode { + fn category(&self) -> NodeCategory { + NodeCategory::Effect + } + + fn inputs(&self) -> &[NodePort] { + &self.inputs + } + + fn outputs(&self) -> &[NodePort] { + &self.outputs + } + + fn parameters(&self) -> &[Parameter] { + &self.parameters + } + + fn set_parameter(&mut self, id: u32, value: f32) { + match id { + PARAM_ROOM_SIZE => { + self.room_size = value.clamp(0.0, 1.0); + self.update_filters(); + } + PARAM_DAMPING => { + self.damping = value.clamp(0.0, 1.0); + self.update_filters(); + } + PARAM_WET_DRY => { + self.wet_dry = value.clamp(0.0, 1.0); + } + _ => {} + } + } + + fn get_parameter(&self, id: u32) -> f32 { + match 
id { + PARAM_ROOM_SIZE => self.room_size, + PARAM_DAMPING => self.damping, + PARAM_WET_DRY => self.wet_dry, + _ => 0.0, + } + } + + fn process( + &mut self, + inputs: &[&[f32]], + outputs: &mut [&mut [f32]], + _midi_inputs: &[&[MidiEvent]], + _midi_outputs: &mut [&mut Vec], + _sample_rate: u32, + ) { + if inputs.is_empty() || outputs.is_empty() { + return; + } + + let input = inputs[0]; + let output = &mut outputs[0]; + + // Audio signals are stereo (interleaved L/R) + let frames = input.len() / 2; + let output_frames = output.len() / 2; + let frames_to_process = frames.min(output_frames); + + let dry_gain = 1.0 - self.wet_dry; + let wet_gain = self.wet_dry; + + for frame in 0..frames_to_process { + let left_in = input[frame * 2]; + let right_in = input[frame * 2 + 1]; + + // Process both channels + let left_wet = process_channel( + left_in, + &mut self.comb_filters_left, + &mut self.allpass_filters_left, + ); + let right_wet = process_channel( + right_in, + &mut self.comb_filters_right, + &mut self.allpass_filters_right, + ); + + // Mix dry and wet signals + output[frame * 2] = left_in * dry_gain + left_wet * wet_gain; + output[frame * 2 + 1] = right_in * dry_gain + right_wet * wet_gain; + } + } + + fn reset(&mut self) { + for comb in &mut self.comb_filters_left { + comb.mute(); + } + for comb in &mut self.comb_filters_right { + comb.mute(); + } + for allpass in &mut self.allpass_filters_left { + allpass.mute(); + } + for allpass in &mut self.allpass_filters_right { + allpass.mute(); + } + } + + fn node_type(&self) -> &str { + "Reverb" + } + + fn name(&self) -> &str { + &self.name + } + + fn clone_node(&self) -> Box { + Box::new(Self::new(self.name.clone())) + } +} diff --git a/daw-backend/src/audio/node_graph/nodes/simple_sampler.rs b/daw-backend/src/audio/node_graph/nodes/simple_sampler.rs new file mode 100644 index 0000000..f1b692a --- /dev/null +++ b/daw-backend/src/audio/node_graph/nodes/simple_sampler.rs @@ -0,0 +1,278 @@ +use 
crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType}; +use crate::audio::midi::MidiEvent; +use std::sync::{Arc, Mutex}; + +// Parameters +const PARAM_GAIN: u32 = 0; +const PARAM_LOOP: u32 = 1; +const PARAM_PITCH_SHIFT: u32 = 2; + +/// Simple single-sample playback node with pitch shifting +pub struct SimpleSamplerNode { + name: String, + + // Sample data (shared, can be set externally) + sample_data: Arc>>, + sample_rate_original: f32, + sample_path: Option, // Path to loaded sample file + + // Playback state + playhead: f32, // Fractional position in sample + is_playing: bool, + gate_prev: bool, + + // Parameters + gain: f32, + loop_enabled: bool, + pitch_shift: f32, // Additional pitch shift in semitones + + inputs: Vec, + outputs: Vec, + parameters: Vec, +} + +impl SimpleSamplerNode { + pub fn new(name: impl Into) -> Self { + let name = name.into(); + + let inputs = vec![ + NodePort::new("V/Oct", SignalType::CV, 0), + NodePort::new("Gate", SignalType::CV, 1), + ]; + + let outputs = vec![ + NodePort::new("Audio Out", SignalType::Audio, 0), + ]; + + let parameters = vec![ + Parameter::new(PARAM_GAIN, "Gain", 0.0, 2.0, 1.0, ParameterUnit::Generic), + Parameter::new(PARAM_LOOP, "Loop", 0.0, 1.0, 0.0, ParameterUnit::Generic), + Parameter::new(PARAM_PITCH_SHIFT, "Pitch Shift", -12.0, 12.0, 0.0, ParameterUnit::Generic), + ]; + + Self { + name, + sample_data: Arc::new(Mutex::new(Vec::new())), + sample_rate_original: 48000.0, + sample_path: None, + playhead: 0.0, + is_playing: false, + gate_prev: false, + gain: 1.0, + loop_enabled: false, + pitch_shift: 0.0, + inputs, + outputs, + parameters, + } + } + + /// Set the sample data (mono) + pub fn set_sample(&mut self, data: Vec, sample_rate: f32) { + let mut sample = self.sample_data.lock().unwrap(); + *sample = data; + self.sample_rate_original = sample_rate; + } + + /// Get the sample data reference (for external loading) + pub fn get_sample_data(&self) -> Arc>> { + 
Arc::clone(&self.sample_data) + } + + /// Load a sample from a file path + pub fn load_sample_from_file(&mut self, path: &str) -> Result<(), String> { + use crate::audio::sample_loader::load_audio_file; + + let sample_data = load_audio_file(path)?; + self.set_sample(sample_data.samples, sample_data.sample_rate as f32); + self.sample_path = Some(path.to_string()); + Ok(()) + } + + /// Get the currently loaded sample path + pub fn get_sample_path(&self) -> Option<&str> { + self.sample_path.as_deref() + } + + /// Get the current sample data and sample rate (for preset embedding) + pub fn get_sample_data_for_embedding(&self) -> (Vec, f32) { + let sample = self.sample_data.lock().unwrap(); + (sample.clone(), self.sample_rate_original) + } + + /// Convert V/oct CV to playback speed multiplier + /// 0V = 1.0 (original speed), +1V = 2.0 (one octave up), -1V = 0.5 (one octave down) + fn voct_to_speed(&self, voct: f32) -> f32 { + // Add pitch shift parameter + let total_semitones = voct * 12.0 + self.pitch_shift; + 2.0_f32.powf(total_semitones / 12.0) + } + + /// Read sample at playhead with linear interpolation + fn read_sample(&self, playhead: f32, sample: &[f32]) -> f32 { + if sample.is_empty() { + return 0.0; + } + + let index = playhead.floor() as usize; + let frac = playhead - playhead.floor(); + + if index >= sample.len() { + return 0.0; + } + + let sample1 = sample[index]; + let sample2 = if index + 1 < sample.len() { + sample[index + 1] + } else if self.loop_enabled { + sample[0] // Loop back to start + } else { + 0.0 + }; + + // Linear interpolation + sample1 + (sample2 - sample1) * frac + } +} + +impl AudioNode for SimpleSamplerNode { + fn category(&self) -> NodeCategory { + NodeCategory::Generator + } + + fn inputs(&self) -> &[NodePort] { + &self.inputs + } + + fn outputs(&self) -> &[NodePort] { + &self.outputs + } + + fn parameters(&self) -> &[Parameter] { + &self.parameters + } + + fn set_parameter(&mut self, id: u32, value: f32) { + match id { + PARAM_GAIN => 
{ + self.gain = value.clamp(0.0, 2.0); + } + PARAM_LOOP => { + self.loop_enabled = value > 0.5; + } + PARAM_PITCH_SHIFT => { + self.pitch_shift = value.clamp(-12.0, 12.0); + } + _ => {} + } + } + + fn get_parameter(&self, id: u32) -> f32 { + match id { + PARAM_GAIN => self.gain, + PARAM_LOOP => if self.loop_enabled { 1.0 } else { 0.0 }, + PARAM_PITCH_SHIFT => self.pitch_shift, + _ => 0.0, + } + } + + fn process( + &mut self, + inputs: &[&[f32]], + outputs: &mut [&mut [f32]], + _midi_inputs: &[&[MidiEvent]], + _midi_outputs: &mut [&mut Vec], + sample_rate: u32, + ) { + if outputs.is_empty() { + return; + } + + // Lock the sample data + let sample_data = self.sample_data.lock().unwrap(); + if sample_data.is_empty() { + // No sample loaded, output silence + for output in outputs.iter_mut() { + output.fill(0.0); + } + return; + } + + let output = &mut outputs[0]; + let frames = output.len() / 2; + + for frame in 0..frames { + // Read CV inputs + let voct = if !inputs.is_empty() && !inputs[0].is_empty() { + inputs[0][frame.min(inputs[0].len() / 2 - 1) * 2] + } else { + 0.0 // Default to original pitch + }; + + let gate = if inputs.len() > 1 && !inputs[1].is_empty() { + inputs[1][frame.min(inputs[1].len() / 2 - 1) * 2] + } else { + 0.0 + }; + + // Detect gate trigger (rising edge) + let gate_active = gate > 0.5; + if gate_active && !self.gate_prev { + // Trigger: start playback from beginning + self.playhead = 0.0; + self.is_playing = true; + } + self.gate_prev = gate_active; + + // Generate sample + let sample = if self.is_playing { + let s = self.read_sample(self.playhead, &sample_data); + + // Calculate playback speed from V/Oct + let speed = self.voct_to_speed(voct); + + // Advance playhead with resampling + let speed_adjusted = speed * (self.sample_rate_original / sample_rate as f32); + self.playhead += speed_adjusted; + + // Check if we've reached the end + if self.playhead >= sample_data.len() as f32 { + if self.loop_enabled { + // Loop back to start + 
self.playhead = self.playhead % sample_data.len() as f32; + } else { + // Stop playback + self.is_playing = false; + self.playhead = 0.0; + } + } + + s * self.gain + } else { + 0.0 + }; + + // Output stereo (same signal to both channels) + output[frame * 2] = sample; + output[frame * 2 + 1] = sample; + } + } + + fn reset(&mut self) { + self.playhead = 0.0; + self.is_playing = false; + self.gate_prev = false; + } + + fn node_type(&self) -> &str { + "SimpleSampler" + } + + fn name(&self) -> &str { + &self.name + } + + fn clone_node(&self) -> Box { + Box::new(Self::new(self.name.clone())) + } +} diff --git a/daw-backend/src/audio/node_graph/nodes/wavetable_oscillator.rs b/daw-backend/src/audio/node_graph/nodes/wavetable_oscillator.rs new file mode 100644 index 0000000..1f35a92 --- /dev/null +++ b/daw-backend/src/audio/node_graph/nodes/wavetable_oscillator.rs @@ -0,0 +1,286 @@ +use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType}; +use crate::audio::midi::MidiEvent; +use std::f32::consts::PI; + +const WAVETABLE_SIZE: usize = 256; + +// Parameters +const PARAM_WAVETABLE: u32 = 0; +const PARAM_FINE_TUNE: u32 = 1; +const PARAM_POSITION: u32 = 2; + +/// Types of preset wavetables +#[derive(Debug, Clone, Copy, PartialEq)] +enum WavetableType { + Sine = 0, + Saw = 1, + Square = 2, + Triangle = 3, + PWM = 4, // Pulse Width Modulated + Harmonic = 5, // Rich harmonics + Inharmonic = 6, // Metallic/bell-like + Digital = 7, // Stepped/digital artifacts +} + +impl WavetableType { + fn from_u32(value: u32) -> Self { + match value { + 0 => WavetableType::Sine, + 1 => WavetableType::Saw, + 2 => WavetableType::Square, + 3 => WavetableType::Triangle, + 4 => WavetableType::PWM, + 5 => WavetableType::Harmonic, + 6 => WavetableType::Inharmonic, + 7 => WavetableType::Digital, + _ => WavetableType::Sine, + } + } +} + +/// Generate a wavetable of the specified type +fn generate_wavetable(wave_type: WavetableType) -> Vec { + let mut table = 
vec![0.0; WAVETABLE_SIZE]; + + match wave_type { + WavetableType::Sine => { + for i in 0..WAVETABLE_SIZE { + let phase = (i as f32 / WAVETABLE_SIZE as f32) * 2.0 * PI; + table[i] = phase.sin(); + } + } + WavetableType::Saw => { + for i in 0..WAVETABLE_SIZE { + let t = i as f32 / WAVETABLE_SIZE as f32; + table[i] = 2.0 * t - 1.0; + } + } + WavetableType::Square => { + for i in 0..WAVETABLE_SIZE { + table[i] = if i < WAVETABLE_SIZE / 2 { 1.0 } else { -1.0 }; + } + } + WavetableType::Triangle => { + for i in 0..WAVETABLE_SIZE { + let t = i as f32 / WAVETABLE_SIZE as f32; + table[i] = if t < 0.5 { + 4.0 * t - 1.0 + } else { + -4.0 * t + 3.0 + }; + } + } + WavetableType::PWM => { + // Variable pulse width + for i in 0..WAVETABLE_SIZE { + let duty = 0.25; // 25% duty cycle + table[i] = if (i as f32 / WAVETABLE_SIZE as f32) < duty { 1.0 } else { -1.0 }; + } + } + WavetableType::Harmonic => { + // Multiple harmonics for rich sound + for i in 0..WAVETABLE_SIZE { + let phase = (i as f32 / WAVETABLE_SIZE as f32) * 2.0 * PI; + table[i] = phase.sin() * 0.5 + + (phase * 2.0).sin() * 0.25 + + (phase * 3.0).sin() * 0.125 + + (phase * 4.0).sin() * 0.0625; + } + } + WavetableType::Inharmonic => { + // Non-integer harmonics for metallic/bell-like sounds + for i in 0..WAVETABLE_SIZE { + let phase = (i as f32 / WAVETABLE_SIZE as f32) * 2.0 * PI; + table[i] = phase.sin() * 0.4 + + (phase * 2.13).sin() * 0.3 + + (phase * 3.76).sin() * 0.2 + + (phase * 5.41).sin() * 0.1; + } + } + WavetableType::Digital => { + // Stepped waveform with digital artifacts + for i in 0..WAVETABLE_SIZE { + let steps = 8; + let step = (i * steps / WAVETABLE_SIZE) as f32 / steps as f32; + table[i] = step * 2.0 - 1.0; + } + } + } + + table +} + +/// Wavetable oscillator node +pub struct WavetableOscillatorNode { + name: String, + + // Current wavetable + wavetable_type: WavetableType, + wavetable: Vec, + + // Oscillator state + phase: f32, + fine_tune: f32, // -1.0 to 1.0 semitones + position: f32, // 0.0 to 1.0 
(for future multi-cycle wavetables) + + inputs: Vec, + outputs: Vec, + parameters: Vec, +} + +impl WavetableOscillatorNode { + pub fn new(name: impl Into) -> Self { + let name = name.into(); + + let inputs = vec![ + NodePort::new("V/Oct", SignalType::CV, 0), + ]; + + let outputs = vec![ + NodePort::new("Audio Out", SignalType::Audio, 0), + ]; + + let parameters = vec![ + Parameter::new(PARAM_WAVETABLE, "Wavetable", 0.0, 7.0, 0.0, ParameterUnit::Generic), + Parameter::new(PARAM_FINE_TUNE, "Fine Tune", -1.0, 1.0, 0.0, ParameterUnit::Generic), + Parameter::new(PARAM_POSITION, "Position", 0.0, 1.0, 0.0, ParameterUnit::Generic), + ]; + + let wavetable_type = WavetableType::Sine; + let wavetable = generate_wavetable(wavetable_type); + + Self { + name, + wavetable_type, + wavetable, + phase: 0.0, + fine_tune: 0.0, + position: 0.0, + inputs, + outputs, + parameters, + } + } + + /// Convert V/oct CV to frequency with fine tune + fn voct_to_freq(&self, voct: f32) -> f32 { + let semitones = voct * 12.0 + self.fine_tune; + 440.0 * 2.0_f32.powf(semitones / 12.0) + } + + /// Read from wavetable with linear interpolation + fn read_wavetable(&self, phase: f32) -> f32 { + let index = phase * WAVETABLE_SIZE as f32; + let index_floor = index.floor() as usize % WAVETABLE_SIZE; + let index_ceil = (index_floor + 1) % WAVETABLE_SIZE; + let frac = index - index.floor(); + + // Linear interpolation + let sample1 = self.wavetable[index_floor]; + let sample2 = self.wavetable[index_ceil]; + sample1 + (sample2 - sample1) * frac + } +} + +impl AudioNode for WavetableOscillatorNode { + fn category(&self) -> NodeCategory { + NodeCategory::Generator + } + + fn inputs(&self) -> &[NodePort] { + &self.inputs + } + + fn outputs(&self) -> &[NodePort] { + &self.outputs + } + + fn parameters(&self) -> &[Parameter] { + &self.parameters + } + + fn set_parameter(&mut self, id: u32, value: f32) { + match id { + PARAM_WAVETABLE => { + let new_type = WavetableType::from_u32(value as u32); + if new_type != 
self.wavetable_type { + self.wavetable_type = new_type; + self.wavetable = generate_wavetable(new_type); + } + } + PARAM_FINE_TUNE => { + self.fine_tune = value.clamp(-1.0, 1.0); + } + PARAM_POSITION => { + self.position = value.clamp(0.0, 1.0); + } + _ => {} + } + } + + fn get_parameter(&self, id: u32) -> f32 { + match id { + PARAM_WAVETABLE => self.wavetable_type as u32 as f32, + PARAM_FINE_TUNE => self.fine_tune, + PARAM_POSITION => self.position, + _ => 0.0, + } + } + + fn process( + &mut self, + inputs: &[&[f32]], + outputs: &mut [&mut [f32]], + _midi_inputs: &[&[MidiEvent]], + _midi_outputs: &mut [&mut Vec], + sample_rate: u32, + ) { + if outputs.is_empty() { + return; + } + + let output = &mut outputs[0]; + let frames = output.len() / 2; + + for frame in 0..frames { + // Read V/Oct input + let voct = if !inputs.is_empty() && !inputs[0].is_empty() { + inputs[0][frame.min(inputs[0].len() / 2 - 1) * 2] + } else { + 0.0 // Default to A4 (440 Hz) + }; + + // Calculate frequency + let freq = self.voct_to_freq(voct); + + // Read from wavetable + let sample = self.read_wavetable(self.phase); + + // Advance phase + self.phase += freq / sample_rate as f32; + if self.phase >= 1.0 { + self.phase -= 1.0; + } + + // Output stereo (same signal to both channels) + output[frame * 2] = sample * 0.5; // Scale down to prevent clipping + output[frame * 2 + 1] = sample * 0.5; + } + } + + fn reset(&mut self) { + self.phase = 0.0; + } + + fn node_type(&self) -> &str { + "WavetableOscillator" + } + + fn name(&self) -> &str { + &self.name + } + + fn clone_node(&self) -> Box { + Box::new(Self::new(self.name.clone())) + } +} diff --git a/daw-backend/src/audio/node_graph/preset.rs b/daw-backend/src/audio/node_graph/preset.rs index bee7840..45a3545 100644 --- a/daw-backend/src/audio/node_graph/preset.rs +++ b/daw-backend/src/audio/node_graph/preset.rs @@ -1,6 +1,44 @@ use serde::{Deserialize, Serialize}; use std::collections::HashMap; +/// Sample data for preset serialization 
+#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(tag = "type")] +pub enum SampleData { + #[serde(rename = "simple_sampler")] + SimpleSampler { + #[serde(skip_serializing_if = "Option::is_none")] + file_path: Option, + #[serde(skip_serializing_if = "Option::is_none")] + embedded_data: Option, + }, + #[serde(rename = "multi_sampler")] + MultiSampler { layers: Vec }, +} + +/// Embedded sample data (base64-encoded for JSON compatibility) +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct EmbeddedSampleData { + /// Base64-encoded audio samples (f32 little-endian) + pub data_base64: String, + /// Original sample rate + pub sample_rate: u32, +} + +/// Layer data for MultiSampler +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct LayerData { + #[serde(skip_serializing_if = "Option::is_none")] + pub file_path: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub embedded_data: Option, + pub key_min: u8, + pub key_max: u8, + pub root_key: u8, + pub velocity_min: u8, + pub velocity_max: u8, +} + /// Serializable representation of a node graph preset #[derive(Debug, Clone, Serialize, Deserialize)] pub struct GraphPreset { @@ -66,6 +104,10 @@ pub struct SerializedNode { /// For VoiceAllocator nodes: the nested template graph #[serde(skip_serializing_if = "Option::is_none")] pub template_graph: Option>, + + /// For sampler nodes: loaded sample data + #[serde(skip_serializing_if = "Option::is_none")] + pub sample_data: Option, } /// Serialized connection between nodes @@ -132,6 +174,7 @@ impl SerializedNode { parameters: HashMap::new(), position: (0.0, 0.0), template_graph: None, + sample_data: None, } } diff --git a/daw-backend/src/audio/sample_loader.rs b/daw-backend/src/audio/sample_loader.rs new file mode 100644 index 0000000..c378fe7 --- /dev/null +++ b/daw-backend/src/audio/sample_loader.rs @@ -0,0 +1,316 @@ +use symphonia::core::audio::{AudioBufferRef, Signal}; +use symphonia::core::codecs::{DecoderOptions, CODEC_TYPE_NULL}; +use 
symphonia::core::errors::Error as SymphoniaError; +use symphonia::core::formats::FormatOptions; +use symphonia::core::io::MediaSourceStream; +use symphonia::core::meta::MetadataOptions; +use symphonia::core::probe::Hint; +use std::fs::File; +use std::path::Path; + +/// Loaded audio sample data +#[derive(Debug, Clone)] +pub struct SampleData { + /// Audio samples (mono, f32 format) + pub samples: Vec, + /// Original sample rate + pub sample_rate: u32, +} + +/// Load an audio file and decode it to mono f32 samples +pub fn load_audio_file(path: impl AsRef) -> Result { + let path = path.as_ref(); + + // Open the file + let file = File::open(path) + .map_err(|e| format!("Failed to open file: {}", e))?; + + // Create a media source stream + let mss = MediaSourceStream::new(Box::new(file), Default::default()); + + // Create a hint to help the format registry guess the format + let mut hint = Hint::new(); + if let Some(extension) = path.extension() { + if let Some(ext_str) = extension.to_str() { + hint.with_extension(ext_str); + } + } + + // Probe the media source for a format + let format_opts = FormatOptions::default(); + let metadata_opts = MetadataOptions::default(); + + let probed = symphonia::default::get_probe() + .format(&hint, mss, &format_opts, &metadata_opts) + .map_err(|e| format!("Failed to probe format: {}", e))?; + + let mut format = probed.format; + + // Find the first audio track + let track = format + .tracks() + .iter() + .find(|t| t.codec_params.codec != CODEC_TYPE_NULL) + .ok_or_else(|| "No audio tracks found".to_string())?; + + let track_id = track.id; + let sample_rate = track.codec_params.sample_rate.unwrap_or(48000); + + // Create a decoder for the track + let dec_opts = DecoderOptions::default(); + let mut decoder = symphonia::default::get_codecs() + .make(&track.codec_params, &dec_opts) + .map_err(|e| format!("Failed to create decoder: {}", e))?; + + // Decode all packets + let mut all_samples = Vec::new(); + + loop { + // Get the next packet + 
let packet = match format.next_packet() { + Ok(packet) => packet, + Err(SymphoniaError::IoError(e)) if e.kind() == std::io::ErrorKind::UnexpectedEof => { + // End of stream + break; + } + Err(e) => { + return Err(format!("Error reading packet: {}", e)); + } + }; + + // Skip packets that don't belong to the selected track + if packet.track_id() != track_id { + continue; + } + + // Decode the packet + let decoded = decoder + .decode(&packet) + .map_err(|e| format!("Failed to decode packet: {}", e))?; + + // Convert to f32 samples and mix to mono + let samples = convert_to_mono_f32(&decoded); + all_samples.extend_from_slice(&samples); + } + + Ok(SampleData { + samples: all_samples, + sample_rate, + }) +} + +/// Convert an audio buffer to mono f32 samples +fn convert_to_mono_f32(buf: &AudioBufferRef) -> Vec { + match buf { + AudioBufferRef::F32(buf) => { + let channels = buf.spec().channels.count(); + let frames = buf.frames(); + let mut mono = Vec::with_capacity(frames); + + if channels == 1 { + // Already mono + mono.extend_from_slice(buf.chan(0)); + } else { + // Mix down to mono by averaging all channels + for frame in 0..frames { + let mut sum = 0.0; + for ch in 0..channels { + sum += buf.chan(ch)[frame]; + } + mono.push(sum / channels as f32); + } + } + + mono + } + AudioBufferRef::U8(buf) => { + let channels = buf.spec().channels.count(); + let frames = buf.frames(); + let mut mono = Vec::with_capacity(frames); + + if channels == 1 { + for &sample in buf.chan(0) { + mono.push((sample as f32 - 128.0) / 128.0); + } + } else { + for frame in 0..frames { + let mut sum = 0.0; + for ch in 0..channels { + sum += (buf.chan(ch)[frame] as f32 - 128.0) / 128.0; + } + mono.push(sum / channels as f32); + } + } + + mono + } + AudioBufferRef::U16(buf) => { + let channels = buf.spec().channels.count(); + let frames = buf.frames(); + let mut mono = Vec::with_capacity(frames); + + if channels == 1 { + for &sample in buf.chan(0) { + mono.push((sample as f32 - 32768.0) / 32768.0); 
+ } + } else { + for frame in 0..frames { + let mut sum = 0.0; + for ch in 0..channels { + sum += (buf.chan(ch)[frame] as f32 - 32768.0) / 32768.0; + } + mono.push(sum / channels as f32); + } + } + + mono + } + AudioBufferRef::U24(buf) => { + let channels = buf.spec().channels.count(); + let frames = buf.frames(); + let mut mono = Vec::with_capacity(frames); + + if channels == 1 { + for &sample in buf.chan(0) { + mono.push((sample.inner() as f32 - 8388608.0) / 8388608.0); + } + } else { + for frame in 0..frames { + let mut sum = 0.0; + for ch in 0..channels { + sum += (buf.chan(ch)[frame].inner() as f32 - 8388608.0) / 8388608.0; + } + mono.push(sum / channels as f32); + } + } + + mono + } + AudioBufferRef::U32(buf) => { + let channels = buf.spec().channels.count(); + let frames = buf.frames(); + let mut mono = Vec::with_capacity(frames); + + if channels == 1 { + for &sample in buf.chan(0) { + mono.push((sample as f32 - 2147483648.0) / 2147483648.0); + } + } else { + for frame in 0..frames { + let mut sum = 0.0; + for ch in 0..channels { + sum += (buf.chan(ch)[frame] as f32 - 2147483648.0) / 2147483648.0; + } + mono.push(sum / channels as f32); + } + } + + mono + } + AudioBufferRef::S8(buf) => { + let channels = buf.spec().channels.count(); + let frames = buf.frames(); + let mut mono = Vec::with_capacity(frames); + + if channels == 1 { + for &sample in buf.chan(0) { + mono.push(sample as f32 / 128.0); + } + } else { + for frame in 0..frames { + let mut sum = 0.0; + for ch in 0..channels { + sum += buf.chan(ch)[frame] as f32 / 128.0; + } + mono.push(sum / channels as f32); + } + } + + mono + } + AudioBufferRef::S16(buf) => { + let channels = buf.spec().channels.count(); + let frames = buf.frames(); + let mut mono = Vec::with_capacity(frames); + + if channels == 1 { + for &sample in buf.chan(0) { + mono.push(sample as f32 / 32768.0); + } + } else { + for frame in 0..frames { + let mut sum = 0.0; + for ch in 0..channels { + sum += buf.chan(ch)[frame] as f32 / 32768.0; 
+ } + mono.push(sum / channels as f32); + } + } + + mono + } + AudioBufferRef::S24(buf) => { + let channels = buf.spec().channels.count(); + let frames = buf.frames(); + let mut mono = Vec::with_capacity(frames); + + if channels == 1 { + for &sample in buf.chan(0) { + mono.push(sample.inner() as f32 / 8388608.0); + } + } else { + for frame in 0..frames { + let mut sum = 0.0; + for ch in 0..channels { + sum += buf.chan(ch)[frame].inner() as f32 / 8388608.0; + } + mono.push(sum / channels as f32); + } + } + + mono + } + AudioBufferRef::S32(buf) => { + let channels = buf.spec().channels.count(); + let frames = buf.frames(); + let mut mono = Vec::with_capacity(frames); + + if channels == 1 { + for &sample in buf.chan(0) { + mono.push(sample as f32 / 2147483648.0); + } + } else { + for frame in 0..frames { + let mut sum = 0.0; + for ch in 0..channels { + sum += buf.chan(ch)[frame] as f32 / 2147483648.0; + } + mono.push(sum / channels as f32); + } + } + + mono + } + AudioBufferRef::F64(buf) => { + let channels = buf.spec().channels.count(); + let frames = buf.frames(); + let mut mono = Vec::with_capacity(frames); + + if channels == 1 { + for &sample in buf.chan(0) { + mono.push(sample as f32); + } + } else { + for frame in 0..frames { + let mut sum = 0.0; + for ch in 0..channels { + sum += buf.chan(ch)[frame] as f32; + } + mono.push(sum / channels as f32); + } + } + + mono + } + } +} diff --git a/daw-backend/src/command/types.rs b/daw-backend/src/command/types.rs index d133ae4..4e89016 100644 --- a/daw-backend/src/command/types.rs +++ b/daw-backend/src/command/types.rs @@ -144,6 +144,15 @@ pub enum Command { GraphLoadPreset(TrackId, String), /// Save a VoiceAllocator's template graph as a preset (track_id, voice_allocator_id, preset_path, preset_name) GraphSaveTemplatePreset(TrackId, u32, String, String), + + /// Load a sample into a SimpleSampler node (track_id, node_id, file_path) + SamplerLoadSample(TrackId, u32, String), + /// Add a sample layer to a MultiSampler 
node (track_id, node_id, file_path, key_min, key_max, root_key, velocity_min, velocity_max) + MultiSamplerAddLayer(TrackId, u32, String, u8, u8, u8, u8, u8), + /// Update a MultiSampler layer's configuration (track_id, node_id, layer_index, key_min, key_max, root_key, velocity_min, velocity_max) + MultiSamplerUpdateLayer(TrackId, u32, usize, u8, u8, u8, u8, u8), + /// Remove a layer from a MultiSampler node (track_id, node_id, layer_index) + MultiSamplerRemoveLayer(TrackId, u32, usize), } /// Events sent from audio thread back to UI/control thread diff --git a/src-tauri/Cargo.lock b/src-tauri/Cargo.lock index def7dfb..c2a9e24 100644 --- a/src-tauri/Cargo.lock +++ b/src-tauri/Cargo.lock @@ -1013,6 +1013,7 @@ dependencies = [ name = "daw-backend" version = "0.1.0" dependencies = [ + "base64 0.22.1", "cpal", "crossterm", "dasp_envelope", diff --git a/src-tauri/src/audio.rs b/src-tauri/src/audio.rs index ff959b5..1a5469c 100644 --- a/src-tauri/src/audio.rs +++ b/src-tauri/src/audio.rs @@ -743,7 +743,20 @@ pub async fn graph_load_preset( track_id: u32, preset_path: String, ) -> Result<(), String> { + use daw_backend::GraphPreset; + let mut audio_state = state.lock().unwrap(); + + // Load the preset JSON to count nodes + let json = std::fs::read_to_string(&preset_path) + .map_err(|e| format!("Failed to read preset file: {}", e))?; + let preset = GraphPreset::from_json(&json) + .map_err(|e| format!("Failed to parse preset: {}", e))?; + + // Update the node ID counter to account for nodes in the preset + let node_count = preset.nodes.len() as u32; + audio_state.next_graph_node_id = node_count; + if let Some(controller) = &mut audio_state.controller { // Send command to load preset controller.graph_load_preset(track_id, preset_path); @@ -936,6 +949,180 @@ pub async fn graph_get_template_state( } } +#[tauri::command] +pub async fn sampler_load_sample( + state: tauri::State<'_, Arc>>, + track_id: u32, + node_id: u32, + file_path: String, +) -> Result<(), String> { + let mut 
audio_state = state.lock().unwrap(); + + if let Some(controller) = &mut audio_state.controller { + controller.sampler_load_sample(track_id, node_id, file_path); + Ok(()) + } else { + Err("Audio not initialized".to_string()) + } +} + +#[tauri::command] +pub async fn multi_sampler_add_layer( + state: tauri::State<'_, Arc>>, + track_id: u32, + node_id: u32, + file_path: String, + key_min: u8, + key_max: u8, + root_key: u8, + velocity_min: u8, + velocity_max: u8, +) -> Result<(), String> { + let mut audio_state = state.lock().unwrap(); + + if let Some(controller) = &mut audio_state.controller { + controller.multi_sampler_add_layer( + track_id, + node_id, + file_path, + key_min, + key_max, + root_key, + velocity_min, + velocity_max, + ); + Ok(()) + } else { + Err("Audio not initialized".to_string()) + } +} + +#[derive(serde::Serialize)] +pub struct LayerInfo { + pub file_path: String, + pub key_min: u8, + pub key_max: u8, + pub root_key: u8, + pub velocity_min: u8, + pub velocity_max: u8, +} + +#[tauri::command] +pub async fn multi_sampler_get_layers( + state: tauri::State<'_, Arc>>, + track_id: u32, + node_id: u32, +) -> Result, String> { + use daw_backend::GraphPreset; + + let mut audio_state = state.lock().unwrap(); + if let Some(controller) = &mut audio_state.controller { + // Use preset serialization to get node data including layers + // Use timestamp to ensure unique temp file for each query to avoid conflicts + let timestamp = std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_nanos(); + let temp_path = std::env::temp_dir().join(format!("temp_layers_query_{}_{}_{}.json", track_id, node_id, timestamp)); + let temp_path_str = temp_path.to_string_lossy().to_string(); + + controller.graph_save_preset( + track_id, + temp_path_str.clone(), + "temp".to_string(), + "".to_string(), + vec![] + ); + + // Give the audio thread time to process + std::thread::sleep(std::time::Duration::from_millis(50)); + + // Read the temp file and parse 
it + match std::fs::read_to_string(&temp_path) { + Ok(json) => { + // Clean up temp file + let _ = std::fs::remove_file(&temp_path); + + // Parse the preset JSON + let preset: GraphPreset = match serde_json::from_str(&json) { + Ok(p) => p, + Err(e) => return Err(format!("Failed to parse preset: {}", e)), + }; + + // Find the node with the matching ID + if let Some(node) = preset.nodes.iter().find(|n| n.id == node_id) { + if let Some(ref sample_data) = node.sample_data { + // Check if it's a MultiSampler + if let daw_backend::audio::node_graph::preset::SampleData::MultiSampler { layers } = sample_data { + return Ok(layers.iter().map(|layer| LayerInfo { + file_path: layer.file_path.clone().unwrap_or_default(), + key_min: layer.key_min, + key_max: layer.key_max, + root_key: layer.root_key, + velocity_min: layer.velocity_min, + velocity_max: layer.velocity_max, + }).collect()); + } + } + } + + Ok(Vec::new()) + } + Err(_) => Ok(Vec::new()), // Return empty list if file doesn't exist + } + } else { + Err("Audio not initialized".to_string()) + } +} + +#[tauri::command] +pub async fn multi_sampler_update_layer( + state: tauri::State<'_, Arc>>, + track_id: u32, + node_id: u32, + layer_index: usize, + key_min: u8, + key_max: u8, + root_key: u8, + velocity_min: u8, + velocity_max: u8, +) -> Result<(), String> { + let mut audio_state = state.lock().unwrap(); + + if let Some(controller) = &mut audio_state.controller { + controller.multi_sampler_update_layer( + track_id, + node_id, + layer_index, + key_min, + key_max, + root_key, + velocity_min, + velocity_max, + ); + Ok(()) + } else { + Err("Audio not initialized".to_string()) + } +} + +#[tauri::command] +pub async fn multi_sampler_remove_layer( + state: tauri::State<'_, Arc>>, + track_id: u32, + node_id: u32, + layer_index: usize, +) -> Result<(), String> { + let mut audio_state = state.lock().unwrap(); + + if let Some(controller) = &mut audio_state.controller { + controller.multi_sampler_remove_layer(track_id, node_id, 
layer_index); + Ok(()) + } else { + Err("Audio not initialized".to_string()) + } +} + #[derive(serde::Serialize, Clone)] #[serde(tag = "type")] pub enum SerializedAudioEvent { diff --git a/src-tauri/src/lib.rs b/src-tauri/src/lib.rs index 08cae97..1264d5a 100644 --- a/src-tauri/src/lib.rs +++ b/src-tauri/src/lib.rs @@ -228,6 +228,11 @@ pub fn run() { audio::graph_delete_preset, audio::graph_get_state, audio::graph_get_template_state, + audio::sampler_load_sample, + audio::multi_sampler_add_layer, + audio::multi_sampler_get_layers, + audio::multi_sampler_update_layer, + audio::multi_sampler_remove_layer, ]) // .manage(window_counter) .build(tauri::generate_context!()) diff --git a/src/assets/focus-animation.svg b/src/assets/focus-animation.svg new file mode 100644 index 0000000..50b2ed1 --- /dev/null +++ b/src/assets/focus-animation.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/src/assets/focus-music.svg b/src/assets/focus-music.svg new file mode 100644 index 0000000..e415f3c --- /dev/null +++ b/src/assets/focus-music.svg @@ -0,0 +1,10 @@ + + + + + + + + + + diff --git a/src/assets/focus-video.svg b/src/assets/focus-video.svg new file mode 100644 index 0000000..a806538 --- /dev/null +++ b/src/assets/focus-video.svg @@ -0,0 +1,9 @@ + + + + + + + + + diff --git a/src/main.js b/src/main.js index bce3947..fea0971 100644 --- a/src/main.js +++ b/src/main.js @@ -8,6 +8,12 @@ import { showNewFileDialog, closeDialog, } from "./newfile.js"; +import { + createStartScreen, + updateStartScreen, + showStartScreen, + hideStartScreen, +} from "./startscreen.js"; import { titleCase, getMousePositionFraction, @@ -125,6 +131,12 @@ const { PhysicalPosition, LogicalPosition } = window.__TAURI__.dpi; const { getCurrentWindow } = window.__TAURI__.window; const { getVersion } = window.__TAURI__.app; +// Supported file extensions +const imageExtensions = ["png", "gif", "avif", "jpg", "jpeg"]; +const audioExtensions = ["mp3", "wav", "aiff", "ogg", "flac"]; +const midiExtensions = ["mid", 
"midi"]; +const beamExtensions = ["beam"]; + // import init, { CoreInterface } from './pkg/lightningbeam_core.js'; window.onerror = (message, source, lineno, colno, error) => { @@ -1369,7 +1381,9 @@ function _newFile(width, height, fps, layoutKey) { const oldRoot = root; console.log('[_newFile] Old root:', oldRoot, 'frameRate:', oldRoot?.frameRate); - root = new GraphicsObject("root"); + // Determine initial child type based on layout + const initialChildType = layoutKey === 'audioDaw' ? 'midi' : 'layer'; + root = new GraphicsObject("root", initialChildType); // Switch to the selected layout if provided if (layoutKey) { @@ -1791,12 +1805,6 @@ function revert() { } async function importFile() { - // Define supported extensions - const imageExtensions = ["png", "gif", "avif", "jpg", "jpeg"]; - const audioExtensions = ["mp3", "wav", "aiff", "ogg", "flac"]; - const midiExtensions = ["mid", "midi"]; - const beamExtensions = ["beam"]; - // Define filters in consistent order const allFilters = [ { @@ -4363,12 +4371,33 @@ function outliner(object = undefined) { async function startup() { await loadConfig(); createNewFileDialog(_newFile, _open, config); + + // Create start screen with callback + createStartScreen(async (options) => { + hideStartScreen(); + + if (options.type === 'new') { + // Create new project with selected focus + _newFile( + options.width || 800, + options.height || 600, + options.fps || 24, + options.projectFocus + ); + } else if (options.type === 'reopen' || options.type === 'recent') { + // Open existing file + await _open(options.filePath); + } + }); + if (!window.openedFiles?.length) { if (config.reopenLastSession && config.recentFiles?.length) { document.body.style.cursor = "wait" setTimeout(()=>_open(config.recentFiles[0]), 10) } else { - showNewFileDialog(config); + // Show start screen instead of new file dialog + await updateStartScreen(config); + showStartScreen(); } } } @@ -6074,9 +6103,11 @@ function nodeEditor() { // Create the Drawflow 
canvas const editorDiv = document.createElement("div"); editorDiv.id = "drawflow"; - editorDiv.style.width = "100%"; - editorDiv.style.height = "calc(100% - 40px)"; // Account for header - editorDiv.style.position = "relative"; + editorDiv.style.position = "absolute"; + editorDiv.style.top = "40px"; // Start below header + editorDiv.style.left = "0"; + editorDiv.style.right = "0"; + editorDiv.style.bottom = "0"; container.appendChild(editorDiv); // Create node palette @@ -6611,7 +6642,7 @@ function nodeEditor() { const nodeElement = document.getElementById(`node-${nodeId}`); if (!nodeElement) return; - const sliders = nodeElement.querySelectorAll(".node-slider"); + const sliders = nodeElement.querySelectorAll('input[type="range"]'); sliders.forEach(slider => { // Prevent node dragging when interacting with slider slider.addEventListener("mousedown", (e) => { @@ -6654,6 +6685,115 @@ function nodeEditor() { } }); }); + + // Handle Load Sample button for SimpleSampler + const loadSampleBtn = nodeElement.querySelector(".load-sample-btn"); + if (loadSampleBtn) { + loadSampleBtn.addEventListener("mousedown", (e) => e.stopPropagation()); + loadSampleBtn.addEventListener("pointerdown", (e) => e.stopPropagation()); + loadSampleBtn.addEventListener("click", async (e) => { + e.stopPropagation(); + + const nodeData = editor.getNodeFromId(nodeId); + if (!nodeData || nodeData.data.backendId === null) { + showError("Node not yet created on backend"); + return; + } + + const currentTrackId = getCurrentMidiTrack(); + if (currentTrackId === null) { + showError("No MIDI track selected"); + return; + } + + try { + const filePath = await openFileDialog({ + title: "Load Audio Sample", + filters: [{ + name: "Audio Files", + extensions: audioExtensions + }] + }); + + if (filePath) { + await invoke("sampler_load_sample", { + trackId: currentTrackId, + nodeId: nodeData.data.backendId, + filePath: filePath + }); + + // Update UI to show filename + const sampleInfo = 
nodeElement.querySelector(`#sample-info-${nodeId}`); + if (sampleInfo) { + const filename = filePath.split('/').pop().split('\\').pop(); + sampleInfo.textContent = filename; + } + } + } catch (err) { + console.error("Failed to load sample:", err); + showError(`Failed to load sample: ${err}`); + } + }); + } + + // Handle Add Layer button for MultiSampler + const addLayerBtn = nodeElement.querySelector(".add-layer-btn"); + if (addLayerBtn) { + addLayerBtn.addEventListener("mousedown", (e) => e.stopPropagation()); + addLayerBtn.addEventListener("pointerdown", (e) => e.stopPropagation()); + addLayerBtn.addEventListener("click", async (e) => { + e.stopPropagation(); + + const nodeData = editor.getNodeFromId(nodeId); + if (!nodeData || nodeData.data.backendId === null) { + showError("Node not yet created on backend"); + return; + } + + const currentTrackId = getCurrentMidiTrack(); + if (currentTrackId === null) { + showError("No MIDI track selected"); + return; + } + + try { + const filePath = await openFileDialog({ + title: "Add Sample Layer", + filters: [{ + name: "Audio Files", + extensions: audioExtensions + }] + }); + + if (filePath) { + // Show dialog to configure layer mapping + const layerConfig = await showLayerConfigDialog(filePath); + + if (layerConfig) { + await invoke("multi_sampler_add_layer", { + trackId: currentTrackId, + nodeId: nodeData.data.backendId, + filePath: filePath, + keyMin: layerConfig.keyMin, + keyMax: layerConfig.keyMax, + rootKey: layerConfig.rootKey, + velocityMin: layerConfig.velocityMin, + velocityMax: layerConfig.velocityMax + }); + + // Wait a bit for the audio thread to process the add command + await new Promise(resolve => setTimeout(resolve, 100)); + + // Refresh the layers list + await refreshSampleLayersList(nodeId); + } + } + } catch (err) { + console.error("Failed to add layer:", err); + showError(`Failed to add layer: ${err}`); + } + }); + } }, 100); } @@ -6682,6 +6822,134 @@ function nodeEditor() { 
enterTemplate(node.data.backendId, nodeName); + } + + // Refresh the layers list for a MultiSampler node + async function refreshSampleLayersList(nodeId) { + const nodeData = editor.getNodeFromId(nodeId); + if (!nodeData || nodeData.data.backendId === null) { + return; + } + + const currentTrackId = getCurrentMidiTrack(); + if (currentTrackId === null) { + return; + } + + try { + const layers = await invoke("multi_sampler_get_layers", { + trackId: currentTrackId, + nodeId: nodeData.data.backendId + }); + + const layersList = document.querySelector(`#sample-layers-list-${nodeId}`); + const layersContainer = document.querySelector(`#sample-layers-container-${nodeId}`); + + if (!layersList) return; + + // Prevent scroll events from bubbling to canvas + if (layersContainer && !layersContainer.dataset.scrollListenerAdded) { + layersContainer.addEventListener('wheel', (e) => { + e.stopPropagation(); + }, { passive: false }); + layersContainer.dataset.scrollListenerAdded = 'true'; + } + + if (layers.length === 0) { + layersList.innerHTML = 'No layers loaded'; + } else { + layersList.innerHTML = layers.map((layer, index) => { + const filename = layer.file_path.split('/').pop().split('\\').pop(); + const keyRange = `${midiToNoteName(layer.key_min)}-${midiToNoteName(layer.key_max)}`; + const rootNote = midiToNoteName(layer.root_key); + const velRange = `${layer.velocity_min}-${layer.velocity_max}`; + + return ` + + ${filename} + ${keyRange} + ${rootNote} + ${velRange} + 
+ + +
 + + + `; + }).join(''); + + // Add event listeners for edit buttons + const editButtons = layersList.querySelectorAll('.btn-edit-layer'); + editButtons.forEach(btn => { + btn.addEventListener('click', async (e) => { + e.stopPropagation(); + const index = parseInt(btn.dataset.index); + const layer = layers[index]; + + // Show edit dialog with current values + const layerConfig = await showLayerConfigDialog(layer.file_path, { + keyMin: layer.key_min, + keyMax: layer.key_max, + rootKey: layer.root_key, + velocityMin: layer.velocity_min, + velocityMax: layer.velocity_max + }); + + if (layerConfig) { + try { + await invoke("multi_sampler_update_layer", { + trackId: currentTrackId, + nodeId: nodeData.data.backendId, + layerIndex: index, + keyMin: layerConfig.keyMin, + keyMax: layerConfig.keyMax, + rootKey: layerConfig.rootKey, + velocityMin: layerConfig.velocityMin, + velocityMax: layerConfig.velocityMax + }); + + // Refresh the list + await refreshSampleLayersList(nodeId); + } catch (err) { + console.error("Failed to update layer:", err); + showError(`Failed to update layer: ${err}`); + } + } + }); + }); + + // Add event listeners for delete buttons + const deleteButtons = layersList.querySelectorAll('.btn-delete-layer'); + deleteButtons.forEach(btn => { + btn.addEventListener('click', async (e) => { + e.stopPropagation(); + const index = parseInt(btn.dataset.index); + const layer = layers[index]; + const filename = layer.file_path.split('/').pop().split('\\').pop(); + + if (confirm(`Delete layer "${filename}"?`)) { + try { + await invoke("multi_sampler_remove_layer", { + trackId: currentTrackId, + nodeId: nodeData.data.backendId, + layerIndex: index + }); + + // Refresh the list + await refreshSampleLayersList(nodeId); + } catch (err) { + console.error("Failed to remove layer:", err); + showError(`Failed to remove layer: ${err}`); + } + } + }); + }); + } + } catch (err) { + console.error("Failed to get layers:", err); + } + } + + // Handle connection creation function 
handleConnectionCreated(connection) { console.log("handleConnectionCreated called:", connection); @@ -7508,6 +7776,161 @@ function showSavePresetDialog(container) { }); } +// Helper function to convert MIDI note number to note name +function midiToNoteName(midiNote) { + const noteNames = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']; + const octave = Math.floor(midiNote / 12) - 1; + const noteName = noteNames[midiNote % 12]; + return `${noteName}${octave}`; +} + +// Show dialog to configure MultiSampler layer zones +function showLayerConfigDialog(filePath, existingConfig = null) { + return new Promise((resolve) => { + const filename = filePath.split('/').pop().split('\\').pop(); + const isEdit = existingConfig !== null; + + // Use existing values or defaults + const keyMin = existingConfig?.keyMin ?? 0; + const keyMax = existingConfig?.keyMax ?? 127; + const rootKey = existingConfig?.rootKey ?? 60; + const velocityMin = existingConfig?.velocityMin ?? 0; + const velocityMax = existingConfig?.velocityMax ?? 
127; + + // Create modal dialog + const dialog = document.createElement('div'); + dialog.className = 'modal-overlay'; + dialog.innerHTML = ` + + `; + + document.body.appendChild(dialog); + + // Update note names when inputs change + const keyMinInput = dialog.querySelector('#key-min'); + const keyMaxInput = dialog.querySelector('#key-max'); + const rootKeyInput = dialog.querySelector('#root-key'); + + const updateKeyMinName = () => { + const note = parseInt(keyMinInput.value) || 0; + dialog.querySelector('#key-min-name').textContent = midiToNoteName(note); + }; + + const updateKeyMaxName = () => { + const note = parseInt(keyMaxInput.value) || 127; + dialog.querySelector('#key-max-name').textContent = midiToNoteName(note); + }; + + const updateRootKeyName = () => { + const note = parseInt(rootKeyInput.value) || 60; + dialog.querySelector('#root-key-name').textContent = midiToNoteName(note); + }; + + keyMinInput.addEventListener('input', updateKeyMinName); + keyMaxInput.addEventListener('input', updateKeyMaxName); + rootKeyInput.addEventListener('input', updateRootKeyName); + + // Focus first input + setTimeout(() => dialog.querySelector('#key-min')?.focus(), 100); + + // Handle cancel + dialog.querySelector('.btn-cancel').addEventListener('click', () => { + dialog.remove(); + resolve(null); + }); + + // Handle submit + dialog.querySelector('#layer-config-form').addEventListener('submit', (e) => { + e.preventDefault(); + + const keyMin = parseInt(keyMinInput.value); + const keyMax = parseInt(keyMaxInput.value); + const rootKey = parseInt(rootKeyInput.value); + const velocityMin = parseInt(dialog.querySelector('#velocity-min').value); + const velocityMax = parseInt(dialog.querySelector('#velocity-max').value); + + // Validate ranges + if (keyMin > keyMax) { + alert('Key Min must be less than or equal to Key Max'); + return; + } + + if (velocityMin > velocityMax) { + alert('Velocity Min must be less than or equal to Velocity Max'); + return; + } + + if (rootKey < 
keyMin || rootKey > keyMax) { + alert('Root Key must be within the key range'); + return; + } + + dialog.remove(); + resolve({ + keyMin, + keyMax, + rootKey, + velocityMin, + velocityMax + }); + }); + + // Close on background click + dialog.addEventListener('click', (e) => { + if (e.target === dialog) { + dialog.remove(); + resolve(null); + } + }); + }); +} + function filterPresets(container) { const searchTerm = container.querySelector('#preset-search')?.value.toLowerCase() || ''; const selectedTag = container.querySelector('#preset-tag-filter')?.value || ''; diff --git a/src/models/graphics-object.js b/src/models/graphics-object.js index f9087a3..4f8044b 100644 --- a/src/models/graphics-object.js +++ b/src/models/graphics-object.js @@ -31,7 +31,7 @@ export function initializeGraphicsObjectDependencies(deps) { } class GraphicsObject extends Widget { - constructor(uuid) { + constructor(uuid, initialChildType = 'layer') { super(0, 0) this.rotation = 0; // in radians this.scale_x = 1; @@ -48,10 +48,31 @@ class GraphicsObject extends Widget { this.currentTime = 0; // New: continuous time for AnimationData curves this.currentLayer = 0; this._activeAudioTrack = null; // Reference to active audio track (if any) - this.children = [new Layer(uuid + "-L1", this)]; - // this.layers = [new Layer(uuid + "-L1")]; + + // Initialize children and audioTracks based on initialChildType + this.children = []; this.audioTracks = []; - // this.children = [] + + if (initialChildType === 'layer') { + this.children = [new Layer(uuid + "-L1", this)]; + this.currentLayer = 0; // Set first layer as active + } else if (initialChildType === 'midi') { + const midiTrack = new AudioTrack(uuid + "-M1", "MIDI 1", 'midi'); + this.audioTracks.push(midiTrack); + this._activeAudioTrack = midiTrack; // Set MIDI track as active (the object, not index) + // Initialize the MIDI track in the audio backend + midiTrack.initializeTrack().catch(err => { + console.error('Failed to initialize MIDI track:', err); + 
}); + } else if (initialChildType === 'audio') { + const audioTrack = new AudioTrack(uuid + "-A1", "Audio 1", 'audio'); + this.audioTracks.push(audioTrack); + this._activeAudioTrack = audioTrack; // Set audio track as active (the object, not index) + audioTrack.initializeTrack().catch(err => { + console.error('Failed to initialize audio track:', err); + }); + } + // If initialChildType is 'none' or anything else, leave both arrays empty this.shapes = []; diff --git a/src/nodeTypes.js b/src/nodeTypes.js index d8443ec..51639b4 100644 --- a/src/nodeTypes.js +++ b/src/nodeTypes.js @@ -56,7 +56,7 @@ export const nodeTypes = {
Mixer
- +
- +
- +
- +
` @@ -185,15 +185,15 @@ export const nodeTypes = {
Filter
- +
- +
- +
` @@ -220,19 +220,19 @@ export const nodeTypes = {
ADSR
- +
- +
- +
- +
` @@ -295,11 +295,11 @@ export const nodeTypes = {
Audio→CV
- +
- +
` @@ -325,11 +325,11 @@ export const nodeTypes = {
Oscilloscope
- +
- +
Pass-through monitor
@@ -355,7 +355,7 @@ export const nodeTypes = {
Voice Allocator
- +
Double-click to edit
@@ -443,15 +443,15 @@ export const nodeTypes = {
LFO
- +
- +
- +
` @@ -474,11 +474,11 @@ export const nodeTypes = {
Noise
- +
- +
` @@ -525,7 +525,362 @@ export const nodeTypes = {
Pan
- + +
+ + ` + }, + + Delay: { + name: 'Delay', + category: NodeCategory.EFFECT, + description: 'Stereo delay with feedback', + inputs: [ + { name: 'Audio In', type: SignalType.AUDIO, index: 0 } + ], + outputs: [ + { name: 'Audio Out', type: SignalType.AUDIO, index: 0 } + ], + parameters: [ + { id: 0, name: 'delay_time', label: 'Delay Time', min: 0.001, max: 2.0, default: 0.5, unit: 's' }, + { id: 1, name: 'feedback', label: 'Feedback', min: 0, max: 0.95, default: 0.5, unit: '' }, + { id: 2, name: 'wet_dry', label: 'Wet/Dry', min: 0, max: 1, default: 0.5, unit: '' } + ], + getHTML: (nodeId) => ` +
+
Delay
+
+ + +
+
+ + +
+
+ + +
+
+ ` + }, + + Reverb: { + name: 'Reverb', + category: NodeCategory.EFFECT, + description: 'Schroeder reverb with room size and damping', + inputs: [ + { name: 'Audio In', type: SignalType.AUDIO, index: 0 } + ], + outputs: [ + { name: 'Audio Out', type: SignalType.AUDIO, index: 0 } + ], + parameters: [ + { id: 0, name: 'room_size', label: 'Room Size', min: 0, max: 1, default: 0.5, unit: '' }, + { id: 1, name: 'damping', label: 'Damping', min: 0, max: 1, default: 0.5, unit: '' }, + { id: 2, name: 'wet_dry', label: 'Wet/Dry', min: 0, max: 1, default: 0.3, unit: '' } + ], + getHTML: (nodeId) => ` +
+
Reverb
+
+ + +
+
+ + +
+
+ + +
+
+ ` + }, + + Chorus: { + name: 'Chorus', + category: NodeCategory.EFFECT, + description: 'Chorus effect with modulated delay', + inputs: [ + { name: 'Audio In', type: SignalType.AUDIO, index: 0 } + ], + outputs: [ + { name: 'Audio Out', type: SignalType.AUDIO, index: 0 } + ], + parameters: [ + { id: 0, name: 'rate', label: 'Rate', min: 0.1, max: 5.0, default: 1.0, unit: 'Hz' }, + { id: 1, name: 'depth', label: 'Depth', min: 0, max: 1, default: 0.5, unit: '' }, + { id: 2, name: 'wet_dry', label: 'Wet/Dry', min: 0, max: 1, default: 0.5, unit: '' } + ], + getHTML: (nodeId) => ` +
+
Chorus
+
+ + +
+
+ + +
+
+ + +
+
+ ` + }, + + Flanger: { + name: 'Flanger', + category: NodeCategory.EFFECT, + description: 'Flanger effect with feedback', + inputs: [ + { name: 'Audio In', type: SignalType.AUDIO, index: 0 } + ], + outputs: [ + { name: 'Audio Out', type: SignalType.AUDIO, index: 0 } + ], + parameters: [ + { id: 0, name: 'rate', label: 'Rate', min: 0.1, max: 10.0, default: 0.5, unit: 'Hz' }, + { id: 1, name: 'depth', label: 'Depth', min: 0, max: 1, default: 0.7, unit: '' }, + { id: 2, name: 'feedback', label: 'Feedback', min: -0.95, max: 0.95, default: 0.5, unit: '' }, + { id: 3, name: 'wet_dry', label: 'Wet/Dry', min: 0, max: 1, default: 0.5, unit: '' } + ], + getHTML: (nodeId) => ` +
+
Flanger
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ ` + }, + + FMSynth: { + name: 'FM Synth', + category: NodeCategory.GENERATOR, + description: '4-operator FM synthesizer', + inputs: [ + { name: 'V/Oct', type: SignalType.CV, index: 0 }, + { name: 'Gate', type: SignalType.CV, index: 1 } + ], + outputs: [ + { name: 'Audio Out', type: SignalType.AUDIO, index: 0 } + ], + parameters: [ + { id: 0, name: 'algorithm', label: 'Algorithm', min: 0, max: 3, default: 0, unit: '' }, + { id: 1, name: 'op1_ratio', label: 'Op1 Ratio', min: 0.25, max: 16, default: 1.0, unit: '' }, + { id: 2, name: 'op1_level', label: 'Op1 Level', min: 0, max: 1, default: 1.0, unit: '' }, + { id: 3, name: 'op2_ratio', label: 'Op2 Ratio', min: 0.25, max: 16, default: 2.0, unit: '' }, + { id: 4, name: 'op2_level', label: 'Op2 Level', min: 0, max: 1, default: 0.8, unit: '' }, + { id: 5, name: 'op3_ratio', label: 'Op3 Ratio', min: 0.25, max: 16, default: 3.0, unit: '' }, + { id: 6, name: 'op3_level', label: 'Op3 Level', min: 0, max: 1, default: 0.6, unit: '' }, + { id: 7, name: 'op4_ratio', label: 'Op4 Ratio', min: 0.25, max: 16, default: 4.0, unit: '' }, + { id: 8, name: 'op4_level', label: 'Op4 Level', min: 0, max: 1, default: 0.4, unit: '' } + ], + getHTML: (nodeId) => ` +
+
FM Synth
+
+ + +
+
Operator 1
+
+ + +
+
+ + +
+
Operator 2
+
+ + +
+
+ + +
+
Operator 3
+
+ + +
+
+ + +
+
Operator 4
+
+ + +
+
+ + +
+
+ ` + }, + + WavetableOscillator: { + name: 'Wavetable', + category: NodeCategory.GENERATOR, + description: 'Wavetable oscillator with preset waveforms', + inputs: [ + { name: 'V/Oct', type: SignalType.CV, index: 0 } + ], + outputs: [ + { name: 'Audio Out', type: SignalType.AUDIO, index: 0 } + ], + parameters: [ + { id: 0, name: 'wavetable', label: 'Wavetable', min: 0, max: 7, default: 0, unit: '' }, + { id: 1, name: 'fine_tune', label: 'Fine Tune', min: -1, max: 1, default: 0, unit: '' }, + { id: 2, name: 'position', label: 'Position', min: 0, max: 1, default: 0, unit: '' } + ], + getHTML: (nodeId) => ` +
+
Wavetable
+
+ + +
+
+ + +
+
+ + +
+
+ ` + }, + + SimpleSampler: { + name: 'Sampler', + category: NodeCategory.GENERATOR, + description: 'Simple sample playback with pitch shifting', + inputs: [ + { name: 'V/Oct', type: SignalType.CV, index: 0 }, + { name: 'Gate', type: SignalType.CV, index: 1 } + ], + outputs: [ + { name: 'Audio Out', type: SignalType.AUDIO, index: 0 } + ], + parameters: [ + { id: 0, name: 'gain', label: 'Gain', min: 0, max: 2, default: 1.0, unit: '' }, + { id: 1, name: 'loop', label: 'Loop', min: 0, max: 1, default: 0, unit: '' }, + { id: 2, name: 'pitch_shift', label: 'Pitch Shift', min: -12, max: 12, default: 0, unit: 'semi' } + ], + getHTML: (nodeId) => ` +
+
Sampler
+
+ + +
+
+ + +
+
+ + +
+
+ +
+
No sample loaded
+
+ ` + }, + + MultiSampler: { + name: 'Multi Sampler', + category: NodeCategory.GENERATOR, + description: 'Multi-sample instrument with velocity layers and key zones', + inputs: [ + { name: 'MIDI In', type: SignalType.MIDI, index: 0 } + ], + outputs: [ + { name: 'Audio Out', type: SignalType.AUDIO, index: 0 } + ], + parameters: [ + { id: 0, name: 'gain', label: 'Gain', min: 0, max: 2, default: 1.0, unit: '' }, + { id: 1, name: 'attack', label: 'Attack', min: 0.001, max: 1, default: 0.01, unit: 's' }, + { id: 2, name: 'release', label: 'Release', min: 0.01, max: 5, default: 0.1, unit: 's' }, + { id: 3, name: 'transpose', label: 'Transpose', min: -24, max: 24, default: 0, unit: 'semi' } + ], + getHTML: (nodeId) => ` +
+
Multi Sampler
+
+ + +
+
+ + +
+
+ + +
+
+ + +
+
+ +
+
+ + + + + + + + + + + + + +
FileRangeRootVel
No layers loaded
` diff --git a/src/startscreen.js b/src/startscreen.js new file mode 100644 index 0000000..aeee142 --- /dev/null +++ b/src/startscreen.js @@ -0,0 +1,229 @@ +const { basename, dirname, join } = window.__TAURI__.path; + +let startScreenContainer; +let onProjectStartCallback; + +/** + * Creates the start screen UI + * @param {Function} callback - Called when user selects a project type or opens a file + * callback receives: { type: 'new'|'reopen'|'recent', projectFocus?: string, filePath?: string, width?: number, height?: number, fps?: number } + */ +export function createStartScreen(callback) { + onProjectStartCallback = callback; + + startScreenContainer = document.createElement('div'); + startScreenContainer.id = 'startScreen'; + startScreenContainer.className = 'start-screen'; + + // Create welcome title + const title = document.createElement('h1'); + title.textContent = 'Welcome to Lightningbeam!'; + title.className = 'start-screen-title'; + startScreenContainer.appendChild(title); + + // Create main content container + const contentContainer = document.createElement('div'); + contentContainer.className = 'start-screen-content'; + startScreenContainer.appendChild(contentContainer); + + // Left panel - Recent files + const leftPanel = createLeftPanel(); + contentContainer.appendChild(leftPanel); + + // Right panel - New project + const rightPanel = createRightPanel(); + contentContainer.appendChild(rightPanel); + + document.body.appendChild(startScreenContainer); +} + +function createLeftPanel() { + const leftPanel = document.createElement('div'); + leftPanel.className = 'start-screen-left-panel'; + + // Reopen last session section + const reopenSection = document.createElement('div'); + reopenSection.className = 'start-screen-section'; + + const reopenTitle = document.createElement('h3'); + reopenTitle.textContent = 'Reopen last session'; + reopenTitle.className = 'start-screen-section-title'; + reopenSection.appendChild(reopenTitle); + + const lastSessionDiv = 
document.createElement('div'); + lastSessionDiv.id = 'lastSessionFile'; + lastSessionDiv.className = 'start-screen-file-item'; + lastSessionDiv.textContent = 'No recent session'; + reopenSection.appendChild(lastSessionDiv); + + leftPanel.appendChild(reopenSection); + + // Recent projects section + const recentSection = document.createElement('div'); + recentSection.className = 'start-screen-section'; + + const recentTitle = document.createElement('h3'); + recentTitle.textContent = 'Recent projects'; + recentTitle.className = 'start-screen-section-title'; + recentSection.appendChild(recentTitle); + + const recentList = document.createElement('ul'); + recentList.id = 'recentProjectsList'; + recentList.className = 'start-screen-recent-list'; + recentSection.appendChild(recentList); + + leftPanel.appendChild(recentSection); + + return leftPanel; +} + +function createRightPanel() { + const rightPanel = document.createElement('div'); + rightPanel.className = 'start-screen-right-panel'; + + const heading = document.createElement('h2'); + heading.textContent = 'Create a new project'; + heading.className = 'start-screen-heading'; + rightPanel.appendChild(heading); + + // Project focus options container + const focusContainer = document.createElement('div'); + focusContainer.className = 'start-screen-focus-grid'; + + const focusTypes = [ + { + name: 'Animation', + value: 'animation', + iconSvg: '', + description: 'Drawing tools and timeline' + }, + { + name: 'Music', + value: 'audioDaw', + iconSvg: '', + description: 'Audio tracks and mixer' + }, + { + name: 'Video editing', + value: 'videoEditing', + iconSvg: '', + description: 'Clip timeline and effects' + } + ]; + + focusTypes.forEach(focus => { + const focusCard = createFocusCard(focus); + focusContainer.appendChild(focusCard); + }); + + rightPanel.appendChild(focusContainer); + + return rightPanel; +} + +function createFocusCard(focus) { + const card = document.createElement('div'); + card.className = 'focus-card'; + + 
// Icon container + const iconContainer = document.createElement('div'); + iconContainer.className = 'focus-card-icon-container'; + + const iconWrapper = document.createElement('div'); + iconWrapper.className = 'focus-card-icon'; + iconWrapper.innerHTML = focus.iconSvg; + iconContainer.appendChild(iconWrapper); + card.appendChild(iconContainer); + + // Label + const label = document.createElement('div'); + label.textContent = focus.name; + label.className = 'focus-card-label'; + card.appendChild(label); + + // Click handler + card.addEventListener('click', () => { + onProjectStartCallback({ + type: 'new', + projectFocus: focus.value, + width: 800, + height: 600, + fps: 24 + }); + }); + + return card; +} + +/** + * Updates the recent files list and last session + */ +export async function updateStartScreen(config) { + if (!startScreenContainer) return; + + // Update last session + const lastSessionDiv = document.getElementById('lastSessionFile'); + if (lastSessionDiv) { + if (config.recentFiles && config.recentFiles.length > 0) { + const lastFile = config.recentFiles[0]; + const filename = await basename(lastFile); + lastSessionDiv.textContent = filename; + lastSessionDiv.onclick = () => { + onProjectStartCallback({ + type: 'reopen', + filePath: lastFile + }); + }; + lastSessionDiv.classList.add('clickable'); + } else { + lastSessionDiv.textContent = 'No recent session'; + lastSessionDiv.classList.remove('clickable'); + lastSessionDiv.onclick = null; + } + } + + // Update recent projects list + const recentList = document.getElementById('recentProjectsList'); + if (recentList) { + recentList.innerHTML = ''; + + if (config.recentFiles && config.recentFiles.length > 1) { + // Show up to 4 recent files (excluding the most recent which is shown as last session) + const recentFiles = config.recentFiles.slice(1, 5); + + for (const filePath of recentFiles) { + const filename = await basename(filePath); + const listItem = document.createElement('li'); + listItem.textContent 
= filename; + listItem.className = 'start-screen-file-item clickable'; + + listItem.onclick = () => { + onProjectStartCallback({ + type: 'recent', + filePath: filePath + }); + }; + + recentList.appendChild(listItem); + } + } + } +} + +/** + * Shows the start screen + */ +export function showStartScreen() { + if (startScreenContainer) { + startScreenContainer.style.display = 'flex'; + } +} + +/** + * Hides the start screen + */ +export function hideStartScreen() { + if (startScreenContainer) { + startScreenContainer.style.display = 'none'; + } +} diff --git a/src/styles.css b/src/styles.css index 730cdf1..c09c4a1 100644 --- a/src/styles.css +++ b/src/styles.css @@ -20,13 +20,92 @@ textarea { } :root { --lineheight: 24px; + + /* Semantic color system matching styles.js */ + --background-color: #ccc; + --foreground-color: #ddd; + --highlight: #ddd; + --shadow: #999; + --shade: #aaa; + --scrubber-color: #cc2222; + --label-color: black; + + /* Base colors */ + --white: #ffffff; + --black: #0f0f0f; + --pure-black: #000; + + /* Additional semantic colors */ + --surface: #f6f6f6; + --surface-light: #fff; + --surface-dark: #e8e8e8; + --surface-darker: #555; + + --text-primary: #0f0f0f; + --text-secondary: #666; + --text-tertiary: #999; + --text-inverse: #f6f6f6; + + --border-light: #bbb; + --border-medium: #999; + --border-dark: #555; + + /* Interactive elements */ + --button-hover: #396cd8; + --button-active: #e8e8e8; + --link-color: #646cff; + --link-hover: #535bf2; + + /* Status colors */ + --success: #4CAF50; + --success-hover: #45a049; + --success-dark: #2E7D32; + --error: #f44336; + --error-dark: #cc0000; + --info: #2196F3; + --info-dark: #1565C0; + --warning: #FF9800; + --warning-dark: #E65100; + + /* Timeline/Animation colors */ + --motion: #7a00b3; + --motion-hover: #530379; + --motion-border: #450264; + --shape: #9bff9b; + --shape-hover: #38f538; + --shape-border: #26ac26; + --keyframe: #222; + --selection: #00ffff; + + /* Audio layer colors */ + --audio-layer: 
#8281cc; + --audio-layer-light: #9a99db; + --audio-layer-dark: #817db9; + + /* Node editor */ + --node-bg: #2d2d2d; + --node-border: #4d4d4d; + --node-selected: #4CAF50; + --node-primary: #7c7cff; + --node-template: #9d4edd; + --node-template-light: #c77dff; + --node-child: #5a5aaa; + + /* UI specific */ + --panel-bg: #aaa; + --header-bg: #ccc; + --toolbar-bg: #ccc; + --grid-bg: #555; + --grid-hover: #666; + --horiz-break: #999; + font-family: Inter, Avenir, Helvetica, Arial, sans-serif; font-size: 16px; line-height: var(--lineheight); font-weight: 400; - color: #0f0f0f; - background-color: #f6f6f6; + color: var(--text-primary); + background-color: var(--surface); font-synthesis: none; text-rendering: optimizeLegibility; @@ -63,12 +142,12 @@ textarea { a { font-weight: 500; - color: #646cff; + color: var(--link-color); text-decoration: inherit; } a:hover { - color: #535bf2; + color: var(--link-hover); } h1 { @@ -84,8 +163,8 @@ button { font-size: 1em; font-weight: 500; font-family: inherit; - color: #0f0f0f; - background-color: #ffffff; + color: var(--black); + background-color: var(--white); transition: border-color 0.25s; box-shadow: 0 4px 4px rgba(0, 0, 0, 0.2); box-sizing: border-box; @@ -102,11 +181,11 @@ button { } button:hover { - border-color: #396cd8; + border-color: var(--button-hover); } button:active { - border-color: #396cd8; - background-color: #e8e8e8; + border-color: var(--button-hover); + background-color: var(--button-active); } input, @@ -122,7 +201,7 @@ button { .header { height: 60px; min-width: 100%; - background-color: #ccc; + background-color: var(--header-bg); text-align: left; z-index: 1; display: flex; @@ -143,7 +222,7 @@ button { .horizontal-grid, .vertical-grid { display: flex; - background-color: #555; + background-color: var(--grid-bg); width: 100%; height: 100%; contain: strict; @@ -160,18 +239,18 @@ button { } /* I don't fully understand this selector but it works for now */ 
.horizontal-grid:hover:not(:has(*:hover)):not(.panecontainer > .horizontal-grid) { - background: #666; + background: var(--grid-hover); cursor: ew-resize; } .vertical-grid:hover:not(:has(*:hover)):not(.panecontainer > .vertical-grid) { - background: #666; + background: var(--grid-hover); cursor: ns-resize } .scroll { overflow: scroll; width: 100%; height: 100%; - background-color: #555; + background-color: var(--grid-bg); } .stage { width: 100%; @@ -189,7 +268,7 @@ button { height: 300px; left: 100px; top: 100px; - border: 1px solid #00ffff; + border: 1px solid var(--selection); display: none; user-select: none; pointer-events: none; @@ -198,7 +277,7 @@ button { position: absolute; width: 10px; height: 10px; - background-color: black; + background-color: var(--label-color); transition: width 0.2s ease, height 0.2s linear; user-select: none; @@ -280,7 +359,7 @@ button { .toolbtn { width: calc( 3 * var(--lineheight) ); height: calc( 3 * var(--lineheight) ); - background-color: #ccc; + background-color: var(--toolbar-bg); } .toolbtn img { filter: invert(1); @@ -290,7 +369,7 @@ button { width: 100%; height: 5px; - background-color: #999; + background-color: var(--horiz-break); } .color-field { position: relative; @@ -306,7 +385,7 @@ button { .color-field::before { content: var(--label-text);; font-size: 16px; - color: black; + color: var(--label-color); margin-right: 10px; } @@ -333,7 +412,7 @@ button { .infopanel { width: 100%; height: 100%; - background-color: #aaa; + background-color: var(--panel-bg); display: flex; box-sizing: border-box; gap: calc( var(--lineheight) / 2 ); @@ -356,14 +435,14 @@ button { width: 50%; } .layers { - background-color: #aaa; + background-color: var(--panel-bg); display: flex; flex-direction: column; flex-wrap: nowrap; min-height: 100%; } .frames-container { - background-color: #aaa; + background-color: var(--panel-bg); display: flex; flex-direction: column; flex-wrap: nowrap; @@ -375,9 +454,9 @@ button { .layer-header { width: 100%; 
height: calc( 2 * var(--lineheight)); - background-color: #aaa; - border-top: 1px solid #ddd; - border-bottom: 1px solid #999; + background-color: var(--shade); + border-top: 1px solid var(--foreground-color); + border-bottom: 1px solid var(--shadow); flex-shrink: 0; display: flex; @@ -386,22 +465,22 @@ button { cursor: pointer; } .layer-header.active { - background-color: #ccc; + background-color: var(--background-color); } .layer-header.audio { - background-color: #8281cc; - border-top: 1px solid #9a99db; - border-bottom: 1px solid #817db9; + background-color: var(--audio-layer); + border-top: 1px solid var(--audio-layer-light); + border-bottom: 1px solid var(--audio-layer-dark); } .layer-name { padding-left: 1em; padding-top: 5px; display: inline-block; - color: #666; + color: var(--text-secondary); cursor: text; } .layer-header.active > .layer-name { - color: #000; + color: var(--pure-black); } /* Visibility icon positioning */ .visibility-icon { @@ -415,17 +494,17 @@ button { height: calc( 2 * var(--lineheight)); /* background: repeating-linear-gradient(to right, transparent, transparent 24px, #aaa 24px, #aaa 25px), repeating-linear-gradient(to right, #bbb, #bbb 100px, #aaa 100px, #aaa 125px); */ - background-image: + background-image: /* Layer 1: frame dividers */ - linear-gradient(to right, transparent 24px, #aaa 24px 25px), + linear-gradient(to right, transparent 24px, var(--shade) 24px 25px), /* Layer 2: highlight every 5th frame */ - linear-gradient(to right, #bbb 100px, #aaa 100px 125px); + linear-gradient(to right, var(--border-light) 100px, var(--shade) 100px 125px); background-repeat: repeat-x, repeat-x; background-size: 25px 100%, 125px 100%; display: flex; flex-direction: row; - border-top: 1px solid #bbb; - border-bottom: 1px solid #ccc; + border-top: 1px solid var(--border-light); + border-bottom: 1px solid var(--background-color); flex-shrink: 0; } .layer-track.invisible { @@ -435,17 +514,17 @@ button { width: 25px; height: 100%; - 
background-color: #ccc; + background-color: var(--background-color); flex-grow: 0; flex-shrink: 0; - border-right: 1px solid #bbb; - border-left: 1px solid #ddd; + border-right: 1px solid var(--border-light); + border-left: 1px solid var(--foreground-color); } .frame:hover { - background-color: #555555; + background-color: var(--surface-darker); } .frame.active { - background-color: #fff; + background-color: var(--surface-light); } .frame.keyframe { position: relative; @@ -460,26 +539,26 @@ button { height: 0; /* Initially set to 0 */ padding-bottom: 50%; /* Set padding-bottom to 50% of the div's width to create a circle */ border-radius: 50%; /* Make the shape a circle */ - background-color: #222; /* Set the color of the circle (black in this case) */ + background-color: var(--keyframe); /* Set the color of the circle (black in this case) */ margin-bottom: 5px; } .frame.motion { - background-color: #7a00b3; + background-color: var(--motion); border: none; } .frame.motion:hover, .frame.motion.active { - background-color: #530379; - border-left: 1px solid #450264; - border-right: 1px solid #450264; + background-color: var(--motion-hover); + border-left: 1px solid var(--motion-border); + border-right: 1px solid var(--motion-border); } .frame.shape { - background-color: #9bff9b; + background-color: var(--shape); border: none; } .frame.shape:hover, .frame.shape.active { - background-color: #38f538; - border-left: 1px solid #26ac26; - border-right: 1px solid #26ac26; + background-color: var(--shape-hover); + border-left: 1px solid var(--shape-border); + border-right: 1px solid var(--shape-border); } /* :nth-child(1 of .frame.motion) { background-color: blue; @@ -489,7 +568,7 @@ button { } */ .frame-highlight { - background-color: #888; + background-color: var(--text-tertiary); width: 25px; height: calc( 2 * var(--lineheight) - 2px); position: relative; @@ -517,8 +596,8 @@ button { top: 50%; left: 50%; transform: translate(-50%, -50%); - background-color: #ddd; - border: 
1px solid #aaa; + background-color: var(--foreground-color); + border: 1px solid var(--shade); border-radius: 5px; box-shadow: 0 0 10px rgba(0, 0, 0, 0.5); padding: 20px; @@ -535,21 +614,21 @@ button { width: 100%; padding: 8px; margin: 5px 0; - border: 1px solid #aaa; + border: 1px solid var(--shade); } #newFileDialog .dialog-button, #saveDialog button { width: 100%; padding: 10px; margin-top: 10px; - background-color: #007bff; - color: white; + background-color: var(--info); + color: var(--white); border: none; cursor: pointer; } #newFileDialog .dialog-button:hover { - background-color: #0056b3; + background-color: var(--info-dark); } #recentFilesList li { word-wrap: break-word; @@ -566,13 +645,13 @@ button { #recentFilesList li:hover { cursor: pointer; - background-color: #f0f0f0; + background-color: var(--surface-dark); border-radius: 5px; box-shadow: 0px 4px 8px rgba(0, 0, 0, 0.1); } #popupMenu { - background-color: #eee; + background-color: var(--highlight); box-shadow: 0 4px 8px rgba(0,0,0,0.5); padding: 20px; border-radius: 5px; @@ -583,18 +662,18 @@ button { margin: 0px; } #popupMenu li { - color: #222; + color: var(--keyframe); list-style-type: none; display: flex; align-items: center; /* Vertically center the image and text */ padding: 5px 0; /* Add padding for better spacing */ } #popupMenu li:hover { - background-color: #fff; + background-color: var(--surface-light); cursor:pointer; } #popupMenu li:not(:last-child) { - border-bottom: 1px solid #ccc; /* Horizontal line for all li elements except the last */ + border-bottom: 1px solid var(--background-color); /* Horizontal line for all li elements except the last */ } #popupMenu li img { margin-right: 10px; /* Space between the icon and text */ @@ -624,17 +703,39 @@ button { @media (prefers-color-scheme: dark) { :root { - color: #f6f6f6; - background-color: #2f2f2f; + /* Override variables for dark mode */ + --background-color: #333; + --foreground-color: #888; + --highlight: #4f4f4f; + --shadow: #111; + 
--shade: #222; + --label-color: white; + + --surface: #2f2f2f; + --surface-light: #444; + --surface-dark: #555; + + --text-primary: #f6f6f6; + --text-secondary: #aaa; + --text-tertiary: #777; + + --header-bg: #3f3f3f; + --panel-bg: #222222; + --toolbar-bg: #2f2f2f; + --grid-bg: #0f0f0f; + --horiz-break: #2f2f2f; + + color: var(--text-primary); + background-color: var(--surface); } a:hover { - color: #24c8db; + color: var(--link-alt); } input, button { - color: #ffffff; + color: var(--white); background-color: #0f0f0f98; } button:active { @@ -642,53 +743,53 @@ button { } #newFileDialog, #saveDialog { - background-color: #444; - border: 1px solid #333; + background-color: var(--surface-light); + border: 1px solid var(--background-color); } #newFileDialog .dialog-input, #saveDialog input { - border: 1px solid #333; + border: 1px solid var(--background-color); } #recentFilesList li:hover { cursor: pointer; - background-color: #555; + background-color: var(--surface-dark); border-radius: 5px; box-shadow: 0px 4px 8px rgba(0, 0, 0, 0.2); } - + #popupMenu { - background-color: #222; + background-color: var(--shade); } #popupMenu li { - color: #ccc; + color: var(--background-color); } #popupMenu li:hover { - background-color: #444; + background-color: var(--surface-light); } #popupMenu li:not(:last-child) { - border-bottom: 1px solid #444; + border-bottom: 1px solid var(--surface-light); } .color-field::before { - color: #eee; + color: var(--highlight); } .layers { - background-color: #222222; + background-color: var(--panel-bg); } .frames-container { - background-color: #222222; + background-color: var(--panel-bg); } .layer-header { - background-color: #222; - border-top: 1px solid #4f4f4f; - border-bottom: 1px solid #111; + background-color: var(--shade); + border-top: 1px solid var(--highlight); + border-bottom: 1px solid var(--shadow); } .layer-header.active { - background-color: #444; + background-color: var(--surface-light); } .layer-name { - color: #aaa + color: 
var(--text-secondary); } .layer-header.active > .layer-name { - color: #fff; + color: var(--white); } .layer-header.audio { background-color: #23253b; @@ -696,40 +797,40 @@ button { border-bottom: 1px solid #1f1e24; } .layer-track { - background-image: + background-image: linear-gradient(to right, transparent 23px, #1a1a1a 23px 25px), /* Dark mode frame dividers */ linear-gradient(to right, #121212 100px, #0a0a0a 100px 125px); /* Dark mode frame highlights */ - border-top: 1px solid #222222; - border-bottom: 1px solid #3f3f3f; + border-top: 1px solid var(--shade); + border-bottom: 1px solid var(--header-bg); } .frame { - background-color: #4f4f4f; - border-right: 1px solid #3f3f3f; - border-left: 1px solid #555555; + background-color: var(--highlight); + border-right: 1px solid var(--header-bg); + border-left: 1px solid var(--surface-dark); } .frame:hover { - background-color: #555555; + background-color: var(--surface-dark); } .frame.active { - background-color: #666666; + background-color: var(--text-secondary); } .infopanel { - background-color: #3f3f3f; + background-color: var(--header-bg); } .header { - background-color: #3f3f3f; + background-color: var(--header-bg); } .horizontal-grid, .vertical-grid { - background-color: #0f0f0f; + background-color: var(--grid-bg); } .toolbtn { - background-color: #2f2f2f; + background-color: var(--toolbar-bg); } .toolbtn img { filter:none; } .horiz_break { - background-color: #2f2f2f; + background-color: var(--horiz-break); } .audioWaveform { filter: invert(1); @@ -773,7 +874,7 @@ button { height: 0; border-style: solid; border-width: 8px 0 8px 14px; - border-color: transparent transparent transparent #0f0f0f; + border-color: transparent transparent transparent var(--black); margin-left: 2px; } @@ -783,7 +884,7 @@ button { content: ""; width: 4px; height: 16px; - background-color: #0f0f0f; + background-color: var(--black); position: absolute; } @@ -803,7 +904,7 @@ button { height: 0; border-style: solid; border-width: 7px 
10px 7px 0; - border-color: transparent #0f0f0f transparent transparent; + border-color: transparent var(--black) transparent transparent; position: absolute; } @@ -823,7 +924,7 @@ button { height: 0; border-style: solid; border-width: 7px 0 7px 10px; - border-color: transparent transparent transparent #0f0f0f; + border-color: transparent transparent transparent var(--black); position: absolute; } @@ -845,7 +946,7 @@ button { .playback-btn-start::before { width: 2px; height: 14px; - background-color: #0f0f0f; + background-color: var(--black); left: 13px; } @@ -854,7 +955,7 @@ button { height: 0; border-style: solid; border-width: 7px 12px 7px 0; - border-color: transparent #0f0f0f transparent transparent; + border-color: transparent var(--black) transparent transparent; left: 15px; } @@ -870,14 +971,14 @@ button { height: 0; border-style: solid; border-width: 7px 0 7px 12px; - border-color: transparent transparent transparent #0f0f0f; + border-color: transparent transparent transparent var(--black); left: 13px; } .playback-btn-end::after { width: 2px; height: 14px; - background-color: #0f0f0f; + background-color: var(--black); left: 25px; } @@ -887,11 +988,11 @@ button { width: 14px; height: 14px; border-radius: 50%; - background-color: #cc0000; + background-color: var(--error-dark); } .playback-btn-record:disabled::before { - background-color: #666; + background-color: var(--text-secondary); } /* Recording animation */ @@ -915,42 +1016,74 @@ button { } .playback-btn-play::before { - border-color: transparent transparent transparent #f6f6f6; + border-color: transparent transparent transparent var(--text-primary); } .playback-btn-pause::before, .playback-btn-pause::after { - background-color: #f6f6f6; + background-color: var(--text-primary); } .playback-btn-rewind::before, .playback-btn-rewind::after { - border-color: transparent #f6f6f6 transparent transparent; + border-color: transparent var(--text-primary) transparent transparent; } .playback-btn-ff::before, 
.playback-btn-ff::after { - border-color: transparent transparent transparent #f6f6f6; + border-color: transparent transparent transparent var(--text-primary); } .playback-btn-start::before { - background-color: #f6f6f6; + background-color: var(--text-primary); } .playback-btn-start::after { - border-color: transparent #f6f6f6 transparent transparent; + border-color: transparent var(--text-primary) transparent transparent; } .playback-btn-end::before { - border-color: transparent transparent transparent #f6f6f6; + border-color: transparent transparent transparent var(--text-primary); } .playback-btn-end::after { - background-color: #f6f6f6; + background-color: var(--text-primary); } .playback-btn-record:disabled::before { - background-color: #444; + background-color: var(--surface-light); + } + + /* Start screen dark mode */ + .start-screen-file-item { + background: var(--surface-light); + border-color: var(--border-dark); + box-shadow: 0 2px 4px rgba(0, 0, 0, 0.3); + } + + .start-screen-file-item.clickable:hover { + background-color: var(--surface-dark); + border-color: var(--link-hover); + } + + .focus-card { + background: var(--surface-light); + border-color: var(--border-dark); + box-shadow: 0 4px 8px rgba(0, 0, 0, 0.3); + } + + .focus-card:hover { + box-shadow: 0 8px 16px rgba(0, 0, 0, 0.4); + border-color: var(--link-hover); + } + + .focus-card-icon-container { + background: var(--surface); + border-color: var(--link-color); + } + + .focus-card-icon { + color: var(--text-primary); } } @@ -1048,7 +1181,7 @@ button { width: 100%; height: 100%; position: relative; - background: #1e1e1e; + background: var(--node-bg); } /* Node editor header and breadcrumb */ @@ -1076,7 +1209,7 @@ button { } .template-name { - color: #7c7cff; + color: var(--node-primary); font-weight: bold; } @@ -1163,8 +1296,8 @@ button { } .node-category-item:hover { - background: #4d4d4d; - border-color: #7c7cff; + background: var(--node-border); + border-color: var(--node-primary); } 
.node-category-item:active { @@ -1197,6 +1330,16 @@ button { min-width: 180px; } +/* Wider content for nodes with sample layers */ +.node-content:has(.sample-layers-container) { + min-width: 280px; +} + +/* Wider nodes for nodes with sample layers */ +.drawflow .drawflow-node:has(.sample-layers-container) { + min-width: 296px !important; /* 280px content + 8px padding on each side */ +} + /* Expanded VoiceAllocator node */ .drawflow .drawflow-node.expanded { background: rgba(60, 60, 80, 0.95) !important; @@ -1223,7 +1366,7 @@ button { /* Child nodes (inside VoiceAllocator) */ .drawflow .drawflow-node.child-node { opacity: 0.9; - border: 1px solid #5a5aaa !important; + border: 1px solid var(--node-child) !important; box-shadow: 0 2px 8px rgba(90, 90, 170, 0.3); z-index: 10; } @@ -1234,7 +1377,7 @@ button { /* Template nodes (non-deletable I/O nodes) */ .drawflow .drawflow-node.template-node { - border: 2px solid #9d4edd !important; + border: 2px solid var(--node-template) !important; background: rgba(157, 78, 221, 0.15) !important; box-shadow: 0 0 12px rgba(157, 78, 221, 0.4); pointer-events: auto; @@ -1242,7 +1385,7 @@ button { } .drawflow .drawflow-node.template-node .node-title { - color: #c77dff; + color: var(--node-template-light); font-weight: bold; } @@ -1262,14 +1405,19 @@ button { } .node-param { - margin: 3px 0; + margin: 0; + margin-top: 8px; /* Space between parameters */ +} + +.node-param:first-of-type { + margin-top: 0; /* No extra space after node title */ } .node-param label { display: block; font-size: 10px; color: #ccc; - margin-bottom: 2px; + margin-bottom: 2px; /* Tight spacing between label and its slider */ } .node-slider { @@ -1376,15 +1524,17 @@ button { /* Node styling overrides for Drawflow */ .drawflow .drawflow-node { - background: #2d2d2d !important; - border: 2px solid #4d4d4d !important; + background: var(--node-bg) !important; + border: 2px solid var(--node-border) !important; border-radius: 6px !important; - color: #ddd !important; + 
color: var(--foreground-color) !important; padding: 8px !important; + min-width: 196px !important; /* 180px content + 16px padding */ + width: auto !important; } .drawflow .drawflow-node.selected { - border-color: #4CAF50 !important; + border-color: var(--node-selected) !important; box-shadow: 0 0 10px rgba(76, 175, 80, 0.5) !important; } @@ -1435,8 +1585,8 @@ button { } .preset-btn { - background: #4CAF50; - color: white; + background: var(--success); + color: var(--white); border: none; padding: 6px 12px; border-radius: 4px; @@ -1449,7 +1599,7 @@ button { } .preset-btn:hover { - background: #45a049; + background: var(--success-hover); } .preset-btn span { @@ -1517,13 +1667,13 @@ button { } .preset-item:hover { - background: #2d2d2d; - border-color: #4CAF50; + background: var(--node-bg); + border-color: var(--success); } .preset-item.selected { - background: #2d2d2d; - border-color: #7c7cff; + background: var(--node-bg); + border-color: var(--node-primary); padding: 10px 12px; } @@ -1550,9 +1700,9 @@ button { } .preset-load-btn { - background: #4CAF50; + background: var(--success); border: none; - color: #fff; + color: var(--white); cursor: pointer; font-size: 11px; padding: 4px 8px; @@ -1566,13 +1716,13 @@ button { } .preset-load-btn:hover { - background: #45a049; + background: var(--success-hover); } .preset-delete-btn { background: transparent; border: none; - color: #f44336; + color: var(--error); cursor: pointer; font-size: 16px; padding: 2px 6px; @@ -1639,7 +1789,7 @@ button { } .preset-error { - color: #f44336; + color: var(--error); } /* Modal Dialog for Save Preset */ @@ -1736,10 +1886,264 @@ button { } .btn-primary { - background: #4CAF50; - color: white; + background: var(--success); + color: var(--white); } .btn-primary:hover { - background: #45a049; + background: var(--success-hover); +} + +/* Sample layer list styles */ +.sample-layers-container { + margin-top: 4px; + max-height: 120px; + overflow-y: auto; + overflow-x: hidden; + border: 1px solid 
#444; + border-radius: 3px; + background: #2a2a2a; +} + +.sample-layers-table { + width: 100%; + font-size: 10px; + border-collapse: collapse; + table-layout: fixed; +} + +.sample-layers-table thead { + background: #333; + position: sticky; + top: 0; + z-index: 1; +} + +.sample-layers-table th { + padding: 4px 3px; + text-align: left; + font-weight: 600; + color: #aaa; + border-bottom: 1px solid #444; + font-size: 9px; +} + +.sample-layers-table th:nth-child(1) { width: 28%; } /* File */ +.sample-layers-table th:nth-child(2) { width: 24%; } /* Range */ +.sample-layers-table th:nth-child(3) { width: 12%; } /* Root */ +.sample-layers-table th:nth-child(4) { width: 14%; } /* Vel */ +.sample-layers-table th:nth-child(5) { width: 22%; } /* Actions */ + +.sample-layers-table td { + padding: 3px; + border-bottom: 1px solid #333; + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; +} + +.sample-layers-table tr:hover { + background: #3a3a3a; +} + +.sample-layer-filename { + overflow: hidden; + text-overflow: ellipsis; +} + +.sample-layer-actions { + display: flex; + gap: 3px; +} + +.btn-edit-layer, +.btn-delete-layer { + padding: 1px 6px; + font-size: 9px; + background: #555; + border: none; + border-radius: 2px; + color: white; + cursor: pointer; + white-space: nowrap; +} + +.btn-edit-layer:hover { + background: #666; +} + +.btn-delete-layer { + background: var(--error-alt); +} + +.btn-delete-layer:hover { + background: var(--error-alt-hover); +} + +.sample-layers-empty { + padding: 20px; + font-size: 10px; + color: #888; + text-align: center; +} + +.form-group-inline { + display: flex; + gap: 8px; + align-items: center; +} + +.form-group-inline > div { + flex: 1; +} + +.form-group-inline > span { + margin-top: 12px; +} + +.form-note-name { + font-size: 10px; + color: #666; + margin-top: 2px; +} + +/* Start Screen Styles */ +.start-screen { + position: fixed; + top: 0; + left: 0; + width: 100vw; + height: 100vh; + background-color: var(--surface); + 
display: flex; + flex-direction: column; + align-items: center; + justify-content: center; + z-index: 10000; +} + +.start-screen-title { + color: var(--text-primary); + font-size: 3em; + margin: 40px 0; + font-weight: 600; +} + +.start-screen-content { + display: flex; + gap: 60px; + max-width: 1200px; + width: 90%; + align-items: flex-start; +} + +.start-screen-left-panel { + flex: 1; + display: flex; + flex-direction: column; + gap: 30px; +} + +.start-screen-section { + display: flex; + flex-direction: column; +} + +.start-screen-section-title { + color: var(--text-primary); + font-size: 1.3em; + margin-bottom: 10px; +} + +.start-screen-file-item { + color: var(--text-secondary); + font-size: 1.1em; + padding: 12px; + background: var(--surface-light); + border: 1px solid var(--border-light); + border-radius: 8px; + box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1); + transition: all 0.2s; +} + +.start-screen-file-item.clickable { + cursor: pointer; +} + +.start-screen-file-item.clickable:hover { + background-color: var(--surface-dark); + border-color: var(--button-hover); +} + +.start-screen-recent-list { + list-style: none; + padding: 0; + margin: 0; + display: flex; + flex-direction: column; + gap: 8px; +} + +.start-screen-right-panel { + flex: 2; + display: flex; + flex-direction: column; +} + +.start-screen-heading { + color: var(--text-primary); + font-size: 2em; + margin-bottom: 30px; + text-align: center; +} + +.start-screen-focus-grid { + display: grid; + grid-template-columns: repeat(3, 1fr); + gap: 30px; + justify-items: center; +} + +.focus-card { + width: 180px; + padding: 24px; + background: var(--surface-light); + border: 2px solid var(--border-light); + border-radius: 12px; + box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1); + cursor: pointer; + transition: all 0.3s; + display: flex; + flex-direction: column; + align-items: center; + gap: 16px; +} + +.focus-card:hover { + transform: translateY(-4px); + box-shadow: 0 8px 16px rgba(0, 0, 0, 0.2); + border-color: 
var(--button-hover); +} + +.focus-card-icon-container { + width: 120px; + height: 120px; + display: flex; + align-items: center; + justify-content: center; + border: 3px solid var(--button-hover); + border-radius: 8px; + background: var(--surface); +} + +.focus-card-icon { + width: 80px; + height: 80px; + color: var(--text-primary); +} + +.focus-card-label { + color: var(--text-primary); + font-size: 1.2em; + font-weight: 600; }