Load factory preset instruments
parent f1bcf16ddc
commit 8e6ea82f92
@@ -6,7 +6,7 @@ use crate::audio::pool::AudioPool;
|
|||
use crate::audio::project::Project;
|
||||
use crate::audio::recording::RecordingState;
|
||||
use crate::audio::track::{Track, TrackId, TrackNode};
|
||||
use crate::command::{AudioEvent, Command};
|
||||
use crate::command::{AudioEvent, Command, Query, QueryResponse};
|
||||
use crate::effects::{Effect, GainEffect, PanEffect, SimpleEQ};
|
||||
use petgraph::stable_graph::NodeIndex;
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
|
|
@@ -25,6 +25,8 @@ pub struct Engine {
|
|||
// Lock-free communication
|
||||
command_rx: rtrb::Consumer<Command>,
|
||||
event_tx: rtrb::Producer<AudioEvent>,
|
||||
query_rx: rtrb::Consumer<Query>,
|
||||
query_response_tx: rtrb::Producer<QueryResponse>,
|
||||
|
||||
// Shared playhead for UI reads
|
||||
playhead_atomic: Arc<AtomicU64>,
|
||||
|
|
@@ -53,6 +55,8 @@ impl Engine {
|
|||
channels: u32,
|
||||
command_rx: rtrb::Consumer<Command>,
|
||||
event_tx: rtrb::Producer<AudioEvent>,
|
||||
query_rx: rtrb::Consumer<Query>,
|
||||
query_response_tx: rtrb::Producer<QueryResponse>,
|
||||
) -> Self {
|
||||
let event_interval_frames = (sample_rate as usize * channels as usize) / 60; // Update 60 times per second
|
||||
|
||||
|
|
@@ -69,6 +73,8 @@ impl Engine {
|
|||
channels,
|
||||
command_rx,
|
||||
event_tx,
|
||||
query_rx,
|
||||
query_response_tx,
|
||||
playhead_atomic: Arc::new(AtomicU64::new(0)),
|
||||
frames_since_last_event: 0,
|
||||
event_interval_frames,
|
||||
|
|
@@ -142,9 +148,16 @@ impl Engine {
|
|||
}
|
||||
|
||||
/// Get a handle for controlling playback from the UI thread
|
||||
pub fn get_controller(&self, command_tx: rtrb::Producer<Command>) -> EngineController {
|
||||
pub fn get_controller(
|
||||
&self,
|
||||
command_tx: rtrb::Producer<Command>,
|
||||
query_tx: rtrb::Producer<Query>,
|
||||
query_response_rx: rtrb::Consumer<QueryResponse>,
|
||||
) -> EngineController {
|
||||
EngineController {
|
||||
command_tx,
|
||||
query_tx,
|
||||
query_response_rx,
|
||||
playhead: Arc::clone(&self.playhead_atomic),
|
||||
sample_rate: self.sample_rate,
|
||||
channels: self.channels,
|
||||
|
|
@@ -164,6 +177,11 @@ impl Engine {
|
|||
self.handle_command(cmd);
|
||||
}
|
||||
|
||||
// Process all pending queries
|
||||
while let Ok(query) = self.query_rx.pop() {
|
||||
self.handle_query(query);
|
||||
}
|
||||
|
||||
if self.playing {
|
||||
// Ensure mix buffer is sized correctly
|
||||
if self.mix_buffer.len() != output.len() {
|
||||
|
|
@@ -744,8 +762,12 @@ impl Engine {
|
|||
"Splitter" => Box::new(SplitterNode::new("Splitter".to_string())),
|
||||
"Pan" => Box::new(PanNode::new("Pan".to_string())),
|
||||
"Delay" => Box::new(DelayNode::new("Delay".to_string())),
|
||||
"Distortion" => Box::new(DistortionNode::new("Distortion".to_string())),
|
||||
"Reverb" => Box::new(ReverbNode::new("Reverb".to_string())),
|
||||
"Chorus" => Box::new(ChorusNode::new("Chorus".to_string())),
|
||||
"Compressor" => Box::new(CompressorNode::new("Compressor".to_string())),
|
||||
"Limiter" => Box::new(LimiterNode::new("Limiter".to_string())),
|
||||
"EQ" => Box::new(EQNode::new("EQ".to_string())),
|
||||
"Flanger" => Box::new(FlangerNode::new("Flanger".to_string())),
|
||||
"FMSynth" => Box::new(FMSynthNode::new("FM Synth".to_string())),
|
||||
"WavetableOscillator" => Box::new(WavetableOscillatorNode::new("Wavetable".to_string())),
|
||||
|
|
@@ -803,8 +825,12 @@ impl Engine {
|
|||
"Splitter" => Box::new(SplitterNode::new("Splitter".to_string())),
|
||||
"Pan" => Box::new(PanNode::new("Pan".to_string())),
|
||||
"Delay" => Box::new(DelayNode::new("Delay".to_string())),
|
||||
"Distortion" => Box::new(DistortionNode::new("Distortion".to_string())),
|
||||
"Reverb" => Box::new(ReverbNode::new("Reverb".to_string())),
|
||||
"Chorus" => Box::new(ChorusNode::new("Chorus".to_string())),
|
||||
"Compressor" => Box::new(CompressorNode::new("Compressor".to_string())),
|
||||
"Limiter" => Box::new(LimiterNode::new("Limiter".to_string())),
|
||||
"EQ" => Box::new(EQNode::new("EQ".to_string())),
|
||||
"Flanger" => Box::new(FlangerNode::new("Flanger".to_string())),
|
||||
"FMSynth" => Box::new(FMSynthNode::new("FM Synth".to_string())),
|
||||
"WavetableOscillator" => Box::new(WavetableOscillatorNode::new("Wavetable".to_string())),
|
||||
|
|
@@ -968,7 +994,10 @@ impl Engine {
|
|||
Ok(json) => {
|
||||
match crate::audio::node_graph::preset::GraphPreset::from_json(&json) {
|
||||
Ok(preset) => {
|
||||
match InstrumentGraph::from_preset(&preset, self.sample_rate, 8192) {
|
||||
// Extract the directory path from the preset path for resolving relative sample paths
|
||||
let preset_base_path = std::path::Path::new(&preset_path).parent();
|
||||
|
||||
match InstrumentGraph::from_preset(&preset, self.sample_rate, 8192, preset_base_path) {
|
||||
Ok(graph) => {
|
||||
// Replace the track's graph
|
||||
if let Some(TrackNode::Midi(track)) = self.project.get_track_mut(track_id) {
|
||||
|
|
@@ -1124,6 +1153,61 @@ impl Engine {
|
|||
}
|
||||
}
|
||||
|
||||
/// Handle synchronous queries from the UI thread
|
||||
fn handle_query(&mut self, query: Query) {
|
||||
let response = match query {
|
||||
Query::GetGraphState(track_id) => {
|
||||
if let Some(TrackNode::Midi(track)) = self.project.get_track(track_id) {
|
||||
if let Some(ref graph) = track.instrument_graph {
|
||||
let preset = graph.to_preset("temp");
|
||||
match preset.to_json() {
|
||||
Ok(json) => QueryResponse::GraphState(Ok(json)),
|
||||
Err(e) => QueryResponse::GraphState(Err(format!("Failed to serialize graph: {:?}", e))),
|
||||
}
|
||||
} else {
|
||||
// Empty graph
|
||||
let empty_preset = crate::audio::node_graph::preset::GraphPreset::new("empty");
|
||||
match empty_preset.to_json() {
|
||||
Ok(json) => QueryResponse::GraphState(Ok(json)),
|
||||
Err(_) => QueryResponse::GraphState(Err("Failed to serialize empty graph".to_string())),
|
||||
}
|
||||
}
|
||||
} else {
|
||||
QueryResponse::GraphState(Err(format!("Track {} not found or is not a MIDI track", track_id)))
|
||||
}
|
||||
}
|
||||
Query::GetTemplateState(track_id, voice_allocator_id) => {
|
||||
if let Some(TrackNode::Midi(track)) = self.project.get_track_mut(track_id) {
|
||||
if let Some(ref mut graph) = track.instrument_graph {
|
||||
let node_idx = NodeIndex::new(voice_allocator_id as usize);
|
||||
if let Some(graph_node) = graph.get_graph_node_mut(node_idx) {
|
||||
// Downcast to VoiceAllocatorNode
|
||||
let node_ptr = &*graph_node.node as *const dyn crate::audio::node_graph::AudioNode;
|
||||
let node_ptr = node_ptr as *const VoiceAllocatorNode;
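// SAFETY assumption: the node stored at `voice_allocator_id` really is a
// VoiceAllocatorNode; dereferencing this pointer for any other node type would
// be undefined behavior.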
|
||||
unsafe {
|
||||
let va_node = &*node_ptr;
|
||||
let template_preset = va_node.template_graph().to_preset("template");
|
||||
match template_preset.to_json() {
|
||||
Ok(json) => QueryResponse::GraphState(Ok(json)),
|
||||
Err(e) => QueryResponse::GraphState(Err(format!("Failed to serialize template: {:?}", e))),
|
||||
}
|
||||
}
|
||||
} else {
|
||||
QueryResponse::GraphState(Err("Voice allocator node not found".to_string()))
|
||||
}
|
||||
} else {
|
||||
QueryResponse::GraphState(Err("Graph not found".to_string()))
|
||||
}
|
||||
} else {
|
||||
QueryResponse::GraphState(Err(format!("Track {} not found or is not a MIDI track", track_id)))
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// Send response back
|
||||
let _ = self.query_response_tx.push(response);
|
||||
}
|
||||
|
||||
/// Handle starting a recording
|
||||
fn handle_start_recording(&mut self, track_id: TrackId, start_time: f64) {
|
||||
use crate::io::WavWriter;
|
||||
|
|
@@ -1285,6 +1369,8 @@ impl Engine {
|
|||
/// Controller for the engine that can be used from the UI thread
|
||||
pub struct EngineController {
|
||||
command_tx: rtrb::Producer<Command>,
|
||||
query_tx: rtrb::Producer<Query>,
|
||||
query_response_rx: rtrb::Consumer<QueryResponse>,
|
||||
playhead: Arc<AtomicU64>,
|
||||
sample_rate: u32,
|
||||
channels: u32,
|
||||
|
|
@@ -1626,4 +1712,49 @@ impl EngineController {
|
|||
pub fn multi_sampler_remove_layer(&mut self, track_id: TrackId, node_id: u32, layer_index: usize) {
|
||||
let _ = self.command_tx.push(Command::MultiSamplerRemoveLayer(track_id, node_id, layer_index));
|
||||
}
|
||||
|
||||
/// Send a synchronous query and wait for the response
|
||||
/// This blocks until the audio thread processes the query
|
||||
pub fn query_graph_state(&mut self, track_id: TrackId) -> Result<String, String> {
|
||||
// Send query
|
||||
if let Err(_) = self.query_tx.push(Query::GetGraphState(track_id)) {
|
||||
return Err("Failed to send query - queue full".to_string());
|
||||
}
|
||||
|
||||
// Wait for response (with timeout)
|
||||
let start = std::time::Instant::now();
|
||||
let timeout = std::time::Duration::from_millis(500);
|
||||
|
||||
while start.elapsed() < timeout {
|
||||
if let Ok(QueryResponse::GraphState(result)) = self.query_response_rx.pop() {
|
||||
return result;
|
||||
}
|
||||
// Small sleep to avoid busy-waiting
|
||||
std::thread::sleep(std::time::Duration::from_micros(100));
|
||||
}
|
||||
|
||||
Err("Query timeout".to_string())
|
||||
}
|
||||
|
||||
/// Query a template graph state
|
||||
pub fn query_template_state(&mut self, track_id: TrackId, voice_allocator_id: u32) -> Result<String, String> {
|
||||
// Send query
|
||||
if let Err(_) = self.query_tx.push(Query::GetTemplateState(track_id, voice_allocator_id)) {
|
||||
return Err("Failed to send query - queue full".to_string());
|
||||
}
|
||||
|
||||
// Wait for response (with timeout)
|
||||
let start = std::time::Instant::now();
|
||||
let timeout = std::time::Duration::from_millis(500);
|
||||
|
||||
while start.elapsed() < timeout {
|
||||
if let Ok(QueryResponse::GraphState(result)) = self.query_response_rx.pop() {
|
||||
return result;
|
||||
}
|
||||
// Small sleep to avoid busy-waiting
|
||||
std::thread::sleep(std::time::Duration::from_micros(100));
|
||||
}
|
||||
|
||||
Err("Query timeout".to_string())
|
||||
}
|
||||
}
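The controller methods above implement a blocking request/response round trip over rtrb's lock-free SPSC queues. Below is a minimal, self-contained sketch of the same pattern, assuming only the rtrb crate; the names, element types, and capacities are illustrative, not taken from the codebase:

use std::thread;
use std::time::{Duration, Instant};

fn main() {
    // Request and response queues, mirroring query_tx/query_rx and
    // query_response_tx/query_response_rx above.
    let (mut req_tx, req_rx) = rtrb::RingBuffer::<u32>::new(16);
    let (resp_tx, mut resp_rx) = rtrb::RingBuffer::<String>::new(16);

    // Stand-in for the audio thread: drain requests, push one response each.
    let worker = thread::spawn(move || {
        let mut req_rx = req_rx;
        let mut resp_tx = resp_tx;
        loop {
            if let Ok(track_id) = req_rx.pop() {
                let _ = resp_tx.push(format!("graph state for track {track_id}"));
                break;
            }
            thread::sleep(Duration::from_micros(100));
        }
    });

    // Stand-in for the UI thread: send a request, then poll with a timeout,
    // the same shape as query_graph_state above.
    req_tx.push(7).expect("query queue full");
    let start = Instant::now();
    while start.elapsed() < Duration::from_millis(500) {
        if let Ok(state) = resp_rx.pop() {
            println!("{state}");
            break;
        }
        thread::sleep(Duration::from_micros(100));
    }
    worker.join().unwrap();
}

Because rtrb queues are single-producer/single-consumer, exactly one UI-side owner holds the query producer and the response consumer, which is why EngineController stores them directly.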
|
||||
|
|
|
|||
|
|
@@ -746,11 +746,30 @@ impl InstrumentGraph {
|
|||
}
|
||||
|
||||
/// Deserialize a preset into the graph
|
||||
pub fn from_preset(preset: &crate::audio::node_graph::preset::GraphPreset, sample_rate: u32, buffer_size: usize) -> Result<Self, String> {
|
||||
pub fn from_preset(preset: &crate::audio::node_graph::preset::GraphPreset, sample_rate: u32, buffer_size: usize, preset_base_path: Option<&std::path::Path>) -> Result<Self, String> {
|
||||
use crate::audio::node_graph::nodes::*;
|
||||
use petgraph::stable_graph::NodeIndex;
|
||||
use std::collections::HashMap;
|
||||
|
||||
// Helper function to resolve sample paths relative to preset
|
||||
let resolve_sample_path = |path: &str| -> String {
|
||||
let path_obj = std::path::Path::new(path);
|
||||
|
||||
// If path is absolute, use it as-is
|
||||
if path_obj.is_absolute() {
|
||||
return path.to_string();
|
||||
}
|
||||
|
||||
// If we have a base path and the path is relative, resolve it
|
||||
if let Some(base) = preset_base_path {
|
||||
let resolved = base.join(path);
|
||||
resolved.to_string_lossy().to_string()
|
||||
} else {
|
||||
// No base path, use path as-is
|
||||
path.to_string()
|
||||
}
|
||||
};
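// For example, with preset_base_path = Some(Path::new("/presets/pads")), a stored
// relative path such as "samples/choir.wav" (an illustrative name) resolves to
// "/presets/pads/samples/choir.wav", while absolute paths pass through unchanged.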
|
||||
|
||||
let mut graph = Self::new(sample_rate, buffer_size);
|
||||
let mut index_map: HashMap<u32, NodeIndex> = HashMap::new();
|
||||
|
||||
|
|
@@ -768,8 +787,12 @@ impl InstrumentGraph {
|
|||
"Splitter" => Box::new(SplitterNode::new("Splitter")),
|
||||
"Pan" => Box::new(PanNode::new("Pan")),
|
||||
"Delay" => Box::new(DelayNode::new("Delay")),
|
||||
"Distortion" => Box::new(DistortionNode::new("Distortion")),
|
||||
"Reverb" => Box::new(ReverbNode::new("Reverb")),
|
||||
"Chorus" => Box::new(ChorusNode::new("Chorus")),
|
||||
"Compressor" => Box::new(CompressorNode::new("Compressor")),
|
||||
"Limiter" => Box::new(LimiterNode::new("Limiter")),
|
||||
"EQ" => Box::new(EQNode::new("EQ")),
|
||||
"Flanger" => Box::new(FlangerNode::new("Flanger")),
|
||||
"FMSynth" => Box::new(FMSynthNode::new("FM Synth")),
|
||||
"WavetableOscillator" => Box::new(WavetableOscillatorNode::new("Wavetable")),
|
||||
|
|
@@ -786,7 +809,7 @@ impl InstrumentGraph {
|
|||
|
||||
// If there's a template graph, deserialize and set it
|
||||
if let Some(ref template_preset) = serialized_node.template_graph {
|
||||
let template_graph = Self::from_preset(template_preset, sample_rate, buffer_size)?;
|
||||
let template_graph = Self::from_preset(template_preset, sample_rate, buffer_size, preset_base_path)?;
|
||||
// Set the template graph (we'll need to add this method to VoiceAllocator)
|
||||
*va.template_graph_mut() = template_graph;
|
||||
va.rebuild_voices();
|
||||
|
|
@@ -836,8 +859,9 @@ impl InstrumentGraph {
|
|||
sampler_node.set_sample(samples, embedded.sample_rate as f32);
|
||||
}
|
||||
} else if let Some(ref path) = file_path {
|
||||
// Fall back to loading from file
|
||||
let _ = sampler_node.load_sample_from_file(path);
|
||||
// Fall back to loading from file (resolve path relative to preset)
|
||||
let resolved_path = resolve_sample_path(path);
|
||||
let _ = sampler_node.load_sample_from_file(&resolved_path);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@@ -875,9 +899,10 @@ impl InstrumentGraph {
|
|||
);
|
||||
}
|
||||
} else if let Some(ref path) = layer.file_path {
|
||||
// Fall back to loading from file
|
||||
// Fall back to loading from file (resolve path relative to preset)
|
||||
let resolved_path = resolve_sample_path(path);
|
||||
let _ = multi_sampler_node.load_layer_from_file(
|
||||
path,
|
||||
&resolved_path,
|
||||
layer.key_min,
|
||||
layer.key_max,
|
||||
layer.root_key,
|
||||
|
|
|
|||
|
|
@@ -0,0 +1,253 @@
|
|||
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType};
|
||||
use crate::audio::midi::MidiEvent;
|
||||
|
||||
const PARAM_THRESHOLD: u32 = 0;
|
||||
const PARAM_RATIO: u32 = 1;
|
||||
const PARAM_ATTACK: u32 = 2;
|
||||
const PARAM_RELEASE: u32 = 3;
|
||||
const PARAM_MAKEUP_GAIN: u32 = 4;
|
||||
const PARAM_KNEE: u32 = 5;
|
||||
|
||||
/// Compressor node for dynamic range compression
|
||||
pub struct CompressorNode {
|
||||
name: String,
|
||||
threshold_db: f32,
|
||||
ratio: f32,
|
||||
attack_ms: f32,
|
||||
release_ms: f32,
|
||||
makeup_gain_db: f32,
|
||||
knee_db: f32,
|
||||
|
||||
// State
|
||||
envelope: f32,
|
||||
attack_coeff: f32,
|
||||
release_coeff: f32,
|
||||
sample_rate: u32,
|
||||
|
||||
inputs: Vec<NodePort>,
|
||||
outputs: Vec<NodePort>,
|
||||
parameters: Vec<Parameter>,
|
||||
}
|
||||
|
||||
impl CompressorNode {
|
||||
pub fn new(name: impl Into<String>) -> Self {
|
||||
let name = name.into();
|
||||
|
||||
let inputs = vec![
|
||||
NodePort::new("Audio In", SignalType::Audio, 0),
|
||||
];
|
||||
|
||||
let outputs = vec![
|
||||
NodePort::new("Audio Out", SignalType::Audio, 0),
|
||||
];
|
||||
|
||||
let parameters = vec![
|
||||
Parameter::new(PARAM_THRESHOLD, "Threshold", -60.0, 0.0, -20.0, ParameterUnit::Decibels),
|
||||
Parameter::new(PARAM_RATIO, "Ratio", 1.0, 20.0, 4.0, ParameterUnit::Generic),
|
||||
Parameter::new(PARAM_ATTACK, "Attack", 0.1, 100.0, 5.0, ParameterUnit::Time),
|
||||
Parameter::new(PARAM_RELEASE, "Release", 10.0, 1000.0, 50.0, ParameterUnit::Time),
|
||||
Parameter::new(PARAM_MAKEUP_GAIN, "Makeup", 0.0, 24.0, 0.0, ParameterUnit::Decibels),
|
||||
Parameter::new(PARAM_KNEE, "Knee", 0.0, 12.0, 3.0, ParameterUnit::Decibels),
|
||||
];
|
||||
|
||||
let sample_rate = 44100;
|
||||
let attack_coeff = Self::ms_to_coeff(5.0, sample_rate);
|
||||
let release_coeff = Self::ms_to_coeff(50.0, sample_rate);
|
||||
|
||||
Self {
|
||||
name,
|
||||
threshold_db: -20.0,
|
||||
ratio: 4.0,
|
||||
attack_ms: 5.0,
|
||||
release_ms: 50.0,
|
||||
makeup_gain_db: 0.0,
|
||||
knee_db: 3.0,
|
||||
envelope: 0.0,
|
||||
attack_coeff,
|
||||
release_coeff,
|
||||
sample_rate,
|
||||
inputs,
|
||||
outputs,
|
||||
parameters,
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert milliseconds to exponential smoothing coefficient
|
||||
fn ms_to_coeff(time_ms: f32, sample_rate: u32) -> f32 {
|
||||
let time_seconds = time_ms / 1000.0;
|
||||
let samples = time_seconds * sample_rate as f32;
|
||||
(-1.0 / samples).exp()
|
||||
}
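// e.g. 5 ms at 44.1 kHz covers about 220.5 samples, giving exp(-1/220.5) ≈ 0.9955.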
|
||||
|
||||
fn update_coefficients(&mut self) {
|
||||
self.attack_coeff = Self::ms_to_coeff(self.attack_ms, self.sample_rate);
|
||||
self.release_coeff = Self::ms_to_coeff(self.release_ms, self.sample_rate);
|
||||
}
|
||||
|
||||
/// Convert linear amplitude to dB
|
||||
fn linear_to_db(linear: f32) -> f32 {
|
||||
if linear > 0.0 {
|
||||
20.0 * linear.log10()
|
||||
} else {
|
||||
-160.0
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert dB to linear gain
|
||||
fn db_to_linear(db: f32) -> f32 {
|
||||
10.0_f32.powf(db / 20.0)
|
||||
}
|
||||
|
||||
/// Calculate gain reduction for a given input level
|
||||
fn calculate_gain_reduction(&self, input_db: f32) -> f32 {
|
||||
let threshold = self.threshold_db;
|
||||
let knee = self.knee_db;
|
||||
let ratio = self.ratio;
|
||||
|
||||
// Soft knee implementation
|
||||
if input_db < threshold - knee / 2.0 {
|
||||
// Below threshold - no compression
|
||||
0.0
|
||||
} else if input_db > threshold + knee / 2.0 {
|
||||
// Above threshold - full compression
|
||||
let overshoot = input_db - threshold;
|
||||
overshoot * (1.0 - 1.0 / ratio)
|
||||
} else {
|
||||
// In knee region - gradual compression
|
||||
let overshoot = input_db - threshold + knee / 2.0;
|
||||
let knee_factor = overshoot / knee;
|
||||
overshoot * knee_factor * (1.0 - 1.0 / ratio) / 2.0
|
||||
}
|
||||
}
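// e.g. with the defaults (threshold -20 dB, ratio 4:1, knee 3 dB), an input peak at
// -10 dB is 10 dB over the threshold and well past the knee, so the reduction is
// 10 * (1 - 1/4) = 7.5 dB.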
|
||||
|
||||
fn process_sample(&mut self, input: f32) -> f32 {
|
||||
// Detect input level (using absolute value as simple peak detector)
|
||||
let input_level = input.abs();
|
||||
|
||||
// Convert to dB
|
||||
let input_db = Self::linear_to_db(input_level);
|
||||
|
||||
// Calculate target gain reduction
|
||||
let target_gr_db = self.calculate_gain_reduction(input_db);
|
||||
let target_gr_linear = Self::db_to_linear(-target_gr_db);
|
||||
|
||||
// Smooth envelope with attack/release
|
||||
let coeff = if target_gr_linear < self.envelope {
|
||||
self.attack_coeff // Attack (faster response to louder signal)
|
||||
} else {
|
||||
self.release_coeff // Release (slower response when signal gets quieter)
|
||||
};
|
||||
|
||||
self.envelope = target_gr_linear + coeff * (self.envelope - target_gr_linear);
|
||||
|
||||
// Apply compression and makeup gain
|
||||
let makeup_linear = Self::db_to_linear(self.makeup_gain_db);
|
||||
input * self.envelope * makeup_linear
|
||||
}
|
||||
}
|
||||
|
||||
impl AudioNode for CompressorNode {
|
||||
fn category(&self) -> NodeCategory {
|
||||
NodeCategory::Effect
|
||||
}
|
||||
|
||||
fn inputs(&self) -> &[NodePort] {
|
||||
&self.inputs
|
||||
}
|
||||
|
||||
fn outputs(&self) -> &[NodePort] {
|
||||
&self.outputs
|
||||
}
|
||||
|
||||
fn parameters(&self) -> &[Parameter] {
|
||||
&self.parameters
|
||||
}
|
||||
|
||||
fn set_parameter(&mut self, id: u32, value: f32) {
|
||||
match id {
|
||||
PARAM_THRESHOLD => self.threshold_db = value,
|
||||
PARAM_RATIO => self.ratio = value,
|
||||
PARAM_ATTACK => {
|
||||
self.attack_ms = value;
|
||||
self.update_coefficients();
|
||||
}
|
||||
PARAM_RELEASE => {
|
||||
self.release_ms = value;
|
||||
self.update_coefficients();
|
||||
}
|
||||
PARAM_MAKEUP_GAIN => self.makeup_gain_db = value,
|
||||
PARAM_KNEE => self.knee_db = value,
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
|
||||
fn get_parameter(&self, id: u32) -> f32 {
|
||||
match id {
|
||||
PARAM_THRESHOLD => self.threshold_db,
|
||||
PARAM_RATIO => self.ratio,
|
||||
PARAM_ATTACK => self.attack_ms,
|
||||
PARAM_RELEASE => self.release_ms,
|
||||
PARAM_MAKEUP_GAIN => self.makeup_gain_db,
|
||||
PARAM_KNEE => self.knee_db,
|
||||
_ => 0.0,
|
||||
}
|
||||
}
|
||||
|
||||
fn process(
|
||||
&mut self,
|
||||
inputs: &[&[f32]],
|
||||
outputs: &mut [&mut [f32]],
|
||||
_midi_inputs: &[&[MidiEvent]],
|
||||
_midi_outputs: &mut [&mut Vec<MidiEvent>],
|
||||
sample_rate: u32,
|
||||
) {
|
||||
if inputs.is_empty() || outputs.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
// Update sample rate if changed
|
||||
if self.sample_rate != sample_rate {
|
||||
self.sample_rate = sample_rate;
|
||||
self.update_coefficients();
|
||||
}
|
||||
|
||||
let input = inputs[0];
|
||||
let output = &mut outputs[0];
|
||||
let len = input.len().min(output.len());
|
||||
|
||||
for i in 0..len {
|
||||
output[i] = self.process_sample(input[i]);
|
||||
}
|
||||
}
|
||||
|
||||
fn reset(&mut self) {
|
||||
self.envelope = 0.0;
|
||||
}
|
||||
|
||||
fn node_type(&self) -> &str {
|
||||
"Compressor"
|
||||
}
|
||||
|
||||
fn name(&self) -> &str {
|
||||
&self.name
|
||||
}
|
||||
|
||||
fn clone_node(&self) -> Box<dyn AudioNode> {
|
||||
Box::new(Self {
|
||||
name: self.name.clone(),
|
||||
threshold_db: self.threshold_db,
|
||||
ratio: self.ratio,
|
||||
attack_ms: self.attack_ms,
|
||||
release_ms: self.release_ms,
|
||||
makeup_gain_db: self.makeup_gain_db,
|
||||
knee_db: self.knee_db,
|
||||
envelope: 0.0, // Reset state for clone
|
||||
attack_coeff: self.attack_coeff,
|
||||
release_coeff: self.release_coeff,
|
||||
sample_rate: self.sample_rate,
|
||||
inputs: self.inputs.clone(),
|
||||
outputs: self.outputs.clone(),
|
||||
parameters: self.parameters.clone(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
@@ -0,0 +1,257 @@
|
|||
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType};
|
||||
use crate::audio::midi::MidiEvent;
|
||||
|
||||
const PARAM_DRIVE: u32 = 0;
|
||||
const PARAM_TYPE: u32 = 1;
|
||||
const PARAM_TONE: u32 = 2;
|
||||
const PARAM_MIX: u32 = 3;
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq)]
|
||||
pub enum DistortionType {
|
||||
SoftClip = 0,
|
||||
HardClip = 1,
|
||||
Tanh = 2,
|
||||
Asymmetric = 3,
|
||||
}
|
||||
|
||||
impl DistortionType {
|
||||
fn from_f32(value: f32) -> Self {
|
||||
match value.round() as i32 {
|
||||
1 => DistortionType::HardClip,
|
||||
2 => DistortionType::Tanh,
|
||||
3 => DistortionType::Asymmetric,
|
||||
_ => DistortionType::SoftClip,
|
||||
}
|
||||
}
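// e.g. set_parameter(PARAM_TYPE, 2.0) rounds to 2 and selects Tanh; values that do
// not round to 1, 2, or 3 fall back to SoftClip.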
|
||||
}
|
||||
|
||||
/// Distortion node with multiple waveshaping algorithms
|
||||
pub struct DistortionNode {
|
||||
name: String,
|
||||
drive: f32, // 0.01 to 20.0 (linear gain)
|
||||
distortion_type: DistortionType,
|
||||
tone: f32, // 0.0 to 1.0 (low-pass filter cutoff)
|
||||
mix: f32, // 0.0 to 1.0 (dry/wet)
|
||||
|
||||
// Tone filter state (simple one-pole low-pass)
|
||||
filter_state_left: f32,
|
||||
filter_state_right: f32,
|
||||
sample_rate: u32,
|
||||
|
||||
inputs: Vec<NodePort>,
|
||||
outputs: Vec<NodePort>,
|
||||
parameters: Vec<Parameter>,
|
||||
}
|
||||
|
||||
impl DistortionNode {
|
||||
pub fn new(name: impl Into<String>) -> Self {
|
||||
let name = name.into();
|
||||
|
||||
let inputs = vec![
|
||||
NodePort::new("Audio In", SignalType::Audio, 0),
|
||||
];
|
||||
|
||||
let outputs = vec![
|
||||
NodePort::new("Audio Out", SignalType::Audio, 0),
|
||||
];
|
||||
|
||||
let parameters = vec![
|
||||
Parameter::new(PARAM_DRIVE, "Drive", 0.01, 20.0, 1.0, ParameterUnit::Generic),
|
||||
Parameter::new(PARAM_TYPE, "Type", 0.0, 3.0, 0.0, ParameterUnit::Generic),
|
||||
Parameter::new(PARAM_TONE, "Tone", 0.0, 1.0, 0.7, ParameterUnit::Generic),
|
||||
Parameter::new(PARAM_MIX, "Mix", 0.0, 1.0, 1.0, ParameterUnit::Generic),
|
||||
];
|
||||
|
||||
Self {
|
||||
name,
|
||||
drive: 1.0,
|
||||
distortion_type: DistortionType::SoftClip,
|
||||
tone: 0.7,
|
||||
mix: 1.0,
|
||||
filter_state_left: 0.0,
|
||||
filter_state_right: 0.0,
|
||||
sample_rate: 44100,
|
||||
inputs,
|
||||
outputs,
|
||||
parameters,
|
||||
}
|
||||
}
|
||||
|
||||
/// Soft clipping using cubic waveshaping
|
||||
fn soft_clip(&self, x: f32) -> f32 {
// Classic cubic soft clipper: roughly linear near zero and smoothly saturating
// at ±1, with both the value and the slope continuous at the clip point.
let x = x.clamp(-1.0, 1.0);
1.5 * x - 0.5 * x * x * x
}
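// e.g. x = 0.5 -> 1.5*0.5 - 0.5*0.125 = 0.6875, and any |x| >= 1 maps to exactly ±1.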
|
||||
|
||||
/// Hard clipping
|
||||
fn hard_clip(&self, x: f32) -> f32 {
|
||||
x.clamp(-1.0, 1.0)
|
||||
}
|
||||
|
||||
/// Hyperbolic tangent waveshaping
|
||||
fn tanh_distortion(&self, x: f32) -> f32 {
|
||||
x.tanh()
|
||||
}
|
||||
|
||||
/// Asymmetric waveshaping (different curves for positive/negative)
|
||||
fn asymmetric(&self, x: f32) -> f32 {
|
||||
if x >= 0.0 {
|
||||
// Positive: soft clip
|
||||
self.soft_clip(x)
|
||||
} else {
|
||||
// Negative: harder clip
|
||||
self.hard_clip(x * 1.5) / 1.5
|
||||
}
|
||||
}
|
||||
|
||||
/// Apply waveshaping based on type
|
||||
fn apply_waveshaping(&self, x: f32) -> f32 {
|
||||
match self.distortion_type {
|
||||
DistortionType::SoftClip => self.soft_clip(x),
|
||||
DistortionType::HardClip => self.hard_clip(x),
|
||||
DistortionType::Tanh => self.tanh_distortion(x),
|
||||
DistortionType::Asymmetric => self.asymmetric(x),
|
||||
}
|
||||
}
|
||||
|
||||
/// Simple one-pole low-pass filter for tone control
|
||||
fn apply_tone_filter(&mut self, input: f32, is_left: bool) -> f32 {
|
||||
// Tone parameter controls cutoff frequency (0 = dark, 1 = bright)
|
||||
// Map tone to filter coefficient (0.1 to 0.99)
|
||||
let coeff = 0.99 - self.tone * 0.89; // higher tone -> less smoothing -> brighter output
|
||||
|
||||
let state = if is_left {
|
||||
&mut self.filter_state_left
|
||||
} else {
|
||||
&mut self.filter_state_right
|
||||
};
|
||||
|
||||
*state = *state * coeff + input * (1.0 - coeff);
|
||||
*state
|
||||
}
|
||||
|
||||
fn process_sample(&mut self, input: f32, is_left: bool) -> f32 {
|
||||
// Apply drive (input gain)
|
||||
let driven = input * self.drive;
|
||||
|
||||
// Apply waveshaping
|
||||
let distorted = self.apply_waveshaping(driven);
|
||||
|
||||
// Apply tone control (low-pass filter to tame harshness)
|
||||
let filtered = self.apply_tone_filter(distorted, is_left);
|
||||
|
||||
// Apply output gain compensation and mix
|
||||
let output_gain = 1.0 / (1.0 + self.drive * 0.2); // Compensate for loudness increase
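// e.g. drive = 5.0 trims the wet path by 1 / (1 + 5.0 * 0.2) = 0.5.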
|
||||
let wet = filtered * output_gain;
|
||||
let dry = input;
|
||||
|
||||
// Mix dry and wet
|
||||
dry * (1.0 - self.mix) + wet * self.mix
|
||||
}
|
||||
}
|
||||
|
||||
impl AudioNode for DistortionNode {
|
||||
fn category(&self) -> NodeCategory {
|
||||
NodeCategory::Effect
|
||||
}
|
||||
|
||||
fn inputs(&self) -> &[NodePort] {
|
||||
&self.inputs
|
||||
}
|
||||
|
||||
fn outputs(&self) -> &[NodePort] {
|
||||
&self.outputs
|
||||
}
|
||||
|
||||
fn parameters(&self) -> &[Parameter] {
|
||||
&self.parameters
|
||||
}
|
||||
|
||||
fn set_parameter(&mut self, id: u32, value: f32) {
|
||||
match id {
|
||||
PARAM_DRIVE => self.drive = value.clamp(0.01, 20.0),
|
||||
PARAM_TYPE => self.distortion_type = DistortionType::from_f32(value),
|
||||
PARAM_TONE => self.tone = value.clamp(0.0, 1.0),
|
||||
PARAM_MIX => self.mix = value.clamp(0.0, 1.0),
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
|
||||
fn get_parameter(&self, id: u32) -> f32 {
|
||||
match id {
|
||||
PARAM_DRIVE => self.drive,
|
||||
PARAM_TYPE => self.distortion_type as i32 as f32,
|
||||
PARAM_TONE => self.tone,
|
||||
PARAM_MIX => self.mix,
|
||||
_ => 0.0,
|
||||
}
|
||||
}
|
||||
|
||||
fn process(
|
||||
&mut self,
|
||||
inputs: &[&[f32]],
|
||||
outputs: &mut [&mut [f32]],
|
||||
_midi_inputs: &[&[MidiEvent]],
|
||||
_midi_outputs: &mut [&mut Vec<MidiEvent>],
|
||||
sample_rate: u32,
|
||||
) {
|
||||
if inputs.is_empty() || outputs.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
// Update sample rate if changed
|
||||
if self.sample_rate != sample_rate {
|
||||
self.sample_rate = sample_rate;
|
||||
}
|
||||
|
||||
let input = inputs[0];
|
||||
let output = &mut outputs[0];
|
||||
|
||||
// Audio signals are stereo (interleaved L/R)
|
||||
let frames = input.len() / 2;
|
||||
let output_frames = output.len() / 2;
|
||||
let frames_to_process = frames.min(output_frames);
|
||||
|
||||
for frame in 0..frames_to_process {
|
||||
let left_in = input[frame * 2];
|
||||
let right_in = input[frame * 2 + 1];
|
||||
|
||||
output[frame * 2] = self.process_sample(left_in, true);
|
||||
output[frame * 2 + 1] = self.process_sample(right_in, false);
|
||||
}
|
||||
}
|
||||
|
||||
fn reset(&mut self) {
|
||||
self.filter_state_left = 0.0;
|
||||
self.filter_state_right = 0.0;
|
||||
}
|
||||
|
||||
fn node_type(&self) -> &str {
|
||||
"Distortion"
|
||||
}
|
||||
|
||||
fn name(&self) -> &str {
|
||||
&self.name
|
||||
}
|
||||
|
||||
fn clone_node(&self) -> Box<dyn AudioNode> {
|
||||
Box::new(Self {
|
||||
name: self.name.clone(),
|
||||
drive: self.drive,
|
||||
distortion_type: self.distortion_type,
|
||||
tone: self.tone,
|
||||
mix: self.mix,
|
||||
filter_state_left: 0.0, // Reset state for clone
|
||||
filter_state_right: 0.0,
|
||||
sample_rate: self.sample_rate,
|
||||
inputs: self.inputs.clone(),
|
||||
outputs: self.outputs.clone(),
|
||||
parameters: self.parameters.clone(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
@@ -0,0 +1,259 @@
|
|||
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType};
|
||||
use crate::audio::midi::MidiEvent;
|
||||
use crate::dsp::biquad::BiquadFilter;
|
||||
|
||||
// Low band (peaking)
|
||||
const PARAM_LOW_FREQ: u32 = 0;
|
||||
const PARAM_LOW_GAIN: u32 = 1;
|
||||
|
||||
// Mid band (peaking)
|
||||
const PARAM_MID_FREQ: u32 = 2;
|
||||
const PARAM_MID_GAIN: u32 = 3;
|
||||
const PARAM_MID_Q: u32 = 4;
|
||||
|
||||
// High band (peaking)
|
||||
const PARAM_HIGH_FREQ: u32 = 5;
|
||||
const PARAM_HIGH_GAIN: u32 = 6;
|
||||
|
||||
/// 3-Band Parametric EQ Node
|
||||
/// All three bands use peaking filters at different frequencies
|
||||
pub struct EQNode {
|
||||
name: String,
|
||||
|
||||
// Parameters
|
||||
low_freq: f32,
|
||||
low_gain_db: f32,
|
||||
low_q: f32,
|
||||
mid_freq: f32,
|
||||
mid_gain_db: f32,
|
||||
mid_q: f32,
|
||||
high_freq: f32,
|
||||
high_gain_db: f32,
|
||||
high_q: f32,
|
||||
|
||||
// Filters (stereo)
|
||||
low_filter_left: BiquadFilter,
|
||||
low_filter_right: BiquadFilter,
|
||||
mid_filter_left: BiquadFilter,
|
||||
mid_filter_right: BiquadFilter,
|
||||
high_filter_left: BiquadFilter,
|
||||
high_filter_right: BiquadFilter,
|
||||
|
||||
sample_rate: u32,
|
||||
inputs: Vec<NodePort>,
|
||||
outputs: Vec<NodePort>,
|
||||
parameters: Vec<Parameter>,
|
||||
}
|
||||
|
||||
impl EQNode {
|
||||
pub fn new(name: impl Into<String>) -> Self {
|
||||
let name = name.into();
|
||||
|
||||
let inputs = vec![
|
||||
NodePort::new("Audio In", SignalType::Audio, 0),
|
||||
];
|
||||
|
||||
let outputs = vec![
|
||||
NodePort::new("Audio Out", SignalType::Audio, 0),
|
||||
];
|
||||
|
||||
let parameters = vec![
|
||||
Parameter::new(PARAM_LOW_FREQ, "Low Freq", 20.0, 500.0, 100.0, ParameterUnit::Frequency),
|
||||
Parameter::new(PARAM_LOW_GAIN, "Low Gain", -24.0, 24.0, 0.0, ParameterUnit::Decibels),
|
||||
Parameter::new(PARAM_MID_FREQ, "Mid Freq", 200.0, 5000.0, 1000.0, ParameterUnit::Frequency),
|
||||
Parameter::new(PARAM_MID_GAIN, "Mid Gain", -24.0, 24.0, 0.0, ParameterUnit::Decibels),
|
||||
Parameter::new(PARAM_MID_Q, "Mid Q", 0.1, 10.0, 0.707, ParameterUnit::Generic),
|
||||
Parameter::new(PARAM_HIGH_FREQ, "High Freq", 2000.0, 20000.0, 8000.0, ParameterUnit::Frequency),
|
||||
Parameter::new(PARAM_HIGH_GAIN, "High Gain", -24.0, 24.0, 0.0, ParameterUnit::Decibels),
|
||||
];
|
||||
|
||||
let sample_rate = 44100;
|
||||
|
||||
// Initialize filters - all peaking
|
||||
let low_filter_left = BiquadFilter::peaking(100.0, 1.0, 0.0, sample_rate as f32);
|
||||
let low_filter_right = BiquadFilter::peaking(100.0, 1.0, 0.0, sample_rate as f32);
|
||||
let mid_filter_left = BiquadFilter::peaking(1000.0, 0.707, 0.0, sample_rate as f32);
|
||||
let mid_filter_right = BiquadFilter::peaking(1000.0, 0.707, 0.0, sample_rate as f32);
|
||||
let high_filter_left = BiquadFilter::peaking(8000.0, 1.0, 0.0, sample_rate as f32);
|
||||
let high_filter_right = BiquadFilter::peaking(8000.0, 1.0, 0.0, sample_rate as f32);
|
||||
|
||||
Self {
|
||||
name,
|
||||
low_freq: 100.0,
|
||||
low_gain_db: 0.0,
|
||||
low_q: 1.0,
|
||||
mid_freq: 1000.0,
|
||||
mid_gain_db: 0.0,
|
||||
mid_q: 0.707,
|
||||
high_freq: 8000.0,
|
||||
high_gain_db: 0.0,
|
||||
high_q: 1.0,
|
||||
low_filter_left,
|
||||
low_filter_right,
|
||||
mid_filter_left,
|
||||
mid_filter_right,
|
||||
high_filter_left,
|
||||
high_filter_right,
|
||||
sample_rate,
|
||||
inputs,
|
||||
outputs,
|
||||
parameters,
|
||||
}
|
||||
}
|
||||
|
||||
fn update_filters(&mut self) {
|
||||
let sr = self.sample_rate as f32;
|
||||
|
||||
// Update low band peaking filter
|
||||
self.low_filter_left.set_peaking(self.low_freq, self.low_q, self.low_gain_db, sr);
|
||||
self.low_filter_right.set_peaking(self.low_freq, self.low_q, self.low_gain_db, sr);
|
||||
|
||||
// Update mid band peaking filter
|
||||
self.mid_filter_left.set_peaking(self.mid_freq, self.mid_q, self.mid_gain_db, sr);
|
||||
self.mid_filter_right.set_peaking(self.mid_freq, self.mid_q, self.mid_gain_db, sr);
|
||||
|
||||
// Update high band peaking filter
|
||||
self.high_filter_left.set_peaking(self.high_freq, self.high_q, self.high_gain_db, sr);
|
||||
self.high_filter_right.set_peaking(self.high_freq, self.high_q, self.high_gain_db, sr);
|
||||
}
|
||||
}
|
||||
|
||||
impl AudioNode for EQNode {
|
||||
fn category(&self) -> NodeCategory {
|
||||
NodeCategory::Effect
|
||||
}
|
||||
|
||||
fn inputs(&self) -> &[NodePort] {
|
||||
&self.inputs
|
||||
}
|
||||
|
||||
fn outputs(&self) -> &[NodePort] {
|
||||
&self.outputs
|
||||
}
|
||||
|
||||
fn parameters(&self) -> &[Parameter] {
|
||||
&self.parameters
|
||||
}
|
||||
|
||||
fn set_parameter(&mut self, id: u32, value: f32) {
|
||||
match id {
|
||||
PARAM_LOW_FREQ => {
|
||||
self.low_freq = value;
|
||||
self.update_filters();
|
||||
}
|
||||
PARAM_LOW_GAIN => {
|
||||
self.low_gain_db = value;
|
||||
self.update_filters();
|
||||
}
|
||||
PARAM_MID_FREQ => {
|
||||
self.mid_freq = value;
|
||||
self.update_filters();
|
||||
}
|
||||
PARAM_MID_GAIN => {
|
||||
self.mid_gain_db = value;
|
||||
self.update_filters();
|
||||
}
|
||||
PARAM_MID_Q => {
|
||||
self.mid_q = value;
|
||||
self.update_filters();
|
||||
}
|
||||
PARAM_HIGH_FREQ => {
|
||||
self.high_freq = value;
|
||||
self.update_filters();
|
||||
}
|
||||
PARAM_HIGH_GAIN => {
|
||||
self.high_gain_db = value;
|
||||
self.update_filters();
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
|
||||
fn get_parameter(&self, id: u32) -> f32 {
|
||||
match id {
|
||||
PARAM_LOW_FREQ => self.low_freq,
|
||||
PARAM_LOW_GAIN => self.low_gain_db,
|
||||
PARAM_MID_FREQ => self.mid_freq,
|
||||
PARAM_MID_GAIN => self.mid_gain_db,
|
||||
PARAM_MID_Q => self.mid_q,
|
||||
PARAM_HIGH_FREQ => self.high_freq,
|
||||
PARAM_HIGH_GAIN => self.high_gain_db,
|
||||
_ => 0.0,
|
||||
}
|
||||
}
|
||||
|
||||
fn process(
|
||||
&mut self,
|
||||
inputs: &[&[f32]],
|
||||
outputs: &mut [&mut [f32]],
|
||||
_midi_inputs: &[&[MidiEvent]],
|
||||
_midi_outputs: &mut [&mut Vec<MidiEvent>],
|
||||
sample_rate: u32,
|
||||
) {
|
||||
if inputs.is_empty() || outputs.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
// Update sample rate if changed
|
||||
if self.sample_rate != sample_rate {
|
||||
self.sample_rate = sample_rate;
|
||||
self.update_filters();
|
||||
}
|
||||
|
||||
let input = inputs[0];
|
||||
let output = &mut outputs[0];
|
||||
|
||||
// Audio signals are stereo (interleaved L/R)
|
||||
let frames = input.len() / 2;
|
||||
let output_frames = output.len() / 2;
|
||||
let frames_to_process = frames.min(output_frames);
|
||||
|
||||
for frame in 0..frames_to_process {
|
||||
let mut left = input[frame * 2];
|
||||
let mut right = input[frame * 2 + 1];
|
||||
|
||||
// Process through all three bands
|
||||
left = self.low_filter_left.process_sample(left, 0);
|
||||
left = self.mid_filter_left.process_sample(left, 0);
|
||||
left = self.high_filter_left.process_sample(left, 0);
|
||||
|
||||
right = self.low_filter_right.process_sample(right, 1);
|
||||
right = self.mid_filter_right.process_sample(right, 1);
|
||||
right = self.high_filter_right.process_sample(right, 1);
|
||||
|
||||
output[frame * 2] = left;
|
||||
output[frame * 2 + 1] = right;
|
||||
}
|
||||
}
|
||||
|
||||
fn reset(&mut self) {
|
||||
self.low_filter_left.reset();
|
||||
self.low_filter_right.reset();
|
||||
self.mid_filter_left.reset();
|
||||
self.mid_filter_right.reset();
|
||||
self.high_filter_left.reset();
|
||||
self.high_filter_right.reset();
|
||||
}
|
||||
|
||||
fn node_type(&self) -> &str {
|
||||
"EQ"
|
||||
}
|
||||
|
||||
fn name(&self) -> &str {
|
||||
&self.name
|
||||
}
|
||||
|
||||
fn clone_node(&self) -> Box<dyn AudioNode> {
|
||||
let mut node = Self::new(self.name.clone());
|
||||
node.low_freq = self.low_freq;
|
||||
node.low_gain_db = self.low_gain_db;
|
||||
node.mid_freq = self.mid_freq;
|
||||
node.mid_gain_db = self.mid_gain_db;
|
||||
node.mid_q = self.mid_q;
|
||||
node.high_freq = self.high_freq;
|
||||
node.high_gain_db = self.high_gain_db;
|
||||
node.sample_rate = self.sample_rate;
|
||||
node.update_filters();
|
||||
Box::new(node)
|
||||
}
|
||||
}
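A small usage sketch of the parameter API shared by these effect nodes; the values are arbitrary examples and the PARAM_* constants are the ones defined above:

let mut eq = EQNode::new("EQ");
eq.set_parameter(PARAM_LOW_GAIN, 3.0);   // boost around the 100 Hz default
eq.set_parameter(PARAM_MID_Q, 1.4);      // narrow the mid bell slightly
eq.set_parameter(PARAM_HIGH_GAIN, -2.0); // gentle cut around 8 kHz
assert_eq!(eq.get_parameter(PARAM_MID_Q), 1.4);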
|
||||
|
|
@@ -0,0 +1,215 @@
|
|||
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType};
|
||||
use crate::audio::midi::MidiEvent;
|
||||
|
||||
const PARAM_THRESHOLD: u32 = 0;
|
||||
const PARAM_RELEASE: u32 = 1;
|
||||
const PARAM_CEILING: u32 = 2;
|
||||
|
||||
/// Limiter node for preventing audio peaks from exceeding a threshold
|
||||
/// Essentially a compressor with infinite ratio and very fast attack
|
||||
pub struct LimiterNode {
|
||||
name: String,
|
||||
threshold_db: f32,
|
||||
release_ms: f32,
|
||||
ceiling_db: f32,
|
||||
|
||||
// State
|
||||
envelope: f32,
|
||||
release_coeff: f32,
|
||||
sample_rate: u32,
|
||||
|
||||
inputs: Vec<NodePort>,
|
||||
outputs: Vec<NodePort>,
|
||||
parameters: Vec<Parameter>,
|
||||
}
|
||||
|
||||
impl LimiterNode {
|
||||
pub fn new(name: impl Into<String>) -> Self {
|
||||
let name = name.into();
|
||||
|
||||
let inputs = vec![
|
||||
NodePort::new("Audio In", SignalType::Audio, 0),
|
||||
];
|
||||
|
||||
let outputs = vec![
|
||||
NodePort::new("Audio Out", SignalType::Audio, 0),
|
||||
];
|
||||
|
||||
let parameters = vec![
|
||||
Parameter::new(PARAM_THRESHOLD, "Threshold", -60.0, 0.0, -1.0, ParameterUnit::Decibels),
|
||||
Parameter::new(PARAM_RELEASE, "Release", 1.0, 500.0, 50.0, ParameterUnit::Time),
|
||||
Parameter::new(PARAM_CEILING, "Ceiling", -60.0, 0.0, 0.0, ParameterUnit::Decibels),
|
||||
];
|
||||
|
||||
let sample_rate = 44100;
|
||||
let release_coeff = Self::ms_to_coeff(50.0, sample_rate);
|
||||
|
||||
Self {
|
||||
name,
|
||||
threshold_db: -1.0,
|
||||
release_ms: 50.0,
|
||||
ceiling_db: 0.0,
|
||||
envelope: 0.0,
|
||||
release_coeff,
|
||||
sample_rate,
|
||||
inputs,
|
||||
outputs,
|
||||
parameters,
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert milliseconds to exponential smoothing coefficient
|
||||
fn ms_to_coeff(time_ms: f32, sample_rate: u32) -> f32 {
|
||||
let time_seconds = time_ms / 1000.0;
|
||||
let samples = time_seconds * sample_rate as f32;
|
||||
(-1.0 / samples).exp()
|
||||
}
|
||||
|
||||
fn update_coefficients(&mut self) {
|
||||
self.release_coeff = Self::ms_to_coeff(self.release_ms, self.sample_rate);
|
||||
}
|
||||
|
||||
/// Convert linear amplitude to dB
|
||||
fn linear_to_db(linear: f32) -> f32 {
|
||||
if linear > 0.0 {
|
||||
20.0 * linear.log10()
|
||||
} else {
|
||||
-160.0
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert dB to linear gain
|
||||
fn db_to_linear(db: f32) -> f32 {
|
||||
10.0_f32.powf(db / 20.0)
|
||||
}
|
||||
|
||||
fn process_sample(&mut self, input: f32) -> f32 {
|
||||
// Detect input level (using absolute value as peak detector)
|
||||
let input_level = input.abs();
|
||||
|
||||
// Convert to dB
|
||||
let input_db = Self::linear_to_db(input_level);
|
||||
|
||||
// Calculate gain reduction needed
|
||||
// If above threshold, apply infinite ratio (hard limit)
|
||||
let target_gr_db = if input_db > self.threshold_db {
|
||||
input_db - self.threshold_db // Amount of overshoot to reduce
|
||||
} else {
|
||||
0.0
|
||||
};
|
||||
|
||||
let target_gr_linear = Self::db_to_linear(-target_gr_db);
|
||||
|
||||
// Very fast attack (instant for limiter), but slower release
|
||||
// Attack coeff is very close to 0 for near-instant response
|
||||
let attack_coeff = 0.0001; // Extremely fast attack
|
||||
|
||||
let coeff = if target_gr_linear < self.envelope {
|
||||
attack_coeff // Attack (instant response to louder signal)
|
||||
} else {
|
||||
self.release_coeff // Release (slower recovery)
|
||||
};
|
||||
|
||||
self.envelope = target_gr_linear + coeff * (self.envelope - target_gr_linear);
|
||||
|
||||
// Apply limiting and output ceiling
|
||||
let limited = input * self.envelope;
|
||||
let ceiling_linear = Self::db_to_linear(self.ceiling_db);
|
||||
|
||||
// Hard clip at ceiling
|
||||
limited.clamp(-ceiling_linear, ceiling_linear)
|
||||
}
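// e.g. with the default -1 dB threshold, a peak at +2 dB is 3 dB over, so the gain
// settles toward 10^(-3/20) ≈ 0.708 (and the result is then clamped at the ceiling).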
|
||||
}
|
||||
|
||||
impl AudioNode for LimiterNode {
|
||||
fn category(&self) -> NodeCategory {
|
||||
NodeCategory::Effect
|
||||
}
|
||||
|
||||
fn inputs(&self) -> &[NodePort] {
|
||||
&self.inputs
|
||||
}
|
||||
|
||||
fn outputs(&self) -> &[NodePort] {
|
||||
&self.outputs
|
||||
}
|
||||
|
||||
fn parameters(&self) -> &[Parameter] {
|
||||
&self.parameters
|
||||
}
|
||||
|
||||
fn set_parameter(&mut self, id: u32, value: f32) {
|
||||
match id {
|
||||
PARAM_THRESHOLD => self.threshold_db = value,
|
||||
PARAM_RELEASE => {
|
||||
self.release_ms = value;
|
||||
self.update_coefficients();
|
||||
}
|
||||
PARAM_CEILING => self.ceiling_db = value,
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
|
||||
fn get_parameter(&self, id: u32) -> f32 {
|
||||
match id {
|
||||
PARAM_THRESHOLD => self.threshold_db,
|
||||
PARAM_RELEASE => self.release_ms,
|
||||
PARAM_CEILING => self.ceiling_db,
|
||||
_ => 0.0,
|
||||
}
|
||||
}
|
||||
|
||||
fn process(
|
||||
&mut self,
|
||||
inputs: &[&[f32]],
|
||||
outputs: &mut [&mut [f32]],
|
||||
_midi_inputs: &[&[MidiEvent]],
|
||||
_midi_outputs: &mut [&mut Vec<MidiEvent>],
|
||||
sample_rate: u32,
|
||||
) {
|
||||
if inputs.is_empty() || outputs.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
// Update sample rate if changed
|
||||
if self.sample_rate != sample_rate {
|
||||
self.sample_rate = sample_rate;
|
||||
self.update_coefficients();
|
||||
}
|
||||
|
||||
let input = inputs[0];
|
||||
let output = &mut outputs[0];
|
||||
let len = input.len().min(output.len());
|
||||
|
||||
for i in 0..len {
|
||||
output[i] = self.process_sample(input[i]);
|
||||
}
|
||||
}
|
||||
|
||||
fn reset(&mut self) {
|
||||
self.envelope = 0.0;
|
||||
}
|
||||
|
||||
fn node_type(&self) -> &str {
|
||||
"Limiter"
|
||||
}
|
||||
|
||||
fn name(&self) -> &str {
|
||||
&self.name
|
||||
}
|
||||
|
||||
fn clone_node(&self) -> Box<dyn AudioNode> {
|
||||
Box::new(Self {
|
||||
name: self.name.clone(),
|
||||
threshold_db: self.threshold_db,
|
||||
release_ms: self.release_ms,
|
||||
ceiling_db: self.ceiling_db,
|
||||
envelope: 0.0, // Reset state for clone
|
||||
release_coeff: self.release_coeff,
|
||||
sample_rate: self.sample_rate,
|
||||
inputs: self.inputs.clone(),
|
||||
outputs: self.outputs.clone(),
|
||||
parameters: self.parameters.clone(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
@@ -1,9 +1,13 @@
|
|||
mod adsr;
|
||||
mod audio_to_cv;
|
||||
mod chorus;
|
||||
mod compressor;
|
||||
mod delay;
|
||||
mod distortion;
|
||||
mod eq;
|
||||
mod filter;
|
||||
mod flanger;
|
||||
mod limiter;
|
||||
mod fm_synth;
|
||||
mod gain;
|
||||
mod lfo;
|
||||
|
|
@@ -26,9 +30,13 @@ mod wavetable_oscillator;
|
|||
pub use adsr::ADSRNode;
|
||||
pub use audio_to_cv::AudioToCVNode;
|
||||
pub use chorus::ChorusNode;
|
||||
pub use compressor::CompressorNode;
|
||||
pub use delay::DelayNode;
|
||||
pub use distortion::DistortionNode;
|
||||
pub use eq::EQNode;
|
||||
pub use filter::FilterNode;
|
||||
pub use flanger::FlangerNode;
|
||||
pub use limiter::LimiterNode;
|
||||
pub use fm_synth::FMSynthNode;
|
||||
pub use gain::GainNode;
|
||||
pub use lfo::LFONode;
|
||||
|
|
|
|||
|
|
@@ -1,3 +1,3 @@
|
|||
pub mod types;
|
||||
|
||||
pub use types::{AudioEvent, Command};
|
||||
pub use types::{AudioEvent, Command, Query, QueryResponse};
|
||||
|
|
|
|||
|
|
@@ -197,3 +197,19 @@ pub enum AudioEvent {
|
|||
/// Graph state changed (for full UI sync)
|
||||
GraphStateChanged(TrackId),
|
||||
}
|
||||
|
||||
/// Synchronous queries sent from UI thread to audio thread
|
||||
#[derive(Debug)]
|
||||
pub enum Query {
|
||||
/// Get the current graph state as JSON (track_id)
|
||||
GetGraphState(TrackId),
|
||||
/// Get a voice allocator's template graph state as JSON (track_id, voice_allocator_id)
|
||||
GetTemplateState(TrackId, u32),
|
||||
}
|
||||
|
||||
/// Responses to synchronous queries
|
||||
#[derive(Debug)]
|
||||
pub enum QueryResponse {
|
||||
/// Graph state as JSON string
|
||||
GraphState(Result<String, String>),
|
||||
}
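// Every Query pushed by the UI is answered with exactly one QueryResponse; both
// query variants above currently reply with QueryResponse::GraphState.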
|
||||
|
|
|
|||
|
|
@@ -59,6 +59,8 @@ impl AudioSystem {
|
|||
// Create queues
|
||||
let (command_tx, command_rx) = rtrb::RingBuffer::new(256);
|
||||
let (event_tx, event_rx) = rtrb::RingBuffer::new(256);
|
||||
let (query_tx, query_rx) = rtrb::RingBuffer::new(16); // Smaller buffer for synchronous queries
|
||||
let (query_response_tx, query_response_rx) = rtrb::RingBuffer::new(16);
|
||||
|
||||
// Create input ringbuffer for recording (large buffer for audio samples)
|
||||
// Buffer size: 10 seconds of audio at 48kHz stereo = 48000 * 2 * 10 = 960000 samples
|
||||
|
|
@@ -66,9 +68,9 @@ impl AudioSystem {
|
|||
let (mut input_tx, input_rx) = rtrb::RingBuffer::new(input_buffer_size);
|
||||
|
||||
// Create engine
|
||||
let mut engine = Engine::new(sample_rate, channels, command_rx, event_tx);
|
||||
let mut engine = Engine::new(sample_rate, channels, command_rx, event_tx, query_rx, query_response_tx);
|
||||
engine.set_input_rx(input_rx);
|
||||
let controller = engine.get_controller(command_tx);
|
||||
let controller = engine.get_controller(command_tx, query_tx, query_response_rx);
|
||||
|
||||
// Build output stream
|
||||
let output_config: cpal::StreamConfig = default_output_config.clone().into();
|
||||
|
|
|
|||
|
|
@@ -785,33 +785,68 @@ pub async fn graph_list_presets(
|
|||
|
||||
let mut presets = Vec::new();
|
||||
|
||||
// Load factory presets from bundled assets
|
||||
let factory_presets = [
|
||||
"Basic_Sine.json",
|
||||
"Sawtooth_Bass.json",
|
||||
"Warm_Pad.json",
|
||||
"Pluck.json",
|
||||
"Poly_Synth.json",
|
||||
];
|
||||
|
||||
for preset_file in &factory_presets {
|
||||
// Try to load from resource directory
|
||||
if let Ok(resource_dir) = app_handle.path().resource_dir() {
|
||||
let factory_path = resource_dir.join("assets/factory_presets").join(preset_file);
|
||||
if let Ok(json) = fs::read_to_string(&factory_path) {
|
||||
if let Ok(preset) = GraphPreset::from_json(&json) {
|
||||
// Recursively scan for JSON files in instruments directory
|
||||
fn scan_presets_recursive(dir: &std::path::Path, presets: &mut Vec<PresetInfo>) {
|
||||
eprintln!("Scanning directory: {:?}", dir);
|
||||
if let Ok(entries) = std::fs::read_dir(dir) {
|
||||
for entry in entries.flatten() {
|
||||
let path = entry.path();
|
||||
if path.is_dir() {
|
||||
// Recurse into subdirectories
|
||||
scan_presets_recursive(&path, presets);
|
||||
} else if path.extension().and_then(|s| s.to_str()) == Some("json") {
|
||||
eprintln!("Found JSON file: {:?}", path);
|
||||
// Load JSON preset files
|
||||
match std::fs::read_to_string(&path) {
|
||||
Ok(json) => {
|
||||
match daw_backend::GraphPreset::from_json(&json) {
|
||||
Ok(preset) => {
|
||||
eprintln!(" ✓ Loaded preset: {}", preset.metadata.name);
|
||||
presets.push(PresetInfo {
|
||||
name: preset.metadata.name,
|
||||
path: factory_path.to_string_lossy().to_string(),
|
||||
path: path.to_string_lossy().to_string(),
|
||||
description: preset.metadata.description,
|
||||
author: preset.metadata.author,
|
||||
tags: preset.metadata.tags,
|
||||
is_factory: true,
|
||||
});
|
||||
}
|
||||
Err(e) => eprintln!(" ✗ Failed to parse preset: {}", e),
|
||||
}
|
||||
}
|
||||
Err(e) => eprintln!(" ✗ Failed to read file: {}", e),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
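// Example layout this scan would pick up (the directory nesting shown here is
// illustrative):
//   assets/instruments/
//     Basic_Sine.json
//     Pads/Warm_Pad.json
//     Keys/Pluck.json
// Every *.json file is parsed as a GraphPreset regardless of nesting depth.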
|
||||
|
||||
// Try multiple locations for instruments
|
||||
let mut instruments_found = false;
|
||||
|
||||
// 1. Try bundled resources (production)
|
||||
if let Ok(resource_dir) = app_handle.path().resource_dir() {
|
||||
let instruments_dir = resource_dir.join("assets/instruments");
|
||||
eprintln!("Trying bundled path: {:?} (exists: {})", instruments_dir, instruments_dir.exists());
|
||||
if instruments_dir.exists() {
|
||||
scan_presets_recursive(&instruments_dir, &mut presets);
|
||||
instruments_found = true;
|
||||
}
|
||||
}
|
||||
|
||||
// 2. Fallback to dev location (development mode)
|
||||
if !instruments_found {
|
||||
// Try relative to current working directory (dev mode)
|
||||
if let Ok(cwd) = std::env::current_dir() {
|
||||
let dev_instruments = cwd.join("../src/assets/instruments");
|
||||
eprintln!("Trying dev path: {:?} (exists: {})", dev_instruments, dev_instruments.exists());
|
||||
if dev_instruments.exists() {
|
||||
scan_presets_recursive(&dev_instruments, &mut presets);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
eprintln!("Found {} factory presets", presets.len());
|
||||
|
||||
// Load user presets
|
||||
if let Ok(app_data_dir) = app_handle.path().app_data_dir() {
|
||||
|
|
@@ -845,15 +880,26 @@ pub async fn graph_list_presets(
|
|||
|
||||
#[tauri::command]
|
||||
pub async fn graph_delete_preset(
|
||||
app_handle: tauri::AppHandle,
|
||||
preset_path: String,
|
||||
) -> Result<(), String> {
|
||||
use std::fs;
|
||||
use std::path::Path;
|
||||
|
||||
// Only allow deleting user presets (not factory presets)
|
||||
if preset_path.contains("factory") || preset_path.contains("assets") {
|
||||
return Err("Cannot delete factory presets".to_string());
|
||||
let preset_path = Path::new(&preset_path);
|
||||
|
||||
// Check if preset is in the app's resource directory (factory content - cannot delete)
|
||||
if let Ok(resource_dir) = app_handle.path().resource_dir() {
|
||||
if let Ok(canonical_preset) = preset_path.canonicalize() {
|
||||
if let Ok(canonical_resource) = resource_dir.canonicalize() {
|
||||
if canonical_preset.starts_with(canonical_resource) {
|
||||
return Err("Cannot delete factory presets or bundled instruments".to_string());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
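// Note: Path::canonicalize returns Err for a path that no longer exists, in which
// case the factory check is skipped and the remove_file call below reports the error.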
|
||||
|
||||
// If we get here, it's a user preset - safe to delete
|
||||
fs::remove_file(&preset_path)
|
||||
.map_err(|e| format!("Failed to delete preset: {}", e))?;
|
||||
|
||||
|
|
@@ -865,40 +911,10 @@ pub async fn graph_get_state(
|
|||
state: tauri::State<'_, Arc<Mutex<AudioState>>>,
|
||||
track_id: u32,
|
||||
) -> Result<String, String> {
|
||||
use daw_backend::GraphPreset;
|
||||
|
||||
let mut audio_state = state.lock().unwrap();
|
||||
if let Some(controller) = &mut audio_state.controller {
|
||||
// Send a command to get the graph state
|
||||
// For now, we'll use the preset serialization to get the graph
|
||||
let temp_path = std::env::temp_dir().join(format!("temp_graph_state_{}.json", track_id));
|
||||
let temp_path_str = temp_path.to_string_lossy().to_string();
|
||||
|
||||
controller.graph_save_preset(
|
||||
track_id,
|
||||
temp_path_str.clone(),
|
||||
"temp".to_string(),
|
||||
"".to_string(),
|
||||
vec![]
|
||||
);
|
||||
|
||||
// Give the audio thread time to process
|
||||
std::thread::sleep(std::time::Duration::from_millis(50));
|
||||
|
||||
// Read the temp file
|
||||
let json = match std::fs::read_to_string(&temp_path) {
|
||||
Ok(json) => json,
|
||||
Err(_) => {
|
||||
// If file doesn't exist, graph is likely empty - return empty preset
|
||||
let empty_preset = GraphPreset::new("empty");
|
||||
empty_preset.to_json().unwrap_or_else(|_| "{}".to_string())
|
||||
}
|
||||
};
|
||||
|
||||
// Clean up temp file
|
||||
let _ = std::fs::remove_file(&temp_path);
|
||||
|
||||
Ok(json)
|
||||
// Use synchronous query to get graph state
|
||||
controller.query_graph_state(track_id)
|
||||
} else {
|
||||
Err("Audio not initialized".to_string())
|
||||
}
|
||||
|
|
@@ -910,40 +926,10 @@ pub async fn graph_get_template_state(
|
|||
track_id: u32,
|
||||
voice_allocator_id: u32,
|
||||
) -> Result<String, String> {
|
||||
use daw_backend::GraphPreset;
|
||||
|
||||
let mut audio_state = state.lock().unwrap();
|
||||
if let Some(controller) = &mut audio_state.controller {
|
||||
// For template graphs, we'll use a different temp file path
|
||||
let temp_path = std::env::temp_dir().join(format!("temp_template_state_{}_{}.json", track_id, voice_allocator_id));
|
||||
let temp_path_str = temp_path.to_string_lossy().to_string();
|
||||
|
||||
// Send a custom command to save the template graph
|
||||
// We'll need to add this command to the backend
|
||||
controller.graph_save_template_preset(
|
||||
track_id,
|
||||
voice_allocator_id,
|
||||
temp_path_str.clone(),
|
||||
"temp_template".to_string()
|
||||
);
|
||||
|
||||
// Give the audio thread time to process
|
||||
std::thread::sleep(std::time::Duration::from_millis(50));
|
||||
|
||||
// Read the temp file
|
||||
let json = match std::fs::read_to_string(&temp_path) {
|
||||
Ok(json) => json,
|
||||
Err(_) => {
|
||||
// If file doesn't exist, template is likely empty
|
||||
let empty_preset = GraphPreset::new("empty_template");
|
||||
empty_preset.to_json().unwrap_or_else(|_| "{}".to_string())
|
||||
}
|
||||
};
|
||||
|
||||
// Clean up temp file
|
||||
let _ = std::fs::remove_file(&temp_path);
|
||||
|
||||
Ok(json)
|
||||
// Use synchronous query to get template graph state
|
||||
controller.query_template_state(track_id, voice_allocator_id)
|
||||
} else {
|
||||
Err("Audio not initialized".to_string())
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -35,7 +35,7 @@
|
|||
"icons/icon.ico"
|
||||
],
|
||||
"resources": [
|
||||
"assets/factory_presets/*"
|
||||
"../src/assets/instruments/**/*"
|
||||
],
|
||||
"linux": {
|
||||
"appimage": {
|
||||
|
|
|
|||
src/nodeTypes.js (+184 lines)
|
|
@@ -884,6 +884,190 @@ export const nodeTypes = {
|
|||
</div>
|
||||
</div>
|
||||
`
|
||||
},
|
||||
|
||||
Compressor: {
|
||||
name: 'Compressor',
|
||||
category: NodeCategory.EFFECT,
|
||||
description: 'Dynamic range compressor with soft-knee',
|
||||
inputs: [
|
||||
{ name: 'Audio In', type: SignalType.AUDIO, index: 0 }
|
||||
],
|
||||
outputs: [
|
||||
{ name: 'Audio Out', type: SignalType.AUDIO, index: 0 }
|
||||
],
|
||||
parameters: [
|
||||
{ id: 0, name: 'threshold', label: 'Threshold', min: -60, max: 0, default: -20, unit: 'dB' },
|
||||
{ id: 1, name: 'ratio', label: 'Ratio', min: 1, max: 20, default: 4, unit: ':1' },
|
||||
{ id: 2, name: 'attack', label: 'Attack', min: 0.1, max: 100, default: 5, unit: 'ms' },
|
||||
{ id: 3, name: 'release', label: 'Release', min: 10, max: 1000, default: 100, unit: 'ms' },
|
||||
{ id: 4, name: 'makeup_gain', label: 'Makeup Gain', min: 0, max: 20, default: 0, unit: 'dB' },
|
||||
{ id: 5, name: 'knee', label: 'Knee', min: 0, max: 12, default: 6, unit: 'dB' }
|
||||
],
|
||||
getHTML: (nodeId) => `
|
||||
<div class="node-content">
|
||||
<div class="node-title">Compressor</div>
|
||||
<div class="node-param">
|
||||
<label>Threshold: <span id="threshold-${nodeId}">-20</span> dB</label>
|
||||
<input type="range" data-node="${nodeId}" data-param="0" min="-60" max="0" value="-20" step="0.1">
|
||||
</div>
|
||||
<div class="node-param">
|
||||
<label>Ratio: <span id="ratio-${nodeId}">4.0</span>:1</label>
|
||||
<input type="range" data-node="${nodeId}" data-param="1" min="1" max="20" value="4" step="0.1">
|
||||
</div>
|
||||
<div class="node-param">
|
||||
<label>Attack: <span id="attack-${nodeId}">5</span> ms</label>
|
||||
<input type="range" data-node="${nodeId}" data-param="2" min="0.1" max="100" value="5" step="0.1">
|
||||
</div>
|
||||
<div class="node-param">
|
||||
<label>Release: <span id="release-${nodeId}">100</span> ms</label>
|
||||
<input type="range" data-node="${nodeId}" data-param="3" min="10" max="1000" value="100" step="1">
|
||||
</div>
|
||||
<div class="node-param">
|
||||
<label>Makeup: <span id="makeup-${nodeId}">0</span> dB</label>
|
||||
<input type="range" data-node="${nodeId}" data-param="4" min="0" max="20" value="0" step="0.1">
|
||||
</div>
|
||||
<div class="node-param">
|
||||
<label>Knee: <span id="knee-${nodeId}">6</span> dB</label>
|
||||
<input type="range" data-node="${nodeId}" data-param="5" min="0" max="12" value="6" step="0.1">
|
||||
</div>
|
||||
</div>
|
||||
`
|
||||
},
|
||||
|
||||
Limiter: {
|
||||
name: 'Limiter',
|
||||
category: NodeCategory.EFFECT,
|
||||
description: 'Peak limiter with ceiling control',
|
||||
inputs: [
|
||||
{ name: 'Audio In', type: SignalType.AUDIO, index: 0 }
|
||||
],
|
||||
outputs: [
|
||||
{ name: 'Audio Out', type: SignalType.AUDIO, index: 0 }
|
||||
],
|
||||
parameters: [
|
||||
{ id: 0, name: 'threshold', label: 'Threshold', min: -60, max: 0, default: -10, unit: 'dB' },
|
||||
{ id: 1, name: 'release', label: 'Release', min: 10, max: 1000, default: 50, unit: 'ms' },
|
||||
{ id: 2, name: 'ceiling', label: 'Ceiling', min: -20, max: 0, default: 0, unit: 'dB' }
|
||||
],
|
||||
getHTML: (nodeId) => `
|
||||
<div class="node-content">
|
||||
<div class="node-title">Limiter</div>
|
||||
<div class="node-param">
|
||||
<label>Threshold: <span id="limthreshold-${nodeId}">-10</span> dB</label>
|
||||
<input type="range" data-node="${nodeId}" data-param="0" min="-60" max="0" value="-10" step="0.1">
|
||||
</div>
|
||||
<div class="node-param">
|
||||
<label>Release: <span id="limrelease-${nodeId}">50</span> ms</label>
|
||||
<input type="range" data-node="${nodeId}" data-param="1" min="10" max="1000" value="50" step="1">
|
||||
</div>
|
||||
<div class="node-param">
|
||||
<label>Ceiling: <span id="ceiling-${nodeId}">0</span> dB</label>
|
||||
<input type="range" data-node="${nodeId}" data-param="2" min="-20" max="0" value="0" step="0.1">
|
||||
</div>
|
||||
</div>
|
||||
`
|
||||
},
|
||||
|
||||
Distortion: {
|
||||
name: 'Distortion',
|
||||
category: NodeCategory.EFFECT,
|
||||
description: 'Waveshaping distortion with multiple algorithms',
|
||||
inputs: [
|
||||
{ name: 'Audio In', type: SignalType.AUDIO, index: 0 }
|
||||
],
|
||||
outputs: [
|
||||
{ name: 'Audio Out', type: SignalType.AUDIO, index: 0 }
|
||||
],
|
||||
parameters: [
|
||||
{ id: 0, name: 'drive', label: 'Drive', min: 0.01, max: 20, default: 1, unit: '' },
|
||||
{ id: 1, name: 'type', label: 'Type', min: 0, max: 3, default: 0, unit: '' },
|
||||
{ id: 2, name: 'tone', label: 'Tone', min: 0, max: 1, default: 0.7, unit: '' },
|
||||
{ id: 3, name: 'mix', label: 'Mix', min: 0, max: 1, default: 1, unit: '' }
|
||||
],
|
||||
getHTML: (nodeId) => `
|
||||
<div class="node-content">
|
||||
<div class="node-title">Distortion</div>
|
||||
<div class="node-param">
|
||||
<label>Type: <span id="disttype-${nodeId}">Soft Clip</span></label>
|
||||
<select data-node="${nodeId}" data-param="1" style="width: 100%; padding: 2px;">
|
||||
<option value="0">Soft Clip</option>
|
||||
<option value="1">Hard Clip</option>
|
||||
<option value="2">Tanh</option>
|
||||
<option value="3">Asymmetric</option>
|
||||
</select>
|
||||
</div>
|
||||
<div class="node-param">
|
||||
<label>Drive: <span id="drive-${nodeId}">1.00</span></label>
|
||||
<input type="range" data-node="${nodeId}" data-param="0" min="0.01" max="20" value="1" step="0.01">
|
||||
</div>
|
||||
<div class="node-param">
|
||||
<label>Tone: <span id="tone-${nodeId}">0.70</span></label>
|
||||
<input type="range" data-node="${nodeId}" data-param="2" min="0" max="1" value="0.7" step="0.01">
|
||||
</div>
|
||||
<div class="node-param">
|
||||
<label>Mix: <span id="mix-${nodeId}">1.00</span></label>
|
||||
<input type="range" data-node="${nodeId}" data-param="3" min="0" max="1" value="1" step="0.01">
|
||||
</div>
|
||||
</div>
|
||||
`
|
||||
},
|
||||
|
||||
EQ: {
|
||||
name: 'EQ',
|
||||
category: NodeCategory.EFFECT,
|
||||
description: '3-band parametric EQ',
|
||||
inputs: [
|
||||
{ name: 'Audio In', type: SignalType.AUDIO, index: 0 }
|
||||
],
|
||||
outputs: [
|
||||
{ name: 'Audio Out', type: SignalType.AUDIO, index: 0 }
|
||||
],
|
||||
parameters: [
|
||||
{ id: 0, name: 'low_freq', label: 'Low Freq', min: 20, max: 500, default: 100, unit: 'Hz' },
|
||||
{ id: 1, name: 'low_gain', label: 'Low Gain', min: -24, max: 24, default: 0, unit: 'dB' },
|
||||
{ id: 2, name: 'mid_freq', label: 'Mid Freq', min: 200, max: 5000, default: 1000, unit: 'Hz' },
|
||||
{ id: 3, name: 'mid_gain', label: 'Mid Gain', min: -24, max: 24, default: 0, unit: 'dB' },
|
||||
{ id: 4, name: 'mid_q', label: 'Mid Q', min: 0.1, max: 10, default: 0.707, unit: '' },
|
||||
{ id: 5, name: 'high_freq', label: 'High Freq', min: 2000, max: 20000, default: 8000, unit: 'Hz' },
|
||||
{ id: 6, name: 'high_gain', label: 'High Gain', min: -24, max: 24, default: 0, unit: 'dB' }
|
||||
],
|
||||
getHTML: (nodeId) => `
|
||||
<div class="node-content">
|
||||
<div class="node-title">EQ</div>
|
||||
<div style="font-size: 10px; margin-top: 4px; font-weight: bold;">Low Band</div>
|
||||
<div class="node-param">
|
||||
<label>Freq: <span id="lowfreq-${nodeId}">100</span> Hz</label>
|
||||
<input type="range" data-node="${nodeId}" data-param="0" min="20" max="500" value="100" step="1">
|
||||
</div>
|
||||
<div class="node-param">
|
||||
<label>Gain: <span id="lowgain-${nodeId}">0</span> dB</label>
|
||||
<input type="range" data-node="${nodeId}" data-param="1" min="-24" max="24" value="0" step="0.1">
|
||||
</div>
|
||||
<div style="font-size: 10px; margin-top: 4px; font-weight: bold;">Mid Band</div>
|
||||
<div class="node-param">
|
||||
<label>Freq: <span id="midfreq-${nodeId}">1000</span> Hz</label>
|
||||
<input type="range" data-node="${nodeId}" data-param="2" min="200" max="5000" value="1000" step="10">
|
||||
</div>
|
||||
<div class="node-param">
|
||||
<label>Gain: <span id="midgain-${nodeId}">0</span> dB</label>
|
||||
<input type="range" data-node="${nodeId}" data-param="3" min="-24" max="24" value="0" step="0.1">
|
||||
</div>
|
||||
<div class="node-param">
|
||||
<label>Q: <span id="midq-${nodeId}">0.71</span></label>
|
||||
<input type="range" data-node="${nodeId}" data-param="4" min="0.1" max="10" value="0.707" step="0.01">
|
||||
</div>
|
||||
<div style="font-size: 10px; margin-top: 4px; font-weight: bold;">High Band</div>
|
||||
<div class="node-param">
|
||||
<label>Freq: <span id="highfreq-${nodeId}">8000</span> Hz</label>
|
||||
<input type="range" data-node="${nodeId}" data-param="5" min="2000" max="20000" value="8000" step="100">
|
||||
</div>
|
||||
<div class="node-param">
|
||||
<label>Gain: <span id="highgain-${nodeId}">0</span> dB</label>
|
||||
<input type="range" data-node="${nodeId}" data-param="6" min="-24" max="24" value="0" step="0.1">
|
||||
</div>
|
||||
</div>
|
||||
`
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
|||