Compare commits

...

4 Commits

Author SHA1 Message Date
Skyler Lehmkuhl dae82b02d1 Merge branch 'rust-ui' of https://git.skyler.io/skyler/Lightningbeam into rust-ui 2026-02-19 05:56:00 -05:00
Skyler Lehmkuhl 0a18d28f98 Add arpeggiator node 2026-02-19 05:30:34 -05:00
Skyler Lehmkuhl 89bbd3614f Add beat node 2026-02-19 01:19:40 -05:00
Skyler Lehmkuhl 21a49235fc sampler improvements, live waveform preview 2026-02-17 10:08:49 -05:00
20 changed files with 974 additions and 145 deletions

View File

@ -53,6 +53,7 @@ pub struct Engine {
// Recording state
recording_state: Option<RecordingState>,
input_rx: Option<rtrb::Consumer<f32>>,
recording_mirror_tx: Option<rtrb::Producer<f32>>,
recording_progress_counter: usize,
// MIDI recording state
@ -130,6 +131,7 @@ impl Engine {
next_clip_id: 0,
recording_state: None,
input_rx: None,
recording_mirror_tx: None,
recording_progress_counter: 0,
midi_recording_state: None,
midi_input_manager: None,
@ -151,6 +153,11 @@ impl Engine {
self.input_rx = Some(input_rx);
}
/// Set the recording mirror producer for streaming audio to UI during recording
pub fn set_recording_mirror_tx(&mut self, tx: rtrb::Producer<f32>) {
self.recording_mirror_tx = Some(tx);
}
/// Set the MIDI input manager for external MIDI devices
pub fn set_midi_input_manager(&mut self, manager: MidiInputManager) {
self.midi_input_manager = Some(manager);
@ -393,8 +400,24 @@ impl Engine {
// Add samples to recording
if !self.recording_sample_buffer.is_empty() {
// Calculate how many samples will be skipped (stale buffer data)
let skip = if recording.paused {
self.recording_sample_buffer.len()
} else {
recording.samples_to_skip.min(self.recording_sample_buffer.len())
};
match recording.add_samples(&self.recording_sample_buffer) {
Ok(_flushed) => {
// Mirror non-skipped samples to UI for live waveform display
if skip < self.recording_sample_buffer.len() {
if let Some(ref mut mirror_tx) = self.recording_mirror_tx {
for &sample in &self.recording_sample_buffer[skip..] {
let _ = mirror_tx.push(sample);
}
}
}
// Update clip duration every callback for sample-accurate timing
let duration = recording.duration();
let clip_id = recording.clip_id;
@ -1143,6 +1166,8 @@ impl Engine {
"Compressor" => Box::new(CompressorNode::new("Compressor".to_string())),
"Constant" => Box::new(ConstantNode::new("Constant".to_string())),
"BpmDetector" => Box::new(BpmDetectorNode::new("BPM Detector".to_string())),
"Beat" => Box::new(BeatNode::new("Beat".to_string())),
"Arpeggiator" => Box::new(ArpeggiatorNode::new("Arpeggiator".to_string())),
"EnvelopeFollower" => Box::new(EnvelopeFollowerNode::new("Envelope Follower".to_string())),
"Limiter" => Box::new(LimiterNode::new("Limiter".to_string())),
"Math" => Box::new(MathNode::new("Math".to_string())),
@ -1230,6 +1255,8 @@ impl Engine {
"Compressor" => Box::new(CompressorNode::new("Compressor".to_string())),
"Constant" => Box::new(ConstantNode::new("Constant".to_string())),
"BpmDetector" => Box::new(BpmDetectorNode::new("BPM Detector".to_string())),
"Beat" => Box::new(BeatNode::new("Beat".to_string())),
"Arpeggiator" => Box::new(ArpeggiatorNode::new("Arpeggiator".to_string())),
"EnvelopeFollower" => Box::new(EnvelopeFollowerNode::new("Envelope Follower".to_string())),
"Limiter" => Box::new(LimiterNode::new("Limiter".to_string())),
"Math" => Box::new(MathNode::new("Math".to_string())),
@ -2540,7 +2567,7 @@ impl Engine {
}
// Notify UI that recording has started
let _ = self.event_tx.push(AudioEvent::RecordingStarted(track_id, clip_id));
let _ = self.event_tx.push(AudioEvent::RecordingStarted(track_id, clip_id, self.sample_rate, self.channels));
}
Err(e) => {
// Send error event to UI
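Note: the mirror channel above reuses the same lock-free SPSC pattern as the existing input ring buffer: the audio thread pushes and silently drops samples when the buffer is full, while the UI thread drains at its own pace. A minimal sketch of that pattern with rtrb (capacity, function name, and thread structure are illustrative, not the engine's actual wiring):

fn mirror_sketch() {
    // Producer goes to the audio thread, consumer to the UI thread.
    let (mut tx, mut rx) = rtrb::RingBuffer::<f32>::new(48_000);

    // Audio side: push and ignore failure when full; dropping mirror
    // samples only degrades the live preview, never the recorded take.
    for sample in [0.1f32, -0.2, 0.3] {
        let _ = tx.push(sample);
    }

    // UI side: drain whatever has arrived this frame.
    let mut drained = Vec::new();
    while let Ok(sample) = rx.pop() {
        drained.push(sample);
    }
    assert_eq!(drained.len(), 3);
}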

View File

@ -445,17 +445,20 @@ impl AudioGraph {
// Update playback time
self.playback_time = playback_time;
// Update playback time for all automation nodes before processing
use super::nodes::AutomationInputNode;
// Update playback time for all time-dependent nodes before processing
use super::nodes::{AutomationInputNode, BeatNode};
for node in self.graph.node_weights_mut() {
// Try to downcast to AutomationInputNode and update its playback time
if let Some(auto_node) = node.node.as_any_mut().downcast_mut::<AutomationInputNode>() {
auto_node.set_playback_time(playback_time);
} else if let Some(beat_node) = node.node.as_any_mut().downcast_mut::<BeatNode>() {
beat_node.set_playback_time(playback_time);
}
}
// Use the requested output buffer size for processing
// process_size is stereo (interleaved L/R), frame_count is mono
let process_size = output_buffer.len();
let frame_count = process_size / 2;
// Clear all output buffers (audio/CV and MIDI)
for node in self.graph.node_weights_mut() {
@ -498,6 +501,11 @@ impl AudioGraph {
let inputs = self.graph[node_idx].node.inputs();
let num_audio_cv_inputs = inputs.iter().filter(|p| p.signal_type != SignalType::Midi).count();
let num_midi_inputs = inputs.iter().filter(|p| p.signal_type == SignalType::Midi).count();
// Collect audio/CV input signal types for correct buffer sizing
let audio_cv_input_types: Vec<SignalType> = inputs.iter()
.filter(|p| p.signal_type != SignalType::Midi)
.map(|p| p.signal_type)
.collect();
// Clear input buffers
// - Audio inputs: fill with 0.0 (silence) when unconnected
@ -544,11 +552,18 @@ impl AudioGraph {
match source_port_type {
SignalType::Audio | SignalType::CV => {
// Map from global port index to audio/CV-only port index
// (input_buffers only contains audio/CV entries, not MIDI)
let audio_cv_port_idx = inputs.iter()
.take(to_port + 1)
.filter(|p| p.signal_type != SignalType::Midi)
.count().saturating_sub(1);
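// e.g. with ports [Audio, Midi, CV], global to_port == 2 (the CV port)
// maps to audio/CV index 1, because the MIDI port has no entry in
// input_buffers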
// Copy audio/CV data
if to_port < num_audio_cv_inputs && from_port < source_node.output_buffers.len() {
if audio_cv_port_idx < num_audio_cv_inputs && from_port < source_node.output_buffers.len() {
let source_buffer = &source_node.output_buffers[from_port];
if to_port < self.input_buffers.len() {
for (dst, src) in self.input_buffers[to_port].iter_mut().zip(source_buffer.iter()) {
if audio_cv_port_idx < self.input_buffers.len() {
for (dst, src) in self.input_buffers[audio_cv_port_idx].iter_mut().zip(source_buffer.iter()) {
// If dst is NaN (unconnected), replace it; otherwise add (for mixing)
if dst.is_nan() {
*dst = *src;
@ -582,11 +597,15 @@ impl AudioGraph {
}
}
// Prepare audio/CV input slices
// Prepare audio/CV input slices (Audio=stereo process_size, CV=mono frame_count)
let input_slices: Vec<&[f32]> = (0..num_audio_cv_inputs)
.map(|i| {
if i < self.input_buffers.len() {
&self.input_buffers[i][..process_size.min(self.input_buffers[i].len())]
let slice_size = match audio_cv_input_types.get(i) {
Some(&SignalType::Audio) => process_size,
_ => frame_count,
};
&self.input_buffers[i][..slice_size.min(self.input_buffers[i].len())]
} else {
&[][..]
}
@ -607,19 +626,22 @@ impl AudioGraph {
// Get mutable access to output buffers
let node = &mut self.graph[node_idx];
let outputs = node.node.outputs();
let num_audio_cv_outputs = outputs.iter().filter(|p| p.signal_type != SignalType::Midi).count();
let num_midi_outputs = outputs.iter().filter(|p| p.signal_type == SignalType::Midi).count();
// Collect output signal types for correct buffer sizing
let output_signal_types: Vec<SignalType> = outputs.iter().map(|p| p.signal_type).collect();
// Create mutable slices for audio/CV outputs
// Each buffer is independent, so this is safe
let mut output_slices: Vec<&mut [f32]> = node.output_buffers
.iter_mut()
.take(num_audio_cv_outputs)
.map(|buf| {
let len = buf.len();
&mut buf[..process_size.min(len)]
})
.collect();
// Create mutable slices for audio/CV outputs (Audio=stereo, CV=mono)
let mut output_slices: Vec<&mut [f32]> = Vec::new();
for (i, buf) in node.output_buffers.iter_mut().enumerate() {
let signal_type = output_signal_types.get(i).copied().unwrap_or(SignalType::CV);
if signal_type == SignalType::Midi { continue; }
let slice_size = match signal_type {
SignalType::Audio => process_size,
_ => frame_count,
};
let len = buf.len();
output_slices.push(&mut buf[..slice_size.min(len)]);
}
// Create mutable references for MIDI outputs
let mut midi_output_refs: Vec<&mut Vec<MidiEvent>> = node.midi_output_buffers
@ -967,6 +989,8 @@ impl AudioGraph {
"Chorus" => Box::new(ChorusNode::new("Chorus")),
"Compressor" => Box::new(CompressorNode::new("Compressor")),
"Constant" => Box::new(ConstantNode::new("Constant")),
"Beat" => Box::new(BeatNode::new("Beat")),
"Arpeggiator" => Box::new(ArpeggiatorNode::new("Arpeggiator")),
"EnvelopeFollower" => Box::new(EnvelopeFollowerNode::new("Envelope Follower")),
"Limiter" => Box::new(LimiterNode::new("Limiter")),
"Math" => Box::new(MathNode::new("Math")),

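Note: the buffer-sizing convention the hunks above introduce (Audio ports carry process_size interleaved stereo samples, CV ports carry frame_count mono samples) condenses to a single rule. A sketch with an illustrative stand-in for SignalType, not the codebase's own type:

#[derive(Clone, Copy)]
enum PortKind { Audio, Cv, Midi } // stand-in for the crate's SignalType

/// Samples one port buffer holds for a callback of `frame_count` frames.
fn slice_len(kind: PortKind, frame_count: usize) -> usize {
    match kind {
        PortKind::Audio => frame_count * 2, // process_size: interleaved L/R
        PortKind::Cv => frame_count,        // mono, one sample per frame
        PortKind::Midi => 0,                // MIDI ports use event vecs instead
    }
}

fn main() {
    assert_eq!(slice_len(PortKind::Audio, 512), 1024);
    assert_eq!(slice_len(PortKind::Cv, 512), 512);
}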
View File

@ -0,0 +1,412 @@
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType, cv_input_or_default};
use crate::audio::midi::MidiEvent;
const PARAM_MODE: u32 = 0;
const PARAM_DIRECTION: u32 = 1;
const PARAM_OCTAVES: u32 = 2;
const PARAM_RETRIGGER: u32 = 3;
/// ~1ms gate-off for re-triggering at 48kHz
const RETRIGGER_SAMPLES: u32 = 48;
#[derive(Debug, Clone, Copy, PartialEq)]
enum ArpMode {
OnePerCycle = 0,
AllPerCycle = 1,
}
impl ArpMode {
fn from_f32(v: f32) -> Self {
if v.round() as i32 >= 1 { ArpMode::AllPerCycle } else { ArpMode::OnePerCycle }
}
}
#[derive(Debug, Clone, Copy, PartialEq)]
enum ArpDirection {
Up = 0,
Down = 1,
UpDown = 2,
Random = 3,
}
impl ArpDirection {
fn from_f32(v: f32) -> Self {
match v.round() as i32 {
1 => ArpDirection::Down,
2 => ArpDirection::UpDown,
3 => ArpDirection::Random,
_ => ArpDirection::Up,
}
}
}
/// Arpeggiator node — takes MIDI input (held chord) and a CV phase input,
/// outputs CV V/Oct + Gate stepping through the held notes.
pub struct ArpeggiatorNode {
name: String,
/// Currently held notes: (note, velocity), kept sorted by pitch
held_notes: Vec<(u8, u8)>,
/// Expanded sequence after applying direction + octaves
sequence: Vec<(u8, u8)>,
/// Current position in the sequence (for OnePerCycle mode)
current_step: usize,
/// Previous phase value for wraparound detection
prev_phase: f32,
/// Countdown for gate re-trigger gap
retrigger_countdown: u32,
/// Current output values
current_voct: f32,
current_gate: f32,
/// Parameters
mode: ArpMode,
direction: ArpDirection,
octaves: u32,
retrigger: bool,
/// For Up/Down direction tracking
going_up: bool,
/// Track whether sequence needs rebuilding
sequence_dirty: bool,
/// Stateful PRNG for random direction
rng_state: u32,
inputs: Vec<NodePort>,
outputs: Vec<NodePort>,
parameters: Vec<Parameter>,
}
impl ArpeggiatorNode {
pub fn new(name: impl Into<String>) -> Self {
let inputs = vec![
NodePort::new("MIDI In", SignalType::Midi, 0),
NodePort::new("Phase", SignalType::CV, 0),
];
let outputs = vec![
NodePort::new("V/Oct", SignalType::CV, 0),
NodePort::new("Gate", SignalType::CV, 1),
];
let parameters = vec![
Parameter::new(PARAM_MODE, "Mode", 0.0, 1.0, 0.0, ParameterUnit::Generic),
Parameter::new(PARAM_DIRECTION, "Direction", 0.0, 3.0, 0.0, ParameterUnit::Generic),
Parameter::new(PARAM_OCTAVES, "Octaves", 1.0, 4.0, 1.0, ParameterUnit::Generic),
Parameter::new(PARAM_RETRIGGER, "Retrigger", 0.0, 1.0, 1.0, ParameterUnit::Generic),
];
Self {
name: name.into(),
held_notes: Vec::new(),
sequence: Vec::new(),
current_step: 0,
prev_phase: 0.0,
retrigger_countdown: 0,
current_voct: 0.0,
current_gate: 0.0,
mode: ArpMode::OnePerCycle,
direction: ArpDirection::Up,
octaves: 1,
retrigger: true,
going_up: true,
sequence_dirty: false,
rng_state: 12345,
inputs,
outputs,
parameters,
}
}
fn midi_note_to_voct(note: u8) -> f32 {
(note as f32 - 69.0) / 12.0
}
fn rebuild_sequence(&mut self) {
self.sequence.clear();
if self.held_notes.is_empty() {
return;
}
// Build base sequence sorted by pitch (held_notes is already sorted)
let base: Vec<(u8, u8)> = self.held_notes.clone();
// Expand across octaves
let mut expanded = Vec::new();
for oct in 0..self.octaves {
for &(note, vel) in &base {
let transposed = note.saturating_add((oct * 12) as u8);
if transposed <= 127 {
expanded.push((transposed, vel));
}
}
}
// Apply direction
match self.direction {
ArpDirection::Up => {
self.sequence = expanded;
}
ArpDirection::Down => {
expanded.reverse();
self.sequence = expanded;
}
ArpDirection::UpDown => {
if expanded.len() > 1 {
let mut up_down = expanded.clone();
// Go back down, skipping the top and bottom notes to avoid doubles
for i in (1..expanded.len() - 1).rev() {
up_down.push(expanded[i]);
}
self.sequence = up_down;
} else {
self.sequence = expanded;
}
}
ArpDirection::Random => {
// For random, keep the expanded list; we'll pick randomly in process()
self.sequence = expanded;
}
}
// Clamp current_step to valid range and update V/Oct immediately
if !self.sequence.is_empty() {
self.current_step = self.current_step % self.sequence.len();
let (note, _vel) = self.sequence[self.current_step];
self.current_voct = Self::midi_note_to_voct(note);
} else {
self.current_step = 0;
}
self.sequence_dirty = false;
}
fn advance_step(&mut self) {
if self.sequence.is_empty() {
return;
}
if self.direction == ArpDirection::Random {
// Stateful xorshift32 PRNG — evolves independently of current_step
let mut x = self.rng_state;
x ^= x << 13;
x ^= x >> 17;
x ^= x << 5;
self.rng_state = x;
// Use upper bits (better distribution) and exclude current note
if self.sequence.len() > 1 {
let pick = ((x >> 16) as usize) % (self.sequence.len() - 1);
self.current_step = if pick >= self.current_step { pick + 1 } else { pick };
}
} else {
self.current_step = (self.current_step + 1) % self.sequence.len();
}
}
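// Worked example of the exclude-current trick above: with a 4-step
// sequence and current_step == 2, `pick` is drawn from {0, 1, 2} and
// maps to {0, 1, 3}, i.e. uniformly over the three other steps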
fn step_changed(&mut self, new_step: usize) {
let old_step = self.current_step;
self.current_step = new_step;
if !self.sequence.is_empty() {
let (note, _vel) = self.sequence[self.current_step];
self.current_voct = Self::midi_note_to_voct(note);
}
// Start retrigger gap if enabled and the step actually changed
if self.retrigger && old_step != new_step {
self.retrigger_countdown = RETRIGGER_SAMPLES;
}
}
}
impl AudioNode for ArpeggiatorNode {
fn category(&self) -> NodeCategory {
NodeCategory::Utility
}
fn inputs(&self) -> &[NodePort] {
&self.inputs
}
fn outputs(&self) -> &[NodePort] {
&self.outputs
}
fn parameters(&self) -> &[Parameter] {
&self.parameters
}
fn set_parameter(&mut self, id: u32, value: f32) {
match id {
PARAM_MODE => self.mode = ArpMode::from_f32(value),
PARAM_DIRECTION => {
let new_dir = ArpDirection::from_f32(value);
if new_dir != self.direction {
self.direction = new_dir;
self.going_up = true;
self.sequence_dirty = true;
}
}
PARAM_OCTAVES => {
// UI sends 0-3 (combo box index), map to 1-4 octaves
let new_oct = (value.round() as u32 + 1).clamp(1, 4);
if new_oct != self.octaves {
self.octaves = new_oct;
self.sequence_dirty = true;
}
}
PARAM_RETRIGGER => self.retrigger = value.round() as i32 >= 1,
_ => {}
}
}
fn get_parameter(&self, id: u32) -> f32 {
match id {
PARAM_MODE => self.mode as i32 as f32,
PARAM_DIRECTION => self.direction as i32 as f32,
PARAM_OCTAVES => (self.octaves - 1) as f32,
PARAM_RETRIGGER => if self.retrigger { 1.0 } else { 0.0 },
_ => 0.0,
}
}
fn process(
&mut self,
inputs: &[&[f32]],
outputs: &mut [&mut [f32]],
midi_inputs: &[&[MidiEvent]],
_midi_outputs: &mut [&mut Vec<MidiEvent>],
_sample_rate: u32,
) {
// Process incoming MIDI to build held_notes
if !midi_inputs.is_empty() {
for event in midi_inputs[0] {
let status = event.status & 0xF0;
match status {
0x90 if event.data2 > 0 => {
// Note on — add to held notes (sorted by pitch)
let note = event.data1;
let vel = event.data2;
// Remove if already held (avoid duplicates)
self.held_notes.retain(|&(n, _)| n != note);
// Insert sorted by pitch
let pos = self.held_notes.partition_point(|&(n, _)| n < note);
self.held_notes.insert(pos, (note, vel));
self.sequence_dirty = true;
}
0x80 | 0x90 => {
// Note off
let note = event.data1;
self.held_notes.retain(|&(n, _)| n != note);
self.sequence_dirty = true;
}
_ => {}
}
}
}
// Rebuild sequence if needed
if self.sequence_dirty {
self.rebuild_sequence();
}
if outputs.len() < 2 {
return;
}
let len = outputs[0].len();
// If no notes held, output silence
if self.sequence.is_empty() {
for i in 0..len {
outputs[0][i] = self.current_voct;
outputs[1][i] = 0.0;
}
self.current_gate = 0.0;
return;
}
for i in 0..len {
let phase = cv_input_or_default(inputs, 0, i, 0.0).clamp(0.0, 1.0);
match self.mode {
ArpMode::OnePerCycle => {
// Detect phase wraparound (high → low = new cycle)
if self.prev_phase > 0.7 && phase < 0.3 {
// advance_step() mutates current_step directly; restore the old
// position before calling step_changed() so it sees the transition
// and can start the retrigger gap (otherwise it compares the new
// step against itself and never retriggers in this mode)
let old_step = self.current_step;
self.advance_step();
let new_step = self.current_step;
self.current_step = old_step;
self.step_changed(new_step);
}
}
ArpMode::AllPerCycle => {
// Phase 0→1 maps across all sequence notes
let new_step = ((phase * self.sequence.len() as f32).floor() as usize)
.min(self.sequence.len() - 1);
if new_step != self.current_step {
self.step_changed(new_step);
}
}
}
self.prev_phase = phase;
// Gate: off if retriggering, on otherwise
if self.retrigger_countdown > 0 {
self.retrigger_countdown -= 1;
self.current_gate = 0.0;
} else {
self.current_gate = 1.0;
}
outputs[0][i] = self.current_voct;
outputs[1][i] = self.current_gate;
}
}
fn reset(&mut self) {
self.held_notes.clear();
self.sequence.clear();
self.current_step = 0;
self.prev_phase = 0.0;
self.retrigger_countdown = 0;
self.current_voct = 0.0;
self.current_gate = 0.0;
self.going_up = true;
}
fn node_type(&self) -> &str {
"Arpeggiator"
}
fn name(&self) -> &str {
&self.name
}
fn clone_node(&self) -> Box<dyn AudioNode> {
Box::new(Self {
name: self.name.clone(),
held_notes: Vec::new(),
sequence: Vec::new(),
current_step: 0,
prev_phase: 0.0,
retrigger_countdown: 0,
current_voct: 0.0,
current_gate: 0.0,
mode: self.mode,
direction: self.direction,
octaves: self.octaves,
retrigger: self.retrigger,
going_up: true,
sequence_dirty: false,
rng_state: 12345,
inputs: self.inputs.clone(),
outputs: self.outputs.clone(),
parameters: self.parameters.clone(),
})
}
fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
self
}
fn as_any(&self) -> &dyn std::any::Any {
self
}
}
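Note: the V/Oct mapping above centers A4 (MIDI note 69) at 0 V with one volt per octave, and the Up/Down expansion of a held C-E-G yields the cycle C E G E (top and bottom notes are not doubled). A standalone check of the pitch arithmetic:

fn midi_note_to_voct(note: u8) -> f32 {
    (note as f32 - 69.0) / 12.0
}

fn main() {
    assert_eq!(midi_note_to_voct(69), 0.0);   // A4 -> 0 V
    assert_eq!(midi_note_to_voct(81), 1.0);   // A5 -> +1 V, one octave up
    assert_eq!(midi_note_to_voct(57), -1.0);  // A3 -> -1 V
    assert_eq!(midi_note_to_voct(60), -0.75); // C4, nine semitones below A4
}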

View File

@ -1,16 +1,10 @@
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType};
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, SignalType};
use crate::audio::midi::MidiEvent;
const PARAM_ATTACK: u32 = 0;
const PARAM_RELEASE: u32 = 1;
/// Audio to CV converter (Envelope Follower)
/// Converts audio amplitude to control voltage
/// Audio to CV converter
/// Directly converts a stereo audio signal to mono CV (averages L+R channels)
pub struct AudioToCVNode {
name: String,
envelope: f32, // Current envelope value
attack: f32, // Attack time in seconds
release: f32, // Release time in seconds
inputs: Vec<NodePort>,
outputs: Vec<NodePort>,
parameters: Vec<Parameter>,
@ -28,19 +22,11 @@ impl AudioToCVNode {
NodePort::new("CV Out", SignalType::CV, 0),
];
let parameters = vec![
Parameter::new(PARAM_ATTACK, "Attack", 0.001, 1.0, 0.01, ParameterUnit::Time),
Parameter::new(PARAM_RELEASE, "Release", 0.001, 1.0, 0.1, ParameterUnit::Time),
];
Self {
name,
envelope: 0.0,
attack: 0.01,
release: 0.1,
inputs,
outputs,
parameters,
parameters: Vec::new(),
}
}
}
@ -62,20 +48,10 @@ impl AudioNode for AudioToCVNode {
&self.parameters
}
fn set_parameter(&mut self, id: u32, value: f32) {
match id {
PARAM_ATTACK => self.attack = value.clamp(0.001, 1.0),
PARAM_RELEASE => self.release = value.clamp(0.001, 1.0),
_ => {}
}
}
fn set_parameter(&mut self, _id: u32, _value: f32) {}
fn get_parameter(&self, id: u32) -> f32 {
match id {
PARAM_ATTACK => self.attack,
PARAM_RELEASE => self.release,
_ => 0.0,
}
fn get_parameter(&self, _id: u32) -> f32 {
0.0
}
fn process(
@ -84,7 +60,7 @@ impl AudioNode for AudioToCVNode {
outputs: &mut [&mut [f32]],
_midi_inputs: &[&[MidiEvent]],
_midi_outputs: &mut [&mut Vec<MidiEvent>],
sample_rate: u32,
_sample_rate: u32,
) {
if inputs.is_empty() || outputs.is_empty() {
return;
@ -95,39 +71,16 @@ impl AudioNode for AudioToCVNode {
// Audio input is stereo (interleaved L/R), CV output is mono
let audio_frames = input.len() / 2;
let cv_frames = output.len();
let frames = audio_frames.min(cv_frames);
// Calculate attack and release coefficients
let sample_rate_f32 = sample_rate as f32;
let attack_coeff = (-1.0 / (self.attack * sample_rate_f32)).exp();
let release_coeff = (-1.0 / (self.release * sample_rate_f32)).exp();
let frames = audio_frames.min(output.len());
for frame in 0..frames {
// Get stereo samples
let left = input[frame * 2];
let right = input[frame * 2 + 1];
// Calculate RMS-like value (average of absolute values for simplicity)
let amplitude = (left.abs() + right.abs()) / 2.0;
// Envelope follower with attack/release
if amplitude > self.envelope {
// Attack: follow signal up quickly
self.envelope = amplitude * (1.0 - attack_coeff) + self.envelope * attack_coeff;
} else {
// Release: decay slowly
self.envelope = amplitude * (1.0 - release_coeff) + self.envelope * release_coeff;
}
// Output CV (mono)
output[frame] = self.envelope;
output[frame] = (left + right) * 0.5;
}
}
fn reset(&mut self) {
self.envelope = 0.0;
}
fn reset(&mut self) {}
fn node_type(&self) -> &str {
"AudioToCV"
@ -140,9 +93,6 @@ impl AudioNode for AudioToCVNode {
fn clone_node(&self) -> Box<dyn AudioNode> {
Box::new(Self {
name: self.name.clone(),
envelope: 0.0, // Reset envelope
attack: self.attack,
release: self.release,
inputs: self.inputs.clone(),
outputs: self.outputs.clone(),
parameters: self.parameters.clone(),

View File

@ -0,0 +1,221 @@
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType};
use crate::audio::midi::MidiEvent;
const PARAM_RESOLUTION: u32 = 0;
/// Hardcoded BPM until project tempo is implemented
const DEFAULT_BPM: f32 = 120.0;
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum BeatResolution {
Whole = 0, // 1/1
Half = 1, // 1/2
Quarter = 2, // 1/4
Eighth = 3, // 1/8
Sixteenth = 4, // 1/16
QuarterT = 5, // 1/4 triplet
EighthT = 6, // 1/8 triplet
}
impl BeatResolution {
fn from_f32(value: f32) -> Self {
match value.round() as i32 {
0 => BeatResolution::Whole,
1 => BeatResolution::Half,
2 => BeatResolution::Quarter,
3 => BeatResolution::Eighth,
4 => BeatResolution::Sixteenth,
5 => BeatResolution::QuarterT,
6 => BeatResolution::EighthT,
_ => BeatResolution::Quarter,
}
}
/// How many subdivisions per quarter note beat
fn subdivisions_per_beat(&self) -> f64 {
match self {
BeatResolution::Whole => 0.25, // 1 per 4 beats
BeatResolution::Half => 0.5, // 1 per 2 beats
BeatResolution::Quarter => 1.0, // 1 per beat
BeatResolution::Eighth => 2.0, // 2 per beat
BeatResolution::Sixteenth => 4.0, // 4 per beat
BeatResolution::QuarterT => 1.5, // 3 per 2 beats (triplet)
BeatResolution::EighthT => 3.0, // 3 per beat (triplet)
}
}
}
/// Beat clock node — generates tempo-synced CV signals.
///
/// When playing: synced to timeline position.
/// When stopped: free-runs continuously at the set BPM.
///
/// Outputs:
/// - BPM: constant CV proportional to tempo (bpm / 240)
/// - Beat Phase: sawtooth 0→1 per beat subdivision
/// - Bar Phase: sawtooth 0→1 per bar (4 beats)
/// - Gate: 1.0 for first half of each subdivision, 0.0 otherwise
pub struct BeatNode {
name: String,
bpm: f32,
resolution: BeatResolution,
/// Playback time in seconds, set by the graph before process()
playback_time: f64,
/// Previous playback_time to detect paused state
prev_playback_time: f64,
/// Free-running time accumulator for when playback is stopped
free_run_time: f64,
inputs: Vec<NodePort>,
outputs: Vec<NodePort>,
parameters: Vec<Parameter>,
}
impl BeatNode {
pub fn new(name: impl Into<String>) -> Self {
let inputs = vec![];
let outputs = vec![
NodePort::new("BPM", SignalType::CV, 0),
NodePort::new("Beat Phase", SignalType::CV, 1),
NodePort::new("Bar Phase", SignalType::CV, 2),
NodePort::new("Gate", SignalType::CV, 3),
];
let parameters = vec![
Parameter::new(PARAM_RESOLUTION, "Resolution", 0.0, 6.0, 2.0, ParameterUnit::Generic),
];
Self {
name: name.into(),
bpm: DEFAULT_BPM,
resolution: BeatResolution::Quarter,
playback_time: 0.0,
prev_playback_time: -1.0,
free_run_time: 0.0,
inputs,
outputs,
parameters,
}
}
pub fn set_playback_time(&mut self, time: f64) {
self.playback_time = time;
}
}
impl AudioNode for BeatNode {
fn category(&self) -> NodeCategory {
NodeCategory::Utility
}
fn inputs(&self) -> &[NodePort] {
&self.inputs
}
fn outputs(&self) -> &[NodePort] {
&self.outputs
}
fn parameters(&self) -> &[Parameter] {
&self.parameters
}
fn set_parameter(&mut self, id: u32, value: f32) {
match id {
PARAM_RESOLUTION => self.resolution = BeatResolution::from_f32(value),
_ => {}
}
}
fn get_parameter(&self, id: u32) -> f32 {
match id {
PARAM_RESOLUTION => self.resolution as i32 as f32,
_ => 0.0,
}
}
fn process(
&mut self,
_inputs: &[&[f32]],
outputs: &mut [&mut [f32]],
_midi_inputs: &[&[MidiEvent]],
_midi_outputs: &mut [&mut Vec<MidiEvent>],
sample_rate: u32,
) {
if outputs.len() < 4 {
return;
}
let bpm_cv = (self.bpm / 240.0).clamp(0.0, 1.0);
let len = outputs[0].len();
let sample_period = 1.0 / sample_rate as f64;
// Detect paused: playback_time hasn't changed since last process()
let paused = self.playback_time == self.prev_playback_time;
self.prev_playback_time = self.playback_time;
let beats_per_second = self.bpm as f64 / 60.0;
let subs_per_beat = self.resolution.subdivisions_per_beat();
// Choose time source: timeline when playing, free-running when stopped
let base_time = if paused { self.free_run_time } else { self.playback_time };
for i in 0..len {
let time = base_time + i as f64 * sample_period;
let beat_pos = time * beats_per_second;
// Beat subdivision phase: 0→1 sawtooth
let sub_phase = ((beat_pos * subs_per_beat) % 1.0) as f32;
// Bar phase: 0→1 over 4 quarter-note beats
let bar_phase = ((beat_pos / 4.0) % 1.0) as f32;
// Gate: high for first half of each subdivision
let gate = if sub_phase < 0.5 { 1.0f32 } else { 0.0 };
outputs[0][i] = bpm_cv;
outputs[1][i] = sub_phase;
outputs[2][i] = bar_phase;
outputs[3][i] = gate;
}
// Advance free-run time (always ticks, so it's ready when playback stops)
self.free_run_time += len as f64 * sample_period;
}
fn reset(&mut self) {
self.playback_time = 0.0;
self.prev_playback_time = -1.0;
self.free_run_time = 0.0;
}
fn node_type(&self) -> &str {
"Beat"
}
fn name(&self) -> &str {
&self.name
}
fn clone_node(&self) -> Box<dyn AudioNode> {
Box::new(Self {
name: self.name.clone(),
bpm: self.bpm,
resolution: self.resolution,
playback_time: 0.0,
prev_playback_time: -1.0,
free_run_time: 0.0,
inputs: self.inputs.clone(),
outputs: self.outputs.clone(),
parameters: self.parameters.clone(),
})
}
fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
self
}
fn as_any(&self) -> &dyn std::any::Any {
self
}
}
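Note: the phase outputs above reduce to a few multiplications. A standalone check at 120 BPM with 1/8-note resolution (subdivisions_per_beat = 2.0), using the same formulas as process():

fn phases(time: f64, bpm: f64, subs_per_beat: f64) -> (f32, f32, bool) {
    let beat_pos = time * bpm / 60.0;
    let sub_phase = ((beat_pos * subs_per_beat) % 1.0) as f32;
    let bar_phase = ((beat_pos / 4.0) % 1.0) as f32;
    (sub_phase, bar_phase, sub_phase < 0.5) // gate is high in the first half
}

fn main() {
    // At 120 BPM an eighth note lasts 0.25 s and a 4-beat bar lasts 2 s.
    let (sub, bar, gate) = phases(0.125, 120.0, 2.0);
    assert!((sub - 0.5).abs() < 1e-6);    // halfway through the eighth
    assert!((bar - 0.0625).abs() < 1e-6); // 1/16 of the bar elapsed
    assert!(!gate);                       // gate just dropped at the midpoint
}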

View File

@ -1,7 +1,9 @@
mod adsr;
mod arpeggiator;
mod audio_input;
mod audio_to_cv;
mod automation_input;
mod beat;
mod bit_crusher;
mod bpm_detector;
mod chorus;
@ -41,9 +43,11 @@ mod voice_allocator;
mod wavetable_oscillator;
pub use adsr::ADSRNode;
pub use arpeggiator::ArpeggiatorNode;
pub use audio_input::AudioInputNode;
pub use audio_to_cv::AudioToCVNode;
pub use automation_input::{AutomationInputNode, AutomationKeyframe, InterpolationType};
pub use beat::BeatNode;
pub use bit_crusher::BitCrusherNode;
pub use bpm_detector::BpmDetectorNode;
pub use chorus::ChorusNode;

View File

@ -87,8 +87,9 @@ pub struct OscilloscopeNode {
trigger_period: usize, // Period in samples for V/oct triggering
// Shared buffers for reading from Tauri commands
buffer: Arc<Mutex<CircularBuffer>>, // Audio buffer
buffer: Arc<Mutex<CircularBuffer>>, // Audio buffer (mono downmix)
cv_buffer: Arc<Mutex<CircularBuffer>>, // CV buffer
mono_buf: Vec<f32>, // Scratch buffer for stereo-to-mono downmix
inputs: Vec<NodePort>,
outputs: Vec<NodePort>,
@ -125,6 +126,7 @@ impl OscilloscopeNode {
trigger_period: 480, // Default to ~100Hz at 48kHz
buffer: Arc::new(Mutex::new(CircularBuffer::new(BUFFER_SIZE))),
cv_buffer: Arc::new(Mutex::new(CircularBuffer::new(BUFFER_SIZE))),
mono_buf: vec![0.0; 2048],
inputs,
outputs,
parameters,
@ -220,12 +222,13 @@ impl AudioNode for OscilloscopeNode {
let input = inputs[0];
let output = &mut outputs[0];
let len = input.len().min(output.len());
let stereo_len = input.len().min(output.len());
let frame_count = stereo_len / 2;
// Read CV input if available (port 1) — used for both display and V/Oct triggering
if inputs.len() > 1 && !inputs[1].is_empty() {
let cv_input = inputs[1];
let cv_len = len.min(cv_input.len());
let cv_len = frame_count.min(cv_input.len());
// Check if connected (not NaN sentinel)
if cv_len > 0 && !cv_input[0].is_nan() {
@ -244,20 +247,25 @@ impl AudioNode for OscilloscopeNode {
// Update sample counter for V/oct triggering
if self.trigger_mode == TriggerMode::VoltPerOctave {
self.sample_counter = (self.sample_counter + len) % self.trigger_period;
self.sample_counter = (self.sample_counter + frame_count) % self.trigger_period;
}
// Pass through audio (copy input to output)
output[..len].copy_from_slice(&input[..len]);
output[..stereo_len].copy_from_slice(&input[..stereo_len]);
// Capture audio samples to buffer
// Capture audio as mono downmix to match CV time scale
if let Ok(mut buffer) = self.buffer.lock() {
buffer.write(&input[..len]);
// Clamp to the scratch buffer's length to avoid indexing past it
// when the callback delivers more frames than mono_buf holds
let frames = frame_count.min(self.mono_buf.len());
for frame in 0..frames {
let left = input[frame * 2];
let right = input[frame * 2 + 1];
self.mono_buf[frame] = (left + right) * 0.5;
}
buffer.write(&self.mono_buf[..frames]);
}
// Update last sample for trigger detection (use left channel, frame 0)
if !input.is_empty() {
self.last_sample = input[0];
// Update last sample for trigger detection
if frame_count > 0 {
self.last_sample = (input[0] + input[1]) * 0.5;
}
}
@ -288,6 +296,7 @@ impl AudioNode for OscilloscopeNode {
trigger_period: 480,
buffer: Arc::new(Mutex::new(CircularBuffer::new(BUFFER_SIZE))),
cv_buffer: Arc::new(Mutex::new(CircularBuffer::new(BUFFER_SIZE))),
mono_buf: vec![0.0; 2048],
inputs: self.inputs.clone(),
outputs: self.outputs.clone(),
parameters: self.parameters.clone(),

View File

@ -235,8 +235,8 @@ pub enum AudioEvent {
BufferPoolStats(BufferPoolStats),
/// Automation lane created (track_id, lane_id, parameter_id)
AutomationLaneCreated(TrackId, AutomationLaneId, ParameterId),
/// Recording started (track_id, clip_id)
RecordingStarted(TrackId, ClipId),
/// Recording started (track_id, clip_id, sample_rate, channels)
RecordingStarted(TrackId, ClipId, u32, u32),
/// Recording progress update (clip_id, current_duration)
RecordingProgress(ClipId, f64),
/// Recording stopped (clip_id, pool_index, waveform)

View File

@ -39,6 +39,8 @@ pub struct AudioSystem {
pub channels: u32,
/// Event receiver for polling audio events (only present when no EventEmitter is provided)
pub event_rx: Option<rtrb::Consumer<AudioEvent>>,
/// Consumer for recording audio mirror (streams recorded samples to UI for live waveform)
recording_mirror_rx: Option<rtrb::Consumer<f32>>,
}
impl AudioSystem {
@ -85,9 +87,13 @@ impl AudioSystem {
let input_buffer_size = (sample_rate * channels * 10) as usize;
let (mut input_tx, input_rx) = rtrb::RingBuffer::new(input_buffer_size);
// Create mirror ringbuffer for streaming recorded audio to UI (live waveform)
let (mirror_tx, mirror_rx) = rtrb::RingBuffer::new(input_buffer_size);
// Create engine
let mut engine = Engine::new(sample_rate, channels, command_rx, event_tx, query_rx, query_response_tx);
engine.set_input_rx(input_rx);
engine.set_recording_mirror_tx(mirror_tx);
let controller = engine.get_controller(command_tx, query_tx, query_response_rx);
// Initialize MIDI input manager for external MIDI devices
@ -151,6 +157,7 @@ impl AudioSystem {
sample_rate,
channels,
event_rx: None, // No event receiver when audio device unavailable
recording_mirror_rx: None,
});
}
};
@ -175,6 +182,7 @@ impl AudioSystem {
sample_rate,
channels,
event_rx: None,
recording_mirror_rx: None,
});
}
};
@ -250,6 +258,7 @@ impl AudioSystem {
sample_rate,
channels,
event_rx: None,
recording_mirror_rx: None,
});
}
};
@ -275,9 +284,15 @@ impl AudioSystem {
sample_rate,
channels,
event_rx: event_rx_option,
recording_mirror_rx: Some(mirror_rx),
})
}
/// Take the recording mirror consumer for streaming recorded audio to UI
pub fn take_recording_mirror_rx(&mut self) -> Option<rtrb::Consumer<f32>> {
self.recording_mirror_rx.take()
}
/// Spawn a background thread to emit events from the ringbuffer
fn spawn_emitter_thread(mut event_rx: rtrb::Consumer<AudioEvent>, emitter: std::sync::Arc<dyn EventEmitter>) {
std::thread::spawn(move || {

View File

@ -667,9 +667,11 @@ struct EditorApp {
audio_pools_with_new_waveforms: HashSet<usize>,
/// Raw audio sample cache for GPU waveform rendering
/// Format: pool_index -> (samples, sample_rate, channels)
raw_audio_cache: HashMap<usize, (Vec<f32>, u32, u32)>,
raw_audio_cache: HashMap<usize, (Arc<Vec<f32>>, u32, u32)>,
/// Pool indices that need GPU texture upload (set when raw audio arrives, cleared after upload)
waveform_gpu_dirty: HashSet<usize>,
/// Consumer for recording audio mirror (streams recorded samples to UI for live waveform)
recording_mirror_rx: Option<rtrb::Consumer<f32>>,
/// Current file path (None if not yet saved)
current_file_path: Option<std::path::PathBuf>,
/// Application configuration (recent files, etc.)
@ -771,12 +773,13 @@ impl EditorApp {
let action_executor = lightningbeam_core::action::ActionExecutor::new(document);
// Initialize audio system and destructure it for sharing
let (audio_stream, audio_controller, audio_event_rx, audio_sample_rate, audio_channels, file_command_tx) =
let (audio_stream, audio_controller, audio_event_rx, audio_sample_rate, audio_channels, file_command_tx, recording_mirror_rx) =
match daw_backend::AudioSystem::new(None, config.audio_buffer_size) {
Ok(audio_system) => {
Ok(mut audio_system) => {
println!("✅ Audio engine initialized successfully");
// Extract components
let mirror_rx = audio_system.take_recording_mirror_rx();
let stream = audio_system.stream;
let sample_rate = audio_system.sample_rate;
let channels = audio_system.channels;
@ -788,7 +791,7 @@ impl EditorApp {
// Spawn file operations worker
let file_command_tx = FileOperationsWorker::spawn(controller.clone());
(Some(stream), Some(controller), event_rx, sample_rate, channels, file_command_tx)
(Some(stream), Some(controller), event_rx, sample_rate, channels, file_command_tx, mirror_rx)
}
Err(e) => {
eprintln!("❌ Failed to initialize audio engine: {}", e);
@ -796,7 +799,7 @@ impl EditorApp {
// Create a dummy channel for file operations (won't be used)
let (tx, _rx) = std::sync::mpsc::channel();
(None, None, None, 48000, 2, tx)
(None, None, None, 48000, 2, tx, None)
}
};
@ -872,6 +875,7 @@ impl EditorApp {
audio_pools_with_new_waveforms: HashSet::new(), // Track pool indices with new raw audio
raw_audio_cache: HashMap::new(),
waveform_gpu_dirty: HashSet::new(),
recording_mirror_rx,
current_file_path: None, // No file loaded initially
config,
file_command_tx,
@ -2701,7 +2705,7 @@ impl EditorApp {
let mut controller = controller_arc.lock().unwrap();
match controller.get_pool_audio_samples(pool_index) {
Ok((samples, sr, ch)) => {
self.raw_audio_cache.insert(pool_index, (samples, sr, ch));
self.raw_audio_cache.insert(pool_index, (Arc::new(samples), sr, ch));
self.waveform_gpu_dirty.insert(pool_index);
raw_fetched += 1;
}
@ -3516,7 +3520,7 @@ impl EditorApp {
let mut controller = controller_arc.lock().unwrap();
match controller.get_pool_audio_samples(pool_index) {
Ok((samples, sr, ch)) => {
self.raw_audio_cache.insert(pool_index, (samples, sr, ch));
self.raw_audio_cache.insert(pool_index, (Arc::new(samples), sr, ch));
self.waveform_gpu_dirty.insert(pool_index);
}
Err(e) => eprintln!("Failed to fetch raw audio for extracted audio: {}", e),
@ -3738,6 +3742,24 @@ impl eframe::App for EditorApp {
ctx.request_repaint();
}
// Drain recording mirror buffer for live waveform display
if self.is_recording {
if let Some(ref mut mirror_rx) = self.recording_mirror_rx {
let mut drained = 0usize;
if let Some(entry) = self.raw_audio_cache.get_mut(&usize::MAX) {
let samples = Arc::make_mut(&mut entry.0);
while let Ok(sample) = mirror_rx.pop() {
samples.push(sample);
drained += 1;
}
}
if drained > 0 {
self.waveform_gpu_dirty.insert(usize::MAX);
ctx.request_repaint();
}
}
}
// Poll audio events from the audio engine
if let Some(event_rx) = &mut self.audio_event_rx {
let mut polled_events = false;
@ -3777,7 +3799,7 @@ impl eframe::App for EditorApp {
let mut controller = controller_arc.lock().unwrap();
match controller.get_pool_audio_samples(pool_index) {
Ok((samples, sr, ch)) => {
self.raw_audio_cache.insert(pool_index, (samples, sr, ch));
self.raw_audio_cache.insert(pool_index, (Arc::new(samples), sr, ch));
self.waveform_gpu_dirty.insert(pool_index);
self.audio_pools_with_new_waveforms.insert(pool_index);
}
@ -3789,7 +3811,7 @@ impl eframe::App for EditorApp {
ctx.request_repaint();
}
// Recording events
AudioEvent::RecordingStarted(track_id, backend_clip_id) => {
AudioEvent::RecordingStarted(track_id, backend_clip_id, rec_sample_rate, rec_channels) => {
println!("🎤 Recording started on track {:?}, backend_clip_id={}", track_id, backend_clip_id);
// Create clip in document and add instance to layer
@ -3817,6 +3839,10 @@ impl eframe::App for EditorApp {
// Store mapping for later updates
self.recording_clips.insert(layer_id, backend_clip_id);
}
// Initialize live waveform cache for recording
self.raw_audio_cache.insert(usize::MAX, (Arc::new(Vec::new()), rec_sample_rate, rec_channels));
// Drop any stale mirror samples left over from a previous take
if let Some(ref mut mirror_rx) = self.recording_mirror_rx {
while mirror_rx.pop().is_ok() {}
}
ctx.request_repaint();
}
AudioEvent::RecordingProgress(_clip_id, duration) => {
@ -3850,12 +3876,16 @@ impl eframe::App for EditorApp {
AudioEvent::RecordingStopped(_backend_clip_id, pool_index, _waveform) => {
println!("🎤 Recording stopped: pool_index={}", pool_index);
// Clean up live recording waveform cache
self.raw_audio_cache.remove(&usize::MAX);
self.waveform_gpu_dirty.remove(&usize::MAX);
// Fetch raw audio samples for GPU waveform rendering
if let Some(ref controller_arc) = self.audio_controller {
let mut controller = controller_arc.lock().unwrap();
match controller.get_pool_audio_samples(pool_index) {
Ok((samples, sr, ch)) => {
self.raw_audio_cache.insert(pool_index, (samples, sr, ch));
self.raw_audio_cache.insert(pool_index, (Arc::new(samples), sr, ch));
self.waveform_gpu_dirty.insert(pool_index);
self.audio_pools_with_new_waveforms.insert(pool_index);
}
@ -4074,7 +4104,7 @@ impl eframe::App for EditorApp {
let mut controller = controller_arc.lock().unwrap();
match controller.get_pool_audio_samples(pool_index) {
Ok((samples, sr, ch)) => {
self.raw_audio_cache.insert(pool_index, (samples, sr, ch));
self.raw_audio_cache.insert(pool_index, (Arc::new(samples), sr, ch));
self.waveform_gpu_dirty.insert(pool_index);
}
Err(e) => eprintln!("Failed to fetch raw audio for pool {}: {}", pool_index, e),
@ -4088,9 +4118,9 @@ impl eframe::App for EditorApp {
AudioEvent::AudioDecodeProgress { pool_index, samples, sample_rate, channels } => {
// Samples arrive as deltas — append to existing cache
if let Some(entry) = self.raw_audio_cache.get_mut(&pool_index) {
entry.0.extend_from_slice(&samples);
Arc::make_mut(&mut entry.0).extend_from_slice(&samples);
} else {
self.raw_audio_cache.insert(pool_index, (samples, sample_rate, channels));
self.raw_audio_cache.insert(pool_index, (Arc::new(samples), sample_rate, channels));
}
self.waveform_gpu_dirty.insert(pool_index);
ctx.request_repaint();
@ -4680,7 +4710,7 @@ struct RenderContext<'a> {
/// Audio pool indices with new raw audio data this frame (for thumbnail invalidation)
audio_pools_with_new_waveforms: &'a HashSet<usize>,
/// Raw audio samples for GPU waveform rendering (pool_index -> (samples, sample_rate, channels))
raw_audio_cache: &'a HashMap<usize, (Vec<f32>, u32, u32)>,
raw_audio_cache: &'a HashMap<usize, (Arc<Vec<f32>>, u32, u32)>,
/// Pool indices needing GPU texture upload
waveform_gpu_dirty: &'a mut HashSet<usize>,
/// Effect ID to load into shader editor (set by asset library, consumed by shader editor)

View File

@ -20,7 +20,7 @@ use crate::widgets::ImeTextField;
/// Derive min/max peak pairs from raw audio samples for thumbnail rendering.
/// Downsamples to `num_peaks` (min, max) pairs by scanning chunks of samples.
fn peaks_from_raw_audio(
raw: &(Vec<f32>, u32, u32), // (samples, sample_rate, channels)
raw: &(std::sync::Arc<Vec<f32>>, u32, u32), // (samples, sample_rate, channels)
num_peaks: usize,
) -> Vec<(f32, f32)> {
let (samples, _sr, channels) = raw;
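Note: only the signature changes here. For context, a function of this shape typically scans fixed-size chunks of interleaved frames and records each chunk's (min, max) pair; a sketch of that downsampling under those assumptions, not this file's actual body:

fn peaks_sketch(samples: &[f32], channels: usize, num_peaks: usize) -> Vec<(f32, f32)> {
    let frames = samples.len() / channels.max(1);
    if frames == 0 || num_peaks == 0 {
        return Vec::new();
    }
    let chunk = (frames / num_peaks).max(1);
    (0..num_peaks)
        .map(|p| {
            let (mut lo, mut hi) = (f32::MAX, f32::MIN);
            for frame in p * chunk..((p + 1) * chunk).min(frames) {
                for c in 0..channels {
                    let s = samples[frame * channels + c];
                    lo = lo.min(s);
                    hi = hi.max(s);
                }
            }
            // Chunks past the end of the data render as silence.
            if lo > hi { (0.0, 0.0) } else { (lo, hi) }
        })
        .collect()
}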

View File

@ -198,7 +198,7 @@ pub struct SharedPaneState<'a> {
/// Audio pool indices that got new raw audio data this frame (for thumbnail invalidation)
pub audio_pools_with_new_waveforms: &'a std::collections::HashSet<usize>,
/// Raw audio samples for GPU waveform rendering (pool_index -> (samples, sample_rate, channels))
pub raw_audio_cache: &'a std::collections::HashMap<usize, (Vec<f32>, u32, u32)>,
pub raw_audio_cache: &'a std::collections::HashMap<usize, (std::sync::Arc<Vec<f32>>, u32, u32)>,
/// Pool indices needing GPU waveform texture upload
pub waveform_gpu_dirty: &'a mut std::collections::HashSet<usize>,
/// Effect ID to load into shader editor (set by asset library, consumed by shader editor)

View File

@ -23,6 +23,7 @@ pub enum NodeTemplate {
MidiInput,
AudioInput,
AutomationInput,
Beat,
// Generators
Oscillator,
@ -57,6 +58,7 @@ pub enum NodeTemplate {
Constant,
MidiToCv,
AudioToCv,
Arpeggiator,
Math,
SampleHold,
SlewLimiter,
@ -115,12 +117,14 @@ impl NodeTemplate {
NodeTemplate::Constant => "Constant",
NodeTemplate::MidiToCv => "MidiToCV",
NodeTemplate::AudioToCv => "AudioToCV",
NodeTemplate::Arpeggiator => "Arpeggiator",
NodeTemplate::Math => "Math",
NodeTemplate::SampleHold => "SampleHold",
NodeTemplate::SlewLimiter => "SlewLimiter",
NodeTemplate::Quantizer => "Quantizer",
NodeTemplate::EnvelopeFollower => "EnvelopeFollower",
NodeTemplate::BpmDetector => "BpmDetector",
NodeTemplate::Beat => "Beat",
NodeTemplate::Mod => "Mod",
NodeTemplate::Oscilloscope => "Oscilloscope",
NodeTemplate::VoiceAllocator => "VoiceAllocator",
@ -354,12 +358,14 @@ impl NodeTemplateTrait for NodeTemplate {
NodeTemplate::Constant => "Constant".into(),
NodeTemplate::MidiToCv => "MIDI to CV".into(),
NodeTemplate::AudioToCv => "Audio to CV".into(),
NodeTemplate::Arpeggiator => "Arpeggiator".into(),
NodeTemplate::Math => "Math".into(),
NodeTemplate::SampleHold => "Sample & Hold".into(),
NodeTemplate::SlewLimiter => "Slew Limiter".into(),
NodeTemplate::Quantizer => "Quantizer".into(),
NodeTemplate::EnvelopeFollower => "Envelope Follower".into(),
NodeTemplate::BpmDetector => "BPM Detector".into(),
NodeTemplate::Beat => "Beat".into(),
NodeTemplate::Mod => "Modulator".into(),
// Analysis
NodeTemplate::Oscilloscope => "Oscilloscope".into(),
@ -376,7 +382,7 @@ impl NodeTemplateTrait for NodeTemplate {
fn node_finder_categories(&self, _user_state: &mut Self::UserState) -> Vec<&'static str> {
match self {
NodeTemplate::MidiInput | NodeTemplate::AudioInput | NodeTemplate::AutomationInput => vec!["Inputs"],
NodeTemplate::MidiInput | NodeTemplate::AudioInput | NodeTemplate::AutomationInput | NodeTemplate::Beat => vec!["Inputs"],
NodeTemplate::Oscillator | NodeTemplate::WavetableOscillator | NodeTemplate::FmSynth
| NodeTemplate::Noise | NodeTemplate::SimpleSampler | NodeTemplate::MultiSampler => vec!["Generators"],
NodeTemplate::Filter | NodeTemplate::Gain | NodeTemplate::Echo | NodeTemplate::Reverb
@ -384,7 +390,7 @@ impl NodeTemplateTrait for NodeTemplate {
| NodeTemplate::BitCrusher | NodeTemplate::Compressor | NodeTemplate::Limiter | NodeTemplate::Eq
| NodeTemplate::Pan | NodeTemplate::RingModulator | NodeTemplate::Vocoder => vec!["Effects"],
NodeTemplate::Adsr | NodeTemplate::Lfo | NodeTemplate::Mixer | NodeTemplate::Splitter
| NodeTemplate::Constant | NodeTemplate::MidiToCv | NodeTemplate::AudioToCv | NodeTemplate::Math
| NodeTemplate::Constant | NodeTemplate::MidiToCv | NodeTemplate::AudioToCv | NodeTemplate::Arpeggiator | NodeTemplate::Math
| NodeTemplate::SampleHold | NodeTemplate::SlewLimiter | NodeTemplate::Quantizer
| NodeTemplate::EnvelopeFollower | NodeTemplate::BpmDetector | NodeTemplate::Mod => vec!["Utilities"],
NodeTemplate::Oscilloscope => vec!["Analysis"],
@ -716,6 +722,28 @@ impl NodeTemplateTrait for NodeTemplate {
graph.add_input_param(node_id, "Audio In".into(), DataType::Audio, ValueType::float(0.0), InputParamKind::ConnectionOnly, true);
graph.add_output_param(node_id, "CV Out".into(), DataType::CV);
}
NodeTemplate::Arpeggiator => {
graph.add_input_param(node_id, "MIDI In".into(), DataType::Midi, ValueType::float(0.0), InputParamKind::ConnectionOnly, true);
graph.add_input_param(node_id, "Phase".into(), DataType::CV, ValueType::float(0.0), InputParamKind::ConnectionOnly, true);
graph.add_input_param(node_id, "Mode".into(), DataType::CV,
ValueType::float_param(0.0, 0.0, 1.0, "", 0,
Some(&["One/Cycle", "All/Cycle"])),
InputParamKind::ConstantOnly, true);
graph.add_input_param(node_id, "Direction".into(), DataType::CV,
ValueType::float_param(0.0, 0.0, 3.0, "", 1,
Some(&["Up", "Down", "Up/Down", "Random"])),
InputParamKind::ConstantOnly, true);
graph.add_input_param(node_id, "Octaves".into(), DataType::CV,
ValueType::float_param(0.0, 0.0, 3.0, "", 2,
Some(&["1", "2", "3", "4"])),
InputParamKind::ConstantOnly, true);
graph.add_input_param(node_id, "Retrigger".into(), DataType::CV,
ValueType::float_param(1.0, 0.0, 1.0, "", 3,
Some(&["Off", "On"])),
InputParamKind::ConstantOnly, true);
graph.add_output_param(node_id, "V/Oct".into(), DataType::CV);
graph.add_output_param(node_id, "Gate".into(), DataType::CV);
}
NodeTemplate::Math => {
graph.add_input_param(node_id, "A".into(), DataType::CV, ValueType::float(0.0), InputParamKind::ConnectionOrConstant, true);
graph.add_input_param(node_id, "B".into(), DataType::CV, ValueType::float(0.0), InputParamKind::ConnectionOrConstant, true);
@ -745,6 +773,16 @@ impl NodeTemplateTrait for NodeTemplate {
graph.add_input_param(node_id, "Audio In".into(), DataType::Audio, ValueType::float(0.0), InputParamKind::ConnectionOnly, true);
graph.add_output_param(node_id, "BPM".into(), DataType::CV);
}
NodeTemplate::Beat => {
graph.add_input_param(node_id, "Resolution".into(), DataType::CV,
ValueType::float_param(2.0, 0.0, 6.0, "", 0,
Some(&["1/1", "1/2", "1/4", "1/8", "1/16", "1/4T", "1/8T"])),
InputParamKind::ConstantOnly, true);
graph.add_output_param(node_id, "BPM".into(), DataType::CV);
graph.add_output_param(node_id, "Beat Phase".into(), DataType::CV);
graph.add_output_param(node_id, "Bar Phase".into(), DataType::CV);
graph.add_output_param(node_id, "Gate".into(), DataType::CV);
}
NodeTemplate::Mod => {
graph.add_input_param(node_id, "Carrier".into(), DataType::Audio, ValueType::float(0.0), InputParamKind::ConnectionOnly, true);
graph.add_input_param(node_id, "Modulator".into(), DataType::CV, ValueType::float(0.0), InputParamKind::ConnectionOnly, true);
@ -1112,12 +1150,14 @@ impl NodeTemplateIter for AllNodeTemplates {
NodeTemplate::Constant,
NodeTemplate::MidiToCv,
NodeTemplate::AudioToCv,
NodeTemplate::Arpeggiator,
NodeTemplate::Math,
NodeTemplate::SampleHold,
NodeTemplate::SlewLimiter,
NodeTemplate::Quantizer,
NodeTemplate::EnvelopeFollower,
NodeTemplate::BpmDetector,
NodeTemplate::Beat,
NodeTemplate::Mod,
// Analysis
NodeTemplate::Oscilloscope,

View File

@ -1963,6 +1963,8 @@ impl NodeGraphPane {
"BPMDetector" => Some(NodeTemplate::BpmDetector),
"Mod" => Some(NodeTemplate::Mod),
"Oscilloscope" => Some(NodeTemplate::Oscilloscope),
"Arpeggiator" => Some(NodeTemplate::Arpeggiator),
"Beat" => Some(NodeTemplate::Beat),
"VoiceAllocator" => Some(NodeTemplate::VoiceAllocator),
"Group" => Some(NodeTemplate::Group),
"TemplateInput" => Some(NodeTemplate::TemplateInput),

View File

@ -73,6 +73,12 @@ impl PresetBrowserPane {
self.scan_directory(&factory_dir, &factory_dir, true);
}
// User presets
let user_dir = user_presets_dir();
if user_dir.is_dir() {
self.scan_directory(&user_dir, &user_dir, false);
}
// Sort presets alphabetically by name within each category
self.presets.sort_by(|a, b| {
a.category.cmp(&b.category).then(a.name.cmp(&b.name))

View File

@ -86,8 +86,11 @@ fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
let mip_frame = frame_f / reduction;
// Convert 1D mip-space index to 2D UV coordinates
let mip_tex_width = params.tex_width / pow(2.0, f32(mip_floor));
let mip_tex_height = ceil(params.total_frames / reduction / mip_tex_width);
// Use actual texture dimensions (not computed from total_frames) because the
// texture may be pre-allocated larger for live recording.
let mip_dims = textureDimensions(peak_tex, mip_floor);
let mip_tex_width = f32(mip_dims.x);
let mip_tex_height = f32(mip_dims.y);
let texel_x = mip_frame % mip_tex_width;
let texel_y = floor(mip_frame / mip_tex_width);
let uv = vec2((texel_x + 0.5) / mip_tex_width, (texel_y + 0.5) / mip_tex_height);
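Note: the shader fix swaps a height computed from total_frames for the texture's real dimensions; the 1D-frame-to-2D-texel mapping itself is unchanged. The same addressing expressed in Rust (a sketch of the arithmetic only):

/// Map a 1D mip-space frame index to centered UV coordinates.
fn frame_to_uv(mip_frame: f32, tex_width: f32, tex_height: f32) -> (f32, f32) {
    let texel_x = mip_frame % tex_width;
    let texel_y = (mip_frame / tex_width).floor();
    ((texel_x + 0.5) / tex_width, (texel_y + 0.5) / tex_height)
}

fn main() {
    // Frame 1030 in a 1024-wide texture lands at texel (6, 1).
    let (u, v) = frame_to_uv(1030.0, 1024.0, 4.0);
    assert!((u - 6.5 / 1024.0).abs() < 1e-6);
    assert!((v - 1.5 / 4.0).abs() < 1e-6);
}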

View File

@ -925,7 +925,7 @@ impl TimelinePane {
active_layer_id: &Option<uuid::Uuid>,
selection: &lightningbeam_core::selection::Selection,
midi_event_cache: &std::collections::HashMap<u32, Vec<(f64, u8, u8, bool)>>,
raw_audio_cache: &std::collections::HashMap<usize, (Vec<f32>, u32, u32)>,
raw_audio_cache: &std::collections::HashMap<usize, (std::sync::Arc<Vec<f32>>, u32, u32)>,
waveform_gpu_dirty: &mut std::collections::HashSet<usize>,
target_format: wgpu::TextureFormat,
waveform_stereo: bool,
@ -1292,9 +1292,74 @@ impl TimelinePane {
}
}
}
// Recording in progress: no visualization yet
// Recording in progress: show live waveform
lightningbeam_core::clip::AudioClipType::Recording => {
// Could show a pulsing "Recording..." indicator here
let rec_pool_idx = usize::MAX;
if let Some((samples, sr, ch)) = raw_audio_cache.get(&rec_pool_idx) {
let total_frames = samples.len() / (*ch).max(1) as usize;
if total_frames > 0 {
let audio_file_duration = total_frames as f64 / *sr as f64;
let screen_size = ui.ctx().content_rect().size();
let pending_upload = if waveform_gpu_dirty.contains(&rec_pool_idx) {
waveform_gpu_dirty.remove(&rec_pool_idx);
Some(crate::waveform_gpu::PendingUpload {
samples: samples.clone(),
sample_rate: *sr,
channels: *ch,
})
} else {
None
};
let tint = [
bright_color.r() as f32 / 255.0,
bright_color.g() as f32 / 255.0,
bright_color.b() as f32 / 255.0,
bright_color.a() as f32 / 255.0,
];
let clip_screen_start = rect.min.x + ((instance_start - self.viewport_start_time) * self.pixels_per_second as f64) as f32;
let clip_screen_end = clip_screen_start + (preview_clip_duration * self.pixels_per_second as f64) as f32;
let waveform_rect = egui::Rect::from_min_max(
egui::pos2(clip_screen_start.max(clip_rect.min.x), clip_rect.min.y),
egui::pos2(clip_screen_end.min(clip_rect.max.x), clip_rect.max.y),
);
if waveform_rect.width() > 0.0 && waveform_rect.height() > 0.0 {
let instance_id = clip_instance.id.as_u128() as u64;
let callback = crate::waveform_gpu::WaveformCallback {
pool_index: rec_pool_idx,
segment_index: 0,
params: crate::waveform_gpu::WaveformParams {
clip_rect: [waveform_rect.min.x, waveform_rect.min.y, waveform_rect.max.x, waveform_rect.max.y],
viewport_start_time: self.viewport_start_time as f32,
pixels_per_second: self.pixels_per_second as f32,
audio_duration: audio_file_duration as f32,
sample_rate: *sr as f32,
clip_start_time: clip_screen_start,
trim_start: preview_trim_start as f32,
tex_width: crate::waveform_gpu::tex_width() as f32,
total_frames: total_frames as f32,
segment_start_frame: 0.0,
display_mode: if waveform_stereo { 1.0 } else { 0.0 },
_pad1: [0.0, 0.0],
tint_color: tint,
screen_size: [screen_size.x, screen_size.y],
_pad: [0.0, 0.0],
},
target_format,
pending_upload,
instance_id,
};
ui.painter().add(egui_wgpu::Callback::new_paint_callback(
waveform_rect,
callback,
));
}
}
}
}
}
}

View File

@ -104,7 +104,7 @@ pub struct WaveformCallback {
/// Raw audio data waiting to be uploaded to GPU
pub struct PendingUpload {
pub samples: Vec<f32>,
pub samples: std::sync::Arc<Vec<f32>>,
pub sample_rate: u32,
pub channels: u32,
}
@ -378,10 +378,21 @@ impl WaveformGpuResources {
let total_frames = new_total_frames;
// For live recording (pool_index == usize::MAX), pre-allocate extra texture
// height to avoid frequent full recreates as recording grows.
// Allocate 60 seconds ahead so incremental updates can fill without recreating.
let alloc_frames = if pool_index == usize::MAX {
let extra = sample_rate as usize * 60; // 60s of mono frames (texture is per-frame, not per-sample)
total_frames + extra
} else {
total_frames
};
let max_frames_per_segment = (TEX_WIDTH as u64)
* (device.limits().max_texture_dimension_2d as u64);
// Use alloc_frames for texture sizing but total_frames for data
let segment_count =
((total_frames as u64 + max_frames_per_segment - 1) / max_frames_per_segment) as usize;
((total_frames as u64 + max_frames_per_segment - 1) / max_frames_per_segment).max(1) as usize;
let frames_per_segment = if segment_count == 1 {
total_frames as u32
} else {
@ -400,7 +411,13 @@ impl WaveformGpuResources {
.min(total_frames as u64);
let seg_frame_count = (seg_end_frame - seg_start_frame) as u32;
let tex_height = (seg_frame_count + TEX_WIDTH - 1) / TEX_WIDTH;
// Allocate texture large enough for future growth (recording) or exact fit (normal)
let alloc_seg_frames = if pool_index == usize::MAX {
(alloc_frames as u32).min(seg_frame_count + sample_rate * 60)
} else {
seg_frame_count
};
let tex_height = (alloc_seg_frames + TEX_WIDTH - 1) / TEX_WIDTH;
let mip_count = compute_mip_count(TEX_WIDTH, tex_height);
// Create texture with mip levels
@ -422,8 +439,10 @@ impl WaveformGpuResources {
});
// Pack raw samples into Rgba16Float data for mip 0
let texel_count = (TEX_WIDTH * tex_height) as usize;
let mut mip0_data: Vec<half::f16> = vec![half::f16::ZERO; texel_count * 4];
// Only pack rows containing actual data (not the pre-allocated empty region)
let data_height = (seg_frame_count + TEX_WIDTH - 1) / TEX_WIDTH;
let data_texel_count = (TEX_WIDTH * data_height) as usize;
let mut mip0_data: Vec<half::f16> = vec![half::f16::ZERO; data_texel_count * 4];
for frame in 0..seg_frame_count as usize {
let global_frame = seg_start_frame as usize + frame;
@ -447,26 +466,28 @@ impl WaveformGpuResources {
mip0_data[texel_offset + 3] = half::f16::from_f32(right);
}
// Upload mip 0
queue.write_texture(
wgpu::TexelCopyTextureInfo {
texture: &texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
aspect: wgpu::TextureAspect::All,
},
bytemuck::cast_slice(&mip0_data),
wgpu::TexelCopyBufferLayout {
offset: 0,
bytes_per_row: Some(TEX_WIDTH * 8),
rows_per_image: Some(tex_height),
},
wgpu::Extent3d {
width: TEX_WIDTH,
height: tex_height,
depth_or_array_layers: 1,
},
);
// Upload mip 0 (only rows with actual data)
if data_height > 0 {
queue.write_texture(
wgpu::TexelCopyTextureInfo {
texture: &texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
aspect: wgpu::TextureAspect::All,
},
bytemuck::cast_slice(&mip0_data),
wgpu::TexelCopyBufferLayout {
offset: 0,
bytes_per_row: Some(TEX_WIDTH * 8),
rows_per_image: Some(data_height),
},
wgpu::Extent3d {
width: TEX_WIDTH,
height: data_height,
depth_or_array_layers: 1,
},
);
}
// Generate mipmaps via compute shader
let cmds = self.generate_mipmaps(
@ -528,7 +549,7 @@ impl WaveformGpuResources {
uniform_buffers,
frames_per_segment,
total_frames: total_frames as u64,
tex_height: (total_frames as u32 + TEX_WIDTH - 1) / TEX_WIDTH,
tex_height: (alloc_frames as u32 + TEX_WIDTH - 1) / TEX_WIDTH,
sample_rate,
channels,
},
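Note: the pre-allocation above trades a taller texture for fewer recreates: for the live-recording pool (pool_index == usize::MAX) the height covers total_frames plus 60 seconds of headroom, while the upload touches only rows that contain data. The row math in isolation (the TEX_WIDTH value is illustrative):

const TEX_WIDTH: u32 = 1024; // illustrative; the engine defines its own

/// ceil(frames / TEX_WIDTH): rows needed to hold `frames` texels.
fn rows(frames: u32) -> u32 {
    (frames + TEX_WIDTH - 1) / TEX_WIDTH
}

fn main() {
    let sample_rate = 48_000u32;
    let total_frames = 10 * sample_rate;                // 10 s recorded so far
    let alloc_frames = total_frames + 60 * sample_rate; // +60 s headroom

    // Texture height covers the headroom; uploads cover only the data.
    assert_eq!(rows(alloc_frames), 3282); // ceil(3_360_000 / 1024)
    assert_eq!(rows(total_frames), 469);  // ceil(480_000 / 1024)
}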

View File

@ -48,10 +48,10 @@ raw-window-handle = "0.6"
image = "0.24"
[target.'cfg(target_os = "macos")'.dependencies]
ffmpeg-next = { version = "7.0", features = ["build"] }
ffmpeg-next = { version = "8.0", features = ["build"] }
[target.'cfg(not(target_os = "macos"))'.dependencies]
ffmpeg-next = "7.0"
ffmpeg-next = "8.0"
[profile.dev]

View File

@ -83,7 +83,7 @@ impl EventEmitter for TauriEventEmitter {
AudioEvent::PlaybackPosition(time) => {
SerializedAudioEvent::PlaybackPosition { time }
}
AudioEvent::RecordingStarted(track_id, clip_id) => {
AudioEvent::RecordingStarted(track_id, clip_id, _, _) => {
SerializedAudioEvent::RecordingStarted { track_id, clip_id }
}
AudioEvent::RecordingProgress(clip_id, duration) => {