add BPM detection

Skyler Lehmkuhl 2025-11-02 09:53:34 -05:00
parent 66c4746767
commit 9702a501bd
11 changed files with 684 additions and 15 deletions

View File

@@ -0,0 +1,310 @@
/// BPM Detection using autocorrelation and onset detection
///
/// This module provides both offline analysis (for audio import)
/// and real-time streaming analysis (for the BPM detector node)
use std::collections::VecDeque;
/// Detects BPM from a complete audio buffer (offline analysis)
pub fn detect_bpm_offline(audio: &[f32], sample_rate: u32) -> Option<f32> {
if audio.is_empty() {
return None;
}
// Convert to mono if needed (already mono in our case)
// Downsample for efficiency (analyze every 4th sample for faster processing)
let downsampled: Vec<f32> = audio.iter().step_by(4).copied().collect();
let effective_sample_rate = sample_rate / 4;
// Detect onsets using energy-based method
let onsets = detect_onsets(&downsampled, effective_sample_rate);
if onsets.len() < 4 {
return None;
}
// Calculate onset strength function for autocorrelation
let onset_envelope = calculate_onset_envelope(&onsets, downsampled.len(), effective_sample_rate);
// Further downsample onset envelope for BPM analysis
// For 60-200 BPM (1-3.33 Hz), we only need ~10 Hz sample rate by Nyquist
// Use 100 Hz for good margin (100 samples per second)
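// (Nyquist floor is 2 * 3.33 Hz ~= 6.7 Hz, so 100 Hz leaves roughly 15x headroom)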
let tempo_sample_rate = 100.0;
let downsample_factor = (effective_sample_rate as f32 / tempo_sample_rate) as usize;
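// e.g., for 48 kHz input audio: effective rate 12 kHz -> downsample_factor = 120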
let downsampled_envelope: Vec<f32> = onset_envelope
.iter()
.step_by(downsample_factor.max(1))
.copied()
.collect();
// Use autocorrelation to find the fundamental period
detect_bpm_autocorrelation(&downsampled_envelope, tempo_sample_rate as u32)
}
/// Calculate an onset envelope from detected onsets
fn calculate_onset_envelope(onsets: &[usize], total_length: usize, sample_rate: u32) -> Vec<f32> {
// Create a sparse representation of onsets with exponential decay
let mut envelope = vec![0.0; total_length];
let decay_samples = (sample_rate as f32 * 0.05) as usize; // 50ms decay
for &onset in onsets {
if onset < total_length {
envelope[onset] = 1.0;
// Add exponential decay after onset
for i in 1..decay_samples.min(total_length - onset) {
let decay_value = (-3.0 * i as f32 / decay_samples as f32).exp();
envelope[onset + i] = f32::max(envelope[onset + i], decay_value);
}
}
}
envelope
}
/// Detect BPM using autocorrelation on onset envelope
fn detect_bpm_autocorrelation(onset_envelope: &[f32], sample_rate: u32) -> Option<f32> {
// BPM range: 60-200 BPM
let min_bpm = 60.0;
let max_bpm = 200.0;
let min_lag = (60.0 * sample_rate as f32 / max_bpm) as usize;
let max_lag = (60.0 * sample_rate as f32 / min_bpm) as usize;
if max_lag >= onset_envelope.len() / 2 {
return None;
}
// Calculate autocorrelation for tempo range
let mut best_lag = min_lag;
let mut best_correlation = 0.0;
for lag in min_lag..=max_lag {
let mut correlation = 0.0;
let mut count = 0;
for i in 0..(onset_envelope.len() - lag) {
correlation += onset_envelope[i] * onset_envelope[i + lag];
count += 1;
}
if count > 0 {
correlation /= count as f32;
// Bias toward faster tempos slightly (common in EDM)
let bias = 1.0 + (lag as f32 - min_lag as f32) / (max_lag - min_lag) as f32 * 0.1;
correlation /= bias;
if correlation > best_correlation {
best_correlation = correlation;
best_lag = lag;
}
}
}
// Convert best lag to BPM
let bpm = 60.0 * sample_rate as f32 / best_lag as f32;
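// e.g., at the 100 Hz envelope rate, best_lag = 50 -> 60 * 100 / 50 = 120 BPM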
// Check for octave errors by testing multiples
// Common ranges: 60-90 (slow), 90-140 (medium), 140-200 (fast)
let half_bpm = bpm / 2.0;
let double_bpm = bpm * 2.0;
let quad_bpm = bpm * 4.0;
// Choose the octave that falls in the most common range (100-200 BPM, typical for EDM/pop)
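// e.g., a raw estimate of 70 BPM is doubled to 140, which lands in that range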
let final_bpm = if quad_bpm >= 100.0 && quad_bpm <= 200.0 {
// Very slow detection, multiply by 4
quad_bpm
} else if double_bpm >= 100.0 && double_bpm <= 200.0 {
// Slow detection, multiply by 2
double_bpm
} else if bpm >= 100.0 && bpm <= 200.0 {
// Already in good range
bpm
} else if half_bpm >= 100.0 && half_bpm <= 200.0 {
// Too fast detection, divide by 2
half_bpm
} else {
// Outside ideal range, use as-is
bpm
};
// Round to nearest 0.5 BPM for cleaner values
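// e.g., 127.3 -> 127.5 and 119.2 -> 119.0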
Some((final_bpm * 2.0).round() / 2.0)
}
/// Detect onsets (beat events) in audio using energy-based method
fn detect_onsets(audio: &[f32], sample_rate: u32) -> Vec<usize> {
let mut onsets = Vec::new();
// Window size for energy calculation (~20ms)
let window_size = ((sample_rate as f32 * 0.02) as usize).max(1);
let hop_size = window_size / 2;
if audio.len() < window_size {
return onsets;
}
// Calculate energy for each window
let mut energies = Vec::new();
let mut pos = 0;
while pos + window_size <= audio.len() {
let window = &audio[pos..pos + window_size];
let energy: f32 = window.iter().map(|&s| s * s).sum();
energies.push(energy / window_size as f32); // Normalize
pos += hop_size;
}
if energies.len() < 3 {
return onsets;
}
// Calculate energy differences (onset strength)
let mut onset_strengths = Vec::new();
for i in 1..energies.len() {
let diff = (energies[i] - energies[i - 1]).max(0.0); // Only positive changes
onset_strengths.push(diff);
}
// Find threshold (adaptive)
let mean_strength: f32 = onset_strengths.iter().sum::<f32>() / onset_strengths.len() as f32;
let threshold = mean_strength * 1.5; // 1.5x mean
// Peak picking with minimum distance
let min_distance = sample_rate as usize / 10; // Minimum 100ms between onsets
let mut last_onset = 0;
for (i, &strength) in onset_strengths.iter().enumerate() {
if strength > threshold {
let sample_pos = (i + 1) * hop_size;
// Check if it's a local maximum and far enough from last onset
let is_local_max = (i == 0 || onset_strengths[i - 1] <= strength) &&
(i == onset_strengths.len() - 1 || onset_strengths[i + 1] < strength);
if is_local_max && (onsets.is_empty() || sample_pos - last_onset >= min_distance) {
onsets.push(sample_pos);
last_onset = sample_pos;
}
}
}
onsets
}
/// Real-time BPM detector for streaming audio
pub struct BpmDetectorRealtime {
sample_rate: u32,
// Circular buffer for recent audio (e.g., 10 seconds)
audio_buffer: VecDeque<f32>,
max_buffer_samples: usize,
// Current BPM estimate
current_bpm: f32,
// Update interval (samples)
samples_since_update: usize,
update_interval: usize,
// Smoothing
bpm_history: VecDeque<f32>,
history_size: usize,
}
impl BpmDetectorRealtime {
pub fn new(sample_rate: u32, buffer_duration_seconds: f32) -> Self {
let max_buffer_samples = (sample_rate as f32 * buffer_duration_seconds) as usize;
let update_interval = sample_rate as usize; // Update every 1 second
Self {
sample_rate,
audio_buffer: VecDeque::with_capacity(max_buffer_samples),
max_buffer_samples,
current_bpm: 120.0, // Default BPM
samples_since_update: 0,
update_interval,
bpm_history: VecDeque::with_capacity(8),
history_size: 8,
}
}
/// Process a chunk of audio and return current BPM estimate
pub fn process(&mut self, audio: &[f32]) -> f32 {
// Add samples to buffer
for &sample in audio {
if self.audio_buffer.len() >= self.max_buffer_samples {
self.audio_buffer.pop_front();
}
self.audio_buffer.push_back(sample);
}
self.samples_since_update += audio.len();
// Periodically re-analyze
if self.samples_since_update >= self.update_interval && self.audio_buffer.len() > self.sample_rate as usize {
self.samples_since_update = 0;
// Convert buffer to slice for analysis
let buffer_vec: Vec<f32> = self.audio_buffer.iter().copied().collect();
if let Some(detected_bpm) = detect_bpm_offline(&buffer_vec, self.sample_rate) {
// Add to history for smoothing
if self.bpm_history.len() >= self.history_size {
self.bpm_history.pop_front();
}
self.bpm_history.push_back(detected_bpm);
// Use median of recent detections for stability
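// e.g., sorted history [118, 120, 120, 121, 160] -> median 120, discarding the 160 outlier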
let mut sorted_history: Vec<f32> = self.bpm_history.iter().copied().collect();
sorted_history.sort_by(|a, b| a.partial_cmp(b).unwrap());
self.current_bpm = sorted_history[sorted_history.len() / 2];
}
}
self.current_bpm
}
pub fn get_bpm(&self) -> f32 {
self.current_bpm
}
pub fn reset(&mut self) {
self.audio_buffer.clear();
self.bpm_history.clear();
self.samples_since_update = 0;
self.current_bpm = 120.0;
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_120_bpm_detection() {
let sample_rate = 48000;
let bpm = 120.0;
let beat_interval = 60.0 / bpm;
let beat_samples = (sample_rate as f32 * beat_interval) as usize;
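// at 120 BPM: 0.5 s per beat -> 24000 samples at 48 kHz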
// Generate 8 beats
let mut audio = vec![0.0; beat_samples * 8];
for beat in 0..8 {
let pos = beat * beat_samples;
// Add a sharp transient at each beat
for i in 0..100 {
audio[pos + i] = (1.0 - i as f32 / 100.0) * 0.8;
}
}
let detected = detect_bpm_offline(&audio, sample_rate);
assert!(detected.is_some());
let detected_bpm = detected.unwrap();
// Allow 5% tolerance
assert!((detected_bpm - bpm).abs() / bpm < 0.05,
"Expected ~{} BPM, got {}", bpm, detected_bpm);
}
}
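
A minimal usage sketch of both entry points, for reference (not part of the diff; it assumes a decoded mono f32 buffer, and decode_to_mono is a hypothetical helper, not something this commit provides):

use daw_backend::audio::bpm_detector::{detect_bpm_offline, BpmDetectorRealtime};

fn example() {
// Offline: analyze a complete buffer once, e.g. at import time
let samples: Vec<f32> = decode_to_mono("loop.wav"); // hypothetical decoder
if let Some(bpm) = detect_bpm_offline(&samples, 48000) {
println!("detected {bpm} BPM");
}
// Streaming: feed callback-sized chunks and read the rolling estimate
let mut detector = BpmDetectorRealtime::new(48000, 10.0);
for chunk in samples.chunks(512) {
let _bpm = detector.process(chunk);
}
}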

View File

@@ -68,7 +68,7 @@ impl Engine {
let buffer_size = 512 * channels as usize;
Self {
- project: Project::new(),
+ project: Project::new(sample_rate),
audio_pool: AudioPool::new(),
buffer_pool: BufferPool::new(8, buffer_size), // 8 buffers should handle deep nesting
playhead: 0,
@@ -637,7 +637,7 @@ impl Engine {
self.recording_state = None;
// Clear all project data
- self.project = Project::new();
+ self.project = Project::new(self.sample_rate);
// Clear audio pool
self.audio_pool = AudioPool::new();
@@ -726,6 +726,7 @@ impl Engine {
"Chorus" => Box::new(ChorusNode::new("Chorus".to_string())),
"Compressor" => Box::new(CompressorNode::new("Compressor".to_string())),
"Constant" => Box::new(ConstantNode::new("Constant".to_string())),
+ "BpmDetector" => Box::new(BpmDetectorNode::new("BPM Detector".to_string())),
"EnvelopeFollower" => Box::new(EnvelopeFollowerNode::new("Envelope Follower".to_string())),
"Limiter" => Box::new(LimiterNode::new("Limiter".to_string())),
"Math" => Box::new(MathNode::new("Math".to_string())),
@@ -810,6 +811,7 @@ impl Engine {
"Chorus" => Box::new(ChorusNode::new("Chorus".to_string())),
"Compressor" => Box::new(CompressorNode::new("Compressor".to_string())),
"Constant" => Box::new(ConstantNode::new("Constant".to_string())),
+ "BpmDetector" => Box::new(BpmDetectorNode::new("BPM Detector".to_string())),
"EnvelopeFollower" => Box::new(EnvelopeFollowerNode::new("Envelope Follower".to_string())),
"Limiter" => Box::new(LimiterNode::new("Limiter".to_string())),
"Math" => Box::new(MathNode::new("Math".to_string())),
@@ -1668,6 +1670,7 @@ pub struct EngineController {
playhead: Arc<AtomicU64>,
next_midi_clip_id: Arc<AtomicU32>,
sample_rate: u32,
+ #[allow(dead_code)] // Used in public getter method
channels: u32,
}

View File

@@ -1,4 +1,5 @@
pub mod automation;
+ pub mod bpm_detector;
pub mod buffer_pool;
pub mod clip;
pub mod engine;

View File

@@ -0,0 +1,165 @@
use crate::audio::bpm_detector::BpmDetectorRealtime;
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType};
use crate::audio::midi::MidiEvent;
const PARAM_SMOOTHING: u32 = 0;
/// BPM Detector Node - analyzes audio input and outputs tempo as CV
/// The CV output encodes BPM / 1000 (e.g., 0.12 = 120 BPM)
pub struct BpmDetectorNode {
name: String,
detector: BpmDetectorRealtime,
smoothing: f32, // Smoothing factor for output (0-1)
last_output: f32, // For smooth transitions
sample_rate: u32, // Current sample rate
inputs: Vec<NodePort>,
outputs: Vec<NodePort>,
parameters: Vec<Parameter>,
}
impl BpmDetectorNode {
pub fn new(name: impl Into<String>) -> Self {
let name = name.into();
let inputs = vec![
NodePort::new("Audio In", SignalType::Audio, 0),
];
let outputs = vec![
NodePort::new("BPM CV", SignalType::CV, 0),
];
let parameters = vec![
Parameter::new(PARAM_SMOOTHING, "Smoothing", 0.0, 1.0, 0.9, ParameterUnit::Percent),
];
// Use 10 second buffer for analysis
let detector = BpmDetectorRealtime::new(48000, 10.0);
Self {
name,
detector,
smoothing: 0.9,
last_output: 120.0,
sample_rate: 48000,
inputs,
outputs,
parameters,
}
}
}
impl AudioNode for BpmDetectorNode {
fn category(&self) -> NodeCategory {
NodeCategory::Utility
}
fn inputs(&self) -> &[NodePort] {
&self.inputs
}
fn outputs(&self) -> &[NodePort] {
&self.outputs
}
fn parameters(&self) -> &[Parameter] {
&self.parameters
}
fn set_parameter(&mut self, id: u32, value: f32) {
match id {
PARAM_SMOOTHING => self.smoothing = value.clamp(0.0, 1.0),
_ => {}
}
}
fn get_parameter(&self, id: u32) -> f32 {
match id {
PARAM_SMOOTHING => self.smoothing,
_ => 0.0,
}
}
fn process(
&mut self,
inputs: &[&[f32]],
outputs: &mut [&mut [f32]],
_midi_inputs: &[&[MidiEvent]],
_midi_outputs: &mut [&mut Vec<MidiEvent>],
sample_rate: u32,
) {
// Recreate detector if sample rate changed
if sample_rate != self.sample_rate {
self.sample_rate = sample_rate;
self.detector = BpmDetectorRealtime::new(sample_rate, 10.0);
}
if outputs.is_empty() {
return;
}
let output = &mut outputs[0];
let input = if !inputs.is_empty() && !inputs[0].is_empty() {
inputs[0]
} else {
// Fill output with last known BPM
output.fill(self.last_output / 1000.0); // Scale BPM for CV (e.g., 120 BPM -> 0.12)
return;
};
// Process audio through detector
let detected_bpm = self.detector.process(input);
// Apply smoothing
let smoothed_bpm = self.last_output * self.smoothing + detected_bpm * (1.0 - self.smoothing);
self.last_output = smoothed_bpm;
// Output BPM as CV (scaled down for typical CV range)
// BPM / 1000 gives us reasonable CV values (60-180 BPM -> 0.06-0.18)
let cv_value = smoothed_bpm / 1000.0;
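// Downstream consumers can recover the tempo by multiplying the CV by 1000 (0.12 -> 120 BPM)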
// Fill entire output buffer with current BPM value
output.fill(cv_value);
}
fn reset(&mut self) {
self.detector.reset();
self.last_output = 120.0;
}
fn node_type(&self) -> &str {
"BpmDetector"
}
fn name(&self) -> &str {
&self.name
}
fn clone_node(&self) -> Box<dyn AudioNode> {
Box::new(Self {
name: self.name.clone(),
detector: BpmDetectorRealtime::new(self.sample_rate, 10.0),
smoothing: self.smoothing,
last_output: self.last_output,
sample_rate: self.sample_rate,
inputs: self.inputs.clone(),
outputs: self.outputs.clone(),
parameters: self.parameters.clone(),
})
}
fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
self
}
fn as_any(&self) -> &dyn std::any::Any {
self
}
}

View File

@@ -3,6 +3,7 @@ mod audio_input;
mod audio_to_cv;
mod automation_input;
mod bit_crusher;
+ mod bpm_detector;
mod chorus;
mod compressor;
mod constant;
@@ -44,6 +45,7 @@ pub use audio_input::AudioInputNode;
pub use audio_to_cv::AudioToCVNode;
pub use automation_input::{AutomationInputNode, AutomationKeyframe, InterpolationType};
pub use bit_crusher::BitCrusherNode;
+ pub use bpm_detector::BpmDetectorNode;
pub use chorus::ChorusNode;
pub use compressor::CompressorNode;
pub use constant::ConstantNode;

View File

@@ -13,15 +13,17 @@ pub struct Project {
tracks: HashMap<TrackId, TrackNode>,
next_track_id: TrackId,
root_tracks: Vec<TrackId>, // Top-level tracks (not in any group)
+ sample_rate: u32, // System sample rate
}
impl Project {
/// Create a new empty project
- pub fn new() -> Self {
+ pub fn new(sample_rate: u32) -> Self {
Self {
tracks: HashMap::new(),
next_track_id: 0,
root_tracks: Vec::new(),
+ sample_rate,
}
}
@@ -42,7 +44,7 @@ impl Project {
/// The new track's ID
pub fn add_audio_track(&mut self, name: String, parent_id: Option<TrackId>) -> TrackId {
let id = self.next_id();
- let track = AudioTrack::new(id, name);
+ let track = AudioTrack::new(id, name, self.sample_rate);
self.tracks.insert(id, TrackNode::Audio(track));
if let Some(parent) = parent_id {
@@ -94,7 +96,7 @@ impl Project {
/// The new track's ID
pub fn add_midi_track(&mut self, name: String, parent_id: Option<TrackId>) -> TrackId {
let id = self.next_id();
- let track = MidiTrack::new(id, name);
+ let track = MidiTrack::new(id, name, self.sample_rate);
self.tracks.insert(id, TrackNode::Midi(track));
if let Some(parent) = parent_id {
@@ -422,6 +424,6 @@ impl Project {
impl Default for Project {
fn default() -> Self {
- Self::new()
+ Self::new(48000) // Use 48kHz as default, will be overridden when created with actual sample rate
}
}

View File

@@ -303,16 +303,15 @@ pub struct MidiTrack {
impl MidiTrack {
/// Create a new MIDI track with default settings
- pub fn new(id: TrackId, name: String) -> Self {
+ pub fn new(id: TrackId, name: String, sample_rate: u32) -> Self {
- // Use default sample rate and a large buffer size that can accommodate any callback
+ // Use a large buffer size that can accommodate any callback
- let default_sample_rate = 48000;
let default_buffer_size = 8192;
Self {
id,
name,
clips: Vec::new(),
- instrument_graph: AudioGraph::new(default_sample_rate, default_buffer_size),
+ instrument_graph: AudioGraph::new(sample_rate, default_buffer_size),
volume: 1.0,
muted: false,
solo: false,
@@ -498,21 +497,24 @@ pub struct AudioTrack {
impl AudioTrack {
/// Create a new audio track with default settings
- pub fn new(id: TrackId, name: String) -> Self {
+ pub fn new(id: TrackId, name: String, sample_rate: u32) -> Self {
- // Use default sample rate and a large buffer size that can accommodate any callback
+ // Use a large buffer size that can accommodate any callback
- let default_sample_rate = 48000;
let default_buffer_size = 8192;
// Create the effects graph with default AudioInput -> AudioOutput chain
- let mut effects_graph = AudioGraph::new(default_sample_rate, default_buffer_size);
+ let mut effects_graph = AudioGraph::new(sample_rate, default_buffer_size);
// Add AudioInput node
let input_node = Box::new(AudioInputNode::new("Audio Input"));
let input_id = effects_graph.add_node(input_node);
+ // Set position for AudioInput (left side, similar to instrument preset spacing)
+ effects_graph.set_node_position(input_id, 100.0, 150.0);
// Add AudioOutput node
let output_node = Box::new(AudioOutputNode::new("Audio Output"));
let output_id = effects_graph.add_node(output_node);
+ // Set position for AudioOutput (right side, spaced apart)
+ effects_graph.set_node_position(output_id, 500.0, 150.0);
// Connect AudioInput -> AudioOutput
let _ = effects_graph.connect(input_id, 0, output_id, 0);

View File

@@ -10,6 +10,7 @@ pub struct AudioFileMetadata {
pub sample_rate: u32,
pub channels: u32,
pub waveform: Vec<WaveformPeak>,
+ pub detected_bpm: Option<f32>, // Detected BPM from audio analysis
}
#[derive(serde::Serialize)]
@@ -272,6 +273,18 @@ pub async fn audio_load_file(
let sample_rate = audio_file.sample_rate;
let channels = audio_file.channels;
// Detect BPM from audio (mix to mono if stereo)
let mono_audio: Vec<f32> = if channels == 2 {
// Mix stereo to mono
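// (assumes interleaved [L, R, L, R, ...] frames; each pair is averaged)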
audio_file.data.chunks(2)
.map(|chunk| (chunk[0] + chunk.get(1).unwrap_or(&0.0)) * 0.5)
.collect()
} else {
audio_file.data.clone()
};
let detected_bpm = daw_backend::audio::bpm_detector::detect_bpm_offline(&mono_audio, sample_rate);
// Get a lock on the audio state and send the loaded data to the audio thread
let mut audio_state = state.lock().unwrap();
@@ -293,6 +306,7 @@
sample_rate,
channels,
waveform,
+ detected_bpm,
})
} else {
Err("Audio not initialized".to_string())

View File

@@ -536,6 +536,37 @@ export const actions = {
if (context.timelineWidget) {
context.timelineWidget.requestRedraw();
}
// Make this the active track
if (context.activeObject) {
context.activeObject.activeLayer = newAudioTrack;
updateLayers(); // Refresh to show active state
// Reload node editor to show the new track's graph
if (context.reloadNodeEditor) {
await context.reloadNodeEditor();
}
}
// Prompt user to set BPM if detected
if (metadata.detected_bpm && context.timelineWidget) {
const currentBpm = context.timelineWidget.timelineState.bpm;
const detectedBpm = metadata.detected_bpm;
const shouldSetBpm = confirm(
`Detected BPM: ${detectedBpm}\n\n` +
`Current project BPM: ${currentBpm}\n\n` +
`Would you like to set the project BPM to ${detectedBpm}?`
);
if (shouldSetBpm) {
context.timelineWidget.timelineState.bpm = detectedBpm;
context.timelineWidget.requestRedraw(); // Redraw to show updated BPM
console.log(`Project BPM set to ${detectedBpm}`);
// Notify all registered listeners of BPM change
if (context.notifyBpmChange) {
context.notifyBpmChange(detectedBpm);
}
}
}
} catch (error) {
console.error('Failed to load audio:', error);
// Update clip to show error

View File

@@ -4293,6 +4293,10 @@ function timelineV2() {
if (timelineWidget.requestRedraw) {
timelineWidget.requestRedraw();
}
// Notify all registered listeners of BPM change
if (context.notifyBpmChange) {
context.notifyBpmChange(bpm);
}
}
} else if (action === 'edit-time-signature') {
// Clicked on time signature - show custom dropdown with common options
@@ -6681,6 +6685,84 @@ function nodeEditor() {
set suppressActionRecording(value) { suppressActionRecording = value; }
};
// Initialize BPM change notification system
// This allows nodes to register callbacks to be notified when BPM changes
const bpmChangeListeners = new Set();
context.registerBpmChangeListener = (callback) => {
bpmChangeListeners.add(callback);
return () => bpmChangeListeners.delete(callback); // Return unregister function
};
context.notifyBpmChange = (newBpm) => {
console.log(`BPM changed to ${newBpm}, notifying ${bpmChangeListeners.size} listeners`);
bpmChangeListeners.forEach(callback => {
try {
callback(newBpm);
} catch (error) {
console.error('Error in BPM change listener:', error);
}
});
};
// Register a listener to update all synced Phaser nodes when BPM changes
context.registerBpmChangeListener((newBpm) => {
if (!editor) return;
const module = editor.module;
const allNodes = editor.drawflow.drawflow[module]?.data || {};
// Beat division definitions for conversion
const beatDivisions = [
{ label: '4 bars', multiplier: 16.0 },
{ label: '2 bars', multiplier: 8.0 },
{ label: '1 bar', multiplier: 4.0 },
{ label: '1/2', multiplier: 2.0 },
{ label: '1/4', multiplier: 1.0 },
{ label: '1/8', multiplier: 0.5 },
{ label: '1/16', multiplier: 0.25 },
{ label: '1/32', multiplier: 0.125 },
{ label: '1/2T', multiplier: 2.0/3.0 },
{ label: '1/4T', multiplier: 1.0/3.0 },
{ label: '1/8T', multiplier: 0.5/3.0 }
];
// Iterate through all nodes to find synced Phaser nodes
for (const [nodeId, nodeData] of Object.entries(allNodes)) {
// Check if this is a Phaser node with sync enabled
if (nodeData.name === 'Phaser' && nodeData.data.backendId !== null) {
const nodeElement = document.getElementById(`node-${nodeId}`);
if (!nodeElement) continue;
const syncCheckbox = nodeElement.querySelector(`#sync-${nodeId}`);
if (!syncCheckbox || !syncCheckbox.checked) continue;
// Get the current rate slider value (beat division index)
const rateSlider = nodeElement.querySelector(`input[data-param="0"]`); // rate is param 0
if (!rateSlider) continue;
const beatDivisionIndex = Math.min(10, Math.max(0, Math.round(parseFloat(rateSlider.value))));
const beatsPerSecond = newBpm / 60.0;
const quarterNotesPerCycle = beatDivisions[beatDivisionIndex].multiplier;
const hz = beatsPerSecond / quarterNotesPerCycle;
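// e.g., at 128 BPM with '1 bar' (multiplier 4): (128 / 60) / 4 ≈ 0.53 Hz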
// Update the backend parameter
const trackInfo = getCurrentTrack();
if (trackInfo !== null) {
invoke("graph_set_parameter", {
trackId: trackInfo.trackId,
nodeId: nodeData.data.backendId,
paramId: 0, // rate parameter
value: hz
}).catch(err => {
console.error("Failed to update Phaser rate after BPM change:", err);
});
console.log(`Updated Phaser node ${nodeId} rate to ${hz} Hz for BPM ${newBpm}`);
}
}
}
});
// Initialize minimap
const minimapCanvas = container.querySelector("#minimap-canvas");
const minimapViewport = container.querySelector(".minimap-viewport");
@@ -7679,11 +7761,41 @@
if (nodeData.data.backendId !== null) {
const trackInfo = getCurrentTrack();
if (trackInfo !== null) {
// Convert beat divisions to Hz for Phaser rate in sync mode
let backendValue = value;
if (nodeDef && nodeDef.parameters[paramId]) {
const param = nodeDef.parameters[paramId];
if (param.name === 'rate' && nodeData.name === 'Phaser') {
const syncCheckbox = nodeElement.querySelector(`#sync-${nodeId}`);
if (syncCheckbox && syncCheckbox.checked && context.timelineWidget) {
const beatDivisions = [
{ label: '4 bars', multiplier: 16.0 },
{ label: '2 bars', multiplier: 8.0 },
{ label: '1 bar', multiplier: 4.0 },
{ label: '1/2', multiplier: 2.0 },
{ label: '1/4', multiplier: 1.0 },
{ label: '1/8', multiplier: 0.5 },
{ label: '1/16', multiplier: 0.25 },
{ label: '1/32', multiplier: 0.125 },
{ label: '1/2T', multiplier: 2.0/3.0 },
{ label: '1/4T', multiplier: 1.0/3.0 },
{ label: '1/8T', multiplier: 0.5/3.0 }
];
const idx = Math.min(10, Math.max(0, Math.round(value)));
const bpm = context.timelineWidget.timelineState.bpm;
const beatsPerSecond = bpm / 60.0;
const quarterNotesPerCycle = beatDivisions[idx].multiplier;
// Hz = how many cycles per second
backendValue = beatsPerSecond / quarterNotesPerCycle;
}
}
}
invoke("graph_set_parameter", { invoke("graph_set_parameter", {
trackId: trackInfo.trackId, trackId: trackInfo.trackId,
nodeId: nodeData.data.backendId, nodeId: nodeData.data.backendId,
paramId: paramId, paramId: paramId,
value: value value: backendValue
}).catch(err => { }).catch(err => {
console.error("Failed to set parameter:", err); console.error("Failed to set parameter:", err);
}); });

View File

@@ -1283,6 +1283,33 @@ export const nodeTypes = {
`
},
BpmDetector: {
name: 'BPM Detector',
category: NodeCategory.UTILITY,
description: 'Detects tempo from audio and outputs BPM as CV',
inputs: [
{ name: 'Audio In', type: SignalType.AUDIO, index: 0 }
],
outputs: [
{ name: 'BPM CV', type: SignalType.CV, index: 0 }
],
parameters: [
{ id: 0, name: 'smoothing', label: 'Smoothing', min: 0.0, max: 1.0, default: 0.9, unit: '' }
],
getHTML: (nodeId) => `
<div class="node-content">
<div class="node-title">BPM Detector</div>
<div class="node-param">
<label>Smoothing: <span id="smoothing-${nodeId}">0.90</span></label>
<input type="range" data-node="${nodeId}" data-param="0" min="0.0" max="1.0" value="0.9" step="0.01">
</div>
<div class="node-info" style="font-size: 10px; color: #888; margin-top: 5px;">
Analyzes incoming audio and outputs detected BPM as CV signal
</div>
</div>
`
},
EnvelopeFollower: {
name: 'Envelope Follower',
category: NodeCategory.UTILITY,