Compare commits

...

7 Commits

Author SHA1 Message Date
Skyler Lehmkuhl c10f42da8f Merge branch 'rust-ui' of https://git.skyler.io/skyler/Lightningbeam into rust-ui 2026-02-16 19:12:49 -05:00
Skyler Lehmkuhl b2a6304771 make sample load menus consistent 2026-02-16 19:12:21 -05:00
Skyler Lehmkuhl 6bbf7d27df node connection improvement 2026-02-16 19:12:06 -05:00
Skyler Lehmkuhl 2c0d53fb84 Work on sampler nodes, fix slew limiter 2026-02-16 18:45:11 -05:00
Skyler Lehmkuhl 93a29192fd Split export dialog into simple/advanced 2026-02-16 08:15:13 -05:00
Skyler Lehmkuhl e03d12009f fix broken mp3/aac export 2026-02-16 07:53:23 -05:00
Skyler Lehmkuhl 6c88c4a8da clean up compiler warnings in egui_node_graph2 2026-02-16 07:52:29 -05:00
19 changed files with 1094 additions and 255 deletions

View File

@ -466,6 +466,66 @@ impl Engine {
}
}
/// Read audio from pool as mono f32 samples.
/// Handles all storage types: InMemory/Mapped use read_samples(),
/// Compressed falls back to decoding from the file path.
/// Read audio from pool as mono f32 samples.
/// Handles all storage types: InMemory/Mapped use read_samples(),
/// Compressed falls back to decoding from the file path.
///
/// Returns `(mono_samples, sample_rate)` on success, or `None` when the
/// pool index is invalid or every read strategy fails.
fn read_mono_from_pool(pool: &crate::audio::pool::AudioClipPool, pool_index: usize) -> Option<(Vec<f32>, f32)> {
    let audio_file = pool.get_file(pool_index)?;
    let channels = audio_file.channels as usize;
    let frames = audio_file.frames as usize;
    let sample_rate = audio_file.sample_rate as f32;

    // Strategy 1: read_samples (works for InMemory and Mapped storage).
    let mut mono_samples = vec![0.0f32; frames];
    let read_count = if channels == 1 {
        audio_file.read_samples(0, frames, 0, &mut mono_samples)
    } else {
        let mut channel_buf = vec![0.0f32; frames];
        let mut max_count = 0;
        for ch in 0..channels {
            // Zero the scratch buffer before each read so that a short read
            // cannot leak the previous channel's tail samples into the mix.
            channel_buf.fill(0.0);
            let count = audio_file.read_samples(0, frames, ch, &mut channel_buf);
            // Track the best count over ALL channels (not just the last one)
            // so one failed channel doesn't send us to the decode fallback.
            max_count = max_count.max(count);
            for (dst, &src) in mono_samples.iter_mut().zip(&channel_buf) {
                *dst += src;
            }
        }
        // Average the summed channels down to mono.
        let scale = 1.0 / channels as f32;
        for s in &mut mono_samples {
            *s *= scale;
        }
        max_count
    };
    if read_count > 0 {
        return Some((mono_samples, sample_rate));
    }

    // Strategy 2 (Compressed storage): decode from the original file path.
    // Embedded clips use an "<embedded…>" pseudo-path with no file behind it.
    let path = audio_file.path.to_string_lossy();
    if !path.starts_with("<embedded") {
        if let Ok(sample_data) = crate::audio::sample_loader::load_audio_file(&*path) {
            return Some((sample_data.samples, sample_data.sample_rate as f32));
        }
    }

    // Strategy 3 (last resort): raw interleaved data() mixed down by hand.
    let data = audio_file.data();
    if !data.is_empty() && channels > 0 {
        let actual_frames = data.len() / channels;
        let mut mono = vec![0.0f32; actual_frames];
        for (frame, out) in mono.iter_mut().enumerate() {
            let start = frame * channels;
            *out = data[start..start + channels].iter().sum::<f32>() / channels as f32;
        }
        return Some((mono, sample_rate));
    }

    eprintln!("[read_mono_from_pool] Failed to read audio from pool_index={}", pool_index);
    None
}
/// Handle a command from the UI thread
fn handle_command(&mut self, cmd: Command) {
match cmd {
@ -1586,6 +1646,38 @@ impl Engine {
}
}
Command::SamplerLoadFromPool(track_id, node_id, pool_index) => {
use crate::audio::node_graph::nodes::SimpleSamplerNode;
let sample_result = Self::read_mono_from_pool(&self.audio_pool, pool_index);
if let Some((mono_samples, sample_rate)) = sample_result {
if let Some(TrackNode::Midi(track)) = self.project.get_track_mut(track_id) {
let graph = &mut track.instrument_graph;
let node_idx = NodeIndex::new(node_id as usize);
if let Some(graph_node) = graph.get_graph_node_mut(node_idx) {
if let Some(sampler_node) = graph_node.node.as_any_mut().downcast_mut::<SimpleSamplerNode>() {
sampler_node.set_sample(mono_samples, sample_rate);
}
}
}
}
}
Command::SamplerSetRootNote(track_id, node_id, root_note) => {
use crate::audio::node_graph::nodes::SimpleSamplerNode;
if let Some(TrackNode::Midi(track)) = self.project.get_track_mut(track_id) {
let graph = &mut track.instrument_graph;
let node_idx = NodeIndex::new(node_id as usize);
if let Some(graph_node) = graph.get_graph_node_mut(node_idx) {
if let Some(sampler_node) = graph_node.node.as_any_mut().downcast_mut::<SimpleSamplerNode>() {
sampler_node.set_root_note(root_note);
}
}
}
}
Command::MultiSamplerAddLayer(track_id, node_id, file_path, key_min, key_max, root_key, velocity_min, velocity_max, loop_start, loop_end, loop_mode) => {
use crate::audio::node_graph::nodes::MultiSamplerNode;
@ -1604,6 +1696,29 @@ impl Engine {
}
}
Command::MultiSamplerAddLayerFromPool(track_id, node_id, pool_index, key_min, key_max, root_key) => {
use crate::audio::node_graph::nodes::MultiSamplerNode;
use crate::audio::node_graph::nodes::LoopMode;
let sample_result = Self::read_mono_from_pool(&self.audio_pool, pool_index);
if let Some((mono_samples, sample_rate)) = sample_result {
if let Some(TrackNode::Midi(track)) = self.project.get_track_mut(track_id) {
let graph = &mut track.instrument_graph;
let node_idx = NodeIndex::new(node_id as usize);
if let Some(graph_node) = graph.get_graph_node_mut(node_idx) {
if let Some(multi_node) = graph_node.node.as_any_mut().downcast_mut::<MultiSamplerNode>() {
multi_node.add_layer(
mono_samples, sample_rate,
key_min, key_max, root_key,
0, 127, None, None, LoopMode::OneShot,
);
}
}
}
}
}
Command::MultiSamplerUpdateLayer(track_id, node_id, layer_index, key_min, key_max, root_key, velocity_min, velocity_max, loop_start, loop_end, loop_mode) => {
use crate::audio::node_graph::nodes::MultiSamplerNode;
@ -3099,11 +3214,26 @@ impl EngineController {
let _ = self.command_tx.push(Command::SamplerLoadSample(track_id, node_id, file_path));
}
/// Load a sample from the audio pool into a SimpleSampler node
///
/// Fire-and-forget: the push result is discarded, so if the command
/// queue is full the command is silently dropped.
/// NOTE(review): presumably acceptable for UI-driven commands — confirm.
pub fn sampler_load_from_pool(&mut self, track_id: TrackId, node_id: u32, pool_index: usize) {
let _ = self.command_tx.push(Command::SamplerLoadFromPool(track_id, node_id, pool_index));
}
/// Set the root note for a SimpleSampler node
///
/// `root_note` is the MIDI note number (0-127) at which the sample plays
/// back untransposed. Fire-and-forget: a full queue drops the command.
pub fn sampler_set_root_note(&mut self, track_id: TrackId, node_id: u32, root_note: u8) {
let _ = self.command_tx.push(Command::SamplerSetRootNote(track_id, node_id, root_note));
}
/// Add a sample layer to a MultiSampler node
///
/// The layer is loaded from `file_path` and mapped to the MIDI key range
/// `key_min..=key_max` (original pitch at `root_key`) and the velocity
/// range `velocity_min..=velocity_max`. `loop_start`/`loop_end` are sample
/// offsets; `None` leaves looping bounds unset. Fire-and-forget push.
pub fn multi_sampler_add_layer(&mut self, track_id: TrackId, node_id: u32, file_path: String, key_min: u8, key_max: u8, root_key: u8, velocity_min: u8, velocity_max: u8, loop_start: Option<usize>, loop_end: Option<usize>, loop_mode: crate::audio::node_graph::nodes::LoopMode) {
let _ = self.command_tx.push(Command::MultiSamplerAddLayer(track_id, node_id, file_path, key_min, key_max, root_key, velocity_min, velocity_max, loop_start, loop_end, loop_mode));
}
/// Add a sample layer from the audio pool to a MultiSampler node
///
/// Pool variant of `multi_sampler_add_layer`: the sample comes from
/// `pool_index` instead of a file path, and velocity/loop settings use
/// the engine-side defaults. Fire-and-forget push.
pub fn multi_sampler_add_layer_from_pool(&mut self, track_id: TrackId, node_id: u32, pool_index: usize, key_min: u8, key_max: u8, root_key: u8) {
let _ = self.command_tx.push(Command::MultiSamplerAddLayerFromPool(track_id, node_id, pool_index, key_min, key_max, root_key));
}
/// Update a MultiSampler layer's configuration
pub fn multi_sampler_update_layer(&mut self, track_id: TrackId, node_id: u32, layer_index: usize, key_min: u8, key_max: u8, root_key: u8, velocity_min: u8, velocity_max: u8, loop_start: Option<usize>, loop_end: Option<usize>, loop_mode: crate::audio::node_graph::nodes::LoopMode) {
let _ = self.command_tx.push(Command::MultiSamplerUpdateLayer(track_id, node_id, layer_index, key_min, key_max, root_key, velocity_min, velocity_max, loop_start, loop_end, loop_mode));

View File

@ -4,6 +4,10 @@ use super::project::Project;
use crate::command::AudioEvent;
use std::path::Path;
/// Render chunk size for offline export. Matches the real-time playback buffer size
/// so that MIDI events are processed at the same granularity, avoiding timing jitter.
const EXPORT_CHUNK_FRAMES: usize = 256;
/// Supported export formats
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ExportFormat {
@ -73,6 +77,21 @@ pub fn export_audio<P: AsRef<Path>>(
mut event_tx: Option<&mut rtrb::Producer<AudioEvent>>,
) -> Result<(), String>
{
// Validate duration
let duration = settings.end_time - settings.start_time;
if duration <= 0.0 {
return Err(format!(
"Export duration is zero or negative (start={:.3}s, end={:.3}s). \
Check that the timeline has content.",
settings.start_time, settings.end_time
));
}
let total_frames = (duration * settings.sample_rate as f64).round() as usize;
if total_frames == 0 {
return Err("Export would produce zero audio frames".to_string());
}
// Reset all node graphs to clear stale effect buffers (echo, reverb, etc.)
project.reset_all_graphs();
@ -135,9 +154,7 @@ pub fn render_to_memory(
println!("Export: duration={:.3}s, total_frames={}, total_samples={}, channels={}",
duration, total_frames, total_samples, settings.channels);
// Render in chunks to avoid memory issues
const CHUNK_FRAMES: usize = 4096;
let chunk_samples = CHUNK_FRAMES * settings.channels as usize;
let chunk_samples = EXPORT_CHUNK_FRAMES * settings.channels as usize;
// Create buffer for rendering
let mut render_buffer = vec![0.0f32; chunk_samples];
@ -147,7 +164,7 @@ pub fn render_to_memory(
let mut all_samples = Vec::with_capacity(total_samples);
let mut playhead = settings.start_time;
let chunk_duration = CHUNK_FRAMES as f64 / settings.sample_rate as f64;
let chunk_duration = EXPORT_CHUNK_FRAMES as f64 / settings.sample_rate as f64;
let mut frames_rendered = 0;
// Render the entire timeline in chunks
@ -345,9 +362,8 @@ fn export_mp3<P: AsRef<Path>>(
let duration = settings.end_time - settings.start_time;
let total_frames = (duration * settings.sample_rate as f64).round() as usize;
const CHUNK_FRAMES: usize = 4096;
let chunk_samples = CHUNK_FRAMES * settings.channels as usize;
let chunk_duration = CHUNK_FRAMES as f64 / settings.sample_rate as f64;
let chunk_samples = EXPORT_CHUNK_FRAMES * settings.channels as usize;
let chunk_duration = EXPORT_CHUNK_FRAMES as f64 / settings.sample_rate as f64;
// Create buffers for rendering
let mut render_buffer = vec![0.0f32; chunk_samples];
@ -513,9 +529,8 @@ fn export_aac<P: AsRef<Path>>(
let duration = settings.end_time - settings.start_time;
let total_frames = (duration * settings.sample_rate as f64).round() as usize;
const CHUNK_FRAMES: usize = 4096;
let chunk_samples = CHUNK_FRAMES * settings.channels as usize;
let chunk_duration = CHUNK_FRAMES as f64 / settings.sample_rate as f64;
let chunk_samples = EXPORT_CHUNK_FRAMES * settings.channels as usize;
let chunk_duration = EXPORT_CHUNK_FRAMES as f64 / settings.sample_rate as f64;
// Create buffers for rendering
let mut render_buffer = vec![0.0f32; chunk_samples];
@ -669,9 +684,13 @@ fn encode_complete_frame_mp3(
channel_layout: ffmpeg_next::channel_layout::ChannelLayout,
pts: i64,
) -> Result<(), String> {
if num_frames == 0 {
return Ok(());
}
let channels = planar_samples.len();
// Create audio frame with exact size
// Create audio frame
let mut frame = ffmpeg_next::frame::Audio::new(
ffmpeg_next::format::Sample::I16(ffmpeg_next::format::sample::Type::Planar),
num_frames,
@ -680,33 +699,23 @@ fn encode_complete_frame_mp3(
frame.set_rate(sample_rate);
frame.set_pts(Some(pts));
// Copy all planar samples to frame
for ch in 0..channels {
let plane = frame.data_mut(ch);
let src = &planar_samples[ch];
// Verify buffer size
let byte_size = num_frames * std::mem::size_of::<i16>();
if plane.len() < byte_size {
return Err(format!(
"FFmpeg frame buffer too small: {} bytes, need {} bytes",
plane.len(), byte_size
));
}
// Safe byte-level copy
for (i, &sample) in src.iter().enumerate() {
let bytes = sample.to_ne_bytes();
let offset = i * 2;
plane[offset..offset + 2].copy_from_slice(&bytes);
}
// Verify frame was allocated (check linesize[0] via planes())
if frame.planes() == 0 {
return Err("FFmpeg failed to allocate audio frame. Try exporting as WAV instead.".to_string());
}
// Copy all planar samples to frame
// Use plane_mut::<i16> instead of data_mut — data_mut(ch) is buggy for planar audio:
// FFmpeg only sets linesize[0], so data_mut returns 0-length slices for ch > 0.
// plane_mut uses self.samples() for the length, which is correct for all planes.
for ch in 0..channels {
let plane = frame.plane_mut::<i16>(ch);
plane.copy_from_slice(&planar_samples[ch]);
}
// Send frame to encoder
encoder.send_frame(&frame)
.map_err(|e| format!("Failed to send frame: {}", e))?;
// Receive and write packets
receive_and_write_packets(encoder, output)?;
Ok(())
@ -722,9 +731,13 @@ fn encode_complete_frame_aac(
channel_layout: ffmpeg_next::channel_layout::ChannelLayout,
pts: i64,
) -> Result<(), String> {
if num_frames == 0 {
return Ok(());
}
let channels = planar_samples.len();
// Create audio frame with exact size
// Create audio frame
let mut frame = ffmpeg_next::frame::Audio::new(
ffmpeg_next::format::Sample::F32(ffmpeg_next::format::sample::Type::Planar),
num_frames,
@ -733,33 +746,23 @@ fn encode_complete_frame_aac(
frame.set_rate(sample_rate);
frame.set_pts(Some(pts));
// Copy all planar samples to frame
for ch in 0..channels {
let plane = frame.data_mut(ch);
let src = &planar_samples[ch];
// Verify buffer size
let byte_size = num_frames * std::mem::size_of::<f32>();
if plane.len() < byte_size {
return Err(format!(
"FFmpeg frame buffer too small: {} bytes, need {} bytes",
plane.len(), byte_size
));
}
// Safe byte-level copy
for (i, &sample) in src.iter().enumerate() {
let bytes = sample.to_ne_bytes();
let offset = i * 4;
plane[offset..offset + 4].copy_from_slice(&bytes);
}
// Verify frame was allocated
if frame.planes() == 0 {
return Err("FFmpeg failed to allocate audio frame. Try exporting as WAV instead.".to_string());
}
// Copy all planar samples to frame
// Use plane_mut::<f32> instead of data_mut — data_mut(ch) is buggy for planar audio:
// FFmpeg only sets linesize[0], so data_mut returns 0-length slices for ch > 0.
// plane_mut uses self.samples() for the length, which is correct for all planes.
for ch in 0..channels {
let plane = frame.plane_mut::<f32>(ch);
plane.copy_from_slice(&planar_samples[ch]);
}
// Send frame to encoder
encoder.send_frame(&frame)
.map_err(|e| format!("Failed to send frame: {}", e))?;
// Receive and write packets
receive_and_write_packets(encoder, output)?;
Ok(())

View File

@ -101,8 +101,7 @@ impl OscilloscopeNode {
let inputs = vec![
NodePort::new("Audio In", SignalType::Audio, 0),
NodePort::new("V/oct", SignalType::CV, 1),
NodePort::new("CV In", SignalType::CV, 2),
NodePort::new("CV In", SignalType::CV, 1),
];
let outputs = vec![
@ -223,13 +222,24 @@ impl AudioNode for OscilloscopeNode {
let output = &mut outputs[0];
let len = input.len().min(output.len());
// Read V/oct input if available and update trigger period
// Read CV input if available (port 1) — used for both display and V/Oct triggering
if inputs.len() > 1 && !inputs[1].is_empty() {
self.voct_value = inputs[1][0]; // Use first sample of V/oct input
let frequency = Self::voct_to_frequency(self.voct_value);
// Calculate period in samples, clamped to reasonable range
let period_samples = (sample_rate as f32 / frequency).max(1.0);
self.trigger_period = period_samples as usize;
let cv_input = inputs[1];
let cv_len = len.min(cv_input.len());
// Check if connected (not NaN sentinel)
if cv_len > 0 && !cv_input[0].is_nan() {
// Update V/Oct trigger period from CV value
self.voct_value = cv_input[0];
let frequency = Self::voct_to_frequency(self.voct_value);
let period_samples = (sample_rate as f32 / frequency).max(1.0);
self.trigger_period = period_samples as usize;
// Capture CV samples to buffer
if let Ok(mut cv_buffer) = self.cv_buffer.lock() {
cv_buffer.write(&cv_input[..cv_len]);
}
}
}
// Update sample counter for V/oct triggering
@ -245,14 +255,6 @@ impl AudioNode for OscilloscopeNode {
buffer.write(&input[..len]);
}
// Capture CV samples if CV input is connected (input 2)
if inputs.len() > 2 && !inputs[2].is_empty() {
let cv_input = inputs[2];
if let Ok(mut cv_buffer) = self.cv_buffer.lock() {
cv_buffer.write(&cv_input[..len.min(cv_input.len())]);
}
}
// Update last sample for trigger detection (use left channel, frame 0)
if !input.is_empty() {
self.last_sample = input[0];

View File

@ -25,6 +25,7 @@ pub struct SimpleSamplerNode {
gain: f32,
loop_enabled: bool,
pitch_shift: f32, // Additional pitch shift in semitones
root_note: u8, // MIDI note for original pitch playback (default 69 = A4)
inputs: Vec<NodePort>,
outputs: Vec<NodePort>,
@ -61,6 +62,7 @@ impl SimpleSamplerNode {
gain: 1.0,
loop_enabled: false,
pitch_shift: 0.0,
root_note: 69, // A4 — V/Oct 0.0 from MIDI-to-CV
inputs,
outputs,
parameters,
@ -101,13 +103,25 @@ impl SimpleSamplerNode {
}
/// Convert V/oct CV to playback speed multiplier
/// 0V = 1.0 (original speed), +1V = 2.0 (one octave up), -1V = 0.5 (one octave down)
/// Accounts for root_note: when the incoming MIDI note matches root_note,
/// the sample plays at original speed. V/Oct 0.0 = A4 (MIDI 69) by convention.
fn voct_to_speed(&self, voct: f32) -> f32 {
    // Offset so root_note plays at original speed: root_offset is the V/Oct
    // value a MIDI-to-CV converter emits for root_note (A4 = 0.0 V).
    let root_offset = (self.root_note as f32 - 69.0) / 12.0;
    // pitch_shift is an additional user-controlled offset in semitones.
    let total_semitones = (voct - root_offset) * 12.0 + self.pitch_shift;
    2.0_f32.powf(total_semitones / 12.0)
}
/// Set the root note (MIDI note number for original-pitch playback)
pub fn set_root_note(&mut self, note: u8) {
    // Pin anything above the MIDI range down to the top valid note.
    self.root_note = if note > 127 { 127 } else { note };
}
/// Get the current root note
/// (the MIDI note number at which the sample plays back untransposed).
pub fn root_note(&self) -> u8 {
self.root_note
}
/// Read sample at playhead with linear interpolation
fn read_sample(&self, playhead: f32, sample: &[f32]) -> f32 {
if sample.is_empty() {

View File

@ -1,4 +1,4 @@
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType};
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType, cv_input_or_default};
use crate::audio::midi::MidiEvent;
const PARAM_RISE_TIME: u32 = 0;
@ -90,9 +90,8 @@ impl AudioNode for SlewLimiterNode {
return;
}
let input = inputs[0];
let output = &mut outputs[0];
let length = input.len().min(output.len());
let length = output.len();
// Calculate maximum change per sample
let sample_duration = 1.0 / sample_rate as f32;
@ -111,7 +110,9 @@ impl AudioNode for SlewLimiterNode {
};
for i in 0..length {
let target = input[i];
// Use cv_input_or_default to handle unconnected inputs (NaN sentinel)
// Default to last_value so output holds steady when unconnected
let target = cv_input_or_default(inputs, 0, i, self.last_value);
let difference = target - self.last_value;
let max_change = if difference > 0.0 {

View File

@ -177,8 +177,14 @@ pub enum Command {
/// Load a sample into a SimpleSampler node (track_id, node_id, file_path)
SamplerLoadSample(TrackId, u32, String),
/// Load a sample from the audio pool into a SimpleSampler node (track_id, node_id, pool_index)
SamplerLoadFromPool(TrackId, u32, usize),
/// Set the root note (original pitch) for a SimpleSampler node (track_id, node_id, midi_note)
SamplerSetRootNote(TrackId, u32, u8),
/// Add a sample layer to a MultiSampler node (track_id, node_id, file_path, key_min, key_max, root_key, velocity_min, velocity_max, loop_start, loop_end, loop_mode)
MultiSamplerAddLayer(TrackId, u32, String, u8, u8, u8, u8, u8, Option<usize>, Option<usize>, LoopMode),
/// Add a sample layer from the audio pool to a MultiSampler node (track_id, node_id, pool_index, key_min, key_max, root_key)
MultiSamplerAddLayerFromPool(TrackId, u32, usize, u8, u8, u8),
/// Update a MultiSampler layer's configuration (track_id, node_id, layer_index, key_min, key_max, root_key, velocity_min, velocity_max, loop_start, loop_end, loop_mode)
MultiSamplerUpdateLayer(TrackId, u32, usize, u8, u8, u8, u8, u8, Option<usize>, Option<usize>, LoopMode),
/// Remove a layer from a MultiSampler node (track_id, node_id, layer_index)

View File

@ -332,7 +332,9 @@ where
ports: &SlotMap<Key, Value>,
port_locations: &PortLocations,
cursor_pos: Pos2,
zoom: f32,
) -> Pos2 {
let snap_distance = DISTANCE_TO_CONNECT * zoom;
ports
.iter()
.find_map(|(port_id, _)| {
@ -352,7 +354,7 @@ where
.unwrap()
})
.filter(|nearest_hook| {
nearest_hook.distance(cursor_pos) < DISTANCE_TO_CONNECT
nearest_hook.distance(cursor_pos) < snap_distance
})
.copied()
})
@ -372,6 +374,7 @@ where
&self.graph.inputs,
&port_locations,
cursor_pos,
self.pan_zoom.zoom,
),
),
AnyParameterId::Input(_) => (
@ -381,6 +384,7 @@ where
&self.graph.outputs,
&port_locations,
cursor_pos,
self.pan_zoom.zoom,
),
start_pos,
),
@ -627,11 +631,11 @@ where
ui: &mut Ui,
user_state: &mut UserState,
) -> Vec<NodeResponse<UserResponse, NodeData>> {
let mut child_ui = ui.child_ui_with_id_source(
Rect::from_min_size(*self.position + self.pan, Self::MAX_NODE_SIZE.into()),
Layout::default(),
self.node_id,
None,
let mut child_ui = ui.new_child(
egui::UiBuilder::new()
.max_rect(Rect::from_min_size(*self.position + self.pan, Self::MAX_NODE_SIZE.into()))
.layout(Layout::default())
.id_salt(self.node_id),
);
Self::show_graph_node(self, pan_zoom, &mut child_ui, user_state)
@ -676,7 +680,11 @@ where
inner_rect.max.x = inner_rect.max.x.max(inner_rect.min.x);
inner_rect.max.y = inner_rect.max.y.max(inner_rect.min.y);
let mut child_ui = ui.child_ui(inner_rect, *ui.layout(), None);
let mut child_ui = ui.new_child(
egui::UiBuilder::new()
.max_rect(inner_rect)
.layout(*ui.layout()),
);
// Get interaction rect from memory, it may expand after the window response on resize.
let interaction_rect = ui
@ -904,9 +912,10 @@ where
let resp = ui.allocate_rect(port_rect, sense);
// Check if the mouse is within the port's interaction rect
// Check if the mouse is within snap distance of the port center
// Uses circular distance to match snap_to_ports() behavior
let close_enough = if let Some(pointer_pos) = ui.ctx().pointer_hover_pos() {
port_rect.contains(pointer_pos)
port_pos.distance(pointer_pos) < DISTANCE_TO_CONNECT * pan_zoom.zoom
} else {
false
};

View File

@ -241,14 +241,14 @@ impl<NodeData> Node<NodeData> {
pub fn inputs<'a, DataType, DataValue>(
&'a self,
graph: &'a Graph<NodeData, DataType, DataValue>,
) -> impl Iterator<Item = &InputParam<DataType, DataValue>> + 'a {
) -> impl Iterator<Item = &'a InputParam<DataType, DataValue>> + 'a {
self.input_ids().map(|id| graph.get_input(id))
}
pub fn outputs<'a, DataType, DataValue>(
&'a self,
graph: &'a Graph<NodeData, DataType, DataValue>,
) -> impl Iterator<Item = &OutputParam<DataType>> + 'a {
) -> impl Iterator<Item = &'a OutputParam<DataType>> + 'a {
self.output_ids().map(|id| graph.get_output(id))
}

View File

@ -84,7 +84,7 @@ pub trait DataTypeTrait<UserState>: PartialEq + Eq {
/// }
/// }
/// ```
fn name(&self) -> std::borrow::Cow<str>;
fn name(&self) -> std::borrow::Cow<'_, str>;
}
/// This trait must be implemented for the `NodeData` generic parameter of the
@ -251,7 +251,7 @@ pub trait NodeTemplateTrait: Clone {
/// The return type is Cow<str> to allow returning owned or borrowed values
/// more flexibly. Refer to the documentation for `DataTypeTrait::name` for
/// more information
fn node_finder_label(&self, user_state: &mut Self::UserState) -> std::borrow::Cow<str>;
fn node_finder_label(&self, user_state: &mut Self::UserState) -> std::borrow::Cow<'_, str>;
/// Vec of categories to which the node belongs.
///

View File

@ -124,6 +124,54 @@ impl Action for SetLayerPropertiesAction {
Ok(())
}
/// Push the new layer property to the audio backend for every affected
/// layer that maps to an audio track. Succeeds silently when no audio
/// controller is attached or a layer has no track mapping.
fn execute_backend(
    &mut self,
    backend: &mut crate::action::BackendContext,
    _document: &crate::document::Document,
) -> Result<(), String> {
    // No audio backend — nothing to sync.
    let Some(controller) = backend.audio_controller.as_mut() else {
        return Ok(());
    };
    for &layer_id in &self.layer_ids {
        let Some(&track_id) = backend.layer_to_track_map.get(&layer_id) else {
            continue;
        };
        match &self.property {
            LayerProperty::Volume(v) => controller.set_track_volume(track_id, *v as f32),
            LayerProperty::Muted(m) => controller.set_track_mute(track_id, *m),
            LayerProperty::Soloed(s) => controller.set_track_solo(track_id, *s),
            _ => {} // Locked/Opacity/Visible are UI-only
        }
    }
    Ok(())
}
/// Restore the previous property values in the audio backend (undo of
/// `execute_backend`). `old_values` is index-aligned with `layer_ids`;
/// a `None` entry means there is nothing to restore for that layer.
fn rollback_backend(
    &mut self,
    backend: &mut crate::action::BackendContext,
    _document: &crate::document::Document,
) -> Result<(), String> {
    // No audio backend — nothing to undo on the audio side.
    let Some(controller) = backend.audio_controller.as_mut() else {
        return Ok(());
    };
    for (i, &layer_id) in self.layer_ids.iter().enumerate() {
        let Some(&track_id) = backend.layer_to_track_map.get(&layer_id) else {
            continue;
        };
        let Some(old_value) = &self.old_values[i] else {
            continue;
        };
        match old_value {
            OldValue::Volume(v) => controller.set_track_volume(track_id, *v as f32),
            OldValue::Muted(m) => controller.set_track_mute(track_id, *m),
            OldValue::Soloed(s) => controller.set_track_solo(track_id, *s),
            _ => {} // Locked/Opacity/Visible are UI-only
        }
    }
    Ok(())
}
fn description(&self) -> String {
let property_name = match &self.property {
LayerProperty::Volume(_) => "volume",

View File

@ -196,26 +196,13 @@ fn export_audio_ffmpeg_mp3<P: AsRef<Path>>(
frame.set_rate(settings.sample_rate);
// Copy planar samples to frame
// Use plane_mut::<i16> instead of data_mut — data_mut(ch) is buggy for planar audio:
// FFmpeg only sets linesize[0], so data_mut returns 0-length slices for ch > 0.
// plane_mut uses self.samples() for the length, which is correct for all planes.
for ch in 0..settings.channels as usize {
let plane = frame.data_mut(ch);
let plane = frame.plane_mut::<i16>(ch);
let offset = samples_encoded;
let src = &planar_samples[ch][offset..offset + chunk_size];
// Convert i16 samples to bytes and copy
let byte_size = chunk_size * std::mem::size_of::<i16>();
if plane.len() < byte_size {
return Err(format!(
"FFmpeg frame buffer too small: {} bytes, need {} bytes",
plane.len(), byte_size
));
}
// Safe byte-level copy using slice operations
for (i, &sample) in src.iter().enumerate() {
let bytes = sample.to_ne_bytes();
let offset = i * 2;
plane[offset..offset + 2].copy_from_slice(&bytes);
}
plane.copy_from_slice(&planar_samples[ch][offset..offset + chunk_size]);
}
// Send frame to encoder

View File

@ -41,35 +41,61 @@ pub struct ExportDialog {
/// Output file path
pub output_path: Option<PathBuf>,
/// Selected audio preset index (for UI)
pub selected_audio_preset: usize,
/// Error message (if any)
pub error_message: Option<String>,
/// Whether advanced settings are shown
pub show_advanced: bool,
/// Selected video preset index
pub selected_video_preset: usize,
/// Output filename (editable text, without directory)
pub output_filename: String,
/// Output directory
pub output_dir: PathBuf,
}
impl Default for ExportDialog {
    fn default() -> Self {
        // Default the output directory to ~/Music when it exists, otherwise
        // the home directory, otherwise the current directory.
        // NOTE(review): $HOME is Unix-only; on Windows this falls back to
        // "." — confirm that's acceptable.
        let home = std::env::var("HOME")
            .map(PathBuf::from)
            .unwrap_or_else(|_| PathBuf::from("."));
        let music_dir = {
            let m = home.join("Music");
            if m.is_dir() { m } else { home }
        };

        Self {
            open: false,
            export_type: ExportType::Audio,
            // Standard MP3 is the simple-dialog default preset.
            audio_settings: AudioExportSettings::standard_mp3(),
            video_settings: VideoExportSettings::default(),
            include_audio: true,
            output_path: None,
            selected_audio_preset: 0,
            error_message: None,
            show_advanced: false,
            selected_video_preset: 0,
            output_filename: String::new(),
            output_dir: music_dir,
        }
    }
}
impl ExportDialog {
/// Open the dialog with default settings
pub fn open(&mut self, timeline_duration: f64) {
pub fn open(&mut self, timeline_duration: f64, project_name: &str) {
self.open = true;
self.audio_settings.end_time = timeline_duration;
self.video_settings.end_time = timeline_duration;
self.error_message = None;
// Pre-populate filename from project name if not already set
if self.output_filename.is_empty() || !self.output_filename.contains(project_name) {
let ext = self.audio_settings.format.extension();
self.output_filename = format!("{}.{}", project_name, ext);
}
}
/// Close the dialog
@ -78,6 +104,27 @@ impl ExportDialog {
self.error_message = None;
}
/// Update the filename extension to match the current format
fn update_filename_extension(&mut self) {
    // Pick the extension for whichever export type is active.
    let ext = match self.export_type {
        ExportType::Audio => self.audio_settings.format.extension(),
        ExportType::Video => self.video_settings.codec.container_format(),
    };
    // Swap the extension after the last dot, or append one if the
    // filename has none. An empty filename is left untouched.
    match self.output_filename.rfind('.') {
        Some(dot_pos) => {
            self.output_filename.truncate(dot_pos + 1);
            self.output_filename.push_str(ext);
        }
        None if !self.output_filename.is_empty() => {
            self.output_filename.push('.');
            self.output_filename.push_str(ext);
        }
        None => {}
    }
}
/// Build the full output path from directory + filename
/// (no validation — the caller is responsible for checking the result).
fn build_output_path(&self) -> PathBuf {
self.output_dir.join(&self.output_filename)
}
/// Render the export dialog
///
/// Returns Some(ExportResult) if the user clicked Export, None otherwise.
@ -88,21 +135,19 @@ impl ExportDialog {
let mut should_export = false;
let mut should_close = false;
let mut open = self.open;
let window_title = match self.export_type {
ExportType::Audio => "Export Audio",
ExportType::Video => "Export Video",
};
egui::Window::new(window_title)
.open(&mut open)
.resizable(false)
.collapsible(false)
.anchor(egui::Align2::CENTER_CENTER, egui::Vec2::ZERO)
let modal_response = egui::Modal::new(egui::Id::new("export_dialog_modal"))
.show(ctx, |ui| {
ui.set_width(500.0);
ui.heading(window_title);
ui.add_space(8.0);
// Error message (if any)
if let Some(error) = &self.error_message {
ui.colored_label(egui::Color32::RED, error);
@ -111,30 +156,42 @@ impl ExportDialog {
// Export type selection (tabs)
ui.horizontal(|ui| {
ui.selectable_value(&mut self.export_type, ExportType::Audio, "🎵 Audio");
ui.selectable_value(&mut self.export_type, ExportType::Video, "🎬 Video");
if ui.selectable_value(&mut self.export_type, ExportType::Audio, "Audio").clicked() {
self.update_filename_extension();
}
if ui.selectable_value(&mut self.export_type, ExportType::Video, "Video").clicked() {
self.update_filename_extension();
}
});
ui.add_space(12.0);
ui.separator();
ui.add_space(12.0);
// Render either audio or video settings
// Basic settings
match self.export_type {
ExportType::Audio => self.render_audio_settings(ui),
ExportType::Video => self.render_video_settings(ui),
ExportType::Audio => self.render_audio_basic(ui),
ExportType::Video => self.render_video_basic(ui),
}
ui.add_space(12.0);
// Time range (common to both)
self.render_time_range(ui);
ui.add_space(12.0);
// Output file path (common to both)
// Output file
self.render_output_selection(ui);
ui.add_space(4.0);
// Advanced toggle
ui.toggle_value(&mut self.show_advanced, "Advanced settings");
if self.show_advanced {
ui.add_space(8.0);
match self.export_type {
ExportType::Audio => self.render_audio_advanced(ui),
ExportType::Video => self.render_video_advanced(ui),
}
}
ui.add_space(16.0);
// Buttons
@ -151,8 +208,10 @@ impl ExportDialog {
});
});
// Update open state (in case user clicked X button)
self.open = open;
// Close on backdrop click or escape
if modal_response.backdrop_response.clicked() {
should_close = true;
}
if should_close {
self.close();
@ -160,66 +219,49 @@ impl ExportDialog {
}
if should_export {
self.output_path = Some(self.build_output_path());
return self.handle_export();
}
None
}
/// Render audio export settings UI
fn render_audio_settings(&mut self, ui: &mut egui::Ui) {
// Preset selection
ui.heading("Preset");
ui.horizontal(|ui| {
let presets = [
("High Quality WAV", AudioExportSettings::high_quality_wav()),
("High Quality FLAC", AudioExportSettings::high_quality_flac()),
("Standard MP3", AudioExportSettings::standard_mp3()),
("Standard AAC", AudioExportSettings::standard_aac()),
("High Quality MP3", AudioExportSettings::high_quality_mp3()),
("High Quality AAC", AudioExportSettings::high_quality_aac()),
("Podcast MP3", AudioExportSettings::podcast_mp3()),
("Podcast AAC", AudioExportSettings::podcast_aac()),
];
egui::ComboBox::from_id_salt("export_preset")
.selected_text(presets[self.selected_audio_preset].0)
.show_ui(ui, |ui| {
for (i, (name, _)) in presets.iter().enumerate() {
if ui.selectable_value(&mut self.selected_audio_preset, i, *name).clicked() {
// Save current time range before applying preset
let saved_start = self.audio_settings.start_time;
let saved_end = self.audio_settings.end_time;
self.audio_settings = presets[i].1.clone();
// Restore time range
self.audio_settings.start_time = saved_start;
self.audio_settings.end_time = saved_end;
}
}
});
});
ui.add_space(12.0);
ui.add_space(12.0);
// Format settings
ui.heading("Format");
/// Render basic audio settings (format + filename)
fn render_audio_basic(&mut self, ui: &mut egui::Ui) {
    ui.horizontal(|ui| {
        ui.label("Format:");
        // Remember the format before the combo so we can react to a change.
        let prev_format = self.audio_settings.format;
        egui::ComboBox::from_id_salt("audio_format")
            .selected_text(self.audio_settings.format.name())
            .show_ui(ui, |ui| {
                ui.selectable_value(&mut self.audio_settings.format, AudioFormat::Mp3, "MP3");
                ui.selectable_value(&mut self.audio_settings.format, AudioFormat::Aac, "AAC");
                ui.selectable_value(&mut self.audio_settings.format, AudioFormat::Flac, "FLAC (Lossless)");
                ui.selectable_value(&mut self.audio_settings.format, AudioFormat::Wav, "WAV (Uncompressed)");
            });

        if self.audio_settings.format != prev_format {
            // Keep the output filename's extension in sync with the format.
            self.update_filename_extension();
            // Apply sensible defaults when switching formats
            match self.audio_settings.format {
                AudioFormat::Mp3 => {
                    self.audio_settings.sample_rate = 44100;
                    self.audio_settings.bitrate_kbps = 192;
                }
                AudioFormat::Aac => {
                    self.audio_settings.sample_rate = 44100;
                    self.audio_settings.bitrate_kbps = 256;
                }
                AudioFormat::Flac | AudioFormat::Wav => {
                    self.audio_settings.sample_rate = 48000;
                    self.audio_settings.bit_depth = 24;
                }
            }
        }
    });
}
ui.add_space(8.0);
// Audio settings
/// Render advanced audio settings (sample rate, channels, bit depth, bitrate, time range)
fn render_audio_advanced(&mut self, ui: &mut egui::Ui) {
ui.horizontal(|ui| {
ui.label("Sample Rate:");
egui::ComboBox::from_id_salt("sample_rate")
@ -237,8 +279,6 @@ impl ExportDialog {
ui.radio_value(&mut self.audio_settings.channels, 2, "Stereo");
});
ui.add_space(8.0);
// Format-specific settings
if self.audio_settings.format.supports_bit_depth() {
ui.horizontal(|ui| {
@ -261,12 +301,48 @@ impl ExportDialog {
});
});
}
ui.add_space(8.0);
// Time range
self.render_time_range(ui);
}
/// Render video export settings UI
fn render_video_settings(&mut self, ui: &mut egui::Ui) {
// Codec selection
ui.heading("Codec");
/// Video presets: (name, codec, quality, width, height, fps)
const VIDEO_PRESETS: &'static [(&'static str, VideoCodec, VideoQuality, u32, u32, f64)] = &[
("1080p H.264 (Standard)", VideoCodec::H264, VideoQuality::High, 1920, 1080, 30.0),
("1080p H.264 60fps", VideoCodec::H264, VideoQuality::High, 1920, 1080, 60.0),
("4K H.264", VideoCodec::H264, VideoQuality::VeryHigh, 3840, 2160, 30.0),
("720p H.264 (Small)", VideoCodec::H264, VideoQuality::Medium, 1280, 720, 30.0),
("1080p H.265 (Smaller)", VideoCodec::H265, VideoQuality::High, 1920, 1080, 30.0),
("1080p VP9 (WebM)", VideoCodec::VP9, VideoQuality::High, 1920, 1080, 30.0),
("1080p ProRes 422", VideoCodec::ProRes422, VideoQuality::VeryHigh, 1920, 1080, 30.0),
];
    /// Render basic video settings (preset dropdown)
    ///
    /// Selecting a preset copies all of its fields (codec, quality, resolution,
    /// framerate) into `self.video_settings`.
    fn render_video_basic(&mut self, ui: &mut egui::Ui) {
        ui.horizontal(|ui| {
            ui.label("Preset:");
            egui::ComboBox::from_id_salt("video_preset")
                .selected_text(Self::VIDEO_PRESETS[self.selected_video_preset].0)
                .show_ui(ui, |ui| {
                    for (i, preset) in Self::VIDEO_PRESETS.iter().enumerate() {
                        if ui.selectable_value(&mut self.selected_video_preset, i, preset.0).clicked() {
                            let (_, codec, quality, w, h, fps) = *preset;
                            self.video_settings.codec = codec;
                            self.video_settings.quality = quality;
                            self.video_settings.width = Some(w);
                            self.video_settings.height = Some(h);
                            self.video_settings.framerate = fps;
                            // The codec determines the container format, so the
                            // output filename's extension must be refreshed too.
                            self.update_filename_extension();
                        }
                    }
                });
        });
    }
/// Render advanced video settings (codec, resolution, framerate, quality, time range)
fn render_video_advanced(&mut self, ui: &mut egui::Ui) {
ui.horizontal(|ui| {
ui.label("Codec:");
egui::ComboBox::from_id_salt("video_codec")
@ -280,44 +356,34 @@ impl ExportDialog {
});
});
ui.add_space(12.0);
// Resolution
ui.heading("Resolution");
ui.horizontal(|ui| {
ui.label("Width:");
ui.label("Resolution:");
let mut custom_width = self.video_settings.width.unwrap_or(1920);
if ui.add(egui::DragValue::new(&mut custom_width).range(1..=7680)).changed() {
self.video_settings.width = Some(custom_width);
}
ui.label("Height:");
ui.label("x");
let mut custom_height = self.video_settings.height.unwrap_or(1080);
if ui.add(egui::DragValue::new(&mut custom_height).range(1..=4320)).changed() {
self.video_settings.height = Some(custom_height);
}
});
// Resolution presets
ui.horizontal(|ui| {
if ui.button("1080p").clicked() {
if ui.small_button("1080p").clicked() {
self.video_settings.width = Some(1920);
self.video_settings.height = Some(1080);
}
if ui.button("4K").clicked() {
if ui.small_button("4K").clicked() {
self.video_settings.width = Some(3840);
self.video_settings.height = Some(2160);
}
if ui.button("720p").clicked() {
if ui.small_button("720p").clicked() {
self.video_settings.width = Some(1280);
self.video_settings.height = Some(720);
}
});
ui.add_space(12.0);
// Framerate
ui.heading("Framerate");
ui.horizontal(|ui| {
ui.label("FPS:");
egui::ComboBox::from_id_salt("framerate")
@ -329,10 +395,6 @@ impl ExportDialog {
});
});
ui.add_space(12.0);
// Quality
ui.heading("Quality");
ui.horizontal(|ui| {
ui.label("Quality:");
egui::ComboBox::from_id_salt("video_quality")
@ -345,10 +407,12 @@ impl ExportDialog {
});
});
ui.add_space(12.0);
// Include audio checkbox
ui.checkbox(&mut self.include_audio, "Include Audio");
ui.add_space(8.0);
// Time range
self.render_time_range(ui);
}
/// Render time range UI (common to both audio and video)
@ -358,7 +422,6 @@ impl ExportDialog {
ExportType::Video => (&mut self.video_settings.start_time, &mut self.video_settings.end_time),
};
ui.heading("Time Range");
ui.horizontal(|ui| {
ui.label("Start:");
ui.add(egui::DragValue::new(start_time)
@ -377,46 +440,32 @@ impl ExportDialog {
ui.label(format!("Duration: {:.2} seconds", duration));
}
/// Render output file selection UI (common to both audio and video)
/// Render output file selection UI
fn render_output_selection(&mut self, ui: &mut egui::Ui) {
ui.heading("Output");
ui.horizontal(|ui| {
let path_text = self.output_path.as_ref()
.map(|p| p.display().to_string())
.unwrap_or_else(|| "No file selected".to_string());
ui.label("File:");
ui.text_edit_singleline(&mut path_text.clone());
if ui.button("Browse...").clicked() {
// Determine file extension and filter based on export type
let (default_name, filter_name, extensions) = match self.export_type {
ExportType::Audio => {
let ext = self.audio_settings.format.extension();
(format!("audio.{}", ext), "Audio", vec![ext])
}
ExportType::Video => {
let ext = self.video_settings.codec.container_format();
(format!("video.{}", ext), "Video", vec![ext])
}
};
if let Some(path) = rfd::FileDialog::new()
.set_file_name(&default_name)
.add_filter(filter_name, &extensions)
.save_file()
ui.label("Save to:");
let dir_text = self.output_dir.display().to_string();
ui.label(&dir_text);
if ui.button("Change...").clicked() {
if let Some(dir) = rfd::FileDialog::new()
.set_directory(&self.output_dir)
.pick_folder()
{
self.output_path = Some(path);
self.output_dir = dir;
}
}
});
ui.horizontal(|ui| {
ui.label("Filename:");
ui.text_edit_singleline(&mut self.output_filename);
});
}
/// Handle export button click
fn handle_export(&mut self) -> Option<ExportResult> {
// Check if output path is set
if self.output_path.is_none() {
self.error_message = Some("Please select an output file".to_string());
if self.output_filename.trim().is_empty() {
self.error_message = Some("Please enter a filename".to_string());
return None;
}
@ -529,14 +578,13 @@ impl ExportProgressDialog {
let mut should_cancel = false;
egui::Window::new("Exporting...")
.open(&mut self.open)
.resizable(false)
.collapsible(false)
.anchor(egui::Align2::CENTER_CENTER, egui::Vec2::ZERO)
egui::Modal::new(egui::Id::new("export_progress_modal"))
.show(ctx, |ui| {
ui.set_width(400.0);
ui.heading("Exporting...");
ui.add_space(8.0);
// Status message
ui.label(&self.message);
ui.add_space(8.0);

View File

@ -479,9 +479,11 @@ impl ExportOrchestrator {
) {
println!("🧵 [EXPORT THREAD] run_audio_export started");
// Send start notification
// Send start notification with calculated total frames
let duration = settings.end_time - settings.start_time;
let total_frames = (duration * settings.sample_rate as f64).round() as usize;
progress_tx
.send(ExportProgress::Started { total_frames: 0 })
.send(ExportProgress::Started { total_frames })
.ok();
println!("🧵 [EXPORT THREAD] Sent Started progress");

View File

@ -2113,7 +2113,12 @@ impl EditorApp {
println!("Menu: Export");
// Open export dialog with calculated timeline endpoint
let timeline_endpoint = self.action_executor.document().calculate_timeline_endpoint();
self.export_dialog.open(timeline_endpoint);
// Derive project name from the .beam file path, falling back to document name
let project_name = self.current_file_path.as_ref()
.and_then(|p| p.file_stem())
.map(|s| s.to_string_lossy().into_owned())
.unwrap_or_else(|| self.action_executor.document().name.clone());
self.export_dialog.open(timeline_endpoint, &project_name);
}
MenuAction::Quit => {
println!("Menu: Quit");
@ -2707,6 +2712,41 @@ impl EditorApp {
}
eprintln!("📊 [APPLY] Step 7: Fetched {} raw audio samples in {:.2}ms", raw_fetched, step7_start.elapsed().as_secs_f64() * 1000.0);
// Rebuild MIDI event cache for all MIDI clips (needed for timeline/piano roll rendering)
let step8_start = std::time::Instant::now();
self.midi_event_cache.clear();
let midi_clip_ids: Vec<u32> = self.action_executor.document()
.audio_clips.values()
.filter_map(|clip| clip.midi_clip_id())
.collect();
let mut midi_fetched = 0;
if let Some(ref controller_arc) = self.audio_controller {
let mut controller = controller_arc.lock().unwrap();
for clip_id in midi_clip_ids {
// track_id is unused by the query, pass 0
match controller.query_midi_clip(0, clip_id) {
Ok(clip_data) => {
let processed_events: Vec<(f64, u8, u8, bool)> = clip_data.events.iter()
.filter_map(|event| {
let status_type = event.status & 0xF0;
if status_type == 0x90 || status_type == 0x80 {
let is_note_on = status_type == 0x90 && event.data2 > 0;
Some((event.timestamp, event.data1, event.data2, is_note_on))
} else {
None
}
})
.collect();
self.midi_event_cache.insert(clip_id, processed_events);
midi_fetched += 1;
}
Err(e) => eprintln!("Failed to fetch MIDI clip {}: {}", clip_id, e),
}
}
}
eprintln!("📊 [APPLY] Step 8: Rebuilt MIDI event cache for {} clips in {:.2}ms", midi_fetched, step8_start.elapsed().as_secs_f64() * 1000.0);
// Reset playback state
self.playback_time = 0.0;
self.is_playing = false;

View File

@ -6,6 +6,7 @@ use eframe::egui;
use egui_node_graph2::*;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use crate::widgets;
/// Signal types for audio node graph
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
@ -135,20 +136,85 @@ impl NodeTemplate {
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct NodeData {
pub template: NodeTemplate,
/// Display name of loaded sample (for SimpleSampler/MultiSampler nodes)
#[serde(default)]
pub sample_display_name: Option<String>,
/// Root note (MIDI note number) for original-pitch playback (default 69 = A4)
#[serde(default = "default_root_note")]
pub root_note: u8,
}
fn default_root_note() -> u8 { 69 }
/// Cached oscilloscope waveform data for rendering in node body
pub struct OscilloscopeCache {
    /// Samples for the audio trace
    pub audio: Vec<f32>,
    /// Samples for the CV (control voltage) trace
    pub cv: Vec<f32>,
}
/// Info about an audio clip available for sampler selection
pub struct SamplerClipInfo {
    /// Display name shown in the sampler clip picker
    pub name: String,
    /// Index of the clip's data in the backend audio pool
    pub pool_index: usize,
}
/// Info about an asset folder available for multi-sampler
pub struct SamplerFolderInfo {
    /// Identifier of the folder in the document's asset library
    pub folder_id: uuid::Uuid,
    /// Display name of the folder
    pub name: String,
    /// Pool indices of audio clips in this folder, as (clip name, pool index) pairs
    pub clip_pool_indices: Vec<(String, usize)>,
}
/// Pending sampler load request from bottom_ui(), handled by the node graph pane
///
/// bottom_ui() runs while the graph is being drawn, so it only records the
/// request here; the pane drains it after drawing and performs the actual
/// load (see `handle_pending_sampler_load`).
pub enum PendingSamplerLoad {
    /// Load a single clip from the audio pool into a SimpleSampler
    SimpleFromPool { node_id: NodeId, backend_node_id: u32, pool_index: usize, name: String },
    /// Open a file dialog to load into a SimpleSampler
    SimpleFromFile { node_id: NodeId, backend_node_id: u32 },
    /// Load a single clip from the audio pool as a MultiSampler layer
    MultiFromPool { node_id: NodeId, backend_node_id: u32, pool_index: usize, name: String },
    /// Load all clips in a folder as MultiSampler layers
    MultiFromFolder { node_id: NodeId, folder_id: uuid::Uuid },
    /// Open a file/folder dialog to load into a MultiSampler
    MultiFromFilesystem { node_id: NodeId, backend_node_id: u32 },
}
/// Custom graph state - can track selected nodes, etc.
#[derive(Default)]
pub struct GraphState {
pub active_node: Option<NodeId>,
/// Oscilloscope data cached per node, populated before draw_graph_editor()
pub oscilloscope_data: HashMap<NodeId, OscilloscopeCache>,
/// Audio clips available for sampler selection, populated before draw
pub available_clips: Vec<SamplerClipInfo>,
/// Asset folders available for multi-sampler, populated before draw
pub available_folders: Vec<SamplerFolderInfo>,
/// Pending sample load request from bottom_ui popup
pub pending_sampler_load: Option<PendingSamplerLoad>,
/// Search text for the sampler clip picker popup
pub sampler_search_text: String,
/// Mapping from frontend NodeId to backend node index, populated before draw
pub node_backend_ids: HashMap<NodeId, u32>,
/// Pending root note changes from bottom_ui (node_id, backend_node_id, new_root_note)
pub pending_root_note_changes: Vec<(NodeId, u32, u8)>,
/// Time scale per oscilloscope node (in milliseconds)
pub oscilloscope_time_scale: HashMap<NodeId, f32>,
}
impl Default for GraphState {
fn default() -> Self {
Self {
active_node: None,
oscilloscope_data: HashMap::new(),
available_clips: Vec::new(),
available_folders: Vec::new(),
pending_sampler_load: None,
sampler_search_text: String::new(),
node_backend_ids: HashMap::new(),
pending_root_note_changes: Vec::new(),
oscilloscope_time_scale: HashMap::new(),
}
}
}
/// User response type (empty for now)
@ -333,7 +399,7 @@ impl NodeTemplateTrait for NodeTemplate {
}
fn user_data(&self, _user_state: &mut Self::UserState) -> Self::NodeData {
NodeData { template: *self }
NodeData { template: *self, sample_display_name: None, root_note: 69 }
}
fn build_node(
@ -498,6 +564,7 @@ impl NodeTemplateTrait for NodeTemplate {
graph.add_output_param(node_id, "Audio Out".into(), DataType::Audio);
}
NodeTemplate::SimpleSampler => {
graph.add_input_param(node_id, "V/Oct".into(), DataType::CV, ValueType::float(0.0), InputParamKind::ConnectionOnly, true);
graph.add_input_param(node_id, "Gate".into(), DataType::CV, ValueType::float(0.0), InputParamKind::ConnectionOnly, true);
graph.add_output_param(node_id, "Audio Out".into(), DataType::Audio);
}
@ -781,6 +848,14 @@ impl WidgetValueTrait for ValueType {
}
}
const NOTE_NAMES: [&str; 12] = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"];

/// Format a MIDI note number as scientific pitch notation, e.g. 60 -> "C4", 69 -> "A4".
fn midi_note_name(note: u8) -> String {
    // Pitch class repeats every 12 semitones; MIDI octave 0 starts at note 12,
    // so notes 0-11 land in octave -1.
    let pitch_class = NOTE_NAMES[usize::from(note) % 12];
    let octave = i32::from(note / 12) - 1;
    format!("{}{}", pitch_class, octave)
}
// Implement NodeDataTrait for custom node UI (optional)
impl NodeDataTrait for NodeData {
type Response = UserResponse;
@ -798,7 +873,121 @@ impl NodeDataTrait for NodeData {
where
Self::Response: UserResponseTrait,
{
if self.template == NodeTemplate::Oscilloscope {
if self.template == NodeTemplate::SimpleSampler || self.template == NodeTemplate::MultiSampler {
let is_multi = self.template == NodeTemplate::MultiSampler;
let backend_node_id = user_state.node_backend_ids.get(&node_id).copied().unwrap_or(0);
let default_text = if is_multi { "Select samples..." } else { "Select sample..." };
let button_text = self.sample_display_name.as_deref().unwrap_or(default_text);
let button = ui.button(button_text);
if button.clicked() {
user_state.sampler_search_text.clear();
}
let popup_id = egui::Popup::default_response_id(&button);
let mut close_popup = false;
egui::Popup::from_toggle_button_response(&button)
.close_behavior(egui::PopupCloseBehavior::CloseOnClickOutside)
.width(160.0)
.show(|ui| {
let search_width = ui.available_width();
ui.add_sized([search_width, 0.0], egui::TextEdit::singleline(&mut user_state.sampler_search_text).hint_text("Search..."));
ui.separator();
let search = user_state.sampler_search_text.to_lowercase();
// Folders section (multi-sampler only)
if is_multi && !user_state.available_folders.is_empty() {
ui.label(egui::RichText::new("Folders").small().weak());
for folder in &user_state.available_folders {
if !search.is_empty() && !folder.name.to_lowercase().contains(&search) {
continue;
}
let label = format!("📁 {} ({} clips)", folder.name, folder.clip_pool_indices.len());
if widgets::list_item(ui, false, &label) {
user_state.pending_sampler_load = Some(PendingSamplerLoad::MultiFromFolder {
node_id,
folder_id: folder.folder_id,
});
close_popup = true;
}
}
ui.separator();
}
// Audio clips list
if is_multi {
ui.label(egui::RichText::new("Audio Clips").small().weak());
}
let filtered_clips: Vec<&SamplerClipInfo> = user_state.available_clips.iter()
.filter(|clip| search.is_empty() || clip.name.to_lowercase().contains(&search))
.collect();
let items = filtered_clips.iter().map(|clip| (false, clip.name.as_str()));
if let Some(idx) = widgets::scrollable_list(ui, 200.0, items) {
let clip = filtered_clips[idx];
if is_multi {
user_state.pending_sampler_load = Some(PendingSamplerLoad::MultiFromPool {
node_id,
backend_node_id,
pool_index: clip.pool_index,
name: clip.name.clone(),
});
} else {
user_state.pending_sampler_load = Some(PendingSamplerLoad::SimpleFromPool {
node_id,
backend_node_id,
pool_index: clip.pool_index,
name: clip.name.clone(),
});
}
close_popup = true;
}
ui.separator();
if ui.button("Open...").clicked() {
if is_multi {
user_state.pending_sampler_load = Some(PendingSamplerLoad::MultiFromFilesystem {
node_id,
backend_node_id,
});
} else {
user_state.pending_sampler_load = Some(PendingSamplerLoad::SimpleFromFile {
node_id,
backend_node_id,
});
}
close_popup = true;
}
});
if close_popup {
egui::Popup::close_id(ui.ctx(), popup_id);
}
// Root note selector
ui.horizontal(|ui| {
ui.label(egui::RichText::new("Root:").weak());
let note_name = midi_note_name(self.root_note);
let root_btn = ui.button(&note_name);
let root_popup_id = egui::Popup::default_response_id(&root_btn);
let mut close_root = false;
egui::Popup::from_toggle_button_response(&root_btn)
.close_behavior(egui::PopupCloseBehavior::CloseOnClickOutside)
.width(80.0)
.show(|ui| {
let notes: Vec<(u8, String)> = (24..=96).rev()
.map(|n| (n, midi_note_name(n)))
.collect();
let items = notes.iter().map(|(n, name)| (*n == self.root_note, name.as_str()));
if let Some(idx) = widgets::scrollable_list(ui, 200.0, items) {
let (note, _) = &notes[idx];
user_state.pending_root_note_changes.push((node_id, backend_node_id, *note));
close_root = true;
}
});
if close_root {
egui::Popup::close_id(ui.ctx(), root_popup_id);
}
});
} else if self.template == NodeTemplate::Oscilloscope {
let size = egui::vec2(200.0, 80.0);
let (rect, _) = ui.allocate_exact_size(size, egui::Sense::hover());
let painter = ui.painter_at(rect);
@ -834,6 +1023,15 @@ impl NodeDataTrait for NodeData {
painter.add(egui::Shape::line(points, egui::Stroke::new(1.5, egui::Color32::from_rgb(0xFF, 0x98, 0x00))));
}
}
// Time window slider
let time_ms = user_state.oscilloscope_time_scale.entry(node_id).or_insert(100.0);
ui.horizontal(|ui| {
ui.spacing_mut().slider_width = 140.0;
ui.add(egui::Slider::new(time_ms, 10.0..=1000.0)
.suffix(" ms")
.logarithmic(true));
});
} else {
ui.label("");
}

View File

@ -560,6 +560,126 @@ impl NodeGraphPane {
}
}
fn handle_pending_sampler_load(
&mut self,
load: graph_data::PendingSamplerLoad,
shared: &mut crate::panes::SharedPaneState,
) {
let backend_track_id = match self.backend_track_id {
Some(id) => id,
None => return,
};
let controller_arc = match &shared.audio_controller {
Some(c) => std::sync::Arc::clone(c),
None => return,
};
match load {
graph_data::PendingSamplerLoad::SimpleFromPool { node_id, backend_node_id, pool_index, name } => {
let mut controller = controller_arc.lock().unwrap();
controller.sampler_load_from_pool(backend_track_id, backend_node_id, pool_index);
if let Some(node) = self.state.graph.nodes.get_mut(node_id) {
node.user_data.sample_display_name = Some(name);
}
}
graph_data::PendingSamplerLoad::SimpleFromFile { node_id, backend_node_id } => {
if let Some(path) = rfd::FileDialog::new()
.add_filter("Audio", &["wav", "flac", "mp3", "ogg", "aiff"])
.pick_file()
{
let file_name = path.file_stem()
.map(|s| s.to_string_lossy().to_string())
.unwrap_or_else(|| "Sample".to_string());
// Import into audio pool + asset library, then load from pool
let mut controller = controller_arc.lock().unwrap();
match controller.import_audio_sync(path.to_path_buf()) {
Ok(pool_index) => {
// Add to document asset library
let metadata = daw_backend::io::read_metadata(&path).ok();
let duration = metadata.as_ref().map(|m| m.duration).unwrap_or(0.0);
let clip = lightningbeam_core::clip::AudioClip::new_sampled(&file_name, pool_index, duration);
shared.action_executor.document_mut().add_audio_clip(clip);
// Load into sampler from pool
controller.sampler_load_from_pool(backend_track_id, backend_node_id, pool_index);
}
Err(e) => {
eprintln!("Failed to import audio '{}': {}", path.display(), e);
}
}
if let Some(node) = self.state.graph.nodes.get_mut(node_id) {
node.user_data.sample_display_name = Some(file_name);
}
}
}
graph_data::PendingSamplerLoad::MultiFromPool { node_id, backend_node_id, pool_index, name } => {
let mut controller = controller_arc.lock().unwrap();
// Add as a single layer spanning full key range, root_key = 60 (C4)
controller.multi_sampler_add_layer_from_pool(
backend_track_id, backend_node_id, pool_index,
0, 127, 60,
);
if let Some(node) = self.state.graph.nodes.get_mut(node_id) {
node.user_data.sample_display_name = Some(name);
}
}
graph_data::PendingSamplerLoad::MultiFromFolder { node_id, folder_id } => {
// Find folder clips from available_folders
let folder_clips: Vec<(String, usize)> = self.user_state.available_folders.iter()
.find(|f| f.folder_id == folder_id)
.map(|f| f.clip_pool_indices.clone())
.unwrap_or_default();
if !folder_clips.is_empty() {
// TODO: Add MultiSamplerLoadFromPool command to avoid disk re-reads.
// For now, folder loading is a placeholder — the UI is wired up but
// loading multi-sampler layers from pool requires a new backend command.
let folder_name = self.user_state.available_folders.iter()
.find(|f| f.folder_id == folder_id)
.map(|f| f.name.clone())
.unwrap_or_else(|| "Folder".to_string());
eprintln!("MultiSampler folder load not yet implemented for folder: {}", folder_name);
if let Some(node) = self.state.graph.nodes.get_mut(node_id) {
node.user_data.sample_display_name = Some(format!("📁 {}", folder_name));
}
}
}
graph_data::PendingSamplerLoad::MultiFromFilesystem { node_id, backend_node_id } => {
if let Some(path) = rfd::FileDialog::new()
.add_filter("Audio", &["wav", "flac", "mp3", "ogg", "aiff"])
.pick_file()
{
let file_name = path.file_stem()
.map(|s| s.to_string_lossy().to_string())
.unwrap_or_else(|| "Sample".to_string());
let mut controller = controller_arc.lock().unwrap();
// Import into audio pool + asset library, then load from pool
match controller.import_audio_sync(path.to_path_buf()) {
Ok(pool_index) => {
let metadata = daw_backend::io::read_metadata(&path).ok();
let duration = metadata.as_ref().map(|m| m.duration).unwrap_or(0.0);
let clip = lightningbeam_core::clip::AudioClip::new_sampled(&file_name, pool_index, duration);
shared.action_executor.document_mut().add_audio_clip(clip);
// Add as layer spanning full key range
controller.multi_sampler_add_layer_from_pool(
backend_track_id, backend_node_id, pool_index,
0, 127, 60,
);
}
Err(e) => {
eprintln!("Failed to import audio '{}': {}", path.display(), e);
}
}
if let Some(node) = self.state.graph.nodes.get_mut(node_id) {
node.user_data.sample_display_name = Some(file_name);
}
}
}
}
}
fn check_parameter_changes(&mut self, shared: &mut crate::panes::SharedPaneState) {
// Check all input parameters for value changes
let mut _checked_count = 0;
@ -1554,7 +1674,7 @@ impl NodeGraphPane {
label: group.name.clone(),
inputs: vec![],
outputs: vec![],
user_data: NodeData { template: NodeTemplate::Group },
user_data: NodeData { template: NodeTemplate::Group, sample_display_name: None, root_note: 69 },
});
// Add dynamic input ports based on boundary inputs
@ -1626,7 +1746,7 @@ impl NodeGraphPane {
label: "Group Input".to_string(),
inputs: vec![],
outputs: vec![],
user_data: NodeData { template: NodeTemplate::Group },
user_data: NodeData { template: NodeTemplate::Group, sample_display_name: None, root_note: 69 },
});
for bc in &scope_group.boundary_inputs {
@ -1673,7 +1793,7 @@ impl NodeGraphPane {
label: "Group Output".to_string(),
inputs: vec![],
outputs: vec![],
user_data: NodeData { template: NodeTemplate::Group },
user_data: NodeData { template: NodeTemplate::Group, sample_display_name: None, root_note: 69 },
});
for bc in &scope_group.boundary_outputs {
@ -1866,7 +1986,7 @@ impl NodeGraphPane {
label: label.to_string(),
inputs: vec![],
outputs: vec![],
user_data: NodeData { template: node_template },
user_data: NodeData { template: node_template, sample_display_name: None, root_note: 69 },
});
node_template.build_node(&mut self.state.graph, &mut self.user_state, frontend_id);
@ -2042,10 +2162,14 @@ impl crate::panes::PaneRenderer for NodeGraphPane {
let mut controller = audio_controller.lock().unwrap();
for (node_id, backend_node_id) in oscilloscope_nodes {
// Calculate sample count from per-node time scale (default 100ms)
let time_ms = self.user_state.oscilloscope_time_scale
.get(&node_id).copied().unwrap_or(100.0);
let sample_count = ((time_ms / 1000.0) * 48000.0) as usize;
let result = if let Some(va_id) = va_backend_id {
controller.query_voice_oscilloscope_data(backend_track_id, va_id, backend_node_id, 4800)
controller.query_voice_oscilloscope_data(backend_track_id, va_id, backend_node_id, sample_count)
} else {
controller.query_oscilloscope_data(backend_track_id, backend_node_id, 4800)
controller.query_oscilloscope_data(backend_track_id, backend_node_id, sample_count)
};
if let Ok(data) = result {
self.user_state.oscilloscope_data.insert(node_id, graph_data::OscilloscopeCache {
@ -2172,6 +2296,55 @@ impl crate::panes::PaneRenderer for NodeGraphPane {
let zoom_before = self.state.pan_zoom.zoom;
let pan_before = self.state.pan_zoom.pan;
// Populate sampler clip list and node backend ID map for bottom_ui()
{
use lightningbeam_core::clip::AudioClipType;
let doc = shared.action_executor.document();
// Available audio clips
self.user_state.available_clips = doc.audio_clips.values()
.filter_map(|clip| match &clip.clip_type {
AudioClipType::Sampled { audio_pool_index } => Some(graph_data::SamplerClipInfo {
name: clip.name.clone(),
pool_index: *audio_pool_index,
}),
_ => None,
})
.collect();
self.user_state.available_clips.sort_by(|a, b| a.name.to_lowercase().cmp(&b.name.to_lowercase()));
// Available folders (with their contained audio clips)
self.user_state.available_folders = doc.audio_folders.folders.values()
.map(|folder| {
let clips_in_folder: Vec<(String, usize)> = doc.audio_clips.values()
.filter(|clip| clip.folder_id == Some(folder.id))
.filter_map(|clip| match &clip.clip_type {
AudioClipType::Sampled { audio_pool_index } => Some((clip.name.clone(), *audio_pool_index)),
_ => None,
})
.collect();
graph_data::SamplerFolderInfo {
folder_id: folder.id,
name: folder.name.clone(),
clip_pool_indices: clips_in_folder,
}
})
.filter(|f| !f.clip_pool_indices.is_empty())
.collect();
self.user_state.available_folders.sort_by(|a, b| a.name.to_lowercase().cmp(&b.name.to_lowercase()));
// Node backend ID map
self.user_state.node_backend_ids = self.node_id_map.iter()
.map(|(&node_id, backend_id)| {
let id = match backend_id {
BackendNodeId::Audio(idx) => idx.index() as u32,
};
(node_id, id)
})
.collect();
}
// Draw dot grid background with pan/zoom
let pan_zoom = &self.state.pan_zoom;
Self::draw_dot_grid_background(ui, graph_rect, bg_color, grid_color, pan_zoom);
@ -2204,6 +2377,27 @@ impl crate::panes::PaneRenderer for NodeGraphPane {
self.last_node_rects = graph_response.node_rects.clone();
self.handle_graph_response(graph_response, shared, graph_rect);
// Handle pending sampler load requests from bottom_ui()
if let Some(load) = self.user_state.pending_sampler_load.take() {
self.handle_pending_sampler_load(load, shared);
}
// Handle pending root note changes
if !self.user_state.pending_root_note_changes.is_empty() {
let changes: Vec<_> = self.user_state.pending_root_note_changes.drain(..).collect();
if let Some(backend_track_id) = self.track_id.and_then(|tid| shared.layer_to_track_map.get(&tid).copied()) {
if let Some(controller_arc) = &shared.audio_controller {
let mut controller = controller_arc.lock().unwrap();
for (node_id, backend_node_id, root_note) in changes {
controller.sampler_set_root_note(backend_track_id, backend_node_id, root_note);
if let Some(node) = self.state.graph.nodes.get_mut(node_id) {
node.user_data.root_note = root_note;
}
}
}
}
}
// Detect right-click on nodes — intercept the library's node finder and show our context menu instead
{
let secondary_clicked = ui.input(|i| i.pointer.secondary_released());

View File

@ -148,6 +148,70 @@ impl VirtualPianoPane {
(start_note as u8, end_note as u8, white_key_width, 0.0)
}
    /// Render keys visually without any input handling (used when a modal is active)
    ///
    /// Draws white keys first, then black keys so they paint on top. Keys in
    /// `self.pressed_notes` are highlighted in blue. Geometry parameters are
    /// precomputed by the caller from the pane size.
    fn render_keyboard_visual_only(
        &self,
        ui: &mut egui::Ui,
        rect: egui::Rect,
        visible_start: u8,
        visible_end: u8,
        white_key_width: f32,
        offset_x: f32,
        white_key_height: f32,
        black_key_width: f32,
        black_key_height: f32,
    ) {
        // Draw white keys
        // `white_pos` counts white keys drawn so far; each advances x by one
        // white-key width from the pane's left edge (plus `offset_x`).
        let mut white_pos = 0f32;
        for note in visible_start..=visible_end {
            if !Self::is_white_key(note) {
                continue;
            }
            let x = rect.min.x + offset_x + (white_pos * white_key_width);
            let key_rect = egui::Rect::from_min_size(
                egui::pos2(x, rect.min.y),
                // -1.0 leaves a hairline gap between adjacent keys
                egui::vec2(white_key_width - 1.0, white_key_height),
            );
            // Pressed keys are tinted blue; idle keys stay white
            let color = if self.pressed_notes.contains(&note) {
                egui::Color32::from_rgb(100, 150, 255)
            } else {
                egui::Color32::WHITE
            };
            ui.painter().rect_filled(key_rect, 2.0, color);
            ui.painter().rect_stroke(
                key_rect,
                2.0,
                egui::Stroke::new(1.0, egui::Color32::BLACK),
                egui::StrokeKind::Middle,
            );
            white_pos += 1.0;
        }
        // Draw black keys
        for note in visible_start..=visible_end {
            if !Self::is_black_key(note) {
                continue;
            }
            // A black key sits on the boundary after the white keys that
            // precede it, centered by shifting left half a black-key width.
            let mut white_keys_before = 0;
            for n in visible_start..note {
                if Self::is_white_key(n) {
                    white_keys_before += 1;
                }
            }
            let x = rect.min.x + offset_x + (white_keys_before as f32 * white_key_width) - (black_key_width / 2.0);
            let key_rect = egui::Rect::from_min_size(
                egui::pos2(x, rect.min.y),
                egui::vec2(black_key_width, black_key_height),
            );
            // Pressed black keys use a lighter blue than pressed white keys
            let color = if self.pressed_notes.contains(&note) {
                egui::Color32::from_rgb(50, 100, 200)
            } else {
                egui::Color32::BLACK
            };
            ui.painter().rect_filled(key_rect, 2.0, color);
        }
    }
/// Render the piano keyboard
fn render_keyboard(&mut self, ui: &mut egui::Ui, rect: egui::Rect, shared: &mut SharedPaneState) {
// Calculate visible range and key dimensions based on pane size
@ -158,6 +222,20 @@ impl VirtualPianoPane {
let black_key_width = white_key_width * self.black_key_width_ratio;
let black_key_height = white_key_height * self.black_key_height_ratio;
// If a modal dialog is open, don't process mouse input — just render keys visually.
// We read raw input (ui.input) which bypasses egui's modal blocking, so we must check manually.
let modal_active = ui.ctx().memory(|m| m.top_modal_layer().is_some());
if modal_active {
// Release any held notes so they don't get stuck
if self.dragging_note.is_some() {
if let Some(note) = self.dragging_note.take() {
self.send_note_off(note, shared);
}
}
self.render_keyboard_visual_only(ui, rect, visible_start, visible_end, white_key_width, offset_x, white_key_height, black_key_width, black_key_height);
return;
}
// Count white keys before each note for positioning
let mut white_key_positions: std::collections::HashMap<u8, f32> = std::collections::HashMap::new();
let mut white_count = 0;

View File

@ -0,0 +1,77 @@
//! Full-width selectable list for use inside popups and dropdowns.
//!
//! Solves the recurring issue where `selectable_label` inside `ScrollArea`
//! inside a `Popup` doesn't fill the available width, making only the text
//! portion clickable.
use eframe::egui;
use egui::Ui;
/// Render a full-width selectable list item.
///
/// Unlike `ui.selectable_label()`, this allocates the full available width
/// for the clickable area, matching native menu item behavior.
/// Render a full-width selectable list item.
///
/// Unlike `ui.selectable_label()`, this allocates the full available width
/// for the clickable area, matching native menu item behavior.
pub fn list_item(ui: &mut Ui, selected: bool, label: &str) -> bool {
    // Claim the whole row: full available width, standard interact height.
    let row_size = egui::vec2(ui.available_width(), ui.spacing().interact_size.y);
    let (rect, response) = ui.allocate_exact_size(row_size, egui::Sense::click());
    if ui.is_rect_visible(rect) {
        let visuals = ui.visuals();
        // Background: selection fill takes precedence over hover fill.
        let bg_fill = if selected {
            Some(visuals.selection.bg_fill)
        } else if response.hovered() {
            Some(visuals.widgets.hovered.bg_fill)
        } else {
            None
        };
        if let Some(fill) = bg_fill {
            ui.painter().rect_filled(rect, 2.0, fill);
        }
        // Text color tracks the same three states as the background.
        let fg = if selected {
            visuals.selection.stroke.color
        } else if response.hovered() {
            visuals.widgets.hovered.text_color()
        } else {
            visuals.widgets.inactive.text_color()
        };
        // Small left inset; vertical offset centers the 14 pt proportional font.
        let anchor = rect.min + egui::vec2(4.0, (rect.height() - 14.0) / 2.0);
        ui.painter().text(
            anchor,
            egui::Align2::LEFT_TOP,
            label,
            egui::FontId::proportional(14.0),
            fg,
        );
    }
    response.clicked()
}
/// Render a scrollable list of items inside a popup, ensuring full-width
/// clickable areas and proper ScrollArea sizing.
///
/// Returns the index of the clicked item, if any.
/// Render a scrollable list of items inside a popup, ensuring full-width
/// clickable areas and proper ScrollArea sizing.
///
/// Returns the index of the clicked item, if any.
pub fn scrollable_list<'a>(
    ui: &mut Ui,
    max_height: f32,
    items: impl Iterator<Item = (bool, &'a str)>,
) -> Option<usize> {
    // Capture the parent-set width up front so rows inside the ScrollArea
    // stay full-width instead of shrinking to the text.
    let full_width = ui.available_width();
    let mut hit: Option<usize> = None;
    egui::ScrollArea::vertical()
        .max_height(max_height)
        .show(ui, |ui| {
            ui.set_min_width(full_width);
            for (idx, (is_selected, text)) in items.enumerate() {
                if list_item(ui, is_selected, text) {
                    hit = Some(idx);
                }
            }
        });
    hit
}

View File

@ -1,5 +1,7 @@
//! Reusable UI widgets for the editor
// IME-aware text input widget (private module; re-exported below).
mod text_field;
// Full-width selectable lists for popups/dropdowns; public so callers can
// reach items beyond the re-exports if needed.
pub mod dropdown_list;
// Flat re-exports: the rest of the editor uses `widgets::ImeTextField` etc.
pub use text_field::ImeTextField;
pub use dropdown_list::{list_item, scrollable_list};