Improve idle performance

This commit is contained in:
Skyler Lehmkuhl 2026-03-10 02:41:44 -04:00
parent 26f06da5bf
commit ac2b4ff8ab
4 changed files with 211 additions and 133 deletions

View File

@ -41,6 +41,11 @@ pub struct AudioSystem {
pub event_rx: Option<rtrb::Consumer<AudioEvent>>,
/// Consumer for recording audio mirror (streams recorded samples to UI for live waveform)
recording_mirror_rx: Option<rtrb::Consumer<f32>>,
/// Producer end of the input ring-buffer. Taken into the closure when the
/// input stream is opened; `None` after `open_input_stream()` has been called.
input_tx: Option<rtrb::Producer<f32>>,
/// The live microphone/line-in stream. `None` until `open_input_stream()` is called.
input_stream: Option<cpal::Stream>,
}
impl AudioSystem {
@ -138,137 +143,8 @@ impl AudioSystem {
)
.map_err(|e| format!("Failed to build output stream: {e:?}"))?;
// Get input device
let input_device = match host.default_input_device() {
Some(device) => device,
None => {
eprintln!("Warning: No input device available, recording will be disabled");
// Start output stream and return without input
output_stream.play().map_err(|e| e.to_string())?;
// Spawn emitter thread if provided
if let Some(emitter) = event_emitter {
Self::spawn_emitter_thread(event_rx, emitter);
}
return Ok(Self {
controller,
stream: output_stream,
sample_rate,
channels,
event_rx: None, // No event receiver when audio device unavailable
recording_mirror_rx: None,
});
}
};
// Get input config using the device's default (most compatible)
let input_config = match input_device.default_input_config() {
Ok(config) => {
let cfg: cpal::StreamConfig = config.into();
cfg
}
Err(e) => {
eprintln!("Warning: Could not get input config: {}, recording will be disabled", e);
output_stream.play().map_err(|e| e.to_string())?;
if let Some(emitter) = event_emitter {
Self::spawn_emitter_thread(event_rx, emitter);
}
return Ok(Self {
controller,
stream: output_stream,
sample_rate,
channels,
event_rx: None,
recording_mirror_rx: None,
});
}
};
let input_sample_rate = input_config.sample_rate;
let input_channels = input_config.channels as u32;
let output_sample_rate = sample_rate;
let output_channels = channels;
let needs_resample = input_sample_rate != output_sample_rate || input_channels != output_channels;
if needs_resample {
eprintln!("[AUDIO] Input device: {}Hz {}ch -> resampling to {}Hz {}ch",
input_sample_rate, input_channels, output_sample_rate, output_channels);
}
// Build input stream with resampling if needed
let input_stream = match input_device
.build_input_stream(
&input_config,
move |data: &[f32], _: &cpal::InputCallbackInfo| {
if !needs_resample {
for &sample in data {
let _ = input_tx.push(sample);
}
} else {
// Resample: linear interpolation from input rate to output rate
let in_ch = input_channels as usize;
let out_ch = output_channels as usize;
let ratio = output_sample_rate as f64 / input_sample_rate as f64;
let in_frames = data.len() / in_ch;
let out_frames = (in_frames as f64 * ratio) as usize;
for i in 0..out_frames {
let src_pos = i as f64 / ratio;
let src_idx = src_pos as usize;
let frac = (src_pos - src_idx as f64) as f32;
for ch in 0..out_ch {
// Map output channel to input channel
let in_ch_idx = ch.min(in_ch - 1);
let s0 = if src_idx < in_frames {
data[src_idx * in_ch + in_ch_idx]
} else {
0.0
};
let s1 = if src_idx + 1 < in_frames {
data[(src_idx + 1) * in_ch + in_ch_idx]
} else {
s0
};
let _ = input_tx.push(s0 + frac * (s1 - s0));
}
}
}
},
|err| eprintln!("Input stream error: {}", err),
None,
) {
Ok(stream) => stream,
Err(e) => {
eprintln!("Warning: Could not build input stream: {}, recording will be disabled", e);
output_stream.play().map_err(|e| e.to_string())?;
if let Some(emitter) = event_emitter {
Self::spawn_emitter_thread(event_rx, emitter);
}
return Ok(Self {
controller,
stream: output_stream,
sample_rate,
channels,
event_rx: None,
recording_mirror_rx: None,
});
}
};
// Start both streams
// Start output stream
output_stream.play().map_err(|e| e.to_string())?;
input_stream.play().map_err(|e| e.to_string())?;
// Leak the input stream to keep it alive
Box::leak(Box::new(input_stream));
// Spawn emitter thread if provided, or store event_rx for manual polling
let event_rx_option = if let Some(emitter) = event_emitter {
@ -278,6 +154,8 @@ impl AudioSystem {
Some(event_rx)
};
// Input stream is NOT opened here — call open_input_stream() when an
// audio input track is actually selected, to avoid constant ALSA wakeups.
Ok(Self {
controller,
stream: output_stream,
@ -285,6 +163,8 @@ impl AudioSystem {
channels,
event_rx: event_rx_option,
recording_mirror_rx: Some(mirror_rx),
input_tx: Some(input_tx),
input_stream: None,
})
}
@ -293,6 +173,99 @@ impl AudioSystem {
self.recording_mirror_rx.take()
}
/// Open the microphone/line-in input stream.
///
/// Call this as soon as an audio input track is selected so the stream is
/// ready before recording starts. The stream is opened with the same fixed
/// buffer size as the output stream to avoid ALSA spinning at high callback
/// rates with its tiny default buffer.
///
/// No-ops if the stream is already open.
pub fn open_input_stream(&mut self, buffer_size: u32) -> Result<(), String> {
if self.input_stream.is_some() {
return Ok(());
}
let mut input_tx = match self.input_tx.take() {
Some(tx) => tx,
None => return Err("Input ring-buffer already consumed".into()),
};
let host = cpal::default_host();
let input_device = host.default_input_device()
.ok_or("No input device available")?;
let default_cfg = input_device.default_input_config()
.map_err(|e| e.to_string())?;
let mut input_config: cpal::StreamConfig = default_cfg.into();
// Match the output buffer size so ALSA wakes up at the same rate as
// the output thread — prevents the ~750 wakeups/sec that the default
// 64-frame buffer causes.
if !cfg!(target_os = "windows") {
input_config.buffer_size = cpal::BufferSize::Fixed(buffer_size);
}
let input_sample_rate = input_config.sample_rate;
let input_channels = input_config.channels as u32;
let output_sample_rate = self.sample_rate;
let output_channels = self.channels;
let needs_resample = input_sample_rate != output_sample_rate
|| input_channels != output_channels;
if needs_resample {
eprintln!("[AUDIO] Input: {}Hz {}ch → resampling to {}Hz {}ch",
input_sample_rate, input_channels, output_sample_rate, output_channels);
}
let stream = input_device.build_input_stream(
&input_config,
move |data: &[f32], _: &cpal::InputCallbackInfo| {
if !needs_resample {
for &s in data { let _ = input_tx.push(s); }
} else {
let in_ch = input_channels as usize;
let out_ch = output_channels as usize;
let ratio = output_sample_rate as f64 / input_sample_rate as f64;
let in_frames = data.len() / in_ch;
let out_frames = (in_frames as f64 * ratio) as usize;
for i in 0..out_frames {
let src_pos = i as f64 / ratio;
let src_idx = src_pos as usize;
let frac = (src_pos - src_idx as f64) as f32;
for ch in 0..out_ch {
let ic = ch.min(in_ch - 1);
let s0 = data.get(src_idx * in_ch + ic).copied().unwrap_or(0.0);
let s1 = data.get((src_idx + 1) * in_ch + ic).copied().unwrap_or(s0);
let _ = input_tx.push(s0 + frac * (s1 - s0));
}
}
}
},
|err| eprintln!("Input stream error: {err}"),
None,
).map_err(|e| format!("Failed to build input stream: {e}"))?;
stream.play().map_err(|e| e.to_string())?;
self.input_stream = Some(stream);
Ok(())
}
/// Close the input stream (e.g. when the last audio input track is removed).
pub fn close_input_stream(&mut self) {
self.input_stream = None; // Drop stops the stream
}
/// Extract an [`InputStreamOpener`] that can be stored independently and
/// used to open the microphone/line-in stream on demand.
/// Returns `None` if called a second time.
pub fn take_input_opener(&mut self) -> Option<InputStreamOpener> {
self.input_tx.take().map(|tx| InputStreamOpener {
input_tx: tx,
sample_rate: self.sample_rate,
channels: self.channels,
})
}
/// Spawn a background thread to emit events from the ringbuffer
fn spawn_emitter_thread(mut event_rx: rtrb::Consumer<AudioEvent>, emitter: std::sync::Arc<dyn EventEmitter>) {
std::thread::spawn(move || {
@ -308,3 +281,77 @@ impl AudioSystem {
});
}
}
/// Self-contained handle for opening the microphone/line-in stream on demand.
///
/// Obtained via [`AudioSystem::take_input_opener`]. Call [`open`](Self::open)
/// when the user selects an audio input track; store the returned
/// `cpal::Stream` to keep it alive (dropping it stops the stream).
pub struct InputStreamOpener {
input_tx: rtrb::Producer<f32>,
sample_rate: u32,
channels: u32,
}
impl InputStreamOpener {
/// Open and start the input stream with the given buffer size.
///
/// Uses the same `buffer_size` as the output stream so ALSA wakes up at
/// the same rate (~187/s at 256 frames) rather than the ~750/s it defaults
/// to with 64-frame buffers.
pub fn open(mut self, buffer_size: u32) -> Result<cpal::Stream, String> {
let host = cpal::default_host();
let device = host.default_input_device()
.ok_or("No input device available")?;
let default_cfg = device.default_input_config()
.map_err(|e| e.to_string())?;
let mut cfg: cpal::StreamConfig = default_cfg.into();
if !cfg!(target_os = "windows") {
cfg.buffer_size = cpal::BufferSize::Fixed(buffer_size);
}
let in_rate = cfg.sample_rate;
let in_ch = cfg.channels as u32;
let out_rate = self.sample_rate;
let out_ch = self.channels;
let needs_resample = in_rate != out_rate || in_ch != out_ch;
if needs_resample {
eprintln!("[AUDIO] Input: {}Hz {}ch → resampling to {}Hz {}ch",
in_rate, in_ch, out_rate, out_ch);
}
let stream = device.build_input_stream(
&cfg,
move |data: &[f32], _: &cpal::InputCallbackInfo| {
if !needs_resample {
for &s in data { let _ = self.input_tx.push(s); }
} else {
let ic = in_ch as usize;
let oc = out_ch as usize;
let ratio = out_rate as f64 / in_rate as f64;
let in_frames = data.len() / ic;
let out_frames = (in_frames as f64 * ratio) as usize;
for i in 0..out_frames {
let src = i as f64 / ratio;
let si = src as usize;
let f = (src - si as f64) as f32;
for ch in 0..oc {
let ich = ch.min(ic - 1);
let s0 = data.get(si * ic + ich).copied().unwrap_or(0.0);
let s1 = data.get((si + 1) * ic + ich).copied().unwrap_or(s0);
let _ = self.input_tx.push(s0 + f * (s1 - s0));
}
}
}
},
|err| eprintln!("Input stream error: {err}"),
None,
).map_err(|e| format!("Failed to build input stream: {e}"))?;
stream.play().map_err(|e| e.to_string())?;
Ok(stream)
}
}

View File

@ -796,6 +796,13 @@ struct EditorApp {
#[allow(dead_code)] // Must be kept alive to maintain audio output
audio_stream: Option<cpal::Stream>,
audio_controller: Option<std::sync::Arc<std::sync::Mutex<daw_backend::EngineController>>>,
/// Holds `input_tx` and device info needed to open the microphone stream on
/// demand (when the user selects an audio input track).
audio_input: Option<daw_backend::InputStreamOpener>,
/// Active microphone/line-in stream; kept alive while an audio input track is selected.
#[allow(dead_code)]
audio_input_stream: Option<cpal::Stream>,
audio_buffer_size: u32,
audio_event_rx: Option<rtrb::Consumer<daw_backend::AudioEvent>>,
audio_events_pending: std::sync::Arc<std::sync::atomic::AtomicBool>,
/// Count of in-flight graph preset loads — keeps the repaint loop alive
@ -1004,13 +1011,16 @@ impl EditorApp {
let action_executor = lightningbeam_core::action::ActionExecutor::new(document);
// Initialize audio system and destructure it for sharing
let (audio_stream, audio_controller, audio_event_rx, audio_sample_rate, audio_channels, file_command_tx, recording_mirror_rx) =
let (audio_stream, audio_controller, audio_event_rx, audio_sample_rate, audio_channels, file_command_tx, recording_mirror_rx, audio_input) =
match daw_backend::AudioSystem::new(None, config.audio_buffer_size) {
Ok(mut audio_system) => {
println!("✅ Audio engine initialized successfully");
// Extract components
let mirror_rx = audio_system.take_recording_mirror_rx();
// take_input_opener pulls out input_tx + sample_rate/channels into
// a self-contained struct that can open the stream on demand.
let input_opener = audio_system.take_input_opener();
let stream = audio_system.stream;
let sample_rate = audio_system.sample_rate;
let channels = audio_system.channels;
@ -1022,7 +1032,7 @@ impl EditorApp {
// Spawn file operations worker
let file_command_tx = FileOperationsWorker::spawn(controller.clone());
(Some(stream), Some(controller), event_rx, sample_rate, channels, file_command_tx, mirror_rx)
(Some(stream), Some(controller), event_rx, sample_rate, channels, file_command_tx, mirror_rx, input_opener)
}
Err(e) => {
eprintln!("❌ Failed to initialize audio engine: {}", e);
@ -1030,7 +1040,7 @@ impl EditorApp {
// Create a dummy channel for file operations (won't be used)
let (tx, _rx) = std::sync::mpsc::channel();
(None, None, None, 48000, 2, tx, None)
(None, None, None, 48000, 2, tx, None, None)
}
};
@ -1078,6 +1088,9 @@ impl EditorApp {
audio_stream,
audio_controller,
audio_event_rx,
audio_input,
audio_input_stream: None,
audio_buffer_size: config.audio_buffer_size,
audio_events_pending: std::sync::Arc::new(std::sync::atomic::AtomicBool::new(false)),
pending_graph_loads: std::sync::Arc::new(std::sync::atomic::AtomicU32::new(0)),
commit_raster_floating_if_any: false,
@ -5679,6 +5692,9 @@ impl eframe::App for EditorApp {
schneider_max_error: &mut self.schneider_max_error,
raster_settings: &mut self.raster_settings,
audio_controller: self.audio_controller.as_ref(),
audio_input_opener: &mut self.audio_input,
audio_input_stream: &mut self.audio_input_stream,
audio_buffer_size: self.audio_buffer_size,
video_manager: &self.video_manager,
playback_time: &mut self.playback_time,
is_playing: &mut self.is_playing,

View File

@ -192,6 +192,12 @@ pub struct SharedPaneState<'a> {
pub raster_settings: &'a mut crate::tools::RasterToolSettings,
/// Audio engine controller for playback control (wrapped in Arc<Mutex<>> for thread safety)
pub audio_controller: Option<&'a std::sync::Arc<std::sync::Mutex<daw_backend::EngineController>>>,
/// Opener for the microphone/line-in stream — consumed on first use.
pub audio_input_opener: &'a mut Option<daw_backend::InputStreamOpener>,
/// Live input stream handle; kept alive while recording is active.
pub audio_input_stream: &'a mut Option<cpal::Stream>,
/// Buffer size (frames) used for the output stream, passed to the input stream opener.
pub audio_buffer_size: u32,
/// Video manager for video decoding and frame caching
pub video_manager: &'a std::sync::Arc<std::sync::Mutex<lightningbeam_core::video::VideoManager>>,
/// Mapping from Document layer UUIDs to daw-backend TrackIds

View File

@ -657,6 +657,15 @@ impl TimelinePane {
}
RecordCandidate::AudioSampled => {
if let Some(&track_id) = shared.layer_to_track_map.get(&layer_id) {
// Open the input stream now if it hasn't been opened yet.
if shared.audio_input_stream.is_none() {
if let Some(opener) = shared.audio_input_opener.take() {
match opener.open(shared.audio_buffer_size) {
Ok(stream) => *shared.audio_input_stream = Some(stream),
Err(e) => eprintln!("⚠️ Could not open input stream: {e}"),
}
}
}
if let Some(controller_arc) = shared.audio_controller {
let mut controller = controller_arc.lock().unwrap();
controller.start_recording(track_id, start_time);