Use Tauri events instead of polling to fix a race condition in recording stop
Parent: 20c3b820a3
Commit: d2fa167179
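The race this commit addresses: the frontend previously pulled events out of the engine's ring buffer by invoking an `audio_get_events` command from its animation-frame loop (see the `pollAudioEvents` removal in `src/main.js` below), so a `RecordingStopped` event arriving after that loop stopped ticking could sit in the buffer unread. A minimal sketch of the two delivery models, with hypothetical names and a `u32` standing in for the event type; only the `rtrb` ring buffer already used by the backend is assumed:

    use std::{thread, time::Duration};

    // Pull model: events are observed only while something keeps calling poll().
    fn poll_events(rx: &mut rtrb::Consumer<u32>) -> Vec<u32> {
        let mut out = Vec::new();
        while let Ok(e) = rx.pop() {
            out.push(e);
        }
        out // if the caller's frame loop pauses, later events are stranded
    }

    // Push model: a dedicated thread drains the ring buffer and forwards
    // events as they arrive, independent of any UI loop.
    fn push_events(mut rx: rtrb::Consumer<u32>, emit: impl Fn(u32) + Send + 'static) {
        thread::spawn(move || loop {
            match rx.pop() {
                Ok(e) => emit(e),
                Err(_) => thread::sleep(Duration::from_millis(1)),
            }
        });
    }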
@@ -762,62 +762,59 @@ impl Engine {
     /// Handle stopping a recording
     fn handle_stop_recording(&mut self) {
+        eprintln!("[STOP_RECORDING] handle_stop_recording called");
         if let Some(recording) = self.recording_state.take() {
             let clip_id = recording.clip_id;
             let track_id = recording.track_id;
+            let sample_rate = recording.sample_rate;
+            let channels = recording.channels;

-            // Finalize the recording and get temp file path
+            eprintln!("[STOP_RECORDING] Stopping recording for clip_id={}, track_id={}", clip_id, track_id);
+
+            // Finalize the recording (flush buffers, close file, get waveform and audio data)
             let frames_recorded = recording.frames_written;
+            eprintln!("[STOP_RECORDING] Calling finalize() - frames_recorded={}", frames_recorded);
             match recording.finalize() {
-                Ok(temp_file_path) => {
-                    eprintln!("Recording finalized: {} frames written to {:?}", frames_recorded, temp_file_path);
+                Ok((temp_file_path, waveform, audio_data)) => {
+                    eprintln!("[STOP_RECORDING] Finalize succeeded: {} frames written to {:?}, {} waveform peaks generated, {} samples in memory",
+                        frames_recorded, temp_file_path, waveform.len(), audio_data.len());

-                    // Load the recorded audio file
-                    match crate::io::AudioFile::load(&temp_file_path) {
-                        Ok(audio_file) => {
-                            // Generate waveform for UI
-                            let duration = audio_file.duration();
-                            let target_peaks = ((duration * 300.0) as usize).clamp(1000, 20000);
-                            let waveform = audio_file.generate_waveform_overview(target_peaks);
-
-                            // Add to pool
+                    // Add to pool using the in-memory audio data (no file loading needed!)
                     let pool_file = crate::audio::pool::AudioFile::new(
                         temp_file_path.clone(),
-                        audio_file.data,
-                        audio_file.channels,
-                        audio_file.sample_rate,
+                        audio_data,
+                        channels,
+                        sample_rate,
                     );
                     let pool_index = self.audio_pool.add_file(pool_file);
+                    eprintln!("[STOP_RECORDING] Added to pool at index {}", pool_index);

                     // Update the clip to reference the pool
                     if let Some(crate::audio::track::TrackNode::Audio(track)) = self.project.get_track_mut(track_id) {
                         if let Some(clip) = track.clips.iter_mut().find(|c| c.id == clip_id) {
                             clip.audio_pool_index = pool_index;
+                            // Duration should already be set during recording progress updates
+                            eprintln!("[STOP_RECORDING] Updated clip {} with pool_index {}", clip_id, pool_index);
                         }
                     }

                     // Delete temp file
                     let _ = std::fs::remove_file(&temp_file_path);

-                    // Notify UI that recording has stopped (with waveform)
+                    // Send event with the incrementally-generated waveform
+                    eprintln!("[STOP_RECORDING] Pushing RecordingStopped event for clip_id={}, pool_index={}, waveform_peaks={}",
+                        clip_id, pool_index, waveform.len());
                     let _ = self.event_tx.push(AudioEvent::RecordingStopped(clip_id, pool_index, waveform));
+                    eprintln!("[STOP_RECORDING] RecordingStopped event pushed successfully");
                 }
-                        Err(e) => {
-                            // Send error event
-                            let _ = self.event_tx.push(AudioEvent::RecordingError(
-                                format!("Failed to load recorded audio: {}", e)
-                            ));
-                        }
-                    }
-                }
                 Err(e) => {
                     // Send error event
+                    eprintln!("[STOP_RECORDING] Finalize failed: {}", e);
                     let _ = self.event_tx.push(AudioEvent::RecordingError(
                         format!("Failed to finalize recording: {}", e)
                     ));
                 }
             }
         } else {
+            eprintln!("[STOP_RECORDING] No active recording to stop");
         }
     }
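One detail in the hunk above: `finalize(mut self)` consumes the `RecordingState`, which is why `clip_id`, `track_id`, `sample_rate`, and `channels` are copied out before the call. A stand-in sketch of the move semantics, not the project's real types:

    struct Rec {
        sample_rate: u32,
    }

    impl Rec {
        // Consumes the state, like RecordingState::finalize above.
        fn finalize(self) -> std::io::Result<Vec<f32>> {
            Ok(Vec::new())
        }
    }

    fn stop(rec: Rec) -> std::io::Result<()> {
        let sample_rate = rec.sample_rate; // must be read before the move
        let _audio = rec.finalize()?; // `rec` is gone after this line
        println!("finalized at {} Hz", sample_rate); // the copy is still usable
        Ok(())
    }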
@@ -1,6 +1,6 @@
 /// Audio recording system for capturing microphone input
 use crate::audio::{ClipId, TrackId};
-use crate::io::WavWriter;
+use crate::io::{WavWriter, WaveformPeak};
 use std::path::PathBuf;

 /// State of an active recording session
@@ -29,6 +29,14 @@ pub struct RecordingState {
     pub paused: bool,
     /// Number of samples remaining to skip (to discard stale buffer data)
     pub samples_to_skip: usize,
+    /// Waveform peaks generated incrementally during recording
+    pub waveform: Vec<WaveformPeak>,
+    /// Temporary buffer for collecting samples for next waveform peak
+    pub waveform_buffer: Vec<f32>,
+    /// Number of frames per waveform peak
+    pub frames_per_peak: usize,
+    /// All recorded audio data accumulated in memory (for fast finalization)
+    pub audio_data: Vec<f32>,
 }

 impl RecordingState {
@@ -45,6 +53,11 @@ impl RecordingState {
     ) -> Self {
         let flush_interval_frames = (sample_rate as f64 * flush_interval_seconds) as usize;

+        // Calculate frames per waveform peak
+        // Target ~300 peaks per second with minimum 1000 samples per peak
+        let target_peaks_per_second = 300;
+        let frames_per_peak = (sample_rate / target_peaks_per_second).max(1000) as usize;
+
         Self {
             track_id,
             clip_id,
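A quick check of the arithmetic here: at ordinary audio rates the `.max(1000)` floor, not the 300-peaks-per-second target, is what sets the resolution, because `sample_rate / 300` only exceeds 1000 frames once the sample rate passes 300 kHz:

    fn main() {
        let sample_rate: u32 = 48_000;
        // 48_000 / 300 = 160 frames, floored up to 1000 frames per peak.
        let frames_per_peak = (sample_rate / 300).max(1000) as usize;
        assert_eq!(frames_per_peak, 1000);
        // 48_000 frames/s over 1000 frames/peak = 48 peaks per second.
        assert_eq!(sample_rate as usize / frames_per_peak, 48);
    }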
@@ -58,6 +71,10 @@ impl RecordingState {
             flush_interval_frames,
             paused: false,
             samples_to_skip: 0, // Will be set by engine when it knows buffer size
+            waveform: Vec::new(),
+            waveform_buffer: Vec::new(),
+            frames_per_peak,
+            audio_data: Vec::new(),
         }
     }
@@ -68,8 +85,8 @@ impl RecordingState {
             return Ok(false);
         }

-        // Skip stale samples from the buffer
-        if self.samples_to_skip > 0 {
+        // Determine which samples to process
+        let samples_to_process = if self.samples_to_skip > 0 {
             let to_skip = self.samples_to_skip.min(samples.len());
             self.samples_to_skip -= to_skip;
@@ -79,12 +96,22 @@ impl RecordingState {
             }

             // Skip partial batch and process the rest
-            self.buffer.extend_from_slice(&samples[to_skip..]);
+            &samples[to_skip..]
         } else {
-            self.buffer.extend_from_slice(samples);
-        }
+            samples
+        };

-        // Check if we should flush
+        // Add to disk buffer
+        self.buffer.extend_from_slice(samples_to_process);
+
+        // Add to audio data (accumulate in memory for fast finalization)
+        self.audio_data.extend_from_slice(samples_to_process);
+
+        // Add to waveform buffer and generate peaks incrementally
+        self.waveform_buffer.extend_from_slice(samples_to_process);
+        self.generate_waveform_peaks();
+
+        // Check if we should flush to disk
         let frames_in_buffer = self.buffer.len() / self.channels as usize;
         if frames_in_buffer >= self.flush_interval_frames {
             self.flush()?;
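The shape of this change in miniature: the skip check becomes an expression that evaluates to the slice to keep, so the three consumers below it (disk buffer, in-memory copy, waveform buffer) all see exactly the same samples. A reduced, self-contained sketch:

    /// Returns the samples left after discarding up to `skip` stale ones.
    fn effective<'a>(samples: &'a [f32], skip: &mut usize) -> &'a [f32] {
        let n = (*skip).min(samples.len());
        *skip -= n;
        &samples[n..]
    }

    fn main() {
        let mut skip = 3;
        assert_eq!(effective(&[0.0, 0.1, 0.2, 0.3, 0.4], &mut skip), &[0.3, 0.4]);
        assert_eq!(skip, 0); // fully consumed; later batches pass through intact
    }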
@@ -94,6 +121,28 @@ impl RecordingState {
         Ok(false)
     }

+    /// Generate waveform peaks from accumulated samples
+    /// This is called incrementally as samples arrive
+    fn generate_waveform_peaks(&mut self) {
+        let samples_per_peak = self.frames_per_peak * self.channels as usize;
+
+        while self.waveform_buffer.len() >= samples_per_peak {
+            let mut min = 0.0f32;
+            let mut max = 0.0f32;
+
+            // Scan all samples for this peak
+            for sample in &self.waveform_buffer[..samples_per_peak] {
+                min = min.min(*sample);
+                max = max.max(*sample);
+            }
+
+            self.waveform.push(WaveformPeak { min, max });
+
+            // Remove processed samples from waveform buffer
+            self.waveform_buffer.drain(..samples_per_peak);
+        }
+    }
+
     /// Flush accumulated samples to disk
     pub fn flush(&mut self) -> Result<(), std::io::Error> {
         if self.buffer.is_empty() {
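A subtlety in the loop above: `min` and `max` start at 0.0 rather than at plus or minus infinity, so every peak spans the zero line even when the window's audio never crosses it; for waveform display that is usually the intended look. The same reduction written as an iterator fold, behaviorally identical:

    fn peak(samples: &[f32]) -> (f32, f32) {
        samples
            .iter()
            .fold((0.0f32, 0.0f32), |(lo, hi), &s| (lo.min(s), hi.max(s)))
    }

    fn main() {
        assert_eq!(peak(&[0.25, -0.5, 0.1]), (-0.5, 0.25));
        // A window sitting entirely above zero still reports min = 0.0:
        assert_eq!(peak(&[0.2, 0.3]), (0.0, 0.3));
    }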
@@ -121,15 +170,28 @@ impl RecordingState {
         total_frames as f64 / self.sample_rate as f64
     }

-    /// Finalize the recording and return the temp file path
-    pub fn finalize(mut self) -> Result<PathBuf, std::io::Error> {
-        // Flush any remaining samples
+    /// Finalize the recording and return the temp file path, waveform, and audio data
+    pub fn finalize(mut self) -> Result<(PathBuf, Vec<WaveformPeak>, Vec<f32>), std::io::Error> {
+        // Flush any remaining samples to disk
         self.flush()?;

+        // Generate final waveform peak from any remaining samples
+        if !self.waveform_buffer.is_empty() {
+            let mut min = 0.0f32;
+            let mut max = 0.0f32;
+
+            for sample in &self.waveform_buffer {
+                min = min.min(*sample);
+                max = max.max(*sample);
+            }
+
+            self.waveform.push(WaveformPeak { min, max });
+        }
+
         // Finalize the WAV file
         self.writer.finalize()?;

-        Ok(self.temp_file_path)
+        Ok((self.temp_file_path, self.waveform, self.audio_data))
     }

     /// Pause recording
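Note that the tail-peak block in `finalize()` repeats the body of `generate_waveform_peaks()`; if the two ever drift apart, the final peak would be computed differently from all the others. A hypothetical shared helper, reusing the `WaveformPeak { min, max }` shape visible in the hunks above, would keep them in sync:

    // Assumes WaveformPeak from crate::io, as imported at the top of the file.
    fn peak_of(samples: &[f32]) -> WaveformPeak {
        let (mut min, mut max) = (0.0f32, 0.0f32);
        for &s in samples {
            min = min.min(s);
            max = max.max(s);
        }
        WaveformPeak { min, max }
    }

    // generate_waveform_peaks: waveform.push(peak_of(&buf[..samples_per_peak]));
    // finalize:                waveform.push(peak_of(&buf));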
@@ -22,18 +22,27 @@ pub use io::{load_midi_file, AudioFile, WaveformPeak, WavWriter};

 use cpal::traits::{DeviceTrait, HostTrait, StreamTrait};

+/// Trait for emitting audio events to external systems (UI, logging, etc.)
+/// This allows the DAW backend to remain framework-agnostic
+pub trait EventEmitter: Send + Sync {
+    /// Emit an audio event
+    fn emit(&self, event: AudioEvent);
+}
+
 /// Simple audio system that handles cpal initialization internally
 pub struct AudioSystem {
     pub controller: EngineController,
     pub stream: cpal::Stream,
     pub event_rx: rtrb::Consumer<AudioEvent>,
     pub sample_rate: u32,
     pub channels: u32,
 }

 impl AudioSystem {
     /// Initialize the audio system with default input and output devices
-    pub fn new() -> Result<Self, String> {
+    ///
+    /// # Arguments
+    /// * `event_emitter` - Optional event emitter for pushing events to external systems
+    pub fn new(event_emitter: Option<std::sync::Arc<dyn EventEmitter>>) -> Result<Self, String> {
         let host = cpal::default_host();

         // Get output device
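Because the backend only ever talks to the `EventEmitter` trait, any `Send + Sync` implementation can stand in for the UI. The "logging" case the doc comment mentions might look like this sketch, which assumes `AudioEvent` implements `Debug`:

    struct LoggingEmitter;

    impl EventEmitter for LoggingEmitter {
        fn emit(&self, event: AudioEvent) {
            eprintln!("audio event: {:?}", event); // assumes AudioEvent: Debug
        }
    }

    // Headless or test wiring, mirroring the new constructor signature:
    // let system = AudioSystem::new(Some(std::sync::Arc::new(LoggingEmitter)))?;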
@@ -84,10 +93,15 @@ impl AudioSystem {
             eprintln!("Warning: No input device available, recording will be disabled");
             // Start output stream and return without input
             output_stream.play().map_err(|e| e.to_string())?;

+            // Spawn emitter thread if provided
+            if let Some(emitter) = event_emitter {
+                Self::spawn_emitter_thread(event_rx, emitter);
+            }
+
             return Ok(Self {
                 controller,
                 stream: output_stream,
                 event_rx,
                 sample_rate,
                 channels,
             });
@@ -106,10 +120,15 @@ impl AudioSystem {
         Err(e) => {
             eprintln!("Warning: Could not get input config: {}, recording will be disabled", e);
             output_stream.play().map_err(|e| e.to_string())?;

+            // Spawn emitter thread if provided
+            if let Some(emitter) = event_emitter {
+                Self::spawn_emitter_thread(event_rx, emitter);
+            }
+
             return Ok(Self {
                 controller,
                 stream: output_stream,
                 event_rx,
                 sample_rate,
                 channels,
             });
@@ -138,12 +157,31 @@ impl AudioSystem {
         // Leak the input stream to keep it alive
         Box::leak(Box::new(input_stream));

+        // Spawn emitter thread if provided
+        if let Some(emitter) = event_emitter {
+            Self::spawn_emitter_thread(event_rx, emitter);
+        }
+
         Ok(Self {
             controller,
             stream: output_stream,
             event_rx,
             sample_rate,
             channels,
         })
     }
+
+    /// Spawn a background thread to emit events from the ringbuffer
+    fn spawn_emitter_thread(mut event_rx: rtrb::Consumer<AudioEvent>, emitter: std::sync::Arc<dyn EventEmitter>) {
+        std::thread::spawn(move || {
+            loop {
+                // Wait for events and emit them
+                if let Ok(event) = event_rx.pop() {
+                    emitter.emit(event);
+                } else {
+                    // No events available, sleep briefly to avoid busy-waiting
+                    std::thread::sleep(std::time::Duration::from_millis(1));
+                }
+            }
+        });
+    }
 }
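The emitter thread loops forever with a 1 ms back-off, which is consistent with the `Box::leak`ed streams above: everything lives for the life of the process. If a clean shutdown were ever wanted, newer rtrb releases expose `is_abandoned()` on the consumer, so the loop could exit once the producer side is dropped; a sketch, assuming that rtrb API is available in the version pinned here:

    std::thread::spawn(move || loop {
        match event_rx.pop() {
            Ok(event) => emitter.emit(event),
            // Producer dropped and buffer drained: let the thread end.
            Err(_) if event_rx.is_abandoned() => break,
            Err(_) => std::thread::sleep(std::time::Duration::from_millis(1)),
        }
    });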
@@ -1,5 +1,6 @@
-use daw_backend::{AudioEvent, AudioSystem, EngineController, WaveformPeak};
+use daw_backend::{AudioEvent, AudioSystem, EngineController, EventEmitter, WaveformPeak};
 use std::sync::{Arc, Mutex};
+use tauri::{Emitter, Manager};

 #[derive(serde::Serialize)]
 pub struct AudioFileMetadata {
@@ -12,7 +13,6 @@ pub struct AudioFileMetadata {

 pub struct AudioState {
     controller: Option<EngineController>,
-    event_rx: Option<rtrb::Consumer<AudioEvent>>,
     sample_rate: u32,
     channels: u32,
     next_track_id: u32,
@@ -23,7 +23,6 @@ impl Default for AudioState {
     fn default() -> Self {
         Self {
             controller: None,
-            event_rx: None,
             sample_rate: 0,
             channels: 0,
             next_track_id: 0,
@@ -32,8 +31,42 @@ impl Default for AudioState {
     }
 }

+/// Implementation of EventEmitter that uses Tauri's event system
+struct TauriEventEmitter {
+    app_handle: tauri::AppHandle,
+}
+
+impl EventEmitter for TauriEventEmitter {
+    fn emit(&self, event: AudioEvent) {
+        // Serialize the event to the format expected by the frontend
+        let serialized_event = match event {
+            AudioEvent::RecordingStarted(track_id, clip_id) => {
+                SerializedAudioEvent::RecordingStarted { track_id, clip_id }
+            }
+            AudioEvent::RecordingProgress(clip_id, duration) => {
+                SerializedAudioEvent::RecordingProgress { clip_id, duration }
+            }
+            AudioEvent::RecordingStopped(clip_id, pool_index, waveform) => {
+                SerializedAudioEvent::RecordingStopped { clip_id, pool_index, waveform }
+            }
+            AudioEvent::RecordingError(message) => {
+                SerializedAudioEvent::RecordingError { message }
+            }
+            _ => return, // Ignore other event types for now
+        };
+
+        // Emit the event via Tauri
+        if let Err(e) = self.app_handle.emit("audio-event", serialized_event) {
+            eprintln!("Failed to emit audio event: {}", e);
+        }
+    }
+}
+
 #[tauri::command]
-pub async fn audio_init(state: tauri::State<'_, Arc<Mutex<AudioState>>>) -> Result<String, String> {
+pub async fn audio_init(
+    state: tauri::State<'_, Arc<Mutex<AudioState>>>,
+    app_handle: tauri::AppHandle,
+) -> Result<String, String> {
     let mut audio_state = state.lock().unwrap();

     // Check if already initialized - if so, reset DAW state (for hot-reload)
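For reference, the `#[serde(tag = "type")]` attribute on `SerializedAudioEvent` (further down in this file) is what gives the payload the `{ "type": "..." }` shape the frontend's `switch (event.type)` matches on. An illustration with made-up values, assuming `serde_json` is available and the variant fields are numeric as they appear to be:

    let e = SerializedAudioEvent::RecordingProgress { clip_id: 3, duration: 1.25 };
    // Internally tagged: {"type":"RecordingProgress","clip_id":3,"duration":1.25}
    println!("{}", serde_json::to_string(&e).unwrap());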
@@ -47,8 +80,11 @@ pub async fn audio_init(state: tauri::State<'_, Arc<Mutex<AudioState>>>) -> Result<String, String> {
         ));
     }

+    // Create TauriEventEmitter
+    let emitter = Arc::new(TauriEventEmitter { app_handle });
+
     // AudioSystem handles all cpal initialization internally
-    let system = AudioSystem::new()?;
+    let system = AudioSystem::new(Some(emitter))?;

     let info = format!(
         "Audio initialized: {} Hz, {} ch",
@@ -60,7 +96,6 @@ pub async fn audio_init(state: tauri::State<'_, Arc<Mutex<AudioState>>>) -> Result<String, String> {
     Box::leak(Box::new(system.stream));

     audio_state.controller = Some(system.controller);
-    audio_state.event_rx = Some(system.event_rx);
     audio_state.sample_rate = system.sample_rate;
     audio_state.channels = system.channels;
     audio_state.next_track_id = 0;
@@ -310,7 +345,7 @@ pub async fn audio_resume_recording(
     }
 }

-#[derive(serde::Serialize)]
+#[derive(serde::Serialize, Clone)]
 #[serde(tag = "type")]
 pub enum SerializedAudioEvent {
     RecordingStarted { track_id: u32, clip_id: u32 },
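The added `Clone` is not cosmetic: in Tauri 2, `emit` on the `Emitter` trait (imported at the top of this file) bounds its payload by `Serialize + Clone`, since the handle may fan one event out to several targets. Paraphrased, the bound in play is roughly:

    // fn emit<S: serde::Serialize + Clone>(&self, event: &str, payload: S) -> tauri::Result<()>
    //
    // Without this derive, the app_handle.emit("audio-event", serialized_event)
    // call in TauriEventEmitter::emit would not compile.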
@@ -319,34 +354,4 @@ pub enum SerializedAudioEvent {
     RecordingError { message: String },
 }

-#[tauri::command]
-pub async fn audio_get_events(
-    state: tauri::State<'_, Arc<Mutex<AudioState>>>,
-) -> Result<Vec<SerializedAudioEvent>, String> {
-    let mut audio_state = state.lock().unwrap();
-    let mut events = Vec::new();
-
-    if let Some(event_rx) = &mut audio_state.event_rx {
-        // Poll all available events
-        while let Ok(event) = event_rx.pop() {
-            match event {
-                AudioEvent::RecordingStarted(track_id, clip_id) => {
-                    events.push(SerializedAudioEvent::RecordingStarted { track_id, clip_id });
-                }
-                AudioEvent::RecordingProgress(clip_id, duration) => {
-                    events.push(SerializedAudioEvent::RecordingProgress { clip_id, duration });
-                }
-                AudioEvent::RecordingStopped(clip_id, pool_index, waveform) => {
-                    events.push(SerializedAudioEvent::RecordingStopped { clip_id, pool_index, waveform });
-                }
-                AudioEvent::RecordingError(message) => {
-                    events.push(SerializedAudioEvent::RecordingError { message });
-                }
-                // Ignore other event types for now
-                _ => {}
-            }
-        }
-    }
-
-    Ok(events)
-}
+// audio_get_events command removed - events are now pushed via Tauri event system
@@ -207,7 +207,6 @@ pub fn run() {
             audio::audio_stop_recording,
             audio::audio_pause_recording,
             audio::audio_resume_recording,
-            audio::audio_get_events,
         ])
         // .manage(window_counter)
         .build(tauri::generate_context!())
src/main.js (69 changed lines)
@@ -1,4 +1,5 @@
 const { invoke } = window.__TAURI__.core;
+const { listen } = window.__TAURI__.event;
 import * as fitCurve from "/fit-curve.js";
 import { Bezier } from "/bezier.js";
 import { Quadtree } from "./quadtree.js";
@@ -923,12 +924,13 @@ async function playPause() {
     } else {
         // Stop recording if active
         if (context.isRecording) {
+            console.log('playPause - stopping recording for clip:', context.recordingClipId);
             try {
                 await invoke('audio_stop_recording');
                 context.isRecording = false;
                 context.recordingTrackId = null;
                 context.recordingClipId = null;
-                console.log('Recording stopped by play/pause');
+                console.log('Recording stopped by play/pause button');

                 // Update record button appearance if it exists
                 if (context.recordButton) {
@@ -969,9 +971,6 @@ function advanceFrame() {
         context.timelineWidget.timelineState.currentTime = context.activeObject.currentTime;
     }

-    // Poll for audio events (recording progress, etc.)
-    pollAudioEvents();
-
     // Redraw stage and timeline
     updateUI();
     if (context.timelineWidget?.requestRedraw) {
@@ -981,6 +980,11 @@ function advanceFrame() {
     if (playing) {
         const duration = context.activeObject.duration;

+        // Debug logging for recording
+        if (context.isRecording) {
+            console.log('advanceFrame - recording active, currentTime:', context.activeObject.currentTime, 'duration:', duration, 'isRecording:', context.isRecording);
+        }
+
         // Check if we've reached the end (but allow infinite playback when recording)
         if (context.isRecording || (duration > 0 && context.activeObject.currentTime < duration)) {
             // Continue playing
@@ -988,6 +992,18 @@ function advanceFrame() {
     } else {
         // Animation finished
         playing = false;

+        // Stop DAW backend audio playback
+        invoke('audio_stop').catch(error => {
+            console.error('Failed to stop audio playback:', error);
+        });
+
+        // Update play/pause button appearance
+        if (context.playPauseButton) {
+            context.playPauseButton.className = "playback-btn playback-btn-play";
+            context.playPauseButton.title = "Play";
+        }
+
         for (let audioTrack of context.activeObject.audioTracks) {
             for (let i in audioTrack.sounds) {
                 let sound = audioTrack.sounds[i];
@@ -998,22 +1014,18 @@ function advanceFrame() {
             }
         }

-async function pollAudioEvents() {
-    const { invoke } = window.__TAURI__.core;
-
-    try {
-        const events = await invoke('audio_get_events');
-
-        for (const event of events) {
+// Handle audio events pushed from Rust via Tauri event system
+async function handleAudioEvent(event) {
     switch (event.type) {
         case 'RecordingStarted':
-            console.log('Recording started - track:', event.track_id, 'clip:', event.clip_id);
+            console.log('[FRONTEND] RecordingStarted - track:', event.track_id, 'clip:', event.clip_id);
             context.recordingClipId = event.clip_id;

             // Create the clip object in the audio track
             const recordingTrack = context.activeObject.audioTracks.find(t => t.audioTrackId === event.track_id);
             if (recordingTrack) {
                 const startTime = context.activeObject.currentTime || 0;
+                console.log('[FRONTEND] Creating clip object for clip', event.clip_id, 'on track', event.track_id, 'at time', startTime);
                 recordingTrack.clips.push({
                     clipId: event.clip_id,
                     poolIndex: null, // Will be set when recording stops
@@ -1028,6 +1040,8 @@ async function pollAudioEvents() {
                 if (context.timelineWidget?.requestRedraw) {
                     context.timelineWidget.requestRedraw();
                 }
+            } else {
+                console.error('[FRONTEND] Could not find audio track', event.track_id, 'for RecordingStarted event');
             }
             break;

@@ -1038,11 +1052,21 @@ async function pollAudioEvents() {
             break;

         case 'RecordingStopped':
-            console.log('Recording stopped - clip:', event.clip_id, 'pool_index:', event.pool_index, 'waveform peaks:', event.waveform?.length);
+            console.log('[FRONTEND] RecordingStopped event - clip:', event.clip_id, 'pool_index:', event.pool_index, 'waveform peaks:', event.waveform?.length);
+            console.log('[FRONTEND] Current recording state - isRecording:', context.isRecording, 'recordingClipId:', context.recordingClipId);
             await finalizeRecording(event.clip_id, event.pool_index, event.waveform);
+
+            // Always clear recording state when we receive RecordingStopped
+            console.log('[FRONTEND] Clearing recording state after RecordingStopped event');
+            context.isRecording = false;
+            context.recordingTrackId = null;
+            context.recordingClipId = null;
+
+            // Update record button appearance
+            if (context.recordButton) {
+                context.recordButton.className = "playback-btn playback-btn-record";
+                context.recordButton.title = "Record";
+            }
             break;

         case 'RecordingError':
@@ -1053,12 +1077,13 @@ async function pollAudioEvents() {
             context.recordingClipId = null;
             break;
     }
-        }
-    } catch (error) {
-        // Silently ignore errors - polling happens frequently
-    }
 }

+// Set up Tauri event listener for audio events
+listen('audio-event', (tauriEvent) => {
+    handleAudioEvent(tauriEvent.payload);
+});
+
 function updateRecordingClipDuration(clipId, duration) {
     // Find the clip in the active object's audio tracks and update its duration
     for (const audioTrack of context.activeObject.audioTracks) {
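Both halves of the contract, side by side: the listener above receives whatever the backend publishes on the "audio-event" channel as `tauriEvent.payload`. The emitting side is the excerpt from the `TauriEventEmitter` hunk earlier:

    // Backend side of the "audio-event" channel (excerpt from above):
    if let Err(e) = self.app_handle.emit("audio-event", serialized_event) {
        eprintln!("Failed to emit audio event: {}", e);
    }
    // The JS listener's `tauriEvent.payload` is exactly `serialized_event`,
    // carrying the {"type": ...} tag the switch statement matches on.

One JS-side detail worth knowing: `listen` resolves to an unlisten function, discarded here, which is fine for a listener meant to live as long as the app.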
@@ -1155,14 +1180,15 @@ async function toggleRecording() {

     if (context.isRecording) {
         // Stop recording
+        console.log('[FRONTEND] toggleRecording - stopping recording for clip:', context.recordingClipId);
         try {
             await invoke('audio_stop_recording');
             context.isRecording = false;
             context.recordingTrackId = null;
             context.recordingClipId = null;
-            console.log('Recording stopped');
+            console.log('[FRONTEND] Recording stopped via toggle button');
         } catch (error) {
-            console.error('Failed to stop recording:', error);
+            console.error('[FRONTEND] Failed to stop recording:', error);
         }
     } else {
         // Start recording - check if activeLayer is an audio track
@@ -1180,6 +1206,7 @@ async function toggleRecording() {
         // Start recording at current playhead position
         const startTime = context.activeObject.currentTime || 0;

+        console.log('[FRONTEND] Starting recording on track', audioTrack.audioTrackId, 'at time', startTime);
         try {
             await invoke('audio_start_recording', {
                 trackId: audioTrack.audioTrackId,
@@ -1187,14 +1214,14 @@ async function toggleRecording() {
             });
             context.isRecording = true;
             context.recordingTrackId = audioTrack.audioTrackId;
-            console.log('Recording started on track', audioTrack.audioTrackId, 'at time', startTime);
+            console.log('[FRONTEND] Recording started successfully, waiting for RecordingStarted event');

             // Start playback so the timeline moves (if not already playing)
             if (!playing) {
                 await playPause();
             }
         } catch (error) {
-            console.error('Failed to start recording:', error);
+            console.error('[FRONTEND] Failed to start recording:', error);
             alert('Failed to start recording: ' + error);
         }
     }