Audio from videos

This commit is contained in:
Skyler Lehmkuhl 2025-12-03 01:04:09 -05:00
parent d453571c9b
commit ccb29a9e04
14 changed files with 937 additions and 23 deletions

View File

@ -1737,6 +1737,29 @@ impl Engine {
Err(e) => QueryResponse::AudioClipInstanceAdded(Err(e.to_string())),
}
}
Query::AddAudioFileSync(path, data, channels, sample_rate) => {
// Add audio file to pool and return the pool index
// Detect original format from file extension
let path_buf = std::path::PathBuf::from(&path);
let original_format = path_buf.extension()
.and_then(|ext| ext.to_str())
.map(|s| s.to_lowercase());
// Create AudioFile and add to pool
let audio_file = crate::audio::pool::AudioFile::with_format(
path_buf,
data,
channels,
sample_rate,
original_format,
);
let pool_index = self.audio_pool.add_file(audio_file);
// Notify UI about the new audio file (for event listeners)
let _ = self.event_tx.push(AudioEvent::AudioFileAdded(pool_index, path));
QueryResponse::AudioFileAddedSync(Ok(pool_index))
}
Query::GetProject => {
// Clone the entire project for serialization
QueryResponse::ProjectRetrieved(Ok(Box::new(self.project.clone())))
@ -2149,6 +2172,16 @@ impl EngineController {
let _ = self.command_tx.push(Command::AddAudioFile(path, data, channels, sample_rate));
}
/// Add an audio file to the pool synchronously and get the pool index
/// Returns the pool index where the audio file was added
pub fn add_audio_file_sync(&mut self, path: String, data: Vec<f32>, channels: u32, sample_rate: u32) -> Result<usize, String> {
    // Round-trip through the engine's query channel; `?` propagates any
    // transport-level failure from send_query itself.
    let response = self.send_query(Query::AddAudioFileSync(path, data, channels, sample_rate))?;
    if let QueryResponse::AudioFileAddedSync(result) = response {
        result
    } else {
        Err("Unexpected query response".to_string())
    }
}
/// Add a clip to an audio track
pub fn add_audio_clip(&mut self, track_id: TrackId, pool_index: usize, start_time: f64, duration: f64, offset: f64) {
let _ = self.command_tx.push(Command::AddAudioClip(track_id, pool_index, start_time, duration, offset));

View File

@ -272,6 +272,8 @@ pub enum Query {
AddMidiClipInstanceSync(TrackId, crate::audio::midi::MidiClipInstance),
/// Add an audio clip to a track synchronously (track_id, pool_index, start_time, duration, offset) - returns instance ID
AddAudioClipSync(TrackId, usize, f64, f64, f64),
/// Add an audio file to the pool synchronously (path, data, channels, sample_rate) - returns pool index
AddAudioFileSync(String, Vec<f32>, u32, u32),
/// Get a clone of the current project for serialization
GetProject,
/// Set the project (replaces current project state)
@ -339,6 +341,8 @@ pub enum QueryResponse {
MidiClipInstanceAdded(Result<MidiClipInstanceId, String>),
/// Audio clip instance added (returns instance ID)
AudioClipInstanceAdded(Result<AudioClipInstanceId, String>),
/// Audio file added to pool (returns pool index)
AudioFileAddedSync(Result<usize, String>),
/// Project retrieved
ProjectRetrieved(Result<Box<crate::audio::project::Project>, String>),
/// Project set

View File

@ -22,6 +22,13 @@ use std::collections::HashMap;
use std::sync::Arc;
use uuid::Uuid;
/// Backend clip instance ID - wraps both MIDI and Audio instance IDs
///
/// The document addresses clip instances by UUID, while the daw-backend
/// hands out distinct ID types for MIDI vs. sampled-audio instances; this
/// enum lets a single map store either kind.
#[derive(Debug, Clone, Copy)]
pub enum BackendClipInstanceId {
    /// Backend ID for a MIDI clip instance
    Midi(daw_backend::MidiClipInstanceId),
    /// Backend ID for a sampled-audio clip instance
    Audio(daw_backend::AudioClipInstanceId),
}
/// Backend context for actions that need to interact with external systems
///
/// This bundles all backend references (audio, future video) that actions
@ -33,6 +40,9 @@ pub struct BackendContext<'a> {
/// Mapping from document layer UUIDs to backend track IDs
pub layer_to_track_map: &'a HashMap<Uuid, daw_backend::TrackId>,
/// Mapping from document clip instance UUIDs to backend clip instance IDs
pub clip_instance_to_backend_map: &'a mut HashMap<Uuid, BackendClipInstanceId>,
// Future: pub video_controller: Option<&'a mut VideoController>,
}

View File

@ -172,6 +172,13 @@ impl Action for AddClipInstanceAction {
QueryResponse::MidiClipInstanceAdded(Ok(instance_id)) => {
self.backend_track_id = Some(*backend_track_id);
self.backend_midi_instance_id = Some(instance_id);
// Add to global clip instance mapping
backend.clip_instance_to_backend_map.insert(
self.clip_instance.id,
crate::action::BackendClipInstanceId::Midi(instance_id)
);
Ok(())
}
QueryResponse::MidiClipInstanceAdded(Err(e)) => Err(e),
@ -193,6 +200,13 @@ impl Action for AddClipInstanceAction {
QueryResponse::AudioClipInstanceAdded(Ok(instance_id)) => {
self.backend_track_id = Some(*backend_track_id);
self.backend_audio_instance_id = Some(instance_id);
// Add to global clip instance mapping
backend.clip_instance_to_backend_map.insert(
self.clip_instance.id,
crate::action::BackendClipInstanceId::Audio(instance_id)
);
Ok(())
}
QueryResponse::AudioClipInstanceAdded(Err(e)) => Err(e),
@ -213,6 +227,9 @@ impl Action for AddClipInstanceAction {
controller.remove_audio_clip(track_id, audio_instance_id);
}
// Remove from global clip instance mapping
backend.clip_instance_to_backend_map.remove(&self.clip_instance.id);
// Clear stored IDs
self.backend_track_id = None;
self.backend_midi_instance_id = None;

View File

@ -27,7 +27,55 @@ impl MoveClipInstancesAction {
impl Action for MoveClipInstancesAction {
fn execute(&mut self, document: &mut Document) {
// Expand moves to include grouped instances
let mut expanded_moves = self.layer_moves.clone();
let mut already_processed = std::collections::HashSet::new();
for (layer_id, moves) in &self.layer_moves {
for (instance_id, old_start, new_start) in moves {
// Skip if already processed
if already_processed.contains(instance_id) {
continue;
}
already_processed.insert(*instance_id);
// Check if this instance is in a group
if let Some(group) = document.find_group_for_instance(instance_id) {
let offset = new_start - old_start;
// Add all group members to the move list
for (member_layer_id, member_instance_id) in group.get_members() {
if member_instance_id != instance_id && !already_processed.contains(member_instance_id) {
already_processed.insert(*member_instance_id);
// Find member's current position
if let Some(layer) = document.get_layer(member_layer_id) {
let clip_instances = match layer {
AnyLayer::Vector(vl) => &vl.clip_instances,
AnyLayer::Audio(al) => &al.clip_instances,
AnyLayer::Video(vl) => &vl.clip_instances,
};
if let Some(instance) = clip_instances.iter().find(|ci| ci.id == *member_instance_id) {
let member_old = instance.timeline_start;
let member_new = member_old + offset;
expanded_moves.entry(*member_layer_id)
.or_insert_with(Vec::new)
.push((*member_instance_id, member_old, member_new));
}
}
}
}
}
}
}
// Store expanded moves for rollback
self.layer_moves = expanded_moves.clone();
// Apply all moves (including expanded)
for (layer_id, moves) in &expanded_moves {
let layer = match document.get_layer_mut(layer_id) {
Some(l) => l,
None => continue,
@ -82,6 +130,140 @@ impl Action for MoveClipInstancesAction {
format!("Move {} clip instances", total_count)
}
}
/// Mirror the (already applied) moves into the audio backend.
///
/// For MIDI clips the backend is addressed by the pool clip ID; for sampled
/// clips it is addressed by the backend instance ID recorded in
/// `clip_instance_to_backend_map`.
fn execute_backend(&mut self, backend: &mut crate::action::BackendContext, document: &Document) -> Result<(), String> {
    use crate::layer::AnyLayer;
    use crate::clip::AudioClipType;
    // Without an audio engine there is nothing to sync.
    let controller = match backend.audio_controller.as_mut() {
        Some(c) => c,
        None => return Ok(()),
    };
    // Process each layer's moves
    for (layer_id, moves) in &self.layer_moves {
        let layer = document.get_layer(layer_id)
            .ok_or_else(|| format!("Layer {} not found", layer_id))?;
        // Only audio layers are mirrored in the backend. Extracting the
        // instance list here also hoists the loop-invariant layer match out
        // of the per-instance loop.
        let clip_instances = match layer {
            AnyLayer::Audio(al) => &al.clip_instances,
            _ => continue,
        };
        // Look up backend track ID
        let track_id = backend.layer_to_track_map.get(layer_id)
            .ok_or_else(|| format!("Layer {} not mapped to backend track", layer_id))?;
        // Process each clip instance move
        for (instance_id, _old_start, new_start) in moves {
            let instance = clip_instances.iter()
                .find(|ci| ci.id == *instance_id)
                .ok_or_else(|| format!("Clip instance {} not found", instance_id))?;
            // Look up the clip to determine its type
            let clip = document.get_audio_clip(&instance.clip_id)
                .ok_or_else(|| format!("Audio clip {} not found", instance.clip_id))?;
            match &clip.clip_type {
                AudioClipType::Midi { midi_clip_id } => {
                    // For MIDI: move_clip expects the pool clip ID
                    controller.move_clip(*track_id, *midi_clip_id, *new_start);
                }
                AudioClipType::Sampled { .. } => {
                    // For sampled audio: move_clip expects the instance ID
                    let backend_instance_id = backend.clip_instance_to_backend_map.get(instance_id)
                        .ok_or_else(|| format!("Clip instance {} not mapped to backend", instance_id))?;
                    match backend_instance_id {
                        crate::action::BackendClipInstanceId::Audio(audio_id) => {
                            controller.move_clip(*track_id, *audio_id, *new_start);
                        }
                        _ => return Err("Expected audio instance ID for sampled clip".to_string()),
                    }
                }
            }
        }
    }
    Ok(())
}
/// Restore each moved clip's pre-move start time in the audio backend.
///
/// Same addressing scheme as `execute_backend`, but writes the recorded
/// `old_start` values back.
fn rollback_backend(&mut self, backend: &mut crate::action::BackendContext, document: &Document) -> Result<(), String> {
    use crate::layer::AnyLayer;
    use crate::clip::AudioClipType;
    // Without an audio engine there is nothing to undo in the backend.
    let controller = match backend.audio_controller.as_mut() {
        Some(c) => c,
        None => return Ok(()),
    };
    // Process each layer's moves (restore old positions)
    for (layer_id, moves) in &self.layer_moves {
        let layer = document.get_layer(layer_id)
            .ok_or_else(|| format!("Layer {} not found", layer_id))?;
        // Only audio layers are mirrored in the backend. Extracting the
        // instance list here also hoists the loop-invariant layer match out
        // of the per-instance loop.
        let clip_instances = match layer {
            AnyLayer::Audio(al) => &al.clip_instances,
            _ => continue,
        };
        // Look up backend track ID
        let track_id = backend.layer_to_track_map.get(layer_id)
            .ok_or_else(|| format!("Layer {} not mapped to backend track", layer_id))?;
        // Process each clip instance move (restore old position)
        for (instance_id, old_start, _new_start) in moves {
            let instance = clip_instances.iter()
                .find(|ci| ci.id == *instance_id)
                .ok_or_else(|| format!("Clip instance {} not found", instance_id))?;
            // Look up the clip to determine its type
            let clip = document.get_audio_clip(&instance.clip_id)
                .ok_or_else(|| format!("Audio clip {} not found", instance.clip_id))?;
            match &clip.clip_type {
                AudioClipType::Midi { midi_clip_id } => {
                    // For MIDI: move_clip expects the pool clip ID
                    controller.move_clip(*track_id, *midi_clip_id, *old_start);
                }
                AudioClipType::Sampled { .. } => {
                    // For sampled audio: move_clip expects the instance ID
                    let backend_instance_id = backend.clip_instance_to_backend_map.get(instance_id)
                        .ok_or_else(|| format!("Clip instance {} not mapped to backend", instance_id))?;
                    match backend_instance_id {
                        crate::action::BackendClipInstanceId::Audio(audio_id) => {
                            controller.move_clip(*track_id, *audio_id, *old_start);
                        }
                        _ => return Err("Expected audio instance ID for sampled clip".to_string()),
                    }
                }
            }
        }
    }
    Ok(())
}
}
#[cfg(test)]

View File

@ -63,7 +63,103 @@ impl TrimClipInstancesAction {
impl Action for TrimClipInstancesAction {
fn execute(&mut self, document: &mut Document) {
// Expand trims to include grouped instances
let mut expanded_trims = self.layer_trims.clone();
let mut already_processed = std::collections::HashSet::new();
for (layer_id, trims) in &self.layer_trims {
for (instance_id, trim_type, old, new) in trims {
// Skip if already processed
if already_processed.contains(instance_id) {
continue;
}
already_processed.insert(*instance_id);
// Check if this instance is in a group
if let Some(group) = document.find_group_for_instance(instance_id) {
// Calculate offset based on trim type
match trim_type {
TrimType::TrimLeft => {
if let (Some(old_trim), Some(new_trim), Some(old_timeline), Some(new_timeline)) =
(old.trim_value, new.trim_value, old.timeline_start, new.timeline_start)
{
let trim_offset = new_trim - old_trim;
let timeline_offset = new_timeline - old_timeline;
// Add all group members to the trim list
for (member_layer_id, member_instance_id) in group.get_members() {
if member_instance_id != instance_id && !already_processed.contains(member_instance_id) {
already_processed.insert(*member_instance_id);
// Find member's current values
if let Some(layer) = document.get_layer(member_layer_id) {
let clip_instances = match layer {
AnyLayer::Vector(vl) => &vl.clip_instances,
AnyLayer::Audio(al) => &al.clip_instances,
AnyLayer::Video(vl) => &vl.clip_instances,
};
if let Some(instance) = clip_instances.iter().find(|ci| ci.id == *member_instance_id) {
let member_old_trim = instance.trim_start;
let member_old_timeline = instance.timeline_start;
let member_new_trim = member_old_trim + trim_offset;
let member_new_timeline = member_old_timeline + timeline_offset;
expanded_trims.entry(*member_layer_id)
.or_insert_with(Vec::new)
.push((
*member_instance_id,
TrimType::TrimLeft,
TrimData::left(member_old_trim, member_old_timeline),
TrimData::left(member_new_trim, member_new_timeline),
));
}
}
}
}
}
}
TrimType::TrimRight => {
// Add all group members to the trim list
for (member_layer_id, member_instance_id) in group.get_members() {
if member_instance_id != instance_id && !already_processed.contains(member_instance_id) {
already_processed.insert(*member_instance_id);
// Find member's current trim_end
if let Some(layer) = document.get_layer(member_layer_id) {
let clip_instances = match layer {
AnyLayer::Vector(vl) => &vl.clip_instances,
AnyLayer::Audio(al) => &al.clip_instances,
AnyLayer::Video(vl) => &vl.clip_instances,
};
if let Some(instance) = clip_instances.iter().find(|ci| ci.id == *member_instance_id) {
let member_old_trim_end = instance.trim_end;
let member_new_trim_end = new.trim_value;
expanded_trims.entry(*member_layer_id)
.or_insert_with(Vec::new)
.push((
*member_instance_id,
TrimType::TrimRight,
TrimData::right(member_old_trim_end),
TrimData::right(member_new_trim_end),
));
}
}
}
}
}
}
}
}
}
// Store expanded trims for rollback
self.layer_trims = expanded_trims.clone();
// Apply all trims (including expanded)
for (layer_id, trims) in &expanded_trims {
let layer = match document.get_layer_mut(layer_id) {
Some(l) => l,
None => continue,
@ -142,6 +238,155 @@ impl Action for TrimClipInstancesAction {
format!("Trim {} clip instances", total_count)
}
}
/// Mirror the (already applied) trims into the audio backend.
///
/// Runs after `execute()`, so each document instance already carries the new
/// trim values; the trim tuple's data fields are therefore ignored here.
fn execute_backend(&mut self, backend: &mut crate::action::BackendContext, document: &Document) -> Result<(), String> {
    use crate::layer::AnyLayer;
    use crate::clip::AudioClipType;
    // Without an audio engine there is nothing to sync.
    let controller = match backend.audio_controller.as_mut() {
        Some(c) => c,
        None => return Ok(()),
    };
    // Process each layer's trims
    for (layer_id, trims) in &self.layer_trims {
        let layer = document.get_layer(layer_id)
            .ok_or_else(|| format!("Layer {} not found", layer_id))?;
        // Only audio layers are mirrored in the backend. Extracting the
        // instance list here also hoists the loop-invariant layer match out
        // of the per-instance loop.
        let clip_instances = match layer {
            AnyLayer::Audio(al) => &al.clip_instances,
            _ => continue,
        };
        // Look up backend track ID
        let track_id = backend.layer_to_track_map.get(layer_id)
            .ok_or_else(|| format!("Layer {} not mapped to backend track", layer_id))?;
        // Unused fields are underscored: the post-execute instance is the
        // source of truth for the new trim window.
        for (instance_id, _trim_type, _old, _new) in trims {
            let instance = clip_instances.iter()
                .find(|ci| ci.id == *instance_id)
                .ok_or_else(|| format!("Clip instance {} not found", instance_id))?;
            // Look up the clip to determine its type and duration
            let clip = document.get_audio_clip(&instance.clip_id)
                .ok_or_else(|| format!("Audio clip {} not found", instance.clip_id))?;
            // New backend window; an unset trim_end means "to end of clip".
            let internal_start = instance.trim_start;
            let internal_end = instance.trim_end.unwrap_or(clip.duration);
            match &clip.clip_type {
                AudioClipType::Midi { midi_clip_id } => {
                    // For MIDI: trim_clip expects the pool clip ID
                    controller.trim_clip(*track_id, *midi_clip_id, internal_start, internal_end);
                }
                AudioClipType::Sampled { .. } => {
                    // For sampled audio: trim_clip expects the instance ID
                    let backend_instance_id = backend.clip_instance_to_backend_map.get(instance_id)
                        .ok_or_else(|| format!("Clip instance {} not mapped to backend", instance_id))?;
                    match backend_instance_id {
                        crate::action::BackendClipInstanceId::Audio(audio_id) => {
                            controller.trim_clip(*track_id, *audio_id, internal_start, internal_end);
                        }
                        _ => return Err("Expected audio instance ID for sampled clip".to_string()),
                    }
                }
            }
        }
    }
    Ok(())
}
/// Restore each clip's pre-trim window in the audio backend.
///
/// Only the edited edge is taken from the stored old `TrimData`; the
/// untouched edge is read from the document instance.
fn rollback_backend(&mut self, backend: &mut crate::action::BackendContext, document: &Document) -> Result<(), String> {
    use crate::layer::AnyLayer;
    use crate::clip::AudioClipType;
    // Without an audio engine there is nothing to undo in the backend.
    let controller = match backend.audio_controller.as_mut() {
        Some(c) => c,
        None => return Ok(()),
    };
    // Process each layer's trims (restore old trim values)
    for (layer_id, trims) in &self.layer_trims {
        let layer = document.get_layer(layer_id)
            .ok_or_else(|| format!("Layer {} not found", layer_id))?;
        // Only audio layers are mirrored in the backend. Extracting the
        // instance list here also hoists the loop-invariant layer match out
        // of the per-instance loop.
        let clip_instances = match layer {
            AnyLayer::Audio(al) => &al.clip_instances,
            _ => continue,
        };
        // Look up backend track ID
        let track_id = backend.layer_to_track_map.get(layer_id)
            .ok_or_else(|| format!("Layer {} not mapped to backend track", layer_id))?;
        // Process each clip instance trim (restore old values)
        for (instance_id, trim_type, old, _new) in trims {
            let instance = clip_instances.iter()
                .find(|ci| ci.id == *instance_id)
                .ok_or_else(|| format!("Clip instance {} not found", instance_id))?;
            // Look up the clip to determine its type and duration
            let clip = document.get_audio_clip(&instance.clip_id)
                .ok_or_else(|| format!("Audio clip {} not found", instance.clip_id))?;
            // Reconstruct the old window: a left trim never touched trim_end,
            // and a right trim never touched trim_start.
            let internal_start = match trim_type {
                TrimType::TrimLeft => old.trim_value.unwrap_or(0.0),
                TrimType::TrimRight => instance.trim_start, // trim_start wasn't changed
            };
            let internal_end = match trim_type {
                TrimType::TrimLeft => instance.trim_end.unwrap_or(clip.duration), // trim_end wasn't changed
                TrimType::TrimRight => old.trim_value.unwrap_or(clip.duration),
            };
            match &clip.clip_type {
                AudioClipType::Midi { midi_clip_id } => {
                    // For MIDI: trim_clip expects the pool clip ID
                    controller.trim_clip(*track_id, *midi_clip_id, internal_start, internal_end);
                }
                AudioClipType::Sampled { .. } => {
                    // For sampled audio: trim_clip expects the instance ID
                    let backend_instance_id = backend.clip_instance_to_backend_map.get(instance_id)
                        .ok_or_else(|| format!("Clip instance {} not mapped to backend", instance_id))?;
                    match backend_instance_id {
                        crate::action::BackendClipInstanceId::Audio(audio_id) => {
                            controller.trim_clip(*track_id, *audio_id, internal_start, internal_end);
                        }
                        _ => return Err("Expected audio instance ID for sampled clip".to_string()),
                    }
                }
            }
        }
    }
    Ok(())
}
}
#[cfg(test)]

View File

@ -107,6 +107,9 @@ pub struct Document {
/// Image asset library - static images for fill textures
pub image_assets: HashMap<Uuid, ImageAsset>,
/// Instance groups for linked clip instances
pub instance_groups: HashMap<Uuid, crate::instance_group::InstanceGroup>,
/// Current UI layout state (serialized for save/load)
#[serde(default, skip_serializing_if = "Option::is_none")]
pub ui_layout: Option<LayoutNode>,
@ -135,6 +138,7 @@ impl Default for Document {
video_clips: HashMap::new(),
audio_clips: HashMap::new(),
image_assets: HashMap::new(),
instance_groups: HashMap::new(),
ui_layout: None,
ui_layout_base: None,
current_time: 0.0,
@ -243,6 +247,24 @@ impl Document {
id
}
/// Add an instance group to the document
pub fn add_instance_group(&mut self, group: crate::instance_group::InstanceGroup) -> Uuid {
let id = group.id;
self.instance_groups.insert(id, group);
id
}
/// Remove an instance group from the document
pub fn remove_instance_group(&mut self, group_id: &Uuid) {
self.instance_groups.remove(group_id);
}
/// Find the group that contains a specific clip instance
pub fn find_group_for_instance(&self, instance_id: &Uuid) -> Option<&crate::instance_group::InstanceGroup> {
self.instance_groups.values()
.find(|group| group.contains_instance(instance_id))
}
/// Get a vector clip by ID
pub fn get_vector_clip(&self, id: &Uuid) -> Option<&VectorClip> {
self.vector_clips.get(id)

View File

@ -0,0 +1,58 @@
use uuid::Uuid;
use serde::{Serialize, Deserialize};
/// A group of clip instances that should be manipulated together
///
/// Instance groups ensure that operations like moving or trimming
/// are applied to all member instances simultaneously. This is used
/// to keep video and audio clip instances synchronized.
///
/// Groups are serialized as part of the document
/// (`Document::instance_groups`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct InstanceGroup {
    /// Unique identifier for this group
    pub id: Uuid,
    /// Optional name for the group (e.g., "Video 1 + Audio")
    pub name: Option<String>,
    /// Instance IDs in this group (across potentially different layers)
    /// Format: Vec<(layer_id, clip_instance_id)>
    pub members: Vec<(Uuid, Uuid)>,
}
impl InstanceGroup {
/// Create a new empty instance group
pub fn new() -> Self {
Self {
id: Uuid::new_v4(),
name: None,
members: Vec::new(),
}
}
/// Set the name for this group
pub fn with_name(mut self, name: impl Into<String>) -> Self {
self.name = Some(name.into());
self
}
/// Add a member to this group
pub fn add_member(&mut self, layer_id: Uuid, instance_id: Uuid) {
self.members.push((layer_id, instance_id));
}
/// Check if this group contains a specific instance
pub fn contains_instance(&self, instance_id: &Uuid) -> bool {
self.members.iter().any(|(_, id)| id == instance_id)
}
/// Get all members of this group
pub fn get_members(&self) -> &[(Uuid, Uuid)] {
&self.members
}
}
impl Default for InstanceGroup {
    /// Equivalent to [`InstanceGroup::new`]: empty, unnamed, fresh ID.
    fn default() -> Self {
        Self::new()
    }
}

View File

@ -12,6 +12,7 @@ pub mod object;
pub mod layer;
pub mod layer_tree;
pub mod clip;
pub mod instance_group;
pub mod document;
pub mod renderer;
pub mod video;

View File

@ -426,6 +426,26 @@ impl FileOperationsWorker {
}
}
/// Result from background audio extraction thread
///
/// Sent over the `audio_extraction_tx` channel and drained by the UI
/// thread each frame, which applies it in
/// `handle_audio_extraction_result`.
#[derive(Debug)]
enum AudioExtractionResult {
    /// Audio was extracted and already registered in the daw-backend pool.
    Success {
        /// Source VideoClip (used to set its `linked_audio_clip_id`)
        video_clip_id: Uuid,
        /// Fully-built AudioClip ready to insert into the document
        audio_clip: lightningbeam_core::clip::AudioClip,
        /// Index of the extracted samples in the backend audio pool
        pool_index: usize,
        /// Display name of the source video (for logging)
        video_name: String,
        /// Channel count of the extracted audio (for logging)
        channels: u32,
        /// Sample rate of the extracted audio (for logging)
        sample_rate: u32,
    },
    /// The video file contained no audio stream.
    NoAudio {
        video_clip_id: Uuid,
    },
    /// Extraction failed, or the backend refused the audio data.
    Error {
        video_clip_id: Uuid,
        error: String,
    },
}
struct EditorApp {
layouts: Vec<LayoutDefinition>,
current_layout_index: usize,
@ -463,6 +483,8 @@ struct EditorApp {
// Track ID mapping (Document layer UUIDs <-> daw-backend TrackIds)
layer_to_track_map: HashMap<Uuid, daw_backend::TrackId>,
track_to_layer_map: HashMap<daw_backend::TrackId, Uuid>,
// Clip instance ID mapping (Document clip instance UUIDs <-> backend clip instance IDs)
clip_instance_to_backend_map: HashMap<Uuid, lightningbeam_core::action::BackendClipInstanceId>,
// Playback state (global for all panes)
playback_time: f64, // Current playback position in seconds (persistent - save with document)
is_playing: bool, // Whether playback is currently active (transient - don't save)
@ -496,6 +518,10 @@ struct EditorApp {
file_command_tx: std::sync::mpsc::Sender<FileCommand>,
/// Current file operation in progress (if any)
file_operation: Option<FileOperation>,
/// Audio extraction channel for background thread communication
audio_extraction_tx: std::sync::mpsc::Sender<AudioExtractionResult>,
audio_extraction_rx: std::sync::mpsc::Receiver<AudioExtractionResult>,
}
/// Import filter types for the file dialog
@ -580,6 +606,9 @@ impl EditorApp {
}
};
// Create audio extraction channel for background thread communication
let (audio_extraction_tx, audio_extraction_rx) = std::sync::mpsc::channel();
Self {
layouts,
current_layout_index: 0,
@ -615,6 +644,7 @@ impl EditorApp {
)),
layer_to_track_map: HashMap::new(),
track_to_layer_map: HashMap::new(),
clip_instance_to_backend_map: HashMap::new(),
playback_time: 0.0, // Start at beginning
is_playing: false, // Start paused
dragging_asset: None, // No asset being dragged initially
@ -630,6 +660,8 @@ impl EditorApp {
config,
file_command_tx,
file_operation: None, // No file operation in progress initially
audio_extraction_tx,
audio_extraction_rx,
}
}
@ -983,6 +1015,7 @@ impl EditorApp {
let mut backend_context = lightningbeam_core::action::BackendContext {
audio_controller: Some(&mut *controller),
layer_to_track_map: &self.layer_to_track_map,
clip_instance_to_backend_map: &mut self.clip_instance_to_backend_map,
};
match self.action_executor.undo_with_backend(&mut backend_context) {
@ -1004,6 +1037,7 @@ impl EditorApp {
let mut backend_context = lightningbeam_core::action::BackendContext {
audio_controller: Some(&mut *controller),
layer_to_track_map: &self.layer_to_track_map,
clip_instance_to_backend_map: &mut self.clip_instance_to_backend_map,
};
match self.action_executor.redo_with_backend(&mut backend_context) {
@ -1723,8 +1757,78 @@ impl EditorApp {
}
drop(video_mgr);
// TODO: Extract audio in background thread if present
// TODO: Create AudioClip and link to VideoClip via linked_audio_clip_id
// Spawn background thread for audio extraction if video has audio
if metadata.has_audio {
if let Some(ref audio_controller) = self.audio_controller {
let path_clone = path_str.clone();
let video_clip_id = clip_id;
let video_name = name.clone();
let audio_controller_clone = Arc::clone(audio_controller);
let tx = self.audio_extraction_tx.clone();
std::thread::spawn(move || {
use lightningbeam_core::video::extract_audio_from_video;
use lightningbeam_core::clip::{AudioClip, AudioClipType};
// Extract audio from video (slow FFmpeg operation)
match extract_audio_from_video(&path_clone) {
Ok(Some(extracted)) => {
// Add audio to daw-backend pool synchronously to get pool index
let pool_index = {
let mut controller = audio_controller_clone.lock().unwrap();
match controller.add_audio_file_sync(
path_clone.clone(),
extracted.samples,
extracted.channels,
extracted.sample_rate,
) {
Ok(index) => index,
Err(e) => {
eprintln!("Failed to add audio file to backend: {}", e);
let _ = tx.send(AudioExtractionResult::Error {
video_clip_id,
error: format!("Failed to add audio to backend: {}", e),
});
return;
}
}
};
// Create AudioClip
let audio_clip_name = format!("{} (Audio)", video_name);
let audio_clip = AudioClip::new_sampled(
&audio_clip_name,
pool_index,
extracted.duration,
);
// Send success result
let _ = tx.send(AudioExtractionResult::Success {
video_clip_id,
audio_clip,
pool_index,
video_name,
channels: extracted.channels,
sample_rate: extracted.sample_rate,
});
}
Ok(None) => {
// Video has no audio stream
let _ = tx.send(AudioExtractionResult::NoAudio { video_clip_id });
}
Err(e) => {
// Audio extraction failed
let _ = tx.send(AudioExtractionResult::Error {
video_clip_id,
error: e,
});
}
}
});
} else {
eprintln!(" ⚠️ Video has audio but audio engine not initialized - skipping extraction");
}
}
// Spawn background thread for thumbnail generation
let video_manager_clone = Arc::clone(&self.video_manager);
@ -1751,7 +1855,58 @@ impl EditorApp {
);
if metadata.has_audio {
println!(" Video has audio track (extraction not yet implemented)");
println!(" Extracting audio track in background...");
}
}
/// Handle audio extraction results from background thread
///
/// Applies one message from the extraction worker (polled off
/// `audio_extraction_rx` every frame): on success the new `AudioClip` is
/// inserted into the document and linked back to its source `VideoClip`;
/// the other variants only log.
fn handle_audio_extraction_result(&mut self, result: AudioExtractionResult) {
    match result {
        AudioExtractionResult::Success {
            video_clip_id,
            audio_clip,
            pool_index,
            video_name,
            channels,
            sample_rate,
        } => {
            // Add AudioClip to document
            let audio_clip_id = self.action_executor.document_mut().add_audio_clip(audio_clip);
            // Update VideoClip's linked_audio_clip_id so the asset library
            // and drag/drop can pair the two clips
            if let Some(video_clip) = self.action_executor.document_mut().video_clips
                .get_mut(&video_clip_id)
            {
                video_clip.linked_audio_clip_id = Some(audio_clip_id);
                // Get audio clip duration for logging
                let duration = self.action_executor.document().audio_clips
                    .get(&audio_clip_id)
                    .map(|c| c.duration)
                    .unwrap_or(0.0);
                println!("✅ Extracted audio from '{}' ({:.1}s, {}ch, {}Hz) - AudioClip ID: {}",
                    video_name,
                    duration,
                    channels,
                    sample_rate,
                    audio_clip_id
                );
                // Fetch waveform from backend and cache it for rendering
                if let Some(waveform) = self.fetch_waveform(pool_index) {
                    println!(" Cached waveform with {} peaks", waveform.len());
                }
            } else {
                // The video clip may have been deleted while extraction ran
                eprintln!("⚠️ Audio extracted but VideoClip {} not found (may have been deleted)", video_clip_id);
            }
        }
        AudioExtractionResult::NoAudio { video_clip_id } => {
            println!(" Video {} has no audio stream", video_clip_id);
        }
        AudioExtractionResult::Error { video_clip_id, error } => {
            eprintln!("❌ Failed to extract audio from video {}: {}", video_clip_id, error);
        }
    }
}
}
@ -1764,6 +1919,11 @@ impl eframe::App for EditorApp {
o.zoom_with_keyboard = false;
});
// Poll audio extraction results from background threads
while let Ok(result) = self.audio_extraction_rx.try_recv() {
self.handle_audio_extraction_result(result);
}
// Check for native menu events (macOS)
if let Some(menu_system) = &self.menu_system {
if let Some(action) = menu_system.check_events() {
@ -2014,6 +2174,11 @@ impl eframe::App for EditorApp {
self.pending_view_action = None;
}
// Sync any new audio layers created during this frame to the backend
// This handles layers created directly (e.g., auto-created audio tracks for video+audio)
// Must happen BEFORE executing actions so the layer-to-track mapping is available
self.sync_audio_layers_to_backend();
// Execute all pending actions (two-phase dispatch)
for action in pending_actions {
// Create backend context for actions that need backend sync
@ -2022,6 +2187,7 @@ impl eframe::App for EditorApp {
let mut backend_context = lightningbeam_core::action::BackendContext {
audio_controller: Some(&mut *controller),
layer_to_track_map: &self.layer_to_track_map,
clip_instance_to_backend_map: &mut self.clip_instance_to_backend_map,
};
// Execute action with backend synchronization

View File

@ -674,8 +674,18 @@ impl AssetLibraryPane {
});
}
// Collect audio clips
// Build set of audio clip IDs that are linked to videos
let linked_audio_ids: std::collections::HashSet<uuid::Uuid> = document.video_clips.values()
.filter_map(|video| video.linked_audio_clip_id)
.collect();
// Collect audio clips (skip those linked to videos)
for (id, clip) in &document.audio_clips {
// Skip if this audio clip is linked to a video
if linked_audio_ids.contains(id) {
continue;
}
let (extra_info, drag_clip_type) = match &clip.clip_type {
AudioClipType::Sampled { .. } => ("Sampled".to_string(), DragClipType::AudioSampled),
AudioClipType::Midi { .. } => ("MIDI".to_string(), DragClipType::AudioMidi),
@ -1292,12 +1302,23 @@ impl AssetLibraryPane {
// Handle drag start
if response.drag_started() {
// For video clips, get the linked audio clip ID
let linked_audio_clip_id = if asset.drag_clip_type == DragClipType::Video {
let result = document.video_clips.get(&asset.id)
.and_then(|video| video.linked_audio_clip_id);
eprintln!("DEBUG DRAG: Video clip {} has linked audio: {:?}", asset.id, result);
result
} else {
None
};
*shared.dragging_asset = Some(DraggingAsset {
clip_id: asset.id,
clip_type: asset.drag_clip_type,
name: asset.name.clone(),
duration: asset.duration,
dimensions: asset.dimensions,
linked_audio_clip_id,
});
}
@ -1587,12 +1608,23 @@ impl AssetLibraryPane {
// Handle drag start
if response.drag_started() {
// For video clips, get the linked audio clip ID
let linked_audio_clip_id = if asset.drag_clip_type == DragClipType::Video {
let result = document.video_clips.get(&asset.id)
.and_then(|video| video.linked_audio_clip_id);
eprintln!("DEBUG DRAG: Video clip {} has linked audio: {:?}", asset.id, result);
result
} else {
None
};
*shared.dragging_asset = Some(DraggingAsset {
clip_id: asset.id,
clip_type: asset.drag_clip_type,
name: asset.name.clone(),
duration: asset.duration,
dimensions: asset.dimensions,
linked_audio_clip_id,
});
}
}

View File

@ -48,6 +48,8 @@ pub struct DraggingAsset {
pub duration: f64,
/// Dimensions (width, height) for vector/video clips, None for audio
pub dimensions: Option<(f64, f64)>,
/// Optional linked audio clip ID (for video clips with extracted audio)
pub linked_audio_clip_id: Option<Uuid>,
}
pub mod toolbar;

View File

@ -38,6 +38,19 @@ fn create_layer_for_clip_type(clip_type: DragClipType, name: &str) -> AnyLayer {
}
}
/// Find an existing sampled audio track in the document
/// Returns the layer ID if found, None otherwise
/// Locate an existing sampled audio track among the document root's children.
///
/// Returns the layer ID of the first `AnyLayer::Audio` child whose type is
/// `AudioLayerType::Sampled`, or `None` when no such track exists.
/// Only direct children of `document.root` are inspected (no recursion).
fn find_sampled_audio_track(document: &lightningbeam_core::document::Document) -> Option<uuid::Uuid> {
    document.root.children.iter().find_map(|child| match child {
        // Match only sampled audio layers; all other layer kinds are skipped.
        AnyLayer::Audio(audio) if audio.audio_layer_type == AudioLayerType::Sampled => {
            Some(audio.layer.id)
        }
        _ => None,
    })
}
/// Shared Vello resources (created once, reused by all Stage panes)
struct SharedVelloResources {
renderer: Arc<Mutex<vello::Renderer>>,
@ -4169,6 +4182,9 @@ impl PaneRenderer for StagePane {
// Handle drop on mouse release
if ui.input(|i| i.pointer.any_released()) {
eprintln!("DEBUG STAGE DROP: Dropping clip type {:?}, linked_audio: {:?}",
dragging.clip_type, dragging.linked_audio_clip_id);
// Convert screen position to world coordinates
let canvas_pos = pointer_pos - rect.min;
let world_pos = (canvas_pos - self.pan_offset) / self.zoom;
@ -4274,12 +4290,67 @@ impl PaneRenderer for StagePane {
}
}
// Create and queue action
// Save instance ID for potential grouping
let video_instance_id = clip_instance.id;
// Create and queue action for video
let action = lightningbeam_core::actions::AddClipInstanceAction::new(
layer_id,
clip_instance,
);
shared.pending_actions.push(Box::new(action));
// If video has linked audio, auto-place it and create group
if let Some(linked_audio_clip_id) = dragging.linked_audio_clip_id {
eprintln!("DEBUG STAGE: Video has linked audio clip: {}", linked_audio_clip_id);
// Find or create sampled audio track
let audio_layer_id = {
let doc = shared.action_executor.document();
let result = find_sampled_audio_track(doc);
if let Some(id) = result {
eprintln!("DEBUG STAGE: Found existing audio track: {}", id);
} else {
eprintln!("DEBUG STAGE: No existing audio track found");
}
result
}.unwrap_or_else(|| {
eprintln!("DEBUG STAGE: Creating new audio track");
// Create new sampled audio layer
let audio_layer = AudioLayer::new_sampled("Audio Track");
let layer_id = shared.action_executor.document_mut().root.add_child(
AnyLayer::Audio(audio_layer)
);
eprintln!("DEBUG STAGE: Created audio layer with ID: {}", layer_id);
layer_id
});
eprintln!("DEBUG STAGE: Using audio layer ID: {}", audio_layer_id);
// Create audio clip instance at same timeline position
let audio_instance = ClipInstance::new(linked_audio_clip_id)
.with_timeline_start(drop_time);
let audio_instance_id = audio_instance.id;
eprintln!("DEBUG STAGE: Created audio instance: {} for clip: {}", audio_instance_id, linked_audio_clip_id);
// Queue audio action
let audio_action = lightningbeam_core::actions::AddClipInstanceAction::new(
audio_layer_id,
audio_instance,
);
shared.pending_actions.push(Box::new(audio_action));
eprintln!("DEBUG STAGE: Queued audio action, total pending: {}", shared.pending_actions.len());
// Create instance group linking video and audio
let mut group = lightningbeam_core::instance_group::InstanceGroup::new();
group.add_member(layer_id, video_instance_id);
group.add_member(audio_layer_id, audio_instance_id);
shared.action_executor.document_mut().add_instance_group(group);
eprintln!("DEBUG STAGE: Created instance group");
} else {
eprintln!("DEBUG STAGE: Video has NO linked audio clip!");
}
}
}

View File

@ -72,6 +72,19 @@ fn can_drop_on_layer(layer: &AnyLayer, clip_type: DragClipType) -> bool {
}
}
/// Find an existing sampled audio track in the document
/// Returns the layer ID if found, None otherwise
/// Locate an existing sampled audio track among the document root's children.
///
/// Scans the direct children of `document.root` (non-recursive) and returns
/// the layer ID of the first sampled audio layer found, or `None` if the
/// document has no sampled audio track yet.
fn find_sampled_audio_track(document: &lightningbeam_core::document::Document) -> Option<uuid::Uuid> {
    document.root.children.iter().find_map(|child| {
        if let AnyLayer::Audio(audio) = child {
            // Only sampled tracks qualify; MIDI or other audio layer types are ignored.
            if audio.audio_layer_type == AudioLayerType::Sampled {
                return Some(audio.layer.id);
            }
        }
        None
    })
}
impl TimelinePane {
pub fn new() -> Self {
Self {
@ -2086,35 +2099,93 @@ impl PaneRenderer for TimelinePane {
let layer_id = layer.id();
let drop_time = self.x_to_time(pointer_pos.x - content_rect.min.x).max(0.0);
// Get document dimensions for centering
let doc = shared.action_executor.document();
let center_x = doc.width / 2.0;
let center_y = doc.height / 2.0;
// Get document dimensions for centering and create clip instance
let (center_x, center_y, mut clip_instance) = {
let doc = shared.action_executor.document();
let center_x = doc.width / 2.0;
let center_y = doc.height / 2.0;
// Create clip instance centered on stage, at drop time
let mut clip_instance = ClipInstance::new(dragging.clip_id)
.with_timeline_start(drop_time)
.with_position(center_x, center_y);
let mut clip_instance = ClipInstance::new(dragging.clip_id)
.with_timeline_start(drop_time)
.with_position(center_x, center_y);
// For video clips, scale to fill document dimensions
if dragging.clip_type == DragClipType::Video {
if let Some((video_width, video_height)) = dragging.dimensions {
// Calculate scale to fill document
let scale_x = doc.width / video_width;
let scale_y = doc.height / video_height;
// For video clips, scale to fill document dimensions
if dragging.clip_type == DragClipType::Video {
if let Some((video_width, video_height)) = dragging.dimensions {
// Calculate scale to fill document
let scale_x = doc.width / video_width;
let scale_y = doc.height / video_height;
clip_instance.transform.scale_x = scale_x;
clip_instance.transform.scale_y = scale_y;
clip_instance.transform.scale_x = scale_x;
clip_instance.transform.scale_y = scale_y;
}
}
}
// Create and queue action
(center_x, center_y, clip_instance)
}; // doc is dropped here
// Save instance ID for potential grouping
let video_instance_id = clip_instance.id;
// Create and queue action for video
let action = lightningbeam_core::actions::AddClipInstanceAction::new(
layer_id,
clip_instance,
);
shared.pending_actions.push(Box::new(action));
// If video has linked audio, auto-place it and create group
if let Some(linked_audio_clip_id) = dragging.linked_audio_clip_id {
eprintln!("DEBUG: Video has linked audio clip: {}", linked_audio_clip_id);
// Find or create sampled audio track
let audio_layer_id = {
let doc = shared.action_executor.document();
let result = find_sampled_audio_track(doc);
if let Some(id) = result {
eprintln!("DEBUG: Found existing audio track: {}", id);
} else {
eprintln!("DEBUG: No existing audio track found");
}
result
}.unwrap_or_else(|| {
eprintln!("DEBUG: Creating new audio track");
// Create new sampled audio layer
let audio_layer = lightningbeam_core::layer::AudioLayer::new_sampled("Audio Track");
let layer_id = shared.action_executor.document_mut().root.add_child(
lightningbeam_core::layer::AnyLayer::Audio(audio_layer)
);
eprintln!("DEBUG: Created audio layer with ID: {}", layer_id);
layer_id
});
eprintln!("DEBUG: Using audio layer ID: {}", audio_layer_id);
// Create audio clip instance at same timeline position
let audio_instance = ClipInstance::new(linked_audio_clip_id)
.with_timeline_start(drop_time);
let audio_instance_id = audio_instance.id;
eprintln!("DEBUG: Created audio instance: {} for clip: {}", audio_instance_id, linked_audio_clip_id);
// Queue audio action
let audio_action = lightningbeam_core::actions::AddClipInstanceAction::new(
audio_layer_id,
audio_instance,
);
shared.pending_actions.push(Box::new(audio_action));
eprintln!("DEBUG: Queued audio action, total pending: {}", shared.pending_actions.len());
// Create instance group linking video and audio
let mut group = lightningbeam_core::instance_group::InstanceGroup::new();
group.add_member(layer_id, video_instance_id);
group.add_member(audio_layer_id, audio_instance_id);
shared.action_executor.document_mut().add_instance_group(group);
eprintln!("DEBUG: Created instance group");
} else {
eprintln!("DEBUG: Video has NO linked audio clip!");
}
// Clear drag state
*shared.dragging_asset = None;
}