Merge branch 'new_timeline' of github.com:skykooler/Lightningbeam into new_timeline

Skyler Lehmkuhl 2025-11-06 23:42:50 -05:00
commit 7ec69ce950
16 changed files with 3093 additions and 151 deletions


@ -398,6 +398,24 @@ impl Engine {
_ => {}
}
}
Command::TrimClip(track_id, clip_id, new_start_time, new_duration, new_offset) => {
match self.project.get_track_mut(track_id) {
Some(crate::audio::track::TrackNode::Audio(track)) => {
if let Some(clip) = track.clips.iter_mut().find(|c| c.id == clip_id) {
clip.start_time = new_start_time;
clip.duration = new_duration;
clip.offset = new_offset;
}
}
Some(crate::audio::track::TrackNode::Midi(track)) => {
if let Some(clip) = track.clips.iter_mut().find(|c| c.id == clip_id) {
clip.start_time = new_start_time;
clip.duration = new_duration;
}
}
_ => {}
}
}
Command::CreateMetatrack(name) => {
let track_id = self.project.add_group_track(name.clone(), None);
// Notify UI about the new metatrack
@ -1911,6 +1929,10 @@ impl EngineController {
let _ = self.command_tx.push(Command::MoveClip(track_id, clip_id, new_start_time));
}
pub fn trim_clip(&mut self, track_id: TrackId, clip_id: ClipId, new_start_time: f64, new_duration: f64, new_offset: f64) {
let _ = self.command_tx.push(Command::TrimClip(track_id, clip_id, new_start_time, new_duration, new_offset));
}
/// Send a generic command to the audio thread
pub fn send_command(&mut self, command: Command) {
let _ = self.command_tx.push(command);


@ -29,6 +29,8 @@ pub enum Command {
// Clip management commands
/// Move a clip to a new timeline position
MoveClip(TrackId, ClipId, f64),
/// Trim a clip (track_id, clip_id, new_start_time, new_duration, new_offset)
TrimClip(TrackId, ClipId, f64, f64, f64),
// Metatrack management commands
/// Create a new metatrack with a name

src-tauri/Cargo.lock (generated)

@ -136,6 +136,12 @@ version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50"
[[package]]
name = "ascii"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d92bec98840b8f03a5ff5413de5293bfcd8bf96467cf5452609f939ec6f5de16"
[[package]]
name = "ashpd"
version = "0.10.2"
@ -247,6 +253,24 @@ version = "0.22.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6"
[[package]]
name = "bindgen"
version = "0.70.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f49d8fed880d473ea71efb9bf597651e77201bdd4893efe54c9e5d65ae04ce6f"
dependencies = [
"bitflags 2.8.0",
"cexpr",
"clang-sys",
"itertools 0.13.0",
"proc-macro2",
"quote",
"regex",
"rustc-hash 1.1.0",
"shlex",
"syn 2.0.96",
]
[[package]]
name = "bindgen"
version = "0.72.1"
@ -260,7 +284,7 @@ dependencies = [
"proc-macro2", "proc-macro2",
"quote", "quote",
"regex", "regex",
"rustc-hash", "rustc-hash 2.1.1",
"shlex", "shlex",
"syn 2.0.96", "syn 2.0.96",
] ]
@ -576,6 +600,12 @@ dependencies = [
"windows-targets 0.52.6", "windows-targets 0.52.6",
] ]
[[package]]
name = "chunked_transfer"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6e4de3bc4ea267985becf712dc6d9eed8b04c953b3fcfb339ebc87acd9804901"
[[package]]
name = "clang-sys"
version = "1.8.1"
@ -617,6 +647,12 @@ dependencies = [
"objc", "objc",
] ]
[[package]]
name = "color_quant"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b"
[[package]]
name = "combine"
version = "4.6.7"
@ -732,7 +768,7 @@ version = "0.2.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ceec7a6067e62d6f931a2baf6f3a751f4a892595bcec1461a3c94ef9949864b6"
dependencies = [
"bindgen 0.72.1",
]
[[package]]
@ -1372,6 +1408,31 @@ dependencies = [
"log", "log",
] ]
[[package]]
name = "ffmpeg-next"
version = "7.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "da02698288e0275e442a47fc12ca26d50daf0d48b15398ba5906f20ac2e2a9f9"
dependencies = [
"bitflags 2.8.0",
"ffmpeg-sys-next",
"libc",
]
[[package]]
name = "ffmpeg-sys-next"
version = "7.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f9e9c75ebd4463de9d8998fb134ba26347fe5faee62fabf0a4b4d41bd500b4ad"
dependencies = [
"bindgen 0.70.1",
"cc",
"libc",
"num_cpus",
"pkg-config",
"vcpkg",
]
[[package]]
name = "field-offset"
version = "0.3.6"
@ -1866,6 +1927,12 @@ version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
[[package]]
name = "hermit-abi"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c"
[[package]]
name = "hex"
version = "0.4.3"
@ -1932,6 +1999,12 @@ version = "1.9.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946"
[[package]]
name = "httpdate"
version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9"
[[package]]
name = "hyper"
version = "1.5.2"
@ -2148,6 +2221,19 @@ dependencies = [
"icu_properties", "icu_properties",
] ]
[[package]]
name = "image"
version = "0.24.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5690139d2f55868e080017335e4b94cb7414274c74f1669c84fb5feba2c9f69d"
dependencies = [
"bytemuck",
"byteorder",
"color_quant",
"jpeg-decoder",
"num-traits",
]
[[package]]
name = "indexmap"
version = "1.9.3"
@ -2288,6 +2374,12 @@ dependencies = [
"libc", "libc",
] ]
[[package]]
name = "jpeg-decoder"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "00810f1d8b74be64b13dbf3db89ac67740615d6c891f0e7b6179326533011a07"
[[package]]
name = "js-sys"
version = "0.3.77"
@ -2417,7 +2509,10 @@ dependencies = [
"chrono", "chrono",
"cpal", "cpal",
"daw-backend", "daw-backend",
"ffmpeg-next",
"image",
"log", "log",
"lru",
"rtrb", "rtrb",
"serde", "serde",
"serde_json", "serde_json",
@ -2427,6 +2522,7 @@ dependencies = [
"tauri-plugin-fs", "tauri-plugin-fs",
"tauri-plugin-log", "tauri-plugin-log",
"tauri-plugin-shell", "tauri-plugin-shell",
"tiny_http",
"tracing", "tracing",
"tracing-subscriber", "tracing-subscriber",
] ]
@ -2764,6 +2860,16 @@ dependencies = [
"autocfg", "autocfg",
] ]
[[package]]
name = "num_cpus"
version = "1.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b"
dependencies = [
"hermit-abi",
"libc",
]
[[package]]
name = "num_enum"
version = "0.7.3"
@ -3835,6 +3941,12 @@ version = "0.1.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f"
[[package]]
name = "rustc-hash"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2"
[[package]]
name = "rustc-hash"
version = "2.1.1"
@ -3960,10 +4072,11 @@ dependencies = [
[[package]]
name = "serde"
version = "1.0.228"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e"
dependencies = [
"serde_core",
"serde_derive",
]
@ -3979,10 +4092,19 @@ dependencies = [
]
[[package]]
name = "serde_core"
version = "1.0.228"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.228"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79"
dependencies = [
"proc-macro2",
"quote",
@ -5088,6 +5210,18 @@ dependencies = [
"time-core", "time-core",
] ]
[[package]]
name = "tiny_http"
version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "389915df6413a2e74fb181895f933386023c71110878cd0825588928e64cdc82"
dependencies = [
"ascii",
"chunked_transfer",
"httpdate",
"log",
]
[[package]]
name = "tinystr"
version = "0.7.6"
@ -5491,6 +5625,12 @@ version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3ef4c4aa54d5d05a279399bfa921ec387b7aba77caf7a682ae8d86785b8fdad2"
[[package]]
name = "vcpkg"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"
[[package]]
name = "version-compare"
version = "0.2.0"


@ -36,6 +36,14 @@ daw-backend = { path = "../daw-backend" }
cpal = "0.15" cpal = "0.15"
rtrb = "0.3" rtrb = "0.3"
# Video decoding
ffmpeg-next = "7.0"
lru = "0.12"
image = { version = "0.24", default-features = false, features = ["jpeg"] }
# HTTP server for video streaming
tiny_http = "0.12"
[profile.dev]
opt-level = 1 # Enable basic optimizations in debug mode for audio decoding performance


@ -0,0 +1,104 @@
extern crate ffmpeg_next as ffmpeg;
use std::env;
fn main() {
ffmpeg::init().unwrap();
let args: Vec<String> = env::args().collect();
if args.len() < 2 {
eprintln!("Usage: {} <video_file>", args[0]);
std::process::exit(1);
}
let path = &args[1];
let input = ffmpeg::format::input(path).expect("Failed to open video");
println!("=== VIDEO FILE INFORMATION ===");
println!("File: {}", path);
println!("Format: {}", input.format().name());
println!("Duration: {:.2}s", input.duration() as f64 / f64::from(ffmpeg::ffi::AV_TIME_BASE));
println!();
let video_stream = input.streams()
.best(ffmpeg::media::Type::Video)
.expect("No video stream found");
let stream_index = video_stream.index();
let time_base = f64::from(video_stream.time_base());
let duration = video_stream.duration() as f64 * time_base;
let fps = f64::from(video_stream.avg_frame_rate());
println!("=== VIDEO STREAM ===");
println!("Stream index: {}", stream_index);
println!("Time base: {} ({:.10})", video_stream.time_base(), time_base);
println!("Duration: {:.2}s", duration);
println!("FPS: {:.2}", fps);
println!("Frames: {}", video_stream.frames());
let context = ffmpeg::codec::context::Context::from_parameters(video_stream.parameters())
.expect("Failed to create context");
let decoder = context.decoder().video().expect("Failed to create decoder");
println!("Codec: {:?}", decoder.id());
println!("Resolution: {}x{}", decoder.width(), decoder.height());
println!("Pixel format: {:?}", decoder.format());
println!();
println!("=== SCANNING FRAMES ===");
println!("Timestamp (ts) | Time (s) | Key | Type");
println!("---------------|----------|-----|-----");
let mut input = ffmpeg::format::input(path).expect("Failed to reopen video");
let context = ffmpeg::codec::context::Context::from_parameters(
input.streams().best(ffmpeg::media::Type::Video).unwrap().parameters()
).expect("Failed to create context");
let mut decoder = context.decoder().video().expect("Failed to create decoder");
let mut frame_count = 0;
let mut keyframe_count = 0;
for (stream, packet) in input.packets() {
if stream.index() == stream_index {
let packet_pts = packet.pts().unwrap_or(0);
let packet_time = packet_pts as f64 * time_base;
let is_key = packet.is_key();
if is_key {
keyframe_count += 1;
}
// Print first 50 packets and all keyframes
if frame_count < 50 || is_key {
println!("{:14} | {:8.2} | {:3} | {:?}",
packet_pts,
packet_time,
if is_key { "KEY" } else { " " },
if is_key { "I-frame" } else { "P/B-frame" }
);
}
decoder.send_packet(&packet).ok();
let mut frame = ffmpeg::util::frame::Video::empty();
while decoder.receive_frame(&mut frame).is_ok() {
frame_count += 1;
}
}
}
// Flush decoder
decoder.send_eof().ok();
let mut frame = ffmpeg::util::frame::Video::empty();
while decoder.receive_frame(&mut frame).is_ok() {
frame_count += 1;
}
println!();
println!("=== SUMMARY ===");
println!("Total frames decoded: {}", frame_count);
println!("Total keyframes: {}", keyframe_count);
if keyframe_count > 0 {
println!("Average keyframe interval: {:.2} frames", frame_count as f64 / keyframe_count as f64);
println!("Average keyframe interval: {:.2}s", duration / keyframe_count as f64);
}
}


@ -30,15 +30,15 @@ pub struct MidiFileMetadata {
}
pub struct AudioState {
pub(crate) controller: Option<EngineController>,
pub(crate) sample_rate: u32,
pub(crate) channels: u32,
pub(crate) buffer_size: u32,
pub(crate) next_track_id: u32,
pub(crate) next_pool_index: usize,
pub(crate) next_graph_node_id: u32,
// Track next node ID for each VoiceAllocator template (VoiceAllocator backend ID -> next template node ID)
pub(crate) template_node_counters: HashMap<u32, u32>,
}
impl Default for AudioState {
@ -381,6 +381,24 @@ pub async fn audio_move_clip(
}
}
#[tauri::command]
pub async fn audio_trim_clip(
state: tauri::State<'_, Arc<Mutex<AudioState>>>,
track_id: u32,
clip_id: u32,
new_start_time: f64,
new_duration: f64,
new_offset: f64,
) -> Result<(), String> {
let mut audio_state = state.lock().unwrap();
if let Some(controller) = &mut audio_state.controller {
controller.trim_clip(track_id, clip_id, new_start_time, new_duration, new_offset);
Ok(())
} else {
Err("Audio not initialized".to_string())
}
}
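For reference, a minimal sketch (not part of this commit) of how the frontend could invoke the new command over the Tauri bridge, using the same camelCase argument keys the rest of the JS code uses; the helper name is hypothetical:
// Hypothetical frontend helper for trimming a clip from JavaScript.
async function trimClipOnTrack(invoke, trackId, clipId, newStartTime, newDuration, newOffset) {
  await invoke('audio_trim_clip', {
    trackId,        // backend track id (u32)
    clipId,         // clip id within that track (u32)
    newStartTime,   // new timeline position, in seconds
    newDuration,    // new clip length, in seconds
    newOffset,      // offset into the source material, in seconds (the MIDI path ignores this)
  });
}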
#[tauri::command]
pub async fn audio_start_recording(
state: tauri::State<'_, Arc<Mutex<AudioState>>>,


@ -7,6 +7,8 @@ use chrono::Local;
use tauri::{AppHandle, Manager, Url, WebviewUrl, WebviewWindowBuilder};
mod audio;
mod video;
mod video_server;
#[derive(Default)]
@ -127,9 +129,16 @@ fn handle_file_associations(app: AppHandle, files: Vec<PathBuf>) {
#[cfg_attr(mobile, tauri::mobile_entry_point)]
pub fn run() {
let pkg_name = env!("CARGO_PKG_NAME").to_string();
// Initialize video HTTP server
let video_server = video_server::VideoServer::new()
.expect("Failed to start video server");
eprintln!("[App] Video server started on port {}", video_server.port());
tauri::Builder::default()
.manage(Mutex::new(AppState::default()))
.manage(Arc::new(Mutex::new(audio::AudioState::default())))
.manage(Arc::new(Mutex::new(video::VideoState::default())))
.manage(Arc::new(Mutex::new(video_server)))
.setup(|app| {
#[cfg(any(windows, target_os = "linux"))] // Windows/Linux needs different handling from macOS
{
@ -205,6 +214,7 @@ pub fn run() {
audio::audio_load_file,
audio::audio_add_clip,
audio::audio_move_clip,
audio::audio_trim_clip,
audio::audio_start_recording,
audio::audio_stop_recording,
audio::audio_pause_recording,
@ -251,6 +261,14 @@ pub fn run() {
audio::audio_resolve_missing_file,
audio::audio_serialize_track_graph,
audio::audio_load_track_graph,
video::video_load_file,
video::video_get_frame,
video::video_get_frames_batch,
video::video_set_cache_size,
video::video_get_pool_info,
video::video_ipc_benchmark,
video::video_get_transcode_status,
video::video_allow_asset,
])
// .manage(window_counter)
.build(tauri::generate_context!())

src-tauri/src/video.rs (new file)

@ -0,0 +1,878 @@
use std::sync::{Arc, Mutex};
use std::num::NonZeroUsize;
use ffmpeg_next as ffmpeg;
use lru::LruCache;
use daw_backend::WaveformPeak;
use image::RgbaImage;
use tauri::Manager;
#[derive(serde::Serialize, Clone)]
pub struct VideoFileMetadata {
pub pool_index: usize,
pub width: u32,
pub height: u32,
pub fps: f64,
pub duration: f64,
pub has_audio: bool,
pub audio_pool_index: Option<usize>,
pub audio_duration: Option<f64>,
pub audio_sample_rate: Option<u32>,
pub audio_channels: Option<u32>,
pub audio_waveform: Option<Vec<WaveformPeak>>,
pub codec_name: String,
pub is_browser_compatible: bool,
pub http_url: Option<String>, // HTTP URL to stream video (if compatible or transcode complete)
pub transcoding: bool, // True if currently transcoding
}
struct VideoDecoder {
path: String,
width: u32, // Original video width
height: u32, // Original video height
output_width: u32, // Scaled output width
output_height: u32, // Scaled output height
fps: f64,
duration: f64,
time_base: f64,
stream_index: usize,
frame_cache: LruCache<i64, Vec<u8>>, // timestamp -> RGBA data
input: Option<ffmpeg::format::context::Input>,
decoder: Option<ffmpeg::decoder::Video>,
last_decoded_ts: i64, // Track the last decoded frame timestamp
}
impl VideoDecoder {
fn new(path: String, cache_size: usize, max_width: Option<u32>, max_height: Option<u32>) -> Result<Self, String> {
ffmpeg::init().map_err(|e| e.to_string())?;
let input = ffmpeg::format::input(&path)
.map_err(|e| format!("Failed to open video: {}", e))?;
let video_stream = input.streams()
.best(ffmpeg::media::Type::Video)
.ok_or("No video stream found")?;
let stream_index = video_stream.index();
let context_decoder = ffmpeg::codec::context::Context::from_parameters(
video_stream.parameters()
).map_err(|e| e.to_string())?;
let decoder = context_decoder.decoder().video()
.map_err(|e| e.to_string())?;
let width = decoder.width();
let height = decoder.height();
let time_base = f64::from(video_stream.time_base());
// Calculate output dimensions (scale down if larger than max)
let (output_width, output_height) = if let (Some(max_w), Some(max_h)) = (max_width, max_height) {
// Calculate scale to fit within max dimensions while preserving aspect ratio
let scale = (max_w as f32 / width as f32).min(max_h as f32 / height as f32).min(1.0);
((width as f32 * scale) as u32, (height as f32 * scale) as u32)
} else {
(width, height)
};
// Try to get duration from stream, fallback to container
let duration = if video_stream.duration() > 0 {
video_stream.duration() as f64 * time_base
} else if input.duration() > 0 {
input.duration() as f64 / f64::from(ffmpeg::ffi::AV_TIME_BASE)
} else {
// If no duration available, estimate from frame count and fps
let fps = f64::from(video_stream.avg_frame_rate());
if video_stream.frames() > 0 && fps > 0.0 {
video_stream.frames() as f64 / fps
} else {
0.0 // Unknown duration
}
};
let fps = f64::from(video_stream.avg_frame_rate());
Ok(Self {
path,
width,
height,
output_width,
output_height,
fps,
duration,
time_base,
stream_index,
frame_cache: LruCache::new(
NonZeroUsize::new(cache_size).unwrap()
),
input: None,
decoder: None,
last_decoded_ts: -1,
})
}
fn get_frame(&mut self, timestamp: f64) -> Result<Vec<u8>, String> {
use std::time::Instant;
let t_start = Instant::now();
// Convert timestamp to frame timestamp
let frame_ts = (timestamp / self.time_base) as i64;
// Check cache
if let Some(cached_frame) = self.frame_cache.get(&frame_ts) {
eprintln!("[Video Timing] Cache hit for ts={:.3}s ({}ms)", timestamp, t_start.elapsed().as_millis());
return Ok(cached_frame.clone());
}
let _t_after_cache = Instant::now();
// Determine if we need to seek
// Seek if: no decoder open, going backwards, or jumping forward more than 2 seconds
let need_seek = self.decoder.is_none()
|| frame_ts < self.last_decoded_ts
|| frame_ts > self.last_decoded_ts + (2.0 / self.time_base) as i64;
if need_seek {
let t_seek_start = Instant::now();
// Reopen input
let mut input = ffmpeg::format::input(&self.path)
.map_err(|e| format!("Failed to reopen video: {}", e))?;
// Seek to timestamp
input.seek(frame_ts, ..frame_ts)
.map_err(|e| format!("Seek failed: {}", e))?;
let context_decoder = ffmpeg::codec::context::Context::from_parameters(
input.streams().best(ffmpeg::media::Type::Video).unwrap().parameters()
).map_err(|e| e.to_string())?;
let decoder = context_decoder.decoder().video()
.map_err(|e| e.to_string())?;
self.input = Some(input);
self.decoder = Some(decoder);
self.last_decoded_ts = -1; // Reset since we seeked
eprintln!("[Video Timing] Seek took {}ms", t_seek_start.elapsed().as_millis());
}
let input = self.input.as_mut().unwrap();
let decoder = self.decoder.as_mut().unwrap();
// Decode frames until we find the one closest to our target timestamp
let mut best_frame_data: Option<Vec<u8>> = None;
let mut best_frame_ts: Option<i64> = None;
let t_decode_start = Instant::now();
let mut decode_count = 0;
let mut scale_time_ms = 0u128;
for (stream, packet) in input.packets() {
if stream.index() == self.stream_index {
decoder.send_packet(&packet)
.map_err(|e| e.to_string())?;
let mut frame = ffmpeg::util::frame::Video::empty();
while decoder.receive_frame(&mut frame).is_ok() {
decode_count += 1;
let current_frame_ts = frame.timestamp().unwrap_or(0);
self.last_decoded_ts = current_frame_ts; // Update last decoded position
// Check if this frame is closer to our target than the previous best
let is_better = match best_frame_ts {
None => true,
Some(best_ts) => {
(current_frame_ts - frame_ts).abs() < (best_ts - frame_ts).abs()
}
};
if is_better {
let t_scale_start = Instant::now();
// Convert to RGBA and scale to output size
let mut scaler = ffmpeg::software::scaling::context::Context::get(
frame.format(),
frame.width(),
frame.height(),
ffmpeg::format::Pixel::RGBA,
self.output_width,
self.output_height,
ffmpeg::software::scaling::flag::Flags::BILINEAR,
).map_err(|e| e.to_string())?;
let mut rgb_frame = ffmpeg::util::frame::Video::empty();
scaler.run(&frame, &mut rgb_frame)
.map_err(|e| e.to_string())?;
// Remove stride padding to create tightly packed RGBA data
let width = self.output_width as usize;
let height = self.output_height as usize;
let stride = rgb_frame.stride(0);
let row_size = width * 4; // RGBA = 4 bytes per pixel
let source_data = rgb_frame.data(0);
let mut packed_data = Vec::with_capacity(row_size * height);
for y in 0..height {
let row_start = y * stride;
let row_end = row_start + row_size;
packed_data.extend_from_slice(&source_data[row_start..row_end]);
}
scale_time_ms += t_scale_start.elapsed().as_millis();
best_frame_data = Some(packed_data);
best_frame_ts = Some(current_frame_ts);
}
// If we've reached or passed the target timestamp, we can stop
if current_frame_ts >= frame_ts {
// Found our frame, cache and return it
if let Some(data) = best_frame_data {
let total_time = t_start.elapsed().as_millis();
let decode_time = t_decode_start.elapsed().as_millis();
eprintln!("[Video Timing] ts={:.3}s | Decoded {} frames in {}ms | Scale: {}ms | Total: {}ms",
timestamp, decode_count, decode_time, scale_time_ms, total_time);
self.frame_cache.put(frame_ts, data.clone());
return Ok(data);
}
break;
}
}
}
}
eprintln!("[Video Decoder] ERROR: Failed to decode frame for timestamp {}", timestamp);
Err("Failed to decode frame".to_string())
}
}
use std::collections::HashMap;
use std::path::PathBuf;
#[derive(Clone)]
pub struct TranscodeJob {
pub pool_index: usize,
pub input_path: String,
pub output_path: String,
pub http_url: Option<String>, // HTTP URL when transcode completes
pub progress: f32, // 0.0 to 1.0
pub completed: bool,
}
pub struct VideoState {
pool: Vec<Arc<Mutex<VideoDecoder>>>,
next_pool_index: usize,
cache_size: usize,
transcode_jobs: Arc<Mutex<HashMap<usize, TranscodeJob>>>, // pool_index -> job
}
impl Default for VideoState {
fn default() -> Self {
Self {
pool: Vec::new(),
next_pool_index: 0,
cache_size: 20, // Default cache size
transcode_jobs: Arc::new(Mutex::new(HashMap::new())),
}
}
}
#[tauri::command]
pub async fn video_load_file(
video_state: tauri::State<'_, Arc<Mutex<VideoState>>>,
audio_state: tauri::State<'_, Arc<Mutex<crate::audio::AudioState>>>,
video_server: tauri::State<'_, Arc<Mutex<crate::video_server::VideoServer>>>,
path: String,
) -> Result<VideoFileMetadata, String> {
eprintln!("[Video] Loading file: {}", path);
ffmpeg::init().map_err(|e| e.to_string())?;
// Open input to check for audio stream
let mut input = ffmpeg::format::input(&path)
.map_err(|e| format!("Failed to open video: {}", e))?;
let audio_stream_opt = input.streams()
.best(ffmpeg::media::Type::Audio);
let has_audio = audio_stream_opt.is_some();
// Extract audio if present
let (audio_pool_index, audio_duration, audio_sample_rate, audio_channels, audio_waveform) = if has_audio {
let audio_stream = audio_stream_opt.unwrap();
let audio_index = audio_stream.index();
// Get audio properties
let context_decoder = ffmpeg::codec::context::Context::from_parameters(
audio_stream.parameters()
).map_err(|e| e.to_string())?;
let mut audio_decoder = context_decoder.decoder().audio()
.map_err(|e| e.to_string())?;
let sample_rate = audio_decoder.rate();
let channels = audio_decoder.channels() as u32;
// Decode all audio frames
let mut audio_samples: Vec<f32> = Vec::new();
for (stream, packet) in input.packets() {
if stream.index() == audio_index {
audio_decoder.send_packet(&packet)
.map_err(|e| e.to_string())?;
let mut audio_frame = ffmpeg::util::frame::Audio::empty();
while audio_decoder.receive_frame(&mut audio_frame).is_ok() {
// Convert audio to f32 planar format
let format = audio_frame.format();
let frame_channels = audio_frame.channels() as usize;
// Create resampler to convert to f32 planar
let mut resampler = ffmpeg::software::resampling::context::Context::get(
format,
audio_frame.channel_layout(),
sample_rate,
ffmpeg::format::Sample::F32(ffmpeg::format::sample::Type::Packed),
audio_frame.channel_layout(),
sample_rate,
).map_err(|e| e.to_string())?;
let mut resampled_frame = ffmpeg::util::frame::Audio::empty();
resampler.run(&audio_frame, &mut resampled_frame)
.map_err(|e| e.to_string())?;
// Extract f32 samples (interleaved format)
let data_ptr = resampled_frame.data(0).as_ptr() as *const f32;
let total_samples = resampled_frame.samples() * frame_channels;
let samples_slice = unsafe {
std::slice::from_raw_parts(data_ptr, total_samples)
};
audio_samples.extend_from_slice(samples_slice);
}
}
}
// Flush audio decoder
audio_decoder.send_eof().map_err(|e| e.to_string())?;
let mut audio_frame = ffmpeg::util::frame::Audio::empty();
while audio_decoder.receive_frame(&mut audio_frame).is_ok() {
let format = audio_frame.format();
let frame_channels = audio_frame.channels() as usize;
let mut resampler = ffmpeg::software::resampling::context::Context::get(
format,
audio_frame.channel_layout(),
sample_rate,
ffmpeg::format::Sample::F32(ffmpeg::format::sample::Type::Packed),
audio_frame.channel_layout(),
sample_rate,
).map_err(|e| e.to_string())?;
let mut resampled_frame = ffmpeg::util::frame::Audio::empty();
resampler.run(&audio_frame, &mut resampled_frame)
.map_err(|e| e.to_string())?;
let data_ptr = resampled_frame.data(0).as_ptr() as *const f32;
let total_samples = resampled_frame.samples() * frame_channels;
let samples_slice = unsafe {
std::slice::from_raw_parts(data_ptr, total_samples)
};
audio_samples.extend_from_slice(samples_slice);
}
// Calculate audio duration
let total_samples_per_channel = audio_samples.len() / channels as usize;
let audio_duration = total_samples_per_channel as f64 / sample_rate as f64;
// Generate waveform
let target_peaks = ((audio_duration * 300.0) as usize).clamp(1000, 20000);
let waveform = generate_waveform(&audio_samples, channels, target_peaks);
// Send audio to DAW backend
let mut audio_state_guard = audio_state.lock().unwrap();
let audio_pool_index = audio_state_guard.next_pool_index;
audio_state_guard.next_pool_index += 1;
if let Some(controller) = &mut audio_state_guard.controller {
controller.add_audio_file(
path.clone(),
audio_samples,
channels,
sample_rate,
);
}
drop(audio_state_guard);
(Some(audio_pool_index), Some(audio_duration), Some(sample_rate), Some(channels), Some(waveform))
} else {
(None, None, None, None, None)
};
// Detect video codec
let video_stream = input.streams()
.best(ffmpeg::media::Type::Video)
.ok_or("No video stream found")?;
let codec_id = video_stream.parameters().id();
let codec_name = ffmpeg::codec::Id::name(&codec_id).to_string();
// Check if codec is browser-compatible (can play directly)
// Browsers support: H.264/AVC, VP8, VP9, AV1 (limited)
let is_browser_compatible = matches!(
codec_id,
ffmpeg::codec::Id::H264 |
ffmpeg::codec::Id::VP8 |
ffmpeg::codec::Id::VP9 |
ffmpeg::codec::Id::AV1
);
eprintln!("[Video Codec] {} - Browser compatible: {}", codec_name, is_browser_compatible);
// Create video decoder with max dimensions for playback (800x600)
// This scales down high-res videos to reduce data transfer
let mut video_state_guard = video_state.lock().unwrap();
let pool_index = video_state_guard.next_pool_index;
video_state_guard.next_pool_index += 1;
let decoder = VideoDecoder::new(path.clone(), video_state_guard.cache_size, Some(800), Some(600))?;
// Add file to HTTP server if browser-compatible
let http_url = if is_browser_compatible {
let server = video_server.lock().unwrap();
let url_path = format!("/video/{}", pool_index);
server.add_file(url_path.clone(), PathBuf::from(&path));
let http_url = server.get_url(&url_path);
eprintln!("[Video] Browser-compatible, serving at: {}", http_url);
Some(http_url)
} else {
None
};
let metadata = VideoFileMetadata {
pool_index,
width: decoder.output_width, // Return scaled dimensions to JS
height: decoder.output_height,
fps: decoder.fps,
duration: decoder.duration,
has_audio,
audio_pool_index,
audio_duration,
audio_sample_rate,
audio_channels,
audio_waveform,
codec_name,
is_browser_compatible,
http_url,
transcoding: !is_browser_compatible,
};
video_state_guard.pool.push(Arc::new(Mutex::new(decoder)));
// Start background transcoding if not browser-compatible
if !is_browser_compatible {
eprintln!("[Video Transcode] Starting background transcode for pool_index {}", pool_index);
let jobs = video_state_guard.transcode_jobs.clone();
let input_path = path.clone();
let pool_idx = pool_index;
let server = video_server.inner().clone();
tauri::async_runtime::spawn(async move {
if let Err(e) = start_transcode(jobs, pool_idx, input_path, server).await {
eprintln!("[Video Transcode] Failed: {}", e);
}
});
}
Ok(metadata)
}
// Background transcode to WebM/VP9 for browser compatibility
async fn start_transcode(
jobs: Arc<Mutex<HashMap<usize, TranscodeJob>>>,
pool_index: usize,
input_path: String,
video_server: Arc<Mutex<crate::video_server::VideoServer>>,
) -> Result<(), String> {
use std::process::Command;
// Generate output path in system cache directory
let cache_dir = std::env::temp_dir().join("lightningbeam_transcoded");
std::fs::create_dir_all(&cache_dir).map_err(|e| e.to_string())?;
let input_file = PathBuf::from(&input_path);
let file_stem = input_file.file_stem()
.ok_or("Invalid input path")?
.to_string_lossy();
let output_path = cache_dir.join(format!("{}_{}.webm", file_stem, pool_index));
// Create job entry
{
let mut jobs_guard = jobs.lock().unwrap();
jobs_guard.insert(pool_index, TranscodeJob {
pool_index,
input_path: input_path.clone(),
output_path: output_path.to_string_lossy().to_string(),
http_url: None,
progress: 0.0,
completed: false,
});
}
eprintln!("[Video Transcode] Output: {}", output_path.display());
// Run FFmpeg transcode command
// Using VP9 codec with CRF 30 (good quality/size balance) and fast encoding
let output = Command::new("ffmpeg")
.args(&[
"-i", &input_path,
"-c:v", "libvpx-vp9", // VP9 video codec
"-crf", "30", // Quality (lower = better, 23-32 recommended)
"-b:v", "0", // Use CRF mode
"-threads", "4", // Use 4 threads
"-row-mt", "1", // Enable row-based multithreading
"-speed", "4", // Encoding speed (0=slowest/best, 4=good balance)
"-c:a", "libopus", // Opus audio codec (best for WebM)
"-b:a", "128k", // Audio bitrate
"-y", // Overwrite output
output_path.to_str().ok_or("Invalid output path")?,
])
.output()
.map_err(|e| format!("Failed to spawn ffmpeg: {}", e))?;
if output.status.success() {
eprintln!("[Video Transcode] Completed: {}", output_path.display());
// Add transcoded file to HTTP server
let server = video_server.lock().unwrap();
let url_path = format!("/video/{}", pool_index);
server.add_file(url_path.clone(), output_path.clone());
let http_url = server.get_url(&url_path);
eprintln!("[Video Transcode] Serving at: {}", http_url);
drop(server);
// Mark as completed and store HTTP URL
let mut jobs_guard = jobs.lock().unwrap();
if let Some(job) = jobs_guard.get_mut(&pool_index) {
job.progress = 1.0;
job.completed = true;
job.http_url = Some(http_url);
}
eprintln!("[Video Transcode] Job completed for pool_index {}", pool_index);
Ok(())
} else {
let stderr = String::from_utf8_lossy(&output.stderr);
eprintln!("[Video Transcode] FFmpeg error: {}", stderr);
Err(format!("FFmpeg failed: {}", stderr))
}
}
// Get transcode status for a pool index
#[tauri::command]
pub async fn video_get_transcode_status(
video_state: tauri::State<'_, Arc<Mutex<VideoState>>>,
pool_index: usize,
) -> Result<Option<(String, f32, bool, Option<String>)>, String> {
let state = video_state.lock().unwrap();
let jobs = state.transcode_jobs.lock().unwrap();
if let Some(job) = jobs.get(&pool_index) {
Ok(Some((job.output_path.clone(), job.progress, job.completed, job.http_url.clone())))
} else {
Ok(None)
}
}
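A minimal polling sketch (not part of this commit) for the frontend, assuming serde's default serialization so the status tuple arrives as a JSON array and None arrives as null:
// Hypothetical helper: poll until the background transcode finishes, then return its URL.
async function waitForTranscode(invoke, poolIndex, intervalMs = 1000) {
  while (true) {
    const status = await invoke('video_get_transcode_status', { poolIndex });
    if (status) {
      const [outputPath, progress, completed, httpUrl] = status;
      console.log(`Transcode ${Math.round(progress * 100)}%: ${outputPath}`);
      if (completed && httpUrl) return httpUrl;
    }
    await new Promise((resolve) => setTimeout(resolve, intervalMs));
  }
}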
// Add a video file to asset protocol scope so browser can access it
#[tauri::command]
pub async fn video_allow_asset(
app: tauri::AppHandle,
path: String,
) -> Result<(), String> {
use tauri_plugin_fs::FsExt;
let file_path = PathBuf::from(&path);
// Add to FS scope
let fs_scope = app.fs_scope();
fs_scope.allow_file(&file_path)
.map_err(|e| format!("Failed to allow file in fs scope: {}", e))?;
// Add to asset protocol scope
let asset_scope = app.asset_protocol_scope();
asset_scope.allow_file(&file_path)
.map_err(|e| format!("Failed to allow file in asset scope: {}", e))?;
eprintln!("[Video] Added to asset scope: {}", path);
Ok(())
}
fn generate_waveform(audio_data: &[f32], channels: u32, target_peaks: usize) -> Vec<WaveformPeak> {
let total_samples = audio_data.len();
let samples_per_channel = total_samples / channels as usize;
let samples_per_peak = (samples_per_channel / target_peaks).max(1);
let mut waveform = Vec::new();
for peak_idx in 0..target_peaks {
let start_sample = peak_idx * samples_per_peak;
let end_sample = ((peak_idx + 1) * samples_per_peak).min(samples_per_channel);
if start_sample >= samples_per_channel {
break;
}
let mut min_val = 0.0f32;
let mut max_val = 0.0f32;
for sample_idx in start_sample..end_sample {
// Average across channels
let mut channel_sum = 0.0f32;
for ch in 0..channels as usize {
let idx = sample_idx * channels as usize + ch;
if idx < total_samples {
channel_sum += audio_data[idx];
}
}
let avg_sample = channel_sum / channels as f32;
min_val = min_val.min(avg_sample);
max_val = max_val.max(avg_sample);
}
waveform.push(WaveformPeak {
min: min_val,
max: max_val,
});
}
waveform
}
#[tauri::command]
pub async fn video_get_frame(
state: tauri::State<'_, Arc<Mutex<VideoState>>>,
pool_index: usize,
timestamp: f64,
use_jpeg: bool,
channel: tauri::ipc::Channel,
) -> Result<(), String> {
use std::time::Instant;
let t_total_start = Instant::now();
let t_lock_start = Instant::now();
let video_state = state.lock().unwrap();
let decoder = video_state.pool.get(pool_index)
.ok_or("Invalid pool index")?
.clone();
drop(video_state);
let mut decoder = decoder.lock().unwrap();
let t_lock_end = Instant::now();
let t_decode_start = Instant::now();
let frame_data = decoder.get_frame(timestamp)?;
let t_decode_end = Instant::now();
let t_compress_start = Instant::now();
let data_to_send = if use_jpeg {
// Get frame dimensions from decoder
let width = decoder.output_width;
let height = decoder.output_height;
// Create image from raw RGBA data
let img = RgbaImage::from_raw(width, height, frame_data)
.ok_or("Failed to create image from frame data")?;
// Convert RGBA to RGB (JPEG doesn't support alpha)
let rgb_img = image::DynamicImage::ImageRgba8(img).to_rgb8();
// Encode to JPEG with quality 85 (good balance of size/quality)
let mut jpeg_data = Vec::new();
let mut encoder = image::codecs::jpeg::JpegEncoder::new_with_quality(&mut jpeg_data, 85);
encoder.encode(
rgb_img.as_raw(),
rgb_img.width(),
rgb_img.height(),
image::ColorType::Rgb8
).map_err(|e| format!("JPEG encoding failed: {}", e))?;
jpeg_data
} else {
frame_data
};
let t_compress_end = Instant::now();
// Drop decoder lock before sending to avoid blocking
drop(decoder);
let t_send_start = Instant::now();
// Send binary data through channel (bypasses JSON serialization)
// InvokeResponseBody::Raw sends raw binary data without JSON encoding
channel.send(tauri::ipc::InvokeResponseBody::Raw(data_to_send.clone()))
.map_err(|e| format!("Channel send error: {}", e))?;
let t_send_end = Instant::now();
let t_total_end = Instant::now();
// Detailed profiling
let lock_time = t_lock_end.duration_since(t_lock_start).as_micros();
let decode_time = t_decode_end.duration_since(t_decode_start).as_micros();
let compress_time = t_compress_end.duration_since(t_compress_start).as_micros();
let send_time = t_send_end.duration_since(t_send_start).as_micros();
let total_time = t_total_end.duration_since(t_total_start).as_micros();
let size_kb = data_to_send.len() / 1024;
let mode = if use_jpeg { "JPEG" } else { "RAW" };
eprintln!("[Video Profile {}] Size: {}KB | Lock: {}μs | Decode: {}μs | Compress: {}μs | Send: {}μs | Total: {}μs",
mode, size_kb, lock_time, decode_time, compress_time, send_time, total_time);
Ok(())
}
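A minimal frontend sketch (not part of this commit) of consuming a raw RGBA frame; it assumes the channel delivers the Raw response body as an ArrayBuffer and that video_get_pool_info's tuple arrives as a JSON array:
// Hypothetical helper: fetch one uncompressed frame and paint it onto a canvas.
async function drawFrame(invoke, Channel, canvas, poolIndex, timestamp) {
  const [width, height] = await invoke('video_get_pool_info', { poolIndex });
  canvas.width = width;
  canvas.height = height;
  const channel = new Channel();
  channel.onmessage = (buffer) => {
    const rgba = new Uint8ClampedArray(buffer);  // tightly packed RGBA, stride already removed by the backend
    canvas.getContext('2d').putImageData(new ImageData(rgba, width, height), 0, 0);
  };
  await invoke('video_get_frame', { poolIndex, timestamp, useJpeg: false, channel });
}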
#[tauri::command]
pub async fn video_set_cache_size(
state: tauri::State<'_, Arc<Mutex<VideoState>>>,
cache_size: usize,
) -> Result<(), String> {
let mut video_state = state.lock().unwrap();
video_state.cache_size = cache_size;
Ok(())
}
#[tauri::command]
pub async fn video_get_pool_info(
state: tauri::State<'_, Arc<Mutex<VideoState>>>,
pool_index: usize,
) -> Result<(u32, u32, f64), String> {
let video_state = state.lock().unwrap();
let decoder = video_state.pool.get(pool_index)
.ok_or("Invalid pool index")?
.lock().unwrap();
Ok((
decoder.output_width, // Return scaled dimensions
decoder.output_height,
decoder.fps
))
}
// Benchmark command to test IPC performance with various payload sizes
#[tauri::command]
pub async fn video_ipc_benchmark(
size_bytes: usize,
channel: tauri::ipc::Channel,
) -> Result<(), String> {
use std::time::Instant;
let t_start = Instant::now();
// Create dummy data of requested size
let data = vec![0u8; size_bytes];
let t_after_alloc = Instant::now();
// Send through channel
channel.send(tauri::ipc::InvokeResponseBody::Raw(data))
.map_err(|e| format!("Channel send error: {}", e))?;
let t_after_send = Instant::now();
let alloc_time = t_after_alloc.duration_since(t_start).as_micros();
let send_time = t_after_send.duration_since(t_after_alloc).as_micros();
let total_time = t_after_send.duration_since(t_start).as_micros();
eprintln!("[IPC Benchmark Rust] Size: {}KB | Alloc: {}μs | Send: {}μs | Total: {}μs",
size_bytes / 1024, alloc_time, send_time, total_time);
Ok(())
}
// Batch frame request - get multiple frames in one IPC call
#[tauri::command]
pub async fn video_get_frames_batch(
state: tauri::State<'_, Arc<Mutex<VideoState>>>,
pool_index: usize,
timestamps: Vec<f64>,
use_jpeg: bool,
channel: tauri::ipc::Channel,
) -> Result<(), String> {
use std::time::Instant;
let t_total_start = Instant::now();
let video_state = state.lock().unwrap();
let decoder = video_state.pool.get(pool_index)
.ok_or("Invalid pool index")?
.clone();
drop(video_state);
let mut decoder = decoder.lock().unwrap();
// Decode all frames
let mut all_frames = Vec::new();
let mut total_decode_time = 0u128;
let mut total_compress_time = 0u128;
for timestamp in &timestamps {
let t_decode_start = Instant::now();
let frame_data = decoder.get_frame(*timestamp)?;
let t_decode_end = Instant::now();
total_decode_time += t_decode_end.duration_since(t_decode_start).as_micros();
let t_compress_start = Instant::now();
let data = if use_jpeg {
let width = decoder.output_width;
let height = decoder.output_height;
let img = RgbaImage::from_raw(width, height, frame_data)
.ok_or("Failed to create image from frame data")?;
let rgb_img = image::DynamicImage::ImageRgba8(img).to_rgb8();
let mut jpeg_data = Vec::new();
let mut encoder = image::codecs::jpeg::JpegEncoder::new_with_quality(&mut jpeg_data, 85);
encoder.encode(
rgb_img.as_raw(),
rgb_img.width(),
rgb_img.height(),
image::ColorType::Rgb8
).map_err(|e| format!("JPEG encoding failed: {}", e))?;
jpeg_data
} else {
frame_data
};
let t_compress_end = Instant::now();
total_compress_time += t_compress_end.duration_since(t_compress_start).as_micros();
all_frames.push(data);
}
drop(decoder);
// Pack all frames into one buffer with metadata
// Format: [frame_count: u32][frame1_size: u32][frame1_data...][frame2_size: u32][frame2_data...]
let mut packed_data = Vec::new();
packed_data.extend_from_slice(&(all_frames.len() as u32).to_le_bytes());
for frame in &all_frames {
packed_data.extend_from_slice(&(frame.len() as u32).to_le_bytes());
packed_data.extend_from_slice(frame);
}
let total_size_kb = packed_data.len() / 1024;
let t_send_start = Instant::now();
channel.send(tauri::ipc::InvokeResponseBody::Raw(packed_data))
.map_err(|e| format!("Channel send error: {}", e))?;
let t_send_end = Instant::now();
let send_time = t_send_end.duration_since(t_send_start).as_micros();
let total_time = t_send_end.duration_since(t_total_start).as_micros();
let mode = if use_jpeg { "JPEG" } else { "RAW" };
eprintln!("[Video Batch {}] Frames: {} | Size: {}KB | Decode: {}μs | Compress: {}μs | Send: {}μs | Total: {}μs",
mode, timestamps.len(), total_size_kb, total_decode_time, total_compress_time, send_time, total_time);
Ok(())
}
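A minimal sketch (not part of this commit) of unpacking the packed batch buffer on the JavaScript side, following the [frame_count][frame_size][frame_data]... layout documented above and assuming the payload arrives as an ArrayBuffer:
// Hypothetical helper: split the packed little-endian buffer back into individual frames.
function unpackFrameBatch(buffer) {
  const view = new DataView(buffer);
  const frames = [];
  let offset = 0;
  const frameCount = view.getUint32(offset, true);
  offset += 4;
  for (let i = 0; i < frameCount; i++) {
    const size = view.getUint32(offset, true);
    offset += 4;
    frames.push(new Uint8Array(buffer, offset, size));  // JPEG bytes or packed RGBA, depending on use_jpeg
    offset += size;
  }
  return frames;
}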


@ -0,0 +1,203 @@
use std::sync::{Arc, Mutex};
use std::collections::HashMap;
use std::fs::File;
use std::io::{Read, Seek, SeekFrom};
use std::path::PathBuf;
use tiny_http::{Server, Response, Request, Header, StatusCode};
pub struct VideoServer {
port: u16,
allowed_files: Arc<Mutex<HashMap<String, PathBuf>>>, // URL path -> file path
}
impl VideoServer {
pub fn new() -> Result<Self, String> {
// Bind to localhost on a random available port
let server = Server::http("127.0.0.1:0")
.map_err(|e| format!("Failed to create HTTP server: {}", e))?;
let port = match server.server_addr() {
tiny_http::ListenAddr::IP(addr) => addr.port(),
_ => return Err("Unexpected server address type".to_string()),
};
let allowed_files = Arc::new(Mutex::new(HashMap::new()));
eprintln!("[Video Server] Started on port {}", port);
// Spawn server thread
let files = allowed_files.clone();
std::thread::spawn(move || {
for request in server.incoming_requests() {
handle_request(request, &files);
}
});
Ok(Self {
port,
allowed_files,
})
}
pub fn port(&self) -> u16 {
self.port
}
pub fn add_file(&self, url_path: String, file_path: PathBuf) {
eprintln!("[Video Server] Adding file: {} -> {:?}", url_path, file_path);
let mut files = self.allowed_files.lock().unwrap();
files.insert(url_path, file_path);
}
pub fn get_url(&self, url_path: &str) -> String {
format!("http://127.0.0.1:{}{}", self.port, url_path)
}
}
fn handle_request(request: Request, allowed_files: &Arc<Mutex<HashMap<String, PathBuf>>>) {
let url = request.url();
let method = request.method();
eprintln!("[Video Server] {} {}", method, url);
// Get file path
let files = allowed_files.lock().unwrap();
let file_path = match files.get(url) {
Some(path) => path.clone(),
None => {
eprintln!("[Video Server] File not found: {}", url);
let response = Response::from_string("Not Found")
.with_status_code(StatusCode(404));
let _ = request.respond(response);
return;
}
};
drop(files);
// Open file
let mut file = match File::open(&file_path) {
Ok(f) => f,
Err(e) => {
eprintln!("[Video Server] Failed to open file: {}", e);
let response = Response::from_string("Internal Server Error")
.with_status_code(StatusCode(500));
let _ = request.respond(response);
return;
}
};
// Get file size
let file_size = match file.metadata() {
Ok(meta) => meta.len(),
Err(e) => {
eprintln!("[Video Server] Failed to get file metadata: {}", e);
let response = Response::from_string("Internal Server Error")
.with_status_code(StatusCode(500));
let _ = request.respond(response);
return;
}
};
// Check for Range header - convert to owned String to avoid borrow issues
let range_header = request.headers().iter()
.find(|h| h.field.equiv("Range"))
.map(|h| h.value.as_str().to_string());
match range_header {
Some(range) if range.starts_with("bytes=") => {
// Parse range request
eprintln!("[Video Server] Range request: {}", range);
handle_range_request(request, file, file_size, &range, &file_path);
}
_ => {
// Serve entire file
eprintln!("[Video Server] Full file request");
handle_full_request(request, file, file_size, &file_path);
}
}
}
fn handle_range_request(
request: Request,
mut file: File,
file_size: u64,
range: &str,
file_path: &PathBuf,
) {
// Parse "bytes=start-end"
let range = range.trim_start_matches("bytes=");
let parts: Vec<&str> = range.split('-').collect();
let start = parts[0].parse::<u64>().unwrap_or(0);
let end = if parts.len() > 1 && !parts[1].is_empty() {
parts[1].parse::<u64>().unwrap_or(file_size - 1)
} else {
file_size - 1
};
let length = end - start + 1;
// Seek to start position
if let Err(e) = file.seek(SeekFrom::Start(start)) {
eprintln!("[Video Server] Failed to seek: {}", e);
let response = Response::from_string("Internal Server Error")
.with_status_code(StatusCode(500));
let _ = request.respond(response);
return;
}
// Read the requested range
let mut buffer = vec![0u8; length as usize];
if let Err(e) = file.read_exact(&mut buffer) {
eprintln!("[Video Server] Failed to read range: {}", e);
let response = Response::from_string("Internal Server Error")
.with_status_code(StatusCode(500));
let _ = request.respond(response);
return;
}
// Determine content type
let content_type = get_content_type(file_path);
// Send 206 Partial Content response
let content_range = format!("bytes {}-{}/{}", start, end, file_size);
let response = Response::from_data(buffer)
.with_status_code(StatusCode(206))
.with_header(Header::from_bytes(&b"Content-Type"[..], content_type.as_bytes()).unwrap())
.with_header(Header::from_bytes(&b"Content-Length"[..], length.to_string().as_bytes()).unwrap())
.with_header(Header::from_bytes(&b"Content-Range"[..], content_range.as_bytes()).unwrap())
.with_header(Header::from_bytes(&b"Accept-Ranges"[..], &b"bytes"[..]).unwrap())
.with_header(Header::from_bytes(&b"Access-Control-Allow-Origin"[..], &b"*"[..]).unwrap());
let _ = request.respond(response);
}
fn handle_full_request(
request: Request,
file: File,
file_size: u64,
file_path: &PathBuf,
) {
// Determine content type
let content_type = get_content_type(file_path);
// Send 200 OK response using from_file to avoid chunked encoding
let response = Response::from_file(file)
.with_status_code(StatusCode(200))
.with_chunked_threshold(usize::MAX) // Force Content-Length instead of chunked
.with_header(Header::from_bytes(&b"Content-Type"[..], content_type.as_bytes()).unwrap())
.with_header(Header::from_bytes(&b"Accept-Ranges"[..], &b"bytes"[..]).unwrap())
.with_header(Header::from_bytes(&b"Access-Control-Allow-Origin"[..], &b"*"[..]).unwrap());
let _ = request.respond(response);
}
fn get_content_type(path: &PathBuf) -> String {
match path.extension().and_then(|s| s.to_str()) {
Some("webm") => "video/webm".to_string(),
Some("mp4") => "video/mp4".to_string(),
Some("mkv") => "video/x-matroska".to_string(),
Some("avi") => "video/x-msvideo".to_string(),
Some("mov") => "video/quicktime".to_string(),
_ => "application/octet-stream".to_string(),
}
}
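On the frontend, the URL returned by video_load_file can be handed straight to a media element, letting the browser drive Range requests against this server; a minimal sketch (not part of this commit, the element wiring is hypothetical):
// Hypothetical playback wiring for a browser-compatible source.
async function attachVideo(invoke, videoElement, path) {
  const metadata = await invoke('video_load_file', { path });
  if (metadata.http_url) {
    videoElement.src = metadata.http_url;  // e.g. http://127.0.0.1:<port>/video/<pool_index>
    await videoElement.play();
  } else {
    // Not browser-compatible yet: poll video_get_transcode_status until an http_url appears.
  }
}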


@ -12,7 +12,7 @@ import {
Frame
} from '../models/animation.js';
import { GraphicsObject } from '../models/graphics-object.js';
import { VectorLayer, AudioTrack, VideoLayer } from '../models/layer.js';
import {
arraysAreEqual,
lerp,
@ -161,6 +161,7 @@ let redoStack = null;
let updateMenu = null;
let updateLayers = null;
let updateUI = null;
let updateVideoFrames = null;
let updateInfopanel = null;
let invoke = null;
let config = null;
@ -186,6 +187,7 @@ export function initializeActions(deps) {
updateMenu = deps.updateMenu;
updateLayers = deps.updateLayers;
updateUI = deps.updateUI;
updateVideoFrames = deps.updateVideoFrames;
updateInfopanel = deps.updateInfopanel;
invoke = deps.invoke;
config = deps.config;
@ -587,6 +589,148 @@ export const actions = {
}
},
},
addVideo: {
create: (filePath, object, videoname) => {
redoStack.length = 0;
let action = {
filePath: filePath,
videoname: videoname,
layeruuid: uuidv4(),
object: object.idx,
};
undoStack.push({ name: "addVideo", action: action });
actions.addVideo.execute(action);
updateMenu();
},
execute: async (action) => {
// Create new VideoLayer
let newVideoLayer = new VideoLayer(action.layeruuid, action.videoname);
let object = pointerList[action.object];
// Add layer to object
object.layers.push(newVideoLayer);
// Update UI
updateLayers();
if (context.timelineWidget) {
context.timelineWidget.requestRedraw();
}
// Load video asynchronously
try {
const metadata = await invoke('video_load_file', {
path: action.filePath
});
// Add clip to video layer
await newVideoLayer.addClip(
metadata.pool_index,
0, // startTime
metadata.duration,
0, // offset
action.videoname,
metadata.duration, // sourceDuration
metadata // Pass full metadata for browser playback support
);
// If video has audio, create linked AudioTrack
if (metadata.has_audio && metadata.audio_pool_index !== null) {
const audioTrackUuid = uuidv4();
const audioTrackName = `${action.videoname} (Audio)`;
const newAudioTrack = new AudioTrack(audioTrackUuid, audioTrackName);
// Initialize track in backend
await newAudioTrack.initializeTrack();
// Add audio clip using the extracted audio
const audioClipId = newAudioTrack.clips.length;
await invoke('audio_add_clip', {
trackId: newAudioTrack.audioTrackId,
poolIndex: metadata.audio_pool_index,
startTime: 0,
duration: metadata.audio_duration,
offset: 0
});
const audioClip = {
clipId: audioClipId,
poolIndex: metadata.audio_pool_index,
name: audioTrackName,
startTime: 0,
duration: metadata.audio_duration,
offset: 0,
waveform: metadata.audio_waveform,
sourceDuration: metadata.audio_duration
};
newAudioTrack.clips.push(audioClip);
// Link the clips to each other
const videoClip = newVideoLayer.clips[0]; // The video clip we just added
if (videoClip) {
videoClip.linkedAudioClip = audioClip;
audioClip.linkedVideoClip = videoClip;
}
// Also keep track-level references for convenience
newVideoLayer.linkedAudioTrack = newAudioTrack;
newAudioTrack.linkedVideoLayer = newVideoLayer;
// Add audio track to object
object.audioTracks.push(newAudioTrack);
// Store reference for rollback
action.audioTrackUuid = audioTrackUuid;
console.log(`Video audio extracted: ${metadata.audio_duration}s, ${metadata.audio_sample_rate}Hz, ${metadata.audio_channels}ch`);
}
// Update UI with real clip data
updateLayers();
if (context.timelineWidget) {
context.timelineWidget.requestRedraw();
}
// Make this the active layer
if (context.activeObject) {
context.activeObject.activeLayer = newVideoLayer;
updateLayers();
}
// Fetch first frame
if (updateVideoFrames) {
await updateVideoFrames(context.activeObject.currentTime || 0);
}
// Trigger redraw to show the first frame
updateUI();
console.log(`Video loaded: ${action.videoname}, ${metadata.width}x${metadata.height}, ${metadata.duration}s`);
} catch (error) {
console.error('Failed to load video:', error);
}
},
rollback: (action) => {
let object = pointerList[action.object];
let layer = pointerList[action.layeruuid];
object.layers.splice(object.layers.indexOf(layer), 1);
// Remove linked audio track if it was created
if (action.audioTrackUuid) {
let audioTrack = pointerList[action.audioTrackUuid];
if (audioTrack) {
const index = object.audioTracks.indexOf(audioTrack);
if (index !== -1) {
object.audioTracks.splice(index, 1);
}
}
}
updateLayers();
if (context.timelineWidget) {
context.timelineWidget.requestRedraw();
}
},
},
addMIDI: {
create: (filePath, object, midiname) => {
redoStack.length = 0;
@ -832,8 +976,8 @@ export const actions = {
},
execute: (action) => {
let object = pointerList[action.object];
let layer = new VectorLayer(action.uuid);
layer.name = `VectorLayer ${object.layers.length + 1}`;
object.layers.push(layer);
object.currentLayer = object.layers.indexOf(layer);
updateLayers();
@ -854,7 +998,7 @@ export const actions = {
redoStack.length = 0;
// Don't allow deleting the only layer
if (context.activeObject.layers.length == 1) return;
if (!(layer instanceof VectorLayer)) {
layer = context.activeObject.activeLayer;
}
let action = {
@ -929,8 +1073,8 @@ export const actions = {
let object = GraphicsObject.fromJSON(action.object); let object = GraphicsObject.fromJSON(action.object);
activeObject.addObject(object); activeObject.addObject(object);
break; break;
case "Layer": case "VectorLayer":
let layer = Layer.fromJSON(action.object); let layer = VectorLayer.fromJSON(action.object);
activeObject.addLayer(layer); activeObject.addLayer(layer);
} }
updateUI(); updateUI();
@ -943,7 +1087,7 @@ export const actions = {
let object = pointerList[action.object.idx]; let object = pointerList[action.object.idx];
activeObject.removeChild(object); activeObject.removeChild(object);
break; break;
case "Layer": case "VectorLayer":
let layer = pointerList[action.object.idx]; let layer = pointerList[action.object.idx];
activeObject.removeLayer(layer); activeObject.removeLayer(layer);
} }

View File

@ -93,8 +93,9 @@ import {
AnimationData AnimationData
} from "./models/animation.js"; } from "./models/animation.js";
import { import {
Layer, VectorLayer,
AudioTrack, AudioTrack,
VideoLayer,
initializeLayerDependencies initializeLayerDependencies
} from "./models/layer.js"; } from "./models/layer.js";
import { import {
@ -135,6 +136,7 @@ const { getVersion } = window.__TAURI__.app;
// Supported file extensions // Supported file extensions
const imageExtensions = ["png", "gif", "avif", "jpg", "jpeg"]; const imageExtensions = ["png", "gif", "avif", "jpg", "jpeg"];
const audioExtensions = ["mp3", "wav", "aiff", "ogg", "flac"]; const audioExtensions = ["mp3", "wav", "aiff", "ogg", "flac"];
const videoExtensions = ["mp4", "mov", "avi", "mkv", "webm", "m4v"];
const midiExtensions = ["mid", "midi"]; const midiExtensions = ["mid", "midi"];
const beamExtensions = ["beam"]; const beamExtensions = ["beam"];
@ -218,7 +220,6 @@ let fileExportPath = undefined;
let state = "normal"; let state = "normal";
let playing = false;
let lastFrameTime; let lastFrameTime;
let uiDirty = false; let uiDirty = false;
@ -343,6 +344,66 @@ let mouseEvent;
window.context = context; window.context = context;
window.actions = actions; window.actions = actions;
window.addKeyframeAtPlayhead = addKeyframeAtPlayhead; window.addKeyframeAtPlayhead = addKeyframeAtPlayhead;
window.updateVideoFrames = null; // Will be set after function is defined
// IPC Benchmark function - run from console: testIPCBenchmark()
window.testIPCBenchmark = async function() {
const { invoke, Channel } = window.__TAURI__.core;
// Test sizes: 1KB, 10KB, 50KB, 100KB, 500KB, 1MB, 2MB, 5MB
const testSizes = [
1024, // 1 KB
10 * 1024, // 10 KB
50 * 1024, // 50 KB
100 * 1024, // 100 KB
500 * 1024, // 500 KB
1024 * 1024, // 1 MB
2 * 1024 * 1024, // 2 MB
5 * 1024 * 1024 // 5 MB
];
console.log('\n=== IPC Benchmark Starting ===\n');
console.log('Size (KB)\tJS Total (ms)\tJS IPC (ms)\tJS Recv (ms)\tThroughput (MB/s)');
console.log('─'.repeat(80));
for (const sizeBytes of testSizes) {
const t_start = performance.now();
let receivedData = null;
const dataPromise = new Promise((resolve, reject) => {
const channel = new Channel();
channel.onmessage = (data) => {
const t_recv_start = performance.now();
receivedData = data;
const t_recv_end = performance.now();
resolve(t_recv_end - t_recv_start);
};
invoke('video_ipc_benchmark', {
sizeBytes: sizeBytes,
channel: channel
}).catch(reject);
});
const recv_time = await dataPromise;
const t_after_ipc = performance.now();
const total_time = t_after_ipc - t_start;
const ipc_time = total_time - recv_time;
const size_kb = sizeBytes / 1024;
const size_mb = sizeBytes / (1024 * 1024);
const throughput = size_mb / (total_time / 1000);
console.log(`${size_kb.toFixed(0)}\t\t${total_time.toFixed(2)}\t\t${ipc_time.toFixed(2)}\t\t${recv_time.toFixed(2)}\t\t${throughput.toFixed(2)}`);
// Small delay between tests
await new Promise(resolve => setTimeout(resolve, 100));
}
console.log('\n=== IPC Benchmark Complete ===\n');
console.log('Run again with: testIPCBenchmark()');
};
function uuidv4() { function uuidv4() {
return "10000000-1000-4000-8000-100000000000".replace(/[018]/g, (c) => return "10000000-1000-4000-8000-100000000000".replace(/[018]/g, (c) =>
@ -638,7 +699,7 @@ function redo() {
// ============================================================================ // ============================================================================
// ============================================================================ // ============================================================================
// Layer system classes (Layer, AudioTrack) // Layer system classes (VectorLayer, AudioTrack, VideoLayer)
// have been moved to src/models/layer.js and are imported at the top of this file // have been moved to src/models/layer.js and are imported at the top of this file
// ============================================================================ // ============================================================================
@ -904,8 +965,8 @@ window.addEventListener("keydown", (e) => {
}); });
async function playPause() { async function playPause() {
playing = !playing; context.playing = !context.playing;
if (playing) { if (context.playing) {
// Reset to start if we're at the end // Reset to start if we're at the end
const duration = context.activeObject.duration; const duration = context.activeObject.duration;
if (duration > 0 && context.activeObject.currentTime >= duration) { if (duration > 0 && context.activeObject.currentTime >= duration) {
@ -963,8 +1024,8 @@ async function playPause() {
// Update play/pause button appearance if it exists // Update play/pause button appearance if it exists
if (context.playPauseButton) { if (context.playPauseButton) {
context.playPauseButton.className = playing ? "playback-btn playback-btn-pause" : "playback-btn playback-btn-play"; context.playPauseButton.className = context.playing ? "playback-btn playback-btn-pause" : "playback-btn playback-btn-play";
context.playPauseButton.title = playing ? "Pause" : "Play"; context.playPauseButton.title = context.playing ? "Pause" : "Play";
} }
} }
@ -977,7 +1038,7 @@ function playbackLoop() {
context.timelineWidget.requestRedraw(); context.timelineWidget.requestRedraw();
} }
if (playing) { if (context.playing) {
const duration = context.activeObject.duration; const duration = context.activeObject.duration;
// Check if we've reached the end (but allow infinite playback when recording) // Check if we've reached the end (but allow infinite playback when recording)
@ -986,7 +1047,7 @@ function playbackLoop() {
requestAnimationFrame(playbackLoop); requestAnimationFrame(playbackLoop);
} else { } else {
// Animation finished // Animation finished
playing = false; context.playing = false;
// Stop DAW backend audio playback // Stop DAW backend audio playback
invoke('audio_stop').catch(error => { invoke('audio_stop').catch(error => {
@ -1009,6 +1070,38 @@ function playbackLoop() {
} }
} }
// Update video frames for all VideoLayers in the scene
async function updateVideoFrames(currentTime) {
// Recursively find all VideoLayers in the scene
function findVideoLayers(obj) {
const videoLayers = [];
if (obj.layers) {
for (let layer of obj.layers) {
if (layer.type === 'video') {
videoLayers.push(layer);
}
}
}
// Recursively check children (GraphicsObjects can contain other GraphicsObjects)
if (obj.children) {
for (let child of obj.children) {
videoLayers.push(...findVideoLayers(child));
}
}
return videoLayers;
}
const videoLayers = findVideoLayers(context.activeObject);
// Update all video layers in parallel
await Promise.all(videoLayers.map(layer => layer.updateFrame(currentTime)));
// Note: No updateUI() call here - renderUI() will draw after awaiting this function
}
// Expose updateVideoFrames globally
window.updateVideoFrames = updateVideoFrames;
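// Illustrative sketch: a scrub helper would follow the same pattern advance() uses,
// but await the frame update so the subsequent redraw shows the newly fetched frames.
// `seekTo` is a hypothetical helper shown for illustration, not an existing function.
async function seekTo(seconds) {
    context.activeObject.currentTime = seconds;
    // Keep the DAW backend in sync (same call advance() makes)
    invoke('audio_seek', { seconds: seconds });
    // Fetch video frames for the new time, then redraw
    await updateVideoFrames(seconds);
    updateUI();
}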
// Single-step forward by one frame/second // Single-step forward by one frame/second
function advance() { function advance() {
if (context.timelineWidget?.timelineState?.timeFormat === "frames") { if (context.timelineWidget?.timelineState?.timeFormat === "frames") {
@ -1025,6 +1118,9 @@ function advance() {
// Sync DAW backend // Sync DAW backend
invoke('audio_seek', { seconds: context.activeObject.currentTime }); invoke('audio_seek', { seconds: context.activeObject.currentTime });
// Update video frames
updateVideoFrames(context.activeObject.currentTime);
updateLayers(); updateLayers();
updateMenu(); updateMenu();
updateUI(); updateUI();
@ -1098,7 +1194,7 @@ async function handleAudioEvent(event) {
switch (event.type) { switch (event.type) {
case 'PlaybackPosition': case 'PlaybackPosition':
// Sync frontend time with DAW time // Sync frontend time with DAW time
if (playing) { if (context.playing) {
// Quantize time to framerate for animation playback // Quantize time to framerate for animation playback
const framerate = context.activeObject.frameRate; const framerate = context.activeObject.frameRate;
const frameDuration = 1 / framerate; const frameDuration = 1 / framerate;
@ -1108,6 +1204,10 @@ async function handleAudioEvent(event) {
if (context.timelineWidget?.timelineState) { if (context.timelineWidget?.timelineState) {
context.timelineWidget.timelineState.currentTime = quantizedTime; context.timelineWidget.timelineState.currentTime = quantizedTime;
} }
// Update video frames
updateVideoFrames(quantizedTime);
// Update time display // Update time display
if (context.updateTimeDisplay) { if (context.updateTimeDisplay) {
context.updateTimeDisplay(); context.updateTimeDisplay();
@ -1545,7 +1645,7 @@ async function toggleRecording() {
console.log('[FRONTEND] MIDI recording started successfully'); console.log('[FRONTEND] MIDI recording started successfully');
// Start playback so the timeline moves (if not already playing) // Start playback so the timeline moves (if not already playing)
if (!playing) { if (!context.playing) {
await playPause(); await playPause();
} }
} catch (error) { } catch (error) {
@ -1565,7 +1665,7 @@ async function toggleRecording() {
console.log('[FRONTEND] Audio recording started successfully, waiting for RecordingStarted event'); console.log('[FRONTEND] Audio recording started successfully, waiting for RecordingStarted event');
// Start playback so the timeline moves (if not already playing) // Start playback so the timeline moves (if not already playing)
if (!playing) { if (!context.playing) {
await playPause(); await playPause();
} }
} catch (error) { } catch (error) {
@ -2355,6 +2455,10 @@ async function importFile() {
name: "Audio files", name: "Audio files",
extensions: audioExtensions, extensions: audioExtensions,
}, },
{
name: "Video files",
extensions: videoExtensions,
},
{ {
name: "MIDI files", name: "MIDI files",
extensions: midiExtensions, extensions: midiExtensions,
@ -2410,10 +2514,12 @@ async function importFile() {
let usedFilterIndex = 0; let usedFilterIndex = 0;
if (audioExtensions.includes(ext)) { if (audioExtensions.includes(ext)) {
usedFilterIndex = 1; // Audio usedFilterIndex = 1; // Audio
} else if (videoExtensions.includes(ext)) {
usedFilterIndex = 2; // Video
} else if (midiExtensions.includes(ext)) { } else if (midiExtensions.includes(ext)) {
usedFilterIndex = 2; // MIDI usedFilterIndex = 3; // MIDI
} else if (beamExtensions.includes(ext)) { } else if (beamExtensions.includes(ext)) {
usedFilterIndex = 3; // Lightningbeam usedFilterIndex = 4; // Lightningbeam
} else { } else {
usedFilterIndex = 0; // Image (default) usedFilterIndex = 0; // Image (default)
} }
@ -2480,6 +2586,9 @@ async function importFile() {
} else if (audioExtensions.includes(ext)) { } else if (audioExtensions.includes(ext)) {
// Handle audio files - pass file path directly to backend // Handle audio files - pass file path directly to backend
actions.addAudio.create(path, context.activeObject, filename); actions.addAudio.create(path, context.activeObject, filename);
} else if (videoExtensions.includes(ext)) {
// Handle video files
actions.addVideo.create(path, context.activeObject, filename);
} else if (midiExtensions.includes(ext)) { } else if (midiExtensions.includes(ext)) {
// Handle MIDI files // Handle MIDI files
actions.addMIDI.create(path, context.activeObject, filename); actions.addMIDI.create(path, context.activeObject, filename);
@ -4503,8 +4612,8 @@ function timeline() {
// Play/Pause button // Play/Pause button
const playPauseButton = document.createElement("button"); const playPauseButton = document.createElement("button");
playPauseButton.className = playing ? "playback-btn playback-btn-pause" : "playback-btn playback-btn-play"; playPauseButton.className = context.playing ? "playback-btn playback-btn-pause" : "playback-btn playback-btn-play";
playPauseButton.title = playing ? "Pause" : "Play"; playPauseButton.title = context.playing ? "Pause" : "Play";
playPauseButton.addEventListener("click", playPause); playPauseButton.addEventListener("click", playPause);
// Store reference so playPause() can update it // Store reference so playPause() can update it
@ -4886,6 +4995,12 @@ function timeline() {
timelineWidget.lastDragEvent = e; timelineWidget.lastDragEvent = e;
timelineWidget.handleMouseEvent("mousemove", x, y); timelineWidget.handleMouseEvent("mousemove", x, y);
// Update cursor based on widget's cursor property
if (timelineWidget.cursor) {
canvas.style.cursor = timelineWidget.cursor;
}
updateCanvasSize(); // Redraw after interaction updateCanvasSize(); // Redraw after interaction
}); });
@ -5607,7 +5722,12 @@ function updateUI() {
context.updateUI = updateUI; context.updateUI = updateUI;
context.updateMenu = updateMenu; context.updateMenu = updateMenu;
function renderUI() { async function renderUI() {
// Update video frames BEFORE drawing
if (context.activeObject) {
await updateVideoFrames(context.activeObject.currentTime);
}
for (let canvas of canvases) { for (let canvas of canvases) {
let ctx = canvas.getContext("2d"); let ctx = canvas.getContext("2d");
ctx.resetTransform(); ctx.resetTransform();
@ -6635,6 +6755,11 @@ async function renderMenu() {
action: actions.addLayer.create, action: actions.addLayer.create,
accelerator: getShortcut("addLayer"), accelerator: getShortcut("addLayer"),
}, },
{
text: "Add Video Layer",
enabled: true,
action: addVideoLayer,
},
{ {
text: "Add Audio Track", text: "Add Audio Track",
enabled: true, enabled: true,
@ -6729,7 +6854,7 @@ async function renderMenu() {
}, },
{ {
text: "Play", text: "Play",
enabled: !playing, enabled: !context.playing,
action: playPause, action: playPause,
accelerator: getShortcut("playAnimation"), accelerator: getShortcut("playAnimation"),
}, },
@ -10881,10 +11006,33 @@ function getMimeType(filePath) {
} }
function renderAll() { let renderInProgress = false;
let rafScheduled = false;
// FPS tracking
let lastFpsLogTime = 0;
let frameCount = 0;
let fpsHistory = [];
async function renderAll() {
rafScheduled = false;
// Skip if a render is already in progress (prevent stacking async calls)
if (renderInProgress) {
// Schedule another attempt if not already scheduled
if (!rafScheduled) {
rafScheduled = true;
requestAnimationFrame(renderAll);
}
return;
}
renderInProgress = true;
const renderStartTime = performance.now();
try { try {
if (uiDirty) { if (uiDirty) {
renderUI(); await renderUI();
uiDirty = false; uiDirty = false;
} }
if (layersDirty) { if (layersDirty) {
@ -10917,7 +11065,33 @@ function renderAll() {
repeatCount = 2; repeatCount = 2;
} }
} finally { } finally {
requestAnimationFrame(renderAll); renderInProgress = false;
// FPS logging (only when playing)
if (context.playing) {
frameCount++;
const now = performance.now();
const renderTime = now - renderStartTime;
if (now - lastFpsLogTime >= 1000) {
const fps = frameCount / ((now - lastFpsLogTime) / 1000);
fpsHistory.push({ fps, renderTime });
console.log(`[FPS] ${fps.toFixed(1)} fps | Render time: ${renderTime.toFixed(1)}ms`);
frameCount = 0;
lastFpsLogTime = now;
// Keep only last 10 samples
if (fpsHistory.length > 10) {
fpsHistory.shift();
}
}
}
// Schedule next frame if not already scheduled
if (!rafScheduled) {
rafScheduled = true;
requestAnimationFrame(renderAll);
}
} }
} }
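// Illustrative sketch: fpsHistory above keeps the last 10 { fps, renderTime } samples.
// A small console helper (hypothetical, mirroring the style of testIPCBenchmark) can
// summarize them when diagnosing playback performance:
window.logFpsSummary = function() {
  if (fpsHistory.length === 0) {
    console.log('[FPS] No samples yet - start playback first');
    return;
  }
  const avgFps = fpsHistory.reduce((sum, s) => sum + s.fps, 0) / fpsHistory.length;
  const avgRender = fpsHistory.reduce((sum, s) => sum + s.renderTime, 0) / fpsHistory.length;
  console.log(`[FPS] ${fpsHistory.length} samples | avg ${avgFps.toFixed(1)} fps | avg render ${avgRender.toFixed(1)}ms`);
};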
@ -10928,6 +11102,7 @@ initializeActions({
updateMenu, updateMenu,
updateLayers, updateLayers,
updateUI, updateUI,
updateVideoFrames,
updateInfopanel, updateInfopanel,
invoke, invoke,
config config
@ -11017,6 +11192,33 @@ async function addEmptyMIDITrack() {
} }
} }
async function addVideoLayer() {
console.log('[addVideoLayer] Creating new video layer');
const layerName = `Video ${context.activeObject.layers.filter(l => l.type === 'video').length + 1}`;
const layerUuid = uuidv4();
try {
// Create new VideoLayer
const newVideoLayer = new VideoLayer(layerUuid, layerName);
// Add layer to active object
context.activeObject.layers.push(newVideoLayer);
// Select the newly created layer
context.activeObject.activeLayer = newVideoLayer;
// Update UI
updateLayers();
if (context.timelineWidget) {
context.timelineWidget.requestRedraw();
}
console.log('Empty video layer created:', layerName);
} catch (error) {
console.error('Failed to create video layer:', error);
}
}
// MIDI Command Wrappers // MIDI Command Wrappers
// Note: getAvailableInstruments() removed - now using node-based instruments // Note: getAvailableInstruments() removed - now using node-based instruments

View File

@ -1,7 +1,7 @@
// GraphicsObject model: Main container for layers and animation // GraphicsObject model: Main container for layers and animation
import { context, config, pointerList, startProps } from '../state.js'; import { context, config, pointerList, startProps } from '../state.js';
import { Layer, AudioTrack } from './layer.js'; import { VectorLayer, AudioTrack, VideoLayer } from './layer.js';
import { TempShape } from './shapes.js'; import { TempShape } from './shapes.js';
import { AnimationCurve, Keyframe } from './animation.js'; import { AnimationCurve, Keyframe } from './animation.js';
import { Widget } from '../widgets.js'; import { Widget } from '../widgets.js';
@ -45,8 +45,20 @@ class GraphicsObject extends Widget {
this.name = this.idx; this.name = this.idx;
this.currentFrameNum = 0; // LEGACY: kept for backwards compatibility this.currentFrameNum = 0; // LEGACY: kept for backwards compatibility
this.currentTime = 0; // New: continuous time for AnimationData curves this._currentTime = 0; // Internal storage for currentTime
this.currentLayer = 0; this.currentLayer = 0;
// Make currentTime a getter/setter property
Object.defineProperty(this, 'currentTime', {
get: function() {
return this._currentTime;
},
set: function(value) {
this._currentTime = value;
},
enumerable: true,
configurable: true
});
this._activeAudioTrack = null; // Reference to active audio track (if any) this._activeAudioTrack = null; // Reference to active audio track (if any)
// Initialize children and audioTracks based on initialChildType // Initialize children and audioTracks based on initialChildType
@ -54,8 +66,11 @@ class GraphicsObject extends Widget {
this.audioTracks = []; this.audioTracks = [];
if (initialChildType === 'layer') { if (initialChildType === 'layer') {
this.children = [new Layer(uuid + "-L1", this)]; this.children = [new VectorLayer(uuid + "-L1", this)];
this.currentLayer = 0; // Set first layer as active this.currentLayer = 0; // Set first layer as active
} else if (initialChildType === 'video') {
this.children = [new VideoLayer(uuid + "-V1", "Video 1")];
this.currentLayer = 0; // Set first video layer as active
} else if (initialChildType === 'midi') { } else if (initialChildType === 'midi') {
const midiTrack = new AudioTrack(uuid + "-M1", "MIDI 1", 'midi'); const midiTrack = new AudioTrack(uuid + "-M1", "MIDI 1", 'midi');
this.audioTracks.push(midiTrack); this.audioTracks.push(midiTrack);
@ -103,7 +118,12 @@ class GraphicsObject extends Widget {
graphicsObject.parent = pointerList[json.parent] graphicsObject.parent = pointerList[json.parent]
} }
for (let layer of json.layers) { for (let layer of json.layers) {
graphicsObject.layers.push(Layer.fromJSON(layer, graphicsObject)); if (layer.type === 'VideoLayer') {
graphicsObject.layers.push(VideoLayer.fromJSON(layer));
} else {
// Default to VectorLayer
graphicsObject.layers.push(VectorLayer.fromJSON(layer, graphicsObject));
}
} }
// Handle audioTracks (may not exist in older files) // Handle audioTracks (may not exist in older files)
if (json.audioTracks) { if (json.audioTracks) {
@ -177,9 +197,20 @@ class GraphicsObject extends Widget {
// Check visual layers // Check visual layers
for (let layer of this.layers) { for (let layer of this.layers) {
// Check animation data duration
if (layer.animationData && layer.animationData.duration > maxDuration) { if (layer.animationData && layer.animationData.duration > maxDuration) {
maxDuration = layer.animationData.duration; maxDuration = layer.animationData.duration;
} }
// Check video layer clips (VideoLayer has clips like AudioTrack)
if (layer.type === 'video' && layer.clips) {
for (let clip of layer.clips) {
const clipEnd = clip.startTime + clip.duration;
if (clipEnd > maxDuration) {
maxDuration = clipEnd;
}
}
}
} }
// Check audio tracks // Check audio tracks
@ -300,6 +331,12 @@ class GraphicsObject extends Widget {
for (let layer of this.layers) { for (let layer of this.layers) {
if (context.activeObject == this && !layer.visible) continue; if (context.activeObject == this && !layer.visible) continue;
// Handle VideoLayer differently - call its draw method
if (layer.type === 'video') {
layer.draw(context);
continue;
}
// Draw activeShape (shape being drawn in progress) for active layer only // Draw activeShape (shape being drawn in progress) for active layer only
if (layer === context.activeLayer && layer.activeShape) { if (layer === context.activeLayer && layer.activeShape) {
let cxt = {...context}; let cxt = {...context};

View File

@ -1,4 +1,4 @@
// Layer models: Layer and AudioLayer classes // Layer models: VectorLayer, AudioTrack, and VideoLayer classes
import { context, config, pointerList } from '../state.js'; import { context, config, pointerList } from '../state.js';
import { Frame, AnimationData, Keyframe, tempFrame } from './animation.js'; import { Frame, AnimationData, Keyframe, tempFrame } from './animation.js';
@ -18,7 +18,7 @@ import {
const Tone = window.Tone; const Tone = window.Tone;
// Tauri API // Tauri API
const { invoke } = window.__TAURI__.core; const { invoke, Channel } = window.__TAURI__.core;
// Helper function for UUID generation // Helper function for UUID generation
function uuidv4() { function uuidv4() {
@ -65,7 +65,7 @@ export function initializeLayerDependencies(deps) {
actions = deps.actions; actions = deps.actions;
} }
class Layer extends Widget { class VectorLayer extends Widget {
constructor(uuid, parentObject = null) { constructor(uuid, parentObject = null) {
super(0,0) super(0,0)
if (!uuid) { if (!uuid) {
@ -73,7 +73,7 @@ class Layer extends Widget {
} else { } else {
this.idx = uuid; this.idx = uuid;
} }
this.name = "Layer"; this.name = "VectorLayer";
// LEGACY: Keep frames array for backwards compatibility during migration // LEGACY: Keep frames array for backwards compatibility during migration
this.frames = [new Frame("keyframe", this.idx + "-F1")]; this.frames = [new Frame("keyframe", this.idx + "-F1")];
this.animationData = new AnimationData(this); this.animationData = new AnimationData(this);
@ -86,7 +86,7 @@ class Layer extends Widget {
this.shapes = [] this.shapes = []
} }
static fromJSON(json, parentObject = null) { static fromJSON(json, parentObject = null) {
const layer = new Layer(json.idx, parentObject); const layer = new VectorLayer(json.idx, parentObject);
for (let i in json.children) { for (let i in json.children) {
const child = json.children[i]; const child = json.children[i];
const childObject = GraphicsObject.fromJSON(child); const childObject = GraphicsObject.fromJSON(child);
@ -136,7 +136,7 @@ class Layer extends Widget {
} }
toJSON(randomizeUuid = false) { toJSON(randomizeUuid = false) {
const json = {}; const json = {};
json.type = "Layer"; json.type = "VectorLayer";
if (randomizeUuid) { if (randomizeUuid) {
json.idx = uuidv4(); json.idx = uuidv4();
json.name = this.name + " copy"; json.name = this.name + " copy";
@ -468,7 +468,7 @@ class Layer extends Widget {
} }
} }
copy(idx) { copy(idx) {
let newLayer = new Layer(idx.slice(0, 8) + this.idx.slice(8)); let newLayer = new VectorLayer(idx.slice(0, 8) + this.idx.slice(8));
let idxMapping = {}; let idxMapping = {};
for (let child of this.children) { for (let child of this.children) {
let newChild = child.copy(idx); let newChild = child.copy(idx);
@ -1245,4 +1245,617 @@ class AudioTrack {
} }
} }
export { Layer, AudioTrack }; class VideoLayer extends Widget {
constructor(uuid, name) {
super(0, 0);
if (!uuid) {
this.idx = uuidv4();
} else {
this.idx = uuid;
}
this.name = name || "Video";
this.type = 'video';
this.visible = true;
this.audible = true;
this.animationData = new AnimationData(this);
// Empty arrays for layer compatibility
Object.defineProperty(this, 'shapes', {
value: Object.freeze([]),
writable: false,
enumerable: true,
configurable: false
});
Object.defineProperty(this, 'children', {
value: Object.freeze([]),
writable: false,
enumerable: true,
configurable: false
});
// Video clips on this layer
// { clipId, poolIndex, name, startTime, duration, offset, width, height }
this.clips = [];
// Associated audio track (if video has audio)
this.linkedAudioTrack = null; // Reference to AudioTrack
// Performance settings
this.useJpegCompression = false; // JPEG compression adds more overhead than it saves (default: false)
this.prefetchCount = 3; // Number of frames to prefetch ahead of playhead
// Timeline display
this.collapsed = false;
this.curvesMode = 'segment';
this.curvesHeight = 150;
pointerList[this.idx] = this;
}
async addClip(poolIndex, startTime, duration, offset = 0.0, name = '', sourceDuration = null, metadata = null) {
const poolInfo = await invoke('video_get_pool_info', { poolIndex });
// poolInfo is [width, height, fps] tuple from Rust
const [width, height, fps] = poolInfo;
const clip = {
clipId: this.clips.length,
poolIndex,
name: name || `Video ${this.clips.length + 1}`,
startTime,
duration,
offset,
width,
height,
fps, // Store the source framerate so playback code can use the clip's actual fps
sourceDuration: sourceDuration || duration, // Store original file duration
httpUrl: metadata?.http_url || null,
isBrowserCompatible: metadata?.is_browser_compatible || false,
transcoding: metadata?.transcoding || false,
videoElement: null, // Will hold HTML5 video element if using browser playback
useBrowserVideo: false, // Switch to true when video element is ready
isPlaying: false, // Track if video element is actively playing
};
this.clips.push(clip);
console.log(`Video clip added: ${name}, ${width}x${height}, duration: ${duration}s, browser-compatible: ${clip.isBrowserCompatible}, http_url: ${clip.httpUrl}`);
// If HTTP URL is available, create video element immediately
if (clip.httpUrl) {
await this._createVideoElement(clip);
clip.useBrowserVideo = true;
}
// If transcoding is in progress, start polling
else if (clip.transcoding) {
console.log(`[Video] Starting transcode polling for ${clip.name}`);
this._pollTranscodeStatus(clip);
}
}
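// Illustrative usage sketch (any `metadata` field names beyond those used above are
// assumptions): the addVideo action drives addClip roughly like this -
//
//   const layer = new VideoLayer(uuidv4(), 'Video 1');
//   await layer.addClip(
//       poolIndex,            // pool index assigned by the backend importer
//       0,                    // startTime on the timeline (seconds)
//       metadata.duration,    // clip duration (seconds)
//       0,                    // offset into the source file (seconds)
//       'My clip',
//       metadata.duration,    // sourceDuration: original file duration
//       metadata              // full metadata enables browser playback / transcode polling
//   );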
async _createVideoElement(clip) {
// Create hidden video element for hardware-accelerated decoding
const video = document.createElement('video');
// Hide video element using opacity (browsers may skip decoding if off-screen)
video.style.position = 'fixed';
video.style.bottom = '0';
video.style.right = '0';
video.style.width = '1px';
video.style.height = '1px';
video.style.opacity = '0.01'; // Nearly invisible but not 0 (some browsers optimize opacity:0)
video.style.pointerEvents = 'none';
video.style.zIndex = '-1';
video.preload = 'auto';
video.muted = true; // Mute video element (audio plays separately)
video.playsInline = true;
video.autoplay = false;
video.crossOrigin = 'anonymous'; // Required for canvas drawing - prevent CORS taint
// Add event listeners for debugging
video.addEventListener('loadedmetadata', () => {
console.log(`[Video] Loaded metadata for ${clip.name}: ${video.videoWidth}x${video.videoHeight}, duration: ${video.duration}s`);
});
video.addEventListener('loadeddata', () => {
console.log(`[Video] Loaded data for ${clip.name}, readyState: ${video.readyState}`);
});
video.addEventListener('canplay', () => {
console.log(`[Video] Can play ${clip.name}, duration: ${video.duration}s`);
// Mark video as ready for seeking once we can play AND have valid duration
if (video.duration > 0 && !isNaN(video.duration) && video.duration !== Infinity) {
clip.videoReady = true;
console.log(`[Video] Video is ready for seeking`);
}
});
// When seek completes, trigger UI redraw to show the new frame
video.addEventListener('seeked', () => {
if (updateUI) {
updateUI();
}
});
video.addEventListener('error', (e) => {
const error = video.error;
const errorMessages = {
1: 'MEDIA_ERR_ABORTED - Fetching aborted',
2: 'MEDIA_ERR_NETWORK - Network error',
3: 'MEDIA_ERR_DECODE - Decoding error',
4: 'MEDIA_ERR_SRC_NOT_SUPPORTED - Format not supported or file not accessible'
};
const errorMsg = errorMessages[error?.code] || 'Unknown error';
console.error(`[Video] Error loading ${clip.name}: ${errorMsg}`, error?.message);
});
// Use HTTP URL from local server (supports range requests for seeking)
video.src = clip.httpUrl;
// Try to load the video
video.load();
document.body.appendChild(video);
clip.videoElement = video;
console.log(`[Video] Created video element for clip ${clip.name}: ${clip.httpUrl}`);
}
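// Illustrative note: scrubbing relies on the 'seeked' listener above. updateFrame()
// sets videoElement.currentTime while paused, the browser decodes the frame
// asynchronously, and the 'seeked' event then calls updateUI() so draw() repaints
// with the newly decoded frame:
//
//   clip.videoElement.currentTime = clip.offset + (scrubTime - clip.startTime);
//   // ...browser finishes seeking...
//   // 'seeked' fires -> updateUI() -> draw() paints the new frame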
async _pollTranscodeStatus(clip) {
// Poll transcode status every 2 seconds
const pollInterval = setInterval(async () => {
try {
const status = await invoke('video_get_transcode_status', { poolIndex: clip.poolIndex });
if (status && status[2]) { // [path, progress, completed, httpUrl]
// Transcode complete!
clearInterval(pollInterval);
const [outputPath, progress, completed, httpUrl] = status;
clip.transcodedPath = outputPath;
clip.httpUrl = httpUrl;
clip.transcoding = false;
clip.useBrowserVideo = true;
console.log(`[Video] Transcode complete for ${clip.name}, switching to browser playback: ${httpUrl}`);
// Create video element for browser playback
await this._createVideoElement(clip);
}
} catch (error) {
console.error('Failed to poll transcode status:', error);
clearInterval(pollInterval);
}
}, 2000);
}
// Pre-fetch frames for current time (call before draw)
async updateFrame(currentTime) {
// Prevent concurrent calls - if already updating, skip
if (this.updateInProgress) {
return;
}
this.updateInProgress = true;
try {
for (let clip of this.clips) {
// Check if clip is active at current time
if (currentTime < clip.startTime ||
currentTime >= clip.startTime + clip.duration) {
clip.currentFrame = null;
// Pause video element if we left its time range
if (clip.videoElement && clip.isPlaying) {
clip.videoElement.pause();
clip.isPlaying = false;
}
continue;
}
// If using browser video element
if (clip.useBrowserVideo && clip.videoElement) {
const videoTime = clip.offset + (currentTime - clip.startTime);
// Don't do anything until video is fully ready
if (!clip.videoReady) {
if (!clip._notReadyWarned) {
console.warn(`[Video updateFrame] Video not ready yet (duration=${clip.videoElement.duration})`);
clip._notReadyWarned = true;
}
continue;
}
// During playback: let video play naturally
if (context.playing) {
// Check if we just entered this clip (need to start playing)
if (!clip.isPlaying) {
// Start playing one frame ahead to compensate for canvas drawing lag
const frameDuration = 1 / (clip.fps || 30); // Use clip's actual framerate
const maxVideoTime = clip.sourceDuration - frameDuration; // Don't seek past end
const startTime = Math.min(videoTime + frameDuration, maxVideoTime);
console.log(`[Video updateFrame] Starting playback at ${startTime.toFixed(3)}s (compensated by ${frameDuration.toFixed(3)}s for ${clip.fps}fps)`);
clip.videoElement.currentTime = startTime;
clip.videoElement.play().catch(e => console.error('Failed to play video:', e));
clip.isPlaying = true;
}
// Otherwise, let it play naturally - don't seek!
}
// When scrubbing (not playing): seek to exact position and pause
else {
if (clip.isPlaying) {
clip.videoElement.pause();
clip.isPlaying = false;
}
// Only seek if the time is actually different
if (!clip.videoElement.seeking) {
const timeDiff = Math.abs(clip.videoElement.currentTime - videoTime);
if (timeDiff > 0.016) { // ~1 frame tolerance at 60fps
clip.videoElement.currentTime = videoTime;
}
}
}
continue; // Skip frame fetching
}
// Use frame batching for frame-based playback
// Timestamp within the source video for the current timeline position
const currentVideoTimestamp = clip.offset + (currentTime - clip.startTime);
// Initialize frame cache if needed
if (!clip.frameCache) {
clip.frameCache = new Map();
}
// Check if current frame is already cached
if (clip.frameCache.has(currentVideoTimestamp)) {
clip.currentFrame = clip.frameCache.get(currentVideoTimestamp);
clip.lastFetchedTimestamp = currentVideoTimestamp;
continue;
}
// Skip if already fetching
if (clip.fetchInProgress) {
continue;
}
clip.fetchInProgress = true;
try {
// Calculate timestamps to prefetch (current + next N frames)
const frameDuration = 1 / (clip.fps || 30); // Use the clip's source framerate, falling back to 30fps
const timestamps = [];
for (let i = 0; i < this.prefetchCount; i++) {
const ts = currentVideoTimestamp + (i * frameDuration);
// Don't exceed clip duration
if (ts <= clip.offset + clip.sourceDuration) {
timestamps.push(ts);
}
}
if (timestamps.length === 0) {
continue;
}
const t_start = performance.now();
// Request batch of frames using IPC Channel
const batchDataPromise = new Promise((resolve, reject) => {
const channel = new Channel();
channel.onmessage = (data) => {
resolve(data);
};
invoke('video_get_frames_batch', {
poolIndex: clip.poolIndex,
timestamps: timestamps,
useJpeg: this.useJpegCompression,
channel: channel
}).catch(reject);
});
let batchData = await batchDataPromise;
const t_after_ipc = performance.now();
// Ensure data is Uint8Array
if (!(batchData instanceof Uint8Array)) {
batchData = new Uint8Array(batchData);
}
// Unpack the batch format: [frame_count: u32][frame1_size: u32][frame1_data...][frame2_size: u32][frame2_data...]...
const view = new DataView(batchData.buffer, batchData.byteOffset, batchData.byteLength);
let offset = 0;
// Read frame count
const frameCount = view.getUint32(offset, true); // little-endian
offset += 4;
if (frameCount !== timestamps.length) {
console.warn(`Expected ${timestamps.length} frames, got ${frameCount}`);
}
const t_before_conversion = performance.now();
// Process each frame
for (let i = 0; i < frameCount; i++) {
// Read frame size
const frameSize = view.getUint32(offset, true);
offset += 4;
// Extract frame data
const frameData = new Uint8Array(batchData.buffer, batchData.byteOffset + offset, frameSize);
offset += frameSize;
let imageData;
if (this.useJpegCompression) {
// Decode JPEG using createImageBitmap
const blob = new Blob([frameData], { type: 'image/jpeg' });
const imageBitmap = await createImageBitmap(blob);
// Create temporary canvas to extract ImageData
const tempCanvas = document.createElement('canvas');
tempCanvas.width = clip.width;
tempCanvas.height = clip.height;
const tempCtx = tempCanvas.getContext('2d');
tempCtx.drawImage(imageBitmap, 0, 0);
imageData = tempCtx.getImageData(0, 0, clip.width, clip.height);
imageBitmap.close();
} else {
// Raw RGBA data
const expectedSize = clip.width * clip.height * 4;
if (frameData.length !== expectedSize) {
console.error(`Invalid frame ${i} data size: got ${frameData.length}, expected ${expectedSize}`);
continue;
}
imageData = new ImageData(
new Uint8ClampedArray(frameData),
clip.width,
clip.height
);
}
// Create canvas for this frame
const frameCanvas = document.createElement('canvas');
frameCanvas.width = clip.width;
frameCanvas.height = clip.height;
const frameCtx = frameCanvas.getContext('2d');
frameCtx.putImageData(imageData, 0, 0);
// Cache the frame
clip.frameCache.set(timestamps[i], frameCanvas);
// Set as current frame if it's the first one
if (i === 0) {
clip.currentFrame = frameCanvas;
clip.lastFetchedTimestamp = timestamps[i];
}
}
const t_after_conversion = performance.now();
// Limit cache size to avoid memory issues
const maxCacheSize = this.prefetchCount * 2;
if (clip.frameCache.size > maxCacheSize) {
// Remove oldest entries (simple LRU by keeping only recent timestamps)
const sortedKeys = Array.from(clip.frameCache.keys()).sort((a, b) => a - b);
const toRemove = sortedKeys.slice(0, sortedKeys.length - maxCacheSize);
for (let key of toRemove) {
clip.frameCache.delete(key);
}
}
// Log timing breakdown
const total_time = t_after_conversion - t_start;
const ipc_time = t_after_ipc - t_start;
const conversion_time = t_after_conversion - t_before_conversion;
const compression_mode = this.useJpegCompression ? 'JPEG' : 'RAW';
const avg_per_frame = total_time / frameCount;
console.log(`[JS Video Batch ${compression_mode}] Fetched ${frameCount} frames | Total: ${total_time.toFixed(1)}ms | IPC: ${ipc_time.toFixed(1)}ms (${(ipc_time/total_time*100).toFixed(0)}%) | Convert: ${conversion_time.toFixed(1)}ms | Avg/frame: ${avg_per_frame.toFixed(1)}ms | Size: ${(batchData.length/1024/1024).toFixed(2)}MB`);
} catch (error) {
console.error('Failed to get video frames batch:', error);
clip.currentFrame = null;
} finally {
clip.fetchInProgress = false;
}
}
} finally {
this.updateInProgress = false;
}
}
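// Illustrative sketch: the batch payload parsed above is a little-endian,
// length-prefixed sequence of frames. A matching payload can be packed like this
// when exercising the parser (hypothetical helper, for illustration only):
//
//   function packFrameBatch(frames) {             // frames: array of Uint8Array
//       const total = 4 + frames.reduce((n, f) => n + 4 + f.length, 0);
//       const out = new Uint8Array(total);
//       const view = new DataView(out.buffer);
//       let offset = 0;
//       view.setUint32(offset, frames.length, true); offset += 4;  // [frame_count: u32]
//       for (const f of frames) {
//           view.setUint32(offset, f.length, true); offset += 4;   // [frameN_size: u32]
//           out.set(f, offset); offset += f.length;                // [frameN_data...]
//       }
//       return out;
//   }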
// Draw cached frames (synchronous)
draw(cxt, currentTime) {
if (!this.visible) {
return;
}
const ctx = cxt.ctx || cxt;
// Use currentTime from context if not provided
if (currentTime === undefined) {
currentTime = cxt.activeObject?.currentTime || 0;
}
for (let clip of this.clips) {
// Check if clip is active at current time
if (currentTime < clip.startTime ||
currentTime >= clip.startTime + clip.duration) {
continue;
}
// Debug: log what path we're taking
if (!clip._drawPathLogged) {
console.log(`[Video Draw] useBrowserVideo=${clip.useBrowserVideo}, videoElement=${!!clip.videoElement}, currentFrame=${!!clip.currentFrame}`);
clip._drawPathLogged = true;
}
// Prefer browser video element if available
if (clip.useBrowserVideo && clip.videoElement) {
// Debug: log readyState issues
if (clip.videoElement.readyState < 2) {
if (!clip._readyStateWarned) {
console.warn(`[Video] Video not ready: readyState=${clip.videoElement.readyState}, src=${clip.videoElement.src}`);
clip._readyStateWarned = true;
}
}
// Draw if video is ready (shows last frame while seeking, updates when seek completes)
if (clip.videoElement.readyState >= 2) {
try {
// Calculate expected video time
const expectedVideoTime = clip.offset + (currentTime - clip.startTime);
const actualVideoTime = clip.videoElement.currentTime;
const timeDiff = Math.abs(expectedVideoTime - actualVideoTime);
// Debug: log if time is significantly different
if (timeDiff > 0.1 && (!clip._lastTimeDiffWarning || Date.now() - clip._lastTimeDiffWarning > 1000)) {
console.warn(`[Video Draw] Time mismatch: expected ${expectedVideoTime.toFixed(2)}s, actual ${actualVideoTime.toFixed(2)}s, diff=${timeDiff.toFixed(2)}s`);
clip._lastTimeDiffWarning = Date.now();
}
// Debug: log successful draw periodically
if (!clip._lastDrawLog || Date.now() - clip._lastDrawLog > 1000) {
console.log(`[Video Draw] Drawing at currentTime=${actualVideoTime.toFixed(2)}s (expected ${expectedVideoTime.toFixed(2)}s)`);
clip._lastDrawLog = Date.now();
}
// Scale to fit canvas while maintaining aspect ratio
const canvasWidth = config.fileWidth;
const canvasHeight = config.fileHeight;
const scale = Math.min(
canvasWidth / clip.videoElement.videoWidth,
canvasHeight / clip.videoElement.videoHeight
);
const scaledWidth = clip.videoElement.videoWidth * scale;
const scaledHeight = clip.videoElement.videoHeight * scale;
const x = (canvasWidth - scaledWidth) / 2;
const y = (canvasHeight - scaledHeight) / 2;
// Debug: draw a test rectangle to verify canvas is working
if (!clip._canvasTestDone) {
ctx.save();
ctx.fillStyle = 'red';
ctx.fillRect(10, 10, 100, 100);
ctx.restore();
console.log(`[Video Draw] Drew test rectangle at (10, 10, 100, 100)`);
console.log(`[Video Draw] Canvas dimensions: ${canvasWidth}x${canvasHeight}`);
console.log(`[Video Draw] Scaled video dimensions: ${scaledWidth}x${scaledHeight} at (${x}, ${y})`);
clip._canvasTestDone = true;
}
// Debug: Check if video element has dimensions
if (!clip._videoDimensionsLogged) {
console.log(`[Video Draw] Video element dimensions: videoWidth=${clip.videoElement.videoWidth}, videoHeight=${clip.videoElement.videoHeight}, naturalWidth=${clip.videoElement.videoWidth}, naturalHeight=${clip.videoElement.videoHeight}`);
console.log(`[Video Draw] Video element state: paused=${clip.videoElement.paused}, ended=${clip.videoElement.ended}, seeking=${clip.videoElement.seeking}, readyState=${clip.videoElement.readyState}`);
clip._videoDimensionsLogged = true;
}
ctx.drawImage(clip.videoElement, x, y, scaledWidth, scaledHeight);
// Debug: Sample a pixel to see if video is actually drawing
if (!clip._pixelTestDone) {
const imageData = ctx.getImageData(canvasWidth / 2, canvasHeight / 2, 1, 1);
const pixel = imageData.data;
console.log(`[Video Draw] Center pixel after drawImage: R=${pixel[0]}, G=${pixel[1]}, B=${pixel[2]}, A=${pixel[3]}`);
clip._pixelTestDone = true;
}
} catch (error) {
console.error('Failed to draw video element:', error);
}
}
}
// Fall back to cached frame if available
else if (clip.currentFrame) {
try {
// Scale to fit canvas while maintaining aspect ratio
const canvasWidth = config.fileWidth;
const canvasHeight = config.fileHeight;
const scale = Math.min(
canvasWidth / clip.width,
canvasHeight / clip.height
);
const scaledWidth = clip.width * scale;
const scaledHeight = clip.height * scale;
const x = (canvasWidth - scaledWidth) / 2;
const y = (canvasHeight - scaledHeight) / 2;
ctx.drawImage(clip.currentFrame, x, y, scaledWidth, scaledHeight);
} catch (error) {
console.error('Failed to draw video frame:', error);
}
} else {
// Draw placeholder if frame not loaded yet
ctx.save();
ctx.fillStyle = '#333333';
ctx.fillRect(0, 0, config.fileWidth, config.fileHeight);
ctx.fillStyle = '#ffffff';
ctx.font = '24px sans-serif';
ctx.textAlign = 'center';
ctx.textBaseline = 'middle';
const msg = clip.transcoding ? 'Transcoding...' : 'Loading...';
ctx.fillText(msg, config.fileWidth / 2, config.fileHeight / 2);
ctx.restore();
}
}
}
static fromJSON(json) {
const videoLayer = new VideoLayer(json.idx, json.name);
if (json.animationData) {
videoLayer.animationData = AnimationData.fromJSON(json.animationData, videoLayer);
}
if (json.clips) {
videoLayer.clips = json.clips;
}
if (json.linkedAudioTrack) {
// Will be resolved after all objects are loaded
videoLayer.linkedAudioTrack = json.linkedAudioTrack;
}
videoLayer.visible = json.visible;
videoLayer.audible = json.audible;
// Restore compression setting (otherwise keep the constructor default of false)
if (json.useJpegCompression !== undefined) {
videoLayer.useJpegCompression = json.useJpegCompression;
}
return videoLayer;
}
toJSON(randomizeUuid = false) {
return {
type: "VideoLayer",
idx: randomizeUuid ? uuidv4() : this.idx,
name: randomizeUuid ? this.name + " copy" : this.name,
visible: this.visible,
audible: this.audible,
animationData: this.animationData.toJSON(),
clips: this.clips,
linkedAudioTrack: this.linkedAudioTrack?.idx,
useJpegCompression: this.useJpegCompression
};
}
copy(idx) {
const json = this.toJSON(true);
json.idx = idx.slice(0, 8) + this.idx.slice(8);
return VideoLayer.fromJSON(json);
}
// Compatibility methods for layer interface
bbox() {
return {
x: { min: 0, max: config.fileWidth },
y: { min: 0, max: config.fileHeight }
};
}
}
export { VectorLayer, AudioTrack, VideoLayer };

View File

@ -36,7 +36,8 @@ export let context = {
timelineWidget: null, // Reference to TimelineWindowV2 widget for zoom controls timelineWidget: null, // Reference to TimelineWindowV2 widget for zoom controls
config: null, // Reference to config object (set after config is initialized) config: null, // Reference to config object (set after config is initialized)
mode: "select", // Current tool mode mode: "select", // Current tool mode
// Recording state // Playback and recording state
playing: false,
isRecording: false, isRecording: false,
recordingTrackId: null, recordingTrackId: null,
recordingClipId: null, recordingClipId: null,
@ -91,6 +92,7 @@ export let config = {
reopenLastSession: false, reopenLastSession: false,
lastImportFilterIndex: 0, // Index of last used filter in import dialog (0=Image, 1=Audio, 2=Lightningbeam) lastImportFilterIndex: 0, // Index of last used filter in import dialog (0=Image, 1=Audio, 2=Lightningbeam)
audioBufferSize: 256, // Audio buffer size in frames (128, 256, 512, 1024, etc. - requires restart) audioBufferSize: 256, // Audio buffer size in frames (128, 256, 512, 1024, etc. - requires restart)
minClipDuration: 0.1, // Minimum clip duration in seconds when trimming
// Layout settings // Layout settings
currentLayout: "animation", // Current active layout key currentLayout: "animation", // Current active layout key
defaultLayout: "animation", // Default layout for new files defaultLayout: "animation", // Default layout for new files

View File

@ -571,9 +571,12 @@ class TrackHierarchy {
// Iterate through layers (GraphicsObject.children are Layers) // Iterate through layers (GraphicsObject.children are Layers)
for (let layer of graphicsObject.children) { for (let layer of graphicsObject.children) {
// Determine layer type - check if it's a VideoLayer
const layerType = layer.type === 'video' ? 'video' : 'layer'
// Add layer track // Add layer track
const layerTrack = { const layerTrack = {
type: 'layer', type: layerType,
object: layer, object: layer,
name: layer.name || 'Layer', name: layer.name || 'Layer',
indent: 0, indent: 0,

View File

@ -981,6 +981,112 @@ class TimelineWindowV2 extends Widget {
/** /**
* Draw track backgrounds in timeline area (Phase 2) * Draw track backgrounds in timeline area (Phase 2)
*/ */
// Create a cached pattern for the timeline grid
createTimelinePattern(trackHeight) {
const cacheKey = `${this.timelineState.timeFormat}_${this.timelineState.pixelsPerSecond}_${this.timelineState.framerate}_${this.timelineState.bpm}_${trackHeight}`
// Return cached pattern if available
if (this.cachedPattern && this.cachedPatternKey === cacheKey) {
return this.cachedPattern
}
let patternWidth, patternHeight = trackHeight
if (this.timelineState.timeFormat === 'frames') {
// Pattern for 5 frames
const frameDuration = 1 / this.timelineState.framerate
const frameWidth = frameDuration * this.timelineState.pixelsPerSecond
patternWidth = frameWidth * 5
} else if (this.timelineState.timeFormat === 'measures') {
// Pattern for one measure
const beatsPerSecond = this.timelineState.bpm / 60
const beatsPerMeasure = this.timelineState.timeSignature.numerator
const beatWidth = (1 / beatsPerSecond) * this.timelineState.pixelsPerSecond
patternWidth = beatWidth * beatsPerMeasure
} else {
// Pattern for seconds - use 10 second intervals
patternWidth = this.timelineState.pixelsPerSecond * 10
}
// Create pattern canvas
const patternCanvas = document.createElement('canvas')
patternCanvas.width = Math.ceil(patternWidth)
patternCanvas.height = patternHeight
const pctx = patternCanvas.getContext('2d')
// Fill background
pctx.fillStyle = shade
pctx.fillRect(0, 0, patternWidth, patternHeight)
if (this.timelineState.timeFormat === 'frames') {
const frameDuration = 1 / this.timelineState.framerate
const frameWidth = frameDuration * this.timelineState.pixelsPerSecond
for (let i = 0; i < 5; i++) {
const x = i * frameWidth
if (i === 0) {
// First frame in pattern (every 5th): shade it
pctx.fillStyle = shadow
pctx.fillRect(x, 0, frameWidth, patternHeight)
} else {
// Regular frame: draw edge line
pctx.strokeStyle = shadow
pctx.lineWidth = 1
pctx.beginPath()
pctx.moveTo(x, 0)
pctx.lineTo(x, patternHeight)
pctx.stroke()
}
}
} else if (this.timelineState.timeFormat === 'measures') {
const beatsPerSecond = this.timelineState.bpm / 60
const beatsPerMeasure = this.timelineState.timeSignature.numerator
const beatWidth = (1 / beatsPerSecond) * this.timelineState.pixelsPerSecond
for (let i = 0; i < beatsPerMeasure; i++) {
const x = i * beatWidth
const isMeasureBoundary = i === 0
const isEvenBeat = (i % 2) === 0
pctx.save()
if (isMeasureBoundary) {
pctx.globalAlpha = 1.0
} else if (isEvenBeat) {
pctx.globalAlpha = 0.5
} else {
pctx.globalAlpha = 0.25
}
pctx.strokeStyle = shadow
pctx.lineWidth = 1
pctx.beginPath()
pctx.moveTo(x, 0)
pctx.lineTo(x, patternHeight)
pctx.stroke()
pctx.restore()
}
} else {
// Seconds mode: draw lines every second for 10 seconds
const secondWidth = this.timelineState.pixelsPerSecond
for (let i = 0; i < 10; i++) {
const x = i * secondWidth
pctx.strokeStyle = shadow
pctx.lineWidth = 1
pctx.beginPath()
pctx.moveTo(x, 0)
pctx.lineTo(x, patternHeight)
pctx.stroke()
}
}
// Cache the pattern
this.cachedPatternKey = cacheKey
this.cachedPattern = pctx.createPattern(patternCanvas, 'repeat')
return this.cachedPattern
}
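// Illustrative note: canvas patterns tile from the origin of the current transform,
// so drawTracks below translates by the viewport offset before filling and then
// compensates in the fillRect coordinates, keeping tile boundaries anchored to
// timeline time rather than to the canvas:
//
//   const patternOffsetX = -this.timelineState.timeToPixel(visibleStartTime);
//   ctx.save();
//   ctx.translate(patternOffsetX, y);
//   ctx.fillStyle = pattern;
//   ctx.fillRect(-patternOffsetX, 0, trackAreaWidth, trackHeight);
//   ctx.restore();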
drawTracks(ctx) { drawTracks(ctx) {
ctx.save() ctx.save()
ctx.translate(this.trackHeaderWidth, this.ruler.height) // Start after headers, below ruler ctx.translate(this.trackHeaderWidth, this.ruler.height) // Start after headers, below ruler
@ -1000,96 +1106,18 @@ class TimelineWindowV2 extends Widget {
const y = this.trackHierarchy.getTrackY(i) const y = this.trackHierarchy.getTrackY(i)
const trackHeight = this.trackHierarchy.getTrackHeight(track) const trackHeight = this.trackHierarchy.getTrackHeight(track)
// Draw track background (same color for all tracks) // Create and apply pattern for this track
ctx.fillStyle = shade const pattern = this.createTimelinePattern(trackHeight)
ctx.fillRect(0, y, trackAreaWidth, trackHeight)
// Draw interval markings // Calculate pattern offset based on viewport start time
const visibleStartTime = this.timelineState.viewportStartTime const visibleStartTime = this.timelineState.viewportStartTime
const visibleEndTime = visibleStartTime + (trackAreaWidth / this.timelineState.pixelsPerSecond) const patternOffsetX = -this.timelineState.timeToPixel(visibleStartTime)
if (this.timelineState.timeFormat === 'frames') { ctx.save()
// Frames mode: mark every frame edge, with every 5th frame shaded ctx.translate(patternOffsetX, y)
const frameDuration = 1 / this.timelineState.framerate ctx.fillStyle = pattern
const startFrame = Math.floor(visibleStartTime / frameDuration) ctx.fillRect(-patternOffsetX, 0, trackAreaWidth, trackHeight)
const endFrame = Math.ceil(visibleEndTime / frameDuration) ctx.restore()
for (let frame = startFrame; frame <= endFrame; frame++) {
const time = frame * frameDuration
const x = this.timelineState.timeToPixel(time)
const nextX = this.timelineState.timeToPixel((frame + 1) * frameDuration)
if (x >= 0 && x <= trackAreaWidth) {
if (frame % 5 === 0) {
// Every 5th frame: shade the entire frame width
ctx.fillStyle = shadow
ctx.fillRect(x, y, nextX - x, trackHeight)
} else {
// Regular frame: draw edge line
ctx.strokeStyle = shadow
ctx.lineWidth = 1
ctx.beginPath()
ctx.moveTo(x, y)
ctx.lineTo(x, y + trackHeight)
ctx.stroke()
}
}
}
} else if (this.timelineState.timeFormat === 'measures') {
// Measures mode: draw beats with varying opacity
const beatsPerSecond = this.timelineState.bpm / 60
const beatsPerMeasure = this.timelineState.timeSignature.numerator
const startBeat = Math.floor(visibleStartTime * beatsPerSecond)
const endBeat = Math.ceil(visibleEndTime * beatsPerSecond)
for (let beat = startBeat; beat <= endBeat; beat++) {
const time = beat / beatsPerSecond
const x = this.timelineState.timeToPixel(time)
if (x >= 0 && x <= trackAreaWidth) {
// Determine position within the measure
const beatInMeasure = beat % beatsPerMeasure
const isMeasureBoundary = beatInMeasure === 0
const isEvenBeatInMeasure = (beatInMeasure % 2) === 0
// Set opacity based on position
ctx.save()
if (isMeasureBoundary) {
ctx.globalAlpha = 1.0 // Full opacity for measure boundaries
} else if (isEvenBeatInMeasure) {
ctx.globalAlpha = 0.5 // Half opacity for even beats
} else {
ctx.globalAlpha = 0.25 // Quarter opacity for odd beats
}
ctx.strokeStyle = shadow
ctx.lineWidth = 1
ctx.beginPath()
ctx.moveTo(x, y)
ctx.lineTo(x, y + trackHeight)
ctx.stroke()
ctx.restore()
}
}
} else {
// Seconds mode: mark every second edge
const startSecond = Math.floor(visibleStartTime)
const endSecond = Math.ceil(visibleEndTime)
ctx.strokeStyle = shadow
ctx.lineWidth = 1
for (let second = startSecond; second <= endSecond; second++) {
const x = this.timelineState.timeToPixel(second)
if (x >= 0 && x <= trackAreaWidth) {
ctx.beginPath()
ctx.moveTo(x, y)
ctx.lineTo(x, y + trackHeight)
ctx.stroke()
}
}
}
// Draw track border // Draw track border
ctx.strokeStyle = shadow ctx.strokeStyle = shadow
@ -1459,19 +1487,25 @@ class TimelineWindowV2 extends Widget {
const waveformHeight = trackHeight - 14 // Leave padding at top/bottom const waveformHeight = trackHeight - 14 // Leave padding at top/bottom
const waveformData = clip.waveform const waveformData = clip.waveform
// Calculate how many pixels each waveform peak represents // Calculate the full source audio duration and pixels per peak based on that
const pixelsPerPeak = clipWidth / waveformData.length const sourceDuration = clip.sourceDuration || clip.duration
const pixelsPerSecond = this.timelineState.pixelsPerSecond
const fullSourceWidth = sourceDuration * pixelsPerSecond
const pixelsPerPeak = fullSourceWidth / waveformData.length
// Calculate the range of visible peaks // Calculate which peak corresponds to the clip's offset (trimmed left edge)
const firstVisiblePeak = Math.max(0, Math.floor((visibleStart - startX) / pixelsPerPeak)) const offsetPeakIndex = Math.floor((clip.offset / sourceDuration) * waveformData.length)
const lastVisiblePeak = Math.min(waveformData.length - 1, Math.ceil((visibleEnd - startX) / pixelsPerPeak))
// Calculate the range of visible peaks, accounting for offset
const firstVisiblePeak = Math.max(offsetPeakIndex, Math.floor((visibleStart - startX) / pixelsPerPeak) + offsetPeakIndex)
const lastVisiblePeak = Math.min(waveformData.length - 1, Math.ceil((visibleEnd - startX) / pixelsPerPeak) + offsetPeakIndex)
// Draw waveform as a filled path // Draw waveform as a filled path
ctx.beginPath() ctx.beginPath()
// Trace along the max values (left to right) // Trace along the max values (left to right)
for (let i = firstVisiblePeak; i <= lastVisiblePeak; i++) { for (let i = firstVisiblePeak; i <= lastVisiblePeak; i++) {
const peakX = startX + (i * pixelsPerPeak) const peakX = startX + ((i - offsetPeakIndex) * pixelsPerPeak)
const peak = waveformData[i] const peak = waveformData[i]
const maxY = centerY + (peak.max * waveformHeight * 0.5) const maxY = centerY + (peak.max * waveformHeight * 0.5)
@ -1484,7 +1518,7 @@ class TimelineWindowV2 extends Widget {
// Trace back along the min values (right to left) // Trace back along the min values (right to left)
for (let i = lastVisiblePeak; i >= firstVisiblePeak; i--) { for (let i = lastVisiblePeak; i >= firstVisiblePeak; i--) {
const peakX = startX + (i * pixelsPerPeak) const peakX = startX + ((i - offsetPeakIndex) * pixelsPerPeak)
const peak = waveformData[i] const peak = waveformData[i]
const minY = centerY + (peak.min * waveformHeight * 0.5) const minY = centerY + (peak.min * waveformHeight * 0.5)
ctx.lineTo(peakX, minY) ctx.lineTo(peakX, minY)
@ -1496,6 +1530,58 @@ class TimelineWindowV2 extends Widget {
} }
} }
} }
} else if (track.type === 'video') {
// Draw video clips for VideoLayer
const videoLayer = track.object
const y = this.trackHierarchy.getTrackY(i)
const trackHeight = this.trackHierarchy.trackHeight // Use base height for clips
// Draw each clip
for (let clip of videoLayer.clips) {
const startX = this.timelineState.timeToPixel(clip.startTime)
const endX = this.timelineState.timeToPixel(clip.startTime + clip.duration)
const clipWidth = endX - startX
// Video clips use purple/magenta color
const clipColor = '#9b59b6' // Purple for video clips
// Draw clip rectangle
ctx.fillStyle = clipColor
ctx.fillRect(
startX,
y + 5,
clipWidth,
trackHeight - 10
)
// Draw border
ctx.strokeStyle = shadow
ctx.lineWidth = 1
ctx.strokeRect(
startX,
y + 5,
clipWidth,
trackHeight - 10
)
// Draw clip name if there's enough space
const minWidthForLabel = 40
if (clipWidth >= minWidthForLabel) {
ctx.fillStyle = labelColor
ctx.font = '11px sans-serif'
ctx.textAlign = 'left'
ctx.textBaseline = 'middle'
// Clip text to clip bounds
ctx.save()
ctx.beginPath()
ctx.rect(startX + 2, y + 5, clipWidth - 4, trackHeight - 10)
ctx.clip()
ctx.fillText(clip.name, startX + 4, y + trackHeight / 2)
ctx.restore()
}
}
} }
} }
@ -2135,6 +2221,39 @@ class TimelineWindowV2 extends Widget {
return true return true
} }
// Check if clicking on audio clip edge to start trimming
const audioEdgeInfo = this.getAudioClipEdgeAtPoint(track, adjustedX, adjustedY)
if (audioEdgeInfo) {
// Skip if right-clicking (button 2)
if (this.lastClickEvent?.button === 2) {
return false
}
// Select the track
this.selectTrack(track)
// Start audio clip edge dragging
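// Snapshot the clip's initial geometry so the mousemove handler can compute deltas
// against the original values instead of accumulating drift across events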
this.draggingAudioClipEdge = {
track: track,
edge: audioEdgeInfo.edge,
clip: audioEdgeInfo.clip,
clipIndex: audioEdgeInfo.clipIndex,
audioTrack: audioEdgeInfo.audioTrack,
initialClipStart: audioEdgeInfo.clip.startTime,
initialClipDuration: audioEdgeInfo.clip.duration,
initialClipOffset: audioEdgeInfo.clip.offset,
initialLinkedVideoOffset: audioEdgeInfo.clip.linkedVideoClip?.offset || 0
}
// Enable global mouse events for dragging
this._globalEvents.add("mousemove")
this._globalEvents.add("mouseup")
console.log('Started dragging audio clip', audioEdgeInfo.edge, 'edge')
if (this.requestRedraw) this.requestRedraw()
return true
}
// Check if clicking on audio clip to start dragging
const audioClipInfo = this.getAudioClipAtPoint(track, adjustedX, adjustedY)
if (audioClipInfo) {
@@ -2166,6 +2285,70 @@ class TimelineWindowV2 extends Widget {
return true
}
// Check if clicking on video clip edge to start trimming
const videoEdgeInfo = this.getVideoClipEdgeAtPoint(track, adjustedX, adjustedY)
if (videoEdgeInfo) {
// Skip if right-clicking (button 2)
if (this.lastClickEvent?.button === 2) {
return false
}
// Select the track
this.selectTrack(track)
// Start video clip edge dragging
this.draggingVideoClipEdge = {
track: track,
edge: videoEdgeInfo.edge,
clip: videoEdgeInfo.clip,
clipIndex: videoEdgeInfo.clipIndex,
videoLayer: videoEdgeInfo.videoLayer,
initialClipStart: videoEdgeInfo.clip.startTime,
initialClipDuration: videoEdgeInfo.clip.duration,
initialClipOffset: videoEdgeInfo.clip.offset,
initialLinkedAudioOffset: videoEdgeInfo.clip.linkedAudioClip?.offset || 0
}
// Enable global mouse events for dragging
this._globalEvents.add("mousemove")
this._globalEvents.add("mouseup")
console.log('Started dragging video clip', videoEdgeInfo.edge, 'edge')
if (this.requestRedraw) this.requestRedraw()
return true
}
// Check if clicking on video clip to start dragging
const videoClipInfo = this.getVideoClipAtPoint(track, adjustedX, adjustedY)
if (videoClipInfo) {
// Skip drag if right-clicking (button 2)
if (this.lastClickEvent?.button === 2) {
return false
}
// Select the track
this.selectTrack(track)
// Start video clip dragging
const clickTime = this.timelineState.pixelToTime(adjustedX)
this.draggingVideoClip = {
track: track,
clip: videoClipInfo.clip,
clipIndex: videoClipInfo.clipIndex,
videoLayer: videoClipInfo.videoLayer,
initialMouseTime: clickTime,
initialClipStartTime: videoClipInfo.clip.startTime
}
// Enable global mouse events for dragging
this._globalEvents.add("mousemove")
this._globalEvents.add("mouseup")
console.log('Started dragging video clip at time', videoClipInfo.clip.startTime)
if (this.requestRedraw) this.requestRedraw()
return true
}
// Phase 6: Check if clicking on segment to start dragging
const segmentInfo = this.getSegmentAtPoint(track, adjustedX, adjustedY)
if (segmentInfo) {
@@ -2691,6 +2874,115 @@ class TimelineWindowV2 extends Widget {
return null
}
getAudioClipEdgeAtPoint(track, x, y) {
const clipInfo = this.getAudioClipAtPoint(track, x, y)
if (!clipInfo) return null
const clickTime = this.timelineState.pixelToTime(x)
const edgeThreshold = 8 / this.timelineState.pixelsPerSecond // 8 pixels in time units
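// e.g. at 100 px/s this is a 0.08 s grab zone on either side of the clip edge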
const clipStart = clipInfo.clip.startTime
const clipEnd = clipInfo.clip.startTime + clipInfo.clip.duration
// Check if near left edge
if (Math.abs(clickTime - clipStart) <= edgeThreshold) {
return {
edge: 'left',
clip: clipInfo.clip,
clipIndex: clipInfo.clipIndex,
audioTrack: clipInfo.audioTrack,
clipStart: clipStart,
clipEnd: clipEnd
}
}
// Check if near right edge
if (Math.abs(clickTime - clipEnd) <= edgeThreshold) {
return {
edge: 'right',
clip: clipInfo.clip,
clipIndex: clipInfo.clipIndex,
audioTrack: clipInfo.audioTrack,
clipStart: clipStart,
clipEnd: clipEnd
}
}
return null
}
getVideoClipAtPoint(track, x, y) {
if (track.type !== 'video') return null
const trackIndex = this.trackHierarchy.tracks.indexOf(track)
if (trackIndex === -1) return null
const trackY = this.trackHierarchy.getTrackY(trackIndex)
const trackHeight = this.trackHierarchy.trackHeight
const clipTop = trackY + 5
const clipBottom = trackY + trackHeight - 5
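// The 5 px inset matches the vertical padding used when the clip rectangle is drawn (y + 5, height - 10)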
// Check if y is within clip bounds
if (y < clipTop || y > clipBottom) return null
const clickTime = this.timelineState.pixelToTime(x)
const videoLayer = track.object
// Check each clip
for (let i = 0; i < videoLayer.clips.length; i++) {
const clip = videoLayer.clips[i]
const clipStart = clip.startTime
const clipEnd = clip.startTime + clip.duration
if (clickTime >= clipStart && clickTime <= clipEnd) {
return {
clip: clip,
clipIndex: i,
videoLayer: videoLayer
}
}
}
return null
}
getVideoClipEdgeAtPoint(track, x, y) {
const clipInfo = this.getVideoClipAtPoint(track, x, y)
if (!clipInfo) return null
const clickTime = this.timelineState.pixelToTime(x)
const edgeThreshold = 8 / this.timelineState.pixelsPerSecond // 8 pixels in time units
const clipStart = clipInfo.clip.startTime
const clipEnd = clipInfo.clip.startTime + clipInfo.clip.duration
// Check if near left edge
if (Math.abs(clickTime - clipStart) <= edgeThreshold) {
return {
edge: 'left',
clip: clipInfo.clip,
clipIndex: clipInfo.clipIndex,
videoLayer: clipInfo.videoLayer,
clipStart: clipStart,
clipEnd: clipEnd
}
}
// Check if near right edge
if (Math.abs(clickTime - clipEnd) <= edgeThreshold) {
return {
edge: 'right',
clip: clipInfo.clip,
clipIndex: clipInfo.clipIndex,
videoLayer: clipInfo.videoLayer,
clipStart: clipStart,
clipEnd: clipEnd
}
}
return null
}
/**
* Get segment edge at a point (Phase 6)
* Returns {edge: 'left'|'right', startTime, endTime, keyframe, animationData, curveName} if near an edge
@@ -3530,6 +3822,54 @@ class TimelineWindowV2 extends Widget {
return true
}
// Handle audio clip edge dragging (trimming)
if (this.draggingAudioClipEdge) {
const adjustedX = x - this.trackHeaderWidth
const newTime = this.timelineState.pixelToTime(adjustedX)
const minClipDuration = this.context.config.minClipDuration
if (this.draggingAudioClipEdge.edge === 'left') {
// Dragging left edge - adjust startTime and offset
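// The right edge stays fixed: moving the start later by delta grows offset by delta and
// shrinks duration by delta (e.g. a +0.5 s drag -> offset +0.5 s, duration -0.5 s)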
const initialEnd = this.draggingAudioClipEdge.initialClipStart + this.draggingAudioClipEdge.initialClipDuration
const maxStartTime = initialEnd - minClipDuration
const newStartTime = Math.max(0, Math.min(newTime, maxStartTime))
const startTimeDelta = newStartTime - this.draggingAudioClipEdge.initialClipStart
this.draggingAudioClipEdge.clip.startTime = newStartTime
this.draggingAudioClipEdge.clip.offset = this.draggingAudioClipEdge.initialClipOffset + startTimeDelta
this.draggingAudioClipEdge.clip.duration = this.draggingAudioClipEdge.initialClipDuration - startTimeDelta
// Also trim linked video clip if it exists
if (this.draggingAudioClipEdge.clip.linkedVideoClip) {
const videoClip = this.draggingAudioClipEdge.clip.linkedVideoClip
videoClip.startTime = newStartTime
videoClip.offset = (this.draggingAudioClipEdge.initialLinkedVideoOffset || 0) + startTimeDelta
videoClip.duration = this.draggingAudioClipEdge.initialClipDuration - startTimeDelta
}
} else {
// Dragging right edge - adjust duration
const minEndTime = this.draggingAudioClipEdge.initialClipStart + minClipDuration
const newEndTime = Math.max(minEndTime, newTime)
let newDuration = newEndTime - this.draggingAudioClipEdge.clip.startTime
// Constrain duration to not exceed source file duration minus offset
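// e.g. a 10 s source already trimmed to a 3 s offset can play at most 7 s, however far the edge is dragged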
const maxAvailableDuration = this.draggingAudioClipEdge.clip.sourceDuration - this.draggingAudioClipEdge.clip.offset
newDuration = Math.min(newDuration, maxAvailableDuration)
this.draggingAudioClipEdge.clip.duration = newDuration
// Also trim linked video clip if it exists
if (this.draggingAudioClipEdge.clip.linkedVideoClip) {
const linkedMaxDuration = this.draggingAudioClipEdge.clip.linkedVideoClip.sourceDuration - this.draggingAudioClipEdge.clip.linkedVideoClip.offset
this.draggingAudioClipEdge.clip.linkedVideoClip.duration = Math.min(newDuration, linkedMaxDuration)
}
}
// Trigger timeline redraw
if (this.requestRedraw) this.requestRedraw()
return true
}
// Handle audio clip dragging
if (this.draggingAudioClip) {
// Adjust coordinates to timeline area
@@ -3544,6 +3884,83 @@ class TimelineWindowV2 extends Widget {
// Update clip's start time (ensure it doesn't go negative)
this.draggingAudioClip.clip.startTime = Math.max(0, this.draggingAudioClip.initialClipStartTime + timeDelta)
// Also move linked video clip if it exists
if (this.draggingAudioClip.clip.linkedVideoClip) {
this.draggingAudioClip.clip.linkedVideoClip.startTime = this.draggingAudioClip.clip.startTime
}
// Trigger timeline redraw
if (this.requestRedraw) this.requestRedraw()
return true
}
// Handle video clip edge dragging (trimming)
if (this.draggingVideoClipEdge) {
const adjustedX = x - this.trackHeaderWidth
const newTime = this.timelineState.pixelToTime(adjustedX)
const minClipDuration = this.context.config.minClipDuration
if (this.draggingVideoClipEdge.edge === 'left') {
// Dragging left edge - adjust startTime and offset
const initialEnd = this.draggingVideoClipEdge.initialClipStart + this.draggingVideoClipEdge.initialClipDuration
const maxStartTime = initialEnd - minClipDuration
const newStartTime = Math.max(0, Math.min(newTime, maxStartTime))
const startTimeDelta = newStartTime - this.draggingVideoClipEdge.initialClipStart
this.draggingVideoClipEdge.clip.startTime = newStartTime
this.draggingVideoClipEdge.clip.offset = this.draggingVideoClipEdge.initialClipOffset + startTimeDelta
this.draggingVideoClipEdge.clip.duration = this.draggingVideoClipEdge.initialClipDuration - startTimeDelta
// Also trim linked audio clip if it exists
if (this.draggingVideoClipEdge.clip.linkedAudioClip) {
const audioClip = this.draggingVideoClipEdge.clip.linkedAudioClip
audioClip.startTime = newStartTime
audioClip.offset = (this.draggingVideoClipEdge.initialLinkedAudioOffset || 0) + startTimeDelta
audioClip.duration = this.draggingVideoClipEdge.initialClipDuration - startTimeDelta
}
} else {
// Dragging right edge - adjust duration
const minEndTime = this.draggingVideoClipEdge.initialClipStart + minClipDuration
const newEndTime = Math.max(minEndTime, newTime)
let newDuration = newEndTime - this.draggingVideoClipEdge.clip.startTime
// Constrain duration to not exceed source file duration minus offset
const maxAvailableDuration = this.draggingVideoClipEdge.clip.sourceDuration - this.draggingVideoClipEdge.clip.offset
newDuration = Math.min(newDuration, maxAvailableDuration)
this.draggingVideoClipEdge.clip.duration = newDuration
// Also trim linked audio clip if it exists
if (this.draggingVideoClipEdge.clip.linkedAudioClip) {
const linkedMaxDuration = this.draggingVideoClipEdge.clip.linkedAudioClip.sourceDuration - this.draggingVideoClipEdge.clip.linkedAudioClip.offset
this.draggingVideoClipEdge.clip.linkedAudioClip.duration = Math.min(newDuration, linkedMaxDuration)
}
}
// Trigger timeline redraw
if (this.requestRedraw) this.requestRedraw()
return true
}
// Handle video clip dragging
if (this.draggingVideoClip) {
// Adjust coordinates to timeline area
const adjustedX = x - this.trackHeaderWidth
// Convert mouse position to time
const newTime = this.timelineState.pixelToTime(adjustedX)
// Calculate time delta
const timeDelta = newTime - this.draggingVideoClip.initialMouseTime
// Update clip's start time (ensure it doesn't go negative)
this.draggingVideoClip.clip.startTime = Math.max(0, this.draggingVideoClip.initialClipStartTime + timeDelta)
// Also move linked audio clip if it exists
if (this.draggingVideoClip.clip.linkedAudioClip) {
this.draggingVideoClip.clip.linkedAudioClip.startTime = this.draggingVideoClip.clip.startTime
}
// Trigger timeline redraw
if (this.requestRedraw) this.requestRedraw()
return true
@@ -3603,6 +4020,39 @@ class TimelineWindowV2 extends Widget {
return true
}
// Update cursor based on hover position (when not dragging)
if (!this.draggingAudioClip && !this.draggingVideoClip &&
!this.draggingAudioClipEdge && !this.draggingVideoClipEdge &&
!this.draggingKeyframe && !this.draggingPlayhead && !this.draggingSegment) {
const trackY = y - this.ruler.height
if (trackY >= 0 && x >= this.trackHeaderWidth) {
const adjustedY = trackY - this.trackScrollOffset
const adjustedX = x - this.trackHeaderWidth
const track = this.trackHierarchy.getTrackAtY(adjustedY)
if (track) {
// Check for audio clip edge
if (track.type === 'audio') {
const audioEdgeInfo = this.getAudioClipEdgeAtPoint(track, adjustedX, adjustedY)
if (audioEdgeInfo) {
this.cursor = audioEdgeInfo.edge === 'left' ? 'w-resize' : 'e-resize'
return false
}
}
// Check for video clip edge
else if (track.type === 'video') {
const videoEdgeInfo = this.getVideoClipEdgeAtPoint(track, adjustedX, adjustedY)
if (videoEdgeInfo) {
this.cursor = videoEdgeInfo.edge === 'left' ? 'w-resize' : 'e-resize'
return false
}
}
}
}
// Reset cursor if not over an edge
this.cursor = 'default'
}
return false
}
@@ -3665,6 +4115,67 @@ class TimelineWindowV2 extends Widget {
return true
}
// Complete audio clip edge dragging (trimming)
if (this.draggingAudioClipEdge) {
console.log('Finished trimming audio clip edge')
// Update backend with new clip trim
invoke('audio_trim_clip', {
trackId: this.draggingAudioClipEdge.audioTrack.audioTrackId,
clipId: this.draggingAudioClipEdge.clip.clipId,
newStartTime: this.draggingAudioClipEdge.clip.startTime,
newDuration: this.draggingAudioClipEdge.clip.duration,
newOffset: this.draggingAudioClipEdge.clip.offset
}).catch(error => {
console.error('Failed to trim audio clip in backend:', error)
})
// Also update linked video clip if it exists
if (this.draggingAudioClipEdge.clip.linkedVideoClip) {
console.log('Linked video clip also trimmed')
}
// Clean up dragging state
this.draggingAudioClipEdge = null
this._globalEvents.delete("mousemove")
this._globalEvents.delete("mouseup")
// Final redraw
if (this.requestRedraw) this.requestRedraw()
return true
}
// Complete video clip edge dragging (trimming)
if (this.draggingVideoClipEdge) {
console.log('Finished trimming video clip edge')
// Update linked audio clip in backend if it exists
if (this.draggingVideoClipEdge.clip.linkedAudioClip) {
const linkedAudioClip = this.draggingVideoClipEdge.clip.linkedAudioClip
const audioTrack = this.draggingVideoClipEdge.videoLayer.linkedAudioTrack
if (audioTrack) {
invoke('audio_trim_clip', {
trackId: audioTrack.audioTrackId,
clipId: linkedAudioClip.clipId,
newStartTime: linkedAudioClip.startTime,
newDuration: linkedAudioClip.duration,
newOffset: linkedAudioClip.offset
}).catch(error => {
console.error('Failed to trim linked audio clip in backend:', error)
})
}
}
// Clean up dragging state
this.draggingVideoClipEdge = null
this._globalEvents.delete("mousemove")
this._globalEvents.delete("mouseup")
// Final redraw
if (this.requestRedraw) this.requestRedraw()
return true
}
// Complete audio clip dragging
if (this.draggingAudioClip) {
console.log('Finished dragging audio clip')
@@ -3678,6 +4189,12 @@ class TimelineWindowV2 extends Widget {
console.error('Failed to move clip in backend:', error)
})
// Also update linked video clip in backend if it exists
if (this.draggingAudioClip.clip.linkedVideoClip) {
// Video clips don't have a backend move command yet, so just log for now
console.log('Linked video clip also moved to time', this.draggingAudioClip.clip.startTime)
}
// Clean up dragging state
this.draggingAudioClip = null
this._globalEvents.delete("mousemove")
@@ -3688,6 +4205,37 @@ class TimelineWindowV2 extends Widget {
return true
}
// Complete video clip dragging
if (this.draggingVideoClip) {
console.log('Finished dragging video clip')
// Video clips don't have a backend position yet (they're just visual)
// But we need to update the linked audio clip in the backend
if (this.draggingVideoClip.clip.linkedAudioClip) {
const linkedAudioClip = this.draggingVideoClip.clip.linkedAudioClip
// Find the audio track that contains this clip
const audioTrack = this.draggingVideoClip.videoLayer.linkedAudioTrack
if (audioTrack) {
invoke('audio_move_clip', {
trackId: audioTrack.audioTrackId,
clipId: linkedAudioClip.clipId,
newStartTime: linkedAudioClip.startTime
}).catch(error => {
console.error('Failed to move linked audio clip in backend:', error)
})
}
}
// Clean up dragging state
this.draggingVideoClip = null
this._globalEvents.delete("mousemove")
this._globalEvents.delete("mouseup")
// Final redraw
if (this.requestRedraw) this.requestRedraw()
return true
}
// Phase 6: Complete segment dragging
if (this.draggingSegment) {
console.log('Finished dragging segment')