Video export
Commit 06246bba93 (parent fba2882b41)
@@ -0,0 +1,27 @@
#!/bin/bash
# Build script for static FFmpeg linking

set -e

# Point pkg-config to our static FFmpeg build
export PKG_CONFIG_PATH="/opt/ffmpeg-static/lib/pkgconfig:${PKG_CONFIG_PATH}"

# Tell pkg-config to use static linking
export PKG_CONFIG_ALL_STATIC=1
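# (PKG_CONFIG_ALL_STATIC is honored by Rust's pkg-config crate, which then asks
# pkg-config for --static link lines.)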

# Force static linking of codec libraries (and link required C++ and NUMA libraries)
export RUSTFLAGS="-C prefer-dynamic=no -C link-arg=-L/usr/lib/x86_64-linux-gnu -C link-arg=-Wl,-Bstatic -C link-arg=-lx264 -C link-arg=-lx265 -C link-arg=-lvpx -C link-arg=-lmp3lame -C link-arg=-Wl,-Bdynamic -C link-arg=-lstdc++ -C link-arg=-lnuma"
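# (The -Wl,-Bstatic ... -Wl,-Bdynamic bracket makes the linker resolve the codec
# libraries inside it from .a archives, while libstdc++ and libnuma after the
# bracket stay dynamically linked.)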

# Build with static features
echo "Building with static FFmpeg from /opt/ffmpeg-static..."
echo "PKG_CONFIG_PATH=$PKG_CONFIG_PATH"
echo "PKG_CONFIG_ALL_STATIC=$PKG_CONFIG_ALL_STATIC"

cargo build --release

echo ""
echo "Build complete! Binary at: target/release/lightningbeam-editor"
echo ""
echo "To verify static linking, run:"
echo "  ldd target/release/lightningbeam-editor | grep -E '(ffmpeg|avcodec|avformat|x264|x265|vpx)'"
echo "(Should show no ffmpeg or codec libraries if fully static)"
@@ -8,7 +8,7 @@ lightningbeam-core = { path = "../lightningbeam-core" }
 daw-backend = { path = "../../daw-backend" }
 rtrb = "0.3"
 cpal = "0.15"
-ffmpeg-next = "8.0"
+ffmpeg-next = { version = "8.0", features = ["static"] }

 # UI Framework
 eframe = { workspace = true }
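
(The "static" feature asks ffmpeg-next's build to link the FFmpeg libraries statically instead of loading shared objects at run time.)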
@@ -0,0 +1,104 @@
use std::env;
use std::fs;
use std::path::PathBuf;

fn main() {
    // Only bundle libs on Linux
    if env::var("CARGO_CFG_TARGET_OS").unwrap() != "linux" {
        return;
    }

    // Skip bundling if using static FFmpeg linking
    if env::var("PKG_CONFIG_ALL_STATIC").is_ok() || env::var("FFMPEG_STATIC").is_ok() {
        println!("cargo:warning=Skipping FFmpeg library bundling (static linking enabled)");
        return;
    }

    // Get the output directory
    let out_dir = env::var("OUT_DIR").unwrap();
    let target_dir = PathBuf::from(&out_dir)
        .parent().unwrap()
        .parent().unwrap()
        .parent().unwrap()
        .to_path_buf();

    // Create lib directory in target
    let lib_dir = target_dir.join("lib");
    fs::create_dir_all(&lib_dir).ok();

    println!("cargo:warning=Bundling FFmpeg libraries to {:?}", lib_dir);

    // List of FFmpeg 8.x libraries to bundle
    let ffmpeg_libs = [
        "libavcodec.so.62",
        "libavdevice.so.62",
        "libavfilter.so.11",
        "libavformat.so.62",
        "libavutil.so.60",
        "libpostproc.so.57",  // Actually version 57 in Ubuntu 24.04
        "libswresample.so.6", // Actually version 6
        "libswscale.so.9",
    ];

    let lib_search_paths = [
        "/usr/lib/x86_64-linux-gnu",
        "/usr/lib64",
        "/usr/lib",
    ];

    // Copy FFmpeg libraries
    for lib_name in &ffmpeg_libs {
        copy_library(lib_name, &lib_search_paths, &lib_dir);
    }

    // Also bundle all FFmpeg codec dependencies to avoid version mismatches
    let codec_libs = [
        // Codec libraries
        "libaom.so.3", "libdav1d.so.7", "librav1e.so.0", "libSvtAv1Enc.so.1",
        "libvpx.so.9", "libx264.so.164", "libx265.so.199",
        "libopus.so.0", "libvorbis.so.0", "libvorbisenc.so.2", "libmp3lame.so.0",
        "libtheora.so.0", "libtheoraenc.so.1", "libtheoradec.so.1",
        "libtwolame.so.0", "libspeex.so.1", "libshine.so.3",
        "libwebp.so.7", "libwebpmux.so.3", "libjxl.so.0.7", "libjxl_threads.so.0.7",
        // Container/protocol libraries
        "librabbitmq.so.4", "librist.so.4", "libsrt-gnutls.so.1.5", "libzmq.so.5",
        "libbluray.so.2", "libdvdnav.so.4", "libdvdread.so.8",
        // Other dependencies
        "libaribb24.so.0", "libcodec2.so.1.2", "libgsm.so.1",
        "libopencore-amrnb.so.0", "libopencore-amrwb.so.0",
        "libvo-amrwbenc.so.0", "libfdk-aac.so.2", "libilbc.so.3",
        "libopenjp2.so.7", "libsnappy.so.1", "libvvenc.so.1.12",
    ];

    for lib_name in &codec_libs {
        copy_library(lib_name, &lib_search_paths, &lib_dir);
    }

    // Set rpath to look in ./lib and $ORIGIN/lib
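    // ($ORIGIN is expanded by the dynamic loader at run time to the directory
    // containing the executable, so the binary finds the lib/ directory next to it.)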
    println!("cargo:rustc-link-arg=-Wl,-rpath,$ORIGIN/lib");
    println!("cargo:rustc-link-arg=-Wl,-rpath,{}", lib_dir.display());
}

fn copy_library(lib_name: &str, search_paths: &[&str], lib_dir: &PathBuf) {
    let mut copied = false;

    for search_path in search_paths {
        let src = PathBuf::from(search_path).join(lib_name);
        if src.exists() {
            let dst = lib_dir.join(lib_name);
            if let Err(e) = fs::copy(&src, &dst) {
                println!("cargo:warning=Failed to copy {}: {}", lib_name, e);
            } else {
                copied = true;
                break;
            }
        }
    }

    if !copied {
        // Don't warn for the optional libraries (shine, fdk-aac)
        if !lib_name.contains("shine") && !lib_name.contains("fdk-aac") {
            println!("cargo:warning=Could not find {}", lib_name);
        }
    }
}
@@ -0,0 +1,262 @@
// Working transcode-x264 example from rust-ffmpeg repository
// Testing to verify ffmpeg-next works correctly on this system

extern crate ffmpeg_next as ffmpeg;

use std::collections::HashMap;
use std::env;
use std::time::Instant;

use ffmpeg::{
    codec, decoder, encoder, format, frame, log, media, picture, Dictionary, Packet, Rational,
};

const DEFAULT_X264_OPTS: &str = "preset=medium";

struct Transcoder {
    ost_index: usize,
    decoder: decoder::Video,
    input_time_base: Rational,
    encoder: encoder::Video,
    logging_enabled: bool,
    frame_count: usize,
    last_log_frame_count: usize,
    starting_time: Instant,
    last_log_time: Instant,
}

impl Transcoder {
    fn new(
        ist: &format::stream::Stream,
        octx: &mut format::context::Output,
        ost_index: usize,
        x264_opts: Dictionary,
        enable_logging: bool,
    ) -> Result<Self, ffmpeg::Error> {
        let global_header = octx.format().flags().contains(format::Flags::GLOBAL_HEADER);
        let decoder = ffmpeg::codec::context::Context::from_parameters(ist.parameters())?
            .decoder()
            .video()?;

        let codec = encoder::find(codec::Id::H264);
        let mut ost = octx.add_stream(codec)?;

        let mut encoder =
            codec::context::Context::new_with_codec(codec.ok_or(ffmpeg::Error::InvalidData)?)
                .encoder()
                .video()?;
        ost.set_parameters(&encoder);
        encoder.set_height(decoder.height());
        encoder.set_width(decoder.width());
        encoder.set_aspect_ratio(decoder.aspect_ratio());
        encoder.set_format(decoder.format());
        encoder.set_frame_rate(decoder.frame_rate());
        encoder.set_time_base(ist.time_base());

        if global_header {
            encoder.set_flags(codec::Flags::GLOBAL_HEADER);
        }

        let opened_encoder = encoder
            .open_with(x264_opts)
            .expect("error opening x264 with supplied settings");
        ost.set_parameters(&opened_encoder);
        Ok(Self {
            ost_index,
            decoder,
            input_time_base: ist.time_base(),
            encoder: opened_encoder,
            logging_enabled: enable_logging,
            frame_count: 0,
            last_log_frame_count: 0,
            starting_time: Instant::now(),
            last_log_time: Instant::now(),
        })
    }

    fn send_packet_to_decoder(&mut self, packet: &Packet) {
        self.decoder.send_packet(packet).unwrap();
    }

    fn send_eof_to_decoder(&mut self) {
        self.decoder.send_eof().unwrap();
    }

    fn receive_and_process_decoded_frames(
        &mut self,
        octx: &mut format::context::Output,
        ost_time_base: Rational,
    ) {
        let mut frame = frame::Video::empty();
        while self.decoder.receive_frame(&mut frame).is_ok() {
            self.frame_count += 1;
            let timestamp = frame.timestamp();
            self.log_progress(f64::from(
                Rational(timestamp.unwrap_or(0) as i32, 1) * self.input_time_base,
            ));
            frame.set_pts(timestamp);
            frame.set_kind(picture::Type::None);
            self.send_frame_to_encoder(&frame);
            self.receive_and_process_encoded_packets(octx, ost_time_base);
        }
    }

    fn send_frame_to_encoder(&mut self, frame: &frame::Video) {
        self.encoder.send_frame(frame).unwrap();
    }

    fn send_eof_to_encoder(&mut self) {
        self.encoder.send_eof().unwrap();
    }

    fn receive_and_process_encoded_packets(
        &mut self,
        octx: &mut format::context::Output,
        ost_time_base: Rational,
    ) {
        let mut encoded = Packet::empty();
        while self.encoder.receive_packet(&mut encoded).is_ok() {
            encoded.set_stream(self.ost_index);
            encoded.rescale_ts(self.input_time_base, ost_time_base);
            encoded.write_interleaved(octx).unwrap();
        }
    }

    fn log_progress(&mut self, timestamp: f64) {
        if !self.logging_enabled
            || (self.frame_count - self.last_log_frame_count < 100
                && self.last_log_time.elapsed().as_secs_f64() < 1.0)
        {
            return;
        }
        eprintln!(
            "time elapsed: \t{:8.2}\tframe count: {:8}\ttimestamp: {:8.2}",
            self.starting_time.elapsed().as_secs_f64(),
            self.frame_count,
            timestamp
        );
        self.last_log_frame_count = self.frame_count;
        self.last_log_time = Instant::now();
    }
}

fn parse_opts<'a>(s: String) -> Option<Dictionary<'a>> {
    let mut dict = Dictionary::new();
    for keyval in s.split_terminator(',') {
        let tokens: Vec<&str> = keyval.split('=').collect();
        match tokens[..] {
            [key, val] => dict.set(key, val),
            _ => return None,
        }
    }
    Some(dict)
}
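
// e.g. parse_opts("preset=medium,crf=23".to_string()) yields a dictionary
// {preset: medium, crf: 23}; any pair without exactly one '=' makes it return None.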

fn main() {
    let input_file = env::args().nth(1).expect("missing input file");
    let output_file = env::args().nth(2).expect("missing output file");
    let x264_opts = parse_opts(
        env::args()
            .nth(3)
            .unwrap_or_else(|| DEFAULT_X264_OPTS.to_string()),
    )
    .expect("invalid x264 options string");

    eprintln!("x264 options: {:?}", x264_opts);

    ffmpeg::init().unwrap();
    log::set_level(log::Level::Info);

    let mut ictx = format::input(&input_file).unwrap();
    let mut octx = format::output(&output_file).unwrap();

    format::context::input::dump(&ictx, 0, Some(&input_file));

    let best_video_stream_index = ictx
        .streams()
        .best(media::Type::Video)
        .map(|stream| stream.index());
    let mut stream_mapping: Vec<isize> = vec![0; ictx.nb_streams() as _];
    let mut ist_time_bases = vec![Rational(0, 0); ictx.nb_streams() as _];
    let mut ost_time_bases = vec![Rational(0, 0); ictx.nb_streams() as _];
    let mut transcoders = HashMap::new();
    let mut ost_index = 0;
    for (ist_index, ist) in ictx.streams().enumerate() {
        let ist_medium = ist.parameters().medium();
        if ist_medium != media::Type::Audio
            && ist_medium != media::Type::Video
            && ist_medium != media::Type::Subtitle
        {
            stream_mapping[ist_index] = -1;
            continue;
        }
        stream_mapping[ist_index] = ost_index;
        ist_time_bases[ist_index] = ist.time_base();
        if ist_medium == media::Type::Video {
            // Initialize transcoder for video stream.
            transcoders.insert(
                ist_index,
                Transcoder::new(
                    &ist,
                    &mut octx,
                    ost_index as _,
                    x264_opts.to_owned(),
                    Some(ist_index) == best_video_stream_index,
                )
                .unwrap(),
            );
        } else {
            // Set up for stream copy for non-video stream.
            let mut ost = octx.add_stream(encoder::find(codec::Id::None)).unwrap();
            ost.set_parameters(ist.parameters());
            // We need to set codec_tag to 0 lest we run into incompatible codec tag
            // issues when muxing into a different container format. Unfortunately
            // there's no high level API to do this (yet).
            unsafe {
                (*ost.parameters().as_mut_ptr()).codec_tag = 0;
            }
        }
        ost_index += 1;
    }

    octx.set_metadata(ictx.metadata().to_owned());
    format::context::output::dump(&octx, 0, Some(&output_file));
    octx.write_header().unwrap();

    for (ost_index, _) in octx.streams().enumerate() {
        ost_time_bases[ost_index] = octx.stream(ost_index as _).unwrap().time_base();
    }

    for (stream, mut packet) in ictx.packets() {
        let ist_index = stream.index();
        let ost_index = stream_mapping[ist_index];
        if ost_index < 0 {
            continue;
        }
        let ost_time_base = ost_time_bases[ost_index as usize];
        match transcoders.get_mut(&ist_index) {
            Some(transcoder) => {
                transcoder.send_packet_to_decoder(&packet);
                transcoder.receive_and_process_decoded_frames(&mut octx, ost_time_base);
            }
            None => {
                // Do stream copy on non-video streams.
                packet.rescale_ts(ist_time_bases[ist_index], ost_time_base);
                packet.set_position(-1);
                packet.set_stream(ost_index as _);
                packet.write_interleaved(&mut octx).unwrap();
            }
        }
    }

    // Flush encoders and decoders.
    for (ost_index, transcoder) in transcoders.iter_mut() {
        let ost_time_base = ost_time_bases[*ost_index];
        transcoder.send_eof_to_decoder();
        transcoder.receive_and_process_decoded_frames(&mut octx, ost_time_base);
        transcoder.send_eof_to_encoder();
        transcoder.receive_and_process_encoded_packets(&mut octx, ost_time_base);
    }

    octx.write_trailer().unwrap();
}
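
(If this file is registered as a Cargo example named transcode-x264, matching the comment above, it would run as: cargo run --example transcode-x264 -- input.mp4 output.mp4 preset=fast.)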
@@ -0,0 +1,244 @@
/// Test program to validate video export with synthetic frames
///
/// This creates a simple 5-second video with:
/// - Red frame for 1 second
/// - Green frame for 1 second
/// - Blue frame for 1 second
/// - White frame for 1 second
/// - Black frame for 1 second
///
/// Run with: cargo run --example video_export_test

use std::path::Path;

fn main() -> Result<(), String> {
    println!("Testing H.264 video export with synthetic frames...\n");

    // Initialize FFmpeg
    ffmpeg_next::init().map_err(|e| format!("Failed to initialize FFmpeg: {}", e))?;

    // Output file
    let output_path = "/tmp/test_synthetic.mp4";
    let width = 1920u32;
    let height = 1080u32;
    let framerate = 30.0;
    let bitrate_kbps = 5000; // 5 Mbps
    let duration_secs = 5.0;
    let total_frames = (duration_secs * framerate) as usize;

    println!("Settings:");
    println!("  Resolution: {}×{}", width, height);
    println!("  Framerate: {} fps", framerate);
    println!("  Bitrate: {} kbps", bitrate_kbps);
    println!("  Duration: {} seconds ({} frames)", duration_secs, total_frames);
    println!();

    // Find H.264 encoder
    let encoder_codec = ffmpeg_next::encoder::find(ffmpeg_next::codec::Id::H264)
        .ok_or("H.264 encoder not found")?;

    println!("Using encoder: {}", encoder_codec.name());

    // Create output format context
    let mut output = ffmpeg_next::format::output(&output_path)
        .map_err(|e| format!("Failed to create output file: {}", e))?;

    // Create encoder from codec
    let mut encoder = ffmpeg_next::codec::Context::new_with_codec(encoder_codec)
        .encoder()
        .video()
        .map_err(|e| format!("Failed to create encoder: {}", e))?;

    // Configure encoder parameters BEFORE opening (like working MP3 code)
    encoder.set_width(width);
    encoder.set_height(height);
    encoder.set_format(ffmpeg_next::format::Pixel::YUV420P);
    encoder.set_time_base(ffmpeg_next::Rational(1, (framerate * 1000.0) as i32));
    encoder.set_frame_rate(Some(ffmpeg_next::Rational(framerate as i32, 1)));
    encoder.set_bit_rate((bitrate_kbps * 1000) as usize);
    encoder.set_gop(framerate as u32); // 1 second GOP

    println!("Opening encoder with open_as()...");

    // Open encoder with codec (like working MP3 code)
    let mut encoder = encoder
        .open_as(encoder_codec)
        .map_err(|e| format!("Failed to open encoder: {}", e))?;

    println!("✅ H.264 encoder opened successfully!");
    println!("Opened encoder format: {:?}", encoder.format());

    // Add stream AFTER opening encoder (like working MP3 code)
    {
        let mut stream = output
            .add_stream(encoder_codec)
            .map_err(|e| format!("Failed to add stream: {}", e))?;
        stream.set_parameters(&encoder);
    }

    output
        .write_header()
        .map_err(|e| format!("Failed to write header: {}", e))?;

    println!("✅ Output file created: {}", output_path);
    println!();

    // Generate and encode frames
    println!("Encoding frames...");
    let frame_size_rgba = (width * height * 4) as usize;
    let mut rgba_buffer = vec![0u8; frame_size_rgba];

    for frame_num in 0..total_frames {
        // Fill RGBA buffer with color based on time
        let color = match frame_num / 30 {
            0 => (255, 0, 0, 255),     // Red (0-1s)
            1 => (0, 255, 0, 255),     // Green (1-2s)
            2 => (0, 0, 255, 255),     // Blue (2-3s)
            3 => (255, 255, 255, 255), // White (3-4s)
            _ => (0, 0, 0, 255),       // Black (4-5s)
        };

        for pixel in rgba_buffer.chunks_mut(4) {
            pixel[0] = color.0;
            pixel[1] = color.1;
            pixel[2] = color.2;
            pixel[3] = color.3;
        }

        // Convert RGBA to YUV420p
        let (y, u, v) = rgba_to_yuv420p(&rgba_buffer, width, height);

        // Create video frame
        let mut video_frame = ffmpeg_next::frame::Video::new(
            ffmpeg_next::format::Pixel::YUV420P,
            width,
            height,
        );

        // Copy YUV planes
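        // (Assumption: each plane's stride equals its visible width, so the planes
        // can be copied in one shot. That holds for the 1920/960-pixel rows here,
        // which are already well aligned, but a general implementation should copy
        // row by row using the frame's per-plane stride.)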
        unsafe {
            let y_plane = video_frame.data_mut(0);
            std::ptr::copy_nonoverlapping(y.as_ptr(), y_plane.as_mut_ptr(), y.len());

            let u_plane = video_frame.data_mut(1);
            std::ptr::copy_nonoverlapping(u.as_ptr(), u_plane.as_mut_ptr(), u.len());

            let v_plane = video_frame.data_mut(2);
            std::ptr::copy_nonoverlapping(v.as_ptr(), v_plane.as_mut_ptr(), v.len());
        }

        // Set PTS in time-base ticks: with the 1/(framerate*1000) time base above,
        // one second is framerate*1000 ticks
        let timestamp = frame_num as f64 / framerate;
        video_frame.set_pts(Some((timestamp * framerate * 1000.0) as i64));

        // Encode frame
        encoder
            .send_frame(&video_frame)
            .map_err(|e| format!("Failed to send frame: {}", e))?;

        // Receive and write packets
        let mut encoded = ffmpeg_next::Packet::empty();
        while encoder.receive_packet(&mut encoded).is_ok() {
            encoded.set_stream(0);
            encoded
                .write_interleaved(&mut output)
                .map_err(|e| format!("Failed to write packet: {}", e))?;
        }

        // Progress indicator
        if (frame_num + 1) % 30 == 0 || frame_num + 1 == total_frames {
            let percent = ((frame_num + 1) as f64 / total_frames as f64 * 100.0) as u32;
            println!("  Frame {}/{} ({}%)", frame_num + 1, total_frames, percent);
        }
    }

    // Flush encoder
    encoder
        .send_eof()
        .map_err(|e| format!("Failed to send EOF: {}", e))?;

    let mut encoded = ffmpeg_next::Packet::empty();
    while encoder.receive_packet(&mut encoded).is_ok() {
        encoded.set_stream(0);
        encoded
            .write_interleaved(&mut output)
            .map_err(|e| format!("Failed to write packet: {}", e))?;
    }

    output
        .write_trailer()
        .map_err(|e| format!("Failed to write trailer: {}", e))?;

    // Check output file
    if Path::new(output_path).exists() {
        let metadata = std::fs::metadata(output_path).unwrap();
        println!();
        println!("✅ Video export successful!");
        println!("  Output: {} ({:.2} MB)", output_path, metadata.len() as f64 / 1_048_576.0);
        println!();
        println!("Test with: ffplay {}", output_path);
        println!("Or: vlc {}", output_path);
    } else {
        return Err("Output file was not created!".to_string());
    }

    Ok(())
}

/// Convert RGBA8 to YUV420p using BT.709 color space
fn rgba_to_yuv420p(rgba: &[u8], width: u32, height: u32) -> (Vec<u8>, Vec<u8>, Vec<u8>) {
    let w = width as usize;
    let h = height as usize;

    // Y plane (full resolution)
    let mut y_plane = Vec::with_capacity(w * h);

    for y in 0..h {
        for x in 0..w {
            let idx = (y * w + x) * 4;
            let r = rgba[idx] as f32;
            let g = rgba[idx + 1] as f32;
            let b = rgba[idx + 2] as f32;

            // BT.709 luma
            let y_val = (0.2126 * r + 0.7152 * g + 0.0722 * b).clamp(0.0, 255.0) as u8;
            y_plane.push(y_val);
        }
    }

    // U and V planes (quarter resolution)
    let mut u_plane = Vec::with_capacity((w * h) / 4);
    let mut v_plane = Vec::with_capacity((w * h) / 4);

    for y in (0..h).step_by(2) {
        for x in (0..w).step_by(2) {
            let mut r_sum = 0.0;
            let mut g_sum = 0.0;
            let mut b_sum = 0.0;

            for dy in 0..2 {
                for dx in 0..2 {
                    if y + dy < h && x + dx < w {
                        let idx = ((y + dy) * w + (x + dx)) * 4;
                        r_sum += rgba[idx] as f32;
                        g_sum += rgba[idx + 1] as f32;
                        b_sum += rgba[idx + 2] as f32;
                    }
                }
            }

            let r = r_sum / 4.0;
            let g = g_sum / 4.0;
            let b = b_sum / 4.0;

            // BT.709 chroma (centered at 128)
            let u_val = (-0.1146 * r - 0.3854 * g + 0.5000 * b + 128.0).clamp(0.0, 255.0) as u8;
            let v_val = (0.5000 * r - 0.4542 * g - 0.0458 * b + 128.0).clamp(0.0, 255.0) as u8;

            u_plane.push(u_val);
            v_plane.push(v_val);
        }
    }

    (y_plane, u_plane, v_plane)
}
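
// Illustrative sanity check (an addition for exposition, not part of the commit):
// with the BT.709 coefficients above, a uniform mid-gray frame should map to
// Y ≈ U ≈ V ≈ 128, since the luma weights sum to 1 and each chroma row sums to 0.
#[cfg(test)]
mod tests {
    use super::rgba_to_yuv420p;

    #[test]
    fn gray_maps_to_neutral_yuv() {
        let rgba = vec![128u8; 4 * 4 * 4]; // 4×4 mid-gray RGBA frame
        let (y, u, v) = rgba_to_yuv420p(&rgba, 4, 4);
        assert!(y.iter().all(|&p| (p as i32 - 128).abs() <= 1));
        assert!(u.iter().all(|&p| (p as i32 - 128).abs() <= 1));
        assert!(v.iter().all(|&p| (p as i32 - 128).abs() <= 1));
    }
}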
@@ -3,22 +3,46 @@
 //! Provides a user interface for configuring and starting audio/video exports.

 use eframe::egui;
-use lightningbeam_core::export::{AudioExportSettings, AudioFormat};
+use lightningbeam_core::export::{AudioExportSettings, AudioFormat, VideoExportSettings, VideoCodec, VideoQuality};
 use std::path::PathBuf;

+/// Export type selection
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum ExportType {
+    Audio,
+    Video,
+}
+
+/// Export result from dialog
+#[derive(Debug, Clone)]
+pub enum ExportResult {
+    AudioOnly(AudioExportSettings, PathBuf),
+    VideoOnly(VideoExportSettings, PathBuf),
+    VideoWithAudio(VideoExportSettings, AudioExportSettings, PathBuf),
+}
+
 /// Export dialog state
 pub struct ExportDialog {
     /// Is the dialog open?
     pub open: bool,

-    /// Export settings
-    pub settings: AudioExportSettings,
+    /// Export type (Audio or Video)
+    pub export_type: ExportType,
+
+    /// Audio export settings
+    pub audio_settings: AudioExportSettings,
+
+    /// Video export settings
+    pub video_settings: VideoExportSettings,
+
+    /// Include audio with video?
+    pub include_audio: bool,

     /// Output file path
     pub output_path: Option<PathBuf>,

-    /// Selected preset index (for UI)
-    pub selected_preset: usize,
+    /// Selected audio preset index (for UI)
+    pub selected_audio_preset: usize,

     /// Error message (if any)
     pub error_message: Option<String>,
@@ -28,9 +52,12 @@ impl Default for ExportDialog {
     fn default() -> Self {
         Self {
             open: false,
-            settings: AudioExportSettings::default(),
+            export_type: ExportType::Audio,
+            audio_settings: AudioExportSettings::default(),
+            video_settings: VideoExportSettings::default(),
+            include_audio: true,
             output_path: None,
-            selected_preset: 0,
+            selected_audio_preset: 0,
             error_message: None,
         }
     }
@@ -40,7 +67,8 @@ impl ExportDialog {
     /// Open the dialog with default settings
     pub fn open(&mut self, timeline_duration: f64) {
         self.open = true;
-        self.settings.end_time = timeline_duration;
+        self.audio_settings.end_time = timeline_duration;
+        self.video_settings.end_time = timeline_duration;
         self.error_message = None;
     }

@@ -52,18 +80,23 @@
     /// Render the export dialog
     ///
-    /// Returns Some(settings, output_path) if the user clicked Export,
-    /// None otherwise.
-    pub fn render(&mut self, ctx: &egui::Context) -> Option<(AudioExportSettings, PathBuf)> {
+    /// Returns Some(ExportResult) if the user clicked Export, None otherwise.
+    pub fn render(&mut self, ctx: &egui::Context) -> Option<ExportResult> {
         if !self.open {
             return None;
         }

         let mut should_export = false;
         let mut should_close = false;
+        let mut open = self.open;

-        egui::Window::new("Export Audio")
-            .open(&mut self.open)
+        let window_title = match self.export_type {
+            ExportType::Audio => "Export Audio",
+            ExportType::Video => "Export Video",
+        };
+
+        egui::Window::new(window_title)
+            .open(&mut open)
             .resizable(false)
             .collapsible(false)
             .anchor(egui::Align2::CENTER_CENTER, egui::Vec2::ZERO)
@@ -76,159 +109,31 @@
             ui.add_space(8.0);
         }

-        // Preset selection
-        ui.heading("Preset");
+        // Export type selection (tabs)
         ui.horizontal(|ui| {
-            let presets = [
-                ("High Quality WAV", AudioExportSettings::high_quality_wav()),
-                ("High Quality FLAC", AudioExportSettings::high_quality_flac()),
-                ("Standard MP3", AudioExportSettings::standard_mp3()),
-                ("Standard AAC", AudioExportSettings::standard_aac()),
-                ("High Quality MP3", AudioExportSettings::high_quality_mp3()),
-                ("High Quality AAC", AudioExportSettings::high_quality_aac()),
-                ("Podcast MP3", AudioExportSettings::podcast_mp3()),
-                ("Podcast AAC", AudioExportSettings::podcast_aac()),
-            ];
-
-            egui::ComboBox::from_id_source("export_preset")
-                .selected_text(presets[self.selected_preset].0)
-                .show_ui(ui, |ui| {
-                    for (i, (name, _)) in presets.iter().enumerate() {
-                        if ui.selectable_value(&mut self.selected_preset, i, *name).clicked() {
-                            // Save current time range before applying preset
-                            let saved_start = self.settings.start_time;
-                            let saved_end = self.settings.end_time;
-                            self.settings = presets[i].1.clone();
-                            // Restore time range
-                            self.settings.start_time = saved_start;
-                            self.settings.end_time = saved_end;
-                        }
-                    }
-                });
+            ui.selectable_value(&mut self.export_type, ExportType::Audio, "🎵 Audio");
+            ui.selectable_value(&mut self.export_type, ExportType::Video, "🎬 Video");
         });

         ui.add_space(12.0);
+        ui.separator();
+        ui.add_space(12.0);

-        // Format settings
-        ui.heading("Format");
-        ui.horizontal(|ui| {
-            ui.label("Format:");
-            egui::ComboBox::from_id_source("audio_format")
-                .selected_text(self.settings.format.name())
-                .show_ui(ui, |ui| {
-                    ui.selectable_value(&mut self.settings.format, AudioFormat::Wav, "WAV (Uncompressed)");
-                    ui.selectable_value(&mut self.settings.format, AudioFormat::Flac, "FLAC (Lossless)");
-                    ui.selectable_value(&mut self.settings.format, AudioFormat::Mp3, "MP3");
-                    ui.selectable_value(&mut self.settings.format, AudioFormat::Aac, "AAC");
-                });
-        });
-
-        ui.add_space(8.0);
-
-        // Audio settings
-        ui.horizontal(|ui| {
-            ui.label("Sample Rate:");
-            egui::ComboBox::from_id_source("sample_rate")
-                .selected_text(format!("{} Hz", self.settings.sample_rate))
-                .show_ui(ui, |ui| {
-                    ui.selectable_value(&mut self.settings.sample_rate, 44100, "44100 Hz");
-                    ui.selectable_value(&mut self.settings.sample_rate, 48000, "48000 Hz");
-                    ui.selectable_value(&mut self.settings.sample_rate, 96000, "96000 Hz");
-                });
-        });
-
-        ui.horizontal(|ui| {
-            ui.label("Channels:");
-            ui.radio_value(&mut self.settings.channels, 1, "Mono");
-            ui.radio_value(&mut self.settings.channels, 2, "Stereo");
-        });
-
-        ui.add_space(8.0);
-
-        // Format-specific settings
-        if self.settings.format.supports_bit_depth() {
-            ui.horizontal(|ui| {
-                ui.label("Bit Depth:");
-                ui.radio_value(&mut self.settings.bit_depth, 16, "16-bit");
-                ui.radio_value(&mut self.settings.bit_depth, 24, "24-bit");
-            });
-        }
-
-        if self.settings.format.uses_bitrate() {
-            ui.horizontal(|ui| {
-                ui.label("Bitrate:");
-                egui::ComboBox::from_id_source("bitrate")
-                    .selected_text(format!("{} kbps", self.settings.bitrate_kbps))
-                    .show_ui(ui, |ui| {
-                        ui.selectable_value(&mut self.settings.bitrate_kbps, 128, "128 kbps");
-                        ui.selectable_value(&mut self.settings.bitrate_kbps, 192, "192 kbps");
-                        ui.selectable_value(&mut self.settings.bitrate_kbps, 256, "256 kbps");
-                        ui.selectable_value(&mut self.settings.bitrate_kbps, 320, "320 kbps");
-                    });
-            });
+        // Render either audio or video settings
+        match self.export_type {
+            ExportType::Audio => self.render_audio_settings(ui),
+            ExportType::Video => self.render_video_settings(ui),
         }

         ui.add_space(12.0);

-        // Time range
-        ui.heading("Time Range");
-        ui.horizontal(|ui| {
-            ui.label("Start:");
-            ui.add(egui::DragValue::new(&mut self.settings.start_time)
-                .speed(0.1)
-                .clamp_range(0.0..=self.settings.end_time)
-                .suffix(" s"));
-
-            ui.label("End:");
-            ui.add(egui::DragValue::new(&mut self.settings.end_time)
-                .speed(0.1)
-                .clamp_range(self.settings.start_time..=f64::MAX)
-                .suffix(" s"));
-        });
-
-        let duration = self.settings.duration();
-        ui.label(format!("Duration: {:.2} seconds", duration));
+        // Time range (common to both)
+        self.render_time_range(ui);

         ui.add_space(12.0);

-        // Output file path
-        ui.heading("Output");
-        ui.horizontal(|ui| {
-            let path_text = self.output_path.as_ref()
-                .map(|p| p.display().to_string())
-                .unwrap_or_else(|| "No file selected".to_string());
-
-            ui.label("File:");
-            ui.text_edit_singleline(&mut path_text.clone());
-
-            if ui.button("Browse...").clicked() {
-                // Open file dialog
-                let default_name = format!("audio.{}", self.settings.format.extension());
-                if let Some(path) = rfd::FileDialog::new()
-                    .set_file_name(&default_name)
-                    .add_filter("Audio", &[self.settings.format.extension()])
-                    .save_file()
-                {
-                    self.output_path = Some(path);
-                }
-            }
-        });
-
-        ui.add_space(12.0);
-
-        // Estimated file size
-        if duration > 0.0 {
-            let estimated_mb = if self.settings.format.uses_bitrate() {
-                // Lossy: bitrate * duration / 8 / 1024
-                (self.settings.bitrate_kbps as f64 * duration) / 8.0 / 1024.0
-            } else {
-                // Lossless: sample_rate * channels * bit_depth * duration / 8 / 1024 / 1024
-                let compression_factor = if self.settings.format == AudioFormat::Flac { 0.6 } else { 1.0 };
-                (self.settings.sample_rate as f64 * self.settings.channels as f64 *
-                    self.settings.bit_depth as f64 * duration * compression_factor) / 8.0 / 1024.0 / 1024.0
-            };
-            ui.label(format!("Estimated size: ~{:.1} MB", estimated_mb));
-        }
+        // Output file path (common to both)
+        self.render_output_selection(ui);

         ui.add_space(16.0);

@@ -246,32 +151,319 @@ impl ExportDialog {
             });
         });

+        // Update open state (in case user clicked X button)
+        self.open = open;
+
         if should_close {
             self.close();
             return None;
         }

         if should_export {
-            // Validate settings
-            if let Err(err) = self.settings.validate() {
-                self.error_message = Some(err);
-                return None;
-            }
-
-            // Check if output path is set
-            if self.output_path.is_none() {
-                self.error_message = Some("Please select an output file".to_string());
-                return None;
-            }
-
-            // Return settings and path
-            let result = Some((self.settings.clone(), self.output_path.clone().unwrap()));
-            self.close();
-            return result;
+            return self.handle_export();
         }

         None
     }

+    /// Render audio export settings UI
+    fn render_audio_settings(&mut self, ui: &mut egui::Ui) {
+        // Preset selection
+        ui.heading("Preset");
+        ui.horizontal(|ui| {
+            let presets = [
+                ("High Quality WAV", AudioExportSettings::high_quality_wav()),
+                ("High Quality FLAC", AudioExportSettings::high_quality_flac()),
+                ("Standard MP3", AudioExportSettings::standard_mp3()),
+                ("Standard AAC", AudioExportSettings::standard_aac()),
+                ("High Quality MP3", AudioExportSettings::high_quality_mp3()),
+                ("High Quality AAC", AudioExportSettings::high_quality_aac()),
+                ("Podcast MP3", AudioExportSettings::podcast_mp3()),
+                ("Podcast AAC", AudioExportSettings::podcast_aac()),
+            ];
+
+            egui::ComboBox::from_id_source("export_preset")
+                .selected_text(presets[self.selected_audio_preset].0)
+                .show_ui(ui, |ui| {
+                    for (i, (name, _)) in presets.iter().enumerate() {
+                        if ui.selectable_value(&mut self.selected_audio_preset, i, *name).clicked() {
+                            // Save current time range before applying preset
+                            let saved_start = self.audio_settings.start_time;
+                            let saved_end = self.audio_settings.end_time;
+                            self.audio_settings = presets[i].1.clone();
+                            // Restore time range
+                            self.audio_settings.start_time = saved_start;
+                            self.audio_settings.end_time = saved_end;
+                        }
+                    }
+                });
+        });
+
+        ui.add_space(12.0);
+
+        ui.add_space(12.0);
+
+        // Format settings
+        ui.heading("Format");
+        ui.horizontal(|ui| {
+            ui.label("Format:");
+            egui::ComboBox::from_id_source("audio_format")
+                .selected_text(self.audio_settings.format.name())
+                .show_ui(ui, |ui| {
+                    ui.selectable_value(&mut self.audio_settings.format, AudioFormat::Wav, "WAV (Uncompressed)");
+                    ui.selectable_value(&mut self.audio_settings.format, AudioFormat::Flac, "FLAC (Lossless)");
+                    ui.selectable_value(&mut self.audio_settings.format, AudioFormat::Mp3, "MP3");
+                    ui.selectable_value(&mut self.audio_settings.format, AudioFormat::Aac, "AAC");
+                });
+        });
+
+        ui.add_space(8.0);
+
+        // Audio settings
+        ui.horizontal(|ui| {
+            ui.label("Sample Rate:");
+            egui::ComboBox::from_id_source("sample_rate")
+                .selected_text(format!("{} Hz", self.audio_settings.sample_rate))
+                .show_ui(ui, |ui| {
+                    ui.selectable_value(&mut self.audio_settings.sample_rate, 44100, "44100 Hz");
+                    ui.selectable_value(&mut self.audio_settings.sample_rate, 48000, "48000 Hz");
+                    ui.selectable_value(&mut self.audio_settings.sample_rate, 96000, "96000 Hz");
+                });
+        });
+
+        ui.horizontal(|ui| {
+            ui.label("Channels:");
+            ui.radio_value(&mut self.audio_settings.channels, 1, "Mono");
+            ui.radio_value(&mut self.audio_settings.channels, 2, "Stereo");
+        });
+
+        ui.add_space(8.0);
+
+        // Format-specific settings
+        if self.audio_settings.format.supports_bit_depth() {
+            ui.horizontal(|ui| {
+                ui.label("Bit Depth:");
+                ui.radio_value(&mut self.audio_settings.bit_depth, 16, "16-bit");
+                ui.radio_value(&mut self.audio_settings.bit_depth, 24, "24-bit");
+            });
+        }
+
+        if self.audio_settings.format.uses_bitrate() {
+            ui.horizontal(|ui| {
+                ui.label("Bitrate:");
+                egui::ComboBox::from_id_source("bitrate")
+                    .selected_text(format!("{} kbps", self.audio_settings.bitrate_kbps))
+                    .show_ui(ui, |ui| {
+                        ui.selectable_value(&mut self.audio_settings.bitrate_kbps, 128, "128 kbps");
+                        ui.selectable_value(&mut self.audio_settings.bitrate_kbps, 192, "192 kbps");
+                        ui.selectable_value(&mut self.audio_settings.bitrate_kbps, 256, "256 kbps");
+                        ui.selectable_value(&mut self.audio_settings.bitrate_kbps, 320, "320 kbps");
+                    });
+            });
+        }
+    }
+
+    /// Render video export settings UI
+    fn render_video_settings(&mut self, ui: &mut egui::Ui) {
+        // Codec selection
+        ui.heading("Codec");
+        ui.horizontal(|ui| {
+            ui.label("Codec:");
+            egui::ComboBox::from_id_source("video_codec")
+                .selected_text(format!("{:?}", self.video_settings.codec))
+                .show_ui(ui, |ui| {
+                    ui.selectable_value(&mut self.video_settings.codec, VideoCodec::H264, "H.264 (Most Compatible)");
+                    ui.selectable_value(&mut self.video_settings.codec, VideoCodec::H265, "H.265 (Better Compression)");
+                    ui.selectable_value(&mut self.video_settings.codec, VideoCodec::VP8, "VP8 (WebM)");
+                    ui.selectable_value(&mut self.video_settings.codec, VideoCodec::VP9, "VP9 (WebM)");
+                    ui.selectable_value(&mut self.video_settings.codec, VideoCodec::ProRes422, "ProRes 422 (Professional)");
+                });
+        });
+
+        ui.add_space(12.0);
+
+        // Resolution
+        ui.heading("Resolution");
+        ui.horizontal(|ui| {
+            ui.label("Width:");
+            let mut custom_width = self.video_settings.width.unwrap_or(1920);
+            if ui.add(egui::DragValue::new(&mut custom_width).clamp_range(1..=7680)).changed() {
+                self.video_settings.width = Some(custom_width);
+            }
+
+            ui.label("Height:");
+            let mut custom_height = self.video_settings.height.unwrap_or(1080);
+            if ui.add(egui::DragValue::new(&mut custom_height).clamp_range(1..=4320)).changed() {
+                self.video_settings.height = Some(custom_height);
+            }
+        });
+
+        // Resolution presets
+        ui.horizontal(|ui| {
+            if ui.button("1080p").clicked() {
+                self.video_settings.width = Some(1920);
+                self.video_settings.height = Some(1080);
+            }
+            if ui.button("4K").clicked() {
+                self.video_settings.width = Some(3840);
+                self.video_settings.height = Some(2160);
+            }
+            if ui.button("720p").clicked() {
+                self.video_settings.width = Some(1280);
+                self.video_settings.height = Some(720);
+            }
+        });
+
+        ui.add_space(12.0);
+
+        // Framerate
+        ui.heading("Framerate");
+        ui.horizontal(|ui| {
+            ui.label("FPS:");
+            egui::ComboBox::from_id_source("framerate")
+                .selected_text(format!("{}", self.video_settings.framerate as u32))
+                .show_ui(ui, |ui| {
+                    ui.selectable_value(&mut self.video_settings.framerate, 24.0, "24");
+                    ui.selectable_value(&mut self.video_settings.framerate, 30.0, "30");
+                    ui.selectable_value(&mut self.video_settings.framerate, 60.0, "60");
+                });
+        });
+
+        ui.add_space(12.0);
+
+        // Quality
+        ui.heading("Quality");
+        ui.horizontal(|ui| {
+            ui.label("Quality:");
+            egui::ComboBox::from_id_source("video_quality")
+                .selected_text(self.video_settings.quality.name())
+                .show_ui(ui, |ui| {
+                    ui.selectable_value(&mut self.video_settings.quality, VideoQuality::Low, VideoQuality::Low.name());
+                    ui.selectable_value(&mut self.video_settings.quality, VideoQuality::Medium, VideoQuality::Medium.name());
+                    ui.selectable_value(&mut self.video_settings.quality, VideoQuality::High, VideoQuality::High.name());
+                    ui.selectable_value(&mut self.video_settings.quality, VideoQuality::VeryHigh, VideoQuality::VeryHigh.name());
+                });
+        });
+
+        ui.add_space(12.0);
+
+        // Include audio checkbox
+        ui.checkbox(&mut self.include_audio, "Include Audio");
+    }
+
+    /// Render time range UI (common to both audio and video)
+    fn render_time_range(&mut self, ui: &mut egui::Ui) {
+        let (start_time, end_time) = match self.export_type {
+            ExportType::Audio => (&mut self.audio_settings.start_time, &mut self.audio_settings.end_time),
+            ExportType::Video => (&mut self.video_settings.start_time, &mut self.video_settings.end_time),
+        };
+
+        ui.heading("Time Range");
+        ui.horizontal(|ui| {
+            ui.label("Start:");
+            ui.add(egui::DragValue::new(start_time)
+                .speed(0.1)
+                .clamp_range(0.0..=*end_time)
+                .suffix(" s"));
+
+            ui.label("End:");
+            ui.add(egui::DragValue::new(end_time)
+                .speed(0.1)
+                .clamp_range(*start_time..=f64::MAX)
+                .suffix(" s"));
+        });
+
+        let duration = *end_time - *start_time;
+        ui.label(format!("Duration: {:.2} seconds", duration));
+    }
+
+    /// Render output file selection UI (common to both audio and video)
+    fn render_output_selection(&mut self, ui: &mut egui::Ui) {
+        ui.heading("Output");
+        ui.horizontal(|ui| {
+            let path_text = self.output_path.as_ref()
+                .map(|p| p.display().to_string())
+                .unwrap_or_else(|| "No file selected".to_string());
+
+            ui.label("File:");
+            ui.text_edit_singleline(&mut path_text.clone());
+
+            if ui.button("Browse...").clicked() {
+                // Determine file extension and filter based on export type
+                let (default_name, filter_name, extensions) = match self.export_type {
+                    ExportType::Audio => {
+                        let ext = self.audio_settings.format.extension();
+                        (format!("audio.{}", ext), "Audio", vec![ext])
+                    }
+                    ExportType::Video => {
+                        let ext = self.video_settings.codec.container_format();
+                        (format!("video.{}", ext), "Video", vec![ext])
+                    }
+                };
+
+                if let Some(path) = rfd::FileDialog::new()
+                    .set_file_name(&default_name)
+                    .add_filter(filter_name, &extensions)
+                    .save_file()
+                {
+                    self.output_path = Some(path);
+                }
+            }
+        });
+    }
+
+    /// Handle export button click
+    fn handle_export(&mut self) -> Option<ExportResult> {
+        // Check if output path is set
+        if self.output_path.is_none() {
+            self.error_message = Some("Please select an output file".to_string());
+            return None;
+        }
+
+        let output_path = self.output_path.clone().unwrap();
+
+        let result = match self.export_type {
+            ExportType::Audio => {
+                // Validate audio settings
+                if let Err(err) = self.audio_settings.validate() {
+                    self.error_message = Some(err);
+                    return None;
+                }
+
+                Some(ExportResult::AudioOnly(self.audio_settings.clone(), output_path))
+            }
+            ExportType::Video => {
+                // Validate video settings
+                if let Err(err) = self.video_settings.validate() {
+                    self.error_message = Some(err);
+                    return None;
+                }
+
+                if self.include_audio {
+                    // Validate audio settings too
+                    if let Err(err) = self.audio_settings.validate() {
+                        self.error_message = Some(err);
+                        return None;
+                    }
+
+                    // Sync time range from video to audio
+                    self.audio_settings.start_time = self.video_settings.start_time;
+                    self.audio_settings.end_time = self.video_settings.end_time;
+
+                    Some(ExportResult::VideoWithAudio(
+                        self.video_settings.clone(),
+                        self.audio_settings.clone(),
+                        output_path,
+                    ))
+                } else {
+                    Some(ExportResult::VideoOnly(self.video_settings.clone(), output_path))
+                }
+            }
+        };
+
+        self.close();
+        result
+    }
 }

 /// Export progress dialog state
@@ -5,23 +5,83 @@
 pub mod audio_exporter;
 pub mod dialog;
+pub mod video_exporter;

-use lightningbeam_core::export::{AudioExportSettings, ExportProgress};
+use lightningbeam_core::export::{AudioExportSettings, VideoExportSettings, ExportProgress};
 use lightningbeam_core::document::Document;
+use lightningbeam_core::renderer::ImageCache;
+use lightningbeam_core::video::VideoManager;
 use std::path::PathBuf;
 use std::sync::mpsc::{channel, Receiver, Sender};
 use std::sync::Arc;
 use std::sync::atomic::{AtomicBool, Ordering};

+/// Message sent from main thread to video encoder thread
+enum VideoFrameMessage {
+    /// RGBA frame data with frame number and timestamp
+    Frame { frame_num: usize, timestamp: f64, rgba_data: Vec<u8> },
+    /// Signal that all frames have been sent
+    Done,
+}
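// The encoder thread is expected to drain this channel roughly as sketched here
// (illustrative only; encode_rgba_frame is a hypothetical helper, not an API in this crate):
//
//     while let Ok(msg) = frame_rx.recv() {
//         match msg {
//             VideoFrameMessage::Frame { frame_num, timestamp, rgba_data } => {
//                 encode_rgba_frame(&mut encoder, frame_num, timestamp, &rgba_data)?;
//             }
//             VideoFrameMessage::Done => break, // then flush the encoder and write the trailer
//         }
//     }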
+
+/// Video export state for incremental rendering
+pub struct VideoExportState {
+    /// Current frame number being rendered
+    current_frame: usize,
+    /// Total number of frames to export
+    total_frames: usize,
+    /// Start time in seconds
+    start_time: f64,
+    /// End time in seconds
+    end_time: f64,
+    /// Frames per second
+    framerate: f64,
+    /// Export width in pixels
+    width: u32,
+    /// Export height in pixels
+    height: u32,
+    /// Channel to send rendered frames to encoder thread
+    frame_tx: Option<Sender<VideoFrameMessage>>,
+}
+
 /// Export orchestrator that manages the export process
 pub struct ExportOrchestrator {
-    /// Channel for receiving progress updates
+    /// Channel for receiving progress updates (video or audio-only export)
     progress_rx: Option<Receiver<ExportProgress>>,

-    /// Handle to the export thread
+    /// Handle to the export thread (video or audio-only export)
     thread_handle: Option<std::thread::JoinHandle<()>>,

     /// Cancel flag
     cancel_flag: Arc<AtomicBool>,

+    /// Video export state (if video export is in progress)
+    video_state: Option<VideoExportState>,
+
+    /// Parallel audio+video export state
+    parallel_export: Option<ParallelExportState>,
 }

+/// State for parallel audio+video export
+struct ParallelExportState {
+    /// Video progress channel
+    video_progress_rx: Receiver<ExportProgress>,
+    /// Audio progress channel
+    audio_progress_rx: Receiver<ExportProgress>,
+    /// Video encoder thread handle
+    video_thread: std::thread::JoinHandle<()>,
+    /// Audio export thread handle
+    audio_thread: std::thread::JoinHandle<()>,
+    /// Temporary video file path
+    temp_video_path: PathBuf,
+    /// Temporary audio file path
+    temp_audio_path: PathBuf,
+    /// Final output path
+    final_output_path: PathBuf,
+    /// Latest video progress
+    video_progress: Option<ExportProgress>,
+    /// Latest audio progress
+    audio_progress: Option<ExportProgress>,
+}
+
 impl ExportOrchestrator {
@@ -31,6 +91,8 @@ impl ExportOrchestrator {
             progress_rx: None,
             thread_handle: None,
             cancel_flag: Arc::new(AtomicBool::new(false)),
+            video_state: None,
+            parallel_export: None,
         }
     }

@@ -76,23 +138,287 @@
     ///
     /// Returns None if no updates are available.
     /// Returns Some(progress) if an update is available.
+    ///
+    /// For parallel video+audio exports, returns combined progress.
     pub fn poll_progress(&mut self) -> Option<ExportProgress> {
+        // Handle parallel video+audio export
+        if self.parallel_export.is_some() {
+            return self.poll_parallel_progress();
+        }
+
+        // Handle single export (audio-only or video-only)
         if let Some(rx) = &self.progress_rx {
             match rx.try_recv() {
                 Ok(progress) => {
                     println!("📨 [ORCHESTRATOR] Received progress: {:?}", std::mem::discriminant(&progress));
                     Some(progress)
                 }
-                Err(e) => {
-                    // Only log occasionally to avoid spam
-                    None
-                }
+                Err(_) => None,
             }
         } else {
             None
         }
     }

+    /// Poll progress for parallel video+audio export
+    fn poll_parallel_progress(&mut self) -> Option<ExportProgress> {
+        let parallel = self.parallel_export.as_mut()?;
+
+        // Poll video progress
+        while let Ok(progress) = parallel.video_progress_rx.try_recv() {
+            println!("📨 [PARALLEL] Video progress: {:?}", std::mem::discriminant(&progress));
+            parallel.video_progress = Some(progress);
+        }
+
+        // Poll audio progress
+        while let Ok(progress) = parallel.audio_progress_rx.try_recv() {
+            println!("📨 [PARALLEL] Audio progress: {:?}", std::mem::discriminant(&progress));
+            parallel.audio_progress = Some(progress);
+        }
+
+        // Check if both are complete
+        let video_complete = matches!(parallel.video_progress, Some(ExportProgress::Complete { .. }));
+        let audio_complete = matches!(parallel.audio_progress, Some(ExportProgress::Complete { .. }));
+
+        if video_complete && audio_complete {
+            println!("🎬🎵 [PARALLEL] Both video and audio complete, starting mux");
+
+            // Take parallel state to extract file paths
+            let parallel_state = self.parallel_export.take().unwrap();
+
+            // Wait for threads to finish
+            parallel_state.video_thread.join().ok();
+            parallel_state.audio_thread.join().ok();
+
+            // Start muxing
+            match Self::mux_video_and_audio(
+                &parallel_state.temp_video_path,
+                &parallel_state.temp_audio_path,
+                &parallel_state.final_output_path,
+            ) {
+                Ok(()) => {
+                    println!("✅ [MUX] Muxing complete, cleaning up temp files");
+
+                    // Clean up temp files
+                    std::fs::remove_file(&parallel_state.temp_video_path).ok();
+                    std::fs::remove_file(&parallel_state.temp_audio_path).ok();
+
+                    return Some(ExportProgress::Complete {
+                        output_path: parallel_state.final_output_path,
+                    });
+                }
+                Err(err) => {
+                    println!("❌ [MUX] Muxing failed: {}", err);
+                    return Some(ExportProgress::Error {
+                        message: format!("Muxing failed: {}", err),
+                    });
+                }
+            }
+        }
+
+        // Check for errors
+        if let Some(ExportProgress::Error { ref message }) = parallel.video_progress {
+            return Some(ExportProgress::Error { message: format!("Video: {}", message) });
+        }
+        if let Some(ExportProgress::Error { ref message }) = parallel.audio_progress {
+            return Some(ExportProgress::Error { message: format!("Audio: {}", message) });
+        }
+
+        // Return combined progress
+        match (&parallel.video_progress, &parallel.audio_progress) {
+            (Some(ExportProgress::FrameRendered { frame, total }), _) => {
+                Some(ExportProgress::FrameRendered { frame: *frame, total: *total })
+            }
+            (_, Some(ExportProgress::Started { .. })) |
+            (Some(ExportProgress::Started { .. }), _) => {
+                Some(ExportProgress::Started { total_frames: 0 })
+            }
+            _ => None,
+        }
+    }
+
+    /// Mux video and audio files together using ffmpeg-next
+    ///
+    /// # Arguments
+    /// * `video_path` - Path to video file (no audio)
+    /// * `audio_path` - Path to audio file
+    /// * `output_path` - Path for final output file
+    ///
+    /// # Returns
+    /// Ok(()) on success, Err with message on failure
+    fn mux_video_and_audio(
+        video_path: &PathBuf,
+        audio_path: &PathBuf,
+        output_path: &PathBuf,
+    ) -> Result<(), String> {
+        use ffmpeg_next as ffmpeg;
+
+        println!("🎬🎵 [MUX] Muxing video and audio using ffmpeg-next");
+        println!("  Video: {:?}", video_path);
+        println!("  Audio: {:?}", audio_path);
+        println!("  Output: {:?}", output_path);
+
+        // Initialize FFmpeg
+        ffmpeg::init().map_err(|e| format!("FFmpeg init failed: {}", e))?;
+
+        // Open input video
+        let mut video_input = ffmpeg::format::input(&video_path)
+            .map_err(|e| format!("Failed to open video file: {}", e))?;
+
+        // Open input audio
+        let mut audio_input = ffmpeg::format::input(&audio_path)
+            .map_err(|e| format!("Failed to open audio file: {}", e))?;
+
+        // Create output
+        let mut output = ffmpeg::format::output(&output_path)
+            .map_err(|e| format!("Failed to create output file: {}", e))?;
+
+        // Find video stream
+        let video_stream_index = video_input.streams().best(ffmpeg::media::Type::Video)
+            .ok_or("No video stream found")?.index();
+
+        // Find audio stream
+        let audio_stream_index = audio_input.streams().best(ffmpeg::media::Type::Audio)
+            .ok_or("No audio stream found")?.index();
+
+        // Extract video stream info (do this before adding output streams)
+        let (video_input_tb, video_output_tb) = {
+            let video_stream = video_input.stream(video_stream_index)
+                .ok_or("Failed to get video stream")?;
+            let input_tb = video_stream.time_base();
+            let codec_id = video_stream.parameters().id();
+            let params = video_stream.parameters();
+
+            // Add video stream to output and extract time_base before dropping
+            let mut video_out_stream = output.add_stream(ffmpeg::encoder::find(codec_id))
+                .map_err(|e| format!("Failed to add video stream: {}", e))?;
+            video_out_stream.set_parameters(params);
+            // Set time base explicitly (params might not include it, resulting in 0/0)
+            video_out_stream.set_time_base(input_tb);
+            let output_tb = video_out_stream.time_base();
+
+            (input_tb, output_tb)
+        }; // video_out_stream drops here
+
+        // Extract audio stream info (after video stream is dropped)
+        let (audio_input_tb, audio_output_tb) = {
+            let audio_stream = audio_input.stream(audio_stream_index)
+                .ok_or("Failed to get audio stream")?;
+            let input_tb = audio_stream.time_base();
+            let codec_id = audio_stream.parameters().id();
+            let params = audio_stream.parameters();
+
+            // Add audio stream to output and extract time_base before dropping
+            let mut audio_out_stream = output.add_stream(ffmpeg::encoder::find(codec_id))
+                .map_err(|e| format!("Failed to add audio stream: {}", e))?;
+            audio_out_stream.set_parameters(params);
+            // Set time base explicitly (params might not include it, resulting in 0/0)
+            audio_out_stream.set_time_base(input_tb);
+            let output_tb = audio_out_stream.time_base();
+
+            (input_tb, output_tb)
+        }; // audio_out_stream drops here
+
+        // Write header
+        output.write_header().map_err(|e| format!("Failed to write header: {}", e))?;
+
+        println!("🎬 [MUX] Video stream - Input TB: {}/{}, Output TB: {}/{}",
+            video_input_tb.0, video_input_tb.1, video_output_tb.0, video_output_tb.1);
+        println!("🎵 [MUX] Audio stream - Input TB: {}/{}, Output TB: {}/{}",
+            audio_input_tb.0, audio_input_tb.1, audio_output_tb.0, audio_output_tb.1);
+
+        // Collect all packets with their stream info and timestamps
+        let mut video_packets = Vec::new();
+        for (stream, packet) in video_input.packets() {
+            if stream.index() == video_stream_index {
+                video_packets.push(packet);
+            }
+        }
+
+        let mut audio_packets = Vec::new();
+        for (stream, packet) in audio_input.packets() {
+            if stream.index() == audio_stream_index {
+                audio_packets.push(packet);
+            }
+        }
+
+        println!("🎬 [MUX] Collected {} video packets, {} audio packets",
+            video_packets.len(), audio_packets.len());
+
+        // Report first and last timestamps
+        if !video_packets.is_empty() {
+            println!("🎬 [MUX] Video PTS range: {} to {}",
+                video_packets[0].pts().unwrap_or(0),
+                video_packets[video_packets.len() - 1].pts().unwrap_or(0));
+        }
+        if !audio_packets.is_empty() {
+            println!("🎵 [MUX] Audio PTS range: {} to {}",
+                audio_packets[0].pts().unwrap_or(0),
+                audio_packets[audio_packets.len() - 1].pts().unwrap_or(0));
+        }
+
+        // Interleave packets by comparing timestamps in a common time base (use microseconds)
|
||||
let mut v_idx = 0;
|
||||
let mut a_idx = 0;
|
||||
let mut interleave_log_count = 0;
|
||||
|
||||
while v_idx < video_packets.len() || a_idx < audio_packets.len() {
|
||||
let write_video = if v_idx >= video_packets.len() {
|
||||
false // No more video
|
||||
} else if a_idx >= audio_packets.len() {
|
||||
true // No more audio, write video
|
||||
} else {
|
||||
// Compare timestamps - convert both to microseconds
|
||||
let v_pts = video_packets[v_idx].pts().unwrap_or(0);
|
||||
let a_pts = audio_packets[a_idx].pts().unwrap_or(0);
|
||||
|
||||
// Convert to microseconds: pts * 1000000 * tb.num / tb.den
|
||||
let v_us = v_pts * 1_000_000 * video_input_tb.0 as i64 / video_input_tb.1 as i64;
|
||||
let a_us = a_pts * 1_000_000 * audio_input_tb.0 as i64 / audio_input_tb.1 as i64;
|
||||
|
||||
v_us <= a_us // Write video if it comes before or at same time as audio
|
||||
};
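
            // Worked example (hypothetical values): a video packet with PTS 3003
            // in a 1/30000 time base maps to 3003 * 1_000_000 / 30000 = 100_100 µs,
            // while an audio packet with PTS 4800 in a 1/48000 time base maps to
            // 100_000 µs, so the audio packet is written first.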

            if write_video {
                let mut packet = video_packets[v_idx].clone();
                packet.set_stream(0);
                packet.rescale_ts(video_input_tb, video_output_tb);

                if interleave_log_count < 10 {
                    println!("🎬 [MUX] Writing V packet {} - PTS={:?}, DTS={:?}, Duration={:?}",
                        v_idx, packet.pts(), packet.dts(), packet.duration());
                    interleave_log_count += 1;
                }

                packet.write_interleaved(&mut output)
                    .map_err(|e| format!("Failed to write video packet: {}", e))?;
                v_idx += 1;
            } else {
                let mut packet = audio_packets[a_idx].clone();
                packet.set_stream(1);
                packet.rescale_ts(audio_input_tb, audio_output_tb);

                if interleave_log_count < 10 {
                    println!("🎵 [MUX] Writing A packet {} - PTS={:?}, DTS={:?}, Duration={:?}",
                        a_idx, packet.pts(), packet.dts(), packet.duration());
                    interleave_log_count += 1;
                }

                packet.write_interleaved(&mut output)
                    .map_err(|e| format!("Failed to write audio packet: {}", e))?;
                a_idx += 1;
            }
        }

        println!("🎬 [MUX] Wrote {} video packets, {} audio packets", v_idx, a_idx);

        // Write trailer
        output.write_trailer().map_err(|e| format!("Failed to write trailer: {}", e))?;

        println!("✅ [MUX] Muxing completed successfully");
        Ok(())
    }

    /// Cancel the current export
    pub fn cancel(&mut self) {
        self.cancel_flag.store(true, Ordering::Relaxed);

@@ -100,6 +426,12 @@ impl ExportOrchestrator {

    /// Check if an export is in progress
    pub fn is_exporting(&self) -> bool {
        // Check parallel export first
        if self.parallel_export.is_some() {
            return true;
        }

        // Check single export
        if let Some(handle) = &self.thread_handle {
            !handle.is_finished()
        } else {

@@ -234,6 +566,524 @@ impl ExportOrchestrator {
            }
        }
    }

    /// Start a video export in the background (encoder thread)
    ///
    /// Returns immediately after spawning encoder thread. Caller must call
    /// `render_next_video_frame()` repeatedly from the main thread to feed frames.
    ///
    /// # Arguments
    /// * `settings` - Video export settings
    /// * `output_path` - Output file path
    ///
    /// # Returns
    /// Ok(()) on success, Err on failure
    pub fn start_video_export(
        &mut self,
        settings: VideoExportSettings,
        output_path: PathBuf,
    ) -> Result<(), String> {
        println!("🎬 [VIDEO EXPORT] Starting video export");

        // Extract values we need before moving settings to thread
        let start_time = settings.start_time;
        let end_time = settings.end_time;
        let framerate = settings.framerate;
        let width = settings.width.unwrap_or(1920);
        let height = settings.height.unwrap_or(1080);
        let duration = end_time - start_time;
        let total_frames = (duration * framerate).ceil() as usize;
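
        // Worked example: a 10.5 s range at 24 fps yields
        // (10.5 * 24.0).ceil() as usize = 252 frames.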

        // Create channels
        let (progress_tx, progress_rx) = channel();
        let (frame_tx, frame_rx) = channel();

        self.progress_rx = Some(progress_rx);

        // Reset cancel flag
        self.cancel_flag.store(false, Ordering::Relaxed);
        let cancel_flag = Arc::clone(&self.cancel_flag);

        // Spawn encoder thread
        let handle = std::thread::spawn(move || {
            Self::run_video_encoder(
                settings,
                output_path,
                frame_rx,
                progress_tx,
                cancel_flag,
                total_frames,
            );
        });

        self.thread_handle = Some(handle);

        // Initialize video export state
        self.video_state = Some(VideoExportState {
            current_frame: 0,
            total_frames,
            start_time,
            end_time,
            framerate,
            width,
            height,
            frame_tx: Some(frame_tx),
        });

        println!("🎬 [VIDEO EXPORT] Encoder thread spawned, ready for frames");
        Ok(())
    }

    /// Start a video+audio export in parallel
    ///
    /// Exports video and audio simultaneously to temporary files, then muxes them together.
    /// Returns immediately after spawning both threads. Caller must call
    /// `render_next_video_frame()` repeatedly for video rendering.
    ///
    /// # Arguments
    /// * `video_settings` - Video export settings
    /// * `audio_settings` - Audio export settings
    /// * `output_path` - Final output file path
    /// * `audio_controller` - DAW audio controller for audio export
    ///
    /// # Returns
    /// Ok(()) on success, Err on failure
    pub fn start_video_with_audio_export(
        &mut self,
        video_settings: VideoExportSettings,
        mut audio_settings: AudioExportSettings,
        output_path: PathBuf,
        audio_controller: Arc<std::sync::Mutex<daw_backend::EngineController>>,
    ) -> Result<(), String> {
        println!("🎬🎵 [PARALLEL EXPORT] Starting parallel video+audio export");

        // Force AAC if format is incompatible with MP4 (WAV/FLAC/MP3)
        // AAC is the standard audio codec for MP4 containers
        // Allow user-selected AAC to pass through
        match audio_settings.format {
            lightningbeam_core::export::AudioFormat::Wav |
            lightningbeam_core::export::AudioFormat::Flac |
            lightningbeam_core::export::AudioFormat::Mp3 => {
                audio_settings.format = lightningbeam_core::export::AudioFormat::Aac;
                println!("🎵 [PARALLEL EXPORT] Audio format forced to AAC for MP4 compatibility");
            }
            lightningbeam_core::export::AudioFormat::Aac => {
                println!("🎵 [PARALLEL EXPORT] Using user-selected audio format: AAC");
            }
        }

        // Generate temporary file paths
        let temp_dir = std::env::temp_dir();
        let timestamp = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap()
            .as_secs();

        let temp_video_path = temp_dir.join(format!("lightningbeam_video_{}.mp4", timestamp));
        let temp_audio_path = temp_dir.join(format!("lightningbeam_audio_{}.{}",
            timestamp,
            match audio_settings.format {
                lightningbeam_core::export::AudioFormat::Wav => "wav",
                lightningbeam_core::export::AudioFormat::Flac => "flac",
                lightningbeam_core::export::AudioFormat::Mp3 => "mp3",
                lightningbeam_core::export::AudioFormat::Aac => "m4a",
            }
        ));

        println!("🎬 [PARALLEL EXPORT] Temp video: {:?}", temp_video_path);
        println!("🎵 [PARALLEL EXPORT] Temp audio: {:?}", temp_audio_path);

        // Extract values we need before moving settings
        let video_start_time = video_settings.start_time;
        let video_end_time = video_settings.end_time;
        let video_framerate = video_settings.framerate;
        let video_width = video_settings.width.unwrap_or(1920);
        let video_height = video_settings.height.unwrap_or(1080);
        let video_duration = video_end_time - video_start_time;
        let total_frames = (video_duration * video_framerate).ceil() as usize;

        // Create channels for video export
        let (video_progress_tx, video_progress_rx) = channel();
        let (frame_tx, frame_rx) = channel();

        // Create channel for audio export
        let (audio_progress_tx, audio_progress_rx) = channel();

        // Reset cancel flag
        self.cancel_flag.store(false, Ordering::Relaxed);
        let video_cancel_flag = Arc::clone(&self.cancel_flag);
        let audio_cancel_flag = Arc::clone(&self.cancel_flag);

        // Spawn video encoder thread
        let video_settings_clone = video_settings.clone();
        let temp_video_path_clone = temp_video_path.clone();
        let video_thread = std::thread::spawn(move || {
            Self::run_video_encoder(
                video_settings_clone,
                temp_video_path_clone,
                frame_rx,
                video_progress_tx,
                video_cancel_flag,
                total_frames,
            );
        });

        // Spawn audio export thread
        let temp_audio_path_clone = temp_audio_path.clone();
        let audio_thread = std::thread::spawn(move || {
            Self::run_audio_export(
                audio_settings,
                temp_audio_path_clone,
                audio_controller,
                audio_progress_tx,
                audio_cancel_flag,
            );
        });

        // Initialize video export state for incremental rendering
        self.video_state = Some(VideoExportState {
            current_frame: 0,
            total_frames,
            start_time: video_start_time,
            end_time: video_end_time,
            framerate: video_framerate,
            width: video_width,
            height: video_height,
            frame_tx: Some(frame_tx),
        });

        // Initialize parallel export state
        self.parallel_export = Some(ParallelExportState {
            video_progress_rx,
            audio_progress_rx,
            video_thread,
            audio_thread,
            temp_video_path,
            temp_audio_path,
            final_output_path: output_path,
            video_progress: None,
            audio_progress: None,
        });

        println!("🎬🎵 [PARALLEL EXPORT] Both threads spawned, ready for frames");
        Ok(())
    }

    /// Render and send the next video frame (call from main thread)
    ///
    /// Returns true if there are more frames to render, false if done.
    ///
    /// # Arguments
    /// * `document` - Document to render
    /// * `device` - wgpu device
    /// * `queue` - wgpu queue
    /// * `renderer` - Vello renderer
    /// * `image_cache` - Image cache
    /// * `video_manager` - Video manager
    ///
    /// # Returns
    /// Ok(true) if more frames remain, Ok(false) if done, Err on failure
    pub fn render_next_video_frame(
        &mut self,
        document: &mut Document,
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        renderer: &mut vello::Renderer,
        image_cache: &mut ImageCache,
        video_manager: &Arc<std::sync::Mutex<VideoManager>>,
    ) -> Result<bool, String> {
        let state = self.video_state.as_mut()
            .ok_or("No video export in progress")?;

        if state.current_frame >= state.total_frames {
            // All frames rendered, signal encoder thread
            if let Some(tx) = state.frame_tx.take() {
                tx.send(VideoFrameMessage::Done).ok();
            }
            return Ok(false);
        }

        // Calculate timestamp for this frame
        let timestamp = state.start_time + (state.current_frame as f64 / state.framerate);
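
        // Worked example: with start_time = 2.0 s at 30 fps, frame 45 is
        // rendered at 2.0 + 45.0 / 30.0 = 3.5 s on the timeline.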

        // Get frame dimensions from export settings
        let width = state.width;
        let height = state.height;

        // Render frame to RGBA buffer
        let mut rgba_buffer = vec![0u8; (width * height * 4) as usize];
        video_exporter::render_frame_to_rgba(
            document,
            timestamp,
            width,
            height,
            device,
            queue,
            renderer,
            image_cache,
            video_manager,
            &mut rgba_buffer,
        )?;

        // Send frame to encoder thread
        if let Some(tx) = &state.frame_tx {
            tx.send(VideoFrameMessage::Frame {
                frame_num: state.current_frame,
                timestamp,
                rgba_data: rgba_buffer,
            }).map_err(|_| "Failed to send frame to encoder")?;
        }

        state.current_frame += 1;

        // Return true if more frames remain
        Ok(state.current_frame < state.total_frames)
    }

    /// Background thread that receives frames and encodes them
    fn run_video_encoder(
        settings: VideoExportSettings,
        output_path: PathBuf,
        frame_rx: Receiver<VideoFrameMessage>,
        progress_tx: Sender<ExportProgress>,
        cancel_flag: Arc<AtomicBool>,
        total_frames: usize,
    ) {
        println!("🧵 [ENCODER THREAD] Video encoder thread started");

        // Send started progress
        progress_tx.send(ExportProgress::Started {
            total_frames,
        }).ok();

        // Delegate to inner function for better error handling
        match Self::run_video_encoder_inner(
            &settings,
            &output_path,
            frame_rx,
            &progress_tx,
            &cancel_flag,
            total_frames,
        ) {
            Ok(()) => {
                println!("🧵 [ENCODER] Export completed successfully");
                progress_tx.send(ExportProgress::Complete {
                    output_path: output_path.clone(),
                }).ok();
            }
            Err(err) => {
                println!("🧵 [ENCODER] Export failed: {}", err);
                progress_tx.send(ExportProgress::Error {
                    message: err,
                }).ok();
            }
        }
    }

    /// Inner encoder function with proper error handling
    fn run_video_encoder_inner(
        settings: &VideoExportSettings,
        output_path: &PathBuf,
        frame_rx: Receiver<VideoFrameMessage>,
        progress_tx: &Sender<ExportProgress>,
        cancel_flag: &Arc<AtomicBool>,
        total_frames: usize,
    ) -> Result<(), String> {
        use lightningbeam_core::export::VideoCodec;

        // Initialize FFmpeg
        ffmpeg_next::init().map_err(|e| format!("Failed to initialize FFmpeg: {}", e))?;

        // Convert codec enum to FFmpeg codec ID
        let codec_id = match settings.codec {
            VideoCodec::H264 => ffmpeg_next::codec::Id::H264,
            VideoCodec::H265 => ffmpeg_next::codec::Id::HEVC,
            VideoCodec::VP8 => ffmpeg_next::codec::Id::VP8,
            VideoCodec::VP9 => ffmpeg_next::codec::Id::VP9,
            VideoCodec::ProRes422 => ffmpeg_next::codec::Id::PRORES,
        };

        // Get bitrate from quality settings
        let bitrate_kbps = settings.quality.bitrate_kbps();
        let framerate = settings.framerate;

        // Wait for first frame to determine dimensions
        let first_frame = match frame_rx.recv() {
            Ok(VideoFrameMessage::Frame { frame_num, timestamp, rgba_data }) => {
                println!("🧵 [ENCODER] Received first frame ({} bytes)", rgba_data.len());
                Some((frame_num, timestamp, rgba_data))
            }
            Ok(VideoFrameMessage::Done) => {
                return Err("No frames to encode".to_string());
            }
            Err(_) => {
                return Err("Frame channel disconnected before first frame".to_string());
            }
        };
        // Determine output dimensions (settings take precedence, falling back to
        // 1920×1080); the first frame's buffer size is only sanity-checked here
        let (width, height) = if let Some((_, _, ref rgba_data)) = first_frame {
            let w = settings.width.unwrap_or(1920); // Default to 1920 if not specified
            let h = settings.height.unwrap_or(1080); // Default to 1080 if not specified
            // RGBA = 4 bytes per pixel; warn if the buffer size doesn't match
            let pixel_count = rgba_data.len() / 4;
            if pixel_count != (w * h) as usize {
                println!("⚠️ [ENCODER] First frame has {} pixels, expected {}", pixel_count, w * h);
            }
            (w, h)
        } else {
            return Err("Failed to determine dimensions".to_string());
        };

        println!("🧵 [ENCODER] Setting up encoder: {}×{} @ {} fps, {} kbps",
            width, height, framerate, bitrate_kbps);

        // Setup encoder
        let (mut encoder, encoder_codec) = video_exporter::setup_video_encoder(
            codec_id,
            width,
            height,
            framerate,
            bitrate_kbps,
        )?;

        // Create output file
        let mut output = ffmpeg_next::format::output(&output_path)
            .map_err(|e| format!("Failed to create output file: {}", e))?;

        // Add stream AFTER opening encoder (critical order!)
        {
            let mut stream = output.add_stream(encoder_codec)
                .map_err(|e| format!("Failed to add stream: {}", e))?;
            stream.set_parameters(&encoder);
        }

        // Write header
        output.write_header()
            .map_err(|e| format!("Failed to write header: {}", e))?;

        println!("🧵 [ENCODER] Encoder initialized, ready to encode frames");

        // Process first frame
        if let Some((frame_num, timestamp, rgba_data)) = first_frame {
            Self::encode_frame(
                &mut encoder,
                &mut output,
                &rgba_data,
                width,
                height,
                timestamp,
            )?;

            // Send progress update for first frame
            progress_tx.send(ExportProgress::FrameRendered {
                frame: 1,
                total: total_frames,
            }).ok();

            println!("🧵 [ENCODER] Encoded frame {}", frame_num);
        }

        // Process remaining frames
        let mut frames_encoded = 1;
        loop {
            if cancel_flag.load(Ordering::Relaxed) {
                return Err("Export cancelled by user".to_string());
            }

            match frame_rx.recv() {
                Ok(VideoFrameMessage::Frame { frame_num, timestamp, rgba_data }) => {
                    Self::encode_frame(
                        &mut encoder,
                        &mut output,
                        &rgba_data,
                        width,
                        height,
                        timestamp,
                    )?;

                    frames_encoded += 1;

                    // Send progress update
                    progress_tx.send(ExportProgress::FrameRendered {
                        frame: frames_encoded,
                        total: total_frames,
                    }).ok();

                    if frames_encoded % 30 == 0 || frames_encoded == frame_num + 1 {
                        println!("🧵 [ENCODER] Encoded frame {}/{}", frames_encoded, total_frames);
                    }
                }
                Ok(VideoFrameMessage::Done) => {
                    println!("🧵 [ENCODER] All frames received, flushing encoder");
                    break;
                }
                Err(_) => {
                    return Err("Frame channel disconnected".to_string());
                }
            }
        }

        // Flush encoder
        encoder.send_eof()
            .map_err(|e| format!("Failed to send EOF to encoder: {}", e))?;

        video_exporter::receive_and_write_packets(&mut encoder, &mut output)?;

        // Write trailer
        output.write_trailer()
            .map_err(|e| format!("Failed to write trailer: {}", e))?;

        println!("🧵 [ENCODER] Video export completed: {} frames", frames_encoded);
        Ok(())
    }

    /// Encode a single RGBA frame
    fn encode_frame(
        encoder: &mut ffmpeg_next::encoder::Video,
        output: &mut ffmpeg_next::format::context::Output,
        rgba_data: &[u8],
        width: u32,
        height: u32,
        timestamp: f64,
    ) -> Result<(), String> {
        // Convert RGBA to YUV420p
        let (y_plane, u_plane, v_plane) = video_exporter::rgba_to_yuv420p(rgba_data, width, height);

        // Create FFmpeg video frame
        let mut video_frame = ffmpeg_next::frame::Video::new(
            ffmpeg_next::format::Pixel::YUV420P,
            width,
            height,
        );
        // Copy YUV planes to frame. Note: these copies assume the frame's plane
        // stride equals the plane width and that each destination slice is at
        // least as long as the source plane (rgba_to_yuv420p pads dimensions up
        // to multiples of 16, so plane sizes are based on the padded extent).
        unsafe {
            let y_dest = video_frame.data_mut(0);
            std::ptr::copy_nonoverlapping(y_plane.as_ptr(), y_dest.as_mut_ptr(), y_plane.len());

            let u_dest = video_frame.data_mut(1);
            std::ptr::copy_nonoverlapping(u_plane.as_ptr(), u_dest.as_mut_ptr(), u_plane.len());

            let v_dest = video_frame.data_mut(2);
            std::ptr::copy_nonoverlapping(v_plane.as_ptr(), v_dest.as_mut_ptr(), v_plane.len());
        }

        // Set PTS (presentation timestamp) in encoder's time base
        // Encoder time base is 1/(framerate * 1000), so PTS = timestamp * (framerate * 1000)
        let encoder_tb = encoder.time_base();
        let pts = (timestamp * encoder_tb.1 as f64) as i64;
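        // Worked example: with a 1/30000 encoder time base, a frame at
        // timestamp 0.5 s gets PTS (0.5 * 30000.0) as i64 = 15000.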
        println!("🎬 [ENCODE] Frame timestamp={:.3}s, encoder_tb={}/{}, calculated PTS={}",
            timestamp, encoder_tb.0, encoder_tb.1, pts);
        video_frame.set_pts(Some(pts));

        // Send frame to encoder
        encoder.send_frame(&video_frame)
            .map_err(|e| format!("Failed to send frame to encoder: {}", e))?;

        // Receive and write packets
        video_exporter::receive_and_write_packets(encoder, output)?;

        Ok(())
    }
}

impl Default for ExportOrchestrator {

@@ -0,0 +1,522 @@
//! Video export functionality
//!
//! Exports video from the timeline using FFmpeg encoding:
//! - H.264/H.265: MP4 container (most compatible)
//! - VP8/VP9: WebM container (web-friendly)
//! - ProRes422: MOV container (professional editing)

use ffmpeg_next as ffmpeg;
use std::sync::Arc;
use lightningbeam_core::document::Document;
use lightningbeam_core::renderer::ImageCache;
use lightningbeam_core::video::VideoManager;

/// Reusable frame buffers to avoid allocations
struct FrameBuffers {
    /// RGBA buffer from GPU readback (width * height * 4 bytes)
    rgba_buffer: Vec<u8>,
    /// Y plane for YUV420p (full resolution)
    y_plane: Vec<u8>,
    /// U plane for YUV420p (quarter resolution - 2×2 subsampling)
    u_plane: Vec<u8>,
    /// V plane for YUV420p (quarter resolution - 2×2 subsampling)
    v_plane: Vec<u8>,
}

impl FrameBuffers {
    /// Create new frame buffers for the given resolution
    fn new(width: u32, height: u32) -> Self {
        let rgba_size = (width * height * 4) as usize;
        let y_size = (width * height) as usize;
        let uv_size = ((width / 2) * (height / 2)) as usize;

        Self {
            rgba_buffer: vec![0u8; rgba_size],
            y_plane: vec![0u8; y_size],
            u_plane: vec![0u8; uv_size],
            v_plane: vec![0u8; uv_size],
        }
    }
}
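
// A minimal usage sketch (an assumption on our part: FrameBuffers does not yet
// appear to be wired into the render loop in this commit, which still
// allocates fresh buffers per frame):
//
//     let mut bufs = FrameBuffers::new(1920, 1080);
//     // Reuse bufs.rgba_buffer for GPU readback, and bufs.y_plane /
//     // bufs.u_plane / bufs.v_plane for the YUV conversion of each frame.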

/// Convert RGBA8 pixels to YUV420p format using BT.709 color space
///
/// # Arguments
/// * `rgba` - Interleaved RGBA8 pixels (4 bytes per pixel)
/// * `width` - Frame width in pixels
/// * `height` - Frame height in pixels
///
/// # Returns
/// Tuple of (Y plane, U plane, V plane) as separate byte vectors
///
/// # Color Space
/// Uses BT.709 (HDTV) color space conversion:
/// - Y = 0.2126*R + 0.7152*G + 0.0722*B
/// - U = -0.1146*R - 0.3854*G + 0.5000*B + 128
/// - V = 0.5000*R - 0.4542*G - 0.0458*B + 128
///
/// # Format
/// YUV420p is a planar format with 2×2 chroma subsampling. Dimensions are
/// padded up to multiples of 16 for H.264 macroblock alignment, so the
/// returned planes use the padded sizes:
/// - Y plane: full padded resolution (aligned width × aligned height)
/// - U plane: quarter padded resolution (aligned width/2 × aligned height/2)
/// - V plane: quarter padded resolution (aligned width/2 × aligned height/2)
pub fn rgba_to_yuv420p(rgba: &[u8], width: u32, height: u32) -> (Vec<u8>, Vec<u8>, Vec<u8>) {
    let w = width as usize;
    let h = height as usize;

    // Round to multiples of 16 for H.264 macroblock alignment
    let aligned_w = (((width + 15) / 16) * 16) as usize;
    let aligned_h = (((height + 15) / 16) * 16) as usize;

    // Allocate Y plane (full aligned resolution, padded with black)
    let mut y_plane = Vec::with_capacity(aligned_w * aligned_h);

    // Convert each pixel to Y (luma), with padding
    for y in 0..aligned_h {
        for x in 0..aligned_w {
            let y_val = if y < h && x < w {
                let idx = (y * w + x) * 4;
                let r = rgba[idx] as f32;
                let g = rgba[idx + 1] as f32;
                let b = rgba[idx + 2] as f32;
                // BT.709 luma conversion
                (0.2126 * r + 0.7152 * g + 0.0722 * b).clamp(0.0, 255.0) as u8
            } else {
                16 // Padding: Y=16 is studio-range video black (visible pixels use full range)
            };
            y_plane.push(y_val);
        }
    }

    // Allocate U and V planes (quarter resolution due to 2×2 subsampling)
    let mut u_plane = Vec::with_capacity((aligned_w * aligned_h) / 4);
    let mut v_plane = Vec::with_capacity((aligned_w * aligned_h) / 4);

    // Process 2×2 blocks for chroma subsampling (with padding for aligned dimensions)
    for y in (0..aligned_h).step_by(2) {
        for x in (0..aligned_w).step_by(2) {
            // Check if this block is in the padding region
            let in_padding = y >= h || x >= w;

            let (u_val, v_val) = if in_padding {
                // Padding region: use neutral chroma for black (U=128, V=128)
                (128, 128)
            } else {
                // Average RGB values from the 2×2 block (edge blocks of odd-sized
                // images still divide by 4, which pulls their averaged chroma
                // slightly toward neutral)
                let mut r_sum = 0.0;
                let mut g_sum = 0.0;
                let mut b_sum = 0.0;

                for dy in 0..2 {
                    for dx in 0..2 {
                        if y + dy < h && x + dx < w {
                            let idx = ((y + dy) * w + (x + dx)) * 4;
                            r_sum += rgba[idx] as f32;
                            g_sum += rgba[idx + 1] as f32;
                            b_sum += rgba[idx + 2] as f32;
                        }
                    }
                }

                let r = r_sum / 4.0;
                let g = g_sum / 4.0;
                let b = b_sum / 4.0;

                // BT.709 chroma conversion (centered at 128)
                let u = (-0.1146 * r - 0.3854 * g + 0.5000 * b + 128.0).clamp(0.0, 255.0) as u8;
                let v = (0.5000 * r - 0.4542 * g - 0.0458 * b + 128.0).clamp(0.0, 255.0) as u8;
                (u, v)
            };

            u_plane.push(u_val);
            v_plane.push(v_val);
        }
    }

    (y_plane, u_plane, v_plane)
}
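
// Worked example of the BT.709 math above for a pure-green pixel
// (R=0, G=255, B=0):
//   Y = 0.7152 * 255 ≈ 182
//   U = -0.3854 * 255 + 128 ≈ 29
//   V = -0.4542 * 255 + 128 ≈ 12
// i.e. green is bright in luma and pulls both chroma channels well below 128.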

/// Setup FFmpeg video encoder for the specified codec
///
/// # Arguments
/// * `codec_id` - FFmpeg codec ID (H264, HEVC, VP9, PRORES, etc.)
/// * `width` - Frame width in pixels
/// * `height` - Frame height in pixels
/// * `framerate` - Frames per second
/// * `bitrate_kbps` - Target bitrate in kilobits per second
///
/// # Returns
/// Tuple of (opened encoder, codec) for stream setup
///
/// # Note
/// This function follows the same pattern as the working MP3 export:
/// 1. Find codec
/// 2. Create encoder context with codec
/// 3. Set ALL parameters (width, height, format, timebase, framerate, bitrate, GOP)
/// 4. Open encoder with open_as(codec)
/// 5. Caller should add stream AFTER opening and set parameters from opened encoder
pub fn setup_video_encoder(
    codec_id: ffmpeg::codec::Id,
    width: u32,
    height: u32,
    framerate: f64,
    bitrate_kbps: u32,
) -> Result<(ffmpeg::encoder::Video, ffmpeg::Codec), String> {
    // Try to find codec by ID first
    println!("🔍 Looking for codec: {:?}", codec_id);
    let codec = ffmpeg::encoder::find(codec_id);

    let codec = if codec.is_some() {
        println!("✅ Found codec by ID");
        codec
    } else {
        println!("⚠️ Codec {:?} not found by ID", codec_id);

        // If not found by ID, try by name (e.g., "libx264" for H264)
        let encoder_name = match codec_id {
            ffmpeg::codec::Id::H264 => "libx264",
            ffmpeg::codec::Id::HEVC => "libx265",
            ffmpeg::codec::Id::VP8 => "libvpx",
            ffmpeg::codec::Id::VP9 => "libvpx-vp9",
            ffmpeg::codec::Id::PRORES => "prores_ks",
            _ => {
                println!("❌ No fallback encoder name for {:?}", codec_id);
                return Err(format!("Unsupported codec: {:?}", codec_id));
            }
        };

        println!("🔍 Trying encoder by name: {}", encoder_name);
        let by_name = ffmpeg::encoder::find_by_name(encoder_name);

        if by_name.is_some() {
            println!("✅ Found encoder by name: {}", encoder_name);
        } else {
            println!("❌ Encoder {} not found", encoder_name);
        }

        by_name
    };

    let codec = codec.ok_or_else(|| {
        println!("❌ Failed to find codec: {:?}", codec_id);
        println!("💡 The static FFmpeg build is missing this encoder.");
        format!("Video encoder not found for codec: {:?}. Static build may be missing encoder libraries.", codec_id)
    })?;

    // Create encoder context with codec
    let mut encoder = ffmpeg::codec::Context::new_with_codec(codec)
        .encoder()
        .video()
        .map_err(|e| format!("Failed to create video encoder: {}", e))?;

    // Round dimensions to multiples of 16 for H.264 macroblock alignment
    let aligned_width = ((width + 15) / 16) * 16;
    let aligned_height = ((height + 15) / 16) * 16;

    // Configure encoder parameters BEFORE opening (critical!)
    encoder.set_width(aligned_width);
    encoder.set_height(aligned_height);
    encoder.set_format(ffmpeg::format::Pixel::YUV420P);
    encoder.set_time_base(ffmpeg::Rational(1, (framerate * 1000.0) as i32));
    encoder.set_frame_rate(Some(ffmpeg::Rational(framerate as i32, 1)));
    encoder.set_bit_rate((bitrate_kbps * 1000) as usize);
    encoder.set_gop(framerate as u32); // 1 second GOP
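
    // Worked example: at 30 fps the calls above yield a 1/30000 time base,
    // a 30/1 frame rate, and a GOP of 30 frames, i.e. one keyframe per second.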

    println!("📐 Video dimensions: {}×{} (aligned to {}×{} for H.264)",
        width, height, aligned_width, aligned_height);

    // Open encoder with codec (like working MP3 export)
    let encoder = encoder
        .open_as(codec)
        .map_err(|e| format!("Failed to open video encoder: {}", e))?;

    Ok((encoder, codec))
}

/// Receive encoded packets from encoder and write to output file
///
/// # Arguments
/// * `encoder` - FFmpeg video encoder
/// * `output` - FFmpeg output format context
///
/// # Returns
/// Ok(()) on success, Err with message on failure
pub fn receive_and_write_packets(
    encoder: &mut ffmpeg::encoder::Video,
    output: &mut ffmpeg::format::context::Output,
) -> Result<(), String> {
    let mut encoded = ffmpeg::Packet::empty();

    // Get time bases for rescaling
    let encoder_tb = encoder.time_base();
    let stream_tb = output.stream(0).ok_or("No output stream found")?.time_base();

    println!("🎬 [PACKET] Encoder TB: {}/{}, Stream TB: {}/{}",
        encoder_tb.0, encoder_tb.1, stream_tb.0, stream_tb.1);

    while encoder.receive_packet(&mut encoded).is_ok() {
        println!("🎬 [PACKET] Before rescale - PTS: {:?}, DTS: {:?}, Duration: {:?}",
            encoded.pts(), encoded.dts(), encoded.duration());

        encoded.set_stream(0);
        // Rescale timestamps from encoder time base to stream time base
        encoded.rescale_ts(encoder_tb, stream_tb);

        println!("🎬 [PACKET] After rescale - PTS: {:?}, DTS: {:?}, Duration: {:?}",
            encoded.pts(), encoded.dts(), encoded.duration());

        encoded
            .write_interleaved(output)
            .map_err(|e| format!("Failed to write packet: {}", e))?;
    }

    Ok(())
}

/// Render a document frame at a specific time and read back RGBA pixels from GPU
///
/// # Arguments
/// * `document` - Document to render (current_time will be modified)
/// * `timestamp` - Time in seconds to render at
/// * `width` - Frame width in pixels
/// * `height` - Frame height in pixels
/// * `device` - wgpu device
/// * `queue` - wgpu queue
/// * `renderer` - Vello renderer
/// * `image_cache` - Image cache for rendering
/// * `video_manager` - Video manager for video clips
/// * `rgba_buffer` - Output buffer for RGBA pixels (must be width * height * 4 bytes)
///
/// # Returns
/// Ok(()) on success, Err with message on failure
pub fn render_frame_to_rgba(
    document: &mut Document,
    timestamp: f64,
    width: u32,
    height: u32,
    device: &wgpu::Device,
    queue: &wgpu::Queue,
    renderer: &mut vello::Renderer,
    image_cache: &mut ImageCache,
    video_manager: &Arc<std::sync::Mutex<VideoManager>>,
    rgba_buffer: &mut [u8],
) -> Result<(), String> {
    // Set document time to the frame timestamp
    document.current_time = timestamp;

    // Create offscreen texture for rendering
    let texture = device.create_texture(&wgpu::TextureDescriptor {
        label: Some("video_export_texture"),
        size: wgpu::Extent3d {
            width,
            height,
            depth_or_array_layers: 1,
        },
        mip_level_count: 1,
        sample_count: 1,
        dimension: wgpu::TextureDimension::D2,
        format: wgpu::TextureFormat::Rgba8Unorm,
        usage: wgpu::TextureUsages::RENDER_ATTACHMENT
            | wgpu::TextureUsages::COPY_SRC
            | wgpu::TextureUsages::STORAGE_BINDING, // Required by Vello for compute shaders
        view_formats: &[],
    });

    let texture_view = texture.create_view(&wgpu::TextureViewDescriptor::default());

    // Render document to Vello scene
    let mut scene = vello::Scene::new();
    lightningbeam_core::renderer::render_document(
        document,
        &mut scene,
        image_cache,
        video_manager,
    );

    // Render scene to texture
    let render_params = vello::RenderParams {
        base_color: vello::peniko::Color::BLACK,
        width,
        height,
        antialiasing_method: vello::AaConfig::Area,
    };

    renderer
        .render_to_texture(device, queue, &scene, &texture_view, &render_params)
        .map_err(|e| format!("Failed to render to texture: {}", e))?;

    // GPU readback: Create staging buffer with proper alignment
    let bytes_per_pixel = 4u32; // RGBA8
    let bytes_per_row_alignment = 256u32; // wgpu::COPY_BYTES_PER_ROW_ALIGNMENT
    let unpadded_bytes_per_row = width * bytes_per_pixel;
    let bytes_per_row = ((unpadded_bytes_per_row + bytes_per_row_alignment - 1)
        / bytes_per_row_alignment) * bytes_per_row_alignment;
    let buffer_size = (bytes_per_row * height) as u64;
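
    // Worked example: at width 1920 a row is 1920 * 4 = 7680 bytes, already a
    // multiple of 256; at width 1000 a row is 4000 bytes, padded up to 4096.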

    let staging_buffer = device.create_buffer(&wgpu::BufferDescriptor {
        label: Some("video_export_staging_buffer"),
        size: buffer_size,
        usage: wgpu::BufferUsages::MAP_READ | wgpu::BufferUsages::COPY_DST,
        mapped_at_creation: false,
    });

    // Copy texture to staging buffer
    let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
        label: Some("video_export_copy_encoder"),
    });

    encoder.copy_texture_to_buffer(
        wgpu::TexelCopyTextureInfo {
            texture: &texture,
            mip_level: 0,
            origin: wgpu::Origin3d::ZERO,
            aspect: wgpu::TextureAspect::All,
        },
        wgpu::TexelCopyBufferInfo {
            buffer: &staging_buffer,
            layout: wgpu::TexelCopyBufferLayout {
                offset: 0,
                bytes_per_row: Some(bytes_per_row),
                rows_per_image: Some(height),
            },
        },
        wgpu::Extent3d {
            width,
            height,
            depth_or_array_layers: 1,
        },
    );

    queue.submit(Some(encoder.finish()));

    // Map buffer and read pixels (synchronous)
    let buffer_slice = staging_buffer.slice(..);
    let (sender, receiver) = std::sync::mpsc::channel();
    buffer_slice.map_async(wgpu::MapMode::Read, move |result| {
        sender.send(result).ok();
    });

    device.poll(wgpu::Maintain::Wait);

    receiver
        .recv()
        .map_err(|_| "Failed to receive buffer mapping result")?
        .map_err(|e| format!("Failed to map buffer: {:?}", e))?;

    // Copy data from mapped buffer to output, removing padding
    let data = buffer_slice.get_mapped_range();
    for y in 0..height as usize {
        let src_offset = y * bytes_per_row as usize;
        let dst_offset = y * unpadded_bytes_per_row as usize;
        let row_bytes = unpadded_bytes_per_row as usize;
        rgba_buffer[dst_offset..dst_offset + row_bytes]
            .copy_from_slice(&data[src_offset..src_offset + row_bytes]);
    }

    drop(data);
    staging_buffer.unmap();

    Ok(())
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_rgba_to_yuv420p_white() {
        // White: R=255, G=255, B=255
        let rgba = vec![255u8, 255, 255, 255]; // 1 pixel
        let (y, u, v) = rgba_to_yuv420p(&rgba, 1, 1);

        // Expected: Y=255 (full brightness), U=128, V=128 (neutral chroma)
        assert_eq!(y[0], 255);
        assert_eq!(u[0], 128);
        assert_eq!(v[0], 128);
    }

    #[test]
    fn test_rgba_to_yuv420p_black() {
        // Black: R=0, G=0, B=0
        let rgba = vec![0u8, 0, 0, 255]; // 1 pixel
        let (y, u, v) = rgba_to_yuv420p(&rgba, 1, 1);

        // Expected: Y=0 (no brightness), U=128, V=128 (neutral chroma)
        assert_eq!(y[0], 0);
        assert_eq!(u[0], 128);
        assert_eq!(v[0], 128);
    }

    #[test]
    fn test_rgba_to_yuv420p_red() {
        // Red: R=255, G=0, B=0
        let rgba = vec![255u8, 0, 0, 255]; // 1 pixel
        let (y, u, v) = rgba_to_yuv420p(&rgba, 1, 1);

        // Red has:
        // - Y around 54 (low luma due to low green coefficient)
        // - U < 128 (negative blue component)
        // - V > 128 (positive red component)
        assert!(y[0] >= 50 && y[0] <= 60, "Y value: {}", y[0]);
        assert!(u[0] < 128, "U value: {}", u[0]);
        assert!(v[0] > 128, "V value: {}", v[0]);
    }

    #[test]
    fn test_rgba_to_yuv420p_dimensions() {
        // 4×4 image (16 pixels); planes are padded up to 16×16 for H.264
        // macroblock alignment
        let rgba = vec![0u8; 4 * 4 * 4]; // All black
        let (y, u, v) = rgba_to_yuv420p(&rgba, 4, 4);

        // Y should be the full aligned resolution: 16×16 = 256 bytes
        assert_eq!(y.len(), 256);

        // U and V should be quarter aligned resolution: 8×8 = 64 bytes each
        assert_eq!(u.len(), 64);
        assert_eq!(v.len(), 64);
    }

    #[test]
    fn test_rgba_to_yuv420p_2x2_subsampling() {
        // Create 2×2 image with different colors in each corner
        let mut rgba = vec![0u8; 2 * 2 * 4];

        // Top-left: Red
        rgba[0] = 255;
        rgba[1] = 0;
        rgba[2] = 0;
        rgba[3] = 255;

        // Top-right: Green
        rgba[4] = 0;
        rgba[5] = 255;
        rgba[6] = 0;
        rgba[7] = 255;

        // Bottom-left: Blue
        rgba[8] = 0;
        rgba[9] = 0;
        rgba[10] = 255;
        rgba[11] = 255;

        // Bottom-right: White
        rgba[12] = 255;
        rgba[13] = 255;
        rgba[14] = 255;
        rgba[15] = 255;

        let (y, u, v) = rgba_to_yuv420p(&rgba, 2, 2);

        // The Y plane is padded to 16×16; the four source pixels land at
        // y[0], y[1], y[16] and y[17]
        assert_eq!(y.len(), 256);

        // U and V are padded to 8×8; the single source block averages into
        // u[0] / v[0]
        assert_eq!(u.len(), 64);
        assert_eq!(v.len(), 64);

        // The averaged chroma should be close to neutral (128)
        // since we have all primary colors + white
        assert!(u[0] >= 100 && u[0] <= 156, "U value: {}", u[0]);
        assert!(v[0] >= 100 && v[0] <= 156, "V value: {}", v[0]);
    }
}

@@ -2015,7 +2015,7 @@ impl EditorApp {
}

impl eframe::App for EditorApp {
    fn update(&mut self, ctx: &egui::Context, _frame: &mut eframe::Frame) {
    fn update(&mut self, ctx: &egui::Context, frame: &mut eframe::Frame) {
        // Disable egui's built-in Ctrl+Plus/Minus zoom behavior
        // We handle zoom ourselves for the Stage pane
        ctx.options_mut(|o| {

@@ -2242,34 +2242,71 @@ impl eframe::App for EditorApp {
        }

        // Handle export dialog
        if let Some((settings, output_path)) = self.export_dialog.render(ctx) {
            // User clicked Export - start the export
            println!("🎬 [MAIN] Export button clicked: {}", output_path.display());
        if let Some(export_result) = self.export_dialog.render(ctx) {
            use export::dialog::ExportResult;

            if let Some(audio_controller) = &self.audio_controller {
                println!("🎬 [MAIN] Audio controller available");
            // Create orchestrator if needed
            if self.export_orchestrator.is_none() {
                self.export_orchestrator = Some(export::ExportOrchestrator::new());
            }

                // Create orchestrator if needed
                if self.export_orchestrator.is_none() {
                    println!("🎬 [MAIN] Creating new orchestrator");
                    self.export_orchestrator = Some(export::ExportOrchestrator::new());
                }
            let export_started = if let Some(orchestrator) = &mut self.export_orchestrator {
                match export_result {
                    ExportResult::AudioOnly(settings, output_path) => {
                        println!("🎵 [MAIN] Starting audio-only export: {}", output_path.display());

                // Start export
                if let Some(orchestrator) = &mut self.export_orchestrator {
                    println!("🎬 [MAIN] Calling start_audio_export...");
                    orchestrator.start_audio_export(
                        settings,
                        output_path,
                        Arc::clone(audio_controller),
                    );
                    println!("🎬 [MAIN] start_audio_export returned, opening progress dialog");
                    // Open progress dialog
                    self.export_progress_dialog.open();
                    println!("🎬 [MAIN] Progress dialog opened");
                        if let Some(audio_controller) = &self.audio_controller {
                            orchestrator.start_audio_export(
                                settings,
                                output_path,
                                Arc::clone(audio_controller),
                            );
                            true
                        } else {
                            eprintln!("❌ Cannot export audio: Audio controller not available");
                            false
                        }
                    }
                    ExportResult::VideoOnly(settings, output_path) => {
                        println!("🎬 [MAIN] Starting video-only export: {}", output_path.display());

                        match orchestrator.start_video_export(settings, output_path) {
                            Ok(()) => true,
                            Err(err) => {
                                eprintln!("❌ Failed to start video export: {}", err);
                                false
                            }
                        }
                    }
                    ExportResult::VideoWithAudio(video_settings, audio_settings, output_path) => {
                        println!("🎬🎵 [MAIN] Starting video+audio export: {}", output_path.display());

                        if let Some(audio_controller) = &self.audio_controller {
                            match orchestrator.start_video_with_audio_export(
                                video_settings,
                                audio_settings,
                                output_path,
                                Arc::clone(audio_controller),
                            ) {
                                Ok(()) => true,
                                Err(err) => {
                                    eprintln!("❌ Failed to start video+audio export: {}", err);
                                    false
                                }
                            }
                        } else {
                            eprintln!("❌ Cannot export with audio: Audio controller not available");
                            false
                        }
                    }
                }
            } else {
                eprintln!("❌ Cannot export: Export orchestrator not available");
                false
            };

            // Open progress dialog if export started successfully
            if export_started {
                self.export_progress_dialog.open();
            }
        }

@@ -2286,6 +2323,48 @@ impl eframe::App for EditorApp {
            ctx.request_repaint();
        }

        // Render video frames incrementally (if video export in progress)
        if let Some(orchestrator) = &mut self.export_orchestrator {
            if orchestrator.is_exporting() {
                // Get GPU resources from eframe's wgpu render state
                if let Some(render_state) = frame.wgpu_render_state() {
                    let device = &render_state.device;
                    let queue = &render_state.queue;

                    // Create temporary renderer and image cache for export
                    // Note: Creating a new renderer per frame is inefficient but simple
                    // TODO: Reuse renderer across frames by storing it in EditorApp
                    let mut temp_renderer = vello::Renderer::new(
                        device,
                        vello::RendererOptions {
                            use_cpu: false,
                            antialiasing_support: vello::AaSupport::all(),
                            num_init_threads: None,
                            pipeline_cache: None,
                        },
                    ).ok();

                    let mut temp_image_cache = lightningbeam_core::renderer::ImageCache::new();

                    if let Some(renderer) = &mut temp_renderer {
                        if let Ok(has_more) = orchestrator.render_next_video_frame(
                            self.action_executor.document_mut(),
                            device,
                            queue,
                            renderer,
                            &mut temp_image_cache,
                            &self.video_manager,
                        ) {
                            if has_more {
                                // More frames to render - request repaint for next frame
                                ctx.request_repaint();
                            }
                        }
                    }
                }
            }
        }

        // Poll export orchestrator for progress
        if let Some(orchestrator) = &mut self.export_orchestrator {
            // Only log occasionally to avoid spam