Improve export performance

Skyler Lehmkuhl 2026-03-09 13:39:56 -04:00
parent a18a335c60
commit 78e296ffde
9 changed files with 693 additions and 584 deletions

View File

@ -88,14 +88,53 @@ fn decode_image_asset(asset: &ImageAsset) -> Option<ImageBrush> {
// Per-Layer Rendering for HDR Compositing Pipeline
// ============================================================================
/// A single decoded video frame ready for GPU upload, with its document-space transform.
pub struct VideoRenderInstance {
/// sRGB RGBA8 pixel data (straight alpha — as decoded by ffmpeg).
pub rgba_data: Arc<Vec<u8>>,
pub width: u32,
pub height: u32,
/// Affine transform that maps from video-pixel space to document space.
/// Composed from the clip's animated position/rotation/scale properties.
pub transform: Affine,
/// Final opacity [0,1] after cascading layer and instance opacity.
pub opacity: f32,
}
/// Type of rendered layer for compositor handling
#[derive(Clone, Debug)]
pub enum RenderedLayerType {
/// Regular content layer (vector, video) - composite its scene
Content,
/// Effect layer - apply effects to current composite state
/// Vector / group layer — Vello scene in `RenderedLayer::scene` is used.
Vector,
/// Raster keyframe — bypass Vello; compositor uploads pixels via GPU texture cache.
Raster {
kf_id: Uuid,
width: u32,
height: u32,
/// True when `raw_pixels` changed since the last upload; forces a cache re-upload.
dirty: bool,
/// Accumulated parent-clip affine (IDENTITY for top-level layers).
/// Compositor composes this with the camera into the blit matrix.
transform: Affine,
},
/// Video layer — bypass Vello; each active clip instance carries decoded frame data.
Video {
instances: Vec<VideoRenderInstance>,
},
/// Floating raster selection — blitted immediately above its parent layer.
Float {
canvas_id: Uuid,
x: i32,
y: i32,
width: u32,
height: u32,
/// Accumulated parent-clip affine (IDENTITY for top-level layers).
transform: Affine,
/// CPU pixel data (sRGB-premultiplied RGBA8). Arc so the per-frame clone is O(1).
/// Used by the export compositor; the live compositor reads the GPU canvas directly.
pixels: std::sync::Arc<Vec<u8>>,
},
/// Effect layer — applied as a post-process pass on the HDR accumulator.
Effect {
/// Active effect instances at the current time
effect_instances: Vec<ClipInstance>,
},
}
@ -104,7 +143,7 @@ pub enum RenderedLayerType {
pub struct RenderedLayer {
/// The layer's unique identifier
pub layer_id: Uuid,
/// The Vello scene containing the layer's rendered content
/// Vello scene — only populated for `RenderedLayerType::Vector`.
pub scene: Scene,
/// Layer opacity (0.0 to 1.0)
pub opacity: f32,
@ -112,12 +151,12 @@ pub struct RenderedLayer {
pub blend_mode: BlendMode,
/// Whether this layer has any visible content
pub has_content: bool,
/// Type of layer for compositor (content vs effect)
/// Layer variant — determines how the compositor renders this entry.
pub layer_type: RenderedLayerType,
}
impl RenderedLayer {
/// Create a new rendered layer with default settings
/// Create a new vector layer with default settings.
pub fn new(layer_id: Uuid) -> Self {
Self {
layer_id,
@ -125,11 +164,11 @@ impl RenderedLayer {
opacity: 1.0,
blend_mode: BlendMode::Normal,
has_content: false,
layer_type: RenderedLayerType::Content,
layer_type: RenderedLayerType::Vector,
}
}
/// Create with specific opacity and blend mode
/// Create a vector layer with specific opacity and blend mode.
pub fn with_settings(layer_id: Uuid, opacity: f32, blend_mode: BlendMode) -> Self {
Self {
layer_id,
@ -137,11 +176,11 @@ impl RenderedLayer {
opacity,
blend_mode,
has_content: false,
layer_type: RenderedLayerType::Content,
layer_type: RenderedLayerType::Vector,
}
}
/// Create an effect layer with active effect instances
/// Create an effect layer with active effect instances.
pub fn effect_layer(layer_id: Uuid, opacity: f32, effect_instances: Vec<ClipInstance>) -> Self {
let has_content = !effect_instances.is_empty();
Self {
@ -179,12 +218,14 @@ pub fn render_document_for_compositing(
image_cache: &mut ImageCache,
video_manager: &std::sync::Arc<std::sync::Mutex<crate::video::VideoManager>>,
camera_frame: Option<&crate::webcam::CaptureFrame>,
floating_selection: Option<&crate::selection::RasterFloatingSelection>,
draw_checkerboard: bool,
) -> CompositeRenderResult {
let time = document.current_time;
// Render background to its own scene
let mut background = Scene::new();
render_background(document, &mut background, base_transform);
render_background(document, &mut background, base_transform, draw_checkerboard);
// Check if any layers are soloed
let any_soloed = document.visible_layers().any(|layer| layer.soloed());
@ -217,6 +258,36 @@ pub fn render_document_for_compositing(
rendered_layers.push(rendered);
}
// Insert the floating raster selection immediately above its parent layer.
// This ensures it composites at the correct z-position in both edit and export.
if let Some(float_sel) = floating_selection {
if let Some(pos) = rendered_layers.iter().position(|l| l.layer_id == float_sel.layer_id) {
// Inherit the parent layer's transform so the float follows it into
// any transformed clip context.
let parent_transform = match &rendered_layers[pos].layer_type {
RenderedLayerType::Raster { transform, .. } => *transform,
_ => Affine::IDENTITY,
};
let float_entry = RenderedLayer {
layer_id: Uuid::nil(), // sentinel — not a real document layer
scene: Scene::new(),
opacity: 1.0,
blend_mode: crate::gpu::BlendMode::Normal,
has_content: !float_sel.pixels.is_empty(),
layer_type: RenderedLayerType::Float {
canvas_id: float_sel.canvas_id,
x: float_sel.x,
y: float_sel.y,
width: float_sel.width,
height: float_sel.height,
transform: parent_transform,
pixels: std::sync::Arc::clone(&float_sel.pixels),
},
};
rendered_layers.insert(pos + 1, float_entry);
}
}
CompositeRenderResult {
background,
layers: rendered_layers,
@ -269,21 +340,74 @@ pub fn render_layer_isolated(
rendered.has_content = false;
}
AnyLayer::Video(video_layer) => {
use crate::animation::TransformProperty;
let layer_opacity = layer.opacity();
let mut video_mgr = video_manager.lock().unwrap();
// Only pass camera_frame for the layer that has camera enabled
let layer_camera_frame = if video_layer.camera_enabled { camera_frame } else { None };
render_video_layer_to_scene(
document,
time,
video_layer,
&mut rendered.scene,
base_transform,
1.0, // Full opacity - layer opacity handled in compositing
&mut video_mgr,
layer_camera_frame,
);
rendered.has_content = !video_layer.clip_instances.is_empty()
|| (video_layer.camera_enabled && camera_frame.is_some());
let mut instances = Vec::new();
for clip_instance in &video_layer.clip_instances {
let Some(video_clip) = document.video_clips.get(&clip_instance.clip_id) else { continue };
let Some(clip_time) = clip_instance.remap_time(time, video_clip.duration) else { continue };
let Some(frame) = video_mgr.get_frame(&clip_instance.clip_id, clip_time) else { continue };
// Evaluate animated transform properties.
let anim = &video_layer.layer.animation_data;
let id = clip_instance.id;
let t = &clip_instance.transform;
let x = anim.eval(&crate::animation::AnimationTarget::Object { id, property: TransformProperty::X }, time, t.x);
let y = anim.eval(&crate::animation::AnimationTarget::Object { id, property: TransformProperty::Y }, time, t.y);
let rotation = anim.eval(&crate::animation::AnimationTarget::Object { id, property: TransformProperty::Rotation }, time, t.rotation);
let scale_x = anim.eval(&crate::animation::AnimationTarget::Object { id, property: TransformProperty::ScaleX }, time, t.scale_x);
let scale_y = anim.eval(&crate::animation::AnimationTarget::Object { id, property: TransformProperty::ScaleY }, time, t.scale_y);
let skew_x = anim.eval(&crate::animation::AnimationTarget::Object { id, property: TransformProperty::SkewX }, time, t.skew_x);
let skew_y = anim.eval(&crate::animation::AnimationTarget::Object { id, property: TransformProperty::SkewY }, time, t.skew_y);
let inst_opacity = anim.eval(&crate::animation::AnimationTarget::Object { id, property: TransformProperty::Opacity }, time, clip_instance.opacity);
let cx = video_clip.width / 2.0;
let cy = video_clip.height / 2.0;
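// Conjugate the skew by translate(±centre) so it pivots about the clip
// centre rather than the top-left corner.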
let skew_transform = if skew_x != 0.0 || skew_y != 0.0 {
let sx = if skew_x != 0.0 { Affine::new([1.0, 0.0, skew_x.to_radians().tan(), 1.0, 0.0, 0.0]) } else { Affine::IDENTITY };
let sy = if skew_y != 0.0 { Affine::new([1.0, skew_y.to_radians().tan(), 0.0, 1.0, 0.0, 0.0]) } else { Affine::IDENTITY };
Affine::translate((cx, cy)) * sx * sy * Affine::translate((-cx, -cy))
} else { Affine::IDENTITY };
let clip_transform = Affine::translate((x, y))
* Affine::rotate(rotation.to_radians())
* Affine::scale_non_uniform(scale_x, scale_y)
* skew_transform;
instances.push(VideoRenderInstance {
rgba_data: frame.rgba_data.clone(),
width: frame.width,
height: frame.height,
transform: base_transform * clip_transform,
opacity: (layer_opacity * inst_opacity) as f32,
});
}
// Camera / webcam frame.
if instances.is_empty() && video_layer.camera_enabled {
if let Some(frame) = camera_frame {
let vw = frame.width as f64;
let vh = frame.height as f64;
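// Letterbox fit: uniform scale so the frame fits inside the document,
// centred with equal margins on the shorter axis.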
let scale = (document.width / vw).min(document.height / vh);
let ox = (document.width - vw * scale) / 2.0;
let oy = (document.height - vh * scale) / 2.0;
let cam_transform = base_transform
* Affine::translate((ox, oy))
* Affine::scale(scale);
instances.push(VideoRenderInstance {
rgba_data: frame.rgba_data.clone(),
width: frame.width,
height: frame.height,
transform: cam_transform,
opacity: layer_opacity as f32,
});
}
}
rendered.has_content = !instances.is_empty();
rendered.layer_type = RenderedLayerType::Video { instances };
}
AnyLayer::Effect(effect_layer) => {
// Effect layers are processed during compositing, not rendered to scene
@ -307,9 +431,16 @@ pub fn render_layer_isolated(
rendered.has_content = !group_layer.children.is_empty();
}
AnyLayer::Raster(raster_layer) => {
render_raster_layer_to_scene(raster_layer, time, &mut rendered.scene, base_transform);
rendered.has_content = raster_layer.keyframe_at(time)
.map_or(false, |kf| kf.has_pixels());
if let Some(kf) = raster_layer.keyframe_at(time) {
rendered.has_content = kf.has_pixels();
rendered.layer_type = RenderedLayerType::Raster {
kf_id: kf.id,
width: kf.width,
height: kf.height,
dirty: kf.texture_dirty,
transform: base_transform,
};
}
}
}
@ -368,30 +499,6 @@ fn render_raster_layer_to_scene(
scene.fill(Fill::NonZero, base_transform, &brush, None, &canvas_rect);
}
/// Render a video layer to an isolated scene (for compositing pipeline)
fn render_video_layer_to_scene(
document: &Document,
time: f64,
layer: &crate::layer::VideoLayer,
scene: &mut Scene,
base_transform: Affine,
parent_opacity: f64,
video_manager: &mut crate::video::VideoManager,
camera_frame: Option<&crate::webcam::CaptureFrame>,
) {
// Render using the existing function but to this isolated scene
render_video_layer(
document,
time,
layer,
scene,
base_transform,
parent_opacity,
video_manager,
camera_frame,
);
}
// ============================================================================
// Legacy Single-Scene Rendering (kept for backwards compatibility)
// ============================================================================
@ -415,8 +522,8 @@ pub fn render_document_with_transform(
image_cache: &mut ImageCache,
video_manager: &std::sync::Arc<std::sync::Mutex<crate::video::VideoManager>>,
) {
// 1. Draw background
render_background(document, scene, base_transform);
// 1. Draw background (with checkerboard for transparent backgrounds — UI path)
render_background(document, scene, base_transform, true);
// 2. Recursively render the root graphics object at current time
let time = document.current_time;
@ -436,12 +543,12 @@ pub fn render_document_with_transform(
}
/// Draw the document background
fn render_background(document: &Document, scene: &mut Scene, base_transform: Affine) {
fn render_background(document: &Document, scene: &mut Scene, base_transform: Affine, draw_checkerboard: bool) {
let background_rect = Rect::new(0.0, 0.0, document.width, document.height);
let bg = &document.background_color;
// Draw checkerboard behind transparent backgrounds
if bg.a < 255 {
// Draw checkerboard behind transparent backgrounds (UI-only; skip in export)
if draw_checkerboard && bg.a < 255 {
use vello::peniko::{Blob, Color, Extend, ImageAlphaType, ImageData, ImageQuality};
// 2x2 pixel checkerboard pattern: light/dark alternating
let light: [u8; 4] = [204, 204, 204, 255];

View File

@ -104,7 +104,9 @@ fn point_in_polygon(px: i32, py: i32, polygon: &[(i32, i32)]) -> bool {
#[derive(Clone, Debug)]
pub struct RasterFloatingSelection {
/// sRGB-encoded premultiplied RGBA, width × height × 4 bytes.
pub pixels: Vec<u8>,
/// Wrapped in Arc so the renderer can clone a reference each frame (O(1))
/// instead of copying megabytes of pixel data.
pub pixels: std::sync::Arc<Vec<u8>>,
pub width: u32,
pub height: u32,
/// Top-left position in canvas pixel coordinates.
@ -116,7 +118,7 @@ pub struct RasterFloatingSelection {
/// Snapshot of `raw_pixels` before the cut/paste was initiated, used for
/// undo (via `RasterStrokeAction`) when the float is committed, and for
/// Cancel (Escape) to restore the canvas without creating an undo entry.
pub canvas_before: Vec<u8>,
pub canvas_before: std::sync::Arc<Vec<u8>>,
/// Key for this float's GPU canvas in `GpuBrushEngine::canvases`.
/// Allows painting strokes directly onto the float buffer (B) without
/// touching the layer canvas (A).

View File

@ -81,6 +81,9 @@ pub struct ExportDialog {
/// Export type used the last time the user actually clicked Export for `current_project`.
last_export_type: Option<ExportType>,
/// Full path of the most recent successful export. Restored as the default on next open.
last_exported_path: Option<PathBuf>,
}
impl Default for ExportDialog {
@ -107,6 +110,7 @@ impl Default for ExportDialog {
output_filename: String::new(),
current_project: String::new(),
last_export_type: None,
last_exported_path: None,
output_dir: music_dir,
}
}
@ -139,8 +143,11 @@ impl ExportDialog {
};
self.current_project = project_name.to_owned();
// Pre-populate filename from project name if not already set.
if self.output_filename.is_empty() || !self.output_filename.contains(project_name) {
// Restore the last exported path if available; otherwise default to project name.
if let Some(last) = self.last_exported_path.clone() {
if let Some(dir) = last.parent() { self.output_dir = dir.to_path_buf(); }
if let Some(name) = last.file_name() { self.output_filename = name.to_string_lossy().into_owned(); }
} else if self.output_filename.is_empty() || !self.output_filename.contains(project_name) {
self.output_filename = format!("{}.{}", project_name, self.current_extension());
}
}
@ -596,8 +603,9 @@ impl ExportDialog {
let output_path = self.output_path.clone().unwrap();
// Remember this export type for next time this file is opened.
// Remember this export type and path for next time the dialog is opened.
self.last_export_type = Some(self.export_type);
self.last_exported_path = Some(output_path.clone());
let result = match self.export_type {
ExportType::Image => {

View File

@ -514,6 +514,7 @@ impl ExportOrchestrator {
renderer: &mut vello::Renderer,
image_cache: &mut ImageCache,
video_manager: &Arc<std::sync::Mutex<VideoManager>>,
floating_selection: Option<&lightningbeam_core::selection::RasterFloatingSelection>,
) -> Result<bool, String> {
if self.cancel_flag.load(Ordering::Relaxed) {
self.image_state = None;
@ -559,6 +560,8 @@ impl ExportOrchestrator {
device, queue, renderer, image_cache, video_manager,
gpu,
output_view,
floating_selection,
state.settings.allow_transparency,
)?;
queue.submit(Some(encoder.finish()));
@ -1106,6 +1109,8 @@ impl ExportOrchestrator {
document, timestamp, width, height,
device, queue, renderer, image_cache, video_manager,
gpu_resources, &acquired.rgba_texture_view,
None, // No floating selection during video export
false, // Video export is never transparent
)?;
let render_end = Instant::now();

View File

@ -79,6 +79,10 @@ pub struct ExportGpuResources {
pub linear_to_srgb_bind_group_layout: wgpu::BindGroupLayout,
/// Sampler for linear to sRGB conversion
pub linear_to_srgb_sampler: wgpu::Sampler,
/// Canvas blit pipeline for raster/video/float layers (bypasses Vello).
pub canvas_blit: crate::gpu_brush::CanvasBlitPipeline,
/// Per-keyframe GPU texture cache for raster layers during export.
pub raster_cache: std::collections::HashMap<uuid::Uuid, crate::gpu_brush::CanvasPair>,
}
impl ExportGpuResources {
@ -235,6 +239,8 @@ impl ExportGpuResources {
..Default::default()
});
let canvas_blit = crate::gpu_brush::CanvasBlitPipeline::new(device);
Self {
buffer_pool,
compositor,
@ -251,6 +257,8 @@ impl ExportGpuResources {
linear_to_srgb_pipeline,
linear_to_srgb_bind_group_layout,
linear_to_srgb_sampler,
canvas_blit,
raster_cache: std::collections::HashMap::new(),
}
}
@ -702,6 +710,233 @@ pub fn render_frame_to_rgba(
Ok(())
}
/// Composite all layers from `composite_result` into `gpu_resources.hdr_texture_view`.
///
/// Shared by both export functions. Handles every layer type:
/// - Vector/Group: Vello scene → sRGB → linear → composite
/// - Raster: upload pixels to `raster_cache` (if needed) → GPU blit → composite
/// - Video: sRGB straight-alpha → linear premultiplied → transient GPU texture → blit → composite
/// - Float: sRGB-premultiplied → linear → transient GPU texture → blit → composite
/// - Effect: apply post-process on the HDR accumulator
fn composite_document_to_hdr(
composite_result: &lightningbeam_core::renderer::CompositeRenderResult,
document: &Document,
device: &wgpu::Device,
queue: &wgpu::Queue,
renderer: &mut vello::Renderer,
gpu_resources: &mut ExportGpuResources,
width: u32,
height: u32,
allow_transparency: bool,
) -> Result<(), String> {
use vello::kurbo::Affine;
let layer_spec = BufferSpec::new(width, height, BufferFormat::Rgba8Srgb);
let hdr_spec = BufferSpec::new(width, height, BufferFormat::Rgba16Float);
let layer_render_params = vello::RenderParams {
base_color: vello::peniko::Color::TRANSPARENT,
width, height,
antialiasing_method: vello::AaConfig::Area,
};
// --- Background ---
let bg_srgb = gpu_resources.buffer_pool.acquire(device, layer_spec);
let bg_hdr = gpu_resources.buffer_pool.acquire(device, hdr_spec);
if let (Some(bg_srgb_view), Some(bg_hdr_view)) = (
gpu_resources.buffer_pool.get_view(bg_srgb),
gpu_resources.buffer_pool.get_view(bg_hdr),
) {
renderer.render_to_texture(device, queue, &composite_result.background, bg_srgb_view, &layer_render_params)
.map_err(|e| format!("Failed to render background: {e}"))?;
let mut enc = device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: Some("export_bg_srgb_to_linear") });
gpu_resources.srgb_to_linear.convert(device, &mut enc, bg_srgb_view, bg_hdr_view);
queue.submit(Some(enc.finish()));
let bg_layer = CompositorLayer::normal(bg_hdr, 1.0);
let mut enc = device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: Some("export_bg_composite") });
// When transparency is allowed, start from transparent black so the background's
// native alpha is preserved. Otherwise force an opaque black underlay.
let clear = if allow_transparency { [0.0, 0.0, 0.0, 0.0] } else { [0.0, 0.0, 0.0, 1.0] };
gpu_resources.compositor.composite(device, queue, &mut enc, &[bg_layer],
&gpu_resources.buffer_pool, &gpu_resources.hdr_texture_view, Some(clear));
queue.submit(Some(enc.finish()));
}
gpu_resources.buffer_pool.release(bg_srgb);
gpu_resources.buffer_pool.release(bg_hdr);
// --- Layers ---
for rendered_layer in &composite_result.layers {
if !rendered_layer.has_content { continue; }
match &rendered_layer.layer_type {
RenderedLayerType::Vector => {
let srgb_handle = gpu_resources.buffer_pool.acquire(device, layer_spec);
let hdr_layer_handle = gpu_resources.buffer_pool.acquire(device, hdr_spec);
if let (Some(srgb_view), Some(hdr_layer_view)) = (
gpu_resources.buffer_pool.get_view(srgb_handle),
gpu_resources.buffer_pool.get_view(hdr_layer_handle),
) {
renderer.render_to_texture(device, queue, &rendered_layer.scene, srgb_view, &layer_render_params)
.map_err(|e| format!("Failed to render layer: {e}"))?;
let mut enc = device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: Some("export_layer_srgb_to_linear") });
gpu_resources.srgb_to_linear.convert(device, &mut enc, srgb_view, hdr_layer_view);
queue.submit(Some(enc.finish()));
let compositor_layer = CompositorLayer::new(hdr_layer_handle, rendered_layer.opacity, rendered_layer.blend_mode);
let mut enc = device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: Some("export_layer_composite") });
gpu_resources.compositor.composite(device, queue, &mut enc, &[compositor_layer], &gpu_resources.buffer_pool, &gpu_resources.hdr_texture_view, None);
queue.submit(Some(enc.finish()));
}
gpu_resources.buffer_pool.release(srgb_handle);
gpu_resources.buffer_pool.release(hdr_layer_handle);
}
RenderedLayerType::Raster { kf_id, width: cw, height: ch, transform: layer_transform, dirty: _ } => {
let raw_pixels = document.get_layer(&rendered_layer.layer_id)
.and_then(|l| match l {
lightningbeam_core::layer::AnyLayer::Raster(rl) => rl.keyframe_at(document.current_time),
_ => None,
})
.filter(|kf| !kf.raw_pixels.is_empty())
.map(|kf| kf.raw_pixels.clone());
if let Some(pixels) = raw_pixels {
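// Upload at most once per keyframe id; subsequent frames of the export
// reuse the cached GPU texture instead of re-uploading the pixel buffer.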
if !gpu_resources.raster_cache.contains_key(kf_id) {
let canvas = crate::gpu_brush::CanvasPair::new(device, *cw, *ch);
canvas.upload(queue, &pixels);
gpu_resources.raster_cache.insert(*kf_id, canvas);
}
if let Some(canvas) = gpu_resources.raster_cache.get(kf_id) {
let hdr_layer_handle = gpu_resources.buffer_pool.acquire(device, hdr_spec);
if let Some(hdr_layer_view) = gpu_resources.buffer_pool.get_view(hdr_layer_handle) {
let bt = crate::gpu_brush::BlitTransform::new(*layer_transform, *cw, *ch, width, height);
gpu_resources.canvas_blit.blit(device, queue, canvas.src_view(), hdr_layer_view, &bt, None);
let compositor_layer = CompositorLayer::new(hdr_layer_handle, rendered_layer.opacity, rendered_layer.blend_mode);
let mut enc = device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: Some("export_raster_composite") });
gpu_resources.compositor.composite(device, queue, &mut enc, &[compositor_layer], &gpu_resources.buffer_pool, &gpu_resources.hdr_texture_view, None);
queue.submit(Some(enc.finish()));
}
gpu_resources.buffer_pool.release(hdr_layer_handle);
}
}
}
RenderedLayerType::Video { instances } => {
for inst in instances {
if inst.rgba_data.is_empty() { continue; }
let hdr_layer_handle = gpu_resources.buffer_pool.acquire(device, hdr_spec);
if let Some(hdr_layer_view) = gpu_resources.buffer_pool.get_view(hdr_layer_handle) {
// sRGB straight-alpha → linear premultiplied
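// (Per channel: linear = c / 12.92 for c <= 0.04045, else ((c + 0.055) / 1.055)^2.4,
// then multiplied by alpha so the compositor receives premultiplied data.)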
let linear: Vec<u8> = inst.rgba_data.chunks_exact(4).flat_map(|p| {
let a = p[3] as f32 / 255.0;
let lin = |c: u8| -> f32 {
let f = c as f32 / 255.0;
if f <= 0.04045 { f / 12.92 } else { ((f + 0.055) / 1.055).powf(2.4) }
};
let r = (lin(p[0]) * a * 255.0 + 0.5) as u8;
let g = (lin(p[1]) * a * 255.0 + 0.5) as u8;
let b = (lin(p[2]) * a * 255.0 + 0.5) as u8;
[r, g, b, p[3]]
}).collect();
let tex = upload_transient_texture(device, queue, &linear, inst.width, inst.height, Some("export_video_frame_tex"));
let tex_view = tex.create_view(&Default::default());
let bt = crate::gpu_brush::BlitTransform::new(inst.transform, inst.width, inst.height, width, height);
gpu_resources.canvas_blit.blit(device, queue, &tex_view, hdr_layer_view, &bt, None);
let compositor_layer = CompositorLayer::new(hdr_layer_handle, inst.opacity, lightningbeam_core::gpu::BlendMode::Normal);
let mut enc = device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: Some("export_video_composite") });
gpu_resources.compositor.composite(device, queue, &mut enc, &[compositor_layer], &gpu_resources.buffer_pool, &gpu_resources.hdr_texture_view, None);
queue.submit(Some(enc.finish()));
}
gpu_resources.buffer_pool.release(hdr_layer_handle);
}
}
RenderedLayerType::Float { x: float_x, y: float_y, width: fw, height: fh, transform: layer_transform, pixels, .. } => {
if !pixels.is_empty() {
// sRGB-premultiplied → linear-premultiplied
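// (Pixels are already premultiplied, so only the colour channels are
// linearised here; alpha passes through unchanged.)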
let linear: Vec<u8> = pixels.chunks_exact(4).flat_map(|p| {
let lin = |c: u8| -> u8 {
let f = c as f32 / 255.0;
let l = if f <= 0.04045 { f / 12.92 } else { ((f + 0.055) / 1.055).powf(2.4) };
(l * 255.0 + 0.5) as u8
};
[lin(p[0]), lin(p[1]), lin(p[2]), p[3]]
}).collect();
let tex = upload_transient_texture(device, queue, &linear, *fw, *fh, Some("export_float_tex"));
let tex_view = tex.create_view(&Default::default());
let hdr_layer_handle = gpu_resources.buffer_pool.acquire(device, hdr_spec);
if let Some(hdr_layer_view) = gpu_resources.buffer_pool.get_view(hdr_layer_handle) {
let float_to_vp = *layer_transform * Affine::translate((*float_x as f64, *float_y as f64));
let bt = crate::gpu_brush::BlitTransform::new(float_to_vp, *fw, *fh, width, height);
gpu_resources.canvas_blit.blit(device, queue, &tex_view, hdr_layer_view, &bt, None);
let compositor_layer = CompositorLayer::normal(hdr_layer_handle, 1.0);
let mut enc = device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: Some("export_float_composite") });
gpu_resources.compositor.composite(device, queue, &mut enc, &[compositor_layer], &gpu_resources.buffer_pool, &gpu_resources.hdr_texture_view, None);
queue.submit(Some(enc.finish()));
}
gpu_resources.buffer_pool.release(hdr_layer_handle);
}
}
RenderedLayerType::Effect { effect_instances } => {
let current_time = document.current_time;
for effect_instance in effect_instances {
let Some(effect_def) = document.get_effect_definition(&effect_instance.clip_id) else { continue; };
if !gpu_resources.effect_processor.is_compiled(&effect_def.id) {
let success = gpu_resources.effect_processor.compile_effect(device, effect_def);
if !success { eprintln!("Failed to compile effect: {}", effect_def.name); continue; }
}
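// Bridge the timeline ClipInstance into an EffectInstance for the processor.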
let effect_inst = lightningbeam_core::effect::EffectInstance::new(
effect_def,
effect_instance.timeline_start,
effect_instance.timeline_start + effect_instance.effective_duration(lightningbeam_core::effect::EFFECT_DURATION),
);
let effect_output_handle = gpu_resources.buffer_pool.acquire(device, hdr_spec);
if let Some(effect_output_view) = gpu_resources.buffer_pool.get_view(effect_output_handle) {
let mut enc = device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: Some("export_effect") });
let applied = gpu_resources.effect_processor.apply_effect(
device, queue, &mut enc, effect_def, &effect_inst,
&gpu_resources.hdr_texture_view, effect_output_view, width, height, current_time,
);
if applied {
queue.submit(Some(enc.finish()));
let effect_layer = CompositorLayer::normal(effect_output_handle, rendered_layer.opacity);
let mut copy_enc = device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: Some("export_effect_copy") });
// Replace the accumulator with the processed result.
gpu_resources.compositor.composite(device, queue, &mut copy_enc, &[effect_layer], &gpu_resources.buffer_pool, &gpu_resources.hdr_texture_view, Some([0.0, 0.0, 0.0, 0.0]));
queue.submit(Some(copy_enc.finish()));
}
}
gpu_resources.buffer_pool.release(effect_output_handle);
}
}
}
}
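// Advance the pool's frame counter so unused pooled buffers can be reclaimed.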
gpu_resources.buffer_pool.next_frame();
Ok(())
}
/// Upload `pixels` to a transient `Rgba8Unorm` GPU texture (TEXTURE_BINDING | COPY_DST).
fn upload_transient_texture(
device: &wgpu::Device,
queue: &wgpu::Queue,
pixels: &[u8],
width: u32,
height: u32,
label: Option<&'static str>,
) -> wgpu::Texture {
let tex = device.create_texture(&wgpu::TextureDescriptor {
label,
size: wgpu::Extent3d { width, height, depth_or_array_layers: 1 },
mip_level_count: 1, sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8Unorm,
usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
view_formats: &[],
});
queue.write_texture(
wgpu::TexelCopyTextureInfo { texture: &tex, mip_level: 0, origin: wgpu::Origin3d::ZERO, aspect: wgpu::TextureAspect::All },
pixels,
wgpu::TexelCopyBufferLayout { offset: 0, bytes_per_row: Some(width * 4), rows_per_image: Some(height) },
wgpu::Extent3d { width, height, depth_or_array_layers: 1 },
);
tex
}
/// Render a document frame using the HDR compositing pipeline with effects
///
/// This function uses the same rendering pipeline as the stage preview,
@ -748,193 +983,12 @@ pub fn render_frame_to_rgba_hdr(
image_cache,
video_manager,
None, // No webcam during export
None, // No floating selection during export
false, // No checkerboard in export
);
// Buffer specs for layer rendering
let layer_spec = BufferSpec::new(width, height, BufferFormat::Rgba8Srgb);
let hdr_spec = BufferSpec::new(width, height, BufferFormat::Rgba16Float);
// Render parameters for Vello (transparent background for layers)
let layer_render_params = vello::RenderParams {
base_color: vello::peniko::Color::TRANSPARENT,
width,
height,
antialiasing_method: vello::AaConfig::Area,
};
// First, render background and composite it
let bg_srgb_handle = gpu_resources.buffer_pool.acquire(device, layer_spec);
let bg_hdr_handle = gpu_resources.buffer_pool.acquire(device, hdr_spec);
if let (Some(bg_srgb_view), Some(bg_hdr_view)) = (
gpu_resources.buffer_pool.get_view(bg_srgb_handle),
gpu_resources.buffer_pool.get_view(bg_hdr_handle),
) {
// Render background scene
renderer.render_to_texture(device, queue, &composite_result.background, bg_srgb_view, &layer_render_params)
.map_err(|e| format!("Failed to render background: {}", e))?;
// Convert sRGB to linear HDR
let mut convert_encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("export_bg_srgb_to_linear_encoder"),
});
gpu_resources.srgb_to_linear.convert(device, &mut convert_encoder, bg_srgb_view, bg_hdr_view);
queue.submit(Some(convert_encoder.finish()));
// Composite background onto HDR texture (first layer, clears to black for export)
let bg_compositor_layer = CompositorLayer::normal(bg_hdr_handle, 1.0);
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("export_bg_composite_encoder"),
});
// Clear to black for export (unlike stage preview which has gray background)
gpu_resources.compositor.composite(
device,
queue,
&mut encoder,
&[bg_compositor_layer],
&gpu_resources.buffer_pool,
&gpu_resources.hdr_texture_view,
Some([0.0, 0.0, 0.0, 1.0]),
);
queue.submit(Some(encoder.finish()));
}
gpu_resources.buffer_pool.release(bg_srgb_handle);
gpu_resources.buffer_pool.release(bg_hdr_handle);
// Now render and composite each layer incrementally
for rendered_layer in &composite_result.layers {
if !rendered_layer.has_content {
continue;
}
match &rendered_layer.layer_type {
RenderedLayerType::Content => {
// Regular content layer - render to sRGB, convert to linear, then composite
let srgb_handle = gpu_resources.buffer_pool.acquire(device, layer_spec);
let hdr_layer_handle = gpu_resources.buffer_pool.acquire(device, hdr_spec);
if let (Some(srgb_view), Some(hdr_layer_view)) = (
gpu_resources.buffer_pool.get_view(srgb_handle),
gpu_resources.buffer_pool.get_view(hdr_layer_handle),
) {
// Render layer scene to sRGB buffer
renderer.render_to_texture(device, queue, &rendered_layer.scene, srgb_view, &layer_render_params)
.map_err(|e| format!("Failed to render layer: {}", e))?;
// Convert sRGB to linear HDR
let mut convert_encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("export_layer_srgb_to_linear_encoder"),
});
gpu_resources.srgb_to_linear.convert(device, &mut convert_encoder, srgb_view, hdr_layer_view);
queue.submit(Some(convert_encoder.finish()));
// Composite this layer onto the HDR accumulator with its opacity
let compositor_layer = CompositorLayer::new(
hdr_layer_handle,
rendered_layer.opacity,
rendered_layer.blend_mode,
);
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("export_layer_composite_encoder"),
});
gpu_resources.compositor.composite(
device,
queue,
&mut encoder,
&[compositor_layer],
&gpu_resources.buffer_pool,
&gpu_resources.hdr_texture_view,
None, // Don't clear - blend onto existing content
);
queue.submit(Some(encoder.finish()));
}
gpu_resources.buffer_pool.release(srgb_handle);
gpu_resources.buffer_pool.release(hdr_layer_handle);
}
RenderedLayerType::Effect { effect_instances } => {
// Effect layer - apply effects to the current HDR accumulator
let current_time = document.current_time;
for effect_instance in effect_instances {
// Get effect definition from document
let Some(effect_def) = document.get_effect_definition(&effect_instance.clip_id) else {
continue;
};
// Compile effect if needed
if !gpu_resources.effect_processor.is_compiled(&effect_def.id) {
let success = gpu_resources.effect_processor.compile_effect(device, effect_def);
if !success {
eprintln!("Failed to compile effect: {}", effect_def.name);
continue;
}
}
// Create EffectInstance from ClipInstance for the processor
let effect_inst = lightningbeam_core::effect::EffectInstance::new(
effect_def,
effect_instance.timeline_start,
effect_instance.timeline_start + effect_instance.effective_duration(lightningbeam_core::effect::EFFECT_DURATION),
);
// Acquire temp buffer for effect output (HDR format)
let effect_output_handle = gpu_resources.buffer_pool.acquire(device, hdr_spec);
if let Some(effect_output_view) = gpu_resources.buffer_pool.get_view(effect_output_handle) {
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("export_effect_encoder"),
});
// Apply effect: HDR accumulator → effect output buffer
let applied = gpu_resources.effect_processor.apply_effect(
device,
queue,
&mut encoder,
effect_def,
&effect_inst,
&gpu_resources.hdr_texture_view,
effect_output_view,
width,
height,
current_time,
);
if applied {
queue.submit(Some(encoder.finish()));
// Copy effect output back to HDR accumulator
let mut copy_encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("export_effect_copy_encoder"),
});
// Use compositor to copy (replacing content)
let effect_layer = CompositorLayer::normal(
effect_output_handle,
rendered_layer.opacity, // Apply effect layer opacity
);
gpu_resources.compositor.composite(
device,
queue,
&mut copy_encoder,
&[effect_layer],
&gpu_resources.buffer_pool,
&gpu_resources.hdr_texture_view,
Some([0.0, 0.0, 0.0, 0.0]), // Clear with transparent (we're replacing)
);
queue.submit(Some(copy_encoder.finish()));
}
}
gpu_resources.buffer_pool.release(effect_output_handle);
}
}
}
}
// Advance frame counter for buffer cleanup
gpu_resources.buffer_pool.next_frame();
// Video export is never transparent.
composite_document_to_hdr(&composite_result, document, device, queue, renderer, gpu_resources, width, height, false)?;
// Use persistent output texture (already created in ExportGpuResources)
let output_view = &gpu_resources.output_texture_view;
@ -1118,6 +1172,8 @@ pub fn render_frame_to_gpu_rgba(
video_manager: &Arc<std::sync::Mutex<VideoManager>>,
gpu_resources: &mut ExportGpuResources,
rgba_texture_view: &wgpu::TextureView,
floating_selection: Option<&lightningbeam_core::selection::RasterFloatingSelection>,
allow_transparency: bool,
) -> Result<wgpu::CommandEncoder, String> {
use vello::kurbo::Affine;
@ -1134,176 +1190,11 @@ pub fn render_frame_to_gpu_rgba(
image_cache,
video_manager,
None, // No webcam during export
floating_selection,
false, // No checkerboard in export
);
// Buffer specs for layer rendering
let layer_spec = BufferSpec::new(width, height, BufferFormat::Rgba8Srgb);
let hdr_spec = BufferSpec::new(width, height, BufferFormat::Rgba16Float);
// Render parameters for Vello (transparent background for layers)
let layer_render_params = vello::RenderParams {
base_color: vello::peniko::Color::TRANSPARENT,
width,
height,
antialiasing_method: vello::AaConfig::Area,
};
// Render background and composite it
let bg_srgb_handle = gpu_resources.buffer_pool.acquire(device, layer_spec);
let bg_hdr_handle = gpu_resources.buffer_pool.acquire(device, hdr_spec);
if let (Some(bg_srgb_view), Some(bg_hdr_view)) = (
gpu_resources.buffer_pool.get_view(bg_srgb_handle),
gpu_resources.buffer_pool.get_view(bg_hdr_handle),
) {
renderer.render_to_texture(device, queue, &composite_result.background, bg_srgb_view, &layer_render_params)
.map_err(|e| format!("Failed to render background: {}", e))?;
let mut convert_encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("export_bg_srgb_to_linear_encoder"),
});
gpu_resources.srgb_to_linear.convert(device, &mut convert_encoder, bg_srgb_view, bg_hdr_view);
queue.submit(Some(convert_encoder.finish()));
let bg_compositor_layer = CompositorLayer::normal(bg_hdr_handle, 1.0);
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("export_bg_composite_encoder"),
});
gpu_resources.compositor.composite(
device,
queue,
&mut encoder,
&[bg_compositor_layer],
&gpu_resources.buffer_pool,
&gpu_resources.hdr_texture_view,
Some([0.0, 0.0, 0.0, 1.0]),
);
queue.submit(Some(encoder.finish()));
}
gpu_resources.buffer_pool.release(bg_srgb_handle);
gpu_resources.buffer_pool.release(bg_hdr_handle);
// Render and composite each layer incrementally
for rendered_layer in &composite_result.layers {
if !rendered_layer.has_content {
continue;
}
match &rendered_layer.layer_type {
RenderedLayerType::Content => {
let srgb_handle = gpu_resources.buffer_pool.acquire(device, layer_spec);
let hdr_layer_handle = gpu_resources.buffer_pool.acquire(device, hdr_spec);
if let (Some(srgb_view), Some(hdr_layer_view)) = (
gpu_resources.buffer_pool.get_view(srgb_handle),
gpu_resources.buffer_pool.get_view(hdr_layer_handle),
) {
renderer.render_to_texture(device, queue, &rendered_layer.scene, srgb_view, &layer_render_params)
.map_err(|e| format!("Failed to render layer: {}", e))?;
let mut convert_encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("export_layer_srgb_to_linear_encoder"),
});
gpu_resources.srgb_to_linear.convert(device, &mut convert_encoder, srgb_view, hdr_layer_view);
queue.submit(Some(convert_encoder.finish()));
let compositor_layer = CompositorLayer::normal(hdr_layer_handle, rendered_layer.opacity);
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("export_layer_composite_encoder"),
});
gpu_resources.compositor.composite(
device,
queue,
&mut encoder,
&[compositor_layer],
&gpu_resources.buffer_pool,
&gpu_resources.hdr_texture_view,
None,
);
queue.submit(Some(encoder.finish()));
}
gpu_resources.buffer_pool.release(srgb_handle);
gpu_resources.buffer_pool.release(hdr_layer_handle);
}
RenderedLayerType::Effect { effect_instances } => {
// Effect layer - apply effects to the current HDR accumulator
let current_time = document.current_time;
for effect_instance in effect_instances {
// Get effect definition from document
let Some(effect_def) = document.get_effect_definition(&effect_instance.clip_id) else {
continue;
};
// Compile effect if needed
if !gpu_resources.effect_processor.is_compiled(&effect_def.id) {
let success = gpu_resources.effect_processor.compile_effect(device, effect_def);
if !success {
eprintln!("Failed to compile effect: {}", effect_def.name);
continue;
}
}
// Create EffectInstance from ClipInstance for the processor
let effect_inst = lightningbeam_core::effect::EffectInstance::new(
effect_def,
effect_instance.timeline_start,
effect_instance.timeline_start + effect_instance.effective_duration(lightningbeam_core::effect::EFFECT_DURATION),
);
// Acquire temp buffer for effect output (HDR format)
let effect_output_handle = gpu_resources.buffer_pool.acquire(device, hdr_spec);
if let Some(effect_output_view) = gpu_resources.buffer_pool.get_view(effect_output_handle) {
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("export_effect_encoder"),
});
// Apply effect: HDR accumulator → effect output buffer
let applied = gpu_resources.effect_processor.apply_effect(
device,
queue,
&mut encoder,
effect_def,
&effect_inst,
&gpu_resources.hdr_texture_view,
effect_output_view,
width,
height,
current_time,
);
if applied {
// Copy effect output back to HDR accumulator
encoder.copy_texture_to_texture(
wgpu::TexelCopyTextureInfo {
texture: gpu_resources.buffer_pool.get_texture(effect_output_handle).unwrap(),
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
aspect: wgpu::TextureAspect::All,
},
wgpu::TexelCopyTextureInfo {
texture: &gpu_resources.hdr_texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
aspect: wgpu::TextureAspect::All,
},
wgpu::Extent3d {
width,
height,
depth_or_array_layers: 1,
},
);
}
queue.submit(Some(encoder.finish()));
}
gpu_resources.buffer_pool.release(effect_output_handle);
}
}
}
}
composite_document_to_hdr(&composite_result, document, device, queue, renderer, gpu_resources, width, height, allow_transparency)?;
// Convert HDR to sRGB (linear → sRGB), render directly to external RGBA texture
let output_view = rgba_texture_view;

View File

@ -1720,18 +1720,64 @@ pub struct CanvasBlitPipeline {
pub mask_sampler: wgpu::Sampler,
}
/// Camera parameters uniform for canvas_blit.wgsl.
/// General affine blit transform for canvas_blit.wgsl.
///
/// Encodes the combined `viewport_uv → canvas_uv` mapping as a column-major 3×3
/// matrix packed into three `vec4` uniforms (std140 padding).
///
/// Build with [`BlitTransform::new`] by supplying:
/// * `layer_transform` — affine that maps **canvas pixels → viewport pixels**
/// (= `base_transform` from the renderer; includes camera pan/zoom and any
/// parent-clip affine for nested layers).
/// * `canvas_w`, `canvas_h` — canvas dimensions in pixels.
/// * `vp_w`, `vp_h` — viewport dimensions in pixels.
#[repr(C)]
#[derive(Clone, Copy, bytemuck::Pod, bytemuck::Zeroable)]
pub struct CameraParams {
pub pan_x: f32,
pub pan_y: f32,
pub zoom: f32,
pub canvas_w: f32,
pub canvas_h: f32,
pub viewport_w: f32,
pub viewport_h: f32,
pub _pad: f32,
pub struct BlitTransform {
/// Column 0 of the matrix (+ 1 padding float).
pub col0: [f32; 4],
/// Column 1 of the matrix (+ 1 padding float).
pub col1: [f32; 4],
/// Column 2 — translation column: `[tx, ty, 1.0, 0.0]`.
pub col2: [f32; 4],
}
impl BlitTransform {
/// Build from a `canvas_px → viewport_px` affine transform.
///
/// The resulting uniform maps **viewport UV [0,1]² → canvas UV [0,1]²** so
/// the fragment shader only needs a single `mat3x3 * vec3` multiply.
pub fn new(
layer_transform: kurbo::Affine,
canvas_w: u32,
canvas_h: u32,
vp_w: u32,
vp_h: u32,
) -> Self {
// Combined transform: viewport_uv → canvas_uv
//   = scale_uv * layer_transform.inverse() * scale_vp
//
// scale_vp:           viewport UV → viewport px
// layer_transform⁻¹:  viewport px → canvas px
// scale_uv:           canvas px → canvas UV
let scale_vp = kurbo::Affine::scale_non_uniform(vp_w as f64, vp_h as f64);
let scale_uv = kurbo::Affine::scale_non_uniform(
1.0 / canvas_w as f64,
1.0 / canvas_h as f64,
);
let combined = scale_uv * layer_transform.inverse() * scale_vp;
// kurbo::Affine coefficients: [a, b, c, d, e, f]
// x' = a*x + c*y + e
// y' = b*x + d*y + f
// Column-major 3×3: col0=(a,b,0), col1=(c,d,0), col2=(e,f,1)
let [a, b, c, d, e, f] = combined.as_coeffs();
Self {
col0: [a as f32, b as f32, 0.0, 0.0],
col1: [c as f32, d as f32, 0.0, 0.0],
col2: [e as f32, f as f32, 1.0, 0.0],
}
}
}
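// For reference: the removed `CameraParams` path is the special case of a
// pure pan + zoom camera. The old shader inverted it as
// `doc = (vp - pan) / zoom`, which callers now express by passing
// `Affine::translate((pan_x, pan_y)) * Affine::scale(zoom)` as
// `layer_transform`.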
impl CanvasBlitPipeline {
@ -1870,7 +1916,7 @@ impl CanvasBlitPipeline {
queue: &wgpu::Queue,
canvas_view: &wgpu::TextureView,
target_view: &wgpu::TextureView,
camera: &CameraParams,
transform: &BlitTransform,
mask_view: Option<&wgpu::TextureView>,
) {
// When no mask is provided, create a temporary 1×1 all-white texture.
@ -1905,14 +1951,14 @@ impl CanvasBlitPipeline {
&tmp_mask_view
}
};
// Upload camera params
// Upload blit transform
let cam_buf = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("canvas_blit_cam_buf"),
size: std::mem::size_of::<CameraParams>() as u64,
size: std::mem::size_of::<BlitTransform>() as u64,
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
mapped_at_creation: false,
});
queue.write_buffer(&cam_buf, 0, bytemuck::bytes_of(camera));
queue.write_buffer(&cam_buf, 0, bytemuck::bytes_of(transform));
let bg = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: Some("canvas_blit_bg"),

View File

@ -1999,7 +1999,8 @@ impl EditorApp {
let action = RasterStrokeAction::new(
float.layer_id, float.time,
float.canvas_before, canvas_after,
std::sync::Arc::try_unwrap(float.canvas_before).unwrap_or_else(|a| (*a).clone()),
canvas_after,
w, h,
);
if let Err(e) = self.action_executor.execute(Box::new(action)) {
@ -2018,7 +2019,7 @@ impl EditorApp {
let document = self.action_executor.document_mut();
let Some(AnyLayer::Raster(rl)) = document.get_layer_mut(&float.layer_id) else { return };
let Some(kf) = rl.keyframe_at_mut(float.time) else { return };
kf.raw_pixels = float.canvas_before;
kf.raw_pixels = std::sync::Arc::try_unwrap(float.canvas_before).unwrap_or_else(|a| (*a).clone());
}
/// Drop (discard) the floating selection keeping the hole punched in the
@ -2038,7 +2039,8 @@ impl EditorApp {
let (w, h) = (kf.width, kf.height);
let action = RasterStrokeAction::new(
float.layer_id, float.time,
float.canvas_before, canvas_after,
std::sync::Arc::try_unwrap(float.canvas_before).unwrap_or_else(|a| (*a).clone()),
canvas_after,
w, h,
);
if let Err(e) = self.action_executor.execute(Box::new(action)) {
@ -2059,7 +2061,7 @@ impl EditorApp {
if matches!(document.get_layer(&layer_id), Some(AnyLayer::Raster(_))) {
if let Some(float) = &self.selection.raster_floating {
self.clipboard_manager.copy(ClipboardContent::RasterPixels {
pixels: float.pixels.clone(),
pixels: (*float.pixels).clone(),
width: float.width,
height: float.height,
});
@ -2443,14 +2445,14 @@ impl EditorApp {
use lightningbeam_core::selection::{RasterFloatingSelection, RasterSelection};
self.selection.raster_floating = Some(RasterFloatingSelection {
pixels,
pixels: std::sync::Arc::new(pixels),
width,
height,
x: paste_x,
y: paste_y,
layer_id,
time: self.playback_time,
canvas_before,
canvas_before: std::sync::Arc::new(canvas_before),
canvas_id: uuid::Uuid::new_v4(),
});
// Update the marquee to show the floating selection bounds.
@ -5351,6 +5353,7 @@ impl eframe::App for EditorApp {
renderer,
&mut temp_image_cache,
&self.video_manager,
self.selection.raster_floating.as_ref(),
) {
Ok(false) => { ctx.request_repaint(); } // readback pending
Ok(true) => {} // done or cancelled

View File

@ -1,30 +1,31 @@
// Canvas blit shader.
//
// Renders a GPU raster canvas (at document resolution) into an Rgba16Float HDR
// buffer (at viewport resolution), applying the camera transform (pan + zoom)
// to map document-space pixels to viewport-space pixels.
// buffer (at viewport resolution), applying a general affine transform that maps
// viewport UV [0,1]² directly to canvas UV [0,1]².
//
// The combined inverse transform (viewport UV → canvas UV) is pre-computed on the
// CPU and uploaded as a column-major 3×3 matrix packed into three vec4 uniforms.
//
// The canvas stores premultiplied linear RGBA. We output it as-is so the HDR
// compositor sees the same premultiplied-linear format it always works with,
// bypassing the sRGB intermediate used for Vello layers.
//
// Any viewport pixel whose corresponding document coordinate falls outside
// [0, canvas_w) × [0, canvas_h) outputs transparent black.
// Any viewport pixel whose corresponding canvas coordinate falls outside [0,1)²
// outputs transparent black.
struct CameraParams {
pan_x: f32,
pan_y: f32,
zoom: f32,
canvas_w: f32,
canvas_h: f32,
viewport_w: f32,
viewport_h: f32,
_pad: f32,
struct BlitTransform {
/// Column 0 of the viewport_uv → canvas_uv affine matrix (+ padding).
col0: vec4<f32>,
/// Column 1 (+ padding).
col1: vec4<f32>,
/// Column 2: translation column col2.xy = translation, col2.z = 1 (+ padding).
col2: vec4<f32>,
}
@group(0) @binding(0) var canvas_tex: texture_2d<f32>;
@group(0) @binding(1) var canvas_sampler: sampler;
@group(0) @binding(2) var<uniform> camera: CameraParams;
@group(0) @binding(2) var<uniform> transform: BlitTransform;
/// Selection mask: R8Unorm, 255 = inside selection (keep), 0 = outside (discard).
/// A 1×1 all-white texture is bound when no selection is active.
@group(0) @binding(3) var mask_tex: texture_2d<f32>;
@ -48,14 +49,10 @@ fn vs_main(@builtin(vertex_index) vertex_index: u32) -> VertexOutput {
@fragment
fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
// Map viewport UV [0,1] → viewport pixel
let vp = in.uv * vec2<f32>(camera.viewport_w, camera.viewport_h);
// Map viewport pixel → document pixel (inverse camera transform)
let doc = (vp - vec2<f32>(camera.pan_x, camera.pan_y)) / camera.zoom;
// Map document pixel → canvas UV [0,1]
let canvas_uv = doc / vec2<f32>(camera.canvas_w, camera.canvas_h);
// Apply the combined inverse transform: viewport UV → canvas UV.
let m = mat3x3<f32>(transform.col0.xyz, transform.col1.xyz, transform.col2.xyz);
let canvas_uv_h = m * vec3<f32>(in.uv.x, in.uv.y, 1.0);
let canvas_uv = canvas_uv_h.xy;
// Out-of-bounds transparent
if canvas_uv.x < 0.0 || canvas_uv.x > 1.0

View File

@ -625,7 +625,7 @@ impl egui_wgpu::CallbackTrait for VelloCallback {
let pixels = if float_sel.pixels.is_empty() {
vec![0u8; (float_sel.width * float_sel.height * 4) as usize]
} else {
float_sel.pixels.clone()
(*float_sel.pixels).clone()
};
canvas.upload(queue, &pixels);
}
@ -898,6 +898,8 @@ impl egui_wgpu::CallbackTrait for VelloCallback {
&mut image_cache,
&shared.video_manager,
self.ctx.webcam_frame.as_ref(),
self.ctx.selection.raster_floating.as_ref(),
true, // Draw checkerboard for transparent backgrounds in the UI
);
drop(image_cache);
let _t_after_scene_build = std::time::Instant::now();
@ -1130,8 +1132,8 @@ impl egui_wgpu::CallbackTrait for VelloCallback {
}
match &rendered_layer.layer_type {
RenderedLayerType::Content => {
// Regular content layer - render to sRGB, convert to linear, then composite
RenderedLayerType::Vector => {
// Vector/group layer — render Vello scene → sRGB → linear → composite.
let srgb_handle = buffer_pool.acquire(device, layer_spec);
let hdr_layer_handle = buffer_pool.acquire(device, hdr_spec);
@ -1140,45 +1142,6 @@ impl egui_wgpu::CallbackTrait for VelloCallback {
buffer_pool.get_view(hdr_layer_handle),
&instance_resources.hdr_texture_view,
) {
// GPU canvas blit path: if a live GPU canvas exists for this
// raster layer (active tool B canvas, legacy painting_canvas, or
// raster layer texture cache), blit it directly into the HDR buffer
// (premultiplied linear → Rgba16Float), bypassing Vello entirely.
// Vello path: render to sRGB buffer → srgb_to_linear → HDR buffer.
let used_gpu_canvas = if let Some(kf_id) = gpu_canvas_kf.or(raster_cache_kf) {
let mut used = false;
if let Ok(gpu_brush) = shared.gpu_brush.lock() {
// Try tool canvases first, then the layer texture cache.
let canvas = gpu_brush.canvases.get(&kf_id)
.or_else(|| gpu_brush.raster_layer_cache.get(&kf_id));
if let Some(canvas) = canvas {
let camera = crate::gpu_brush::CameraParams {
pan_x: self.ctx.pan_offset.x,
pan_y: self.ctx.pan_offset.y,
zoom: self.ctx.zoom,
canvas_w: canvas.width as f32,
canvas_h: canvas.height as f32,
viewport_w: width as f32,
viewport_h: height as f32,
_pad: 0.0,
};
shared.canvas_blit.blit(
device, queue,
canvas.src_view(),
hdr_layer_view, // blit directly to HDR
&camera,
None, // no mask on layer canvas blit
);
used = true;
}
}
used
} else {
false
};
if !used_gpu_canvas {
// Render layer scene to sRGB buffer, then convert to HDR
if let Ok(mut renderer) = shared.renderer.lock() {
renderer.render_to_texture(device, queue, &rendered_layer.scene, srgb_view, &layer_render_params).ok();
}
@ -1187,26 +1150,17 @@ impl egui_wgpu::CallbackTrait for VelloCallback {
});
shared.srgb_to_linear.convert(device, &mut convert_encoder, srgb_view, hdr_layer_view);
queue.submit(Some(convert_encoder.finish()));
}
// Composite this layer onto the HDR accumulator with its opacity
let compositor_layer = lightningbeam_core::gpu::CompositorLayer::new(
hdr_layer_handle,
rendered_layer.opacity,
rendered_layer.blend_mode,
);
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("layer_composite_encoder"),
});
shared.compositor.composite(
device,
queue,
&mut encoder,
&[compositor_layer],
&buffer_pool,
hdr_view,
None, // Don't clear - blend onto existing content
device, queue, &mut encoder, &[compositor_layer], &buffer_pool, hdr_view, None,
);
queue.submit(Some(encoder.finish()));
}
@ -1214,6 +1168,154 @@ impl egui_wgpu::CallbackTrait for VelloCallback {
buffer_pool.release(srgb_handle);
buffer_pool.release(hdr_layer_handle);
}
RenderedLayerType::Raster { transform: layer_transform, .. } => {
// Raster layer — GPU canvas blit directly to HDR (bypasses Vello).
// Tool override canvas (gpu_canvas_kf) takes priority over cached texture.
if let Some(use_kf_id) = gpu_canvas_kf.or(raster_cache_kf) {
let hdr_layer_handle = buffer_pool.acquire(device, hdr_spec);
if let (Some(hdr_layer_view), Some(hdr_view)) = (
buffer_pool.get_view(hdr_layer_handle),
&instance_resources.hdr_texture_view,
) {
if let Ok(gpu_brush) = shared.gpu_brush.lock() {
let canvas = gpu_brush.canvases.get(&use_kf_id)
.or_else(|| gpu_brush.raster_layer_cache.get(&use_kf_id));
if let Some(canvas) = canvas {
let bt = crate::gpu_brush::BlitTransform::new(
*layer_transform,
canvas.width, canvas.height,
width, height,
);
shared.canvas_blit.blit(
device, queue, canvas.src_view(), hdr_layer_view, &bt, None,
);
}
}
let compositor_layer = lightningbeam_core::gpu::CompositorLayer::new(
hdr_layer_handle,
rendered_layer.opacity,
rendered_layer.blend_mode,
);
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("raster_composite_encoder"),
});
shared.compositor.composite(
device, queue, &mut encoder, &[compositor_layer], &buffer_pool, hdr_view, None,
);
queue.submit(Some(encoder.finish()));
}
buffer_pool.release(hdr_layer_handle);
}
}
RenderedLayerType::Video { instances } => {
// Video layer — per-instance: upload decoded frame → blit → composite.
for inst in instances {
if inst.rgba_data.is_empty() { continue; }
let hdr_layer_handle = buffer_pool.acquire(device, hdr_spec);
if let (Some(hdr_layer_view), Some(hdr_view)) = (
buffer_pool.get_view(hdr_layer_handle),
&instance_resources.hdr_texture_view,
) {
// Convert sRGB straight-alpha → linear premultiplied.
let linear: Vec<u8> = inst.rgba_data.chunks_exact(4).flat_map(|p| {
let a = p[3] as f32 / 255.0;
let lin = |c: u8| -> f32 {
let f = c as f32 / 255.0;
if f <= 0.04045 { f / 12.92 } else { ((f + 0.055) / 1.055).powf(2.4) }
};
let r = (lin(p[0]) * a * 255.0 + 0.5) as u8;
let g = (lin(p[1]) * a * 255.0 + 0.5) as u8;
let b = (lin(p[2]) * a * 255.0 + 0.5) as u8;
[r, g, b, p[3]]
}).collect();
let tex = device.create_texture(&wgpu::TextureDescriptor {
label: Some("video_frame_tex"),
size: wgpu::Extent3d { width: inst.width, height: inst.height, depth_or_array_layers: 1 },
mip_level_count: 1, sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8Unorm,
usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
view_formats: &[],
});
queue.write_texture(
wgpu::TexelCopyTextureInfo { texture: &tex, mip_level: 0, origin: wgpu::Origin3d::ZERO, aspect: wgpu::TextureAspect::All },
&linear,
wgpu::TexelCopyBufferLayout { offset: 0, bytes_per_row: Some(inst.width * 4), rows_per_image: Some(inst.height) },
wgpu::Extent3d { width: inst.width, height: inst.height, depth_or_array_layers: 1 },
);
let tex_view = tex.create_view(&wgpu::TextureViewDescriptor::default());
let bt = crate::gpu_brush::BlitTransform::new(
inst.transform, inst.width, inst.height, width, height,
);
shared.canvas_blit.blit(device, queue, &tex_view, hdr_layer_view, &bt, None);
let compositor_layer = lightningbeam_core::gpu::CompositorLayer::new(
hdr_layer_handle,
inst.opacity,
lightningbeam_core::gpu::BlendMode::Normal,
);
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("video_composite_encoder"),
});
shared.compositor.composite(
device, queue, &mut encoder, &[compositor_layer], &buffer_pool, hdr_view, None,
);
queue.submit(Some(encoder.finish()));
}
buffer_pool.release(hdr_layer_handle);
}
}
RenderedLayerType::Float { canvas_id: float_canvas_id, x: float_x, y: float_y, width: fw, height: fh, transform: layer_transform, pixels: _ } => {
// Floating raster selection — now composited at the correct z-position
// (immediately above its parent layer) rather than on top of everything.
//
// Override priority:
// 1. transform_display: transform tool is active on the float.
// 2. active_tool_render (layer_id=None): unified tool on the float.
// 3. float_canvas_id from this entry: normal float display.
let blit_params: Option<(uuid::Uuid, i32, i32, u32, u32)> =
if let Some(ref td) = self.ctx.transform_display {
Some((td.display_canvas_id, td.x, td.y, td.w, td.h))
} else if let Some(tr) = self.ctx.active_tool_render.as_ref().filter(|tr| tr.layer_id.is_none()) {
Some((tr.b_canvas_id, tr.x, tr.y, tr.width, tr.height))
} else {
Some((*float_canvas_id, *float_x, *float_y, *fw, *fh))
};
if let Some((blit_canvas_id, blit_x, blit_y, blit_w, blit_h)) = blit_params {
if let Ok(gpu_brush) = shared.gpu_brush.lock() {
if let Some(canvas) = gpu_brush.canvases.get(&blit_canvas_id) {
let float_hdr_handle = buffer_pool.acquire(device, hdr_spec);
if let (Some(fhdr_view), Some(hdr_view)) = (
buffer_pool.get_view(float_hdr_handle),
&instance_resources.hdr_texture_view,
) {
// float_canvas_px → viewport_px:
// layer_transform maps doc_px → viewport_px
// translate(blit_x, blit_y) maps float_canvas_px → doc_px
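// e.g. with an identity layer_transform, float pixel (0, 0) lands at viewport (blit_x, blit_y)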
let float_to_vp = *layer_transform
* Affine::translate((blit_x as f64, blit_y as f64));
let bt = crate::gpu_brush::BlitTransform::new(
float_to_vp, blit_w, blit_h, width, height,
);
shared.canvas_blit.blit(
device, queue, canvas.src_view(), fhdr_view, &bt,
float_mask_view.as_ref(),
);
let float_layer = lightningbeam_core::gpu::CompositorLayer::normal(float_hdr_handle, 1.0);
let mut enc = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("float_canvas_composite"),
});
shared.compositor.composite(device, queue, &mut enc, &[float_layer], &buffer_pool, hdr_view, None);
queue.submit(Some(enc.finish()));
}
buffer_pool.release(float_hdr_handle);
}
}
}
}
RenderedLayerType::Effect { effect_instances } => {
// Effect layer - apply effects to the current HDR accumulator
let current_time = self.ctx.document.current_time;
@@ -1388,59 +1490,6 @@ impl egui_wgpu::CallbackTrait for VelloCallback {
buffer_pool.release(clip_hdr_handle);
}
// Blit the float GPU canvas on top of all composited layers.
// The float_mask_view clips to the selection shape (None = full float visible).
let blit_params = if let Some(ref td) = self.ctx.transform_display {
// During transform: show the display canvas (compute shader output) instead of float.
Some((td.display_canvas_id, td.x, td.y, td.w, td.h))
} else if let Some(tr) = self.ctx.active_tool_render.as_ref().filter(|tr| tr.layer_id.is_none()) {
// Unified raster tool active on the float: show B canvas instead of float's own canvas.
Some((tr.b_canvas_id, tr.x, tr.y, tr.width, tr.height))
} else if let Some(ref float_sel) = self.ctx.selection.raster_floating {
// Regular float blit.
Some((float_sel.canvas_id, float_sel.x, float_sel.y, float_sel.width, float_sel.height))
} else {
None
};
if let Some((blit_canvas_id, blit_x, blit_y, blit_w, blit_h)) = blit_params {
if let Ok(gpu_brush) = shared.gpu_brush.lock() {
if let Some(canvas) = gpu_brush.canvases.get(&blit_canvas_id) {
let float_hdr_handle = buffer_pool.acquire(device, hdr_spec);
if let (Some(fhdr_view), Some(hdr_view)) = (
buffer_pool.get_view(float_hdr_handle),
&instance_resources.hdr_texture_view,
) {
let fcamera = crate::gpu_brush::CameraParams {
pan_x: self.ctx.pan_offset.x + blit_x as f32 * self.ctx.zoom,
pan_y: self.ctx.pan_offset.y + blit_y as f32 * self.ctx.zoom,
zoom: self.ctx.zoom,
canvas_w: blit_w as f32,
canvas_h: blit_h as f32,
viewport_w: width as f32,
viewport_h: height as f32,
_pad: 0.0,
};
// Blit directly to HDR (straight-alpha linear, no sRGB step)
shared.canvas_blit.blit(
device, queue,
canvas.src_view(),
fhdr_view,
&fcamera,
float_mask_view.as_ref(),
);
let float_layer = lightningbeam_core::gpu::CompositorLayer::normal(float_hdr_handle, 1.0);
let mut enc = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("float_canvas_composite"),
});
shared.compositor.composite(device, queue, &mut enc, &[float_layer], &buffer_pool, hdr_view, None);
queue.submit(Some(enc.finish()));
}
buffer_pool.release(float_hdr_handle);
}
}
}
// Advance frame counter for buffer cleanup
buffer_pool.next_frame();
drop(buffer_pool);
@@ -5302,7 +5351,8 @@ impl StagePane {
let (w, h) = (kf.width, kf.height);
let action = RasterStrokeAction::new(
float.layer_id, float.time,
float.canvas_before, canvas_after,
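// Move the undo buffer out of the Arc without copying when this is the
// last reference; fall back to a deep clone only if the Arc is still shared.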
std::sync::Arc::try_unwrap(float.canvas_before).unwrap_or_else(|a| (*a).clone()),
canvas_after,
w, h,
);
if let Err(e) = shared.action_executor.execute(Box::new(action)) {
@@ -5388,7 +5438,7 @@ impl StagePane {
let pixels = if float.pixels.is_empty() {
vec![0u8; (float.width * float.height * 4) as usize]
} else {
float.pixels.clone()
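// Deep-copies the Vec out of the Arc; an owned buffer is needed below.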
(*float.pixels).clone()
};
let (w, h, x, y) = (float.width, float.height, float.x, float.y);
@@ -5664,14 +5714,14 @@ impl StagePane {
// Re-set selection (commit_raster_floating_now cleared it) and create float.
shared.selection.raster_selection = Some(sel);
shared.selection.raster_floating = Some(RasterFloatingSelection {
pixels: float_pixels,
pixels: std::sync::Arc::new(float_pixels),
width: w,
height: h,
x: x0,
y: y0,
layer_id,
time,
canvas_before,
canvas_before: std::sync::Arc::new(canvas_before),
canvas_id: uuid::Uuid::new_v4(),
});
}
@@ -5802,7 +5852,7 @@ impl StagePane {
let (canvas_id, float_x, float_y, canvas_width, canvas_height,
buffer_before, layer_id, time) = {
let float = shared.selection.raster_floating.as_ref().unwrap();
let buf = float.pixels.clone();
let buf = (*float.pixels).clone();
(float.canvas_id, float.x, float.y, float.width, float.height,
buf, float.layer_id, float.time)
};
@@ -7642,7 +7692,7 @@ impl StagePane {
let float = shared.selection.raster_floating.as_ref().unwrap();
let expected_len = (float.width * float.height * 4) as usize;
let anchor_pixels = if float.pixels.len() == expected_len {
float.pixels.clone()
(*float.pixels).clone()
} else {
vec![0u8; expected_len]
};
@@ -8227,7 +8277,7 @@ impl StagePane {
anchor_pixels = if float_sel.pixels.is_empty() {
vec![0u8; (w * h * 4) as usize]
} else {
float_sel.pixels.clone()
(*float_sel.pixels).clone()
};
} else {
// Warp the full keyframe canvas.
@@ -8550,7 +8600,7 @@ impl StagePane {
anchor_pixels = if float_sel.pixels.is_empty() {
vec![0u8; (w * h * 4) as usize]
} else {
float_sel.pixels.clone()
(*float_sel.pixels).clone()
};
} else {
float_offset = None;
@@ -8705,7 +8755,7 @@ impl StagePane {
let pixels = if f.pixels.is_empty() {
vec![0u8; (f.width * f.height * 4) as usize]
} else {
f.pixels.clone()
(*f.pixels).clone()
};
(pixels, f.width, f.height, f.time, f.canvas_id, f.x as f32, f.y as f32, f.layer_id)
});
@@ -8838,7 +8888,7 @@ impl StagePane {
// Update the float's pixel buffer in place.
// The float's GPU canvas (display_canvas_id) already shows the result.
if let Some(ref mut float) = shared.selection.raster_floating {
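// Replacing the Arc (rather than mutating through it) keeps any
// outstanding clones of the previous buffer valid.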
float.pixels = after_pixels;
float.pixels = std::sync::Arc::new(after_pixels);
}
} else {
let action = RasterFillAction::new(
@@ -10988,7 +11038,7 @@ impl PaneRenderer for StagePane {
}
}
}
float.pixels = pixels;
float.pixels = std::sync::Arc::new(pixels);
// Invalidate the float's GPU canvas so the lazy-init
// in prepare() re-uploads the fresh pixels next frame.
self.pending_canvas_removals.push(float.canvas_id);
@@ -11079,7 +11129,7 @@ impl PaneRenderer for StagePane {
if let Some(ref mut float) = shared.selection.raster_floating {
self.pending_canvas_removal = Some(float.canvas_id);
float.canvas_id = rb.display_canvas_id;
float.pixels = rb.pixels;
float.pixels = std::sync::Arc::new(rb.pixels);
float.width = rb.width;
float.height = rb.height;
float.x = rb.x;
@@ -11110,7 +11160,7 @@ impl PaneRenderer for StagePane {
// Float warp: update the floating selection's pixel data and GPU canvas.
// Do NOT write to kf.raw_pixels (it belongs to the full-canvas keyframe).
if let Some(float_sel) = shared.selection.raster_floating.as_mut() {
float_sel.pixels = rb.after_pixels;
float_sel.pixels = std::sync::Arc::new(rb.after_pixels);
float_sel.canvas_id = rb.display_canvas_id;
}
// Release the old anchor canvas (float's original canvas_id, now replaced).