// Lightningbeam/lightningbeam-ui/lightningbeam-editor/src/waveform_gpu.rs
/// GPU-based waveform rendering using 2D textures with custom min/max mipmaps.
///
/// Raw audio samples are packed into Rgba16Float textures (R=left_min, G=left_max,
/// B=right_min, A=right_max). At mip 0, min=max=raw sample. Higher mip levels
/// are generated by a compute shader that reduces 4 consecutive samples per level.
///
/// Audio frames are packed row-major into 2D textures with a fixed power-of-2 width.
/// Long audio is split across multiple textures.
use std::collections::HashMap;
use wgpu;
use wgpu::util::DeviceExt;
/// Fixed texture width (power of 2) for all waveform textures.
/// Each texture row holds exactly this many audio frames, packed row-major.
const TEX_WIDTH: u32 = 2048;
/// GPU resources for all waveform textures, stored in CallbackResources.
///
/// Created lazily on the first `WaveformCallback::prepare` call; pipelines,
/// layouts, and the sampler are shared across all audio entries.
pub struct WaveformGpuResources {
    /// Per-audio-pool-index GPU data
    pub entries: HashMap<usize, WaveformGpuEntry>,
    /// Shared render pipeline
    render_pipeline: wgpu::RenderPipeline,
    /// Shared mipgen compute pipeline
    mipgen_pipeline: wgpu::ComputePipeline,
    /// Bind group layout for render shader (texture + sampler + uniforms)
    render_bind_group_layout: wgpu::BindGroupLayout,
    /// Bind group layout for mipgen shader (src texture + dst storage + params)
    mipgen_bind_group_layout: wgpu::BindGroupLayout,
    /// Sampler for waveform texture (nearest, since we do manual LOD selection)
    sampler: wgpu::Sampler,
    /// Per-callback-instance uniform buffers and bind groups.
    /// Keyed by (pool_index, instance_id). Each clip instance referencing the same
    /// pool_index gets its own uniform buffer so multiple clips don't clobber each other.
    /// Invalidated (per pool) whenever `upload_audio` recreates the textures.
    per_instance: HashMap<(usize, u64), (wgpu::Buffer, wgpu::BindGroup)>,
}
/// GPU data for a single audio file
#[allow(dead_code)] // textures/texture_views must stay alive to back bind groups; metadata for future use
pub struct WaveformGpuEntry {
    /// One texture per segment (for long audio split across multiple textures)
    pub textures: Vec<wgpu::Texture>,
    /// Texture views for each segment (full mip chain)
    pub texture_views: Vec<wgpu::TextureView>,
    /// Bind groups for the render shader (one per segment)
    pub render_bind_groups: Vec<wgpu::BindGroup>,
    /// Uniform buffers for each segment (updated per-frame via queue.write_buffer)
    pub uniform_buffers: Vec<wgpu::Buffer>,
    /// Frames covered by each texture segment
    pub frames_per_segment: u32,
    /// Total frame count of data currently in the texture
    pub total_frames: u64,
    /// Allocated texture height (may be larger than needed for current total_frames,
    /// e.g. the 60s pre-allocation used for live recording)
    pub tex_height: u32,
    /// Sample rate
    pub sample_rate: u32,
    /// Number of channels in source audio
    pub channels: u32,
}
/// Parameters passed to the waveform render shader.
///
/// `#[repr(C)]` — field order and explicit padding must mirror the uniform
/// struct in `panes/shaders/waveform.wgsl`. `tint_color` is a WGSL `vec4`,
/// which requires 16-byte alignment, hence `_pad1`.
#[repr(C)]
#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
pub struct WaveformParams {
    // Clip rectangle in screen space (presumably [min_x, min_y, max_x, max_y] —
    // TODO confirm ordering against waveform.wgsl).
    pub clip_rect: [f32; 4],
    pub viewport_start_time: f32,
    pub pixels_per_second: f32,
    pub audio_duration: f32,
    // Sample rate in Hz, widened to f32 for shader-side arithmetic.
    pub sample_rate: f32,
    pub clip_start_time: f32,
    pub trim_start: f32,
    // Mirrors the TEX_WIDTH constant so the shader can compute row/column.
    pub tex_width: f32,
    pub total_frames: f32,
    // First audio frame covered by the bound texture segment.
    pub segment_start_frame: f32,
    pub display_mode: f32,
    pub _pad1: [f32; 2], // align tint_color to 16 bytes (WGSL vec4 alignment)
    pub tint_color: [f32; 4],
    pub screen_size: [f32; 2],
    pub _pad: [f32; 2],
}
/// Parameters for the mipgen compute shader.
///
/// `#[repr(C)]` layout must match the params uniform in
/// `panes/shaders/waveform_mipgen.wgsl`.
#[repr(C)]
#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
struct MipgenParams {
    // Width in texels of the source mip level.
    src_width: u32,
    // Width in texels of the destination mip level.
    dst_width: u32,
    // Count of valid samples at the source level; presumably lets the shader
    // skip padding texels — confirm against waveform_mipgen.wgsl.
    src_sample_count: u32,
    // Pad the uniform to 16 bytes.
    _pad: u32,
}
/// Callback for rendering a single waveform segment.
///
/// `prepare()` uploads any pending audio and writes `params` into a
/// per-instance uniform buffer; `paint()` issues the draw.
pub struct WaveformCallback {
    /// Index of the audio in the pool; keys into `WaveformGpuResources::entries`.
    pub pool_index: usize,
    /// Which texture segment of the entry to draw.
    pub segment_index: usize,
    /// Shader uniforms for this draw (written every frame in `prepare`).
    pub params: WaveformParams,
    /// Color format of the render target; used when lazily creating the pipeline.
    pub target_format: wgpu::TextureFormat,
    /// Raw audio data for upload if this is the first time we see this pool_index
    pub pending_upload: Option<PendingUpload>,
    /// Unique ID for this callback instance (allows multiple clips sharing the same
    /// pool_index to have independent uniform buffers)
    pub instance_id: u64,
}
/// Raw audio data waiting to be uploaded to GPU.
pub struct PendingUpload {
    /// Interleaved samples (frame-major); length = frames * channels.
    pub samples: std::sync::Arc<Vec<f32>>,
    /// Sample rate in Hz.
    pub sample_rate: u32,
    /// Channel count; channels beyond the first two are ignored by the packer.
    pub channels: u32,
}
impl WaveformGpuResources {
/// Create all shared GPU objects: shaders, bind group layouts, the render and
/// mipgen pipelines, and the sampler.
///
/// `target_format` is baked into the render pipeline's color target, so it
/// must match the format of the surface the callback ultimately draws into.
pub fn new(device: &wgpu::Device, target_format: wgpu::TextureFormat) -> Self {
    // Render shader (fullscreen-triangle vertex + waveform fragment).
    let render_shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
        label: Some("waveform_render_shader"),
        source: wgpu::ShaderSource::Wgsl(include_str!("panes/shaders/waveform.wgsl").into()),
    });
    // Mipgen compute shader
    let mipgen_shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
        label: Some("waveform_mipgen_shader"),
        source: wgpu::ShaderSource::Wgsl(
            include_str!("panes/shaders/waveform_mipgen.wgsl").into(),
        ),
    });
    // Render bind group layout: texture + sampler + uniform buffer
    let render_bind_group_layout =
        device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("waveform_render_bgl"),
            entries: &[
                wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        view_dimension: wgpu::TextureViewDimension::D2,
                        multisampled: false,
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 1,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 2,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Buffer {
                        ty: wgpu::BufferBindingType::Uniform,
                        has_dynamic_offset: false,
                        min_binding_size: None,
                    },
                    count: None,
                },
            ],
        });
    // Mipgen bind group layout: src texture + dst storage texture + params.
    // The storage texture format must match the waveform texture format
    // (Rgba16Float) created in upload_audio.
    let mipgen_bind_group_layout =
        device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("waveform_mipgen_bgl"),
            entries: &[
                wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStages::COMPUTE,
                    ty: wgpu::BindingType::Texture {
                        sample_type: wgpu::TextureSampleType::Float { filterable: false },
                        view_dimension: wgpu::TextureViewDimension::D2,
                        multisampled: false,
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 1,
                    visibility: wgpu::ShaderStages::COMPUTE,
                    ty: wgpu::BindingType::StorageTexture {
                        access: wgpu::StorageTextureAccess::WriteOnly,
                        format: wgpu::TextureFormat::Rgba16Float,
                        view_dimension: wgpu::TextureViewDimension::D2,
                    },
                    count: None,
                },
                wgpu::BindGroupLayoutEntry {
                    binding: 2,
                    visibility: wgpu::ShaderStages::COMPUTE,
                    ty: wgpu::BindingType::Buffer {
                        ty: wgpu::BufferBindingType::Uniform,
                        has_dynamic_offset: false,
                        min_binding_size: None,
                    },
                    count: None,
                },
            ],
        });
    // Render pipeline
    let render_pipeline_layout =
        device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
            label: Some("waveform_render_pipeline_layout"),
            bind_group_layouts: &[&render_bind_group_layout],
            push_constant_ranges: &[],
        });
    let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
        label: Some("waveform_render_pipeline"),
        layout: Some(&render_pipeline_layout),
        vertex: wgpu::VertexState {
            module: &render_shader,
            entry_point: Some("vs_main"),
            // No vertex buffers: positions are generated in the vertex shader.
            buffers: &[],
            compilation_options: Default::default(),
        },
        fragment: Some(wgpu::FragmentState {
            module: &render_shader,
            entry_point: Some("fs_main"),
            targets: &[Some(wgpu::ColorTargetState {
                format: target_format,
                blend: Some(wgpu::BlendState::ALPHA_BLENDING),
                write_mask: wgpu::ColorWrites::ALL,
            })],
            compilation_options: Default::default(),
        }),
        primitive: wgpu::PrimitiveState {
            topology: wgpu::PrimitiveTopology::TriangleList,
            ..Default::default()
        },
        depth_stencil: None,
        multisample: wgpu::MultisampleState::default(),
        multiview: None,
        cache: None,
    });
    // Mipgen compute pipeline
    let mipgen_pipeline_layout =
        device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
            label: Some("waveform_mipgen_pipeline_layout"),
            bind_group_layouts: &[&mipgen_bind_group_layout],
            push_constant_ranges: &[],
        });
    let mipgen_pipeline = device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
        label: Some("waveform_mipgen_pipeline"),
        layout: Some(&mipgen_pipeline_layout),
        module: &mipgen_shader,
        entry_point: Some("main"),
        compilation_options: Default::default(),
        cache: None,
    });
    // Sampler: nearest filtering for explicit mip level selection
    let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
        label: Some("waveform_sampler"),
        mag_filter: wgpu::FilterMode::Nearest,
        min_filter: wgpu::FilterMode::Nearest,
        mipmap_filter: wgpu::FilterMode::Nearest,
        ..Default::default()
    });
    Self {
        entries: HashMap::new(),
        render_pipeline,
        mipgen_pipeline,
        render_bind_group_layout,
        mipgen_bind_group_layout,
        sampler,
        per_instance: HashMap::new(),
    }
}
/// Upload raw audio samples and generate mipmaps for a given pool index.
/// Returns command buffers that need to be submitted (for mipmap compute dispatches).
///
/// `samples` is interleaved (frame-major) audio; only the first two channels
/// are packed (mono is duplicated into both). Three outcomes:
/// 1. Empty input or no new frames: no-op, returns an empty Vec.
/// 2. Entry exists, audio grew, and the new frames still fit in the allocated
///    texture height: incremental row update (live-recording fast path).
/// 3. Otherwise: the entry is dropped and fully recreated.
pub fn upload_audio(
    &mut self,
    device: &wgpu::Device,
    queue: &wgpu::Queue,
    pool_index: usize,
    samples: &[f32],
    sample_rate: u32,
    channels: u32,
) -> Vec<wgpu::CommandBuffer> {
    // One frame = one sample per channel; max(1) guards against channels == 0.
    let new_total_frames = samples.len() / channels.max(1) as usize;
    if new_total_frames == 0 {
        return Vec::new();
    }
    // If entry exists and texture is large enough, do an incremental update
    let incremental = if let Some(entry) = self.entries.get(&pool_index) {
        let new_tex_height = (new_total_frames as u32 + TEX_WIDTH - 1) / TEX_WIDTH;
        if new_tex_height <= entry.tex_height && new_total_frames > entry.total_frames as usize {
            Some((entry.total_frames as usize, entry.tex_height))
        } else if new_total_frames <= entry.total_frames as usize {
            // NOTE(review): shorter or replaced audio under the same pool_index
            // is treated as "no new data", leaving stale texels on the GPU —
            // confirm pool indices are never reused for different audio.
            return Vec::new(); // No new data
        } else {
            None // Texture too small, need full recreate
        }
    } else {
        None // No entry yet
    };
    if let Some((old_frames, tex_height)) = incremental {
        // Write only the NEW rows into the existing texture. The partially
        // filled last row is rewritten from its start so it is repacked
        // consistently from `samples`.
        let start_row = old_frames as u32 / TEX_WIDTH;
        let end_row = (new_total_frames as u32 + TEX_WIDTH - 1) / TEX_WIDTH;
        let rows_to_write = end_row - start_row;
        let row_texel_count = (TEX_WIDTH * rows_to_write) as usize;
        // 4 half-floats per texel: R=left_min, G=left_max, B=right_min, A=right_max.
        let mut row_data: Vec<half::f16> = vec![half::f16::ZERO; row_texel_count * 4];
        let row_start_frame = start_row as usize * TEX_WIDTH as usize;
        for frame in 0..(rows_to_write as usize * TEX_WIDTH as usize) {
            let global_frame = row_start_frame + frame;
            if global_frame >= new_total_frames {
                break;
            }
            let sample_offset = global_frame * channels as usize;
            let left = if sample_offset < samples.len() {
                samples[sample_offset]
            } else {
                0.0
            };
            // Mono audio duplicates the left channel into the right slots.
            let right = if channels >= 2 && sample_offset + 1 < samples.len() {
                samples[sample_offset + 1]
            } else {
                left
            };
            // At mip 0, min == max == the raw sample value.
            let texel_offset = frame * 4;
            row_data[texel_offset] = half::f16::from_f32(left);
            row_data[texel_offset + 1] = half::f16::from_f32(left);
            row_data[texel_offset + 2] = half::f16::from_f32(right);
            row_data[texel_offset + 3] = half::f16::from_f32(right);
        }
        let entry = self.entries.get(&pool_index).unwrap();
        // NOTE(review): the incremental path only touches segment 0; it assumes
        // growing (recording) audio never spans multiple segments — confirm.
        queue.write_texture(
            wgpu::TexelCopyTextureInfo {
                texture: &entry.textures[0],
                mip_level: 0,
                origin: wgpu::Origin3d { x: 0, y: start_row, z: 0 },
                aspect: wgpu::TextureAspect::All,
            },
            bytemuck::cast_slice(&row_data),
            wgpu::TexelCopyBufferLayout {
                offset: 0,
                // 8 bytes per texel: 4 channels x 2-byte f16.
                bytes_per_row: Some(TEX_WIDTH * 8),
                rows_per_image: Some(rows_to_write),
            },
            wgpu::Extent3d {
                width: TEX_WIDTH,
                height: rows_to_write,
                depth_or_array_layers: 1,
            },
        );
        // Regenerate mipmaps (full chain, not just the dirty region)
        let mip_count = compute_mip_count(TEX_WIDTH, tex_height);
        let cmds = self.generate_mipmaps(
            device,
            &entry.textures[0],
            TEX_WIDTH,
            tex_height,
            mip_count,
            new_total_frames as u32,
        );
        // Update total_frames after borrow of entry is done
        self.entries.get_mut(&pool_index).unwrap().total_frames = new_total_frames as u64;
        return cmds;
    }
    // Full create (first upload or texture needs to grow)
    self.entries.remove(&pool_index);
    // Invalidate per-instance bind groups for this pool (texture changed)
    self.per_instance.retain(|&(pi, _), _| pi != pool_index);
    let total_frames = new_total_frames;
    // For live recording (pool_index == usize::MAX), pre-allocate extra texture
    // height to avoid frequent full recreates as recording grows.
    // Allocate 60 seconds ahead so incremental updates can fill without recreating.
    let alloc_frames = if pool_index == usize::MAX {
        let extra = sample_rate as usize * 60; // 60s of mono frames (texture is per-frame, not per-sample)
        total_frames + extra
    } else {
        total_frames
    };
    // One 2D texture can hold at most TEX_WIDTH * max_texture_dimension_2d frames.
    let max_frames_per_segment = (TEX_WIDTH as u64)
        * (device.limits().max_texture_dimension_2d as u64);
    // Use alloc_frames for texture sizing but total_frames for data
    let segment_count =
        ((total_frames as u64 + max_frames_per_segment - 1) / max_frames_per_segment).max(1) as usize;
    let frames_per_segment = if segment_count == 1 {
        total_frames as u32
    } else {
        (max_frames_per_segment as u32).min(total_frames as u32)
    };
    let mut textures = Vec::new();
    let mut texture_views = Vec::new();
    let mut render_bind_groups = Vec::new();
    let mut uniform_buffers = Vec::new();
    let mut all_command_buffers = Vec::new();
    for seg in 0..segment_count {
        let seg_start_frame = seg as u64 * frames_per_segment as u64;
        let seg_end_frame = ((seg + 1) as u64 * frames_per_segment as u64)
            .min(total_frames as u64);
        let seg_frame_count = (seg_end_frame - seg_start_frame) as u32;
        // Allocate texture large enough for future growth (recording) or exact fit (normal)
        let alloc_seg_frames = if pool_index == usize::MAX {
            (alloc_frames as u32).min(seg_frame_count + sample_rate * 60)
        } else {
            seg_frame_count
        };
        let tex_height = (alloc_seg_frames + TEX_WIDTH - 1) / TEX_WIDTH;
        let mip_count = compute_mip_count(TEX_WIDTH, tex_height);
        // Create texture with mip levels.
        // STORAGE_BINDING lets the mipgen compute shader write higher mips;
        // COPY_DST lets queue.write_texture upload mip 0.
        let texture = device.create_texture(&wgpu::TextureDescriptor {
            label: Some(&format!("waveform_{}_seg{}", pool_index, seg)),
            size: wgpu::Extent3d {
                width: TEX_WIDTH,
                height: tex_height,
                depth_or_array_layers: 1,
            },
            mip_level_count: mip_count,
            sample_count: 1,
            dimension: wgpu::TextureDimension::D2,
            format: wgpu::TextureFormat::Rgba16Float,
            usage: wgpu::TextureUsages::TEXTURE_BINDING
                | wgpu::TextureUsages::STORAGE_BINDING
                | wgpu::TextureUsages::COPY_DST,
            view_formats: &[],
        });
        // Pack raw samples into Rgba16Float data for mip 0
        // Only pack rows containing actual data (not the pre-allocated empty region)
        let data_height = (seg_frame_count + TEX_WIDTH - 1) / TEX_WIDTH;
        let data_texel_count = (TEX_WIDTH * data_height) as usize;
        let mut mip0_data: Vec<half::f16> = vec![half::f16::ZERO; data_texel_count * 4];
        for frame in 0..seg_frame_count as usize {
            let global_frame = seg_start_frame as usize + frame;
            let sample_offset = global_frame * channels as usize;
            let left = if sample_offset < samples.len() {
                samples[sample_offset]
            } else {
                0.0
            };
            // Mono audio duplicates the left channel into the right slots.
            let right = if channels >= 2 && sample_offset + 1 < samples.len() {
                samples[sample_offset + 1]
            } else {
                left
            };
            // At mip 0, min == max == the raw sample value.
            let texel_offset = frame * 4;
            mip0_data[texel_offset] = half::f16::from_f32(left);
            mip0_data[texel_offset + 1] = half::f16::from_f32(left);
            mip0_data[texel_offset + 2] = half::f16::from_f32(right);
            mip0_data[texel_offset + 3] = half::f16::from_f32(right);
        }
        // Upload mip 0 (only rows with actual data)
        if data_height > 0 {
            queue.write_texture(
                wgpu::TexelCopyTextureInfo {
                    texture: &texture,
                    mip_level: 0,
                    origin: wgpu::Origin3d::ZERO,
                    aspect: wgpu::TextureAspect::All,
                },
                bytemuck::cast_slice(&mip0_data),
                wgpu::TexelCopyBufferLayout {
                    offset: 0,
                    bytes_per_row: Some(TEX_WIDTH * 8),
                    rows_per_image: Some(data_height),
                },
                wgpu::Extent3d {
                    width: TEX_WIDTH,
                    height: data_height,
                    depth_or_array_layers: 1,
                },
            );
        }
        // Generate mipmaps via compute shader
        let cmds = self.generate_mipmaps(
            device,
            &texture,
            TEX_WIDTH,
            tex_height,
            mip_count,
            seg_frame_count,
        );
        all_command_buffers.extend(cmds);
        // Create view for full mip chain
        let view = texture.create_view(&wgpu::TextureViewDescriptor {
            label: Some(&format!("waveform_{}_seg{}_view", pool_index, seg)),
            ..Default::default()
        });
        // Create uniform buffer placeholder (contents written later in prepare)
        let uniform_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some(&format!("waveform_{}_seg{}_uniforms", pool_index, seg)),
            size: std::mem::size_of::<WaveformParams>() as u64,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });
        // Create render bind group
        let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some(&format!("waveform_{}_seg{}_bg", pool_index, seg)),
            layout: &self.render_bind_group_layout,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::TextureView(&view),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Sampler(&self.sampler),
                },
                wgpu::BindGroupEntry {
                    binding: 2,
                    resource: uniform_buffer.as_entire_binding(),
                },
            ],
        });
        textures.push(texture);
        texture_views.push(view);
        render_bind_groups.push(bind_group);
        uniform_buffers.push(uniform_buffer);
    }
    self.entries.insert(
        pool_index,
        WaveformGpuEntry {
            textures,
            texture_views,
            render_bind_groups,
            uniform_buffers,
            frames_per_segment,
            total_frames: total_frames as u64,
            tex_height: (alloc_frames as u32 + TEX_WIDTH - 1) / TEX_WIDTH,
            sample_rate,
            channels,
        },
    );
    all_command_buffers
}
/// Generate mipmaps for a texture using the compute shader.
///
/// Each level `n` is produced from level `n - 1` by one compute dispatch;
/// per the packing scheme, every destination texel reduces 4 consecutive
/// source samples. Returns a single command buffer containing all dispatches
/// (empty when there is only one mip level, so nothing needs submitting).
fn generate_mipmaps(
    &self,
    device: &wgpu::Device,
    texture: &wgpu::Texture,
    base_width: u32,
    base_height: u32,
    mip_count: u32,
    base_sample_count: u32,
) -> Vec<wgpu::CommandBuffer> {
    if mip_count <= 1 {
        return Vec::new();
    }
    let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
        label: Some("waveform_mipgen_encoder"),
    });
    let mut src_width = base_width;
    let mut src_height = base_height;
    let mut src_sample_count = base_sample_count;
    for level in 1..mip_count {
        let dst_width = (src_width / 2).max(1);
        let dst_height = (src_height / 2).max(1);
        let dst_sample_count = (src_sample_count + 3) / 4; // ceil(src/4)
        // Create views for specific mip levels (one-level views so the same
        // texture can be bound as sampled source and storage destination)
        let src_view = texture.create_view(&wgpu::TextureViewDescriptor {
            label: Some(&format!("mipgen_src_level_{}", level - 1)),
            base_mip_level: level - 1,
            mip_level_count: Some(1),
            ..Default::default()
        });
        let dst_view = texture.create_view(&wgpu::TextureViewDescriptor {
            label: Some(&format!("mipgen_dst_level_{}", level)),
            base_mip_level: level,
            mip_level_count: Some(1),
            ..Default::default()
        });
        // Create params buffer (small per-level uniform; contents are fixed,
        // so no COPY_DST is needed)
        let params = MipgenParams {
            src_width,
            dst_width,
            src_sample_count,
            _pad: 0,
        };
        let params_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
            label: Some(&format!("mipgen_params_level_{}", level)),
            contents: bytemuck::cast_slice(&[params]),
            usage: wgpu::BufferUsages::UNIFORM,
        });
        // Create bind group for this dispatch
        let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some(&format!("mipgen_bg_level_{}", level)),
            layout: &self.mipgen_bind_group_layout,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::TextureView(&src_view),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::TextureView(&dst_view),
                },
                wgpu::BindGroupEntry {
                    binding: 2,
                    resource: params_buffer.as_entire_binding(),
                },
            ],
        });
        // Dispatch compute: one thread per destination texel.
        let total_dst_texels = dst_width * dst_height;
        // 64 threads per workgroup — must match @workgroup_size in waveform_mipgen.wgsl.
        let workgroup_count = (total_dst_texels + 63) / 64;
        let mut pass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor {
            label: Some(&format!("mipgen_pass_level_{}", level)),
            timestamp_writes: None,
        });
        pass.set_pipeline(&self.mipgen_pipeline);
        pass.set_bind_group(0, &bind_group, &[]);
        pass.dispatch_workgroups(workgroup_count, 1, 1);
        // End the pass so the encoder can be reused for the next level.
        drop(pass);
        src_width = dst_width;
        src_height = dst_height;
        src_sample_count = dst_sample_count;
    }
    vec![encoder.finish()]
}
}
impl egui_wgpu::CallbackTrait for WaveformCallback {
/// Per-frame prep: lazily create shared GPU resources, upload pending audio,
/// and write this instance's shader params into its own uniform buffer.
fn prepare(
    &self,
    device: &wgpu::Device,
    queue: &wgpu::Queue,
    _screen_descriptor: &egui_wgpu::ScreenDescriptor,
    _egui_encoder: &mut wgpu::CommandEncoder,
    resources: &mut egui_wgpu::CallbackResources,
) -> Vec<wgpu::CommandBuffer> {
    // Initialize shared resources on first use
    if !resources.contains::<WaveformGpuResources>() {
        resources.insert(WaveformGpuResources::new(device, self.target_format));
    }
    let gpu_resources: &mut WaveformGpuResources = resources.get_mut().unwrap();
    // Upload audio data if pending; returned command buffers carry the
    // mipgen dispatches and are handed back to egui-wgpu for submission.
    let mut cmds = Vec::new();
    if let Some(ref upload) = self.pending_upload {
        let new_cmds = gpu_resources.upload_audio(
            device,
            queue,
            self.pool_index,
            &upload.samples,
            upload.sample_rate,
            upload.channels,
        );
        cmds.extend(new_cmds);
    }
    // Get or create a per-instance uniform buffer + bind group.
    // This ensures multiple clip instances sharing the same pool_index
    // don't clobber each other's shader params.
    let key = (self.pool_index, self.instance_id);
    if let Some(entry) = gpu_resources.entries.get(&self.pool_index) {
        if self.segment_index < entry.texture_views.len() {
            // NOTE(review): the cached bind group captures
            // texture_views[segment_index] at creation, but the cache key omits
            // segment_index — if the same instance_id were reused with a
            // different segment, the stale view would be bound. Confirm
            // instance ids are unique per segment upstream.
            let (buf, _bg) = gpu_resources.per_instance.entry(key).or_insert_with(|| {
                let buf = device.create_buffer(&wgpu::BufferDescriptor {
                    label: Some(&format!("waveform_{}_inst_{}", self.pool_index, self.instance_id)),
                    size: std::mem::size_of::<WaveformParams>() as u64,
                    usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
                    mapped_at_creation: false,
                });
                let bg = device.create_bind_group(&wgpu::BindGroupDescriptor {
                    label: Some(&format!("waveform_{}_inst_{}_bg", self.pool_index, self.instance_id)),
                    layout: &gpu_resources.render_bind_group_layout,
                    entries: &[
                        wgpu::BindGroupEntry {
                            binding: 0,
                            resource: wgpu::BindingResource::TextureView(&entry.texture_views[self.segment_index]),
                        },
                        wgpu::BindGroupEntry {
                            binding: 1,
                            resource: wgpu::BindingResource::Sampler(&gpu_resources.sampler),
                        },
                        wgpu::BindGroupEntry {
                            binding: 2,
                            resource: buf.as_entire_binding(),
                        },
                    ],
                });
                (buf, bg)
            });
            // Refresh this instance's params every frame.
            queue.write_buffer(buf, 0, bytemuck::cast_slice(&[self.params]));
        }
    }
    cmds
}
/// Draw this waveform instance using the bind group built in `prepare`.
/// Silently does nothing if resources or the per-instance entry are missing
/// (e.g. prepare never ran, or the audio entry was invalidated).
fn paint(
    &self,
    _info: eframe::egui::PaintCallbackInfo,
    render_pass: &mut wgpu::RenderPass<'static>,
    resources: &egui_wgpu::CallbackResources,
) {
    let Some(gpu) = resources.get::<WaveformGpuResources>() else {
        return;
    };
    let Some((_uniforms, bind_group)) =
        gpu.per_instance.get(&(self.pool_index, self.instance_id))
    else {
        return;
    };
    render_pass.set_pipeline(&gpu.render_pipeline);
    render_pass.set_bind_group(0, bind_group, &[]);
    // Fullscreen triangle
    render_pass.draw(0..3, 0..1);
}
}
/// Compute the number of mip levels for the given texture dimensions.
///
/// Equivalent to `floor(log2(max_dim)) + 1`: enough levels to reduce the
/// largest dimension down to a single texel. Uses integer `ilog2` instead of
/// the previous `f32::log2().floor()` to avoid float-rounding errors near
/// large powers of two; a zero dimension is clamped to 1 (one mip level),
/// matching the old saturating-cast behavior.
fn compute_mip_count(width: u32, height: u32) -> u32 {
    let max_dim = width.max(height).max(1);
    max_dim.ilog2() + 1
}
/// Get the fixed texture width used for all waveform textures.
///
/// `const fn` so callers can size CPU-side buffers in const contexts; the
/// value mirrors the private `TEX_WIDTH` constant.
pub const fn tex_width() -> u32 {
    TEX_WIDTH
}