Add automation inputs for audio graphs
This commit is contained in:
parent
8acac71d86
commit
4f3da810d0
|
|
@ -35,6 +35,7 @@ dasp_peak = "0.11"
|
|||
dasp_rms = "0.11"
|
||||
petgraph = "0.6"
|
||||
serde_json = "1.0"
|
||||
zip = "0.6"
|
||||
|
||||
# BeamDSP scripting engine
|
||||
beamdsp = { path = "../lightningbeam-ui/beamdsp" }
|
||||
|
|
|
|||
|
|
@ -1655,7 +1655,7 @@ impl Engine {
|
|||
// Extract the directory path from the preset path for resolving relative sample paths
|
||||
let preset_base_path = std::path::Path::new(&preset_path).parent();
|
||||
|
||||
match AudioGraph::from_preset(&preset, self.sample_rate, 8192, preset_base_path) {
|
||||
match AudioGraph::from_preset(&preset, self.sample_rate, 8192, preset_base_path, None) {
|
||||
Ok(graph) => {
|
||||
// Replace the track's graph
|
||||
match self.project.get_track_mut(track_id) {
|
||||
|
|
@ -1705,6 +1705,80 @@ impl Engine {
|
|||
}
|
||||
}
|
||||
|
||||
Command::GraphLoadLbins(track_id, path) => {
|
||||
match crate::audio::node_graph::lbins::load_lbins(&path) {
|
||||
Ok((preset, assets)) => {
|
||||
match AudioGraph::from_preset(&preset, self.sample_rate, 8192, None, Some(&assets)) {
|
||||
Ok(graph) => {
|
||||
match self.project.get_track_mut(track_id) {
|
||||
Some(TrackNode::Midi(track)) => {
|
||||
track.instrument_graph = graph;
|
||||
track.graph_is_default = true;
|
||||
let _ = self.event_tx.push(AudioEvent::GraphStateChanged(track_id));
|
||||
let _ = self.event_tx.push(AudioEvent::GraphPresetLoaded(track_id));
|
||||
}
|
||||
Some(TrackNode::Audio(track)) => {
|
||||
track.effects_graph = graph;
|
||||
track.graph_is_default = true;
|
||||
let _ = self.event_tx.push(AudioEvent::GraphStateChanged(track_id));
|
||||
let _ = self.event_tx.push(AudioEvent::GraphPresetLoaded(track_id));
|
||||
}
|
||||
Some(TrackNode::Group(track)) => {
|
||||
track.audio_graph = graph;
|
||||
track.graph_is_default = true;
|
||||
let _ = self.event_tx.push(AudioEvent::GraphStateChanged(track_id));
|
||||
let _ = self.event_tx.push(AudioEvent::GraphPresetLoaded(track_id));
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
let _ = self.event_tx.push(AudioEvent::GraphConnectionError(
|
||||
track_id,
|
||||
format!("Failed to load .lbins graph: {}", e),
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
let _ = self.event_tx.push(AudioEvent::GraphConnectionError(
|
||||
track_id,
|
||||
format!("Failed to open .lbins file: {}", e),
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Command::GraphSaveLbins(track_id, path, preset_name, description, tags) => {
|
||||
let graph = match self.project.get_track(track_id) {
|
||||
Some(TrackNode::Midi(track)) => Some(&track.instrument_graph),
|
||||
Some(TrackNode::Audio(track)) => Some(&track.effects_graph),
|
||||
Some(TrackNode::Group(track)) => Some(&track.audio_graph),
|
||||
_ => None,
|
||||
};
|
||||
if let Some(graph) = graph {
|
||||
let mut preset = graph.to_preset(&preset_name);
|
||||
preset.metadata.description = description;
|
||||
preset.metadata.tags = tags;
|
||||
preset.metadata.author = String::from("User");
|
||||
|
||||
match crate::audio::node_graph::lbins::save_lbins(&path, &preset, None) {
|
||||
Ok(()) => {
|
||||
let _ = self.event_tx.push(AudioEvent::GraphPresetSaved(
|
||||
track_id,
|
||||
path.to_string_lossy().to_string(),
|
||||
));
|
||||
}
|
||||
Err(e) => {
|
||||
let _ = self.event_tx.push(AudioEvent::GraphConnectionError(
|
||||
track_id,
|
||||
format!("Failed to save .lbins: {}", e),
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Command::GraphSaveTemplatePreset(track_id, voice_allocator_id, preset_path, preset_name) => {
|
||||
use crate::audio::node_graph::nodes::VoiceAllocatorNode;
|
||||
|
||||
|
|
@ -2456,6 +2530,27 @@ impl Engine {
|
|||
}
|
||||
}
|
||||
|
||||
Query::GetAutomationRange(track_id, node_id) => {
|
||||
use crate::audio::node_graph::nodes::AutomationInputNode;
|
||||
|
||||
if let Some(TrackNode::Midi(track)) = self.project.get_track(track_id) {
|
||||
let graph = &track.instrument_graph;
|
||||
let node_idx = NodeIndex::new(node_id as usize);
|
||||
|
||||
if let Some(graph_node) = graph.get_graph_node(node_idx) {
|
||||
if let Some(auto_node) = graph_node.node.as_any().downcast_ref::<AutomationInputNode>() {
|
||||
QueryResponse::AutomationRange(Ok((auto_node.value_min, auto_node.value_max)))
|
||||
} else {
|
||||
QueryResponse::AutomationRange(Err(format!("Node {} is not an AutomationInputNode", node_id)))
|
||||
}
|
||||
} else {
|
||||
QueryResponse::AutomationRange(Err(format!("Node {} not found", node_id)))
|
||||
}
|
||||
} else {
|
||||
QueryResponse::AutomationRange(Err(format!("Track {} not found or is not a MIDI track", track_id)))
|
||||
}
|
||||
}
|
||||
|
||||
Query::SerializeAudioPool(project_path) => {
|
||||
QueryResponse::AudioPoolSerialized(self.audio_pool.serialize(&project_path))
|
||||
}
|
||||
|
|
@ -2509,12 +2604,12 @@ impl Engine {
|
|||
match track_node {
|
||||
TrackNode::Audio(track) => {
|
||||
// Load into effects graph with proper buffer size (8192 to handle any callback size)
|
||||
track.effects_graph = AudioGraph::from_preset(&preset, self.sample_rate, 8192, preset_base_path)?;
|
||||
track.effects_graph = AudioGraph::from_preset(&preset, self.sample_rate, 8192, preset_base_path, None)?;
|
||||
Ok(())
|
||||
}
|
||||
TrackNode::Midi(track) => {
|
||||
// Load into instrument graph with proper buffer size (8192 to handle any callback size)
|
||||
track.instrument_graph = AudioGraph::from_preset(&preset, self.sample_rate, 8192, preset_base_path)?;
|
||||
track.instrument_graph = AudioGraph::from_preset(&preset, self.sample_rate, 8192, preset_base_path, None)?;
|
||||
Ok(())
|
||||
}
|
||||
TrackNode::Group(_) => {
|
||||
|
|
@ -3396,6 +3491,25 @@ impl EngineController {
|
|||
));
|
||||
}
|
||||
|
||||
/// Add a keyframe to an AutomationInput node
|
||||
pub fn automation_add_keyframe(&mut self, track_id: TrackId, node_id: u32,
|
||||
time: f64, value: f32, interpolation: String,
|
||||
ease_out: (f32, f32), ease_in: (f32, f32)) {
|
||||
let _ = self.command_tx.push(Command::AutomationAddKeyframe(
|
||||
track_id, node_id, time, value, interpolation, ease_out, ease_in));
|
||||
}
|
||||
|
||||
/// Remove a keyframe from an AutomationInput node
|
||||
pub fn automation_remove_keyframe(&mut self, track_id: TrackId, node_id: u32, time: f64) {
|
||||
let _ = self.command_tx.push(Command::AutomationRemoveKeyframe(
|
||||
track_id, node_id, time));
|
||||
}
|
||||
|
||||
/// Set the display name of an AutomationInput node
|
||||
pub fn automation_set_name(&mut self, track_id: TrackId, node_id: u32, name: String) {
|
||||
let _ = self.command_tx.push(Command::AutomationSetName(track_id, node_id, name));
|
||||
}
|
||||
|
||||
/// Start recording on a track
|
||||
pub fn start_recording(&mut self, track_id: TrackId, start_time: f64) {
|
||||
let _ = self.command_tx.push(Command::StartRecording(track_id, start_time));
|
||||
|
|
@ -3542,6 +3656,16 @@ impl EngineController {
|
|||
let _ = self.command_tx.push(Command::GraphLoadPreset(track_id, preset_path));
|
||||
}
|
||||
|
||||
/// Load a `.lbins` instrument bundle into a track's graph
|
||||
pub fn graph_load_lbins(&mut self, track_id: TrackId, path: std::path::PathBuf) {
|
||||
let _ = self.command_tx.push(Command::GraphLoadLbins(track_id, path));
|
||||
}
|
||||
|
||||
/// Save a track's graph as a `.lbins` instrument bundle
|
||||
pub fn graph_save_lbins(&mut self, track_id: TrackId, path: std::path::PathBuf, preset_name: String, description: String, tags: Vec<String>) {
|
||||
let _ = self.command_tx.push(Command::GraphSaveLbins(track_id, path, preset_name, description, tags));
|
||||
}
|
||||
|
||||
/// Save a VoiceAllocator's template graph as a preset
|
||||
pub fn graph_save_template_preset(&mut self, track_id: TrackId, voice_allocator_id: u32, preset_path: String, preset_name: String) {
|
||||
let _ = self.command_tx.push(Command::GraphSaveTemplatePreset(track_id, voice_allocator_id, preset_path, preset_name));
|
||||
|
|
@ -3809,6 +3933,25 @@ impl EngineController {
|
|||
Err("Query timeout".to_string())
|
||||
}
|
||||
|
||||
/// Query automation node value range (min, max)
|
||||
pub fn query_automation_range(&mut self, track_id: TrackId, node_id: u32) -> Result<(f32, f32), String> {
|
||||
if let Err(_) = self.query_tx.push(Query::GetAutomationRange(track_id, node_id)) {
|
||||
return Err("Failed to send query - queue full".to_string());
|
||||
}
|
||||
|
||||
let start = std::time::Instant::now();
|
||||
let timeout = std::time::Duration::from_millis(100);
|
||||
|
||||
while start.elapsed() < timeout {
|
||||
if let Ok(QueryResponse::AutomationRange(result)) = self.query_response_rx.pop() {
|
||||
return result;
|
||||
}
|
||||
std::thread::sleep(std::time::Duration::from_micros(50));
|
||||
}
|
||||
|
||||
Err("Query timeout".to_string())
|
||||
}
|
||||
|
||||
/// Serialize the audio pool for project saving
|
||||
pub fn serialize_audio_pool(&mut self, project_path: &std::path::Path) -> Result<Vec<crate::audio::pool::AudioPoolEntry>, String> {
|
||||
// Send query
|
||||
|
|
|
|||
|
|
@ -1053,7 +1053,7 @@ impl AudioGraph {
|
|||
}
|
||||
|
||||
/// Deserialize a preset into the graph
|
||||
pub fn from_preset(preset: &crate::audio::node_graph::preset::GraphPreset, sample_rate: u32, buffer_size: usize, preset_base_path: Option<&std::path::Path>) -> Result<Self, String> {
|
||||
pub fn from_preset(preset: &crate::audio::node_graph::preset::GraphPreset, sample_rate: u32, buffer_size: usize, preset_base_path: Option<&std::path::Path>, embedded_assets: Option<&std::collections::HashMap<String, Vec<u8>>>) -> Result<Self, String> {
|
||||
use crate::audio::node_graph::nodes::*;
|
||||
use petgraph::stable_graph::NodeIndex;
|
||||
use std::collections::HashMap;
|
||||
|
|
@ -1124,7 +1124,7 @@ impl AudioGraph {
|
|||
if serialized_node.node_type == "VoiceAllocator" {
|
||||
if let Some(ref template_preset) = serialized_node.template_graph {
|
||||
if let Some(va) = node.as_any_mut().downcast_mut::<VoiceAllocatorNode>() {
|
||||
let template_graph = Self::from_preset(template_preset, sample_rate, buffer_size, preset_base_path)?;
|
||||
let template_graph = Self::from_preset(template_preset, sample_rate, buffer_size, preset_base_path, embedded_assets)?;
|
||||
*va.template_graph_mut() = template_graph;
|
||||
va.rebuild_voices();
|
||||
}
|
||||
|
|
@ -1182,10 +1182,28 @@ impl AudioGraph {
|
|||
sampler_node.set_sample(samples, embedded.sample_rate as f32);
|
||||
}
|
||||
} else if let Some(ref path) = file_path {
|
||||
// Fall back to loading from file (resolve path relative to preset)
|
||||
let resolved_path = resolve_sample_path(path);
|
||||
if let Err(e) = sampler_node.load_sample_from_file(&resolved_path) {
|
||||
eprintln!("Failed to load sample from {}: {}", resolved_path, e);
|
||||
// Check embedded assets map first (from .lbins bundle)
|
||||
let loaded = if let Some(assets) = embedded_assets {
|
||||
if let Some(bytes) = assets.get(path.as_str()) {
|
||||
match crate::audio::sample_loader::load_audio_from_bytes(bytes, path) {
|
||||
Ok(data) => {
|
||||
sampler_node.set_sample(data.samples, data.sample_rate as f32);
|
||||
true
|
||||
}
|
||||
Err(e) => {
|
||||
eprintln!("Failed to decode bundled sample {}: {}", path, e);
|
||||
false
|
||||
}
|
||||
}
|
||||
} else { false }
|
||||
} else { false };
|
||||
|
||||
if !loaded {
|
||||
// Fall back to loading from filesystem
|
||||
let resolved_path = resolve_sample_path(path);
|
||||
if let Err(e) = sampler_node.load_sample_from_file(&resolved_path) {
|
||||
eprintln!("Failed to load sample from {}: {}", resolved_path, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -1225,20 +1243,49 @@ impl AudioGraph {
|
|||
);
|
||||
}
|
||||
} else if let Some(ref path) = layer.file_path {
|
||||
// Fall back to loading from file (resolve path relative to preset)
|
||||
let resolved_path = resolve_sample_path(path);
|
||||
if let Err(e) = multi_sampler_node.load_layer_from_file(
|
||||
&resolved_path,
|
||||
layer.key_min,
|
||||
layer.key_max,
|
||||
layer.root_key,
|
||||
layer.velocity_min,
|
||||
layer.velocity_max,
|
||||
layer.loop_start,
|
||||
layer.loop_end,
|
||||
layer.loop_mode,
|
||||
) {
|
||||
eprintln!("Failed to load sample layer from {}: {}", resolved_path, e);
|
||||
// Check embedded assets map first (from .lbins bundle)
|
||||
let loaded = if let Some(assets) = embedded_assets {
|
||||
if let Some(bytes) = assets.get(path.as_str()) {
|
||||
match crate::audio::sample_loader::load_audio_from_bytes(bytes, path) {
|
||||
Ok(data) => {
|
||||
multi_sampler_node.add_layer(
|
||||
data.samples,
|
||||
data.sample_rate as f32,
|
||||
layer.key_min,
|
||||
layer.key_max,
|
||||
layer.root_key,
|
||||
layer.velocity_min,
|
||||
layer.velocity_max,
|
||||
layer.loop_start,
|
||||
layer.loop_end,
|
||||
layer.loop_mode,
|
||||
);
|
||||
true
|
||||
}
|
||||
Err(e) => {
|
||||
eprintln!("Failed to decode bundled sample layer {}: {}", path, e);
|
||||
false
|
||||
}
|
||||
}
|
||||
} else { false }
|
||||
} else { false };
|
||||
|
||||
if !loaded {
|
||||
// Fall back to loading from filesystem
|
||||
let resolved_path = resolve_sample_path(path);
|
||||
if let Err(e) = multi_sampler_node.load_layer_from_file(
|
||||
&resolved_path,
|
||||
layer.key_min,
|
||||
layer.key_max,
|
||||
layer.root_key,
|
||||
layer.velocity_min,
|
||||
layer.velocity_max,
|
||||
layer.loop_start,
|
||||
layer.loop_end,
|
||||
layer.loop_mode,
|
||||
) {
|
||||
eprintln!("Failed to load sample layer from {}: {}", resolved_path, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -1258,6 +1305,9 @@ impl AudioGraph {
|
|||
let result = if let Some(bundled_name) = model_path.strip_prefix("bundled:") {
|
||||
eprintln!("[AmpSim] Preset: loading bundled model {:?}", bundled_name);
|
||||
amp_sim.load_bundled_model(bundled_name)
|
||||
} else if let Some(bytes) = embedded_assets.and_then(|a| a.get(model_path.as_str())) {
|
||||
eprintln!("[AmpSim] Preset: loading from bundle {:?}", model_path);
|
||||
amp_sim.load_model_from_bytes(model_path, bytes)
|
||||
} else {
|
||||
let resolved_path = resolve_sample_path(model_path);
|
||||
eprintln!("[AmpSim] Preset: loading from file {:?}", resolved_path);
|
||||
|
|
|
|||
|
|
@ -0,0 +1,192 @@
|
|||
/// Load and save `.lbins` instrument bundle files.
|
||||
///
|
||||
/// A `.lbins` file is a ZIP archive with the following layout:
|
||||
///
|
||||
/// ```
|
||||
/// instrument.lbins (ZIP)
|
||||
/// ├── instrument.json ← GraphPreset JSON (existing schema)
|
||||
/// ├── samples/
|
||||
/// │ ├── kick.wav
|
||||
/// │ └── snare.flac
|
||||
/// └── models/
|
||||
/// └── amp.nam
|
||||
/// ```
|
||||
///
|
||||
/// All asset paths in `instrument.json` are ZIP-relative
|
||||
/// (e.g. `"samples/kick.wav"`, `"models/amp.nam"`).
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::io::{Read, Write};
|
||||
use std::path::Path;
|
||||
|
||||
use crate::audio::node_graph::preset::{GraphPreset, SampleData};
|
||||
|
||||
/// Load a `.lbins` file.
|
||||
///
|
||||
/// Returns the deserialized `GraphPreset` together with a map of all
|
||||
/// non-JSON entries keyed by their ZIP-relative path (e.g. `"samples/kick.wav"`).
|
||||
pub fn load_lbins(path: &Path) -> Result<(GraphPreset, HashMap<String, Vec<u8>>), String> {
|
||||
let file = std::fs::File::open(path)
|
||||
.map_err(|e| format!("Failed to open .lbins file: {}", e))?;
|
||||
|
||||
let mut archive = zip::ZipArchive::new(file)
|
||||
.map_err(|e| format!("Failed to read ZIP archive: {}", e))?;
|
||||
|
||||
// Read instrument.json first
|
||||
let preset_json = {
|
||||
let mut entry = archive
|
||||
.by_name("instrument.json")
|
||||
.map_err(|_| "Missing instrument.json in .lbins archive".to_string())?;
|
||||
let mut buf = String::new();
|
||||
entry
|
||||
.read_to_string(&mut buf)
|
||||
.map_err(|e| format!("Failed to read instrument.json: {}", e))?;
|
||||
buf
|
||||
};
|
||||
|
||||
let preset = GraphPreset::from_json(&preset_json)
|
||||
.map_err(|e| format!("Failed to parse instrument.json: {}", e))?;
|
||||
|
||||
// Read all other entries into memory
|
||||
let mut assets: HashMap<String, Vec<u8>> = HashMap::new();
|
||||
for i in 0..archive.len() {
|
||||
let mut entry = archive
|
||||
.by_index(i)
|
||||
.map_err(|e| format!("Failed to read ZIP entry {}: {}", i, e))?;
|
||||
|
||||
let entry_name = entry.name().to_string();
|
||||
if entry_name == "instrument.json" || entry.is_dir() {
|
||||
continue;
|
||||
}
|
||||
|
||||
let mut bytes = Vec::new();
|
||||
entry
|
||||
.read_to_end(&mut bytes)
|
||||
.map_err(|e| format!("Failed to read {}: {}", entry_name, e))?;
|
||||
|
||||
assets.insert(entry_name, bytes);
|
||||
}
|
||||
|
||||
Ok((preset, assets))
|
||||
}
|
||||
|
||||
/// Save a preset to a `.lbins` file.
|
||||
///
|
||||
/// Asset paths in `preset` are rewritten to ZIP-relative form
|
||||
/// (`samples/<basename>` or `models/<basename>`).
|
||||
/// If the path is already ZIP-relative (starts with `samples/` or `models/`)
|
||||
/// it is used as-is. Absolute / relative filesystem paths are resolved
|
||||
/// relative to `asset_base` (typically the directory that contained the
|
||||
/// original `.json` preset) and then read from disk.
|
||||
pub fn save_lbins(path: &Path, preset: &GraphPreset, asset_base: Option<&Path>) -> Result<(), String> {
|
||||
let file = std::fs::File::create(path)
|
||||
.map_err(|e| format!("Failed to create .lbins file: {}", e))?;
|
||||
|
||||
let mut zip = zip::ZipWriter::new(file);
|
||||
let options = zip::write::FileOptions::default()
|
||||
.compression_method(zip::CompressionMethod::Deflated);
|
||||
|
||||
// We'll build a rewritten copy of the preset while collecting assets
|
||||
let mut rewritten = preset.clone();
|
||||
// Map: original path → (zip_path, file_bytes)
|
||||
let mut asset_map: HashMap<String, (String, Vec<u8>)> = HashMap::new();
|
||||
|
||||
// Helper: given an original asset path string and a subdirectory ("samples" or "models"),
|
||||
// resolve the bytes and return the canonical ZIP-relative path.
|
||||
let mut resolve_asset = |orig_path: &str, subdir: &str| -> Result<String, String> {
|
||||
// Already a ZIP-relative path — no re-reading needed, caller stored bytes already
|
||||
// or the asset will be provided by a prior pass. Just normalise the subdirectory.
|
||||
if orig_path.starts_with(&format!("{}/", subdir)) {
|
||||
return Ok(orig_path.to_string());
|
||||
}
|
||||
|
||||
let basename = Path::new(orig_path)
|
||||
.file_name()
|
||||
.and_then(|n| n.to_str())
|
||||
.ok_or_else(|| format!("Cannot determine filename for asset: {}", orig_path))?;
|
||||
|
||||
let zip_path = format!("{}/{}", subdir, basename);
|
||||
|
||||
if !asset_map.contains_key(orig_path) {
|
||||
// Resolve to an absolute filesystem path
|
||||
let fs_path = if Path::new(orig_path).is_absolute() {
|
||||
std::path::PathBuf::from(orig_path)
|
||||
} else if let Some(base) = asset_base {
|
||||
base.join(orig_path)
|
||||
} else {
|
||||
std::path::PathBuf::from(orig_path)
|
||||
};
|
||||
|
||||
let bytes = std::fs::read(&fs_path)
|
||||
.map_err(|e| format!("Failed to read asset {}: {}", fs_path.display(), e))?;
|
||||
|
||||
asset_map.insert(orig_path.to_string(), (zip_path.clone(), bytes));
|
||||
}
|
||||
|
||||
Ok(zip_path)
|
||||
};
|
||||
|
||||
// Rewrite paths in all nodes
|
||||
for node in &mut rewritten.nodes {
|
||||
// Sample data paths
|
||||
if let Some(ref mut sample_data) = node.sample_data {
|
||||
match sample_data {
|
||||
SampleData::SimpleSampler { ref mut file_path, .. } => {
|
||||
if let Some(ref orig) = file_path.clone() {
|
||||
if !orig.is_empty() {
|
||||
match resolve_asset(orig, "samples") {
|
||||
Ok(zip_path) => *file_path = Some(zip_path),
|
||||
Err(e) => eprintln!("Warning: {}", e),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
SampleData::MultiSampler { ref mut layers } => {
|
||||
for layer in layers.iter_mut() {
|
||||
if let Some(ref orig) = layer.file_path.clone() {
|
||||
if !orig.is_empty() {
|
||||
match resolve_asset(orig, "samples") {
|
||||
Ok(zip_path) => layer.file_path = Some(zip_path),
|
||||
Err(e) => eprintln!("Warning: {}", e),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// NAM model path
|
||||
if let Some(ref orig) = node.nam_model_path.clone() {
|
||||
if !orig.starts_with("bundled:") && !orig.is_empty() {
|
||||
match resolve_asset(orig, "models") {
|
||||
Ok(zip_path) => node.nam_model_path = Some(zip_path),
|
||||
Err(e) => eprintln!("Warning: {}", e),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Write all collected assets to the ZIP
|
||||
for (_, (zip_path, bytes)) in &asset_map {
|
||||
zip.start_file(zip_path, options)
|
||||
.map_err(|e| format!("Failed to start ZIP entry {}: {}", zip_path, e))?;
|
||||
zip.write_all(bytes)
|
||||
.map_err(|e| format!("Failed to write {}: {}", zip_path, e))?;
|
||||
}
|
||||
|
||||
// Write instrument.json last (after assets so paths are already rewritten)
|
||||
let json = rewritten
|
||||
.to_json()
|
||||
.map_err(|e| format!("Failed to serialize preset: {}", e))?;
|
||||
|
||||
zip.start_file("instrument.json", options)
|
||||
.map_err(|e| format!("Failed to start instrument.json entry: {}", e))?;
|
||||
zip.write_all(json.as_bytes())
|
||||
.map_err(|e| format!("Failed to write instrument.json: {}", e))?;
|
||||
|
||||
zip.finish()
|
||||
.map_err(|e| format!("Failed to finalize ZIP: {}", e))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
|
@ -1,6 +1,7 @@
|
|||
mod graph;
|
||||
mod node_trait;
|
||||
mod types;
|
||||
pub mod lbins;
|
||||
pub mod nodes;
|
||||
pub mod preset;
|
||||
|
||||
|
|
|
|||
|
|
@ -75,6 +75,21 @@ impl AmpSimNode {
|
|||
Ok(())
|
||||
}
|
||||
|
||||
/// Load a .nam model from in-memory bytes (used when loading from a .lbins bundle).
|
||||
/// `zip_path` is the ZIP-relative path stored back in `model_path` for serialization.
|
||||
pub fn load_model_from_bytes(&mut self, zip_path: &str, bytes: &[u8]) -> Result<(), String> {
|
||||
let basename = std::path::Path::new(zip_path)
|
||||
.file_name()
|
||||
.and_then(|n| n.to_str())
|
||||
.unwrap_or(zip_path);
|
||||
let mut model = nam_ffi::NamModel::from_bytes(basename, bytes)
|
||||
.map_err(|e| format!("{}", e))?;
|
||||
model.set_max_buffer_size(1024);
|
||||
self.model = Some(model);
|
||||
self.model_path = Some(zip_path.to_string());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get the loaded model path (for preset serialization).
|
||||
pub fn model_path(&self) -> Option<&str> {
|
||||
self.model_path.as_deref()
|
||||
|
|
|
|||
|
|
@ -49,6 +49,10 @@ pub struct AutomationInputNode {
|
|||
parameters: Vec<Parameter>,
|
||||
/// Shared playback time (set by the graph before processing)
|
||||
playback_time: Arc<RwLock<f64>>,
|
||||
/// Minimum output value (for UI display range)
|
||||
pub value_min: f32,
|
||||
/// Maximum output value (for UI display range)
|
||||
pub value_max: f32,
|
||||
}
|
||||
|
||||
impl AutomationInputNode {
|
||||
|
|
@ -62,10 +66,12 @@ impl AutomationInputNode {
|
|||
Self {
|
||||
name: name.clone(),
|
||||
display_name: "Automation".to_string(),
|
||||
keyframes: Vec::new(),
|
||||
keyframes: vec![AutomationKeyframe::new(0.0, 0.0)],
|
||||
outputs,
|
||||
parameters: Vec::new(),
|
||||
playback_time: Arc::new(RwLock::new(0.0)),
|
||||
value_min: -1.0,
|
||||
value_max: 1.0,
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -275,6 +281,8 @@ impl AudioNode for AutomationInputNode {
|
|||
outputs: self.outputs.clone(),
|
||||
parameters: self.parameters.clone(),
|
||||
playback_time: Arc::new(RwLock::new(0.0)),
|
||||
value_min: self.value_min,
|
||||
value_max: self.value_max,
|
||||
})
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -6,6 +6,7 @@ use symphonia::core::io::MediaSourceStream;
|
|||
use symphonia::core::meta::MetadataOptions;
|
||||
use symphonia::core::probe::Hint;
|
||||
use std::fs::File;
|
||||
use std::io::Cursor;
|
||||
use std::path::Path;
|
||||
|
||||
/// Loaded audio sample data
|
||||
|
|
@ -20,33 +21,36 @@ pub struct SampleData {
|
|||
/// Load an audio file and decode it to mono f32 samples
|
||||
pub fn load_audio_file(path: impl AsRef<Path>) -> Result<SampleData, String> {
|
||||
let path = path.as_ref();
|
||||
|
||||
// Open the file
|
||||
let file = File::open(path)
|
||||
.map_err(|e| format!("Failed to open file: {}", e))?;
|
||||
|
||||
// Create a media source stream
|
||||
let file = File::open(path).map_err(|e| format!("Failed to open file: {}", e))?;
|
||||
let mss = MediaSourceStream::new(Box::new(file), Default::default());
|
||||
|
||||
// Create a hint to help the format registry guess the format
|
||||
let mut hint = Hint::new();
|
||||
if let Some(extension) = path.extension() {
|
||||
if let Some(ext_str) = extension.to_str() {
|
||||
hint.with_extension(ext_str);
|
||||
}
|
||||
if let Some(ext) = path.extension().and_then(|e| e.to_str()) {
|
||||
hint.with_extension(ext);
|
||||
}
|
||||
decode_mss(mss, hint)
|
||||
}
|
||||
|
||||
// Probe the media source for a format
|
||||
let format_opts = FormatOptions::default();
|
||||
let metadata_opts = MetadataOptions::default();
|
||||
/// Load audio from an in-memory byte slice and decode it to mono f32 samples.
|
||||
/// Supports WAV, FLAC, MP3, AAC, and any other format Symphonia recognises.
|
||||
/// `filename_hint` is used to help Symphonia detect the format (e.g. "kick.wav").
|
||||
pub fn load_audio_from_bytes(bytes: &[u8], filename_hint: &str) -> Result<SampleData, String> {
|
||||
let cursor = Cursor::new(bytes.to_vec());
|
||||
let mss = MediaSourceStream::new(Box::new(cursor), Default::default());
|
||||
let mut hint = Hint::new();
|
||||
if let Some(ext) = std::path::Path::new(filename_hint).extension().and_then(|e| e.to_str()) {
|
||||
hint.with_extension(ext);
|
||||
}
|
||||
decode_mss(mss, hint)
|
||||
}
|
||||
|
||||
/// Shared decode logic: probe `mss`, find the first audio track, decode to mono f32.
|
||||
fn decode_mss(mss: MediaSourceStream, hint: Hint) -> Result<SampleData, String> {
|
||||
let probed = symphonia::default::get_probe()
|
||||
.format(&hint, mss, &format_opts, &metadata_opts)
|
||||
.format(&hint, mss, &FormatOptions::default(), &MetadataOptions::default())
|
||||
.map_err(|e| format!("Failed to probe format: {}", e))?;
|
||||
|
||||
let mut format = probed.format;
|
||||
|
||||
// Find the first audio track
|
||||
let track = format
|
||||
.tracks()
|
||||
.iter()
|
||||
|
|
@ -56,47 +60,33 @@ pub fn load_audio_file(path: impl AsRef<Path>) -> Result<SampleData, String> {
|
|||
let track_id = track.id;
|
||||
let sample_rate = track.codec_params.sample_rate.unwrap_or(48000);
|
||||
|
||||
// Create a decoder for the track
|
||||
let dec_opts = DecoderOptions::default();
|
||||
let mut decoder = symphonia::default::get_codecs()
|
||||
.make(&track.codec_params, &dec_opts)
|
||||
.make(&track.codec_params, &DecoderOptions::default())
|
||||
.map_err(|e| format!("Failed to create decoder: {}", e))?;
|
||||
|
||||
// Decode all packets
|
||||
let mut all_samples = Vec::new();
|
||||
|
||||
loop {
|
||||
// Get the next packet
|
||||
let packet = match format.next_packet() {
|
||||
Ok(packet) => packet,
|
||||
Err(SymphoniaError::IoError(e)) if e.kind() == std::io::ErrorKind::UnexpectedEof => {
|
||||
// End of stream
|
||||
break;
|
||||
}
|
||||
Err(e) => {
|
||||
return Err(format!("Error reading packet: {}", e));
|
||||
}
|
||||
Err(e) => return Err(format!("Error reading packet: {}", e)),
|
||||
};
|
||||
|
||||
// Skip packets that don't belong to the selected track
|
||||
if packet.track_id() != track_id {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Decode the packet
|
||||
let decoded = decoder
|
||||
.decode(&packet)
|
||||
.map_err(|e| format!("Failed to decode packet: {}", e))?;
|
||||
|
||||
// Convert to f32 samples and mix to mono
|
||||
let samples = convert_to_mono_f32(&decoded);
|
||||
all_samples.extend_from_slice(&samples);
|
||||
all_samples.extend_from_slice(&convert_to_mono_f32(&decoded));
|
||||
}
|
||||
|
||||
Ok(SampleData {
|
||||
samples: all_samples,
|
||||
sample_rate,
|
||||
})
|
||||
Ok(SampleData { samples: all_samples, sample_rate })
|
||||
}
|
||||
|
||||
/// Convert an audio buffer to mono f32 samples
|
||||
|
|
|
|||
|
|
@ -438,7 +438,7 @@ impl Metatrack {
|
|||
pub fn rebuild_audio_graph(&mut self, sample_rate: u32, buffer_size: usize) -> Result<(), String> {
|
||||
if let Some(preset) = &self.audio_graph_preset {
|
||||
if !preset.nodes.is_empty() && preset.output_node.is_some() {
|
||||
self.audio_graph = AudioGraph::from_preset(preset, sample_rate, buffer_size, None)?;
|
||||
self.audio_graph = AudioGraph::from_preset(preset, sample_rate, buffer_size, None, None)?;
|
||||
// graph_is_default remains as serialized (false for user-modified graphs)
|
||||
} else {
|
||||
self.audio_graph = Self::create_empty_graph(sample_rate, buffer_size);
|
||||
|
|
@ -703,7 +703,7 @@ impl MidiTrack {
|
|||
/// Rebuild the instrument graph from preset after deserialization
|
||||
pub fn rebuild_audio_graph(&mut self, sample_rate: u32, buffer_size: usize) -> Result<(), String> {
|
||||
if let Some(preset) = &self.instrument_graph_preset {
|
||||
self.instrument_graph = AudioGraph::from_preset(preset, sample_rate, buffer_size, None)?;
|
||||
self.instrument_graph = AudioGraph::from_preset(preset, sample_rate, buffer_size, None, None)?;
|
||||
} else {
|
||||
// No preset - create default graph
|
||||
self.instrument_graph = AudioGraph::new(sample_rate, buffer_size);
|
||||
|
|
@ -985,7 +985,7 @@ impl AudioTrack {
|
|||
|
||||
if has_nodes && has_output {
|
||||
// Valid preset - rebuild from it
|
||||
self.effects_graph = AudioGraph::from_preset(preset, sample_rate, buffer_size, None)?;
|
||||
self.effects_graph = AudioGraph::from_preset(preset, sample_rate, buffer_size, None, None)?;
|
||||
} else {
|
||||
// Empty or invalid preset - create default graph
|
||||
self.effects_graph = Self::create_default_graph(sample_rate, buffer_size);
|
||||
|
|
|
|||
|
|
@ -181,6 +181,10 @@ pub enum Command {
|
|||
GraphSavePreset(TrackId, String, String, String, Vec<String>),
|
||||
/// Load a preset into a track's graph (track_id, preset_path)
|
||||
GraphLoadPreset(TrackId, String),
|
||||
/// Load a .lbins instrument bundle into a track's graph (track_id, path)
|
||||
GraphLoadLbins(TrackId, std::path::PathBuf),
|
||||
/// Save a track's graph as a .lbins instrument bundle (track_id, path, preset_name, description, tags)
|
||||
GraphSaveLbins(TrackId, std::path::PathBuf, String, String, Vec<String>),
|
||||
|
||||
// Metatrack subtrack graph commands
|
||||
/// Replace a metatrack's mixing graph with the default SubtrackInputs→Mixer→Output layout.
|
||||
|
|
@ -392,6 +396,8 @@ pub enum Query {
|
|||
GetAutomationKeyframes(TrackId, u32),
|
||||
/// Get the display name of an AutomationInput node (track_id, node_id)
|
||||
GetAutomationName(TrackId, u32),
|
||||
/// Get the value range (min, max) of an AutomationInput node (track_id, node_id)
|
||||
GetAutomationRange(TrackId, u32),
|
||||
/// Serialize audio pool for project saving (project_path)
|
||||
SerializeAudioPool(std::path::PathBuf),
|
||||
/// Load audio pool from serialized entries (entries, project_path)
|
||||
|
|
@ -480,6 +486,8 @@ pub enum QueryResponse {
|
|||
AutomationKeyframes(Result<Vec<AutomationKeyframeData>, String>),
|
||||
/// Automation node name
|
||||
AutomationName(Result<String, String>),
|
||||
/// Automation node value range (min, max)
|
||||
AutomationRange(Result<(f32, f32), String>),
|
||||
/// Serialized audio pool entries
|
||||
AudioPoolSerialized(Result<Vec<crate::audio::pool::AudioPoolEntry>, String>),
|
||||
/// Audio pool loaded (returns list of missing pool indices)
|
||||
|
|
|
|||
|
|
@ -24,6 +24,8 @@ use vello::Scene;
|
|||
/// Cache for decoded image data to avoid re-decoding every frame
|
||||
pub struct ImageCache {
|
||||
cache: HashMap<Uuid, Arc<ImageBrush>>,
|
||||
/// CPU path: tiny-skia pixmaps decoded from the same assets (premultiplied RGBA8)
|
||||
cpu_cache: HashMap<Uuid, Arc<tiny_skia::Pixmap>>,
|
||||
}
|
||||
|
||||
impl ImageCache {
|
||||
|
|
@ -31,6 +33,7 @@ impl ImageCache {
|
|||
pub fn new() -> Self {
|
||||
Self {
|
||||
cache: HashMap::new(),
|
||||
cpu_cache: HashMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -47,14 +50,28 @@ impl ImageCache {
|
|||
Some(arc_image)
|
||||
}
|
||||
|
||||
/// Get or decode an image as a premultiplied tiny-skia Pixmap (CPU render path).
|
||||
pub fn get_or_decode_cpu(&mut self, asset: &ImageAsset) -> Option<Arc<tiny_skia::Pixmap>> {
|
||||
if let Some(cached) = self.cpu_cache.get(&asset.id) {
|
||||
return Some(Arc::clone(cached));
|
||||
}
|
||||
|
||||
let pixmap = decode_image_to_pixmap(asset)?;
|
||||
let arc = Arc::new(pixmap);
|
||||
self.cpu_cache.insert(asset.id, Arc::clone(&arc));
|
||||
Some(arc)
|
||||
}
|
||||
|
||||
/// Clear cache entry when an image asset is deleted or modified
|
||||
pub fn invalidate(&mut self, id: &Uuid) {
|
||||
self.cache.remove(id);
|
||||
self.cpu_cache.remove(id);
|
||||
}
|
||||
|
||||
/// Clear all cached images
|
||||
pub fn clear(&mut self) {
|
||||
self.cache.clear();
|
||||
self.cpu_cache.clear();
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -64,6 +81,25 @@ impl Default for ImageCache {
|
|||
}
|
||||
}
|
||||
|
||||
/// Decode an image asset to a premultiplied tiny-skia Pixmap (CPU render path).
///
/// Returns `None` when the asset carries no raw bytes, the bytes fail to decode,
/// or a pixmap of the asset's declared dimensions cannot be allocated.
fn decode_image_to_pixmap(asset: &ImageAsset) -> Option<tiny_skia::Pixmap> {
    let data = asset.data.as_ref()?;
    let img = image::load_from_memory(data).ok()?;
    let rgba = img.to_rgba8();
    // NOTE(review): the pixmap is sized from asset.width/height while pixels come from
    // the decoded bytes; if those ever disagree, zip() below silently truncates —
    // TODO confirm asset dimensions are guaranteed to match the encoded image.
    let mut pixmap = tiny_skia::Pixmap::new(asset.width, asset.height)?;
    for (dst, src) in pixmap.pixels_mut().iter_mut().zip(rgba.pixels()) {
        let [r, g, b, a] = src.0;
        // Convert straight alpha (image crate output) to premultiplied (tiny-skia internal format)
        let af = a as f32 / 255.0;
        let pr = (r as f32 * af).round() as u8;
        let pg = (g as f32 * af).round() as u8;
        let pb = (b as f32 * af).round() as u8;
        // from_rgba only fails when channel > alpha; premultiplied values are always ≤ alpha
        *dst = tiny_skia::PremultipliedColorU8::from_rgba(pr, pg, pb, a).unwrap();
    }
    Some(pixmap)
}
|
||||
|
||||
/// Decode an image asset to peniko ImageBrush
|
||||
fn decode_image_asset(asset: &ImageAsset) -> Option<ImageBrush> {
|
||||
// Get the raw file data
|
||||
|
|
@ -1368,8 +1404,8 @@ fn render_dcel_cpu(
|
|||
pixmap: &mut tiny_skia::PixmapMut<'_>,
|
||||
transform: tiny_skia::Transform,
|
||||
opacity: f32,
|
||||
_document: &Document,
|
||||
_image_cache: &mut ImageCache,
|
||||
document: &Document,
|
||||
image_cache: &mut ImageCache,
|
||||
) {
|
||||
// 1. Faces (fills)
|
||||
for (i, face) in dcel.faces.iter().enumerate() {
|
||||
|
|
@ -1412,8 +1448,25 @@ fn render_dcel_cpu(
|
|||
}
|
||||
}
|
||||
|
||||
// Image fill — not yet implemented for CPU renderer; fall through to solid or skip
|
||||
// TODO: decode image to Pixmap and use as Pattern shader
|
||||
// Image fill — decode to Pixmap and use as a Pattern shader
|
||||
if let Some(image_asset_id) = face.image_fill {
|
||||
if let Some(asset) = document.get_image_asset(&image_asset_id) {
|
||||
if let Some(img_pixmap) = image_cache.get_or_decode_cpu(asset) {
|
||||
let pattern = tiny_skia::Pattern::new(
|
||||
tiny_skia::Pixmap::as_ref(&img_pixmap),
|
||||
tiny_skia::SpreadMode::Pad,
|
||||
tiny_skia::FilterQuality::Bilinear,
|
||||
opacity,
|
||||
tiny_skia::Transform::identity(),
|
||||
);
|
||||
let mut paint = tiny_skia::Paint::default();
|
||||
paint.shader = pattern;
|
||||
paint.anti_alias = true;
|
||||
pixmap.fill_path(&ts_path, &paint, fill_type, transform, None);
|
||||
filled = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Solid colour fill
|
||||
if !filled {
|
||||
|
|
|
|||
|
|
@ -0,0 +1,323 @@
|
|||
/// Generic curve lane widget — renders a keyframe curve and handles editing interactions.
|
||||
///
|
||||
/// Used for audio automation lanes (AutomationInput nodes) and, in future, for visual
|
||||
/// property animation lanes on vector/raster layers.
|
||||
|
||||
use eframe::egui::{self, Color32, Pos2, Rect, Shape, Stroke, Vec2};
|
||||
|
||||
// ─── Data types ──────────────────────────────────────────────────────────────
|
||||
|
||||
/// A single keyframe. Values are in the caller's raw unit space (not normalised).
/// Convert from `AutomationKeyframeData` or `lightningbeam_core::animation::Keyframe`
/// before passing in.
#[derive(Clone, Debug)]
pub struct CurvePoint {
    /// Keyframe position in project time (seconds).
    pub time: f64,
    /// Keyframe value in the caller's raw unit space.
    pub value: f32,
    /// How the segment from this keyframe to the next is interpolated.
    pub interpolation: CurveInterpolation,
    /// Outgoing Bezier tangent (x, y) relative to this keyframe, range 0–1
    pub ease_out: (f32, f32),
    /// Incoming Bezier tangent (x, y) relative to next keyframe, range 0–1
    pub ease_in: (f32, f32),
}
|
||||
|
||||
/// Interpolation mode applied to the segment starting at a keyframe.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum CurveInterpolation {
    /// Straight line from this keyframe's value to the next.
    Linear,
    /// Cubic Bezier easing using the keyframes' ease_out/ease_in tangents.
    Bezier,
    /// Hold this keyframe's value until the next keyframe.
    Step,
    /// Evaluated identically to `Step` by `evaluate_curve`.
    Hold,
}
|
||||
|
||||
/// Edit action the user performed during one frame, returned from [`render_curve_lane`].
#[derive(Debug)]
pub enum CurveEditAction {
    /// No edit occurred this frame.
    None,
    /// Left click on empty lane space: insert a keyframe at (time, value).
    AddKeyframe { time: f64, value: f32 },
    /// Drag released: move the keyframe at `index` to the new position.
    MoveKeyframe { index: usize, new_time: f64, new_value: f32 },
    /// Right click on a keyframe: remove the keyframe at `index`.
    DeleteKeyframe { index: usize },
}
|
||||
|
||||
/// Drag state for an in-progress keyframe move.
/// Stored by the caller alongside the lane's cached keyframe list.
#[derive(Clone, Debug)]
pub struct CurveDragState {
    /// Index of the keyframe being dragged (into the caller's keyframe list).
    pub keyframe_index: usize,
    /// Time of the keyframe when the drag started.
    pub original_time: f64,
    /// Value of the keyframe when the drag started.
    pub original_value: f32,
    /// Time at the current (preview) drag position.
    pub current_time: f64,
    /// Value at the current (preview) drag position.
    pub current_value: f32,
}
|
||||
|
||||
// ─── Curve evaluation ────────────────────────────────────────────────────────
|
||||
|
||||
/// Evaluate the curve defined by `keyframes` at the given `time`.
|
||||
///
|
||||
/// Matches the interpolation logic of `AutomationInputNode::evaluate_at_time()`.
|
||||
pub fn evaluate_curve(keyframes: &[CurvePoint], time: f64) -> f32 {
|
||||
if keyframes.is_empty() {
|
||||
return 0.0;
|
||||
}
|
||||
if keyframes.len() == 1 || time <= keyframes[0].time {
|
||||
return keyframes[0].value;
|
||||
}
|
||||
let last = &keyframes[keyframes.len() - 1];
|
||||
if time >= last.time {
|
||||
return last.value;
|
||||
}
|
||||
|
||||
// Find the pair that brackets `time`
|
||||
let right = keyframes.partition_point(|kf| kf.time <= time);
|
||||
let kf1 = &keyframes[right - 1];
|
||||
let kf2 = &keyframes[right];
|
||||
|
||||
let t = if kf2.time == kf1.time {
|
||||
0.0f32
|
||||
} else {
|
||||
((time - kf1.time) / (kf2.time - kf1.time)) as f32
|
||||
};
|
||||
|
||||
match kf1.interpolation {
|
||||
CurveInterpolation::Linear => kf1.value + (kf2.value - kf1.value) * t,
|
||||
CurveInterpolation::Bezier => {
|
||||
let eased = cubic_bezier_ease(t, kf1.ease_out, kf2.ease_in);
|
||||
kf1.value + (kf2.value - kf1.value) * eased
|
||||
}
|
||||
CurveInterpolation::Step | CurveInterpolation::Hold => kf1.value,
|
||||
}
|
||||
}
|
||||
|
||||
/// Simplified cubic Bezier easing through (0,0) → `ease_out` → `ease_in` → (1,1);
/// only the Y components of the tangents influence the result.
/// Identical to `AutomationInputNode::cubic_bezier_ease`.
fn cubic_bezier_ease(t: f32, ease_out: (f32, f32), ease_in: (f32, f32)) -> f32 {
    let inv = 1.0 - t;
    // Bernstein basis weights for the two interior control points and the endpoint.
    let b1 = 3.0 * inv * inv * t;
    let b2 = 3.0 * inv * t * t;
    let b3 = t * t * t;
    b1 * ease_out.1 + b2 * ease_in.1 + b3
}
|
||||
|
||||
// ─── Rendering ───────────────────────────────────────────────────────────────
|
||||
|
||||
const DIAMOND_RADIUS: f32 = 5.0;
|
||||
|
||||
/// Render a curve lane within `rect` and return any edit action the user performed.
///
/// `drag_state` is an in/out reference; the caller is responsible for storing it between
/// frames alongside the lane's keyframe list.
///
/// `value_min` and `value_max` define the displayed value range (bottom to top of rect).
/// Keyframe values outside this range are clamped visually.
///
/// `time_to_x` maps a project time (seconds) to an **absolute** screen X coordinate.
/// `x_to_time` maps an **absolute** screen X coordinate to project time.
pub fn render_curve_lane(
    ui: &mut egui::Ui,
    rect: Rect,
    keyframes: &[CurvePoint],
    drag_state: &mut Option<CurveDragState>,
    playback_time: f64,
    accent_color: Color32,
    id: egui::Id,
    value_min: f32,
    value_max: f32,
    time_to_x: impl Fn(f64) -> f32,
    x_to_time: impl Fn(f32) -> f64,
) -> CurveEditAction {
    let painter = ui.painter_at(rect);

    // Helper: raw value → normalised [0,1] for screen-Y mapping.
    // A degenerate range (min == max) maps everything to the vertical centre.
    let normalize = |v: f32| -> f32 {
        if (value_max - value_min).abs() < f32::EPSILON {
            0.5
        } else {
            (v - value_min) / (value_max - value_min)
        }
    };
    // Helper: normalised [0,1] → raw value
    let denormalize = |n: f32| -> f32 {
        value_min + n * (value_max - value_min)
    };

    // ── Background ──────────────────────────────────────────────────────────
    painter.rect_filled(rect, 0.0, Color32::from_rgba_premultiplied(20, 20, 25, 230));

    // Zero-line (value = 0, or mid-line if range doesn't include 0)
    let zero_norm = normalize(0.0).clamp(0.0, 1.0);
    let zero_y = value_to_y(zero_norm, rect);
    painter.line_segment(
        [Pos2::new(rect.min.x, zero_y), Pos2::new(rect.max.x, zero_y)],
        Stroke::new(1.0, Color32::from_rgba_premultiplied(80, 80, 80, 120)),
    );

    // ── Curve polyline ───────────────────────────────────────────────────────
    // Build a working keyframe list with any in-progress drag preview applied.
    // The preview copy is re-sorted so the polyline stays monotonic in time
    // even while a keyframe is dragged past its neighbours.
    let display_keyframes: Vec<CurvePoint> = if let Some(ref ds) = drag_state {
        let mut kfs = keyframes.to_vec();
        if ds.keyframe_index < kfs.len() {
            kfs[ds.keyframe_index].time = ds.current_time;
            kfs[ds.keyframe_index].value = ds.current_value;
            kfs.sort_by(|a, b| a.time.partial_cmp(&b.time).unwrap_or(std::cmp::Ordering::Equal));
        }
        kfs
    } else {
        keyframes.to_vec()
    };

    if !display_keyframes.is_empty() {
        let step = 2.0f32; // sample every 2 screen pixels
        let num_steps = ((rect.width() / step) as usize).max(1);
        let mut points: Vec<Pos2> = Vec::with_capacity(num_steps + 1);

        for i in 0..=num_steps {
            let x = rect.min.x + i as f32 * step;
            // Clamp the sample X so the final point never overshoots the lane.
            let t = x_to_time(x.min(rect.max.x));
            let v = evaluate_curve(&display_keyframes, t);
            let y = value_to_y(normalize(v), rect);
            points.push(Pos2::new(x.min(rect.max.x), y));
        }

        let curve_color = accent_color.linear_multiply(0.8);
        painter.add(Shape::line(points, Stroke::new(1.5, curve_color)));
    }

    // ── Playhead ─────────────────────────────────────────────────────────────
    let ph_x = time_to_x(playback_time);
    if ph_x >= rect.min.x && ph_x <= rect.max.x {
        painter.line_segment(
            [Pos2::new(ph_x, rect.min.y), Pos2::new(ph_x, rect.max.y)],
            Stroke::new(1.0, Color32::from_rgb(255, 80, 80)),
        );
    }

    // ── Interaction ──────────────────────────────────────────────────────────
    let sense = egui::Sense::click_and_drag();
    let response = ui.interact(rect, id, sense);

    // latest_pos() works whether the pointer button is up or down (unlike interact_pos).
    let pointer_pos: Option<Pos2> = ui.input(|i| i.pointer.latest_pos());

    // Find which keyframe (if any) the pointer is near.
    // Hit-testing uses the *committed* keyframe positions, not the drag preview.
    let hovered_kf: Option<usize> = pointer_pos.and_then(|pos| {
        keyframes.iter().enumerate().find(|(_, kf)| {
            let kx = time_to_x(kf.time);
            let ky = value_to_y(normalize(kf.value), rect);
            let d = Vec2::new(pos.x - kx, pos.y - ky).length();
            d <= DIAMOND_RADIUS * 1.5
        }).map(|(i, _)| i)
    });

    // Draw keyframe diamonds (after interaction setup so hover color works)
    for (idx, kf) in keyframes.iter().enumerate() {
        let kx = time_to_x(kf.time);
        // Skip diamonds fully outside the visible lane.
        if kx < rect.min.x - DIAMOND_RADIUS || kx > rect.max.x + DIAMOND_RADIUS {
            continue;
        }
        let ky = value_to_y(normalize(kf.value), rect);

        // During drag, show this diamond at its preview position
        let (draw_x, draw_y) = if let Some(ref ds) = drag_state {
            if ds.keyframe_index == idx {
                (time_to_x(ds.current_time), value_to_y(normalize(ds.current_value), rect))
            } else {
                (kx, ky)
            }
        } else {
            (kx, ky)
        };

        let is_hovered = hovered_kf == Some(idx);
        let is_dragging = drag_state.as_ref().map_or(false, |d| d.keyframe_index == idx);

        // Visual priority: dragging > hovered > idle.
        let fill = if is_dragging {
            Color32::WHITE
        } else if is_hovered {
            accent_color
        } else {
            accent_color.linear_multiply(0.7)
        };

        draw_diamond(&painter, Pos2::new(draw_x, draw_y), DIAMOND_RADIUS, fill);
    }

    // ── Interaction logic ────────────────────────────────────────────────────

    // Right-click → delete keyframe
    if response.secondary_clicked() {
        if let Some(idx) = hovered_kf {
            return CurveEditAction::DeleteKeyframe { index: idx };
        }
    }

    // Left drag start → begin dragging a keyframe
    if response.drag_started() {
        if let Some(idx) = hovered_kf {
            let kf = &keyframes[idx];
            *drag_state = Some(CurveDragState {
                keyframe_index: idx,
                original_time: kf.time,
                original_value: kf.value,
                current_time: kf.time,
                current_value: kf.value,
            });
        }
    }

    // Drag in progress → update preview position
    if let Some(ref mut ds) = drag_state {
        if response.dragged() {
            if let Some(pos) = pointer_pos {
                // Keep the preview inside the lane rect.
                let clamped_x = pos.x.clamp(rect.min.x, rect.max.x);
                let clamped_y = pos.y.clamp(rect.min.y, rect.max.y);
                ds.current_time = x_to_time(clamped_x);
                ds.current_value = denormalize(y_to_value(clamped_y, rect));
            }
        }
        // Drag released → commit
        if response.drag_stopped() {
            let ds = drag_state.take().unwrap();
            return CurveEditAction::MoveKeyframe {
                index: ds.keyframe_index,
                new_time: ds.current_time,
                new_value: ds.current_value,
            };
        }
    }

    // Left click on empty space → add keyframe
    // Use interact_pointer_pos() here: it captures the click position even after button release.
    if response.clicked() && hovered_kf.is_none() && drag_state.is_none() {
        if let Some(pos) = response.interact_pointer_pos() {
            let t = x_to_time(pos.x);
            let v = denormalize(y_to_value(pos.y, rect));
            return CurveEditAction::AddKeyframe { time: t, value: v };
        }
    }

    CurveEditAction::None
}
|
||||
|
||||
// ─── Coordinate helpers ───────────────────────────────────────────────────────
|
||||
|
||||
/// Map a normalised value (0=bottom, 1=top) to a Y screen coordinate within `rect`.
|
||||
pub fn value_to_y(value: f32, rect: Rect) -> f32 {
|
||||
rect.max.y - value.clamp(0.0, 1.0) * rect.height()
|
||||
}
|
||||
|
||||
/// Map a screen Y coordinate within `rect` to a normalised value (0=bottom, 1=top).
|
||||
pub fn y_to_value(y: f32, rect: Rect) -> f32 {
|
||||
((rect.max.y - y) / rect.height()).clamp(0.0, 1.0)
|
||||
}
|
||||
|
||||
// ─── Drawing utilities ────────────────────────────────────────────────────────
|
||||
|
||||
fn draw_diamond(painter: &egui::Painter, center: Pos2, radius: f32, fill: Color32) {
|
||||
let points = vec![
|
||||
Pos2::new(center.x, center.y - radius), // top
|
||||
Pos2::new(center.x + radius, center.y), // right
|
||||
Pos2::new(center.x, center.y + radius), // bottom
|
||||
Pos2::new(center.x - radius, center.y), // left
|
||||
];
|
||||
painter.add(Shape::convex_polygon(
|
||||
points,
|
||||
fill,
|
||||
Stroke::new(1.0, Color32::from_rgba_premultiplied(0, 0, 0, 180)),
|
||||
));
|
||||
}
|
||||
|
|
@ -57,6 +57,8 @@ mod test_mode;
|
|||
mod sample_import;
|
||||
mod sample_import_dialog;
|
||||
|
||||
mod curve_editor;
|
||||
|
||||
/// Lightningbeam Editor - Animation and video editing software
|
||||
#[derive(Parser, Debug)]
|
||||
#[command(name = "Lightningbeam Editor")]
|
||||
|
|
@ -840,6 +842,8 @@ struct EditorApp {
|
|||
track_to_layer_map: HashMap<daw_backend::TrackId, Uuid>,
|
||||
/// Generation counter - incremented on project load to force UI components to reload
|
||||
project_generation: u64,
|
||||
/// Incremented whenever node graph topology changes (add/remove node or connection)
|
||||
graph_topology_generation: u64,
|
||||
// Clip instance ID mapping (Document clip instance UUIDs <-> backend clip instance IDs)
|
||||
clip_instance_to_backend_map: HashMap<Uuid, lightningbeam_core::action::BackendClipInstanceId>,
|
||||
// Playback state (global for all panes)
|
||||
|
|
@ -1116,6 +1120,7 @@ impl EditorApp {
|
|||
layer_to_track_map: HashMap::new(),
|
||||
track_to_layer_map: HashMap::new(),
|
||||
project_generation: 0,
|
||||
graph_topology_generation: 0,
|
||||
clip_instance_to_backend_map: HashMap::new(),
|
||||
playback_time: 0.0, // Start at beginning
|
||||
is_playing: false, // Start paused
|
||||
|
|
@ -5823,6 +5828,7 @@ impl eframe::App for EditorApp {
|
|||
track_to_layer_map: &self.track_to_layer_map,
|
||||
waveform_stereo: self.config.waveform_stereo,
|
||||
project_generation: &mut self.project_generation,
|
||||
graph_topology_generation: &mut self.graph_topology_generation,
|
||||
script_to_edit: &mut self.script_to_edit,
|
||||
script_saved: &mut self.script_saved,
|
||||
region_selection: &mut self.region_selection,
|
||||
|
|
|
|||
|
|
@ -268,6 +268,9 @@ pub struct SharedPaneState<'a> {
|
|||
pub waveform_stereo: bool,
|
||||
/// Generation counter - incremented on project load to force reloads
|
||||
pub project_generation: &'a mut u64,
|
||||
/// Incremented whenever node graph topology changes (add/remove node or connection).
|
||||
/// Used by the timeline to know when to refresh automation lane caches.
|
||||
pub graph_topology_generation: &'a mut u64,
|
||||
/// Script ID to open in the script editor (set by node graph "Edit Script" action)
|
||||
pub script_to_edit: &'a mut Option<Uuid>,
|
||||
/// Script ID that was just saved (triggers auto-recompile of nodes using it)
|
||||
|
|
|
|||
|
|
@ -301,6 +301,12 @@ pub struct GraphState {
|
|||
pub available_nam_models: Vec<NamModelInfo>,
|
||||
/// Search text for the NAM model picker popup
|
||||
pub nam_search_text: String,
|
||||
/// Edit buffers for AutomationInput display names, keyed by frontend NodeId
|
||||
pub automation_name_edits: HashMap<NodeId, String>,
|
||||
/// Pending automation name changes (node_id, backend_node_id, new_name)
|
||||
pub pending_automation_name_changes: Vec<(NodeId, u32, String)>,
|
||||
/// AutomationInput nodes whose display name still needs to be queried from backend
|
||||
pub pending_automation_name_queries: Vec<(NodeId, u32)>,
|
||||
}
|
||||
|
||||
impl Default for GraphState {
|
||||
|
|
@ -327,6 +333,9 @@ impl Default for GraphState {
|
|||
pending_amp_sim_load: None,
|
||||
available_nam_models: Vec::new(),
|
||||
nam_search_text: String::new(),
|
||||
automation_name_edits: HashMap::new(),
|
||||
pending_automation_name_changes: Vec::new(),
|
||||
pending_automation_name_queries: Vec::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -1511,6 +1520,21 @@ impl NodeDataTrait for NodeData {
|
|||
if close_popup {
|
||||
egui::Popup::close_id(ui.ctx(), popup_id);
|
||||
}
|
||||
} else if self.template == NodeTemplate::AutomationInput {
|
||||
let backend_node_id = user_state.node_backend_ids.get(&node_id).copied().unwrap_or(0);
|
||||
let edit_buf = user_state.automation_name_edits
|
||||
.entry(node_id)
|
||||
.or_insert_with(String::new);
|
||||
let resp = ui.add(
|
||||
egui::TextEdit::singleline(edit_buf)
|
||||
.hint_text("Lane name...")
|
||||
.desired_width(f32::INFINITY),
|
||||
);
|
||||
if resp.lost_focus() {
|
||||
user_state.pending_automation_name_changes.push(
|
||||
(node_id, backend_node_id, edit_buf.clone()),
|
||||
);
|
||||
}
|
||||
} else {
|
||||
ui.label("");
|
||||
}
|
||||
|
|
|
|||
|
|
@ -681,6 +681,9 @@ impl NodeGraphPane {
|
|||
if let Err(e) = shared.action_executor.execute_with_backend(action, &mut backend_context) {
|
||||
eprintln!("Failed to execute node graph action: {}", e);
|
||||
} else {
|
||||
// Notify other panes (e.g. timeline automation cache) that graph topology changed
|
||||
*shared.graph_topology_generation += 1;
|
||||
|
||||
// If this was a node addition, query backend to get the new node's ID
|
||||
if let Some((frontend_id, node_type, position)) = self.pending_node_addition.take() {
|
||||
if let Some(track_id) = self.track_id {
|
||||
|
|
@ -1432,6 +1435,7 @@ impl NodeGraphPane {
|
|||
|
||||
// Create nodes in frontend
|
||||
self.pending_script_resolutions.clear();
|
||||
self.user_state.pending_automation_name_queries.clear();
|
||||
for node in &graph_state.nodes {
|
||||
let node_template = match NodeTemplate::from_backend_name(&node.node_type) {
|
||||
Some(t) => t,
|
||||
|
|
@ -1456,6 +1460,13 @@ impl NodeGraphPane {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
// For AutomationInput nodes: queue a name query to populate the edit buffer
|
||||
if node.node_type == "AutomationInput" {
|
||||
if let Some(fid) = frontend_id {
|
||||
self.user_state.pending_automation_name_queries.push((fid, node.id));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Create connections in frontend
|
||||
|
|
@ -2780,6 +2791,36 @@ impl crate::panes::PaneRenderer for NodeGraphPane {
|
|||
}
|
||||
}
|
||||
|
||||
// Populate automation name edit buffers (deferred after load)
|
||||
if !self.user_state.pending_automation_name_queries.is_empty() {
|
||||
let queries: Vec<_> = self.user_state.pending_automation_name_queries.drain(..).collect();
|
||||
if let Some(backend_track_id) = self.track_id.and_then(|tid| shared.layer_to_track_map.get(&tid).copied()) {
|
||||
if let Some(controller_arc) = &shared.audio_controller {
|
||||
let mut controller = controller_arc.lock().unwrap();
|
||||
for (node_id, backend_node_id) in queries {
|
||||
if let Ok(name) = controller.query_automation_name(backend_track_id, backend_node_id) {
|
||||
self.user_state.automation_name_edits.insert(node_id, name);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Handle pending automation name changes
|
||||
if !self.user_state.pending_automation_name_changes.is_empty() {
|
||||
let changes: Vec<_> = self.user_state.pending_automation_name_changes.drain(..).collect();
|
||||
if let Some(backend_track_id) = self.track_id.and_then(|tid| shared.layer_to_track_map.get(&tid).copied()) {
|
||||
if let Some(controller_arc) = &shared.audio_controller {
|
||||
let mut controller = controller_arc.lock().unwrap();
|
||||
for (_node_id, backend_node_id, name) in changes {
|
||||
controller.automation_set_name(backend_track_id, backend_node_id, name);
|
||||
}
|
||||
// Invalidate timeline automation cache so renamed lanes appear immediately
|
||||
*shared.graph_topology_generation += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Handle param changes from draw block (canvas knob drag etc.)
|
||||
if !self.user_state.pending_draw_param_changes.is_empty() {
|
||||
let changes: Vec<_> = self.user_state.pending_draw_param_changes.drain(..).collect();
|
||||
|
|
|
|||
|
|
@ -7,10 +7,18 @@ use eframe::egui;
|
|||
use std::path::PathBuf;
|
||||
use super::{NodePath, PaneRenderer, SharedPaneState};
|
||||
|
||||
/// Format of a preset file
|
||||
#[derive(Clone, Copy, PartialEq)]
|
||||
enum PresetFormat {
|
||||
Json,
|
||||
Lbins,
|
||||
}
|
||||
|
||||
/// Metadata extracted from a preset file
|
||||
struct PresetInfo {
|
||||
name: String,
|
||||
path: PathBuf,
|
||||
format: PresetFormat,
|
||||
category: String,
|
||||
description: String,
|
||||
author: String,
|
||||
|
|
@ -120,19 +128,29 @@ impl PresetBrowserPane {
|
|||
let path = entry.path();
|
||||
if path.is_dir() {
|
||||
self.scan_directory(&path, base_dir, is_factory);
|
||||
} else if path.extension().is_some_and(|e| e == "json") {
|
||||
if let Some(info) = self.load_preset_info(&path, base_dir, is_factory) {
|
||||
self.presets.push(info);
|
||||
} else {
|
||||
let ext = path.extension().and_then(|e| e.to_str()).unwrap_or("");
|
||||
if ext == "json" || ext == "lbins" {
|
||||
if let Some(info) = self.load_preset_info(&path, base_dir, is_factory) {
|
||||
self.presets.push(info);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Load metadata from a preset JSON file
|
||||
/// Load metadata from a preset file (.json or .lbins)
|
||||
fn load_preset_info(&self, path: &std::path::Path, base_dir: &std::path::Path, is_factory: bool) -> Option<PresetInfo> {
|
||||
let contents = std::fs::read_to_string(path).ok()?;
|
||||
let preset: daw_backend::audio::node_graph::GraphPreset =
|
||||
serde_json::from_str(&contents).ok()?;
|
||||
let ext = path.extension().and_then(|e| e.to_str()).unwrap_or("");
|
||||
let (preset, format) = if ext == "lbins" {
|
||||
let (p, _assets) = daw_backend::audio::node_graph::lbins::load_lbins(path).ok()?;
|
||||
(p, PresetFormat::Lbins)
|
||||
} else {
|
||||
let contents = std::fs::read_to_string(path).ok()?;
|
||||
let p: daw_backend::audio::node_graph::GraphPreset =
|
||||
serde_json::from_str(&contents).ok()?;
|
||||
(p, PresetFormat::Json)
|
||||
};
|
||||
|
||||
// Category = first directory component relative to base_dir
|
||||
let relative = path.strip_prefix(base_dir).ok()?;
|
||||
|
|
@ -144,6 +162,7 @@ impl PresetBrowserPane {
|
|||
Some(PresetInfo {
|
||||
name: preset.metadata.name,
|
||||
path: path.to_path_buf(),
|
||||
format,
|
||||
category,
|
||||
description: preset.metadata.description,
|
||||
author: preset.metadata.author,
|
||||
|
|
@ -189,7 +208,14 @@ impl PresetBrowserPane {
|
|||
|
||||
if let Some(audio_controller) = &shared.audio_controller {
|
||||
let mut controller = audio_controller.lock().unwrap();
|
||||
controller.graph_load_preset(track_id, preset.path.to_string_lossy().to_string());
|
||||
match preset.format {
|
||||
PresetFormat::Json => {
|
||||
controller.graph_load_preset(track_id, preset.path.to_string_lossy().to_string());
|
||||
}
|
||||
PresetFormat::Lbins => {
|
||||
controller.graph_load_lbins(track_id, preset.path.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
// Note: project_generation is incremented by the GraphPresetLoaded event handler
|
||||
// in main.rs, which fires after the audio thread has actually processed the load.
|
||||
|
|
|
|||
|
|
@ -22,6 +22,7 @@ const MAX_PIXELS_PER_SECOND: f32 = 500.0;
|
|||
const EDGE_DETECTION_PIXELS: f32 = 8.0; // Distance from edge to detect trim handles
|
||||
const LOOP_CORNER_SIZE: f32 = 12.0; // Size of loop corner hotzone at top-right of clip
|
||||
const MIN_CLIP_WIDTH_PX: f32 = 8.0; // Minimum visible width for very short clips (e.g. groups)
|
||||
const AUTOMATION_LANE_HEIGHT: f32 = 40.0;
|
||||
|
||||
/// Compute stacking row assignments for clip instances on a vector layer.
|
||||
/// Only clips that overlap in time are stacked; non-overlapping clips share row 0.
|
||||
|
|
@ -207,6 +208,18 @@ pub struct TimelinePane {
|
|||
|
||||
/// Cached mousedown position in header area (for drag threshold detection)
|
||||
header_mousedown_pos: Option<egui::Pos2>,
|
||||
/// Which audio/MIDI layers have automation lanes expanded
|
||||
automation_expanded: std::collections::HashSet<uuid::Uuid>,
|
||||
/// Cached automation lane info per layer_id
|
||||
automation_cache: std::collections::HashMap<uuid::Uuid, Vec<AutomationLaneInfo>>,
|
||||
/// Drag state per (layer_id, node_id) for in-progress keyframe moves
|
||||
automation_drag: std::collections::HashMap<(uuid::Uuid, u32), Option<crate::curve_editor::CurveDragState>>,
|
||||
/// Pending automation actions to process after render
|
||||
pending_automation_actions: Vec<AutomationLaneAction>,
|
||||
/// Last seen project_generation; used to detect node graph changes and invalidate automation cache
|
||||
automation_cache_generation: u64,
|
||||
/// Last seen graph_topology_generation; used to detect node additions/removals
|
||||
automation_topology_generation: u64,
|
||||
}
|
||||
|
||||
/// Check if a clip type can be dropped on a layer type
|
||||
|
|
@ -325,6 +338,35 @@ fn flatten_layer<'a>(
|
|||
}
|
||||
}
|
||||
|
||||
/// Cached automation lane data for timeline rendering
struct AutomationLaneInfo {
    /// Backend node id of the AutomationInput node this lane belongs to.
    node_id: u32,
    /// User-visible lane name (queried from the backend).
    name: String,
    /// Cached keyframe curve, already converted to the curve-editor's format.
    keyframes: Vec<crate::curve_editor::CurvePoint>,
    /// Bottom of the displayed value range.
    value_min: f32,
    /// Top of the displayed value range.
    value_max: f32,
}
|
||||
|
||||
/// Data collected during render_layers for a single automation lane, used to call
/// render_curve_lane *after* handle_input so our widget registers last and wins priority.
struct AutomationLaneRender {
    /// Layer the lane belongs to.
    layer_id: uuid::Uuid,
    /// Backend node id of the AutomationInput node.
    node_id: u32,
    /// Screen rect the lane is rendered into.
    lane_rect: egui::Rect,
    /// Keyframes to display, in curve-editor format.
    keyframes: Vec<crate::curve_editor::CurvePoint>,
    /// Bottom of the displayed value range.
    value_min: f32,
    /// Top of the displayed value range.
    value_max: f32,
    /// Accent colour used for the curve and keyframe diamonds.
    accent_color: egui::Color32,
    /// Playhead time (seconds) for the playhead marker.
    playback_time: f64,
}
|
||||
|
||||
/// Pending automation keyframe edit action from curve lane interaction
enum AutomationLaneAction {
    /// Insert a keyframe at (time, value) on the given node's lane.
    AddKeyframe { layer_id: uuid::Uuid, node_id: u32, time: f64, value: f32 },
    /// Move the keyframe at `old_time` to (new_time, new_value); the
    /// interpolation/ease fields carry the keyframe's existing curve data.
    MoveKeyframe { layer_id: uuid::Uuid, node_id: u32, old_time: f64, new_time: f64, new_value: f32, interpolation: String, ease_out: (f32, f32), ease_in: (f32, f32) },
    /// Remove the keyframe at `time` from the given node's lane.
    DeleteKeyframe { layer_id: uuid::Uuid, node_id: u32, time: f64 },
}
|
||||
|
||||
/// Paint a soft drop shadow around a rect using gradient meshes (bottom + right + corner).
|
||||
/// Three non-overlapping quads so alpha doesn't double up.
|
||||
fn paint_drop_shadow(painter: &egui::Painter, rect: egui::Rect, shadow_size: f32, alpha: u8) {
|
||||
|
|
@ -622,6 +664,12 @@ impl TimelinePane {
|
|||
video_thumbnail_textures: std::collections::HashMap::new(),
|
||||
layer_drag: None,
|
||||
header_mousedown_pos: None,
|
||||
automation_expanded: std::collections::HashSet::new(),
|
||||
automation_cache: std::collections::HashMap::new(),
|
||||
automation_drag: std::collections::HashMap::new(),
|
||||
pending_automation_actions: Vec::new(),
|
||||
automation_cache_generation: u64::MAX,
|
||||
automation_topology_generation: u64::MAX,
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -639,6 +687,95 @@ impl TimelinePane {
|
|||
}
|
||||
}
|
||||
|
||||
/// Extra height added to a layer row when automation lanes are expanded
|
||||
fn automation_lanes_height(&self, layer_id: uuid::Uuid) -> f32 {
|
||||
if !self.automation_expanded.contains(&layer_id) {
|
||||
return 0.0;
|
||||
}
|
||||
let n = self.automation_cache.get(&layer_id).map_or(0, |v| v.len());
|
||||
n as f32 * AUTOMATION_LANE_HEIGHT
|
||||
}
|
||||
|
||||
/// Total height of a timeline row: the fixed LAYER_HEIGHT plus the height of
/// any expanded automation lanes beneath it.
fn row_height(&self, row: &TimelineRow) -> f32 {
    LAYER_HEIGHT + self.automation_lanes_height(row.layer_id())
}
|
||||
|
||||
/// Cumulative Y offset from top of rows area to the start of row at `idx`
|
||||
fn cumulative_row_y(&self, rows: &[TimelineRow], idx: usize) -> f32 {
|
||||
rows[..idx].iter().map(|r| self.row_height(r)).sum()
|
||||
}
|
||||
|
||||
/// Find which row contains `relative_y` (measured from top of rows area).
|
||||
/// Returns (row_index, y_within_row).
|
||||
fn row_at_y(&self, rows: &[TimelineRow], relative_y: f32) -> Option<(usize, f32)> {
|
||||
let mut y = 0.0f32;
|
||||
for (i, row) in rows.iter().enumerate() {
|
||||
let h = self.row_height(row);
|
||||
if relative_y >= y && relative_y < y + h {
|
||||
return Some((i, relative_y - y));
|
||||
}
|
||||
y += h;
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
/// Refresh the automation lane cache for a layer by querying the backend.
|
||||
fn refresh_automation_cache(
|
||||
&mut self,
|
||||
layer_id: uuid::Uuid,
|
||||
controller: &mut daw_backend::EngineController,
|
||||
layer_to_track_map: &std::collections::HashMap<uuid::Uuid, daw_backend::TrackId>,
|
||||
) {
|
||||
let track_id = match layer_to_track_map.get(&layer_id) {
|
||||
Some(t) => *t,
|
||||
None => return,
|
||||
};
|
||||
|
||||
// Query the graph state JSON
|
||||
let json = match controller.query_graph_state(track_id) {
|
||||
Ok(j) => j,
|
||||
Err(_) => return,
|
||||
};
|
||||
|
||||
let preset: daw_backend::GraphPreset = match serde_json::from_str(&json) {
|
||||
Ok(p) => p,
|
||||
Err(_) => return,
|
||||
};
|
||||
|
||||
let mut lanes = Vec::new();
|
||||
for node in &preset.nodes {
|
||||
if node.node_type != "AutomationInput" {
|
||||
continue;
|
||||
}
|
||||
let name = controller
|
||||
.query_automation_name(track_id, node.id)
|
||||
.unwrap_or_else(|_| "Automation".to_string());
|
||||
let keyframes = controller
|
||||
.query_automation_keyframes(track_id, node.id)
|
||||
.unwrap_or_default()
|
||||
.iter()
|
||||
.map(|k| crate::curve_editor::CurvePoint {
|
||||
time: k.time,
|
||||
value: k.value,
|
||||
interpolation: match k.interpolation.as_str() {
|
||||
"bezier" => crate::curve_editor::CurveInterpolation::Bezier,
|
||||
"step" => crate::curve_editor::CurveInterpolation::Step,
|
||||
"hold" => crate::curve_editor::CurveInterpolation::Hold,
|
||||
_ => crate::curve_editor::CurveInterpolation::Linear,
|
||||
},
|
||||
ease_out: k.ease_out,
|
||||
ease_in: k.ease_in,
|
||||
})
|
||||
.collect();
|
||||
let (value_min, value_max) = controller
|
||||
.query_automation_range(track_id, node.id)
|
||||
.unwrap_or((-1.0, 1.0));
|
||||
lanes.push(AutomationLaneInfo { node_id: node.id, name, keyframes, value_min, value_max });
|
||||
}
|
||||
self.automation_cache.insert(layer_id, lanes);
|
||||
}
|
||||
|
||||
/// Toggle recording on/off
|
||||
/// In Auto mode, records to the active layer (audio or video with camera)
|
||||
fn toggle_recording(&mut self, shared: &mut SharedPaneState) {
|
||||
|
|
@ -937,11 +1074,11 @@ impl TimelinePane {
|
|||
}
|
||||
|
||||
let relative_y = pointer_pos.y - header_rect.min.y + self.viewport_scroll_y;
|
||||
let hovered_layer_index = (relative_y / LAYER_HEIGHT) as usize;
|
||||
|
||||
if hovered_layer_index >= layer_count {
|
||||
return None;
|
||||
}
|
||||
let (hovered_layer_index, _y_within_row) = match self.row_at_y(&rows, relative_y) {
|
||||
Some(v) => v,
|
||||
None => return None,
|
||||
};
|
||||
let _ = layer_count; // suppress unused warning
|
||||
|
||||
let row = &rows[hovered_layer_index];
|
||||
// Collapsed groups have no directly clickable clips
|
||||
|
|
@ -969,7 +1106,7 @@ impl TimelinePane {
|
|||
|
||||
if mouse_x >= start_x && mouse_x <= end_x {
|
||||
// Check vertical bounds for stacked vector layer clips
|
||||
let layer_top = header_rect.min.y + (hovered_layer_index as f32 * LAYER_HEIGHT) - self.viewport_scroll_y;
|
||||
let layer_top = header_rect.min.y + self.cumulative_row_y(&rows, hovered_layer_index) - self.viewport_scroll_y;
|
||||
let (row, total_rows) = stacking[ci_idx];
|
||||
let (cy_min, cy_max) = clip_instance_y_bounds(row, total_rows);
|
||||
let mouse_rel_y = pointer_pos.y - layer_top;
|
||||
|
|
@ -1027,10 +1164,10 @@ impl TimelinePane {
|
|||
}
|
||||
|
||||
let relative_y = pointer_pos.y - header_rect.min.y + self.viewport_scroll_y;
|
||||
let hovered_index = (relative_y / LAYER_HEIGHT) as usize;
|
||||
if hovered_index >= rows.len() {
|
||||
return None;
|
||||
}
|
||||
let (hovered_index, _) = match self.row_at_y(&rows, relative_y) {
|
||||
Some(v) => v,
|
||||
None => return None,
|
||||
};
|
||||
|
||||
let TimelineRow::CollapsedGroup { group, .. } = &rows[hovered_index] else {
|
||||
return None;
|
||||
|
|
@ -1491,21 +1628,24 @@ impl TimelinePane {
|
|||
let gap_row_index = self.layer_drag.as_ref().map(|d| d.gap_row_index);
|
||||
|
||||
// Build filtered row list (excluding dragged layers)
|
||||
let rows: Vec<&TimelineRow> = all_rows.iter()
|
||||
let rows: Vec<TimelineRow> = all_rows.iter()
|
||||
.filter(|r| !drag_layer_ids.contains(&r.layer_id()))
|
||||
.copied()
|
||||
.collect();
|
||||
|
||||
// Draw layer headers from virtual row list
|
||||
for (filtered_i, row) in rows.iter().enumerate() {
|
||||
// Compute Y with gap offset: rows at or after the gap shift down by drag_count * LAYER_HEIGHT
|
||||
let visual_i = match gap_row_index {
|
||||
Some(gap) if filtered_i >= gap => filtered_i + drag_count,
|
||||
_ => filtered_i,
|
||||
// Compute Y using cumulative heights (supports variable-height rows with automation lanes)
|
||||
let base_y = self.cumulative_row_y(&rows, filtered_i);
|
||||
let gap_shift = match gap_row_index {
|
||||
Some(gap) if filtered_i >= gap => drag_count as f32 * LAYER_HEIGHT,
|
||||
_ => 0.0,
|
||||
};
|
||||
let y = rect.min.y + visual_i as f32 * LAYER_HEIGHT - self.viewport_scroll_y;
|
||||
let y = rect.min.y + base_y + gap_shift - self.viewport_scroll_y;
|
||||
let row_total_height = self.row_height(row);
|
||||
|
||||
// Skip if layer is outside visible area
|
||||
if y + LAYER_HEIGHT < rect.min.y || y > rect.max.y {
|
||||
// Skip if row is outside visible area
|
||||
if y + row_total_height < rect.min.y || y > rect.max.y {
|
||||
continue;
|
||||
}
|
||||
|
||||
|
|
@ -1959,6 +2099,103 @@ impl TimelinePane {
|
|||
],
|
||||
egui::Stroke::new(1.0, theme.border_color(&["#timeline", ".separator"], ui.ctx(), egui::Color32::from_gray(20))),
|
||||
);
|
||||
|
||||
// Automation expand/collapse button for Audio/MIDI layers
|
||||
let is_audio_or_midi = matches!(
|
||||
row,
|
||||
TimelineRow::Normal(AnyLayer::Audio(_))
|
||||
| TimelineRow::GroupChild { child: AnyLayer::Audio(_), .. }
|
||||
);
|
||||
if is_audio_or_midi {
|
||||
let btn_rect = egui::Rect::from_min_size(
|
||||
egui::pos2(rect.min.x + 4.0, y + LAYER_HEIGHT - 18.0),
|
||||
egui::vec2(16.0, 14.0),
|
||||
);
|
||||
let expanded = self.automation_expanded.contains(&layer_id);
|
||||
let btn_label = if expanded { "▼" } else { "▶" };
|
||||
let btn_response = ui.scope_builder(egui::UiBuilder::new().max_rect(btn_rect), |ui| {
|
||||
ui.allocate_rect(btn_rect, egui::Sense::click())
|
||||
}).inner;
|
||||
ui.painter().text(
|
||||
btn_rect.center(),
|
||||
egui::Align2::CENTER_CENTER,
|
||||
btn_label,
|
||||
egui::FontId::proportional(9.0),
|
||||
secondary_text_color,
|
||||
);
|
||||
if btn_response.clicked() {
|
||||
self.layer_control_clicked = true;
|
||||
if expanded {
|
||||
self.automation_expanded.remove(&layer_id);
|
||||
} else {
|
||||
self.automation_expanded.insert(layer_id);
|
||||
// Trigger cache refresh
|
||||
// We can't call refresh_automation_cache here (needs controller),
|
||||
// so mark it as needing refresh via empty cache entry
|
||||
self.automation_cache.remove(&layer_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Draw automation lane sub-headers below this row
|
||||
if self.automation_expanded.contains(&layer_id) {
|
||||
if let Some(lanes) = self.automation_cache.get(&layer_id) {
|
||||
let lane_count = lanes.len();
|
||||
// Collect lane info to avoid borrow conflict
|
||||
let lane_names: Vec<String> = lanes.iter().map(|l| l.name.clone()).collect();
|
||||
for (lane_idx, lane_name) in lane_names.iter().enumerate() {
|
||||
let lane_y = y + LAYER_HEIGHT + lane_idx as f32 * AUTOMATION_LANE_HEIGHT;
|
||||
if lane_y + AUTOMATION_LANE_HEIGHT < rect.min.y || lane_y > rect.max.y {
|
||||
continue;
|
||||
}
|
||||
let lane_rect = egui::Rect::from_min_size(
|
||||
egui::pos2(rect.min.x, lane_y),
|
||||
egui::vec2(LAYER_HEADER_WIDTH, AUTOMATION_LANE_HEIGHT),
|
||||
);
|
||||
ui.painter().rect_filled(
|
||||
lane_rect,
|
||||
0.0,
|
||||
egui::Color32::from_rgb(25, 25, 30),
|
||||
);
|
||||
// Indent line
|
||||
let indent_rect = egui::Rect::from_min_size(
|
||||
lane_rect.min,
|
||||
egui::vec2(20.0, AUTOMATION_LANE_HEIGHT),
|
||||
);
|
||||
ui.painter().rect_filled(
|
||||
indent_rect,
|
||||
0.0,
|
||||
egui::Color32::from_rgb(15, 15, 20),
|
||||
);
|
||||
// Curve icon (small ≈ symbol)
|
||||
ui.painter().text(
|
||||
egui::pos2(lane_rect.min.x + 10.0, lane_y + AUTOMATION_LANE_HEIGHT * 0.5),
|
||||
egui::Align2::CENTER_CENTER,
|
||||
"~",
|
||||
egui::FontId::proportional(11.0),
|
||||
secondary_text_color,
|
||||
);
|
||||
// Lane name
|
||||
let display_name = if lane_name.is_empty() { "Automation" } else { lane_name.as_str() };
|
||||
ui.painter().text(
|
||||
egui::pos2(lane_rect.min.x + 22.0, lane_y + AUTOMATION_LANE_HEIGHT * 0.5 - 6.0),
|
||||
egui::Align2::LEFT_TOP,
|
||||
display_name,
|
||||
egui::FontId::proportional(11.0),
|
||||
text_color,
|
||||
);
|
||||
// Bottom separator
|
||||
ui.painter().line_segment(
|
||||
[
|
||||
egui::pos2(lane_rect.min.x, lane_y + AUTOMATION_LANE_HEIGHT - 1.0),
|
||||
egui::pos2(lane_rect.max.x, lane_y + AUTOMATION_LANE_HEIGHT - 1.0),
|
||||
],
|
||||
egui::Stroke::new(1.0, egui::Color32::from_gray(30)),
|
||||
);
|
||||
let _ = lane_count; // suppress warning
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Draw floating dragged layer headers at mouse position with drop shadow
|
||||
|
|
@ -2066,8 +2303,10 @@ impl TimelinePane {
|
|||
context_layers: &[&lightningbeam_core::layer::AnyLayer],
|
||||
video_manager: &std::sync::Arc<std::sync::Mutex<lightningbeam_core::video::VideoManager>>,
|
||||
audio_cache: &HashMap<uuid::Uuid, Vec<ClipInstance>>,
|
||||
) -> Vec<(egui::Rect, uuid::Uuid, f64, f64)> {
|
||||
let painter = ui.painter();
|
||||
playback_time: f64,
|
||||
) -> (Vec<(egui::Rect, uuid::Uuid, f64, f64)>, Vec<AutomationLaneRender>) {
|
||||
let painter = ui.painter().clone();
|
||||
let mut pending_lane_renders: Vec<AutomationLaneRender> = Vec::new();
|
||||
|
||||
// Collect video clip rects for hover detection (to avoid borrow conflicts)
|
||||
let mut video_clip_hovers: Vec<(egui::Rect, uuid::Uuid, f64, f64)> = Vec::new();
|
||||
|
|
@ -2106,6 +2345,12 @@ impl TimelinePane {
|
|||
let drag_float_top_y: Option<f32> = self.layer_drag.as_ref()
|
||||
.map(|d| d.current_mouse_y - d.grab_offset_y);
|
||||
|
||||
// Pre-collect non-dragged rows for cumulative height calculation
|
||||
let non_dragged_rows: Vec<TimelineRow> = all_rows.iter()
|
||||
.filter(|r| !drag_layer_ids_content.contains(&r.layer_id()))
|
||||
.copied()
|
||||
.collect();
|
||||
|
||||
let row_y_positions: Vec<f32> = {
|
||||
let mut positions = Vec::with_capacity(all_rows.len());
|
||||
let mut filtered_i = 0usize;
|
||||
|
|
@ -2117,12 +2362,16 @@ impl TimelinePane {
|
|||
positions.push(base_y + drag_offset as f32 * LAYER_HEIGHT);
|
||||
drag_offset += 1;
|
||||
} else {
|
||||
// Non-dragged row: discrete position, shifted around gap
|
||||
let visual = match gap_row_index_content {
|
||||
Some(gap) if filtered_i >= gap => filtered_i + drag_count_content,
|
||||
_ => filtered_i,
|
||||
// Non-dragged row: discrete position using cumulative heights
|
||||
let base_y: f32 = non_dragged_rows[..filtered_i]
|
||||
.iter()
|
||||
.map(|r| self.row_height(r))
|
||||
.sum();
|
||||
let gap_shift = match gap_row_index_content {
|
||||
Some(gap) if filtered_i >= gap => drag_count_content as f32 * LAYER_HEIGHT,
|
||||
_ => 0.0,
|
||||
};
|
||||
positions.push(rect.min.y + visual as f32 * LAYER_HEIGHT - self.viewport_scroll_y);
|
||||
positions.push(rect.min.y + base_y + gap_shift - self.viewport_scroll_y);
|
||||
filtered_i += 1;
|
||||
}
|
||||
}
|
||||
|
|
@ -2161,7 +2410,7 @@ impl TimelinePane {
|
|||
|
||||
// Drop shadow for dragged rows
|
||||
if is_being_dragged {
|
||||
paint_drop_shadow(painter, layer_rect, 8.0, 60);
|
||||
paint_drop_shadow(&painter, layer_rect, 8.0, 60);
|
||||
}
|
||||
|
||||
let row_layer_id = row.layer_id();
|
||||
|
|
@ -2938,7 +3187,7 @@ impl TimelinePane {
|
|||
if iter_duration <= 0.0 { continue; }
|
||||
|
||||
Self::render_midi_piano_roll(
|
||||
painter,
|
||||
&painter,
|
||||
clip_rect,
|
||||
rect.min.x,
|
||||
events,
|
||||
|
|
@ -2954,7 +3203,7 @@ impl TimelinePane {
|
|||
}
|
||||
} else {
|
||||
Self::render_midi_piano_roll(
|
||||
painter,
|
||||
&painter,
|
||||
clip_rect,
|
||||
rect.min.x,
|
||||
events,
|
||||
|
|
@ -3332,13 +3581,41 @@ impl TimelinePane {
|
|||
],
|
||||
egui::Stroke::new(1.0, theme.border_color(&["#timeline", ".separator"], ui.ctx(), egui::Color32::from_gray(20))),
|
||||
);
|
||||
|
||||
// Collect automation lane render data — actual render_curve_lane calls happen after
|
||||
// handle_input so our widgets register last and win egui's interaction priority.
|
||||
if self.automation_expanded.contains(&row_layer_id) {
|
||||
if let Some(lanes) = self.automation_cache.get(&row_layer_id) {
|
||||
let (_, tc) = layer_type_info(layer);
|
||||
for (lane_idx, lane) in lanes.iter().enumerate() {
|
||||
let lane_top = y + LAYER_HEIGHT + lane_idx as f32 * AUTOMATION_LANE_HEIGHT;
|
||||
if lane_top + AUTOMATION_LANE_HEIGHT < rect.min.y || lane_top > rect.max.y {
|
||||
continue;
|
||||
}
|
||||
let lane_rect = egui::Rect::from_min_size(
|
||||
egui::pos2(rect.min.x, lane_top),
|
||||
egui::vec2(rect.width(), AUTOMATION_LANE_HEIGHT),
|
||||
);
|
||||
pending_lane_renders.push(AutomationLaneRender {
|
||||
layer_id: row_layer_id,
|
||||
node_id: lane.node_id,
|
||||
lane_rect,
|
||||
keyframes: lane.keyframes.clone(),
|
||||
value_min: lane.value_min,
|
||||
value_max: lane.value_max,
|
||||
accent_color: tc,
|
||||
playback_time,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Clean up stale video thumbnail textures for clips no longer visible
|
||||
self.video_thumbnail_textures.retain(|&(clip_id, _), _| visible_video_clip_ids.contains(&clip_id));
|
||||
|
||||
// Return video clip hover data for processing after input handling
|
||||
video_clip_hovers
|
||||
// Return video clip hover data and pending lane renders for processing after input handling
|
||||
(video_clip_hovers, pending_lane_renders)
|
||||
}
|
||||
|
||||
/// Handle mouse input for scrubbing, panning, zooming, layer selection, and clip instance selection
|
||||
|
|
@ -4621,7 +4898,7 @@ impl PaneRenderer for TimelinePane {
|
|||
|
||||
// Render layer rows with clipping
|
||||
ui.set_clip_rect(content_rect.intersect(original_clip_rect));
|
||||
let video_clip_hovers = self.render_layers(ui, content_rect, shared.theme, document, shared.active_layer_id, shared.focus, shared.selection, shared.midi_event_cache, shared.raw_audio_cache, shared.waveform_gpu_dirty, shared.target_format, shared.waveform_stereo, &context_layers, shared.video_manager, &audio_cache);
|
||||
let (video_clip_hovers, pending_lane_renders) = self.render_layers(ui, content_rect, shared.theme, document, shared.active_layer_id, shared.focus, shared.selection, shared.midi_event_cache, shared.raw_audio_cache, shared.waveform_gpu_dirty, shared.target_format, shared.waveform_stereo, &context_layers, shared.video_manager, &audio_cache, *shared.playback_time);
|
||||
|
||||
// Render playhead on top (clip to timeline area)
|
||||
ui.set_clip_rect(timeline_rect.intersect(original_clip_rect));
|
||||
|
|
@ -4630,6 +4907,88 @@ impl PaneRenderer for TimelinePane {
|
|||
// Restore original clip rect
|
||||
ui.set_clip_rect(original_clip_rect);
|
||||
|
||||
// Process pending automation lane edit actions
|
||||
if !self.pending_automation_actions.is_empty() {
|
||||
let actions = std::mem::take(&mut self.pending_automation_actions);
|
||||
if let Some(controller_arc) = shared.audio_controller {
|
||||
let mut controller = controller_arc.lock().unwrap();
|
||||
for action in actions {
|
||||
match action {
|
||||
AutomationLaneAction::AddKeyframe { layer_id, node_id, time, value } => {
|
||||
if let Some(&track_id) = shared.layer_to_track_map.get(&layer_id) {
|
||||
controller.automation_add_keyframe(track_id, node_id, time, value, "linear".to_string(), (0.0, 0.0), (0.0, 0.0));
|
||||
// Optimistic cache update
|
||||
if let Some(lanes) = self.automation_cache.get_mut(&layer_id) {
|
||||
if let Some(lane) = lanes.iter_mut().find(|l| l.node_id == node_id) {
|
||||
let new_kf = crate::curve_editor::CurvePoint {
|
||||
time,
|
||||
value,
|
||||
interpolation: crate::curve_editor::CurveInterpolation::Linear,
|
||||
ease_out: (0.0, 0.0),
|
||||
ease_in: (0.0, 0.0),
|
||||
};
|
||||
lane.keyframes.push(new_kf);
|
||||
lane.keyframes.sort_by(|a, b| a.time.partial_cmp(&b.time).unwrap_or(std::cmp::Ordering::Equal));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
AutomationLaneAction::MoveKeyframe { layer_id, node_id, old_time, new_time, new_value, interpolation, ease_out, ease_in } => {
|
||||
if let Some(&track_id) = shared.layer_to_track_map.get(&layer_id) {
|
||||
controller.automation_remove_keyframe(track_id, node_id, old_time);
|
||||
controller.automation_add_keyframe(track_id, node_id, new_time, new_value, interpolation.clone(), ease_out, ease_in);
|
||||
// Optimistic cache update
|
||||
if let Some(lanes) = self.automation_cache.get_mut(&layer_id) {
|
||||
if let Some(lane) = lanes.iter_mut().find(|l| l.node_id == node_id) {
|
||||
if let Some(kf) = lane.keyframes.iter_mut().find(|k| (k.time - old_time).abs() < 0.001) {
|
||||
kf.time = new_time;
|
||||
kf.value = new_value;
|
||||
}
|
||||
lane.keyframes.sort_by(|a, b| a.time.partial_cmp(&b.time).unwrap_or(std::cmp::Ordering::Equal));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
AutomationLaneAction::DeleteKeyframe { layer_id, node_id, time } => {
|
||||
if let Some(&track_id) = shared.layer_to_track_map.get(&layer_id) {
|
||||
controller.automation_remove_keyframe(track_id, node_id, time);
|
||||
// Optimistic cache update
|
||||
if let Some(lanes) = self.automation_cache.get_mut(&layer_id) {
|
||||
if let Some(lane) = lanes.iter_mut().find(|l| l.node_id == node_id) {
|
||||
lane.keyframes.retain(|k| (k.time - time).abs() >= 0.001);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Invalidate automation cache when the project changes (new node added, etc.)
|
||||
if *shared.project_generation != self.automation_cache_generation {
|
||||
self.automation_cache_generation = *shared.project_generation;
|
||||
self.automation_cache.clear();
|
||||
}
|
||||
|
||||
// Refresh automation cache for expanded layers.
|
||||
// Clears all caches when the project is reloaded (project_generation) or when the node
|
||||
// graph topology changes (graph_topology_generation — bumped by the node graph pane on
|
||||
// any successful add/remove/connect action).
|
||||
let topology_changed = *shared.graph_topology_generation != self.automation_topology_generation;
|
||||
if topology_changed {
|
||||
self.automation_topology_generation = *shared.graph_topology_generation;
|
||||
self.automation_cache.clear();
|
||||
}
|
||||
for layer_id in self.automation_expanded.iter().copied().collect::<Vec<_>>() {
|
||||
if !self.automation_cache.contains_key(&layer_id) {
|
||||
if let Some(controller_arc) = shared.audio_controller {
|
||||
let mut controller = controller_arc.lock().unwrap();
|
||||
self.refresh_automation_cache(layer_id, &mut controller, shared.layer_to_track_map);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Handle input (use full rect including header column)
|
||||
self.handle_input(
|
||||
ui,
|
||||
|
|
@ -4651,6 +5010,69 @@ impl PaneRenderer for TimelinePane {
|
|||
&audio_cache,
|
||||
);
|
||||
|
||||
// Render automation lanes AFTER handle_input so our ui.interact registers last and wins
|
||||
// egui's interaction priority over handle_input's full-content-area allocation.
|
||||
ui.set_clip_rect(content_rect.intersect(original_clip_rect));
|
||||
for lane in &pending_lane_renders {
|
||||
let drag_key = (lane.layer_id, lane.node_id);
|
||||
let mut drag_state_local = self.automation_drag
|
||||
.get(&drag_key)
|
||||
.and_then(|v| v.clone());
|
||||
let lane_id = egui::Id::new("automation_lane")
|
||||
.with(lane.layer_id)
|
||||
.with(lane.node_id);
|
||||
let lane_min_x = lane.lane_rect.min.x;
|
||||
let action = crate::curve_editor::render_curve_lane(
|
||||
ui,
|
||||
lane.lane_rect,
|
||||
&lane.keyframes,
|
||||
&mut drag_state_local,
|
||||
lane.playback_time,
|
||||
lane.accent_color,
|
||||
lane_id,
|
||||
lane.value_min,
|
||||
lane.value_max,
|
||||
|t| lane_min_x + self.time_to_x(t),
|
||||
|x| self.x_to_time(x - lane_min_x),
|
||||
);
|
||||
self.automation_drag.insert(drag_key, drag_state_local);
|
||||
let layer_id = lane.layer_id;
|
||||
let node_id = lane.node_id;
|
||||
let keyframes = &lane.keyframes;
|
||||
match action {
|
||||
crate::curve_editor::CurveEditAction::AddKeyframe { time, value } => {
|
||||
self.pending_automation_actions.push(AutomationLaneAction::AddKeyframe {
|
||||
layer_id, node_id, time, value,
|
||||
});
|
||||
}
|
||||
crate::curve_editor::CurveEditAction::MoveKeyframe { index, new_time, new_value } => {
|
||||
if let Some(kf) = keyframes.get(index) {
|
||||
self.pending_automation_actions.push(AutomationLaneAction::MoveKeyframe {
|
||||
layer_id, node_id,
|
||||
old_time: kf.time, new_time, new_value,
|
||||
interpolation: match kf.interpolation {
|
||||
crate::curve_editor::CurveInterpolation::Bezier => "bezier".to_string(),
|
||||
crate::curve_editor::CurveInterpolation::Step => "step".to_string(),
|
||||
crate::curve_editor::CurveInterpolation::Hold => "hold".to_string(),
|
||||
_ => "linear".to_string(),
|
||||
},
|
||||
ease_out: kf.ease_out,
|
||||
ease_in: kf.ease_in,
|
||||
});
|
||||
}
|
||||
}
|
||||
crate::curve_editor::CurveEditAction::DeleteKeyframe { index } => {
|
||||
if let Some(kf) = keyframes.get(index) {
|
||||
self.pending_automation_actions.push(AutomationLaneAction::DeleteKeyframe {
|
||||
layer_id, node_id, time: kf.time,
|
||||
});
|
||||
}
|
||||
}
|
||||
crate::curve_editor::CurveEditAction::None => {}
|
||||
}
|
||||
}
|
||||
ui.set_clip_rect(original_clip_rect);
|
||||
|
||||
// Context menu: detect right-click on clips or empty timeline space
|
||||
let mut just_opened_menu = false;
|
||||
let secondary_clicked = ui.input(|i| i.pointer.button_clicked(egui::PointerButton::Secondary));
|
||||
|
|
@ -4933,10 +5355,9 @@ impl PaneRenderer for TimelinePane {
|
|||
if content_rect.contains(pointer_pos) {
|
||||
// Calculate which layer the pointer is over
|
||||
let relative_y = pointer_pos.y - content_rect.min.y + self.viewport_scroll_y;
|
||||
let hovered_layer_index = (relative_y / LAYER_HEIGHT) as usize;
|
||||
|
||||
// Get the layer at this index (using virtual rows for group support)
|
||||
let drop_rows = build_timeline_rows(&context_layers);
|
||||
let hovered_layer_index = self.row_at_y(&drop_rows, relative_y).map(|(i, _)| i).unwrap_or(usize::MAX);
|
||||
|
||||
let drop_layer = drop_rows.get(hovered_layer_index).and_then(|r| r.as_any_layer());
|
||||
if let Some(layer) = drop_layer {
|
||||
|
|
|
|||
Loading…
Reference in New Issue