Compare commits

..

275 Commits

Author SHA1 Message Date
Skyler Lehmkuhl 8acac71d86 Merge branch 'rust-ui' of https://git.skyler.io/skyler/Lightningbeam into rust-ui 2026-03-13 18:53:37 -04:00
Skyler Lehmkuhl c9a9c2c5f0 rewrite vector backend again 2026-03-13 18:53:33 -04:00
Skyler Lehmkuhl d7a29ee1dc Double CPU performance by using tiny-skia instead of vello CPU 2026-03-13 18:52:37 -04:00
Skyler Lehmkuhl be8514e2e6 Fix midi tracks recording previews 2026-03-11 12:53:26 -04:00
Skyler Lehmkuhl 3bc980d08d Use audio engine as source of truth for audio tracks 2026-03-11 12:37:31 -04:00
Skyler Lehmkuhl b8f847e167 Add drawing tablet input support 2026-03-11 10:58:30 -04:00
Skyler Lehmkuhl f72c2c5dbd Release 1.0.3-alpha 2026-03-10 21:43:26 -04:00
Skyler Lehmkuhl e388902743 Bump version to 1.0.3-alpha 2026-03-10 21:42:12 -04:00
Skyler Lehmkuhl ce7ed2586f Support Vello CPU fallback on systems with older GPUs 2026-03-10 21:39:01 -04:00
Skyler Lehmkuhl 7a3f522735 Give metatracks explicit node graphs 2026-03-10 20:20:46 -04:00
Skyler Lehmkuhl f9b62bb090 Add frames timeline mode 2026-03-10 15:54:54 -04:00
Skyler Lehmkuhl 4118c75b86 Performance tweaks 2026-03-10 03:24:03 -04:00
Skyler Lehmkuhl ac2b4ff8ab Improve idle performance 2026-03-10 02:41:44 -04:00
Skyler Lehmkuhl 26f06da5bf Add gradient support to vector graphics 2026-03-10 00:57:47 -04:00
Skyler Lehmkuhl 8bd65e5904 Address Mac build failures 2026-03-09 22:03:51 -04:00
Skyler Lehmkuhl 0ae97f9562 Address Mac build failures 2026-03-09 14:36:54 -04:00
Skyler Lehmkuhl 0066dffc81 Address Mac and Windows build failures 2026-03-09 14:15:08 -04:00
Skyler Lehmkuhl 06973d185c Merge branch 'rust-ui' of https://git.skyler.io/skyler/Lightningbeam into rust-ui 2026-03-09 13:41:48 -04:00
Skyler Lehmkuhl dc93f78dc7 More work on DCEL correctness 2026-03-09 13:41:45 -04:00
Skyler Lehmkuhl 89721d4c0e Release 1.0.2-alpha 2026-03-09 13:40:58 -04:00
Skyler Lehmkuhl 78e296ffde Improve export performance 2026-03-09 13:39:56 -04:00
Skyler Lehmkuhl a18a335c60 Export images 2026-03-09 11:22:51 -04:00
Skyler Lehmkuhl 09856ab52c Refactor tools and fix bugs 2026-03-08 18:44:32 -04:00
Skyler Lehmkuhl 0d2609c064 work on raster tools 2026-03-07 16:55:38 -05:00
Skyler Lehmkuhl a628d8af37 Shape tools 2026-03-07 07:27:45 -05:00
Skyler Lehmkuhl 354b96f142 Quick select tool 2026-03-07 05:30:51 -05:00
Skyler Lehmkuhl 1900792fa9 Magic wand selection 2026-03-07 03:39:09 -05:00
Skyler Lehmkuhl 1e80b1bc77 Paint bucket 2026-03-07 02:53:47 -05:00
Skyler Lehmkuhl 4386917fc2 Implement transform tool for raster 2026-03-07 02:12:55 -05:00
Skyler Lehmkuhl e7641edd0d Add blur/sharpen tool 2026-03-06 11:47:10 -05:00
Skyler Lehmkuhl 922e8f78b6 Refactor tools 2026-03-06 10:07:24 -05:00
Skyler Lehmkuhl 901aa04246 Add sponge tool 2026-03-06 09:17:35 -05:00
Skyler Lehmkuhl 6590b249d1 Add dodge/burn tool 2026-03-06 09:05:14 -05:00
Skyler Lehmkuhl 7d55443b2a Add pattern stamp tool 2026-03-06 08:40:17 -05:00
Skyler Lehmkuhl 1d9d702a59 Add healing brush 2026-03-06 08:25:12 -05:00
Skyler Lehmkuhl de24622f02 Add clone stamp tool 2026-03-06 08:05:45 -05:00
Skyler Lehmkuhl 2c9d8c1589 Add tool skeletons 2026-03-06 07:22:50 -05:00
Skyler Lehmkuhl 37ac9b6abe Fix NAM runtime errors on linux 2026-03-06 06:37:25 -05:00
Skyler Lehmkuhl d1f47d7164 Merge branch 'rust-ui' of /home/skyler/Dev/Lightningbeam-2/. into rust-ui 2026-03-06 06:19:48 -05:00
Skyler Lehmkuhl 5ae1119de7 Brush preview cursor 2026-03-06 06:18:22 -05:00
Skyler Lehmkuhl bff3d660d6 Improve smudge tool 2026-03-06 06:03:33 -05:00
Skyler Lehmkuhl bc7d997cff More region select fixes 2026-03-06 05:52:20 -05:00
Skyler Lehmkuhl 553cc383d5 separate brush and eraser in infopanel 2026-03-06 05:24:26 -05:00
Skyler Lehmkuhl f2c15d7f0d brush fixes and improvements 2026-03-05 20:24:38 -05:00
Skyler Lehmkuhl f97e61751f Fix DCEL selection bugs 2026-03-05 19:55:39 -05:00
Skyler Lehmkuhl 292328bf87 add brush library 2026-03-04 16:42:43 -05:00
Skyler Lehmkuhl 63a8080e60 improve painting performance 2026-03-04 15:20:39 -05:00
Skyler Lehmkuhl 4e79abdc35 bundle amp models more sensibly 2026-03-04 15:20:20 -05:00
Skyler Lehmkuhl e500914fa0 Merge branch 'rust-ui' of https://git.skyler.io/skyler/Lightningbeam into rust-ui 2026-03-04 14:50:53 -05:00
Skyler Lehmkuhl a36fae7f8a Fix menu width and mouse following 2026-03-04 14:50:44 -05:00
Skyler Lehmkuhl e578aadd61 Merge branch 'rust-ui' of /home/skyler/Dev/Lightningbeam-2/. into rust-ui 2026-03-04 14:43:18 -05:00
Skyler Lehmkuhl e4b9d86688 redraw painting icon 2026-03-04 14:43:10 -05:00
Skyler Lehmkuhl 16b0d822e3 painting fixes 2026-03-04 14:40:08 -05:00
Skyler Lehmkuhl 759e41d84a fix color space for raster editing 2026-03-04 11:25:15 -05:00
Skyler Lehmkuhl 5c555bf7e1 add electric guitar preset 2026-03-02 11:58:29 -05:00
Skyler Lehmkuhl b4c7a45990 fix NAM model loading 2026-03-02 11:58:13 -05:00
Skyler Lehmkuhl 885c52c02a organize raster buffers better 2026-03-02 11:24:12 -05:00
Skyler Lehmkuhl 6b3a286caf css fixes 2026-03-02 10:51:58 -05:00
Skyler Lehmkuhl ec46e22782 update css handling 2026-03-02 10:32:19 -05:00
Skyler Lehmkuhl 73ef9e3b9c fix double paste and make selections always floating 2026-03-02 09:19:55 -05:00
Skyler Lehmkuhl 6f1a706dd2 fix interaction with a fresh raster layer 2026-03-02 08:07:45 -05:00
Skyler Lehmkuhl 19617e4223 fix pasting image data from external programs 2026-03-02 07:59:29 -05:00
Skyler Lehmkuhl c1266c0377 remove legacy path that was still dumping into text clipboard 2026-03-02 07:54:14 -05:00
Skyler Lehmkuhl 75e35b0ac6 Don't dump json into text clipboard 2026-03-02 07:30:09 -05:00
Skyler Lehmkuhl a45d674ed7 Merge branch 'rust-ui' of /home/skyler/Dev/Lightningbeam-2/. into rust-ui 2026-03-02 00:01:35 -05:00
Skyler Lehmkuhl 87815fe379 Cut/copy/paste raster data 2026-03-02 00:01:18 -05:00
Skyler Lehmkuhl 6162adfa9f Add transparent bg and make raster and vector tools use same colors 2026-03-01 23:38:20 -05:00
Skyler Lehmkuhl 1c3f794958 Merge remote and fix color space 2026-03-01 15:50:53 -05:00
Skyler Lehmkuhl da02edb9f5 Move raster editing to GPU 2026-03-01 15:41:28 -05:00
Skyler Lehmkuhl 49b822da8c Add final mix VU meters 2026-03-01 15:04:58 -05:00
Skyler Lehmkuhl a6e04ae89b Add VU meters 2026-03-01 14:49:49 -05:00
Skyler Lehmkuhl e85efe7405 Fix smudge tool 2026-03-01 14:00:39 -05:00
Skyler Lehmkuhl 8e9d90ed92 Fix recording to layers inside groups 2026-03-01 13:51:42 -05:00
Skyler Lehmkuhl 83736ec9e3 Record to multiple layers 2026-03-01 13:48:43 -05:00
Skyler Lehmkuhl 1c7256a12e Add raster layers 2026-03-01 13:16:49 -05:00
Skyler Lehmkuhl 8d8f94a547 Make layer dragging graphics nicer 2026-03-01 12:09:41 -05:00
Skyler Lehmkuhl 516960062a Drag layers to reorder 2026-03-01 11:54:41 -05:00
Skyler Lehmkuhl 4b638b882f Make tools dependent on layer type 2026-03-01 11:22:03 -05:00
Skyler Lehmkuhl c60eef0c5a Merge branch 'rust-ui' of https://git.skyler.io/skyler/Lightningbeam into rust-ui 2026-03-01 11:04:29 -05:00
Skyler Lehmkuhl b7d72d2803 fix group ordering 2026-03-01 11:04:22 -05:00
Skyler Lehmkuhl 38831948ac group layers manually 2026-03-01 11:01:51 -05:00
Skyler Lehmkuhl 4ab6fe0504 give secondary music panes a shared pane selection bar 2026-03-01 10:47:46 -05:00
Skyler Lehmkuhl ae7ea1bb46 note nam build requirements 2026-03-01 10:47:13 -05:00
Skyler Lehmkuhl 13840ee45f add top-level selection 2026-03-01 10:22:46 -05:00
Skyler Lehmkuhl 78577babb1 draw thumbnails on group clips too 2026-03-01 09:44:06 -05:00
Skyler Lehmkuhl b3e1da3152 draw thumbnails on video clips 2026-03-01 09:35:31 -05:00
Skyler Lehmkuhl 6bd400d353 fix lag spike when first displaying audio clip 2026-03-01 09:22:18 -05:00
Skyler Lehmkuhl b87e4325c2 use group layers instead of linked tracks 2026-03-01 09:00:55 -05:00
Skyler Lehmkuhl 520776c6e5 background color 2026-03-01 06:37:10 -05:00
Skyler Lehmkuhl f8df4d1232 Add webcam support in video editor 2026-03-01 06:25:43 -05:00
Skyler Lehmkuhl 52b12204d4 make release script 2026-03-01 04:33:29 -05:00
Skyler Lehmkuhl 5d39627d03 Update version to 1.0.1 2026-03-01 03:17:41 -05:00
Skyler Lehmkuhl 0026ad3e02 fix dcel 2026-03-01 03:03:57 -05:00
Skyler Lehmkuhl 9edfc2086a work on dcel 2026-03-01 00:35:02 -05:00
Skyler Lehmkuhl 14a2b0a4c2 paint bucket back to functionality 2026-02-28 13:06:41 -05:00
Skyler Lehmkuhl 1462df308f Add organ preset 2026-02-26 19:30:56 -05:00
Skyler Lehmkuhl 5a19e91788 add vibrato node 2026-02-26 19:14:34 -05:00
Skyler Lehmkuhl dc27cf253d rewrite dcel 2026-02-26 18:48:21 -05:00
Skyler Lehmkuhl 1621602f41 center stage 2026-02-25 07:57:50 -05:00
Skyler Lehmkuhl 7c37e69687 adjust default layouts 2026-02-25 07:55:26 -05:00
Skyler Lehmkuhl 63c1ba8854 use start screen for new file 2026-02-25 07:42:52 -05:00
Skyler Lehmkuhl 1cc7029321 make keyboard shortcuts configurable 2026-02-25 07:36:53 -05:00
Skyler Lehmkuhl 353aec3513 Fix panic with vertex deduplication 2026-02-25 07:02:09 -05:00
Skyler Lehmkuhl 4c34c8a17d Add snapping for vector editing 2026-02-25 03:29:42 -05:00
Skyler Lehmkuhl 2b63fdd2c5 fix build assets 2026-02-25 03:29:11 -05:00
Skyler Lehmkuhl 543d99e5d5 fix rpm build 2026-02-25 01:14:33 -05:00
Skyler Lehmkuhl 4195005455 License under GPLv3 2026-02-25 00:52:06 -05:00
Skyler Lehmkuhl 0b4aee51d7 update build yaml 2026-02-24 14:44:17 -05:00
Skyler Lehmkuhl 4a13ce0684 Merge branch 'rust-ui' of https://git.skyler.io/skyler/Lightningbeam into rust-ui 2026-02-24 14:40:19 -05:00
Skyler Lehmkuhl 8301fbfa16 windows build fixes 2026-02-24 14:40:16 -05:00
Skyler Lehmkuhl 9d521732cc update build yaml 2026-02-24 14:13:51 -05:00
Skyler Lehmkuhl 299891dab9 update build yaml 2026-02-24 13:58:34 -05:00
Skyler Lehmkuhl 196d27bf15 update build yaml 2026-02-24 13:43:52 -05:00
Skyler Lehmkuhl 0231658b88 update build yaml 2026-02-24 13:26:44 -05:00
Skyler Lehmkuhl 4ba8337607 update build yaml 2026-02-24 13:09:02 -05:00
Skyler Lehmkuhl 0a4fb0ba77 update build yaml 2026-02-24 12:53:33 -05:00
Skyler Lehmkuhl 05d79029e3 update build yaml 2026-02-24 12:39:23 -05:00
Skyler Lehmkuhl 30eff3b6b6 update build yaml 2026-02-24 12:15:45 -05:00
Skyler Lehmkuhl bc7f1170e1 fix version 2026-02-24 12:09:50 -05:00
Skyler Lehmkuhl 7ff5ddf6ee Update packaging 2026-02-24 12:05:59 -05:00
Skyler Lehmkuhl 05966ed271 rest of DCEL migration 2026-02-24 11:41:10 -05:00
Skyler Lehmkuhl 2739391257 the pain of geometry programming 2026-02-24 11:12:17 -05:00
Skyler Lehmkuhl 1cb09c7211 Debug DCEL issues 2026-02-24 08:26:17 -05:00
Skyler Lehmkuhl 72977ccaf4 Fix stroke self-intersections 2026-02-24 03:26:12 -05:00
Skyler Lehmkuhl bcf6277329 Rebuild DCEL after vector edits 2026-02-24 02:04:07 -05:00
Skyler Lehmkuhl 99f8dcfcf4 Change vector drawing primitive from shape to doubly-connected edge graph 2026-02-23 21:29:58 -05:00
Skyler Lehmkuhl eab116c930 Add beat mode 2026-02-22 18:43:17 -05:00
Skyler Lehmkuhl 205dc9dd67 commit clipboard 2026-02-22 18:25:11 -05:00
Skyler Lehmkuhl 5212993990 commit the NAM FFI bindings 2026-02-22 17:40:19 -05:00
Skyler Lehmkuhl 4122fda954 piano roll and clip resizing fixes 2026-02-21 18:45:46 -05:00
Skyler Lehmkuhl 16011e5f28 Fix preset loading not updating node graph editor 2026-02-21 10:58:10 -05:00
Skyler Lehmkuhl 725faa4445 Load sample .nam amps 2026-02-21 10:25:55 -05:00
Skyler Lehmkuhl 7e3f18c95b Add amp sim 2026-02-21 09:43:03 -05:00
Skyler Lehmkuhl 3eba231447 deduplicate node list 2026-02-21 09:42:05 -05:00
Skyler Lehmkuhl 728b88365d Add drums and guitar 2026-02-21 08:31:35 -05:00
Skyler Lehmkuhl 84f1f8e7d7 Add orchestral sampled instruments 2026-02-21 07:28:19 -05:00
Skyler Lehmkuhl 2222e68a3e Work on region select 2026-02-21 06:04:54 -05:00
Skyler Lehmkuhl 469849a0d6 Add nested audio tracks 2026-02-21 03:56:07 -05:00
Skyler Lehmkuhl 70855963cb Stack clips only on overlap 2026-02-21 01:22:48 -05:00
Skyler Lehmkuhl 1892f970c4 Initial work on movie clips 2026-02-21 00:54:38 -05:00
Skyler Lehmkuhl 3ba6dcb3d2 Group shapes 2026-02-20 11:13:56 -05:00
Skyler Lehmkuhl 7e2f63b62d highlight node connections directly 2026-02-20 11:13:34 -05:00
Skyler Lehmkuhl 3ca03069ec try and improve graphics compatibility on older systems 2026-02-20 04:32:53 -05:00
Skyler Lehmkuhl ce40147efa Fix looping bugs 2026-02-20 04:27:20 -05:00
Skyler Lehmkuhl 042dd50db3 Add clip looping 2026-02-20 02:48:44 -05:00
Skyler Lehmkuhl 66c848e218 Multi sample bulk import 2026-02-20 01:59:37 -05:00
Skyler Lehmkuhl 35089f3b2e Add state-variable filter 2026-02-20 00:20:59 -05:00
Skyler Lehmkuhl 116db01805 Build Linux packages 2026-02-19 15:50:23 -05:00
Skyler Lehmkuhl c344e11e42 Code cleanup 2026-02-19 11:19:44 -05:00
Skyler Lehmkuhl a98b59a6d3 Clean up build warnings 2026-02-19 10:21:48 -05:00
Skyler Lehmkuhl 92dffbaa4e Add UI to script node 2026-02-19 10:16:05 -05:00
Skyler Lehmkuhl 2804c2bd5d Merge branch 'rust-ui' of https://git.skyler.io/skyler/Lightningbeam into rust-ui 2026-02-19 09:29:19 -05:00
Skyler Lehmkuhl c66487b25e Add script node 2026-02-19 09:29:14 -05:00
Skyler Lehmkuhl 8e342582d9 hide console on windows release builds 2026-02-19 07:55:55 -05:00
Skyler Lehmkuhl ac575482f3 Alphabetize nodes in node finder 2026-02-19 06:07:09 -05:00
Skyler Lehmkuhl 75be94d737 Add step sequencer node 2026-02-19 06:06:41 -05:00
Skyler Lehmkuhl dae82b02d1 Merge branch 'rust-ui' of https://git.skyler.io/skyler/Lightningbeam into rust-ui 2026-02-19 05:56:00 -05:00
Skyler Lehmkuhl 0a18d28f98 Add arpeggiator node 2026-02-19 05:30:34 -05:00
Skyler Lehmkuhl 89bbd3614f Add beat node 2026-02-19 01:19:40 -05:00
Skyler Lehmkuhl 21a49235fc sampler improvements, live waveform preview 2026-02-17 10:08:49 -05:00
Skyler Lehmkuhl 9935c2f3bd resample recorded audio if it has a different sample rate (fix for WASAPI persnicketiness) 2026-02-17 09:44:24 -05:00
Skyler Lehmkuhl 5a97ea76d5 add windows build script 2026-02-17 09:42:06 -05:00
Skyler Lehmkuhl c10f42da8f Merge branch 'rust-ui' of https://git.skyler.io/skyler/Lightningbeam into rust-ui 2026-02-16 19:12:49 -05:00
Skyler Lehmkuhl b2a6304771 make sample load menus consistent 2026-02-16 19:12:21 -05:00
Skyler Lehmkuhl 6bbf7d27df node connection improvement 2026-02-16 19:12:06 -05:00
Skyler Lehmkuhl 2c0d53fb84 Work on sampler nodes, fix slew limiter 2026-02-16 18:45:11 -05:00
Skyler Lehmkuhl e9ee0d92e2 fix shaders on windows 2026-02-16 10:48:51 -05:00
Skyler Lehmkuhl 2a94ac0f69 Merge branch 'rust-ui' of https://git.skyler.io/skyler/Lightningbeam into rust-ui 2026-02-16 10:06:00 -05:00
Skyler Lehmkuhl 6c10112a16 Fix build on Windows 2026-02-16 10:05:39 -05:00
Skyler Lehmkuhl 93a29192fd Split export dialog into simple/advanced 2026-02-16 08:15:13 -05:00
Skyler Lehmkuhl e03d12009f fix broken mp3/aac export 2026-02-16 07:53:23 -05:00
Skyler Lehmkuhl 6c88c4a8da clean up compiler warnings in egui_node_graph2 2026-02-16 07:52:29 -05:00
Skyler Lehmkuhl da147fe6d4 Stop virtual piano from stealing keyboard focus from input elements 2026-02-16 06:16:05 -05:00
Skyler Lehmkuhl 65fa8a3918 Add preset pane 2026-02-16 06:06:03 -05:00
Skyler Lehmkuhl 0ff651f4a5 Use forked egui to fix wayland/ibus bug 2026-02-16 04:05:59 -05:00
Skyler Lehmkuhl 0bd933fd45 Group nodes 2026-02-16 03:33:32 -05:00
Skyler Lehmkuhl ffe7799b6a rewrite unsafe code in midi handling 2026-02-16 00:34:59 -05:00
Skyler Lehmkuhl 9db34daf85 make default voice polyphonic 2026-02-16 00:19:15 -05:00
Skyler Lehmkuhl 6c4cc62098 rewrite unsafe code in ffmpeg ffi 2026-02-15 23:35:30 -05:00
Skyler Lehmkuhl a16c14a6a8 Keep voices around while notes are releasing 2026-02-15 23:27:15 -05:00
Skyler Lehmkuhl 06c5342724 rewrite unsafe code in voice allocator 2026-02-15 23:22:36 -05:00
Skyler Lehmkuhl 72f10db64d add voice allocator node 2026-02-15 23:10:00 -05:00
Skyler Lehmkuhl 0a27e4d328 add parameters to nodes that were missing them 2026-02-15 09:26:20 -05:00
Skyler Lehmkuhl 7387299b52 node graph fixes 2026-02-15 09:11:39 -05:00
Skyler Lehmkuhl 1e7001b291 Add parameters to audio nodes and rename Delay node to Echo 2026-02-15 05:34:28 -05:00
Skyler Lehmkuhl 6fcee92d59 Add user preference to show waveforms as stacked stereo 2026-02-15 04:50:33 -05:00
Skyler Lehmkuhl c6a8b944e5 Add copy and paste 2026-02-15 04:38:42 -05:00
Skyler Lehmkuhl 5164d7a0a9 Add right click menu to clips 2026-02-15 02:45:53 -05:00
Skyler Lehmkuhl 394e369122 Add clip split and duplicate commands 2026-02-15 02:11:57 -05:00
Skyler Lehmkuhl 12d927ed3d Fix UI hang on audio import 2026-02-15 00:50:22 -05:00
Skyler Lehmkuhl 408343094a Stream audio to spectrograph shader too 2026-02-14 23:58:20 -05:00
Skyler Lehmkuhl 04a7f35b84 Fix piano roll scrolling 2026-02-14 21:43:00 -05:00
Skyler Lehmkuhl 068715c0fa Use CQT transform for spectrograph instead of FFT 2026-02-14 21:18:30 -05:00
Skyler Lehmkuhl 777d3ef6be Clean up build warnings 2026-02-14 11:07:32 -05:00
Skyler Lehmkuhl 82b58ae0dc Midi recording 2026-02-13 18:00:59 -05:00
Skyler Lehmkuhl b86af7bbf5 Add piano roll 2026-02-12 19:05:49 -05:00
Skyler Lehmkuhl c11dab928c Add spectrogram 2026-02-12 18:37:34 -05:00
Skyler Lehmkuhl ad81cce0c6 fix unaligned debug in node graph 2026-02-11 19:30:11 -05:00
Skyler Lehmkuhl 93a2252a58 Update README 2026-02-11 19:11:11 -05:00
Skyler Lehmkuhl 908da99321 Add initial docs 2026-02-11 19:10:58 -05:00
Skyler Lehmkuhl f924b4c0cd Stream audio instead of loading the whole thing into memory 2026-02-11 19:07:48 -05:00
Skyler Lehmkuhl 8e38c0c5a1 Fix audio overruns 2026-02-11 16:15:16 -05:00
Skyler Lehmkuhl 8ac5f52f28 Render audio waveforms on gpu 2026-02-11 14:38:58 -05:00
Skyler Lehmkuhl fc58f29ccd Record audio 2026-02-11 02:28:37 -05:00
Skyler Lehmkuhl c3e1160fa2 add drag preview for asset library 2026-01-13 20:38:51 -05:00
Skyler Lehmkuhl f4ffa7ecdd improve folders a bit 2026-01-13 20:30:16 -05:00
Skyler Lehmkuhl b19f66e648 add folders to asset library 2025-12-30 00:45:19 -05:00
Skyler Lehmkuhl 1fcad0355d handle preview rendering during shape editing 2025-12-23 09:36:54 -05:00
Skyler Lehmkuhl f1df85baa2 fix quick drags of control points 2025-12-22 18:54:43 -05:00
Skyler Lehmkuhl ffb53884b0 initial vector editing 2025-12-22 18:34:01 -05:00
Skyler Lehmkuhl 2dea1eab9e improve video import performance a bit 2025-12-17 14:12:16 -05:00
Skyler Lehmkuhl caba4305d8 Allow setting node cv inputs via slider, add preferences window 2025-12-17 07:38:10 -05:00
Skyler Lehmkuhl 88dc60f036 hook graph up to audio backend 2025-12-16 13:22:28 -05:00
Skyler Lehmkuhl d7176a13b7 Node graph improvements 2025-12-16 11:37:07 -05:00
Skyler Lehmkuhl c58192a7da Use egui_node_graph2 for node graph 2025-12-16 10:14:34 -05:00
Skyler Lehmkuhl fa7bae12a6 add dropped nodes at cursor position 2025-12-16 08:06:54 -05:00
Skyler Lehmkuhl 798d8420af Node graph initial work 2025-12-16 07:59:16 -05:00
Skyler Lehmkuhl dda1319c42 Add debug overlay 2025-12-12 12:19:12 -05:00
Skyler Lehmkuhl c2f092b5eb Add notification when export completes 2025-12-12 11:26:22 -05:00
Skyler Lehmkuhl cb62d0ee9d slightly improve video export speed 2025-12-12 11:12:02 -05:00
Skyler Lehmkuhl d94ec0d6a8 crop preview image to square 2025-12-09 18:46:13 -05:00
Skyler Lehmkuhl 1bf1213e3f split clips for editing 2025-12-09 18:45:57 -05:00
Skyler Lehmkuhl efca9da2c9 show previews for effects 2025-12-08 13:32:11 -05:00
Skyler Lehmkuhl c8a5cbfc89 fix color space for effects and enable them in video export 2025-12-08 10:20:50 -05:00
Skyler Lehmkuhl 7eb61ab0a8 start work on compositing, effects 2025-12-08 09:30:22 -05:00
Skyler Lehmkuhl 420f3bf7b9 Composite layers in HDR color space 2025-12-08 04:20:48 -05:00
Skyler Lehmkuhl 2caea564ac preserve video aspect ratio 2025-12-07 15:34:28 -05:00
Skyler Lehmkuhl 91f6074436 add import to stage/timeline 2025-12-07 15:24:56 -05:00
Skyler Lehmkuhl 06246bba93 Video export 2025-12-07 13:17:21 -05:00
Skyler Lehmkuhl fba2882b41 fix progress bar during mp3 and aac export 2025-12-04 16:43:05 -05:00
Skyler Lehmkuhl 2cd7682399 mp3 and aac export 2025-12-04 15:58:37 -05:00
Skyler Lehmkuhl 727d782190 initial audio export 2025-12-03 17:54:15 -05:00
Skyler Lehmkuhl 2a7c26df57 scale and rotate video clips, increase hover thumbnail size 2025-12-03 08:39:26 -05:00
Skyler Lehmkuhl c35b7a31de clean up some compiler warnings 2025-12-03 06:39:26 -05:00
Skyler Lehmkuhl 346baac840 Improve trim and drag of audio/video clips 2025-12-03 06:07:39 -05:00
Skyler Lehmkuhl ef1956e8e3 Prevent video and audio clips from overlapping 2025-12-03 02:28:23 -05:00
Skyler Lehmkuhl ccb29a9e04 Audio from videos 2025-12-03 01:04:09 -05:00
Skyler Lehmkuhl d453571c9b Video import 2025-12-02 13:39:55 -05:00
Skyler Lehmkuhl c2f8969432 Render audio clip waveforms 2025-12-02 00:57:20 -05:00
Skyler Lehmkuhl cffb61e5a8 Load audio clips 2025-12-01 22:03:20 -05:00
Skyler Lehmkuhl ba9a4ee812 File save/load 2025-12-01 09:18:49 -05:00
Skyler Lehmkuhl 5379e3bc8c Import .mid files to midi tracks 2025-12-01 05:35:53 -05:00
Skyler Lehmkuhl c09cd276a0 Add velocity support to virtual piano 2025-11-30 21:20:42 -05:00
Skyler Lehmkuhl 98c2880b45 Add keyboard support to virtual piano 2025-11-30 11:26:14 -05:00
Skyler Lehmkuhl 8f1934ab59 Add virtual piano 2025-11-30 11:01:07 -05:00
Skyler Lehmkuhl c943f7bfe6 Add info panel 2025-11-30 10:01:10 -05:00
Skyler Lehmkuhl 4d1e052ee7 Add asset pane 2025-11-30 06:54:53 -05:00
Skyler Lehmkuhl 8f830b7799 tests 2025-11-29 13:39:31 -05:00
Skyler Lehmkuhl f9761b8af3 Add layer controls, fix dragging for clips 2025-11-29 12:33:17 -05:00
Skyler Lehmkuhl 5fbb2c078b Use audio engine as source of truth for playback time 2025-11-28 11:36:33 -05:00
Skyler Lehmkuhl 5761d48f1b Merge branch 'main' into rust-ui 2025-11-28 07:00:29 -05:00
Skyler Lehmkuhl bbeb85b3a3 Clips in timeline 2025-11-28 05:53:11 -05:00
Skyler Lehmkuhl 1cb2aabc9c fix skew 2025-11-19 11:47:19 -05:00
Skyler Lehmkuhl 9cbfefca1d fix skew 2025-11-19 11:18:51 -05:00
Skyler Lehmkuhl 0d8fa7ffbd add skew to transform tool 2025-11-19 10:59:18 -05:00
Skyler Lehmkuhl 258b131f2d fix select tool drag selection 2025-11-19 10:17:47 -05:00
Skyler Lehmkuhl a50b51d95b add tool keyboard shortcuts 2025-11-19 10:05:47 -05:00
Skyler Lehmkuhl 2bb9aecf31 add eyedropper tool 2025-11-19 10:01:42 -05:00
Skyler Lehmkuhl a0875b1bc0 add line and polygon tools 2025-11-19 09:27:13 -05:00
Skyler Lehmkuhl 08f3c30b29 paint bucket mostly working 2025-11-19 09:01:27 -05:00
Skyler Lehmkuhl 502bae0947 fix paint bucket angle priority 2025-11-19 05:54:51 -05:00
Skyler Lehmkuhl b7c382586e add other tool buttons 2025-11-19 04:31:03 -05:00
Skyler Lehmkuhl 7d90eed1ec check nodes instead of half edges to confirm faces in paint bucket graph 2025-11-19 04:30:45 -05:00
Skyler Lehmkuhl 71f9283356 prune paint bucket node graph 2025-11-19 02:45:38 -05:00
Skyler Lehmkuhl e1d9514472 Paint bucket 2025-11-19 01:47:37 -05:00
Skyler Lehmkuhl 9204308033 Transform shapes 2025-11-18 05:08:33 -05:00
Skyler Lehmkuhl 67724c944c Select and move shapes 2025-11-18 00:22:28 -05:00
Skyler Lehmkuhl afda2d9d4f CSS improvements, light and dark mode 2025-11-17 07:42:40 -05:00
Skyler Lehmkuhl 1324cae7e3 Render shape on stage 2025-11-16 02:40:06 -05:00
Skyler Lehmkuhl 08232454a7 Add stage pane with scrolling 2025-11-16 00:01:07 -05:00
Skyler Lehmkuhl 652b9e6cbb Add app menu 2025-11-14 18:41:44 -05:00
Skyler Lehmkuhl 48da21e062 Toolbar 2025-11-13 18:12:21 -05:00
Skyler Lehmkuhl bf007e774e Add Rust desktop UI with Blender-style pane system
Implemented foundational pane system using eframe/egui:
- Workspace structure with lightningbeam-core and lightningbeam-editor
- Layout data structures matching existing JSON schema
- All 8 predefined layouts (Animation, Video Editing, Audio/DAW, etc.)
- Recursive pane rendering with visual dividers
- Layout switcher menu
- Color-coded pane types for visualization

Foundation complete for interactive pane operations (resize, split, join).
2025-11-12 06:13:00 -05:00
Skyler Lehmkuhl f28791c2c9 remove slower methods 2025-11-07 03:23:59 -05:00
Skyler Lehmkuhl 336b9952e4 Improve further with websockets 2025-11-07 02:51:23 -05:00
810 changed files with 155001 additions and 2264 deletions

425
.github/workflows/build.yml vendored Normal file
View File

@ -0,0 +1,425 @@
name: Build & Package
on:
workflow_dispatch:
push:
branches:
- release
jobs:
build:
permissions:
contents: read
strategy:
fail-fast: false
matrix:
include:
- platform: ubuntu-22.04
target: ''
artifact-name: linux-x86_64
- platform: macos-latest
target: ''
artifact-name: macos-arm64
- platform: macos-latest
target: x86_64-apple-darwin
artifact-name: macos-x86_64
- platform: windows-latest
target: ''
artifact-name: windows-x86_64
runs-on: ${{ matrix.platform }}
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- name: Verify submodules
shell: bash
run: |
echo "=== Submodule status ==="
git submodule status --recursive
echo "=== NeuralAudio deps ==="
ls -la vendor/NeuralAudio/deps/
ls vendor/NeuralAudio/deps/RTNeural/CMakeLists.txt
ls vendor/NeuralAudio/deps/math_approx/CMakeLists.txt
- name: Clone egui fork
run: git clone --depth 1 -b ibus-wayland-fix https://git.skyler.io/skyler/egui.git ../egui-fork
env:
GIT_LFS_SKIP_SMUDGE: "1"
- name: Install Rust stable
uses: dtolnay/rust-toolchain@stable
with:
targets: ${{ matrix.target != '' && matrix.target || '' }}
- name: Rust cache
uses: swatinem/rust-cache@v2
with:
workspaces: './lightningbeam-ui -> target'
key: ${{ matrix.artifact-name }}-v2
# ── Linux dependencies ──
- name: Install dependencies (Linux)
if: matrix.platform == 'ubuntu-22.04'
run: |
sudo apt-get update
sudo apt-get install -y \
build-essential pkg-config clang nasm cmake \
libasound2-dev libwayland-dev libwayland-cursor0 \
libx11-dev libxkbcommon-dev libxcb-shape0-dev libxcb-xfixes0-dev \
libxdo-dev libglib2.0-dev libgtk-3-dev libvulkan-dev \
yasm libx264-dev libx265-dev libvpx-dev libmp3lame-dev libopus-dev \
libpulse-dev squashfs-tools dpkg rpm
- name: Install cargo packaging tools (Linux)
if: matrix.platform == 'ubuntu-22.04'
uses: taiki-e/install-action@v2
with:
tool: cargo-deb,cargo-generate-rpm
# ── macOS dependencies ──
- name: Install dependencies (macOS)
if: startsWith(matrix.platform, 'macos')
run: brew install nasm cmake create-dmg
# ── Windows dependencies ──
- name: Install dependencies (Windows)
if: matrix.platform == 'windows-latest'
shell: pwsh
run: choco install cmake llvm --installargs 'ADD_CMAKE_TO_PATH=System' -y
# ── Common build steps ──
- name: Extract version
id: version
shell: bash
run: |
VERSION=$(grep '^version' lightningbeam-ui/lightningbeam-editor/Cargo.toml | sed 's/.*"\(.*\)"/\1/')
echo "version=$VERSION" >> "$GITHUB_OUTPUT"
- name: Enable FFmpeg build from source (Linux/macOS)
if: matrix.platform != 'windows-latest'
shell: bash
run: |
sed -i.bak 's/ffmpeg-next = { version = "8.0", features = \["static"\] }/ffmpeg-next = { version = "8.0", features = ["build", "static"] }/' lightningbeam-ui/lightningbeam-editor/Cargo.toml
- name: Setup FFmpeg (Windows)
if: matrix.platform == 'windows-latest'
shell: pwsh
run: |
# Download FFmpeg 8.0 shared+dev build (headers + import libs + DLLs)
$url = "https://github.com/GyanD/codexffmpeg/releases/download/8.0/ffmpeg-8.0-full_build-shared.7z"
Invoke-WebRequest -Uri $url -OutFile ffmpeg.7z
7z x ffmpeg.7z -offmpeg-win
$ffmpegDir = (Get-ChildItem -Path ffmpeg-win -Directory | Select-Object -First 1).FullName
echo "FFMPEG_DIR=$ffmpegDir" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
# LLVM/libclang for bindgen
echo "LIBCLANG_PATH=C:\Program Files\LLVM\bin" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
- name: Setup icons
shell: bash
run: |
mkdir -p lightningbeam-ui/lightningbeam-editor/assets/icons
cp -f src-tauri/icons/32x32.png lightningbeam-ui/lightningbeam-editor/assets/icons/
cp -f src-tauri/icons/128x128.png lightningbeam-ui/lightningbeam-editor/assets/icons/
cp -f src-tauri/icons/icon.png lightningbeam-ui/lightningbeam-editor/assets/icons/256x256.png
- name: Stage factory presets
shell: bash
run: |
mkdir -p lightningbeam-ui/lightningbeam-editor/assets/presets
cp -r src/assets/instruments/* lightningbeam-ui/lightningbeam-editor/assets/presets/
# Remove empty category dirs and README
find lightningbeam-ui/lightningbeam-editor/assets/presets -maxdepth 1 -type d -empty -delete
rm -f lightningbeam-ui/lightningbeam-editor/assets/presets/README.md
- name: Inject preset entries into RPM metadata (Linux)
if: matrix.platform == 'ubuntu-22.04'
shell: bash
run: |
cd lightningbeam-ui
find lightningbeam-editor/assets/presets -type f | sort | while read -r f; do
rel="${f#lightningbeam-editor/}"
dest="/usr/share/lightningbeam-editor/presets/${f#lightningbeam-editor/assets/presets/}"
printf '\n[[package.metadata.generate-rpm.assets]]\nsource = "%s"\ndest = "%s"\nmode = "644"\n' "$rel" "$dest" >> lightningbeam-editor/Cargo.toml
done
- name: Fix FFmpeg cross-compile OS detection (macOS x86_64)
if: matrix.target == 'x86_64-apple-darwin'
shell: bash
run: |
# ffmpeg-sys-next passes --target-os=macos to FFmpeg configure, but FFmpeg
# expects --target-os=darwin. Fetch crates, then patch the build script.
cd lightningbeam-ui
cargo fetch
BUILDRS=$(find $HOME/.cargo/registry/src -path '*/ffmpeg-sys-next-*/build.rs' | head -1)
echo "Patching $BUILDRS to fix macos -> darwin mapping"
# Add "macos" => "darwin" to the match in get_ffmpeg_target_os()
sed -i.bak 's/"ios" => "darwin"/"ios" | "macos" => "darwin"/' "$BUILDRS"
grep -A4 'fn get_ffmpeg_target_os' "$BUILDRS"
echo "MACOSX_DEPLOYMENT_TARGET=11.0" >> "$GITHUB_ENV"
- name: Build release binary
shell: bash
env:
FFMPEG_STATIC: "1"
run: |
cd lightningbeam-ui
TARGET_FLAG=""
if [ -n "${{ matrix.target }}" ]; then
TARGET_FLAG="--target ${{ matrix.target }}"
fi
cargo build --release --bin lightningbeam-editor $TARGET_FLAG
- name: Copy cross-compiled binary to release dir
if: matrix.target != ''
shell: bash
run: |
mkdir -p lightningbeam-ui/target/release
cp lightningbeam-ui/target/${{ matrix.target }}/release/lightningbeam-editor lightningbeam-ui/target/release/
# ── Stage presets next to binary for packaging ──
- name: Stage presets in target dir
shell: bash
run: |
mkdir -p lightningbeam-ui/target/release/presets
cp -r lightningbeam-ui/lightningbeam-editor/assets/presets/* lightningbeam-ui/target/release/presets/
# ══════════════════════════════════════════════
# Linux Packaging
# ══════════════════════════════════════════════
- name: Build .deb package
if: matrix.platform == 'ubuntu-22.04'
shell: bash
run: |
cd lightningbeam-ui
cargo deb -p lightningbeam-editor --no-build --no-strip
# Inject factory presets into .deb (cargo-deb doesn't handle recursive dirs well)
DEB=$(ls target/debian/*.deb | head -1)
WORK=$(mktemp -d)
dpkg-deb -R "$DEB" "$WORK"
mkdir -p "$WORK/usr/share/lightningbeam-editor/presets"
cp -r lightningbeam-editor/assets/presets/* "$WORK/usr/share/lightningbeam-editor/presets/"
dpkg-deb -b "$WORK" "$DEB"
rm -rf "$WORK"
- name: Build .rpm package
if: matrix.platform == 'ubuntu-22.04'
shell: bash
run: |
cd lightningbeam-ui
cargo generate-rpm -p lightningbeam-editor
- name: Build AppImage
if: matrix.platform == 'ubuntu-22.04'
shell: bash
run: |
cd lightningbeam-ui
VERSION="${{ steps.version.outputs.version }}"
APPDIR=/tmp/AppDir
ASSETS=lightningbeam-editor/assets
rm -rf "$APPDIR"
mkdir -p "$APPDIR/usr/bin"
mkdir -p "$APPDIR/usr/bin/presets"
mkdir -p "$APPDIR/usr/share/applications"
mkdir -p "$APPDIR/usr/share/metainfo"
mkdir -p "$APPDIR/usr/share/icons/hicolor/32x32/apps"
mkdir -p "$APPDIR/usr/share/icons/hicolor/128x128/apps"
mkdir -p "$APPDIR/usr/share/icons/hicolor/256x256/apps"
cp target/release/lightningbeam-editor "$APPDIR/usr/bin/"
cp -r lightningbeam-editor/assets/presets/* "$APPDIR/usr/bin/presets/"
cp "$ASSETS/com.lightningbeam.editor.desktop" "$APPDIR/usr/share/applications/"
cp "$ASSETS/com.lightningbeam.editor.appdata.xml" "$APPDIR/usr/share/metainfo/"
cp "$ASSETS/icons/32x32.png" "$APPDIR/usr/share/icons/hicolor/32x32/apps/lightningbeam-editor.png"
cp "$ASSETS/icons/128x128.png" "$APPDIR/usr/share/icons/hicolor/128x128/apps/lightningbeam-editor.png"
cp "$ASSETS/icons/256x256.png" "$APPDIR/usr/share/icons/hicolor/256x256/apps/lightningbeam-editor.png"
ln -sf usr/share/icons/hicolor/256x256/apps/lightningbeam-editor.png "$APPDIR/lightningbeam-editor.png"
ln -sf usr/share/applications/com.lightningbeam.editor.desktop "$APPDIR/lightningbeam-editor.desktop"
printf '#!/bin/bash\nSELF=$(readlink -f "$0")\nHERE=${SELF%%/*}\nexport XDG_DATA_DIRS="${HERE}/usr/share:${XDG_DATA_DIRS:-/usr/local/share:/usr/share}"\nexec "${HERE}/usr/bin/lightningbeam-editor" "$@"\n' > "$APPDIR/AppRun"
chmod +x "$APPDIR/AppRun"
# Download AppImage runtime
wget -q "https://github.com/AppImage/AppImageKit/releases/download/continuous/runtime-x86_64" \
-O /tmp/appimage-runtime
chmod +x /tmp/appimage-runtime
# Build squashfs and concatenate
mksquashfs "$APPDIR" /tmp/appimage.squashfs \
-root-owned -noappend -no-exports -no-xattrs \
-comp gzip -b 131072
cat /tmp/appimage-runtime /tmp/appimage.squashfs \
> "Lightningbeam_Editor-${VERSION}-x86_64.AppImage"
chmod +x "Lightningbeam_Editor-${VERSION}-x86_64.AppImage"
- name: Upload .deb
if: matrix.platform == 'ubuntu-22.04'
uses: actions/upload-artifact@v4
with:
name: deb-package
path: lightningbeam-ui/target/debian/*.deb
if-no-files-found: error
- name: Upload .rpm
if: matrix.platform == 'ubuntu-22.04'
uses: actions/upload-artifact@v4
with:
name: rpm-package
path: lightningbeam-ui/target/generate-rpm/*.rpm
if-no-files-found: error
- name: Upload AppImage
if: matrix.platform == 'ubuntu-22.04'
uses: actions/upload-artifact@v4
with:
name: appimage
path: lightningbeam-ui/Lightningbeam_Editor-*.AppImage
if-no-files-found: error
# ══════════════════════════════════════════════
# macOS Packaging
# ══════════════════════════════════════════════
- name: Create macOS .app bundle
if: startsWith(matrix.platform, 'macos')
shell: bash
run: |
VERSION="${{ steps.version.outputs.version }}"
APP="Lightningbeam Editor.app"
mkdir -p "$APP/Contents/MacOS"
mkdir -p "$APP/Contents/Resources/presets"
cp lightningbeam-ui/target/release/lightningbeam-editor "$APP/Contents/MacOS/"
cp src-tauri/icons/icon.icns "$APP/Contents/Resources/lightningbeam-editor.icns"
cp -r lightningbeam-ui/lightningbeam-editor/assets/presets/* "$APP/Contents/Resources/presets/"
cat > "$APP/Contents/Info.plist" << EOF
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>CFBundleName</key>
<string>Lightningbeam Editor</string>
<key>CFBundleDisplayName</key>
<string>Lightningbeam Editor</string>
<key>CFBundleIdentifier</key>
<string>com.lightningbeam.editor</string>
<key>CFBundleVersion</key>
<string>${VERSION}</string>
<key>CFBundleShortVersionString</key>
<string>${VERSION}</string>
<key>CFBundlePackageType</key>
<string>APPL</string>
<key>CFBundleExecutable</key>
<string>lightningbeam-editor</string>
<key>CFBundleIconFile</key>
<string>lightningbeam-editor</string>
<key>LSMinimumSystemVersion</key>
<string>11.0</string>
<key>NSHighResolutionCapable</key>
<true/>
</dict>
</plist>
EOF
- name: Create macOS .dmg
if: startsWith(matrix.platform, 'macos')
shell: bash
run: |
VERSION="${{ steps.version.outputs.version }}"
ARCH="${{ matrix.target == 'x86_64-apple-darwin' && 'x86_64' || 'arm64' }}"
DMG_NAME="Lightningbeam_Editor-${VERSION}-macOS-${ARCH}.dmg"
create-dmg \
--volname "Lightningbeam Editor" \
--window-pos 200 120 \
--window-size 600 400 \
--icon-size 100 \
--icon "Lightningbeam Editor.app" 175 190 \
--app-drop-link 425 190 \
"$DMG_NAME" \
"Lightningbeam Editor.app" || true
# create-dmg returns non-zero if codesigning is skipped, but the .dmg is still valid
- name: Upload .dmg
if: startsWith(matrix.platform, 'macos')
uses: actions/upload-artifact@v4
with:
name: dmg-${{ matrix.artifact-name }}
path: Lightningbeam_Editor-*.dmg
if-no-files-found: error
# ══════════════════════════════════════════════
# Windows Packaging
# ══════════════════════════════════════════════
- name: Create Windows .zip
if: matrix.platform == 'windows-latest'
shell: pwsh
run: |
$VERSION = "${{ steps.version.outputs.version }}"
$DIST = "Lightningbeam_Editor-${VERSION}-Windows-x86_64"
New-Item -ItemType Directory -Force -Path $DIST
Copy-Item "lightningbeam-ui/target/release/lightningbeam-editor.exe" "$DIST/"
Copy-Item -Recurse "lightningbeam-ui/target/release/presets" "$DIST/presets"
# Bundle FFmpeg DLLs (shared build)
Copy-Item "$env:FFMPEG_DIR\bin\*.dll" "$DIST/"
Compress-Archive -Path $DIST -DestinationPath "${DIST}.zip"
- name: Upload .zip
if: matrix.platform == 'windows-latest'
uses: actions/upload-artifact@v4
with:
name: windows-zip
path: Lightningbeam_Editor-*.zip
if-no-files-found: error
release:
needs: build
runs-on: ubuntu-22.04
permissions:
contents: write
steps:
- uses: actions/checkout@v4
with:
sparse-checkout: |
lightningbeam-ui/lightningbeam-editor/Cargo.toml
Changelog.md
- name: Extract version
id: version
run: |
VERSION=$(grep '^version' lightningbeam-ui/lightningbeam-editor/Cargo.toml | sed 's/.*"\(.*\)"/\1/')
echo "version=$VERSION" >> "$GITHUB_OUTPUT"
- name: Extract release notes
id: notes
uses: sean0x42/markdown-extract@v2.1.0
with:
pattern: "${{ steps.version.outputs.version }}:"
file: Changelog.md
- name: Download all artifacts
uses: actions/download-artifact@v4
with:
path: dist
merge-multiple: true
- name: List artifacts
run: ls -lhR dist/
- name: Create draft release
uses: softprops/action-gh-release@v2
with:
tag_name: "v${{ steps.version.outputs.version }}"
name: "Lightningbeam v${{ steps.version.outputs.version }}"
body: ${{ steps.notes.outputs.markdown }}
draft: true
prerelease: true
files: dist/*
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@ -1,150 +0,0 @@
name: 'publish'
on:
workflow_dispatch:
push:
branches:
- release
jobs:
extract-changelog:
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
- name: Set version for changelog extraction
shell: bash
run: |
# Read the version from src-tauri/tauri.conf.json
VERSION=$(jq -r '.version' src-tauri/tauri.conf.json)
# Set the version in the environment variable
echo "VERSION=$VERSION" >> $GITHUB_ENV
- name: Extract release notes from Changelog.md
id: changelog
uses: sean0x42/markdown-extract@v2.1.0
with:
pattern: "${{ env.VERSION }}:" # Look for the version header (e.g., # 0.6.15-alpha:)
file: Changelog.md
- name: Set markdown output
id: set-markdown-output
run: |
echo 'RELEASE_NOTES<<EOF' >> $GITHUB_OUTPUT
echo "${{ steps.changelog.outputs.markdown }}" >> $GITHUB_OUTPUT
echo 'EOF' >> $GITHUB_OUTPUT
publish-tauri:
needs: extract-changelog
permissions:
contents: write
strategy:
fail-fast: false
matrix:
include:
- platform: 'macos-latest' # for Arm based macs (M1 and above).
args: '--target aarch64-apple-darwin'
- platform: 'macos-latest' # for Intel based macs.
args: '--target x86_64-apple-darwin'
- platform: 'ubuntu-22.04'
args: ''
- platform: 'windows-latest'
args: ''
runs-on: ${{ matrix.platform }}
steps:
- uses: actions/checkout@v4
- name: Debug the extracted release notes
run: |
echo "Extracted Release Notes: ${{ needs.extract-changelog.outputs.RELEASE_NOTES }}"
- name: install dependencies (ubuntu only)
if: matrix.platform == 'ubuntu-22.04' # This must match the platform value defined above.
run: |
sudo apt-get update
sudo apt-get install -y libwebkit2gtk-4.1-dev libappindicator3-dev librsvg2-dev patchelf
- name: Install jq on Windows
if: matrix.platform == 'windows-latest'
run: |
choco install jq
- name: Set version for all platforms
shell: bash
run: |
# Read the version from src-tauri/tauri.conf.json
VERSION=$(jq -r '.version' src-tauri/tauri.conf.json)
# Set the version in the environment variable
echo "VERSION=$VERSION" >> $GITHUB_ENV
if: matrix.platform != 'windows-latest'
- name: Set version for Windows build
if: matrix.platform == 'windows-latest' # Only run on Windows
shell: pwsh # Use PowerShell on Windows runners
run: |
# Read the version from src-tauri/tauri.conf.json
$tauriConf = Get-Content src-tauri/tauri.conf.json | ConvertFrom-Json
$VERSION = $tauriConf.version
# Replace '-alpha' with '-0' and '-beta' with '-1' for Windows version
if ($VERSION -match "-alpha") {
$WINDOWS_VERSION = $VERSION -replace "-alpha", "-1"
} elseif ($VERSION -match "-beta") {
$WINDOWS_VERSION = $VERSION -replace "-beta", "-2"
} else {
$WINDOWS_VERSION = $VERSION
}
Copy-Item src-tauri/tauri.conf.json -Destination src-tauri/tauri.windows.conf.json
# Modify the version in tauri.windows.conf.json
(Get-Content src-tauri/tauri.windows.conf.json) | ForEach-Object {
$_ -replace '"version": ".*"', ('"version": "' + $WINDOWS_VERSION + '"')
} | Set-Content src-tauri/tauri.windows.conf.json
echo "VERSION=$VERSION" >> $env:GITHUB_ENV
- name: Print contents of tauri.windows.conf.json (Windows)
if: matrix.platform == 'windows-latest' # Only run on Windows
shell: pwsh
run: |
Write-Host "Contents of src-tauri/tauri.windows.conf.json:"
Get-Content src-tauri/tauri.windows.conf.json
- name: setup pnpm
uses: pnpm/action-setup@v2
with:
version: 9.1.2
- name: setup node
uses: actions/setup-node@v4
with:
node-version: lts/*
cache: 'pnpm' # Set this to npm, yarn or pnpm.
- name: install Rust stable
uses: dtolnay/rust-toolchain@stable # Set this to dtolnay/rust-toolchain@nightly
with:
# Those targets are only used on macos runners so it's in an `if` to slightly speed up windows and linux builds.
targets: ${{ matrix.platform == 'macos-latest' && 'aarch64-apple-darwin,x86_64-apple-darwin' || '' }}
- name: Rust cache
uses: swatinem/rust-cache@v2
with:
workspaces: './src-tauri -> target'
- name: install frontend dependencies
# If you don't have `beforeBuildCommand` configured you may want to build your frontend here too.
run: pnpm install # change this to npm or pnpm depending on which one you use.
- name: Create Release with Tauri Action
uses: tauri-apps/tauri-action@v0
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
RELEASE_NOTES: ${{ needs.extract-changelog.outputs.RELEASE_NOTES }}
with:
tagName: "app-v${{ env.VERSION }}" # Use the original version tag for the release
releaseName: "Lightningbeam v${{ env.VERSION }}"
releaseBody: "${{ needs.extract-changelog.outputs.RELEASE_NOTES }}"
releaseDraft: true # Set to true if you want the release to be a draft
prerelease: true
args: ${{ matrix.args }}

43
.gitignore vendored Normal file
View File

@ -0,0 +1,43 @@
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*
lerna-debug.log*
node_modules
dist
dist-ssr
*.local
# Editor directories and files
.vscode/
.vscode/*
!.vscode/extensions.json
.idea
.DS_Store
*.suo
*.ntvs*
*.njsproj
*.sln
*.sw?
# Git
.gitignore
# Build
src-tauri/gen
src-tauri/target
lightningbeam-core/target
daw-backend/target
target/
# Packaging build artifacts
packaging/output/
packaging/AppDir/
packaging/squashfs-root/
# Wrapper script (generated, not needed with static FFmpeg)
lightningbeam-ui/lightningbeam-editor/debian/

3
.gitmodules vendored Normal file
View File

@ -0,0 +1,3 @@
[submodule "vendor/NeuralAudio"]
path = vendor/NeuralAudio
url = https://github.com/mikeoliphant/NeuralAudio.git

538
ARCHITECTURE.md Normal file
View File

@ -0,0 +1,538 @@
# Lightningbeam Architecture
This document provides a comprehensive overview of Lightningbeam's architecture, design decisions, and component interactions.
## Table of Contents
- [System Overview](#system-overview)
- [Technology Stack](#technology-stack)
- [Component Architecture](#component-architecture)
- [Data Flow](#data-flow)
- [Rendering Pipeline](#rendering-pipeline)
- [Audio Architecture](#audio-architecture)
- [Key Design Decisions](#key-design-decisions)
- [Directory Structure](#directory-structure)
## System Overview
Lightningbeam is a 2D multimedia editor combining vector animation, audio production, and video editing. The application is built as a pure Rust desktop application using immediate-mode GUI (egui) with GPU-accelerated vector rendering (Vello).
### High-Level Architecture
```
┌────────────────────────────────────────────────────────────┐
│ Lightningbeam Editor │
│ (egui UI) │
├────────────────────────────────────────────────────────────┤
│ ┌──────────┐ ┌──────────┐ ┌──────────┐ ┌──────────┐ │
│ │ Stage │ │ Timeline │ │ Asset │ │ Info │ │
│ │ Pane │ │ Pane │ │ Library │ │ Panel │ │
│ └──────────┘ └──────────┘ └──────────┘ └──────────┘ │
│ │
│ ┌──────────────────────────────────────────────────────┐ │
│ │ Lightningbeam Core (Data Model) │ │
│ │ Document, Layers, Clips, Actions, Undo/Redo │ │
│ └──────────────────────────────────────────────────────┘ │
├────────────────────────────────────────────────────────────┤
│ Rendering & Audio │
│ ┌──────────────────┐ ┌──────────────────┐ │
│ │ Vello + wgpu │ │ daw-backend │ │
│ │ (GPU Rendering) │ │ (Audio Engine) │ │
│ └──────────────────┘ └──────────────────┘ │
└────────────────────────────────────────────────────────────┘
↓ ↓
┌─────────┐ ┌─────────┐
│ GPU │ │ cpal │
│ (Vulkan │ │ (Audio │
│ /Metal) │ │ I/O) │
└─────────┘ └─────────┘
```
### Migration from Tauri/JavaScript
Lightningbeam is undergoing a rewrite from a Tauri/JavaScript prototype to pure Rust. The original architecture hit IPC bandwidth limitations when streaming decoded video frames. The new Rust UI eliminates this bottleneck by handling all rendering natively.
**Current Status**: Active development on the `rust-ui` branch. Core UI, tools, and undo system are implemented. Audio integration in progress.
## Technology Stack
### UI Framework
- **egui 0.33.3**: Immediate-mode GUI framework
- **eframe 0.33.3**: Application framework wrapping egui
- **winit 0.30**: Cross-platform windowing
### GPU Rendering
- **Vello (git main)**: GPU-accelerated 2D vector graphics using compute shaders
- **wgpu 27**: Low-level GPU API (Vulkan/Metal backend)
- **kurbo 0.12**: 2D curve and shape primitives
- **peniko 0.5**: Color and brush definitions
### Audio Engine
- **daw-backend**: Custom real-time audio engine
- **cpal 0.15**: Cross-platform audio I/O
- **symphonia 0.5**: Audio decoding (MP3, FLAC, WAV, Ogg, etc.)
- **rtrb 0.3**: Lock-free ringbuffers for audio thread communication
- **dasp**: Audio graph processing
### Video
- **FFmpeg**: Video encoding/decoding (via ffmpeg-next)
### Serialization
- **serde**: Document serialization
- **serde_json**: JSON format
## Component Architecture
### 1. Lightningbeam Core (`lightningbeam-core/`)
The core crate contains the data model and business logic, independent of UI framework.
**Key Types:**
```rust
Document {
canvas_size: (u32, u32),
layers: Vec<Layer>,
undo_stack: Vec<Box<dyn Action>>,
redo_stack: Vec<Box<dyn Action>>,
}
Layer (enum) {
VectorLayer { clips: Vec<VectorClip>, ... },
AudioLayer { clips: Vec<AudioClip>, ... },
VideoLayer { clips: Vec<VideoClip>, ... },
}
ClipInstance {
clip_id: Uuid, // Reference to clip definition
start_time: f64, // Timeline position
duration: f64,
trim_start: f64,
trim_end: f64,
}
```
**Responsibilities:**
- Document structure and state
- Clip and layer management
- Action system (undo/redo)
- Tool definitions
- Animation data and keyframes
### 2. Lightningbeam Editor (`lightningbeam-editor/`)
The editor application implements the UI and user interactions.
**Main Entry Point:** `src/main.rs`
- Initializes eframe application
- Sets up window, GPU context, and audio system
- Runs main event loop
**Panes** (`src/panes/`):
Each pane is a self-contained UI component:
- `stage.rs` (214KB): Main canvas for drawing, transform tools, GPU rendering
- `timeline.rs` (84KB): Multi-track timeline with clip editing
- `asset_library.rs` (70KB): Asset browser with drag-and-drop
- `infopanel.rs` (31KB): Context-sensitive property editor
- `virtual_piano.rs` (31KB): MIDI keyboard input
- `toolbar.rs` (9KB): Tool palette
**Pane System:**
```rust
pub enum PaneInstance {
Stage(Stage),
Timeline(Timeline),
AssetLibrary(AssetLibrary),
// ... other panes
}
impl PaneInstance {
pub fn render(&mut self, ui: &mut Ui, shared_state: &mut SharedPaneState) {
match self {
PaneInstance::Stage(stage) => stage.render(ui, shared_state),
// ... dispatch to specific pane
}
}
}
```
**SharedPaneState:**
Facilitates communication between panes:
```rust
pub struct SharedPaneState {
pub document: Document,
pub selected_tool: Tool,
pub pending_actions: Vec<Box<dyn Action>>,
pub audio_system: AudioSystem,
// ... other shared state
}
```
### 3. DAW Backend (`daw-backend/`)
Standalone audio engine crate with real-time audio processing.
**Architecture:**
```
UI Thread Audio Thread (real-time)
│ │
│ Commands (rtrb queue) │
├──────────────────────────────>│
│ │
│ State Updates │
<──────────────────────────────┤
│ │
┌───────────────┐
│ Audio Engine │
│ process() │
└───────────────┘
┌───────────────┐
│ Track Mix │
└───────────────┘
┌───────────────┐
│ cpal Output │
└───────────────┘
```
**Key Components:**
- **Engine** (`audio/engine.rs`): Main audio callback, runs on real-time thread
- **Project** (`audio/project.rs`): Top-level audio state
- **Track** (`audio/track.rs`): Individual audio tracks with effects chains
- **Effects**: Reverb, delay, EQ, compressor, distortion, etc.
- **Synthesizers**: Oscillator, FM synth, wavetable, sampler
**Lock-Free Design:**
The audio thread never blocks. UI sends commands via lock-free ringbuffers (rtrb), audio thread processes them between buffer callbacks.
## Data Flow
### Document Editing Flow
```
User Input (mouse/keyboard)
egui Event Handlers (in pane.render())
Create Action (implements Action trait)
Add to SharedPaneState.pending_actions
After all panes render: execute actions
Action.apply(&mut document)
Push to undo_stack
UI re-renders with updated document
```
### Audio Playback Flow
```
UI: User clicks Play
Send PlayCommand to audio engine (via rtrb queue)
Audio thread: Receive command
Audio thread: Start playback, increment playhead
Audio callback (every ~5ms): Engine::process()
Mix tracks, apply effects, output samples
Send playhead position back to UI
UI: Update timeline playhead position
```
### GPU Rendering Flow
```
egui layout phase
Stage pane requests wgpu callback
Vello renders vector shapes to GPU texture
Custom wgpu integration composites:
- Vello output (vector graphics)
- Waveform textures (GPU-rendered audio)
- egui UI overlay
Present to screen
```
## Rendering Pipeline
### Stage Rendering
The Stage pane uses a custom wgpu callback to render directly to GPU:
```rust
ui.painter().add(egui_wgpu::Callback::new_paint_callback(
rect,
StageCallback { /* render data */ }
));
```
**Vello Integration:**
1. Create Vello `Scene` from document shapes
2. Render scene to GPU texture using compute shaders
3. Composite with UI elements
**Waveform Rendering:**
- Audio waveforms rendered on GPU using custom WGSL shaders
- Mipmaps generated via compute shader for level-of-detail
- Uniform buffers store view parameters (zoom, offset, tint color)
**WGSL Alignment Requirements:**
WGSL has strict alignment rules. `vec4<f32>` requires 16-byte alignment:
```rust
#[repr(C)]
#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
struct WaveformParams {
view_matrix: [f32; 16], // 64 bytes
viewport_size: [f32; 2], // 8 bytes
zoom: f32, // 4 bytes
_pad1: f32, // 4 bytes padding
tint_color: [f32; 4], // 16 bytes (requires 16-byte alignment)
}
// Total: 96 bytes
```
## Audio Architecture
### Real-Time Constraints
Audio callbacks run on a dedicated real-time thread with strict timing requirements:
- Buffer size: 256 frames default (~5.8ms at 44.1kHz)
- ALSA may provide smaller buffers (64-75 frames, ~1.5ms)
- **No blocking operations allowed**: No locks, no allocations, no syscalls
### Lock-Free Communication
UI and audio thread communicate via lock-free ringbuffers (rtrb):
```rust
// UI Thread
command_sender.push(AudioCommand::Play).ok();
// Audio Thread (in process callback)
while let Ok(command) = command_receiver.pop() {
match command {
AudioCommand::Play => self.playing = true,
// ... handle other commands
}
}
```
### Audio Processing Pipeline
```
Audio Callback Invoked (every ~5ms)
Process queued commands
For each track:
- Read audio samples at playhead position
- Apply effects chain
- Mix to master output
Write samples to output buffer
Return from callback (must complete in <5ms)
```
### Optimized Debug Builds
Audio code is optimized even in debug builds to meet real-time deadlines:
```toml
[profile.dev.package.daw-backend]
opt-level = 2
[profile.dev.package.symphonia]
opt-level = 2
# ... other audio libraries
```
## Key Design Decisions
### Layer & Clip System
**Type-Specific Layers:**
Each layer type supports only its matching clip type:
- `VectorLayer` → `VectorClip`
- `AudioLayer` → `AudioClip`
- `VideoLayer` → `VideoClip`
**Recursive Nesting:**
Vector clips can contain internal layers of any type, enabling complex nested compositions.
**Clip vs ClipInstance:**
- **Clip**: Template/definition in asset library (the "master")
- **ClipInstance**: Placed on timeline with instance-specific properties (position, duration, trim points)
- Multiple instances can reference the same clip
- "Make Unique" operation duplicates the underlying clip
### Undo/Redo System
**Action Trait:**
```rust
pub trait Action: Send {
fn apply(&mut self, document: &mut Document);
fn undo(&mut self, document: &mut Document);
fn redo(&mut self, document: &mut Document);
}
```
All operations (drawing, editing, clip manipulation) implement this trait.
**Continuous Operations:**
Dragging sliders or scrubbing creates only one undo action when complete, not one per frame.
### Two-Phase Dispatch Pattern
Panes cannot directly mutate shared state during rendering (borrowing rules). Instead:
1. **Phase 1 (Render)**: Panes register actions
```rust
shared_state.register_action(Box::new(MyAction { ... }));
```
2. **Phase 2 (Execute)**: After all panes rendered, execute actions
```rust
for action in shared_state.pending_actions.drain(..) {
action.apply(&mut document);
undo_stack.push(action);
}
```
### Pane ID Salting
egui uses IDs to track widget state. Multiple instances of the same pane would collide without unique IDs.
**Solution**: Salt all IDs with the pane's node path:
```rust
ui.push_id(&node_path, |ui| {
    ui.label("My Widget");
});
```
### Selection & Clipboard
- **Selection scope**: Limited to current clip/layer
- **Type-aware paste**: Content must match target type
- **Clip instance copying**: Creates reference to same underlying clip
- **Make unique**: Duplicates underlying clip for independent editing
## Directory Structure
```
lightningbeam-2/
├── lightningbeam-ui/ # Rust UI workspace
│ ├── Cargo.toml # Workspace manifest
│ ├── lightningbeam-editor/ # Main application crate
│ │ ├── Cargo.toml
│ │ └── src/
│ │ ├── main.rs # Entry point, event loop
│ │ ├── app.rs # Application state
│ │ ├── panes/
│ │ │ ├── mod.rs # Pane system dispatch
│ │ │ ├── stage.rs # Main canvas
│ │ │ ├── timeline.rs # Timeline editor
│ │ │ ├── asset_library.rs
│ │ │ └── ...
│ │ ├── tools/ # Drawing and editing tools
│ │ ├── rendering/
│ │ │ ├── vello_integration.rs
│ │ │ ├── waveform_gpu.rs
│ │ │ └── shaders/
│ │ │ ├── waveform.wgsl
│ │ │ └── waveform_mipgen.wgsl
│ │ └── export/ # Export functionality
│ └── lightningbeam-core/ # Core data model crate
│ ├── Cargo.toml
│ └── src/
│ ├── lib.rs
│ ├── document.rs # Document structure
│ ├── layer.rs # Layer types
│ ├── clip.rs # Clip types and instances
│ ├── shape.rs # Shape definitions
│ ├── action.rs # Action trait and undo/redo
│ ├── animation.rs # Keyframe animation
│ └── tools.rs # Tool definitions
├── daw-backend/ # Audio engine (standalone)
│ ├── Cargo.toml
│ └── src/
│ ├── lib.rs # Audio system initialization
│ ├── audio/
│ │ ├── engine.rs # Main audio callback
│ │ ├── track.rs # Track management
│ │ ├── project.rs # Project state
│ │ └── buffer.rs # Audio buffer utilities
│ ├── effects/ # Audio effects
│ │ ├── reverb.rs
│ │ ├── delay.rs
│ │ └── ...
│ ├── synth/ # Synthesizers
│ └── midi/ # MIDI handling
├── src-tauri/ # Legacy Tauri backend
├── src/ # Legacy JavaScript frontend
├── CONTRIBUTING.md # Contributor guide
├── ARCHITECTURE.md # This file
├── README.md # Project overview
└── docs/ # Additional documentation
├── AUDIO_SYSTEM.md
├── UI_SYSTEM.md
└── ...
```
## Performance Considerations
### GPU Rendering
- Vello uses compute shaders for efficient 2D rendering
- Waveforms pre-rendered on GPU with mipmaps for smooth zooming
- Custom wgpu integration minimizes CPU↔GPU data transfer
### Audio Processing
- Lock-free design: No blocking in audio thread
- Optimized even in debug builds (`opt-level = 2`)
- Memory-mapped file I/O for large audio files
- Zero-copy audio buffers where possible
### Memory Management
- Audio buffers pre-allocated, no allocations in audio thread
- Vello manages GPU memory automatically
- Document structure uses `Rc`/`Arc` for shared clip references
## Future Considerations
### Video Integration
Video decoding has been ported from the legacy Tauri backend. Video soundtracks become audio tracks in daw-backend, enabling full effects processing.
### File Format
The .beam file format is not yet finalized. Considerations:
- Single JSON file vs container format (e.g., ZIP)
- Embedded media vs external references
- Forward/backward compatibility strategy
### Node Editor
Primary use: Audio effects chains and modular synthesizers. Future expansion to visual effects and procedural generation is possible.
## Related Documentation
- [CONTRIBUTING.md](CONTRIBUTING.md) - Development setup and workflow
- [docs/AUDIO_SYSTEM.md](docs/AUDIO_SYSTEM.md) - Detailed audio engine documentation
- [docs/UI_SYSTEM.md](docs/UI_SYSTEM.md) - UI pane system details
- [docs/RENDERING.md](docs/RENDERING.md) - GPU rendering pipeline
- [Claude.md](Claude.md) - Comprehensive architectural reference for AI assistants

278
CONTRIBUTING.md Normal file
View File

@ -0,0 +1,278 @@
# Contributing to Lightningbeam
Thank you for your interest in contributing to Lightningbeam! This document provides guidelines and instructions for setting up your development environment and contributing to the project.
## Table of Contents
- [Development Setup](#development-setup)
- [Building the Project](#building-the-project)
- [Project Structure](#project-structure)
- [Making Changes](#making-changes)
- [Code Style](#code-style)
- [Testing](#testing)
- [Submitting Changes](#submitting-changes)
- [Getting Help](#getting-help)
## Development Setup
### Prerequisites
- **Rust**: Install via [rustup](https://rustup.rs/) (stable toolchain)
- **System dependencies** (Linux):
- ALSA development files: `libasound2-dev`
- For Ubuntu/Debian: `sudo apt install libasound2-dev pkg-config`
- For Arch/Manjaro: `sudo pacman -S alsa-lib`
- **FFmpeg**: Required for video encoding/decoding
- Ubuntu/Debian: `sudo apt install ffmpeg libavcodec-dev libavformat-dev libavutil-dev libswscale-dev libswresample-dev pkg-config clang`
- Arch/Manjaro: `sudo pacman -S ffmpeg`
### Clone and Build
```bash
# Clone the repository (GitHub)
git clone https://github.com/skykooler/lightningbeam.git
# Or from Gitea
git clone https://git.skyler.io/skyler/lightningbeam.git
cd lightningbeam
# Build the Rust UI editor (current focus)
cd lightningbeam-ui
cargo build
# Run the editor
cargo run
```
**Note**: The project is hosted on both GitHub and Gitea (git.skyler.io). You can use either for cloning and submitting pull requests.
## Building the Project
### Workspace Structure
The project consists of multiple Rust workspaces:
1. **lightningbeam-ui** (current focus) - Pure Rust UI application
- `lightningbeam-editor/` - Main editor application
- `lightningbeam-core/` - Core data models and business logic
2. **daw-backend** - Audio engine (standalone crate)
3. **Root workspace** (legacy) - Contains Tauri backend and benchmarks
### Build Commands
```bash
# Build the editor (from lightningbeam-ui/)
cargo build
# Build with optimizations (faster runtime)
cargo build --release
# Check just the audio backend
cargo check -p daw-backend
# Build the audio backend separately
cd ../daw-backend
cargo build
```
### Debug Builds and Audio Performance
The audio engine runs on a real-time thread with strict timing constraints (~5.8ms at 44.1kHz). To maintain performance in debug builds, the audio backend is compiled with optimizations even in debug mode:
```toml
# In lightningbeam-ui/Cargo.toml
[profile.dev.package.daw-backend]
opt-level = 2
```
This is already configured—no action needed.
### Debug Flags
Enable audio diagnostics with:
```bash
DAW_AUDIO_DEBUG=1 cargo run
```
This prints timing information, buffer sizes, and overrun warnings to help debug audio issues.
## Project Structure
```
lightningbeam-2/
├── lightningbeam-ui/ # Rust UI workspace (current)
│ ├── lightningbeam-editor/ # Main application
│ │ └── src/
│ │ ├── main.rs # Entry point
│ │ ├── panes/ # UI panes (stage, timeline, etc.)
│ │ └── tools/ # Drawing and editing tools
│ └── lightningbeam-core/ # Core data model
│ └── src/
│ ├── document.rs # Document structure
│ ├── clip.rs # Clips and instances
│ ├── action.rs # Undo/redo system
│ └── tools.rs # Tool system
├── daw-backend/ # Audio engine
│ └── src/
│ ├── lib.rs # Audio system setup
│ ├── audio/
│ │ ├── engine.rs # Audio callback
│ │ ├── track.rs # Track management
│ │ └── project.rs # Project state
│ └── effects/ # Audio effects
├── src-tauri/ # Legacy Tauri backend
└── src/ # Legacy JavaScript frontend
```
## Making Changes
### Branching Strategy
- `main` - Stable branch
- `rust-ui` - Active development branch for Rust UI rewrite
- Feature branches - Create from `rust-ui` for new features
### Before You Start
1. Check existing issues or create a new one to discuss your change
2. Make sure you're on the latest `rust-ui` branch:
```bash
git checkout rust-ui
git pull origin rust-ui
```
3. Create a feature branch:
```bash
git checkout -b feature/your-feature-name
```
## Code Style
### Rust Style
- Follow standard Rust formatting: `cargo fmt`
- Check for common issues: `cargo clippy`
- Use meaningful variable names
- Add comments for non-obvious code
- Keep functions focused and reasonably sized
### Key Patterns
#### Pane ID Salting
When implementing new panes, **always salt egui IDs** with the node path to avoid collisions when users add multiple instances of the same pane:
```rust
ui.push_id(&node_path, |ui| { // Salt with node path
    ui.label("My Widget");
});
```
#### Splitting Borrows with `std::mem::take`
When you need to split borrows from a struct, use `std::mem::take`:
```rust
let mut clips = std::mem::take(&mut self.clips);
// Now you can borrow other fields while processing clips
```
#### Two-Phase Dispatch
Panes register handlers during render, execution happens after:
```rust
// During render
shared_state.register_action(Box::new(MyAction { ... }));
// After all panes rendered
for action in shared_state.pending_actions.drain(..) {
    action.apply(&mut document);
}
```
## Testing
### Running Tests
```bash
# Run all tests
cargo test
# Test specific package
cargo test -p lightningbeam-core
cargo test -p daw-backend
# Run with output
cargo test -- --nocapture
```
### Audio Testing
Test audio functionality:
```bash
# Run with audio debug output
DAW_AUDIO_DEBUG=1 cargo run
# Check for audio dropouts or timing issues in the console output
```
## Submitting Changes
### Before Submitting
1. **Format your code**: `cargo fmt --all`
2. **Run clippy**: `cargo clippy --all-targets --all-features`
3. **Run tests**: `cargo test --all`
4. **Test manually**: Build and run the application to verify your changes work
5. **Write clear commit messages**: Describe what and why, not just what
### Commit Message Format
```
Short summary (50 chars or less)
More detailed explanation if needed. Wrap at 72 characters.
Explain the problem this commit solves and why you chose
this solution.
- Bullet points are fine
- Use present tense: "Add feature" not "Added feature"
```
### Pull Request Process
1. Push your branch to GitHub or Gitea
2. Open a pull request against `rust-ui` branch
- GitHub: https://github.com/skykooler/lightningbeam
- Gitea: https://git.skyler.io/skyler/lightningbeam
3. Provide a clear description of:
- What problem does this solve?
- How does it work?
- Any testing you've done
- Screenshots/videos if applicable (especially for UI changes)
4. Address review feedback
5. Once approved, a maintainer will merge your PR
### PR Checklist
- [ ] Code follows project style (`cargo fmt`, `cargo clippy`)
- [ ] Tests pass (`cargo test`)
- [ ] New code has appropriate tests (if applicable)
- [ ] Documentation updated (if needed)
- [ ] Commit messages are clear
- [ ] PR description explains the change
## Getting Help
- **Issues**: Check issues on [GitHub](https://github.com/skykooler/lightningbeam/issues) or [Gitea](https://git.skyler.io/skyler/lightningbeam/issues) for existing discussions
- **Documentation**: See `ARCHITECTURE.md` and `docs/` folder for technical details
- **Questions**: Open a discussion or issue with the "question" label on either platform
## Additional Resources
- [ARCHITECTURE.md](ARCHITECTURE.md) - System architecture overview
- [docs/AUDIO_SYSTEM.md](docs/AUDIO_SYSTEM.md) - Audio engine details
- [docs/UI_SYSTEM.md](docs/UI_SYSTEM.md) - UI and pane system
## License
By contributing, you agree that your contributions will be licensed under the same license as the project.

View File

@@ -1,3 +1,59 @@
# 1.0.3-alpha:
Changes:
- Add gradient support to vector graphics
- Add "frames" timeline mode
- Reduce CPU usage at idle
- Allow group tracks' audio node graphs to be edited
Bugfixes:
- Support Vello CPU fallback on systems with older GPUs
# 1.0.2-alpha:
Changes:
- All vector shapes on a layer go into a unified shape rather than separate shapes
- Keyboard shortcuts are now user-configurable
- Added webcam support in video editor
- Background can now be transparent
- Video thumbnails are now displayed on the clip
- Virtual keyboard, piano roll and node editor now have a quick switcher
- Add electric guitar preset
- Layers can now be grouped
- Layers can be reordered by dragging
- Added VU meters to audio layers and mix
- Added raster image editing
- Added brush, airbrush, dodge/burn, sponge, pattern stamp, healing brush, clone stamp, blur/sharpen, magic wand and quick select tools
- Added support for MyPaint .myb brushes
- UI now uses CSS styling to support future user styles
- Added image export
Bugfixes:
- Toolbar now only shows tools that can be used on the current layer
- Fix NAM model loading
- Fix menu width and mouse following
- Export dialog now remembers the previous export filename
# 1.0.1-alpha:
Changes:
- Added real-time amp simulation via NAM
- Added beat mode to the timeline
- Changed shape drawing from making separate shapes to making shapes in the layer using a DCEL graph
- Licensed under GPLv3
- Added snapping for vector editing
- Added organ instrument and vibrato node
Bugfixes:
- Fix preset loading not updating node graph editor
- Fix stroke intersections not splitting strokes
- Fix paint bucket fill not attaching to existing strokes
# 1.0.0-alpha:
Changes:
- New native GUI built with egui + wgpu (replaces Tauri/web frontend)
- GPU-accelerated canvas with vello rendering
- MIDI input and node-based audio graph improvements
- Factory instrument presets
- Video import and high performance playback
# 0.8.1-alpha:
Changes:
- Rewrite timeline UI

674
LICENSE Normal file
View File

@@ -0,0 +1,674 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.

108
README.md
View File

@@ -10,42 +10,118 @@ A free and open-source 2D multimedia editor combining vector animation, audio pr
![Video Editing View](screenshots/video.png)
## Current Features
## Features
**Vector Animation**
- GPU-accelerated vector rendering with Vello
- Draw and animate vector shapes with keyframe-based timeline
- Non-destructive editing workflow
- Paint bucket tool for automatic fill detection
**Audio Production**
- Multi-track audio recording
- MIDI sequencing with synthesized and sampled instruments
- Integrated DAW functionality
- Real-time multi-track audio recording and playback
- Node graph-based effects processing
- MIDI sequencing with synthesizers and samplers
- Comprehensive effects library (reverb, delay, EQ, compression, distortion, etc.)
- Custom audio engine with lock-free design for glitch-free playback
**Video Editing**
- Basic video timeline and editing (early stage)
- FFmpeg-based video decoding
- Video timeline and editing with FFmpeg-based decoding
- GPU-accelerated waveform rendering with mipmaps
- Audio integration from video soundtracks
## Technical Stack
- **Frontend:** Vanilla JavaScript
- **Backend:** Rust (Tauri framework)
- **Audio:** cpal + dasp for audio processing
- **Video:** FFmpeg for encode/decode
**Current Implementation (Rust UI)**
- **UI Framework:** egui (immediate-mode GUI)
- **GPU Rendering:** Vello + wgpu (Vulkan/Metal/DirectX 12)
- **Audio Engine:** Custom real-time engine (`daw-backend`)
- cpal for cross-platform audio I/O
- symphonia for audio decoding
- dasp for node graph processing
- **Video:** FFmpeg 8 for encode/decode
- **Platform:** Cross-platform (Linux, macOS, Windows)
**Legacy Implementation (Deprecated)**
- Frontend: Vanilla JavaScript
- Backend: Rust (Tauri framework)
## Project Status
Lightningbeam is under active development. Current focus is on core functionality and architecture. Full project export is not yet fully implemented.
Lightningbeam is under active development on the `rust-ui` branch. The project has been rewritten from a Tauri/JavaScript prototype to a pure Rust application to eliminate IPC bottlenecks and achieve better performance for real-time video and audio processing.
### Known Architectural Challenge
**Current Status:**
- ✅ Core UI panes (Stage, Timeline, Asset Library, Info Panel, Toolbar)
- ✅ Drawing tools (Select, Draw, Rectangle, Ellipse, Paint Bucket, Transform)
- ✅ Undo/redo system
- ✅ GPU-accelerated vector rendering
- ✅ Audio engine with node graph processing
- ✅ GPU waveform rendering with mipmaps
- ✅ Video decoding integration
- 🚧 Export system (in progress)
- 🚧 Node editor UI (planned)
- 🚧 Piano roll editor (planned)
The current Tauri implementation hits IPC bandwidth limitations when streaming decoded video frames from Rust to JavaScript. Tauri's IPC layer has significant serialization overhead (~a few MB/s), which is insufficient for real-time high-resolution video rendering.
## Getting Started
I'm currently exploring a full Rust rewrite using wgpu/egui to eliminate the IPC bottleneck and handle rendering entirely in native code.
### Prerequisites
- Rust (stable toolchain via [rustup](https://rustup.rs/))
- System dependencies:
- **Linux:** ALSA development files, FFmpeg 8
- **macOS:** FFmpeg (via Homebrew)
- **Windows:** FFmpeg 8, Visual Studio with C++ tools
See [docs/BUILDING.md](docs/BUILDING.md) for detailed setup instructions.
### Building and Running
```bash
# Clone the repository
git clone https://github.com/skykooler/lightningbeam.git
# Or from Gitea
git clone https://git.skyler.io/skyler/lightningbeam.git
cd lightningbeam/lightningbeam-ui
# Build and run
cargo run
# Or build optimized release version
cargo build --release
```
### Documentation
- **[CONTRIBUTING.md](CONTRIBUTING.md)** - Development setup and contribution guidelines
- **[ARCHITECTURE.md](ARCHITECTURE.md)** - System architecture overview
- **[docs/BUILDING.md](docs/BUILDING.md)** - Detailed build instructions and troubleshooting
- **[docs/AUDIO_SYSTEM.md](docs/AUDIO_SYSTEM.md)** - Audio engine architecture and development
- **[docs/UI_SYSTEM.md](docs/UI_SYSTEM.md)** - UI pane system and tool development
- **[docs/RENDERING.md](docs/RENDERING.md)** - GPU rendering pipeline and shaders
## Project History
Lightningbeam evolved from earlier multimedia editing projects I've worked on since 2010, including the FreeJam DAW. The current JavaScript/Tauri iteration began in November 2023.
Lightningbeam evolved from earlier multimedia editing projects I've worked on since 2010, including the FreeJam DAW. The JavaScript/Tauri prototype began in November 2023, and the Rust UI rewrite started in late 2024 to eliminate performance bottlenecks and provide a more integrated native experience.
## Goals
Create a comprehensive FOSS alternative for 2D-focused multimedia work, integrating animation, audio, and video editing in a unified workflow.
Create a comprehensive FOSS alternative for 2D-focused multimedia work, integrating animation, audio, and video editing in a unified workflow. Lightningbeam aims to be:
- **Fast:** GPU-accelerated rendering and real-time audio processing
- **Flexible:** Node graph-based audio routing and modular synthesis
- **Integrated:** Seamless workflow across animation, audio, and video
- **Open:** Free and open-source, built on open standards
## Contributing
Contributions are welcome! Please see [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines.
## License
[License information to be added]
## Links
- **GitHub:** https://github.com/skykooler/lightningbeam
- **Gitea:** https://git.skyler.io/skyler/lightningbeam

446
daw-backend/Cargo.lock generated
View File

@ -31,9 +31,9 @@ dependencies = [
[[package]]
name = "alsa"
version = "0.9.1"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed7572b7ba83a31e20d1b48970ee402d2e3e0537dcfe0a3ff4d6eb7508617d43"
checksum = "7c88dbbce13b232b26250e1e2e6ac18b6a891a646b8148285036ebce260ac5c3"
dependencies = [
"alsa-sys",
"bitflags 2.9.4",
@ -69,6 +69,13 @@ version = "0.22.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6"
[[package]]
name = "beamdsp"
version = "0.1.0"
dependencies = [
"serde",
]
[[package]]
name = "bindgen"
version = "0.72.1"
@ -99,6 +106,15 @@ version = "2.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2261d10cca569e4643e526d8dc2e62e433cc8aba21ab764233731f8d369bf394"
[[package]]
name = "block2"
version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cdeb9d870516001442e364c5220d3574d2da8dc765554b4a617230d33fa58ef5"
dependencies = [
"objc2",
]
[[package]]
name = "bumpalo"
version = "3.19.0"
@ -139,8 +155,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac9fe6cdbb24b6ade63616c0a0688e45bb56732262c158df3c0c4bea4ca47cb7"
dependencies = [
"find-msvc-tools",
"jobserver",
"libc",
"shlex",
]
@ -176,6 +190,15 @@ dependencies = [
"libloading",
]
[[package]]
name = "cmake"
version = "0.1.54"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e7caa3f9de89ddbe2c607f4101924c5abec803763ae9534e4f4d7d8f84aa81f0"
dependencies = [
"cc",
]
[[package]]
name = "combine"
version = "4.6.7"
@ -217,22 +240,16 @@ checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b"
[[package]]
name = "coreaudio-rs"
version = "0.11.3"
version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "321077172d79c662f64f5071a03120748d5bb652f5231570141be24cfcd2bace"
checksum = "1aae284fbaf7d27aa0e292f7677dfbe26503b0d555026f702940805a630eac17"
dependencies = [
"bitflags 1.3.2",
"core-foundation-sys",
"coreaudio-sys",
]
[[package]]
name = "coreaudio-sys"
version = "0.2.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ceec7a6067e62d6f931a2baf6f3a751f4a892595bcec1461a3c94ef9949864b6"
dependencies = [
"bindgen",
"libc",
"objc2-audio-toolbox",
"objc2-core-audio",
"objc2-core-audio-types",
"objc2-core-foundation",
]
[[package]]
@ -257,12 +274,11 @@ dependencies = [
[[package]]
name = "cpal"
version = "0.15.3"
version = "0.17.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "873dab07c8f743075e57f524c583985fbaf745602acbe916a01539364369a779"
checksum = "5b1f9c7312f19fc2fa12fd7acaf38de54e8320ba10d1a02dcbe21038def51ccb"
dependencies = [
"alsa 0.9.1",
"core-foundation-sys",
"alsa 0.10.0",
"coreaudio-rs",
"dasp_sample",
"jni",
@ -271,11 +287,19 @@ dependencies = [
"mach2",
"ndk",
"ndk-context",
"oboe",
"num-derive",
"num-traits",
"objc2",
"objc2-audio-toolbox",
"objc2-avf-audio",
"objc2-core-audio",
"objc2-core-audio-types",
"objc2-core-foundation",
"objc2-foundation",
"wasm-bindgen",
"wasm-bindgen-futures",
"web-sys",
"windows 0.54.0",
"windows 0.62.2",
]
[[package]]
@ -447,6 +471,7 @@ name = "daw-backend"
version = "0.1.0"
dependencies = [
"base64",
"beamdsp",
"cpal",
"crossterm",
"dasp_envelope",
@ -457,19 +482,33 @@ dependencies = [
"dasp_rms",
"dasp_sample",
"dasp_signal",
"ffmpeg-next",
"hound",
"memmap2",
"midir",
"midly",
"nam-ffi",
"pathdiff",
"petgraph 0.6.5",
"rand",
"ratatui",
"rayon",
"rtrb",
"serde",
"serde_json",
"symphonia",
]
[[package]]
name = "dispatch2"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e0e367e4e7da84520dedcac1901e4da967309406d1e51017ae1abfb97adbd38"
dependencies = [
"bitflags 2.9.4",
"objc2",
]
[[package]]
name = "either"
version = "1.15.0"
@ -497,6 +536,31 @@ version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af9673d8203fcb076b19dfd17e38b3d4ae9f44959416ea532ce72415a6020365"
[[package]]
name = "ffmpeg-next"
version = "8.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d658424d233cbd993a972dd73a66ca733acd12a494c68995c9ac32ae1fe65b40"
dependencies = [
"bitflags 2.9.4",
"ffmpeg-sys-next",
"libc",
]
[[package]]
name = "ffmpeg-sys-next"
version = "8.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9bca20aa4ee774fe384c2490096c122b0b23cf524a9910add0686691003d797b"
dependencies = [
"bindgen",
"cc",
"libc",
"num_cpus",
"pkg-config",
"vcpkg",
]
[[package]]
name = "find-msvc-tools"
version = "0.1.4"
@ -532,18 +596,6 @@ dependencies = [
"wasi",
]
[[package]]
name = "getrandom"
version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd"
dependencies = [
"cfg-if",
"libc",
"r-efi",
"wasip2",
]
[[package]]
name = "glob"
version = "0.3.3"
@ -579,6 +631,12 @@ version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
[[package]]
name = "hermit-abi"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c"
[[package]]
name = "hound"
version = "3.5.1"
@ -651,16 +709,6 @@ version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130"
[[package]]
name = "jobserver"
version = "0.1.34"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33"
dependencies = [
"getrandom 0.3.4",
"libc",
]
[[package]]
name = "js-sys"
version = "0.3.81"
@ -719,9 +767,9 @@ dependencies = [
[[package]]
name = "mach2"
version = "0.4.3"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d640282b302c0bb0a2a8e0233ead9035e3bed871f0b7e81fe4a1ec829765db44"
checksum = "6a1b95cd5421ec55b445b5ae102f5ea0e768de1f82bd3001e11f426c269c3aea"
dependencies = [
"libc",
]
@ -732,6 +780,15 @@ version = "2.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273"
[[package]]
name = "memmap2"
version = "0.9.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "714098028fe011992e1c3962653c96b2d578c4b4bce9036e15ff220319b1e0e3"
dependencies = [
"libc",
]
[[package]]
name = "midir"
version = "0.9.1"
@ -775,11 +832,18 @@ dependencies = [
"windows-sys 0.48.0",
]
[[package]]
name = "nam-ffi"
version = "0.1.0"
dependencies = [
"cmake",
]
[[package]]
name = "ndk"
version = "0.8.0"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2076a31b7010b17a38c01907c45b945e8f11495ee4dd588309718901b1f7a5b7"
checksum = "c3f42e7bbe13d351b6bead8286a43aac9534b82bd3cc43e47037f012ebfd62d4"
dependencies = [
"bitflags 2.9.4",
"jni-sys",
@ -797,9 +861,9 @@ checksum = "27b02d87554356db9e9a873add8782d4ea6e3e58ea071a9adb9a2e8ddb884a8b"
[[package]]
name = "ndk-sys"
version = "0.5.0+25.2.9519653"
version = "0.6.0+11769913"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8c196769dd60fd4f363e11d948139556a344e79d451aeb2fa2fd040738ef7691"
checksum = "ee6cda3051665f1fb8d9e08fc35c96d5a244fb1be711a03b71118828afc9a873"
dependencies = [
"jni-sys",
]
@ -845,6 +909,16 @@ dependencies = [
"autocfg",
]
[[package]]
name = "num_cpus"
version = "1.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b"
dependencies = [
"hermit-abi",
"libc",
]
[[package]]
name = "num_enum"
version = "0.7.4"
@ -868,26 +942,92 @@ dependencies = [
]
[[package]]
name = "oboe"
version = "0.6.1"
name = "objc2"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e8b61bebd49e5d43f5f8cc7ee2891c16e0f41ec7954d36bcb6c14c5e0de867fb"
checksum = "3a12a8ed07aefc768292f076dc3ac8c48f3781c8f2d5851dd3d98950e8c5a89f"
dependencies = [
"jni",
"ndk",
"ndk-context",
"num-derive",
"num-traits",
"oboe-sys",
"objc2-encode",
]
[[package]]
name = "oboe-sys"
version = "0.6.1"
name = "objc2-audio-toolbox"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c8bb09a4a2b1d668170cfe0a7d5bc103f8999fb316c98099b6a9939c9f2e79d"
checksum = "6948501a91121d6399b79abaa33a8aa4ea7857fe019f341b8c23ad6e81b79b08"
dependencies = [
"cc",
"bitflags 2.9.4",
"libc",
"objc2",
"objc2-core-audio",
"objc2-core-audio-types",
"objc2-core-foundation",
"objc2-foundation",
]
[[package]]
name = "objc2-avf-audio"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "13a380031deed8e99db00065c45937da434ca987c034e13b87e4441f9e4090be"
dependencies = [
"objc2",
"objc2-foundation",
]
[[package]]
name = "objc2-core-audio"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e1eebcea8b0dbff5f7c8504f3107c68fc061a3eb44932051c8cf8a68d969c3b2"
dependencies = [
"dispatch2",
"objc2",
"objc2-core-audio-types",
"objc2-core-foundation",
"objc2-foundation",
]
[[package]]
name = "objc2-core-audio-types"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a89f2ec274a0cf4a32642b2991e8b351a404d290da87bb6a9a9d8632490bd1c"
dependencies = [
"bitflags 2.9.4",
"objc2",
]
[[package]]
name = "objc2-core-foundation"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2a180dd8642fa45cdb7dd721cd4c11b1cadd4929ce112ebd8b9f5803cc79d536"
dependencies = [
"bitflags 2.9.4",
"block2",
"dispatch2",
"libc",
"objc2",
]
[[package]]
name = "objc2-encode"
version = "4.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ef25abbcd74fb2609453eb695bd2f860d389e457f67dc17cafc8b8cbc89d0c33"
[[package]]
name = "objc2-foundation"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3e0adef53c21f888deb4fa59fc59f7eb17404926ee8a6f59f5df0fd7f9f3272"
dependencies = [
"bitflags 2.9.4",
"block2",
"libc",
"objc2",
"objc2-core-foundation",
]
[[package]]
@ -993,12 +1133,6 @@ dependencies = [
"proc-macro2",
]
[[package]]
name = "r-efi"
version = "5.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f"
[[package]]
name = "rand"
version = "0.8.5"
@ -1026,7 +1160,7 @@ version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
dependencies = [
"getrandom 0.2.16",
"getrandom",
]
[[package]]
@ -1554,6 +1688,12 @@ version = "0.1.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af"
[[package]]
name = "vcpkg"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"
[[package]]
name = "walkdir"
version = "2.5.0"
@ -1570,15 +1710,6 @@ version = "0.11.1+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b"
[[package]]
name = "wasip2"
version = "1.0.1+wasi-0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7"
dependencies = [
"wit-bindgen",
]
[[package]]
name = "wasm-bindgen"
version = "0.2.104"
@ -1709,22 +1840,69 @@ dependencies = [
[[package]]
name = "windows"
version = "0.54.0"
version = "0.62.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9252e5725dbed82865af151df558e754e4a3c2c30818359eb17465f1346a1b49"
checksum = "527fadee13e0c05939a6a05d5bd6eec6cd2e3dbd648b9f8e447c6518133d8580"
dependencies = [
"windows-collections",
"windows-core",
"windows-future",
"windows-numerics",
]
[[package]]
name = "windows-collections"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "23b2d95af1a8a14a3c7367e1ed4fc9c20e0a26e79551b1454d72583c97cc6610"
dependencies = [
"windows-core",
"windows-targets 0.52.6",
]
[[package]]
name = "windows-core"
version = "0.54.0"
version = "0.62.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "12661b9c89351d684a50a8a643ce5f608e20243b9fb84687800163429f161d65"
checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb"
dependencies = [
"windows-implement",
"windows-interface",
"windows-link",
"windows-result",
"windows-targets 0.52.6",
"windows-strings",
]
[[package]]
name = "windows-future"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e1d6f90251fe18a279739e78025bd6ddc52a7e22f921070ccdc67dde84c605cb"
dependencies = [
"windows-core",
"windows-link",
"windows-threading",
]
[[package]]
name = "windows-implement"
version = "0.60.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "windows-interface"
version = "0.59.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
@ -1734,12 +1912,31 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5"
[[package]]
name = "windows-result"
version = "0.1.2"
name = "windows-numerics"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5e383302e8ec8515204254685643de10811af0ed97ea37210dc26fb0032647f8"
checksum = "6e2e40844ac143cdb44aead537bbf727de9b044e107a0f1220392177d15b0f26"
dependencies = [
"windows-targets 0.52.6",
"windows-core",
"windows-link",
]
[[package]]
name = "windows-result"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5"
dependencies = [
"windows-link",
]
[[package]]
name = "windows-strings"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091"
dependencies = [
"windows-link",
]
[[package]]
@ -1800,19 +1997,12 @@ dependencies = [
]
[[package]]
name = "windows-targets"
version = "0.52.6"
name = "windows-threading"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
checksum = "3949bd5b99cafdf1c7ca86b43ca564028dfe27d66958f2470940f73d86d75b37"
dependencies = [
"windows_aarch64_gnullvm 0.52.6",
"windows_aarch64_msvc 0.52.6",
"windows_i686_gnu 0.52.6",
"windows_i686_gnullvm",
"windows_i686_msvc 0.52.6",
"windows_x86_64_gnu 0.52.6",
"windows_x86_64_gnullvm 0.52.6",
"windows_x86_64_msvc 0.52.6",
"windows-link",
]
[[package]]
@ -1827,12 +2017,6 @@ version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8"
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
[[package]]
name = "windows_aarch64_msvc"
version = "0.42.2"
@ -1845,12 +2029,6 @@ version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc"
[[package]]
name = "windows_aarch64_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
[[package]]
name = "windows_i686_gnu"
version = "0.42.2"
@ -1863,18 +2041,6 @@ version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e"
[[package]]
name = "windows_i686_gnu"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
[[package]]
name = "windows_i686_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
[[package]]
name = "windows_i686_msvc"
version = "0.42.2"
@ -1887,12 +2053,6 @@ version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406"
[[package]]
name = "windows_i686_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
[[package]]
name = "windows_x86_64_gnu"
version = "0.42.2"
@ -1905,12 +2065,6 @@ version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e"
[[package]]
name = "windows_x86_64_gnu"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.42.2"
@ -1923,12 +2077,6 @@ version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
[[package]]
name = "windows_x86_64_msvc"
version = "0.42.2"
@ -1941,12 +2089,6 @@ version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538"
[[package]]
name = "windows_x86_64_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
[[package]]
name = "winnow"
version = "0.7.13"
@ -1956,12 +2098,6 @@ dependencies = [
"memchr",
]
[[package]]
name = "wit-bindgen"
version = "0.46.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59"
[[package]]
name = "zerocopy"
version = "0.8.27"

View File

@@ -4,7 +4,7 @@ version = "0.1.0"
edition = "2021"
[dependencies]
cpal = "0.15"
cpal = "0.17"
symphonia = { version = "0.5", features = ["all"] }
rtrb = "0.3"
midly = "0.5"
@@ -15,11 +15,14 @@ crossterm = "0.27"
rand = "0.8"
base64 = "0.22"
pathdiff = "0.2"
rayon = "1.10"
# Memory-mapped I/O for audio files
memmap2 = "0.9"
# Audio export
hound = "3.5"
# TODO: Add MP3 support with a different crate
# mp3lame-encoder API is too complex, need to find a better option
ffmpeg-next = "8.0" # For MP3/AAC encoding
# Node-based audio graph dependencies
dasp_graph = "0.11"
@@ -33,6 +36,12 @@ dasp_rms = "0.11"
petgraph = "0.6"
serde_json = "1.0"
# BeamDSP scripting engine
beamdsp = { path = "../lightningbeam-ui/beamdsp" }
# Neural Amp Modeler FFI
nam-ffi = { path = "../nam-ffi" }
[dev-dependencies]
[profile.release]

Binary file not shown.

View File

@@ -1,3 +1,6 @@
use std::sync::Arc;
use serde::{Serialize, Deserialize};
/// Audio clip instance ID type
pub type AudioClipInstanceId = u32;
@@ -16,7 +19,7 @@ pub type ClipId = AudioClipInstanceId;
/// ## Looping
/// If `external_duration` is greater than `internal_end - internal_start`,
/// the clip will seamlessly loop back to `internal_start` when it reaches `internal_end`.
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AudioClipInstance {
pub id: AudioClipInstanceId,
pub audio_pool_index: usize,
@@ -33,6 +36,13 @@ pub struct AudioClipInstance {
/// Clip-level gain
pub gain: f32,
/// Per-instance read-ahead buffer for compressed audio streaming.
/// Each clip instance gets its own buffer so multiple instances of the
/// same file (on different tracks or at different positions) don't fight
/// over a single target_frame.
#[serde(skip)]
pub read_ahead: Option<Arc<super::disk_reader::ReadAheadBuffer>>,
}
/// Type alias for backwards compatibility
@@ -56,6 +66,7 @@ impl AudioClipInstance {
external_start,
external_duration,
gain: 1.0,
read_ahead: None,
}
}
@@ -76,6 +87,7 @@ impl AudioClipInstance {
external_start: start_time,
external_duration: duration,
gain: 1.0,
read_ahead: None,
}
}

View File

@@ -0,0 +1,651 @@
//! Disk reader for streaming audio playback.
//!
//! Provides lock-free read-ahead buffers for audio files that cannot be kept
//! fully decoded in memory. A background thread fills these buffers ahead of
//! the playhead so the audio callback never blocks on I/O or decoding.
//!
//! **InMemory** files bypass the disk reader entirely — their data is already
//! available as `&[f32]`. **Mapped** files (mmap'd WAV/AIFF) also bypass the
//! disk reader for now (OS page cache handles paging). **Compressed** files
//! (MP3, FLAC, OGG, etc.) use a `CompressedReader` that stream-decodes on
//! demand via Symphonia into a `ReadAheadBuffer`.
use std::cell::UnsafeCell;
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use std::sync::Arc;
use symphonia::core::audio::SampleBuffer;
use symphonia::core::codecs::DecoderOptions;
use symphonia::core::formats::{FormatOptions, SeekMode, SeekTo};
use symphonia::core::io::MediaSourceStream;
use symphonia::core::meta::MetadataOptions;
use symphonia::core::probe::Hint;
/// Read-ahead distance in seconds.
const PREFETCH_SECONDS: f64 = 2.0;
/// How often the disk reader thread wakes up to check for work (ms).
const POLL_INTERVAL_MS: u64 = 5;
// ---------------------------------------------------------------------------
// ReadAheadBuffer
// ---------------------------------------------------------------------------
/// Lock-free read-ahead buffer shared between the disk reader (writer) and the
/// audio callback (reader).
///
/// # Thread safety
///
/// This is a **single-producer single-consumer** (SPSC) structure:
/// - **Producer** (disk reader thread): calls `write_samples()` and
/// `advance_start()` to fill and reclaim buffer space.
/// - **Consumer** (audio callback): calls `read_sample()` and `has_range()`
/// to access decoded audio.
///
/// The producer only writes to indices **beyond** `valid_frames`, while the
/// consumer only reads indices **within** `[start_frame, start_frame +
/// valid_frames)`. Because the two threads always operate on disjoint regions,
/// the sample data itself requires no locking. Atomics with Acquire/Release
/// ordering on `start_frame` and `valid_frames` provide the happens-before
/// relationship that guarantees the consumer sees completed writes.
///
/// The `UnsafeCell` wrapping the buffer data allows the producer to mutate it
/// through a shared `&self` reference. This is sound because only one thread
/// (the producer) ever writes, and it writes to a region that the consumer
/// cannot yet see (gated by the `valid_frames` atomic).
pub struct ReadAheadBuffer {
    /// Interleaved f32 samples stored as a circular buffer.
    /// Wrapped in `UnsafeCell` to allow the producer to write through `&self`.
    /// Capacity is fixed at construction; the ring never reallocates.
    buffer: UnsafeCell<Box<[f32]>>,
    /// The absolute frame number of the oldest valid frame in the ring.
    /// Written by the producer, read (Acquire) by the consumer.
    start_frame: AtomicU64,
    /// Number of valid frames starting from `start_frame`.
    /// Publishing new data = Release-storing a larger value here.
    valid_frames: AtomicU64,
    /// Total capacity in frames (not samples).
    capacity_frames: usize,
    /// Number of audio channels (interleave stride).
    channels: u32,
    /// Source file sample rate.
    sample_rate: u32,
    /// Last file-local frame requested by the audio callback.
    /// Written by the consumer (render_from_file), read by the disk reader.
    /// The disk reader uses this instead of the global playhead to know
    /// where in the file to buffer around. `u64::MAX` means "no clip is
    /// currently reading" (see `reset_target_frame`/`has_active_target`).
    target_frame: AtomicU64,
    /// When true, `render_from_file` will block-wait for frames instead of
    /// returning silence on buffer miss. Used during offline export.
    export_mode: AtomicBool,
}
// SAFETY: See the doc comment on ReadAheadBuffer for the full safety argument.
// In short: SPSC access pattern with atomic coordination means no data races.
// The circular design means advance_start never moves data — it only bumps
// the start pointer, so the consumer never sees partially-shifted memory.
// (The `UnsafeCell` makes the type !Sync by default; these impls opt back in
// on the strength of the single-producer/single-consumer discipline above.)
unsafe impl Send for ReadAheadBuffer {}
unsafe impl Sync for ReadAheadBuffer {}
impl std::fmt::Debug for ReadAheadBuffer {
    /// Manual `Debug`: the sample data itself is omitted (it is large and
    /// lives behind an `UnsafeCell`); only the bookkeeping counters are shown.
    /// Relaxed loads are fine here — this is diagnostic output, not
    /// synchronization.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut dbg = f.debug_struct("ReadAheadBuffer");
        dbg.field("capacity_frames", &self.capacity_frames);
        dbg.field("channels", &self.channels);
        dbg.field("sample_rate", &self.sample_rate);
        dbg.field("start_frame", &self.start_frame.load(Ordering::Relaxed));
        dbg.field("valid_frames", &self.valid_frames.load(Ordering::Relaxed));
        dbg.finish()
    }
}
impl ReadAheadBuffer {
    /// Create a new read-ahead buffer with the given capacity (in seconds).
    /// The backing storage is allocated once here and never grows.
    pub fn new(capacity_seconds: f64, sample_rate: u32, channels: u32) -> Self {
        let capacity_frames = (capacity_seconds * sample_rate as f64) as usize;
        let buffer_len = capacity_frames * channels as usize;
        Self {
            buffer: UnsafeCell::new(vec![0.0f32; buffer_len].into_boxed_slice()),
            start_frame: AtomicU64::new(0),
            valid_frames: AtomicU64::new(0),
            capacity_frames,
            channels,
            sample_rate,
            target_frame: AtomicU64::new(0),
            export_mode: AtomicBool::new(false),
        }
    }
    /// Map an absolute frame number to a ring-buffer sample index.
    /// Absolute frames are taken modulo capacity, so a frame's ring slot is
    /// stable regardless of where `start_frame` currently points.
    #[inline(always)]
    fn ring_index(&self, frame: u64, channel: usize) -> usize {
        let ring_frame = (frame as usize) % self.capacity_frames;
        ring_frame * self.channels as usize + channel
    }
    /// Snapshot the current valid range. Call once per audio callback, then
    /// pass the returned `(start, end)` to `read_sample` for consistent reads.
    #[inline]
    pub fn snapshot(&self) -> (u64, u64) {
        let start = self.start_frame.load(Ordering::Acquire);
        let valid = self.valid_frames.load(Ordering::Acquire);
        (start, start + valid)
    }
    /// Read a single interleaved sample using a pre-loaded range snapshot.
    /// Returns `0.0` (silence) if the frame is outside `[snap_start, snap_end)`.
    /// Called from the **audio callback** (consumer).
    #[inline]
    pub fn read_sample(&self, frame: u64, channel: usize, snap_start: u64, snap_end: u64) -> f32 {
        if frame < snap_start || frame >= snap_end {
            return 0.0;
        }
        let idx = self.ring_index(frame, channel);
        // SAFETY: We only read indices that the producer has already written
        // and published via valid_frames. The circular layout means
        // advance_start never moves data, so no torn reads are possible.
        let buffer = unsafe { &*self.buffer.get() };
        buffer[idx]
    }
    /// Check whether a contiguous range of frames is fully available.
    #[inline]
    pub fn has_range(&self, start: u64, count: u64) -> bool {
        let buf_start = self.start_frame.load(Ordering::Acquire);
        let valid = self.valid_frames.load(Ordering::Acquire);
        start >= buf_start && start + count <= buf_start + valid
    }
    /// Current start frame of the buffer.
    #[inline]
    pub fn start_frame(&self) -> u64 {
        self.start_frame.load(Ordering::Acquire)
    }
    /// Number of valid frames currently in the buffer.
    #[inline]
    pub fn valid_frames_count(&self) -> u64 {
        self.valid_frames.load(Ordering::Acquire)
    }
    /// Update the target frame — the file-local frame the audio callback
    /// is currently reading from. Called by `render_from_file` (consumer).
    /// Each clip instance has its own buffer, so a plain Relaxed store is
    /// sufficient: no payload is published through this value.
    #[inline]
    pub fn set_target_frame(&self, frame: u64) {
        self.target_frame.store(frame, Ordering::Relaxed);
    }
    /// Reset the target frame to MAX before a new render cycle.
    /// If no clip calls `set_target_frame` this cycle, `has_active_target()`
    /// returns false, telling the disk reader to skip this buffer.
    #[inline]
    pub fn reset_target_frame(&self) {
        self.target_frame.store(u64::MAX, Ordering::Relaxed);
    }
    /// Force-set the target frame to an exact value.
    /// Used by the disk reader's seek command where we need an absolute position.
    #[inline]
    pub fn force_target_frame(&self, frame: u64) {
        self.target_frame.store(frame, Ordering::Relaxed);
    }
    /// Get the target frame set by the audio callback.
    /// Called by the disk reader thread (producer).
    #[inline]
    pub fn target_frame(&self) -> u64 {
        self.target_frame.load(Ordering::Relaxed)
    }
    /// Check if any clip set a target this cycle (vs still at reset value).
    #[inline]
    pub fn has_active_target(&self) -> bool {
        self.target_frame.load(Ordering::Relaxed) != u64::MAX
    }
    /// Enable or disable export (blocking) mode. When enabled,
    /// `render_from_file` will spin-wait for frames instead of returning
    /// silence on buffer miss.
    pub fn set_export_mode(&self, export: bool) {
        self.export_mode.store(export, Ordering::Release);
    }
    /// Check if export (blocking) mode is active.
    pub fn is_export_mode(&self) -> bool {
        self.export_mode.load(Ordering::Acquire)
    }
    /// Reset the buffer to start at `new_start` with zero valid frames.
    /// Called by the **disk reader thread** (producer) after a seek.
    /// valid_frames is zeroed first so a concurrent consumer can never
    /// observe the new start paired with the old (stale) length.
    pub fn reset(&self, new_start: u64) {
        self.valid_frames.store(0, Ordering::Release);
        self.start_frame.store(new_start, Ordering::Release);
    }
    /// Write interleaved samples into the buffer, extending the valid range.
    /// Called by the **disk reader thread** (producer only).
    /// Returns the number of frames actually written (may be less than `frames`
    /// if the buffer is full).
    ///
    /// Note: samples beyond the returned count are NOT queued anywhere — the
    /// caller is responsible for retrying or accepting the loss.
    ///
    /// # Safety
    /// Must only be called from the single producer thread.
    pub fn write_samples(&self, samples: &[f32], frames: usize) -> usize {
        let valid = self.valid_frames.load(Ordering::Acquire) as usize;
        let remaining_capacity = self.capacity_frames - valid;
        let write_frames = frames.min(remaining_capacity);
        if write_frames == 0 {
            return 0;
        }
        let ch = self.channels as usize;
        let start = self.start_frame.load(Ordering::Acquire);
        let write_start_frame = start as usize + valid;
        // SAFETY: We only write to ring positions beyond the current valid
        // range, which the consumer cannot access. Only one producer calls this.
        let buffer = unsafe { &mut *self.buffer.get() };
        // Write with wrap-around: the ring position may cross the buffer end.
        let ring_start = (write_start_frame % self.capacity_frames) * ch;
        let total_samples = write_frames * ch;
        let buffer_sample_len = self.capacity_frames * ch;
        let first_chunk = total_samples.min(buffer_sample_len - ring_start);
        buffer[ring_start..ring_start + first_chunk]
            .copy_from_slice(&samples[..first_chunk]);
        if first_chunk < total_samples {
            // Wrap around to the beginning of the buffer.
            let second_chunk = total_samples - first_chunk;
            buffer[..second_chunk]
                .copy_from_slice(&samples[first_chunk..first_chunk + second_chunk]);
        }
        // Make the new samples visible to the consumer (Release pairs with
        // the consumer's Acquire load in snapshot()/has_range()).
        self.valid_frames
            .store((valid + write_frames) as u64, Ordering::Release);
        write_frames
    }
    /// Advance the buffer start, discarding frames behind the playhead.
    /// Called by the **disk reader thread** (producer only) to reclaim space.
    ///
    /// Because this is a circular buffer, advancing the start only updates
    /// atomic counters — no data is moved, so the consumer never sees
    /// partially-shifted memory.
    pub fn advance_start(&self, new_start: u64) {
        let old_start = self.start_frame.load(Ordering::Acquire);
        if new_start <= old_start {
            return;
        }
        let advance_frames = (new_start - old_start) as usize;
        let valid = self.valid_frames.load(Ordering::Acquire) as usize;
        if advance_frames >= valid {
            // All data is stale — just reset.
            self.valid_frames.store(0, Ordering::Release);
            self.start_frame.store(new_start, Ordering::Release);
            return;
        }
        let new_valid = valid - advance_frames;
        // Store valid_frames first (shrinking the visible range), then
        // advance start_frame. The consumer always sees a consistent
        // sub-range of valid data: the intermediate state exposes a prefix
        // of the old range, which is still untouched valid data.
        self.valid_frames
            .store(new_valid as u64, Ordering::Release);
        self.start_frame.store(new_start, Ordering::Release);
    }
}
// ---------------------------------------------------------------------------
// CompressedReader
// ---------------------------------------------------------------------------
/// Wraps a Symphonia decoder for streaming a single compressed audio file.
/// Wraps a Symphonia decoder for streaming a single compressed audio file.
///
/// Owns both the container-level demuxer and the codec-level decoder, plus
/// the bookkeeping needed to track the decoder's absolute frame position.
struct CompressedReader {
    /// Container demuxer that yields encoded packets.
    format_reader: Box<dyn symphonia::core::formats::FormatReader>,
    /// Codec decoder that turns packets into PCM.
    decoder: Box<dyn symphonia::core::codecs::Decoder>,
    /// ID of the audio track being decoded; packets from other tracks are skipped.
    track_id: u32,
    /// Current decoder position in frames.
    current_frame: u64,
    /// Sample rate reported by the codec (falls back to 44100 if unreported).
    sample_rate: u32,
    /// Channel count reported by the codec (falls back to 2 if unreported).
    channels: u32,
    /// Total length in frames, if the container reports it (0 otherwise).
    #[allow(dead_code)]
    total_frames: u64,
    /// Temporary decode buffer, allocated lazily on the first decoded packet.
    sample_buf: Option<SampleBuffer<f32>>,
}
impl CompressedReader {
/// Open a compressed audio file and prepare for streaming decode.
fn open(path: &Path) -> Result<Self, String> {
let file =
std::fs::File::open(path).map_err(|e| format!("Failed to open file: {}", e))?;
let mss = MediaSourceStream::new(Box::new(file), Default::default());
let mut hint = Hint::new();
if let Some(ext) = path.extension().and_then(|e| e.to_str()) {
hint.with_extension(ext);
}
let probed = symphonia::default::get_probe()
.format(
&hint,
mss,
&FormatOptions::default(),
&MetadataOptions::default(),
)
.map_err(|e| format!("Failed to probe file: {}", e))?;
let format_reader = probed.format;
let track = format_reader
.tracks()
.iter()
.find(|t| t.codec_params.codec != symphonia::core::codecs::CODEC_TYPE_NULL)
.ok_or_else(|| "No audio tracks found".to_string())?;
let track_id = track.id;
let codec_params = &track.codec_params;
let sample_rate = codec_params.sample_rate.unwrap_or(44100);
let channels = codec_params
.channels
.map(|c| c.count())
.unwrap_or(2) as u32;
let total_frames = codec_params.n_frames.unwrap_or(0);
let decoder = symphonia::default::get_codecs()
.make(codec_params, &DecoderOptions::default())
.map_err(|e| format!("Failed to create decoder: {}", e))?;
Ok(Self {
format_reader,
decoder,
track_id,
current_frame: 0,
sample_rate,
channels,
total_frames,
sample_buf: None,
})
}
/// Seek to a specific frame. Returns the actual frame reached (may differ
/// for compressed formats that can only seek to keyframes).
fn seek(&mut self, target_frame: u64) -> Result<u64, String> {
let seek_to = SeekTo::TimeStamp {
ts: target_frame,
track_id: self.track_id,
};
let seeked = self
.format_reader
.seek(SeekMode::Coarse, seek_to)
.map_err(|e| format!("Seek failed: {}", e))?;
let actual_frame = seeked.actual_ts;
self.current_frame = actual_frame;
// Reset the decoder after seeking.
self.decoder.reset();
Ok(actual_frame)
}
/// Decode the next chunk of audio into `out`. Returns the number of frames
/// decoded. Returns `Ok(0)` at end-of-file.
fn decode_next(&mut self, out: &mut Vec<f32>) -> Result<usize, String> {
out.clear();
loop {
let packet = match self.format_reader.next_packet() {
Ok(p) => p,
Err(symphonia::core::errors::Error::IoError(ref e))
if e.kind() == std::io::ErrorKind::UnexpectedEof =>
{
return Ok(0); // EOF
}
Err(e) => return Err(format!("Read packet error: {}", e)),
};
if packet.track_id() != self.track_id {
continue;
}
match self.decoder.decode(&packet) {
Ok(decoded) => {
if self.sample_buf.is_none() {
let spec = *decoded.spec();
let duration = decoded.capacity() as u64;
self.sample_buf = Some(SampleBuffer::new(duration, spec));
}
if let Some(ref mut buf) = self.sample_buf {
buf.copy_interleaved_ref(decoded);
let samples = buf.samples();
out.extend_from_slice(samples);
let frames = samples.len() / self.channels as usize;
self.current_frame += frames as u64;
return Ok(frames);
}
return Ok(0);
}
Err(symphonia::core::errors::Error::DecodeError(_)) => {
continue; // Skip corrupt packets.
}
Err(e) => return Err(format!("Decode error: {}", e)),
}
}
}
}
// ---------------------------------------------------------------------------
// DiskReaderCommand
// ---------------------------------------------------------------------------
/// Commands sent from the engine to the disk reader thread.
/// Commands sent from the engine to the disk reader thread.
pub enum DiskReaderCommand {
    /// Start streaming a compressed file for a clip instance.
    ActivateFile {
        /// Unique ID used later to deactivate this stream.
        reader_id: u64,
        /// Path of the compressed audio file to open with Symphonia.
        path: PathBuf,
        /// Shared ring buffer the reader thread will fill.
        buffer: Arc<ReadAheadBuffer>,
    },
    /// Stop streaming for a clip instance.
    DeactivateFile { reader_id: u64 },
    /// The playhead has jumped — refill buffers from the new position.
    Seek { frame: u64 },
    /// Shut down the disk reader thread.
    Shutdown,
}
// ---------------------------------------------------------------------------
// DiskReader
// ---------------------------------------------------------------------------
/// Manages background read-ahead for compressed audio files.
///
/// The engine creates a `DiskReader` at startup. When a compressed file is
/// imported, it sends an `ActivateFile` command. The disk reader opens a
/// Symphonia decoder and starts filling the file's `ReadAheadBuffer` ahead
/// of the shared playhead.
/// Manages background read-ahead for compressed audio files.
///
/// The engine creates a `DiskReader` at startup. When a compressed file is
/// imported, it sends an `ActivateFile` command. The disk reader opens a
/// Symphonia decoder and starts filling the file's `ReadAheadBuffer` ahead
/// of the shared playhead.
pub struct DiskReader {
    /// Channel to send commands to the background thread (bounded SPSC ring).
    command_tx: rtrb::Producer<DiskReaderCommand>,
    /// Shared playhead position (frames). The engine updates this atomically.
    /// Currently unused: prefetch is driven by each buffer's `target_frame`.
    #[allow(dead_code)]
    playhead_frame: Arc<AtomicU64>,
    /// Whether the reader thread is running; cleared on drop to stop the loop.
    running: Arc<AtomicBool>,
    /// Background thread handle, joined on drop.
    thread_handle: Option<std::thread::JoinHandle<()>>,
}
impl DiskReader {
    /// Create a new disk reader with a background thread.
    ///
    /// `playhead_frame` is retained for future use; the per-buffer
    /// `target_frame` mechanism currently drives prefetch instead.
    pub fn new(playhead_frame: Arc<AtomicU64>, _sample_rate: u32) -> Self {
        let (command_tx, command_rx) = rtrb::RingBuffer::new(64);
        let running = Arc::new(AtomicBool::new(true));
        let thread_running = running.clone();
        let thread_handle = std::thread::Builder::new()
            .name("disk-reader".into())
            .spawn(move || {
                Self::reader_thread(command_rx, thread_running);
            })
            .expect("Failed to spawn disk reader thread");
        Self {
            command_tx,
            playhead_frame,
            running,
            thread_handle: Some(thread_handle),
        }
    }
    /// Send a command to the disk reader thread.
    /// Best-effort: if the command ring is full, the command is dropped.
    pub fn send(&mut self, cmd: DiskReaderCommand) {
        let _ = self.command_tx.push(cmd);
    }
    /// Create a `ReadAheadBuffer` for a compressed file.
    pub fn create_buffer(sample_rate: u32, channels: u32) -> Arc<ReadAheadBuffer> {
        Arc::new(ReadAheadBuffer::new(
            PREFETCH_SECONDS + 1.0, // extra headroom
            sample_rate,
            channels,
        ))
    }
    /// The disk reader background thread.
    ///
    /// Loop: drain pending commands, then top up every active buffer to
    /// `PREFETCH_SECONDS` ahead of its clip's `target_frame`, then sleep
    /// `POLL_INTERVAL_MS` (unless an export is running, in which case decode
    /// at full speed).
    fn reader_thread(
        mut command_rx: rtrb::Consumer<DiskReaderCommand>,
        running: Arc<AtomicBool>,
    ) {
        let mut active_files: HashMap<u64, (CompressedReader, Arc<ReadAheadBuffer>)> =
            HashMap::new();
        let mut decode_buf = Vec::with_capacity(8192);
        while running.load(Ordering::Relaxed) {
            // Process commands.
            while let Ok(cmd) = command_rx.pop() {
                match cmd {
                    DiskReaderCommand::ActivateFile {
                        reader_id,
                        path,
                        buffer,
                    } => match CompressedReader::open(&path) {
                        Ok(reader) => {
                            eprintln!("[DiskReader] Activated reader={}, ch={}, sr={}, path={:?}",
                                reader_id, reader.channels, reader.sample_rate, path);
                            active_files.insert(reader_id, (reader, buffer));
                        }
                        Err(e) => {
                            eprintln!(
                                "[DiskReader] Failed to open compressed file {:?}: {}",
                                path, e
                            );
                        }
                    },
                    DiskReaderCommand::DeactivateFile { reader_id } => {
                        active_files.remove(&reader_id);
                    }
                    DiskReaderCommand::Seek { frame } => {
                        for (_, (reader, buffer)) in active_files.iter_mut() {
                            buffer.force_target_frame(frame);
                            match reader.seek(frame) {
                                // Coarse seeks can land *before* `frame`
                                // (keyframe granularity). Anchor the buffer
                                // at the frame the decoder actually reached,
                                // so subsequently decoded samples are stored
                                // at their true file positions instead of
                                // being shifted by (frame - actual).
                                Ok(actual) => buffer.reset(actual),
                                Err(e) => {
                                    buffer.reset(frame);
                                    eprintln!("[DiskReader] Seek error: {}", e);
                                }
                            }
                        }
                    }
                    DiskReaderCommand::Shutdown => {
                        return;
                    }
                }
            }
            // Fill each active reader's buffer ahead of its target frame.
            // Each clip instance has its own buffer and target_frame, set by
            // render_from_file during the audio callback.
            for (_reader_id, (reader, buffer)) in active_files.iter_mut() {
                // Skip files where no clip is currently playing
                if !buffer.has_active_target() {
                    continue;
                }
                let target = buffer.target_frame();
                let buf_start = buffer.start_frame();
                let buf_valid = buffer.valid_frames_count();
                let buf_end = buf_start + buf_valid;
                // If the target has jumped behind or far ahead of the buffer,
                // seek the decoder and reset. As above, anchor the buffer at
                // the frame the decoder actually reached so decoded data
                // stays aligned with its real position in the file.
                if target < buf_start || target > buf_end + reader.sample_rate as u64 {
                    match reader.seek(target) {
                        Ok(actual) => buffer.reset(actual),
                        Err(_) => buffer.reset(target),
                    }
                    continue;
                }
                // Advance the buffer start to reclaim space behind the target.
                // Keep a small lookback for sinc interpolation (~32 frames).
                let lookback = 64u64;
                let advance_to = target.saturating_sub(lookback);
                if advance_to > buf_start {
                    buffer.advance_start(advance_to);
                }
                // Calculate how far ahead we need to fill.
                let buf_start = buffer.start_frame();
                let buf_valid = buffer.valid_frames_count();
                let buf_end = buf_start + buf_valid;
                let prefetch_target =
                    target + (PREFETCH_SECONDS * reader.sample_rate as f64) as u64;
                if buf_end >= prefetch_target {
                    continue; // Already filled far enough ahead.
                }
                // Decode more data into the buffer.
                match reader.decode_next(&mut decode_buf) {
                    Ok(0) => {} // EOF
                    Ok(frames) => {
                        let was_empty = buffer.valid_frames_count() == 0;
                        let written = buffer.write_samples(&decode_buf, frames);
                        if written < frames {
                            // The decoder has already advanced past these
                            // frames; they are lost for this pass. Surface it
                            // instead of failing silently. (The 1-second
                            // headroom over PREFETCH_SECONDS should normally
                            // prevent this.)
                            eprintln!(
                                "[DiskReader] reader={}: buffer full, dropped {} frames",
                                _reader_id,
                                frames - written
                            );
                        }
                        if was_empty {
                            eprintln!("[DiskReader] reader={}: first fill, {} frames, buf_start={}, valid={}",
                                _reader_id, frames, buffer.start_frame(), buffer.valid_frames_count());
                        }
                    }
                    Err(e) => {
                        eprintln!("[DiskReader] Decode error: {}", e);
                    }
                }
            }
            // In export mode, skip the sleep so decoding runs at full speed.
            // Otherwise sleep briefly to avoid busy-spinning.
            let any_exporting = active_files.values().any(|(_, buf)| buf.is_export_mode());
            if !any_exporting {
                std::thread::sleep(std::time::Duration::from_millis(POLL_INTERVAL_MS));
            }
        }
    }
}
impl Drop for DiskReader {
    /// Stop the background thread and wait for it to exit.
    ///
    /// Clears the running flag *and* pushes an explicit `Shutdown` command so
    /// the thread stops promptly whichever check it hits first; push and join
    /// failures are ignored — there is nothing useful to do with them while
    /// dropping.
    fn drop(&mut self) {
        self.running.store(false, Ordering::Release);
        // Best-effort: if the command ring is full, the running flag alone
        // will still terminate the loop on its next poll.
        let _ = self.command_tx.push(DiskReaderCommand::Shutdown);
        match self.thread_handle.take() {
            Some(handle) => {
                let _ = handle.join();
            }
            None => {}
        }
    }
}

File diff suppressed because it is too large Load Diff

View File

@ -1,15 +1,20 @@
use super::buffer_pool::BufferPool;
use super::midi_pool::MidiClipPool;
use super::pool::AudioPool;
use super::project::Project;
use crate::command::AudioEvent;
use std::path::Path;
/// Render chunk size for offline export. Matches the real-time playback buffer size
/// so that MIDI events are processed at the same granularity, avoiding timing jitter.
const EXPORT_CHUNK_FRAMES: usize = 256;
/// Supported export formats
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ExportFormat {
Wav,
Flac,
// TODO: Add MP3 support
Mp3,
Aac,
}
impl ExportFormat {
@ -18,6 +23,8 @@ impl ExportFormat {
match self {
ExportFormat::Wav => "wav",
ExportFormat::Flac => "flac",
ExportFormat::Mp3 => "mp3",
ExportFormat::Aac => "m4a",
}
}
}
@ -59,32 +66,86 @@ impl Default for ExportSettings {
///
/// This performs offline rendering, processing the entire timeline
/// in chunks to generate the final audio file.
///
/// If an event producer is provided, progress events will be sent
/// after each chunk with (frames_rendered, total_frames).
pub fn export_audio<P: AsRef<Path>>(
project: &mut Project,
pool: &AudioPool,
midi_pool: &MidiClipPool,
settings: &ExportSettings,
output_path: P,
) -> Result<(), String> {
// Render the project to memory
let samples = render_to_memory(project, pool, midi_pool, settings)?;
// Write to file based on format
match settings.format {
ExportFormat::Wav => write_wav(&samples, settings, output_path)?,
ExportFormat::Flac => write_flac(&samples, settings, output_path)?,
mut event_tx: Option<&mut rtrb::Producer<AudioEvent>>,
) -> Result<(), String>
{
// Validate duration
let duration = settings.end_time - settings.start_time;
if duration <= 0.0 {
return Err(format!(
"Export duration is zero or negative (start={:.3}s, end={:.3}s). \
Check that the timeline has content.",
settings.start_time, settings.end_time
));
}
Ok(())
let total_frames = (duration * settings.sample_rate as f64).round() as usize;
if total_frames == 0 {
return Err("Export would produce zero audio frames".to_string());
}
// Reset all node graphs to clear stale effect buffers (echo, reverb, etc.)
project.reset_all_graphs();
// Enable blocking mode on all read-ahead buffers so compressed audio
// streams block until decoded frames are available (instead of returning
// silence when the disk reader hasn't caught up with offline rendering).
project.set_export_mode(true);
// Route to appropriate export implementation based on format.
// Ensure export mode is disabled even if an error occurs.
let result = match settings.format {
ExportFormat::Wav | ExportFormat::Flac => {
let samples = render_to_memory(project, pool, settings, event_tx.as_mut().map(|tx| &mut **tx))?;
// Signal that rendering is done and we're now writing the file
if let Some(ref mut tx) = event_tx {
let _ = tx.push(AudioEvent::ExportFinalizing);
}
match settings.format {
ExportFormat::Wav => write_wav(&samples, settings, &output_path),
ExportFormat::Flac => write_flac(&samples, settings, &output_path),
_ => unreachable!(),
}
}
ExportFormat::Mp3 => {
export_mp3(project, pool, settings, output_path, event_tx)
}
ExportFormat::Aac => {
export_aac(project, pool, settings, output_path, event_tx)
}
};
// Always disable export mode, even on error
project.set_export_mode(false);
result
}
/// Render the project to memory
fn render_to_memory(
///
/// This function renders the project's audio to an in-memory buffer
/// of interleaved f32 samples. This is useful for custom export formats
/// or for passing audio to external encoders (e.g., FFmpeg for MP3/AAC).
///
/// The returned samples are interleaved (L,R,L,R,... for stereo).
///
/// If an event producer is provided, progress events will be sent
/// after each chunk with (frames_rendered, total_frames).
pub fn render_to_memory(
project: &mut Project,
pool: &AudioPool,
midi_pool: &MidiClipPool,
settings: &ExportSettings,
) -> Result<Vec<f32>, String> {
mut event_tx: Option<&mut rtrb::Producer<AudioEvent>>,
) -> Result<Vec<f32>, String>
{
// Calculate total number of frames
let duration = settings.end_time - settings.start_time;
let total_frames = (duration * settings.sample_rate as f64).round() as usize;
@ -93,9 +154,7 @@ fn render_to_memory(
println!("Export: duration={:.3}s, total_frames={}, total_samples={}, channels={}",
duration, total_frames, total_samples, settings.channels);
// Render in chunks to avoid memory issues
const CHUNK_FRAMES: usize = 4096;
let chunk_samples = CHUNK_FRAMES * settings.channels as usize;
let chunk_samples = EXPORT_CHUNK_FRAMES * settings.channels as usize;
// Create buffer for rendering
let mut render_buffer = vec![0.0f32; chunk_samples];
@ -105,7 +164,8 @@ fn render_to_memory(
let mut all_samples = Vec::with_capacity(total_samples);
let mut playhead = settings.start_time;
let chunk_duration = CHUNK_FRAMES as f64 / settings.sample_rate as f64;
let chunk_duration = EXPORT_CHUNK_FRAMES as f64 / settings.sample_rate as f64;
let mut frames_rendered = 0;
// Render the entire timeline in chunks
while playhead < settings.end_time {
@ -116,11 +176,11 @@ fn render_to_memory(
project.render(
&mut render_buffer,
pool,
midi_pool,
&mut buffer_pool,
playhead,
settings.sample_rate,
settings.channels,
false,
);
// Calculate how many samples we actually need from this chunk
@ -138,6 +198,15 @@ fn render_to_memory(
// Append to output
all_samples.extend_from_slice(&render_buffer[..samples_needed]);
// Update progress
frames_rendered += samples_needed / settings.channels as usize;
if let Some(event_tx) = event_tx.as_mut() {
let _ = event_tx.push(AudioEvent::ExportProgress {
frames_rendered,
total_frames,
});
}
playhead += chunk_duration;
}
@ -243,7 +312,480 @@ fn write_flac<P: AsRef<Path>>(
Ok(())
}
// TODO: Add MP3 export support with a better library
/// Export audio as MP3 using FFmpeg (streaming - render and encode simultaneously)
///
/// Renders the project in `EXPORT_CHUNK_FRAMES`-sized chunks and feeds the
/// samples straight into an MP3 encoder, so the full mix never needs to be
/// held in memory. Samples are accumulated until a complete encoder frame
/// (`frame_size`; falls back to 1152) is available, converted to planar i16,
/// and encoded with a running PTS.
///
/// If `event_tx` is provided, an `ExportProgress` event is pushed per encoded
/// frame and `ExportFinalizing` once rendering completes.
///
/// # Errors
/// Returns a human-readable message if FFmpeg initialization, encoder setup,
/// output creation, encoding, or trailer writing fails, or if the channel
/// count is not 1 or 2.
fn export_mp3<P: AsRef<Path>>(
    project: &mut Project,
    pool: &AudioPool,
    settings: &ExportSettings,
    output_path: P,
    mut event_tx: Option<&mut rtrb::Producer<AudioEvent>>,
) -> Result<(), String> {
    // Initialize FFmpeg
    ffmpeg_next::init().map_err(|e| format!("Failed to initialize FFmpeg: {}", e))?;
    // Set up FFmpeg encoder
    let encoder_codec = ffmpeg_next::encoder::find(ffmpeg_next::codec::Id::MP3)
        .ok_or("MP3 encoder (libmp3lame) not found")?;
    let mut output = ffmpeg_next::format::output(&output_path)
        .map_err(|e| format!("Failed to create output file: {}", e))?;
    let mut encoder = ffmpeg_next::codec::Context::new_with_codec(encoder_codec)
        .encoder()
        .audio()
        .map_err(|e| format!("Failed to create encoder: {}", e))?;
    // Only mono and stereo are supported by this export path.
    let channel_layout = match settings.channels {
        1 => ffmpeg_next::channel_layout::ChannelLayout::MONO,
        2 => ffmpeg_next::channel_layout::ChannelLayout::STEREO,
        _ => return Err(format!("Unsupported channel count: {}", settings.channels)),
    };
    encoder.set_rate(settings.sample_rate as i32);
    encoder.set_channel_layout(channel_layout);
    // The encoder is fed planar (non-interleaved) signed 16-bit samples.
    encoder.set_format(ffmpeg_next::format::Sample::I16(ffmpeg_next::format::sample::Type::Planar));
    encoder.set_bit_rate((settings.mp3_bitrate * 1000) as usize);
    // Time base of 1/sample_rate lets PTS be expressed directly in frames.
    encoder.set_time_base(ffmpeg_next::Rational(1, settings.sample_rate as i32));
    let mut encoder = encoder.open_as(encoder_codec)
        .map_err(|e| format!("Failed to open MP3 encoder: {}", e))?;
    {
        let mut stream = output.add_stream(encoder_codec)
            .map_err(|e| format!("Failed to add stream: {}", e))?;
        stream.set_parameters(&encoder);
    }
    output.write_header()
        .map_err(|e| format!("Failed to write header: {}", e))?;
    // Calculate rendering parameters
    let duration = settings.end_time - settings.start_time;
    let total_frames = (duration * settings.sample_rate as f64).round() as usize;
    let chunk_samples = EXPORT_CHUNK_FRAMES * settings.channels as usize;
    let chunk_duration = EXPORT_CHUNK_FRAMES as f64 / settings.sample_rate as f64;
    // Create buffers for rendering
    let mut render_buffer = vec![0.0f32; chunk_samples];
    let mut buffer_pool = BufferPool::new(16, chunk_samples);
    // Get encoder frame size for proper buffering
    let encoder_frame_size = encoder.frame_size() as usize;
    let encoder_frame_size = if encoder_frame_size > 0 {
        encoder_frame_size
    } else {
        1152 // Default MP3 frame size
    };
    // Sample buffer to accumulate samples until we have complete frames
    let mut sample_buffer: Vec<f32> = Vec::new();
    // PTS (presentation timestamp) tracking for proper timing
    let mut pts: i64 = 0;
    // Streaming render and encode loop
    let mut playhead = settings.start_time;
    let mut frames_rendered = 0;
    while playhead < settings.end_time {
        // Render this chunk
        render_buffer.fill(0.0);
        project.render(
            &mut render_buffer,
            pool,
            &mut buffer_pool,
            playhead,
            settings.sample_rate,
            settings.channels,
            false,
        );
        // Calculate how many samples we need from this chunk
        // (the final chunk may extend past end_time, so trim it).
        let remaining_time = settings.end_time - playhead;
        let samples_needed = if remaining_time < chunk_duration {
            ((remaining_time * settings.sample_rate as f64) as usize * settings.channels as usize)
                .min(chunk_samples)
        } else {
            chunk_samples
        };
        // Add to sample buffer
        sample_buffer.extend_from_slice(&render_buffer[..samples_needed]);
        // Encode complete frames from buffer
        let encoder_frame_samples = encoder_frame_size * settings.channels as usize;
        while sample_buffer.len() >= encoder_frame_samples {
            // Extract one complete frame
            let frame_samples: Vec<f32> = sample_buffer.drain(..encoder_frame_samples).collect();
            // Convert to planar i16
            let planar_i16 = convert_chunk_to_planar_i16(&frame_samples, settings.channels);
            // Encode this frame
            encode_complete_frame_mp3(
                &mut encoder,
                &mut output,
                &planar_i16,
                encoder_frame_size,
                settings.sample_rate,
                channel_layout,
                pts,
            )?;
            frames_rendered += encoder_frame_size;
            pts += encoder_frame_size as i64;
            // Report progress
            if let Some(ref mut tx) = event_tx {
                let _ = tx.push(AudioEvent::ExportProgress {
                    frames_rendered,
                    total_frames,
                });
            }
        }
        playhead += chunk_duration;
    }
    // Encode any remaining samples as the final (short) frame.
    // frames_rendered is not bumped for this tail; the ExportFinalizing
    // event below follows immediately anyway.
    if !sample_buffer.is_empty() {
        let planar_i16 = convert_chunk_to_planar_i16(&sample_buffer, settings.channels);
        let final_frame_size = sample_buffer.len() / settings.channels as usize;
        encode_complete_frame_mp3(
            &mut encoder,
            &mut output,
            &planar_i16,
            final_frame_size,
            settings.sample_rate,
            channel_layout,
            pts,
        )?;
    }
    // Signal that rendering is done and we're now flushing/finalizing
    if let Some(ref mut tx) = event_tx {
        let _ = tx.push(AudioEvent::ExportFinalizing);
    }
    // Flush encoder: EOF drains any samples buffered inside the codec.
    encoder.send_eof()
        .map_err(|e| format!("Failed to send EOF: {}", e))?;
    receive_and_write_packets(&mut encoder, &mut output)?;
    output.write_trailer()
        .map_err(|e| format!("Failed to write trailer: {}", e))?;
    Ok(())
}
/// Export audio as AAC using FFmpeg (streaming - render and encode simultaneously)
///
/// Renders the project in fixed-size chunks (`EXPORT_CHUNK_FRAMES`) and feeds the
/// AAC encoder complete encoder-sized frames as they accumulate, so peak memory
/// stays bounded regardless of export length. Progress and finalization events
/// are pushed to `event_tx` when provided.
///
/// # Errors
/// Returns `Err(String)` if FFmpeg initialization, encoder/muxer setup, or any
/// encode/write step fails, or if `settings.channels` is not 1 or 2.
fn export_aac<P: AsRef<Path>>(
    project: &mut Project,
    pool: &AudioPool,
    settings: &ExportSettings,
    output_path: P,
    mut event_tx: Option<&mut rtrb::Producer<AudioEvent>>,
) -> Result<(), String> {
    // Initialize FFmpeg
    ffmpeg_next::init().map_err(|e| format!("Failed to initialize FFmpeg: {}", e))?;
    // Set up FFmpeg encoder
    let encoder_codec = ffmpeg_next::encoder::find(ffmpeg_next::codec::Id::AAC)
        .ok_or("AAC encoder not found")?;
    let mut output = ffmpeg_next::format::output(&output_path)
        .map_err(|e| format!("Failed to create output file: {}", e))?;
    let mut encoder = ffmpeg_next::codec::Context::new_with_codec(encoder_codec)
        .encoder()
        .audio()
        .map_err(|e| format!("Failed to create encoder: {}", e))?;
    // Only mono and stereo are supported by this export path.
    let channel_layout = match settings.channels {
        1 => ffmpeg_next::channel_layout::ChannelLayout::MONO,
        2 => ffmpeg_next::channel_layout::ChannelLayout::STEREO,
        _ => return Err(format!("Unsupported channel count: {}", settings.channels)),
    };
    encoder.set_rate(settings.sample_rate as i32);
    encoder.set_channel_layout(channel_layout);
    // FFmpeg's native AAC encoder takes planar f32 input.
    encoder.set_format(ffmpeg_next::format::Sample::F32(ffmpeg_next::format::sample::Type::Planar));
    // NOTE(review): reuses the mp3_bitrate setting (kbps -> bps) for AAC —
    // presumably a shared "bitrate" field; confirm against ExportSettings.
    encoder.set_bit_rate((settings.mp3_bitrate * 1000) as usize);
    // Time base of 1/sample_rate means PTS is counted in samples.
    encoder.set_time_base(ffmpeg_next::Rational(1, settings.sample_rate as i32));
    let mut encoder = encoder.open_as(encoder_codec)
        .map_err(|e| format!("Failed to open AAC encoder: {}", e))?;
    // Add a single audio stream mirroring the encoder parameters.
    {
        let mut stream = output.add_stream(encoder_codec)
            .map_err(|e| format!("Failed to add stream: {}", e))?;
        stream.set_parameters(&encoder);
    }
    output.write_header()
        .map_err(|e| format!("Failed to write header: {}", e))?;
    // Calculate rendering parameters
    let duration = settings.end_time - settings.start_time;
    let total_frames = (duration * settings.sample_rate as f64).round() as usize;
    // chunk_samples is interleaved sample count (frames * channels).
    let chunk_samples = EXPORT_CHUNK_FRAMES * settings.channels as usize;
    let chunk_duration = EXPORT_CHUNK_FRAMES as f64 / settings.sample_rate as f64;
    // Create buffers for rendering
    let mut render_buffer = vec![0.0f32; chunk_samples];
    let mut buffer_pool = BufferPool::new(16, chunk_samples);
    // Get encoder frame size for proper buffering; some codecs report 0,
    // in which case fall back to the conventional AAC frame size.
    let encoder_frame_size = encoder.frame_size() as usize;
    let encoder_frame_size = if encoder_frame_size > 0 {
        encoder_frame_size
    } else {
        1024 // Default AAC frame size
    };
    // Sample buffer to accumulate samples until we have complete frames
    let mut sample_buffer: Vec<f32> = Vec::new();
    // PTS (presentation timestamp) tracking for proper timing
    let mut pts: i64 = 0;
    // Streaming render and encode loop
    let mut playhead = settings.start_time;
    let mut frames_rendered = 0;
    while playhead < settings.end_time {
        // Render this chunk
        render_buffer.fill(0.0);
        project.render(
            &mut render_buffer,
            pool,
            &mut buffer_pool,
            playhead,
            settings.sample_rate,
            settings.channels,
            false,
        );
        // Calculate how many samples we need from this chunk
        // (the final chunk may extend past end_time, so truncate it).
        let remaining_time = settings.end_time - playhead;
        let samples_needed = if remaining_time < chunk_duration {
            ((remaining_time * settings.sample_rate as f64) as usize * settings.channels as usize)
                .min(chunk_samples)
        } else {
            chunk_samples
        };
        // Add to sample buffer
        sample_buffer.extend_from_slice(&render_buffer[..samples_needed]);
        // Encode complete frames from buffer
        let encoder_frame_samples = encoder_frame_size * settings.channels as usize;
        while sample_buffer.len() >= encoder_frame_samples {
            // Extract one complete frame
            let frame_samples: Vec<f32> = sample_buffer.drain(..encoder_frame_samples).collect();
            // Convert to planar f32
            let planar_f32 = convert_chunk_to_planar_f32(&frame_samples, settings.channels);
            // Encode this frame
            encode_complete_frame_aac(
                &mut encoder,
                &mut output,
                &planar_f32,
                encoder_frame_size,
                settings.sample_rate,
                channel_layout,
                pts,
            )?;
            frames_rendered += encoder_frame_size;
            // PTS advances by one frame's worth of samples (time base 1/sample_rate).
            pts += encoder_frame_size as i64;
            // Report progress
            if let Some(ref mut tx) = event_tx {
                let _ = tx.push(AudioEvent::ExportProgress {
                    frames_rendered,
                    total_frames,
                });
            }
        }
        playhead += chunk_duration;
    }
    // Encode any remaining samples as the final (shorter) frame
    if !sample_buffer.is_empty() {
        let planar_f32 = convert_chunk_to_planar_f32(&sample_buffer, settings.channels);
        let final_frame_size = sample_buffer.len() / settings.channels as usize;
        encode_complete_frame_aac(
            &mut encoder,
            &mut output,
            &planar_f32,
            final_frame_size,
            settings.sample_rate,
            channel_layout,
            pts,
        )?;
    }
    // Signal that rendering is done and we're now flushing/finalizing
    if let Some(ref mut tx) = event_tx {
        let _ = tx.push(AudioEvent::ExportFinalizing);
    }
    // Flush encoder
    encoder.send_eof()
        .map_err(|e| format!("Failed to send EOF: {}", e))?;
    receive_and_write_packets(&mut encoder, &mut output)?;
    output.write_trailer()
        .map_err(|e| format!("Failed to write trailer: {}", e))?;
    Ok(())
}
/// Convert a chunk of interleaved f32 samples to planar i16 format.
///
/// Each output vector holds one channel. Samples are clamped to [-1.0, 1.0]
/// and scaled by 32767 (symmetric scaling: -1.0 maps to -32767, not -32768).
///
/// # Arguments
/// * `interleaved` - frame-major interleaved samples (ch0, ch1, ch0, ch1, ...)
/// * `channels` - number of channels; must be > 0 (callers pass 1 or 2)
///
/// A trailing partial frame (when `interleaved.len()` is not a multiple of
/// `channels`) is dropped. The previous `chunks()` version yielded that
/// partial chunk with index == num_frames and panicked out of bounds.
fn convert_chunk_to_planar_i16(interleaved: &[f32], channels: u32) -> Vec<Vec<i16>> {
    let channels = channels as usize;
    let num_frames = interleaved.len() / channels;
    let mut planar = vec![vec![0i16; num_frames]; channels];
    // chunks_exact guarantees exactly num_frames full chunks, so the frame
    // index can never exceed the per-channel buffers (and the fixed chunk
    // length lets the compiler hoist bounds checks).
    for (i, frame) in interleaved.chunks_exact(channels).enumerate() {
        for (ch, &sample) in frame.iter().enumerate() {
            let clamped = sample.max(-1.0).min(1.0);
            planar[ch][i] = (clamped * 32767.0) as i16;
        }
    }
    planar
}
/// Convert a chunk of interleaved f32 samples to planar f32 format.
///
/// Each output vector holds one channel's samples, unscaled.
///
/// # Arguments
/// * `interleaved` - frame-major interleaved samples (ch0, ch1, ch0, ch1, ...)
/// * `channels` - number of channels; must be > 0 (callers pass 1 or 2)
///
/// A trailing partial frame (when `interleaved.len()` is not a multiple of
/// `channels`) is dropped. The previous `chunks()` version yielded that
/// partial chunk with index == num_frames and panicked out of bounds.
fn convert_chunk_to_planar_f32(interleaved: &[f32], channels: u32) -> Vec<Vec<f32>> {
    let channels = channels as usize;
    let num_frames = interleaved.len() / channels;
    let mut planar = vec![vec![0.0f32; num_frames]; channels];
    // chunks_exact yields exactly num_frames full chunks, keeping the frame
    // index in bounds even for ragged input.
    for (i, frame) in interleaved.chunks_exact(channels).enumerate() {
        for (ch, &sample) in frame.iter().enumerate() {
            planar[ch][i] = sample;
        }
    }
    planar
}
/// Encode a single complete frame of planar i16 samples to MP3.
///
/// Builds an FFmpeg audio frame from `planar_samples` (one Vec per channel,
/// `num_frames` samples each), stamps it with `pts`, sends it to `encoder`,
/// and drains any resulting packets into `output`.
///
/// # Errors
/// Returns `Err(String)` if frame allocation, `send_frame`, or packet writing fails.
fn encode_complete_frame_mp3(
    encoder: &mut ffmpeg_next::encoder::Audio,
    output: &mut ffmpeg_next::format::context::Output,
    planar_samples: &[Vec<i16>],
    num_frames: usize,
    sample_rate: u32,
    channel_layout: ffmpeg_next::channel_layout::ChannelLayout,
    pts: i64,
) -> Result<(), String> {
    // An empty frame is a no-op.
    if num_frames == 0 {
        return Ok(());
    }
    // Allocate the FFmpeg frame in planar i16 layout.
    let mut frame = ffmpeg_next::frame::Audio::new(
        ffmpeg_next::format::Sample::I16(ffmpeg_next::format::sample::Type::Planar),
        num_frames,
        channel_layout,
    );
    frame.set_rate(sample_rate);
    frame.set_pts(Some(pts));
    // Zero planes means FFmpeg failed to allocate the sample buffers.
    if frame.planes() == 0 {
        return Err("FFmpeg failed to allocate audio frame. Try exporting as WAV instead.".to_string());
    }
    // Copy each channel into its plane.
    // Use plane_mut::<i16> instead of data_mut — data_mut(ch) is buggy for planar audio:
    // FFmpeg only sets linesize[0], so data_mut returns 0-length slices for ch > 0.
    // plane_mut uses self.samples() for the length, which is correct for all planes.
    for (ch, samples) in planar_samples.iter().enumerate() {
        frame.plane_mut::<i16>(ch).copy_from_slice(samples);
    }
    encoder.send_frame(&frame)
        .map_err(|e| format!("Failed to send frame: {}", e))?;
    receive_and_write_packets(encoder, output)
}
/// Encode a single complete frame of planar f32 samples to AAC.
///
/// Builds an FFmpeg audio frame from `planar_samples` (one Vec per channel,
/// `num_frames` samples each), stamps it with `pts`, sends it to `encoder`,
/// and drains any resulting packets into `output`.
///
/// # Errors
/// Returns `Err(String)` if frame allocation, `send_frame`, or packet writing fails.
fn encode_complete_frame_aac(
    encoder: &mut ffmpeg_next::encoder::Audio,
    output: &mut ffmpeg_next::format::context::Output,
    planar_samples: &[Vec<f32>],
    num_frames: usize,
    sample_rate: u32,
    channel_layout: ffmpeg_next::channel_layout::ChannelLayout,
    pts: i64,
) -> Result<(), String> {
    // An empty frame is a no-op.
    if num_frames == 0 {
        return Ok(());
    }
    // Allocate the FFmpeg frame in planar f32 layout.
    let mut frame = ffmpeg_next::frame::Audio::new(
        ffmpeg_next::format::Sample::F32(ffmpeg_next::format::sample::Type::Planar),
        num_frames,
        channel_layout,
    );
    frame.set_rate(sample_rate);
    frame.set_pts(Some(pts));
    // Zero planes means FFmpeg failed to allocate the sample buffers.
    if frame.planes() == 0 {
        return Err("FFmpeg failed to allocate audio frame. Try exporting as WAV instead.".to_string());
    }
    // Copy each channel into its plane.
    // Use plane_mut::<f32> instead of data_mut — data_mut(ch) is buggy for planar audio:
    // FFmpeg only sets linesize[0], so data_mut returns 0-length slices for ch > 0.
    // plane_mut uses self.samples() for the length, which is correct for all planes.
    for (ch, samples) in planar_samples.iter().enumerate() {
        frame.plane_mut::<f32>(ch).copy_from_slice(samples);
    }
    encoder.send_frame(&frame)
        .map_err(|e| format!("Failed to send frame: {}", e))?;
    receive_and_write_packets(encoder, output)
}
/// Receive encoded packets from the encoder and write them to the output.
///
/// Drains every packet the encoder currently has ready (until `receive_packet`
/// reports none available) and interleave-writes each to stream 0.
///
/// # Errors
/// Returns `Err(String)` if writing a packet to the muxer fails.
fn receive_and_write_packets(
    encoder: &mut ffmpeg_next::encoder::Audio,
    output: &mut ffmpeg_next::format::context::Output,
) -> Result<(), String> {
    let mut packet = ffmpeg_next::Packet::empty();
    loop {
        // receive_packet errors (EAGAIN/EOF) signal "no more packets for now".
        if encoder.receive_packet(&mut packet).is_err() {
            break;
        }
        packet.set_stream(0);
        packet.write_interleaved(output)
            .map_err(|e| format!("Failed to write packet: {}", e))?;
    }
    Ok(())
}
#[cfg(test)]
mod tests {

View File

@ -73,7 +73,7 @@ pub type MidiClipInstanceId = u32;
///
/// This represents the content data stored in the MidiClipPool.
/// Events have timestamps relative to the start of the clip (0.0 = clip beginning).
#[derive(Debug, Clone)]
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct MidiClip {
pub id: MidiClipId,
pub events: Vec<MidiEvent>,
@ -132,7 +132,7 @@ impl MidiClip {
/// ## Looping
/// If `external_duration` is greater than `internal_end - internal_start`,
/// the instance will seamlessly loop back to `internal_start` when it reaches `internal_end`.
#[derive(Debug, Clone)]
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct MidiClipInstance {
pub id: MidiClipInstanceId,
pub clip_id: MidiClipId, // Reference to MidiClip in pool
@ -256,7 +256,8 @@ impl MidiClipInstance {
// Get events from the clip that fall within the internal range
for event in &clip.events {
// Skip events outside the trimmed region
if event.timestamp < self.internal_start || event.timestamp >= self.internal_end {
// Use > (not >=) for internal_end so note-offs at the clip boundary are included
if event.timestamp < self.internal_start || event.timestamp > self.internal_end {
continue;
}
@ -265,9 +266,10 @@ impl MidiClipInstance {
let timeline_time = self.external_start + loop_offset + relative_content_time;
// Check if within current buffer range and instance bounds
// Use <= for external_end so note-offs at the clip boundary are included
if timeline_time >= range_start_seconds
&& timeline_time < range_end_seconds
&& timeline_time < external_end
&& timeline_time <= external_end
{
let mut adjusted_event = *event;
adjusted_event.timestamp = timeline_time;

View File

@ -1,8 +1,10 @@
use serde::{Serialize, Deserialize};
use std::collections::HashMap;
use super::midi::{MidiClip, MidiClipId, MidiEvent};
/// Pool for storing MIDI clip content
/// Similar to AudioClipPool but for MIDI data
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MidiClipPool {
clips: HashMap<MidiClipId, MidiClip>,
next_id: MidiClipId,

View File

@ -2,6 +2,7 @@ pub mod automation;
pub mod bpm_detector;
pub mod buffer_pool;
pub mod clip;
pub mod disk_reader;
pub mod engine;
pub mod export;
pub mod metronome;
@ -13,17 +14,19 @@ pub mod project;
pub mod recording;
pub mod sample_loader;
pub mod track;
pub mod waveform_cache;
pub use automation::{AutomationLane, AutomationLaneId, AutomationPoint, CurveType, ParameterId};
pub use buffer_pool::BufferPool;
pub use clip::{AudioClipInstance, AudioClipInstanceId, Clip, ClipId};
pub use engine::{Engine, EngineController};
pub use engine::{AudioClipSnapshot, Engine, EngineController};
pub use export::{export_audio, ExportFormat, ExportSettings};
pub use metronome::Metronome;
pub use midi::{MidiClip, MidiClipId, MidiClipInstance, MidiClipInstanceId, MidiEvent};
pub use midi_pool::MidiClipPool;
pub use pool::{AudioClipPool, AudioFile as PoolAudioFile, AudioPool};
pub use pool::{AudioClipPool, AudioFile as PoolAudioFile, AudioPool, AudioStorage, PcmSampleFormat};
pub use project::Project;
pub use recording::RecordingState;
pub use sample_loader::{load_audio_file, SampleData};
pub use track::{AudioTrack, Metatrack, MidiTrack, RenderContext, Track, TrackId, TrackNode};
pub use waveform_cache::{ChunkPriority, DetailLevel, WaveformCache};

View File

@ -22,6 +22,16 @@ pub struct GraphNode {
pub midi_output_buffers: Vec<Vec<MidiEvent>>,
}
impl std::fmt::Debug for GraphNode {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("GraphNode")
.field("node", &"<AudioNode>")
.field("output_buffers_len", &self.output_buffers.len())
.field("midi_output_buffers_len", &self.midi_output_buffers.len())
.finish()
}
}
impl GraphNode {
pub fn new(node: Box<dyn AudioNode>, buffer_size: usize) -> Self {
let outputs = node.outputs();
@ -57,6 +67,7 @@ impl GraphNode {
}
/// Audio processing graph for instruments/effects
#[derive(Debug)]
pub struct AudioGraph {
/// The audio graph (StableGraph allows node removal without index invalidation)
graph: StableGraph<GraphNode, Connection>,
@ -84,6 +95,17 @@ pub struct AudioGraph {
/// Current playback time (for automation nodes)
playback_time: f64,
/// Project tempo (synced from Engine via SetTempo)
bpm: f32,
/// Beats per bar (time signature numerator)
beats_per_bar: u32,
/// Cached topological sort order (invalidated on graph mutation)
topo_cache: Option<Vec<NodeIndex>>,
/// Frontend-only group definitions (stored opaquely for persistence)
frontend_groups: Vec<crate::audio::node_graph::preset::SerializedGroup>,
}
impl AudioGraph {
@ -102,12 +124,23 @@ impl AudioGraph {
midi_input_buffers: (0..16).map(|_| Vec::with_capacity(128)).collect(),
node_positions: std::collections::HashMap::new(),
playback_time: 0.0,
bpm: 120.0,
beats_per_bar: 4,
topo_cache: None,
frontend_groups: Vec::new(),
}
}
/// Set the project tempo and time signature for BeatNodes
pub fn set_tempo(&mut self, bpm: f32, beats_per_bar: u32) {
self.bpm = bpm;
self.beats_per_bar = beats_per_bar;
}
/// Add a node to the graph
pub fn add_node(&mut self, node: Box<dyn AudioNode>) -> NodeIndex {
let graph_node = GraphNode::new(node, self.buffer_size);
self.topo_cache = None;
self.graph.add_node(graph_node)
}
@ -145,8 +178,29 @@ impl AudioGraph {
// Validate the connection
self.validate_connection(from, from_port, to, to_port)?;
// Remove any existing connection to the same input port (replace semantics).
// The frontend UI enforces single-connection inputs, so when a new connection
// targets the same port, the old one should be replaced.
let edges_to_remove: Vec<_> = self.graph.edges_directed(to, petgraph::Direction::Incoming)
.filter(|e| e.weight().to_port == to_port)
.map(|e| e.id())
.collect();
for edge_id in edges_to_remove {
self.graph.remove_edge(edge_id);
}
// Add the edge
self.graph.add_edge(from, to, Connection { from_port, to_port });
self.topo_cache = None;
// Auto-grow MixerNode: always keep one spare port beyond the connected count
let n_incoming = self.graph.edges_directed(to, petgraph::Direction::Incoming).count();
if let Some(graph_node) = self.graph.node_weight_mut(to) {
use crate::audio::node_graph::nodes::MixerNode;
if let Some(mixer) = graph_node.node.as_any_mut().downcast_mut::<MixerNode>() {
mixer.ensure_min_ports(n_incoming + 1);
}
}
Ok(())
}
@ -159,11 +213,24 @@ impl AudioGraph {
to: NodeIndex,
to_port: usize,
) {
// Find and remove the edge
let mut did_remove = false;
if let Some(edge_idx) = self.graph.find_edge(from, to) {
let conn = &self.graph[edge_idx];
if conn.from_port == from_port && conn.to_port == to_port {
self.graph.remove_edge(edge_idx);
self.topo_cache = None;
did_remove = true;
}
}
// Shrink MixerNode back to n_remaining + 1 spare after a disconnect
if did_remove {
let n_remaining = self.graph.edges_directed(to, petgraph::Direction::Incoming).count();
if let Some(graph_node) = self.graph.node_weight_mut(to) {
use crate::audio::node_graph::nodes::MixerNode;
if let Some(mixer) = graph_node.node.as_any_mut().downcast_mut::<MixerNode>() {
mixer.resize(n_remaining + 1);
}
}
}
}
@ -171,6 +238,7 @@ impl AudioGraph {
/// Remove a node from the graph
pub fn remove_node(&mut self, node: NodeIndex) {
self.graph.remove_node(node);
self.topo_cache = None;
// Update MIDI targets
self.midi_targets.retain(|&idx| idx != node);
@ -251,29 +319,19 @@ impl AudioGraph {
// This is tricky with trait objects, so we'll need to use Any
// For now, let's use a different approach - store the node pointer temporarily
// Check node type first
if graph_node.node.node_type() != "VoiceAllocator" {
return Err("Node is not a VoiceAllocator".to_string());
}
// Downcast to VoiceAllocatorNode using safe Any trait
let va = graph_node.node.as_any_mut()
.downcast_mut::<VoiceAllocatorNode>()
.ok_or_else(|| "Node is not a VoiceAllocator".to_string())?;
// Get mutable reference and downcast using raw pointers
let node_ptr = &mut *graph_node.node as *mut dyn AudioNode;
// Add node to template graph
let node_idx = va.template_graph_mut().add_node(node);
let node_id = node_idx.index() as u32;
// SAFETY: We just checked that this is a VoiceAllocator
// This is safe because we know the concrete type
unsafe {
let va_ptr = node_ptr as *mut VoiceAllocatorNode;
let va = &mut *va_ptr;
// Rebuild voice instances from template
va.rebuild_voices();
// Add node to template graph
let node_idx = va.template_graph_mut().add_node(node);
let node_id = node_idx.index() as u32;
// Rebuild voice instances from template
va.rebuild_voices();
return Ok(node_id);
}
return Ok(node_id);
}
Err("VoiceAllocator node not found".to_string())
@ -292,52 +350,150 @@ impl AudioGraph {
// Get the VoiceAllocator node
if let Some(graph_node) = self.graph.node_weight_mut(voice_allocator_idx) {
// Check node type first
if graph_node.node.node_type() != "VoiceAllocator" {
return Err("Node is not a VoiceAllocator".to_string());
}
// Downcast to VoiceAllocatorNode using safe Any trait
let va = graph_node.node.as_any_mut()
.downcast_mut::<VoiceAllocatorNode>()
.ok_or_else(|| "Node is not a VoiceAllocator".to_string())?;
// Get mutable reference and downcast using raw pointers
let node_ptr = &mut *graph_node.node as *mut dyn AudioNode;
// Connect in template graph
let from_idx = NodeIndex::new(from_node as usize);
let to_idx = NodeIndex::new(to_node as usize);
// SAFETY: We just checked that this is a VoiceAllocator
unsafe {
let va_ptr = node_ptr as *mut VoiceAllocatorNode;
let va = &mut *va_ptr;
va.template_graph_mut().connect(from_idx, from_port, to_idx, to_port)
.map_err(|e| format!("{:?}", e))?;
// Connect in template graph
let from_idx = NodeIndex::new(from_node as usize);
let to_idx = NodeIndex::new(to_node as usize);
// Rebuild voice instances from template
va.rebuild_voices();
va.template_graph_mut().connect(from_idx, from_port, to_idx, to_port)
.map_err(|e| format!("{:?}", e))?;
// Rebuild voice instances from template
va.rebuild_voices();
return Ok(());
}
return Ok(());
}
Err("VoiceAllocator node not found".to_string())
}
/// Disconnect two nodes in a VoiceAllocator's template graph
pub fn disconnect_in_voice_allocator_template(
&mut self,
voice_allocator_idx: NodeIndex,
from_node: u32,
from_port: usize,
to_node: u32,
to_port: usize,
) -> Result<(), String> {
use crate::audio::node_graph::nodes::VoiceAllocatorNode;
if let Some(graph_node) = self.graph.node_weight_mut(voice_allocator_idx) {
// Downcast to VoiceAllocatorNode using safe Any trait
let va = graph_node.node.as_any_mut()
.downcast_mut::<VoiceAllocatorNode>()
.ok_or_else(|| "Node is not a VoiceAllocator".to_string())?;
let from_idx = NodeIndex::new(from_node as usize);
let to_idx = NodeIndex::new(to_node as usize);
va.template_graph_mut().disconnect(from_idx, from_port, to_idx, to_port);
va.rebuild_voices();
return Ok(());
}
Err("VoiceAllocator node not found".to_string())
}
/// Remove a node from a VoiceAllocator's template graph
pub fn remove_node_from_voice_allocator_template(
&mut self,
voice_allocator_idx: NodeIndex,
node_id: u32,
) -> Result<(), String> {
use crate::audio::node_graph::nodes::VoiceAllocatorNode;
if let Some(graph_node) = self.graph.node_weight_mut(voice_allocator_idx) {
// Downcast to VoiceAllocatorNode using safe Any trait
let va = graph_node.node.as_any_mut()
.downcast_mut::<VoiceAllocatorNode>()
.ok_or_else(|| "Node is not a VoiceAllocator".to_string())?;
let node_idx = NodeIndex::new(node_id as usize);
va.template_graph_mut().remove_node(node_idx);
va.rebuild_voices();
return Ok(());
}
Err("VoiceAllocator node not found".to_string())
}
/// Set a parameter on a node in a VoiceAllocator's template graph
pub fn set_parameter_in_voice_allocator_template(
&mut self,
voice_allocator_idx: NodeIndex,
node_id: u32,
param_id: u32,
value: f32,
) -> Result<(), String> {
use crate::audio::node_graph::nodes::VoiceAllocatorNode;
if let Some(graph_node) = self.graph.node_weight_mut(voice_allocator_idx) {
// Downcast to VoiceAllocatorNode using safe Any trait
let va = graph_node.node.as_any_mut()
.downcast_mut::<VoiceAllocatorNode>()
.ok_or_else(|| "Node is not a VoiceAllocator".to_string())?;
let node_idx = NodeIndex::new(node_id as usize);
if let Some(template_node) = va.template_graph_mut().get_graph_node_mut(node_idx) {
template_node.node.set_parameter(param_id, value);
} else {
return Err("Node not found in template".to_string());
}
va.rebuild_voices();
return Ok(());
}
Err("VoiceAllocator node not found".to_string())
}
/// Set the position of a node in a VoiceAllocator's template graph
pub fn set_position_in_voice_allocator_template(
&mut self,
voice_allocator_idx: NodeIndex,
node_id: u32,
x: f32,
y: f32,
) {
use crate::audio::node_graph::nodes::VoiceAllocatorNode;
if let Some(graph_node) = self.graph.node_weight_mut(voice_allocator_idx) {
// Downcast to VoiceAllocatorNode using safe Any trait
if let Some(va) = graph_node.node.as_any_mut().downcast_mut::<VoiceAllocatorNode>() {
let node_idx = NodeIndex::new(node_id as usize);
va.template_graph_mut().set_node_position(node_idx, x, y);
}
}
}
/// Process the graph and produce audio output
pub fn process(&mut self, output_buffer: &mut [f32], midi_events: &[MidiEvent], playback_time: f64) {
// Update playback time
self.playback_time = playback_time;
// Update playback time for all automation nodes before processing
use super::nodes::AutomationInputNode;
// Update playback time for all time-dependent nodes before processing
use super::nodes::{AutomationInputNode, BeatNode};
for node in self.graph.node_weights_mut() {
// Try to downcast to AutomationInputNode and update its playback time
if let Some(auto_node) = node.node.as_any_mut().downcast_mut::<AutomationInputNode>() {
auto_node.set_playback_time(playback_time);
} else if let Some(beat_node) = node.node.as_any_mut().downcast_mut::<BeatNode>() {
beat_node.set_playback_time(playback_time);
beat_node.set_tempo(self.bpm, self.beats_per_bar);
}
}
// Use the requested output buffer size for processing
// process_size is stereo (interleaved L/R), frame_count is mono
let process_size = output_buffer.len();
let frame_count = process_size / 2;
// Clear all output buffers (audio/CV and MIDI)
for node in self.graph.node_weights_mut() {
@ -361,24 +517,44 @@ impl AudioGraph {
}
}
// Topological sort for processing order
let topo = petgraph::algo::toposort(&self.graph, None)
.unwrap_or_else(|_| {
// If there's a cycle (shouldn't happen due to validation), just process in index order
self.graph.node_indices().collect()
});
// Topological sort for processing order (cached, recomputed only on graph mutation)
if self.topo_cache.is_none() {
self.topo_cache = Some(
petgraph::algo::toposort(&self.graph, None)
.unwrap_or_else(|_| {
// If there's a cycle (shouldn't happen due to validation), just process in index order
self.graph.node_indices().collect()
})
);
}
let topo_len = self.topo_cache.as_ref().unwrap().len();
// Process nodes in topological order
for node_idx in topo {
for topo_i in 0..topo_len {
let node_idx = self.topo_cache.as_ref().unwrap()[topo_i];
// Get input port information
let inputs = self.graph[node_idx].node.inputs();
let num_audio_cv_inputs = inputs.iter().filter(|p| p.signal_type != SignalType::Midi).count();
let num_midi_inputs = inputs.iter().filter(|p| p.signal_type == SignalType::Midi).count();
// Collect audio/CV input signal types for correct buffer sizing
let audio_cv_input_types: Vec<SignalType> = inputs.iter()
.filter(|p| p.signal_type != SignalType::Midi)
.map(|p| p.signal_type)
.collect();
// Clear audio/CV input buffers
for i in 0..num_audio_cv_inputs {
if i < self.input_buffers.len() {
self.input_buffers[i].fill(0.0);
// Clear input buffers
// - Audio inputs: fill with 0.0 (silence) when unconnected
// - CV inputs: fill with NaN to indicate "no connection" (allows nodes to use parameter values)
let mut audio_cv_idx = 0;
for port in inputs.iter().filter(|p| p.signal_type != SignalType::Midi) {
if audio_cv_idx < self.input_buffers.len() {
let fill_value = match port.signal_type {
SignalType::Audio => 0.0, // Silence for audio
SignalType::CV => f32::NAN, // Sentinel for CV
SignalType::Midi => unreachable!(), // Already filtered out
};
self.input_buffers[audio_cv_idx].fill(fill_value);
audio_cv_idx += 1;
}
}
@ -389,26 +565,46 @@ impl AudioGraph {
}
}
// Collect inputs from connected nodes
let incoming = self.graph.edges_directed(node_idx, Direction::Incoming).collect::<Vec<_>>();
// Collect edge info into stack array to avoid heap allocation
// (need to collect because we borrow graph immutably for source node data)
const MAX_EDGES: usize = 32;
let mut edge_info: [(NodeIndex, usize, usize); MAX_EDGES] = [(NodeIndex::new(0), 0, 0); MAX_EDGES];
let mut edge_count = 0;
for edge in self.graph.edges_directed(node_idx, Direction::Incoming) {
if edge_count < MAX_EDGES {
edge_info[edge_count] = (edge.source(), edge.weight().from_port, edge.weight().to_port);
edge_count += 1;
}
}
for edge in incoming {
let source_idx = edge.source();
let conn = edge.weight();
for ei in 0..edge_count {
let (source_idx, from_port, to_port) = edge_info[ei];
let source_node = &self.graph[source_idx];
// Determine source port type
if conn.from_port < source_node.node.outputs().len() {
let source_port_type = source_node.node.outputs()[conn.from_port].signal_type;
if from_port < source_node.node.outputs().len() {
let source_port_type = source_node.node.outputs()[from_port].signal_type;
match source_port_type {
SignalType::Audio | SignalType::CV => {
// Map from global port index to audio/CV-only port index
// (input_buffers only contains audio/CV entries, not MIDI)
let audio_cv_port_idx = inputs.iter()
.take(to_port + 1)
.filter(|p| p.signal_type != SignalType::Midi)
.count().saturating_sub(1);
// Copy audio/CV data
if conn.to_port < num_audio_cv_inputs && conn.from_port < source_node.output_buffers.len() {
let source_buffer = &source_node.output_buffers[conn.from_port];
if conn.to_port < self.input_buffers.len() {
for (dst, src) in self.input_buffers[conn.to_port].iter_mut().zip(source_buffer.iter()) {
*dst += src;
if audio_cv_port_idx < num_audio_cv_inputs && from_port < source_node.output_buffers.len() {
let source_buffer = &source_node.output_buffers[from_port];
if audio_cv_port_idx < self.input_buffers.len() {
for (dst, src) in self.input_buffers[audio_cv_port_idx].iter_mut().zip(source_buffer.iter()) {
// If dst is NaN (unconnected), replace it; otherwise add (for mixing)
if dst.is_nan() {
*dst = *src;
} else {
*dst += src;
}
}
}
}
@ -417,12 +613,12 @@ impl AudioGraph {
// Copy MIDI events
// Map from global port index to MIDI-only port index
let midi_port_idx = inputs.iter()
.take(conn.to_port + 1)
.take(to_port + 1)
.filter(|p| p.signal_type == SignalType::Midi)
.count() - 1;
let source_midi_idx = source_node.node.outputs().iter()
.take(conn.from_port + 1)
.take(from_port + 1)
.filter(|p| p.signal_type == SignalType::Midi)
.count() - 1;
@ -436,11 +632,15 @@ impl AudioGraph {
}
}
// Prepare audio/CV input slices
// Prepare audio/CV input slices (Audio=stereo process_size, CV=mono frame_count)
let input_slices: Vec<&[f32]> = (0..num_audio_cv_inputs)
.map(|i| {
if i < self.input_buffers.len() {
&self.input_buffers[i][..process_size.min(self.input_buffers[i].len())]
let slice_size = match audio_cv_input_types.get(i) {
Some(&SignalType::Audio) => process_size,
_ => frame_count,
};
&self.input_buffers[i][..slice_size.min(self.input_buffers[i].len())]
} else {
&[][..]
}
@ -461,34 +661,28 @@ impl AudioGraph {
// Get mutable access to output buffers
let node = &mut self.graph[node_idx];
let outputs = node.node.outputs();
let num_audio_cv_outputs = outputs.iter().filter(|p| p.signal_type != SignalType::Midi).count();
let num_midi_outputs = outputs.iter().filter(|p| p.signal_type == SignalType::Midi).count();
// Collect output signal types for correct buffer sizing
let output_signal_types: Vec<SignalType> = outputs.iter().map(|p| p.signal_type).collect();
// Create mutable slices for audio/CV outputs
let mut output_slices: Vec<&mut [f32]> = Vec::with_capacity(num_audio_cv_outputs);
for i in 0..num_audio_cv_outputs {
if i < node.output_buffers.len() {
// Safety: We need to work around borrowing rules here
// This is safe because each output buffer is independent
let buffer = &mut node.output_buffers[i] as *mut Vec<f32>;
unsafe {
let slice = &mut (&mut *buffer)[..process_size.min((*buffer).len())];
output_slices.push(slice);
}
}
// Create mutable slices for audio/CV outputs (Audio=stereo, CV=mono)
let mut output_slices: Vec<&mut [f32]> = Vec::new();
for (i, buf) in node.output_buffers.iter_mut().enumerate() {
let signal_type = output_signal_types.get(i).copied().unwrap_or(SignalType::CV);
if signal_type == SignalType::Midi { continue; }
let slice_size = match signal_type {
SignalType::Audio => process_size,
_ => frame_count,
};
let len = buf.len();
output_slices.push(&mut buf[..slice_size.min(len)]);
}
// Create mutable references for MIDI outputs
let mut midi_output_refs: Vec<&mut Vec<MidiEvent>> = Vec::with_capacity(num_midi_outputs);
for i in 0..num_midi_outputs {
if i < node.midi_output_buffers.len() {
// Safety: Similar to above
let buffer = &mut node.midi_output_buffers[i] as *mut Vec<MidiEvent>;
unsafe {
midi_output_refs.push(&mut *buffer);
}
}
}
let mut midi_output_refs: Vec<&mut Vec<MidiEvent>> = node.midi_output_buffers
.iter_mut()
.take(num_midi_outputs)
.collect();
// Process the node with both audio/CV and MIDI
node.node.process(&input_slices, &mut output_slices, &midi_input_slices, &mut midi_output_refs, self.sample_rate);
@ -512,6 +706,10 @@ impl AudioGraph {
self.graph.node_weight(idx).map(|n| &*n.node)
}
pub fn get_node_mut(&mut self, idx: NodeIndex) -> Option<&mut (dyn AudioNode + 'static)> {
self.graph.node_weight_mut(idx).map(|n| &mut *n.node)
}
/// Get oscilloscope data from a specific node
pub fn get_oscilloscope_data(&self, idx: NodeIndex, sample_count: usize) -> Option<Vec<f32>> {
self.get_node(idx).and_then(|node| node.get_oscilloscope_data(sample_count))
@ -539,6 +737,50 @@ impl AudioGraph {
self.graph.node_indices()
}
/// Reallocate a node's output buffers to match its current port list.
///
/// Must be called after `SubtrackInputsNode::update_subtracks` changes the port count,
/// since `GraphNode.output_buffers` was allocated at `add_node` time.
pub fn reallocate_node_output_buffers(&mut self, idx: NodeIndex, buffer_size: usize) {
if let Some(graph_node) = self.graph.node_weight_mut(idx) {
let outputs = graph_node.node.outputs();
graph_node.output_buffers.clear();
for port in outputs.iter() {
match port.signal_type {
super::types::SignalType::Audio => graph_node.output_buffers.push(vec![0.0; buffer_size * 2]),
super::types::SignalType::CV => graph_node.output_buffers.push(vec![0.0; buffer_size]),
super::types::SignalType::Midi => graph_node.output_buffers.push(vec![]),
}
}
self.topo_cache = None;
}
}
/// Remove all edges going OUT of a specific output port of a node.
pub fn disconnect_output_port(&mut self, node: NodeIndex, port: usize) {
let edges: Vec<_> = self.graph
.edges_directed(node, petgraph::Direction::Outgoing)
.filter(|e| e.weight().from_port == port)
.map(|e| e.id())
.collect();
for edge_id in edges {
self.graph.remove_edge(edge_id);
}
self.topo_cache = None;
}
/// Remove all edges going INTO a node (all input connections).
pub fn disconnect_all_inputs(&mut self, node: NodeIndex) {
let edges: Vec<_> = self.graph
.edges_directed(node, petgraph::Direction::Incoming)
.map(|e| e.id())
.collect();
for edge_id in edges {
self.graph.remove_edge(edge_id);
}
self.topo_cache = None;
}
/// Get all connections
pub fn connections(&self) -> impl Iterator<Item = (NodeIndex, NodeIndex, &Connection)> + '_ {
self.graph.edge_references().map(|e| (e.source(), e.target(), e.weight()))
@ -596,13 +838,21 @@ impl AudioGraph {
}
}
// Clone frontend groups
new_graph.frontend_groups = self.frontend_groups.clone();
new_graph
}
/// Set frontend-only group definitions (stored opaquely for persistence)
pub fn set_frontend_groups(&mut self, groups: Vec<crate::audio::node_graph::preset::SerializedGroup>) {
self.frontend_groups = groups;
}
/// Serialize the graph to a preset
pub fn to_preset(&self, name: impl Into<String>) -> crate::audio::node_graph::preset::GraphPreset {
use crate::audio::node_graph::preset::{GraphPreset, SerializedConnection, SerializedNode};
use crate::audio::node_graph::nodes::VoiceAllocatorNode;
use crate::audio::node_graph::nodes::{VoiceAllocatorNode, MixerNode, SubtrackInputsNode};
let mut preset = GraphPreset::new(name);
@ -620,15 +870,23 @@ impl AudioGraph {
serialized.set_parameter(param.id, value);
}
// Save port count for dynamic-port nodes so they round-trip correctly
if node.node_type() == "Mixer" {
if let Some(mixer) = node.as_any().downcast_ref::<MixerNode>() {
serialized.num_ports = Some(mixer.num_inputs() as u32);
}
}
if node.node_type() == "SubtrackInputs" {
if let Some(si) = node.as_any().downcast_ref::<SubtrackInputsNode>() {
serialized.num_ports = Some(si.num_subtracks() as u32);
serialized.port_names = si.subtracks().iter().map(|(_, name)| name.clone()).collect();
}
}
// For VoiceAllocator nodes, serialize the template graph
// We need to downcast to access template_graph()
// This is safe because we know the node type
if node.node_type() == "VoiceAllocator" {
// Use Any to downcast
let node_ptr = &**node as *const dyn crate::audio::node_graph::AudioNode;
let node_ptr = node_ptr as *const VoiceAllocatorNode;
unsafe {
let va_node = &*node_ptr;
// Downcast using safe Any trait
if let Some(va_node) = node.as_any().downcast_ref::<VoiceAllocatorNode>() {
let template_preset = va_node.template_graph().to_preset("template");
serialized.template_graph = Some(Box::new(template_preset));
}
@ -640,10 +898,8 @@ impl AudioGraph {
use crate::audio::node_graph::preset::{EmbeddedSampleData, SampleData};
use base64::{Engine as _, engine::general_purpose};
let node_ptr = &**node as *const dyn crate::audio::node_graph::AudioNode;
let node_ptr = node_ptr as *const SimpleSamplerNode;
unsafe {
let sampler_node = &*node_ptr;
// Downcast using safe Any trait
if let Some(sampler_node) = node.as_any().downcast_ref::<SimpleSamplerNode>() {
if let Some(sample_path) = sampler_node.get_sample_path() {
// Check file size
let should_embed = std::fs::metadata(sample_path)
@ -687,10 +943,8 @@ impl AudioGraph {
use crate::audio::node_graph::preset::{EmbeddedSampleData, LayerData, SampleData};
use base64::{Engine as _, engine::general_purpose};
let node_ptr = &**node as *const dyn crate::audio::node_graph::AudioNode;
let node_ptr = node_ptr as *const MultiSamplerNode;
unsafe {
let multi_sampler_node = &*node_ptr;
// Downcast using safe Any trait
if let Some(multi_sampler_node) = node.as_any().downcast_ref::<MultiSamplerNode>() {
let layers_info = multi_sampler_node.get_layers_info();
if !layers_info.is_empty() {
let layers: Vec<LayerData> = layers_info
@ -744,6 +998,25 @@ impl AudioGraph {
}
}
// For Script nodes, serialize the source code
if node.node_type() == "Script" {
use crate::audio::node_graph::nodes::ScriptNode;
if let Some(script_node) = node.as_any().downcast_ref::<ScriptNode>() {
let source = script_node.source_code();
if !source.is_empty() {
serialized.script_source = Some(source.to_string());
}
}
}
// For AmpSim nodes, serialize the model path
if node.node_type() == "AmpSim" {
use crate::audio::node_graph::nodes::AmpSimNode;
if let Some(amp_sim) = node.as_any().downcast_ref::<AmpSimNode>() {
serialized.nam_model_path = amp_sim.model_path().map(|s| s.to_string());
}
}
// Save position if available
if let Some(pos) = self.get_node_position(node_idx) {
serialized.set_position(pos.0, pos.1);
@ -773,6 +1046,9 @@ impl AudioGraph {
// Output node
preset.output_node = self.output_node.map(|idx| idx.index() as u32);
// Frontend groups (stored opaquely)
preset.groups = self.frontend_groups.clone();
preset
}
@ -804,70 +1080,76 @@ impl AudioGraph {
let mut graph = Self::new(sample_rate, buffer_size);
let mut index_map: HashMap<u32, NodeIndex> = HashMap::new();
// Pre-pass: compute required min port count for dynamic-port nodes from the connection list.
// This ensures old presets (without num_ports) still size correctly regardless of
// connection-restoration order.
let mut required_ports: HashMap<u32, usize> = HashMap::new();
for conn in &preset.connections {
let entry = required_ports.entry(conn.to_node).or_insert(0);
*entry = (*entry).max(conn.to_port + 2); // port N + 1 spare
}
// Create all nodes
for serialized_node in &preset.nodes {
// Create the node based on type
let node: Box<dyn crate::audio::node_graph::AudioNode> = match serialized_node.node_type.as_str() {
"Oscillator" => Box::new(OscillatorNode::new("Oscillator")),
"Gain" => Box::new(GainNode::new("Gain")),
"Mixer" => Box::new(MixerNode::new("Mixer")),
"Filter" => Box::new(FilterNode::new("Filter")),
"ADSR" => Box::new(ADSRNode::new("ADSR")),
"LFO" => Box::new(LFONode::new("LFO")),
"NoiseGenerator" => Box::new(NoiseGeneratorNode::new("Noise")),
"Splitter" => Box::new(SplitterNode::new("Splitter")),
"Pan" => Box::new(PanNode::new("Pan")),
"Quantizer" => Box::new(QuantizerNode::new("Quantizer")),
"Delay" => Box::new(DelayNode::new("Delay")),
"Distortion" => Box::new(DistortionNode::new("Distortion")),
"Reverb" => Box::new(ReverbNode::new("Reverb")),
"Chorus" => Box::new(ChorusNode::new("Chorus")),
"Compressor" => Box::new(CompressorNode::new("Compressor")),
"Constant" => Box::new(ConstantNode::new("Constant")),
"EnvelopeFollower" => Box::new(EnvelopeFollowerNode::new("Envelope Follower")),
"Limiter" => Box::new(LimiterNode::new("Limiter")),
"Math" => Box::new(MathNode::new("Math")),
"EQ" => Box::new(EQNode::new("EQ")),
"Flanger" => Box::new(FlangerNode::new("Flanger")),
"FMSynth" => Box::new(FMSynthNode::new("FM Synth")),
"Phaser" => Box::new(PhaserNode::new("Phaser")),
"BitCrusher" => Box::new(BitCrusherNode::new("Bit Crusher")),
"Vocoder" => Box::new(VocoderNode::new("Vocoder")),
"RingModulator" => Box::new(RingModulatorNode::new("Ring Modulator")),
"SampleHold" => Box::new(SampleHoldNode::new("Sample & Hold")),
"WavetableOscillator" => Box::new(WavetableOscillatorNode::new("Wavetable")),
"SimpleSampler" => Box::new(SimpleSamplerNode::new("Sampler")),
"SlewLimiter" => Box::new(SlewLimiterNode::new("Slew Limiter")),
"MultiSampler" => Box::new(MultiSamplerNode::new("Multi Sampler")),
"MidiInput" => Box::new(MidiInputNode::new("MIDI Input")),
"MidiToCV" => Box::new(MidiToCVNode::new("MIDI→CV")),
"AudioToCV" => Box::new(AudioToCVNode::new("Audio→CV")),
"AudioInput" => Box::new(AudioInputNode::new("Audio Input")),
"AutomationInput" => Box::new(AutomationInputNode::new("Automation")),
"Oscilloscope" => Box::new(OscilloscopeNode::new("Oscilloscope")),
"TemplateInput" => Box::new(TemplateInputNode::new("Template Input")),
"TemplateOutput" => Box::new(TemplateOutputNode::new("Template Output")),
"VoiceAllocator" => {
let mut va = VoiceAllocatorNode::new("VoiceAllocator", sample_rate, buffer_size);
let mut node = crate::audio::node_graph::nodes::create_node(&serialized_node.node_type, sample_rate, buffer_size)
.ok_or_else(|| format!("Unknown node type: {}", serialized_node.node_type))?;
// If there's a template graph, deserialize and set it
if let Some(ref template_preset) = serialized_node.template_graph {
// Pre-size dynamic-port nodes before graph.add_node() so output buffers are
// allocated at the correct size. num_ports takes priority; fall back to
// connection-count inference so old presets without num_ports still work.
if serialized_node.node_type == "Mixer" {
use crate::audio::node_graph::nodes::MixerNode;
if let Some(mixer) = node.as_any_mut().downcast_mut::<MixerNode>() {
let from_conns = required_ports.get(&serialized_node.id).copied().unwrap_or(1);
let target = serialized_node.num_ports.map(|n| n as usize).unwrap_or(0).max(from_conns).max(1);
mixer.resize(target);
}
}
if serialized_node.node_type == "SubtrackInputs" {
use crate::audio::node_graph::nodes::SubtrackInputsNode;
if let Some(si) = node.as_any_mut().downcast_mut::<SubtrackInputsNode>() {
let from_conns = required_ports.get(&serialized_node.id).copied().unwrap_or(0);
let target = serialized_node.num_ports.map(|n| n as usize).unwrap_or(0).max(from_conns);
if target > 0 {
let subtracks = (0..target)
.map(|i| (0u32, format!("Subtrack {}", i + 1)))
.collect();
si.update_subtracks(subtracks, buffer_size);
}
}
}
// VoiceAllocator needs its template graph deserialized and set
if serialized_node.node_type == "VoiceAllocator" {
if let Some(ref template_preset) = serialized_node.template_graph {
if let Some(va) = node.as_any_mut().downcast_mut::<VoiceAllocatorNode>() {
let template_graph = Self::from_preset(template_preset, sample_rate, buffer_size, preset_base_path)?;
// Set the template graph (we'll need to add this method to VoiceAllocator)
*va.template_graph_mut() = template_graph;
va.rebuild_voices();
}
Box::new(va)
}
"AudioOutput" => Box::new(AudioOutputNode::new("Output")),
_ => return Err(format!("Unknown node type: {}", serialized_node.node_type)),
};
}
let node_idx = graph.add_node(node);
index_map.insert(serialized_node.id, node_idx);
// Set parameters
// Restore script source for Script nodes (must come before parameter setting
// since set_script rebuilds parameters)
if let Some(ref source) = serialized_node.script_source {
if serialized_node.node_type == "Script" {
use crate::audio::node_graph::nodes::ScriptNode;
if let Some(graph_node) = graph.graph.node_weight_mut(node_idx) {
if let Some(script_node) = graph_node.node.as_any_mut().downcast_mut::<ScriptNode>() {
if let Err(e) = script_node.set_script(source) {
eprintln!("Warning: failed to compile script for node {}: {}", serialized_node.id, e);
}
}
}
}
}
// Set parameters (after script compilation so param slots exist)
for (&param_id, &value) in &serialized_node.parameters {
if let Some(graph_node) = graph.graph.node_weight_mut(node_idx) {
graph_node.node.set_parameter(param_id, value);
@ -880,10 +1162,8 @@ impl AudioGraph {
crate::audio::node_graph::preset::SampleData::SimpleSampler { file_path, embedded_data } => {
// Load sample into SimpleSampler
if let Some(graph_node) = graph.graph.node_weight_mut(node_idx) {
let node_ptr = &mut *graph_node.node as *mut dyn crate::audio::node_graph::AudioNode;
let node_ptr = node_ptr as *mut SimpleSamplerNode;
unsafe {
let sampler_node = &mut *node_ptr;
// Downcast using safe Any trait
if let Some(sampler_node) = graph_node.node.as_any_mut().downcast_mut::<SimpleSamplerNode>() {
// Try embedded data first, then fall back to file path
if let Some(ref embedded) = embedded_data {
@ -914,10 +1194,8 @@ impl AudioGraph {
crate::audio::node_graph::preset::SampleData::MultiSampler { layers } => {
// Load layers into MultiSampler
if let Some(graph_node) = graph.graph.node_weight_mut(node_idx) {
let node_ptr = &mut *graph_node.node as *mut dyn crate::audio::node_graph::AudioNode;
let node_ptr = node_ptr as *mut MultiSamplerNode;
unsafe {
let multi_sampler_node = &mut *node_ptr;
// Downcast using safe Any trait
if let Some(multi_sampler_node) = graph_node.node.as_any_mut().downcast_mut::<MultiSamplerNode>() {
for layer in layers {
// Try embedded data first, then fall back to file path
if let Some(ref embedded) = layer.embedded_data {
@ -970,6 +1248,30 @@ impl AudioGraph {
}
}
// Restore NAM model for AmpSim nodes
if let Some(ref model_path) = serialized_node.nam_model_path {
if serialized_node.node_type == "AmpSim" {
use crate::audio::node_graph::nodes::AmpSimNode;
eprintln!("[AmpSim] Preset restore: nam_model_path={:?}", model_path);
if let Some(graph_node) = graph.graph.node_weight_mut(node_idx) {
if let Some(amp_sim) = graph_node.node.as_any_mut().downcast_mut::<AmpSimNode>() {
let result = if let Some(bundled_name) = model_path.strip_prefix("bundled:") {
eprintln!("[AmpSim] Preset: loading bundled model {:?}", bundled_name);
amp_sim.load_bundled_model(bundled_name)
} else {
let resolved_path = resolve_sample_path(model_path);
eprintln!("[AmpSim] Preset: loading from file {:?}", resolved_path);
amp_sim.load_model(&resolved_path)
};
match &result {
Ok(()) => eprintln!("[AmpSim] Preset: model loaded successfully"),
Err(e) => eprintln!("[AmpSim] Preset: failed to load NAM model: {}", e),
}
}
}
}
}
// Restore position
graph.set_node_position(node_idx, serialized_node.position.0, serialized_node.position.1);
}
@ -999,6 +1301,9 @@ impl AudioGraph {
}
}
// Restore frontend groups (stored opaquely)
graph.frontend_groups = preset.groups.clone();
Ok(graph)
}
}

View File

@ -5,6 +5,6 @@ pub mod nodes;
pub mod preset;
pub use graph::{Connection, GraphNode, AudioGraph};
pub use node_trait::AudioNode;
pub use preset::{GraphPreset, PresetMetadata, SerializedConnection, SerializedNode};
pub use node_trait::{AudioNode, cv_input_or_default};
pub use preset::{GraphPreset, PresetMetadata, SerializedConnection, SerializedNode, SerializedGroup, SerializedBoundaryConnection};
pub use types::{ConnectionError, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType};

View File

@ -77,3 +77,34 @@ pub trait AudioNode: Send {
/// Downcast to `&dyn Any` for type-specific read-only operations
fn as_any(&self) -> &dyn std::any::Any;
}
/// Helper function for CV inputs with optional connections
///
/// Returns the input value if connected (not NaN), otherwise returns the default value.
/// This implements "Blender-style" input behavior where parameters are replaced by
/// connected inputs.
///
/// # Arguments
/// * `inputs` - Input buffer array from process()
/// * `port` - Input port index
/// * `frame` - Current frame index
/// * `default` - Default value to use when input is unconnected
///
/// # Returns
/// The input value if connected, otherwise the default value
#[inline]
pub fn cv_input_or_default(inputs: &[&[f32]], port: usize, frame: usize, default: f32) -> f32 {
    // Checked indexing replaces the two manual bounds tests: a missing port
    // or an out-of-range frame (no buffer) falls through to the default,
    // exactly as before.
    match inputs.get(port).and_then(|buffer| buffer.get(frame)) {
        // Connected: a real sample is present — use the input signal.
        Some(&value) if !value.is_nan() => value,
        // Unconnected (NaN sentinel) or no buffer: use the parameter default.
        _ => default,
    }
}

View File

@ -1,10 +1,11 @@
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType};
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType, cv_input_or_default};
use crate::audio::midi::MidiEvent;
const PARAM_ATTACK: u32 = 0;
const PARAM_DECAY: u32 = 1;
const PARAM_SUSTAIN: u32 = 2;
const PARAM_RELEASE: u32 = 3;
const PARAM_CURVE: u32 = 4;
#[derive(Debug, Clone, Copy, PartialEq)]
enum EnvelopeStage {
@ -15,6 +16,19 @@ enum EnvelopeStage {
Release,
}
/// Curve shape for envelope segments
#[derive(Debug, Clone, Copy, PartialEq)]
enum CurveType {
    Linear,
    Exponential,
}

impl CurveType {
    /// Decode the `Curve` parameter: values at or above 0.5 select
    /// Exponential, everything else (including NaN) stays Linear.
    fn from_f32(v: f32) -> Self {
        match v >= 0.5 {
            true => CurveType::Exponential,
            false => CurveType::Linear,
        }
    }
}
/// ADSR Envelope Generator
/// Outputs a CV signal (0.0-1.0) based on gate input and ADSR parameters
pub struct ADSRNode {
@ -23,8 +37,15 @@ pub struct ADSRNode {
decay: f32, // seconds
sustain: f32, // level (0.0-1.0)
release: f32, // seconds
curve: CurveType,
stage: EnvelopeStage,
level: f32, // current envelope level
/// For exponential curves: the coefficient per sample (computed on stage entry)
exp_coeff: f32,
/// For exponential curves: the base level when the stage started
exp_base: f32,
/// For exponential curves: the target level
exp_target: f32,
gate_was_high: bool,
inputs: Vec<NodePort>,
outputs: Vec<NodePort>,
@ -48,6 +69,7 @@ impl ADSRNode {
Parameter::new(PARAM_DECAY, "Decay", 0.001, 5.0, 0.1, ParameterUnit::Time),
Parameter::new(PARAM_SUSTAIN, "Sustain", 0.0, 1.0, 0.7, ParameterUnit::Generic),
Parameter::new(PARAM_RELEASE, "Release", 0.001, 5.0, 0.2, ParameterUnit::Time),
Parameter::new(PARAM_CURVE, "Curve", 0.0, 1.0, 0.0, ParameterUnit::Generic),
];
Self {
@ -56,8 +78,12 @@ impl ADSRNode {
decay: 0.1,
sustain: 0.7,
release: 0.2,
curve: CurveType::Linear,
stage: EnvelopeStage::Idle,
level: 0.0,
exp_coeff: 0.0,
exp_base: 0.0,
exp_target: 0.0,
gate_was_high: false,
inputs,
outputs,
@ -89,6 +115,7 @@ impl AudioNode for ADSRNode {
PARAM_DECAY => self.decay = value.clamp(0.001, 5.0),
PARAM_SUSTAIN => self.sustain = value.clamp(0.0, 1.0),
PARAM_RELEASE => self.release = value.clamp(0.001, 5.0),
PARAM_CURVE => self.curve = CurveType::from_f32(value),
_ => {}
}
}
@ -99,6 +126,7 @@ impl AudioNode for ADSRNode {
PARAM_DECAY => self.decay,
PARAM_SUSTAIN => self.sustain,
PARAM_RELEASE => self.release,
PARAM_CURVE => match self.curve { CurveType::Linear => 0.0, CurveType::Exponential => 1.0 },
_ => 0.0,
}
}
@ -122,20 +150,31 @@ impl AudioNode for ADSRNode {
let frames = output.len();
for frame in 0..frames {
// Read gate input (if available)
let gate_high = if !inputs.is_empty() && frame < inputs[0].len() {
inputs[0][frame] > 0.5 // Gate is high if CV > 0.5
} else {
false
};
// Gate input: when unconnected, defaults to 0.0 (off)
let gate_cv = cv_input_or_default(inputs, 0, frame, 0.0);
let gate_high = gate_cv > 0.5;
// Detect gate transitions
if gate_high && !self.gate_was_high {
// Note on: Start attack
self.stage = EnvelopeStage::Attack;
if self.curve == CurveType::Exponential {
// For exponential attack, compute coefficient for ~5 time constants
// We overshoot the target slightly so the curve reaches 1.0 naturally
let samples = self.attack * sample_rate_f32;
self.exp_coeff = (-5.0 / samples).exp();
self.exp_base = self.level;
self.exp_target = 1.0;
}
} else if !gate_high && self.gate_was_high {
// Note off: Start release
self.stage = EnvelopeStage::Release;
if self.curve == CurveType::Exponential {
let samples = self.release * sample_rate_f32;
self.exp_coeff = (-5.0 / samples).exp();
self.exp_base = self.level;
self.exp_target = 0.0;
}
}
self.gate_was_high = gate_high;
@ -145,22 +184,51 @@ impl AudioNode for ADSRNode {
self.level = 0.0;
}
EnvelopeStage::Attack => {
// Rise from current level to 1.0
let increment = 1.0 / (self.attack * sample_rate_f32);
self.level += increment;
if self.level >= 1.0 {
self.level = 1.0;
self.stage = EnvelopeStage::Decay;
match self.curve {
CurveType::Linear => {
let increment = 1.0 / (self.attack * sample_rate_f32);
self.level += increment;
if self.level >= 1.0 {
self.level = 1.0;
self.stage = EnvelopeStage::Decay;
}
}
CurveType::Exponential => {
// Asymptotic approach: level moves toward overshoot target
// Using target of 1.0 + small overshoot so we actually reach 1.0
let overshoot_target = 1.0 + (1.0 - self.exp_base) * 0.01;
self.level = overshoot_target - (overshoot_target - self.level) * self.exp_coeff;
if self.level >= 1.0 {
self.level = 1.0;
self.stage = EnvelopeStage::Decay;
// Set up decay exponential
let samples = self.decay * sample_rate_f32;
self.exp_coeff = (-5.0 / samples).exp();
self.exp_base = 1.0;
self.exp_target = self.sustain;
}
}
}
}
EnvelopeStage::Decay => {
// Fall from 1.0 to sustain level
let target = self.sustain;
let decrement = (1.0 - target) / (self.decay * sample_rate_f32);
self.level -= decrement;
if self.level <= target {
self.level = target;
self.stage = EnvelopeStage::Sustain;
match self.curve {
CurveType::Linear => {
let decrement = (1.0 - target) / (self.decay * sample_rate_f32);
self.level -= decrement;
if self.level <= target {
self.level = target;
self.stage = EnvelopeStage::Sustain;
}
}
CurveType::Exponential => {
// Exponential decay toward sustain level
self.level = target + (self.level - target) * self.exp_coeff;
if (self.level - target).abs() < 0.001 {
self.level = target;
self.stage = EnvelopeStage::Sustain;
}
}
}
}
EnvelopeStage::Sustain => {
@ -168,12 +236,23 @@ impl AudioNode for ADSRNode {
self.level = self.sustain;
}
EnvelopeStage::Release => {
// Fall from current level to 0.0
let decrement = self.level / (self.release * sample_rate_f32);
self.level -= decrement;
if self.level <= 0.001 {
self.level = 0.0;
self.stage = EnvelopeStage::Idle;
match self.curve {
CurveType::Linear => {
let decrement = self.level / (self.release * sample_rate_f32);
self.level -= decrement;
if self.level <= 0.001 {
self.level = 0.0;
self.stage = EnvelopeStage::Idle;
}
}
CurveType::Exponential => {
// Exponential decay toward 0
self.level *= self.exp_coeff;
if self.level <= 0.001 {
self.level = 0.0;
self.stage = EnvelopeStage::Idle;
}
}
}
}
}
@ -186,6 +265,9 @@ impl AudioNode for ADSRNode {
fn reset(&mut self) {
self.stage = EnvelopeStage::Idle;
self.level = 0.0;
self.exp_coeff = 0.0;
self.exp_base = 0.0;
self.exp_target = 0.0;
self.gate_was_high = false;
}
@ -204,9 +286,13 @@ impl AudioNode for ADSRNode {
decay: self.decay,
sustain: self.sustain,
release: self.release,
stage: EnvelopeStage::Idle, // Reset state
level: 0.0, // Reset level
gate_was_high: false, // Reset gate
curve: self.curve,
stage: EnvelopeStage::Idle,
level: 0.0,
exp_coeff: 0.0,
exp_base: 0.0,
exp_target: 0.0,
gate_was_high: false,
inputs: self.inputs.clone(),
outputs: self.outputs.clone(),
parameters: self.parameters.clone(),

View File

@ -0,0 +1,210 @@
use crate::audio::midi::MidiEvent;
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType};
use nam_ffi::NamModel;
use std::path::Path;
// Parameter ids for AmpSimNode; must match the order/ids declared in
// `AmpSimNode::new` and handled in set_parameter/get_parameter.
const PARAM_INPUT_GAIN: u32 = 0;
const PARAM_OUTPUT_GAIN: u32 = 1;
const PARAM_MIX: u32 = 2;
/// Guitar amp simulator node using Neural Amp Modeler (.nam) models.
pub struct AmpSimNode {
    name: String,
    // Linear pre-gain applied before the model input (clamped to 0.0..=4.0).
    input_gain: f32,
    // Linear post-gain applied to the wet (processed) signal (0.0..=4.0).
    output_gain: f32,
    // Wet/dry blend: 0.0 = dry only, 1.0 = fully processed (0.0..=1.0).
    mix: f32,
    // Loaded model; None means process() passes audio through unchanged.
    model: Option<NamModel>,
    // Where the model came from, kept for preset serialization.
    // Bundled models are stored as "bundled:<name>".
    model_path: Option<String>,
    // Mono scratch buffers for NAM processing (NAM is mono-only);
    // grown lazily in process() to the current block size.
    mono_in: Vec<f32>,
    mono_out: Vec<f32>,
    inputs: Vec<NodePort>,
    outputs: Vec<NodePort>,
    parameters: Vec<Parameter>,
}
impl AmpSimNode {
    /// Create an amp-sim node with unity gains, a fully-wet mix, and no
    /// model loaded (audio passes through until one is loaded).
    pub fn new(name: impl Into<String>) -> Self {
        Self {
            name: name.into(),
            input_gain: 1.0,
            output_gain: 1.0,
            mix: 1.0,
            model: None,
            model_path: None,
            mono_in: Vec::new(),
            mono_out: Vec::new(),
            inputs: vec![NodePort::new("Audio In", SignalType::Audio, 0)],
            outputs: vec![NodePort::new("Audio Out", SignalType::Audio, 0)],
            parameters: vec![
                Parameter::new(PARAM_INPUT_GAIN, "Input Gain", 0.0, 4.0, 1.0, ParameterUnit::Generic),
                Parameter::new(PARAM_OUTPUT_GAIN, "Output Gain", 0.0, 4.0, 1.0, ParameterUnit::Generic),
                Parameter::new(PARAM_MIX, "Mix", 0.0, 1.0, 1.0, ParameterUnit::Generic),
            ],
        }
    }

    /// Load a .nam model file. Call from the audio thread via command dispatch.
    pub fn load_model(&mut self, path: &str) -> Result<(), String> {
        let mut loaded = NamModel::from_file(Path::new(path)).map_err(|e| format!("{}", e))?;
        // Pre-arm the model for the largest block the engine will hand us.
        loaded.set_max_buffer_size(1024);
        self.model = Some(loaded);
        self.model_path = Some(path.to_string());
        Ok(())
    }

    /// Load a bundled NAM model by name (e.g. "BossSD1").
    pub fn load_bundled_model(&mut self, name: &str) -> Result<(), String> {
        // Outer Option: unknown name; inner Result: load failure.
        let mut loaded = super::bundled_models::load_bundled_model(name)
            .ok_or_else(|| format!("Unknown bundled model: {}", name))??;
        loaded.set_max_buffer_size(1024);
        self.model = Some(loaded);
        // Prefixed path so preset restore knows to look in the bundle.
        self.model_path = Some(format!("bundled:{}", name));
        Ok(())
    }

    /// Get the loaded model path (for preset serialization).
    pub fn model_path(&self) -> Option<&str> {
        self.model_path.as_deref()
    }
}
impl AudioNode for AmpSimNode {
fn category(&self) -> NodeCategory {
NodeCategory::Effect
}
fn inputs(&self) -> &[NodePort] {
&self.inputs
}
fn outputs(&self) -> &[NodePort] {
&self.outputs
}
fn parameters(&self) -> &[Parameter] {
&self.parameters
}
fn set_parameter(&mut self, id: u32, value: f32) {
match id {
PARAM_INPUT_GAIN => self.input_gain = value.clamp(0.0, 4.0),
PARAM_OUTPUT_GAIN => self.output_gain = value.clamp(0.0, 4.0),
PARAM_MIX => self.mix = value.clamp(0.0, 1.0),
_ => {}
}
}
fn get_parameter(&self, id: u32) -> f32 {
match id {
PARAM_INPUT_GAIN => self.input_gain,
PARAM_OUTPUT_GAIN => self.output_gain,
PARAM_MIX => self.mix,
_ => 0.0,
}
}
fn process(
&mut self,
inputs: &[&[f32]],
outputs: &mut [&mut [f32]],
_midi_inputs: &[&[MidiEvent]],
_midi_outputs: &mut [&mut Vec<MidiEvent>],
_sample_rate: u32,
) {
if inputs.is_empty() || outputs.is_empty() {
return;
}
let input = inputs[0];
let output = &mut outputs[0];
let frames = input.len() / 2;
let output_frames = output.len() / 2;
let frames_to_process = frames.min(output_frames);
if let Some(ref mut model) = self.model {
// Ensure scratch buffers are large enough
if self.mono_in.len() < frames_to_process {
self.mono_in.resize(frames_to_process, 0.0);
self.mono_out.resize(frames_to_process, 0.0);
}
// Deinterleave stereo to mono (average L+R) and apply input gain
for frame in 0..frames_to_process {
let left = input[frame * 2];
let right = input[frame * 2 + 1];
self.mono_in[frame] = (left + right) * 0.5 * self.input_gain;
}
// Process through NAM model
model.process(
&self.mono_in[..frames_to_process],
&mut self.mono_out[..frames_to_process],
);
// Apply output gain, mix wet/dry, copy mono back to stereo
for frame in 0..frames_to_process {
let dry = (input[frame * 2] + input[frame * 2 + 1]) * 0.5;
let wet = self.mono_out[frame] * self.output_gain;
let mixed = dry * (1.0 - self.mix) + wet * self.mix;
output[frame * 2] = mixed;
output[frame * 2 + 1] = mixed;
}
} else {
// No model loaded — pass through unchanged
let samples = frames_to_process * 2;
output[..samples].copy_from_slice(&input[..samples]);
}
}
fn reset(&mut self) {
// No persistent filter state to reset
}
fn node_type(&self) -> &str {
"AmpSim"
}
fn name(&self) -> &str {
&self.name
}
fn clone_node(&self) -> Box<dyn AudioNode> {
// Cannot clone the NAM model (C++ pointer), so clone without model.
// The model will need to be reloaded via command if needed.
Box::new(Self {
name: self.name.clone(),
input_gain: self.input_gain,
output_gain: self.output_gain,
mix: self.mix,
model: None,
model_path: self.model_path.clone(),
mono_in: Vec::new(),
mono_out: Vec::new(),
inputs: self.inputs.clone(),
outputs: self.outputs.clone(),
parameters: self.parameters.clone(),
})
}
fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
self
}
fn as_any(&self) -> &dyn std::any::Any {
self
}
}

View File

@ -0,0 +1,412 @@
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType, cv_input_or_default};
use crate::audio::midi::MidiEvent;
// Parameter ids for ArpeggiatorNode; must match the declarations in
// `ArpeggiatorNode::new` and the set_parameter/get_parameter handlers.
const PARAM_MODE: u32 = 0;
const PARAM_DIRECTION: u32 = 1;
const PARAM_OCTAVES: u32 = 2;
const PARAM_RETRIGGER: u32 = 3;
/// ~1ms gate-off for re-triggering at 48kHz
/// (the gap is counted in samples, so it shortens/lengthens at other rates)
const RETRIGGER_SAMPLES: u32 = 48;
/// How the arpeggiator consumes the phase input: one note per phase cycle,
/// or the whole held sequence spread across a single cycle.
#[derive(Debug, Clone, Copy, PartialEq)]
enum ArpMode {
    OnePerCycle = 0,
    AllPerCycle = 1,
}

impl ArpMode {
    /// Decode the UI parameter: rounds, then 1 or greater selects AllPerCycle.
    fn from_f32(v: f32) -> Self {
        match v.round() as i32 {
            i if i >= 1 => ArpMode::AllPerCycle,
            _ => ArpMode::OnePerCycle,
        }
    }
}
/// Order in which the arpeggiator walks the held notes.
#[derive(Debug, Clone, Copy, PartialEq)]
enum ArpDirection {
    Up = 0,
    Down = 1,
    UpDown = 2,
    Random = 3,
}

impl ArpDirection {
    /// Decode the UI parameter; any value outside 1..=3 falls back to Up.
    fn from_f32(v: f32) -> Self {
        let index = v.round() as i32;
        if index == 1 {
            ArpDirection::Down
        } else if index == 2 {
            ArpDirection::UpDown
        } else if index == 3 {
            ArpDirection::Random
        } else {
            ArpDirection::Up
        }
    }
}
/// Arpeggiator node — takes MIDI input (held chord) and a CV phase input,
/// outputs CV V/Oct + Gate stepping through the held notes.
pub struct ArpeggiatorNode {
    name: String,
    /// Currently held notes: (note, velocity), kept sorted by pitch
    held_notes: Vec<(u8, u8)>,
    /// Expanded sequence after applying direction + octaves
    sequence: Vec<(u8, u8)>,
    /// Current position in the sequence (used by both modes; OnePerCycle
    /// advances it on phase wraparound, AllPerCycle derives it from phase)
    current_step: usize,
    /// Previous phase value for wraparound detection
    prev_phase: f32,
    /// Countdown (in samples) for the gate re-trigger gap
    retrigger_countdown: u32,
    /// Current output values (held between steps so V/Oct stays stable)
    current_voct: f32,
    current_gate: f32,
    /// Parameters
    mode: ArpMode,
    direction: ArpDirection,
    /// Number of octaves the held chord is expanded across (1..=4)
    octaves: u32,
    /// Whether to drop the gate briefly when the step changes
    retrigger: bool,
    /// For Up/Down direction tracking
    /// NOTE(review): only ever written (reset on direction change) — not read
    /// in the visible stepping logic; confirm it is still needed.
    going_up: bool,
    /// Track whether sequence needs rebuilding
    sequence_dirty: bool,
    /// Stateful PRNG for random direction (xorshift32 state, seeded in new())
    rng_state: u32,
    inputs: Vec<NodePort>,
    outputs: Vec<NodePort>,
    parameters: Vec<Parameter>,
}
impl ArpeggiatorNode {
    /// Build an arpeggiator with default settings: Up direction, one note
    /// per cycle, one octave, retrigger enabled, nothing held.
    pub fn new(name: impl Into<String>) -> Self {
        let inputs = vec![
            NodePort::new("MIDI In", SignalType::Midi, 0),
            NodePort::new("Phase", SignalType::CV, 0),
        ];
        let outputs = vec![
            NodePort::new("V/Oct", SignalType::CV, 0),
            NodePort::new("Gate", SignalType::CV, 1),
        ];
        let parameters = vec![
            Parameter::new(PARAM_MODE, "Mode", 0.0, 1.0, 0.0, ParameterUnit::Generic),
            Parameter::new(PARAM_DIRECTION, "Direction", 0.0, 3.0, 0.0, ParameterUnit::Generic),
            Parameter::new(PARAM_OCTAVES, "Octaves", 1.0, 4.0, 1.0, ParameterUnit::Generic),
            Parameter::new(PARAM_RETRIGGER, "Retrigger", 0.0, 1.0, 1.0, ParameterUnit::Generic),
        ];
        Self {
            name: name.into(),
            held_notes: Vec::new(),
            sequence: Vec::new(),
            current_step: 0,
            prev_phase: 0.0,
            retrigger_countdown: 0,
            current_voct: 0.0,
            current_gate: 0.0,
            mode: ArpMode::OnePerCycle,
            direction: ArpDirection::Up,
            octaves: 1,
            retrigger: true,
            going_up: true,
            sequence_dirty: false,
            // Fixed, non-zero xorshift seed (xorshift must never be 0).
            rng_state: 12345,
            inputs,
            outputs,
            parameters,
        }
    }

    /// Convert a MIDI note number to V/Oct CV: note 69 (A4) maps to 0.0,
    /// one unit per octave (1/12 per semitone).
    fn midi_note_to_voct(note: u8) -> f32 {
        (note as f32 - 69.0) / 12.0
    }

    /// Rebuild `sequence` from `held_notes`, applying the octave expansion
    /// and direction. Also re-clamps `current_step` and refreshes the V/Oct
    /// output so pitch updates immediately, without waiting for a step.
    fn rebuild_sequence(&mut self) {
        self.sequence.clear();
        if self.held_notes.is_empty() {
            return;
        }
        // Build base sequence sorted by pitch (held_notes is already sorted)
        let base: Vec<(u8, u8)> = self.held_notes.clone();
        // Expand across octaves; saturating_add + the <= 127 guard keep
        // transposed notes inside the MIDI range.
        let mut expanded = Vec::new();
        for oct in 0..self.octaves {
            for &(note, vel) in &base {
                let transposed = note.saturating_add((oct * 12) as u8);
                if transposed <= 127 {
                    expanded.push((transposed, vel));
                }
            }
        }
        // Apply direction
        match self.direction {
            ArpDirection::Up => {
                self.sequence = expanded;
            }
            ArpDirection::Down => {
                expanded.reverse();
                self.sequence = expanded;
            }
            ArpDirection::UpDown => {
                if expanded.len() > 1 {
                    let mut up_down = expanded.clone();
                    // Go back down, skipping the top and bottom notes to avoid doubles
                    for i in (1..expanded.len() - 1).rev() {
                        up_down.push(expanded[i]);
                    }
                    self.sequence = up_down;
                } else {
                    self.sequence = expanded;
                }
            }
            ArpDirection::Random => {
                // For random, keep the expanded list; we'll pick randomly in process()
                self.sequence = expanded;
            }
        }
        // Clamp current_step to valid range and update V/Oct immediately
        if !self.sequence.is_empty() {
            self.current_step = self.current_step % self.sequence.len();
            let (note, _vel) = self.sequence[self.current_step];
            self.current_voct = Self::midi_note_to_voct(note);
        } else {
            self.current_step = 0;
        }
        self.sequence_dirty = false;
    }

    /// Move `current_step` to the next position: sequential wrap-around for
    /// the ordered directions, or a random pick (never the same step twice
    /// in a row) for Random. Does NOT update V/Oct — see `step_changed`.
    fn advance_step(&mut self) {
        if self.sequence.is_empty() {
            return;
        }
        if self.direction == ArpDirection::Random {
            // Stateful xorshift32 PRNG — evolves independently of current_step
            let mut x = self.rng_state;
            x ^= x << 13;
            x ^= x >> 17;
            x ^= x << 5;
            self.rng_state = x;
            // Use upper bits (better distribution) and exclude current note
            if self.sequence.len() > 1 {
                let pick = ((x >> 16) as usize) % (self.sequence.len() - 1);
                // Shift picks at/above current_step up by one so the current
                // step is never re-selected.
                self.current_step = if pick >= self.current_step { pick + 1 } else { pick };
            }
        } else {
            self.current_step = (self.current_step + 1) % self.sequence.len();
        }
    }

    /// Jump to `new_step`, refresh the V/Oct output, and (if retrigger is
    /// enabled) start the short gate-off gap. The gap only fires when
    /// `new_step` differs from the step currently stored in `current_step`,
    /// so callers must invoke this while the PREVIOUS step is still stored.
    fn step_changed(&mut self, new_step: usize) {
        let old_step = self.current_step;
        self.current_step = new_step;
        if !self.sequence.is_empty() {
            let (note, _vel) = self.sequence[self.current_step];
            self.current_voct = Self::midi_note_to_voct(note);
        }
        // Start retrigger gap if enabled and the step actually changed
        if self.retrigger && old_step != new_step {
            self.retrigger_countdown = RETRIGGER_SAMPLES;
        }
    }
}
impl AudioNode for ArpeggiatorNode {
// Arpeggiators are grouped with the other utility nodes.
fn category(&self) -> NodeCategory {
    NodeCategory::Utility
}

fn inputs(&self) -> &[NodePort] {
    &self.inputs
}

fn outputs(&self) -> &[NodePort] {
    &self.outputs
}

fn parameters(&self) -> &[Parameter] {
    &self.parameters
}
/// Apply a UI parameter change. Direction/octave changes mark the expanded
/// sequence dirty so it is rebuilt on the next process() call rather than
/// immediately.
fn set_parameter(&mut self, id: u32, value: f32) {
    match id {
        PARAM_MODE => self.mode = ArpMode::from_f32(value),
        PARAM_DIRECTION => {
            let new_dir = ArpDirection::from_f32(value);
            // Only react to actual changes to avoid needless rebuilds.
            if new_dir != self.direction {
                self.direction = new_dir;
                self.going_up = true;
                self.sequence_dirty = true;
            }
        }
        PARAM_OCTAVES => {
            // UI sends 0-3 (combo box index), map to 1-4 octaves
            let new_oct = (value.round() as u32 + 1).clamp(1, 4);
            if new_oct != self.octaves {
                self.octaves = new_oct;
                self.sequence_dirty = true;
            }
        }
        PARAM_RETRIGGER => self.retrigger = value.round() as i32 >= 1,
        _ => {}
    }
}
/// Report parameters in the same encoding the UI writes: mode/direction as
/// their enum discriminants, octaves as the 0-based combo index
/// (stored octaves - 1), retrigger as 0/1.
fn get_parameter(&self, id: u32) -> f32 {
    match id {
        PARAM_MODE => self.mode as i32 as f32,
        PARAM_DIRECTION => self.direction as i32 as f32,
        PARAM_OCTAVES => (self.octaves - 1) as f32,
        PARAM_RETRIGGER => if self.retrigger { 1.0 } else { 0.0 },
        _ => 0.0,
    }
}
/// Run one block: fold incoming MIDI into the held-note set, rebuild the
/// step sequence if needed, then step through it driven by the CV phase
/// input, writing V/Oct to output 0 and the gate to output 1.
fn process(
    &mut self,
    inputs: &[&[f32]],
    outputs: &mut [&mut [f32]],
    midi_inputs: &[&[MidiEvent]],
    _midi_outputs: &mut [&mut Vec<MidiEvent>],
    _sample_rate: u32,
) {
    // Process incoming MIDI to build held_notes
    if !midi_inputs.is_empty() {
        for event in midi_inputs[0] {
            let status = event.status & 0xF0;
            match status {
                0x90 if event.data2 > 0 => {
                    // Note on — add to held notes (sorted by pitch)
                    let note = event.data1;
                    let vel = event.data2;
                    // Remove if already held (avoid duplicates)
                    self.held_notes.retain(|&(n, _)| n != note);
                    // Insert sorted by pitch
                    let pos = self.held_notes.partition_point(|&(n, _)| n < note);
                    self.held_notes.insert(pos, (note, vel));
                    self.sequence_dirty = true;
                }
                0x80 | 0x90 => {
                    // Note off (0x80, or 0x90 with velocity 0)
                    let note = event.data1;
                    self.held_notes.retain(|&(n, _)| n != note);
                    self.sequence_dirty = true;
                }
                _ => {}
            }
        }
    }
    // Rebuild sequence if needed
    if self.sequence_dirty {
        self.rebuild_sequence();
    }
    if outputs.len() < 2 {
        return;
    }
    let len = outputs[0].len();
    // If no notes held, hold the last V/Oct and keep the gate low
    if self.sequence.is_empty() {
        for i in 0..len {
            outputs[0][i] = self.current_voct;
            outputs[1][i] = 0.0;
        }
        self.current_gate = 0.0;
        return;
    }
    for i in 0..len {
        // Phase input defaults to 0.0 when unconnected
        let phase = cv_input_or_default(inputs, 0, i, 0.0).clamp(0.0, 1.0);
        match self.mode {
            ArpMode::OnePerCycle => {
                // Detect phase wraparound (high → low = new cycle)
                if self.prev_phase > 0.7 && phase < 0.3 {
                    // BUGFIX: advance_step() used to mutate current_step
                    // BEFORE step_changed() compared old vs. new, so the
                    // comparison always saw equal steps and the retrigger
                    // gate gap never fired in this mode. Compute the next
                    // step, restore the previous one, and let step_changed()
                    // observe the real transition.
                    let prev_step = self.current_step;
                    self.advance_step();
                    let next_step = self.current_step;
                    self.current_step = prev_step;
                    self.step_changed(next_step);
                }
            }
            ArpMode::AllPerCycle => {
                // Phase 0→1 maps across all sequence notes
                let new_step = ((phase * self.sequence.len() as f32).floor() as usize)
                    .min(self.sequence.len() - 1);
                if new_step != self.current_step {
                    self.step_changed(new_step);
                }
            }
        }
        self.prev_phase = phase;
        // Gate: off during the retrigger gap, on otherwise
        if self.retrigger_countdown > 0 {
            self.retrigger_countdown -= 1;
            self.current_gate = 0.0;
        } else {
            self.current_gate = 1.0;
        }
        outputs[0][i] = self.current_voct;
        outputs[1][i] = self.current_gate;
    }
}
fn reset(&mut self) {
self.held_notes.clear();
self.sequence.clear();
self.current_step = 0;
self.prev_phase = 0.0;
self.retrigger_countdown = 0;
self.current_voct = 0.0;
self.current_gate = 0.0;
self.going_up = true;
}
fn node_type(&self) -> &str {
"Arpeggiator"
}
fn name(&self) -> &str {
&self.name
}
fn clone_node(&self) -> Box<dyn AudioNode> {
Box::new(Self {
name: self.name.clone(),
held_notes: Vec::new(),
sequence: Vec::new(),
current_step: 0,
prev_phase: 0.0,
retrigger_countdown: 0,
current_voct: 0.0,
current_gate: 0.0,
mode: self.mode,
direction: self.direction,
octaves: self.octaves,
retrigger: self.retrigger,
going_up: true,
sequence_dirty: false,
rng_state: 12345,
inputs: self.inputs.clone(),
outputs: self.outputs.clone(),
parameters: self.parameters.clone(),
})
}
fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
self
}
fn as_any(&self) -> &dyn std::any::Any {
self
}
}

View File

@ -1,16 +1,10 @@
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType};
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, SignalType};
use crate::audio::midi::MidiEvent;
const PARAM_ATTACK: u32 = 0;
const PARAM_RELEASE: u32 = 1;
/// Audio to CV converter (Envelope Follower)
/// Converts audio amplitude to control voltage
/// Audio to CV converter
/// Directly converts a stereo audio signal to mono CV (averages L+R channels)
pub struct AudioToCVNode {
name: String,
envelope: f32, // Current envelope value
attack: f32, // Attack time in seconds
release: f32, // Release time in seconds
inputs: Vec<NodePort>,
outputs: Vec<NodePort>,
parameters: Vec<Parameter>,
@ -28,19 +22,11 @@ impl AudioToCVNode {
NodePort::new("CV Out", SignalType::CV, 0),
];
let parameters = vec![
Parameter::new(PARAM_ATTACK, "Attack", 0.001, 1.0, 0.01, ParameterUnit::Time),
Parameter::new(PARAM_RELEASE, "Release", 0.001, 1.0, 0.1, ParameterUnit::Time),
];
Self {
name,
envelope: 0.0,
attack: 0.01,
release: 0.1,
inputs,
outputs,
parameters,
parameters: Vec::new(),
}
}
}
@ -62,20 +48,10 @@ impl AudioNode for AudioToCVNode {
&self.parameters
}
fn set_parameter(&mut self, id: u32, value: f32) {
match id {
PARAM_ATTACK => self.attack = value.clamp(0.001, 1.0),
PARAM_RELEASE => self.release = value.clamp(0.001, 1.0),
_ => {}
}
}
fn set_parameter(&mut self, _id: u32, _value: f32) {}
fn get_parameter(&self, id: u32) -> f32 {
match id {
PARAM_ATTACK => self.attack,
PARAM_RELEASE => self.release,
_ => 0.0,
}
fn get_parameter(&self, _id: u32) -> f32 {
0.0
}
fn process(
@ -84,7 +60,7 @@ impl AudioNode for AudioToCVNode {
outputs: &mut [&mut [f32]],
_midi_inputs: &[&[MidiEvent]],
_midi_outputs: &mut [&mut Vec<MidiEvent>],
sample_rate: u32,
_sample_rate: u32,
) {
if inputs.is_empty() || outputs.is_empty() {
return;
@ -95,39 +71,16 @@ impl AudioNode for AudioToCVNode {
// Audio input is stereo (interleaved L/R), CV output is mono
let audio_frames = input.len() / 2;
let cv_frames = output.len();
let frames = audio_frames.min(cv_frames);
// Calculate attack and release coefficients
let sample_rate_f32 = sample_rate as f32;
let attack_coeff = (-1.0 / (self.attack * sample_rate_f32)).exp();
let release_coeff = (-1.0 / (self.release * sample_rate_f32)).exp();
let frames = audio_frames.min(output.len());
for frame in 0..frames {
// Get stereo samples
let left = input[frame * 2];
let right = input[frame * 2 + 1];
// Calculate RMS-like value (average of absolute values for simplicity)
let amplitude = (left.abs() + right.abs()) / 2.0;
// Envelope follower with attack/release
if amplitude > self.envelope {
// Attack: follow signal up quickly
self.envelope = amplitude * (1.0 - attack_coeff) + self.envelope * attack_coeff;
} else {
// Release: decay slowly
self.envelope = amplitude * (1.0 - release_coeff) + self.envelope * release_coeff;
}
// Output CV (mono)
output[frame] = self.envelope;
output[frame] = (left + right) * 0.5;
}
}
fn reset(&mut self) {
self.envelope = 0.0;
}
fn reset(&mut self) {}
fn node_type(&self) -> &str {
"AudioToCV"
@ -140,9 +93,6 @@ impl AudioNode for AudioToCVNode {
fn clone_node(&self) -> Box<dyn AudioNode> {
Box::new(Self {
name: self.name.clone(),
envelope: 0.0, // Reset envelope
attack: self.attack,
release: self.release,
inputs: self.inputs.clone(),
outputs: self.outputs.clone(),
parameters: self.parameters.clone(),

View File

@ -0,0 +1,230 @@
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType};
use crate::audio::midi::MidiEvent;
const PARAM_RESOLUTION: u32 = 0;
const DEFAULT_BPM: f32 = 120.0;
const DEFAULT_BEATS_PER_BAR: u32 = 4;
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum BeatResolution {
    Whole = 0,     // 1/1
    Half = 1,      // 1/2
    Quarter = 2,   // 1/4
    Eighth = 3,    // 1/8
    Sixteenth = 4, // 1/16
    QuarterT = 5,  // 1/4 triplet
    EighthT = 6,   // 1/8 triplet
}
impl BeatResolution {
    /// Decode a parameter value (rounded to the nearest integer) into a
    /// resolution; anything outside 0..=6 falls back to `Quarter`.
    fn from_f32(value: f32) -> Self {
        use BeatResolution::*;
        match value.round() as i32 {
            0 => Whole,
            1 => Half,
            2 => Quarter,
            3 => Eighth,
            4 => Sixteenth,
            5 => QuarterT,
            6 => EighthT,
            _ => Quarter,
        }
    }
    /// How many subdivisions per quarter note beat
    fn subdivisions_per_beat(&self) -> f64 {
        match self {
            Self::Whole => 0.25,    // 1 per 4 beats
            Self::Half => 0.5,      // 1 per 2 beats
            Self::Quarter => 1.0,   // 1 per beat
            Self::Eighth => 2.0,    // 2 per beat
            Self::Sixteenth => 4.0, // 4 per beat
            Self::QuarterT => 1.5,  // 3 per 2 beats (triplet)
            Self::EighthT => 3.0,   // 3 per beat (triplet)
        }
    }
}
/// Beat clock node — generates tempo-synced CV signals.
///
/// BPM and time signature are synced from the project document via SetTempo.
/// When playing: synced to timeline position.
/// When stopped: free-runs continuously at the project BPM.
///
/// Outputs:
/// - BPM: constant CV proportional to tempo (bpm / 240)
/// - Beat Phase: sawtooth 0→1 per beat subdivision
/// - Bar Phase: sawtooth 0→1 per bar (uses project time signature)
/// - Gate: 1.0 for first half of each subdivision, 0.0 otherwise
pub struct BeatNode {
    // Display name of this node instance.
    name: String,
    // Tempo in beats per minute; updated via set_tempo().
    bpm: f32,
    // Time-signature numerator; drives the Bar Phase output period.
    beats_per_bar: u32,
    // Beat subdivision used for Beat Phase / Gate (see BeatResolution).
    resolution: BeatResolution,
    /// Playback time in seconds, set by the graph before process()
    playback_time: f64,
    /// Previous playback_time to detect paused state
    prev_playback_time: f64,
    /// Free-running time accumulator for when playback is stopped
    free_run_time: f64,
    // Port/parameter descriptors exposed through the AudioNode trait.
    inputs: Vec<NodePort>,
    outputs: Vec<NodePort>,
    parameters: Vec<Parameter>,
}
impl BeatNode {
    /// Build a beat clock with default tempo (120 BPM, 4/4) and quarter-note
    /// resolution. The node has no inputs; all four outputs are CV.
    pub fn new(name: impl Into<String>) -> Self {
        Self {
            name: name.into(),
            bpm: DEFAULT_BPM,
            beats_per_bar: DEFAULT_BEATS_PER_BAR,
            resolution: BeatResolution::Quarter,
            playback_time: 0.0,
            // -1.0 guarantees the first process() call is seen as "playing".
            prev_playback_time: -1.0,
            free_run_time: 0.0,
            inputs: Vec::new(),
            outputs: vec![
                NodePort::new("BPM", SignalType::CV, 0),
                NodePort::new("Beat Phase", SignalType::CV, 1),
                NodePort::new("Bar Phase", SignalType::CV, 2),
                NodePort::new("Gate", SignalType::CV, 3),
            ],
            parameters: vec![Parameter::new(
                PARAM_RESOLUTION,
                "Resolution",
                0.0,
                6.0,
                2.0,
                ParameterUnit::Generic,
            )],
        }
    }
    /// Called by the graph before process() with the timeline position (s).
    pub fn set_playback_time(&mut self, time: f64) {
        self.playback_time = time;
    }
    /// Sync tempo and time signature from the project document.
    pub fn set_tempo(&mut self, bpm: f32, beats_per_bar: u32) {
        self.bpm = bpm;
        self.beats_per_bar = beats_per_bar;
    }
}
impl AudioNode for BeatNode {
fn category(&self) -> NodeCategory {
NodeCategory::Utility
}
fn inputs(&self) -> &[NodePort] {
&self.inputs
}
fn outputs(&self) -> &[NodePort] {
&self.outputs
}
fn parameters(&self) -> &[Parameter] {
&self.parameters
}
fn set_parameter(&mut self, id: u32, value: f32) {
match id {
PARAM_RESOLUTION => self.resolution = BeatResolution::from_f32(value),
_ => {}
}
}
fn get_parameter(&self, id: u32) -> f32 {
match id {
PARAM_RESOLUTION => self.resolution as i32 as f32,
_ => 0.0,
}
}
fn process(
&mut self,
_inputs: &[&[f32]],
outputs: &mut [&mut [f32]],
_midi_inputs: &[&[MidiEvent]],
_midi_outputs: &mut [&mut Vec<MidiEvent>],
sample_rate: u32,
) {
if outputs.len() < 4 {
return;
}
let bpm_cv = (self.bpm / 240.0).clamp(0.0, 1.0);
let len = outputs[0].len();
let sample_period = 1.0 / sample_rate as f64;
// Detect paused: playback_time hasn't changed since last process()
let paused = self.playback_time == self.prev_playback_time;
self.prev_playback_time = self.playback_time;
let beats_per_second = self.bpm as f64 / 60.0;
let subs_per_beat = self.resolution.subdivisions_per_beat();
// Choose time source: timeline when playing, free-running when stopped
let base_time = if paused { self.free_run_time } else { self.playback_time };
for i in 0..len {
let time = base_time + i as f64 * sample_period;
let beat_pos = time * beats_per_second;
// Beat subdivision phase: 0→1 sawtooth
let sub_phase = ((beat_pos * subs_per_beat) % 1.0) as f32;
// Bar phase: 0→1 over one bar (beats_per_bar beats)
let bar_phase = ((beat_pos / self.beats_per_bar as f64) % 1.0) as f32;
// Gate: high for first half of each subdivision
let gate = if sub_phase < 0.5 { 1.0f32 } else { 0.0 };
outputs[0][i] = bpm_cv;
outputs[1][i] = sub_phase;
outputs[2][i] = bar_phase;
outputs[3][i] = gate;
}
// Advance free-run time (always ticks, so it's ready when playback stops)
self.free_run_time += len as f64 * sample_period;
}
fn reset(&mut self) {
self.playback_time = 0.0;
self.prev_playback_time = -1.0;
self.free_run_time = 0.0;
}
fn node_type(&self) -> &str {
"Beat"
}
fn name(&self) -> &str {
&self.name
}
fn clone_node(&self) -> Box<dyn AudioNode> {
Box::new(Self {
name: self.name.clone(),
bpm: self.bpm,
beats_per_bar: self.beats_per_bar,
resolution: self.resolution,
playback_time: 0.0,
prev_playback_time: -1.0,
free_run_time: 0.0,
inputs: self.inputs.clone(),
outputs: self.outputs.clone(),
parameters: self.parameters.clone(),
})
}
fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
self
}
fn as_any(&self) -> &dyn std::any::Any {
self
}
}

View File

@ -0,0 +1,50 @@
use nam_ffi::NamModel;
/// A NAM model profile embedded in the application binary.
struct BundledModel {
    // Display name used for lookup in load_bundled_model().
    name: &'static str,
    // Original file name, passed through to NamModel::from_bytes().
    filename: &'static str,
    // Raw .nam file contents baked in at compile time via include_bytes!.
    data: &'static [u8],
}
/// Models shipped with the application, embedded at build time.
const BUNDLED_MODELS: &[BundledModel] = &[
    BundledModel {
        name: "BossSD1",
        filename: "BossSD1-WaveNet.nam",
        data: include_bytes!("../../../../../src/assets/nam_models/BossSD1-WaveNet.nam"),
    },
    BundledModel {
        name: "DeluxeReverb",
        filename: "DeluxeReverb.nam",
        data: include_bytes!("../../../../../src/assets/nam_models/DeluxeReverb.nam"),
    },
    BundledModel {
        name: "DingwallBass",
        filename: "DingwallBass.nam",
        data: include_bytes!("../../../../../src/assets/nam_models/DingwallBass.nam"),
    },
    BundledModel {
        name: "Rhythm",
        filename: "Rhythm.nam",
        data: include_bytes!("../../../../../src/assets/nam_models/Rhythm.nam"),
    },
];
/// Return display names of all bundled NAM models.
pub fn bundled_model_names() -> Vec<&'static str> {
BUNDLED_MODELS.iter().map(|m| m.name).collect()
}
/// Load a bundled NAM model by display name.
/// Returns `None` if the name isn't found, `Some(Err(...))` on load failure.
pub fn load_bundled_model(name: &str) -> Option<Result<NamModel, String>> {
    eprintln!("[NAM] load_bundled_model: looking up {:?}", name);
    // `?` bails out with None when no bundled entry matches the name.
    let entry = BUNDLED_MODELS.iter().find(|m| m.name == name)?;
    eprintln!("[NAM] Found bundled model: name={}, filename={}, data_len={}", entry.name, entry.filename, entry.data.len());
    let loaded = NamModel::from_bytes(entry.filename, entry.data).map_err(|e| {
        eprintln!("[NAM] from_bytes failed for {}: {}", entry.filename, e);
        e.to_string()
    });
    Some(loaded)
}

View File

@ -7,8 +7,8 @@ const PARAM_WET_DRY: u32 = 2;
const MAX_DELAY_SECONDS: f32 = 2.0;
/// Stereo delay node with feedback
pub struct DelayNode {
/// Stereo echo node with feedback
pub struct EchoNode {
name: String,
delay_time: f32, // seconds
feedback: f32, // 0.0 to 0.95
@ -26,7 +26,7 @@ pub struct DelayNode {
parameters: Vec<Parameter>,
}
impl DelayNode {
impl EchoNode {
pub fn new(name: impl Into<String>) -> Self {
let name = name.into();
@ -79,7 +79,7 @@ impl DelayNode {
}
}
impl AudioNode for DelayNode {
impl AudioNode for EchoNode {
fn category(&self) -> NodeCategory {
NodeCategory::Effect
}
@ -185,7 +185,7 @@ impl AudioNode for DelayNode {
}
fn node_type(&self) -> &str {
"Delay"
"Echo"
}
fn name(&self) -> &str {

View File

@ -1,4 +1,4 @@
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType};
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType, cv_input_or_default};
use crate::audio::midi::MidiEvent;
use crate::dsp::biquad::BiquadFilter;
@ -29,6 +29,8 @@ pub struct FilterNode {
resonance: f32,
filter_type: FilterType,
sample_rate: u32,
/// Last cutoff frequency applied to filter coefficients (for change detection with CV modulation)
last_applied_cutoff: f32,
inputs: Vec<NodePort>,
outputs: Vec<NodePort>,
parameters: Vec<Parameter>,
@ -62,6 +64,7 @@ impl FilterNode {
resonance: 0.707,
filter_type: FilterType::Lowpass,
sample_rate: 44100,
last_applied_cutoff: 1000.0,
inputs,
outputs,
parameters,
@ -150,10 +153,28 @@ impl AudioNode for FilterNode {
output[..len].copy_from_slice(&input[..len]);
// Check for CV modulation (modulates cutoff)
if inputs.len() > 1 && !inputs[1].is_empty() {
// CV input modulates cutoff frequency
// For now, just use the base cutoff - per-sample modulation would be expensive
// TODO: Sample CV at frame rate and update filter periodically
// CV input (0..1) scales the cutoff: 0 = 20 Hz, 1 = base cutoff * 2
// Sample CV at the start of the buffer - per-sample would be too expensive
let cutoff_cv_raw = cv_input_or_default(inputs, 1, 0, f32::NAN);
let effective_cutoff = if cutoff_cv_raw.is_nan() {
self.cutoff
} else {
// Map CV (0..1) to frequency range around the base cutoff
// 0.5 = base cutoff, 0 = cutoff / 4, 1 = cutoff * 4 (two octaves each way)
let octave_shift = (cutoff_cv_raw.clamp(0.0, 1.0) - 0.5) * 4.0;
self.cutoff * 2.0_f32.powf(octave_shift)
};
if (effective_cutoff - self.last_applied_cutoff).abs() > 0.01 {
let new_cutoff = effective_cutoff.clamp(20.0, 20000.0);
self.last_applied_cutoff = new_cutoff;
match self.filter_type {
FilterType::Lowpass => {
self.filter.set_lowpass(new_cutoff, self.resonance, self.sample_rate as f32);
}
FilterType::Highpass => {
self.filter.set_highpass(new_cutoff, self.resonance, self.sample_rate as f32);
}
}
}
// Apply filter (processes stereo interleaved)
@ -179,10 +200,10 @@ impl AudioNode for FilterNode {
// Set filter to match current type
match self.filter_type {
FilterType::Lowpass => {
new_filter.set_lowpass(self.sample_rate as f32, self.cutoff, self.resonance);
new_filter.set_lowpass(self.cutoff, self.resonance, self.sample_rate as f32);
}
FilterType::Highpass => {
new_filter.set_highpass(self.sample_rate as f32, self.cutoff, self.resonance);
new_filter.set_highpass(self.cutoff, self.resonance, self.sample_rate as f32);
}
}
@ -193,6 +214,7 @@ impl AudioNode for FilterNode {
resonance: self.resonance,
filter_type: self.filter_type,
sample_rate: self.sample_rate,
last_applied_cutoff: self.cutoff,
inputs: self.inputs.clone(),
outputs: self.outputs.clone(),
parameters: self.parameters.clone(),

View File

@ -1,4 +1,4 @@
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType};
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType, cv_input_or_default};
use crate::audio::midi::MidiEvent;
use std::f32::consts::PI;
@ -256,18 +256,11 @@ impl AudioNode for FMSynthNode {
let frames = output.len() / 2;
for frame in 0..frames {
// Read CV inputs
let voct = if inputs.len() > 0 && !inputs[0].is_empty() {
inputs[0][frame.min(inputs[0].len() / 2 - 1) * 2]
} else {
0.0
};
let gate = if inputs.len() > 1 && !inputs[1].is_empty() {
inputs[1][frame.min(inputs[1].len() / 2 - 1) * 2]
} else {
0.0
};
// Read CV inputs (both are mono signals)
// V/Oct: when unconnected, defaults to 0.0 (A4 440 Hz)
let voct = cv_input_or_default(inputs, 0, frame, 0.0);
// Gate: when unconnected, defaults to 0.0 (off)
let gate = cv_input_or_default(inputs, 1, frame, 0.0);
// Update state
self.current_frequency = Self::voct_to_freq(voct);

View File

@ -1,4 +1,4 @@
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType};
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType, cv_input_or_default};
use crate::audio::midi::MidiEvent;
const PARAM_GAIN: u32 = 0;
@ -90,15 +90,11 @@ impl AudioNode for GainNode {
let frames = input.len().min(output.len()) / 2;
for frame in 0..frames {
// Calculate final gain
let mut final_gain = self.gain;
// CV input acts as a VCA (voltage-controlled amplifier)
// CV ranges from 0.0 (silence) to 1.0 (full gain parameter value)
if inputs.len() > 1 && frame < inputs[1].len() {
let cv = inputs[1][frame];
final_gain *= cv; // Multiply gain by CV (0.0 = silence, 1.0 = full gain)
}
// When unconnected (NaN), defaults to 1.0 (no modulation, use gain parameter as-is)
let cv = cv_input_or_default(inputs, 1, frame, 1.0);
let final_gain = self.gain * cv;
// Apply gain to both channels
output[frame * 2] = input[frame * 2] * final_gain; // Left

View File

@ -1,48 +1,74 @@
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType};
use crate::audio::midi::MidiEvent;
const PARAM_GAIN_1: u32 = 0;
const PARAM_GAIN_2: u32 = 1;
const PARAM_GAIN_3: u32 = 2;
const PARAM_GAIN_4: u32 = 3;
/// Mixer node - combines multiple audio inputs with independent gain controls
/// Mixer node — combines N audio inputs with independent gain controls.
///
/// The number of input ports is dynamic: one spare unconnected port is always present
/// beyond however many are currently wired, so users can keep patching in without
/// manually adding inputs. Port count is managed by `AudioGraph::connect` /
/// `AudioGraph::disconnect` calling `ensure_min_ports` / `resize`.
///
/// Gain values are stored separately from the port list so they survive resize
/// operations and can be set via `set_parameter` before the port is visible.
pub struct MixerNode {
name: String,
gains: [f32; 4],
/// Displayed input ports. Length = num_ports (connected + 1 spare).
inputs: Vec<NodePort>,
outputs: Vec<NodePort>,
/// Per-channel gains, indexed by port. May be longer than `inputs` if gains
/// were set before ports were created (handled gracefully).
gains: Vec<f32>,
/// Mirrored parameter list so `parameters()` stays in sync with `inputs`.
parameters: Vec<Parameter>,
}
impl MixerNode {
pub fn new(name: impl Into<String>) -> Self {
let name = name.into();
let mut node = Self {
name: name.into(),
inputs: Vec::new(),
outputs: vec![NodePort::new("Mixed Out", SignalType::Audio, 0)],
gains: Vec::new(),
parameters: Vec::new(),
};
node.resize(1); // start with one spare input
node
}
let inputs = vec![
NodePort::new("Input 1", SignalType::Audio, 0),
NodePort::new("Input 2", SignalType::Audio, 1),
NodePort::new("Input 3", SignalType::Audio, 2),
NodePort::new("Input 4", SignalType::Audio, 3),
];
/// Return the current number of input ports (connected + 1 spare).
pub fn num_inputs(&self) -> usize {
self.inputs.len()
}
let outputs = vec![
NodePort::new("Mixed Out", SignalType::Audio, 0),
];
/// Set the exact number of input ports.
///
/// Existing gain values are preserved. Truncates spare gains when shrinking,
/// but gain slots that have already been written survive a grow-shrink-grow cycle.
pub fn resize(&mut self, n: usize) {
let n = n.max(1); // always at least one spare
let parameters = vec![
Parameter::new(PARAM_GAIN_1, "Gain 1", 0.0, 2.0, 1.0, ParameterUnit::Generic),
Parameter::new(PARAM_GAIN_2, "Gain 2", 0.0, 2.0, 1.0, ParameterUnit::Generic),
Parameter::new(PARAM_GAIN_3, "Gain 3", 0.0, 2.0, 1.0, ParameterUnit::Generic),
Parameter::new(PARAM_GAIN_4, "Gain 4", 0.0, 2.0, 1.0, ParameterUnit::Generic),
];
self.inputs = (0..n)
.map(|i| NodePort::new(format!("Input {}", i + 1).as_str(), SignalType::Audio, i))
.collect();
Self {
name,
gains: [1.0, 1.0, 1.0, 1.0],
inputs,
outputs,
parameters,
// Extend gains with 1.0 for new slots; preserve existing values.
if self.gains.len() < n {
self.gains.resize(n, 1.0);
}
self.parameters = (0..n)
.map(|i| {
Parameter::new(i as u32, format!("Gain {}", i + 1).as_str(), 0.0, 2.0, 1.0, ParameterUnit::Generic)
})
.collect();
}
/// Ensure at least `n` input ports exist, growing if needed but never shrinking.
///
/// Called by `AudioGraph::connect` after adding a connection.
pub fn ensure_min_ports(&mut self, n: usize) {
if n > self.inputs.len() {
self.resize(n);
}
}
}
@ -65,23 +91,17 @@ impl AudioNode for MixerNode {
}
fn set_parameter(&mut self, id: u32, value: f32) {
match id {
PARAM_GAIN_1 => self.gains[0] = value.clamp(0.0, 2.0),
PARAM_GAIN_2 => self.gains[1] = value.clamp(0.0, 2.0),
PARAM_GAIN_3 => self.gains[2] = value.clamp(0.0, 2.0),
PARAM_GAIN_4 => self.gains[3] = value.clamp(0.0, 2.0),
_ => {}
let idx = id as usize;
// Extend gains if this port hasn't been created yet (e.g. loaded from preset
// before connections are restored).
if idx >= self.gains.len() {
self.gains.resize(idx + 1, 1.0);
}
self.gains[idx] = value.clamp(0.0, 2.0);
}
fn get_parameter(&self, id: u32) -> f32 {
match id {
PARAM_GAIN_1 => self.gains[0],
PARAM_GAIN_2 => self.gains[1],
PARAM_GAIN_3 => self.gains[2],
PARAM_GAIN_4 => self.gains[3],
_ => 0.0,
}
self.gains.get(id as usize).copied().unwrap_or(1.0)
}
fn process(
@ -97,32 +117,23 @@ impl AudioNode for MixerNode {
}
let output = &mut outputs[0];
// Audio signals are stereo (interleaved L/R)
let frames = output.len() / 2;
// Clear output buffer first
output.fill(0.0);
// Mix each input with its gain
for (input_idx, input) in inputs.iter().enumerate().take(4) {
if input_idx >= self.gains.len() {
break;
}
let gain = self.gains[input_idx];
for (input_idx, input) in inputs.iter().enumerate() {
let gain = self.gains.get(input_idx).copied().unwrap_or(1.0);
let input_frames = input.len() / 2;
let process_frames = frames.min(input_frames);
for frame in 0..process_frames {
output[frame * 2] += input[frame * 2] * gain; // Left
output[frame * 2] += input[frame * 2] * gain; // Left
output[frame * 2 + 1] += input[frame * 2 + 1] * gain; // Right
}
}
}
fn reset(&mut self) {
// No state to reset
// No per-frame state
}
fn node_type(&self) -> &str {
@ -136,9 +147,9 @@ impl AudioNode for MixerNode {
fn clone_node(&self) -> Box<dyn AudioNode> {
Box::new(Self {
name: self.name.clone(),
gains: self.gains,
inputs: self.inputs.clone(),
outputs: self.outputs.clone(),
gains: self.gains.clone(),
parameters: self.parameters.clone(),
})
}

View File

@ -1,13 +1,18 @@
mod amp_sim;
pub mod bundled_models;
mod adsr;
mod subtrack_inputs;
mod arpeggiator;
mod audio_input;
mod audio_to_cv;
mod automation_input;
mod beat;
mod bit_crusher;
mod bpm_detector;
mod chorus;
mod compressor;
mod constant;
mod delay;
mod echo;
mod distortion;
mod envelope_follower;
mod eq;
@ -32,24 +37,31 @@ mod quantizer;
mod reverb;
mod ring_modulator;
mod sample_hold;
mod script_node;
mod sequencer;
mod simple_sampler;
mod slew_limiter;
mod splitter;
mod svf;
mod template_io;
mod vibrato;
mod vocoder;
mod voice_allocator;
mod wavetable_oscillator;
pub use amp_sim::AmpSimNode;
pub use adsr::ADSRNode;
pub use arpeggiator::ArpeggiatorNode;
pub use audio_input::AudioInputNode;
pub use audio_to_cv::AudioToCVNode;
pub use automation_input::{AutomationInputNode, AutomationKeyframe, InterpolationType};
pub use beat::BeatNode;
pub use bit_crusher::BitCrusherNode;
pub use bpm_detector::BpmDetectorNode;
pub use chorus::ChorusNode;
pub use compressor::CompressorNode;
pub use constant::ConstantNode;
pub use delay::DelayNode;
pub use echo::EchoNode;
pub use distortion::DistortionNode;
pub use envelope_follower::EnvelopeFollowerNode;
pub use eq::EQNode;
@ -74,10 +86,75 @@ pub use quantizer::QuantizerNode;
pub use reverb::ReverbNode;
pub use ring_modulator::RingModulatorNode;
pub use sample_hold::SampleHoldNode;
pub use script_node::ScriptNode;
pub use sequencer::SequencerNode;
pub use simple_sampler::SimpleSamplerNode;
pub use slew_limiter::SlewLimiterNode;
pub use splitter::SplitterNode;
pub use svf::SVFNode;
pub use template_io::{TemplateInputNode, TemplateOutputNode};
pub use vibrato::VibratoNode;
pub use vocoder::VocoderNode;
pub use voice_allocator::VoiceAllocatorNode;
pub use wavetable_oscillator::WavetableOscillatorNode;
pub use subtrack_inputs::SubtrackInputsNode;
/// Create a node instance by type name string.
///
/// Returns `None` for unknown type names. `sample_rate` and `buffer_size`
/// are only used by VoiceAllocator; other nodes ignore them.
pub fn create_node(node_type: &str, sample_rate: u32, buffer_size: usize) -> Option<Box<dyn super::AudioNode>> {
    Some(match node_type {
        "Oscillator" => Box::new(OscillatorNode::new("Oscillator")),
        "Gain" => Box::new(GainNode::new("Gain")),
        "Mixer" => Box::new(MixerNode::new("Mixer")),
        "Filter" => Box::new(FilterNode::new("Filter")),
        "SVF" => Box::new(SVFNode::new("SVF")),
        "ADSR" => Box::new(ADSRNode::new("ADSR")),
        "LFO" => Box::new(LFONode::new("LFO")),
        "NoiseGenerator" => Box::new(NoiseGeneratorNode::new("Noise")),
        "Splitter" => Box::new(SplitterNode::new("Splitter")),
        "Pan" => Box::new(PanNode::new("Pan")),
        "Quantizer" => Box::new(QuantizerNode::new("Quantizer")),
        // "Delay" is accepted as an alias so older documents that used the
        // previous node name still load — NOTE(review): keep this alias.
        "Echo" | "Delay" => Box::new(EchoNode::new("Echo")),
        "Distortion" => Box::new(DistortionNode::new("Distortion")),
        "Reverb" => Box::new(ReverbNode::new("Reverb")),
        "Chorus" => Box::new(ChorusNode::new("Chorus")),
        "Compressor" => Box::new(CompressorNode::new("Compressor")),
        "Constant" => Box::new(ConstantNode::new("Constant")),
        "BpmDetector" => Box::new(BpmDetectorNode::new("BPM Detector")),
        "Beat" => Box::new(BeatNode::new("Beat")),
        "Arpeggiator" => Box::new(ArpeggiatorNode::new("Arpeggiator")),
        "Sequencer" => Box::new(SequencerNode::new("Sequencer")),
        "Script" => Box::new(ScriptNode::new("Script")),
        "EnvelopeFollower" => Box::new(EnvelopeFollowerNode::new("Envelope Follower")),
        "Limiter" => Box::new(LimiterNode::new("Limiter")),
        "Math" => Box::new(MathNode::new("Math")),
        "EQ" => Box::new(EQNode::new("EQ")),
        "Flanger" => Box::new(FlangerNode::new("Flanger")),
        "FMSynth" => Box::new(FMSynthNode::new("FM Synth")),
        "Phaser" => Box::new(PhaserNode::new("Phaser")),
        "BitCrusher" => Box::new(BitCrusherNode::new("Bit Crusher")),
        "Vocoder" => Box::new(VocoderNode::new("Vocoder")),
        "RingModulator" => Box::new(RingModulatorNode::new("Ring Modulator")),
        "SampleHold" => Box::new(SampleHoldNode::new("Sample & Hold")),
        "WavetableOscillator" => Box::new(WavetableOscillatorNode::new("Wavetable")),
        "SimpleSampler" => Box::new(SimpleSamplerNode::new("Sampler")),
        "SlewLimiter" => Box::new(SlewLimiterNode::new("Slew Limiter")),
        "MultiSampler" => Box::new(MultiSamplerNode::new("Multi Sampler")),
        "MidiInput" => Box::new(MidiInputNode::new("MIDI Input")),
        "MidiToCV" => Box::new(MidiToCVNode::new("MIDI→CV")),
        "AudioToCV" => Box::new(AudioToCVNode::new("Audio→CV")),
        "AudioInput" => Box::new(AudioInputNode::new("Audio Input")),
        "AutomationInput" => Box::new(AutomationInputNode::new("Automation")),
        "Oscilloscope" => Box::new(OscilloscopeNode::new("Oscilloscope")),
        "TemplateInput" => Box::new(TemplateInputNode::new("Template Input")),
        "TemplateOutput" => Box::new(TemplateOutputNode::new("Template Output")),
        // VoiceAllocator is the only node that needs the engine's sample rate
        // and buffer size at construction time.
        "VoiceAllocator" => Box::new(VoiceAllocatorNode::new("VoiceAllocator", sample_rate, buffer_size)),
        "Vibrato" => Box::new(VibratoNode::new("Vibrato")),
        "AmpSim" => Box::new(AmpSimNode::new("Amp Sim")),
        "AudioOutput" => Box::new(AudioOutputNode::new("Output")),
        "SubtrackInputs" => Box::new(SubtrackInputsNode::new("Subtrack Inputs", vec![])),
        _ => return None,
    })
}

View File

@ -231,7 +231,7 @@ impl Voice {
envelope_phase: EnvelopePhase::Attack,
envelope_value: 0.0,
crossfade_buffer: Vec::new(),
crossfade_length: 1000, // ~20ms at 48kHz (longer for smoother loops)
crossfade_length: 4800, // ~100ms at 48kHz — hides loop seams in sustained instruments
}
}
}
@ -458,6 +458,16 @@ impl MultiSamplerNode {
Ok(())
}
/// Remove every sample layer and silence any voices still playing them.
pub fn clear_layers(&mut self) {
    self.layers.clear();
    self.layer_infos.clear();
    // Active voices may still reference the layers we just dropped, so
    // deactivate them all.
    self.voices.iter_mut().for_each(|voice| voice.is_active = false);
}
/// Find the best matching layer for a given note and velocity
fn find_layer(&self, note: u8, velocity: u8) -> Option<usize> {
self.layers

View File

@ -1,4 +1,4 @@
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType};
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType, cv_input_or_default};
use crate::audio::midi::MidiEvent;
use std::f32::consts::PI;
@ -124,26 +124,28 @@ impl AudioNode for OscillatorNode {
let frames = output.len() / 2;
for frame in 0..frames {
// Start with base frequency
let mut frequency = self.frequency;
// V/Oct input: Standard V/Oct (0V = A4 440Hz, ±1V per octave)
if !inputs.is_empty() && frame < inputs[0].len() {
let voct = inputs[0][frame]; // Read V/Oct CV (mono)
// Convert V/Oct to frequency: f = 440 * 2^(voct)
// Port 0: V/Oct CV input
// If connected, interprets the CV signal as V/Oct (440 * 2^voct)
// If unconnected, uses self.frequency directly as Hz
let voct = cv_input_or_default(inputs, 0, frame, f32::NAN);
let base_frequency = if voct.is_nan() {
// Unconnected: use frequency parameter directly
self.frequency
} else {
// Connected: convert V/Oct to frequency
// voct = 0.0 -> 440 Hz (A4)
// voct = 1.0 -> 880 Hz (A5)
// voct = -0.75 -> 261.6 Hz (C4, middle C)
frequency = 440.0 * 2.0_f32.powf(voct);
}
440.0 * 2.0_f32.powf(voct)
};
// FM input: modulates the frequency
if inputs.len() > 1 && frame < inputs[1].len() {
let fm = inputs[1][frame]; // Read FM CV (mono)
frequency *= 1.0 + fm;
}
let freq_mod = frequency;
// Port 1: FM CV input
// If connected, applies FM modulation (multiply by 1 + fm)
// If unconnected, no modulation (fm = 0.0)
let fm = cv_input_or_default(inputs, 1, frame, 0.0);
let freq_mod = base_frequency * (1.0 + fm);
// Generate waveform sample based on waveform type
let sample = match self.waveform {

View File

@ -87,8 +87,9 @@ pub struct OscilloscopeNode {
trigger_period: usize, // Period in samples for V/oct triggering
// Shared buffers for reading from Tauri commands
buffer: Arc<Mutex<CircularBuffer>>, // Audio buffer
buffer: Arc<Mutex<CircularBuffer>>, // Audio buffer (mono downmix)
cv_buffer: Arc<Mutex<CircularBuffer>>, // CV buffer
mono_buf: Vec<f32>, // Scratch buffer for stereo-to-mono downmix
inputs: Vec<NodePort>,
outputs: Vec<NodePort>,
@ -101,8 +102,7 @@ impl OscilloscopeNode {
let inputs = vec![
NodePort::new("Audio In", SignalType::Audio, 0),
NodePort::new("V/oct", SignalType::CV, 1),
NodePort::new("CV In", SignalType::CV, 2),
NodePort::new("CV In", SignalType::CV, 1),
];
let outputs = vec![
@ -126,6 +126,7 @@ impl OscilloscopeNode {
trigger_period: 480, // Default to ~100Hz at 48kHz
buffer: Arc::new(Mutex::new(CircularBuffer::new(BUFFER_SIZE))),
cv_buffer: Arc::new(Mutex::new(CircularBuffer::new(BUFFER_SIZE))),
mono_buf: vec![0.0; 2048],
inputs,
outputs,
parameters,
@ -221,41 +222,50 @@ impl AudioNode for OscilloscopeNode {
let input = inputs[0];
let output = &mut outputs[0];
let len = input.len().min(output.len());
let stereo_len = input.len().min(output.len());
let frame_count = stereo_len / 2;
// Read V/oct input if available and update trigger period
// Read CV input if available (port 1) — used for both display and V/Oct triggering
if inputs.len() > 1 && !inputs[1].is_empty() {
self.voct_value = inputs[1][0]; // Use first sample of V/oct input
let frequency = Self::voct_to_frequency(self.voct_value);
// Calculate period in samples, clamped to reasonable range
let period_samples = (sample_rate as f32 / frequency).max(1.0);
self.trigger_period = period_samples as usize;
let cv_input = inputs[1];
let cv_len = frame_count.min(cv_input.len());
// Check if connected (not NaN sentinel)
if cv_len > 0 && !cv_input[0].is_nan() {
// Update V/Oct trigger period from CV value
self.voct_value = cv_input[0];
let frequency = Self::voct_to_frequency(self.voct_value);
let period_samples = (sample_rate as f32 / frequency).max(1.0);
self.trigger_period = period_samples as usize;
// Capture CV samples to buffer
if let Ok(mut cv_buffer) = self.cv_buffer.lock() {
cv_buffer.write(&cv_input[..cv_len]);
}
}
}
// Update sample counter for V/oct triggering
if self.trigger_mode == TriggerMode::VoltPerOctave {
self.sample_counter = (self.sample_counter + len) % self.trigger_period;
self.sample_counter = (self.sample_counter + frame_count) % self.trigger_period;
}
// Pass through audio (copy input to output)
output[..len].copy_from_slice(&input[..len]);
output[..stereo_len].copy_from_slice(&input[..stereo_len]);
// Capture audio samples to buffer
// Capture audio as mono downmix to match CV time scale
if let Ok(mut buffer) = self.buffer.lock() {
buffer.write(&input[..len]);
}
// Capture CV samples if CV input is connected (input 2)
if inputs.len() > 2 && !inputs[2].is_empty() {
let cv_input = inputs[2];
if let Ok(mut cv_buffer) = self.cv_buffer.lock() {
cv_buffer.write(&cv_input[..len.min(cv_input.len())]);
for frame in 0..frame_count {
let left = input[frame * 2];
let right = input[frame * 2 + 1];
self.mono_buf[frame] = (left + right) * 0.5;
}
buffer.write(&self.mono_buf[..frame_count]);
}
// Update last sample for trigger detection (use left channel, frame 0)
if !input.is_empty() {
self.last_sample = input[0];
// Update last sample for trigger detection
if frame_count > 0 {
self.last_sample = (input[0] + input[1]) * 0.5;
}
}
@ -286,6 +296,7 @@ impl AudioNode for OscilloscopeNode {
trigger_period: 480,
buffer: Arc::new(Mutex::new(CircularBuffer::new(BUFFER_SIZE))),
cv_buffer: Arc::new(Mutex::new(CircularBuffer::new(BUFFER_SIZE))),
mono_buf: vec![0.0; 2048],
inputs: self.inputs.clone(),
outputs: self.outputs.clone(),
parameters: self.parameters.clone(),

View File

@ -1,4 +1,4 @@
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType};
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType, cv_input_or_default};
use crate::audio::midi::MidiEvent;
use std::f32::consts::PI;
@ -113,18 +113,12 @@ impl AudioNode for PanNode {
let frames_to_process = frames.min(output_frames);
for frame in 0..frames_to_process {
// Get base pan position
let mut pan = self.pan;
// Pan CV input: when connected, replaces parameter; when unconnected, uses parameter
// CV is in 0-1 range, mapped to -1 to +1 pan range
let cv_raw = cv_input_or_default(inputs, 1, frame, (self.pan + 1.0) * 0.5);
let pan = (cv_raw * 2.0 - 1.0).clamp(-1.0, 1.0);
// Add CV modulation if connected
if inputs.len() > 1 && frame < inputs[1].len() {
let cv = inputs[1][frame]; // CV is mono
// CV is 0-1, map to -1 to +1 range
pan += cv * 2.0 - 1.0;
pan = pan.clamp(-1.0, 1.0);
}
// Update gains if pan changed from CV
// Calculate gains using constant-power panning law
let angle = (pan + 1.0) * 0.5 * PI / 2.0;
let left_gain = angle.cos();
let right_gain = angle.sin();

View File

@ -0,0 +1,229 @@
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType};
use crate::audio::midi::MidiEvent;
use beamdsp::{ScriptVM, SampleSlot};
/// A user-scriptable audio node powered by the BeamDSP VM
pub struct ScriptNode {
    name: String,               // instance name, returned by name()
    script_name: String,        // name declared by the compiled script
    inputs: Vec<NodePort>,      // input ports; rebuilt by set_script
    outputs: Vec<NodePort>,     // output ports; rebuilt by set_script
    parameters: Vec<Parameter>, // parameters declared by the script
    category: NodeCategory,     // category declared by the script
    vm: ScriptVM,               // compiled bytecode VM that performs the processing
    source_code: String,        // source of the currently loaded script
    ui_declaration: beamdsp::UiDeclaration, // UI layout declared by the script
}
impl ScriptNode {
    /// Create a default, empty Script node.
    ///
    /// The node starts with one audio-in/audio-out port pair, no parameters,
    /// and a VM containing only a halt opcode, so it is a no-op until
    /// `set_script` installs a real program.
    pub fn new(name: impl Into<String>) -> Self {
        // A program consisting of the single opcode 255 (Halt): does nothing.
        let noop_vm = ScriptVM::new(vec![255], Vec::new(), Vec::new(), 0, &[], 0, &[], 0);
        Self {
            name: name.into(),
            script_name: "Script".into(),
            inputs: vec![NodePort::new("Audio In", SignalType::Audio, 0)],
            outputs: vec![NodePort::new("Audio Out", SignalType::Audio, 0)],
            parameters: Vec::new(),
            category: NodeCategory::Effect,
            vm: noop_vm,
            source_code: String::new(),
            ui_declaration: beamdsp::UiDeclaration { elements: Vec::new() },
        }
    }
/// Compile and load a new script, replacing the current one.
/// Returns Ok(ui_declaration) on success, or Err(error_message) on failure.
pub fn set_script(&mut self, source: &str) -> Result<beamdsp::UiDeclaration, String> {
let compiled = beamdsp::compile(source).map_err(|e| e.to_string())?;
// Update ports
self.inputs = compiled.input_ports.iter().enumerate().map(|(i, p)| {
let sig = match p.signal {
beamdsp::ast::SignalKind::Audio => SignalType::Audio,
beamdsp::ast::SignalKind::Cv => SignalType::CV,
beamdsp::ast::SignalKind::Midi => SignalType::Midi,
};
NodePort::new(&p.name, sig, i)
}).collect();
self.outputs = compiled.output_ports.iter().enumerate().map(|(i, p)| {
let sig = match p.signal {
beamdsp::ast::SignalKind::Audio => SignalType::Audio,
beamdsp::ast::SignalKind::Cv => SignalType::CV,
beamdsp::ast::SignalKind::Midi => SignalType::Midi,
};
NodePort::new(&p.name, sig, i)
}).collect();
// Update parameters
self.parameters = compiled.parameters.iter().enumerate().map(|(i, p)| {
let unit = if p.unit == "dB" {
ParameterUnit::Decibels
} else if p.unit == "Hz" {
ParameterUnit::Frequency
} else if p.unit == "s" {
ParameterUnit::Time
} else if p.unit == "%" {
ParameterUnit::Percent
} else {
ParameterUnit::Generic
};
Parameter::new(i as u32, &p.name, p.min, p.max, p.default, unit)
}).collect();
// Update category
self.category = match compiled.category {
beamdsp::ast::CategoryKind::Generator => NodeCategory::Generator,
beamdsp::ast::CategoryKind::Effect => NodeCategory::Effect,
beamdsp::ast::CategoryKind::Utility => NodeCategory::Utility,
};
self.script_name = compiled.name.clone();
self.vm = compiled.vm;
self.source_code = compiled.source;
self.ui_declaration = compiled.ui_declaration.clone();
Ok(compiled.ui_declaration)
}
/// Set audio data for a sample slot
pub fn set_sample(&mut self, slot_index: usize, data: Vec<f32>, sample_rate: u32, name: String) {
if slot_index < self.vm.sample_slots.len() {
let frame_count = data.len() / 2;
self.vm.sample_slots[slot_index] = SampleSlot {
data,
frame_count,
sample_rate,
name,
};
}
}
pub fn source_code(&self) -> &str {
&self.source_code
}
pub fn ui_declaration(&self) -> &beamdsp::UiDeclaration {
&self.ui_declaration
}
pub fn sample_slot_names(&self) -> Vec<String> {
self.vm.sample_slots.iter().map(|s| s.name.clone()).collect()
}
}
impl AudioNode for ScriptNode {
    fn category(&self) -> NodeCategory {
        self.category
    }

    fn inputs(&self) -> &[NodePort] {
        &self.inputs
    }

    fn outputs(&self) -> &[NodePort] {
        &self.outputs
    }

    fn parameters(&self) -> &[Parameter] {
        &self.parameters
    }

    /// Write a parameter value straight into the VM's parameter table.
    /// Out-of-range ids are ignored.
    fn set_parameter(&mut self, id: u32, value: f32) {
        let idx = id as usize;
        let params = self.vm.params_mut();
        if idx < params.len() {
            params[idx] = value;
        }
    }

    /// Read a parameter from the VM's parameter table; 0.0 for out-of-range ids.
    fn get_parameter(&self, id: u32) -> f32 {
        let idx = id as usize;
        let params = self.vm.params();
        if idx < params.len() {
            params[idx]
        } else {
            0.0
        }
    }

    /// Run the script VM over the block.
    ///
    /// The buffer size is taken from the first output buffer. On a VM error
    /// all outputs are zeroed — we fail silent on the audio thread instead of
    /// propagating or logging.
    fn process(
        &mut self,
        inputs: &[&[f32]],
        outputs: &mut [&mut [f32]],
        _midi_inputs: &[&[MidiEvent]],
        _midi_outputs: &mut [&mut Vec<MidiEvent>],
        sample_rate: u32,
    ) {
        if outputs.is_empty() {
            return;
        }
        let buffer_size = outputs[0].len();
        // `.is_err()` instead of `if let Err(_)` (clippy: redundant_pattern_matching).
        if self.vm.execute(inputs, outputs, sample_rate, buffer_size).is_err() {
            for out in outputs.iter_mut() {
                out.fill(0.0);
            }
        }
    }

    fn reset(&mut self) {
        self.vm.reset_state();
    }

    fn node_type(&self) -> &str {
        "Script"
    }

    fn name(&self) -> &str {
        &self.name
    }

    /// Clone the node; the copy's VM runtime state is reset so it starts clean.
    fn clone_node(&self) -> Box<dyn AudioNode> {
        let mut cloned = Self {
            name: self.name.clone(),
            script_name: self.script_name.clone(),
            inputs: self.inputs.clone(),
            outputs: self.outputs.clone(),
            parameters: self.parameters.clone(),
            category: self.category,
            vm: self.vm.clone(),
            source_code: self.source_code.clone(),
            ui_declaration: self.ui_declaration.clone(),
        };
        cloned.vm.reset_state();
        Box::new(cloned)
    }

    fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
        self
    }

    fn as_any(&self) -> &dyn std::any::Any {
        self
    }
}

View File

@ -0,0 +1,307 @@
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType, cv_input_or_default};
use crate::audio::midi::MidiEvent;
// Parameter ids for SequencerNode.
const PARAM_MODE: u32 = 0;       // step-advance mode (see SeqMode)
const PARAM_STEPS: u32 = 1;      // step-count selector: 0 -> 4, 1 -> 8, 2 -> 16
const PARAM_SCALE_MODE: u32 = 2; // chromatic vs. diatonic row mapping (see ScaleMode)
const PARAM_KEY: u32 = 3;        // root key, 0-11
const PARAM_SCALE_TYPE: u32 = 4; // index into SCALES
const PARAM_OCTAVE: u32 = 5;     // base octave (0-8) used by row_to_midi_note
const PARAM_VELOCITY: u32 = 6;   // note-on velocity, clamped 1-127
const PARAM_ROW_BASE: u32 = 7;   // first of 16 consecutive row-bitmask parameter ids
// Number of note rows scanned during playback (the grid stores 16 row patterns).
const NUM_ROWS: usize = 8;
/// How the sequencer advances through steps as the phase input evolves.
#[derive(Debug, Clone, Copy, PartialEq)]
enum SeqMode {
    /// Advance a single step each time the phase wraps around.
    OnePerCycle = 0,
    /// Map the whole 0..1 phase range across all steps every cycle.
    AllPerCycle = 1,
}

impl SeqMode {
    /// Decode a parameter value: anything rounding to >= 1 selects AllPerCycle.
    fn from_f32(v: f32) -> Self {
        if v.round() >= 1.0 {
            Self::AllPerCycle
        } else {
            Self::OnePerCycle
        }
    }
}
/// How grid rows map onto pitches: raw semitones or degrees of a scale.
#[derive(Debug, Clone, Copy, PartialEq)]
enum ScaleMode {
    /// Row N lies N semitones above the root.
    Chromatic = 0,
    /// Row N is the Nth degree of the selected scale.
    Diatonic = 1,
}

impl ScaleMode {
    /// Decode a parameter value: anything rounding to >= 1 selects Diatonic.
    fn from_f32(v: f32) -> Self {
        if v.round() >= 1.0 {
            Self::Diatonic
        } else {
            Self::Chromatic
        }
    }
}
/// Scale interval patterns (semitones from root).
/// Indexed by PARAM_SCALE_TYPE. Patterns have 5-7 degrees; diatonic row
/// mapping wraps into the next octave after `len()` rows.
const SCALES: &[&[u8]] = &[
    &[0, 2, 4, 5, 7, 9, 11], // Major
    &[0, 2, 3, 5, 7, 8, 10], // Minor
    &[0, 2, 3, 5, 7, 9, 10], // Dorian
    &[0, 2, 4, 5, 7, 9, 10], // Mixolydian
    &[0, 2, 4, 7, 9], // Pentatonic Major
    &[0, 3, 5, 7, 10], // Pentatonic Minor
    &[0, 3, 5, 6, 7, 10], // Blues
    &[0, 2, 3, 5, 7, 8, 11], // Harmonic Minor
];
/// Step Sequencer node — MxN grid of note triggers with CV phase input and MIDI output.
pub struct SequencerNode {
    name: String,
    /// Grid state: row_patterns[row] is a u16 bitmask (bit N = step N active)
    row_patterns: [u16; 16],
    /// Active step count: 4, 8, or 16 (decoded from the Steps parameter)
    num_steps: usize,
    /// Scale mapping
    scale_mode: ScaleMode,
    key: u8,            // root key, 0-11
    scale_type: usize,  // index into SCALES
    base_octave: u8,    // base octave for pitch mapping, 0-8
    velocity: u8,       // note-on velocity, 1-127
    /// Playback state
    mode: SeqMode,
    current_step: usize,
    prev_phase: f32, // phase value from the previous sample, for wrap detection
    /// Notes currently "on" from the previous step
    prev_active_notes: Vec<u8>,
    inputs: Vec<NodePort>,
    outputs: Vec<NodePort>,
    parameters: Vec<Parameter>,
}
impl SequencerNode {
    /// Build a sequencer with default settings: 16 steps, chromatic mapping,
    /// key 0, octave 4, velocity 100, one-step-per-cycle advance, empty grid.
    pub fn new(name: impl Into<String>) -> Self {
        // Fixed parameter set first, then one bitmask parameter per grid row.
        let mut parameters = vec![
            Parameter::new(PARAM_MODE, "Mode", 0.0, 1.0, 0.0, ParameterUnit::Generic),
            Parameter::new(PARAM_STEPS, "Steps", 0.0, 2.0, 2.0, ParameterUnit::Generic),
            Parameter::new(PARAM_SCALE_MODE, "Scale Mode", 0.0, 1.0, 0.0, ParameterUnit::Generic),
            Parameter::new(PARAM_KEY, "Key", 0.0, 11.0, 0.0, ParameterUnit::Generic),
            Parameter::new(PARAM_SCALE_TYPE, "Scale", 0.0, 7.0, 0.0, ParameterUnit::Generic),
            Parameter::new(PARAM_OCTAVE, "Octave", 0.0, 8.0, 4.0, ParameterUnit::Generic),
            Parameter::new(PARAM_VELOCITY, "Velocity", 1.0, 127.0, 100.0, ParameterUnit::Generic),
        ];
        parameters.extend((0..16u32).map(|row| {
            Parameter::new(PARAM_ROW_BASE + row, "Row", 0.0, 65535.0, 0.0, ParameterUnit::Generic)
        }));
        Self {
            name: name.into(),
            row_patterns: [0u16; 16],
            num_steps: 16,
            scale_mode: ScaleMode::Chromatic,
            key: 0,
            scale_type: 0,
            base_octave: 4,
            velocity: 100,
            mode: SeqMode::OnePerCycle,
            current_step: 0,
            prev_phase: 0.0,
            prev_active_notes: Vec::new(),
            inputs: vec![NodePort::new("Phase", SignalType::CV, 0)],
            outputs: vec![NodePort::new("MIDI Out", SignalType::Midi, 0)],
            parameters,
        }
    }
fn steps_from_param(v: f32) -> usize {
match v.round() as i32 {
0 => 4,
1 => 8,
_ => 16,
}
}
fn row_to_midi_note(&self, row: usize) -> u8 {
let base = self.key as u16 + self.base_octave as u16 * 12;
let note = match self.scale_mode {
ScaleMode::Chromatic => base + row as u16,
ScaleMode::Diatonic => {
let scale = SCALES[self.scale_type.min(SCALES.len() - 1)];
let octave_offset = row / scale.len();
let degree = row % scale.len();
base + octave_offset as u16 * 12 + scale[degree] as u16
}
};
(note as u8).min(127)
}
}
impl AudioNode for SequencerNode {
    fn category(&self) -> NodeCategory {
        NodeCategory::Utility
    }

    fn inputs(&self) -> &[NodePort] {
        &self.inputs
    }

    fn outputs(&self) -> &[NodePort] {
        &self.outputs
    }

    fn parameters(&self) -> &[Parameter] {
        &self.parameters
    }

    /// Apply a parameter change, decoding enum-like values and clamping ranges.
    fn set_parameter(&mut self, id: u32, value: f32) {
        match id {
            PARAM_MODE => self.mode = SeqMode::from_f32(value),
            PARAM_STEPS => self.num_steps = Self::steps_from_param(value),
            PARAM_SCALE_MODE => self.scale_mode = ScaleMode::from_f32(value),
            PARAM_KEY => self.key = (value.round() as u8).min(11),
            PARAM_SCALE_TYPE => self.scale_type = (value.round() as usize).min(SCALES.len() - 1),
            PARAM_OCTAVE => self.base_octave = (value.round() as u8).min(8),
            PARAM_VELOCITY => self.velocity = (value.round() as u8).clamp(1, 127),
            row_id if (PARAM_ROW_BASE..PARAM_ROW_BASE + 16).contains(&row_id) => {
                self.row_patterns[(row_id - PARAM_ROW_BASE) as usize] = value.round() as u16;
            }
            _ => {}
        }
    }

    /// Report the current value of a parameter; unknown ids read as 0.0.
    fn get_parameter(&self, id: u32) -> f32 {
        match id {
            PARAM_MODE => self.mode as i32 as f32,
            // Re-encode the step count back into the 0/1/2 selector.
            PARAM_STEPS => match self.num_steps {
                4 => 0.0,
                8 => 1.0,
                _ => 2.0,
            },
            PARAM_SCALE_MODE => self.scale_mode as i32 as f32,
            PARAM_KEY => self.key as f32,
            PARAM_SCALE_TYPE => self.scale_type as f32,
            PARAM_OCTAVE => self.base_octave as f32,
            PARAM_VELOCITY => self.velocity as f32,
            row_id if (PARAM_ROW_BASE..PARAM_ROW_BASE + 16).contains(&row_id) => {
                self.row_patterns[(row_id - PARAM_ROW_BASE) as usize] as f32
            }
            _ => 0.0,
        }
    }
    /// Drive the sequencer from the Phase CV input (port 0), emitting MIDI
    /// note-on/note-off events on MIDI output 0.
    ///
    /// Per sample: decode the phase into a step (mode-dependent); on a step
    /// change, diff the new step's active notes against the previous step's
    /// and emit note-offs for dropped notes, note-ons for new ones.
    fn process(
        &mut self,
        inputs: &[&[f32]],
        _outputs: &mut [&mut [f32]],
        _midi_inputs: &[&[MidiEvent]],
        midi_outputs: &mut [&mut Vec<MidiEvent>],
        _sample_rate: u32,
    ) {
        if midi_outputs.is_empty() {
            return;
        }
        // Without a phase input buffer there is nothing to sequence.
        let len = if !inputs.is_empty() { inputs[0].len() } else { return };
        for i in 0..len {
            // Unconnected input reads as the 0.0 default.
            let phase = cv_input_or_default(inputs, 0, i, 0.0).clamp(0.0, 1.0);
            let new_step = match self.mode {
                SeqMode::OnePerCycle => {
                    // Advance one step when the phase wraps high -> low; the
                    // 0.7/0.3 thresholds add hysteresis against jitter.
                    if self.prev_phase > 0.7 && phase < 0.3 {
                        (self.current_step + 1) % self.num_steps
                    } else {
                        self.current_step
                    }
                }
                SeqMode::AllPerCycle => {
                    // Map the 0..1 phase directly onto the step range.
                    ((phase * self.num_steps as f32).floor() as usize)
                        .min(self.num_steps - 1)
                }
            };
            if new_step != self.current_step {
                // Compute active notes for the new step
                let mut new_notes = Vec::new();
                for row in 0..NUM_ROWS {
                    if self.row_patterns[row] & (1 << new_step) != 0 {
                        let note = self.row_to_midi_note(row);
                        new_notes.push(note);
                    }
                }
                // Note-off for notes no longer active
                for &note in &self.prev_active_notes {
                    if !new_notes.contains(&note) {
                        midi_outputs[0].push(MidiEvent::note_off(0.0, 0, note, 0));
                    }
                }
                // Note-on for newly active notes
                for &note in &new_notes {
                    if !self.prev_active_notes.contains(&note) {
                        midi_outputs[0].push(MidiEvent::note_on(0.0, 0, note, self.velocity));
                    }
                }
                self.prev_active_notes = new_notes;
                self.current_step = new_step;
            }
            self.prev_phase = phase;
        }
    }
fn reset(&mut self) {
self.current_step = 0;
self.prev_phase = 0.0;
self.prev_active_notes.clear();
}
fn node_type(&self) -> &str {
"Sequencer"
}
fn name(&self) -> &str {
&self.name
}
fn clone_node(&self) -> Box<dyn AudioNode> {
Box::new(Self {
name: self.name.clone(),
row_patterns: self.row_patterns,
num_steps: self.num_steps,
scale_mode: self.scale_mode,
key: self.key,
scale_type: self.scale_type,
base_octave: self.base_octave,
velocity: self.velocity,
mode: self.mode,
current_step: 0,
prev_phase: 0.0,
prev_active_notes: Vec::new(),
inputs: self.inputs.clone(),
outputs: self.outputs.clone(),
parameters: self.parameters.clone(),
})
}
fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
self
}
fn as_any(&self) -> &dyn std::any::Any {
self
}
}

View File

@ -1,4 +1,4 @@
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType};
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType, cv_input_or_default};
use crate::audio::midi::MidiEvent;
use std::sync::{Arc, Mutex};
@ -25,6 +25,7 @@ pub struct SimpleSamplerNode {
gain: f32,
loop_enabled: bool,
pitch_shift: f32, // Additional pitch shift in semitones
root_note: u8, // MIDI note for original pitch playback (default 69 = A4)
inputs: Vec<NodePort>,
outputs: Vec<NodePort>,
@ -61,6 +62,7 @@ impl SimpleSamplerNode {
gain: 1.0,
loop_enabled: false,
pitch_shift: 0.0,
root_note: 69, // A4 — V/Oct 0.0 from MIDI-to-CV
inputs,
outputs,
parameters,
@ -101,13 +103,25 @@ impl SimpleSamplerNode {
}
/// Convert V/oct CV to playback speed multiplier
/// 0V = 1.0 (original speed), +1V = 2.0 (one octave up), -1V = 0.5 (one octave down)
/// Accounts for root_note: when the incoming MIDI note matches root_note,
/// the sample plays at original speed. V/Oct 0.0 = A4 (MIDI 69) by convention.
fn voct_to_speed(&self, voct: f32) -> f32 {
// Add pitch shift parameter
let total_semitones = voct * 12.0 + self.pitch_shift;
// Offset so root_note plays at original speed
let root_offset = (self.root_note as f32 - 69.0) / 12.0;
let total_semitones = (voct - root_offset) * 12.0 + self.pitch_shift;
2.0_f32.powf(total_semitones / 12.0)
}
/// Set the root note (MIDI note number for original-pitch playback)
pub fn set_root_note(&mut self, note: u8) {
self.root_note = note.min(127);
}
/// Get the current root note
pub fn root_note(&self) -> u8 {
self.root_note
}
/// Read sample at playhead with linear interpolation
fn read_sample(&self, playhead: f32, sample: &[f32]) -> f32 {
if sample.is_empty() {
@ -202,18 +216,11 @@ impl AudioNode for SimpleSamplerNode {
let frames = output.len() / 2;
for frame in 0..frames {
// Read CV inputs
let voct = if !inputs.is_empty() && !inputs[0].is_empty() {
inputs[0][frame.min(inputs[0].len() / 2 - 1) * 2]
} else {
0.0 // Default to original pitch
};
let gate = if inputs.len() > 1 && !inputs[1].is_empty() {
inputs[1][frame.min(inputs[1].len() / 2 - 1) * 2]
} else {
0.0
};
// Read CV inputs (both are mono signals)
// V/Oct: when unconnected, defaults to 0.0 (original pitch)
let voct = cv_input_or_default(inputs, 0, frame, 0.0);
// Gate: when unconnected, defaults to 0.0 (off)
let gate = cv_input_or_default(inputs, 1, frame, 0.0);
// Detect gate trigger (rising edge)
let gate_active = gate > 0.5;

View File

@ -1,4 +1,4 @@
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType};
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType, cv_input_or_default};
use crate::audio::midi::MidiEvent;
const PARAM_RISE_TIME: u32 = 0;
@ -90,9 +90,8 @@ impl AudioNode for SlewLimiterNode {
return;
}
let input = inputs[0];
let output = &mut outputs[0];
let length = input.len().min(output.len());
let length = output.len();
// Calculate maximum change per sample
let sample_duration = 1.0 / sample_rate as f32;
@ -111,7 +110,9 @@ impl AudioNode for SlewLimiterNode {
};
for i in 0..length {
let target = input[i];
// Use cv_input_or_default to handle unconnected inputs (NaN sentinel)
// Default to last_value so output holds steady when unconnected
let target = cv_input_or_default(inputs, 0, i, self.last_value);
let difference = target - self.last_value;
let max_change = if difference > 0.0 {

View File

@ -0,0 +1,177 @@
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, SignalType};
use crate::audio::midi::MidiEvent;
use crate::audio::track::TrackId;
/// Subtrack inputs node for metatracks.
///
/// Exposes one output port per child track so users can route individual subtracks
/// independently in the mixing graph (e.g., for sidechain effects).
///
/// Audio is injected into pre-allocated per-slot buffers by the render system before
/// the graph is processed — no heap allocation occurs during audio rendering.
pub struct SubtrackInputsNode {
    name: String,
    /// Ordered list of (TrackId, display_name) for each subtrack slot.
    /// TrackId is used by the render system to match the right buffer to the right slot.
    subtracks: Vec<(TrackId, String)>,
    /// Output port descriptors — rebuilt whenever subtracks changes.
    outputs: Vec<NodePort>,
    /// Pre-allocated audio buffers, one per subtrack slot (stereo interleaved, length = buffer_size * 2).
    /// Filled by `inject_subtrack_audio` before graph processing; no alloc per frame.
    buffers: Vec<Vec<f32>>,
    /// The buffer size this node was last sized for, in frames.
    buffer_size: usize,
}
impl SubtrackInputsNode {
    /// Create a node with one output slot per subtrack. Buffers start empty
    /// and are sized later by `update_subtracks`.
    pub fn new(name: impl Into<String>, subtracks: Vec<(TrackId, String)>) -> Self {
        let slot_count = subtracks.len();
        Self {
            name: name.into(),
            outputs: Self::build_outputs(&subtracks),
            subtracks,
            buffers: vec![Vec::new(); slot_count],
            buffer_size: 0,
        }
    }

    /// One Audio output port per subtrack, labeled with the subtrack's name.
    fn build_outputs(subtracks: &[(TrackId, String)]) -> Vec<NodePort> {
        let mut ports = Vec::with_capacity(subtracks.len());
        for (slot, (_, label)) in subtracks.iter().enumerate() {
            ports.push(NodePort::new(label.as_str(), SignalType::Audio, slot));
        }
        ports
    }

    /// Copy `audio` into slot `idx`'s pre-allocated buffer, zero-padding any
    /// tail the source does not cover. Out-of-range `idx` is ignored.
    /// Called by the render system once per child per frame — no allocation.
    pub fn inject_subtrack_audio(&mut self, idx: usize, audio: &[f32]) {
        if let Some(slot) = self.buffers.get_mut(idx) {
            let copied = slot.len().min(audio.len());
            slot[..copied].copy_from_slice(&audio[..copied]);
            // Empty range when the source covers the whole slot.
            slot[copied..].fill(0.0);
        }
    }

    /// Rebuild ports and resize the per-slot buffers for a new subtrack list.
    /// Existing allocations are reused when their size already matches.
    /// `buffer_size` is in frames; each stereo buffer holds `buffer_size * 2` samples.
    pub fn update_subtracks(&mut self, subtracks: Vec<(TrackId, String)>, buffer_size: usize) {
        self.outputs = Self::build_outputs(&subtracks);
        let slot_count = subtracks.len();
        self.subtracks = subtracks;
        self.buffer_size = buffer_size;
        let samples = buffer_size * 2; // stereo interleaved
        self.buffers.resize_with(slot_count, Vec::new);
        for slot in &mut self.buffers {
            if slot.len() != samples {
                slot.resize(samples, 0.0);
            }
        }
    }

    /// Slot index for `track_id`, or None if it is not one of this node's subtracks.
    pub fn subtrack_index_for(&self, track_id: TrackId) -> Option<usize> {
        self.subtracks.iter().position(|(id, _)| *id == track_id)
    }

    /// Number of subtrack slots.
    pub fn num_subtracks(&self) -> usize {
        self.subtracks.len()
    }

    /// The ordered subtrack list.
    pub fn subtracks(&self) -> &[(TrackId, String)] {
        &self.subtracks
    }
}
impl AudioNode for SubtrackInputsNode {
    fn category(&self) -> NodeCategory {
        NodeCategory::Input
    }

    /// No input ports — audio arrives through `inject_subtrack_audio` instead.
    fn inputs(&self) -> &[NodePort] {
        &[]
    }

    fn outputs(&self) -> &[NodePort] {
        &self.outputs
    }

    /// No user-facing parameters; port count is stored via num_ports in serialization.
    fn parameters(&self) -> &[Parameter] {
        &[]
    }

    fn set_parameter(&mut self, _id: u32, _value: f32) {}

    fn get_parameter(&self, _id: u32) -> f32 {
        0.0
    }

    /// Copy each pre-filled slot buffer to the matching output port; ports
    /// without a backing buffer, and any uncovered tail, are zeroed.
    fn process(
        &mut self,
        _inputs: &[&[f32]],
        outputs: &mut [&mut [f32]],
        _midi_inputs: &[&[MidiEvent]],
        _midi_outputs: &mut [&mut Vec<MidiEvent>],
        _sample_rate: u32,
    ) {
        for (slot, out) in outputs.iter_mut().enumerate() {
            match self.buffers.get(slot) {
                Some(src) => {
                    let n = out.len().min(src.len());
                    if n > 0 {
                        out[..n].copy_from_slice(&src[..n]);
                    }
                    if n < out.len() {
                        out[n..].fill(0.0);
                    }
                }
                None => out.fill(0.0),
            }
        }
    }

    fn reset(&mut self) {
        for slot in &mut self.buffers {
            slot.fill(0.0);
        }
    }

    fn node_type(&self) -> &str {
        "SubtrackInputs"
    }

    fn name(&self) -> &str {
        &self.name
    }

    /// Clone the configuration; the copy gets freshly zeroed audio buffers.
    fn clone_node(&self) -> Box<dyn AudioNode> {
        Box::new(Self {
            name: self.name.clone(),
            subtracks: self.subtracks.clone(),
            outputs: self.outputs.clone(),
            buffers: vec![vec![0.0; self.buffer_size * 2]; self.subtracks.len()],
            buffer_size: self.buffer_size,
        })
    }

    fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
        self
    }

    fn as_any(&self) -> &dyn std::any::Any {
        self
    }
}

View File

@ -0,0 +1,199 @@
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType, cv_input_or_default};
use crate::audio::midi::MidiEvent;
use crate::dsp::svf::SvfFilter;
const PARAM_CUTOFF: u32 = 0;    // cutoff frequency, 20 Hz - 20 kHz
const PARAM_RESONANCE: u32 = 1; // resonance, 0-1

/// State Variable Filter node — simultaneously outputs lowpass, highpass,
/// bandpass, and notch from one filter, with per-sample CV modulation of
/// cutoff and resonance.
pub struct SVFNode {
    name: String,
    filter: SvfFilter, // filter core producing all four responses at once
    cutoff: f32,       // base cutoff in Hz (parameter value)
    resonance: f32,    // base resonance 0-1 (parameter value)
    sample_rate: u32,  // last sample rate seen; re-synced in process()
    inputs: Vec<NodePort>,
    outputs: Vec<NodePort>,
    parameters: Vec<Parameter>,
}
impl SVFNode {
    /// Create an SVF node with a 1 kHz cutoff, zero resonance, and an assumed
    /// 44.1 kHz sample rate (corrected on the first `process` call).
    pub fn new(name: impl Into<String>) -> Self {
        let mut filter = SvfFilter::new();
        filter.set_params(1000.0, 0.0, 44100.0);
        Self {
            name: name.into(),
            filter,
            cutoff: 1000.0,
            resonance: 0.0,
            sample_rate: 44100,
            inputs: vec![
                NodePort::new("Audio In", SignalType::Audio, 0),
                NodePort::new("Cutoff CV", SignalType::CV, 1),
                NodePort::new("Resonance CV", SignalType::CV, 2),
            ],
            outputs: vec![
                NodePort::new("Lowpass", SignalType::Audio, 0),
                NodePort::new("Highpass", SignalType::Audio, 1),
                NodePort::new("Bandpass", SignalType::Audio, 2),
                NodePort::new("Notch", SignalType::Audio, 3),
            ],
            parameters: vec![
                Parameter::new(PARAM_CUTOFF, "Cutoff", 20.0, 20000.0, 1000.0, ParameterUnit::Frequency),
                Parameter::new(PARAM_RESONANCE, "Resonance", 0.0, 1.0, 0.0, ParameterUnit::Generic),
            ],
        }
    }
}
impl AudioNode for SVFNode {
    fn category(&self) -> NodeCategory {
        NodeCategory::Effect
    }

    fn inputs(&self) -> &[NodePort] {
        &self.inputs
    }

    fn outputs(&self) -> &[NodePort] {
        &self.outputs
    }

    fn parameters(&self) -> &[Parameter] {
        &self.parameters
    }

    /// Update cutoff or resonance (clamped to its declared range) and push
    /// the new coefficients into the filter. Unknown ids are ignored.
    fn set_parameter(&mut self, id: u32, value: f32) {
        // Both arms previously duplicated the set_params call; store the
        // clamped value per-arm, then update the filter at a single site.
        match id {
            PARAM_CUTOFF => self.cutoff = value.clamp(20.0, 20000.0),
            PARAM_RESONANCE => self.resonance = value.clamp(0.0, 1.0),
            _ => return, // unknown id: leave the filter untouched
        }
        self.filter.set_params(self.cutoff, self.resonance, self.sample_rate as f32);
    }

    /// Read back a parameter; unknown ids read as 0.0.
    fn get_parameter(&self, id: u32) -> f32 {
        match id {
            PARAM_CUTOFF => self.cutoff,
            PARAM_RESONANCE => self.resonance,
            _ => 0.0,
        }
    }
    /// Filter stereo audio (input 0), writing lowpass/highpass/bandpass/notch
    /// to outputs 0-3 simultaneously.
    ///
    /// Cutoff CV (input 1, 0..1) shifts the base cutoff by up to ±2 octaves
    /// around CV 0.5; Resonance CV (input 2) replaces the resonance parameter.
    /// Coefficients are recomputed per sample only while a CV input is
    /// connected and the values actually change.
    fn process(
        &mut self,
        inputs: &[&[f32]],
        outputs: &mut [&mut [f32]],
        _midi_inputs: &[&[MidiEvent]],
        _midi_outputs: &mut [&mut Vec<MidiEvent>],
        sample_rate: u32,
    ) {
        if inputs.is_empty() || outputs.len() < 4 {
            return;
        }
        // Re-sync coefficients if the host sample rate changed.
        if self.sample_rate != sample_rate {
            self.sample_rate = sample_rate;
            self.filter.set_params(self.cutoff, self.resonance, sample_rate as f32);
        }
        let input = inputs[0];
        // All 4 outputs are stereo interleaved
        let frames = input.len() / 2;
        let sr = self.sample_rate as f32;
        // Check if CV inputs are connected (sample first frame to detect NaN)
        let has_cutoff_cv = !cv_input_or_default(inputs, 1, 0, f32::NAN).is_nan();
        let has_resonance_cv = !cv_input_or_default(inputs, 2, 0, f32::NAN).is_nan();
        let mut last_cutoff = self.cutoff;
        let mut last_resonance = self.resonance;
        for frame in 0..frames {
            // Update coefficients from CV if connected
            if has_cutoff_cv || has_resonance_cv {
                let cutoff = if has_cutoff_cv {
                    let cv = cv_input_or_default(inputs, 1, frame, 0.5);
                    // CV 0.5 is neutral; the full 0..1 range spans ±2 octaves.
                    let octave_shift = (cv.clamp(0.0, 1.0) - 0.5) * 4.0;
                    (self.cutoff * 2.0_f32.powf(octave_shift)).clamp(20.0, 20000.0)
                } else {
                    self.cutoff
                };
                let resonance = if has_resonance_cv {
                    cv_input_or_default(inputs, 2, frame, self.resonance).clamp(0.0, 1.0)
                } else {
                    self.resonance
                };
                // Only pay for coefficient recomputation when values change.
                if cutoff != last_cutoff || resonance != last_resonance {
                    self.filter.set_params(cutoff, resonance, sr);
                    last_cutoff = cutoff;
                    last_resonance = resonance;
                }
            }
            // Process both channels, writing all 4 outputs
            for ch in 0..2 {
                let idx = frame * 2 + ch;
                let (lp, hp, bp, notch) = self.filter.process_sample_quad(input[idx], ch);
                outputs[0][idx] = lp;
                outputs[1][idx] = hp;
                outputs[2][idx] = bp;
                outputs[3][idx] = notch;
            }
        }
    }
    /// Clear the filter's internal state.
    fn reset(&mut self) {
        self.filter.reset();
    }

    fn node_type(&self) -> &str {
        "SVF"
    }

    fn name(&self) -> &str {
        &self.name
    }

    /// Clone the node's configuration with a freshly initialized filter —
    /// no audio state is carried over to the copy.
    fn clone_node(&self) -> Box<dyn AudioNode> {
        let mut filter = SvfFilter::new();
        filter.set_params(self.cutoff, self.resonance, self.sample_rate as f32);
        Box::new(Self {
            name: self.name.clone(),
            filter,
            cutoff: self.cutoff,
            resonance: self.resonance,
            sample_rate: self.sample_rate,
            inputs: self.inputs.clone(),
            outputs: self.outputs.clone(),
            parameters: self.parameters.clone(),
        })
    }

    fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
        self
    }

    fn as_any(&self) -> &dyn std::any::Any {
        self
    }
}

View File

@ -0,0 +1,270 @@
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType};
use crate::audio::midi::MidiEvent;
use std::f32::consts::PI;
// Parameter ids for VibratoNode.
const PARAM_RATE: u32 = 0;  // LFO rate in Hz (0.1-14)
const PARAM_DEPTH: u32 = 1; // modulation depth (0-1)
// Delay-line bounds in milliseconds: the delay buffers are sized for
// MAX_DELAY_MS, and the modulated delay starts from BASE_DELAY_MS.
const MAX_DELAY_MS: f32 = 7.0;
const BASE_DELAY_MS: f32 = 0.5;
/// Vibrato effect — periodic pitch modulation via a short modulated delay line.
///
/// 100% wet signal (no dry mix). Supports an external Mod CV input that, when
/// connected, replaces the internal sine LFO with the incoming CV signal.
pub struct VibratoNode {
    name: String,
    rate: f32,                    // LFO rate in Hz, 0.1-14
    depth: f32,                   // modulation depth, 0-1
    delay_buffer_left: Vec<f32>,  // circular delay line, left channel
    delay_buffer_right: Vec<f32>, // circular delay line, right channel
    write_position: usize,        // current write index into both delay lines
    max_delay_samples: usize,     // delay-line length (MAX_DELAY_MS at sample_rate)
    sample_rate: u32,             // last rate seen; buffers resized in process()
    lfo_phase: f32,               // phase of the internal LFO
    inputs: Vec<NodePort>,
    outputs: Vec<NodePort>,
    parameters: Vec<Parameter>,
}
impl VibratoNode {
    /// Create a vibrato with a 5 Hz rate and 0.5 depth. Delay buffers are
    /// sized for 48 kHz and re-sized in `process` if the host rate differs.
    pub fn new(name: impl Into<String>) -> Self {
        let max_delay_samples = ((MAX_DELAY_MS / 1000.0) * 48000.0) as usize;
        Self {
            name: name.into(),
            rate: 5.0,
            depth: 0.5,
            delay_buffer_left: vec![0.0; max_delay_samples],
            delay_buffer_right: vec![0.0; max_delay_samples],
            write_position: 0,
            max_delay_samples,
            sample_rate: 48000,
            lfo_phase: 0.0,
            inputs: vec![
                NodePort::new("Audio In", SignalType::Audio, 0),
                NodePort::new("Mod CV In", SignalType::CV, 1),
                NodePort::new("Rate CV In", SignalType::CV, 2),
                NodePort::new("Depth CV In", SignalType::CV, 3),
            ],
            outputs: vec![NodePort::new("Audio Out", SignalType::Audio, 0)],
            parameters: vec![
                Parameter::new(PARAM_RATE, "Rate", 0.1, 14.0, 5.0, ParameterUnit::Frequency),
                Parameter::new(PARAM_DEPTH, "Depth", 0.0, 1.0, 0.5, ParameterUnit::Generic),
            ],
        }
    }

    /// Read a sample `delay_samples` behind the write head with linear
    /// interpolation, wrapping around the circular buffer.
    fn read_interpolated_sample(&self, buffer: &[f32], delay_samples: f32) -> f32 {
        let clamped = delay_samples.clamp(0.0, (self.max_delay_samples - 1) as f32);
        let mut read_pos = self.write_position as f32 - clamped;
        if read_pos < 0.0 {
            read_pos += self.max_delay_samples as f32;
        }
        let base = read_pos.floor() as usize;
        let frac = read_pos - base as f32;
        let sample1 = buffer[base % self.max_delay_samples];
        let sample2 = buffer[(base + 1) % self.max_delay_samples];
        sample1 * (1.0 - frac) + sample2 * frac
    }
}
impl AudioNode for VibratoNode {
    fn category(&self) -> NodeCategory {
        NodeCategory::Effect
    }

    fn inputs(&self) -> &[NodePort] {
        &self.inputs
    }

    fn outputs(&self) -> &[NodePort] {
        &self.outputs
    }

    fn parameters(&self) -> &[Parameter] {
        &self.parameters
    }

    /// Set a parameter by ID, clamping to the parameter's declared range.
    /// Unknown IDs are silently ignored.
    fn set_parameter(&mut self, id: u32, value: f32) {
        match id {
            PARAM_RATE => {
                self.rate = value.clamp(0.1, 14.0);
            }
            PARAM_DEPTH => {
                self.depth = value.clamp(0.0, 1.0);
            }
            _ => {}
        }
    }

    /// Get a parameter's current value; unknown IDs return 0.0.
    fn get_parameter(&self, id: u32) -> f32 {
        match id {
            PARAM_RATE => self.rate,
            PARAM_DEPTH => self.depth,
            _ => 0.0,
        }
    }

    /// Process one block of interleaved stereo audio (100% wet).
    ///
    /// Port layout: inputs[0] = audio (interleaved L/R), inputs[1] = Mod CV,
    /// inputs[2] = Rate CV, inputs[3] = Depth CV. Per-frame CV samples that
    /// are NaN mean "not connected" and fall back to the knob values.
    fn process(
        &mut self,
        inputs: &[&[f32]],
        outputs: &mut [&mut [f32]],
        _midi_inputs: &[&[MidiEvent]],
        _midi_outputs: &mut [&mut Vec<MidiEvent>],
        sample_rate: u32,
    ) {
        if inputs.is_empty() || outputs.is_empty() {
            return;
        }
        // Rebuild delay buffers when the engine sample rate changes so the
        // ring still covers exactly MAX_DELAY_MS.
        if self.sample_rate != sample_rate {
            self.sample_rate = sample_rate;
            self.max_delay_samples = ((MAX_DELAY_MS / 1000.0) * sample_rate as f32) as usize;
            self.delay_buffer_left.resize(self.max_delay_samples, 0.0);
            self.delay_buffer_right.resize(self.max_delay_samples, 0.0);
            self.write_position = 0;
        }
        let input = inputs[0];
        let output = &mut outputs[0];
        // CV inputs — unconnected ports are filled with NaN
        let mod_cv = inputs.get(1);
        let rate_cv = inputs.get(2);
        let depth_cv = inputs.get(3);
        // Interleaved stereo: 2 samples per frame on both sides.
        let frames = input.len() / 2;
        let output_frames = output.len() / 2;
        let frames_to_process = frames.min(output_frames);
        // Fixed minimum delay plus the maximum sweep on top of it.
        let base_delay_samples = (BASE_DELAY_MS / 1000.0) * self.sample_rate as f32;
        let max_modulation_samples = (MAX_DELAY_MS - BASE_DELAY_MS) / 1000.0 * self.sample_rate as f32;
        for frame in 0..frames_to_process {
            let left_in = input[frame * 2];
            let right_in = input[frame * 2 + 1];
            // Resolve depth: CV overrides knob when connected
            let depth = if let Some(cv) = depth_cv {
                let cv_val = cv.get(frame).copied().unwrap_or(f32::NAN);
                if cv_val.is_nan() {
                    self.depth
                } else {
                    cv_val.clamp(0.0, 1.0)
                }
            } else {
                self.depth
            };
            // Determine modulation value (0..1 range, pre-depth)
            let mod_value = if let Some(cv) = mod_cv {
                let cv_val = cv.get(frame).copied().unwrap_or(f32::NAN);
                if cv_val.is_nan() {
                    // No external mod — use internal LFO
                    None
                } else {
                    Some(cv_val.clamp(0.0, 1.0))
                }
            } else {
                None
            };
            let modulation = if let Some(ext) = mod_value {
                // External modulation: CV value scaled by depth
                ext * depth
            } else {
                // Internal LFO: resolve rate with CV
                let rate = if let Some(cv) = rate_cv {
                    let cv_val = cv.get(frame).copied().unwrap_or(f32::NAN);
                    if cv_val.is_nan() {
                        self.rate
                    } else {
                        // CV offsets the knob, scaled to the full 14 Hz span.
                        (self.rate + cv_val * 14.0).clamp(0.1, 14.0)
                    }
                } else {
                    self.rate
                };
                // Unipolar sine in [0, 1]; phase only advances on this path,
                // so the LFO freezes while external Mod CV is driving.
                let lfo_value = (self.lfo_phase * 2.0 * PI).sin() * 0.5 + 0.5;
                self.lfo_phase += rate / self.sample_rate as f32;
                if self.lfo_phase >= 1.0 {
                    self.lfo_phase -= 1.0;
                }
                lfo_value * depth
            };
            let delay_samples = base_delay_samples + modulation * max_modulation_samples;
            // 100% wet — output is only the delayed signal.
            // Read BEFORE writing so a full ring of history stays intact.
            output[frame * 2] = self.read_interpolated_sample(&self.delay_buffer_left, delay_samples);
            output[frame * 2 + 1] = self.read_interpolated_sample(&self.delay_buffer_right, delay_samples);
            self.delay_buffer_left[self.write_position] = left_in;
            self.delay_buffer_right[self.write_position] = right_in;
            self.write_position = (self.write_position + 1) % self.max_delay_samples;
        }
    }

    /// Clear all delay history and LFO state.
    fn reset(&mut self) {
        self.delay_buffer_left.fill(0.0);
        self.delay_buffer_right.fill(0.0);
        self.write_position = 0;
        self.lfo_phase = 0.0;
    }

    fn node_type(&self) -> &str {
        "Vibrato"
    }

    fn name(&self) -> &str {
        &self.name
    }

    /// Clone parameters/ports but start with silent buffers and zero phase —
    /// the copy begins from a clean runtime state.
    fn clone_node(&self) -> Box<dyn AudioNode> {
        Box::new(Self {
            name: self.name.clone(),
            rate: self.rate,
            depth: self.depth,
            delay_buffer_left: vec![0.0; self.max_delay_samples],
            delay_buffer_right: vec![0.0; self.max_delay_samples],
            write_position: 0,
            max_delay_samples: self.max_delay_samples,
            sample_rate: self.sample_rate,
            lfo_phase: 0.0,
            inputs: self.inputs.clone(),
            outputs: self.outputs.clone(),
            parameters: self.parameters.clone(),
        })
    }

    fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
        self
    }

    fn as_any(&self) -> &dyn std::any::Any {
        self
    }
}

View File

@ -9,6 +9,7 @@ const DEFAULT_VOICES: usize = 8;
#[derive(Clone)]
struct VoiceState {
active: bool,
releasing: bool, // Note-off received, still processing (e.g. ADSR release)
note: u8,
age: u32, // For voice stealing
pending_events: Vec<MidiEvent>, // MIDI events to send to this voice
@ -18,6 +19,7 @@ impl VoiceState {
fn new() -> Self {
Self {
active: false,
releasing: false,
note: 0,
age: 0,
pending_events: Vec::new(),
@ -72,8 +74,19 @@ impl VoiceAllocatorNode {
Parameter::new(PARAM_VOICE_COUNT, "Voices", 1.0, MAX_VOICES as f32, DEFAULT_VOICES as f32, ParameterUnit::Generic),
];
// Create empty template graph
let template_graph = AudioGraph::new(sample_rate, buffer_size);
// Create template graph with default TemplateInput and TemplateOutput nodes
let mut template_graph = AudioGraph::new(sample_rate, buffer_size);
{
use super::template_io::{TemplateInputNode, TemplateOutputNode};
let input_node = Box::new(TemplateInputNode::new("Template Input"));
let output_node = Box::new(TemplateOutputNode::new("Template Output"));
let input_idx = template_graph.add_node(input_node);
let output_idx = template_graph.add_node(output_node);
template_graph.set_node_position(input_idx, -200.0, 0.0);
template_graph.set_node_position(output_idx, 200.0, 0.0);
template_graph.set_midi_target(input_idx, true);
template_graph.set_output_node(Some(output_idx));
}
// Create voice instances (initially empty clones of template)
let voice_instances: Vec<AudioGraph> = (0..MAX_VOICES)
@ -134,9 +147,9 @@ impl VoiceAllocatorNode {
}
}
/// Find a free voice, or steal the oldest one
/// Find a free voice, or steal one
/// Priority: inactive → oldest releasing → oldest held
fn find_voice_for_note_on(&mut self) -> usize {
// Only search within active voice_count
// First, look for an inactive voice
for (i, voice) in self.voices[..self.voice_count].iter().enumerate() {
if !voice.active {
@ -144,7 +157,17 @@ impl VoiceAllocatorNode {
}
}
// No free voices, steal the oldest one within voice_count
// No inactive voices — steal the oldest releasing voice
if let Some((i, _)) = self.voices[..self.voice_count]
.iter()
.enumerate()
.filter(|(_, v)| v.releasing)
.max_by_key(|(_, v)| v.age)
{
return i;
}
// No releasing voices either — steal the oldest held voice
self.voices[..self.voice_count]
.iter()
.enumerate()
@ -153,13 +176,42 @@ impl VoiceAllocatorNode {
.unwrap_or(0)
}
/// Find all voices playing a specific note
/// Get oscilloscope data from the most relevant voice's subgraph.
/// Priority: first active voice → first releasing voice → first voice.
pub fn get_voice_oscilloscope_data(&self, node_id: u32, sample_count: usize) -> Option<(Vec<f32>, Vec<f32>)> {
    let graph = &self.voice_instances[self.best_voice_index()];
    let node = petgraph::stable_graph::NodeIndex::new(node_id as usize);
    // Audio trace is required; the CV trace is optional and defaults to empty.
    graph.get_oscilloscope_data(node, sample_count).map(|audio| {
        let cv = graph.get_oscilloscope_cv_data(node, sample_count).unwrap_or_default();
        (audio, cv)
    })
}
/// Find the best voice index to observe: first active → first releasing → 0
fn best_voice_index(&self) -> usize {
    let pool = &self.voices[..self.voice_count];
    // Prefer a held voice (active and not yet in its release tail).
    if let Some(idx) = pool.iter().position(|v| v.active && !v.releasing) {
        return idx;
    }
    // Otherwise take a voice still releasing; failing that, voice 0.
    pool.iter()
        .position(|v| v.active && v.releasing)
        .unwrap_or(0)
}
/// Find all voices playing a specific note (held, not yet releasing)
fn find_voices_for_note_off(&self, note: u8) -> Vec<usize> {
self.voices[..self.voice_count]
.iter()
.enumerate()
.filter_map(|(i, v)| {
if v.active && v.note == note {
if v.active && !v.releasing && v.note == note {
Some(i)
} else {
None
@ -195,6 +247,7 @@ impl AudioNode for VoiceAllocatorNode {
// Stop voices beyond the new count
for voice in &mut self.voices[new_count..] {
voice.active = false;
voice.releasing = false;
}
}
}
@ -218,25 +271,26 @@ impl AudioNode for VoiceAllocatorNode {
if event.data2 > 0 {
let voice_idx = self.find_voice_for_note_on();
self.voices[voice_idx].active = true;
self.voices[voice_idx].releasing = false;
self.voices[voice_idx].note = event.data1;
self.voices[voice_idx].age = 0;
// Store MIDI event for this voice to process
self.voices[voice_idx].pending_events.push(*event);
} else {
// Velocity = 0 means note off - send to ALL voices playing this note
// Velocity = 0 means note off — mark releasing, keep active for ADSR release
let voice_indices = self.find_voices_for_note_off(event.data1);
for voice_idx in voice_indices {
self.voices[voice_idx].active = false;
self.voices[voice_idx].releasing = true;
self.voices[voice_idx].pending_events.push(*event);
}
}
}
0x80 => {
// Note off - send to ALL voices playing this note
// Note off — mark releasing, keep active for ADSR release
let voice_indices = self.find_voices_for_note_off(event.data1);
for voice_idx in voice_indices {
self.voices[voice_idx].active = false;
self.voices[voice_idx].releasing = true;
self.voices[voice_idx].pending_events.push(*event);
}
}
@ -291,26 +345,28 @@ impl AudioNode for VoiceAllocatorNode {
// Note: playback_time is 0.0 since voice allocator doesn't track time
self.voice_instances[voice_idx].process(mix_slice, &midi_events, 0.0);
// Auto-deactivate releasing voices that have gone silent
if voice_state.releasing {
let peak = mix_slice.iter().fold(0.0f32, |max, &s| max.max(s.abs()));
if peak < 1e-6 {
voice_state.active = false;
voice_state.releasing = false;
continue; // Don't mix silent output
}
}
// Mix into output (accumulate)
for (i, sample) in mix_slice.iter().enumerate() {
output[i] += sample;
}
}
}
// Apply normalization to prevent clipping (divide by active voice count)
let active_count = self.voices[..self.voice_count].iter().filter(|v| v.active).count();
if active_count > 1 {
let scale = 1.0 / (active_count as f32).sqrt(); // Use sqrt for better loudness perception
for sample in output.iter_mut() {
*sample *= scale;
}
}
}
fn reset(&mut self) {
for voice in &mut self.voices {
voice.active = false;
voice.releasing = false;
voice.pending_events.clear();
}
for graph in &mut self.voice_instances {

View File

@ -1,4 +1,4 @@
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType};
use crate::audio::node_graph::{AudioNode, NodeCategory, NodePort, Parameter, ParameterUnit, SignalType, cv_input_or_default};
use crate::audio::midi::MidiEvent;
use std::f32::consts::PI;
@ -243,14 +243,11 @@ impl AudioNode for WavetableOscillatorNode {
let frames = output.len() / 2;
for frame in 0..frames {
// Read V/Oct input
let voct = if !inputs.is_empty() && !inputs[0].is_empty() {
inputs[0][frame.min(inputs[0].len() / 2 - 1) * 2]
} else {
0.0 // Default to A4 (440 Hz)
};
// V/Oct input: when unconnected, defaults to 0.0 (A4 440 Hz)
// CV signals are mono, so read from frame index directly
let voct = cv_input_or_default(inputs, 0, frame, 0.0);
// Calculate frequency
// Calculate frequency from V/Oct
let freq = self.voct_to_freq(voct);
// Read from wavetable

View File

@ -67,6 +67,10 @@ pub struct GraphPreset {
/// Which node index is the audio output (None if not set)
pub output_node: Option<u32>,
/// Frontend-only group definitions (backend stores opaquely, does not interpret)
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub groups: Vec<SerializedGroup>,
}
/// Metadata about the preset
@ -119,6 +123,50 @@ pub struct SerializedNode {
/// For sampler nodes: loaded sample data
#[serde(skip_serializing_if = "Option::is_none")]
pub sample_data: Option<SampleData>,
/// For Script nodes: BeamDSP source code
#[serde(skip_serializing_if = "Option::is_none")]
pub script_source: Option<String>,
/// For AmpSim nodes: path to the .nam model file
#[serde(skip_serializing_if = "Option::is_none")]
pub nam_model_path: Option<String>,
/// For dynamic-port nodes (Mixer, SubtrackInputs): saved port count so ports
/// round-trip correctly through save/load independent of connection order.
#[serde(skip_serializing_if = "Option::is_none")]
pub num_ports: Option<u32>,
/// For SubtrackInputs: ordered port names (one per subtrack slot).
/// Allows the UI to display actual track names on the node's output ports.
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub port_names: Vec<String>,
}
/// Serialized group definition (frontend-only visual grouping, stored opaquely by backend)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SerializedGroup {
    // Group identifier within the preset.
    pub id: u32,
    // User-visible group name.
    pub name: String,
    // Node indices that belong to this group.
    pub member_nodes: Vec<u32>,
    // Group position as (x, y).
    pub position: (f32, f32),
    // Boundary connections on the group's input side.
    pub boundary_inputs: Vec<SerializedBoundaryConnection>,
    // Boundary connections on the group's output side.
    pub boundary_outputs: Vec<SerializedBoundaryConnection>,
    /// Parent group ID for nested groups (None = top-level group)
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub parent_group_id: Option<u32>,
}
/// Serialized boundary connection for group definitions
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SerializedBoundaryConnection {
    // Node index outside the group.
    pub external_node: u32,
    // Port index on the external node.
    pub external_port: usize,
    // Node index inside the group.
    pub internal_node: u32,
    // Port index on the internal node.
    pub internal_port: usize,
    // Display name of the boundary port.
    pub port_name: String,
    /// Signal type as string ("Audio", "Midi", "CV")
    pub data_type: String,
}
/// Serialized connection between nodes
@ -152,6 +200,7 @@ impl GraphPreset {
connections: Vec::new(),
midi_targets: Vec::new(),
output_node: None,
groups: Vec::new(),
}
}
@ -186,6 +235,10 @@ impl SerializedNode {
position: (0.0, 0.0),
template_graph: None,
sample_data: None,
script_source: None,
nam_model_path: None,
num_ports: None,
port_names: Vec::new(),
}
}

View File

@ -1,4 +1,5 @@
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::f32::consts::PI;
use serde::{Deserialize, Serialize};
@ -51,26 +52,231 @@ fn windowed_sinc_interpolate(samples: &[f32], frac: f32) -> f32 {
result
}
/// PCM sample format for memory-mapped audio files
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum PcmSampleFormat {
    // 16-bit signed integer, little-endian (2 bytes per sample).
    I16,
    // 24-bit signed integer, little-endian (3 bytes per sample).
    I24,
    // 32-bit IEEE float, little-endian (4 bytes per sample).
    F32,
}
/// How audio data is stored for a pool entry
#[derive(Debug, Clone)]
pub enum AudioStorage {
    /// Fully decoded interleaved f32 samples in memory
    InMemory(Vec<f32>),
    /// Memory-mapped PCM file (WAV/AIFF) — instant load, OS-managed paging
    Mapped {
        // Shared read-only mapping of the file.
        mmap: Arc<memmap2::Mmap>,
        // Byte offset of the first PCM sample within the mapping.
        data_offset: usize,
        sample_format: PcmSampleFormat,
        // Size of one sample of `sample_format`, in bytes.
        bytes_per_sample: usize,
        total_frames: u64,
    },
    /// Compressed audio — playback handled by disk reader's stream decoder.
    /// `decoded_for_waveform` is progressively filled by a background thread.
    Compressed {
        decoded_for_waveform: Vec<f32>,
        // Frames decoded into `decoded_for_waveform` so far.
        decoded_frames: u64,
        total_frames: u64,
    },
}
/// Audio file stored in the pool
#[derive(Debug, Clone)]
pub struct AudioFile {
pub path: PathBuf,
pub data: Vec<f32>, // Interleaved samples
pub storage: AudioStorage,
pub channels: u32,
pub sample_rate: u32,
pub frames: u64,
/// Original file format (mp3, ogg, wav, flac, etc.)
/// Used to determine if we should preserve lossy encoding during save
pub original_format: Option<String>,
/// Original compressed file bytes (preserved across save/load to avoid re-encoding)
pub original_bytes: Option<Vec<u8>>,
}
impl AudioFile {
/// Create a new AudioFile
/// Create a new AudioFile with in-memory interleaved f32 data
pub fn new(path: PathBuf, data: Vec<f32>, channels: u32, sample_rate: u32) -> Self {
let frames = (data.len() / channels as usize) as u64;
Self {
path,
data,
storage: AudioStorage::InMemory(data),
channels,
sample_rate,
frames,
original_format: None,
original_bytes: None,
}
}
/// Create a new AudioFile with original format information
///
/// `data` is interleaved f32 samples; `original_format` records the source
/// container (e.g. "mp3", "wav") so saving can decide whether to preserve
/// lossy encoding.
pub fn with_format(path: PathBuf, data: Vec<f32>, channels: u32, sample_rate: u32, original_format: Option<String>) -> Self {
    // Frame count derived from the interleaved sample count.
    let frames = (data.len() / channels as usize) as u64;
    Self {
        path,
        storage: AudioStorage::InMemory(data),
        channels,
        sample_rate,
        frames,
        original_format,
        original_bytes: None,
    }
}
/// Create an AudioFile backed by a memory-mapped WAV/AIFF file
///
/// `data_offset` is the byte offset of the first PCM sample inside the
/// mapping; `total_frames` is the frame count of the data chunk.
pub fn from_mmap(
    path: PathBuf,
    mmap: memmap2::Mmap,
    data_offset: usize,
    sample_format: PcmSampleFormat,
    channels: u32,
    sample_rate: u32,
    total_frames: u64,
) -> Self {
    // Precompute the per-sample byte stride so readers don't re-match
    // on the format for every sample.
    let bytes_per_sample = match sample_format {
        PcmSampleFormat::I16 => 2,
        PcmSampleFormat::I24 => 3,
        PcmSampleFormat::F32 => 4,
    };
    Self {
        path,
        storage: AudioStorage::Mapped {
            mmap: Arc::new(mmap),
            data_offset,
            sample_format,
            bytes_per_sample,
            total_frames,
        },
        channels,
        sample_rate,
        frames: total_frames,
        // NOTE(review): format is recorded as "wav" even for AIFF inputs —
        // confirm no caller relies on distinguishing the two here.
        original_format: Some("wav".to_string()),
        original_bytes: None,
    }
}
/// Create a placeholder AudioFile for a compressed format (playback via disk reader)
///
/// No samples are decoded up front: the waveform buffer starts empty and is
/// filled progressively while playback streams through the disk reader.
pub fn from_compressed(
    path: PathBuf,
    channels: u32,
    sample_rate: u32,
    total_frames: u64,
    original_format: Option<String>,
) -> Self {
    let storage = AudioStorage::Compressed {
        decoded_for_waveform: Vec::new(),
        decoded_frames: 0,
        total_frames,
    };
    Self {
        path,
        storage,
        channels,
        sample_rate,
        frames: total_frames,
        original_format,
        original_bytes: None,
    }
}
/// Get interleaved f32 sample data.
///
/// - **InMemory**: returns the full slice directly.
/// - **Mapped F32**: reinterprets the mmap'd bytes as `&[f32]` (zero-copy).
/// - **Mapped I16/I24 or Compressed**: returns an empty slice (use
///   `read_samples()` or the disk reader's `ReadAheadBuffer` instead).
pub fn data(&self) -> &[f32] {
    match &self.storage {
        AudioStorage::InMemory(data) => data,
        AudioStorage::Mapped {
            mmap,
            data_offset,
            sample_format,
            total_frames,
            ..
        } if *sample_format == PcmSampleFormat::F32 => {
            let byte_slice = &mmap[*data_offset..];
            let ptr = byte_slice.as_ptr();
            // Check 4-byte alignment (required for f32); a misaligned data
            // chunk falls through to the empty-slice fallback below.
            if ptr.align_offset(std::mem::align_of::<f32>()) == 0 {
                let len = (*total_frames as usize) * self.channels as usize;
                let available = byte_slice.len() / 4;
                // Clamp in case the header claims more frames than the file holds.
                let safe_len = len.min(available);
                // SAFETY: pointer is aligned, mmap is read-only and outlives
                // this borrow, and we clamp to the available byte range.
                unsafe { std::slice::from_raw_parts(ptr as *const f32, safe_len) }
            } else {
                &[]
            }
        }
        _ => &[],
    }
}
/// Read samples for a specific channel into the output buffer.
/// Works for InMemory and Mapped storage. Returns the number of frames read.
///
/// Reading stops early at end-of-file or when `out` is full; frames past the
/// end are left unwritten. Compressed storage always returns 0 — those files
/// are streamed through the disk reader instead.
pub fn read_samples(
    &self,
    start_frame: usize,
    count: usize,
    channel: usize,
    out: &mut [f32],
) -> usize {
    let channels = self.channels as usize;
    let total_frames = self.frames as usize;
    match &self.storage {
        AudioStorage::InMemory(data) => {
            let mut written = 0;
            for i in 0..count.min(out.len()) {
                let frame = start_frame + i;
                if frame >= total_frames { break; }
                // Interleaved layout: frame-major, channel-minor.
                let idx = frame * channels + channel;
                out[i] = data[idx];
                written += 1;
            }
            written
        }
        AudioStorage::Mapped { mmap, data_offset, sample_format, bytes_per_sample, .. } => {
            let mut written = 0;
            for i in 0..count.min(out.len()) {
                let frame = start_frame + i;
                if frame >= total_frames { break; }
                let sample_index = frame * channels + channel;
                let byte_offset = data_offset + sample_index * bytes_per_sample;
                let end = byte_offset + bytes_per_sample;
                // Guard against a header that claims more frames than the
                // mapped file actually contains.
                if end > mmap.len() { break; }
                let bytes = &mmap[byte_offset..end];
                // Decode one little-endian sample and normalize to [-1, 1).
                out[i] = match sample_format {
                    PcmSampleFormat::I16 => {
                        let val = i16::from_le_bytes([bytes[0], bytes[1]]);
                        val as f32 / 32768.0
                    }
                    PcmSampleFormat::I24 => {
                        // Sign-extend 24-bit to 32-bit: shift the value into the
                        // upper bits, then arithmetic-shift back down.
                        let val = ((bytes[0] as i32)
                            | ((bytes[1] as i32) << 8)
                            | ((bytes[2] as i32) << 16))
                            << 8
                            >> 8;
                        val as f32 / 8388608.0
                    }
                    PcmSampleFormat::F32 => {
                        f32::from_le_bytes([bytes[0], bytes[1], bytes[2], bytes[3]])
                    }
                };
                written += 1;
            }
            written
        }
        AudioStorage::Compressed { .. } => {
            // Compressed files are read through the disk reader
            0
        }
    }
}
@ -82,36 +288,68 @@ impl AudioFile {
/// Generate a waveform overview with the specified number of peaks
/// This creates a downsampled representation suitable for timeline visualization
pub fn generate_waveform_overview(&self, target_peaks: usize) -> Vec<crate::io::WaveformPeak> {
    // Convenience wrapper: overview of the entire file.
    self.generate_waveform_overview_range(0, self.frames as usize, target_peaks)
}
/// Generate a waveform overview for a specific range of frames
///
/// # Arguments
/// * `start_frame` - Starting frame index (0-based)
/// * `end_frame` - Ending frame index (exclusive)
/// * `target_peaks` - Desired number of peaks to generate
pub fn generate_waveform_overview_range(
&self,
start_frame: usize,
end_frame: usize,
target_peaks: usize,
) -> Vec<crate::io::WaveformPeak> {
if self.frames == 0 || target_peaks == 0 {
return Vec::new();
}
let total_frames = self.frames as usize;
let frames_per_peak = (total_frames / target_peaks).max(1);
let actual_peaks = (total_frames + frames_per_peak - 1) / frames_per_peak;
let start_frame = start_frame.min(total_frames);
let end_frame = end_frame.min(total_frames);
if start_frame >= end_frame {
return Vec::new();
}
let range_frames = end_frame - start_frame;
let frames_per_peak = (range_frames / target_peaks).max(1);
let actual_peaks = (range_frames + frames_per_peak - 1) / frames_per_peak;
let mut peaks = Vec::with_capacity(actual_peaks);
for peak_idx in 0..actual_peaks {
let start_frame = peak_idx * frames_per_peak;
let end_frame = ((peak_idx + 1) * frames_per_peak).min(total_frames);
let peak_start = start_frame + peak_idx * frames_per_peak;
let peak_end = (start_frame + (peak_idx + 1) * frames_per_peak).min(end_frame);
let mut min = 0.0f32;
let mut max = 0.0f32;
let mut min = f32::MAX;
let mut max = f32::MIN;
// Scan all samples in this window
for frame_idx in start_frame..end_frame {
let data = self.data();
for frame_idx in peak_start..peak_end {
// For multi-channel audio, combine all channels
for ch in 0..self.channels as usize {
let sample_idx = frame_idx * self.channels as usize + ch;
if sample_idx < self.data.len() {
let sample = self.data[sample_idx];
if sample_idx < data.len() {
let sample = data[sample_idx];
min = min.min(sample);
max = max.max(sample);
}
}
}
// If no samples were found, clamp to safe defaults
if min == f32::MAX {
min = 0.0;
}
if max == f32::MIN {
max = 0.0;
}
peaks.push(crate::io::WaveformPeak { min, max });
}
@ -122,6 +360,8 @@ impl AudioFile {
/// Pool of shared audio files (audio clip content)
pub struct AudioClipPool {
    // Pool entries; indices into this Vec are the pool handles used by clips.
    files: Vec<AudioFile>,
    /// Waveform chunk cache for multi-resolution waveform generation
    waveform_cache: crate::audio::waveform_cache::WaveformCache,
}
/// Type alias for backwards compatibility
@ -132,6 +372,7 @@ impl AudioClipPool {
/// Create an empty pool with a 100 MB waveform-chunk cache.
pub fn new() -> Self {
    Self {
        files: Vec::new(),
        waveform_cache: crate::audio::waveform_cache::WaveformCache::new(100), // 100MB cache
    }
}
@ -159,6 +400,25 @@ impl AudioClipPool {
})
}
/// Generate waveform overview for a specific range of a file in the pool
///
/// # Arguments
/// * `pool_index` - Index of the file in the pool
/// * `start_frame` - Starting frame index (0-based)
/// * `end_frame` - Ending frame index (exclusive)
/// * `target_peaks` - Desired number of peaks to generate
///
/// Returns `None` when `pool_index` is out of range.
pub fn generate_waveform_range(
    &self,
    pool_index: usize,
    start_frame: usize,
    end_frame: usize,
    target_peaks: usize,
) -> Option<Vec<crate::io::WaveformPeak>> {
    let file = self.files.get(pool_index)?;
    Some(file.generate_waveform_overview_range(start_frame, end_frame, target_peaks))
}
/// Add an audio file to the pool and return its index
pub fn add_file(&mut self, file: AudioFile) -> usize {
let index = self.files.len();
@ -171,6 +431,11 @@ impl AudioClipPool {
self.files.get(index)
}
/// Get a mutable reference to an audio file by index
/// Returns `None` when `index` is out of range.
pub fn get_file_mut(&mut self, index: usize) -> Option<&mut AudioFile> {
    self.files.get_mut(index)
}
/// Get number of files in the pool
pub fn file_count(&self) -> usize {
self.files.len()
@ -178,6 +443,7 @@ impl AudioClipPool {
/// Render audio from a file in the pool with high-quality windowed sinc interpolation
/// start_time_seconds: position in the audio file to start reading from (in seconds)
/// clip_read_ahead: per-clip-instance read-ahead buffer for compressed audio streaming
/// Returns the number of samples actually rendered
pub fn render_from_file(
&self,
@ -187,121 +453,303 @@ impl AudioClipPool {
gain: f32,
engine_sample_rate: u32,
engine_channels: u32,
clip_read_ahead: Option<&super::disk_reader::ReadAheadBuffer>,
) -> usize {
let Some(audio_file) = self.files.get(pool_index) else {
return 0;
};
let audio_data = audio_file.data();
let read_ahead = clip_read_ahead;
let use_read_ahead = audio_data.is_empty();
let src_channels = audio_file.channels as usize;
// Nothing to render: no data and no read-ahead buffer
if use_read_ahead && read_ahead.is_none() {
// Log once per pool_index to diagnose silent clips
static LOGGED: std::sync::atomic::AtomicU64 = std::sync::atomic::AtomicU64::new(u64::MAX);
let prev = LOGGED.swap(pool_index as u64, std::sync::atomic::Ordering::Relaxed);
if prev != pool_index as u64 {
eprintln!("[RENDER] pool={}: data empty, no read_ahead! storage={:?}, frames={}",
pool_index, std::mem::discriminant(&audio_file.storage), audio_file.frames);
}
return 0;
}
// In export mode, block-wait until the disk reader has filled the
// frames we need, so offline rendering never gets buffer misses.
if use_read_ahead {
let ra = read_ahead.unwrap();
if ra.is_export_mode() {
let src_start = (start_time_seconds * audio_file.sample_rate as f64) as u64;
// Tell the disk reader where we need data BEFORE waiting
ra.set_target_frame(src_start);
// Pad by 64 frames for sinc interpolation taps
let frames_needed = (output.len() / engine_channels as usize) as u64 + 64;
// Spin-wait with small sleeps until the disk reader fills the buffer
let mut wait_iters = 0u64;
while !ra.has_range(src_start, frames_needed) {
std::thread::sleep(std::time::Duration::from_micros(100));
wait_iters += 1;
if wait_iters > 100_000 {
// Safety valve: 10 seconds of waiting
eprintln!("[EXPORT] Timed out waiting for disk reader (need frames {}..{})",
src_start, src_start + frames_needed);
break;
}
}
}
}
// Snapshot the read-ahead buffer range once for the entire render call.
// This ensures all sinc interpolation taps within a single callback
// see a consistent range, preventing crackle from concurrent updates.
let (ra_start, ra_end) = if use_read_ahead {
read_ahead.unwrap().snapshot()
} else {
(0, 0)
};
// Buffer-miss counter: how many times we wanted a sample the ring
// buffer didn't have (frame in file range but outside buffer range).
let mut buffer_misses: u32 = 0;
// Read a single interleaved sample by (frame, channel).
// Uses direct slice access for InMemory/Mapped, or the disk reader's
// ReadAheadBuffer for compressed files.
macro_rules! get_sample {
($frame:expr, $ch:expr) => {{
if use_read_ahead {
let f = $frame as u64;
let s = read_ahead.unwrap().read_sample(f, $ch, ra_start, ra_end);
if s == 0.0 && (f < ra_start || f >= ra_end) {
buffer_misses += 1;
}
s
} else {
let idx = ($frame) * src_channels + ($ch);
if idx < audio_data.len() { audio_data[idx] } else { 0.0 }
}
}};
}
let dst_channels = engine_channels as usize;
let output_frames = output.len() / dst_channels;
// Calculate starting position in source with fractional precision
let src_start_position = start_time_seconds * audio_file.sample_rate as f64;
// Sample rate conversion ratio
let rate_ratio = audio_file.sample_rate as f64 / engine_sample_rate as f64;
// Kernel size for windowed sinc (32 taps = high quality, good performance)
const KERNEL_SIZE: usize = 32;
const HALF_KERNEL: usize = KERNEL_SIZE / 2;
// Tell the disk reader where we're reading so it buffers the right region.
if use_read_ahead {
read_ahead.unwrap().set_target_frame(src_start_position as u64);
}
let mut rendered_frames = 0;
// Render frame by frame with windowed sinc interpolation
for output_frame in 0..output_frames {
// Calculate exact fractional position in source
let src_position = src_start_position + (output_frame as f64 * rate_ratio);
let src_frame = src_position.floor() as i32;
let frac = (src_position - src_frame as f64) as f32;
if audio_file.sample_rate == engine_sample_rate {
// Fast path: matching sample rates — direct sample copy, no interpolation
let src_start_frame = src_start_position.floor() as i64;
// Check if we've gone past the end of the audio file
if src_frame < 0 || src_frame as usize >= audio_file.frames as usize {
break;
// Continuity check: detect gaps/overlaps between consecutive callbacks (DAW_AUDIO_DEBUG=1)
if std::env::var("DAW_AUDIO_DEBUG").is_ok() {
use std::sync::atomic::{AtomicI64, Ordering as AO};
static EXPECTED_NEXT: AtomicI64 = AtomicI64::new(-1);
static DISCONTINUITIES: AtomicI64 = AtomicI64::new(0);
let expected = EXPECTED_NEXT.load(AO::Relaxed);
if expected >= 0 && src_start_frame != expected {
let count = DISCONTINUITIES.fetch_add(1, AO::Relaxed) + 1;
eprintln!("[RENDER CONTINUITY] DISCONTINUITY #{}: expected frame {}, got {} (delta={})",
count, expected, src_start_frame, src_start_frame - expected);
}
EXPECTED_NEXT.store(src_start_frame + output_frames as i64, AO::Relaxed);
}
// Interpolate each channel
for dst_ch in 0..dst_channels {
let sample = if src_channels == dst_channels {
// Direct channel mapping
let ch_offset = dst_ch;
for output_frame in 0..output_frames {
let src_frame = src_start_frame + output_frame as i64;
if src_frame < 0 || src_frame as u64 >= audio_file.frames {
break;
}
let sf = src_frame as usize;
// Extract channel samples for interpolation
let mut channel_samples = Vec::with_capacity(KERNEL_SIZE);
for i in -(HALF_KERNEL as i32)..(HALF_KERNEL as i32) {
let idx = src_frame + i;
if idx >= 0 && (idx as usize) < audio_file.frames as usize {
let sample_idx = (idx as usize) * src_channels + ch_offset;
channel_samples.push(audio_file.data[sample_idx]);
} else {
channel_samples.push(0.0);
for dst_ch in 0..dst_channels {
let sample = if src_channels == dst_channels {
get_sample!(sf, dst_ch)
} else if src_channels == 1 {
get_sample!(sf, 0)
} else if dst_channels == 1 {
let mut sum = 0.0f32;
for src_ch in 0..src_channels {
sum += get_sample!(sf, src_ch);
}
}
sum / src_channels as f32
} else {
get_sample!(sf, dst_ch % src_channels)
};
windowed_sinc_interpolate(&channel_samples, frac)
output[output_frame * dst_channels + dst_ch] += sample * gain;
}
} else if src_channels == 1 && dst_channels > 1 {
// Mono to stereo - duplicate
let mut channel_samples = Vec::with_capacity(KERNEL_SIZE);
for i in -(HALF_KERNEL as i32)..(HALF_KERNEL as i32) {
let idx = src_frame + i;
if idx >= 0 && (idx as usize) < audio_file.frames as usize {
channel_samples.push(audio_file.data[idx as usize]);
} else {
channel_samples.push(0.0);
rendered_frames += 1;
}
} else {
// Sample rate conversion with windowed sinc interpolation
let rate_ratio = audio_file.sample_rate as f64 / engine_sample_rate as f64;
const KERNEL_SIZE: usize = 32;
const HALF_KERNEL: usize = KERNEL_SIZE / 2;
for output_frame in 0..output_frames {
let src_position = src_start_position + (output_frame as f64 * rate_ratio);
let src_frame = src_position.floor() as i32;
let frac = (src_position - src_frame as f64) as f32;
if src_frame < 0 || src_frame as usize >= audio_file.frames as usize {
break;
}
for dst_ch in 0..dst_channels {
let src_ch = if src_channels == dst_channels {
dst_ch
} else if src_channels == 1 {
0
} else if dst_channels == 1 {
usize::MAX // sentinel: average all channels below
} else {
dst_ch % src_channels
};
let sample = if src_ch == usize::MAX {
let mut sum = 0.0;
for ch in 0..src_channels {
let mut channel_samples = [0.0f32; KERNEL_SIZE];
for (j, i) in (-(HALF_KERNEL as i32)..(HALF_KERNEL as i32)).enumerate() {
let idx = src_frame + i;
if idx >= 0 && (idx as usize) < audio_file.frames as usize {
channel_samples[j] = get_sample!(idx as usize, ch);
}
}
sum += windowed_sinc_interpolate(&channel_samples, frac);
}
}
windowed_sinc_interpolate(&channel_samples, frac)
} else if src_channels > 1 && dst_channels == 1 {
// Multi-channel to mono - average all source channels
let mut sum = 0.0;
for src_ch in 0..src_channels {
let mut channel_samples = Vec::with_capacity(KERNEL_SIZE);
for i in -(HALF_KERNEL as i32)..(HALF_KERNEL as i32) {
sum / src_channels as f32
} else {
let mut channel_samples = [0.0f32; KERNEL_SIZE];
for (j, i) in (-(HALF_KERNEL as i32)..(HALF_KERNEL as i32)).enumerate() {
let idx = src_frame + i;
if idx >= 0 && (idx as usize) < audio_file.frames as usize {
let sample_idx = (idx as usize) * src_channels + src_ch;
channel_samples.push(audio_file.data[sample_idx]);
} else {
channel_samples.push(0.0);
channel_samples[j] = get_sample!(idx as usize, src_ch);
}
}
sum += windowed_sinc_interpolate(&channel_samples, frac);
}
windowed_sinc_interpolate(&channel_samples, frac)
};
sum / src_channels as f32
output[output_frame * dst_channels + dst_ch] += sample * gain;
}
} else {
// Mismatched channels - use modulo mapping
let src_ch = dst_ch % src_channels;
let mut channel_samples = Vec::with_capacity(KERNEL_SIZE);
for i in -(HALF_KERNEL as i32)..(HALF_KERNEL as i32) {
let idx = src_frame + i;
if idx >= 0 && (idx as usize) < audio_file.frames as usize {
let sample_idx = (idx as usize) * src_channels + src_ch;
channel_samples.push(audio_file.data[sample_idx]);
} else {
channel_samples.push(0.0);
}
}
windowed_sinc_interpolate(&channel_samples, frac)
};
// Mix into output with gain
let output_idx = output_frame * dst_channels + dst_ch;
output[output_idx] += sample * gain;
rendered_frames += 1;
}
}
rendered_frames += 1;
if use_read_ahead && buffer_misses > 0 {
static MISS_COUNT: std::sync::atomic::AtomicU64 = std::sync::atomic::AtomicU64::new(0);
let total = MISS_COUNT.fetch_add(buffer_misses as u64, std::sync::atomic::Ordering::Relaxed) + buffer_misses as u64;
// Log every 100 misses to avoid flooding
if total % 100 < buffer_misses as u64 {
eprintln!("[RENDER] buffer misses this call: {}, total: {}, snap=[{}..{}], src_start_frame={}",
buffer_misses, total, ra_start, ra_end,
(start_time_seconds * audio_file.sample_rate as f64) as u64);
}
}
rendered_frames * dst_channels
}
/// Generate waveform chunks for a file in the pool.
///
/// Chunks are produced at the requested detail level, stored in the
/// waveform cache for later lookup, and returned to the caller.
/// An out-of-range `pool_index` yields an empty vector.
pub fn generate_waveform_chunks(
    &mut self,
    pool_index: usize,
    detail_level: u8,
    chunk_indices: &[u32],
) -> Vec<crate::io::WaveformChunk> {
    // Bail out gracefully when the pool slot does not exist.
    let file = if let Some(f) = self.files.get(pool_index) {
        f
    } else {
        return Vec::new();
    };
    let generated = crate::audio::waveform_cache::WaveformCache::generate_chunks(
        file,
        pool_index,
        detail_level,
        chunk_indices,
    );
    // Cache each chunk under its (pool, level, index) key so future
    // lookups can skip regeneration.
    for c in generated.iter() {
        self.waveform_cache.store_chunk(
            crate::io::WaveformChunkKey {
                pool_index,
                detail_level: c.detail_level,
                chunk_index: c.chunk_index,
            },
            c.peaks.clone(),
        );
    }
    generated
}
/// Generate Level 0 (overview) chunks for a file.
///
/// Intended to be called right after a file is imported so an overview
/// waveform is available immediately. Returns the generated chunks,
/// or an empty vector if `pool_index` is out of range.
pub fn generate_overview_chunks(
    &mut self,
    pool_index: usize,
) -> Vec<crate::io::WaveformChunk> {
    match self.files.get(pool_index) {
        Some(file) => self.waveform_cache.generate_overview_chunks(file, pool_index),
        None => Vec::new(),
    }
}
/// Look up a cached waveform chunk.
///
/// Returns `None` when the chunk has not been generated and cached yet.
pub fn get_waveform_chunk(
    &self,
    pool_index: usize,
    detail_level: u8,
    chunk_index: u32,
) -> Option<&Vec<crate::io::WaveformPeak>> {
    self.waveform_cache.get_chunk(&crate::io::WaveformChunkKey {
        pool_index,
        detail_level,
        chunk_index,
    })
}
/// Check whether a waveform chunk is already cached.
pub fn has_waveform_chunk(
    &self,
    pool_index: usize,
    detail_level: u8,
    chunk_index: u32,
) -> bool {
    self.waveform_cache.has_chunk(&crate::io::WaveformChunkKey {
        pool_index,
        detail_level,
        chunk_index,
    })
}
/// Get waveform cache memory usage in MB.
///
/// Delegates to the cache's own accounting; useful for surfacing
/// memory pressure in diagnostics or the UI.
pub fn waveform_cache_memory_mb(&self) -> f64 {
    self.waveform_cache.memory_usage_mb()
}
/// Get the number of waveform chunks currently held in the cache.
pub fn waveform_chunk_count(&self) -> usize {
    self.waveform_cache.chunk_count()
}
}
impl Default for AudioClipPool {
@ -410,9 +858,38 @@ impl AudioClipPool {
fn embed_from_memory(audio_file: &AudioFile) -> EmbeddedAudioData {
use base64::{Engine as _, engine::general_purpose};
// Convert the f32 interleaved samples to WAV format bytes
// Check if this is a lossy format that should be preserved
let is_lossy = audio_file.original_format.as_ref().map_or(false, |fmt| {
let fmt_lower = fmt.to_lowercase();
fmt_lower == "mp3" || fmt_lower == "ogg" || fmt_lower == "aac"
|| fmt_lower == "m4a" || fmt_lower == "opus"
});
// Check for preserved original bytes first (from previous load cycle)
if let Some(ref original_bytes) = audio_file.original_bytes {
let data_base64 = general_purpose::STANDARD.encode(original_bytes);
return EmbeddedAudioData {
data_base64,
format: audio_file.original_format.clone().unwrap_or_else(|| "wav".to_string()),
};
}
if is_lossy {
// For lossy formats, read the original file bytes (if it still exists)
if let Ok(original_bytes) = std::fs::read(&audio_file.path) {
let data_base64 = general_purpose::STANDARD.encode(&original_bytes);
return EmbeddedAudioData {
data_base64,
format: audio_file.original_format.clone().unwrap_or_else(|| "mp3".to_string()),
};
}
// If we can't read the original file, fall through to WAV conversion
}
// For lossless/PCM or if we couldn't read the original lossy file,
// convert the f32 interleaved samples to WAV format bytes
let wav_data = Self::encode_wav(
&audio_file.data,
audio_file.data(),
audio_file.channels,
audio_file.sample_rate
);
@ -470,13 +947,18 @@ impl AudioClipPool {
entries: Vec<AudioPoolEntry>,
project_path: &Path,
) -> Result<Vec<usize>, String> {
let fn_start = std::time::Instant::now();
eprintln!("📊 [LOAD_SERIALIZED] Starting load_from_serialized with {} entries...", entries.len());
let project_dir = project_path.parent()
.ok_or_else(|| "Project path has no parent directory".to_string())?;
let mut missing_indices = Vec::new();
// Clear existing pool
let clear_start = std::time::Instant::now();
self.files.clear();
eprintln!("📊 [LOAD_SERIALIZED] Clear pool took {:.2}ms", clear_start.elapsed().as_secs_f64() * 1000.0);
// Find the maximum pool index to determine required size
let max_index = entries.iter()
@ -485,12 +967,18 @@ impl AudioClipPool {
.unwrap_or(0);
// Ensure we have space for all entries
let resize_start = std::time::Instant::now();
self.files.resize(max_index + 1, AudioFile::new(PathBuf::new(), Vec::new(), 2, 44100));
eprintln!("📊 [LOAD_SERIALIZED] Resize pool to {} took {:.2}ms", max_index + 1, resize_start.elapsed().as_secs_f64() * 1000.0);
for entry in entries {
let success = if let Some(embedded) = entry.embedded_data {
for (i, entry) in entries.iter().enumerate() {
let entry_start = std::time::Instant::now();
eprintln!("📊 [LOAD_SERIALIZED] Processing entry {}/{}: '{}'", i + 1, entries.len(), entry.name);
let success = if let Some(ref embedded) = entry.embedded_data {
// Load from embedded data
match Self::load_from_embedded_into_pool(self, entry.pool_index, embedded, &entry.name) {
eprintln!("📊 [LOAD_SERIALIZED] Entry has embedded data (format: {})", embedded.format);
match Self::load_from_embedded_into_pool(self, entry.pool_index, embedded.clone(), &entry.name) {
Ok(_) => {
eprintln!("[AudioPool] Successfully loaded embedded audio: {}", entry.name);
true
@ -500,8 +988,9 @@ impl AudioClipPool {
false
}
}
} else if let Some(rel_path) = entry.relative_path {
} else if let Some(ref rel_path) = entry.relative_path {
// Load from file path
eprintln!("📊 [LOAD_SERIALIZED] Entry has file path: {:?}", rel_path);
let full_path = project_dir.join(&rel_path);
if full_path.exists() {
@ -518,8 +1007,12 @@ impl AudioClipPool {
if !success {
missing_indices.push(entry.pool_index);
}
eprintln!("📊 [LOAD_SERIALIZED] Entry {} took {:.2}ms (success: {})", i + 1, entry_start.elapsed().as_secs_f64() * 1000.0, success);
}
eprintln!("📊 [LOAD_SERIALIZED] ✅ Total load_from_serialized time: {:.2}ms", fn_start.elapsed().as_secs_f64() * 1000.0);
Ok(missing_indices)
}
@ -532,29 +1025,43 @@ impl AudioClipPool {
) -> Result<(), String> {
use base64::{Engine as _, engine::general_purpose};
let fn_start = std::time::Instant::now();
eprintln!("📊 [POOL] Loading embedded audio '{}'...", name);
// Decode base64
let step1_start = std::time::Instant::now();
let data = general_purpose::STANDARD
.decode(&embedded.data_base64)
.map_err(|e| format!("Failed to decode base64: {}", e))?;
eprintln!("📊 [POOL] Step 1: Decode base64 ({} bytes) took {:.2}ms", data.len(), step1_start.elapsed().as_secs_f64() * 1000.0);
// Write to temporary file for symphonia to decode
let step2_start = std::time::Instant::now();
let temp_dir = std::env::temp_dir();
let temp_path = temp_dir.join(format!("lightningbeam_embedded_{}.{}", pool_index, embedded.format));
std::fs::write(&temp_path, &data)
.map_err(|e| format!("Failed to write temporary file: {}", e))?;
eprintln!("📊 [POOL] Step 2: Write temp file took {:.2}ms", step2_start.elapsed().as_secs_f64() * 1000.0);
// Load the temporary file using existing infrastructure
let step3_start = std::time::Instant::now();
let result = Self::load_file_into_pool(self, pool_index, &temp_path);
eprintln!("📊 [POOL] Step 3: Decode audio with Symphonia took {:.2}ms", step3_start.elapsed().as_secs_f64() * 1000.0);
// Clean up temporary file
let _ = std::fs::remove_file(&temp_path);
// Update the path to reflect it was embedded
// Update the path to reflect it was embedded, and preserve original bytes
if result.is_ok() && pool_index < self.files.len() {
self.files[pool_index].path = PathBuf::from(format!("<embedded: {}>", name));
// Preserve the original compressed/encoded bytes so re-save doesn't need to re-encode
self.files[pool_index].original_bytes = Some(data);
self.files[pool_index].original_format = Some(embedded.format.clone());
}
eprintln!("📊 [POOL] ✅ Total load_from_embedded time: {:.2}ms", fn_start.elapsed().as_secs_f64() * 1000.0);
result
}
@ -630,11 +1137,17 @@ impl AudioClipPool {
}
}
let audio_file = AudioFile::new(
// Detect original format from file extension
let original_format = file_path.extension()
.and_then(|ext| ext.to_str())
.map(|s| s.to_lowercase());
let audio_file = AudioFile::with_format(
file_path.to_path_buf(),
samples,
channels,
sample_rate,
original_format,
);
if pool_index >= self.files.len() {

View File

@ -1,9 +1,10 @@
use super::buffer_pool::BufferPool;
use super::clip::Clip;
use super::clip::{AudioClipInstanceId, Clip};
use super::midi::{MidiClip, MidiClipId, MidiClipInstance, MidiClipInstanceId, MidiEvent};
use super::midi_pool::MidiClipPool;
use super::pool::AudioClipPool;
use super::track::{AudioTrack, Metatrack, MidiTrack, RenderContext, TrackId, TrackNode};
use serde::{Serialize, Deserialize};
use std::collections::HashMap;
/// Project manages the hierarchical track structure and clip pools
@ -13,6 +14,7 @@ use std::collections::HashMap;
///
/// Clip content is stored in pools (MidiClipPool), while tracks store
/// clip instances that reference the pool content.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Project {
tracks: HashMap<TrackId, TrackNode>,
next_track_id: TrackId,
@ -80,7 +82,7 @@ impl Project {
/// The new group's ID
pub fn add_group_track(&mut self, name: String, parent_id: Option<TrackId>) -> TrackId {
let id = self.next_id();
let group = Metatrack::new(id, name);
let group = Metatrack::new(id, name, self.sample_rate);
self.tracks.insert(id, TrackNode::Group(group));
if let Some(parent) = parent_id {
@ -209,6 +211,11 @@ impl Project {
self.tracks.get_mut(&track_id)
}
/// Iterate over every track in the project as `(id, node)` pairs.
pub fn track_iter(&self) -> impl Iterator<Item = (TrackId, &TrackNode)> {
    self.tracks.iter().map(|(id, node)| (*id, node))
}
/// Get oscilloscope data from a node in a track's graph
pub fn get_oscilloscope_data(&self, track_id: TrackId, node_id: u32, sample_count: usize) -> Option<(Vec<f32>, Vec<f32>)> {
if let Some(TrackNode::Midi(track)) = self.tracks.get(&track_id) {
@ -226,6 +233,18 @@ impl Project {
None
}
/// Get oscilloscope data from a node inside a VoiceAllocator's best voice.
///
/// Returns `None` if the track is not a MIDI track, the node index does
/// not exist, or the node is not a `VoiceAllocatorNode`.
pub fn get_voice_oscilloscope_data(&self, track_id: TrackId, va_node_id: u32, inner_node_id: u32, sample_count: usize) -> Option<(Vec<f32>, Vec<f32>)> {
    let track = match self.tracks.get(&track_id) {
        Some(TrackNode::Midi(t)) => t,
        _ => return None,
    };
    let va_idx = petgraph::stable_graph::NodeIndex::new(va_node_id as usize);
    // Downcast the graph node to the concrete allocator type; any
    // mismatch short-circuits to None via `?`.
    let allocator = track
        .instrument_graph
        .get_node(va_idx)?
        .as_any()
        .downcast_ref::<crate::audio::node_graph::nodes::VoiceAllocatorNode>()?;
    allocator.get_voice_oscilloscope_data(inner_node_id, sample_count)
}
/// Get all root-level track IDs
pub fn root_tracks(&self) -> &[TrackId] {
&self.root_tracks
@ -242,10 +261,11 @@ impl Project {
}
/// Add a clip to an audio track
pub fn add_clip(&mut self, track_id: TrackId, clip: Clip) -> Result<(), &'static str> {
pub fn add_clip(&mut self, track_id: TrackId, clip: Clip) -> Result<AudioClipInstanceId, &'static str> {
if let Some(TrackNode::Audio(track)) = self.tracks.get_mut(&track_id) {
let instance_id = clip.id;
track.add_clip(clip);
Ok(())
Ok(instance_id)
} else {
Err("Track not found or is not an audio track")
}
@ -302,12 +322,12 @@ impl Project {
}
/// Legacy method for backwards compatibility - creates clip and instance from old MidiClip format
pub fn add_midi_clip(&mut self, track_id: TrackId, clip: MidiClip) -> Result<(), &'static str> {
pub fn add_midi_clip(&mut self, track_id: TrackId, clip: MidiClip) -> Result<MidiClipInstanceId, &'static str> {
self.add_midi_clip_at(track_id, clip, 0.0)
}
/// Add a MIDI clip to the pool and create an instance at the given timeline position
pub fn add_midi_clip_at(&mut self, track_id: TrackId, clip: MidiClip, start_time: f64) -> Result<(), &'static str> {
pub fn add_midi_clip_at(&mut self, track_id: TrackId, clip: MidiClip, start_time: f64) -> Result<MidiClipInstanceId, &'static str> {
// Add the clip to the pool (it already has events and duration)
let duration = clip.duration;
let clip_id = clip.id;
@ -317,39 +337,62 @@ impl Project {
let instance_id = self.next_midi_clip_instance_id();
let instance = MidiClipInstance::from_full_clip(instance_id, clip_id, duration, start_time);
self.add_midi_clip_instance(track_id, instance)
self.add_midi_clip_instance(track_id, instance)?;
Ok(instance_id)
}
/// Render all root tracks into the output buffer
/// Remove a MIDI clip instance from a track (for undo/redo support).
///
/// Errors if the track id does not resolve to any track.
pub fn remove_midi_clip(&mut self, track_id: TrackId, instance_id: MidiClipInstanceId) -> Result<(), &'static str> {
    match self.get_track_mut(track_id) {
        Some(track) => {
            track.remove_midi_clip_instance(instance_id);
            Ok(())
        }
        None => Err("Track not found"),
    }
}
/// Remove an audio clip instance from a track (for undo/redo support).
///
/// Errors if the track id does not resolve to any track.
pub fn remove_audio_clip(&mut self, track_id: TrackId, instance_id: AudioClipInstanceId) -> Result<(), &'static str> {
    match self.get_track_mut(track_id) {
        Some(track) => {
            track.remove_audio_clip_instance(instance_id);
            Ok(())
        }
        None => Err("Track not found"),
    }
}
/// Render all root tracks into the output buffer.
///
/// When `live_only` is true, MIDI tracks skip clip event collection and only process
/// their live MIDI queue (note-off tails + keyboard input). Audio tracks produce silence.
/// This lets the caller use the same group-hierarchy render path regardless of play state.
pub fn render(
&mut self,
output: &mut [f32],
audio_pool: &AudioClipPool,
midi_pool: &MidiClipPool,
buffer_pool: &mut BufferPool,
playhead_seconds: f64,
sample_rate: u32,
channels: u32,
live_only: bool,
) {
output.fill(0.0);
let any_solo = self.any_solo();
// Create initial render context
let ctx = RenderContext::new(
playhead_seconds,
sample_rate,
channels,
output.len(),
);
let ctx = RenderContext {
live_only,
..RenderContext::new(playhead_seconds, sample_rate, channels, output.len())
};
// Render each root track
for &track_id in &self.root_tracks.clone() {
// Render each root track (index-based to avoid clone)
for i in 0..self.root_tracks.len() {
let track_id = self.root_tracks[i];
self.render_track(
track_id,
output,
audio_pool,
midi_pool,
buffer_pool,
ctx,
any_solo,
@ -364,7 +407,6 @@ impl Project {
track_id: TrackId,
output: &mut [f32],
audio_pool: &AudioClipPool,
midi_pool: &MidiClipPool,
buffer_pool: &mut BufferPool,
ctx: RenderContext,
any_solo: bool,
@ -407,54 +449,146 @@ impl Project {
// Handle audio track vs MIDI track vs group track
match self.tracks.get_mut(&track_id) {
Some(TrackNode::Audio(track)) => {
// Render audio track directly into output
track.render(output, audio_pool, ctx.playhead_seconds, ctx.sample_rate, ctx.channels);
// Audio tracks have no live input; skip in live_only mode.
if ctx.live_only {
return;
}
// Render audio track into a temp buffer for peak measurement
let mut track_buffer = buffer_pool.acquire();
track_buffer.resize(output.len(), 0.0);
track_buffer.fill(0.0);
track.render(&mut track_buffer, audio_pool, ctx.playhead_seconds, ctx.sample_rate, ctx.channels);
// Accumulate peak level for VU metering (max over meter interval)
let buffer_peak = track_buffer.iter().map(|s| s.abs()).fold(0.0f32, f32::max);
track.peak_level = track.peak_level.max(buffer_peak);
// Mix into output
for (out, src) in output.iter_mut().zip(track_buffer.iter()) {
*out += src;
}
buffer_pool.release(track_buffer);
}
Some(TrackNode::Midi(track)) => {
// Render MIDI track directly into output
track.render(output, midi_pool, ctx.playhead_seconds, ctx.sample_rate, ctx.channels);
// Render MIDI track into a temp buffer for peak measurement
let mut track_buffer = buffer_pool.acquire();
track_buffer.resize(output.len(), 0.0);
track_buffer.fill(0.0);
track.render(&mut track_buffer, &self.midi_clip_pool, ctx);
// Accumulate peak level for VU metering (max over meter interval)
let buffer_peak = track_buffer.iter().map(|s| s.abs()).fold(0.0f32, f32::max);
track.peak_level = track.peak_level.max(buffer_peak);
// Mix into output
for (out, src) in output.iter_mut().zip(track_buffer.iter()) {
*out += src;
}
buffer_pool.release(track_buffer);
}
Some(TrackNode::Group(group)) => {
// Get children IDs, check if this group is soloed, and transform context
let children: Vec<TrackId> = group.children.clone();
// Skip rendering if playhead is outside the metatrack's trim window.
// In live_only mode always render so note-off tails pass through the mixer.
if !ctx.live_only && !group.is_active_at_time(ctx.playhead_seconds) {
return;
}
// Read group properties and transform context before any mutable borrows
let num_children = group.children.len();
let this_group_is_soloed = group.solo;
let child_ctx = group.transform_context(ctx);
// Acquire a temporary buffer for the group mix
let mut group_buffer = buffer_pool.acquire();
group_buffer.resize(output.len(), 0.0);
group_buffer.fill(0.0);
// Recursively render all children into the group buffer
// If this group is soloed (or parent was soloed), children inherit that state
let children_parent_soloed = parent_is_soloed || this_group_is_soloed;
for &child_id in &children {
// Render each child into its own buffer and inject into SubtrackInputsNode.
// One pool buffer is reused per child (no extra allocation per frame).
for i in 0..num_children {
let child_id = match self.tracks.get(&track_id) {
Some(TrackNode::Group(g)) => g.children[i],
_ => break,
};
let mut child_buffer = buffer_pool.acquire();
child_buffer.resize(output.len(), 0.0);
child_buffer.fill(0.0);
self.render_track(
child_id,
&mut group_buffer,
&mut child_buffer,
audio_pool,
midi_pool,
buffer_pool,
child_ctx,
any_solo,
children_parent_soloed,
);
}
// Apply group volume and mix into output
if let Some(TrackNode::Group(group)) = self.tracks.get_mut(&track_id) {
for (out_sample, group_sample) in output.iter_mut().zip(group_buffer.iter()) {
*out_sample += group_sample * group.volume;
// Inject into the SubtrackInputsNode slot for this child
if let Some(TrackNode::Group(group)) = self.tracks.get_mut(&track_id) {
use super::node_graph::nodes::SubtrackInputsNode;
let node_indices: Vec<_> = group.audio_graph.node_indices().collect();
for node_idx in node_indices {
if let Some(gn) = group.audio_graph.get_graph_node_mut(node_idx) {
if gn.node.node_type() == "SubtrackInputs" {
if let Some(si) = gn.node.as_any_mut()
.downcast_mut::<SubtrackInputsNode>()
{
if let Some(slot) = si.subtrack_index_for(child_id) {
si.inject_subtrack_audio(slot, &child_buffer);
}
}
break;
}
}
}
}
buffer_pool.release(child_buffer);
}
// Release buffer back to pool
buffer_pool.release(group_buffer);
// Process children's audio through the metatrack's mixing graph
if let Some(TrackNode::Group(group)) = self.tracks.get_mut(&track_id) {
let mut graph_output = buffer_pool.acquire();
graph_output.resize(output.len(), 0.0);
graph_output.fill(0.0);
group.audio_graph.process(&mut graph_output, &[], ctx.playhead_seconds);
for (out_sample, graph_sample) in output.iter_mut().zip(graph_output.iter()) {
*out_sample += graph_sample * group.volume;
}
buffer_pool.release(graph_output);
}
}
None => {}
}
}
/// Reset all per-clip read-ahead target frames before a new render cycle.
///
/// Walks every audio track's clips; clips without a read-ahead buffer
/// are skipped.
pub fn reset_read_ahead_targets(&self) {
    self.tracks
        .values()
        .filter_map(|node| match node {
            TrackNode::Audio(t) => Some(t.clips.iter()),
            _ => None,
        })
        .flatten()
        .filter_map(|clip| clip.read_ahead.as_deref())
        .for_each(|ra| ra.reset_target_frame());
}
/// Collect per-track peak levels for VU metering and reset accumulators.
///
/// Audio and MIDI tracks report their accumulated peak and are reset to
/// 0.0; group tracks carry no peak accumulator and are omitted.
pub fn collect_track_peaks(&mut self) -> Vec<(TrackId, f32)> {
    self.tracks
        .iter_mut()
        .filter_map(|(&id, node)| {
            // Borrow the accumulator for track kinds that have one.
            let peak = match node {
                TrackNode::Audio(t) => &mut t.peak_level,
                TrackNode::Midi(t) => &mut t.peak_level,
                TrackNode::Group(_) => return None,
            };
            let level = *peak;
            *peak = 0.0;
            Some((id, level))
        })
        .collect()
}
/// Stop all notes on all MIDI tracks
pub fn stop_all_notes(&mut self) {
for track in self.tracks.values_mut() {
@ -464,13 +598,39 @@ impl Project {
}
}
/// Process live MIDI input from all MIDI tracks (called even when not playing)
pub fn process_live_midi(&mut self, output: &mut [f32], sample_rate: u32, channels: u32) {
// Process all MIDI tracks to handle queued live input events
/// Set export (blocking) mode on all clip read-ahead buffers.
/// When enabled, `render_from_file` blocks until the disk reader
/// has filled the needed frames instead of returning silence.
pub fn set_export_mode(&self, export: bool) {
for track in self.tracks.values() {
if let TrackNode::Audio(t) = track {
for clip in &t.clips {
if let Some(ref ra) = clip.read_ahead {
ra.set_export_mode(export);
}
}
}
}
}
/// Reset all node graphs (clears effect buffers on seek)
pub fn reset_all_graphs(&mut self) {
for track in self.tracks.values_mut() {
if let TrackNode::Midi(midi_track) = track {
// Process only queued live events, not clips
midi_track.process_live_input(output, sample_rate, channels);
match track {
TrackNode::Audio(t) => t.effects_graph.reset(),
TrackNode::Midi(t) => t.instrument_graph.reset(),
TrackNode::Group(_) => {}
}
}
}
/// Propagate tempo to all audio graphs (for BeatNode sync).
///
/// Every track kind owns exactly one graph: effects graph for audio
/// tracks, instrument graph for MIDI tracks, mixing graph for groups.
pub fn set_tempo(&mut self, bpm: f32, beats_per_bar: u32) {
    for node in self.tracks.values_mut() {
        let graph = match node {
            TrackNode::Audio(t) => &mut t.effects_graph,
            TrackNode::Midi(t) => &mut t.instrument_graph,
            TrackNode::Group(g) => &mut g.audio_graph,
        };
        graph.set_tempo(bpm, beats_per_bar);
    }
}
@ -493,6 +653,47 @@ impl Project {
track.queue_live_midi(event);
}
}
/// Prepare all tracks for serialization by saving their audio graphs as presets.
pub fn prepare_for_save(&mut self) {
    self.tracks.values_mut().for_each(|node| match node {
        TrackNode::Audio(t) => t.prepare_for_save(),
        TrackNode::Midi(t) => t.prepare_for_save(),
        TrackNode::Group(g) => g.prepare_for_save(),
    });
}
/// Rebuild all audio graphs from presets after deserialization.
///
/// This should be called after deserializing a Project to reconstruct
/// the AudioGraph instances from their stored presets.
///
/// # Arguments
/// * `buffer_size` - Buffer size for audio processing (typically 8192)
///
/// # Errors
/// Propagates the first rebuild failure from any track.
pub fn rebuild_audio_graphs(&mut self, buffer_size: usize) -> Result<(), String> {
    // Copy out the sample rate so the loop only borrows `tracks`.
    let sample_rate = self.sample_rate;
    for node in self.tracks.values_mut() {
        match node {
            TrackNode::Audio(t) => t.rebuild_audio_graph(sample_rate, buffer_size)?,
            TrackNode::Midi(t) => t.rebuild_audio_graph(sample_rate, buffer_size)?,
            TrackNode::Group(g) => g.rebuild_audio_graph(sample_rate, buffer_size)?,
        }
    }
    Ok(())
}
}
impl Default for Project {

View File

@ -12,7 +12,7 @@ pub struct RecordingState {
pub clip_id: ClipId,
/// Path to temporary WAV file
pub temp_file_path: PathBuf,
/// WAV file writer
/// WAV file writer (only used at finalization, not during recording)
pub writer: WavWriter,
/// Sample rate of recording
pub sample_rate: u32,
@ -20,12 +20,8 @@ pub struct RecordingState {
pub channels: u32,
/// Timeline start position in seconds
pub start_time: f64,
/// Total frames written to disk
/// Total frames recorded
pub frames_written: usize,
/// Accumulation buffer for next flush
pub buffer: Vec<f32>,
/// Number of frames to accumulate before flushing
pub flush_interval_frames: usize,
/// Whether recording is currently paused
pub paused: bool,
/// Number of samples remaining to skip (to discard stale buffer data)
@ -36,7 +32,7 @@ pub struct RecordingState {
pub waveform_buffer: Vec<f32>,
/// Number of frames per waveform peak
pub frames_per_peak: usize,
/// All recorded audio data accumulated in memory (for fast finalization)
/// All recorded audio data accumulated in memory (written to disk at finalization)
pub audio_data: Vec<f32>,
}
@ -50,10 +46,8 @@ impl RecordingState {
sample_rate: u32,
channels: u32,
start_time: f64,
flush_interval_seconds: f64,
_flush_interval_seconds: f64, // No longer used - kept for API compatibility
) -> Self {
let flush_interval_frames = (sample_rate as f64 * flush_interval_seconds) as usize;
// Calculate frames per waveform peak
// Target ~300 peaks per second with minimum 1000 samples per peak
let target_peaks_per_second = 300;
@ -68,8 +62,6 @@ impl RecordingState {
channels,
start_time,
frames_written: 0,
buffer: Vec::new(),
flush_interval_frames,
paused: false,
samples_to_skip: 0, // Will be set by engine when it knows buffer size
waveform: Vec::new(),
@ -102,22 +94,16 @@ impl RecordingState {
samples
};
// Add to disk buffer
self.buffer.extend_from_slice(samples_to_process);
// Add to audio data (accumulate in memory for fast finalization)
// Add to audio data (accumulate in memory - disk write happens at finalization only)
self.audio_data.extend_from_slice(samples_to_process);
// Add to waveform buffer and generate peaks incrementally
self.waveform_buffer.extend_from_slice(samples_to_process);
self.generate_waveform_peaks();
// Check if we should flush to disk
let frames_in_buffer = self.buffer.len() / self.channels as usize;
if frames_in_buffer >= self.flush_interval_frames {
self.flush()?;
return Ok(true);
}
// Track frames for duration calculation (no disk I/O in audio callback!)
let frames_added = samples_to_process.len() / self.channels as usize;
self.frames_written += frames_added;
Ok(false)
}
@ -144,37 +130,17 @@ impl RecordingState {
}
}
/// Flush accumulated samples to disk
pub fn flush(&mut self) -> Result<(), std::io::Error> {
if self.buffer.is_empty() {
return Ok(());
}
// Write to WAV file
self.writer.write_samples(&self.buffer)?;
// Update frames written
let frames_flushed = self.buffer.len() / self.channels as usize;
self.frames_written += frames_flushed;
// Clear buffer
self.buffer.clear();
Ok(())
}
/// Get current recording duration in seconds
/// Includes both flushed frames and buffered frames
pub fn duration(&self) -> f64 {
let buffered_frames = self.buffer.len() / self.channels as usize;
let total_frames = self.frames_written + buffered_frames;
total_frames as f64 / self.sample_rate as f64
self.frames_written as f64 / self.sample_rate as f64
}
/// Finalize the recording and return the temp file path, waveform, and audio data
pub fn finalize(mut self) -> Result<(PathBuf, Vec<WaveformPeak>, Vec<f32>), std::io::Error> {
// Flush any remaining samples to disk
self.flush()?;
// Write all audio data to disk at once (outside audio callback - safe to do I/O)
if !self.audio_data.is_empty() {
self.writer.write_samples(&self.audio_data)?;
}
// Generate final waveform peak from any remaining samples
if !self.waveform_buffer.is_empty() {
@ -287,6 +253,23 @@ impl MidiRecordingState {
self.completed_notes.len()
}
/// Get all completed notes plus currently-held notes with a provisional duration.
/// Used for live preview during recording so held notes appear immediately.
pub fn get_notes_with_active(&self, current_time: f64) -> Vec<(f64, u8, u8, f64)> {
    let mut notes = self.completed_notes.clone();
    // Held notes get a duration running up to `current_time`,
    // clamped at zero in case the clock hasn't advanced yet.
    notes.extend(self.active_notes.values().map(|active| {
        (
            active.start_time - self.start_time,
            active.note,
            active.velocity,
            (current_time - active.start_time).max(0.0),
        )
    }));
    notes
}
/// Get the note numbers of all currently held (active) notes.
pub fn active_note_numbers(&self) -> Vec<u8> {
    let mut numbers = Vec::with_capacity(self.active_notes.len());
    numbers.extend(self.active_notes.keys().copied());
    numbers
}
/// Close out all active notes at the given time
/// This should be called when stopping recording to end any held notes
pub fn close_active_notes(&mut self, end_time: f64) {

View File

@ -1,15 +1,22 @@
use super::automation::{AutomationLane, AutomationLaneId, ParameterId};
use super::clip::AudioClipInstance;
use super::midi::{MidiClipInstance, MidiEvent};
use super::clip::{AudioClipInstance, AudioClipInstanceId};
use super::midi::{MidiClipInstance, MidiClipInstanceId, MidiEvent};
use super::midi_pool::MidiClipPool;
use super::node_graph::AudioGraph;
use super::node_graph::nodes::{AudioInputNode, AudioOutputNode};
use super::node_graph::preset::GraphPreset;
use super::pool::AudioClipPool;
use std::collections::HashMap;
use serde::{Serialize, Deserialize};
use std::collections::{HashMap, HashSet};
/// Track ID type
pub type TrackId = u32;
/// Default function for creating empty AudioGraph during deserialization
fn default_audio_graph() -> AudioGraph {
AudioGraph::new(48000, 8192)
}
/// Type alias for backwards compatibility
pub type Track = AudioTrack;
@ -28,6 +35,10 @@ pub struct RenderContext {
pub buffer_size: usize,
/// Accumulated time stretch factor (1.0 = normal, 0.5 = half speed, 2.0 = double speed)
pub time_stretch: f32,
/// When true: skip clip event collection; only render instrument state and live MIDI queue.
/// Used after pause/stop to route note-off tails through the normal group hierarchy
/// without re-triggering notes from clips at the paused position.
pub live_only: bool,
}
impl RenderContext {
@ -44,6 +55,7 @@ impl RenderContext {
channels,
buffer_size,
time_stretch: 1.0,
live_only: false,
}
}
@ -59,6 +71,7 @@ impl RenderContext {
}
/// Node in the track hierarchy - can be an audio track, MIDI track, or a metatrack
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum TrackNode {
Audio(AudioTrack),
Midi(MidiTrack),
@ -128,9 +141,24 @@ impl TrackNode {
TrackNode::Group(group) => group.set_solo(solo),
}
}
/// Remove a MIDI clip instance (only works on MIDI tracks)
pub fn remove_midi_clip_instance(&mut self, instance_id: MidiClipInstanceId) {
if let TrackNode::Midi(track) = self {
track.remove_midi_clip_instance(instance_id);
}
}
/// Remove an audio clip instance (only works on audio tracks)
pub fn remove_audio_clip_instance(&mut self, instance_id: AudioClipInstanceId) {
if let TrackNode::Audio(track) = self {
track.remove_audio_clip_instance(instance_id);
}
}
}
/// Metatrack that contains other tracks with time transformation capabilities
#[derive(Debug, Serialize, Deserialize)]
pub struct Metatrack {
pub id: TrackId,
pub name: String,
@ -144,14 +172,56 @@ pub struct Metatrack {
pub pitch_shift: f32,
/// Time offset in seconds (shift content forward/backward in time)
pub offset: f64,
/// Trim start: offset into the metatrack's internal content (seconds)
/// Children will see time starting from this point
pub trim_start: f64,
/// Trim end: offset into the metatrack's internal content (seconds)
/// None means no end trim (play until content ends)
pub trim_end: Option<f64>,
/// Automation lanes for this metatrack
pub automation_lanes: HashMap<AutomationLaneId, AutomationLane>,
next_automation_id: AutomationLaneId,
/// Audio node graph for effects processing (input → output)
#[serde(skip, default = "default_audio_graph")]
pub audio_graph: AudioGraph,
/// Saved graph preset for serialization
audio_graph_preset: Option<GraphPreset>,
/// True while the mixing graph is still the auto-generated default (no user edits).
/// Used to auto-connect new subtracks and to prompt before loading a preset.
#[serde(default)]
pub graph_is_default: bool,
}
// Manual Clone impl: the runtime `AudioGraph` cannot (or should not) be
// cloned, so a cloned metatrack receives a fresh default graph instead.
// The serialized preset IS cloned, so callers can rebuild the real graph
// via `rebuild_audio_graph` if they need it.
impl Clone for Metatrack {
    fn clone(&self) -> Self {
        Self {
            id: self.id,
            name: self.name.clone(),
            children: self.children.clone(),
            volume: self.volume,
            muted: self.muted,
            solo: self.solo,
            time_stretch: self.time_stretch,
            pitch_shift: self.pitch_shift,
            offset: self.offset,
            trim_start: self.trim_start,
            trim_end: self.trim_end,
            automation_lanes: self.automation_lanes.clone(),
            next_automation_id: self.next_automation_id,
            audio_graph: default_audio_graph(), // Create fresh graph, not cloned
            audio_graph_preset: self.audio_graph_preset.clone(),
            graph_is_default: self.graph_is_default,
        }
    }
}
impl Metatrack {
/// Create a new metatrack
pub fn new(id: TrackId, name: String) -> Self {
/// Create a new metatrack. The mixing graph is set up later via `set_subtrack_graph`
/// once the child track list is known.
pub fn new(id: TrackId, name: String, sample_rate: u32) -> Self {
let default_buffer_size = 8192;
let audio_graph = Self::create_empty_graph(sample_rate, default_buffer_size);
Self {
id,
name,
@ -162,8 +232,238 @@ impl Metatrack {
time_stretch: 1.0,
pitch_shift: 0.0,
offset: 0.0,
trim_start: 0.0,
trim_end: None,
automation_lanes: HashMap::new(),
next_automation_id: 0,
audio_graph,
audio_graph_preset: None,
graph_is_default: true,
}
}
/// Minimal graph used before subtracks are known (just an AudioOutput node).
///
/// Nothing feeds the output node, so this graph produces silence until it is
/// replaced (see `set_subtrack_graph`).
fn create_empty_graph(sample_rate: u32, buffer_size: usize) -> AudioGraph {
    let mut graph = AudioGraph::new(sample_rate, buffer_size);
    let output_node = Box::new(AudioOutputNode::new("Audio Output"));
    let output_id = graph.add_node(output_node);
    // Coordinates only affect where the node appears in the graph editor UI.
    graph.set_node_position(output_id, 500.0, 150.0);
    graph.set_output_node(Some(output_id));
    graph
}
/// Build the explicit subtrack mixing graph: SubtrackInputs → Mixer → AudioOutput.
///
/// `subtracks` is an ordered list of (backend TrackId, display name) for each child.
/// Replaces the current graph and marks `graph_is_default = true`.
pub fn set_subtrack_graph(
    &mut self,
    subtracks: Vec<(TrackId, String)>,
    sample_rate: u32,
    buffer_size: usize,
) {
    use super::node_graph::nodes::{SubtrackInputsNode, MixerNode};
    let n = subtracks.len();
    let mut graph = AudioGraph::new(sample_rate, buffer_size);
    // SubtrackInputs node (N outputs, one per child)
    // NOTE: `new()` initialises buffers as zero-length; call `update_subtracks` immediately
    // to allocate stereo interleaved buffers (buffer_size * 2 samples each).
    let mut inputs_node = SubtrackInputsNode::new("Subtrack Inputs", subtracks);
    // Round-trip the unchanged list through `update_subtracks` purely to
    // trigger that buffer allocation.
    let subtracks_copy = inputs_node.subtracks().to_vec();
    inputs_node.update_subtracks(subtracks_copy, buffer_size);
    let inputs_id = graph.add_node(Box::new(inputs_node));
    // Node positions are editor-UI coordinates only (left → right flow).
    graph.set_node_position(inputs_id, 100.0, 150.0);
    // Mixer node (starts with 1 spare; grows as connections are made)
    let mixer_node = Box::new(MixerNode::new("Mixer"));
    let mixer_id = graph.add_node(mixer_node);
    graph.set_node_position(mixer_id, 350.0, 150.0);
    // AudioOutput node
    let output_node = Box::new(AudioOutputNode::new("Audio Output"));
    let output_id = graph.add_node(output_node);
    graph.set_node_position(output_id, 600.0, 150.0);
    // Connect SubtrackInputs[i] → Mixer[i] for each subtrack
    for i in 0..n {
        let _ = graph.connect(inputs_id, i, mixer_id, i);
    }
    let _ = graph.connect(mixer_id, 0, output_id, 0);
    graph.set_output_node(Some(output_id));
    self.audio_graph = graph;
    // Drop any stale preset and mark the graph as untouched-by-user.
    self.audio_graph_preset = None;
    self.graph_is_default = true;
}
/// Add a new subtrack port to the existing graph.
///
/// If `graph_is_default`: also connects the new port to a new Mixer input.
/// If the user has modified the graph: just adds the port (unconnected).
pub fn add_subtrack_to_graph(&mut self, track_id: TrackId, name: String, buffer_size: usize) {
    use super::node_graph::nodes::SubtrackInputsNode;
    // Find SubtrackInputs node index (identified by node_type string).
    let si_idx = self.audio_graph.node_indices()
        .find(|&idx| self.audio_graph.get_graph_node(idx)
            .map(|n| n.node.node_type() == "SubtrackInputs")
            .unwrap_or(false));
    let si_idx = match si_idx {
        Some(idx) => idx,
        None => return, // No subtrack graph set up yet
    };
    // Get current subtrack count (= new port index after adding)
    let new_slot = {
        // NOTE(review): unwraps assume the index found above is still valid and
        // really is a SubtrackInputsNode — holds as long as nothing mutated the
        // graph between the lookup and here.
        let gn = self.audio_graph.get_graph_node_mut(si_idx).unwrap();
        let si = gn.node.as_any_mut().downcast_mut::<SubtrackInputsNode>().unwrap();
        let mut subtracks = si.subtracks().to_vec();
        subtracks.push((track_id, name));
        let n = subtracks.len();
        si.update_subtracks(subtracks, buffer_size);
        // Rebuild output buffers for the new port count
        n - 1 // index of the newly added slot
    };
    // Reallocate GraphNode output buffers to match new port count
    self.audio_graph.reallocate_node_output_buffers(si_idx, buffer_size);
    if self.graph_is_default {
        // Find the Mixer node and connect the new subtrack port to a new Mixer input
        let mixer_idx = self.audio_graph.node_indices()
            .find(|&idx| self.audio_graph.get_graph_node(idx)
                .map(|n| n.node.node_type() == "Mixer")
                .unwrap_or(false));
        if let Some(mixer_idx) = mixer_idx {
            // n_incoming after connecting = new_slot + 1; auto-grow handled by connect()
            let _ = self.audio_graph.connect(si_idx, new_slot, mixer_idx, new_slot);
        }
    }
}
/// Remove a subtrack from the graph (by TrackId).
///
/// Always disconnects any connections from the removed port and removes the port.
/// If `graph_is_default`: also reshuffles Mixer connections to stay compact.
pub fn remove_subtrack_from_graph(&mut self, track_id: TrackId, buffer_size: usize) {
    use super::node_graph::nodes::SubtrackInputsNode;
    // Locate the SubtrackInputs node by its node_type string.
    let si_idx = self.audio_graph.node_indices()
        .find(|&idx| self.audio_graph.get_graph_node(idx)
            .map(|n| n.node.node_type() == "SubtrackInputs")
            .unwrap_or(false));
    let si_idx = match si_idx {
        Some(idx) => idx,
        None => return,
    };
    // Find the slot index for this track
    let slot = {
        let gn = self.audio_graph.get_graph_node(si_idx).unwrap();
        let si = gn.node.as_any().downcast_ref::<SubtrackInputsNode>().unwrap();
        si.subtrack_index_for(track_id)
    };
    let slot = match slot {
        Some(s) => s,
        // Track isn't wired into this graph — nothing to do.
        None => return,
    };
    // Remove all connections from this output port
    self.audio_graph.disconnect_output_port(si_idx, slot);
    // Update the SubtrackInputsNode's subtrack list
    {
        let gn = self.audio_graph.get_graph_node_mut(si_idx).unwrap();
        let si = gn.node.as_any_mut().downcast_mut::<SubtrackInputsNode>().unwrap();
        let mut subtracks = si.subtracks().to_vec();
        subtracks.remove(slot);
        si.update_subtracks(subtracks, buffer_size);
    }
    // Shrink the GraphNode's output buffers to the new port count.
    self.audio_graph.reallocate_node_output_buffers(si_idx, buffer_size);
    if self.graph_is_default {
        // Rebuild default Mixer connections (they've shifted after removal)
        let mixer_idx = self.audio_graph.node_indices()
            .find(|&idx| self.audio_graph.get_graph_node(idx)
                .map(|n| n.node.node_type() == "Mixer")
                .unwrap_or(false));
        if let Some(mixer_idx) = mixer_idx {
            // Clear all connections TO mixer
            self.audio_graph.disconnect_all_inputs(mixer_idx);
            // Get new subtrack count
            let n = {
                let gn = self.audio_graph.get_graph_node(si_idx).unwrap();
                gn.node.as_any().downcast_ref::<SubtrackInputsNode>().unwrap().num_subtracks()
            };
            // Resize mixer and reconnect; n + 1 keeps one spare input, matching
            // the "starts with 1 spare" convention in set_subtrack_graph.
            {
                let gn = self.audio_graph.get_graph_node_mut(mixer_idx).unwrap();
                let mixer = gn.node.as_any_mut().downcast_mut::<super::node_graph::nodes::MixerNode>().unwrap();
                mixer.resize(n + 1);
            }
            for i in 0..n {
                let _ = self.audio_graph.connect(si_idx, i, mixer_idx, i);
            }
        }
    }
}
/// Return the current ordered subtrack list from SubtrackInputsNode, or empty vec if none.
///
/// Scans the graph for the first `SubtrackInputsNode` and copies its list.
/// This is a `&self` read-only scan, so — unlike `update_subtrack_ids` — no
/// intermediate `Vec` of indices is needed to satisfy the borrow checker.
pub fn current_subtracks(&self) -> Vec<(TrackId, String)> {
    use super::node_graph::nodes::SubtrackInputsNode;
    self.audio_graph
        .node_indices()
        .filter_map(|idx| self.audio_graph.get_graph_node(idx))
        .find_map(|gn| gn.node.as_any().downcast_ref::<SubtrackInputsNode>())
        .map(|si| si.subtracks().to_vec())
        .unwrap_or_default()
}
/// Prepare for serialization by saving the audio graph as a preset.
///
/// The live `AudioGraph` is `#[serde(skip)]`, so the preset is the only form
/// of the graph that survives a save/load round trip; `rebuild_audio_graph`
/// restores it on load.
pub fn prepare_for_save(&mut self) {
    self.audio_graph_preset = Some(self.audio_graph.to_preset("Metatrack Graph"));
}
/// Rebuild the audio graph from preset after deserialization.
///
/// After loading, the caller must call `update_subtrack_ids` to re-associate
/// backend TrackIds with the SubtrackInputsNode's port slots.
pub fn rebuild_audio_graph(&mut self, sample_rate: u32, buffer_size: usize) -> Result<(), String> {
    match &self.audio_graph_preset {
        // A usable preset has at least one node and a designated output node.
        Some(preset) if !preset.nodes.is_empty() && preset.output_node.is_some() => {
            self.audio_graph = AudioGraph::from_preset(preset, sample_rate, buffer_size, None)?;
            // graph_is_default remains as serialized (false for user-modified graphs)
        }
        // Missing, empty, or output-less preset: fall back to the minimal
        // output-only graph and flag it as the auto-generated default.
        _ => {
            self.audio_graph = Self::create_empty_graph(sample_rate, buffer_size);
            self.graph_is_default = true;
        }
    }
    Ok(())
}
/// Re-associate backend TrackIds with the SubtrackInputsNode's port slots after reload.
///
/// The preset stores placeholder TrackId=0 entries; this call fills in the real IDs.
pub fn update_subtrack_ids(&mut self, subtracks: Vec<(TrackId, String)>, buffer_size: usize) {
    use super::node_graph::nodes::SubtrackInputsNode;
    // Collect indices up front: `get_graph_node_mut` needs `&mut self.audio_graph`,
    // which would conflict with holding the `node_indices()` iterator alive.
    for idx in self.audio_graph.node_indices().collect::<Vec<_>>() {
        if let Some(gn) = self.audio_graph.get_graph_node_mut(idx) {
            if let Some(si) = gn.node.as_any_mut().downcast_mut::<SubtrackInputsNode>() {
                // Only one SubtrackInputs node is expected; stop at the first.
                si.update_subtracks(subtracks, buffer_size);
                return;
            }
        }
    }
}
@ -259,11 +559,27 @@ impl Metatrack {
!self.muted && (!any_solo || self.solo)
}
/// Check whether this metatrack should produce audio at the given parent time.
/// Returns false if the playhead is outside the trim window.
pub fn is_active_at_time(&self, parent_playhead: f64) -> bool {
    // Map the parent playhead into this metatrack's local (offset + stretched)
    // timeline — same mapping as `transform_context`.
    let local_time = (parent_playhead - self.offset) * self.time_stretch as f64;
    // Active iff inside [trim_start, trim_end); a missing trim_end means
    // "play until content ends", i.e. no upper bound.
    let past_start = local_time >= self.trim_start;
    let before_end = self.trim_end.map_or(true, |end| local_time < end);
    past_start && before_end
}
/// Transform a render context for this metatrack's children
///
/// Applies time stretching and offset transformations.
/// Applies time stretching, offset, and trim transformations.
/// Time stretch affects how fast content plays: 0.5 = half speed, 2.0 = double speed
/// Offset shifts content forward/backward in time
/// Trim start offsets into the internal content
pub fn transform_context(&self, ctx: RenderContext) -> RenderContext {
let mut transformed = ctx;
@ -277,7 +593,11 @@ impl Metatrack {
// With stretch=0.5, when parent time is 2.0s, child reads from 1.0s (plays slower, pitches down)
// With stretch=2.0, when parent time is 2.0s, child reads from 4.0s (plays faster, pitches up)
// Note: This creates pitch shift as well - true time stretching would require resampling
transformed.playhead_seconds = adjusted_playhead * self.time_stretch as f64;
let stretched = adjusted_playhead * self.time_stretch as f64;
// 3. Add trim_start so children see time starting from the trim point
// If trim_start=2.0, children start seeing time 2.0 when parent reaches offset
transformed.playhead_seconds = stretched + self.trim_start;
// Accumulate time stretch for nested metatracks
transformed.time_stretch *= self.time_stretch;
@ -287,12 +607,21 @@ impl Metatrack {
}
/// MIDI track with MIDI clip instances and a node-based instrument
#[derive(Debug, Serialize, Deserialize)]
pub struct MidiTrack {
pub id: TrackId,
pub name: String,
/// Clip instances placed on this track (reference clips in the MidiClipPool)
pub clip_instances: Vec<MidiClipInstance>,
/// Serialized instrument graph (used for save/load)
#[serde(default, skip_serializing_if = "Option::is_none")]
instrument_graph_preset: Option<GraphPreset>,
/// Runtime instrument graph (rebuilt from preset on load)
#[serde(skip, default = "default_audio_graph")]
pub instrument_graph: AudioGraph,
pub volume: f32,
pub muted: bool,
pub solo: bool,
@ -300,7 +629,42 @@ pub struct MidiTrack {
pub automation_lanes: HashMap<AutomationLaneId, AutomationLane>,
next_automation_id: AutomationLaneId,
/// Queue for live MIDI input (virtual keyboard, MIDI controllers)
#[serde(skip)]
live_midi_queue: Vec<MidiEvent>,
/// Clip instances that were active (overlapping playhead) in the previous render buffer.
/// Used to detect when the playhead exits a clip, so we can send all-notes-off.
#[serde(skip)]
prev_active_instances: HashSet<MidiClipInstanceId>,
/// Peak level of last render() call (for VU metering)
#[serde(skip, default)]
pub peak_level: f32,
/// True while the instrument graph is still the auto-generated default (no user edits).
/// Used to prompt before loading a preset.
#[serde(default)]
pub graph_is_default: bool,
}
// Manual Clone impl: the runtime instrument `AudioGraph` is not cloned — the
// clone gets a fresh empty graph, while the serialized preset is copied so
// the real graph can be rebuilt. Transient render state (live MIDI queue,
// active-instance set, peak meter) is reset rather than copied.
impl Clone for MidiTrack {
    fn clone(&self) -> Self {
        Self {
            id: self.id,
            name: self.name.clone(),
            clip_instances: self.clip_instances.clone(),
            instrument_graph_preset: self.instrument_graph_preset.clone(),
            instrument_graph: default_audio_graph(), // Create fresh graph, not cloned
            volume: self.volume,
            muted: self.muted,
            solo: self.solo,
            automation_lanes: self.automation_lanes.clone(),
            next_automation_id: self.next_automation_id,
            live_midi_queue: Vec::new(), // Don't clone live MIDI queue
            prev_active_instances: HashSet::new(),
            peak_level: 0.0,
            graph_is_default: self.graph_is_default,
        }
    }
}
impl MidiTrack {
@ -309,20 +673,44 @@ impl MidiTrack {
// Use a large buffer size that can accommodate any callback
let default_buffer_size = 8192;
// Start with empty graph — the frontend loads a default instrument preset
// (bass.json) via graph_load_preset which replaces the entire graph
let instrument_graph = AudioGraph::new(sample_rate, default_buffer_size);
Self {
id,
name,
clip_instances: Vec::new(),
instrument_graph: AudioGraph::new(sample_rate, default_buffer_size),
instrument_graph_preset: None,
instrument_graph,
volume: 1.0,
muted: false,
solo: false,
automation_lanes: HashMap::new(),
next_automation_id: 0,
live_midi_queue: Vec::new(),
prev_active_instances: HashSet::new(),
peak_level: 0.0,
graph_is_default: true,
}
}
/// Prepare for serialization by saving the instrument graph as a preset.
///
/// The runtime `instrument_graph` is `#[serde(skip)]`, so this preset is what
/// actually gets written to disk; `rebuild_audio_graph` restores it on load.
pub fn prepare_for_save(&mut self) {
    self.instrument_graph_preset = Some(self.instrument_graph.to_preset("Instrument Graph"));
}
/// Rebuild the instrument graph from preset after deserialization.
///
/// Without a saved preset, the track falls back to a fresh empty graph
/// (the frontend later loads a default instrument preset into it).
pub fn rebuild_audio_graph(&mut self, sample_rate: u32, buffer_size: usize) -> Result<(), String> {
    self.instrument_graph = match &self.instrument_graph_preset {
        Some(preset) => AudioGraph::from_preset(preset, sample_rate, buffer_size, None)?,
        None => AudioGraph::new(sample_rate, buffer_size),
    };
    Ok(())
}
/// Add an automation lane to this track
pub fn add_automation_lane(&mut self, parameter_id: ParameterId) -> AutomationLaneId {
let lane_id = self.next_automation_id;
@ -353,6 +741,11 @@ impl MidiTrack {
self.clip_instances.push(instance);
}
/// Remove a MIDI clip instance from this track by instance ID (for undo/redo support)
pub fn remove_midi_clip_instance(&mut self, instance_id: MidiClipInstanceId) {
    // retain() is a no-op when the ID isn't present, so removal is idempotent.
    self.clip_instances.retain(|instance| instance.id != instance_id);
}
/// Set track volume
pub fn set_volume(&mut self, volume: f32) {
self.volume = volume.max(0.0);
@ -398,61 +791,55 @@ impl MidiTrack {
self.live_midi_queue.clear();
}
/// Process only live MIDI input (queued events) without rendering clips.
/// This is used when playback is stopped but we want to hear live input.
pub fn process_live_input(
    &mut self,
    output: &mut [f32],
    _sample_rate: u32,
    _channels: u32,
) {
    // Drive the instrument graph from the queued live events alone; playhead
    // is passed as 0.0 since there is no transport position while stopped.
    self.instrument_graph.process(output, &self.live_midi_queue, 0.0);
    // Events are one-shot: drop them once consumed.
    self.live_midi_queue.clear();
    // Static track volume only — automation is not evaluated for live input.
    output.iter_mut().for_each(|sample| *sample *= self.volume);
}
/// Render this MIDI track into the output buffer
/// Render this MIDI track into the output buffer.
///
/// When `ctx.live_only` is true, clip event collection is skipped and only the live MIDI
/// queue is processed. This lets note-off tails (and live keyboard input) route through
/// the normal group hierarchy without re-triggering notes from clips at the paused position.
pub fn render(
&mut self,
output: &mut [f32],
midi_pool: &MidiClipPool,
playhead_seconds: f64,
sample_rate: u32,
channels: u32,
ctx: RenderContext,
) {
let buffer_duration_seconds = output.len() as f64 / (sample_rate as f64 * channels as f64);
let buffer_end_seconds = playhead_seconds + buffer_duration_seconds;
// Collect MIDI events from all clip instances that overlap with current time range
let mut midi_events = Vec::new();
for instance in &self.clip_instances {
// Get the clip content from the pool
if let Some(clip) = midi_pool.get_clip(instance.clip_id) {
let events = instance.get_events_in_range(
clip,
playhead_seconds,
buffer_end_seconds,
);
midi_events.extend(events);
if !ctx.live_only {
let buffer_duration_seconds = output.len() as f64 / (ctx.sample_rate as f64 * ctx.channels as f64);
let buffer_end_seconds = ctx.playhead_seconds + buffer_duration_seconds;
// Collect MIDI events from all clip instances that overlap with current time range
let mut currently_active = HashSet::new();
for instance in &self.clip_instances {
if instance.overlaps_range(ctx.playhead_seconds, buffer_end_seconds) {
currently_active.insert(instance.id);
}
if let Some(clip) = midi_pool.get_clip(instance.clip_id) {
let events = instance.get_events_in_range(clip, ctx.playhead_seconds, buffer_end_seconds);
midi_events.extend(events);
}
}
// Send all-notes-off for clip instances that just became inactive
for prev_id in &self.prev_active_instances {
if !currently_active.contains(prev_id) {
for note in 0..128u8 {
midi_events.push(MidiEvent::note_off(ctx.playhead_seconds, 0, note, 0));
}
break;
}
}
self.prev_active_instances = currently_active;
}
// Add live MIDI events (from virtual keyboard or MIDI controllers)
// This allows real-time input to be heard during playback/recording
midi_events.extend(self.live_midi_queue.drain(..));
// Generate audio using instrument graph
self.instrument_graph.process(output, &midi_events, playhead_seconds);
self.instrument_graph.process(output, &midi_events, ctx.playhead_seconds);
// Evaluate and apply automation
let effective_volume = self.evaluate_automation_at_time(playhead_seconds);
// Evaluate and apply automation (skip automation in live_only mode — no playhead to evaluate at)
let effective_volume = if ctx.live_only { self.volume } else { self.evaluate_automation_at_time(ctx.playhead_seconds) };
// Apply track volume
for sample in output.iter_mut() {
@ -485,6 +872,7 @@ impl MidiTrack {
}
/// Audio track with audio clip instances
#[derive(Debug, Serialize, Deserialize)]
pub struct AudioTrack {
pub id: TrackId,
pub name: String,
@ -496,8 +884,47 @@ pub struct AudioTrack {
/// Automation lanes for this track
pub automation_lanes: HashMap<AutomationLaneId, AutomationLane>,
next_automation_id: AutomationLaneId,
/// Effects processing graph for this audio track
/// Serialized effects graph (used for save/load)
#[serde(default, skip_serializing_if = "Option::is_none")]
effects_graph_preset: Option<GraphPreset>,
/// Runtime effects processing graph (rebuilt from preset on load)
#[serde(skip, default = "default_audio_graph")]
pub effects_graph: AudioGraph,
/// Pre-allocated buffer for clip rendering (avoids heap allocation per callback)
#[serde(skip, default)]
clip_render_buffer: Vec<f32>,
/// Peak level of last render() call (for VU metering)
#[serde(skip, default)]
pub peak_level: f32,
/// True while the effects graph is still the auto-generated default (no user edits).
/// Used to prompt before loading a preset.
#[serde(default)]
pub graph_is_default: bool,
}
// Manual Clone impl: the runtime effects `AudioGraph` gets replaced with a
// fresh default (graphs aren't cloned), while the serialized preset is copied
// so the real graph can be rebuilt. Scratch state (clip render buffer, peak
// meter) is reset, not copied.
impl Clone for AudioTrack {
    fn clone(&self) -> Self {
        Self {
            id: self.id,
            name: self.name.clone(),
            clips: self.clips.clone(),
            volume: self.volume,
            muted: self.muted,
            solo: self.solo,
            automation_lanes: self.automation_lanes.clone(),
            next_automation_id: self.next_automation_id,
            effects_graph_preset: self.effects_graph_preset.clone(),
            effects_graph: default_audio_graph(), // Create fresh graph, not cloned
            clip_render_buffer: Vec::new(),
            peak_level: 0.0,
            graph_is_default: self.graph_is_default,
        }
    }
}
impl AudioTrack {
@ -536,10 +963,63 @@ impl AudioTrack {
solo: false,
automation_lanes: HashMap::new(),
next_automation_id: 0,
effects_graph_preset: None,
effects_graph,
clip_render_buffer: Vec::new(),
peak_level: 0.0,
graph_is_default: true,
}
}
/// Prepare for serialization by saving the effects graph as a preset.
///
/// The runtime `effects_graph` is `#[serde(skip)]`, so this preset is the
/// only persisted form; `rebuild_audio_graph` restores it after load.
pub fn prepare_for_save(&mut self) {
    self.effects_graph_preset = Some(self.effects_graph.to_preset("Effects Graph"));
}
/// Rebuild the effects graph from preset after deserialization.
///
/// A preset is only considered usable when it contains at least one node and
/// designates an output node; anything else falls back to the default
/// AudioInput → AudioOutput pass-through graph.
pub fn rebuild_audio_graph(&mut self, sample_rate: u32, buffer_size: usize) -> Result<(), String> {
    let usable_preset = self
        .effects_graph_preset
        .as_ref()
        .filter(|p| !p.nodes.is_empty() && p.output_node.is_some());
    self.effects_graph = match usable_preset {
        Some(preset) => AudioGraph::from_preset(preset, sample_rate, buffer_size, None)?,
        None => Self::create_default_graph(sample_rate, buffer_size),
    };
    Ok(())
}
/// Create a default effects graph with AudioInput -> AudioOutput.
///
/// This is an identity (pass-through) chain: the track's clip mix enters at
/// AudioInput and leaves unchanged at AudioOutput until the user inserts
/// effect nodes between them.
fn create_default_graph(sample_rate: u32, buffer_size: usize) -> AudioGraph {
    let mut effects_graph = AudioGraph::new(sample_rate, buffer_size);
    // Add AudioInput node (positions are graph-editor UI coordinates).
    let input_node = Box::new(AudioInputNode::new("Audio Input"));
    let input_id = effects_graph.add_node(input_node);
    effects_graph.set_node_position(input_id, 100.0, 150.0);
    // Add AudioOutput node
    let output_node = Box::new(AudioOutputNode::new("Audio Output"));
    let output_id = effects_graph.add_node(output_node);
    effects_graph.set_node_position(output_id, 500.0, 150.0);
    // Connect AudioInput -> AudioOutput
    let _ = effects_graph.connect(input_id, 0, output_id, 0);
    // Set the AudioOutput node as the graph's output
    effects_graph.set_output_node(Some(output_id));
    effects_graph
}
/// Add an automation lane to this track
pub fn add_automation_lane(&mut self, parameter_id: ParameterId) -> AutomationLaneId {
let lane_id = self.next_automation_id;
@ -570,6 +1050,11 @@ impl AudioTrack {
self.clips.push(clip);
}
/// Remove an audio clip instance from this track by instance ID (for undo/redo support)
pub fn remove_audio_clip_instance(&mut self, instance_id: AudioClipInstanceId) {
    // retain() is a no-op when the ID isn't present, so removal is idempotent.
    self.clips.retain(|instance| instance.id != instance_id);
}
/// Set track volume (0.0 = silence, 1.0 = unity gain, >1.0 = amplification)
pub fn set_volume(&mut self, volume: f32) {
self.volume = volume.max(0.0);
@ -603,11 +1088,13 @@ impl AudioTrack {
let buffer_duration_seconds = output.len() as f64 / (sample_rate as f64 * channels as f64);
let buffer_end_seconds = playhead_seconds + buffer_duration_seconds;
// Create a temporary buffer for clip rendering
let mut clip_buffer = vec![0.0f32; output.len()];
// Split borrow: take clip_render_buffer out to avoid borrow conflict with &self methods
let mut clip_buffer = std::mem::take(&mut self.clip_render_buffer);
clip_buffer.resize(output.len(), 0.0);
clip_buffer.fill(0.0);
let mut rendered = 0;
// Render all active clip instances into the temporary buffer
// Render all active clip instances into the buffer
for clip in &self.clips {
// Check if clip overlaps with current buffer time range
if clip.external_start < buffer_end_seconds && clip.external_end() > playhead_seconds {
@ -638,6 +1125,9 @@ impl AudioTrack {
// Process through the effects graph (this will write to output buffer)
self.effects_graph.process(output, &[], playhead_seconds);
// Put the buffer back for reuse next callback
self.clip_render_buffer = clip_buffer;
// Evaluate and apply automation
let effective_volume = self.evaluate_automation_at_time(playhead_seconds);
@ -701,7 +1191,7 @@ impl AudioTrack {
}
// Calculate combined gain
let combined_gain = clip.gain * self.volume;
let combined_gain = clip.gain;
let mut total_rendered = 0;
@ -711,8 +1201,8 @@ impl AudioTrack {
// For now, render in a simpler way - iterate through the timeline range
// and use get_content_position for each sample position
let output_start_offset = ((render_start_seconds - playhead_seconds) * samples_per_second) as usize;
let output_end_offset = ((render_end_seconds - playhead_seconds) * samples_per_second) as usize;
let output_start_offset = ((render_start_seconds - playhead_seconds) * samples_per_second + 0.5) as usize;
let output_end_offset = ((render_end_seconds - playhead_seconds) * samples_per_second + 0.5) as usize;
if output_end_offset > output.len() || output_start_offset > output.len() {
return 0;
@ -732,6 +1222,7 @@ impl AudioTrack {
combined_gain,
sample_rate,
channels,
clip.read_ahead.as_deref(),
);
} else {
// Looping case: need to handle wrap-around at loop boundaries
@ -766,6 +1257,7 @@ impl AudioTrack {
combined_gain,
sample_rate,
channels,
clip.read_ahead.as_deref(),
);
total_rendered += rendered;

View File

@ -0,0 +1,290 @@
//! Waveform chunk cache for scalable multi-resolution waveform generation
//!
//! This module provides a chunk-based waveform caching system that generates
//! waveform data progressively at multiple detail levels, avoiding the limitations
//! of the old fixed 20,000-peak approach.
use crate::io::{WaveformChunk, WaveformChunkKey, WaveformPeak};
use crate::audio::pool::AudioFile;
use std::collections::HashMap;
/// Detail levels for multi-resolution waveform storage
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DetailLevel {
    Overview = 0, // 1 peak per second
    Low = 1,      // 10 peaks per second
    Medium = 2,   // 100 peaks per second
    High = 3,     // 1000 peaks per second
    Max = 4,      // Full resolution (sample-accurate)
}

impl DetailLevel {
    /// Get peaks per second for this detail level
    pub fn peaks_per_second(self) -> usize {
        match self {
            // Sample-accurate level: approximated with a typical 48 kHz rate.
            DetailLevel::Max => 48000,
            // Levels 0..=3 are exact powers of ten (1, 10, 100, 1000), and the
            // discriminant doubles as the exponent.
            level => 10usize.pow(level as u32),
        }
    }

    /// Create from u8 value
    pub fn from_u8(value: u8) -> Option<Self> {
        // Discriminants are contiguous from 0, so an indexed table suffices;
        // out-of-range values fall off the end and yield None.
        const ALL: [DetailLevel; 5] = [
            DetailLevel::Overview,
            DetailLevel::Low,
            DetailLevel::Medium,
            DetailLevel::High,
            DetailLevel::Max,
        ];
        ALL.get(value as usize).copied()
    }
}
/// Priority for chunk generation.
///
/// The derived `Ord` follows the discriminants, so High > Medium > Low —
/// suitable for max-first ordering in a priority queue.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum ChunkPriority {
    Low = 0,    // Background generation
    Medium = 1, // Precache adjacent to viewport
    High = 2,   // Visible in current viewport
}
/// Chunk generation request: which chunk to generate (`key`) and how urgently
/// (`priority`), for scheduling by a generation queue.
#[derive(Debug, Clone)]
pub struct ChunkGenerationRequest {
    pub key: WaveformChunkKey,
    pub priority: ChunkPriority,
}
/// Waveform chunk cache with multi-resolution support
pub struct WaveformCache {
    /// Cached chunks indexed by key
    chunks: HashMap<WaveformChunkKey, Vec<WaveformPeak>>,
    /// Maximum memory usage in MB (for future LRU eviction; currently only
    /// stored, never enforced)
    _max_memory_mb: usize,
    /// Current memory usage estimate in bytes (peak data only; does not count
    /// HashMap overhead or key storage)
    current_memory_bytes: usize,
}
impl WaveformCache {
/// Create a new waveform cache with the specified memory limit
pub fn new(max_memory_mb: usize) -> Self {
Self {
chunks: HashMap::new(),
_max_memory_mb: max_memory_mb,
current_memory_bytes: 0,
}
}
/// Get a chunk from the cache
pub fn get_chunk(&self, key: &WaveformChunkKey) -> Option<&Vec<WaveformPeak>> {
self.chunks.get(key)
}
/// Store a chunk in the cache
pub fn store_chunk(&mut self, key: WaveformChunkKey, peaks: Vec<WaveformPeak>) {
let chunk_size = peaks.len() * std::mem::size_of::<WaveformPeak>();
self.current_memory_bytes += chunk_size;
self.chunks.insert(key, peaks);
// TODO: Implement LRU eviction if memory exceeds limit
}
/// Check if a chunk exists in the cache
pub fn has_chunk(&self, key: &WaveformChunkKey) -> bool {
self.chunks.contains_key(key)
}
/// Clear all chunks for a specific pool index (when file is unloaded)
pub fn clear_pool(&mut self, pool_index: usize) {
self.chunks.retain(|key, peaks| {
if key.pool_index == pool_index {
let chunk_size = peaks.len() * std::mem::size_of::<WaveformPeak>();
self.current_memory_bytes = self.current_memory_bytes.saturating_sub(chunk_size);
false
} else {
true
}
});
}
/// Generate a single waveform chunk for an audio file
///
/// This generates peaks for a specific time range at a specific detail level.
/// The chunk covers a time range based on the detail level and chunk index.
pub fn generate_chunk(
audio_file: &AudioFile,
detail_level: u8,
chunk_index: u32,
) -> Option<WaveformChunk> {
let level = DetailLevel::from_u8(detail_level)?;
let peaks_per_second = level.peaks_per_second();
// Calculate time range for this chunk based on detail level
// Each chunk covers a varying amount of time depending on detail level
let chunk_duration_seconds = match level {
DetailLevel::Overview => 60.0, // 60 seconds per chunk (60 peaks)
DetailLevel::Low => 30.0, // 30 seconds per chunk (300 peaks)
DetailLevel::Medium => 10.0, // 10 seconds per chunk (1000 peaks)
DetailLevel::High => 5.0, // 5 seconds per chunk (5000 peaks)
DetailLevel::Max => 1.0, // 1 second per chunk (48000 peaks)
};
let start_time = chunk_index as f64 * chunk_duration_seconds;
let end_time = start_time + chunk_duration_seconds;
// Check if this chunk is within the audio file duration
let audio_duration = audio_file.duration_seconds();
if start_time >= audio_duration {
return None; // Chunk is completely beyond file end
}
// Clamp end_time to file duration
let end_time = end_time.min(audio_duration);
// Calculate frame range
let start_frame = (start_time * audio_file.sample_rate as f64) as usize;
let end_frame = (end_time * audio_file.sample_rate as f64) as usize;
// Calculate number of peaks for this time range
let duration = end_time - start_time;
let target_peaks = (duration * peaks_per_second as f64).ceil() as usize;
if target_peaks == 0 {
return None;
}
// Generate peaks using the existing method
let peaks = audio_file.generate_waveform_overview_range(
start_frame,
end_frame,
target_peaks,
);
Some(WaveformChunk {
audio_pool_index: 0, // Will be set by caller
detail_level,
chunk_index,
time_range: (start_time, end_time),
peaks,
})
}
/// Generate multiple chunks for an audio file
///
/// This is a convenience method for generating several chunks at once.
pub fn generate_chunks(
audio_file: &AudioFile,
pool_index: usize,
detail_level: u8,
chunk_indices: &[u32],
) -> Vec<WaveformChunk> {
chunk_indices
.iter()
.filter_map(|&chunk_index| {
let mut chunk = Self::generate_chunk(audio_file, detail_level, chunk_index)?;
chunk.audio_pool_index = pool_index;
Some(chunk)
})
.collect()
}
/// Calculate how many chunks are needed for a file at a given detail level
pub fn calculate_chunk_count(duration_seconds: f64, detail_level: u8) -> u32 {
let level = match DetailLevel::from_u8(detail_level) {
Some(l) => l,
None => return 0,
};
let chunk_duration_seconds = match level {
DetailLevel::Overview => 60.0,
DetailLevel::Low => 30.0,
DetailLevel::Medium => 10.0,
DetailLevel::High => 5.0,
DetailLevel::Max => 1.0,
};
((duration_seconds / chunk_duration_seconds).ceil() as u32).max(1)
}
/// Generate every Level 0 (overview) chunk for an audio file.
///
/// Intended to run immediately after import so the timeline can draw a
/// thumbnail right away. Each generated chunk is also inserted into this
/// cache (peaks cloned), and the full list is returned to the caller.
pub fn generate_overview_chunks(
    &mut self,
    audio_file: &AudioFile,
    pool_index: usize,
) -> Vec<WaveformChunk> {
    let total = Self::calculate_chunk_count(audio_file.duration_seconds(), 0);
    let indices: Vec<u32> = (0..total).collect();
    let chunks = Self::generate_chunks(audio_file, pool_index, 0, &indices);

    // Mirror every generated chunk into the cache, keyed by
    // (pool slot, detail level, chunk index).
    for chunk in chunks.iter() {
        self.store_chunk(
            WaveformChunkKey {
                pool_index,
                detail_level: chunk.detail_level,
                chunk_index: chunk.chunk_index,
            },
            chunk.peaks.clone(),
        );
    }
    chunks
}
/// Bytes currently occupied by cached peak data.
pub fn memory_usage_bytes(&self) -> usize {
    self.current_memory_bytes
}

/// Current memory usage expressed in decimal megabytes.
pub fn memory_usage_mb(&self) -> f64 {
    (self.current_memory_bytes as f64) / 1e6
}

/// How many chunks are currently resident in the cache.
pub fn chunk_count(&self) -> usize {
    self.chunks.len()
}
}
impl Default for WaveformCache {
    /// Default-sized cache; the argument to `new` is the memory budget in MB.
    fn default() -> Self {
        Self::new(100) // Default 100MB cache
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Each detail level maps to a fixed peak density.
    #[test]
    fn test_detail_level_peaks_per_second() {
        let cases = [
            (DetailLevel::Overview, 1),
            (DetailLevel::Low, 10),
            (DetailLevel::Medium, 100),
            (DetailLevel::High, 1000),
        ];
        for (level, pps) in cases {
            assert_eq!(level.peaks_per_second(), pps);
        }
    }

    /// Chunk count is ceil(duration / chunk span), with a minimum of 1.
    #[test]
    fn test_chunk_count_calculation() {
        // (duration_seconds, detail_level, expected chunk count)
        let cases = [
            (60.0, 0, 1),  // exactly one Overview (60s) chunk
            (120.0, 0, 2), // two Overview chunks
            (10.0, 2, 1),  // one Medium (10s) chunk
            (25.0, 2, 3),  // 25s / 10s rounds up to 3
        ];
        for (duration, level, expected) in cases {
            assert_eq!(WaveformCache::calculate_chunk_count(duration, level), expected);
        }
    }
}

View File

@ -1,6 +1,6 @@
use crate::audio::{
AutomationLaneId, ClipId, CurveType, MidiClip, MidiClipId, ParameterId,
TrackId,
AudioClipInstanceId, AutomationLaneId, ClipId, CurveType, MidiClip, MidiClipId,
MidiClipInstanceId, ParameterId, TrackId,
};
use crate::audio::buffer_pool::BufferPoolStats;
use crate::audio::node_graph::nodes::LoopMode;
@ -38,8 +38,8 @@ pub enum Command {
ExtendClip(TrackId, ClipId, f64),
// Metatrack management commands
/// Create a new metatrack with a name
CreateMetatrack(String),
/// Create a new metatrack with a name and optional parent group
CreateMetatrack(String, Option<TrackId>),
/// Add a track to a metatrack (track_id, metatrack_id)
AddToMetatrack(TrackId, TrackId),
/// Remove a track from its parent metatrack
@ -54,19 +54,28 @@ pub enum Command {
SetOffset(TrackId, f64),
/// Set metatrack pitch shift in semitones (track_id, semitones) - for future use
SetPitchShift(TrackId, f32),
/// Set metatrack trim start in seconds (track_id, trim_start)
/// Children won't hear content before this point
SetTrimStart(TrackId, f64),
/// Set metatrack trim end in seconds (track_id, trim_end)
/// None means no end trim
SetTrimEnd(TrackId, Option<f64>),
// Audio track commands
/// Create a new audio track with a name
CreateAudioTrack(String),
/// Create a new audio track with a name and optional parent group
CreateAudioTrack(String, Option<TrackId>),
/// Add an audio file to the pool (path, data, channels, sample_rate)
/// Returns the pool index via an AudioEvent
AddAudioFile(String, Vec<f32>, u32, u32),
/// Add a clip to an audio track (track_id, pool_index, start_time, duration, offset)
AddAudioClip(TrackId, usize, f64, f64, f64),
/// Add a clip to an audio track (track_id, clip_id, pool_index, start_time, duration, offset)
/// The clip_id is pre-assigned by the caller (via EngineController::next_audio_clip_id())
AddAudioClip(TrackId, AudioClipInstanceId, usize, f64, f64, f64),
// MIDI commands
/// Create a new MIDI track with a name
CreateMidiTrack(String),
/// Create a new MIDI track with a name and optional parent group
CreateMidiTrack(String, Option<TrackId>),
/// Add a MIDI clip to the pool without placing it on a track
AddMidiClipToPool(MidiClip),
/// Create a new MIDI clip on a track (track_id, start_time, duration)
CreateMidiClip(TrackId, f64, f64),
/// Add a MIDI note to a clip (track_id, clip_id, time_offset, note, velocity, duration)
@ -76,6 +85,10 @@ pub enum Command {
/// Update MIDI clip notes (track_id, clip_id, notes: Vec<(start_time, note, velocity, duration)>)
/// NOTE: May need to switch to individual note operations if this becomes slow on clips with many notes
UpdateMidiClipNotes(TrackId, MidiClipId, Vec<(f64, u8, u8, f64)>),
/// Remove a MIDI clip instance from a track (track_id, instance_id) - for undo/redo support
RemoveMidiClip(TrackId, MidiClipInstanceId),
/// Remove an audio clip instance from a track (track_id, instance_id) - for undo/redo support
RemoveAudioClip(TrackId, AudioClipInstanceId),
// Diagnostics commands
/// Request buffer pool statistics
@ -126,6 +139,8 @@ pub enum Command {
// Metronome command
/// Enable or disable the metronome click track
SetMetronomeEnabled(bool),
/// Set project tempo and time signature (bpm, (numerator, denominator))
SetTempo(f32, (u32, u32)),
// Node graph commands
/// Add a node to a track's instrument graph (track_id, node_type, position_x, position_y)
@ -140,28 +155,75 @@ pub enum Command {
GraphConnectInTemplate(TrackId, u32, u32, usize, u32, usize),
/// Disconnect two nodes in a track's graph (track_id, from_node, from_port, to_node, to_port)
GraphDisconnect(TrackId, u32, usize, u32, usize),
/// Disconnect nodes in a VoiceAllocator template (track_id, voice_allocator_node_id, from_node, from_port, to_node, to_port)
GraphDisconnectInTemplate(TrackId, u32, u32, usize, u32, usize),
/// Remove a node from a VoiceAllocator's template graph (track_id, voice_allocator_node_id, node_index)
GraphRemoveNodeFromTemplate(TrackId, u32, u32),
/// Set a parameter on a node (track_id, node_index, param_id, value)
GraphSetParameter(TrackId, u32, u32, f32),
/// Set a parameter on a node in a VoiceAllocator's template graph (track_id, voice_allocator_node_id, node_index, param_id, value)
GraphSetParameterInTemplate(TrackId, u32, u32, u32, f32),
/// Set the UI position of a node (track_id, node_index, x, y)
GraphSetNodePosition(TrackId, u32, f32, f32),
/// Set the UI position of a node in a VoiceAllocator's template (track_id, voice_allocator_id, node_index, x, y)
GraphSetNodePositionInTemplate(TrackId, u32, u32, f32, f32),
/// Set which node receives MIDI events (track_id, node_index, enabled)
GraphSetMidiTarget(TrackId, u32, bool),
/// Set which node is the audio output (track_id, node_index)
GraphSetOutputNode(TrackId, u32),
/// Set frontend-only group definitions on a track's graph (track_id, serialized groups)
GraphSetGroups(TrackId, Vec<crate::audio::node_graph::preset::SerializedGroup>),
/// Set frontend-only group definitions on a VA template graph (track_id, voice_allocator_id, serialized groups)
GraphSetGroupsInTemplate(TrackId, u32, Vec<crate::audio::node_graph::preset::SerializedGroup>),
/// Save current graph as a preset (track_id, preset_path, preset_name, description, tags)
GraphSavePreset(TrackId, String, String, String, Vec<String>),
/// Load a preset into a track's graph (track_id, preset_path)
GraphLoadPreset(TrackId, String),
// Metatrack subtrack graph commands
/// Replace a metatrack's mixing graph with the default SubtrackInputs→Mixer→Output layout.
/// (metatrack_id, ordered list of (child_track_id, display_name))
SetMetatrackSubtrackGraph(TrackId, Vec<(TrackId, String)>),
/// Add a new subtrack port to a metatrack's SubtrackInputsNode.
/// (metatrack_id, child_track_id, display_name)
AddMetatrackSubtrack(TrackId, TrackId, String),
/// Remove a subtrack port from a metatrack's SubtrackInputsNode.
/// (metatrack_id, child_track_id)
RemoveMetatrackSubtrack(TrackId, TrackId),
/// Re-associate backend TrackIds with SubtrackInputsNode slots after project reload.
/// (metatrack_id, ordered list of (child_track_id, display_name))
UpdateMetatrackSubtrackIds(TrackId, Vec<(TrackId, String)>),
/// Set or clear the graph_is_default flag on any track (track_id, value)
SetGraphIsDefault(TrackId, bool),
/// Save a VoiceAllocator's template graph as a preset (track_id, voice_allocator_id, preset_path, preset_name)
GraphSaveTemplatePreset(TrackId, u32, String, String),
/// Compile and set a BeamDSP script on a Script node (track_id, node_id, source_code)
GraphSetScript(TrackId, u32, String),
/// Load audio sample data into a Script node's sample slot (track_id, node_id, slot_index, audio_data, sample_rate, name)
GraphSetScriptSample(TrackId, u32, usize, Vec<f32>, u32, String),
/// Load a NAM model into an AmpSim node (track_id, node_id, model_path)
AmpSimLoadModel(TrackId, u32, String),
/// Load a sample into a SimpleSampler node (track_id, node_id, file_path)
SamplerLoadSample(TrackId, u32, String),
/// Load a sample from the audio pool into a SimpleSampler node (track_id, node_id, pool_index)
SamplerLoadFromPool(TrackId, u32, usize),
/// Set the root note (original pitch) for a SimpleSampler node (track_id, node_id, midi_note)
SamplerSetRootNote(TrackId, u32, u8),
/// Add a sample layer to a MultiSampler node (track_id, node_id, file_path, key_min, key_max, root_key, velocity_min, velocity_max, loop_start, loop_end, loop_mode)
MultiSamplerAddLayer(TrackId, u32, String, u8, u8, u8, u8, u8, Option<usize>, Option<usize>, LoopMode),
/// Add a sample layer from the audio pool to a MultiSampler node (track_id, node_id, pool_index, key_min, key_max, root_key)
MultiSamplerAddLayerFromPool(TrackId, u32, usize, u8, u8, u8),
/// Update a MultiSampler layer's configuration (track_id, node_id, layer_index, key_min, key_max, root_key, velocity_min, velocity_max, loop_start, loop_end, loop_mode)
MultiSamplerUpdateLayer(TrackId, u32, usize, u8, u8, u8, u8, u8, Option<usize>, Option<usize>, LoopMode),
/// Remove a layer from a MultiSampler node (track_id, node_id, layer_index)
MultiSamplerRemoveLayer(TrackId, u32, usize),
/// Clear all layers from a MultiSampler node (track_id, node_id)
MultiSamplerClearLayers(TrackId, u32),
// Automation Input Node commands
/// Add or update a keyframe on an AutomationInput node (track_id, node_id, time, value, interpolation, ease_out, ease_in)
@ -170,6 +232,29 @@ pub enum Command {
AutomationRemoveKeyframe(TrackId, u32, f64),
/// Set the display name of an AutomationInput node (track_id, node_id, name)
AutomationSetName(TrackId, u32, String),
// Waveform chunk generation commands
/// Generate waveform chunks for an audio file
/// (pool_index, detail_level, chunk_indices, priority)
GenerateWaveformChunks {
pool_index: usize,
detail_level: u8,
chunk_indices: Vec<u32>,
priority: u8, // 0=Low, 1=Medium, 2=High
},
// Input monitoring/gain commands
/// Enable or disable input monitoring (mic level metering)
SetInputMonitoring(bool),
/// Set the input gain multiplier (applied before recording)
SetInputGain(f32),
// Async audio import
/// Import an audio file asynchronously. The engine probes the file format
/// and either memory-maps it (WAV/AIFF) or sets up stream decode
/// (compressed). Emits `AudioFileReady` when playback-ready and
/// `AudioDecodeProgress` for compressed files as waveform data is decoded.
ImportAudio(std::path::PathBuf),
}
/// Events sent from audio thread back to UI/control thread
@ -191,8 +276,8 @@ pub enum AudioEvent {
BufferPoolStats(BufferPoolStats),
/// Automation lane created (track_id, lane_id, parameter_id)
AutomationLaneCreated(TrackId, AutomationLaneId, ParameterId),
/// Recording started (track_id, clip_id)
RecordingStarted(TrackId, ClipId),
/// Recording started (track_id, clip_id, sample_rate, channels)
RecordingStarted(TrackId, ClipId, u32, u32),
/// Recording progress update (clip_id, current_duration)
RecordingProgress(ClipId, f64),
/// Recording stopped (clip_id, pool_index, waveform)
@ -222,6 +307,71 @@ pub enum AudioEvent {
GraphPresetLoaded(TrackId),
/// Preset has been saved to file (track_id, preset_path)
GraphPresetSaved(TrackId, String),
/// Script compilation result (track_id, node_id, success, error, ui_declaration, source)
ScriptCompiled {
track_id: TrackId,
node_id: u32,
success: bool,
error: Option<String>,
ui_declaration: Option<beamdsp::UiDeclaration>,
source: String,
},
/// Export progress (frames_rendered, total_frames)
ExportProgress {
frames_rendered: usize,
total_frames: usize,
},
/// Export rendering complete, now writing/encoding the output file
ExportFinalizing,
/// Waveform generated for audio pool file (pool_index, waveform)
WaveformGenerated(usize, Vec<WaveformPeak>),
/// Waveform chunks ready for retrieval
/// (pool_index, detail_level, chunks: Vec<(chunk_index, time_range, peaks)>)
WaveformChunksReady {
pool_index: usize,
detail_level: u8,
chunks: Vec<(u32, (f64, f64), Vec<WaveformPeak>)>,
},
/// An audio file has been imported and is ready for playback.
/// For WAV/AIFF: the file is memory-mapped. For compressed: the disk
/// reader is stream-decoding ahead of the playhead.
AudioFileReady {
pool_index: usize,
path: String,
channels: u32,
sample_rate: u32,
duration: f64,
format: crate::io::audio_file::AudioFormat,
},
/// Progressive decode progress for a compressed audio file's waveform data.
/// Carries the samples inline so the UI doesn't need to query back.
AudioDecodeProgress {
pool_index: usize,
samples: Vec<f32>,
sample_rate: u32,
channels: u32,
},
/// Peak amplitude of mic input (for input monitoring meter)
InputLevel(f32),
/// Peak amplitude of mix output (for master meter), stereo (left, right)
OutputLevel(f32, f32),
/// Per-track playback peak levels
TrackLevels(Vec<(TrackId, f32)>),
/// Background waveform decode progress/completion for a compressed audio file.
/// Internal event — consumed by the engine to update the pool, not forwarded to UI.
/// `decoded_frames` < `total_frames` means partial; equal means complete.
WaveformDecodeComplete {
pool_index: usize,
samples: Vec<f32>,
decoded_frames: u64,
total_frames: u64,
},
}
/// Synchronous queries sent from UI thread to audio thread
@ -233,6 +383,9 @@ pub enum Query {
GetTemplateState(TrackId, u32),
/// Get oscilloscope data from a node (track_id, node_id, sample_count)
GetOscilloscopeData(TrackId, u32, usize),
/// Get oscilloscope data from a node inside a VoiceAllocator's best voice
/// (track_id, va_node_id, inner_node_id, sample_count)
GetVoiceOscilloscopeData(TrackId, u32, u32, usize),
/// Get MIDI clip data (track_id, clip_id)
GetMidiClip(TrackId, MidiClipId),
/// Get keyframes from an AutomationInput node (track_id, node_id)
@ -249,16 +402,43 @@ pub enum Query {
SerializeTrackGraph(TrackId, std::path::PathBuf),
/// Load a track's effects/instrument graph (track_id, preset_json, project_path)
LoadTrackGraph(TrackId, String, std::path::PathBuf),
/// Create a new audio track (name) - returns track ID synchronously
CreateAudioTrackSync(String),
/// Create a new MIDI track (name) - returns track ID synchronously
CreateMidiTrackSync(String),
/// Create a new audio track (name, parent) - returns track ID synchronously
CreateAudioTrackSync(String, Option<TrackId>),
/// Create a new MIDI track (name, parent) - returns track ID synchronously
CreateMidiTrackSync(String, Option<TrackId>),
/// Create a new metatrack/group (name, parent) - returns track ID synchronously
CreateMetatrackSync(String, Option<TrackId>),
/// Get waveform data from audio pool (pool_index, target_peaks)
GetPoolWaveform(usize, usize),
/// Get file info from audio pool (pool_index) - returns (duration, sample_rate, channels)
GetPoolFileInfo(usize),
/// Export audio to file (settings, output_path)
ExportAudio(crate::audio::ExportSettings, std::path::PathBuf),
/// Add a MIDI clip to a track synchronously (track_id, clip, start_time) - returns instance ID
AddMidiClipSync(TrackId, crate::audio::midi::MidiClip, f64),
/// Add a MIDI clip instance to a track synchronously (track_id, instance) - returns instance ID
/// The clip must already exist in the MidiClipPool
AddMidiClipInstanceSync(TrackId, crate::audio::midi::MidiClipInstance),
/// Add an audio file to the pool synchronously (path, data, channels, sample_rate) - returns pool index
AddAudioFileSync(String, Vec<f32>, u32, u32),
/// Import an audio file synchronously (path) - returns pool index.
/// Does the same work as Command::ImportAudio (mmap for PCM, streaming
/// setup for compressed) but returns the real pool index in the response.
/// NOTE: briefly blocks the UI thread during file setup (sub-ms for PCM
/// mmap; a few ms for compressed streaming init). If this becomes a
/// problem for very large files, switch to async import with event-based
/// pool index reconciliation.
ImportAudioSync(std::path::PathBuf),
/// Get raw audio samples from pool (pool_index) - returns (samples, sample_rate, channels)
GetPoolAudioSamples(usize),
/// Get a clone of the current project for serialization
GetProject,
/// Set the project (replaces current project state)
SetProject(Box<crate::audio::project::Project>),
/// Duplicate a MIDI clip in the pool, returning the new clip's ID
DuplicateMidiClipSync(MidiClipId),
/// Get whether a track's graph is still the auto-generated default
GetGraphIsDefault(TrackId),
}
/// Oscilloscope data from a node
@ -318,4 +498,20 @@ pub enum QueryResponse {
PoolFileInfo(Result<(f64, u32, u32), String>),
/// Audio exported
AudioExported(Result<(), String>),
/// MIDI clip instance added (returns instance ID)
MidiClipInstanceAdded(Result<MidiClipInstanceId, String>),
/// Audio file added to pool (returns pool index)
AudioFileAddedSync(Result<usize, String>),
/// Audio file imported to pool (returns pool index)
AudioImportedSync(Result<usize, String>),
/// Raw audio samples from pool (samples, sample_rate, channels)
PoolAudioSamples(Result<(Vec<f32>, u32, u32), String>),
/// Project retrieved
ProjectRetrieved(Result<Box<crate::audio::project::Project>, String>),
/// Project set
ProjectSet(Result<(), String>),
/// MIDI clip duplicated (returns new clip ID)
MidiClipDuplicated(Result<MidiClipId, String>),
/// Whether a track's graph is the auto-generated default
GraphIsDefault(bool),
}

View File

@ -1,3 +1,5 @@
pub mod biquad;
pub mod svf;
pub use biquad::BiquadFilter;
pub use svf::SvfFilter;

135
daw-backend/src/dsp/svf.rs Normal file
View File

@ -0,0 +1,135 @@
use std::f32::consts::PI;
/// Filter response selectable on an `SvfFilter`.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum SvfMode {
    Lowpass = 0,
    Highpass = 1,
    Bandpass = 2,
    Notch = 3,
}

impl SvfMode {
    /// Decode a mode from a parameter value.
    ///
    /// The value is rounded to the nearest integer; anything outside
    /// 1..=3 (including negatives) falls back to `Lowpass`.
    pub fn from_f32(value: f32) -> Self {
        match value.round() as i32 {
            1 => Self::Highpass,
            2 => Self::Bandpass,
            3 => Self::Notch,
            _ => Self::Lowpass,
        }
    }
}

/// Linear trapezoidal integrated State Variable Filter (Simper/Cytomic)
///
/// Zero-delay feedback topology. Per-sample cutoff modulation is cheap —
/// just update `g` and `k` coefficients (no per-sample trig needed if
/// cutoff hasn't changed).
#[derive(Clone)]
pub struct SvfFilter {
    // Coefficients
    g: f32,  // frequency warping: tan(π * cutoff / sample_rate)
    k: f32,  // damping: 2 - 2*resonance
    a1: f32, // 1 / (1 + g*(g+k))
    a2: f32, // g * a1
    // Per-channel integrator state (index 0/1 = left/right of a stereo pair)
    ic1eq: [f32; 2],
    ic2eq: [f32; 2],
    mode: SvfMode,
}

impl SvfFilter {
    /// Construct a filter with defaults: 1 kHz lowpass, no resonance,
    /// coefficients computed for a 44.1 kHz sample rate.
    pub fn new() -> Self {
        let mut f = Self {
            g: 0.0,
            k: 2.0,
            a1: 0.0,
            a2: 0.0,
            ic1eq: [0.0; 2],
            ic2eq: [0.0; 2],
            mode: SvfMode::Lowpass,
        };
        f.set_params(1000.0, 0.0, 44100.0);
        f
    }

    /// Recompute coefficients.
    ///
    /// # Arguments
    /// * `cutoff_hz` - Cutoff frequency in Hz (clamped to a stable range)
    /// * `resonance` - 0.0 (none) to 1.0 (self-oscillation)
    /// * `sample_rate` - Sample rate in Hz
    #[inline]
    pub fn set_params(&mut self, cutoff_hz: f32, resonance: f32, sample_rate: f32) {
        // Keep the cutoff well under Nyquist so tan() stays finite/stable.
        let cutoff = cutoff_hz.clamp(5.0, sample_rate * 0.49);
        let res = resonance.clamp(0.0, 1.0);
        self.g = (PI * cutoff / sample_rate).tan();
        self.k = 2.0 - 2.0 * res;
        self.a1 = 1.0 / (1.0 + self.g * (self.g + self.k));
        self.a2 = self.g * self.a1;
    }

    /// Select which response `process_sample` returns.
    pub fn set_mode(&mut self, mode: SvfMode) {
        self.mode = mode;
    }

    /// Advance the filter one sample and return all four responses at
    /// once: (lowpass, highpass, bandpass, notch).
    #[inline]
    pub fn process_sample_quad(&mut self, input: f32, channel: usize) -> (f32, f32, f32, f32) {
        let ch = channel.min(1); // clamp to the two supported channels
        let v3 = input - self.ic2eq[ch];
        let v1 = self.a1 * self.ic1eq[ch] + self.a2 * v3;
        let v2 = self.ic2eq[ch] + self.g * v1;
        let hp = input - self.k * v1 - v2;
        // Trapezoidal integrator state update.
        self.ic1eq[ch] = 2.0 * v1 - self.ic1eq[ch];
        self.ic2eq[ch] = 2.0 * v2 - self.ic2eq[ch];
        (v2, hp, v1, hp + v2)
    }

    /// Advance the filter one sample, returning only the selected mode's output.
    #[inline]
    pub fn process_sample(&mut self, input: f32, channel: usize) -> f32 {
        let (lp, hp, bp, notch) = self.process_sample_quad(input, channel);
        match self.mode {
            SvfMode::Lowpass => lp,
            SvfMode::Highpass => hp,
            SvfMode::Bandpass => bp,
            SvfMode::Notch => notch,
        }
    }

    /// Filter an interleaved buffer in place. Only mono and stereo are
    /// handled; other channel counts leave the buffer untouched.
    pub fn process_buffer(&mut self, buffer: &mut [f32], channels: usize) {
        match channels {
            1 => {
                for s in buffer.iter_mut() {
                    *s = self.process_sample(*s, 0);
                }
            }
            2 => {
                for frame in buffer.chunks_exact_mut(2) {
                    frame[0] = self.process_sample(frame[0], 0);
                    frame[1] = self.process_sample(frame[1], 1);
                }
            }
            _ => {}
        }
    }

    /// Clear the integrator state (delay lines); coefficients are kept.
    pub fn reset(&mut self) {
        self.ic1eq = [0.0; 2];
        self.ic2eq = [0.0; 2];
    }
}

impl Default for SvfFilter {
    fn default() -> Self {
        Self::new()
    }
}

View File

@ -13,6 +13,43 @@ pub struct WaveformPeak {
pub max: f32,
}
/// Uniquely identifies a waveform chunk
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct WaveformChunkKey {
pub pool_index: usize,
pub detail_level: u8, // 0-4
pub chunk_index: u32, // Sequential chunk number
}
/// A chunk of waveform data at a specific detail level
#[derive(Debug, Clone)]
pub struct WaveformChunk {
pub audio_pool_index: usize,
pub detail_level: u8, // 0-4 (overview to max detail)
pub chunk_index: u32, // Sequential chunk number
pub time_range: (f64, f64), // Start and end time in seconds
pub peaks: Vec<WaveformPeak>, // Variable length based on level
}
/// Whether an audio file is uncompressed (WAV/AIFF — can be memory-mapped) or compressed
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AudioFormat {
/// Uncompressed PCM (WAV, AIFF) — suitable for memory mapping
Pcm,
/// Compressed (MP3, FLAC, OGG, AAC, etc.) — requires decoding
Compressed,
}
/// Audio file metadata obtained without decoding
#[derive(Debug, Clone)]
pub struct AudioMetadata {
pub channels: u32,
pub sample_rate: u32,
pub duration: f64,
pub n_frames: Option<u64>,
pub format: AudioFormat,
}
pub struct AudioFile {
pub data: Vec<f32>,
pub channels: u32,
@ -20,6 +57,179 @@ pub struct AudioFile {
pub frames: u64,
}
/// Read only metadata from an audio file without decoding any audio packets.
/// This is fast (sub-millisecond) and suitable for calling on the UI thread.
///
/// # Errors
/// Returns a descriptive `String` if the file cannot be opened or probed,
/// contains no decodable audio track, or omits channel count / sample rate.
pub fn read_metadata<P: AsRef<Path>>(path: P) -> Result<AudioMetadata, String> {
    let path = path.as_ref();
    let file = std::fs::File::open(path)
        .map_err(|e| format!("Failed to open file: {}", e))?;
    let mss = MediaSourceStream::new(Box::new(file), Default::default());

    // Give the probe a format hint from the file extension when available.
    let mut hint = Hint::new();
    let ext = path.extension().and_then(|e| e.to_str()).map(|s| s.to_lowercase());
    if let Some(ref ext_str) = ext {
        hint.with_extension(ext_str);
    }

    let probed = symphonia::default::get_probe()
        .format(&hint, mss, &FormatOptions::default(), &MetadataOptions::default())
        .map_err(|e| format!("Failed to probe file: {}", e))?;
    let format = probed.format;

    // First track with a real codec is the one we describe.
    let track = format
        .tracks()
        .iter()
        .find(|t| t.codec_params.codec != symphonia::core::codecs::CODEC_TYPE_NULL)
        .ok_or_else(|| "No audio tracks found".to_string())?;
    let codec_params = &track.codec_params;

    let channels = codec_params.channels
        .ok_or_else(|| "Channel count not specified".to_string())?
        .count() as u32;
    let sample_rate = codec_params.sample_rate
        .ok_or_else(|| "Sample rate not specified".to_string())?;
    let n_frames = codec_params.n_frames;

    // Duration comes from the container's frame count. When the container
    // doesn't report one, report 0.0. (The previous time_base fallback
    // re-read `n_frames` — provably `None` on that path — so it always
    // produced 0.0 and has been removed as dead code.)
    let duration = match n_frames {
        Some(frames) => frames as f64 / sample_rate as f64,
        None => 0.0,
    };

    // WAV/AIFF are uncompressed PCM and can be memory-mapped; everything
    // else goes through the streaming decoder. Decided by extension only.
    let audio_format = match ext.as_deref() {
        Some("wav") | Some("wave") | Some("aiff") | Some("aif") => AudioFormat::Pcm,
        _ => AudioFormat::Compressed,
    };

    Ok(AudioMetadata {
        channels,
        sample_rate,
        duration,
        n_frames,
        format: audio_format,
    })
}
/// Parsed WAV header info needed for memory-mapping.
pub struct WavHeaderInfo {
    /// Byte offset of the first PCM sample (start of the "data" chunk payload).
    pub data_offset: usize,
    /// Size of the "data" chunk payload in bytes.
    pub data_size: usize,
    /// Sample encoding as declared in the "fmt " chunk.
    pub sample_format: crate::audio::pool::PcmSampleFormat,
    /// Interleaved channel count from the "fmt " chunk.
    pub channels: u32,
    /// Sample rate in Hz from the "fmt " chunk.
    pub sample_rate: u32,
    /// Frame count derived from data_size / bytes-per-frame.
    pub total_frames: u64,
}
/// Parse a WAV file header from a byte slice (e.g. from an mmap).
/// Returns the byte offset to PCM data and format details.
///
/// Walks the RIFF chunk list for the "fmt " and "data" chunks; other
/// chunks (LIST, cue, ...) are skipped. Supports 16/24-bit integer and
/// 32-bit (integer-or-float) PCM.
///
/// # Errors
/// Returns a descriptive `String` for non-RIFF input, a missing or
/// undersized fmt/data chunk, or an unsupported sample format.
pub fn parse_wav_header(data: &[u8]) -> Result<WavHeaderInfo, String> {
    if data.len() < 44 {
        return Err("File too small to be a valid WAV".to_string());
    }
    // RIFF container magic: "RIFF" .... "WAVE"
    if &data[0..4] != b"RIFF" || &data[8..12] != b"WAVE" {
        return Err("Not a valid RIFF/WAVE file".to_string());
    }

    // Walk chunks to find "fmt " and "data"
    let mut pos = 12;
    let mut fmt_found = false;
    let mut channels: u32 = 0;
    let mut sample_rate: u32 = 0;
    let mut bits_per_sample: u16 = 0;
    let mut format_code: u16 = 0;
    let mut data_offset: usize = 0;
    let mut data_size: usize = 0;

    while pos + 8 <= data.len() {
        let chunk_id = &data[pos..pos + 4];
        let chunk_size = u32::from_le_bytes([
            data[pos + 4],
            data[pos + 5],
            data[pos + 6],
            data[pos + 7],
        ]) as usize;

        if chunk_id == b"fmt " {
            // The PCM fmt chunk is at least 16 bytes; reject shorter ones
            // instead of reading into the bytes of the following chunk.
            if chunk_size < 16 || pos + 8 + 16 > data.len() {
                return Err("fmt chunk too small".to_string());
            }
            let base = pos + 8;
            format_code = u16::from_le_bytes([data[base], data[base + 1]]);
            channels = u16::from_le_bytes([data[base + 2], data[base + 3]]) as u32;
            sample_rate = u32::from_le_bytes([
                data[base + 4],
                data[base + 5],
                data[base + 6],
                data[base + 7],
            ]);
            bits_per_sample = u16::from_le_bytes([data[base + 14], data[base + 15]]);
            fmt_found = true;
        } else if chunk_id == b"data" {
            data_offset = pos + 8;
            // Clamp to the bytes actually present so a lying header can't
            // make callers read past the end of the mapping.
            data_size = chunk_size.min(data.len() - data_offset);
            break;
        }

        // Advance to the next chunk (chunks are 2-byte aligned). Compute in
        // u64 so a hostile chunk size can't wrap `pos` on 32-bit targets.
        let next = pos as u64 + 8 + chunk_size as u64 + (chunk_size as u64 & 1);
        if next > data.len() as u64 {
            break; // truncated or corrupt chunk list
        }
        pos = next as usize;
    }

    if !fmt_found {
        return Err("No fmt chunk found".to_string());
    }
    if data_offset == 0 {
        return Err("No data chunk found".to_string());
    }

    // Determine sample format
    let sample_format = match (format_code, bits_per_sample) {
        (1, 16) => crate::audio::pool::PcmSampleFormat::I16,
        (1, 24) => crate::audio::pool::PcmSampleFormat::I24,
        (3, 32) => crate::audio::pool::PcmSampleFormat::F32,
        (1, 32) => crate::audio::pool::PcmSampleFormat::F32, // 32-bit PCM treated as float
        _ => {
            return Err(format!(
                "Unsupported WAV format: code={}, bits={}",
                format_code, bits_per_sample
            ));
        }
    };

    let bytes_per_sample = (bits_per_sample / 8) as usize;
    let bytes_per_frame = bytes_per_sample * channels as usize;
    let total_frames = if bytes_per_frame > 0 {
        (data_size / bytes_per_frame) as u64
    } else {
        0
    };

    Ok(WavHeaderInfo {
        data_offset,
        data_size,
        sample_format,
        channels,
        sample_rate,
        total_frames,
    })
}
impl AudioFile {
/// Load an audio file from disk and decode it to interleaved f32 samples
pub fn load<P: AsRef<Path>>(path: P) -> Result<Self, String> {
@ -128,6 +338,123 @@ impl AudioFile {
})
}
/// Decode a compressed audio file progressively, calling `on_progress` with
/// partial data snapshots so the UI can display waveforms as they decode.
/// Sends updates roughly every 2 seconds of decoded audio.
///
/// `on_progress(delta_samples, decoded_frames, total_frames)` receives only
/// the samples decoded since the previous call (a delta, not the whole
/// buffer) plus the running decoded-frame count. Setup failures (open,
/// probe, decoder creation) are logged to stderr and abort without any
/// progress callback; once decoding starts, a final callback always fires
/// with the remaining delta.
pub fn decode_progressive<P: AsRef<Path>, F>(path: P, total_frames: u64, on_progress: F)
where
    F: Fn(&[f32], u64, u64),
{
    let path = path.as_ref();
    let file = match std::fs::File::open(path) {
        Ok(f) => f,
        Err(e) => {
            eprintln!("[WAVEFORM DECODE] Failed to open {:?}: {}", path, e);
            return;
        }
    };
    let mss = MediaSourceStream::new(Box::new(file), Default::default());

    // Hint the probe with the file extension when one is available.
    let mut hint = Hint::new();
    if let Some(extension) = path.extension() {
        if let Some(ext_str) = extension.to_str() {
            hint.with_extension(ext_str);
        }
    }
    let probed = match symphonia::default::get_probe()
        .format(&hint, mss, &FormatOptions::default(), &MetadataOptions::default())
    {
        Ok(p) => p,
        Err(e) => {
            eprintln!("[WAVEFORM DECODE] Failed to probe {:?}: {}", path, e);
            return;
        }
    };
    let mut format = probed.format;

    // Pick the first track with a real codec.
    let track = match format.tracks().iter()
        .find(|t| t.codec_params.codec != symphonia::core::codecs::CODEC_TYPE_NULL)
    {
        Some(t) => t,
        None => {
            eprintln!("[WAVEFORM DECODE] No audio tracks in {:?}", path);
            return;
        }
    };
    let track_id = track.id;
    // Fall back to stereo / 44.1 kHz when the container omits these.
    let channels = track.codec_params.channels
        .map(|c| c.count() as u32)
        .unwrap_or(2);
    let sample_rate = track.codec_params.sample_rate.unwrap_or(44100);
    let mut decoder = match symphonia::default::get_codecs()
        .make(&track.codec_params, &DecoderOptions::default())
    {
        Ok(d) => d,
        Err(e) => {
            eprintln!("[WAVEFORM DECODE] Failed to create decoder for {:?}: {}", path, e);
            return;
        }
    };

    // Interleaved f32 accumulator for everything decoded so far.
    let mut audio_data = Vec::new();
    // Lazily created once the first packet reveals the signal spec.
    let mut sample_buf = None;
    // Send a progress update roughly every 2 seconds of audio
    // Send first update quickly (0.25s), then every 2s of audio
    let initial_interval = (sample_rate as usize * channels as usize) / 4;
    let steady_interval = (sample_rate as usize * channels as usize) * 2;
    let mut sent_first = false;
    let mut last_update_len = 0usize;

    loop {
        let packet = match format.next_packet() {
            Ok(packet) => packet,
            // EOF / reset mark the normal or forced end of the stream.
            Err(Error::IoError(e)) if e.kind() == std::io::ErrorKind::UnexpectedEof => break,
            Err(Error::ResetRequired) => break,
            Err(_) => break,
        };
        if packet.track_id() != track_id {
            continue;
        }
        match decoder.decode(&packet) {
            Ok(decoded) => {
                if sample_buf.is_none() {
                    let spec = *decoded.spec();
                    let duration = decoded.capacity() as u64;
                    sample_buf = Some(SampleBuffer::<f32>::new(duration, spec));
                }
                if let Some(ref mut buf) = sample_buf {
                    buf.copy_interleaved_ref(decoded);
                    audio_data.extend_from_slice(buf.samples());
                }
                // Send progressive update (fast initial, then periodic)
                // Only send NEW samples since last update (delta) to avoid large copies
                let interval = if sent_first { steady_interval } else { initial_interval };
                if audio_data.len() - last_update_len >= interval {
                    let decoded_frames = audio_data.len() as u64 / channels as u64;
                    on_progress(&audio_data[last_update_len..], decoded_frames, total_frames);
                    last_update_len = audio_data.len();
                    sent_first = true;
                }
            }
            // Recoverable corruption: skip the bad packet and keep going.
            Err(Error::DecodeError(_)) => continue,
            Err(_) => break,
        }
    }

    // Final update with remaining data (delta since last update). The
    // reported total is bumped to the decoded count if the caller's
    // estimate turned out to be low.
    let decoded_frames = audio_data.len() as u64 / channels as u64;
    on_progress(&audio_data[last_update_len..], decoded_frames, decoded_frames.max(total_frames));
}
/// Calculate the duration of the audio file in seconds
pub fn duration(&self) -> f64 {
self.frames as f64 / self.sample_rate as f64
@ -136,25 +463,48 @@ impl AudioFile {
/// Generate a waveform overview with the specified number of peaks
/// This creates a downsampled representation suitable for timeline visualization
pub fn generate_waveform_overview(&self, target_peaks: usize) -> Vec<WaveformPeak> {
self.generate_waveform_overview_range(0, self.frames as usize, target_peaks)
}
/// Generate a waveform overview for a specific range of frames
///
/// # Arguments
/// * `start_frame` - Starting frame index (0-based)
/// * `end_frame` - Ending frame index (exclusive)
/// * `target_peaks` - Desired number of peaks to generate
pub fn generate_waveform_overview_range(
&self,
start_frame: usize,
end_frame: usize,
target_peaks: usize,
) -> Vec<WaveformPeak> {
if self.frames == 0 || target_peaks == 0 {
return Vec::new();
}
let total_frames = self.frames as usize;
let frames_per_peak = (total_frames / target_peaks).max(1);
let actual_peaks = (total_frames + frames_per_peak - 1) / frames_per_peak;
let start_frame = start_frame.min(total_frames);
let end_frame = end_frame.min(total_frames);
if start_frame >= end_frame {
return Vec::new();
}
let range_frames = end_frame - start_frame;
let frames_per_peak = (range_frames / target_peaks).max(1);
let actual_peaks = (range_frames + frames_per_peak - 1) / frames_per_peak;
let mut peaks = Vec::with_capacity(actual_peaks);
for peak_idx in 0..actual_peaks {
let start_frame = peak_idx * frames_per_peak;
let end_frame = ((peak_idx + 1) * frames_per_peak).min(total_frames);
let peak_start = start_frame + peak_idx * frames_per_peak;
let peak_end = (start_frame + (peak_idx + 1) * frames_per_peak).min(end_frame);
let mut min = 0.0f32;
let mut max = 0.0f32;
// Scan all samples in this window
for frame_idx in start_frame..end_frame {
for frame_idx in peak_start..peak_end {
// For multi-channel audio, combine all channels
for ch in 0..self.channels as usize {
let sample_idx = frame_idx * self.channels as usize + ch;

View File

@ -3,7 +3,7 @@ pub mod midi_file;
pub mod midi_input;
pub mod wav_writer;
pub use audio_file::{AudioFile, WaveformPeak};
pub use audio_file::{AudioFile, AudioFormat, AudioMetadata, WavHeaderInfo, WaveformChunk, WaveformChunkKey, WaveformPeak, parse_wav_header, read_metadata};
pub use midi_file::load_midi_file;
pub use midi_input::MidiInputManager;
pub use wav_writer::WavWriter;

View File

@ -13,14 +13,14 @@ pub mod tui;
// Re-export commonly used types
pub use audio::{
AudioPool, AudioTrack, AutomationLane, AutomationLaneId, AutomationPoint, BufferPool, Clip, ClipId, CurveType, Engine, EngineController,
Metatrack, MidiClip, MidiClipId, MidiEvent, MidiTrack, ParameterId, PoolAudioFile, Project, RecordingState, RenderContext, Track, TrackId,
AudioClipInstanceId, AudioClipSnapshot, AudioPool, AudioTrack, AutomationLane, AutomationLaneId, AutomationPoint, BufferPool, Clip, ClipId, CurveType, Engine, EngineController,
Metatrack, MidiClip, MidiClipId, MidiClipInstance, MidiClipInstanceId, MidiEvent, MidiTrack, ParameterId, PoolAudioFile, Project, RecordingState, RenderContext, Track, TrackId,
TrackNode,
};
pub use audio::node_graph::{GraphPreset, AudioGraph, PresetMetadata, SerializedConnection, SerializedNode};
pub use command::{AudioEvent, Command, OscilloscopeData};
pub use command::types::AutomationKeyframeData;
pub use io::{load_midi_file, AudioFile, WaveformPeak, WavWriter};
pub use io::{load_midi_file, AudioFile, WaveformChunk, WaveformChunkKey, WaveformPeak, WavWriter};
use cpal::traits::{DeviceTrait, HostTrait, StreamTrait};
@ -37,6 +37,15 @@ pub struct AudioSystem {
pub stream: cpal::Stream,
pub sample_rate: u32,
pub channels: u32,
/// Event receiver for polling audio events (only present when no EventEmitter is provided)
pub event_rx: Option<rtrb::Consumer<AudioEvent>>,
/// Consumer for recording audio mirror (streams recorded samples to UI for live waveform)
recording_mirror_rx: Option<rtrb::Consumer<f32>>,
/// Producer end of the input ring-buffer. Taken into the closure when the
/// input stream is opened; `None` after `open_input_stream()` has been called.
input_tx: Option<rtrb::Producer<f32>>,
/// The live microphone/line-in stream. `None` until `open_input_stream()` is called.
input_stream: Option<cpal::Stream>,
}
impl AudioSystem {
@ -46,6 +55,13 @@ impl AudioSystem {
/// * `event_emitter` - Optional event emitter for pushing events to external systems
/// * `buffer_size` - Audio buffer size in frames (128, 256, 512, 1024, etc.)
/// Smaller = lower latency but higher CPU usage. Default: 256
///
/// # Environment Variables
/// * `DAW_AUDIO_DEBUG=1` - Enable audio callback timing diagnostics. Logs:
/// - Device and config info at startup
/// - First 10 callback buffer sizes (to detect ALSA buffer variance)
/// - Per-overrun timing breakdown (command vs render time)
/// - Periodic (~5s) timing summaries (avg/worst/overrun rate)
pub fn new(
event_emitter: Option<std::sync::Arc<dyn EventEmitter>>,
buffer_size: u32,
@ -58,8 +74,12 @@ impl AudioSystem {
.ok_or("No output device available")?;
let default_output_config = output_device.default_output_config().map_err(|e| e.to_string())?;
let sample_rate = default_output_config.sample_rate().0;
let sample_rate = default_output_config.sample_rate();
let channels = default_output_config.channels() as u32;
let _debug_audio = std::env::var("DAW_AUDIO_DEBUG").map_or(false, |v| v == "1");
eprintln!("[AUDIO] Device: {:?}, format={:?}, rate={}, channels={}",
output_device.description().map(|d| d.name().to_string()).unwrap_or_default(), default_output_config.sample_format(), sample_rate, channels);
// Create queues
let (command_tx, command_rx) = rtrb::RingBuffer::new(512); // Larger buffer for MIDI + UI commands
@ -72,9 +92,13 @@ impl AudioSystem {
let input_buffer_size = (sample_rate * channels * 10) as usize;
let (mut input_tx, input_rx) = rtrb::RingBuffer::new(input_buffer_size);
// Create mirror ringbuffer for streaming recorded audio to UI (live waveform)
let (mirror_tx, mirror_rx) = rtrb::RingBuffer::new(input_buffer_size);
// Create engine
let mut engine = Engine::new(sample_rate, channels, command_rx, event_tx, query_rx, query_response_tx);
engine.set_input_rx(input_rx);
engine.set_recording_mirror_tx(mirror_tx);
let controller = engine.get_controller(command_tx, query_tx, query_response_rx);
// Initialize MIDI input manager for external MIDI devices
@ -92,38 +116,23 @@ impl AudioSystem {
}
}
// Build output stream with configurable buffer size
let mut output_config: cpal::StreamConfig = default_output_config.clone().into();
// Build output stream
let mut output_config: cpal::StreamConfig = default_output_config.into();
// Set the requested buffer size
output_config.buffer_size = cpal::BufferSize::Fixed(buffer_size);
// WASAPI shared mode on Windows does not support fixed buffer sizes.
// Use the device default on Windows; honor the requested size on other platforms.
if cfg!(target_os = "windows") {
output_config.buffer_size = cpal::BufferSize::Default;
} else {
output_config.buffer_size = cpal::BufferSize::Fixed(buffer_size);
}
let mut output_buffer = vec![0.0f32; 16384];
// Log audio configuration
println!("Audio Output Configuration:");
println!(" Sample Rate: {} Hz", output_config.sample_rate.0);
println!(" Channels: {}", output_config.channels);
println!(" Buffer Size: {:?}", output_config.buffer_size);
// Calculate expected latency
if let cpal::BufferSize::Fixed(size) = output_config.buffer_size {
let latency_ms = (size as f64 / output_config.sample_rate.0 as f64) * 1000.0;
println!(" Expected Latency: {:.2} ms", latency_ms);
}
let mut first_callback = true;
let output_stream = output_device
.build_output_stream(
&output_config,
move |data: &mut [f32], _: &cpal::OutputCallbackInfo| {
if first_callback {
let frames = data.len() / output_config.channels as usize;
let latency_ms = (frames as f64 / output_config.sample_rate.0 as f64) * 1000.0;
println!("Audio callback buffer size: {} samples ({} frames, {:.2} ms latency)",
data.len(), frames, latency_ms);
first_callback = false;
}
let buf = &mut output_buffer[..data.len()];
buf.fill(0.0);
engine.process(buf);
@ -132,89 +141,128 @@ impl AudioSystem {
|err| eprintln!("Output stream error: {}", err),
None,
)
.map_err(|e| e.to_string())?;
.map_err(|e| format!("Failed to build output stream: {e:?}"))?;
// Get input device
let input_device = match host.default_input_device() {
Some(device) => device,
None => {
eprintln!("Warning: No input device available, recording will be disabled");
// Start output stream and return without input
output_stream.play().map_err(|e| e.to_string())?;
// Spawn emitter thread if provided
if let Some(emitter) = event_emitter {
Self::spawn_emitter_thread(event_rx, emitter);
}
return Ok(Self {
controller,
stream: output_stream,
sample_rate,
channels,
});
}
};
// Get input config matching output sample rate and channels if possible
let input_config = match input_device.default_input_config() {
Ok(config) => {
let mut cfg: cpal::StreamConfig = config.into();
// Try to match output sample rate and channels
cfg.sample_rate = cpal::SampleRate(sample_rate);
cfg.channels = channels as u16;
cfg
}
Err(e) => {
eprintln!("Warning: Could not get input config: {}, recording will be disabled", e);
output_stream.play().map_err(|e| e.to_string())?;
// Spawn emitter thread if provided
if let Some(emitter) = event_emitter {
Self::spawn_emitter_thread(event_rx, emitter);
}
return Ok(Self {
controller,
stream: output_stream,
sample_rate,
channels,
});
}
};
// Build input stream that feeds into the ringbuffer
let input_stream = input_device
.build_input_stream(
&input_config,
move |data: &[f32], _: &cpal::InputCallbackInfo| {
// Push input samples to ringbuffer for recording
for &sample in data {
let _ = input_tx.push(sample);
}
},
|err| eprintln!("Input stream error: {}", err),
None,
)
.map_err(|e| e.to_string())?;
// Start both streams
// Start output stream
output_stream.play().map_err(|e| e.to_string())?;
input_stream.play().map_err(|e| e.to_string())?;
// Leak the input stream to keep it alive
Box::leak(Box::new(input_stream));
// Spawn emitter thread if provided
if let Some(emitter) = event_emitter {
// Spawn emitter thread if provided, or store event_rx for manual polling
let event_rx_option = if let Some(emitter) = event_emitter {
Self::spawn_emitter_thread(event_rx, emitter);
}
None
} else {
Some(event_rx)
};
// Input stream is NOT opened here — call open_input_stream() when an
// audio input track is actually selected, to avoid constant ALSA wakeups.
Ok(Self {
controller,
stream: output_stream,
sample_rate,
channels,
event_rx: event_rx_option,
recording_mirror_rx: Some(mirror_rx),
input_tx: Some(input_tx),
input_stream: None,
})
}
/// Take the recording mirror consumer for streaming recorded audio to the UI
/// (live waveform display while recording).
///
/// Returns the `rtrb::Consumer<f32>` end of the mirror ring-buffer, or `None`
/// if it was already taken.
pub fn take_recording_mirror_rx(&mut self) -> Option<rtrb::Consumer<f32>> {
    // One-shot handoff: `take()` leaves `None` behind, so a second call
    // returns `None`.
    self.recording_mirror_rx.take()
}
/// Open the microphone/line-in input stream.
///
/// Call this as soon as an audio input track is selected so the stream is
/// ready before recording starts. The stream is opened with the same fixed
/// buffer size as the output stream to avoid ALSA spinning at high callback
/// rates with its tiny default buffer.
///
/// No-ops if the stream is already open.
///
/// # Errors
/// Returns `Err` if the input ring-buffer producer was already consumed
/// (by a previous open or by `take_input_opener()`), if no input device is
/// available, or if the device config/stream cannot be obtained.
pub fn open_input_stream(&mut self, buffer_size: u32) -> Result<(), String> {
    // Idempotent: a live stream means there is nothing to do.
    if self.input_stream.is_some() {
        return Ok(());
    }
    // The producer can only move into one callback closure. After this point
    // `self.input_tx` is `None`, so reopening after `close_input_stream()`
    // will return this error.
    let mut input_tx = match self.input_tx.take() {
        Some(tx) => tx,
        None => return Err("Input ring-buffer already consumed".into()),
    };
    let host = cpal::default_host();
    let input_device = host.default_input_device()
        .ok_or("No input device available")?;
    let default_cfg = input_device.default_input_config()
        .map_err(|e| e.to_string())?;
    let mut input_config: cpal::StreamConfig = default_cfg.into();
    // Match the output buffer size so ALSA wakes up at the same rate as
    // the output thread — prevents the ~750 wakeups/sec that the default
    // 64-frame buffer causes. (Windows/WASAPI keeps the device default,
    // mirroring the output-stream setup.)
    if !cfg!(target_os = "windows") {
        input_config.buffer_size = cpal::BufferSize::Fixed(buffer_size);
    }
    let input_sample_rate = input_config.sample_rate;
    let input_channels = input_config.channels as u32;
    let output_sample_rate = self.sample_rate;
    let output_channels = self.channels;
    // Convert only when the device format differs from the engine's.
    let needs_resample = input_sample_rate != output_sample_rate
        || input_channels != output_channels;
    if needs_resample {
        eprintln!("[AUDIO] Input: {}Hz {}ch → resampling to {}Hz {}ch",
            input_sample_rate, input_channels, output_sample_rate, output_channels);
    }
    let stream = input_device.build_input_stream(
        &input_config,
        move |data: &[f32], _: &cpal::InputCallbackInfo| {
            if !needs_resample {
                // Fast path: formats match, forward samples verbatim.
                // Pushes are best-effort: samples are silently dropped if
                // the ring-buffer is full (consumer not draining fast enough).
                for &s in data { let _ = input_tx.push(s); }
            } else {
                // Linear-interpolation resampler with channel up/down-mix
                // (extra output channels reuse the last input channel).
                let in_ch = input_channels as usize;
                let out_ch = output_channels as usize;
                let ratio = output_sample_rate as f64 / input_sample_rate as f64;
                let in_frames = data.len() / in_ch;
                let out_frames = (in_frames as f64 * ratio) as usize;
                // NOTE(review): the fractional read position restarts at 0 on
                // every callback — no phase is carried across buffers, so a
                // sample can be dropped/duplicated at each callback boundary
                // when the ratio is not buffer-aligned. Confirm this is
                // acceptable for recording quality.
                for i in 0..out_frames {
                    let src_pos = i as f64 / ratio;
                    let src_idx = src_pos as usize;
                    let frac = (src_pos - src_idx as f64) as f32;
                    for ch in 0..out_ch {
                        let ic = ch.min(in_ch - 1);
                        // Edge-clamped reads: hold the last sample when
                        // `src_idx + 1` runs past this callback's data.
                        let s0 = data.get(src_idx * in_ch + ic).copied().unwrap_or(0.0);
                        let s1 = data.get((src_idx + 1) * in_ch + ic).copied().unwrap_or(s0);
                        let _ = input_tx.push(s0 + frac * (s1 - s0));
                    }
                }
            }
        },
        |err| eprintln!("Input stream error: {err}"),
        None,
    ).map_err(|e| format!("Failed to build input stream: {e}"))?;
    stream.play().map_err(|e| e.to_string())?;
    self.input_stream = Some(stream);
    Ok(())
}
/// Close the input stream (e.g. when the last audio input track is removed).
///
/// Note: the ring-buffer producer was moved into the stream's callback and is
/// dropped with it, so after closing, `open_input_stream()` cannot reopen the
/// stream (`input_tx` stays `None` and it returns an error).
pub fn close_input_stream(&mut self) {
    self.input_stream = None; // Drop stops the stream
}
/// Extract an [`InputStreamOpener`] that can be stored independently and
/// used to open the microphone/line-in stream on demand.
/// Returns `None` if called a second time, or if the producer was already
/// consumed by `open_input_stream()`.
pub fn take_input_opener(&mut self) -> Option<InputStreamOpener> {
    // Moves the ring-buffer producer out of `self`; afterwards
    // `open_input_stream()` will fail because `input_tx` is `None`.
    self.input_tx.take().map(|tx| InputStreamOpener {
        input_tx: tx,
        sample_rate: self.sample_rate,
        channels: self.channels,
    })
}
@ -233,3 +281,77 @@ impl AudioSystem {
});
}
}
/// Self-contained handle for opening the microphone/line-in stream on demand.
///
/// Obtained via [`AudioSystem::take_input_opener`]. Call [`open`](Self::open)
/// when the user selects an audio input track; store the returned
/// `cpal::Stream` to keep it alive (dropping it stops the stream).
pub struct InputStreamOpener {
    // Producer end of the input ring-buffer; moved into the stream callback
    // when `open()` is called.
    input_tx: rtrb::Producer<f32>,
    // Engine output sample rate the captured audio must match (input is
    // resampled in the callback if the device rate differs).
    sample_rate: u32,
    // Engine output channel count the captured audio must match.
    channels: u32,
}
impl InputStreamOpener {
    /// Open and start the input stream with the given buffer size.
    ///
    /// Uses the same `buffer_size` as the output stream so ALSA wakes up at
    /// the same rate (~187/s at 256 frames) rather than the ~750/s it defaults
    /// to with 64-frame buffers.
    ///
    /// Consumes `self` (the ring-buffer producer moves into the audio
    /// callback). Keep the returned `cpal::Stream` alive; dropping it stops
    /// capture.
    ///
    /// # Errors
    /// Returns `Err` if no input device is available, its config cannot be
    /// read, or the stream fails to build or start.
    pub fn open(mut self, buffer_size: u32) -> Result<cpal::Stream, String> {
        let host = cpal::default_host();
        let device = host.default_input_device()
            .ok_or("No input device available")?;
        let default_cfg = device.default_input_config()
            .map_err(|e| e.to_string())?;
        let mut cfg: cpal::StreamConfig = default_cfg.into();
        // WASAPI shared mode rejects fixed buffer sizes; elsewhere, match the
        // output buffer size to keep input wakeups in step with output.
        if !cfg!(target_os = "windows") {
            cfg.buffer_size = cpal::BufferSize::Fixed(buffer_size);
        }
        let in_rate = cfg.sample_rate;
        let in_ch = cfg.channels as u32;
        let out_rate = self.sample_rate;
        let out_ch = self.channels;
        // Convert only when the device format differs from the engine's.
        let needs_resample = in_rate != out_rate || in_ch != out_ch;
        if needs_resample {
            eprintln!("[AUDIO] Input: {}Hz {}ch → resampling to {}Hz {}ch",
                in_rate, in_ch, out_rate, out_ch);
        }
        // NOTE(review): this callback duplicates the resampler in
        // `AudioSystem::open_input_stream` — consider extracting a shared
        // helper so the two copies cannot drift apart.
        let stream = device.build_input_stream(
            &cfg,
            move |data: &[f32], _: &cpal::InputCallbackInfo| {
                if !needs_resample {
                    // Fast path: forward samples verbatim. Pushes are
                    // best-effort; a full ring-buffer drops samples.
                    for &s in data { let _ = self.input_tx.push(s); }
                } else {
                    // Linear-interpolation resampler with channel up/down-mix
                    // (extra output channels reuse the last input channel).
                    let ic = in_ch as usize;
                    let oc = out_ch as usize;
                    let ratio = out_rate as f64 / in_rate as f64;
                    let in_frames = data.len() / ic;
                    let out_frames = (in_frames as f64 * ratio) as usize;
                    // NOTE(review): read position restarts at 0 each callback
                    // (no phase carry), so a sample can be dropped/duplicated
                    // at buffer boundaries when the ratio is fractional.
                    for i in 0..out_frames {
                        let src = i as f64 / ratio;
                        let si = src as usize;
                        let f = (src - si as f64) as f32;
                        for ch in 0..oc {
                            let ich = ch.min(ic - 1);
                            // Edge-clamped reads: hold the last sample when
                            // `si + 1` runs past this callback's data.
                            let s0 = data.get(si * ic + ich).copied().unwrap_or(0.0);
                            let s1 = data.get((si + 1) * ic + ich).copied().unwrap_or(s0);
                            let _ = self.input_tx.push(s0 + f * (s1 - s0));
                        }
                    }
                }
            },
            |err| eprintln!("Input stream error: {err}"),
            None,
        ).map_err(|e| format!("Failed to build input stream: {e}"))?;
        stream.play().map_err(|e| e.to_string())?;
        Ok(stream)
    }
}

1092
docs/AUDIO_SYSTEM.md Normal file

File diff suppressed because it is too large Load Diff

545
docs/BUILDING.md Normal file
View File

@ -0,0 +1,545 @@
# Building Lightningbeam
This guide provides detailed instructions for building Lightningbeam on different platforms, including dependency installation, troubleshooting, and advanced build configurations.
## Table of Contents
- [Quick Start](#quick-start)
- [Platform-Specific Instructions](#platform-specific-instructions)
- [Dependencies](#dependencies)
- [Build Configurations](#build-configurations)
- [Troubleshooting](#troubleshooting)
- [Development Builds](#development-builds)
## Quick Start
```bash
# Clone the repository
git clone https://github.com/skykooler/lightningbeam.git
cd lightningbeam
# Initialize submodules (including nested ones required by nam-ffi)
git submodule update --init --recursive
cd lightningbeam-ui
# Build and run
cargo build
cargo run
```
## Platform-Specific Instructions
### Linux
#### Ubuntu/Debian
**Important**: Lightningbeam requires FFmpeg 8, which may not be in the default repositories.
```bash
# Install basic dependencies
sudo apt update
sudo apt install -y \
build-essential \
pkg-config \
libasound2-dev \
clang \
libclang-dev
# Install FFmpeg 8 from PPA (Ubuntu)
sudo add-apt-repository ppa:ubuntuhandbook1/ffmpeg7
sudo apt update
sudo apt install -y \
ffmpeg \
libavcodec-dev \
libavformat-dev \
libavutil-dev \
libswscale-dev \
libswresample-dev
# Verify FFmpeg version (should be 8.x)
ffmpeg -version
# Install Rust if needed
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
# Build
cd lightningbeam-ui
cargo build --release
```
**Note**: If the PPA doesn't provide FFmpeg 8, you may need to compile FFmpeg from source or find an alternative PPA. See [FFmpeg Issues](#ffmpeg-issues) for details.
#### Arch Linux/Manjaro
```bash
# Install system dependencies
sudo pacman -S --needed \
base-devel \
rust \
alsa-lib \
ffmpeg \
clang
# Build
cd lightningbeam-ui
cargo build --release
```
#### Fedora/RHEL
```bash
# Install system dependencies
sudo dnf install -y \
gcc \
gcc-c++ \
make \
pkg-config \
alsa-lib-devel \
ffmpeg \
ffmpeg-devel \
clang \
clang-devel
# Install Rust if needed
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
# Build
cd lightningbeam-ui
cargo build --release
```
### macOS
```bash
# Install Homebrew if needed
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
# Install dependencies
brew install rust ffmpeg pkg-config
# Build
cd lightningbeam-ui
cargo build --release
```
**Note**: macOS uses CoreAudio for audio I/O (via cpal), so no additional audio libraries are needed.
### Windows
#### Using Visual Studio
1. Install [Visual Studio 2022](https://visualstudio.microsoft.com/) with "Desktop development with C++" workload
2. Install [Rust](https://rustup.rs/)
3. Install [FFmpeg](https://ffmpeg.org/download.html#build-windows):
- Download a shared build from https://www.gyan.dev/ffmpeg/builds/
- Extract to `C:\ffmpeg`
- Add `C:\ffmpeg\bin` to PATH
- Set environment variables:
```cmd
set FFMPEG_DIR=C:\ffmpeg
set PKG_CONFIG_PATH=C:\ffmpeg\lib\pkgconfig
```
4. Build:
```cmd
cd lightningbeam-ui
cargo build --release
```
#### Using MSYS2/MinGW
```bash
# In MSYS2 shell
pacman -S mingw-w64-x86_64-rust \
mingw-w64-x86_64-ffmpeg \
mingw-w64-x86_64-pkg-config
cd lightningbeam-ui
cargo build --release
```
**Note**: Windows uses WASAPI for audio I/O (via cpal), which is built into Windows.
## Dependencies
### Required Dependencies
#### Rust Toolchain
- **Version**: Stable (1.70+)
- **Install**: https://rustup.rs/
- **Components**: Default installation includes everything needed
#### Audio I/O (ALSA on Linux)
- **Ubuntu/Debian**: `libasound2-dev`
- **Arch**: `alsa-lib`
- **Fedora**: `alsa-lib-devel`
- **macOS**: CoreAudio (built-in)
- **Windows**: WASAPI (built-in)
#### FFmpeg
**Version Required**: FFmpeg 8.x
Required for video encoding/decoding. Note that many distribution repositories may have older versions.
- **Ubuntu/Debian**: Use PPA for FFmpeg 8 (see [Ubuntu/Debian instructions](#ubuntudebian))
- **Arch**: `ffmpeg` (usually up-to-date)
- **Fedora**: `ffmpeg ffmpeg-devel` (check version with `ffmpeg -version`)
- **macOS**: `brew install ffmpeg` (Homebrew usually has latest)
- **Windows**: Download FFmpeg 8 from https://ffmpeg.org/download.html
#### Build Tools
- **Linux**: `build-essential` (Ubuntu), `base-devel` (Arch)
- **macOS**: Xcode Command Line Tools (`xcode-select --install`)
- **Windows**: Visual Studio with C++ tools or MinGW
#### pkg-config
Required for finding system libraries.
- **Linux**: Usually included with build tools
- **macOS**: `brew install pkg-config`
- **Windows**: Included with MSYS2/MinGW, or use vcpkg
### Optional Dependencies
#### GPU Drivers
Vello requires a GPU with Vulkan (Linux/Windows) or Metal (macOS) support:
- **Linux Vulkan**:
- NVIDIA: Install proprietary drivers
- AMD: `mesa-vulkan-drivers` (Ubuntu) or `vulkan-radeon` (Arch)
- Intel: `mesa-vulkan-drivers` (Ubuntu) or `vulkan-intel` (Arch)
- **macOS Metal**: Built-in (macOS 10.13+)
- **Windows Vulkan**:
- Usually included with GPU drivers
- Manual install: https://vulkan.lunarg.com/
## Build Configurations
### Release Build (Optimized)
```bash
cargo build --release
```
- Optimizations: Level 3
- LTO: Enabled
- Debug info: None
- Build time: Slower (~5-10 minutes)
- Runtime: Fast
Binary location: `target/release/lightningbeam-editor`
### Debug Build (Default)
```bash
cargo build
```
- Optimizations: Level 1 (Level 2 for audio code)
- LTO: Disabled
- Debug info: Full
- Build time: Faster (~2-5 minutes)
- Runtime: Slower (but audio is still optimized)
Binary location: `target/debug/lightningbeam-editor`
**Note**: Audio code is always compiled with `opt-level = 2` even in debug builds to meet real-time deadlines. This is configured in `lightningbeam-ui/Cargo.toml`:
```toml
[profile.dev.package.daw-backend]
opt-level = 2
```
### Check Without Building
Quickly check for compilation errors without producing binaries:
```bash
cargo check
```
Useful for rapid feedback during development.
### Build Specific Package
```bash
# Check only the audio backend
cargo check -p daw-backend
# Build only the core library
cargo build -p lightningbeam-core
```
## Troubleshooting
### Submodule / CMake Issues
#### "does not contain a CMakeLists.txt file" (RTNeural or math_approx)
**Cause**: The `vendor/NeuralAudio` submodule has its own nested submodules (`deps/RTNeural`, `deps/math_approx`) that weren't initialized. A plain `git submodule update --init` only initializes top-level submodules.
**Solution**: Use `--recursive` to initialize all nested submodules:
```bash
git submodule update --init --recursive
```
Or, if the top-level submodule is already checked out:
```bash
cd vendor/NeuralAudio
git submodule update --init
```
### Audio Issues
#### "ALSA lib cannot find card" or similar errors
**Solution**: Install ALSA development files:
```bash
# Ubuntu/Debian
sudo apt install libasound2-dev
# Arch
sudo pacman -S alsa-lib
```
#### Audio dropouts or crackling
**Symptoms**: Console shows "Audio overrun" or timing warnings.
**Solutions**:
1. Increase buffer size in `daw-backend/src/lib.rs` (default: 256 frames)
2. Enable audio debug logging:
```bash
DAW_AUDIO_DEBUG=1 cargo run
```
3. Make sure audio code is optimized (check `Cargo.toml` profile settings)
4. Close other audio applications
#### "PulseAudio" or "JACK" errors in container
**Note**: This is expected in containerized environments without audio support. These errors don't occur on native systems.
### FFmpeg Issues
#### "Could not find FFmpeg libraries" or linking errors
**Version Check First**:
```bash
ffmpeg -version
# Should show version 8.x
```
**Linux**:
```bash
# Ubuntu/Debian - requires FFmpeg 8 from PPA
sudo add-apt-repository ppa:ubuntuhandbook1/ffmpeg7
sudo apt update
sudo apt install libavcodec-dev libavformat-dev libavutil-dev libswscale-dev libswresample-dev
# Arch (usually has latest)
sudo pacman -S ffmpeg
# Check installation
pkg-config --modversion libavcodec
# Should show 62.x or higher (FFmpeg 8; libavcodec 61.x corresponds to FFmpeg 7)
```
If the PPA doesn't work or doesn't have FFmpeg 8, you may need to compile from source:
```bash
# Download and compile FFmpeg 8
wget https://ffmpeg.org/releases/ffmpeg-8.0.tar.xz
tar xf ffmpeg-8.0.tar.xz
cd ffmpeg-8.0
./configure --enable-shared --disable-static
make -j$(nproc)
sudo make install
sudo ldconfig
```
**macOS**:
```bash
brew install ffmpeg
export PKG_CONFIG_PATH="/opt/homebrew/opt/ffmpeg/lib/pkgconfig:$PKG_CONFIG_PATH"
```
**Windows**:
Set environment variables:
```cmd
set FFMPEG_DIR=C:\path\to\ffmpeg
set PKG_CONFIG_PATH=C:\path\to\ffmpeg\lib\pkgconfig
```
#### "Unsupported codec" or video not playing
Make sure FFmpeg was compiled with the necessary codecs:
```bash
ffmpeg -codecs | grep h264 # Check for H.264
ffmpeg -codecs | grep vp9 # Check for VP9
```
### GPU/Rendering Issues
#### Black screen or no rendering
**Check GPU support**:
```bash
# Linux - check Vulkan
vulkaninfo | grep deviceName
# macOS - Metal is built-in on 10.13+
system_profiler SPDisplaysDataType
```
**Solutions**:
1. Update GPU drivers
2. Install Vulkan runtime (Linux)
3. Check console for wgpu errors
#### "No suitable GPU adapter found"
This usually means missing Vulkan/Metal support.
**Linux**: Install Vulkan drivers (see [Optional Dependencies](#optional-dependencies))
**macOS**: Requires macOS 10.13+ (Metal support)
**Windows**: Update GPU drivers
### Build Performance
#### Slow compilation times
**Solutions**:
1. Use `cargo check` instead of `cargo build` during development
2. Enable incremental compilation (enabled by default)
3. Use `mold` linker (Linux):
```bash
# Install mold
sudo apt install mold # Ubuntu 22.04+
# Use mold
mold -run cargo build
```
4. Increase parallel jobs:
```bash
cargo build -j 8 # Use 8 parallel jobs
```
#### Out of memory during compilation
**Solution**: Reduce parallel jobs:
```bash
cargo build -j 2 # Use only 2 parallel jobs
```
### Linker Errors
#### "undefined reference to..." or "cannot find -l..."
**Cause**: Missing system libraries.
**Solution**: Install all dependencies listed in [Platform-Specific Instructions](#platform-specific-instructions).
#### Windows: "LNK1181: cannot open input file"
**Cause**: FFmpeg libraries not found.
**Solution**:
1. Download FFmpeg shared build
2. Set `FFMPEG_DIR` environment variable
3. Add FFmpeg bin directory to PATH
## Development Builds
### Enable Audio Debug Logging
```bash
DAW_AUDIO_DEBUG=1 cargo run
```
Output includes:
- Buffer sizes
- Average/worst-case processing times
- Audio overruns/underruns
- Playhead position updates
### Disable Optimizations for Specific Crates
Edit `lightningbeam-ui/Cargo.toml`:
```toml
[profile.dev.package.my-crate]
opt-level = 0 # No optimizations
```
**Warning**: Do not disable optimizations for `daw-backend` or audio-related crates, as this will cause audio dropouts.
### Build with Specific Features
```bash
# Build with all features
cargo build --all-features
# Build with no default features
cargo build --no-default-features
```
### Clean Build
Remove all build artifacts and start fresh:
```bash
cargo clean
cargo build
```
Useful when dependencies change or build cache becomes corrupted.
### Cross-Compilation
Cross-compiling is not currently documented but should be possible using `cross`:
```bash
cargo install cross
cross build --target x86_64-unknown-linux-gnu
```
See [cross documentation](https://github.com/cross-rs/cross) for details.
## Running Tests
```bash
# Run all tests
cargo test
# Run tests for specific package
cargo test -p lightningbeam-core
# Run with output
cargo test -- --nocapture
# Run specific test
cargo test test_name
```
## Building Documentation
Generate and open Rust API documentation:
```bash
cargo doc --open
```
This generates HTML documentation from code comments and opens it in your browser.
## Next Steps
After building successfully:
- See [CONTRIBUTING.md](../CONTRIBUTING.md) for development workflow
- See [ARCHITECTURE.md](../ARCHITECTURE.md) for system architecture
- See [docs/AUDIO_SYSTEM.md](AUDIO_SYSTEM.md) for audio engine details
- See [docs/UI_SYSTEM.md](UI_SYSTEM.md) for UI development

812
docs/RENDERING.md Normal file
View File

@ -0,0 +1,812 @@
# GPU Rendering Architecture
This document describes Lightningbeam's GPU rendering pipeline, including Vello integration for vector graphics, custom WGSL shaders for waveforms, and wgpu integration patterns.
## Table of Contents
- [Overview](#overview)
- [Rendering Pipeline](#rendering-pipeline)
- [Vello Integration](#vello-integration)
- [Waveform Rendering](#waveform-rendering)
- [WGSL Shaders](#wgsl-shaders)
- [Uniform Buffer Alignment](#uniform-buffer-alignment)
- [Custom wgpu Integration](#custom-wgpu-integration)
- [Performance Optimization](#performance-optimization)
- [Debugging Rendering Issues](#debugging-rendering-issues)
## Overview
Lightningbeam uses GPU-accelerated rendering for high-performance 2D graphics:
- **Vello**: Compute shader-based 2D vector rendering
- **wgpu 27**: Cross-platform GPU API (Vulkan, Metal, D3D12)
- **egui-wgpu**: Integration layer between egui and wgpu
- **Custom WGSL shaders**: For specialized rendering (waveforms, effects)
### Supported Backends
- **Linux**: Vulkan (primary), OpenGL (fallback)
- **macOS**: Metal
- **Windows**: Vulkan, DirectX 12
## Rendering Pipeline
### High-Level Flow
```
┌─────────────────────────────────────────────────────────────┐
│ Application Frame │
├─────────────────────────────────────────────────────────────┤
│ │
│ 1. egui Layout Phase │
│ - Build UI tree │
│ - Collect paint primitives │
│ - Register wgpu callbacks │
│ │
│ 2. Custom GPU Rendering (via egui_wgpu::Callback) │
│ ┌────────────────────────────────────────────────┐ │
│ │ prepare(): │ │
│ │ - Build Vello scene from document │ │
│ │ - Update uniform buffers │ │
│ │ - Generate waveform mipmaps (if needed) │ │
│ └────────────────────────────────────────────────┘ │
│ ┌────────────────────────────────────────────────┐ │
│ │ paint(): │ │
│ │ - Render Vello scene to texture │ │
│ │ - Render waveforms │ │
│ │ - Composite layers │ │
│ └────────────────────────────────────────────────┘ │
│ │
│ 3. egui Paint │
│ - Render egui UI elements │
│ - Composite with custom rendering │
│ │
│ 4. Present to Screen │
│ │
└─────────────────────────────────────────────────────────────┘
```
### Render Pass Structure
```
Main Render Pass
├─> Clear screen
├─> Custom wgpu callbacks (Stage pane, etc.)
│ ├─> Vello vector rendering
│ └─> Waveform rendering
└─> egui UI rendering (text, widgets, overlays)
```
## Vello Integration
Vello is a GPU-accelerated 2D rendering engine that uses compute shaders for high-performance vector graphics.
### Vello Architecture
```
Document Shapes
Convert to kurbo paths
Build Vello Scene
Vello Renderer (compute shaders)
Render to GPU texture
Composite with UI
```
### Building a Vello Scene
```rust
use vello::{Scene, SceneBuilder, kurbo::{Affine, BezPath}};
use peniko::{Color, Fill, Brush};
fn build_vello_scene(document: &Document) -> Scene {
let mut scene = Scene::new();
let mut builder = SceneBuilder::for_scene(&mut scene);
for layer in &document.layers {
if let Layer::VectorLayer { clips, visible, .. } = layer {
if !visible {
continue;
}
for clip in clips {
for shape_instance in &clip.shapes {
// Get transform for this shape
let transform = shape_instance.compute_world_transform();
let affine = to_vello_affine(transform);
// Convert shape to kurbo path
let path = shape_to_kurbo_path(&shape_instance.shape);
// Fill
if let Some(fill_color) = shape_instance.shape.fill {
let brush = Brush::Solid(to_peniko_color(fill_color));
builder.fill(
Fill::NonZero,
affine,
&brush,
None,
&path,
);
}
// Stroke
if let Some(stroke) = &shape_instance.shape.stroke {
let brush = Brush::Solid(to_peniko_color(stroke.color));
let stroke_style = vello::kurbo::Stroke::new(stroke.width);
builder.stroke(
&stroke_style,
affine,
&brush,
None,
&path,
);
}
}
}
}
}
scene
}
```
### Shape to Kurbo Path Conversion
```rust
use kurbo::{BezPath, PathEl, Point};
fn shape_to_kurbo_path(shape: &Shape) -> BezPath {
let mut path = BezPath::new();
if shape.curves.is_empty() {
return path;
}
// Start at first point
path.move_to(Point::new(
shape.curves[0].start.x as f64,
shape.curves[0].start.y as f64,
));
// Add curves
for curve in &shape.curves {
match curve.curve_type {
CurveType::Linear => {
path.line_to(Point::new(
curve.end.x as f64,
curve.end.y as f64,
));
}
CurveType::Quadratic => {
path.quad_to(
Point::new(curve.control1.x as f64, curve.control1.y as f64),
Point::new(curve.end.x as f64, curve.end.y as f64),
);
}
CurveType::Cubic => {
path.curve_to(
Point::new(curve.control1.x as f64, curve.control1.y as f64),
Point::new(curve.control2.x as f64, curve.control2.y as f64),
Point::new(curve.end.x as f64, curve.end.y as f64),
);
}
}
}
// Close path if needed
if shape.closed {
path.close_path();
}
path
}
```
### Vello Renderer Setup
```rust
use vello::{Renderer, RendererOptions, RenderParams};
use wgpu;
pub struct VelloRenderer {
renderer: Renderer,
surface_format: wgpu::TextureFormat,
}
impl VelloRenderer {
pub fn new(device: &wgpu::Device, surface_format: wgpu::TextureFormat) -> Self {
let renderer = Renderer::new(
device,
RendererOptions {
surface_format: Some(surface_format),
use_cpu: false,
antialiasing_support: vello::AaSupport::all(),
num_init_threads: None,
},
).expect("Failed to create Vello renderer");
Self {
renderer,
surface_format,
}
}
pub fn render(
&mut self,
device: &wgpu::Device,
queue: &wgpu::Queue,
scene: &Scene,
texture: &wgpu::TextureView,
width: u32,
height: u32,
) {
let params = RenderParams {
base_color: peniko::Color::TRANSPARENT,
width,
height,
antialiasing_method: vello::AaConfig::Msaa16,
};
self.renderer
.render_to_texture(device, queue, scene, texture, &params)
.expect("Failed to render Vello scene");
}
}
```
## Waveform Rendering
Audio waveforms are rendered on the GPU using custom WGSL shaders with mipmapping for efficient zooming.
### Waveform GPU Resources
```rust
pub struct WaveformGPU {
// Waveform data texture (min/max per sample)
texture: wgpu::Texture,
texture_view: wgpu::TextureView,
// Mipmap chain for level-of-detail
mip_levels: Vec<wgpu::TextureView>,
// Render pipeline
pipeline: wgpu::RenderPipeline,
// Uniform buffer for view parameters
uniform_buffer: wgpu::Buffer,
bind_group: wgpu::BindGroup,
}
```
### Waveform Texture Format
Each texel stores min/max amplitude for a sample range:
```
Texture Format: Rgba16Float (4 channels, 16-bit float each)
- R channel: Left channel minimum amplitude in range [-1, 1]
- G channel: Left channel maximum amplitude in range [-1, 1]
- B channel: Right channel minimum amplitude in range [-1, 1]
- A channel: Right channel maximum amplitude in range [-1, 1]
Mip level 0: Per-sample min/max (1x)
Mip level 1: Per-4-sample min/max (1/4x)
Mip level 2: Per-16-sample min/max (1/16x)
Mip level 3: Per-64-sample min/max (1/64x)
...
Each mip level reduces by 4x, not 2x, for efficient zooming.
```
### Generating Waveform Texture
```rust
fn generate_waveform_texture(
device: &wgpu::Device,
queue: &wgpu::Queue,
audio_samples: &[f32],
) -> wgpu::Texture {
// Calculate mip levels
let width = audio_samples.len() as u32;
let mip_levels = (width as f32).log2().floor() as u32 + 1;
// Create texture
let texture = device.create_texture(&wgpu::TextureDescriptor {
label: Some("Waveform Texture"),
size: wgpu::Extent3d {
width,
height: 1,
depth_or_array_layers: 1,
},
mip_level_count: mip_levels,
sample_count: 1,
dimension: wgpu::TextureDimension::D1,
format: wgpu::TextureFormat::Rg32Float, // mono example: R = min, G = max (the stereo pipeline uses Rgba16Float as described above)
usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
view_formats: &[],
});
// Upload base level (per-sample min/max)
let mut data: Vec<f32> = Vec::with_capacity(width as usize * 2);
for &sample in audio_samples {
data.push(sample); // min
data.push(sample); // max
}
queue.write_texture(
wgpu::ImageCopyTexture {
texture: &texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
aspect: wgpu::TextureAspect::All,
},
bytemuck::cast_slice(&data),
wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: Some(width * 8), // 2 floats * 4 bytes
rows_per_image: None,
},
wgpu::Extent3d {
width,
height: 1,
depth_or_array_layers: 1,
},
);
texture
}
```
### Mipmap Generation (Compute Shader)
```rust
// Compute shader generates mipmaps by taking min/max of 4 parent samples
// Each mip level is 4x smaller than the previous level
fn generate_mipmaps(
device: &wgpu::Device,
queue: &wgpu::Queue,
texture: &wgpu::Texture,
base_width: u32,
base_height: u32,
mip_count: u32,
base_sample_count: u32,
) -> Vec<wgpu::CommandBuffer> {
if mip_count <= 1 {
return Vec::new();
}
let mut encoder = device.create_command_encoder(&Default::default());
let mut src_width = base_width;
let mut src_height = base_height;
let mut src_sample_count = base_sample_count;
for level in 1..mip_count {
// Dimensions halve (2x2 texels -> 1 texel)
let dst_width = (src_width / 2).max(1);
let dst_height = (src_height / 2).max(1);
// But sample count reduces by 4x (4 samples -> 1)
let dst_sample_count = (src_sample_count + 3) / 4;
let src_view = texture.create_view(&wgpu::TextureViewDescriptor {
base_mip_level: level - 1,
mip_level_count: Some(1),
..Default::default()
});
let dst_view = texture.create_view(&wgpu::TextureViewDescriptor {
base_mip_level: level,
mip_level_count: Some(1),
..Default::default()
});
let params = MipgenParams {
src_width,
dst_width,
src_sample_count,
_pad: 0,
};
let params_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
contents: bytemuck::cast_slice(&[params]),
usage: wgpu::BufferUsages::UNIFORM,
});
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &mipgen_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&src_view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::TextureView(&dst_view),
},
wgpu::BindGroupEntry {
binding: 2,
resource: params_buffer.as_entire_binding(),
},
],
});
// Dispatch compute shader
let total_dst_texels = dst_width * dst_height;
let workgroup_count = (total_dst_texels + 63) / 64;
let mut pass = encoder.begin_compute_pass(&Default::default());
pass.set_pipeline(&mipgen_pipeline);
pass.set_bind_group(0, &bind_group, &[]);
pass.dispatch_workgroups(workgroup_count, 1, 1);
drop(pass);
src_width = dst_width;
src_height = dst_height;
src_sample_count = dst_sample_count;
}
vec![encoder.finish()]
}
```
## WGSL Shaders
### Waveform Render Shader
```wgsl
// waveform.wgsl
struct WaveformParams {
view_matrix: mat4x4<f32>, // 64 bytes
viewport_size: vec2<f32>, // 8 bytes
zoom: f32, // 4 bytes
_pad1: f32, // 4 bytes (padding)
tint_color: vec4<f32>, // 16 bytes (requires 16-byte alignment)
// Total: 96 bytes
}
@group(0) @binding(0) var<uniform> params: WaveformParams;
@group(0) @binding(1) var waveform_texture: texture_1d<f32>;
@group(0) @binding(2) var waveform_sampler: sampler;
struct VertexOutput {
@builtin(position) position: vec4<f32>,
@location(0) uv: vec2<f32>,
}
@vertex
fn vs_main(@builtin(vertex_index) vertex_index: u32) -> VertexOutput {
// Generate fullscreen quad
var positions = array<vec2<f32>, 6>(
vec2(-1.0, -1.0),
vec2( 1.0, -1.0),
vec2( 1.0, 1.0),
vec2(-1.0, -1.0),
vec2( 1.0, 1.0),
vec2(-1.0, 1.0),
);
var output: VertexOutput;
output.position = vec4(positions[vertex_index], 0.0, 1.0);
output.uv = (positions[vertex_index] + 1.0) * 0.5;
return output;
}
@fragment
fn fs_main(input: VertexOutput) -> @location(0) vec4<f32> {
// Sample waveform texture
let sample_pos = input.uv.x;
let waveform = textureSample(waveform_texture, waveform_sampler, sample_pos);
// waveform.r = min amplitude, waveform.g = max amplitude
let min_amp = waveform.r;
let max_amp = waveform.g;
// Map amplitude to vertical position.
// Amplitudes are signed values in [-1, 1], so both endpoints are offset from
// the center in the SAME direction (min_amp is typically negative and lands
// below the center line).
let center_y = 0.5;
let min_y = center_y + min_amp * 0.5;
let max_y = center_y + max_amp * 0.5;
// Check if pixel is within waveform range
if (input.uv.y >= min_y && input.uv.y <= max_y) {
return params.tint_color;
} else {
return vec4(0.0, 0.0, 0.0, 0.0); // Transparent
}
}
```
### Mipmap Generation Shader
```wgsl
// waveform_mipgen.wgsl
struct MipgenParams {
src_width: u32,
dst_width: u32,
src_sample_count: u32,
}
@group(0) @binding(0) var src_texture: texture_2d<f32>;
@group(0) @binding(1) var dst_texture: texture_storage_2d<rgba16float, write>;
@group(0) @binding(2) var<uniform> params: MipgenParams;
@compute @workgroup_size(64)
fn main(@builtin(global_invocation_id) global_id: vec3<u32>) {
let linear_index = global_id.x;
// Convert linear index to 2D coordinates
let dst_x = linear_index % params.dst_width;
let dst_y = linear_index / params.dst_width;
// Each dst texel corresponds to 4 src samples (not 4 src texels)
// But 2D texture layout halves in each dimension
let src_x = dst_x * 2u;
let src_y = dst_y * 2u;
// Sample 4 texels from parent level (2x2 block)
let s00 = textureLoad(src_texture, vec2<i32>(i32(src_x), i32(src_y)), 0);
let s10 = textureLoad(src_texture, vec2<i32>(i32(src_x + 1u), i32(src_y)), 0);
let s01 = textureLoad(src_texture, vec2<i32>(i32(src_x), i32(src_y + 1u)), 0);
let s11 = textureLoad(src_texture, vec2<i32>(i32(src_x + 1u), i32(src_y + 1u)), 0);
// Compute min/max across all 4 samples for each channel
let left_min = min(min(s00.r, s10.r), min(s01.r, s11.r));
let left_max = max(max(s00.g, s10.g), max(s01.g, s11.g));
let right_min = min(min(s00.b, s10.b), min(s01.b, s11.b));
let right_max = max(max(s00.a, s10.a), max(s01.a, s11.a));
// Write to destination mip level
textureStore(dst_texture, vec2<i32>(i32(dst_x), i32(dst_y)),
vec4(left_min, left_max, right_min, right_max));
}
```
## Uniform Buffer Alignment
WGSL has strict alignment requirements. The most common issue is `vec4<f32>` requiring 16-byte alignment.
### Alignment Rules
```rust
// ❌ Bad: tint_color not aligned to 16 bytes
#[repr(C)]
struct WaveformParams {
view_matrix: [f32; 16], // 64 bytes (offset 0)
viewport_size: [f32; 2], // 8 bytes (offset 64)
zoom: f32, // 4 bytes (offset 72)
tint_color: [f32; 4], // 16 bytes (offset 76) ❌ Not 16-byte aligned!
}
// ✅ Good: explicit padding for alignment
#[repr(C)]
#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
struct WaveformParams {
view_matrix: [f32; 16], // 64 bytes (offset 0)
viewport_size: [f32; 2], // 8 bytes (offset 64)
zoom: f32, // 4 bytes (offset 72)
_pad1: f32, // 4 bytes (offset 76) - padding
tint_color: [f32; 4], // 16 bytes (offset 80) ✅ 16-byte aligned!
}
// Total size: 96 bytes
```
### Common Alignment Requirements
| WGSL Type | Size | Alignment |
|-----------|------|-----------|
| `f32` | 4 bytes | 4 bytes |
| `vec2<f32>` | 8 bytes | 8 bytes |
| `vec3<f32>` | 12 bytes | 16 bytes ⚠️ |
| `vec4<f32>` | 16 bytes | 16 bytes |
| `mat4x4<f32>` | 64 bytes | 16 bytes |
| Struct | Sum of members | 16 bytes (uniform buffers) |
### Debug Alignment Issues
```rust
// Use static_assertions to catch alignment bugs at compile time
use static_assertions::const_assert_eq;
const_assert_eq!(std::mem::size_of::<WaveformParams>(), 96);
// NOTE: a plain #[repr(C)] struct of f32 fields has align_of == 4, so the
// assertion below only holds if the struct is declared #[repr(C, align(16))].
const_assert_eq!(std::mem::align_of::<WaveformParams>(), 16);
// Runtime validation
fn validate_uniform_buffer<T: bytemuck::Pod>(data: &T) {
let size = std::mem::size_of::<T>();
let align = std::mem::align_of::<T>();
assert!(size % 16 == 0, "Uniform buffer size must be multiple of 16");
assert!(align >= 16, "Uniform buffer must be 16-byte aligned");
}
```
## Custom wgpu Integration
### egui-wgpu Callback Pattern
```rust
use egui_wgpu::CallbackTrait;
struct CustomRenderCallback {
// Data needed for rendering
scene: Scene,
params: UniformData,
}
impl CallbackTrait for CustomRenderCallback {
fn prepare(
&self,
device: &wgpu::Device,
queue: &wgpu::Queue,
_screen_descriptor: &egui_wgpu::ScreenDescriptor,
_encoder: &mut wgpu::CommandEncoder,
resources: &mut egui_wgpu::CallbackResources,
) -> Vec<wgpu::CommandBuffer> {
// Update GPU resources (buffers, textures, etc.)
// This runs before rendering
// Get or create renderer
let renderer: &mut MyRenderer = resources.get_or_insert_with(|| {
MyRenderer::new(device)
});
// Update uniform buffer
queue.write_buffer(&renderer.uniform_buffer, 0, bytemuck::bytes_of(&self.params));
vec![] // Return additional command buffers if needed
}
fn paint<'a>(
&'a self,
_info: egui::PaintCallbackInfo,
render_pass: &mut wgpu::RenderPass<'a>,
resources: &'a egui_wgpu::CallbackResources,
) {
// Actual rendering
let renderer: &MyRenderer = resources.get().unwrap();
render_pass.set_pipeline(&renderer.pipeline);
render_pass.set_bind_group(0, &renderer.bind_group, &[]);
render_pass.draw(0..6, 0..1); // Draw fullscreen quad
}
}
```
### Registering Callback in egui
```rust
// In Stage pane render method
let callback = egui_wgpu::Callback::new_paint_callback(
rect,
CustomRenderCallback {
scene: self.build_scene(document),
params: self.compute_params(),
},
);
ui.painter().add(callback);
```
## Performance Optimization
### Minimize GPU↔CPU Transfer
```rust
// ❌ Bad: Update uniform buffer every frame
for frame in frames {
queue.write_buffer(&uniform_buffer, 0, &params);
render();
}
// ✅ Good: Only update when changed
if params_changed {
queue.write_buffer(&uniform_buffer, 0, &params);
}
render();
```
### Reuse GPU Resources
```rust
// ✅ Good: Reuse textures and buffers
struct WaveformCache {
textures: HashMap<Uuid, wgpu::Texture>,
}
impl WaveformCache {
fn get_or_create(&mut self, clip_id: Uuid, audio_data: &[f32]) -> &wgpu::Texture {
self.textures.entry(clip_id).or_insert_with(|| {
generate_waveform_texture(device, queue, audio_data)
})
}
}
```
### Batch Draw Calls
```rust
// ❌ Bad: One draw call per shape
for shape in shapes {
render_pass.set_bind_group(0, &shape.bind_group, &[]);
render_pass.draw(0..shape.vertex_count, 0..1);
}
// ✅ Good: Batch into single draw call
let batched_vertices = batch_shapes(shapes);
render_pass.set_bind_group(0, &batched_bind_group, &[]);
render_pass.draw(0..batched_vertices.len(), 0..1);
```
### Use Mipmaps for Zooming
```rust
// ✅ Good: Select appropriate mip level based on zoom
let mip_level = ((1.0 / zoom).log2().floor() as u32).min(max_mip_level);
let texture_view = texture.create_view(&wgpu::TextureViewDescriptor {
base_mip_level: mip_level,
mip_level_count: Some(1),
..Default::default()
});
```
## Debugging Rendering Issues
### Enable wgpu Validation
```rust
let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
backends: wgpu::Backends::all(),
dx12_shader_compiler: Default::default(),
flags: wgpu::InstanceFlags::validation(), // Enable validation
gles_minor_version: wgpu::Gles3MinorVersion::Automatic,
});
```
### Check for Errors
```rust
// Set error handler
device.on_uncaptured_error(Box::new(|error| {
eprintln!("wgpu error: {:?}", error);
}));
```
### Capture GPU Frame
**Linux** (RenderDoc):
```bash
renderdoccmd capture ./lightningbeam-editor
```
**macOS** (Xcode):
- Run with GPU Frame Capture enabled
- Trigger capture with Cmd+Option+G
### Common Issues
#### Black Screen
- Check that vertex shader outputs correct clip-space coordinates
- Verify texture bindings are correct
- Check that render pipeline format matches surface format
#### Validation Errors
- Check uniform buffer alignment (see [Uniform Buffer Alignment](#uniform-buffer-alignment))
- Verify texture formats match shader expectations
- Ensure bind groups match pipeline layout
#### Performance Issues
- Use GPU profiler (RenderDoc, Xcode)
- Check for redundant buffer uploads
- Profile shader performance
- Reduce draw call count via batching
## Related Documentation
- [ARCHITECTURE.md](../ARCHITECTURE.md) - Overall system architecture
- [docs/UI_SYSTEM.md](UI_SYSTEM.md) - UI and pane integration
- [CONTRIBUTING.md](../CONTRIBUTING.md) - Development workflow

848
docs/UI_SYSTEM.md Normal file
View File

@ -0,0 +1,848 @@
# UI System Architecture
This document describes Lightningbeam's UI architecture, including the pane system, tool system, GPU integration, and patterns for extending the UI with new features.
## Table of Contents
- [Overview](#overview)
- [Pane System](#pane-system)
- [Shared State](#shared-state)
- [Two-Phase Dispatch](#two-phase-dispatch)
- [ID Collision Avoidance](#id-collision-avoidance)
- [Tool System](#tool-system)
- [GPU Integration](#gpu-integration)
- [Adding New Panes](#adding-new-panes)
- [Adding New Tools](#adding-new-tools)
- [Event Handling](#event-handling)
- [Best Practices](#best-practices)
## Overview
Lightningbeam's UI is built with **egui**, an immediate-mode GUI framework. Unlike retained-mode frameworks (Qt, GTK), immediate-mode rebuilds the UI every frame by running code that describes what should be displayed.
### Key Technologies
- **egui 0.33.3**: Immediate-mode GUI framework
- **eframe**: Application framework wrapping egui
- **winit**: Cross-platform windowing
- **Vello**: GPU-accelerated 2D vector rendering
- **wgpu**: Low-level GPU API
- **egui-wgpu**: Integration layer between egui and wgpu
### Immediate Mode Overview
```rust
// Immediate mode: UI is described every frame
fn render(&mut self, ui: &mut egui::Ui) {
if ui.button("Click me").clicked() {
self.counter += 1;
}
ui.label(format!("Count: {}", self.counter));
}
```
**Benefits**:
- Simple mental model (just describe what you see)
- No manual synchronization between state and UI
- Easy to compose and reuse components
**Considerations**:
- Must avoid expensive operations in render code
- IDs needed for stateful widgets (handled automatically in most cases)
## Pane System
Lightningbeam uses a flexible pane system where the UI is composed of independent, reusable panes (Stage, Timeline, Asset Library, etc.).
### Pane Architecture
```
┌─────────────────────────────────────────────────────────┐
│ Main Application │
│ (LightningbeamApp) │
├─────────────────────────────────────────────────────────┤
│ │
│ ┌────────────────────────────────────────────────┐ │
│ │ Pane Tree (egui_tiles) │ │
│ │ │ │
│ │ ┌──────────┐ ┌──────────┐ ┌──────────┐ │ │
│ │ │ Stage │ │ Timeline │ │ Asset │ │ │
│ │ │ Pane │ │ Pane │ │ Library │ │ │
│ │ └──────────┘ └──────────┘ └──────────┘ │ │
│ │ │ │
│ │ Each pane: │ │
│ │ - Renders its UI │ │
│ │ - Registers actions with SharedPaneState │ │
│ │ - Accesses shared document state │ │
│ └────────────────────────────────────────────────┘ │
│ │
│ ┌────────────────────────────────────────────────┐ │
│ │ SharedPaneState │ │
│ │ - Document │ │
│ │ - Selected tool │ │
│ │ - Pending actions │ │
│ │ - Audio system │ │
│ └────────────────────────────────────────────────┘ │
│ │
│ After all panes render: │
│ - Execute pending actions │
│ - Update undo/redo stacks │
│ - Synchronize with audio engine │
│ │
└─────────────────────────────────────────────────────────┘
```
### PaneInstance Enum
All panes are variants of the `PaneInstance` enum:
```rust
// In lightningbeam-editor/src/panes/mod.rs
pub enum PaneInstance {
Stage(Stage),
Timeline(Timeline),
AssetLibrary(AssetLibrary),
InfoPanel(InfoPanel),
VirtualPiano(VirtualPiano),
Toolbar(Toolbar),
NodeEditor(NodeEditor),
PianoRoll(PianoRoll),
Outliner(Outliner),
PresetBrowser(PresetBrowser),
}
impl PaneInstance {
pub fn render(&mut self, ui: &mut Ui, shared_state: &mut SharedPaneState) {
match self {
PaneInstance::Stage(stage) => stage.render(ui, shared_state),
PaneInstance::Timeline(timeline) => timeline.render(ui, shared_state),
PaneInstance::AssetLibrary(lib) => lib.render(ui, shared_state),
// ... dispatch to specific pane
}
}
pub fn title(&self) -> &str {
match self {
PaneInstance::Stage(_) => "Stage",
PaneInstance::Timeline(_) => "Timeline",
// ...
}
}
}
```
### Individual Pane Structure
Each pane is a struct with its own state and a `render` method:
```rust
pub struct MyPane {
// Pane-specific state
scroll_offset: f32,
selected_item: Option<usize>,
// ... other state
}
impl MyPane {
pub fn new() -> Self {
Self {
scroll_offset: 0.0,
selected_item: None,
}
}
pub fn render(&mut self, ui: &mut Ui, shared_state: &mut SharedPaneState) {
// Render pane UI
ui.heading("My Pane");
// Access shared state
let document = &shared_state.document;
// Create actions
if ui.button("Do something").clicked() {
let action = Box::new(MyAction { /* ... */ });
shared_state.pending_actions.push(action);
}
}
}
```
### Key Panes
Located in `lightningbeam-editor/src/panes/`:
- **stage.rs** (214KB): Main canvas for drawing and transform tools
- **timeline.rs** (84KB): Multi-track timeline with clip editing
- **asset_library.rs** (70KB): Asset browser with drag-to-timeline
- **infopanel.rs** (31KB): Context-sensitive property editor
- **virtual_piano.rs** (31KB): On-screen MIDI keyboard
- **toolbar.rs** (9KB): Tool palette
## Shared State
`SharedPaneState` is passed to all panes during rendering to share data and coordinate actions.
### SharedPaneState Structure
```rust
pub struct SharedPaneState {
// Document state
pub document: Document,
pub undo_stack: Vec<Box<dyn Action>>,
pub redo_stack: Vec<Box<dyn Action>>,
// Tool state
pub selected_tool: Tool,
pub tool_state: ToolState,
// Actions to execute after rendering
pub pending_actions: Vec<Box<dyn Action>>,
// Audio engine
pub audio_system: AudioSystem,
pub playhead_position: f64,
pub is_playing: bool,
// Selection state
pub selected_clips: HashSet<Uuid>,
pub selected_shapes: HashSet<Uuid>,
// Clipboard
pub clipboard: Option<ClipboardData>,
// UI state
pub show_grid: bool,
pub snap_to_grid: bool,
pub grid_size: f32,
}
```
### Accessing Shared State
```rust
impl MyPane {
pub fn render(&mut self, ui: &mut Ui, shared_state: &mut SharedPaneState) {
// Read from document
let layer_count = shared_state.document.layers.len();
ui.label(format!("Layers: {}", layer_count));
// Check tool state
if shared_state.selected_tool == Tool::Select {
// ... render selection-specific UI
}
// Check playback state
if shared_state.is_playing {
ui.label("▶ Playing");
}
}
}
```
## Two-Phase Dispatch
Panes cannot directly mutate shared state during rendering due to Rust's borrowing rules. Instead, they register actions to be executed after all panes have rendered.
### Why Two-Phase?
```rust
// This doesn't work: can't borrow shared_state as mutable twice
pub fn render(&mut self, ui: &mut Ui, shared_state: &mut SharedPaneState) {
if ui.button("Add layer").clicked() {
// ❌ Can't mutate document while borrowed by render
shared_state.document.layers.push(Layer::new());
}
}
```
### Solution: Pending Actions
```rust
// Phase 1: Register action during render
pub fn render(&mut self, ui: &mut Ui, shared_state: &mut SharedPaneState) {
if ui.button("Add layer").clicked() {
let action = Box::new(AddLayerAction::new());
shared_state.pending_actions.push(action);
}
}
// Phase 2: Execute after all panes rendered (in main app)
for action in shared_state.pending_actions.drain(..) {
action.apply(&mut shared_state.document);
shared_state.undo_stack.push(action);
}
```
### Action Trait
All actions implement the `Action` trait:
```rust
pub trait Action: Send {
fn apply(&mut self, document: &mut Document);
fn undo(&mut self, document: &mut Document);
fn redo(&mut self, document: &mut Document);
}
```
Example action:
```rust
pub struct AddLayerAction {
layer_id: Uuid,
layer_type: LayerType,
}
impl Action for AddLayerAction {
fn apply(&mut self, document: &mut Document) {
let layer = Layer::new(self.layer_id, self.layer_type);
document.layers.push(layer);
}
fn undo(&mut self, document: &mut Document) {
document.layers.retain(|l| l.id != self.layer_id);
}
fn redo(&mut self, document: &mut Document) {
self.apply(document);
}
}
```
## ID Collision Avoidance
egui uses IDs to track widget state across frames (e.g., scroll position, collapse state). When multiple instances of the same pane exist, IDs can collide.
### The Problem
```rust
// If two Timeline panes exist, they'll share the same ID
ui.collapsing("Track 1", |ui| {
// ... content
}); // ID is derived from label "Track 1"
```
Both timeline instances would have the same "Track 1" ID, causing state conflicts.
### Solution: Salt IDs with Node Path
Each pane has a unique node path (e.g., `"root/0/1/2"`). Salt all IDs with this path:
```rust
pub struct Timeline {
node_path: String, // Unique path for this pane instance
}
impl Timeline {
pub fn render(&mut self, ui: &mut Ui, shared_state: &mut SharedPaneState) {
// Salt IDs with node path
ui.push_id(&self.node_path, |ui| {
// Now all IDs within this closure are unique to this instance
ui.collapsing("Track 1", |ui| {
// ... content
});
});
}
}
```
### Alternative: Per-Widget Salting
For individual widgets:
```rust
// Note: salting must happen when the widget is constructed — modifying the
// returned Response's id afterwards has no effect.
egui::CollapsingHeader::new("Track 1")
    .id_salt(&self.node_path) // salt this specific widget's ID
    .show(ui, |ui| {
        // ... content
    });
```
### Best Practice
**Always salt IDs in new panes** to support multiple instances:
```rust
impl NewPane {
pub fn render(&mut self, ui: &mut Ui, shared_state: &mut SharedPaneState) {
ui.push_id(&self.node_path, |ui| {
// All rendering code goes here
});
}
}
```
## Tool System
Tools handle user input on the Stage pane (drawing, selection, transforms, etc.).
### Tool Enum
```rust
pub enum Tool {
Select,
Draw,
Rectangle,
Ellipse,
Line,
PaintBucket,
Transform,
Eyedropper,
}
```
### Tool State
```rust
pub struct ToolState {
// Generic tool state
pub mouse_pos: Pos2,
pub mouse_down: bool,
pub drag_start: Option<Pos2>,
// Tool-specific state
pub draw_points: Vec<Pos2>,
pub transform_mode: TransformMode,
pub paint_bucket_tolerance: f32,
}
```
### Tool Implementation
Tools implement the `ToolBehavior` trait:
```rust
pub trait ToolBehavior {
fn on_mouse_down(&mut self, pos: Pos2, shared_state: &mut SharedPaneState);
fn on_mouse_move(&mut self, pos: Pos2, shared_state: &mut SharedPaneState);
fn on_mouse_up(&mut self, pos: Pos2, shared_state: &mut SharedPaneState);
fn on_key(&mut self, key: Key, shared_state: &mut SharedPaneState);
fn render_overlay(&self, painter: &Painter);
}
```
Example: Rectangle tool:
```rust
pub struct RectangleTool {
start_pos: Option<Pos2>,
}
impl ToolBehavior for RectangleTool {
fn on_mouse_down(&mut self, pos: Pos2, _shared_state: &mut SharedPaneState) {
self.start_pos = Some(pos);
}
fn on_mouse_move(&mut self, pos: Pos2, _shared_state: &mut SharedPaneState) {
// Visual feedback handled in render_overlay
}
fn on_mouse_up(&mut self, pos: Pos2, shared_state: &mut SharedPaneState) {
if let Some(start) = self.start_pos.take() {
// Create rectangle shape
let rect = Rect::from_two_pos(start, pos);
let action = Box::new(AddShapeAction::rectangle(rect));
shared_state.pending_actions.push(action);
}
}
fn render_overlay(&self, painter: &Painter) {
if let Some(start) = self.start_pos {
let current = painter.mouse_pos();
let rect = Rect::from_two_pos(start, current);
painter.rect_stroke(rect, 0.0, Stroke::new(2.0, Color32::WHITE));
}
}
}
```
### Tool Selection
```rust
// In Toolbar pane
if ui.button("✏ Draw").clicked() {
shared_state.selected_tool = Tool::Draw;
}
// In Stage pane
match shared_state.selected_tool {
Tool::Draw => self.draw_tool.on_mouse_move(pos, shared_state),
Tool::Select => self.select_tool.on_mouse_move(pos, shared_state),
// ...
}
```
## GPU Integration
The Stage pane uses custom wgpu rendering for vector graphics and waveforms.
### egui-wgpu Callbacks
```rust
// In Stage::render()
ui.painter().add(egui_wgpu::Callback::new_paint_callback(
rect,
StageCallback {
document: shared_state.document.clone(),
vello_renderer: self.vello_renderer.clone(),
waveform_renderer: self.waveform_renderer.clone(),
},
));
```
### Callback Implementation
```rust
struct StageCallback {
document: Document,
vello_renderer: Arc<Mutex<VelloRenderer>>,
waveform_renderer: Arc<Mutex<WaveformRenderer>>,
}
impl egui_wgpu::CallbackTrait for StageCallback {
fn prepare(
&self,
device: &wgpu::Device,
queue: &wgpu::Queue,
encoder: &mut wgpu::CommandEncoder,
resources: &egui_wgpu::CallbackResources,
) -> Vec<wgpu::CommandBuffer> {
// Prepare GPU resources
let mut vello = self.vello_renderer.lock().unwrap();
vello.prepare_scene(&self.document);
vec![]
}
fn paint<'a>(
&'a self,
info: egui::PaintCallbackInfo,
render_pass: &mut wgpu::RenderPass<'a>,
resources: &'a egui_wgpu::CallbackResources,
) {
// Render vector graphics
let vello = self.vello_renderer.lock().unwrap();
vello.render(render_pass);
// Render waveforms
let waveforms = self.waveform_renderer.lock().unwrap();
waveforms.render(render_pass);
}
}
```
### Vello Integration
Vello renders 2D vector graphics using GPU compute shaders:
```rust
use vello::{Scene, SceneBuilder, kurbo};
fn build_vello_scene(document: &Document) -> Scene {
let mut scene = Scene::new();
let mut builder = SceneBuilder::for_scene(&mut scene);
for layer in &document.layers {
if let Layer::VectorLayer { clips, .. } = layer {
for clip in clips {
for shape in &clip.shapes {
// Convert shape to kurbo path
let path = shape.to_kurbo_path();
// Add to scene with fill/stroke
builder.fill(
Fill::NonZero,
Affine::IDENTITY,
&shape.fill_color,
None,
&path,
);
}
}
}
}
scene
}
```
## Adding New Panes
### Step 1: Create Pane Struct
```rust
// In lightningbeam-editor/src/panes/my_pane.rs
pub struct MyPane {
node_path: String,
// Pane-specific state
selected_index: usize,
scroll_offset: f32,
}
impl MyPane {
pub fn new(node_path: String) -> Self {
Self {
node_path,
selected_index: 0,
scroll_offset: 0.0,
}
}
pub fn render(&mut self, ui: &mut Ui, shared_state: &mut SharedPaneState) {
// IMPORTANT: Salt IDs with node path
ui.push_id(&self.node_path, |ui| {
ui.heading("My Pane");
// Render pane content
// ...
});
}
}
```
### Step 2: Add to PaneInstance Enum
```rust
// In lightningbeam-editor/src/panes/mod.rs
pub enum PaneInstance {
// ... existing variants
MyPane(MyPane),
}
impl PaneInstance {
pub fn render(&mut self, ui: &mut Ui, shared_state: &mut SharedPaneState) {
match self {
// ... existing cases
PaneInstance::MyPane(pane) => pane.render(ui, shared_state),
}
}
pub fn title(&self) -> &str {
match self {
// ... existing cases
PaneInstance::MyPane(_) => "My Pane",
}
}
}
```
### Step 3: Add to Menu
```rust
// In main application
if ui.button("My Pane").clicked() {
let pane = PaneInstance::MyPane(MyPane::new(generate_node_path()));
app.add_pane(pane);
}
```
## Adding New Tools
### Step 1: Add to Tool Enum
```rust
pub enum Tool {
// ... existing tools
MyTool,
}
```
### Step 2: Implement Tool Behavior
```rust
pub struct MyToolState {
// Tool-specific state
start_pos: Option<Pos2>,
}
impl MyToolState {
pub fn handle_input(
&mut self,
response: &Response,
shared_state: &mut SharedPaneState,
) {
if response.clicked() {
self.start_pos = response.interact_pointer_pos();
}
if response.drag_released() {
if let Some(start) = self.start_pos.take() {
// Create action
let action = Box::new(MyAction { /* ... */ });
shared_state.pending_actions.push(action);
}
}
}
pub fn render_overlay(&self, painter: &Painter) {
// Draw tool-specific overlay
}
}
```
### Step 3: Add to Toolbar
```rust
// In Toolbar pane
if ui.button("🔧 My Tool").clicked() {
shared_state.selected_tool = Tool::MyTool;
}
```
### Step 4: Handle in Stage Pane
```rust
// In Stage pane
match shared_state.selected_tool {
// ... existing tools
Tool::MyTool => self.my_tool_state.handle_input(&response, shared_state),
}
// Render overlay
match shared_state.selected_tool {
// ... existing tools
Tool::MyTool => self.my_tool_state.render_overlay(&painter),
}
```
## Event Handling
### Mouse Events
```rust
let response = ui.allocate_rect(rect, Sense::click_and_drag());
if response.clicked() {
let pos = response.interact_pointer_pos().unwrap();
// Handle click at pos
}
if response.dragged() {
let delta = response.drag_delta();
// Handle drag by delta
}
if response.drag_released() {
// Handle drag end
}
```
### Keyboard Events
```rust
ui.input(|i| {
if i.key_pressed(Key::Delete) {
// Delete selected items
}
if i.modifiers.ctrl && i.key_pressed(Key::Z) {
// Undo
}
if i.modifiers.ctrl && i.key_pressed(Key::Y) {
// Redo
}
});
```
### Drag and Drop
```rust
// Source (Asset Library)
let response = ui.label("Audio Clip");
if response.dragged() {
let payload = DragPayload::AudioClip(clip_id);
ui.memory_mut(|mem| {
mem.data.insert_temp(Id::new("drag_payload"), payload);
});
}
// Target (Timeline)
let response = ui.allocate_rect(rect, Sense::hover());
if response.hovered() {
if let Some(payload) = ui.memory(|mem| mem.data.get_temp::<DragPayload>(Id::new("drag_payload"))) {
// Handle drop
let action = Box::new(AddClipAction { clip_id: payload.clip_id(), position });
shared_state.pending_actions.push(action);
}
}
```
## Best Practices
### 1. Always Salt IDs
```rust
// ✅ Good
ui.push_id(&self.node_path, |ui| {
// All rendering here
});
// ❌ Bad (ID collisions if multiple instances)
ui.collapsing("Settings", |ui| {
// ...
});
```
### 2. Use Pending Actions
```rust
// ✅ Good
shared_state.pending_actions.push(Box::new(action));
// ❌ Bad (borrowing conflicts)
shared_state.document.layers.push(layer);
```
### 3. Split Borrows with std::mem::take
```rust
// ✅ Good
let mut clips = std::mem::take(&mut self.clips);
for clip in &mut clips {
self.render_clip(ui, clip); // Can borrow self immutably
}
self.clips = clips;
// ❌ Bad (can't borrow self while iterating clips)
for clip in &mut self.clips {
self.render_clip(ui, clip); // Error!
}
```
### 4. Avoid Expensive Operations in Render
```rust
// ❌ Bad (heavy computation every frame)
pub fn render(&mut self, ui: &mut Ui, shared_state: &mut SharedPaneState) {
let thumbnail = self.generate_thumbnail(); // Expensive!
ui.image(thumbnail);
}
// ✅ Good (cache result)
pub fn render(&mut self, ui: &mut Ui, shared_state: &mut SharedPaneState) {
if self.thumbnail_cache.is_none() {
self.thumbnail_cache = Some(self.generate_thumbnail());
}
ui.image(self.thumbnail_cache.as_ref().unwrap());
}
```
### 5. Handle Missing State Gracefully
```rust
// ✅ Good
if let Some(layer) = document.layers.get(layer_index) {
// Render layer
} else {
ui.label("Layer not found");
}
// ❌ Bad (panics if layer missing)
let layer = &document.layers[layer_index]; // May panic!
```
## Related Documentation
- [ARCHITECTURE.md](../ARCHITECTURE.md) - Overall system architecture
- [docs/AUDIO_SYSTEM.md](AUDIO_SYSTEM.md) - Audio engine integration
- [docs/RENDERING.md](RENDERING.md) - GPU rendering details
- [CONTRIBUTING.md](../CONTRIBUTING.md) - Development workflow

17
lightningbeam-ui/.gitignore vendored Normal file
View File

@ -0,0 +1,17 @@
# Rust build artifacts
/target/
**/target/
# Cargo.lock: commit it for applications, ignore it for libraries
# We keep it committed since this is an application
# IDE
.vscode/
.idea/
*.swp
*.swo
*~
# OS
.DS_Store
Thumbs.db

8276
lightningbeam-ui/Cargo.lock generated Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,89 @@
[workspace]
resolver = "2"
members = [
"lightningbeam-editor",
"lightningbeam-core",
"beamdsp",
]
[workspace.dependencies]
# UI Framework (using eframe for simplified integration)
# Note: Upgraded from 0.29 to 0.31 to fix Linux IME/keyboard input issues
# See: https://github.com/emilk/egui/pull/5198
# Upgraded to 0.33 for shader editor (egui_code_editor) and continued bug fixes
egui = "0.33.3"
eframe = { version = "0.33.3", default-features = true, features = ["wgpu"] }
egui_extras = { version = "0.33.3", features = ["image", "svg", "syntect"] }
egui-wgpu = "0.33.3"
egui_code_editor = "0.2"
# GPU Rendering
# vello from git uses wgpu 27, matching eframe 0.33
vello = { git = "https://github.com/linebender/vello", branch = "main" }
wgpu = { version = "27", features = ["vulkan", "metal", "gles"] }
kurbo = { version = "0.12", features = ["serde"] }
peniko = "0.5"
# Windowing
winit = "0.30"
# Native menus
muda = "0.15"
# Serialization
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
# Image loading
image = "0.25"
resvg = "0.42"
# Utilities
pollster = "0.3"
# Desktop notifications
notify-rust = "4.11"
# Optimize the audio backend even in debug builds — the audio callback
# runs on a real-time thread with ~1.5ms deadlines at small buffer sizes,
# so it cannot tolerate unoptimized code.
[profile.dev.package.daw-backend]
opt-level = 2
[profile.dev.package.nam-ffi]
opt-level = 2
[profile.dev.package.beamdsp]
opt-level = 2
# Also optimize symphonia (audio decoder) and cpal (audio I/O) — these
# run in the audio callback path and are heavily numeric.
[profile.dev.package.symphonia]
opt-level = 2
[profile.dev.package.symphonia-core]
opt-level = 2
[profile.dev.package.symphonia-bundle-mp3]
opt-level = 2
[profile.dev.package.symphonia-bundle-flac]
opt-level = 2
[profile.dev.package.symphonia-format-ogg]
opt-level = 2
[profile.dev.package.symphonia-codec-vorbis]
opt-level = 2
[profile.dev.package.symphonia-codec-aac]
opt-level = 2
[profile.dev.package.symphonia-format-isomp4]
opt-level = 2
[profile.dev.package.cpal]
opt-level = 2
# Use local egui fork with ibus/Wayland text input fix
[patch.crates-io]
egui = { path = "../../egui-fork/crates/egui" }
eframe = { path = "../../egui-fork/crates/eframe" }
egui_extras = { path = "../../egui-fork/crates/egui_extras" }
egui-wgpu = { path = "../../egui-fork/crates/egui-wgpu" }
egui-winit = { path = "../../egui-fork/crates/egui-winit" }
epaint = { path = "../../egui-fork/crates/epaint" }
ecolor = { path = "../../egui-fork/crates/ecolor" }
emath = { path = "../../egui-fork/crates/emath" }

View File

@ -0,0 +1,613 @@
# BeamDSP Language Reference
BeamDSP is a domain-specific language for writing audio processing scripts in Lightningbeam. Scripts are compiled to bytecode and run on the real-time audio thread with guaranteed bounded execution time and constant memory usage.
## Quick Start
```
name "Simple Gain"
category effect
inputs {
audio_in: audio
}
outputs {
audio_out: audio
}
params {
gain: 1.0 [0.0, 2.0] ""
}
process {
for i in 0..buffer_size {
audio_out[i * 2] = audio_in[i * 2] * gain;
audio_out[i * 2 + 1] = audio_in[i * 2 + 1] * gain;
}
}
```
Save this as a `.bdsp` file or create it directly in the Script Editor pane.
## Script Structure
A BeamDSP script is composed of **header blocks** followed by a **process block**. All blocks are optional except `name`, `category`, and `process`.
```
name "Display Name"
category effect|generator|utility
inputs { ... }
outputs { ... }
params { ... }
state { ... }
ui { ... }
process { ... }
```
### name
```
name "My Effect"
```
Sets the display name shown in the node graph.
### category
```
category effect
```
One of:
- **`effect`** — Processes audio (has inputs and outputs)
- **`generator`** — Produces audio or CV (outputs only, no audio inputs)
- **`utility`** — Signal routing, mixing, or other utility functions
### inputs
Declares input ports. Each input has a name and signal type.
```
inputs {
audio_in: audio
mod_signal: cv
}
```
Signal types:
- **`audio`** — Stereo interleaved audio (2 samples per frame: left, right)
- **`cv`** — Mono control voltage (1 sample per frame, NaN when unconnected)
### outputs
Declares output ports. Same syntax as inputs.
```
outputs {
audio_out: audio
env_out: cv
}
```
### params
Declares user-adjustable parameters. Each parameter has a default value, range, and unit string.
```
params {
frequency: 440.0 [20.0, 20000.0] "Hz"
gain: 1.0 [0.0, 2.0] ""
mix: 0.5 [0.0, 1.0] ""
}
```
Format: `name: default [min, max] "unit"`
Parameters appear as sliders in the node's UI. They are read-only inside the `process` block.
### state
Declares persistent variables that survive across process calls. State is zero-initialized and can be reset.
```
state {
phase: f32
counter: int
active: bool
buffer: [44100]f32
indices: [16]int
clip: sample
}
```
Types:
| Type | Description |
|------|-------------|
| `f32` | 32-bit float |
| `int` | 32-bit signed integer |
| `bool` | Boolean |
| `[N]f32` | Fixed-size float array (N is a constant) |
| `[N]int` | Fixed-size integer array (N is a constant) |
| `sample` | Loadable audio sample (stereo interleaved, read-only in process) |
State arrays are allocated once at compile time and never resized. The `sample` type holds audio data loaded through the node's UI.
### ui
Declares the layout of controls rendered below the node in the graph editor. If omitted, a default UI is generated with sliders for all parameters and pickers for all samples.
```
ui {
sample clip
param frequency
param gain
group "Mix" {
param mix
}
}
```
Elements:
| Element | Description |
|---------|-------------|
| `param name` | Slider for the named parameter |
| `sample name` | Audio clip picker for the named sample state variable |
| `group "label" { ... }` | Collapsible section containing child elements |
### process
The process block runs once per audio callback, processing all frames in the current buffer.
```
process {
for i in 0..buffer_size {
audio_out[i * 2] = audio_in[i * 2];
audio_out[i * 2 + 1] = audio_in[i * 2 + 1];
}
}
```
## Types
BeamDSP has three scalar types:
| Type | Description | Literal examples |
|------|-------------|-----------------|
| `f32` | 32-bit float | `1.0`, `0.5`, `3.14` |
| `int` | 32-bit signed integer | `0`, `42`, `256` |
| `bool` | Boolean | `true`, `false` |
Type conversions use cast syntax:
- `int(expr)` — Convert float to integer (truncates toward zero)
- `float(expr)` — Convert integer to float
Arithmetic between `int` and `f32` promotes the result to `f32`.
## Variables
### Local variables
```
let x = 1.0;
let mut counter = 0;
```
Use `let` to declare a local variable. Add `mut` to allow reassignment. Local variables exist only within the current block scope.
### Built-in variables
| Variable | Type | Description |
|----------|------|-------------|
| `sample_rate` | `int` | Audio sample rate in Hz (e.g., 44100) |
| `buffer_size` | `int` | Number of frames in the current buffer |
### Inputs and outputs
Input and output ports are accessed as arrays:
```
// Audio is stereo interleaved: [L0, R0, L1, R1, ...]
let left = audio_in[i * 2];
let right = audio_in[i * 2 + 1];
audio_out[i * 2] = left;
audio_out[i * 2 + 1] = right;
// CV is mono: one sample per frame
let mod_value = mod_in[i];
cv_out[i] = mod_value;
```
Input arrays are read-only. Output arrays are write-only.
### Parameters
Parameters are available as read-only `f32` variables:
```
audio_out[i * 2] = audio_in[i * 2] * gain;
```
### State variables
State scalars and arrays are mutable and persist across calls:
```
state {
phase: f32
buffer: [1024]f32
}
process {
phase = phase + 0.01;
buffer[0] = phase;
}
```
## Control Flow
### if / else
```
if phase >= 1.0 {
phase = phase - 1.0;
}
if value > threshold {
audio_out[i * 2] = 1.0;
} else {
audio_out[i * 2] = 0.0;
}
```
### for loops
For loops iterate from 0 to an upper bound (exclusive). The loop variable is an immutable `int`.
```
for i in 0..buffer_size {
// i goes from 0 to buffer_size - 1
}
for j in 0..len(buffer) {
buffer[j] = 0.0;
}
```
The upper bound must be an integer expression. Typically `buffer_size`, `len(array)`, or a constant.
There are no `while` loops, no recursion, and no user-defined functions. This is by design — it guarantees bounded execution time on the audio thread.
## Operators
### Arithmetic
| Operator | Description |
|----------|-------------|
| `+` | Addition |
| `-` | Subtraction (binary) or negation (unary) |
| `*` | Multiplication |
| `/` | Division |
| `%` | Modulo |
### Comparison
| Operator | Description |
|----------|-------------|
| `==` | Equal |
| `!=` | Not equal |
| `<` | Less than |
| `>` | Greater than |
| `<=` | Less than or equal |
| `>=` | Greater than or equal |
### Logical
| Operator | Description |
|----------|-------------|
| `&&` | Logical AND |
| `\|\|` | Logical OR |
| `!` | Logical NOT (unary) |
## Built-in Functions
### Trigonometric
| Function | Description |
|----------|-------------|
| `sin(x)` | Sine |
| `cos(x)` | Cosine |
| `tan(x)` | Tangent |
| `asin(x)` | Arc sine |
| `acos(x)` | Arc cosine |
| `atan(x)` | Arc tangent |
| `atan2(y, x)` | Two-argument arc tangent |
### Exponential
| Function | Description |
|----------|-------------|
| `exp(x)` | e^x |
| `log(x)` | Natural logarithm |
| `log2(x)` | Base-2 logarithm |
| `pow(x, y)` | x raised to power y |
| `sqrt(x)` | Square root |
### Rounding
| Function | Description |
|----------|-------------|
| `floor(x)` | Round toward negative infinity |
| `ceil(x)` | Round toward positive infinity |
| `round(x)` | Round to nearest integer |
| `trunc(x)` | Round toward zero |
| `fract(x)` | Fractional part (x - floor(x)) |
### Clamping and interpolation
| Function | Description |
|----------|-------------|
| `abs(x)` | Absolute value |
| `sign(x)` | Sign (-1.0, 0.0, or 1.0) |
| `min(x, y)` | Minimum of two values |
| `max(x, y)` | Maximum of two values |
| `clamp(x, lo, hi)` | Clamp x to [lo, hi] |
| `mix(a, b, t)` | Linear interpolation: a*(1-t) + b*t |
| `smoothstep(edge0, edge1, x)` | Hermite interpolation between 0 and 1 |
### Array
| Function | Description |
|----------|-------------|
| `len(array)` | Length of a state array (returns `int`) |
### CV
| Function | Description |
|----------|-------------|
| `cv_or(value, default)` | Returns `default` if `value` is NaN (unconnected CV), otherwise returns `value` |
### Sample
| Function | Description |
|----------|-------------|
| `sample_len(s)` | Number of frames in sample (0 if unloaded, returns `int`) |
| `sample_read(s, index)` | Read sample data at index (0.0 if out of bounds, returns `f32`) |
| `sample_rate_of(s)` | Original sample rate of the loaded audio (returns `int`) |
Sample data is stereo interleaved, so frame N has left at index `N*2` and right at `N*2+1`.
## Comments
```
// This is a line comment
let x = 1.0; // Inline comment
```
Line comments start with `//` and extend to the end of the line.
## Semicolons
Semicolons are **optional** statement terminators. You can use them or omit them.
```
let x = 1.0; // with semicolons
let y = 2.0
audio_out[0] = x + y
```
## Examples
### Stereo Delay
```
name "Stereo Delay"
category effect
inputs {
audio_in: audio
}
outputs {
audio_out: audio
}
params {
delay_time: 0.5 [0.01, 2.0] "s"
feedback: 0.3 [0.0, 0.95] ""
mix: 0.5 [0.0, 1.0] ""
}
state {
buffer: [384000]f32  // 2 s of stereo audio at up to 96 kHz (delay_time max is 2.0 s)
write_pos: int
}
ui {
param delay_time
param feedback
param mix
}
process {
let delay_samples = int(delay_time * float(sample_rate)) * 2;
for i in 0..buffer_size {
let l = audio_in[i * 2];
let r = audio_in[i * 2 + 1];
let read_pos = (write_pos - delay_samples + len(buffer)) % len(buffer);
let dl = buffer[read_pos];
let dr = buffer[read_pos + 1];
buffer[write_pos] = l + dl * feedback;
buffer[write_pos + 1] = r + dr * feedback;
write_pos = (write_pos + 2) % len(buffer);
audio_out[i * 2] = l * (1.0 - mix) + dl * mix;
audio_out[i * 2 + 1] = r * (1.0 - mix) + dr * mix;
}
}
```
### Sine Oscillator
```
name "Sine Oscillator"
category generator
outputs {
audio_out: audio
}
params {
frequency: 440.0 [20.0, 20000.0] "Hz"
amplitude: 0.5 [0.0, 1.0] ""
}
state {
phase: f32
}
ui {
param frequency
param amplitude
}
process {
let inc = frequency / float(sample_rate);
for i in 0..buffer_size {
let sample = sin(phase * 6.2831853) * amplitude;
audio_out[i * 2] = sample;
audio_out[i * 2 + 1] = sample;
phase = phase + inc;
if phase >= 1.0 {
phase = phase - 1.0;
}
}
}
```
### Sample Player
```
name "One-Shot Player"
category generator
outputs {
audio_out: audio
}
params {
speed: 1.0 [0.1, 4.0] ""
}
state {
clip: sample
phase: f32
}
ui {
sample clip
param speed
}
process {
let frames = sample_len(clip);
for i in 0..buffer_size {
let idx = int(phase) * 2;
audio_out[i * 2] = sample_read(clip, idx);
audio_out[i * 2 + 1] = sample_read(clip, idx + 1);
phase = phase + speed;
if phase >= float(frames) {
phase = 0.0;
}
}
}
```
### CV-Controlled Filter (Tone Control)
```
name "Tone Control"
category effect
inputs {
audio_in: audio
cutoff_cv: cv
}
outputs {
audio_out: audio
}
params {
cutoff: 1000.0 [20.0, 20000.0] "Hz"
resonance: 0.5 [0.0, 1.0] ""
}
state {
lp_l: f32
lp_r: f32
}
ui {
param cutoff
param resonance
}
process {
for i in 0..buffer_size {
let cv_mod = cv_or(cutoff_cv[i], 0.0);
let freq = clamp(cutoff + cv_mod * 5000.0, 20.0, 20000.0);
let rc = 1.0 / (6.2831853 * freq);
let dt = 1.0 / float(sample_rate);
let alpha = dt / (rc + dt);
let l = audio_in[i * 2];
let r = audio_in[i * 2 + 1];
lp_l = lp_l + alpha * (l - lp_l);
lp_r = lp_r + alpha * (r - lp_r);
audio_out[i * 2] = lp_l;
audio_out[i * 2 + 1] = lp_r;
}
}
```
### LFO
```
name "LFO"
category generator
outputs {
cv_out: cv
}
params {
rate: 1.0 [0.01, 20.0] "Hz"
depth: 1.0 [0.0, 1.0] ""
}
state {
phase: f32
}
ui {
param rate
param depth
}
process {
let inc = rate / float(sample_rate);
for i in 0..buffer_size {
cv_out[i] = sin(phase * 6.2831853) * depth;
phase = phase + inc;
if phase >= 1.0 {
phase = phase - 1.0;
}
}
}
```
## Safety Model
BeamDSP scripts run on the real-time audio thread. The language enforces safety through compile-time restrictions:
- **Bounded time**: Only `for i in 0..N` loops with statically bounded N. No `while` loops, no recursion, no user-defined functions. An instruction counter limit (~10 million) acts as a safety net.
- **Constant memory**: All state arrays have compile-time sizes. The VM uses a fixed-size stack (256 slots) and fixed locals (64 slots). No heap allocation occurs during processing.
- **Fail-silent**: If the VM encounters a runtime error (stack overflow, instruction limit exceeded), all outputs are zeroed for that buffer. Audio does not glitch — it simply goes silent.
## File Format
BeamDSP scripts use the `.bdsp` file extension. Files are plain UTF-8 text. You can export and import `.bdsp` files through the Script Editor pane or the node graph's script picker dropdown.

View File

@ -0,0 +1,7 @@
[package]
name = "beamdsp"
version = "0.1.0"
edition = "2021"
[dependencies]
serde = { version = "1", features = ["derive"] }

View File

@ -0,0 +1,158 @@
use crate::token::Span;
use crate::ui_decl::UiElement;
/// Top-level script AST
///
/// One fully parsed BeamDSP script: the header blocks plus the required
/// `process` body and an optional `draw` body.
#[derive(Debug, Clone)]
pub struct Script {
    pub name: String,               // display name from the `name "..."` header
    pub category: CategoryKind,     // effect / generator / utility
    pub inputs: Vec<PortDecl>,      // `inputs { ... }` ports, in source order
    pub outputs: Vec<PortDecl>,     // `outputs { ... }` ports, in source order
    pub params: Vec<ParamDecl>,     // `params { ... }` declarations
    pub state: Vec<StateDecl>,      // `state { ... }` persistent variables
    pub ui: Option<Vec<UiElement>>, // `ui { ... }` layout; None = generate a default UI
    pub process: Block,             // per-buffer processing statements
    pub draw: Option<Block>,        // optional draw code — NOTE(review): not covered by the language reference; confirm semantics
}
/// Script category from the `category` header.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum CategoryKind {
    Generator, // produces audio/CV; no audio inputs
    Effect,    // processes audio (has inputs and outputs)
    Utility,   // signal routing, mixing, and other helpers
}

/// Signal type carried by a port.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SignalKind {
    Audio, // stereo interleaved: 2 samples per frame (left, right)
    Cv,    // mono control voltage: 1 sample per frame
    Midi,  // NOTE(review): not documented in the language reference — confirm support
}

/// One port declaration from an `inputs`/`outputs` block (`name: audio|cv`).
#[derive(Debug, Clone)]
pub struct PortDecl {
    pub name: String,
    pub signal: SignalKind,
    pub span: Span, // source location, for diagnostics
}
/// A `params` entry: `name: default [min, max] "unit"`.
#[derive(Debug, Clone)]
pub struct ParamDecl {
    pub name: String,
    pub default: f32,
    pub min: f32,
    pub max: f32,
    pub unit: String, // display unit, e.g. "Hz"; may be empty
    pub span: Span,   // source location, for diagnostics
}

/// A `state` entry: a persistent variable that survives across process calls.
#[derive(Debug, Clone)]
pub struct StateDecl {
    pub name: String,
    pub ty: StateType,
    pub span: Span,
}

/// Type of a state variable. Array sizes are compile-time constants.
#[derive(Debug, Clone, PartialEq)]
pub enum StateType {
    F32,
    Int,
    Bool,
    ArrayF32(usize), // [N]f32
    ArrayInt(usize), // [N]int
    Sample,          // loadable audio clip (read-only inside `process`)
}
/// A sequence of statements (the body of `process`, `if`, `for`, ...).
pub type Block = Vec<Stmt>;

/// Statement forms. Each carries its source span for error reporting.
#[derive(Debug, Clone)]
pub enum Stmt {
    /// `let [mut] name = init`
    Let {
        name: String,
        mutable: bool,
        init: Expr,
        span: Span,
    },
    /// `target = value`, where target is a variable or an indexed element
    Assign {
        target: LValue,
        value: Expr,
        span: Span,
    },
    /// `if cond { ... } [else { ... }]`
    If {
        cond: Expr,
        then_block: Block,
        else_block: Option<Block>,
        span: Span,
    },
    /// `for var in 0..end { ... }` — the lower bound is always 0
    For {
        var: String,
        end: Expr,
        body: Block,
        span: Span,
    },
    /// Expression evaluated as a statement
    ExprStmt(Expr),
}

/// Assignment targets: a plain variable, or one array/buffer element.
#[derive(Debug, Clone)]
pub enum LValue {
    Ident(String, Span),
    Index(String, Box<Expr>, Span),
}
/// Expression forms. The trailing `Span` on each variant is its source location.
#[derive(Debug, Clone)]
pub enum Expr {
    FloatLit(f32, Span),
    IntLit(i32, Span),
    BoolLit(bool, Span),
    Ident(String, Span),                      // variable / param / built-in name
    BinOp(Box<Expr>, BinOp, Box<Expr>, Span), // lhs op rhs
    UnaryOp(UnaryOp, Box<Expr>, Span),
    Call(String, Vec<Expr>, Span),            // built-in function call
    Index(Box<Expr>, Box<Expr>, Span),        // base[index]
    Cast(CastKind, Box<Expr>, Span),          // int(...) / float(...)
}
impl Expr {
    /// Source span of this expression, used for diagnostics.
    pub fn span(&self) -> Span {
        // Every variant stores its span last; an or-pattern extracts it uniformly.
        match self {
            Expr::FloatLit(_, s)
            | Expr::IntLit(_, s)
            | Expr::BoolLit(_, s)
            | Expr::Ident(_, s)
            | Expr::UnaryOp(_, _, s)
            | Expr::Call(_, _, s)
            | Expr::Index(_, _, s)
            | Expr::Cast(_, _, s)
            | Expr::BinOp(_, _, _, s) => *s,
        }
    }
}
/// Binary operators. Comparison and logical forms yield booleans.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum BinOp {
    Add,
    Sub,
    Mul,
    Div,
    Mod,
    Eq,
    Ne,
    Lt,
    Gt,
    Le,
    Ge,
    And,
    Or,
}

/// Unary operators: arithmetic negation and logical not.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum UnaryOp {
    Neg,
    Not,
}

/// Explicit casts: `int(expr)` truncates toward zero; `float(expr)` widens.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum CastKind {
    ToInt,
    ToFloat,
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,55 @@
use crate::token::Span;
use std::fmt;
/// Compile-time error with source location
///
/// Produced by the lexer, parser, validator, and codegen stages.
#[derive(Debug, Clone)]
pub struct CompileError {
    pub message: String,      // human-readable description
    pub span: Span,           // where in the source the error occurred
    pub hint: Option<String>, // optional suggestion appended when displayed
}
impl CompileError {
    /// Creates an error at `span` with no hint attached.
    pub fn new(message: impl Into<String>, span: Span) -> Self {
        let message = message.into();
        Self {
            message,
            span,
            hint: None,
        }
    }

    /// Attaches a human-readable suggestion to the error, builder-style.
    pub fn with_hint(self, hint: impl Into<String>) -> Self {
        Self {
            hint: Some(hint.into()),
            ..self
        }
    }
}
impl fmt::Display for CompileError {
    // Renders "Error at line L, col C: message", then an extra hint line
    // when one was attached via `with_hint`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "Error at line {}, col {}: {}",
            self.span.line, self.span.col, self.message
        )?;
        match self.hint.as_deref() {
            Some(hint) => write!(f, "\n Hint: {}", hint),
            None => Ok(()),
        }
    }
}
/// Runtime error during VM execution
///
/// The VM is fail-silent: when one of these occurs mid-buffer, the script's
/// outputs are zeroed for that buffer instead of emitting garbage.
#[derive(Debug, Clone)]
pub enum ScriptError {
    ExecutionLimitExceeded, // instruction-count safety net tripped
    StackOverflow,          // value stack exceeded its fixed capacity
    StackUnderflow,         // pop from an empty value stack
    InvalidOpcode(u8),      // undecodable byte in the instruction stream
}
impl fmt::Display for ScriptError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Only InvalidOpcode carries a payload; all other variants map to fixed text.
        if let ScriptError::InvalidOpcode(op) = self {
            return write!(f, "Invalid opcode: {}", op);
        }
        f.write_str(match self {
            ScriptError::ExecutionLimitExceeded => {
                "Execution limit exceeded (possible infinite loop)"
            }
            ScriptError::StackOverflow => "Stack overflow",
            ScriptError::StackUnderflow => "Stack underflow",
            ScriptError::InvalidOpcode(_) => unreachable!(),
        })
    }
}

View File

@ -0,0 +1,306 @@
use crate::error::CompileError;
use crate::token::{Span, Token, TokenKind};
/// Hand-written, byte-oriented lexer for BeamDSP source text.
pub struct Lexer<'a> {
    source: &'a [u8], // raw bytes of the source (originates from a &str, so valid UTF-8)
    pos: usize,       // byte offset of the cursor
    line: u32,        // 1-based line of the cursor
    col: u32,         // 1-based column of the cursor (counted in bytes)
}
impl<'a> Lexer<'a> {
    /// Creates a lexer over `source`, positioned at line 1, column 1.
    pub fn new(source: &'a str) -> Self {
        Self {
            source: source.as_bytes(),
            pos: 0,
            line: 1,
            col: 1,
        }
    }

    /// Tokenizes the entire source, appending a trailing `Eof` token.
    ///
    /// Returns the first lexical error encountered (unexpected character,
    /// unterminated string, malformed number literal).
    pub fn tokenize(&mut self) -> Result<Vec<Token>, CompileError> {
        let mut tokens = Vec::new();
        loop {
            self.skip_whitespace_and_comments();
            if self.pos >= self.source.len() {
                tokens.push(Token {
                    kind: TokenKind::Eof,
                    span: self.span(),
                });
                break;
            }
            tokens.push(self.next_token()?);
        }
        Ok(tokens)
    }

    /// Span at the current cursor position.
    fn span(&self) -> Span {
        Span::new(self.line, self.col)
    }

    /// Current byte without consuming it.
    fn peek(&self) -> Option<u8> {
        self.source.get(self.pos).copied()
    }

    /// Byte one past the cursor without consuming anything.
    fn peek_next(&self) -> Option<u8> {
        self.source.get(self.pos + 1).copied()
    }

    /// Consumes and returns the current byte, updating line/col bookkeeping.
    /// Callers must ensure the cursor is in bounds.
    ///
    /// NOTE(review): `col` advances once per byte, so columns inside
    /// multi-byte UTF-8 sequences are byte offsets, not character offsets.
    fn advance(&mut self) -> u8 {
        let ch = self.source[self.pos];
        self.pos += 1;
        if ch == b'\n' {
            self.line += 1;
            self.col = 1;
        } else {
            self.col += 1;
        }
        ch
    }

    /// Skips interleaved runs of whitespace and `//` line comments.
    fn skip_whitespace_and_comments(&mut self) {
        loop {
            // Skip whitespace
            while self.pos < self.source.len() && self.source[self.pos].is_ascii_whitespace() {
                self.advance();
            }
            // Skip line comments
            if self.pos + 1 < self.source.len()
                && self.source[self.pos] == b'/'
                && self.source[self.pos + 1] == b'/'
            {
                while self.pos < self.source.len() && self.source[self.pos] != b'\n' {
                    self.advance();
                }
                continue; // re-check for whitespace following the comment
            }
            break;
        }
    }

    /// Reads one token. Two-byte operators (`..`, `==`, `!=`, `<=`, `>=`,
    /// `&&`, `||`) are matched before their one-byte prefixes.
    fn next_token(&mut self) -> Result<Token, CompileError> {
        let span = self.span();
        let ch = self.advance();
        match ch {
            b'{' => Ok(Token { kind: TokenKind::LBrace, span }),
            b'}' => Ok(Token { kind: TokenKind::RBrace, span }),
            b'[' => Ok(Token { kind: TokenKind::LBracket, span }),
            b']' => Ok(Token { kind: TokenKind::RBracket, span }),
            b'(' => Ok(Token { kind: TokenKind::LParen, span }),
            b')' => Ok(Token { kind: TokenKind::RParen, span }),
            b':' => Ok(Token { kind: TokenKind::Colon, span }),
            b',' => Ok(Token { kind: TokenKind::Comma, span }),
            b';' => Ok(Token { kind: TokenKind::Semicolon, span }),
            b'+' => Ok(Token { kind: TokenKind::Plus, span }),
            b'-' => Ok(Token { kind: TokenKind::Minus, span }),
            b'*' => Ok(Token { kind: TokenKind::Star, span }),
            b'/' => Ok(Token { kind: TokenKind::Slash, span }),
            b'%' => Ok(Token { kind: TokenKind::Percent, span }),
            b'.' if self.peek() == Some(b'.') => {
                self.advance();
                Ok(Token { kind: TokenKind::DotDot, span })
            }
            b'=' if self.peek() == Some(b'=') => {
                self.advance();
                Ok(Token { kind: TokenKind::EqEq, span })
            }
            b'=' => Ok(Token { kind: TokenKind::Eq, span }),
            b'!' if self.peek() == Some(b'=') => {
                self.advance();
                Ok(Token { kind: TokenKind::BangEq, span })
            }
            b'!' => Ok(Token { kind: TokenKind::Bang, span }),
            b'<' if self.peek() == Some(b'=') => {
                self.advance();
                Ok(Token { kind: TokenKind::LtEq, span })
            }
            b'<' => Ok(Token { kind: TokenKind::Lt, span }),
            b'>' if self.peek() == Some(b'=') => {
                self.advance();
                Ok(Token { kind: TokenKind::GtEq, span })
            }
            b'>' => Ok(Token { kind: TokenKind::Gt, span }),
            b'&' if self.peek() == Some(b'&') => {
                self.advance();
                Ok(Token { kind: TokenKind::AmpAmp, span })
            }
            b'|' if self.peek() == Some(b'|') => {
                self.advance();
                Ok(Token { kind: TokenKind::PipePipe, span })
            }
            b'"' => self.read_string(span),
            ch if ch.is_ascii_digit() => self.read_number(ch, span),
            ch if ch.is_ascii_alphabetic() || ch == b'_' => self.read_ident(ch, span),
            _ => Err(CompileError::new(
                format!("Unexpected character: '{}'", ch as char),
                span,
            )),
        }
    }

    /// Reads a `"..."` string literal body (the opening quote is consumed).
    ///
    /// Fix: collect the raw bytes and decode them as UTF-8 in one step.
    /// The previous version pushed each byte with `as char`, which is a
    /// Latin-1 cast and mangled multi-byte UTF-8 sequences (e.g. "Café"
    /// became "CafÃ©"). The source came from a `&str` and the delimiters
    /// are ASCII, so the collected bytes are always valid UTF-8; the
    /// lossy decode is just a never-taken safety net.
    fn read_string(&mut self, span: Span) -> Result<Token, CompileError> {
        let mut bytes = Vec::new();
        loop {
            match self.peek() {
                Some(b'"') => {
                    self.advance();
                    return Ok(Token {
                        kind: TokenKind::StringLit(String::from_utf8_lossy(&bytes).into_owned()),
                        span,
                    });
                }
                Some(b'\n') | None => {
                    return Err(CompileError::new("Unterminated string literal", span));
                }
                Some(_) => {
                    bytes.push(self.advance());
                }
            }
        }
    }

    /// Reads an integer, float, or `0x...` hex literal starting with `first`.
    /// A `.` followed by another `.` is left for the `..` range operator.
    fn read_number(&mut self, first: u8, span: Span) -> Result<Token, CompileError> {
        // Check for hex literal: 0x...
        if first == b'0' && self.peek() == Some(b'x') {
            self.advance(); // skip 'x'
            let mut hex = String::new();
            while let Some(ch) = self.peek() {
                if ch.is_ascii_hexdigit() {
                    hex.push(self.advance() as char);
                } else {
                    break;
                }
            }
            if hex.is_empty() {
                return Err(CompileError::new("Expected hex digits after 0x", span));
            }
            let val = u32::from_str_radix(&hex, 16)
                .map_err(|_| CompileError::new(format!("Invalid hex literal: 0x{}", hex), span))?;
            return Ok(Token {
                kind: TokenKind::IntLit(val as i32),
                span,
            });
        }
        let mut s = String::new();
        s.push(first as char);
        let mut is_float = false;
        while let Some(ch) = self.peek() {
            if ch.is_ascii_digit() {
                s.push(self.advance() as char);
            } else if ch == b'.' && self.peek_next() != Some(b'.') && !is_float {
                is_float = true;
                s.push(self.advance() as char);
            } else {
                break;
            }
        }
        if is_float {
            let val: f32 = s
                .parse()
                .map_err(|_| CompileError::new(format!("Invalid float literal: {}", s), span))?;
            Ok(Token {
                kind: TokenKind::FloatLit(val),
                span,
            })
        } else {
            let val: i32 = s
                .parse()
                .map_err(|_| CompileError::new(format!("Invalid integer literal: {}", s), span))?;
            // Check if this could be a float (e.g. 0 used in float context)
            // For now, emit as IntLit; parser/validator handles coercion
            Ok(Token {
                kind: TokenKind::IntLit(val),
                span,
            })
        }
    }

    /// Reads an identifier or keyword starting with `first`; keyword
    /// resolution is delegated to `TokenKind::from_ident`.
    fn read_ident(&mut self, first: u8, span: Span) -> Result<Token, CompileError> {
        let mut s = String::new();
        s.push(first as char);
        while let Some(ch) = self.peek() {
            if ch.is_ascii_alphanumeric() || ch == b'_' {
                s.push(self.advance() as char);
            } else {
                break;
            }
        }
        Ok(Token {
            kind: TokenKind::from_ident(&s),
            span,
        })
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Lexes `src` to completion, panicking on any lexical error.
    fn lex(src: &str) -> Vec<Token> {
        Lexer::new(src).tokenize().unwrap()
    }

    /// Asserts the leading token kinds match `expected` (ignores trailing Eof).
    fn assert_kinds(src: &str, expected: &[TokenKind]) {
        let tokens = lex(src);
        assert_eq!(tokens.len(), expected.len() + 1); // +1 for Eof
        for (tok, want) in tokens.iter().zip(expected) {
            assert_eq!(&tok.kind, want);
        }
    }

    #[test]
    fn test_simple_tokens() {
        assert_kinds(
            "name \"Test\" category effect",
            &[
                TokenKind::Name,
                TokenKind::StringLit("Test".into()),
                TokenKind::Category,
                TokenKind::Effect,
            ],
        );
    }

    #[test]
    fn test_numbers() {
        assert_kinds(
            "42 3.14 0.5",
            &[
                TokenKind::IntLit(42),
                TokenKind::FloatLit(3.14),
                TokenKind::FloatLit(0.5),
            ],
        );
    }

    #[test]
    fn test_operators() {
        assert_kinds(
            "== != <= >= && || ..",
            &[
                TokenKind::EqEq,
                TokenKind::BangEq,
                TokenKind::LtEq,
                TokenKind::GtEq,
                TokenKind::AmpAmp,
                TokenKind::PipePipe,
                TokenKind::DotDot,
            ],
        );
    }

    #[test]
    fn test_comments() {
        // The comment between the statements must be skipped entirely.
        let tokens = lex("let x = 5; // comment\nlet y = 10;");
        assert_eq!(tokens[0].kind, TokenKind::Let);
        assert_eq!(tokens[5].kind, TokenKind::Let);
    }

    #[test]
    fn test_range_vs_float() {
        // "0..10" should parse as IntLit(0), DotDot, IntLit(10), not as a float
        assert_kinds(
            "0..10",
            &[
                TokenKind::IntLit(0),
                TokenKind::DotDot,
                TokenKind::IntLit(10),
            ],
        );
    }
}

View File

@ -0,0 +1,110 @@
pub mod ast;
pub mod error;
pub mod lexer;
pub mod token;
pub mod ui_decl;
pub mod parser;
pub mod validator;
pub mod opcodes;
pub mod codegen;
pub mod vm;
use error::CompileError;
use lexer::Lexer;
use parser::Parser;
pub use error::ScriptError;
pub use ui_decl::{UiDeclaration, UiElement};
pub use vm::{ScriptVM, SampleSlot, DrawVM, DrawCommand, MouseState};
/// Compiled script metadata — everything needed to create a ScriptNode
pub struct CompiledScript {
    pub vm: ScriptVM,                  // bytecode VM for the `process` block
    pub name: String,                  // display name from the `name` header
    pub category: ast::CategoryKind,   // effect / generator / utility
    pub input_ports: Vec<PortInfo>,    // declared `inputs`, in source order
    pub output_ports: Vec<PortInfo>,   // declared `outputs`, in source order
    pub parameters: Vec<ParamInfo>,    // declared `params`, in source order
    pub sample_slots: Vec<String>,     // names of `sample`-typed state variables
    pub ui_declaration: UiDeclaration, // layout from the `ui` block (default generated if omitted)
    pub source: String,                // original source text, kept for re-editing/export
    pub draw_vm: Option<DrawVM>,       // VM for the optional `draw` block, if present
}
/// Public description of one input or output port.
#[derive(Debug, Clone)]
pub struct PortInfo {
    pub name: String,
    pub signal: ast::SignalKind, // audio or CV
}

/// Public description of one user-adjustable parameter.
#[derive(Debug, Clone)]
pub struct ParamInfo {
    pub name: String,
    pub min: f32,
    pub max: f32,
    pub default: f32,
    pub unit: String, // display unit, e.g. "Hz"; may be empty
}
/// Compile BeamDSP source code into a ready-to-run script
pub fn compile(source: &str) -> Result<CompiledScript, CompileError> {
    // Front end: source text -> tokens -> AST.
    let tokens = Lexer::new(source).tokenize()?;
    let script = Parser::new(&tokens).parse()?;

    // Semantic checks, then bytecode generation.
    let validated = validator::validate(&script)?;
    let (vm, ui_decl, draw_vm) = codegen::compile(&validated)?;

    // Shared projection from an AST port declaration to its public descriptor.
    // Captures nothing, so the closure is Copy and can be reused for both maps.
    let port_info = |p: &ast::PortDecl| PortInfo {
        name: p.name.clone(),
        signal: p.signal,
    };

    Ok(CompiledScript {
        vm,
        name: script.name.clone(),
        category: script.category,
        input_ports: script.inputs.iter().map(port_info).collect(),
        output_ports: script.outputs.iter().map(port_info).collect(),
        parameters: script
            .params
            .iter()
            .map(|p| ParamInfo {
                name: p.name.clone(),
                min: p.min,
                max: p.max,
                default: p.default,
                unit: p.unit.clone(),
            })
            .collect(),
        sample_slots: script
            .state
            .iter()
            .filter(|s| s.ty == ast::StateType::Sample)
            .map(|s| s.name.clone())
            .collect(),
        ui_declaration: ui_decl,
        source: source.to_string(),
        draw_vm,
    })
}

View File

@ -0,0 +1,223 @@
/// Bytecode opcodes for the BeamDSP VM
///
/// Discriminant values are the byte encoding used in the instruction stream;
/// the numeric gaps between groups leave room for new opcodes without
/// renumbering existing ones.
#[repr(u8)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum OpCode {
    // Stack operations
    PushF32 = 0, // operand: f32 constant index (u16) — NOTE(review): original comment said "next 4 bytes" but also "(u16)"; confirm operand width against codegen/vm
    PushI32 = 1, // next 2 bytes: i32 constant index (u16)
    PushBool = 2, // next 1 byte: 0 or 1
    Pop = 3,
    // Variable access (all use u16 index)
    LoadLocal = 10,
    StoreLocal = 11,
    LoadParam = 12,
    LoadState = 13,
    StoreState = 14,
    // Buffer access (u8 port index)
    // LoadInput: pops index from stack, pushes input[port][index]
    LoadInput = 20,
    // StoreOutput: pops value then index, stores output[port][index] = value
    StoreOutput = 21,
    // State arrays (u16 array id)
    LoadStateArray = 22,  // pops index, pushes state_array[id][index]
    StoreStateArray = 23, // pops value then index, stores state_array[id][index]
    // Sample access (u8 slot index)
    SampleLen = 25,    // pushes frame count
    SampleRead = 26,   // pops index, pushes sample data
    SampleRateOf = 27, // pushes sample rate
    // Float arithmetic
    AddF = 30,
    SubF = 31,
    MulF = 32,
    DivF = 33,
    ModF = 34,
    NegF = 35,
    // Int arithmetic
    AddI = 40,
    SubI = 41,
    MulI = 42,
    DivI = 43,
    ModI = 44,
    NegI = 45,
    // Float comparison (push bool)
    EqF = 50,
    NeF = 51,
    LtF = 52,
    GtF = 53,
    LeF = 54,
    GeF = 55,
    // Int comparison (push bool)
    EqI = 60,
    NeI = 61,
    LtI = 62,
    GtI = 63,
    LeI = 64,
    GeI = 65,
    // Logical
    And = 70,
    Or = 71,
    Not = 72,
    // Type conversion
    F32ToI32 = 80,
    I32ToF32 = 81,
    // Control flow (u32 offset)
    Jump = 90,
    JumpIfFalse = 91,
    // Built-in math functions (operate on stack)
    Sin = 100,
    Cos = 101,
    Tan = 102,
    Asin = 103,
    Acos = 104,
    Atan = 105,
    Atan2 = 106,
    Exp = 107,
    Log = 108,
    Log2 = 109,
    Pow = 110,
    Sqrt = 111,
    Floor = 112,
    Ceil = 113,
    Round = 114,
    Trunc = 115,
    Fract = 116,
    Abs = 117,
    Clamp = 118,
    Min = 119,
    Max = 120,
    Sign = 121,
    Mix = 122,
    Smoothstep = 123,
    IsNan = 124, // NOTE(review): backs `cv_or`'s NaN test; not user-visible in the language reference
    // Array/buffer info
    ArrayLen = 130, // u16 array_id, pushes length as int
    // Built-in constants
    LoadSampleRate = 140,
    LoadBufferSize = 141,
    // Draw commands (pop args from stack, push to draw command buffer)
    DrawFillCircle = 150,   // pops: color(i32), r, cy, cx
    DrawStrokeCircle = 151, // pops: width, color(i32), r, cy, cx
    DrawStrokeArc = 152,    // pops: width, color(i32), end_deg, start_deg, r, cy, cx
    DrawLine = 153,         // pops: width, color(i32), y2, x2, y1, x1
    DrawFillRect = 154,     // pops: color(i32), h, w, y, x
    DrawStrokeRect = 155,   // pops: width, color(i32), h, w, y, x
    // Mouse input (push onto stack)
    MouseX = 160,    // pushes canvas-relative X as f32
    MouseY = 161,    // pushes canvas-relative Y as f32
    MouseDown = 162, // pushes 1.0 if pressed, 0.0 if not
    // Param write (draw context only)
    StoreParam = 170, // u16 param index, pops value from stack
    Halt = 255,
}
impl OpCode {
pub fn from_u8(v: u8) -> Option<OpCode> {
// Safety: we validate the opcode values
match v {
0 => Some(OpCode::PushF32),
1 => Some(OpCode::PushI32),
2 => Some(OpCode::PushBool),
3 => Some(OpCode::Pop),
10 => Some(OpCode::LoadLocal),
11 => Some(OpCode::StoreLocal),
12 => Some(OpCode::LoadParam),
13 => Some(OpCode::LoadState),
14 => Some(OpCode::StoreState),
20 => Some(OpCode::LoadInput),
21 => Some(OpCode::StoreOutput),
22 => Some(OpCode::LoadStateArray),
23 => Some(OpCode::StoreStateArray),
25 => Some(OpCode::SampleLen),
26 => Some(OpCode::SampleRead),
27 => Some(OpCode::SampleRateOf),
30 => Some(OpCode::AddF),
31 => Some(OpCode::SubF),
32 => Some(OpCode::MulF),
33 => Some(OpCode::DivF),
34 => Some(OpCode::ModF),
35 => Some(OpCode::NegF),
40 => Some(OpCode::AddI),
41 => Some(OpCode::SubI),
42 => Some(OpCode::MulI),
43 => Some(OpCode::DivI),
44 => Some(OpCode::ModI),
45 => Some(OpCode::NegI),
50 => Some(OpCode::EqF),
51 => Some(OpCode::NeF),
52 => Some(OpCode::LtF),
53 => Some(OpCode::GtF),
54 => Some(OpCode::LeF),
55 => Some(OpCode::GeF),
60 => Some(OpCode::EqI),
61 => Some(OpCode::NeI),
62 => Some(OpCode::LtI),
63 => Some(OpCode::GtI),
64 => Some(OpCode::LeI),
65 => Some(OpCode::GeI),
70 => Some(OpCode::And),
71 => Some(OpCode::Or),
72 => Some(OpCode::Not),
80 => Some(OpCode::F32ToI32),
81 => Some(OpCode::I32ToF32),
90 => Some(OpCode::Jump),
91 => Some(OpCode::JumpIfFalse),
100 => Some(OpCode::Sin),
101 => Some(OpCode::Cos),
102 => Some(OpCode::Tan),
103 => Some(OpCode::Asin),
104 => Some(OpCode::Acos),
105 => Some(OpCode::Atan),
106 => Some(OpCode::Atan2),
107 => Some(OpCode::Exp),
108 => Some(OpCode::Log),
109 => Some(OpCode::Log2),
110 => Some(OpCode::Pow),
111 => Some(OpCode::Sqrt),
112 => Some(OpCode::Floor),
113 => Some(OpCode::Ceil),
114 => Some(OpCode::Round),
115 => Some(OpCode::Trunc),
116 => Some(OpCode::Fract),
117 => Some(OpCode::Abs),
118 => Some(OpCode::Clamp),
119 => Some(OpCode::Min),
120 => Some(OpCode::Max),
121 => Some(OpCode::Sign),
122 => Some(OpCode::Mix),
123 => Some(OpCode::Smoothstep),
124 => Some(OpCode::IsNan),
130 => Some(OpCode::ArrayLen),
140 => Some(OpCode::LoadSampleRate),
141 => Some(OpCode::LoadBufferSize),
150 => Some(OpCode::DrawFillCircle),
151 => Some(OpCode::DrawStrokeCircle),
152 => Some(OpCode::DrawStrokeArc),
153 => Some(OpCode::DrawLine),
154 => Some(OpCode::DrawFillRect),
155 => Some(OpCode::DrawStrokeRect),
160 => Some(OpCode::MouseX),
161 => Some(OpCode::MouseY),
162 => Some(OpCode::MouseDown),
170 => Some(OpCode::StoreParam),
255 => Some(OpCode::Halt),
_ => None,
}
}
}

View File

@ -0,0 +1,771 @@
use crate::ast::*;
use crate::error::CompileError;
use crate::token::{Span, Token, TokenKind};
use crate::ui_decl::UiElement;
/// Recursive-descent parser over a pre-lexed token stream.
pub struct Parser<'a> {
    tokens: &'a [Token], // token stream produced by the lexer; ends with an Eof token
    pos: usize,          // index of the next token to consume
}
impl<'a> Parser<'a> {
    /// Create a parser over a lexed token stream.
    /// The stream is expected to end with a `TokenKind::Eof` token.
    pub fn new(tokens: &'a [Token]) -> Self {
        Self { tokens, pos: 0 }
    }

    /// Peek at the current token's kind without consuming it.
    fn peek(&self) -> &TokenKind {
        &self.tokens[self.pos].kind
    }

    /// Source span of the current token (used for error reporting).
    fn span(&self) -> Span {
        self.tokens[self.pos].span
    }

    /// Consume and return the current token. The position never advances past
    /// the final token, so repeated calls at end-of-input keep yielding it
    /// (the Eof token) instead of panicking.
    fn advance(&mut self) -> &Token {
        let tok = &self.tokens[self.pos];
        if self.pos + 1 < self.tokens.len() {
            self.pos += 1;
        }
        tok
    }

    /// Consume the current token if it has the same variant as `expected`
    /// (payloads are ignored via discriminant comparison), otherwise return
    /// an error at the current span.
    fn expect(&mut self, expected: &TokenKind) -> Result<&Token, CompileError> {
        if std::mem::discriminant(self.peek()) == std::mem::discriminant(expected) {
            Ok(self.advance())
        } else {
            Err(CompileError::new(
                format!("Expected {:?}, found {:?}", expected, self.peek()),
                self.span(),
            ))
        }
    }

    /// Consume an identifier token and return its name.
    fn expect_ident(&mut self) -> Result<String, CompileError> {
        // Clone only the identifier's String rather than the whole TokenKind.
        if let TokenKind::Ident(name) = self.peek() {
            let name = name.clone();
            self.advance();
            Ok(name)
        } else {
            Err(CompileError::new(
                format!("Expected identifier, found {:?}", self.peek()),
                self.span(),
            ))
        }
    }

    /// Consume a string-literal token and return its contents.
    fn expect_string(&mut self) -> Result<String, CompileError> {
        // Clone only the literal's String rather than the whole TokenKind.
        if let TokenKind::StringLit(s) = self.peek() {
            let s = s.clone();
            self.advance();
            Ok(s)
        } else {
            Err(CompileError::new(
                format!("Expected string literal, found {:?}", self.peek()),
                self.span(),
            ))
        }
    }

    /// Consume the current token if it matches `kind` (by variant); returns
    /// whether a token was consumed.
    fn eat(&mut self, kind: &TokenKind) -> bool {
        if std::mem::discriminant(self.peek()) == std::mem::discriminant(kind) {
            self.advance();
            true
        } else {
            false
        }
    }

    /// Parse a complete script: top-level declarations (`name`, `category`,
    /// `inputs`, `outputs`, `params`, `state`, `ui`, `process`, `draw`) may
    /// appear in any order; a `name` declaration is mandatory.
    pub fn parse(&mut self) -> Result<Script, CompileError> {
        let mut name = String::new();
        let mut category = CategoryKind::Utility;
        let mut inputs = Vec::new();
        let mut outputs = Vec::new();
        let mut params = Vec::new();
        let mut state = Vec::new();
        let mut ui = None;
        let mut process = Vec::new();
        let mut draw = None;
        while *self.peek() != TokenKind::Eof {
            match self.peek() {
                TokenKind::Name => {
                    self.advance();
                    name = self.expect_string()?;
                }
                TokenKind::Category => {
                    self.advance();
                    category = match self.peek() {
                        TokenKind::Generator => { self.advance(); CategoryKind::Generator }
                        TokenKind::Effect => { self.advance(); CategoryKind::Effect }
                        TokenKind::Utility => { self.advance(); CategoryKind::Utility }
                        _ => {
                            return Err(CompileError::new(
                                "Expected generator, effect, or utility",
                                self.span(),
                            ));
                        }
                    };
                }
                TokenKind::Inputs => {
                    self.advance();
                    inputs = self.parse_port_block()?;
                }
                TokenKind::Outputs => {
                    self.advance();
                    outputs = self.parse_port_block()?;
                }
                TokenKind::Params => {
                    self.advance();
                    params = self.parse_params_block()?;
                }
                TokenKind::State => {
                    self.advance();
                    state = self.parse_state_block()?;
                }
                TokenKind::Ui => {
                    self.advance();
                    ui = Some(self.parse_ui_block()?);
                }
                TokenKind::Process => {
                    self.advance();
                    process = self.parse_block()?;
                }
                TokenKind::Draw => {
                    self.advance();
                    draw = Some(self.parse_block()?);
                }
                _ => {
                    return Err(CompileError::new(
                        format!("Unexpected token {:?} at top level", self.peek()),
                        self.span(),
                    ));
                }
            }
        }
        if name.is_empty() {
            return Err(CompileError::new(
                "Script must have a name declaration",
                Span::new(1, 1),
            ));
        }
        Ok(Script {
            name,
            category,
            inputs,
            outputs,
            params,
            state,
            ui,
            process,
            draw,
        })
    }

    /// Parse `{ name: audio|cv|midi ... }` — shared by `inputs` and `outputs`.
    fn parse_port_block(&mut self) -> Result<Vec<PortDecl>, CompileError> {
        self.expect(&TokenKind::LBrace)?;
        let mut ports = Vec::new();
        while *self.peek() != TokenKind::RBrace {
            let span = self.span();
            let name = self.expect_ident()?;
            self.expect(&TokenKind::Colon)?;
            let signal = match self.peek() {
                TokenKind::Audio => { self.advance(); SignalKind::Audio }
                TokenKind::Cv => { self.advance(); SignalKind::Cv }
                TokenKind::Midi => { self.advance(); SignalKind::Midi }
                _ => {
                    return Err(CompileError::new(
                        "Expected audio, cv, or midi",
                        self.span(),
                    ));
                }
            };
            ports.push(PortDecl { name, signal, span });
        }
        self.expect(&TokenKind::RBrace)?;
        Ok(ports)
    }

    /// Parse `{ name: default [min, max] "unit" ... }`.
    fn parse_params_block(&mut self) -> Result<Vec<ParamDecl>, CompileError> {
        self.expect(&TokenKind::LBrace)?;
        let mut params = Vec::new();
        while *self.peek() != TokenKind::RBrace {
            let span = self.span();
            let name = self.expect_ident()?;
            self.expect(&TokenKind::Colon)?;
            let default = self.parse_number()?;
            self.expect(&TokenKind::LBracket)?;
            let min = self.parse_number()?;
            self.expect(&TokenKind::Comma)?;
            let max = self.parse_number()?;
            self.expect(&TokenKind::RBracket)?;
            let unit = self.expect_string()?;
            params.push(ParamDecl {
                name,
                default,
                min,
                max,
                unit,
                span,
            });
        }
        self.expect(&TokenKind::RBrace)?;
        Ok(params)
    }

    /// Parse a possibly-negated numeric literal (int or float) as f32.
    fn parse_number(&mut self) -> Result<f32, CompileError> {
        let negative = self.eat(&TokenKind::Minus);
        let val = match self.peek() {
            TokenKind::FloatLit(v) => {
                let v = *v;
                self.advance();
                v
            }
            TokenKind::IntLit(v) => {
                let v = *v as f32;
                self.advance();
                v
            }
            _ => {
                return Err(CompileError::new(
                    format!("Expected number, found {:?}", self.peek()),
                    self.span(),
                ));
            }
        };
        Ok(if negative { -val } else { val })
    }

    /// Parse `{ name: <type> ... }` state declarations.
    fn parse_state_block(&mut self) -> Result<Vec<StateDecl>, CompileError> {
        self.expect(&TokenKind::LBrace)?;
        let mut decls = Vec::new();
        while *self.peek() != TokenKind::RBrace {
            let span = self.span();
            let name = self.expect_ident()?;
            self.expect(&TokenKind::Colon)?;
            let ty = self.parse_state_type()?;
            decls.push(StateDecl { name, ty, span });
        }
        self.expect(&TokenKind::RBrace)?;
        Ok(decls)
    }

    /// Parse a state type: `f32`, `int`, `bool`, `sample`, `[N]f32`, or `[N]int`.
    fn parse_state_type(&mut self) -> Result<StateType, CompileError> {
        match self.peek() {
            TokenKind::F32 => { self.advance(); Ok(StateType::F32) }
            TokenKind::Int => { self.advance(); Ok(StateType::Int) }
            TokenKind::Bool => { self.advance(); Ok(StateType::Bool) }
            TokenKind::Sample => { self.advance(); Ok(StateType::Sample) }
            TokenKind::LBracket => {
                // Array type: `[N]f32` or `[N]int` with a literal size.
                self.advance();
                let size = match self.peek() {
                    TokenKind::IntLit(n) => {
                        let n = *n as usize;
                        self.advance();
                        n
                    }
                    _ => {
                        return Err(CompileError::new(
                            "Expected integer size for array",
                            self.span(),
                        ));
                    }
                };
                self.expect(&TokenKind::RBracket)?;
                match self.peek() {
                    TokenKind::F32 => { self.advance(); Ok(StateType::ArrayF32(size)) }
                    TokenKind::Int => { self.advance(); Ok(StateType::ArrayInt(size)) }
                    _ => Err(CompileError::new("Expected f32 or int after array size", self.span())),
                }
            }
            _ => Err(CompileError::new(
                format!("Expected type (f32, int, bool, sample, [N]f32, [N]int), found {:?}", self.peek()),
                self.span(),
            )),
        }
    }

    /// Parse a brace-delimited list of UI elements.
    fn parse_ui_block(&mut self) -> Result<Vec<UiElement>, CompileError> {
        self.expect(&TokenKind::LBrace)?;
        let mut elements = Vec::new();
        while *self.peek() != TokenKind::RBrace {
            elements.push(self.parse_ui_element()?);
        }
        self.expect(&TokenKind::RBrace)?;
        Ok(elements)
    }

    /// Parse a single UI element: `param`, `sample`, `group`, `canvas`, or `spacer`.
    fn parse_ui_element(&mut self) -> Result<UiElement, CompileError> {
        match self.peek() {
            TokenKind::Param => {
                self.advance();
                let name = self.expect_ident()?;
                Ok(UiElement::Param(name))
            }
            TokenKind::Sample => {
                self.advance();
                let name = self.expect_ident()?;
                Ok(UiElement::Sample(name))
            }
            TokenKind::Group => {
                self.advance();
                let label = self.expect_string()?;
                let children = self.parse_ui_block()?;
                Ok(UiElement::Group { label, children })
            }
            TokenKind::Canvas => {
                self.advance();
                self.expect(&TokenKind::LBracket)?;
                let width = self.parse_number()?;
                self.expect(&TokenKind::Comma)?;
                let height = self.parse_number()?;
                self.expect(&TokenKind::RBracket)?;
                Ok(UiElement::Canvas { width, height })
            }
            TokenKind::Spacer => {
                self.advance();
                let px = self.parse_number()?;
                Ok(UiElement::Spacer(px))
            }
            _ => Err(CompileError::new(
                format!("Expected UI element (param, sample, group, canvas, spacer), found {:?}", self.peek()),
                self.span(),
            )),
        }
    }

    /// Parse a brace-delimited statement block.
    fn parse_block(&mut self) -> Result<Block, CompileError> {
        self.expect(&TokenKind::LBrace)?;
        let mut stmts = Vec::new();
        while *self.peek() != TokenKind::RBrace {
            stmts.push(self.parse_stmt()?);
        }
        self.expect(&TokenKind::RBrace)?;
        Ok(stmts)
    }

    /// Parse one statement: `let`, `if`, `for`, an assignment, or a bare
    /// expression. Trailing semicolons are optional.
    fn parse_stmt(&mut self) -> Result<Stmt, CompileError> {
        match self.peek() {
            TokenKind::Let => self.parse_let(),
            TokenKind::If => self.parse_if(),
            TokenKind::For => self.parse_for(),
            _ => {
                // Assignment or expression statement: parse an expression
                // first, then reinterpret it as an lvalue if `=` follows.
                let span = self.span();
                let expr = self.parse_expr()?;
                if self.eat(&TokenKind::Eq) {
                    // This is an assignment: expr = value
                    let value = self.parse_expr()?;
                    self.eat(&TokenKind::Semicolon);
                    let target = self.expr_to_lvalue(expr, span)?;
                    Ok(Stmt::Assign { target, value, span })
                } else {
                    self.eat(&TokenKind::Semicolon);
                    Ok(Stmt::ExprStmt(expr))
                }
            }
        }
    }

    /// Convert an already-parsed expression into an assignment target.
    /// Only plain identifiers and single-level indexing (`name[idx]`) are
    /// valid lvalues.
    fn expr_to_lvalue(&self, expr: Expr, span: Span) -> Result<LValue, CompileError> {
        match expr {
            Expr::Ident(name, s) => Ok(LValue::Ident(name, s)),
            Expr::Index(base, idx, s) => {
                if let Expr::Ident(name, _) = *base {
                    Ok(LValue::Index(name, idx, s))
                } else {
                    Err(CompileError::new("Invalid assignment target", span))
                }
            }
            _ => Err(CompileError::new("Invalid assignment target", span)),
        }
    }

    /// Parse `let [mut] name = expr`.
    fn parse_let(&mut self) -> Result<Stmt, CompileError> {
        let span = self.span();
        self.advance(); // consume 'let'
        let mutable = self.eat(&TokenKind::Mut);
        let name = self.expect_ident()?;
        self.expect(&TokenKind::Eq)?;
        let init = self.parse_expr()?;
        self.eat(&TokenKind::Semicolon);
        Ok(Stmt::Let {
            name,
            mutable,
            init,
            span,
        })
    }

    /// Parse `if cond { ... } [else if ... | else { ... }]`.
    fn parse_if(&mut self) -> Result<Stmt, CompileError> {
        let span = self.span();
        self.advance(); // consume 'if'
        let cond = self.parse_expr()?;
        let then_block = self.parse_block()?;
        let else_block = if self.eat(&TokenKind::Else) {
            if *self.peek() == TokenKind::If {
                // else if -> wrap in a block with single if statement
                Some(vec![self.parse_if()?])
            } else {
                Some(self.parse_block()?)
            }
        } else {
            None
        };
        Ok(Stmt::If {
            cond,
            then_block,
            else_block,
            span,
        })
    }

    /// Parse `for var in 0..end { ... }`. The language only supports ranges
    /// that start at 0, which keeps loop codegen simple.
    fn parse_for(&mut self) -> Result<Stmt, CompileError> {
        let span = self.span();
        self.advance(); // consume 'for'
        let var = self.expect_ident()?;
        self.expect(&TokenKind::In)?;
        // Expect 0..end
        let zero_span = self.span();
        match self.peek() {
            TokenKind::IntLit(0) => { self.advance(); }
            _ => {
                return Err(CompileError::new(
                    "For loop range must start at 0 (e.g. 0..buffer_size)",
                    zero_span,
                ));
            }
        }
        self.expect(&TokenKind::DotDot)?;
        let end = self.parse_expr()?;
        let body = self.parse_block()?;
        Ok(Stmt::For {
            var,
            end,
            body,
            span,
        })
    }

    // Expression parsing with precedence climbing. Each level binds tighter
    // than the one above it: || < && < ==/!= < comparisons < +/- < *//% <
    // unary < postfix (indexing) < primary.
    fn parse_expr(&mut self) -> Result<Expr, CompileError> {
        self.parse_or()
    }

    fn parse_or(&mut self) -> Result<Expr, CompileError> {
        let mut left = self.parse_and()?;
        while *self.peek() == TokenKind::PipePipe {
            let span = self.span();
            self.advance();
            let right = self.parse_and()?;
            left = Expr::BinOp(Box::new(left), BinOp::Or, Box::new(right), span);
        }
        Ok(left)
    }

    fn parse_and(&mut self) -> Result<Expr, CompileError> {
        let mut left = self.parse_equality()?;
        while *self.peek() == TokenKind::AmpAmp {
            let span = self.span();
            self.advance();
            let right = self.parse_equality()?;
            left = Expr::BinOp(Box::new(left), BinOp::And, Box::new(right), span);
        }
        Ok(left)
    }

    fn parse_equality(&mut self) -> Result<Expr, CompileError> {
        let mut left = self.parse_comparison()?;
        loop {
            let op = match self.peek() {
                TokenKind::EqEq => BinOp::Eq,
                TokenKind::BangEq => BinOp::Ne,
                _ => break,
            };
            let span = self.span();
            self.advance();
            let right = self.parse_comparison()?;
            left = Expr::BinOp(Box::new(left), op, Box::new(right), span);
        }
        Ok(left)
    }

    fn parse_comparison(&mut self) -> Result<Expr, CompileError> {
        let mut left = self.parse_additive()?;
        loop {
            let op = match self.peek() {
                TokenKind::Lt => BinOp::Lt,
                TokenKind::Gt => BinOp::Gt,
                TokenKind::LtEq => BinOp::Le,
                TokenKind::GtEq => BinOp::Ge,
                _ => break,
            };
            let span = self.span();
            self.advance();
            let right = self.parse_additive()?;
            left = Expr::BinOp(Box::new(left), op, Box::new(right), span);
        }
        Ok(left)
    }

    fn parse_additive(&mut self) -> Result<Expr, CompileError> {
        let mut left = self.parse_multiplicative()?;
        loop {
            let op = match self.peek() {
                TokenKind::Plus => BinOp::Add,
                TokenKind::Minus => BinOp::Sub,
                _ => break,
            };
            let span = self.span();
            self.advance();
            let right = self.parse_multiplicative()?;
            left = Expr::BinOp(Box::new(left), op, Box::new(right), span);
        }
        Ok(left)
    }

    fn parse_multiplicative(&mut self) -> Result<Expr, CompileError> {
        let mut left = self.parse_unary()?;
        loop {
            let op = match self.peek() {
                TokenKind::Star => BinOp::Mul,
                TokenKind::Slash => BinOp::Div,
                TokenKind::Percent => BinOp::Mod,
                _ => break,
            };
            let span = self.span();
            self.advance();
            let right = self.parse_unary()?;
            left = Expr::BinOp(Box::new(left), op, Box::new(right), span);
        }
        Ok(left)
    }

    /// Prefix operators `-` and `!`; right-associative via recursion.
    fn parse_unary(&mut self) -> Result<Expr, CompileError> {
        match self.peek() {
            TokenKind::Minus => {
                let span = self.span();
                self.advance();
                let expr = self.parse_unary()?;
                Ok(Expr::UnaryOp(UnaryOp::Neg, Box::new(expr), span))
            }
            TokenKind::Bang => {
                let span = self.span();
                self.advance();
                let expr = self.parse_unary()?;
                Ok(Expr::UnaryOp(UnaryOp::Not, Box::new(expr), span))
            }
            _ => self.parse_postfix(),
        }
    }

    /// Postfix indexing: `expr[index]`, possibly chained.
    fn parse_postfix(&mut self) -> Result<Expr, CompileError> {
        let mut expr = self.parse_primary()?;
        // Handle indexing: expr[index]
        while *self.peek() == TokenKind::LBracket {
            let span = self.span();
            self.advance();
            let index = self.parse_expr()?;
            self.expect(&TokenKind::RBracket)?;
            expr = Expr::Index(Box::new(expr), Box::new(index), span);
        }
        Ok(expr)
    }

    /// Literals, parenthesized expressions, casts, identifiers, and calls.
    fn parse_primary(&mut self) -> Result<Expr, CompileError> {
        let span = self.span();
        // Clone the current kind so we can advance while holding its payload.
        match self.peek().clone() {
            TokenKind::FloatLit(v) => {
                self.advance();
                Ok(Expr::FloatLit(v, span))
            }
            TokenKind::IntLit(v) => {
                self.advance();
                Ok(Expr::IntLit(v, span))
            }
            TokenKind::True => {
                self.advance();
                Ok(Expr::BoolLit(true, span))
            }
            TokenKind::False => {
                self.advance();
                Ok(Expr::BoolLit(false, span))
            }
            TokenKind::LParen => {
                self.advance();
                let expr = self.parse_expr()?;
                self.expect(&TokenKind::RParen)?;
                Ok(expr)
            }
            // Cast: int(expr) or float(expr)
            TokenKind::Int => {
                self.advance();
                self.expect(&TokenKind::LParen)?;
                let expr = self.parse_expr()?;
                self.expect(&TokenKind::RParen)?;
                Ok(Expr::Cast(CastKind::ToInt, Box::new(expr), span))
            }
            TokenKind::F32 => {
                self.advance();
                self.expect(&TokenKind::LParen)?;
                let expr = self.parse_expr()?;
                self.expect(&TokenKind::RParen)?;
                Ok(Expr::Cast(CastKind::ToFloat, Box::new(expr), span))
            }
            TokenKind::Ident(name) => {
                // `name` is already owned here (the match operates on a clone),
                // so no further clone is needed.
                self.advance();
                // Check if it's a function call
                if *self.peek() == TokenKind::LParen {
                    self.advance();
                    let mut args = Vec::new();
                    if *self.peek() != TokenKind::RParen {
                        args.push(self.parse_expr()?);
                        while self.eat(&TokenKind::Comma) {
                            args.push(self.parse_expr()?);
                        }
                    }
                    self.expect(&TokenKind::RParen)?;
                    Ok(Expr::Call(name, args, span))
                } else {
                    Ok(Expr::Ident(name, span))
                }
            }
            _ => Err(CompileError::new(
                format!("Expected expression, found {:?}", self.peek()),
                span,
            )),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::lexer::Lexer;

    // Helper: lex and parse a source string in one step.
    fn parse_script(source: &str) -> Result<Script, CompileError> {
        let mut lexer = Lexer::new(source);
        let tokens = lexer.tokenize()?;
        let mut parser = Parser::new(&tokens);
        parser.parse()
    }

    // Smallest valid script: just a name, category, and empty process block.
    #[test]
    fn test_minimal_script() {
        let script = parse_script(r#"
name "Test"
category utility
process {}
"#).unwrap();
        assert_eq!(script.name, "Test");
        assert_eq!(script.category, CategoryKind::Utility);
    }

    // Ports and params blocks populate the corresponding Script fields.
    #[test]
    fn test_ports_and_params() {
        let script = parse_script(r#"
name "Gain"
category effect
inputs {
audio_in: audio
cv_mod: cv
}
outputs {
audio_out: audio
}
params {
gain: 1.0 [0.0, 2.0] ""
}
process {}
"#).unwrap();
        assert_eq!(script.inputs.len(), 2);
        assert_eq!(script.outputs.len(), 1);
        assert_eq!(script.params.len(), 1);
        assert_eq!(script.params[0].name, "gain");
        assert_eq!(script.params[0].default, 1.0);
    }

    // Every state type form: sample, scalar f32/int, and sized array.
    #[test]
    fn test_state_with_sample() {
        let script = parse_script(r#"
name "Sampler"
category generator
state {
clip: sample
phase: f32
buffer: [4096]f32
counter: int
}
process {}
"#).unwrap();
        assert_eq!(script.state.len(), 4);
        assert_eq!(script.state[0].ty, StateType::Sample);
        assert_eq!(script.state[1].ty, StateType::F32);
        assert_eq!(script.state[2].ty, StateType::ArrayF32(4096));
        assert_eq!(script.state[3].ty, StateType::Int);
    }

    // A for loop with indexed buffer reads/writes parses as one statement.
    #[test]
    fn test_process_with_for_loop() {
        let script = parse_script(r#"
name "Pass"
category effect
inputs { audio_in: audio }
outputs { audio_out: audio }
process {
for i in 0..buffer_size {
audio_out[i * 2] = audio_in[i * 2];
audio_out[i * 2 + 1] = audio_in[i * 2 + 1];
}
}
"#).unwrap();
        assert_eq!(script.process.len(), 1);
    }

    // Arithmetic precedence, calls, and casts inside let bindings.
    #[test]
    fn test_expressions() {
        let script = parse_script(r#"
name "Expr"
category utility
process {
let x = 1.0 + 2.0 * 3.0;
let y = sin(x) + cos(3.14);
let z = int(x * 100.0);
}
"#).unwrap();
        assert_eq!(script.process.len(), 3);
    }

    // UI block with sample picker, param slider, and nested group.
    #[test]
    fn test_ui_block() {
        let script = parse_script(r#"
name "UI Test"
category utility
params {
gain: 1.0 [0.0, 2.0] ""
mix: 0.5 [0.0, 1.0] ""
}
state {
clip: sample
}
ui {
sample clip
param gain
group "Advanced" {
param mix
}
}
process {}
"#).unwrap();
        let ui = script.ui.unwrap();
        assert_eq!(ui.len(), 3);
    }
}

View File

@ -0,0 +1,145 @@
/// Source location
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Span {
    pub line: u32,
    pub col: u32,
}

impl Span {
    /// Build a span from a line/column pair.
    pub fn new(line: u32, col: u32) -> Self {
        Span { line, col }
    }
}
/// Token with source location
#[derive(Debug, Clone, PartialEq)]
pub struct Token {
    pub kind: TokenKind, // what kind of token this is (with payload for literals/idents)
    pub span: Span,      // where the token starts in the source
}
#[derive(Debug, Clone, PartialEq)]
pub enum TokenKind {
    // Header keywords
    Name,
    Category,
    Inputs,
    Outputs,
    Params,
    State,
    Ui,
    Process,
    // Type keywords
    Audio,
    Cv,
    Midi,
    F32,
    Int,
    Bool,
    Sample,
    // Category values
    Generator,
    Effect,
    Utility,
    // Statement keywords
    Let,
    Mut,
    If,
    Else,
    For,
    In,
    // UI keywords
    Group,
    Param,
    Canvas,
    Spacer,
    // Draw block
    Draw,
    // Literals
    FloatLit(f32),
    IntLit(i32),
    StringLit(String),
    True,
    False,
    // Identifiers
    Ident(String),
    // Operators
    Plus,
    Minus,
    Star,
    Slash,
    Percent,
    Eq,       // =
    EqEq,     // ==
    BangEq,   // !=
    Lt,       // <
    Gt,       // >
    LtEq,     // <=
    GtEq,     // >=
    AmpAmp,   // &&
    PipePipe, // ||
    Bang,     // !
    // Delimiters
    LBrace,    // {
    RBrace,    // }
    LBracket,  // [
    RBracket,  // ]
    LParen,    // (
    RParen,    // )
    Colon,     // :
    Comma,     // ,
    Semicolon, // ;
    DotDot,    // ..
    // End of file
    Eof,
}

impl TokenKind {
    /// Map an identifier string to its reserved-word token, falling back to
    /// a plain `Ident` for anything that is not a keyword.
    pub fn from_ident(s: &str) -> TokenKind {
        use TokenKind::*;
        match s {
            "name" => Name,
            "category" => Category,
            "inputs" => Inputs,
            "outputs" => Outputs,
            "params" => Params,
            "state" => State,
            "ui" => Ui,
            "process" => Process,
            "audio" => Audio,
            "cv" => Cv,
            "midi" => Midi,
            "f32" => F32,
            "int" => Int,
            "bool" => Bool,
            "sample" => Sample,
            "generator" => Generator,
            "effect" => Effect,
            "utility" => Utility,
            "let" => Let,
            "mut" => Mut,
            "if" => If,
            "else" => Else,
            "for" => For,
            "in" => In,
            "group" => Group,
            "param" => Param,
            "canvas" => Canvas,
            "spacer" => Spacer,
            "draw" => Draw,
            "true" => True,
            "false" => False,
            other => Ident(other.to_string()),
        }
    }
}

View File

@ -0,0 +1,27 @@
use serde::{Deserialize, Serialize};
/// Declarative UI layout for a script node, rendered in bottom_ui()
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct UiDeclaration {
    pub elements: Vec<UiElement>, // top-level elements, rendered in order
}

/// One element of a script node's declarative UI tree.
/// Serialized with serde so layouts can be persisted with the project.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum UiElement {
    /// Render a parameter slider/knob (payload is the parameter name)
    Param(String),
    /// Render a sample picker dropdown (payload is the sample state name)
    Sample(String),
    /// Collapsible group with label
    Group {
        label: String,
        children: Vec<UiElement>,
    },
    /// Drawable canvas area (phase 2)
    Canvas {
        width: f32,
        height: f32,
    },
    /// Vertical spacer (height in pixels)
    Spacer(f32),
}

View File

@ -0,0 +1,388 @@
use crate::ast::*;
use crate::error::CompileError;
use crate::token::Span;
use crate::ui_decl::UiElement;
/// Type used during validation
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum VType {
    F32,
    Int,
    Bool,
    /// Array of f32 (state array or input/output buffer)
    ArrayF32,
    /// Array of int
    ArrayInt,
    /// Sample slot (accessed via sample_read/sample_len)
    Sample,
}

struct VarInfo {
    ty: VType,
    mutable: bool,
}

struct Scope {
    vars: Vec<(String, VarInfo)>,
}

impl Scope {
    fn new() -> Self {
        Scope { vars: Vec::new() }
    }

    /// Record a binding. A later `define` with the same name shadows the
    /// earlier one because `lookup` scans from the back.
    fn define(&mut self, name: String, ty: VType, mutable: bool) {
        let info = VarInfo { ty, mutable };
        self.vars.push((name, info));
    }

    /// Return the most recently defined binding for `name`, if any.
    fn lookup(&self, name: &str) -> Option<&VarInfo> {
        for (n, info) in self.vars.iter().rev() {
            if n == name {
                return Some(info);
            }
        }
        None
    }
}
/// Walks a parsed script, checking name resolution, mutability, and types.
struct Validator<'a> {
    script: &'a Script,  // the script being validated (read-only)
    scopes: Vec<Scope>,  // lexical scope stack; last entry is innermost
}
impl<'a> Validator<'a> {
    /// Create a validator with a single (global) scope.
    fn new(script: &'a Script) -> Self {
        Self {
            script,
            scopes: vec![Scope::new()],
        }
    }

    /// Innermost scope; the stack is never empty, so unwrap is safe.
    fn current_scope(&mut self) -> &mut Scope {
        self.scopes.last_mut().unwrap()
    }

    fn push_scope(&mut self) {
        self.scopes.push(Scope::new());
    }

    fn pop_scope(&mut self) {
        self.scopes.pop();
    }

    /// Resolve a name by searching scopes from innermost to outermost.
    fn lookup(&self, name: &str) -> Option<&VarInfo> {
        for scope in self.scopes.iter().rev() {
            if let Some(info) = scope.lookup(name) {
                return Some(info);
            }
        }
        None
    }

    /// Define a binding in the innermost scope.
    fn define(&mut self, name: String, ty: VType, mutable: bool) {
        self.current_scope().define(name, ty, mutable);
    }

    /// Validate the whole script: seed the global scope with builtins,
    /// ports, params, and state, then check the process block and UI.
    /// NOTE(review): the optional `draw` block is not validated here —
    /// confirm it is checked elsewhere (draw-only builtins would not be in
    /// scope in this pass).
    fn validate(&mut self) -> Result<(), CompileError> {
        // Register built-in variables
        self.define("sample_rate".into(), VType::Int, false);
        self.define("buffer_size".into(), VType::Int, false);
        // Register inputs as arrays
        for input in &self.script.inputs {
            let ty = match input.signal {
                SignalKind::Audio | SignalKind::Cv => VType::ArrayF32,
                SignalKind::Midi => continue, // MIDI not yet supported in process
            };
            self.define(input.name.clone(), ty, false);
        }
        // Register outputs as mutable arrays
        for output in &self.script.outputs {
            let ty = match output.signal {
                SignalKind::Audio | SignalKind::Cv => VType::ArrayF32,
                SignalKind::Midi => continue,
            };
            self.define(output.name.clone(), ty, true);
        }
        // Register params as f32
        for param in &self.script.params {
            self.define(param.name.clone(), VType::F32, false);
        }
        // Register state vars (samples are read-only; everything else mutable)
        for state in &self.script.state {
            let (ty, mutable) = match &state.ty {
                StateType::F32 => (VType::F32, true),
                StateType::Int => (VType::Int, true),
                StateType::Bool => (VType::Bool, true),
                StateType::ArrayF32(_) => (VType::ArrayF32, true),
                StateType::ArrayInt(_) => (VType::ArrayInt, true),
                StateType::Sample => (VType::Sample, false),
            };
            self.define(state.name.clone(), ty, mutable);
        }
        // Validate process block
        self.validate_block(&self.script.process)?;
        // Validate UI references
        if let Some(ui) = &self.script.ui {
            self.validate_ui(ui)?;
        }
        Ok(())
    }

    /// Validate each statement of a block in order (order matters: a `let`
    /// defines a name that later statements may reference).
    fn validate_block(&mut self, block: &[Stmt]) -> Result<(), CompileError> {
        for stmt in block {
            self.validate_stmt(stmt)?;
        }
        Ok(())
    }

    /// Validate one statement: name resolution, mutability on assignment,
    /// and scoping of `if`/`for` bodies.
    fn validate_stmt(&mut self, stmt: &Stmt) -> Result<(), CompileError> {
        match stmt {
            Stmt::Let { name, mutable, init, span: _ } => {
                // Type of the binding is inferred from its initializer.
                let ty = self.infer_type(init)?;
                self.define(name.clone(), ty, *mutable);
                Ok(())
            }
            Stmt::Assign { target, value, span: _ } => {
                match target {
                    LValue::Ident(name, s) => {
                        let info = self.lookup(name).ok_or_else(|| {
                            CompileError::new(format!("Undefined variable: {}", name), *s)
                        })?;
                        if !info.mutable {
                            return Err(CompileError::new(
                                format!("Cannot assign to immutable variable: {}", name),
                                *s,
                            ));
                        }
                    }
                    LValue::Index(name, idx, s) => {
                        let info = self.lookup(name).ok_or_else(|| {
                            CompileError::new(format!("Undefined variable: {}", name), *s)
                        })?;
                        if !info.mutable {
                            return Err(CompileError::new(
                                format!("Cannot assign to immutable array: {}", name),
                                *s,
                            ));
                        }
                        self.infer_type(idx)?;
                    }
                }
                self.infer_type(value)?;
                Ok(())
            }
            Stmt::If { cond, then_block, else_block, .. } => {
                self.infer_type(cond)?;
                // Each branch gets its own scope so its `let`s don't leak.
                self.push_scope();
                self.validate_block(then_block)?;
                self.pop_scope();
                if let Some(else_b) = else_block {
                    self.push_scope();
                    self.validate_block(else_b)?;
                    self.pop_scope();
                }
                Ok(())
            }
            Stmt::For { var, end, body, span } => {
                let end_ty = self.infer_type(end)?;
                if end_ty != VType::Int {
                    return Err(CompileError::new(
                        "For loop bound must be an integer expression",
                        *span,
                    ).with_hint("Use int(...) to convert, or use buffer_size / len(array)"));
                }
                // The loop variable is an immutable int scoped to the body.
                self.push_scope();
                self.define(var.clone(), VType::Int, false);
                self.validate_block(body)?;
                self.pop_scope();
                Ok(())
            }
            Stmt::ExprStmt(expr) => {
                self.infer_type(expr)?;
                Ok(())
            }
        }
    }

    /// Infer the validation type of an expression, reporting undefined
    /// names and invalid operand combinations.
    fn infer_type(&self, expr: &Expr) -> Result<VType, CompileError> {
        match expr {
            Expr::FloatLit(_, _) => Ok(VType::F32),
            Expr::IntLit(_, _) => Ok(VType::Int),
            Expr::BoolLit(_, _) => Ok(VType::Bool),
            Expr::Ident(name, span) => {
                let info = self.lookup(name).ok_or_else(|| {
                    CompileError::new(format!("Undefined variable: {}", name), *span)
                })?;
                Ok(info.ty)
            }
            Expr::BinOp(left, op, right, span) => {
                let lt = self.infer_type(left)?;
                let rt = self.infer_type(right)?;
                match op {
                    BinOp::And | BinOp::Or => Ok(VType::Bool),
                    BinOp::Eq | BinOp::Ne | BinOp::Lt | BinOp::Gt | BinOp::Le | BinOp::Ge => {
                        Ok(VType::Bool)
                    }
                    _ => {
                        // Arithmetic: both sides should be same numeric type.
                        // An f32 on either side promotes the result to f32.
                        if lt == VType::F32 || rt == VType::F32 {
                            Ok(VType::F32)
                        } else if lt == VType::Int && rt == VType::Int {
                            Ok(VType::Int)
                        } else {
                            Err(CompileError::new(
                                format!("Cannot apply {:?} to {:?} and {:?}", op, lt, rt),
                                *span,
                            ))
                        }
                    }
                }
            }
            Expr::UnaryOp(op, inner, _) => {
                let ty = self.infer_type(inner)?;
                match op {
                    UnaryOp::Neg => Ok(ty),
                    UnaryOp::Not => Ok(VType::Bool),
                }
            }
            Expr::Cast(kind, _, _) => match kind {
                CastKind::ToInt => Ok(VType::Int),
                CastKind::ToFloat => Ok(VType::F32),
            },
            Expr::Index(base, idx, span) => {
                let base_ty = self.infer_type(base)?;
                self.infer_type(idx)?;
                match base_ty {
                    VType::ArrayF32 => Ok(VType::F32),
                    VType::ArrayInt => Ok(VType::Int),
                    _ => Err(CompileError::new("Cannot index non-array type", *span)),
                }
            }
            Expr::Call(name, args, span) => {
                self.validate_call(name, args, *span)
            }
        }
    }

    /// Check a builtin function call: known name, correct argument count,
    /// and (for sample/array builtins) correct argument types.
    fn validate_call(&self, name: &str, args: &[Expr], span: Span) -> Result<VType, CompileError> {
        // Validate argument count and infer return type
        match name {
            // 1-arg math functions returning f32
            "sin" | "cos" | "tan" | "asin" | "acos" | "atan" | "exp" | "log" | "log2"
            | "sqrt" | "floor" | "ceil" | "round" | "trunc" | "fract" | "abs" | "sign" => {
                if args.len() != 1 {
                    return Err(CompileError::new(format!("{}() takes 1 argument", name), span));
                }
                for arg in args { self.infer_type(arg)?; }
                Ok(VType::F32)
            }
            // 2-arg math functions returning f32
            "atan2" | "pow" | "min" | "max" => {
                if args.len() != 2 {
                    return Err(CompileError::new(format!("{}() takes 2 arguments", name), span));
                }
                for arg in args { self.infer_type(arg)?; }
                Ok(VType::F32)
            }
            // 3-arg functions
            "clamp" | "mix" | "smoothstep" => {
                if args.len() != 3 {
                    return Err(CompileError::new(format!("{}() takes 3 arguments", name), span));
                }
                for arg in args { self.infer_type(arg)?; }
                Ok(VType::F32)
            }
            // cv_or(value, default) -> f32
            "cv_or" => {
                if args.len() != 2 {
                    return Err(CompileError::new("cv_or() takes 2 arguments", span));
                }
                for arg in args { self.infer_type(arg)?; }
                Ok(VType::F32)
            }
            // len(array) -> int
            "len" => {
                if args.len() != 1 {
                    return Err(CompileError::new("len() takes 1 argument", span));
                }
                let ty = self.infer_type(&args[0])?;
                if ty != VType::ArrayF32 && ty != VType::ArrayInt {
                    return Err(CompileError::new("len() requires an array argument", span));
                }
                Ok(VType::Int)
            }
            // sample_len(sample) -> int
            "sample_len" => {
                if args.len() != 1 {
                    return Err(CompileError::new("sample_len() takes 1 argument", span));
                }
                let ty = self.infer_type(&args[0])?;
                if ty != VType::Sample {
                    return Err(CompileError::new("sample_len() requires a sample argument", span));
                }
                Ok(VType::Int)
            }
            // sample_read(sample, index) -> f32
            "sample_read" => {
                if args.len() != 2 {
                    return Err(CompileError::new("sample_read() takes 2 arguments", span));
                }
                let ty = self.infer_type(&args[0])?;
                if ty != VType::Sample {
                    return Err(CompileError::new("sample_read() first argument must be a sample", span));
                }
                self.infer_type(&args[1])?;
                Ok(VType::F32)
            }
            // sample_rate_of(sample) -> int
            "sample_rate_of" => {
                if args.len() != 1 {
                    return Err(CompileError::new("sample_rate_of() takes 1 argument", span));
                }
                let ty = self.infer_type(&args[0])?;
                if ty != VType::Sample {
                    return Err(CompileError::new("sample_rate_of() requires a sample argument", span));
                }
                Ok(VType::Int)
            }
            _ => Err(CompileError::new(format!("Unknown function: {}", name), span)),
        }
    }

    /// Check that UI elements reference declared params and sample state.
    /// Canvas and Spacer carry no references, so they are skipped.
    /// UI declarations carry no spans, hence the placeholder Span::new(0, 0).
    fn validate_ui(&self, elements: &[UiElement]) -> Result<(), CompileError> {
        for element in elements {
            match element {
                UiElement::Param(name) => {
                    if !self.script.params.iter().any(|p| p.name == *name) {
                        return Err(CompileError::new(
                            format!("UI references unknown parameter: {}", name),
                            Span::new(0, 0),
                        ));
                    }
                }
                UiElement::Sample(name) => {
                    if !self.script.state.iter().any(|s| s.name == *name && s.ty == StateType::Sample) {
                        return Err(CompileError::new(
                            format!("UI references unknown sample: {}", name),
                            Span::new(0, 0),
                        ));
                    }
                }
                UiElement::Group { children, .. } => {
                    self.validate_ui(children)?;
                }
                _ => {}
            }
        }
        Ok(())
    }
}
/// Validate a parsed script, returning the script unchanged on success so
/// callers can chain validation into a compilation pipeline.
pub fn validate(script: &Script) -> Result<&Script, CompileError> {
    Validator::new(script).validate().map(|()| script)
}

View File

@ -0,0 +1,674 @@
use crate::error::ScriptError;
use crate::opcodes::OpCode;
// Maximum operand-stack depth allocated per VM instance.
const STACK_SIZE: usize = 256;
// Number of local-variable slots allocated per VM instance.
const MAX_LOCALS: usize = 64;
// Default value for VmCore::instruction_limit — presumably a per-execution
// budget guarding against runaway scripts; confirm against the execute loop.
const DEFAULT_INSTRUCTION_LIMIT: u64 = 10_000_000;
/// A value on the VM stack (tagged union)
/// NOTE(review): no tag is stored in the union itself — which field is valid
/// is determined statically by the bytecode's typing; confirm every read
/// matches the last-written field.
#[derive(Clone, Copy)]
pub union Value {
    pub f: f32,  // float payload
    pub i: i32,  // integer payload
    pub b: bool, // boolean payload
}

impl Default for Value {
    // Zero-initialize via the integer field (all payload bits zero).
    fn default() -> Self {
        Value { i: 0 }
    }
}
/// A loaded audio sample slot
///
/// An empty slot (`SampleSlot::default()`) has no data, zero frames, a zero
/// sample rate, and an empty name. The hand-written `Default` impl was
/// field-for-field identical to what `#[derive(Default)]` produces, so it is
/// replaced by the derive.
#[derive(Clone, Default)]
pub struct SampleSlot {
    /// Raw sample values; `SampleRead` indexes directly into this buffer.
    pub data: Vec<f32>,
    /// Frame count reported to scripts via `SampleLen`.
    pub frame_count: usize,
    /// Source sample rate (0 when the slot is empty).
    pub sample_rate: u32,
    /// Display name of the loaded sample.
    pub name: String,
}
/// Result of a single opcode step in VmCore
///
/// Returned by [`VmCore::step`] so each VM (audio vs. draw) can handle its
/// own specialized opcodes while sharing the common dispatch.
enum StepResult {
    /// Opcode was handled, continue execution
    Continue,
    /// Hit a Halt instruction
    Halt,
    /// Opcode not handled by core — caller must handle it
    Unhandled(OpCode),
}
/// Shared VM state and opcode dispatch for arithmetic, logic, control flow, and math builtins.
#[derive(Clone)]
struct VmCore {
    /// Compiled program.
    bytecode: Vec<u8>,
    /// f32 constant pool, indexed by `PushF32` operands.
    constants_f32: Vec<f32>,
    /// i32 constant pool, indexed by `PushI32` operands.
    constants_i32: Vec<i32>,
    /// Operand stack, preallocated to STACK_SIZE.
    stack: Vec<Value>,
    /// Stack pointer: index of the next free stack slot.
    sp: usize,
    /// Local-variable slots, preallocated to MAX_LOCALS and zeroed each frame.
    locals: Vec<Value>,
    /// Current parameter values, written externally between executions.
    params: Vec<f32>,
    /// Persistent scalar state (survives across execute() calls).
    state_scalars: Vec<Value>,
    /// Persistent array state (survives across execute() calls).
    state_arrays: Vec<Vec<f32>>,
    /// Max opcodes per execute() call before aborting.
    instruction_limit: u64,
}
impl VmCore {
    /// Create a core VM with the given program, constant pools, and storage
    /// sizes.
    ///
    /// `param_defaults` may be shorter than `num_params`; missing entries
    /// default to 0.0. `instruction_limit` bounds a single `execute()` call.
    fn new(
        bytecode: Vec<u8>,
        constants_f32: Vec<f32>,
        constants_i32: Vec<i32>,
        num_params: usize,
        param_defaults: &[f32],
        num_state_scalars: usize,
        state_array_sizes: &[usize],
        instruction_limit: u64,
    ) -> Self {
        let mut params = vec![0.0f32; num_params];
        for (i, &d) in param_defaults.iter().enumerate() {
            if i < params.len() {
                params[i] = d;
            }
        }
        Self {
            bytecode,
            constants_f32,
            constants_i32,
            stack: vec![Value::default(); STACK_SIZE],
            sp: 0,
            locals: vec![Value::default(); MAX_LOCALS],
            params,
            state_scalars: vec![Value::default(); num_state_scalars],
            state_arrays: state_array_sizes.iter().map(|&sz| vec![0.0f32; sz]).collect(),
            instruction_limit,
        }
    }
    /// Reset execution state (sp + locals) at the start of each execute() call.
    fn reset_frame(&mut self) {
        self.sp = 0;
        for l in &mut self.locals {
            *l = Value::default();
        }
    }
    /// Execute one opcode at `pc`. Returns the new `pc` and a `StepResult`.
    /// Handles all opcodes shared between ScriptVM and DrawVM:
    /// stack ops, locals, params, state, arrays, arithmetic, comparison,
    /// logic, casts, control flow, and math builtins.
    ///
    /// NOTE(review): constant/local indices are taken from the bytecode
    /// unchecked; malformed bytecode can still panic on those slice indexes.
    /// Presumably the compiler guarantees they are in range — confirm.
    fn step(&mut self, pc: &mut usize) -> Result<StepResult, ScriptError> {
        let op = self.bytecode[*pc];
        *pc += 1;
        let Some(opcode) = OpCode::from_u8(op) else {
            return Err(ScriptError::InvalidOpcode(op));
        };
        match opcode {
            OpCode::Halt => return Ok(StepResult::Halt),
            // Stack operations
            OpCode::PushF32 => {
                let idx = self.read_u16(pc) as usize;
                self.push_f(self.constants_f32[idx])?;
            }
            OpCode::PushI32 => {
                let idx = self.read_u16(pc) as usize;
                self.push_i(self.constants_i32[idx])?;
            }
            OpCode::PushBool => {
                let v = self.bytecode[*pc];
                *pc += 1;
                self.push_b(v != 0)?;
            }
            OpCode::Pop => { self.pop()?; }
            // Locals
            OpCode::LoadLocal => {
                let idx = self.read_u16(pc) as usize;
                self.push(self.locals[idx])?;
            }
            OpCode::StoreLocal => {
                let idx = self.read_u16(pc) as usize;
                self.locals[idx] = self.pop()?;
            }
            // Params (read)
            OpCode::LoadParam => {
                let idx = self.read_u16(pc) as usize;
                self.push_f(self.params[idx])?;
            }
            // State scalars
            OpCode::LoadState => {
                let idx = self.read_u16(pc) as usize;
                self.push(self.state_scalars[idx])?;
            }
            OpCode::StoreState => {
                let idx = self.read_u16(pc) as usize;
                self.state_scalars[idx] = self.pop()?;
            }
            // State arrays
            OpCode::LoadStateArray => {
                let arr_id = self.read_u16(pc) as usize;
                let idx = unsafe { self.pop()?.i };
                // Guard both a bad array id and a zero-length array: the
                // wrap below computes `idx % arr_len`, which would panic
                // with a division by zero when arr_len == 0. Empty or
                // unknown arrays read as 0.0.
                let val = if arr_id < self.state_arrays.len()
                    && !self.state_arrays[arr_id].is_empty()
                {
                    let arr_len = self.state_arrays[arr_id].len();
                    // Euclidean-style wrap: negative indices address from the end.
                    let idx = ((idx % arr_len as i32) + arr_len as i32) as usize % arr_len;
                    self.state_arrays[arr_id][idx]
                } else {
                    0.0
                };
                self.push_f(val)?;
            }
            OpCode::StoreStateArray => {
                let arr_id = self.read_u16(pc) as usize;
                let val = unsafe { self.pop()?.f };
                let idx = unsafe { self.pop()?.i };
                // Writes to an unknown array id, or to an empty array (where
                // the wrap would divide by zero), are silently dropped.
                if arr_id < self.state_arrays.len() && !self.state_arrays[arr_id].is_empty() {
                    let arr_len = self.state_arrays[arr_id].len();
                    let idx = ((idx % arr_len as i32) + arr_len as i32) as usize % arr_len;
                    self.state_arrays[arr_id][idx] = val;
                }
            }
            OpCode::ArrayLen => {
                let arr_id = self.read_u16(pc) as usize;
                let len = if arr_id < self.state_arrays.len() {
                    self.state_arrays[arr_id].len() as i32
                } else {
                    0
                };
                self.push_i(len)?;
            }
            // Float arithmetic (division/modulo by ~0 yields 0.0 rather than inf/NaN)
            OpCode::AddF => { let b = self.pop_f()?; let a = self.pop_f()?; self.push_f(a + b)?; }
            OpCode::SubF => { let b = self.pop_f()?; let a = self.pop_f()?; self.push_f(a - b)?; }
            OpCode::MulF => { let b = self.pop_f()?; let a = self.pop_f()?; self.push_f(a * b)?; }
            OpCode::DivF => { let b = self.pop_f()?; let a = self.pop_f()?; self.push_f(if b.abs() > 1e-30 { a / b } else { 0.0 })?; }
            OpCode::ModF => { let b = self.pop_f()?; let a = self.pop_f()?; self.push_f(if b.abs() > 1e-30 { a % b } else { 0.0 })?; }
            OpCode::NegF => { let v = self.pop_f()?; self.push_f(-v)?; }
            // Int arithmetic — wrapping throughout so no input can panic.
            // In particular, checked `/` and `%` panic on i32::MIN / -1 (the
            // one overflowing case) even in release builds; wrapping_div /
            // wrapping_rem return i32::MIN / 0 instead.
            OpCode::AddI => { let b = self.pop_i()?; let a = self.pop_i()?; self.push_i(a.wrapping_add(b))?; }
            OpCode::SubI => { let b = self.pop_i()?; let a = self.pop_i()?; self.push_i(a.wrapping_sub(b))?; }
            OpCode::MulI => { let b = self.pop_i()?; let a = self.pop_i()?; self.push_i(a.wrapping_mul(b))?; }
            OpCode::DivI => { let b = self.pop_i()?; let a = self.pop_i()?; self.push_i(if b != 0 { a.wrapping_div(b) } else { 0 })?; }
            OpCode::ModI => { let b = self.pop_i()?; let a = self.pop_i()?; self.push_i(if b != 0 { a.wrapping_rem(b) } else { 0 })?; }
            OpCode::NegI => { let v = self.pop_i()?; self.push_i(v.wrapping_neg())?; }
            // Float comparison
            OpCode::EqF => { let b = self.pop_f()?; let a = self.pop_f()?; self.push_b(a == b)?; }
            OpCode::NeF => { let b = self.pop_f()?; let a = self.pop_f()?; self.push_b(a != b)?; }
            OpCode::LtF => { let b = self.pop_f()?; let a = self.pop_f()?; self.push_b(a < b)?; }
            OpCode::GtF => { let b = self.pop_f()?; let a = self.pop_f()?; self.push_b(a > b)?; }
            OpCode::LeF => { let b = self.pop_f()?; let a = self.pop_f()?; self.push_b(a <= b)?; }
            OpCode::GeF => { let b = self.pop_f()?; let a = self.pop_f()?; self.push_b(a >= b)?; }
            // Int comparison
            OpCode::EqI => { let b = self.pop_i()?; let a = self.pop_i()?; self.push_b(a == b)?; }
            OpCode::NeI => { let b = self.pop_i()?; let a = self.pop_i()?; self.push_b(a != b)?; }
            OpCode::LtI => { let b = self.pop_i()?; let a = self.pop_i()?; self.push_b(a < b)?; }
            OpCode::GtI => { let b = self.pop_i()?; let a = self.pop_i()?; self.push_b(a > b)?; }
            OpCode::LeI => { let b = self.pop_i()?; let a = self.pop_i()?; self.push_b(a <= b)?; }
            OpCode::GeI => { let b = self.pop_i()?; let a = self.pop_i()?; self.push_b(a >= b)?; }
            // Logical
            OpCode::And => { let b = self.pop_b()?; let a = self.pop_b()?; self.push_b(a && b)?; }
            OpCode::Or => { let b = self.pop_b()?; let a = self.pop_b()?; self.push_b(a || b)?; }
            OpCode::Not => { let v = self.pop_b()?; self.push_b(!v)?; }
            // Casts (`f32 as i32` saturates in Rust, so it cannot panic)
            OpCode::F32ToI32 => { let v = self.pop_f()?; self.push_i(v as i32)?; }
            OpCode::I32ToF32 => { let v = self.pop_i()?; self.push_f(v as f32)?; }
            // Control flow — a target past the end of the bytecode simply
            // terminates the caller's execute() loop.
            OpCode::Jump => {
                *pc = self.read_u32(pc) as usize;
            }
            OpCode::JumpIfFalse => {
                let target = self.read_u32(pc) as usize;
                let cond = self.pop_b()?;
                if !cond {
                    *pc = target;
                }
            }
            // Math builtins
            OpCode::Sin => { let v = self.pop_f()?; self.push_f(v.sin())?; }
            OpCode::Cos => { let v = self.pop_f()?; self.push_f(v.cos())?; }
            OpCode::Tan => { let v = self.pop_f()?; self.push_f(v.tan())?; }
            OpCode::Asin => { let v = self.pop_f()?; self.push_f(v.asin())?; }
            OpCode::Acos => { let v = self.pop_f()?; self.push_f(v.acos())?; }
            OpCode::Atan => { let v = self.pop_f()?; self.push_f(v.atan())?; }
            OpCode::Atan2 => { let x = self.pop_f()?; let y = self.pop_f()?; self.push_f(y.atan2(x))?; }
            OpCode::Exp => { let v = self.pop_f()?; self.push_f(v.exp())?; }
            OpCode::Log => { let v = self.pop_f()?; self.push_f(v.ln())?; }
            OpCode::Log2 => { let v = self.pop_f()?; self.push_f(v.log2())?; }
            OpCode::Pow => { let e = self.pop_f()?; let b = self.pop_f()?; self.push_f(b.powf(e))?; }
            OpCode::Sqrt => { let v = self.pop_f()?; self.push_f(v.sqrt())?; }
            OpCode::Floor => { let v = self.pop_f()?; self.push_f(v.floor())?; }
            OpCode::Ceil => { let v = self.pop_f()?; self.push_f(v.ceil())?; }
            OpCode::Round => { let v = self.pop_f()?; self.push_f(v.round())?; }
            OpCode::Trunc => { let v = self.pop_f()?; self.push_f(v.trunc())?; }
            OpCode::Fract => { let v = self.pop_f()?; self.push_f(v.fract())?; }
            OpCode::Abs => { let v = self.pop_f()?; self.push_f(v.abs())?; }
            OpCode::Sign => { let v = self.pop_f()?; self.push_f(v.signum())?; }
            OpCode::Clamp => {
                let hi = self.pop_f()?;
                let lo = self.pop_f()?;
                let v = self.pop_f()?;
                self.push_f(v.clamp(lo, hi))?;
            }
            OpCode::Min => { let b = self.pop_f()?; let a = self.pop_f()?; self.push_f(a.min(b))?; }
            OpCode::Max => { let b = self.pop_f()?; let a = self.pop_f()?; self.push_f(a.max(b))?; }
            OpCode::Mix => {
                let t = self.pop_f()?;
                let b = self.pop_f()?;
                let a = self.pop_f()?;
                self.push_f(a + (b - a) * t)?;
            }
            OpCode::Smoothstep => {
                let x = self.pop_f()?;
                let e1 = self.pop_f()?;
                let e0 = self.pop_f()?;
                let t = ((x - e0) / (e1 - e0)).clamp(0.0, 1.0);
                self.push_f(t * t * (3.0 - 2.0 * t))?;
            }
            OpCode::IsNan => { let v = self.pop_f()?; self.push_b(v.is_nan())?; }
            // VM-specific opcodes — caller must handle
            other => return Ok(StepResult::Unhandled(other)),
        }
        Ok(StepResult::Continue)
    }
    // Stack helpers
    #[inline]
    fn push(&mut self, v: Value) -> Result<(), ScriptError> {
        if self.sp >= STACK_SIZE {
            return Err(ScriptError::StackOverflow);
        }
        self.stack[self.sp] = v;
        self.sp += 1;
        Ok(())
    }
    #[inline]
    fn push_f(&mut self, v: f32) -> Result<(), ScriptError> { self.push(Value { f: v }) }
    #[inline]
    fn push_i(&mut self, v: i32) -> Result<(), ScriptError> { self.push(Value { i: v }) }
    #[inline]
    fn push_b(&mut self, v: bool) -> Result<(), ScriptError> { self.push(Value { b: v }) }
    #[inline]
    fn pop(&mut self) -> Result<Value, ScriptError> {
        if self.sp == 0 {
            return Err(ScriptError::StackUnderflow);
        }
        self.sp -= 1;
        Ok(self.stack[self.sp])
    }
    // SAFETY: the compiler emits matching push/pop field types for every
    // stack slot; reading the wrong field would still be a defined bit
    // reinterpretation for f32/i32 (bool relies on the 0/1 invariant).
    #[inline]
    fn pop_f(&mut self) -> Result<f32, ScriptError> { Ok(unsafe { self.pop()?.f }) }
    #[inline]
    fn pop_i(&mut self) -> Result<i32, ScriptError> { Ok(unsafe { self.pop()?.i }) }
    #[inline]
    fn pop_b(&mut self) -> Result<bool, ScriptError> { Ok(unsafe { self.pop()?.b }) }
    /// Read a little-endian u16 operand and advance `pc` past it.
    #[inline]
    fn read_u16(&self, pc: &mut usize) -> u16 {
        let v = u16::from_le_bytes([self.bytecode[*pc], self.bytecode[*pc + 1]]);
        *pc += 2;
        v
    }
    /// Read a little-endian u32 operand and advance `pc` past it.
    #[inline]
    fn read_u32(&self, pc: &mut usize) -> u32 {
        let v = u32::from_le_bytes([
            self.bytecode[*pc], self.bytecode[*pc + 1],
            self.bytecode[*pc + 2], self.bytecode[*pc + 3],
        ]);
        *pc += 4;
        v
    }
}
// ---- ScriptVM (runs on audio thread) ----
/// The BeamDSP virtual machine
///
/// Wraps a [`VmCore`] and adds the audio-specific capabilities: per-port
/// input/output buffers and loaded sample slots.
#[derive(Clone)]
pub struct ScriptVM {
    core: VmCore,
    /// Loaded samples, addressed by the slot index embedded in the bytecode.
    pub sample_slots: Vec<SampleSlot>,
}
impl ScriptVM {
    /// Build a VM for one compiled script.
    ///
    /// The storage sizes (`num_params`, `num_state_scalars`,
    /// `state_array_sizes`, `num_sample_slots`) come from the compiler's
    /// layout of the script; sample slots start empty.
    pub fn new(
        bytecode: Vec<u8>,
        constants_f32: Vec<f32>,
        constants_i32: Vec<i32>,
        num_params: usize,
        param_defaults: &[f32],
        num_state_scalars: usize,
        state_array_sizes: &[usize],
        num_sample_slots: usize,
    ) -> Self {
        Self {
            core: VmCore::new(
                bytecode, constants_f32, constants_i32,
                num_params, param_defaults, num_state_scalars, state_array_sizes,
                DEFAULT_INSTRUCTION_LIMIT,
            ),
            sample_slots: (0..num_sample_slots).map(|_| SampleSlot::default()).collect(),
        }
    }
    /// Access params for reading
    pub fn params(&self) -> &[f32] {
        &self.core.params
    }
    /// Access params mutably (backend sets values from parameter changes)
    pub fn params_mut(&mut self) -> &mut Vec<f32> {
        &mut self.core.params
    }
    /// Reset all state (scalars + arrays) to zero. Called on node reset.
    pub fn reset_state(&mut self) {
        for s in &mut self.core.state_scalars {
            *s = Value::default();
        }
        for arr in &mut self.core.state_arrays {
            arr.fill(0.0);
        }
    }
    /// Execute the bytecode with the given I/O buffers
    ///
    /// Out-of-range port/index reads yield 0.0 and out-of-range writes are
    /// dropped, so a misbehaving script cannot touch memory outside its
    /// buffers. Errors with `ExecutionLimitExceeded` if the instruction
    /// budget runs out, or `InvalidOpcode` for draw/mouse opcodes, which
    /// are not valid on the audio thread.
    pub fn execute(
        &mut self,
        inputs: &[&[f32]],
        outputs: &mut [&mut [f32]],
        sample_rate: u32,
        buffer_size: usize,
    ) -> Result<(), ScriptError> {
        self.core.reset_frame();
        let mut pc: usize = 0;
        let mut ic: u64 = 0;
        let limit = self.core.instruction_limit;
        while pc < self.core.bytecode.len() {
            ic += 1;
            if ic > limit {
                return Err(ScriptError::ExecutionLimitExceeded);
            }
            match self.core.step(&mut pc)? {
                StepResult::Continue => {}
                StepResult::Halt => return Ok(()),
                StepResult::Unhandled(opcode) => {
                    match opcode {
                        // Input buffers: push inputs[port][idx], or 0.0 when
                        // either the port or the index is out of range.
                        OpCode::LoadInput => {
                            let port = self.core.bytecode[pc] as usize;
                            pc += 1;
                            let idx = unsafe { self.core.pop()?.i } as usize;
                            let val = if port < inputs.len() && idx < inputs[port].len() {
                                inputs[port][idx]
                            } else {
                                0.0
                            };
                            self.core.push_f(val)?;
                        }
                        // Output buffers: pops value then index; silently
                        // drops out-of-range writes.
                        OpCode::StoreOutput => {
                            let port = self.core.bytecode[pc] as usize;
                            pc += 1;
                            let val = unsafe { self.core.pop()?.f };
                            let idx = unsafe { self.core.pop()?.i } as usize;
                            if port < outputs.len() && idx < outputs[port].len() {
                                outputs[port][idx] = val;
                            }
                        }
                        // Sample access (slot index is an inline bytecode byte)
                        OpCode::SampleLen => {
                            let slot = self.core.bytecode[pc] as usize;
                            pc += 1;
                            let len = if slot < self.sample_slots.len() {
                                self.sample_slots[slot].frame_count as i32
                            } else {
                                0
                            };
                            self.core.push_i(len)?;
                        }
                        OpCode::SampleRead => {
                            let slot = self.core.bytecode[pc] as usize;
                            pc += 1;
                            let idx = unsafe { self.core.pop()?.i } as usize;
                            let val = if slot < self.sample_slots.len() && idx < self.sample_slots[slot].data.len() {
                                self.sample_slots[slot].data[idx]
                            } else {
                                0.0
                            };
                            self.core.push_f(val)?;
                        }
                        OpCode::SampleRateOf => {
                            let slot = self.core.bytecode[pc] as usize;
                            pc += 1;
                            let sr = if slot < self.sample_slots.len() {
                                self.sample_slots[slot].sample_rate as i32
                            } else {
                                0
                            };
                            self.core.push_i(sr)?;
                        }
                        // Built-in constants supplied by the host each call
                        OpCode::LoadSampleRate => {
                            self.core.push_i(sample_rate as i32)?;
                        }
                        OpCode::LoadBufferSize => {
                            self.core.push_i(buffer_size as i32)?;
                        }
                        // Draw/mouse opcodes are not valid in the audio ScriptVM
                        _ => {
                            return Err(ScriptError::InvalidOpcode(opcode as u8));
                        }
                    }
                }
            }
        }
        Ok(())
    }
}
// ---- Draw VM (runs on UI thread, produces draw commands) ----
/// A draw command produced by the draw block
///
/// NOTE(review): `color` is a packed 32-bit value pushed by the script; the
/// channel order is defined by whatever renderer consumes these commands —
/// confirm there before documenting it here.
#[derive(Debug, Clone)]
pub enum DrawCommand {
    FillCircle { cx: f32, cy: f32, r: f32, color: u32 },
    StrokeCircle { cx: f32, cy: f32, r: f32, color: u32, width: f32 },
    StrokeArc { cx: f32, cy: f32, r: f32, start_deg: f32, end_deg: f32, color: u32, width: f32 },
    Line { x1: f32, y1: f32, x2: f32, y2: f32, color: u32, width: f32 },
    FillRect { x: f32, y: f32, w: f32, h: f32, color: u32 },
    StrokeRect { x: f32, y: f32, w: f32, h: f32, color: u32, width: f32 },
}
/// Mouse state passed to the draw VM each frame
#[derive(Debug, Clone, Default)]
pub struct MouseState {
    /// Cursor x position (exposed to scripts via MouseX).
    pub x: f32,
    /// Cursor y position (exposed to scripts via MouseY).
    pub y: f32,
    // Presumably the primary button — confirm against the UI event source.
    pub down: bool,
}
/// Lightweight VM for executing draw bytecode on the UI thread
#[derive(Clone)]
pub struct DrawVM {
    core: VmCore,
    /// Commands emitted by the last execute() call (cleared at its start).
    pub draw_commands: Vec<DrawCommand>,
    /// Mouse state for the current frame; set by the host before execute().
    pub mouse: MouseState,
}
impl DrawVM {
    /// Build a draw VM for a script's draw block.
    ///
    /// Uses a lower instruction limit than the audio VM because the draw
    /// block runs once per UI frame.
    pub fn new(
        bytecode: Vec<u8>,
        constants_f32: Vec<f32>,
        constants_i32: Vec<i32>,
        num_params: usize,
        param_defaults: &[f32],
        num_state_scalars: usize,
        state_array_sizes: &[usize],
    ) -> Self {
        Self {
            core: VmCore::new(
                bytecode, constants_f32, constants_i32,
                num_params, param_defaults, num_state_scalars, state_array_sizes,
                1_000_000, // lower limit for draw (runs per frame)
            ),
            draw_commands: Vec::new(),
            mouse: MouseState::default(),
        }
    }
    /// Access params for reading/writing from the editor
    pub fn params(&self) -> &[f32] {
        &self.core.params
    }
    /// Access params mutably (editor sets values from node inputs each frame)
    pub fn params_mut(&mut self) -> &mut Vec<f32> {
        &mut self.core.params
    }
    /// Check if bytecode is non-empty
    pub fn has_bytecode(&self) -> bool {
        !self.core.bytecode.is_empty()
    }
    /// Execute the draw bytecode. Call once per frame.
    /// Draw commands accumulate in `self.draw_commands` (cleared at start).
    ///
    /// Draw-command operands are popped in reverse push order (last pushed
    /// argument first).
    pub fn execute(&mut self) -> Result<(), ScriptError> {
        self.core.reset_frame();
        self.draw_commands.clear();
        let mut pc: usize = 0;
        let mut ic: u64 = 0;
        let limit = self.core.instruction_limit;
        while pc < self.core.bytecode.len() {
            ic += 1;
            if ic > limit {
                return Err(ScriptError::ExecutionLimitExceeded);
            }
            match self.core.step(&mut pc)? {
                StepResult::Continue => {}
                StepResult::Halt => return Ok(()),
                StepResult::Unhandled(opcode) => {
                    match opcode {
                        // Draw commands
                        OpCode::DrawFillCircle => {
                            let color = self.core.pop_i()? as u32;
                            let r = self.core.pop_f()?;
                            let cy = self.core.pop_f()?;
                            let cx = self.core.pop_f()?;
                            self.draw_commands.push(DrawCommand::FillCircle { cx, cy, r, color });
                        }
                        OpCode::DrawStrokeCircle => {
                            let width = self.core.pop_f()?;
                            let color = self.core.pop_i()? as u32;
                            let r = self.core.pop_f()?;
                            let cy = self.core.pop_f()?;
                            let cx = self.core.pop_f()?;
                            self.draw_commands.push(DrawCommand::StrokeCircle { cx, cy, r, color, width });
                        }
                        OpCode::DrawStrokeArc => {
                            let width = self.core.pop_f()?;
                            let color = self.core.pop_i()? as u32;
                            let end_deg = self.core.pop_f()?;
                            let start_deg = self.core.pop_f()?;
                            let r = self.core.pop_f()?;
                            let cy = self.core.pop_f()?;
                            let cx = self.core.pop_f()?;
                            self.draw_commands.push(DrawCommand::StrokeArc { cx, cy, r, start_deg, end_deg, color, width });
                        }
                        OpCode::DrawLine => {
                            let width = self.core.pop_f()?;
                            let color = self.core.pop_i()? as u32;
                            let y2 = self.core.pop_f()?;
                            let x2 = self.core.pop_f()?;
                            let y1 = self.core.pop_f()?;
                            let x1 = self.core.pop_f()?;
                            self.draw_commands.push(DrawCommand::Line { x1, y1, x2, y2, color, width });
                        }
                        OpCode::DrawFillRect => {
                            let color = self.core.pop_i()? as u32;
                            let h = self.core.pop_f()?;
                            let w = self.core.pop_f()?;
                            let y = self.core.pop_f()?;
                            let x = self.core.pop_f()?;
                            self.draw_commands.push(DrawCommand::FillRect { x, y, w, h, color });
                        }
                        OpCode::DrawStrokeRect => {
                            let width = self.core.pop_f()?;
                            let color = self.core.pop_i()? as u32;
                            let h = self.core.pop_f()?;
                            let w = self.core.pop_f()?;
                            let y = self.core.pop_f()?;
                            let x = self.core.pop_f()?;
                            self.draw_commands.push(DrawCommand::StrokeRect { x, y, w, h, color, width });
                        }
                        // Mouse input
                        OpCode::MouseX => { self.core.push_f(self.mouse.x)?; }
                        OpCode::MouseY => { self.core.push_f(self.mouse.y)?; }
                        OpCode::MouseDown => { self.core.push_f(if self.mouse.down { 1.0 } else { 0.0 })?; }
                        // Param write
                        OpCode::StoreParam => {
                            let idx = self.core.read_u16(&mut pc) as usize;
                            let val = self.core.pop_f()?;
                            if idx < self.core.params.len() {
                                self.core.params[idx] = val;
                            }
                        }
                        // Sample access not available in draw context.
                        // SampleLen / SampleRateOf take no stack operands:
                        // skip the inline slot byte and produce 0.
                        OpCode::SampleLen | OpCode::SampleRateOf => {
                            pc += 1; // skip slot byte
                            self.core.push_i(0)?;
                        }
                        // SampleRead pops an index operand from the stack.
                        // It was previously grouped with the two opcodes
                        // above and never popped it, leaking one stack slot
                        // per call (eventually a StackOverflow, and
                        // misaligned operands for subsequent opcodes).
                        // Discard the index, then produce silence.
                        OpCode::SampleRead => {
                            pc += 1; // skip slot byte
                            self.core.pop()?;
                            self.core.push_f(0.0)?;
                        }
                        // Audio I/O not available in draw context
                        _ => {
                            return Err(ScriptError::InvalidOpcode(opcode as u8));
                        }
                    }
                }
            }
        }
        Ok(())
    }
}

View File

@ -0,0 +1,27 @@
#!/bin/bash
# Build script for static FFmpeg linking
#
# Produces a release build of lightningbeam-editor with the codec libraries
# (x264/x265/vpx/mp3lame) linked statically from /opt/ffmpeg-static.
set -e
# Point pkg-config to our static FFmpeg build
export PKG_CONFIG_PATH="/opt/ffmpeg-static/lib/pkgconfig:${PKG_CONFIG_PATH}"
# Tell pkg-config to use static linking
export PKG_CONFIG_ALL_STATIC=1
# Force static linking of codec libraries (and link required C++ and NUMA libraries)
# The -Wl,-Bstatic ... -Wl,-Bdynamic pair brackets the codec libs so only
# they are linked statically; libstdc++ and libnuma stay dynamic.
export RUSTFLAGS="-C prefer-dynamic=no -C link-arg=-L/usr/lib/x86_64-linux-gnu -C link-arg=-Wl,-Bstatic -C link-arg=-lx264 -C link-arg=-lx265 -C link-arg=-lvpx -C link-arg=-lmp3lame -C link-arg=-Wl,-Bdynamic -C link-arg=-lstdc++ -C link-arg=-lnuma"
# Build with static features
echo "Building with static FFmpeg from /opt/ffmpeg-static..."
echo "PKG_CONFIG_PATH=$PKG_CONFIG_PATH"
echo "PKG_CONFIG_ALL_STATIC=$PKG_CONFIG_ALL_STATIC"
cargo build --release
echo ""
echo "Build complete! Binary at: target/release/lightningbeam-editor"
echo ""
echo "To verify static linking, run:"
echo " ldd target/release/lightningbeam-editor | grep -E '(ffmpeg|avcodec|avformat|x264|x265|vpx)'"
echo "(Should show no ffmpeg or codec libraries if fully static)"

View File

@ -0,0 +1,32 @@
@echo off
REM Build script for Windows
REM Requires: FFmpeg 8.0.0 dev files in C:\ffmpeg, LLVM installed, VS Build Tools
REM Extra arguments are forwarded to cargo (e.g. --release).
REM Set up MSVC environment
call "C:\Program Files (x86)\Microsoft Visual Studio\18\BuildTools\VC\Auxiliary\Build\vcvarsall.bat" x64
REM FFmpeg location (headers + libs + DLLs); honors a pre-set FFMPEG_DIR
if not defined FFMPEG_DIR set FFMPEG_DIR=C:\ffmpeg
REM LLVM/libclang for bindgen (ffmpeg-sys-next)
if not defined LIBCLANG_PATH set LIBCLANG_PATH=C:\Program Files\LLVM\bin
REM Validate prerequisites before invoking cargo so failures are actionable
if not exist "%FFMPEG_DIR%\include\libavcodec\avcodec.h" (
echo ERROR: FFmpeg dev files not found at %FFMPEG_DIR%
echo Download FFmpeg 8.0.0 shared+dev from https://github.com/GyanD/codexffmpeg/releases
echo and extract to %FFMPEG_DIR%
exit /b 1
)
if not exist "%LIBCLANG_PATH%\libclang.dll" (
echo ERROR: LLVM/libclang not found at %LIBCLANG_PATH%
echo Install with: winget install LLVM.LLVM
exit /b 1
)
echo Building Lightningbeam Editor...
echo FFMPEG_DIR=%FFMPEG_DIR%
echo LIBCLANG_PATH=%LIBCLANG_PATH%
cargo build --package lightningbeam-editor %*

View File

@ -0,0 +1,21 @@
[package]
name = "egui_node_graph2"
description = "A helper library to create interactive node graphs using egui"
homepage = "https://github.com/trevyn/egui_node_graph2"
repository = "https://github.com/trevyn/egui_node_graph2"
license = "MIT"
version = "0.7.0"
keywords = ["egui_node_graph", "ui", "egui", "graph", "node"]
edition = "2021"
readme = "../README.md"
workspace = ".."
[features]
# Opt-in (de)serialization of graph state via serde.
persistence = ["serde", "slotmap/serde", "smallvec/serde", "egui/persistence"]
[dependencies]
egui = "0.33.3"
slotmap = { version = "1.0" }
smallvec = { version = "1.10.0" }
# serde is only pulled in when the "persistence" feature is enabled.
serde = { version = "1.0", optional = true, features = ["derive"] }
thiserror = "1.0"

View File

@ -0,0 +1,94 @@
use egui::Color32;
/// Converts a hex string with a leading '#' into a egui::Color32.
/// - The first three channels are interpreted as R, G, B.
/// - The fourth channel, if present, is used as the alpha value.
/// - Both upper and lowercase characters can be used for the hex values.
///
/// *Adapted from: https://docs.rs/raster/0.1.0/src/raster/lib.rs.html#425-725.
/// Credit goes to original authors.*
pub fn color_from_hex(hex: &str) -> Result<Color32, String> {
    // Parse one two-digit hex channel, e.g. "00" -> 0, "FF" -> 255.
    fn channel(two_digits: &str) -> Result<u8, String> {
        u8::from_str_radix(two_digits, 16).map_err(|e| format!("Error parsing hex: {}", e))
    }
    if let Some(body) = hex.strip_prefix('#') {
        if body.len() == 8 {
            // #FFFFFFFF (Red Green Blue Alpha)
            return Ok(Color32::from_rgba_premultiplied(
                channel(&body[0..2])?,
                channel(&body[2..4])?,
                channel(&body[4..6])?,
                channel(&body[6..8])?,
            ));
        }
        if body.len() == 6 {
            // #FFFFFF (Red Green Blue)
            return Ok(Color32::from_rgb(
                channel(&body[0..2])?,
                channel(&body[2..4])?,
                channel(&body[4..6])?,
            ));
        }
    }
    Err(format!(
        "Error parsing hex: {}. Example of valid formats: #FFFFFF or #ffffffff",
        hex
    ))
}
/// Converts a Color32 into its canonical hexadecimal representation.
/// - The color string will be preceded by '#'.
/// - If the alpha channel is completely opaque, it will be omitted.
/// - Characters from 'a' to 'f' will be written in lowercase.
#[allow(dead_code)]
pub fn color_to_hex(color: Color32) -> String {
    let (r, g, b, a) = (color.r(), color.g(), color.b(), color.a());
    if a == 255 {
        // Fully opaque: drop the alpha channel.
        format!("#{:02x}{:02x}{:02x}", r, g, b)
    } else {
        format!("#{:02x}{:02x}{:02x}{:02x}", r, g, b, a)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Round-trips both directions plus the error path for malformed input.
    #[test]
    pub fn test_color_from_and_to_hex() {
        assert_eq!(
            color_from_hex("#00ff00").unwrap(),
            Color32::from_rgb(0, 255, 0)
        );
        assert_eq!(
            color_from_hex("#5577AA").unwrap(),
            Color32::from_rgb(85, 119, 170)
        );
        // Mixed case and an explicit alpha channel.
        assert_eq!(
            color_from_hex("#E2e2e277").unwrap(),
            Color32::from_rgba_premultiplied(226, 226, 226, 119)
        );
        // Missing '#' prefix and non-hex characters must be rejected.
        assert!(color_from_hex("abcdefgh").is_err());
        assert_eq!(
            color_to_hex(Color32::from_rgb(0, 255, 0)),
            "#00ff00".to_string()
        );
        assert_eq!(
            color_to_hex(Color32::from_rgb(85, 119, 170)),
            "#5577aa".to_string()
        );
        assert_eq!(
            color_to_hex(Color32::from_rgba_premultiplied(226, 226, 226, 119)),
            "#e2e2e277".to_string()
        );
    }
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,10 @@
use super::*;
/// Errors returned by graph parameter lookups.
#[derive(Debug, thiserror::Error)]
pub enum EguiGraphError {
    /// The node exists but has no parameter with the given name.
    #[error("Node {0:?} has no parameter named {1}")]
    NoParameterNamed(NodeId, String),
    /// The parameter id does not resolve to any input or output in the graph.
    #[error("Parameter {0:?} was not found in the graph.")]
    InvalidParameterId(AnyParameterId),
}

View File

@ -0,0 +1,95 @@
use std::num::NonZeroU32;
use super::*;
#[cfg(feature = "persistence")]
use serde::{Deserialize, Serialize};
/// A node inside the [`Graph`]. Nodes have input and output parameters, stored
/// as ids. They also contain a custom `NodeData` struct with whatever data the
/// user wants to store per-node.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "persistence", derive(Serialize, Deserialize))]
pub struct Node<NodeData> {
    /// This node's own id inside [`Graph::nodes`].
    pub id: NodeId,
    /// Human-readable title shown for the node.
    pub label: String,
    /// Named input parameters, in display order.
    pub inputs: Vec<(String, InputId)>,
    /// Named output parameters, in display order.
    pub outputs: Vec<(String, OutputId)>,
    /// User-supplied per-node payload.
    pub user_data: NodeData,
}
/// The three kinds of input params. These describe how the graph must behave
/// with respect to inline widgets and connections for this parameter.
#[derive(Debug, Clone, Copy)]
#[cfg_attr(feature = "persistence", derive(Serialize, Deserialize))]
pub enum InputParamKind {
    /// No constant value can be set. Only incoming connections can produce it
    ConnectionOnly,
    /// Only a constant value can be set. No incoming connections accepted.
    ConstantOnly,
    /// Both incoming connections and constants are accepted. Connections take
    /// precedence over the constant values.
    ConnectionOrConstant,
}
// Serde default for `shown_inline`: older serialized graphs that lack the
// field deserialize as if it were visible.
#[cfg(feature = "persistence")]
fn shown_inline_default() -> bool {
    true
}
/// An input parameter. Input parameters are inside a node, and represent data
/// that this node receives. Unlike their [`OutputParam`] counterparts, input
/// parameters also display an inline widget which allows setting its "value".
/// The `DataType` generic parameter is used to restrict the range of input
/// connections for this parameter, and the `ValueType` is use to represent the
/// data for the inline widget (i.e. constant) value.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "persistence", derive(Serialize, Deserialize))]
pub struct InputParam<DataType, ValueType> {
    pub id: InputId,
    /// The data type of this node. Used to determine incoming connections. This
    /// should always match the type of the InputParamValue, but the property is
    /// not actually enforced.
    pub typ: DataType,
    /// The constant value stored in this parameter.
    pub value: ValueType,
    /// The input kind. See [`InputParamKind`]
    pub kind: InputParamKind,
    /// Back-reference to the node containing this parameter.
    pub node: NodeId,
    /// How many connections can be made with this input. `None` means no limit.
    pub max_connections: Option<NonZeroU32>,
    /// When true, the node is shown inline inside the node graph.
    #[cfg_attr(feature = "persistence", serde(default = "shown_inline_default"))]
    pub shown_inline: bool,
}
/// An output parameter. Output parameters are inside a node, and represent the
/// data that the node produces. Output parameters can be linked to the input
/// parameters of other nodes. Unlike an [`InputParam`], output parameters
/// cannot have a constant inline value.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "persistence", derive(Serialize, Deserialize))]
pub struct OutputParam<DataType> {
    pub id: OutputId,
    /// Back-reference to the node containing this parameter.
    pub node: NodeId,
    /// The data type produced; used to restrict which inputs can connect.
    pub typ: DataType,
}
/// The graph, containing nodes, input parameters and output parameters. Because
/// graphs are full of self-referential structures, this type uses the `slotmap`
/// crate to represent all the inner references in the data.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "persistence", derive(Serialize, Deserialize))]
pub struct Graph<NodeData, DataType, ValueType> {
    /// The [`Node`]s of the graph
    pub nodes: SlotMap<NodeId, Node<NodeData>>,
    /// The [`InputParam`]s of the graph
    pub inputs: SlotMap<InputId, InputParam<DataType, ValueType>>,
    /// The [`OutputParam`]s of the graph
    pub outputs: SlotMap<OutputId, OutputParam<DataType>>,
    /// Connects the input of a node to the outputs of its predecessors that
    /// produce it (multiple entries for wide ports).
    pub connections: SecondaryMap<InputId, Vec<OutputId>>,
}

View File

@ -0,0 +1,292 @@
use std::num::NonZeroU32;
use super::*;
impl<NodeData, DataType, ValueType> Graph<NodeData, DataType, ValueType> {
pub fn new() -> Self {
Self {
nodes: SlotMap::default(),
inputs: SlotMap::default(),
outputs: SlotMap::default(),
connections: SecondaryMap::default(),
}
}
pub fn add_node(
&mut self,
label: String,
user_data: NodeData,
f: impl FnOnce(&mut Graph<NodeData, DataType, ValueType>, NodeId),
) -> NodeId {
let node_id = self.nodes.insert_with_key(|node_id| {
Node {
id: node_id,
label,
// These get filled in later by the user function
inputs: Vec::default(),
outputs: Vec::default(),
user_data,
}
});
f(self, node_id);
node_id
}
#[allow(clippy::too_many_arguments)]
pub fn add_wide_input_param(
&mut self,
node_id: NodeId,
name: String,
typ: DataType,
value: ValueType,
kind: InputParamKind,
max_connections: Option<NonZeroU32>,
shown_inline: bool,
) -> InputId {
let input_id = self.inputs.insert_with_key(|input_id| InputParam {
id: input_id,
typ,
value,
kind,
node: node_id,
max_connections,
shown_inline,
});
self.nodes[node_id].inputs.push((name, input_id));
input_id
}
pub fn add_input_param(
&mut self,
node_id: NodeId,
name: String,
typ: DataType,
value: ValueType,
kind: InputParamKind,
shown_inline: bool,
) -> InputId {
self.add_wide_input_param(
node_id,
name,
typ,
value,
kind,
NonZeroU32::new(1),
shown_inline,
)
}
pub fn remove_input_param(&mut self, param: InputId) {
let node = self[param].node;
self[node].inputs.retain(|(_, id)| *id != param);
self.inputs.remove(param);
self.connections.retain(|i, _| i != param);
}
pub fn remove_output_param(&mut self, param: OutputId) {
let node = self[param].node;
self[node].outputs.retain(|(_, id)| *id != param);
self.outputs.remove(param);
for (_, conns) in &mut self.connections {
conns.retain(|o| *o != param);
}
}
pub fn add_output_param(&mut self, node_id: NodeId, name: String, typ: DataType) -> OutputId {
let output_id = self.outputs.insert_with_key(|output_id| OutputParam {
id: output_id,
node: node_id,
typ,
});
self.nodes[node_id].outputs.push((name, output_id));
output_id
}
/// Removes a node from the graph with given `node_id`. This also removes
/// any incoming or outgoing connections from that node
///
/// This function returns the list of connections that has been removed
/// after deleting this node as input-output pairs. Note that one of the two
/// ids in the pair (the one on `node_id`'s end) will be invalid after
/// calling this function.
pub fn remove_node(&mut self, node_id: NodeId) -> (Node<NodeData>, Vec<(InputId, OutputId)>) {
let mut disconnect_events = vec![];
for (i, conns) in &mut self.connections {
conns.retain(|o| {
if self.outputs[*o].node == node_id || self.inputs[i].node == node_id {
disconnect_events.push((i, *o));
false
} else {
true
}
});
}
// NOTE: Collect is needed because we can't borrow the input ids while
// we remove them inside the loop.
for input in self[node_id].input_ids().collect::<SVec<_>>() {
self.inputs.remove(input);
}
for output in self[node_id].output_ids().collect::<SVec<_>>() {
self.outputs.remove(output);
}
let removed_node = self.nodes.remove(node_id).expect("Node should exist");
(removed_node, disconnect_events)
}
pub fn remove_connection(&mut self, input_id: InputId, output_id: OutputId) -> bool {
self.connections
.get_mut(input_id)
.map(|conns| {
let old_size = conns.len();
conns.retain(|id| id != &output_id);
// connection removed if `conn` size changes
old_size != conns.len()
})
.unwrap_or(false)
}
pub fn iter_nodes(&self) -> impl Iterator<Item = NodeId> + '_ {
self.nodes.iter().map(|(id, _)| id)
}
/// Connects `output` to `input`, placing the connection at `pos` among the
/// input's existing connections.
///
/// Connecting twice to the same (input, output) pair is a no-op, even for
/// wide ports. When the input is already at its connection limit, the
/// connection at `pos` is replaced instead of inserted.
///
/// # Panics
/// Panics if `input` is not a valid id (via `get_input`), or if `pos` is out
/// of bounds for the replace/insert performed.
pub fn add_connection(&mut self, output: OutputId, input: InputId, pos: usize) {
    if !self.connections.contains_key(input) {
        self.connections.insert(input, Vec::default());
    }
    // `None` means "unlimited": use the full u32 range as the effective cap.
    // (`u32::MAX` replaces the soft-deprecated `std::u32::MAX` path.)
    let max_connections = self
        .get_input(input)
        .max_connections
        .map(NonZeroU32::get)
        .unwrap_or(u32::MAX) as usize;
    // connecting twice to the same port is a no-op
    // even for wide ports.
    if self.connections[input].contains(&output) {
        return;
    }
    // `>=` (rather than `==`) so an over-full list never grows further.
    if self.connections[input].len() >= max_connections {
        // if full, replace the connected output
        self.connections[input][pos] = output;
    } else {
        // otherwise, insert at a selected position
        self.connections[input].insert(pos, output);
    }
}
/// Iterates over `(input, outputs)` groups, cloning each output list.
pub fn iter_connection_groups(&self) -> impl Iterator<Item = (InputId, Vec<OutputId>)> + '_ {
    self.connections
        .iter()
        .map(|(input, outputs)| (input, outputs.to_vec()))
}
/// Flattens the connection groups into individual `(input, output)` pairs.
pub fn iter_connections(&self) -> impl Iterator<Item = (InputId, OutputId)> + '_ {
    self.iter_connection_groups().flat_map(|(input, outputs)| {
        outputs.into_iter().map(move |output| (input, output))
    })
}
/// Returns a copy of all outputs connected to `input` (empty if none).
pub fn connections(&self, input: InputId) -> Vec<OutputId> {
    match self.connections.get(input) {
        Some(outputs) => outputs.clone(),
        None => Vec::new(),
    }
}
/// Returns the single connection to `input`, but only when the port is
/// limited to at most one connection and exactly one is present.
pub fn connection(&self, input: InputId) -> Option<OutputId> {
    // Only ports with an explicit limit of 1 have a well-defined "the"
    // connection.
    if self.get_input(input).max_connections != NonZeroU32::new(1) {
        return None;
    }
    let mut outputs = self.connections(input);
    if outputs.len() == 1 {
        outputs.pop()
    } else {
        None
    }
}
/// Looks up the data type of either an input or an output parameter,
/// returning `InvalidParameterId` for stale ids.
pub fn any_param_type(&self, param: AnyParameterId) -> Result<&DataType, EguiGraphError> {
    let typ = match param {
        AnyParameterId::Input(input) => self.inputs.get(input).map(|p| &p.typ),
        AnyParameterId::Output(output) => self.outputs.get(output).map(|p| &p.typ),
    };
    typ.ok_or(EguiGraphError::InvalidParameterId(param))
}
/// Returns the input parameter for `input`, or `None` if the id is stale.
pub fn try_get_input(&self, input: InputId) -> Option<&InputParam<DataType, ValueType>> {
    self.inputs.get(input)
}
/// Returns the input parameter for `input`. Panics on a stale id.
pub fn get_input(&self, input: InputId) -> &InputParam<DataType, ValueType> {
    &self.inputs[input]
}
/// Returns the output parameter for `output`, or `None` if the id is stale.
pub fn try_get_output(&self, output: OutputId) -> Option<&OutputParam<DataType>> {
    self.outputs.get(output)
}
/// Returns the output parameter for `output`. Panics on a stale id.
pub fn get_output(&self, output: OutputId) -> &OutputParam<DataType> {
    &self.outputs[output]
}
}
/// `Graph::default()` is an empty graph, same as `Graph::new()`.
impl<NodeData, DataType, ValueType> Default for Graph<NodeData, DataType, ValueType> {
    fn default() -> Self {
        Self::new()
    }
}
impl<NodeData> Node<NodeData> {
    /// Resolves this node's input ids into parameter references via `graph`.
    pub fn inputs<'a, DataType, DataValue>(
        &'a self,
        graph: &'a Graph<NodeData, DataType, DataValue>,
    ) -> impl Iterator<Item = &'a InputParam<DataType, DataValue>> + 'a {
        self.input_ids().map(|input_id| graph.get_input(input_id))
    }
    /// Resolves this node's output ids into parameter references via `graph`.
    pub fn outputs<'a, DataType, DataValue>(
        &'a self,
        graph: &'a Graph<NodeData, DataType, DataValue>,
    ) -> impl Iterator<Item = &'a OutputParam<DataType>> + 'a {
        self.output_ids().map(|output_id| graph.get_output(output_id))
    }
    /// Iterates the ids of this node's inputs, in stored order.
    pub fn input_ids(&self) -> impl Iterator<Item = InputId> + '_ {
        self.inputs.iter().map(|(_, id)| *id)
    }
    /// Iterates the ids of this node's outputs, in stored order.
    pub fn output_ids(&self) -> impl Iterator<Item = OutputId> + '_ {
        self.outputs.iter().map(|(_, id)| *id)
    }
    /// Looks up an input parameter id by its name.
    pub fn get_input(&self, name: &str) -> Result<InputId, EguiGraphError> {
        self.inputs
            .iter()
            .find_map(|(param_name, id)| (param_name == name).then_some(*id))
            .ok_or_else(|| EguiGraphError::NoParameterNamed(self.id, name.into()))
    }
    /// Looks up an output parameter id by its name.
    pub fn get_output(&self, name: &str) -> Result<OutputId, EguiGraphError> {
        self.outputs
            .iter()
            .find_map(|(param_name, id)| (param_name == name).then_some(*id))
            .ok_or_else(|| EguiGraphError::NoParameterNamed(self.id, name.into()))
    }
}
impl<DataType, ValueType> InputParam<DataType, ValueType> {
    /// The inline value stored for this input.
    pub fn value(&self) -> &ValueType {
        &self.value
    }
    /// How this input behaves (widget-only, connection-only, or both).
    pub fn kind(&self) -> InputParamKind {
        self.kind
    }
    /// The node this input parameter belongs to.
    pub fn node(&self) -> NodeId {
        self.node
    }
}

View File

@ -0,0 +1,37 @@
// Arena keys for the three kinds of graph entities. `new_key_type!` generates
// opaque, versioned slotmap keys, so stale ids are detected after deletion.
slotmap::new_key_type! { pub struct NodeId; }
slotmap::new_key_type! { pub struct InputId; }
slotmap::new_key_type! { pub struct OutputId; }
/// Either an input or an output parameter id.
#[cfg_attr(feature = "persistence", derive(serde::Serialize, serde::Deserialize))]
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub enum AnyParameterId {
    Input(InputId),
    Output(OutputId),
}
impl AnyParameterId {
    /// Returns the inner [`InputId`]; panics if this is an output id.
    pub fn assume_input(&self) -> InputId {
        match *self {
            AnyParameterId::Input(input) => input,
            AnyParameterId::Output(output) => panic!("{:?} is not an InputId", output),
        }
    }
    /// Returns the inner [`OutputId`]; panics if this is an input id.
    pub fn assume_output(&self) -> OutputId {
        match *self {
            AnyParameterId::Output(output) => output,
            AnyParameterId::Input(input) => panic!("{:?} is not an OutputId", input),
        }
    }
}
// Allow either id type to be passed where an `AnyParameterId` is expected.
impl From<OutputId> for AnyParameterId {
    fn from(output: OutputId) -> Self {
        Self::Output(output)
    }
}
impl From<InputId> for AnyParameterId {
    fn from(input: InputId) -> Self {
        Self::Input(input)
    }
}

View File

@ -0,0 +1,35 @@
use super::*;
// Generates `Index`/`IndexMut` impls for `Graph` keyed by one id type,
// delegating to the named arena field and panicking with a descriptive
// message when the id is stale (i.e. the value has been deleted).
macro_rules! impl_index_traits {
    ($id_type:ty, $output_type:ty, $arena:ident) => {
        impl<A, B, C> std::ops::Index<$id_type> for Graph<A, B, C> {
            type Output = $output_type;
            fn index(&self, index: $id_type) -> &Self::Output {
                self.$arena.get(index).unwrap_or_else(|| {
                    panic!(
                        "{} index error for {:?}. Has the value been deleted?",
                        stringify!($id_type),
                        index
                    )
                })
            }
        }
        impl<A, B, C> std::ops::IndexMut<$id_type> for Graph<A, B, C> {
            fn index_mut(&mut self, index: $id_type) -> &mut Self::Output {
                self.$arena.get_mut(index).unwrap_or_else(|| {
                    panic!(
                        "{} index error for {:?}. Has the value been deleted?",
                        stringify!($id_type),
                        index
                    )
                })
            }
        }
    };
}
// A graph can be indexed directly by node, input, and output ids.
impl_index_traits!(NodeId, Node<A>, nodes);
impl_index_traits!(InputId, InputParam<B, C>, inputs);
impl_index_traits!(OutputId, OutputParam<B>, outputs);

View File

@ -0,0 +1,47 @@
#![forbid(unsafe_code)]
use slotmap::{SecondaryMap, SlotMap};
/// Small-size-optimized vector: up to 4 elements inline before heap spill.
pub type SVec<T> = smallvec::SmallVec<[T; 4]>;
/// Contains the main definitions for the node graph model.
pub mod graph;
pub use graph::*;
/// Type declarations for the different id types (node, input, output)
pub mod id_type;
pub use id_type::*;
/// Implements the index trait for the Graph type, allowing indexing by all
/// three id types
pub mod index_impls;
/// Implementing the main methods for the `Graph`
pub mod graph_impls;
/// Custom error types, crate-wide
pub mod error;
pub use error::*;
/// The main struct in the library, contains all the necessary state to draw the
/// UI graph
pub mod ui_state;
pub use ui_state::*;
/// The node finder is a tiny widget allowing to create new node types
pub mod node_finder;
pub use node_finder::*;
/// The inner details of the egui implementation. Most egui code lives here.
pub mod editor_ui;
pub use editor_ui::*;
/// Several traits that must be implemented by the user to customize the
/// behavior of this library.
pub mod traits;
pub use traits::*;
// Internal helpers (not part of the public API).
mod utils;
// Hex string -> egui::Color32 conversion helpers.
mod color_hex_utils;
// Uniform scaling of egui styles, used for editor zoom.
mod scale;

View File

@ -0,0 +1,153 @@
use std::{collections::BTreeMap, marker::PhantomData};
use crate::{color_hex_utils::*, CategoryTrait, NodeTemplateIter, NodeTemplateTrait};
use egui::*;
#[derive(Clone)]
#[cfg_attr(feature = "persistence", derive(serde::Serialize, serde::Deserialize))]
pub struct NodeFinder<NodeTemplate> {
    /// Current contents of the search box.
    pub query: String,
    /// Reset every frame. When set, the node finder will be moved at that position
    pub position: Option<Pos2>,
    /// True only on the frame the finder was created; used to give the
    /// search box keyboard focus exactly once.
    pub just_spawned: bool,
    // Ties the struct to its template type without storing a value of it.
    _phantom: PhantomData<NodeTemplate>,
}
impl<NodeTemplate, NodeData, UserState, CategoryType> NodeFinder<NodeTemplate>
where
NodeTemplate:
NodeTemplateTrait<NodeData = NodeData, UserState = UserState, CategoryType = CategoryType>,
CategoryType: CategoryTrait,
{
pub fn new_at(pos: Pos2) -> Self {
NodeFinder {
query: "".into(),
position: Some(pos),
just_spawned: true,
_phantom: Default::default(),
}
}
/// Shows the node selector panel with a search bar. Returns whether a node
/// archetype was selected and, in that case, the finder should be hidden on
/// the next frame.
pub fn show(
    &mut self,
    ui: &mut Ui,
    all_kinds: impl NodeTemplateIter<Item = NodeTemplate>,
    user_state: &mut UserState,
) -> Option<NodeTemplate> {
    // Pick a palette that contrasts with the current egui theme.
    let background_color;
    let text_color;
    if ui.visuals().dark_mode {
        background_color = color_from_hex("#3f3f3f").unwrap();
        text_color = color_from_hex("#fefefe").unwrap();
    } else {
        background_color = color_from_hex("#fefefe").unwrap();
        text_color = color_from_hex("#3f3f3f").unwrap();
    }
    ui.visuals_mut().widgets.noninteractive.fg_stroke = Stroke::new(2.0, text_color);
    let frame = Frame::dark_canvas(ui.style())
        .fill(background_color)
        .inner_margin(vec2(5.0, 5.0));
    // The archetype that will be returned.
    let mut submitted_archetype = None;
    frame.show(ui, |ui| {
        ui.vertical(|ui| {
            let resp = ui.text_edit_singleline(&mut self.query);
            // Grab keyboard focus on the first frame only.
            if self.just_spawned {
                resp.request_focus();
                self.just_spawned = false;
            }
            // A changed query forces category headers open/closed below.
            let update_open = resp.changed();
            // Pressing Enter in the search box submits the first match.
            let mut query_submit = resp.lost_focus() && ui.input(|i| i.key_pressed(Key::Enter));
            let max_height = ui.input(|i| i.content_rect().height() * 0.5);
            let scroll_area_width = resp.rect.width() - 30.0;
            let all_kinds = all_kinds.all_kinds();
            // Group templates by category name (BTreeMap keeps categories
            // sorted); kinds without any category are listed flat afterwards.
            let mut categories: BTreeMap<String, Vec<&NodeTemplate>> = Default::default();
            let mut orphan_kinds = Vec::new();
            for kind in &all_kinds {
                let kind_categories = kind.node_finder_categories(user_state);
                if kind_categories.is_empty() {
                    orphan_kinds.push(kind);
                } else {
                    for category in kind_categories {
                        categories.entry(category.name()).or_default().push(kind);
                    }
                }
            }
            Frame::default()
                .inner_margin(vec2(10.0, 10.0))
                .show(ui, |ui| {
                    ScrollArea::vertical()
                        .max_height(max_height)
                        .show(ui, |ui| {
                            ui.set_width(scroll_area_width);
                            ui.set_min_height(1000.);
                            for (category, kinds) in categories {
                                // Case-insensitive substring filter on labels.
                                let mut filtered_kinds: Vec<_> = kinds
                                    .into_iter()
                                    .map(|kind| {
                                        let kind_name =
                                            kind.node_finder_label(user_state).to_string();
                                        (kind, kind_name)
                                    })
                                    .filter(|(_kind, kind_name)| {
                                        kind_name
                                            .to_lowercase()
                                            .contains(self.query.to_lowercase().as_str())
                                    })
                                    .collect();
                                filtered_kinds.sort_by(|a, b| a.1.cmp(&b.1));
                                if !filtered_kinds.is_empty() {
                                    let default_open = !self.query.is_empty();
                                    CollapsingHeader::new(&category)
                                        .default_open(default_open)
                                        .open(update_open.then_some(default_open))
                                        .show(ui, |ui| {
                                            for (kind, kind_name) in filtered_kinds {
                                                if ui
                                                    .selectable_label(false, kind_name)
                                                    .clicked()
                                                {
                                                    submitted_archetype = Some(kind.clone());
                                                } else if query_submit {
                                                    // Enter picks the first
                                                    // visible kind only.
                                                    submitted_archetype = Some(kind.clone());
                                                    query_submit = false;
                                                }
                                            }
                                        });
                                }
                            }
                            // NOTE(review): orphan kinds are NOT filtered by
                            // the query, unlike categorized kinds — confirm
                            // whether that asymmetry is intentional.
                            for kind in orphan_kinds {
                                let kind_name = kind.node_finder_label(user_state).to_string();
                                if ui.selectable_label(false, kind_name).clicked() {
                                    submitted_archetype = Some(kind.clone());
                                } else if query_submit {
                                    submitted_archetype = Some(kind.clone());
                                    query_submit = false;
                                }
                            }
                        });
                });
        });
    });
    submitted_archetype
}
}

View File

@ -0,0 +1,109 @@
use egui::epaint::Shadow;
use egui::{style::WidgetVisuals, CornerRadius, Margin, Stroke, Style, Vec2};
// Copied from https://github.com/gzp-crey/shine
/// In-place uniform scaling, used to zoom egui styles and visuals.
pub trait Scale {
    /// Scales `self` by `amount` in place.
    fn scale(&mut self, amount: f32);
    /// Returns a scaled copy, leaving `self` untouched.
    fn scaled(&self, amount: f32) -> Self
    where
        Self: Clone,
    {
        let mut scaled = self.clone();
        scaled.scale(amount);
        scaled
    }
}
impl Scale for Vec2 {
fn scale(&mut self, amount: f32) {
self.x *= amount;
self.y *= amount;
}
}
impl Scale for Margin {
    /// Scales each side via an f32 round-trip. The `as i8` cast saturates at
    /// the i8 range, so extreme scale factors clamp rather than wrap.
    fn scale(&mut self, amount: f32) {
        self.left = (self.left as f32 * amount) as i8;
        self.right = (self.right as f32 * amount) as i8;
        self.top = (self.top as f32 * amount) as i8;
        self.bottom = (self.bottom as f32 * amount) as i8;
    }
}
impl Scale for CornerRadius {
    /// Scales each corner via an f32 round-trip; the `as u8` cast saturates
    /// at 0..=255, truncating fractional results.
    fn scale(&mut self, amount: f32) {
        self.ne = (self.ne as f32 * amount) as u8;
        self.nw = (self.nw as f32 * amount) as u8;
        self.se = (self.se as f32 * amount) as u8;
        self.sw = (self.sw as f32 * amount) as u8;
    }
}
impl Scale for Stroke {
    /// Scales only the stroke width; the color is left untouched.
    fn scale(&mut self, amount: f32) {
        self.width = self.width * amount;
    }
}
impl Scale for Shadow {
    /// Scales only the spread, with the factor clamped to [0.4, 1.0]:
    /// shadows never grow when zooming in and shrink at most to 40%.
    fn scale(&mut self, amount: f32) {
        self.spread = (self.spread as f32 * amount.clamp(0.4, 1.)) as u8;
    }
}
impl Scale for WidgetVisuals {
    /// Scales strokes and corner rounding; `expansion` uses the same clamped
    /// factor as shadows, so it never grows past its base value.
    fn scale(&mut self, amount: f32) {
        self.bg_stroke.scale(amount);
        self.fg_stroke.scale(amount);
        self.corner_radius.scale(amount);
        self.expansion *= amount.clamp(0.4, 1.);
    }
}
impl Scale for Style {
    /// Scales every size-like style parameter uniformly; this is what
    /// implements editor zoom for egui styles.
    fn scale(&mut self, amount: f32) {
        // Fonts.
        if let Some(ov_font_id) = &mut self.override_font_id {
            ov_font_id.size *= amount;
        }
        for text_style in self.text_styles.values_mut() {
            text_style.size *= amount;
        }
        // Spacing and widget sizing.
        self.spacing.item_spacing.scale(amount);
        self.spacing.window_margin.scale(amount);
        self.spacing.button_padding.scale(amount);
        self.spacing.indent *= amount;
        self.spacing.interact_size.scale(amount);
        self.spacing.slider_width *= amount;
        self.spacing.text_edit_width *= amount;
        self.spacing.icon_width *= amount;
        self.spacing.icon_width_inner *= amount;
        self.spacing.icon_spacing *= amount;
        self.spacing.tooltip_width *= amount;
        self.spacing.combo_height *= amount;
        self.spacing.scroll.bar_width *= amount;
        self.spacing.scroll.floating_allocated_width *= amount;
        self.spacing.scroll.floating_width *= amount;
        // Interaction radii.
        self.interaction.resize_grab_radius_side *= amount;
        self.interaction.resize_grab_radius_corner *= amount;
        // Visuals (per widget state, plus selection/window chrome).
        self.visuals.widgets.noninteractive.scale(amount);
        self.visuals.widgets.inactive.scale(amount);
        self.visuals.widgets.hovered.scale(amount);
        self.visuals.widgets.active.scale(amount);
        self.visuals.widgets.open.scale(amount);
        self.visuals.selection.stroke.scale(amount);
        self.visuals.resize_corner_size *= amount;
        self.visuals.text_cursor.stroke.width *= amount;
        self.visuals.clip_rect_margin *= amount;
        self.visuals.window_corner_radius.scale(amount);
        self.visuals.window_shadow.scale(amount);
        self.visuals.popup_shadow.scale(amount);
    }
}

View File

@ -0,0 +1,284 @@
use super::*;
/// This trait must be implemented by the `ValueType` generic parameter of the
/// [`Graph`]. The trait allows drawing custom inline widgets for the different
/// types of the node graph.
///
/// The [`Default`] trait bound is required to circumvent borrow checker issues
/// using `std::mem::take` Otherwise, it would be impossible to pass the
/// `node_data` parameter during `value_widget`. The default value is never
/// used, so the implementation is not important, but it should be reasonably
/// cheap to construct.
pub trait WidgetValueTrait: Default {
    /// Custom response type returned by the value widgets.
    type Response;
    /// The user state type threaded through the editor.
    type UserState;
    /// The per-node user data type.
    type NodeData;
    /// This method will be called for each input parameter with a widget with a disconnected
    /// input only. To display UI for connected inputs use [`WidgetValueTrait::value_widget_connected`].
    /// The return value is a vector of custom response objects which can be used
    /// to implement handling of side effects. If unsure, the response Vec can
    /// be empty.
    fn value_widget(
        &mut self,
        param_name: &str,
        node_id: NodeId,
        ui: &mut egui::Ui,
        user_state: &mut Self::UserState,
        node_data: &Self::NodeData,
    ) -> Vec<Self::Response>;
    /// This method will be called for each input parameter with a widget with a connected
    /// input only. To display UI for disconnected inputs use [`WidgetValueTrait::value_widget`].
    /// The return value is a vector of custom response objects which can be used
    /// to implement handling of side effects. If unsure, the response Vec can
    /// be empty.
    ///
    /// Shows the input name label by default.
    fn value_widget_connected(
        &mut self,
        param_name: &str,
        _node_id: NodeId,
        ui: &mut egui::Ui,
        _user_state: &mut Self::UserState,
        _node_data: &Self::NodeData,
    ) -> Vec<Self::Response> {
        ui.label(param_name);
        Default::default()
    }
}
/// This trait must be implemented by the `DataType` generic parameter of the
/// [`Graph`]. This trait tells the library how to visually expose data types
/// to the user.
pub trait DataTypeTrait<UserState>: PartialEq + Eq {
    /// The associated port color of this datatype.
    fn data_type_color(&self, user_state: &mut UserState) -> egui::Color32;
    /// The name of this datatype. Return type is specified as Cow<str> because
    /// some implementations will need to allocate a new string to provide an
    /// answer while others won't.
    ///
    /// ## Example (borrowed value)
    /// Use this when you can get the name of the datatype from its fields or as
    /// a &'static str. Prefer this method when possible.
    /// ```ignore
    /// pub struct DataType { name: String }
    ///
    /// impl DataTypeTrait<()> for DataType {
    ///     fn name(&self) -> std::borrow::Cow<str> {
    ///         Cow::Borrowed(&self.name)
    ///     }
    /// }
    /// ```
    ///
    /// ## Example (owned value)
    /// Use this when you can't derive the name of the datatype from its fields.
    /// ```ignore
    /// pub struct DataType { some_tag: i32 }
    ///
    /// impl DataTypeTrait<()> for DataType {
    ///     fn name(&self) -> std::borrow::Cow<str> {
    ///         Cow::Owned(format!("Super amazing type #{}", self.some_tag))
    ///     }
    /// }
    /// ```
    fn name(&self) -> std::borrow::Cow<'_, str>;
}
/// This trait must be implemented for the `NodeData` generic parameter of the
/// [`Graph`]. This trait allows customizing some aspects of the node drawing.
pub trait NodeDataTrait
where
    Self: Sized,
{
    /// Must be set to the custom user `NodeResponse` type
    type Response;
    /// Must be set to the custom user `UserState` type
    type UserState;
    /// Must be set to the custom user `DataType` type
    type DataType;
    /// Must be set to the custom user `ValueType` type
    type ValueType;
    /// Additional UI elements to draw in the nodes, after the parameters.
    fn bottom_ui(
        &self,
        ui: &mut egui::Ui,
        node_id: NodeId,
        graph: &Graph<Self, Self::DataType, Self::ValueType>,
        user_state: &mut Self::UserState,
    ) -> Vec<NodeResponse<Self::Response, Self>>
    where
        Self::Response: UserResponseTrait;
    /// UI to draw on the top bar of the node.
    ///
    /// Default implementation draws nothing and returns no responses.
    fn top_bar_ui(
        &self,
        _ui: &mut egui::Ui,
        _node_id: NodeId,
        _graph: &Graph<Self, Self::DataType, Self::ValueType>,
        _user_state: &mut Self::UserState,
    ) -> Vec<NodeResponse<Self::Response, Self>>
    where
        Self::Response: UserResponseTrait,
    {
        Default::default()
    }
    /// UI to draw for each output
    ///
    /// Defaults to showing param_name as a simple label.
    fn output_ui(
        &self,
        ui: &mut egui::Ui,
        _node_id: NodeId,
        _graph: &Graph<Self, Self::DataType, Self::ValueType>,
        _user_state: &mut Self::UserState,
        param_name: &str,
    ) -> Vec<NodeResponse<Self::Response, Self>>
    where
        Self::Response: UserResponseTrait,
    {
        ui.label(param_name);
        Default::default()
    }
    /// Set background color on titlebar
    /// If the return value is None, the default color is set.
    fn titlebar_color(
        &self,
        _ui: &egui::Ui,
        _node_id: NodeId,
        _graph: &Graph<Self, Self::DataType, Self::ValueType>,
        _user_state: &mut Self::UserState,
    ) -> Option<egui::Color32> {
        None
    }
    /// Separator to put between elements in the node.
    ///
    /// Invoked between inputs, outputs and bottom UI. Useful for
    /// complicated UIs that start to lose structure without explicit
    /// separators. The `param_id` argument is the id of input or output
    /// *preceding* the separator.
    ///
    /// Default implementation does nothing.
    fn separator(
        &self,
        _ui: &mut egui::Ui,
        _node_id: NodeId,
        _param_id: AnyParameterId,
        _graph: &Graph<Self, Self::DataType, Self::ValueType>,
        _user_state: &mut Self::UserState,
    ) {
    }
    /// Whether this node may be deleted through the UI. Defaults to `true`.
    fn can_delete(
        &self,
        _node_id: NodeId,
        _graph: &Graph<Self, Self::DataType, Self::ValueType>,
        _user_state: &mut Self::UserState,
    ) -> bool {
        true
    }
}
/// This trait can be implemented by any user type. The trait tells the library
/// how to enumerate the node templates it will present to the user as part of
/// the node finder.
pub trait NodeTemplateIter {
    /// The node template type being enumerated.
    type Item;
    /// Returns every node template the finder should offer.
    fn all_kinds(&self) -> Vec<Self::Item>;
}
/// Describes a category of nodes.
///
/// Used by [`NodeTemplateTrait::node_finder_categories`] to categorize nodes
/// templates into groups.
///
/// If all nodes in a program are known beforehand, it's useful to define
/// an enum containing all categories and implement [`CategoryTrait`] for it. This will
/// make it impossible to accidentally create a new category by mis-typing an existing
/// one, like in the case of using string types.
pub trait CategoryTrait {
    /// Name of the category, as shown by the node finder's group headers.
    fn name(&self) -> String;
}
// `()` is the "no categories" placeholder: it produces an empty name.
impl CategoryTrait for () {
    fn name(&self) -> String {
        String::new()
    }
}
impl<'a> CategoryTrait for &'a str {
    fn name(&self) -> String {
        self.to_string()
    }
}
impl CategoryTrait for String {
    fn name(&self) -> String {
        self.clone()
    }
}
/// This trait must be implemented by the `NodeTemplate` generic parameter of
/// the [`GraphEditorState`]. It allows the customization of node templates. A
/// node template is what describes what kinds of nodes can be added to the
/// graph, what is their name, and what are their input / output parameters.
pub trait NodeTemplateTrait: Clone {
    /// Must be set to the custom user `NodeData` type
    type NodeData;
    /// Must be set to the custom user `DataType` type
    type DataType;
    /// Must be set to the custom user `ValueType` type
    type ValueType;
    /// Must be set to the custom user `UserState` type
    type UserState;
    /// Must be a type that implements the [`CategoryTrait`] trait.
    ///
    /// `&'static str` is a good default if you intend to simply type out
    /// the categories of your node. Use `()` if you don't need categories
    /// at all.
    type CategoryType;
    /// Returns a descriptive name for the node kind, used in the node finder.
    ///
    /// The return type is Cow<str> to allow returning owned or borrowed values
    /// more flexibly. Refer to the documentation for `DataTypeTrait::name` for
    /// more information
    fn node_finder_label(&self, user_state: &mut Self::UserState) -> std::borrow::Cow<'_, str>;
    /// Vec of categories to which the node belongs.
    ///
    /// It's often useful to organize similar nodes into categories, which will
    /// then be used by the node finder to show a more manageable UI, especially
    /// if the node templates are numerous. Defaults to no categories.
    fn node_finder_categories(&self, _user_state: &mut Self::UserState) -> Vec<Self::CategoryType> {
        Vec::default()
    }
    /// Returns a descriptive name for the node kind, used in the graph.
    fn node_graph_label(&self, user_state: &mut Self::UserState) -> String;
    /// Returns the user data for this node kind.
    fn user_data(&self, user_state: &mut Self::UserState) -> Self::NodeData;
    /// This function is run when this node kind gets added to the graph. The
    /// node will be empty by default, and this function can be used to fill its
    /// parameters.
    fn build_node(
        &self,
        graph: &mut Graph<Self::NodeData, Self::DataType, Self::ValueType>,
        user_state: &mut Self::UserState,
        node_id: NodeId,
    );
}
/// The custom user response types when drawing nodes in the graph must
/// implement this trait. Responses must be cloneable and debug-printable.
pub trait UserResponseTrait: Clone + std::fmt::Debug {}

View File

@ -0,0 +1,134 @@
use super::*;
use egui::{Rect, Style, Ui, Vec2};
use std::marker::PhantomData;
use std::sync::Arc;
use crate::scale::Scale;
#[cfg(feature = "persistence")]
use serde::{Deserialize, Serialize};
const MIN_ZOOM: f32 = 0.2;
const MAX_ZOOM: f32 = 2.0;
#[derive(Clone)]
#[cfg_attr(feature = "persistence", derive(Serialize, Deserialize))]
pub struct GraphEditorState<NodeData, DataType, ValueType, NodeTemplate, UserState> {
    /// The node graph model being edited.
    pub graph: Graph<NodeData, DataType, ValueType>,
    /// Nodes are drawn in this order. Draw order is important because nodes
    /// that are drawn last are on top.
    pub node_order: Vec<NodeId>,
    /// An ongoing connection interaction: The mouse has dragged away from a
    /// port and the user is holding the click
    pub connection_in_progress: Option<(NodeId, AnyParameterId)>,
    /// The currently selected node. Some interface actions depend on the
    /// currently selected node.
    pub selected_nodes: Vec<NodeId>,
    /// The mouse drag start position for an ongoing box selection.
    pub ongoing_box_selection: Option<egui::Pos2>,
    /// The position of each node.
    pub node_positions: SecondaryMap<NodeId, egui::Pos2>,
    /// The node finder is used to create new nodes.
    pub node_finder: Option<NodeFinder<NodeTemplate>>,
    /// The panning of the graph viewport.
    pub pan_zoom: PanZoom,
    /// A connection to highlight (e.g. as an insertion target during node drag).
    /// Stored as (InputId, OutputId). Not serialized.
    #[cfg_attr(feature = "persistence", serde(skip))]
    pub highlighted_connection: Option<(InputId, OutputId)>,
    // `fn() -> UserState` keeps the struct Send/Sync-neutral with respect to
    // UserState while still tying the type parameter to the struct.
    pub _user_state: PhantomData<fn() -> UserState>,
}
impl<NodeData, DataType, ValueType, NodeKind, UserState>
    GraphEditorState<NodeData, DataType, ValueType, NodeKind, UserState>
{
    /// Creates an editor state whose viewport starts at `default_zoom`.
    pub fn new(default_zoom: f32) -> Self {
        let mut state = Self::default();
        state.pan_zoom = PanZoom::new(default_zoom);
        state
    }
}
// Manual impl: `derive(Default)` would require all five type parameters to be
// `Default`, which they don't need to be.
impl<NodeData, DataType, ValueType, NodeKind, UserState> Default
    for GraphEditorState<NodeData, DataType, ValueType, NodeKind, UserState>
{
    fn default() -> Self {
        Self {
            graph: Default::default(),
            node_order: Default::default(),
            connection_in_progress: Default::default(),
            selected_nodes: Default::default(),
            ongoing_box_selection: Default::default(),
            node_positions: Default::default(),
            node_finder: Default::default(),
            pan_zoom: Default::default(),
            highlighted_connection: Default::default(),
            _user_state: Default::default(),
        }
    }
}
#[cfg(feature = "persistence")]
/// Serde default for `PanZoom::clip_rect`, which is skipped during
/// (de)serialization.
fn _default_clip_rect() -> Rect {
    Rect::NOTHING
}
#[derive(Clone)]
#[cfg_attr(feature = "persistence", derive(Serialize, Deserialize))]
pub struct PanZoom {
    /// Current viewport panning offset.
    pub pan: Vec2,
    /// Current zoom factor (clamped to MIN_ZOOM..=MAX_ZOOM by `zoom()`).
    pub zoom: f32,
    /// Clip rect of the editor viewport; transient, not persisted.
    #[cfg_attr(feature = "persistence", serde(skip, default = "_default_clip_rect"))]
    pub clip_rect: Rect,
    /// The base style pre-scaled for the current zoom; rebuilt by `zoom()`.
    #[cfg_attr(feature = "persistence", serde(skip, default))]
    pub zoomed_style: Arc<Style>,
    // NOTE(review): flag's exact meaning isn't visible here — it is only
    // initialized to `false` in this file; confirm semantics at call sites.
    #[cfg_attr(feature = "persistence", serde(skip, default))]
    pub started: bool,
}
impl Default for PanZoom {
fn default() -> Self {
PanZoom {
pan: Vec2::ZERO,
zoom: 1.0,
clip_rect: Rect::NOTHING,
zoomed_style: Default::default(),
started: false,
}
}
}
impl PanZoom {
    /// Creates a pan/zoom state at the given zoom factor with no panning.
    pub fn new(zoom: f32) -> PanZoom {
        let style: Style = Default::default();
        PanZoom {
            pan: Vec2::ZERO,
            zoom,
            clip_rect: Rect::NOTHING,
            // Scale the default style by the actual starting zoom so
            // `zoomed_style` is consistent with `zoom` before the first
            // `zoom()` call. (Previously this used `scaled(1.0)`, which left
            // the style unscaled when constructed with a non-default zoom.)
            zoomed_style: Arc::new(style.scaled(zoom)),
            started: false,
        }
    }
    /// Applies a multiplicative zoom delta, clamping the result to
    /// [MIN_ZOOM, MAX_ZOOM], and rebuilds `zoomed_style` from `style`.
    pub fn zoom(&mut self, clip_rect: Rect, style: &Arc<Style>, zoom_delta: f32) {
        self.clip_rect = clip_rect;
        let new_zoom = (self.zoom * zoom_delta).clamp(MIN_ZOOM, MAX_ZOOM);
        self.zoomed_style = Arc::new(style.scaled(new_zoom));
        self.zoom = new_zoom;
    }
}
pub fn show_zoomed<R, F>(
default_style: Arc<Style>,
zoomed_style: Arc<Style>,
ui: &mut Ui,
add_content: F,
) -> R
where
F: FnOnce(&mut Ui) -> R,
{
*ui.style_mut() = (*zoomed_style).clone();
let response = add_content(ui);
*ui.style_mut() = (*default_style).clone();
response
}

View File

@ -0,0 +1,15 @@
pub trait ColorUtils {
    /// Multiplies the color rgb values by `factor`, keeping alpha untouched.
    /// Factors above 1.0 lighten; factors below 1.0 darken.
    fn lighten(&self, factor: f32) -> Self;
}
impl ColorUtils for egui::Color32 {
    /// Scales the r/g/b channels by `factor` (on the premultiplied
    /// representation), leaving alpha unchanged.
    fn lighten(&self, factor: f32) -> Self {
        let scale = |channel: u8| (channel as f32 * factor) as u8;
        egui::Color32::from_rgba_premultiplied(
            scale(self.r()),
            scale(self.g()),
            scale(self.b()),
            self.a(),
        )
    }
}

View File

@ -0,0 +1,72 @@
[package]
name = "lightningbeam-core"
version = "0.1.0"
edition = "2021"
[dependencies]
serde = { workspace = true }
serde_json = { workspace = true }
# UI framework (for Color32 conversion)
egui = { workspace = true }
# GPU rendering infrastructure
wgpu = { workspace = true }
bytemuck = { version = "1.14", features = ["derive"] }
# Geometry and rendering
kurbo = { workspace = true }
vello = { workspace = true }
# Image decoding for image fills
image = { workspace = true }
# Unique identifiers
uuid = { version = "1.0", features = ["v4", "serde"] }
# Audio backend
daw-backend = { path = "../../daw-backend" }
# Video decoding
ffmpeg-next = "8.0"
lru = "0.12"
# File I/O
zip = "0.6"
chrono = "0.4"
base64 = "0.21"
pathdiff = "0.2"
# Audio encoding for embedded files
flacenc = "0.4" # For FLAC encoding (lossless)
claxon = "0.4" # For FLAC decoding
# Spatial indexing for DCEL vertex snapping
rstar = "0.12"
# System clipboard
arboard = "3"
# ── Temporary: platform-native custom MIME type clipboard ─────────────────────
# These deps exist because arboard does not yet support custom MIME types.
# Remove once arboard gains that feature (https://github.com/1Password/arboard/issues/14).
[target.'cfg(target_os = "linux")'.dependencies]
wl-clipboard-rs = "0.9"
x11-clipboard = "0.9"
[target.'cfg(target_os = "macos")'.dependencies]
objc2 = "0.6"
objc2-app-kit = { version = "0.3", features = ["NSPasteboard"] }
objc2-foundation = { version = "0.3", features = ["NSString", "NSData"] }
[target.'cfg(target_os = "windows")'.dependencies]
windows-sys = { version = "0.60", features = [
"Win32_Foundation",
"Win32_System_DataExchange",
"Win32_System_Memory",
] }
[dependencies.tiny-skia]
version = "0.11"
[dev-dependencies]

View File

@ -0,0 +1,540 @@
//! Action system for undo/redo functionality
//!
//! This module provides a type-safe action system that ensures document
//! mutations can only happen through actions, enforced by Rust's type system.
//!
//! ## Architecture
//!
//! - `Action` trait: Defines execute() and rollback() operations
//! - `ActionExecutor`: Wraps the document and manages undo/redo stacks
//! - Document mutations are only accessible via `pub(crate)` methods
//! - External code gets read-only access via `ActionExecutor::document()`
//!
//! ## Memory Model
//!
//! The document is stored in an `Arc<Document>` for efficient cloning during
//! GPU render callbacks. When mutation is needed, `Arc::make_mut()` provides
//! copy-on-write semantics - if other Arc holders exist (e.g., in-flight render
//! callbacks), the document is cloned before mutation, preserving their snapshot.
use crate::document::Document;
use std::collections::HashMap;
use std::sync::Arc;
use uuid::Uuid;
/// Backend clip instance ID - wraps both MIDI and Audio instance IDs
#[derive(Debug, Clone, Copy)]
pub enum BackendClipInstanceId {
    /// Instance id of a MIDI clip in the audio backend.
    Midi(daw_backend::MidiClipInstanceId),
    /// Instance id of an audio clip in the audio backend.
    Audio(daw_backend::AudioClipInstanceId),
}
/// Backend context for actions that need to interact with external systems
///
/// This bundles all backend references (audio, future video) that actions
/// may need to synchronize state with external systems beyond the document.
/// Borrowed for the duration of a single execute/rollback call.
pub struct BackendContext<'a> {
    /// Audio engine controller (optional - may not be initialized)
    pub audio_controller: Option<&'a mut daw_backend::EngineController>,
    /// Mapping from all document layer/clip/group UUIDs to backend track IDs.
    /// Covers audio layers, MIDI layers, group layers, and vector clip metatracks.
    /// Read-only here; maintained elsewhere.
    pub layer_to_track_map: &'a HashMap<Uuid, daw_backend::TrackId>,
    /// Mapping from document clip instance UUIDs to backend clip instance IDs.
    /// Mutable so actions can register/unregister instances they create/remove.
    pub clip_instance_to_backend_map: &'a mut HashMap<Uuid, BackendClipInstanceId>,
    // Future: pub video_controller: Option<&'a mut VideoController>,
}
/// Action trait for undo/redo operations
///
/// Each action must be able to execute (apply changes) and rollback (undo changes).
/// Actions are stored in the undo stack and can be re-executed from the redo stack.
///
/// ## Backend Integration
///
/// Actions can optionally implement backend synchronization via `execute_backend()`
/// and `rollback_backend()`. Default implementations do nothing, so actions that
/// only affect the document (vector graphics) don't need to implement these.
pub trait Action: Send {
    /// Apply this action to the document
    ///
    /// Returns Ok(()) if successful, or Err(message) if the action cannot be performed
    fn execute(&mut self, document: &mut Document) -> Result<(), String>;
    /// Undo this action (rollback changes)
    ///
    /// Returns Ok(()) if successful, or Err(message) if rollback fails
    fn rollback(&mut self, document: &mut Document) -> Result<(), String>;
    /// Get a human-readable description of this action (for UI display)
    fn description(&self) -> String;
    /// Execute backend operations after document changes
    ///
    /// Called AFTER execute() succeeds. If this returns an error, execute()
    /// will be automatically rolled back to maintain atomicity.
    ///
    /// # Arguments
    /// * `backend` - Backend context with audio/video controllers
    /// * `document` - Read-only document access for looking up clip data
    ///
    /// Default: No backend operations
    fn execute_backend(&mut self, _backend: &mut BackendContext, _document: &Document) -> Result<(), String> {
        Ok(())
    }
    /// Rollback backend operations during undo
    ///
    /// Called BEFORE rollback() to undo backend changes in reverse order.
    ///
    /// # Arguments
    /// * `backend` - Backend context with audio/video controllers
    /// * `document` - Read-only document access (if needed)
    ///
    /// Default: No backend operations
    fn rollback_backend(&mut self, _backend: &mut BackendContext, _document: &Document) -> Result<(), String> {
        Ok(())
    }
    /// Return MIDI cache data reflecting the state after execute/redo.
    /// Format: (clip_id, notes) where notes are (start_time, note, velocity, duration).
    /// Used to keep the frontend MIDI event cache in sync after undo/redo.
    /// Default `None`: this action does not affect cached MIDI data.
    fn midi_notes_after_execute(&self) -> Option<(u32, &[(f64, u8, u8, f64)])> {
        None
    }
    /// Return MIDI cache data reflecting the state after rollback/undo.
    /// Default `None`: this action does not affect cached MIDI data.
    fn midi_notes_after_rollback(&self) -> Option<(u32, &[(f64, u8, u8, f64)])> {
        None
    }
}
/// Action executor that wraps the document and manages undo/redo
///
/// This is the only way to get mutable access to the document, ensuring
/// all mutations go through the action system.
///
/// The document is stored in `Arc<Document>` for efficient sharing with
/// render callbacks. Use `document_arc()` for cheap cloning to GPU passes.
pub struct ActionExecutor {
/// The document being edited (wrapped in Arc for cheap cloning)
document: Arc<Document>,
/// Stack of executed actions (for undo); newest action is last
undo_stack: Vec<Box<dyn Action>>,
/// Stack of undone actions (for redo); cleared whenever a new action executes
redo_stack: Vec<Box<dyn Action>>,
/// Maximum number of actions to keep in undo stack
/// (oldest entries are discarded once the limit is exceeded)
max_undo_depth: usize,
/// Monotonically increasing counter, bumped on every `execute` call
/// (wraps at u64::MAX via `wrapping_add`; never decremented on undo/redo).
/// Used to detect whether any actions were taken during a region selection.
epoch: u64,
}
impl ActionExecutor {
    /// Create a new action executor that owns `document`.
    ///
    /// Transient lookup maps (which are not serialized with the document)
    /// are rebuilt here so a freshly loaded document is immediately usable.
    pub fn new(mut document: Document) -> Self {
        // Rebuild transient lookup maps (not serialized)
        document.rebuild_layer_to_clip_map();
        Self {
            document: Arc::new(document),
            undo_stack: Vec::new(),
            redo_stack: Vec::new(),
            max_undo_depth: 100, // Default: keep last 100 actions
            epoch: 0,
        }
    }

    /// Get read-only access to the document.
    ///
    /// This is the public API for reading document state; mutations must go
    /// through `execute()`, which requires an `Action`.
    pub fn document(&self) -> &Document {
        &self.document
    }

    /// Get a cheap clone of the document `Arc` for render callbacks.
    ///
    /// Cloning the `Arc` is a pointer copy plus an atomic increment, not a
    /// deep clone, so this is suitable for handing to GPU render passes.
    pub fn document_arc(&self) -> Arc<Document> {
        Arc::clone(&self.document)
    }

    /// Get mutable access to the document.
    ///
    /// Uses copy-on-write semantics via `Arc::make_mut`: if other `Arc`
    /// holders exist (e.g. in-flight render callbacks), the document is
    /// cloned before mutation; otherwise this is direct mutable access.
    ///
    /// Note: intended for live previews only. Permanent changes should go
    /// through `execute()` so they participate in undo/redo.
    pub fn document_mut(&mut self) -> &mut Document {
        Arc::make_mut(&mut self.document)
    }

    /// Push an action onto the undo stack, enforcing `max_undo_depth`.
    fn push_undo(&mut self, action: Box<dyn Action>) {
        self.undo_stack.push(action);
        if self.undo_stack.len() > self.max_undo_depth {
            // Drop the oldest entry. O(n), but n is bounded by
            // max_undo_depth and this runs only once the cap is reached.
            self.undo_stack.remove(0);
        }
    }

    /// Execute an action and add it to the undo stack.
    ///
    /// Clears the redo stack (a new action starts a new timeline branch)
    /// and bumps the epoch counter.
    ///
    /// Returns Ok(()) if successful, or Err(message) if the action failed
    /// (in which case no executor state is changed).
    pub fn execute(&mut self, mut action: Box<dyn Action>) -> Result<(), String> {
        // Apply the action (uses copy-on-write if other Arc holders exist)
        action.execute(Arc::make_mut(&mut self.document))?;
        // Clear redo stack (new action invalidates redo history)
        self.redo_stack.clear();
        // Bump epoch so region selections can detect that an action occurred
        self.epoch = self.epoch.wrapping_add(1);
        self.push_undo(action);
        Ok(())
    }

    /// Undo the last action.
    ///
    /// Returns Ok(true) if an action was undone, Ok(false) if undo stack is
    /// empty, or Err(message) if rollback failed (the action is kept on the
    /// undo stack so history is not lost).
    pub fn undo(&mut self) -> Result<bool, String> {
        if let Some(mut action) = self.undo_stack.pop() {
            // Rollback the action (uses copy-on-write if other Arc holders exist)
            match action.rollback(Arc::make_mut(&mut self.document)) {
                Ok(()) => {
                    // Move to redo stack
                    self.redo_stack.push(action);
                    Ok(true)
                }
                Err(e) => {
                    // Put action back on undo stack if rollback failed
                    self.undo_stack.push(action);
                    Err(e)
                }
            }
        } else {
            Ok(false)
        }
    }

    /// Redo the last undone action. Mirror image of `undo()`.
    ///
    /// Returns Ok(true) if an action was redone, Ok(false) if redo stack is
    /// empty, or Err(message) if re-execution failed (the action is kept on
    /// the redo stack).
    pub fn redo(&mut self) -> Result<bool, String> {
        if let Some(mut action) = self.redo_stack.pop() {
            // Re-execute the action (uses copy-on-write if other Arc holders exist)
            match action.execute(Arc::make_mut(&mut self.document)) {
                Ok(()) => {
                    // Move back to undo stack
                    self.undo_stack.push(action);
                    Ok(true)
                }
                Err(e) => {
                    // Put action back on redo stack if re-execution failed
                    self.redo_stack.push(action);
                    Err(e)
                }
            }
        } else {
            Ok(false)
        }
    }

    /// Check if undo is available
    pub fn can_undo(&self) -> bool {
        !self.undo_stack.is_empty()
    }

    /// Check if redo is available
    pub fn can_redo(&self) -> bool {
        !self.redo_stack.is_empty()
    }

    /// Get the description of the next action to undo
    pub fn undo_description(&self) -> Option<String> {
        self.undo_stack.last().map(|a| a.description())
    }

    /// Get MIDI cache data from the last action on the undo stack (after redo).
    /// Returns the notes reflecting execute state.
    pub fn last_undo_midi_notes(&self) -> Option<(u32, &[(f64, u8, u8, f64)])> {
        self.undo_stack.last().and_then(|a| a.midi_notes_after_execute())
    }

    /// Get MIDI cache data from the last action on the redo stack (after undo).
    /// Returns the notes reflecting rollback state.
    pub fn last_redo_midi_notes(&self) -> Option<(u32, &[(f64, u8, u8, f64)])> {
        self.redo_stack.last().and_then(|a| a.midi_notes_after_rollback())
    }

    /// Get the description of the next action to redo
    pub fn redo_description(&self) -> Option<String> {
        self.redo_stack.last().map(|a| a.description())
    }

    /// Get the number of actions in the undo stack
    pub fn undo_depth(&self) -> usize {
        self.undo_stack.len()
    }

    /// Get the number of actions in the redo stack
    pub fn redo_depth(&self) -> usize {
        self.redo_stack.len()
    }

    /// Return the current action epoch.
    ///
    /// The epoch is a monotonically increasing counter that is bumped every
    /// time `execute` is called (wrapping at u64::MAX). It is never
    /// decremented on undo/redo, so callers can record it at a point in time
    /// and later compare to detect whether any action was executed in the
    /// interim.
    pub fn epoch(&self) -> u64 {
        self.epoch
    }

    /// Clear all undo/redo history
    pub fn clear_history(&mut self) {
        self.undo_stack.clear();
        self.redo_stack.clear();
    }

    /// Set the maximum undo depth, trimming the oldest entries if the stack
    /// already exceeds the new limit.
    pub fn set_max_undo_depth(&mut self, depth: usize) {
        self.max_undo_depth = depth;
        // Trim undo stack if needed (oldest entries first)
        if self.undo_stack.len() > depth {
            let remove_count = self.undo_stack.len() - depth;
            self.undo_stack.drain(0..remove_count);
        }
    }

    /// Execute an action with backend synchronization.
    ///
    /// Atomic: if the backend operation fails, the document changes are
    /// rolled back so document and backend stay consistent.
    ///
    /// # Arguments
    /// * `action` - The action to execute
    /// * `backend` - Backend context for audio/video operations
    ///
    /// # Returns
    /// * `Ok(())` if both document and backend operations succeeded
    /// * `Err(msg)` if either step failed. If the backend failed AND the
    ///   compensating document rollback also failed, both errors are
    ///   reported in the message (previously the rollback error masked the
    ///   original backend error).
    pub fn execute_with_backend(
        &mut self,
        mut action: Box<dyn Action>,
        backend: &mut BackendContext,
    ) -> Result<(), String> {
        // 1. Execute document changes
        action.execute(Arc::make_mut(&mut self.document))?;
        // 2. Execute backend changes (pass document for reading clip data)
        if let Err(e) = action.execute_backend(backend, &self.document) {
            // ATOMIC ROLLBACK: Backend failed → undo document. Keep the
            // backend error as the primary one; append the rollback error
            // instead of masking with it.
            return match action.rollback(Arc::make_mut(&mut self.document)) {
                Ok(()) => Err(e),
                Err(rb) => Err(format!("{}; document rollback also failed: {}", e, rb)),
            };
        }
        // 3. Both succeeded: commit to undo history
        self.redo_stack.clear();
        self.epoch = self.epoch.wrapping_add(1);
        self.push_undo(action);
        Ok(())
    }

    /// Undo the last action with backend synchronization.
    ///
    /// Rollback happens in reverse order: backend first, then document.
    /// On failure the action is pushed back onto the undo stack so the
    /// history entry is not silently lost (previously the popped action was
    /// dropped when either rollback step failed).
    ///
    /// # Returns
    /// * `Ok(true)` if an action was undone
    /// * `Ok(false)` if undo stack is empty
    /// * `Err(msg)` if backend or document rollback failed
    pub fn undo_with_backend(&mut self, backend: &mut BackendContext) -> Result<bool, String> {
        if let Some(mut action) = self.undo_stack.pop() {
            // Rollback in REVERSE order: backend first, then document
            if let Err(e) = action.rollback_backend(backend, &self.document) {
                // Keep the action so the user can retry the undo
                self.undo_stack.push(action);
                return Err(e);
            }
            if let Err(e) = action.rollback(Arc::make_mut(&mut self.document)) {
                // Backend was already rolled back, so backend and document
                // may now disagree; keep the action rather than dropping it.
                self.undo_stack.push(action);
                return Err(e);
            }
            // Move to redo stack
            self.redo_stack.push(action);
            Ok(true)
        } else {
            Ok(false)
        }
    }

    /// Redo the last undone action with backend synchronization.
    ///
    /// Re-execution happens in normal order: document first, then backend.
    /// On failure the action is pushed back onto the redo stack — even when
    /// the compensating document rollback fails (previously the `?` on that
    /// rollback dropped the action entirely).
    ///
    /// # Returns
    /// * `Ok(true)` if an action was redone
    /// * `Ok(false)` if redo stack is empty
    /// * `Err(msg)` if document or backend execution failed
    pub fn redo_with_backend(&mut self, backend: &mut BackendContext) -> Result<bool, String> {
        if let Some(mut action) = self.redo_stack.pop() {
            // Re-execute in same order: document first, then backend
            if let Err(e) = action.execute(Arc::make_mut(&mut self.document)) {
                // Put action back on redo stack if document execute fails
                self.redo_stack.push(action);
                return Err(e);
            }
            if let Err(e) = action.execute_backend(backend, &self.document) {
                // Backend failed: roll the document back, then return the
                // action to the redo stack regardless of the rollback result.
                let rollback_result = action.rollback(Arc::make_mut(&mut self.document));
                self.redo_stack.push(action);
                return match rollback_result {
                    Ok(()) => Err(e),
                    Err(rb) => Err(format!("{}; document rollback also failed: {}", e, rb)),
                };
            }
            // Move back to undo stack
            self.undo_stack.push(action);
            Ok(true)
        } else {
            Ok(false)
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Minimal action that records whether it is currently applied.
    struct TestAction {
        description: String,
        executed: bool,
    }

    impl TestAction {
        fn new(description: &str) -> Self {
            TestAction {
                description: description.to_owned(),
                executed: false,
            }
        }
    }

    impl Action for TestAction {
        fn execute(&mut self, _document: &mut Document) -> Result<(), String> {
            self.executed = true;
            Ok(())
        }

        fn rollback(&mut self, _document: &mut Document) -> Result<(), String> {
            self.executed = false;
            Ok(())
        }

        fn description(&self) -> String {
            self.description.clone()
        }
    }

    /// Helper: box a fresh `TestAction` with the given label.
    fn boxed(desc: &str) -> Box<dyn Action> {
        Box::new(TestAction::new(desc))
    }

    #[test]
    fn test_action_executor_basic() {
        let mut exec = ActionExecutor::new(Document::new("Test"));
        assert!(!exec.can_undo());
        assert!(!exec.can_redo());

        // Execute one action: it should land on the undo stack.
        exec.execute(boxed("Test Action")).unwrap();
        assert!(exec.can_undo());
        assert!(!exec.can_redo());
        assert_eq!(exec.undo_depth(), 1);

        // Undo moves it to the redo stack.
        assert!(exec.undo().unwrap());
        assert!(!exec.can_undo());
        assert!(exec.can_redo());
        assert_eq!(exec.redo_depth(), 1);

        // Redo moves it back.
        assert!(exec.redo().unwrap());
        assert!(exec.can_undo());
        assert!(!exec.can_redo());
    }

    #[test]
    fn test_action_descriptions() {
        let mut exec = ActionExecutor::new(Document::new("Test"));
        exec.execute(boxed("Action 1")).unwrap();
        exec.execute(boxed("Action 2")).unwrap();

        assert_eq!(exec.undo_description(), Some("Action 2".to_string()));
        exec.undo().unwrap();
        assert_eq!(exec.redo_description(), Some("Action 2".to_string()));
        assert_eq!(exec.undo_description(), Some("Action 1".to_string()));
    }

    #[test]
    fn test_new_action_clears_redo() {
        let mut exec = ActionExecutor::new(Document::new("Test"));
        exec.execute(boxed("Action 1")).unwrap();
        exec.execute(boxed("Action 2")).unwrap();
        exec.undo().unwrap();
        assert!(exec.can_redo());

        // A fresh action starts a new branch, discarding redo history.
        exec.execute(boxed("Action 3")).unwrap();
        assert!(!exec.can_redo());
        assert_eq!(exec.undo_depth(), 2);
    }

    #[test]
    fn test_max_undo_depth() {
        let mut exec = ActionExecutor::new(Document::new("Test"));
        exec.set_max_undo_depth(3);
        for label in ["Action 1", "Action 2", "Action 3", "Action 4"] {
            exec.execute(boxed(label)).unwrap();
        }
        // Only the most recent three survive; the oldest was evicted.
        assert_eq!(exec.undo_depth(), 3);
        assert_eq!(exec.undo_description(), Some("Action 4".to_string()));
    }
}

View File

@ -0,0 +1,341 @@
//! Add clip instance action
//!
//! Handles adding a clip instance to a layer.
use crate::action::{Action, BackendContext};
use crate::clip::ClipInstance;
use crate::document::Document;
use crate::layer::AnyLayer;
use uuid::Uuid;
/// Action that adds a clip instance to a layer
pub struct AddClipInstanceAction {
/// The target layer ID
layer_id: Uuid,
/// The clip instance to add
/// (its `timeline_start` may be adjusted by `execute` to avoid overlaps)
clip_instance: ClipInstance,
/// Whether the action has been executed (for rollback)
executed: bool,
/// Backend track ID (stored during execute_backend for undo,
/// cleared again by rollback_backend)
backend_track_id: Option<daw_backend::TrackId>,
/// Backend MIDI clip instance ID (stored during execute_backend for undo)
backend_midi_instance_id: Option<daw_backend::MidiClipInstanceId>,
/// Backend audio clip instance ID (stored during execute_backend for undo)
backend_audio_instance_id: Option<daw_backend::AudioClipInstanceId>,
}
impl AddClipInstanceAction {
/// Create a new add clip instance action
///
/// # Arguments
///
/// * `layer_id` - The ID of the layer to add the clip instance to
/// * `clip_instance` - The clip instance to add
pub fn new(layer_id: Uuid, clip_instance: ClipInstance) -> Self {
Self {
layer_id,
clip_instance,
executed: false,
backend_track_id: None,
backend_midi_instance_id: None,
backend_audio_instance_id: None,
}
}
/// Get the ID of the clip instance that will be/was added
pub fn clip_instance_id(&self) -> Uuid {
self.clip_instance.id
}
/// Get the layer ID this action targets
pub fn layer_id(&self) -> Uuid {
self.layer_id
}
}
impl Action for AddClipInstanceAction {
    /// Adds the clip instance to the target layer, auto-adjusting its
    /// timeline position to avoid overlaps on audio/video layers.
    fn execute(&mut self, document: &mut Document) -> Result<(), String> {
        // Calculate the clip's effective duration from its trim window
        let clip_duration = document.get_clip_duration(&self.clip_instance.clip_id)
            .ok_or_else(|| format!("Clip {} not found", self.clip_instance.clip_id))?;
        let trim_start = self.clip_instance.trim_start;
        let trim_end = self.clip_instance.trim_end.unwrap_or(clip_duration);
        let effective_duration = trim_end - trim_start;
        // Auto-adjust position for audio/video layers to avoid overlaps
        let adjusted_start = document.find_nearest_valid_position(
            &self.layer_id,
            self.clip_instance.timeline_start,
            effective_duration,
            &[], // Not excluding any instance
        );
        if let Some(valid_start) = adjusted_start {
            // Update instance to use the valid position
            self.clip_instance.timeline_start = valid_start;
        } else {
            // No valid position found - reject the operation
            return Err("Cannot add clip: no valid position found on layer (layer is full)".to_string());
        }
        // Add the clip instance with adjusted position
        let layer = document.get_layer_mut(&self.layer_id)
            .ok_or_else(|| format!("Layer {} not found", self.layer_id))?;
        match layer {
            AnyLayer::Vector(vector_layer) => {
                vector_layer.clip_instances.push(self.clip_instance.clone());
            }
            AnyLayer::Audio(audio_layer) => {
                audio_layer.clip_instances.push(self.clip_instance.clone());
            }
            AnyLayer::Video(video_layer) => {
                video_layer.clip_instances.push(self.clip_instance.clone());
            }
            AnyLayer::Effect(_) => {
                return Err("Cannot add clip instances to effect layers".to_string());
            }
            AnyLayer::Group(_) => {
                return Err("Cannot add clip instances directly to group layers".to_string());
            }
            AnyLayer::Raster(_) => {
                // Fixed copy-paste bug: this arm previously reported
                // "group layers" for raster layers.
                return Err("Cannot add clip instances to raster layers".to_string());
            }
        }
        self.executed = true;
        Ok(())
    }

    /// Removes the previously added clip instance from the target layer.
    /// A no-op when the action has not been executed.
    fn rollback(&mut self, document: &mut Document) -> Result<(), String> {
        if !self.executed {
            return Ok(());
        }
        let instance_id = self.clip_instance.id;
        let layer = document.get_layer_mut(&self.layer_id)
            .ok_or_else(|| format!("Layer {} not found", self.layer_id))?;
        match layer {
            AnyLayer::Vector(vector_layer) => {
                vector_layer
                    .clip_instances
                    .retain(|ci| ci.id != instance_id);
            }
            AnyLayer::Audio(audio_layer) => {
                audio_layer
                    .clip_instances
                    .retain(|ci| ci.id != instance_id);
            }
            AnyLayer::Video(video_layer) => {
                video_layer
                    .clip_instances
                    .retain(|ci| ci.id != instance_id);
            }
            AnyLayer::Effect(_) => {
                // Effect layers don't have clip instances, nothing to rollback
            }
            AnyLayer::Group(_) => {
                // Group layers don't have clip instances, nothing to rollback
            }
            AnyLayer::Raster(_) => {
                // Raster layers don't have clip instances, nothing to rollback
            }
        }
        self.executed = false;
        Ok(())
    }

    fn description(&self) -> String {
        "Add clip instance".to_string()
    }

    /// Syncs the new clip instance to the audio backend (audio layers only).
    /// Stores the backend-assigned IDs so `rollback_backend` can remove it.
    fn execute_backend(&mut self, backend: &mut BackendContext, document: &Document) -> Result<(), String> {
        // Only sync audio/MIDI clips to the backend
        // Check if this is an audio layer first
        let layer = document
            .get_layer(&self.layer_id)
            .ok_or_else(|| format!("Layer {} not found", self.layer_id))?;
        // Only process audio layers - vector and video clips don't need backend sync
        if !matches!(layer, AnyLayer::Audio(_)) {
            return Ok(());
        }
        // Look up the clip from the document
        let clip = document
            .get_audio_clip(&self.clip_instance.clip_id)
            .ok_or_else(|| "Audio clip not found".to_string())?;
        // Look up backend track ID from layer mapping
        let backend_track_id = backend
            .layer_to_track_map
            .get(&self.layer_id)
            .ok_or_else(|| format!("Layer {} not mapped to backend track", self.layer_id))?;
        // Get audio controller
        let controller = backend
            .audio_controller
            .as_mut()
            .ok_or_else(|| "Audio controller not available".to_string())?;
        // Handle different clip types
        use crate::clip::AudioClipType;
        match &clip.clip_type {
            AudioClipType::Midi { midi_clip_id } => {
                // Create a MIDI clip instance referencing the existing clip in
                // the backend pool. No need to add to pool again - it was added
                // during MIDI import.
                use daw_backend::command::{Query, QueryResponse};
                // Calculate internal start/end from trim parameters
                let internal_start = self.clip_instance.trim_start;
                let internal_end = self.clip_instance.trim_end.unwrap_or(clip.duration);
                let external_start = self.clip_instance.timeline_start;
                // Calculate external duration (for looping if timeline_duration is set)
                let external_duration = self.clip_instance.timeline_duration
                    .unwrap_or(internal_end - internal_start);
                // Create MidiClipInstance
                let instance = daw_backend::MidiClipInstance::new(
                    0, // Instance ID will be assigned by backend
                    *midi_clip_id,
                    internal_start,
                    internal_end,
                    external_start,
                    external_duration,
                );
                // Send a synchronous query so we learn the assigned instance ID
                let query = Query::AddMidiClipInstanceSync(*backend_track_id, instance);
                match controller.send_query(query)? {
                    QueryResponse::MidiClipInstanceAdded(Ok(instance_id)) => {
                        // Remember IDs for rollback_backend
                        self.backend_track_id = Some(*backend_track_id);
                        self.backend_midi_instance_id = Some(instance_id);
                        // Add to global clip instance mapping
                        backend.clip_instance_to_backend_map.insert(
                            self.clip_instance.id,
                            crate::action::BackendClipInstanceId::Midi(instance_id)
                        );
                        Ok(())
                    }
                    QueryResponse::MidiClipInstanceAdded(Err(e)) => Err(e),
                    _ => Err("Unexpected query response".to_string()),
                }
            }
            AudioClipType::Sampled { audio_pool_index } => {
                let internal_start = self.clip_instance.trim_start;
                let internal_end = self.clip_instance.trim_end.unwrap_or(clip.duration);
                let effective_duration = self.clip_instance.timeline_duration
                    .unwrap_or(internal_end - internal_start);
                let start_time = self.clip_instance.timeline_start;
                let instance_id = controller.add_audio_clip(
                    *backend_track_id,
                    *audio_pool_index,
                    start_time,
                    effective_duration,
                    internal_start,
                );
                // Remember IDs for rollback_backend
                self.backend_track_id = Some(*backend_track_id);
                self.backend_audio_instance_id = Some(instance_id);
                // Add to global clip instance mapping
                backend.clip_instance_to_backend_map.insert(
                    self.clip_instance.id,
                    crate::action::BackendClipInstanceId::Audio(instance_id)
                );
                Ok(())
            }
            AudioClipType::Recording => {
                // Recording clips are not synced to backend until finalized
                Ok(())
            }
        }
    }

    /// Removes the backend clip instance created by `execute_backend`, if
    /// any, and clears the stored backend IDs.
    fn rollback_backend(&mut self, backend: &mut BackendContext, _document: &Document) -> Result<(), String> {
        // Remove clip from backend if it was added
        if let (Some(track_id), Some(controller)) =
            (self.backend_track_id, backend.audio_controller.as_mut())
        {
            if let Some(midi_instance_id) = self.backend_midi_instance_id {
                controller.remove_midi_clip(track_id, midi_instance_id);
            } else if let Some(audio_instance_id) = self.backend_audio_instance_id {
                controller.remove_audio_clip(track_id, audio_instance_id);
            }
            // Remove from global clip instance mapping
            backend.clip_instance_to_backend_map.remove(&self.clip_instance.id);
            // Clear stored IDs
            self.backend_track_id = None;
            self.backend_midi_instance_id = None;
            self.backend_audio_instance_id = None;
        }
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::layer::VectorLayer;

    #[test]
    fn test_add_clip_instance_to_vector_layer() {
        let mut doc = Document::new("Test");

        // Set up a vector layer to target.
        let vlayer = VectorLayer::new("Test Layer");
        let layer_id = vlayer.layer.id;
        doc.root_mut().add_child(AnyLayer::Vector(vlayer));

        // A fake clip_id is enough here: we are exercising the action, not
        // clip lookup.
        let instance = ClipInstance::new(Uuid::new_v4());
        let instance_id = instance.id;

        let mut add = AddClipInstanceAction::new(layer_id, instance);
        add.execute(&mut doc).unwrap();

        // The instance should now live on the vector layer.
        match doc.get_layer(&layer_id) {
            Some(AnyLayer::Vector(vl)) => {
                assert_eq!(vl.clip_instances.len(), 1);
                assert_eq!(vl.clip_instances[0].id, instance_id);
            }
            _ => panic!("Layer not found"),
        }

        add.rollback(&mut doc).unwrap();

        // ...and be gone again after rollback.
        match doc.get_layer(&layer_id) {
            Some(AnyLayer::Vector(vl)) => assert_eq!(vl.clip_instances.len(), 0),
            _ => panic!("Layer not found"),
        }
    }

    #[test]
    fn test_add_clip_instance_description() {
        let action = AddClipInstanceAction::new(Uuid::new_v4(), ClipInstance::new(Uuid::new_v4()));
        assert_eq!(action.description(), "Add clip instance");
    }
}

View File

@ -0,0 +1,232 @@
//! Add effect action
//!
//! Handles adding a new effect instance (as a ClipInstance) to an effect layer.
use crate::action::Action;
use crate::clip::ClipInstance;
use crate::document::Document;
use crate::layer::AnyLayer;
use uuid::Uuid;
/// Action that adds an effect instance to an effect layer
///
/// Effect instances are represented as ClipInstance objects where clip_id
/// references an EffectDefinition.
pub struct AddEffectAction {
/// ID of the layer to add the effect to
layer_id: Uuid,
/// The clip instance (effect) to add.
/// `Some` while pending (before execute, or after rollback);
/// taken (set to `None`) while the effect is applied to the document.
instance: Option<ClipInstance>,
/// Index to insert at (None = append to end)
insert_index: Option<usize>,
/// ID of the created effect (set after execution)
created_effect_id: Option<Uuid>,
}
impl AddEffectAction {
/// Create a new add effect action
///
/// # Arguments
///
/// * `layer_id` - ID of the effect layer to add the effect to
/// * `instance` - The clip instance (referencing an effect definition) to add
pub fn new(layer_id: Uuid, instance: ClipInstance) -> Self {
Self {
layer_id,
instance: Some(instance),
insert_index: None,
created_effect_id: None,
}
}
/// Create a new add effect action that inserts at a specific index
///
/// # Arguments
///
/// * `layer_id` - ID of the effect layer to add the effect to
/// * `instance` - The clip instance (referencing an effect definition) to add
/// * `index` - Index to insert at
pub fn at_index(layer_id: Uuid, instance: ClipInstance, index: usize) -> Self {
Self {
layer_id,
instance: Some(instance),
insert_index: Some(index),
created_effect_id: None,
}
}
/// Get the ID of the created effect (after execution)
pub fn created_effect_id(&self) -> Option<Uuid> {
self.created_effect_id
}
/// Get the layer ID this effect was added to
pub fn layer_id(&self) -> Uuid {
self.layer_id
}
}
impl Action for AddEffectAction {
    /// Moves the pending instance into the effect layer and records its ID
    /// so a later `rollback` can remove it again.
    fn execute(&mut self, document: &mut Document) -> Result<(), String> {
        // Take ownership of the pending instance; executing twice in a row
        // (without an intervening rollback) is a usage error.
        let instance = match self.instance.take() {
            Some(inst) => inst,
            None => return Err("Effect already added (call rollback first)".to_string()),
        };
        let instance_id = instance.id;
        // Locate the target layer and make sure it really is an effect layer.
        let effect_layer = match document.get_layer_mut(&self.layer_id) {
            Some(AnyLayer::Effect(el)) => el,
            Some(_) => return Err("Layer is not an effect layer".to_string()),
            None => return Err(format!("Layer {} not found", self.layer_id)),
        };
        // Append, or insert at the requested position.
        if let Some(index) = self.insert_index {
            effect_layer.insert_clip_instance(index, instance);
        } else {
            effect_layer.add_clip_instance(instance);
        }
        self.created_effect_id = Some(instance_id);
        Ok(())
    }

    /// Pulls the effect instance back out of the layer and stores it so a
    /// later redo can re-add it.
    fn rollback(&mut self, document: &mut Document) -> Result<(), String> {
        let instance_id = match self.created_effect_id {
            Some(id) => id,
            None => return Err("No effect to remove (not executed yet)".to_string()),
        };
        let effect_layer = match document.get_layer_mut(&self.layer_id) {
            Some(AnyLayer::Effect(el)) => el,
            Some(_) => return Err("Layer is not an effect layer".to_string()),
            None => return Err(format!("Layer {} not found", self.layer_id)),
        };
        let removed = match effect_layer.remove_clip_instance(&instance_id) {
            Some(inst) => inst,
            None => return Err(format!("Effect instance {} not found", instance_id)),
        };
        // Keep the instance for a potential redo.
        self.instance = Some(removed);
        self.created_effect_id = None;
        Ok(())
    }

    fn description(&self) -> String {
        "Add effect".to_string()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::effect::{EffectCategory, EffectDefinition, EffectParameterDef};
    use crate::effect_layer::EffectLayer;
    use crate::layer::AnyLayer;

    /// Build a document containing one effect layer plus a reusable effect
    /// definition; returns (document, effect layer id, definition).
    fn create_test_setup() -> (Document, Uuid, EffectDefinition) {
        let mut doc = Document::new("Test");

        let el = EffectLayer::new("Effects");
        let layer_id = el.layer.id;
        doc.root_mut().add_child(AnyLayer::Effect(el));

        let def = EffectDefinition::new(
            "Test Effect",
            EffectCategory::Color,
            "// shader code",
            vec![EffectParameterDef::float_range("intensity", "Intensity", 1.0, 0.0, 2.0)],
        );
        (doc, layer_id, def)
    }

    /// Convenience: the effect layer's clip-instance IDs, in order.
    fn instance_ids(doc: &Document, layer_id: &Uuid) -> Vec<Uuid> {
        match doc.get_layer(layer_id) {
            Some(AnyLayer::Effect(el)) => el.clip_instances.iter().map(|ci| ci.id).collect(),
            _ => panic!("Expected effect layer"),
        }
    }

    #[test]
    fn test_add_effect() {
        let (mut doc, layer_id, def) = create_test_setup();
        let instance = def.create_instance(0.0, 10.0);
        let instance_id = instance.id;

        let mut add = AddEffectAction::new(layer_id, instance);
        add.execute(&mut doc).unwrap();

        // The action reports the new ID and the layer contains exactly it.
        assert_eq!(add.created_effect_id(), Some(instance_id));
        assert_eq!(instance_ids(&doc, &layer_id), vec![instance_id]);
    }

    #[test]
    fn test_add_effect_rollback() {
        let (mut doc, layer_id, def) = create_test_setup();
        let mut add = AddEffectAction::new(layer_id, def.create_instance(0.0, 10.0));
        add.execute(&mut doc).unwrap();
        add.rollback(&mut doc).unwrap();
        // The layer is empty again after rollback.
        assert!(instance_ids(&doc, &layer_id).is_empty());
    }

    #[test]
    fn test_add_effect_at_index() {
        let (mut doc, layer_id, def) = create_test_setup();

        let first = def.create_instance(0.0, 10.0);
        let id1 = first.id;
        AddEffectAction::new(layer_id, first).execute(&mut doc).unwrap();

        let second = def.create_instance(0.0, 10.0);
        let id2 = second.id;
        AddEffectAction::new(layer_id, second).execute(&mut doc).unwrap();

        // Insert the third effect between the first two.
        let third = def.create_instance(0.0, 10.0);
        let id3 = third.id;
        AddEffectAction::at_index(layer_id, third, 1).execute(&mut doc).unwrap();

        assert_eq!(instance_ids(&doc, &layer_id), vec![id1, id3, id2]);
    }
}

View File

@ -0,0 +1,192 @@
//! Add layer action
//!
//! Handles adding a new layer to the document.
use crate::action::Action;
use crate::document::Document;
use crate::layer::{AnyLayer, VectorLayer};
use uuid::Uuid;
/// Action that adds a new layer to the document
pub struct AddLayerAction {
/// The layer to add
/// (cloned into the document on each execute so the action can be redone)
layer: AnyLayer,
/// If Some, add to this VectorClip's layers instead of root
target_clip_id: Option<Uuid>,
/// If Some, add as a child of this GroupLayer instead of root.
/// Takes precedence over `target_clip_id` (checked first in `execute`).
target_group_id: Option<Uuid>,
/// ID of the created layer (set after execution)
created_layer_id: Option<Uuid>,
}
impl AddLayerAction {
/// Create a new add layer action with a vector layer
///
/// # Arguments
///
/// * `name` - The name for the new layer
pub fn new_vector(name: impl Into<String>) -> Self {
let layer = VectorLayer::new(name);
Self {
layer: AnyLayer::Vector(layer),
target_clip_id: None,
target_group_id: None,
created_layer_id: None,
}
}
/// Create a new add layer action with any layer type
///
/// # Arguments
///
/// * `layer` - The layer to add
pub fn new(layer: AnyLayer) -> Self {
Self {
layer,
target_clip_id: None,
target_group_id: None,
created_layer_id: None,
}
}
/// Set the target clip for this action (add layer inside a movie clip)
pub fn with_target_clip(mut self, clip_id: Option<Uuid>) -> Self {
self.target_clip_id = clip_id;
self
}
/// Set the target group for this action (add layer inside a group layer)
pub fn with_target_group(mut self, group_id: Uuid) -> Self {
self.target_group_id = Some(group_id);
self
}
/// Get the ID of the created layer (after execution)
pub fn created_layer_id(&self) -> Option<Uuid> {
self.created_layer_id
}
}
impl Action for AddLayerAction {
/// Adds the stored layer to the group, vector clip, or document root,
/// and records the new layer's ID so `rollback` can remove it.
fn execute(&mut self, document: &mut Document) -> Result<(), String> {
let layer_id = if let Some(group_id) = self.target_group_id {
// Add layer inside a group layer
// NOTE(review): only the root's DIRECT children are searched for the
// target group; a group nested deeper in the tree will not be found.
// Confirm whether nested groups need support here.
let id = self.layer.id();
if let Some(AnyLayer::Group(g)) = document.root.children.iter_mut()
.find(|l| l.id() == group_id)
{
g.add_child(self.layer.clone());
} else {
return Err(format!("Target group {} not found", group_id));
}
id
} else if let Some(clip_id) = self.target_clip_id {
// Add layer inside a vector clip (movie clip)
let clip = document.vector_clips.get_mut(&clip_id)
.ok_or_else(|| format!("Target clip {} not found", clip_id))?;
let id = self.layer.id();
clip.layers.add_root(self.layer.clone());
// Register in layer_to_clip_map for O(1) lookup
document.layer_to_clip_map.insert(id, clip_id);
id
} else {
// Add layer to the document's root
document.root_mut().add_child(self.layer.clone())
};
// Store the ID for rollback
self.created_layer_id = Some(layer_id);
Ok(())
}
/// Removes the previously added layer from wherever it was placed.
/// A no-op when the action has not been executed (or was already rolled
/// back): `created_layer_id` is `None` in that case.
fn rollback(&mut self, document: &mut Document) -> Result<(), String> {
// Remove the created layer if it exists
if let Some(layer_id) = self.created_layer_id {
if let Some(group_id) = self.target_group_id {
// Remove from group layer
// NOTE(review): same root-children-only lookup as execute(); if
// the group is not found the rollback silently does nothing.
if let Some(AnyLayer::Group(g)) = document.root.children.iter_mut()
.find(|l| l.id() == group_id)
{
g.children.retain(|l| l.id() != layer_id);
}
} else if let Some(clip_id) = self.target_clip_id {
// Remove from vector clip
if let Some(clip) = document.vector_clips.get_mut(&clip_id) {
clip.layers.roots.retain(|node| node.data.id() != layer_id);
}
document.layer_to_clip_map.remove(&layer_id);
} else {
document.root_mut().remove_child(&layer_id);
}
// Clear the stored ID
self.created_layer_id = None;
}
Ok(())
}
/// Human-readable label for undo/redo menus, based on the layer type.
fn description(&self) -> String {
match &self.layer {
AnyLayer::Vector(_) => "Add vector layer",
AnyLayer::Audio(_) => "Add audio layer",
AnyLayer::Video(_) => "Add video layer",
AnyLayer::Effect(_) => "Add effect layer",
AnyLayer::Group(_) => "Add group layer",
AnyLayer::Raster(_) => "Add raster layer",
}
.to_string()
}
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_add_vector_layer() {
        let mut doc = Document::new("Test");
        assert_eq!(doc.root.children.len(), 0);

        // Add the layer, then confirm it landed at the root.
        let mut add = AddLayerAction::new_vector("New Layer");
        add.execute(&mut doc).unwrap();
        assert_eq!(doc.root.children.len(), 1);
        let first = &doc.root.children[0];
        assert!(matches!(first, AnyLayer::Vector(_)));
        assert_eq!(first.layer().name, "New Layer");

        // Roll back, then confirm the root is empty again.
        add.rollback(&mut doc).unwrap();
        assert_eq!(doc.root.children.len(), 0);
    }

    #[test]
    fn test_add_layer_description() {
        assert_eq!(
            AddLayerAction::new_vector("Test").description(),
            "Add vector layer"
        );
    }

    #[test]
    fn test_add_multiple_layers() {
        let mut doc = Document::new("Test");
        for name in ["Layer 1", "Layer 2"] {
            AddLayerAction::new_vector(name).execute(&mut doc).unwrap();
        }
        assert_eq!(doc.root.children.len(), 2);
        assert_eq!(doc.root.children[0].layer().name, "Layer 1");
        assert_eq!(doc.root.children[1].layer().name, "Layer 2");
    }
}

Some files were not shown because too many files have changed in this diff Show More