From c689767782029e1732b8f9ba93da208647dbd97c Mon Sep 17 00:00:00 2001 From: Daniel McNab <36049421+DJMcNab@users.noreply.github.com> Date: Sun, 27 Nov 2022 16:02:16 +0000 Subject: [PATCH] Remove the old pipeline (#209) Remove the old hal and pipeline. Update the README to reflect this --- .github/actions/shader/action.yml | 30 - .github/workflows/push-shader.yml | 38 - .github/workflows/shader.yml | 12 - Cargo.lock | 540 +------- Cargo.toml | 14 +- README.md | 41 +- pgpu-render/Cargo.toml | 22 - pgpu-render/build.rs | 30 - pgpu-render/pgpu.h | 238 ---- pgpu-render/src/lib.rs | 464 ------- pgpu-render/src/render.rs | 239 ---- piet-gpu-derive/Cargo.toml | 17 - piet-gpu-derive/src/derive.rs | 192 --- piet-gpu-derive/src/glsl.rs | 669 --------- piet-gpu-derive/src/layout.rs | 244 ---- piet-gpu-derive/src/lib.rs | 30 - piet-gpu-derive/src/parse.rs | 228 --- piet-gpu-hal/Cargo.toml | 33 - piet-gpu-hal/examples/collatz.rs | 39 - piet-gpu-hal/examples/shader/build.ninja | 24 - piet-gpu-hal/examples/shader/collatz.comp | 35 - piet-gpu-hal/src/backend.rs | 309 ----- piet-gpu-hal/src/bestfit.rs | 81 -- piet-gpu-hal/src/bufwrite.rs | 150 -- piet-gpu-hal/src/dx12.rs | 858 ------------ piet-gpu-hal/src/dx12/descriptor.rs | 309 ----- piet-gpu-hal/src/dx12/error.rs | 85 -- piet-gpu-hal/src/dx12/wrappers.rs | 1158 ---------------- piet-gpu-hal/src/hub.rs | 1109 --------------- piet-gpu-hal/src/lib.rs | 213 --- piet-gpu-hal/src/macros.rs | 205 --- piet-gpu-hal/src/metal.rs | 954 ------------- piet-gpu-hal/src/metal/clear.rs | 77 -- piet-gpu-hal/src/metal/timer.rs | 172 --- piet-gpu-hal/src/metal/util.rs | 39 - piet-gpu-hal/src/mux.rs | 925 ------------- piet-gpu-hal/src/vulkan.rs | 1526 --------------------- piet-gpu-types/Cargo.toml | 12 - piet-gpu-types/src/annotated.rs | 45 - piet-gpu-types/src/bins.rs | 12 - piet-gpu-types/src/encoder.rs | 151 -- piet-gpu-types/src/lib.rs | 12 - piet-gpu-types/src/main.rs | 18 - piet-gpu-types/src/pathseg.rs | 22 - piet-gpu-types/src/ptcl.rs | 63 - piet-gpu-types/src/scene.rs | 69 - piet-gpu-types/src/state.rs | 17 - piet-gpu-types/src/test.rs | 33 - piet-gpu-types/src/tile.rs | 26 - piet-gpu-types/src/tilegroup.rs | 39 - piet-gpu/Cargo.toml | 48 - piet-gpu/bin/android.rs | 164 --- piet-gpu/bin/cli.rs | 291 ---- piet-gpu/bin/winit.rs | 196 --- piet-gpu/shader/.clang-format | 5 - piet-gpu/shader/annotated.h | 296 ---- piet-gpu/shader/backdrop.comp | 118 -- piet-gpu/shader/bbox_clear.comp | 29 - piet-gpu/shader/binning.comp | 182 --- piet-gpu/shader/bins.h | 31 - piet-gpu/shader/blend.h | 291 ---- piet-gpu/shader/build.ninja | 118 -- piet-gpu/shader/clip_leaf.comp | 285 ---- piet-gpu/shader/clip_reduce.comp | 146 -- piet-gpu/shader/coarse.comp | 480 ------- piet-gpu/shader/draw_leaf.comp | 181 --- piet-gpu/shader/draw_reduce.comp | 61 - piet-gpu/shader/draw_scan.comp | 75 - piet-gpu/shader/drawtag.h | 41 - piet-gpu/shader/image.png | Bin 345084 -> 0 bytes piet-gpu/shader/kernel4.comp | 301 ---- piet-gpu/shader/mem.h | 145 -- piet-gpu/shader/path_coarse.comp | 289 ---- piet-gpu/shader/pathseg.comp | 291 ---- piet-gpu/shader/pathseg.h | 100 -- piet-gpu/shader/pathtag.h | 49 - piet-gpu/shader/pathtag_reduce.comp | 61 - piet-gpu/shader/pathtag_scan.comp | 75 - piet-gpu/shader/ptcl.h | 426 ------ piet-gpu/shader/scene.h | 350 ----- piet-gpu/shader/setup.h | 103 -- piet-gpu/shader/state.h | 73 - piet-gpu/shader/tile.h | 150 -- piet-gpu/shader/tile_alloc.comp | 112 -- piet-gpu/src/lib.rs | 928 ------------- piet-gpu/src/pico_svg.rs | 139 -- piet-gpu/src/ramp.rs | 129 -- 
piet-gpu/src/render_driver.rs | 318 ----- piet-gpu/src/samples.rs | 355 ----- piet-gpu/src/simple_text.rs | 82 -- piet-gpu/src/stages.rs | 160 --- piet-gpu/src/stages/clip.rs | 101 -- piet-gpu/src/stages/draw.rs | 177 --- piet-gpu/src/stages/path.rs | 356 ----- piet-gpu/src/stages/transform.rs | 36 - piet-wgsl/examples/winit/Cargo.toml | 1 + tests/Cargo.toml | 23 - tests/README.md | 47 - tests/shader/build.ninja | 72 - tests/shader/clear.comp | 26 - tests/shader/linkedlist.comp | 31 - tests/shader/message_passing.comp | 60 - tests/shader/prefix.comp | 226 --- tests/shader/prefix_reduce.comp | 53 - tests/shader/prefix_scan.comp | 77 -- tests/src/clear.rs | 131 -- tests/src/clip.rs | 236 ---- tests/src/config.rs | 82 -- tests/src/draw.rs | 162 --- tests/src/linkedlist.rs | 154 --- tests/src/main.rs | 143 -- tests/src/message_passing.rs | 138 -- tests/src/path.rs | 293 ---- tests/src/prefix.rs | 179 --- tests/src/prefix_tree.rs | 213 --- tests/src/runner.rs | 152 -- tests/src/test_result.rs | 123 -- 117 files changed, 37 insertions(+), 22797 deletions(-) delete mode 100644 .github/actions/shader/action.yml delete mode 100644 .github/workflows/push-shader.yml delete mode 100644 .github/workflows/shader.yml delete mode 100644 pgpu-render/Cargo.toml delete mode 100644 pgpu-render/build.rs delete mode 100644 pgpu-render/pgpu.h delete mode 100644 pgpu-render/src/lib.rs delete mode 100644 pgpu-render/src/render.rs delete mode 100644 piet-gpu-derive/Cargo.toml delete mode 100644 piet-gpu-derive/src/derive.rs delete mode 100644 piet-gpu-derive/src/glsl.rs delete mode 100644 piet-gpu-derive/src/layout.rs delete mode 100644 piet-gpu-derive/src/lib.rs delete mode 100644 piet-gpu-derive/src/parse.rs delete mode 100644 piet-gpu-hal/Cargo.toml delete mode 100644 piet-gpu-hal/examples/collatz.rs delete mode 100644 piet-gpu-hal/examples/shader/build.ninja delete mode 100644 piet-gpu-hal/examples/shader/collatz.comp delete mode 100644 piet-gpu-hal/src/backend.rs delete mode 100644 piet-gpu-hal/src/bestfit.rs delete mode 100644 piet-gpu-hal/src/bufwrite.rs delete mode 100644 piet-gpu-hal/src/dx12.rs delete mode 100644 piet-gpu-hal/src/dx12/descriptor.rs delete mode 100644 piet-gpu-hal/src/dx12/error.rs delete mode 100644 piet-gpu-hal/src/dx12/wrappers.rs delete mode 100644 piet-gpu-hal/src/hub.rs delete mode 100644 piet-gpu-hal/src/lib.rs delete mode 100644 piet-gpu-hal/src/macros.rs delete mode 100644 piet-gpu-hal/src/metal.rs delete mode 100644 piet-gpu-hal/src/metal/clear.rs delete mode 100644 piet-gpu-hal/src/metal/timer.rs delete mode 100644 piet-gpu-hal/src/metal/util.rs delete mode 100644 piet-gpu-hal/src/mux.rs delete mode 100644 piet-gpu-hal/src/vulkan.rs delete mode 100644 piet-gpu-types/Cargo.toml delete mode 100644 piet-gpu-types/src/annotated.rs delete mode 100644 piet-gpu-types/src/bins.rs delete mode 100644 piet-gpu-types/src/encoder.rs delete mode 100644 piet-gpu-types/src/lib.rs delete mode 100644 piet-gpu-types/src/main.rs delete mode 100644 piet-gpu-types/src/pathseg.rs delete mode 100644 piet-gpu-types/src/ptcl.rs delete mode 100644 piet-gpu-types/src/scene.rs delete mode 100644 piet-gpu-types/src/state.rs delete mode 100644 piet-gpu-types/src/test.rs delete mode 100644 piet-gpu-types/src/tile.rs delete mode 100644 piet-gpu-types/src/tilegroup.rs delete mode 100644 piet-gpu/Cargo.toml delete mode 100644 piet-gpu/bin/android.rs delete mode 100644 piet-gpu/bin/cli.rs delete mode 100644 piet-gpu/bin/winit.rs delete mode 100644 piet-gpu/shader/.clang-format delete mode 100644 
piet-gpu/shader/annotated.h delete mode 100644 piet-gpu/shader/backdrop.comp delete mode 100644 piet-gpu/shader/bbox_clear.comp delete mode 100644 piet-gpu/shader/binning.comp delete mode 100644 piet-gpu/shader/bins.h delete mode 100644 piet-gpu/shader/blend.h delete mode 100644 piet-gpu/shader/build.ninja delete mode 100644 piet-gpu/shader/clip_leaf.comp delete mode 100644 piet-gpu/shader/clip_reduce.comp delete mode 100644 piet-gpu/shader/coarse.comp delete mode 100644 piet-gpu/shader/draw_leaf.comp delete mode 100644 piet-gpu/shader/draw_reduce.comp delete mode 100644 piet-gpu/shader/draw_scan.comp delete mode 100644 piet-gpu/shader/drawtag.h delete mode 100644 piet-gpu/shader/image.png delete mode 100644 piet-gpu/shader/kernel4.comp delete mode 100644 piet-gpu/shader/mem.h delete mode 100644 piet-gpu/shader/path_coarse.comp delete mode 100644 piet-gpu/shader/pathseg.comp delete mode 100644 piet-gpu/shader/pathseg.h delete mode 100644 piet-gpu/shader/pathtag.h delete mode 100644 piet-gpu/shader/pathtag_reduce.comp delete mode 100644 piet-gpu/shader/pathtag_scan.comp delete mode 100644 piet-gpu/shader/ptcl.h delete mode 100644 piet-gpu/shader/scene.h delete mode 100644 piet-gpu/shader/setup.h delete mode 100644 piet-gpu/shader/state.h delete mode 100644 piet-gpu/shader/tile.h delete mode 100644 piet-gpu/shader/tile_alloc.comp delete mode 100644 piet-gpu/src/lib.rs delete mode 100644 piet-gpu/src/pico_svg.rs delete mode 100644 piet-gpu/src/ramp.rs delete mode 100644 piet-gpu/src/render_driver.rs delete mode 100644 piet-gpu/src/samples.rs delete mode 100644 piet-gpu/src/simple_text.rs delete mode 100644 piet-gpu/src/stages.rs delete mode 100644 piet-gpu/src/stages/clip.rs delete mode 100644 piet-gpu/src/stages/draw.rs delete mode 100644 piet-gpu/src/stages/path.rs delete mode 100644 piet-gpu/src/stages/transform.rs delete mode 100644 tests/Cargo.toml delete mode 100644 tests/README.md delete mode 100644 tests/shader/build.ninja delete mode 100644 tests/shader/clear.comp delete mode 100644 tests/shader/linkedlist.comp delete mode 100644 tests/shader/message_passing.comp delete mode 100644 tests/shader/prefix.comp delete mode 100644 tests/shader/prefix_reduce.comp delete mode 100644 tests/shader/prefix_scan.comp delete mode 100644 tests/src/clear.rs delete mode 100644 tests/src/clip.rs delete mode 100644 tests/src/config.rs delete mode 100644 tests/src/draw.rs delete mode 100644 tests/src/linkedlist.rs delete mode 100644 tests/src/main.rs delete mode 100644 tests/src/message_passing.rs delete mode 100644 tests/src/path.rs delete mode 100644 tests/src/prefix.rs delete mode 100644 tests/src/prefix_tree.rs delete mode 100644 tests/src/runner.rs delete mode 100644 tests/src/test_result.rs diff --git a/.github/actions/shader/action.yml b/.github/actions/shader/action.yml deleted file mode 100644 index af03b5a..0000000 --- a/.github/actions/shader/action.yml +++ /dev/null @@ -1,30 +0,0 @@ -name: compile shaders - -runs: - using: 'composite' - steps: - - uses: seanmiddleditch/gha-setup-ninja@master - - - name: setup SPIRV tools - # consider install-vulkan-sdk instead - uses: humbletim/setup-vulkan-sdk@v1.2.0 - with: - vulkan-query-version: 1.3.204.0 - vulkan-components: Glslang, SPIRV-Cross - vulkan-use-cache: true - - - name: install DXC - uses: napokue/setup-dxc@v1.0.0 - - - name: 'run shader compilers: piet-gpu' - run: mkdir gen && ninja - shell: pwsh - working-directory: piet-gpu/shader - - name: 'run shader compilers: tests' - run: mkdir gen && ninja - shell: pwsh - working-directory: 
tests/shader - - name: 'run shader compilers: piet-gpu-hal/examples' - run: mkdir gen && ninja - shell: pwsh - working-directory: piet-gpu-hal/examples/shader diff --git a/.github/workflows/push-shader.yml b/.github/workflows/push-shader.yml deleted file mode 100644 index 274147d..0000000 --- a/.github/workflows/push-shader.yml +++ /dev/null @@ -1,38 +0,0 @@ -on: - push: - branches: - - dev - -jobs: - push-shaders: - runs-on: windows-latest - name: compile shaders and push to main - steps: - - uses: actions/checkout@v3 - with: - # need history to make the merge work - # possibly we can optimize this and set - # allow-unrelated-histories on merge - fetch-depth: 0 - - name: prepare repo for compilation - run: | - git fetch origin main - git switch main - git config user.name "Commit by GitHub Action" - git config user.email "nobody@example.com" - git merge dev -m "merge from dev branch - ${{ github.ref_name }}" - sed -i '' '/shader\/gen/d' .gitignore - git add .gitignore - git rm -r --ignore-unmatch piet-gpu/shader/gen - git rm -r --ignore-unmatch tests/shader/gen - git rm -r --ignore-unmatch piet-gpu-hal/examples/shader/gen - - uses: ./.github/actions/shader - - name: commit compiled shaders - continue-on-error: true - run: | - git add piet-gpu/shader/gen - git add tests/shader/gen - git add piet-gpu-hal/examples/shader/gen - git commit -m "commit compiled shaders" - - name: push - run: git push origin main diff --git a/.github/workflows/shader.yml b/.github/workflows/shader.yml deleted file mode 100644 index a5972bc..0000000 --- a/.github/workflows/shader.yml +++ /dev/null @@ -1,12 +0,0 @@ -on: - pull_request: - branches-ignore: - - main - -jobs: - push-shaders: - runs-on: windows-latest - name: compile shaders - steps: - - uses: actions/checkout@v3 - - uses: ./.github/actions/shader diff --git a/Cargo.lock b/Cargo.lock index 1af69e3..c84704c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14,7 +14,7 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom 0.2.7", + "getrandom", "once_cell", "version_check", ] @@ -28,15 +28,6 @@ dependencies = [ "libc", ] -[[package]] -name = "ansi_term" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" -dependencies = [ - "winapi", -] - [[package]] name = "arrayref" version = "0.3.6" @@ -64,28 +55,6 @@ dependencies = [ "libloading", ] -[[package]] -name = "ash-window" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b912285a7c29f3a8f87ca6f55afc48768624e5e33ec17dbd2f2075903f5e35ab" -dependencies = [ - "ash", - "raw-window-handle 0.5.0", - "raw-window-metal", -] - -[[package]] -name = "atty" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi", - "libc", - "winapi", -] - [[package]] name = "autocfg" version = "1.1.0" @@ -164,25 +133,6 @@ dependencies = [ "vec_map", ] -[[package]] -name = "cbindgen" -version = "0.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51e3973b165dc0f435831a9e426de67e894de532754ff7a3f307c03ee5dec7dc" -dependencies = [ - "clap 2.34.0", - "heck", - "indexmap", - "log", - "proc-macro2", - "quote", - "serde", - "serde_json", - "syn", - "tempfile", - "toml", -] - 
[[package]] name = "cc" version = "1.0.73" @@ -201,45 +151,6 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" -[[package]] -name = "clap" -version = "2.34.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" -dependencies = [ - "ansi_term", - "atty", - "bitflags", - "strsim 0.8.0", - "textwrap 0.11.0", - "unicode-width", - "vec_map", -] - -[[package]] -name = "clap" -version = "3.2.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71655c45cb9845d3270c9d6df84ebe72b4dad3c2ba3f7023ad47c144e4e473a5" -dependencies = [ - "atty", - "bitflags", - "clap_lex", - "indexmap", - "strsim 0.10.0", - "termcolor", - "textwrap 0.16.0", -] - -[[package]] -name = "clap_lex" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5" -dependencies = [ - "os_str_bytes", -] - [[package]] name = "cmake" version = "0.1.49" @@ -392,38 +303,14 @@ dependencies = [ "winapi", ] -[[package]] -name = "darling" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d706e75d87e35569db781a9b5e2416cff1236a47ed380831f959382ccd5f858" -dependencies = [ - "darling_core 0.10.2", - "darling_macro 0.10.2", -] - [[package]] name = "darling" version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c" dependencies = [ - "darling_core 0.13.4", - "darling_macro 0.13.4", -] - -[[package]] -name = "darling_core" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0c960ae2da4de88a91b2d920c2a7233b400bc33cb28453a2987822d8392519b" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2", - "quote", - "strsim 0.9.3", - "syn", + "darling_core", + "darling_macro", ] [[package]] @@ -436,18 +323,7 @@ dependencies = [ "ident_case", "proc-macro2", "quote", - "strsim 0.10.0", - "syn", -] - -[[package]] -name = "darling_macro" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b5a2f4ac4969822c62224815d069952656cadc7084fdca9751e6d959189b72" -dependencies = [ - "darling_core 0.10.2", - "quote", + "strsim", "syn", ] @@ -457,7 +333,7 @@ version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ - "darling_core 0.13.4", + "darling_core", "quote", "syn", ] @@ -507,15 +383,6 @@ dependencies = [ "pkg-config", ] -[[package]] -name = "fastrand" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" -dependencies = [ - "instant", -] - [[package]] name = "flate2" version = "1.0.24" @@ -622,17 +489,6 @@ dependencies = [ "byteorder", ] -[[package]] -name = "getrandom" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" -dependencies = [ - "cfg-if", - "libc", - "wasi 0.9.0+wasi-snapshot-preview1", -] - [[package]] name = "getrandom" version = "0.2.7" @@ -641,7 +497,7 @@ checksum = "4eb1a864a501629691edf6c15a593b7a51eebaa1e8468e9ddc623de7c9b58ec6" 
dependencies = [ "cfg-if", "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", ] [[package]] @@ -695,12 +551,6 @@ dependencies = [ "bitflags", ] -[[package]] -name = "half" -version = "1.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" - [[package]] name = "hashbrown" version = "0.12.3" @@ -710,24 +560,6 @@ dependencies = [ "ahash", ] -[[package]] -name = "heck" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" -dependencies = [ - "unicode-segmentation", -] - -[[package]] -name = "hermit-abi" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" -dependencies = [ - "libc", -] - [[package]] name = "hexf-parse" version = "0.2.1" @@ -762,12 +594,6 @@ dependencies = [ "web-sys", ] -[[package]] -name = "itoa" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc" - [[package]] name = "jni-sys" version = "0.3.0" @@ -794,15 +620,6 @@ dependencies = [ "pkg-config", ] -[[package]] -name = "kurbo" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16cb54cd28cb3d2e964d9444ca185676a94fd9b7cce5f02b22c717947ed8e9a2" -dependencies = [ - "arrayvec 0.5.2", -] - [[package]] name = "kurbo" version = "0.9.0" @@ -923,7 +740,7 @@ checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de" dependencies = [ "libc", "log", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", "windows-sys 0.42.0", ] @@ -954,18 +771,6 @@ dependencies = [ "unicode-xid", ] -[[package]] -name = "ndk" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8794322172319b972f528bf90c6b467be0079f1fa82780ffb431088e741a73ab" -dependencies = [ - "jni-sys", - "ndk-sys 0.2.2", - "num_enum", - "thiserror", -] - [[package]] name = "ndk" version = "0.7.0" @@ -974,7 +779,7 @@ checksum = "451422b7e4718271c8b5b3aadf5adedba43dc76312454b387e98fae0fc951aa0" dependencies = [ "bitflags", "jni-sys", - "ndk-sys 0.4.0", + "ndk-sys", "num_enum", "raw-window-handle 0.5.0", "thiserror", @@ -986,20 +791,6 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "27b02d87554356db9e9a873add8782d4ea6e3e58ea071a9adb9a2e8ddb884a8b" -[[package]] -name = "ndk-glue" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5caf0c24d51ac1c905c27d4eda4fa0635bbe0de596b8f79235e0b17a4d29385" -dependencies = [ - "lazy_static", - "libc", - "log", - "ndk 0.3.0", - "ndk-macro 0.2.0", - "ndk-sys 0.2.2", -] - [[package]] name = "ndk-glue" version = "0.7.0" @@ -1008,46 +799,27 @@ checksum = "0434fabdd2c15e0aab768ca31d5b7b333717f03cf02037d5a0a3ff3c278ed67f" dependencies = [ "libc", "log", - "ndk 0.7.0", + "ndk", "ndk-context", - "ndk-macro 0.3.0", - "ndk-sys 0.4.0", + "ndk-macro", + "ndk-sys", "once_cell", "parking_lot", ] -[[package]] -name = "ndk-macro" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05d1c6307dc424d0f65b9b06e94f88248e6305726b14729fd67a5e47b2dc481d" -dependencies = [ - "darling 0.10.2", - "proc-macro-crate 0.1.5", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "ndk-macro" version = 
"0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0df7ac00c4672f9d5aece54ee3347520b7e20f158656c7db2e6de01902eb7a6c" dependencies = [ - "darling 0.13.4", - "proc-macro-crate 1.2.1", + "darling", + "proc-macro-crate", "proc-macro2", "quote", "syn", ] -[[package]] -name = "ndk-sys" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1bcdd74c20ad5d95aacd60ef9ba40fdf77f767051040541df557b7a9b2a2121" - [[package]] name = "ndk-sys" version = "0.4.0" @@ -1103,7 +875,7 @@ version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b0498641e53dd6ac1a4f22547548caa6864cc4933784319cd1775271c5a46ce" dependencies = [ - "proc-macro-crate 1.2.1", + "proc-macro-crate", "proc-macro2", "quote", "syn", @@ -1134,12 +906,6 @@ version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e82dad04139b71a90c080c8463fe0dc7902db5192d939bd0950f074d014339e1" -[[package]] -name = "os_str_bytes" -version = "6.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ff7415e9ae3fff1225851df9e0d9e4e5479f947619774677a63572e55e80eff" - [[package]] name = "parking_lot" version = "0.12.1" @@ -1168,7 +934,7 @@ name = "peniko" version = "0.1.0" source = "git+https://github.com/linebender/peniko#b83821720aa51a3942be5d20c71525a1ae61ac0a" dependencies = [ - "kurbo 0.9.0", + "kurbo", "smallvec", ] @@ -1178,88 +944,6 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" -[[package]] -name = "pgpu-render" -version = "0.1.0" -dependencies = [ - "cbindgen", - "cocoa", - "metal", - "objc", - "piet-gpu", - "piet-gpu-hal", - "piet-scene", -] - -[[package]] -name = "piet-gpu" -version = "0.1.0" -dependencies = [ - "bytemuck", - "clap 3.2.23", - "ndk 0.3.0", - "ndk-glue 0.3.0", - "ndk-sys 0.2.2", - "piet-gpu-hal", - "piet-gpu-types", - "piet-scene", - "png", - "rand 0.8.5", - "raw-window-handle 0.3.4", - "raw-window-handle 0.5.0", - "roxmltree", - "winit 0.27.5", -] - -[[package]] -name = "piet-gpu-derive" -version = "0.0.0" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "piet-gpu-hal" -version = "0.1.0" -dependencies = [ - "ash", - "ash-window", - "bitflags", - "block", - "bytemuck", - "cocoa-foundation", - "core-graphics-types", - "foreign-types 0.3.2", - "metal", - "objc", - "raw-window-handle 0.5.0", - "smallvec", - "winapi", - "wio", -] - -[[package]] -name = "piet-gpu-tests" -version = "0.1.0" -dependencies = [ - "bytemuck", - "clap 3.2.23", - "kurbo 0.7.1", - "piet-gpu", - "piet-gpu-hal", - "rand 0.7.3", -] - -[[package]] -name = "piet-gpu-types" -version = "0.0.0" -dependencies = [ - "half", - "piet-gpu-derive", -] - [[package]] name = "piet-scene" version = "0.1.0" @@ -1312,21 +996,6 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5da3b0203fd7ee5720aa0b5e790b591aa5d3f41c3ed2c34a3a393382198af2f7" -[[package]] -name = "ppv-lite86" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" - -[[package]] -name = "proc-macro-crate" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" -dependencies = [ - "toml", -] - [[package]] name = "proc-macro-crate" 
version = "1.2.1" @@ -1362,93 +1031,12 @@ dependencies = [ "proc-macro2", ] -[[package]] -name = "rand" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" -dependencies = [ - "getrandom 0.1.16", - "libc", - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc", -] - -[[package]] -name = "rand" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" -dependencies = [ - "libc", - "rand_chacha 0.3.1", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_chacha" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" -dependencies = [ - "ppv-lite86", - "rand_core 0.5.1", -] - -[[package]] -name = "rand_chacha" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" -dependencies = [ - "ppv-lite86", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -dependencies = [ - "getrandom 0.1.16", -] - -[[package]] -name = "rand_core" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" -dependencies = [ - "getrandom 0.2.7", -] - -[[package]] -name = "rand_hc" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -dependencies = [ - "rand_core 0.5.1", -] - [[package]] name = "range-alloc" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "63e935c45e09cc6dcf00d2f0b2d630a58f4095320223d47fc68918722f0538b6" -[[package]] -name = "raw-window-handle" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e28f55143d0548dad60bb4fbdc835a3d7ac6acc3324506450c5fdd6e42903a76" -dependencies = [ - "libc", - "raw-window-handle 0.4.3", -] - [[package]] name = "raw-window-handle" version = "0.4.3" @@ -1467,18 +1055,6 @@ dependencies = [ "cty", ] -[[package]] -name = "raw-window-metal" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d18241d631f19847a5f4cc0a3f81d978202c375573ab7d90ab14dcf0a9262ec" -dependencies = [ - "cocoa", - "core-graphics", - "objc", - "raw-window-handle 0.5.0", -] - [[package]] name = "redox_syscall" version = "0.2.16" @@ -1488,15 +1064,6 @@ dependencies = [ "bitflags", ] -[[package]] -name = "remove_dir_all" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" -dependencies = [ - "winapi", -] - [[package]] name = "renderdoc-sys" version = "0.7.1" @@ -1518,12 +1085,6 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" -[[package]] -name = "ryu" -version = "1.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09" - [[package]] name = 
"safe_arch" version = "0.5.2" @@ -1562,9 +1123,6 @@ name = "serde" version = "1.0.147" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d193d69bae983fc11a79df82342761dfbf28a99fc8d203dca4c3c1b590948965" -dependencies = [ - "serde_derive", -] [[package]] name = "serde_derive" @@ -1577,17 +1135,6 @@ dependencies = [ "syn", ] -[[package]] -name = "serde_json" -version = "1.0.87" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ce777b7b150d76b9cf60d28b55f5847135a003f7d7350c6be7a773508ce7d45" -dependencies = [ - "itoa", - "ryu", - "serde", -] - [[package]] name = "servo-fontconfig" version = "0.5.1" @@ -1659,18 +1206,6 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" -[[package]] -name = "strsim" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" - -[[package]] -name = "strsim" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6446ced80d6c486436db5c078dde11a9f73d42b57fb273121e160b84f63d894c" - [[package]] name = "strsim" version = "0.10.0" @@ -1688,20 +1223,6 @@ dependencies = [ "unicode-ident", ] -[[package]] -name = "tempfile" -version = "3.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" -dependencies = [ - "cfg-if", - "fastrand", - "libc", - "redox_syscall", - "remove_dir_all", - "winapi", -] - [[package]] name = "termcolor" version = "1.1.3" @@ -1711,21 +1232,6 @@ dependencies = [ "winapi-util", ] -[[package]] -name = "textwrap" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" -dependencies = [ - "unicode-width", -] - -[[package]] -name = "textwrap" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" - [[package]] name = "thiserror" version = "1.0.37" @@ -1786,12 +1292,6 @@ version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3" -[[package]] -name = "unicode-segmentation" -version = "1.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fdbf052a0783de01e944a6ce7a8cb939e295b1e7be835a1112c3b9a7f047a5a" - [[package]] name = "unicode-width" version = "0.1.10" @@ -1816,12 +1316,6 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" -[[package]] -name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" - [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -2228,8 +1722,8 @@ dependencies = [ "libc", "log", "mio", - "ndk 0.7.0", - "ndk-glue 0.7.0", + "ndk", + "ndk-glue", "objc", "once_cell", "parking_lot", diff --git a/Cargo.toml b/Cargo.toml index 8d022fb..b9146a1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,19 +1,9 @@ [workspace] resolver = "2" -members = [ - "pgpu-render", - "piet-gpu", - "piet-gpu-derive", - "piet-gpu-hal", - 
"piet-gpu-types", - "piet-scene", - "piet-wgsl", - "piet-wgsl/examples/winit", - "tests", -] +members = ["piet-scene", "piet-wgsl", "piet-wgsl/examples/winit"] [patch.crates-io] # Required for metal support to work on wgpu # TODO: remove when wgpu is upgraded to 0.15 -naga = { git = "https://github.com/gfx-rs/naga", rev="ddcd5d3121150b2b1beee6e54e9125ff31aaa9a2" } +naga = { git = "https://github.com/gfx-rs/naga", rev = "ddcd5d3121150b2b1beee6e54e9125ff31aaa9a2" } diff --git a/README.md b/README.md index c7eb64e..d379c74 100644 --- a/README.md +++ b/README.md @@ -4,17 +4,20 @@ This repo contains the new prototype for a new compute-centric 2D GPU renderer. It succeeds the previous prototype, [piet-metal]. +The latest version is a middleware for [`wgpu`]. This is used as the rendering backend for +[xilem], a UI toolkit. + + + ## Goals The main goal is to answer research questions about the future of 2D rendering: -* Is a compute-centered approach better than rasterization ([Direct2D])? How much so? +- Is a compute-centered approach better than rasterization ([Direct2D])? How much so? -* To what extent do "advanced" GPU features (subgroups, descriptor arrays) help? +- To what extent do "advanced" GPU features (subgroups, descriptor arrays) help? -* Can we improve quality and extend the imaging model in useful ways? - -Another goal is to explore a standards-based, portable approach to GPU compute. +- Can we improve quality and extend the imaging model in useful ways? ## Blogs and other writing @@ -22,17 +25,9 @@ Much of the research progress on piet-gpu is documented in blog entries. See [do There is a much larger and detailed [vision](doc/vision.md) that explains the longer-term goals of the project, and how we might get there. -### Why not gfx-hal? +## History -It makes a lot of sense to use gfx-hal, as it addresses the ability to write kernel and runtime code once and run it portably. But in exploring it I've found some points of friction, especially in using more "advanced" features. To serve the research goals, I'm enjoying using Vulkan directly, through [ash], which I've found does a good job tracking Vulkan releases. One example is experimenting with `VK_EXT_subgroup_size_control`. - -The hal layer in this repo is strongly inspired by gfx-hal, but with some differences. One is that we're shooting for a compile-time pipeline to generate GPU IR on DX12 and Metal, while gfx-hal ships [SPIRV-Cross] in the runtime. To access [Shader Model 6], that would also require bundling [DXC] at runtime, which is not yet implemented (though it's certainly possible). - -### Why not wgpu? - -The case for wgpu is also strong, but it's even less mature. I'd love to see it become a solid foundation, at which point I'd use it as the main integration with [Druid]. - -In short, the goal is to facilitate the research now, collect the data, and then use that to choose a best path for shipping later. +A prior incarnation used a custom cross-API hal. An archive of this version can be found in the branches [`custom-hal-archive-with-shaders`] and [`custom-hal-archive`]. ## License and contributions. @@ -42,14 +37,12 @@ In addition, the shaders are provided under the terms of the [Unlicense](UNLICEN The dx12 backend was adapted from piet-dx12 by Brian Merchant. -Contributions are welcome by pull request. The [Rust code of conduct] applies. Pull requests should be against the `dev` branch; see [shader_compilation.md] for explanation and details. +Contributions are welcome by pull request. 
The [Rust code of conduct] applies. [piet-metal]: https://github.com/linebender/piet-metal -[Direct2D]: https://docs.microsoft.com/en-us/windows/win32/direct2d/direct2d-portal -[ash]: https://github.com/MaikKlein/ash -[SPIRV-Cross]: https://github.com/KhronosGroup/SPIRV-Cross -[Shader Model 6]: https://docs.microsoft.com/en-us/windows/win32/direct3dhlsl/hlsl-shader-model-6-0-features-for-direct3d-12 -[DXC]: https://github.com/microsoft/DirectXShaderCompiler -[Druid]: https://github.com/linebender/druid -[Rust code of conduct]: https://www.rust-lang.org/policies/code-of-conduct -[shader_compilation.md]: ./doc/shader_compilation.md +[direct2d]: https://docs.microsoft.com/en-us/windows/win32/direct2d/direct2d-portal +[`wgpu`]: https://wgpu.rs/ +[xilem]: https://github.com/linebender/xilem/ +[rust code of conduct]: https://www.rust-lang.org/policies/code-of-conduct +[`custom-hal-archive-with-shaders`]: https://github.com/linebender/piet-gpu/tree/custom-hal-archive-with-shaders +[`custom-hal-archive`]: https://github.com/linebender/piet-gpu/tree/custom-hal-archive diff --git a/pgpu-render/Cargo.toml b/pgpu-render/Cargo.toml deleted file mode 100644 index 8b2140a..0000000 --- a/pgpu-render/Cargo.toml +++ /dev/null @@ -1,22 +0,0 @@ -[package] -name = "pgpu-render" -version = "0.1.0" -description = "C interface for glyph rendering using piet-gpu." -license = "MIT/Apache-2.0" -edition = "2021" - -[lib] -crate-type = ["cdylib"] - -[dependencies] -piet-gpu = { path = "../piet-gpu" } -piet-gpu-hal = { path = "../piet-gpu-hal" } -piet-scene = { path = "../piet-scene" } - -[target.'cfg(all(not(target_arch = "wasm32"), any(target_os = "ios", target_os = "macos")))'.dependencies] -metal = "0.24" -objc = "0.2.7" -cocoa = "0.24.0" - -[build-dependencies] -cbindgen = "0.20.0" diff --git a/pgpu-render/build.rs b/pgpu-render/build.rs deleted file mode 100644 index ecbb647..0000000 --- a/pgpu-render/build.rs +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2022 The piet-gpu authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Also licensed under MIT license, at your choice. - -extern crate cbindgen; - -use std::env; - -fn main() { - let crate_dir = env::var("CARGO_MANIFEST_DIR").unwrap(); - cbindgen::Builder::new() - .with_crate(crate_dir) - .with_define("target_os", "ios", "__APPLE__") - .with_header("/** Automatically generated from pgpu-render/src/lib.rs with cbindgen. **/") - .generate() - .expect("Unable to generate bindings") - .write_to_file("pgpu.h"); -} diff --git a/pgpu-render/pgpu.h b/pgpu-render/pgpu.h deleted file mode 100644 index 7d0976e..0000000 --- a/pgpu-render/pgpu.h +++ /dev/null @@ -1,238 +0,0 @@ -/** Automatically generated from pgpu-render/src/lib.rs with cbindgen. 
**/ - -#include -#include -#include -#include -#include - -enum class PgpuBrushKind { - Solid = 0, -}; - -enum class PgpuFill { - NonZero = 0, - EvenOdd = 1, -}; - -enum class PgpuPathVerb { - MoveTo = 0, - LineTo = 1, - QuadTo = 2, - CurveTo = 3, - Close = 4, -}; - -/// Encoded (possibly color) outline for a glyph. -struct PgpuGlyph; - -/// Context for loading and scaling glyphs. -struct PgpuGlyphContext; - -/// Context for loading a scaling glyphs from a specific font. -struct PgpuGlyphProvider; - -/// State and resources for rendering a scene. -struct PgpuRenderer; - -/// Encoded streams and resources describing a vector graphics scene. -struct PgpuScene; - -/// Builder for constructing an encoded scene. -struct PgpuSceneBuilder; - -/// Encoded streams and resources describing a vector graphics scene fragment. -struct PgpuSceneFragment; - -/// Affine transformation matrix. -struct PgpuTransform { - float xx; - float yx; - float xy; - float yy; - float dx; - float dy; -}; - -struct PgpuColor { - uint8_t r; - uint8_t g; - uint8_t b; - uint8_t a; -}; - -union PgpuBrushData { - PgpuColor solid; -}; - -struct PgpuBrush { - PgpuBrushKind kind; - PgpuBrushData data; -}; - -struct PgpuPoint { - float x; - float y; -}; - -struct PgpuPathElement { - PgpuPathVerb verb; - PgpuPoint points[3]; -}; - -struct PgpuPathIter { - void *context; - bool (*next_element)(void*, PgpuPathElement*); -}; - -/// Tag and value for a font variation axis. -struct PgpuFontVariation { - /// Tag that specifies the axis. - uint32_t tag; - /// Requested setting for the axis. - float value; -}; - -/// Description of a font. -struct PgpuFontDesc { - /// Pointer to the context of the font file. - const uint8_t *data; - /// Size of the font file data in bytes. - uintptr_t data_len; - /// Index of the requested font in the font file. - uint32_t index; - /// Unique identifier for the font. - uint64_t unique_id; - /// Requested size in pixels per em unit. Set to 0.0 for - /// unscaled outlines. - float ppem; - /// Pointer to array of font variation settings. - const PgpuFontVariation *variations; - /// Number of font variation settings. - uintptr_t variations_len; -}; - -/// Rectangle defined by minimum and maximum points. -struct PgpuRect { - float x0; - float y0; - float x1; - float y1; -}; - -extern "C" { - -#if defined(__APPLE__) -/// Creates a new piet-gpu renderer for the specified Metal device and -/// command queue. -/// -/// device: MTLDevice* -/// queue: MTLCommandQueue* -PgpuRenderer *pgpu_renderer_new(void *device, void *queue); -#endif - -#if defined(__APPLE__) -/// Renders a prepared scene into a texture target. Commands for rendering are -/// recorded into the specified command buffer. Returns an id representing -/// resources that may have been allocated during this process. After the -/// command buffer has been retired, call `pgpu_renderer_release` with this id -/// to drop any associated resources. -/// -/// target: MTLTexture* -/// cmdbuf: MTLCommandBuffer* -uint32_t pgpu_renderer_render(PgpuRenderer *renderer, - const PgpuScene *scene, - void *target, - void *cmdbuf); -#endif - -/// Releases the internal resources associated with the specified id from a -/// previous render operation. -void pgpu_renderer_release(PgpuRenderer *renderer, uint32_t id); - -/// Destroys the piet-gpu renderer. -void pgpu_renderer_destroy(PgpuRenderer *renderer); - -/// Creates a new, empty piet-gpu scene. -PgpuScene *pgpu_scene_new(); - -/// Destroys the piet-gpu scene. 
-void pgpu_scene_destroy(PgpuScene *scene); - -/// Creates a new, empty piet-gpu scene fragment. -PgpuSceneFragment *pgpu_scene_fragment_new(); - -/// Destroys the piet-gpu scene fragment. -void pgpu_scene_fragment_destroy(PgpuSceneFragment *fragment); - -/// Creates a new builder for filling a piet-gpu scene. The specified scene -/// should not be accessed while the builder is live. -PgpuSceneBuilder *pgpu_scene_builder_for_scene(PgpuScene *scene); - -/// Creates a new builder for filling a piet-gpu scene fragment. The specified -/// scene fragment should not be accessed while the builder is live. -PgpuSceneBuilder *pgpu_scene_builder_for_fragment(PgpuSceneFragment *fragment); - -/// Adds a glyph with the specified transform to the underlying scene. -void pgpu_scene_builder_add_glyph(PgpuSceneBuilder *builder, - const PgpuGlyph *glyph, - const PgpuTransform *transform); - -/// Sets the current absolute transform for the scene builder. -void pgpu_scene_builder_transform(PgpuSceneBuilder *builder, const PgpuTransform *transform); - -/// Fills a path using the specified fill style and brush. If the brush -/// parameter is nullptr, a solid color white brush will be used. The -/// brush_transform may be nullptr. -void pgpu_scene_builder_fill_path(PgpuSceneBuilder *builder, - PgpuFill fill, - const PgpuBrush *brush, - const PgpuTransform *brush_transform, - PgpuPathIter *path); - -/// Appends a scene fragment to the underlying scene or fragment. The -/// transform parameter represents an absolute transform to apply to -/// the fragment. If it is nullptr, the fragment will be appended to -/// the scene with an assumed identity transform regardless of the -/// current transform state. -void pgpu_scene_builder_append_fragment(PgpuSceneBuilder *builder, - const PgpuSceneFragment *fragment, - const PgpuTransform *transform); - -/// Finalizes the scene builder, making the underlying scene ready for -/// rendering. This takes ownership and consumes the builder. -void pgpu_scene_builder_finish(PgpuSceneBuilder *builder); - -/// Creates a new context for loading glyph outlines. -PgpuGlyphContext *pgpu_glyph_context_new(); - -/// Destroys the glyph context. -void pgpu_glyph_context_destroy(PgpuGlyphContext *gcx); - -/// Creates a new glyph provider for the specified glyph context and font -/// descriptor. May return nullptr if the font data is invalid. Only one glyph -/// provider may be live for a glyph context. -PgpuGlyphProvider *pgpu_glyph_provider_new(PgpuGlyphContext *gcx, const PgpuFontDesc *font); - -/// Returns an encoded outline for the specified glyph provider and glyph id. -/// May return nullptr if the requested glyph is not available. -PgpuGlyph *pgpu_glyph_provider_get(PgpuGlyphProvider *provider, uint16_t gid); - -/// Returns an encoded color outline for the specified glyph provider, color -/// palette index and glyph id. May return nullptr if the requested glyph is -/// not available. -PgpuGlyph *pgpu_glyph_provider_get_color(PgpuGlyphProvider *provider, - uint16_t palette_index, - uint16_t gid); - -/// Destroys the glyph provider. -void pgpu_glyph_provider_destroy(PgpuGlyphProvider *provider); - -/// Computes the bounding box for the glyph after applying the specified -/// transform. -PgpuRect pgpu_glyph_bbox(const PgpuGlyph *glyph, const float (*transform)[6]); - -/// Destroys the glyph. 
-void pgpu_glyph_destroy(PgpuGlyph *glyph); - -} // extern "C" diff --git a/pgpu-render/src/lib.rs b/pgpu-render/src/lib.rs deleted file mode 100644 index b699840..0000000 --- a/pgpu-render/src/lib.rs +++ /dev/null @@ -1,464 +0,0 @@ -// Copyright 2022 The piet-gpu authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Also licensed under MIT license, at your choice. - -// We only really have implementations for IOS targets so far -// Note that this is the same cfg that wgpu uses for metal support -#![cfg_attr( - not(all( - not(target_arch = "wasm32"), - any(target_os = "ios", target_os = "macos") - )), - allow(unused) -)] - -mod render; - -use piet_scene::kurbo::{Affine, PathEl, Point}; -use piet_scene::{Brush, Color, Fill}; -use render::*; -use std::ffi::c_void; -use std::mem::transmute; - -/// Creates a new piet-gpu renderer for the specified Metal device and -/// command queue. -/// -/// device: MTLDevice* -/// queue: MTLCommandQueue* -#[no_mangle] -#[cfg(all( - not(target_arch = "wasm32"), - any(target_os = "ios", target_os = "macos") -))] -pub unsafe extern "C" fn pgpu_renderer_new( - device: *mut c_void, - queue: *mut c_void, -) -> *mut PgpuRenderer { - let device: &metal::DeviceRef = transmute(device); - let queue: &metal::CommandQueueRef = transmute(queue); - Box::into_raw(Box::new(PgpuRenderer::new(device, queue))) -} - -/// Renders a prepared scene into a texture target. Commands for rendering are -/// recorded into the specified command buffer. Returns an id representing -/// resources that may have been allocated during this process. After the -/// command buffer has been retired, call `pgpu_renderer_release` with this id -/// to drop any associated resources. -/// -/// target: MTLTexture* -/// cmdbuf: MTLCommandBuffer* -#[no_mangle] -#[cfg(all( - not(target_arch = "wasm32"), - any(target_os = "ios", target_os = "macos") -))] -pub unsafe extern "C" fn pgpu_renderer_render( - renderer: *mut PgpuRenderer, - scene: *const PgpuScene, - target: *mut c_void, - cmdbuf: *mut c_void, -) -> u32 { - let cmdbuf: &metal::CommandBufferRef = transmute(cmdbuf); - let target: &metal::TextureRef = transmute(target); - (*renderer).render(&*scene, cmdbuf, target) -} - -/// Releases the internal resources associated with the specified id from a -/// previous render operation. -#[no_mangle] -pub unsafe extern "C" fn pgpu_renderer_release(renderer: *mut PgpuRenderer, id: u32) { - (*renderer).release(id); -} - -/// Destroys the piet-gpu renderer. -#[no_mangle] -pub unsafe extern "C" fn pgpu_renderer_destroy(renderer: *mut PgpuRenderer) { - Box::from_raw(renderer); -} - -/// Creates a new, empty piet-gpu scene. -#[no_mangle] -pub unsafe extern "C" fn pgpu_scene_new() -> *mut PgpuScene { - Box::into_raw(Box::new(PgpuScene::new())) -} - -/// Destroys the piet-gpu scene. -#[no_mangle] -pub unsafe extern "C" fn pgpu_scene_destroy(scene: *mut PgpuScene) { - Box::from_raw(scene); -} - -/// Creates a new, empty piet-gpu scene fragment. 
-#[no_mangle] -pub unsafe extern "C" fn pgpu_scene_fragment_new() -> *mut PgpuSceneFragment { - Box::into_raw(Box::new(PgpuSceneFragment::new())) -} - -/// Destroys the piet-gpu scene fragment. -#[no_mangle] -pub unsafe extern "C" fn pgpu_scene_fragment_destroy(fragment: *mut PgpuSceneFragment) { - Box::from_raw(fragment); -} - -#[derive(Copy, Clone, PartialEq, Debug)] -#[repr(C)] -pub enum PgpuPathVerb { - MoveTo = 0, - LineTo = 1, - QuadTo = 2, - CurveTo = 3, - Close = 4, -} - -#[derive(Copy, Clone, Default, Debug)] -#[repr(C)] -pub struct PgpuPoint { - pub x: f32, - pub y: f32, -} - -/// Rectangle defined by minimum and maximum points. -#[derive(Copy, Clone, Default)] -#[repr(C)] -pub struct PgpuRect { - pub x0: f32, - pub y0: f32, - pub x1: f32, - pub y1: f32, -} - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct PgpuPathElement { - pub verb: PgpuPathVerb, - pub points: [PgpuPoint; 3], -} - -#[derive(Copy, Clone)] -#[repr(C)] -pub struct PgpuPathIter { - pub context: *mut c_void, - pub next_element: extern "C" fn(*mut c_void, *mut PgpuPathElement) -> bool, -} - -#[derive(Copy, Clone, PartialEq, Debug)] -#[repr(C)] -pub enum PgpuFill { - NonZero = 0, - EvenOdd = 1, -} - -#[derive(Copy, Clone, PartialEq, Debug)] -#[repr(C)] -pub enum PgpuBrushKind { - Solid = 0, -} - -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct PgpuColor { - pub r: u8, - pub g: u8, - pub b: u8, - pub a: u8, -} - -#[repr(C)] -pub union PgpuBrushData { - pub solid: PgpuColor, -} - -#[repr(C)] -pub struct PgpuBrush { - pub kind: PgpuBrushKind, - pub data: PgpuBrushData, -} - -/// Affine transformation matrix. -#[derive(Copy, Clone, Debug)] -#[repr(C)] -pub struct PgpuTransform { - pub xx: f32, - pub yx: f32, - pub xy: f32, - pub yy: f32, - pub dx: f32, - pub dy: f32, -} - -impl From for Affine { - fn from(xform: PgpuTransform) -> Self { - Affine::new([ - xform.xx as f64, - xform.yx as f64, - xform.xy as f64, - xform.yy as f64, - xform.dx as f64, - xform.dy as f64, - ]) - } -} - -/// Creates a new builder for filling a piet-gpu scene. The specified scene -/// should not be accessed while the builder is live. -#[no_mangle] -pub unsafe extern "C" fn pgpu_scene_builder_for_scene( - scene: *mut PgpuScene, -) -> *mut PgpuSceneBuilder<'static> { - Box::into_raw(Box::new((*scene).builder())) -} - -/// Creates a new builder for filling a piet-gpu scene fragment. The specified -/// scene fragment should not be accessed while the builder is live. -#[no_mangle] -pub unsafe extern "C" fn pgpu_scene_builder_for_fragment( - fragment: *mut PgpuSceneFragment, -) -> *mut PgpuSceneBuilder<'static> { - Box::into_raw(Box::new((*fragment).builder())) -} - -/// Adds a glyph with the specified transform to the underlying scene. 
-#[no_mangle] -pub unsafe extern "C" fn pgpu_scene_builder_add_glyph( - builder: *mut PgpuSceneBuilder<'static>, - glyph: *const PgpuGlyph, - transform: *const PgpuTransform, -) { - (*builder).add_glyph(&*glyph, &(*transform).into()); -} - -impl Iterator for PgpuPathIter { - type Item = PathEl; - - fn next(&mut self) -> Option { - let mut el = PgpuPathElement { - verb: PgpuPathVerb::MoveTo, - points: [PgpuPoint::default(); 3], - }; - fn conv_pt(pt: PgpuPoint) -> Point { - Point::new(pt.x as f64, pt.y as f64) - } - if (self.next_element)(self.context, &mut el as _) { - let p = &el.points; - Some(match el.verb { - PgpuPathVerb::MoveTo => PathEl::MoveTo(conv_pt(p[0])), - PgpuPathVerb::LineTo => PathEl::LineTo(conv_pt(p[0])), - PgpuPathVerb::QuadTo => PathEl::QuadTo(conv_pt(p[0]), conv_pt(p[1])), - PgpuPathVerb::CurveTo => { - PathEl::CurveTo(conv_pt(p[0]), conv_pt(p[1]), conv_pt(p[2])) - } - PgpuPathVerb::Close => PathEl::ClosePath, - }) - } else { - None - } - } -} - -/// Sets the current absolute transform for the scene builder. -#[no_mangle] -pub unsafe extern "C" fn pgpu_scene_builder_transform( - builder: *mut PgpuSceneBuilder<'static>, - transform: *const PgpuTransform, -) { - if let Some(transform) = transform.as_ref() { - (*builder).transform = (*transform).into(); - } -} - -/// Fills a path using the specified fill style and brush. If the brush -/// parameter is nullptr, a solid color white brush will be used. The -/// brush_transform may be nullptr. -#[no_mangle] -pub unsafe extern "C" fn pgpu_scene_builder_fill_path( - builder: *mut PgpuSceneBuilder<'static>, - fill: PgpuFill, - brush: *const PgpuBrush, - brush_transform: *const PgpuTransform, - path: *mut PgpuPathIter, -) { - let fill = match fill { - PgpuFill::NonZero => Fill::NonZero, - PgpuFill::EvenOdd => Fill::EvenOdd, - }; - let brush = if brush.is_null() { - Brush::Solid(Color::rgb8(255, 255, 255)) - } else { - match (*brush).kind { - PgpuBrushKind::Solid => { - let color = &(*brush).data.solid; - Brush::Solid(Color::rgba8(color.r, color.g, color.b, color.a)) - } - } - }; - let brush_transform = if brush_transform.is_null() { - None - } else { - Some((*brush_transform).into()) - }; - let path_els = (*path).collect::>(); - (*builder).builder.fill( - fill, - (*builder).transform, - &brush, - brush_transform, - &&path_els[..], - ); -} - -/// Appends a scene fragment to the underlying scene or fragment. The -/// transform parameter represents an absolute transform to apply to -/// the fragment. If it is nullptr, the fragment will be appended to -/// the scene with an assumed identity transform regardless of the -/// current transform state. -#[no_mangle] -pub unsafe extern "C" fn pgpu_scene_builder_append_fragment( - builder: *mut PgpuSceneBuilder<'static>, - fragment: *const PgpuSceneFragment, - transform: *const PgpuTransform, -) { - let transform = if transform.is_null() { - None - } else { - Some((*transform).into()) - }; - (*builder).builder.append(&(*fragment).0, transform); -} - -/// Finalizes the scene builder, making the underlying scene ready for -/// rendering. This takes ownership and consumes the builder. -#[no_mangle] -pub unsafe extern "C" fn pgpu_scene_builder_finish(builder: *mut PgpuSceneBuilder<'static>) { - let builder = Box::from_raw(builder); - builder.finish(); -} - -/// Creates a new context for loading glyph outlines. -#[no_mangle] -pub unsafe extern "C" fn pgpu_glyph_context_new() -> *mut PgpuGlyphContext { - Box::into_raw(Box::new(PgpuGlyphContext::new())) -} - -/// Destroys the glyph context. 
-#[no_mangle] -pub unsafe extern "C" fn pgpu_glyph_context_destroy(gcx: *mut PgpuGlyphContext) { - Box::from_raw(gcx); -} - -/// Description of a font. -#[derive(Copy, Clone)] -#[repr(C)] -pub struct PgpuFontDesc { - /// Pointer to the context of the font file. - data: *const u8, - /// Size of the font file data in bytes. - data_len: usize, - /// Index of the requested font in the font file. - index: u32, - /// Unique identifier for the font. - unique_id: u64, - /// Requested size in pixels per em unit. Set to 0.0 for - /// unscaled outlines. - ppem: f32, - /// Pointer to array of font variation settings. - variations: *const PgpuFontVariation, - /// Number of font variation settings. - variations_len: usize, -} - -/// Creates a new glyph provider for the specified glyph context and font -/// descriptor. May return nullptr if the font data is invalid. Only one glyph -/// provider may be live for a glyph context. -#[no_mangle] -pub unsafe extern "C" fn pgpu_glyph_provider_new( - gcx: *mut PgpuGlyphContext, - font: *const PgpuFontDesc, -) -> *mut PgpuGlyphProvider<'static> { - let font = &*font; - let font_data = std::slice::from_raw_parts(font.data, font.data_len); - let variations = std::slice::from_raw_parts(font.variations, font.variations_len); - if let Some(provider) = (*gcx).new_provider( - font_data, - font.index, - font.unique_id, - font.ppem, - false, - variations, - ) { - Box::into_raw(Box::new(provider)) - } else { - std::ptr::null_mut() - } -} - -/// Returns an encoded outline for the specified glyph provider and glyph id. -/// May return nullptr if the requested glyph is not available. -#[no_mangle] -pub unsafe extern "C" fn pgpu_glyph_provider_get( - provider: *mut PgpuGlyphProvider, - gid: u16, -) -> *mut PgpuGlyph { - if let Some(glyph) = (*provider).get(gid) { - Box::into_raw(Box::new(glyph)) - } else { - std::ptr::null_mut() - } -} - -/// Returns an encoded color outline for the specified glyph provider, color -/// palette index and glyph id. May return nullptr if the requested glyph is -/// not available. -#[no_mangle] -pub unsafe extern "C" fn pgpu_glyph_provider_get_color( - provider: *mut PgpuGlyphProvider, - palette_index: u16, - gid: u16, -) -> *mut PgpuGlyph { - if let Some(glyph) = (*provider).get_color(palette_index, gid) { - Box::into_raw(Box::new(glyph)) - } else { - std::ptr::null_mut() - } -} - -/// Destroys the glyph provider. -#[no_mangle] -pub unsafe extern "C" fn pgpu_glyph_provider_destroy(provider: *mut PgpuGlyphProvider) { - Box::from_raw(provider); -} - -/// Computes the bounding box for the glyph after applying the specified -/// transform. -#[no_mangle] -pub unsafe extern "C" fn pgpu_glyph_bbox( - glyph: *const PgpuGlyph, - transform: &[f32; 6], -) -> PgpuRect { - let transform: PgpuTransform = std::mem::transmute(*transform); - let transform = transform.into(); - let rect = (*glyph).bbox(Some(transform)); - PgpuRect { - x0: rect.min_x() as f32, - y0: rect.min_y() as f32, - x1: rect.max_x() as f32, - y1: rect.max_y() as f32, - } -} - -/// Destroys the glyph. -#[no_mangle] -pub unsafe extern "C" fn pgpu_glyph_destroy(glyph: *mut PgpuGlyph) { - Box::from_raw(glyph); -} diff --git a/pgpu-render/src/render.rs b/pgpu-render/src/render.rs deleted file mode 100644 index 4bedf5c..0000000 --- a/pgpu-render/src/render.rs +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright 2022 The piet-gpu authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Also licensed under MIT license, at your choice. - -use piet_gpu::{PixelFormat, RenderConfig}; -use piet_gpu_hal::{QueryPool, Session}; -use piet_scene::glyph::pinot::{types::Tag, FontDataRef}; -use piet_scene::glyph::{GlyphContext, GlyphProvider}; -use piet_scene::kurbo::{Affine, Point, Rect}; -use piet_scene::{Scene, SceneFragment}; - -/// State and resources for rendering a scene. -pub struct PgpuRenderer { - session: Session, - pgpu_renderer: Option, - query_pool: QueryPool, - width: u32, - height: u32, - is_color: bool, -} - -impl PgpuRenderer { - #[cfg(all( - not(target_arch = "wasm32"), - any(target_os = "ios", target_os = "macos") - ))] - pub fn new(device: &metal::DeviceRef, queue: &metal::CommandQueueRef) -> Self { - let piet_device = piet_gpu_hal::Device::new_from_raw_mtl(device, &queue); - let session = Session::new(piet_device); - let query_pool = session.create_query_pool(12).unwrap(); - Self { - session, - pgpu_renderer: None, - query_pool, - width: 0, - height: 0, - is_color: false, - } - } - - #[cfg(all( - not(target_arch = "wasm32"), - any(target_os = "ios", target_os = "macos") - ))] - pub fn render( - &mut self, - scene: &PgpuScene, - cmdbuf: &metal::CommandBufferRef, - target: &metal::TextureRef, - ) -> u32 { - let is_color = target.pixel_format() != metal::MTLPixelFormat::R8Unorm; - let width = target.width() as u32; - let height = target.height() as u32; - if self.pgpu_renderer.is_none() - || self.width != width - || self.height != height - || self.is_color != is_color - { - self.width = width; - self.height = height; - self.is_color = is_color; - let format = if is_color { - PixelFormat::Rgba8 - } else { - PixelFormat::A8 - }; - let config = RenderConfig::new(width as usize, height as usize).pixel_format(format); - unsafe { - self.pgpu_renderer = - piet_gpu::Renderer::new_from_config(&self.session, config, 1).ok(); - } - } - unsafe { - let mut cmd_buf = self.session.cmd_buf_from_raw_mtl(cmdbuf); - let dst_image = self - .session - .image_from_raw_mtl(target, self.width, self.height); - if let Some(renderer) = &mut self.pgpu_renderer { - renderer.upload_scene(&scene.0, 0).unwrap(); - renderer.record(&mut cmd_buf, &self.query_pool, 0); - // TODO later: we can bind the destination image and avoid the copy. - cmd_buf.blit_image(&renderer.image_dev, &dst_image); - cmd_buf.flush(); - } - } - 0 - } - - pub fn release(&mut self, _id: u32) { - // TODO: worry about freeing resources / managing overlapping submits - } -} - -/// Encoded streams and resources describing a vector graphics scene. -pub struct PgpuScene(pub Scene); - -impl PgpuScene { - pub fn new() -> Self { - Self(Scene::default()) - } - - pub fn builder(&mut self) -> PgpuSceneBuilder { - PgpuSceneBuilder { - builder: piet_scene::SceneBuilder::for_scene(&mut self.0), - transform: Affine::IDENTITY, - } - } -} - -/// Encoded streams and resources describing a vector graphics scene fragment. 
-pub struct PgpuSceneFragment(pub SceneFragment); - -impl PgpuSceneFragment { - pub fn new() -> Self { - Self(SceneFragment::default()) - } - - pub fn builder(&mut self) -> PgpuSceneBuilder { - PgpuSceneBuilder { - builder: piet_scene::SceneBuilder::for_fragment(&mut self.0), - transform: Affine::IDENTITY, - } - } -} - -/// Builder for constructing an encoded scene. -pub struct PgpuSceneBuilder<'a> { - pub builder: piet_scene::SceneBuilder<'a>, - pub transform: Affine, -} - -impl<'a> PgpuSceneBuilder<'a> { - pub fn add_glyph(&mut self, glyph: &PgpuGlyph, transform: &Affine) { - self.builder.append(&glyph.fragment, Some(*transform)); - } - - pub fn finish(self) { - self.builder.finish(); - } -} - -/// Tag and value for a font variation axis. -#[derive(Copy, Clone)] -#[repr(C)] -pub struct PgpuFontVariation { - /// Tag that specifies the axis. - pub tag: u32, - /// Requested setting for the axis. - pub value: f32, -} - -/// Context for loading and scaling glyphs. -pub struct PgpuGlyphContext(GlyphContext); - -impl PgpuGlyphContext { - pub fn new() -> Self { - Self(GlyphContext::new()) - } - - pub fn new_provider<'a>( - &'a mut self, - font_data: &'a [u8], - font_index: u32, - font_id: u64, - ppem: f32, - hint: bool, - variations: &[PgpuFontVariation], - ) -> Option<PgpuGlyphProvider> { - let font = FontDataRef::new(font_data).and_then(|f| f.get(font_index))?; - Some(PgpuGlyphProvider( - self.0.new_provider( - &font, - Some(font_id), - ppem, - hint, - variations - .iter() - .map(|variation| (Tag(variation.tag), variation.value)), - ), - )) - } -} - -/// Context for loading and scaling glyphs from a specific font. -pub struct PgpuGlyphProvider<'a>(GlyphProvider<'a>); - -impl<'a> PgpuGlyphProvider<'a> { - pub fn get(&mut self, gid: u16) -> Option<PgpuGlyph> { - let fragment = self.0.get(gid, None)?; - Some(PgpuGlyph { fragment }) - } - - pub fn get_color(&mut self, palette_index: u16, gid: u16) -> Option<PgpuGlyph> { - let fragment = self.0.get_color(palette_index, gid)?; - Some(PgpuGlyph { fragment }) - } -} - -/// Encoded (possibly color) outline for a glyph. -pub struct PgpuGlyph { - fragment: SceneFragment, -} - -impl PgpuGlyph { - pub fn bbox(&self, transform: Option<Affine>) -> Rect { - let points = self.fragment.points(); - if points.is_empty() { - return Rect::default(); - } - let mut points = points - .iter() - .map(|pt| Point::new(pt[0] as f64, pt[1] as f64)); - if let Some(transform) = &transform { - let mut rect = Rect::from_center_size(points.next().unwrap(), (0.0, 0.0)); - for point in points { - rect = rect.union_pt(*transform * point); - } - rect - } else { - let mut rect = Rect::from_center_size(points.next().unwrap(), (0.0, 0.0)); - for point in points { - rect = rect.union_pt(point); - } - rect - } - } -} diff --git a/piet-gpu-derive/Cargo.toml b/piet-gpu-derive/Cargo.toml deleted file mode 100644 index 5f51963..0000000 --- a/piet-gpu-derive/Cargo.toml +++ /dev/null @@ -1,17 +0,0 @@ -[package] -name = "piet-gpu-derive" -version = "0.0.0" -authors = ["Raph Levien "] -description = "Proc macro derives for piet-gpu." -license = "MIT/Apache-2.0" -edition = "2018" -keywords = ["graphics", "2d"] -categories = ["rendering::graphics-api"] - -[lib] -proc-macro = true - -[dependencies] -syn = {version = "1.0.17", features = ["extra-traits", "full"]} -quote = "1.0.3" -proc-macro2 = "1.0.10" diff --git a/piet-gpu-derive/src/derive.rs b/piet-gpu-derive/src/derive.rs deleted file mode 100644 index de00349..0000000 --- a/piet-gpu-derive/src/derive.rs +++ /dev/null @@ -1,192 +0,0 @@ -//! Generation of Rust derive functions for encoding.
- -use quote::{format_ident, quote}; - -use crate::layout::{LayoutModule, LayoutTypeDef}; -use crate::parse::{GpuScalar, GpuType}; - -pub fn gen_derive(module: &LayoutModule) -> proc_macro2::TokenStream { - let mut ts = proc_macro2::TokenStream::new(); - let module_name = format_ident!("{}", module.name); - for name in &module.def_names { - let def = module.defs.get(name).unwrap(); - ts.extend(gen_derive_def(name, def.0.size, &def.1)); - } - quote! { - mod #module_name { - pub trait HalfToLeBytes { - fn to_le_bytes(&self) -> [u8; 2]; - } - - impl HalfToLeBytes for half::f16 { - fn to_le_bytes(&self) -> [u8; 2] { - self.to_bits().to_le_bytes() - } - } - - #ts - } - } -} - -fn gen_derive_def(name: &str, size: usize, def: &LayoutTypeDef) -> proc_macro2::TokenStream { - let name_id = format_ident!("{}", name); - match def { - LayoutTypeDef::Struct(fields) => { - let mut gen_fields = proc_macro2::TokenStream::new(); - let mut encode_fields = proc_macro2::TokenStream::new(); - for (field_name, offset, ty) in fields { - let field_name_id = format_ident!("{}", field_name); - let gen_ty = gen_derive_ty(&ty.ty); - let gen_field = quote! { - pub #field_name_id: #gen_ty, - }; - gen_fields.extend(gen_field); - - encode_fields.extend(gen_encode_field(field_name, *offset, &ty.ty)); - } - quote! { - #[derive(Clone)] - pub struct #name_id { - #gen_fields - } - - impl crate::encoder::Encode for #name_id { - fn fixed_size() -> usize { - #size - } - fn encode_to(&self, buf: &mut [u8]) { - #encode_fields - } - } - } - } - LayoutTypeDef::Enum(variants) => { - let mut gen_variants = proc_macro2::TokenStream::new(); - let mut cases = proc_macro2::TokenStream::new(); - for (variant_ix, (variant_name, payload)) in variants.iter().enumerate() { - let variant_id = format_ident!("{}", variant_name); - let field_tys = payload.iter().map(|(_offset, ty)| gen_derive_ty(&ty.ty)); - let variant = quote! { - #variant_id(#(#field_tys),*), - }; - gen_variants.extend(variant); - - let mut args = Vec::new(); - let mut field_encoders = proc_macro2::TokenStream::new(); - let mut tag_field = None; - for (i, (offset, ty)) in payload.iter().enumerate() { - let field_id = format_ident!("f{}", i); - if matches!(ty.ty, GpuType::Scalar(GpuScalar::TagFlags)) { - tag_field = Some(field_id.clone()); - } else { - let field_encoder = quote! { - #field_id.encode_to(&mut buf[#offset..]); - }; - field_encoders.extend(field_encoder); - } - args.push(field_id); - } - let tag = variant_ix as u32; - let tag_encode = match tag_field { - None => quote! { - buf[0..4].copy_from_slice(&#tag.to_le_bytes()); - }, - Some(tag_field) => quote! { - buf[0..4].copy_from_slice(&(#tag | ((*#tag_field as u32) << 16)).to_le_bytes()); - }, - }; - let case = quote! { - #name_id::#variant_id(#(#args),*) => { - #tag_encode - #field_encoders - } - }; - cases.extend(case); - } - quote! { - #[derive(Clone)] - pub enum #name_id { - #gen_variants - } - - impl crate::encoder::Encode for #name_id { - fn fixed_size() -> usize { - #size - } - fn encode_to(&self, buf: &mut [u8]) { - match self { - #cases - } - } - } - } - } - } -} - -/// Generate a Rust type. -fn gen_derive_ty(ty: &GpuType) -> proc_macro2::TokenStream { - match ty { - GpuType::Scalar(s) => gen_derive_scalar_ty(s), - GpuType::Vector(s, len) => { - let scalar = gen_derive_scalar_ty(s); - quote! { [#scalar; #len] } - } - GpuType::InlineStruct(name) => { - let name_id = format_ident!("{}", name); - quote! { #name_id } - } - GpuType::Ref(ty) => { - let gen_ty = gen_derive_ty(ty); - quote! 
{ crate::encoder::Ref<#gen_ty> } - } - } -} - -fn gen_derive_scalar_ty(ty: &GpuScalar) -> proc_macro2::TokenStream { - match ty { - GpuScalar::F16 => quote!(half::f16), - GpuScalar::F32 => quote!(f32), - GpuScalar::I8 => quote!(i8), - GpuScalar::I16 => quote!(i16), - GpuScalar::I32 => quote!(i32), - GpuScalar::U8 => quote!(u8), - GpuScalar::U16 => quote!(u16), - GpuScalar::U32 => quote!(u32), - GpuScalar::TagFlags => quote!(u16), - } -} - -fn gen_encode_field(name: &str, offset: usize, ty: &GpuType) -> proc_macro2::TokenStream { - let name_id = format_ident!("{}", name); - match ty { - // encoding of flags into tag word is handled elsewhere - GpuType::Scalar(GpuScalar::TagFlags) => quote! {}, - GpuType::Scalar(s) => { - let end = offset + s.size(); - quote! { - buf[#offset..#end].copy_from_slice(&self.#name_id.to_le_bytes()); - } - } - GpuType::Vector(s, len) => { - let size = s.size(); - quote! { - for i in 0..#len { - let offset = #offset + i * #size; - buf[offset..offset + #size].copy_from_slice(&self.#name_id[i].to_le_bytes()); - } - } - } - GpuType::Ref(_) => { - quote! { - buf[#offset..#offset + 4].copy_from_slice(&self.#name_id.offset().to_le_bytes()); - } - } - _ => { - quote! { - &self.#name_id.encode_to(&mut buf[#offset..]); - } - } - } -} diff --git a/piet-gpu-derive/src/glsl.rs b/piet-gpu-derive/src/glsl.rs deleted file mode 100644 index 697c1e4..0000000 --- a/piet-gpu-derive/src/glsl.rs +++ /dev/null @@ -1,669 +0,0 @@ -//! Generation of GLSL struct definitions and accessor functions. - -use std::fmt::Write; -use std::ops::Deref; - -use crate::layout::{LayoutModule, LayoutType, LayoutTypeDef}; -use crate::parse::{GpuScalar, GpuType}; - -pub fn gen_glsl(module: &LayoutModule) -> String { - let mut r = String::new(); - writeln!( - &mut r, - "// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense\n" - ) - .unwrap(); - writeln!(&mut r, "// Code auto-generated by piet-gpu-derive\n").unwrap(); - // Note: GLSL needs definitions before uses. We could do a topological sort here, - // but easiest for now to just require that in spec. 
- for name in &module.def_names { - gen_refdef(&mut r, &name); - } - - for name in &module.def_names { - match module.defs.get(name).unwrap() { - (size, LayoutTypeDef::Struct(fields)) => { - gen_struct_def(&mut r, name, fields); - gen_item_def(&mut r, name, size.size); - } - (size, LayoutTypeDef::Enum(en)) => { - gen_enum_def(&mut r, name, en); - gen_item_def(&mut r, name, size.size); - gen_tag_def(&mut r, name); - } - } - } - - for name in &module.def_names { - let def = module.defs.get(name).unwrap(); - let is_mem = !module.name.eq(&"state") && !module.name.eq(&"scene"); - match def { - (_size, LayoutTypeDef::Struct(fields)) => { - gen_struct_read(&mut r, &module.name, &name, is_mem, fields); - if module.gpu_write { - gen_struct_write(&mut r, &module.name, &name, is_mem, fields); - } - } - (_size, LayoutTypeDef::Enum(en)) => { - gen_enum_read(&mut r, &module.name, &name, is_mem, en); - if module.gpu_write { - gen_enum_write(&mut r, &module.name, &name, is_mem, en); - } - } - } - } - - r -} - -fn gen_refdef(r: &mut String, name: &str) { - writeln!(r, "struct {}Ref {{", name).unwrap(); - writeln!(r, " uint offset;").unwrap(); - writeln!(r, "}};\n").unwrap(); -} - -fn gen_struct_def(r: &mut String, name: &str, fields: &[(String, usize, LayoutType)]) { - writeln!(r, "struct {} {{", name).unwrap(); - for (name, _offset, ty) in fields { - writeln!(r, " {} {};", glsl_type(&ty.ty), name).unwrap(); - } - writeln!(r, "}};\n").unwrap(); -} - -fn gen_enum_def(r: &mut String, name: &str, variants: &[(String, Vec<(usize, LayoutType)>)]) { - for (i, (var_name, _payload)) in variants.iter().enumerate() { - writeln!(r, "#define {}_{} {}", name, var_name, i).unwrap(); - } -} - -fn gen_item_def(r: &mut String, name: &str, size: usize) { - writeln!(r, "#define {}_size {}\n", name, size).unwrap(); - writeln!( - r, - "{}Ref {}_index({}Ref ref, uint index) {{", - name, name, name - ) - .unwrap(); - writeln!( - r, - " return {}Ref(ref.offset + index * {}_size);", - name, name - ) - .unwrap(); - writeln!(r, "}}\n").unwrap(); -} - -fn gen_tag_def(r: &mut String, name: &str) { - writeln!(r, "struct {}Tag {{", name).unwrap(); - writeln!(r, " uint tag;").unwrap(); - writeln!(r, " uint flags;").unwrap(); - writeln!(r, "}};\n").unwrap(); -} - -fn gen_struct_read( - r: &mut String, - bufname: &str, - name: &str, - is_mem: bool, - fields: &[(String, usize, LayoutType)], -) { - write!(r, "{} {}_read(", name, name).unwrap(); - if is_mem { - write!(r, "Alloc a, ").unwrap(); - } - writeln!(r, "{}Ref ref) {{", name).unwrap(); - writeln!(r, " uint ix = ref.offset >> 2;").unwrap(); - let coverage = crate::layout::struct_coverage(fields, false); - for (i, fields) in coverage.iter().enumerate() { - if !fields.is_empty() { - if is_mem { - writeln!(r, " uint raw{} = read_mem(a, ix + {});", i, i).unwrap(); - } else { - writeln!(r, " uint raw{} = {}[ix + {}];", i, bufname, i).unwrap(); - } - } - } - writeln!(r, " {} s;", name).unwrap(); - - let mut preload: bool = false; - for (name, offset, ty) in fields { - let (setup, extract) = gen_extract(*offset, &ty.ty, preload); - writeln!(r, "{} s.{} = {};", setup, name, extract).unwrap(); - - if let GpuType::Scalar(GpuScalar::F16) = &ty.ty { - if offset % 4 == 0 { - preload = true; - continue; - } - } - preload = false; - } - - writeln!(r, " return s;").unwrap(); - writeln!(r, "}}\n").unwrap(); -} - -fn gen_enum_read( - r: &mut String, - bufname: &str, - name: &str, - is_mem: bool, - variants: &[(String, Vec<(usize, LayoutType)>)], -) { - if is_mem { - writeln!(r, "{}Tag {}_tag(Alloc a, 
{}Ref ref) {{", name, name, name).unwrap(); - writeln!(r, " uint tag_and_flags = read_mem(a, ref.offset >> 2);").unwrap(); - } else { - writeln!(r, "{}Tag {}_tag({}Ref ref) {{", name, name, name).unwrap(); - writeln!(r, " uint tag_and_flags = {}[ref.offset >> 2];", bufname).unwrap(); - } - writeln!( - r, - " return {}Tag(tag_and_flags & 0xffff, tag_and_flags >> 16);", - name - ) - .unwrap(); - writeln!(r, "}}\n").unwrap(); - for (var_name, payload) in variants { - let payload_ix = if payload.len() == 1 { - Some(0) - } else if payload.len() == 2 { - if matches!(payload[0].1.ty, GpuType::Scalar(GpuScalar::TagFlags)) { - Some(1) - } else { - None - } - } else { - None - }; - if let Some(payload_ix) = payload_ix { - if let GpuType::InlineStruct(structname) = &payload[payload_ix].1.ty { - if is_mem { - writeln!( - r, - "{} {}_{}_read(Alloc a, {}Ref ref) {{", - structname, name, var_name, name - ) - .unwrap(); - writeln!( - r, - " return {}_read(a, {}Ref(ref.offset + {}));", - structname, structname, payload[0].0 - ) - .unwrap(); - } else { - writeln!( - r, - "{} {}_{}_read({}Ref ref) {{", - structname, name, var_name, name - ) - .unwrap(); - writeln!( - r, - " return {}_read({}Ref(ref.offset + {}));", - structname, structname, payload[0].0 - ) - .unwrap(); - } - writeln!(r, "}}\n").unwrap(); - } - } - // TODO: support for variants that aren't one struct. - } -} - -fn gen_extract(offset: usize, ty: &GpuType, preload: bool) -> (String, String) { - match ty { - GpuType::Scalar(scalar) => { - let setup = match scalar { - GpuScalar::F16 => { - if preload { - String::new() - } else { - let ix = offset / 4; - format!(" vec2 halves{} = unpackHalf2x16(raw{});\n", ix, ix) - } - } - _ => String::new(), - }; - - (setup, gen_extract_scalar(offset, scalar)) - } - GpuType::Vector(scalar, size) => { - let is_f16 = match scalar { - GpuScalar::F16 => true, - _ => false, - }; - - let mut setup = String::new(); - let mut extract = glsl_type(ty); - extract.push_str("("); - for i in 0..*size { - if i != 0 { - extract.push_str(", "); - } - - if is_f16 && i % 2 == 0 { - let ix = (offset + i * scalar.size()) / 4; - let s = format!(" vec2 halves{} = unpackHalf2x16(raw{});\n", ix, ix); - setup.push_str(&s); - }; - - let el_offset = offset + i * scalar.size(); - extract.push_str(&gen_extract_scalar(el_offset, scalar)); - } - extract.push_str(")"); - (setup, extract) - } - GpuType::InlineStruct(name) => ( - String::new(), - format!( - "{}_read({}Ref({}))", - name, - name, - simplified_add("ref.offset", offset) - ), - ), - GpuType::Ref(inner) => { - if let GpuType::InlineStruct(name) = inner.deref() { - ( - String::new(), - format!( - "{}Ref({})", - name, - gen_extract_scalar(offset, &GpuScalar::U32) - ), - ) - } else { - panic!("only know how to deal with Ref of struct") - } - } - } -} - -fn gen_extract_scalar(offset: usize, ty: &GpuScalar) -> String { - match ty { - GpuScalar::F16 | GpuScalar::F32 => extract_fbits(offset, ty.size()), - GpuScalar::U8 | GpuScalar::U16 | GpuScalar::U32 => extract_ubits(offset, ty.size()), - GpuScalar::I8 | GpuScalar::I16 | GpuScalar::I32 => extract_ibits(offset, ty.size()), - GpuScalar::TagFlags => format!("0 /* TODO */"), - } -} - -fn extract_ubits(offset: usize, nbytes: usize) -> String { - if nbytes == 4 { - return format!("raw{}", offset / 4); - } - let mask = (1 << (nbytes * 8)) - 1; - if offset % 4 == 0 { - format!("raw{} & 0x{:x}", offset / 4, mask) - } else if offset % 4 + nbytes == 4 { - format!("raw{} >> {}", offset / 4, (offset % 4) * 8) - } else { - format!("(raw{} >> {}) & 
0x{:x}", offset / 4, (offset % 4) * 8, mask) - } -} - -fn extract_ibits(offset: usize, nbytes: usize) -> String { - if nbytes == 4 { - return format!("int(raw{})", offset / 4); - } - if offset % 4 + nbytes == 4 { - format!("int(raw{}) >> {}", offset / 4, (offset % 4) * 8) - } else { - format!( - "int(raw{} << {}) >> {}", - offset / 4, - ((4 - nbytes) - offset % 4) * 8, - (4 - nbytes) * 8 - ) - } -} - -fn extract_fbits(offset: usize, nbytes: usize) -> String { - match nbytes { - 4 => format!("uintBitsToFloat(raw{})", offset / 4), - 2 => match offset % 4 { - 0 => { - let ix = offset / 4; - format!("halves{}.x", ix) - } - 2 => format!("halves{}.y", offset / 4), - _ => panic!("unexpected packing of f16 at offset {}", offset % 4), - }, - _ => { - panic!("unexpected extraction of float with nbytes = {}", nbytes); - } - } -} - -// Writing - -fn is_f16(ty: &GpuType) -> bool { - match ty { - GpuType::Scalar(GpuScalar::F16) => true, - GpuType::Vector(GpuScalar::F16, _) => true, - _ => false, - } -} - -fn is_f16_pair(field_ixs: &[usize], fields: &[(String, usize, LayoutType)]) -> bool { - if field_ixs.len() == 2 { - fields.iter().all(|(_, _, t)| is_f16(&t.ty)) - } else { - false - } -} - -fn gen_struct_write( - r: &mut String, - bufname: &str, - name: &str, - is_mem: bool, - fields: &[(String, usize, LayoutType)], -) { - write!(r, "void {}_write(", name).unwrap(); - if is_mem { - write!(r, "Alloc a, ").unwrap(); - } - writeln!(r, "{}Ref ref, {} s) {{", name, name).unwrap(); - writeln!(r, " uint ix = ref.offset >> 2;").unwrap(); - let coverage = crate::layout::struct_coverage(fields, true); - - for (i, field_ixs) in coverage.iter().enumerate() { - let mut pieces = Vec::new(); - - if is_f16_pair(field_ixs, fields) { - let (ix0, ix1) = (field_ixs[0], field_ixs[1]); - let inner0 = format!("s.{}", fields[ix0].0); - let inner1 = format!("s.{}", fields[ix1].0); - pieces.push(format!("packHalf2x16(vec2({}, {}))", &inner0, &inner1)); - } else { - for field_ix in field_ixs { - let (name, offset, ty) = &fields[*field_ix]; - match &ty.ty { - GpuType::Scalar(scalar) => { - let inner = format!("s.{}", name); - pieces.push(gen_pack_bits_scalar(scalar, *offset, &inner)); - } - GpuType::Vector(scalar, len) => { - let size = scalar.size(); - let ix_lo = (i * 4 - offset) / size; - let ix_hi = ((4 + i * 4 - offset) / size).min(*len); - match scalar { - GpuScalar::F16 => { - if ix_hi - ix_lo == 2 { - let inner0 = - format!("s.{}.{}", name, &"xyzw"[ix_lo..ix_lo + 1]); - let inner1 = - format!("s.{}.{}", name, &"xyzw"[ix_lo + 1..ix_hi]); - pieces.push(format!( - "packHalf2x16(vec2({}, {}))", - &inner0, &inner1 - )); - } else { - let ix = ix_lo; - let scalar_offset = offset + ix * size; - let inner = format!("s.{}.{}", name, &"xyzw"[ix..ix + 1]); - pieces.push(gen_pack_bits_scalar( - scalar, - scalar_offset, - &inner, - )); - } - } - _ => { - for ix in ix_lo..ix_hi { - let scalar_offset = offset + ix * size; - let inner = format!("s.{}.{}", name, &"xyzw"[ix..ix + 1]); - pieces.push(gen_pack_bits_scalar( - scalar, - scalar_offset, - &inner, - )); - } - } - } - } - GpuType::InlineStruct(structname) => { - writeln!( - r, - " {}_write({}Ref({}), s.{});", - structname, - structname, - simplified_add("ref.offset", *offset), - name - ) - .unwrap(); - } - GpuType::Ref(_) => pieces.push(format!("s.{}.offset", name)), - } - } - } - - if !pieces.is_empty() { - if is_mem { - write!(r, " write_mem(a, ix + {}, ", i).unwrap(); - } else { - write!(r, " {}[ix + {}] = ", bufname, i).unwrap(); - } - for (j, piece) in pieces.iter().enumerate() 
{ - if j != 0 { - write!(r, " | ").unwrap(); - } - write!(r, "{}", piece).unwrap(); - } - if is_mem { - write!(r, ")").unwrap(); - } - writeln!(r, ";").unwrap(); - } - } - writeln!(r, "}}\n").unwrap(); -} - -fn gen_pack_bits_scalar(ty: &GpuScalar, offset: usize, inner: &str) -> String { - let shift = (offset % 4) * 8; - let bits = match ty { - GpuScalar::F16 => format!("packHalf2x16(vec2({}, 0.0)) & 0xffff", inner), - GpuScalar::F32 => format!("floatBitsToUint({})", inner), - // Note: this doesn't mask small unsigned int types; the caller is - // responsible for making sure they don't overflow. - GpuScalar::U8 | GpuScalar::U16 | GpuScalar::U32 => inner.into(), - GpuScalar::I8 => { - if shift == 24 { - format!("uint({})", inner) - } else { - format!("(uint({}) & 0xff)", inner) - } - } - GpuScalar::I16 => { - if shift == 16 { - format!("uint({})", inner) - } else { - format!("(uint({}) & 0xffff)", inner) - } - } - GpuScalar::I32 => format!("uint({})", inner), - GpuScalar::TagFlags => format!("0"), - }; - if shift == 0 { - bits - } else { - format!("({} << {})", bits, shift) - } -} - -fn gen_enum_write( - r: &mut String, - bufname: &str, - name: &str, - is_mem: bool, - variants: &[(String, Vec<(usize, LayoutType)>)], -) { - for (var_name, payload) in variants { - if payload.is_empty() { - if is_mem { - writeln!( - r, - "void {}_{}_write(Alloc a, {}Ref ref) {{", - name, var_name, name - ) - .unwrap(); - writeln!( - r, - " write_mem(a, ref.offset >> 2, {}_{});", - name, var_name - ) - .unwrap(); - } else { - writeln!(r, "void {}_{}_write({}Ref ref) {{", name, var_name, name).unwrap(); - writeln!( - r, - " {}[ref.offset >> 2] = {}_{};", - bufname, name, var_name - ) - .unwrap(); - } - writeln!(r, "}}\n").unwrap(); - } else if payload.len() == 1 { - if let GpuType::InlineStruct(structname) = &payload[0].1.ty { - if is_mem { - writeln!( - r, - "void {}_{}_write(Alloc a, {}Ref ref, {} s) {{", - name, var_name, name, structname - ) - .unwrap(); - writeln!( - r, - " write_mem(a, ref.offset >> 2, {}_{});", - name, var_name - ) - .unwrap(); - writeln!( - r, - " {}_write(a, {}Ref(ref.offset + {}), s);", - structname, structname, payload[0].0 - ) - .unwrap(); - } else { - writeln!( - r, - "void {}_{}_write({}Ref ref, {} s) {{", - name, var_name, name, structname - ) - .unwrap(); - writeln!( - r, - " {}[ref.offset >> 2] = {}_{};", - bufname, name, var_name - ) - .unwrap(); - writeln!( - r, - " {}_write({}Ref(ref.offset + {}), s);", - structname, structname, payload[0].0 - ) - .unwrap(); - } - writeln!(r, "}}\n").unwrap(); - } - } else if payload.len() == 2 - && matches!(payload[0].1.ty, GpuType::Scalar(GpuScalar::TagFlags)) - { - if let GpuType::InlineStruct(structname) = &payload[1].1.ty { - if is_mem { - writeln!( - r, - "void {}_{}_write(Alloc a, {}Ref ref, uint flags, {} s) {{", - name, var_name, name, structname - ) - .unwrap(); - writeln!( - r, - " write_mem(a, ref.offset >> 2, (flags << 16) | {}_{});", - name, var_name - ) - .unwrap(); - writeln!( - r, - " {}_write(a, {}Ref(ref.offset + {}), s);", - structname, structname, payload[0].0 - ) - .unwrap(); - } else { - writeln!( - r, - "void {}_{}_write({}Ref ref, uint flags, {} s) {{", - name, var_name, name, structname - ) - .unwrap(); - writeln!( - r, - " {}[ref.offset >> 2] = (flags << 16) | {}_{};", - bufname, name, var_name - ) - .unwrap(); - writeln!( - r, - " {}_write({}Ref(ref.offset + {}), s);", - structname, structname, payload[0].0 - ) - .unwrap(); - } - writeln!(r, "}}\n").unwrap(); - } - } - // TODO: support for variants that aren't one 
struct. - } -} - -// Utility functions - -fn glsl_type(ty: &GpuType) -> String { - match ty { - GpuType::Scalar(scalar) => glsl_scalar(scalar).into(), - GpuType::Vector(scalar, size) => { - if *size == 1 { - glsl_scalar(scalar).into() - } else { - format!("{}{}", glsl_vecname(scalar), size) - } - } - GpuType::InlineStruct(name) => name.clone(), - GpuType::Ref(inner) => { - if let GpuType::InlineStruct(name) = inner.deref() { - format!("{}Ref", name) - } else { - panic!("only know how to deal with Ref of struct") - } - } - } -} - -// GLSL type that can contain the scalar value. -fn glsl_scalar(s: &GpuScalar) -> &'static str { - match s { - GpuScalar::F16 | GpuScalar::F32 => "float", - GpuScalar::I8 | GpuScalar::I16 | GpuScalar::I32 => "int", - GpuScalar::U8 | GpuScalar::U16 | GpuScalar::U32 | GpuScalar::TagFlags => "uint", - } -} - -fn glsl_vecname(s: &GpuScalar) -> &'static str { - match s { - GpuScalar::F16 | GpuScalar::F32 => "vec", - GpuScalar::I8 | GpuScalar::I16 | GpuScalar::I32 => "ivec", - GpuScalar::U8 | GpuScalar::U16 | GpuScalar::U32 | GpuScalar::TagFlags => "uvec", - } -} - -/// If `c = 0`, return `"var_name"`, else `"var_name + c"` -fn simplified_add(var_name: &str, c: usize) -> String { - if c == 0 { - String::from(var_name) - } else { - format!("{} + {}", var_name, c) - } -} diff --git a/piet-gpu-derive/src/layout.rs b/piet-gpu-derive/src/layout.rs deleted file mode 100644 index 47f1dcb..0000000 --- a/piet-gpu-derive/src/layout.rs +++ /dev/null @@ -1,244 +0,0 @@ -//! Logic for layout of structures in memory. - -// This is fairly simple now, but there are some extensions that are likely: -// * Addition of f16 types -// + These will probably have 2-byte alignments to support `packHalf2x16` -// * 1 byte tag values (so small struct fields can be packed along with tag) -// * (Possibly) reordering for better packing - -use std::collections::{HashMap, HashSet}; - -use crate::parse::{GpuModule, GpuType, GpuTypeDef}; - -#[derive(Clone)] -pub struct LayoutType { - size: Size, - pub ty: GpuType, -} - -#[derive(Clone)] -pub enum LayoutTypeDef { - /// Name, offset, field type. Make a separate struct? - Struct(Vec<(String, usize, LayoutType)>), - Enum(Vec<(String, Vec<(usize, LayoutType)>)>), -} - -pub struct LayoutModule { - pub name: String, - pub def_names: Vec<String>, - pub defs: HashMap<String, (Size, LayoutTypeDef)>, - enum_variants: HashSet<String>, - - /// Generate shader code to write the module. - /// - /// This is derived from the presence of the `gpu_write` attribute in the source module. - pub gpu_write: bool, - /// Generate Rust code to encode the module. - /// - /// This is derived from the presence of the `rust_encode` attribute in the source module. - pub rust_encode: bool, -} - -struct LayoutSession<'a> { - enum_variants: HashSet<String>, - orig_defs: HashMap<String, &'a GpuTypeDef>, - defs: HashMap<String, (Size, LayoutTypeDef)>, -} - -#[derive(Clone, Copy)] -pub struct Size { - pub size: usize, - alignment: usize, -} - -impl LayoutType { - fn from_gpu(ty: &GpuType, session: &mut LayoutSession) -> LayoutType { - let size = session.get_size(ty); - LayoutType { - size, - ty: ty.clone(), - } - } -} - -impl LayoutTypeDef { - // Maybe have a type representing the tuple? - fn from_gpu(def: &GpuTypeDef, session: &mut LayoutSession) -> (Size, LayoutTypeDef) { - match def { - GpuTypeDef::Struct(_name, fields) => { - // TODO: We want to be able to pack enums more tightly, in particular - // other struct fields along with the enum tag. Structs in that category - // (first field has an alignment < 4, serve as enum variant) will have a - // different layout.
This is why we're tracking `is_enum_variant`. - // - // But it's a bit of YAGNI for now; we're currently reserving 4 bytes for - // the tag, so structure layout doesn't care. - let mut offset = 0; - let mut result = Vec::new(); - for field in fields { - let layout_ty = LayoutType::from_gpu(&field.1, session); - offset += align_padding(offset, layout_ty.size.alignment); - let size = layout_ty.size.size; - result.push((field.0.clone(), offset, layout_ty)); - offset += size; - } - offset += align_padding(offset, 4); - let size = Size::new_struct(offset); - (size, LayoutTypeDef::Struct(result)) - } - GpuTypeDef::Enum(en) => { - let mut result = Vec::new(); - let mut max_offset = 0; - for variant in &en.variants { - let mut r2 = Vec::new(); - let mut offset = 4; - for field in &variant.1 { - let layout_ty = LayoutType::from_gpu(field, session); - offset += align_padding(offset, layout_ty.size.alignment); - let size = layout_ty.size.size; - r2.push((offset, layout_ty)); - offset += size; - } - max_offset = max_offset.max(offset); - result.push((variant.0.clone(), r2)); - } - max_offset += align_padding(max_offset, 4); - let size = Size::new_struct(max_offset); - (size, LayoutTypeDef::Enum(result)) - } - } - } -} - -impl LayoutModule { - pub fn from_gpu(module: &GpuModule) -> LayoutModule { - let def_names = module - .defs - .iter() - .map(|def| def.name().to_owned()) - .collect::>(); - let mut session = LayoutSession::new(module); - for def in &module.defs { - let _ = session.layout_def(def.name()); - } - let gpu_write = module.attrs.contains("gpu_write"); - let rust_encode = module.attrs.contains("rust_encode"); - LayoutModule { - name: module.name.clone(), - gpu_write, - rust_encode, - def_names, - enum_variants: session.enum_variants, - defs: session.defs, - } - } - - #[allow(unused)] - pub fn is_enum_variant(&self, name: &str) -> bool { - self.enum_variants.contains(name) - } -} - -impl<'a> LayoutSession<'a> { - fn new(module: &GpuModule) -> LayoutSession { - let mut orig_defs = HashMap::new(); - let mut enum_variants = HashSet::new(); - for def in &module.defs { - orig_defs.insert(def.name().to_owned(), def.clone()); - if let GpuTypeDef::Enum(en) = def { - for variant in &en.variants { - if let Some(GpuType::InlineStruct(name)) = variant.1.first() { - enum_variants.insert(name.clone()); - } - } - } - } - LayoutSession { - enum_variants, - orig_defs, - defs: HashMap::new(), - } - } - - /// Do layout of one def. - /// - /// This might be called recursively. - /// Note: expect stack overflow for circular dependencies. - fn layout_def(&mut self, name: &str) -> Size { - if let Some(def) = self.defs.get(name) { - return def.0; - } - let def = self.orig_defs.get(name).unwrap(); - let layout = LayoutTypeDef::from_gpu(def, self); - let size = layout.0; - self.defs.insert(name.to_owned(), layout); - size - } - - fn get_size(&mut self, ty: &GpuType) -> Size { - match ty { - GpuType::Scalar(scalar) => Size::new(scalar.size()), - GpuType::Vector(scalar, len) => Size::new(scalar.size() * len), - GpuType::Ref(_) => Size::new(4), - GpuType::InlineStruct(name) => self.layout_def(name), - } - } - - #[allow(unused)] - fn is_enum_variant(&self, name: &str) -> bool { - self.enum_variants.contains(name) - } -} - -/// Compute coverage of fields. -/// -/// Each element of the result represents a list of fields for one 4-byte chunk of -/// the struct layout. Inline structs are only included if requested. 
-pub fn struct_coverage( - fields: &[(String, usize, LayoutType)], - include_inline: bool, -) -> Vec> { - let mut result: Vec> = Vec::new(); - for (i, (_name, offset, ty)) in fields.iter().enumerate() { - let size = match ty.ty { - GpuType::Scalar(scalar) => scalar.size(), - GpuType::Vector(scalar, len) => scalar.size() * len, - GpuType::Ref(_) => 4, - GpuType::InlineStruct(_) => { - if include_inline { - 4 - } else { - 0 - } - } - }; - if size > 0 { - for ix in (offset / 4)..(offset + size + 3) / 4 { - if ix >= result.len() { - result.resize_with(ix + 1, Default::default); - } - result[ix].push(i); - } - } - } - result -} - -impl Size { - fn new(size: usize) -> Size { - // Note: there is special case we could do better: - // `(u8, u16, u8)`, where the alignment could be 1. However, - // this case can also be solved by reordering. - let alignment = size.min(4); - Size { size, alignment } - } - - fn new_struct(size: usize) -> Size { - let alignment = 4; - Size { size, alignment } - } -} - -fn align_padding(offset: usize, alignment: usize) -> usize { - offset.wrapping_neg() & (alignment.max(1) - 1) -} diff --git a/piet-gpu-derive/src/lib.rs b/piet-gpu-derive/src/lib.rs deleted file mode 100644 index 43177de..0000000 --- a/piet-gpu-derive/src/lib.rs +++ /dev/null @@ -1,30 +0,0 @@ -mod derive; -mod glsl; -mod layout; -mod parse; - -use proc_macro::TokenStream; -use quote::{format_ident, quote}; -use syn::parse_macro_input; - -use layout::LayoutModule; -use parse::GpuModule; - -#[proc_macro] -pub fn piet_gpu(input: TokenStream) -> TokenStream { - let input = parse_macro_input!(input as syn::ItemMod); - //println!("input: {:#?}", input); - let module = GpuModule::from_syn(&input).unwrap(); - let layout = LayoutModule::from_gpu(&module); - let glsl = glsl::gen_glsl(&layout); - let gen_gpu_fn = format_ident!("gen_gpu_{}", layout.name); - let mut expanded = quote! { - pub fn #gen_gpu_fn() -> String { - #glsl.into() - } - }; - if layout.rust_encode { - expanded.extend(derive::gen_derive(&layout)); - } - expanded.into() -} diff --git a/piet-gpu-derive/src/parse.rs b/piet-gpu-derive/src/parse.rs deleted file mode 100644 index 1598987..0000000 --- a/piet-gpu-derive/src/parse.rs +++ /dev/null @@ -1,228 +0,0 @@ -//! Parsing of the source - -extern crate proc_macro; - -use std::collections::HashSet; - -use syn::{ - Expr, ExprLit, Fields, FieldsNamed, FieldsUnnamed, GenericArgument, ItemEnum, ItemStruct, Lit, - PathArguments, TypeArray, TypePath, -}; - -/// A scalar that can be represented in a packed data structure. -#[derive(Clone, Copy, PartialEq)] -pub enum GpuScalar { - F16, - F32, - I8, - I16, - I32, - U8, - U16, - U32, - TagFlags, -} - -/// An algebraic datatype. -#[derive(Clone)] -pub enum GpuType { - Scalar(GpuScalar), - Vector(GpuScalar, usize), - /// Used mostly for the body of enum variants. 
- InlineStruct(String), - Ref(Box), -} - -pub struct GpuEnum { - pub name: String, - pub variants: Vec<(String, Vec)>, -} - -pub enum GpuTypeDef { - Struct(String, Vec<(String, GpuType)>), - Enum(GpuEnum), -} - -pub struct GpuModule { - pub name: String, - pub attrs: HashSet, - pub defs: Vec, -} - -impl GpuScalar { - fn from_syn(ty: &syn::Type) -> Option { - ty_as_single_ident(ty).and_then(|ident| match ident.as_str() { - "f32" => Some(GpuScalar::F32), - "f16" => Some(GpuScalar::F16), - "i8" => Some(GpuScalar::I8), - "i16" => Some(GpuScalar::I16), - "i32" => Some(GpuScalar::I32), - "u8" => Some(GpuScalar::U8), - "u16" => Some(GpuScalar::U16), - "u32" => Some(GpuScalar::U32), - "TagFlags" => Some(GpuScalar::TagFlags), - _ => None, - }) - } - - /// Size of scalar type. - /// - /// This is arguably a concern at the layout level, not syntax, but it's here because - /// it's not likely to be variable, so reduces the total number of types. - pub fn size(self) -> usize { - match self { - GpuScalar::F32 | GpuScalar::I32 | GpuScalar::U32 => 4, - GpuScalar::I8 | GpuScalar::U8 => 1, - GpuScalar::F16 | GpuScalar::I16 | GpuScalar::U16 => 2, - GpuScalar::TagFlags => 0, - } - } -} - -impl GpuType { - fn from_syn(ty: &syn::Type) -> Result { - //println!("gputype {:#?}", ty); - if let Some(scalar) = GpuScalar::from_syn(ty) { - return Ok(GpuType::Scalar(scalar)); - } - if let Some(name) = ty_as_single_ident(ty) { - // Note: we're not doing any validation here. - return Ok(GpuType::InlineStruct(name)); - } - match ty { - syn::Type::Path(TypePath { - path: syn::Path { segments, .. }, - .. - }) => { - if segments.len() == 1 { - let seg = &segments[0]; - if seg.ident == "Ref" { - if let PathArguments::AngleBracketed(args) = &seg.arguments { - if args.args.len() == 1 { - if let GenericArgument::Type(inner) = &args.args[0] { - let inner_ty = GpuType::from_syn(inner)?; - return Ok(GpuType::Ref(Box::new(inner_ty))); - } - } - } - } - } - Err("unknown path case".into()) - } - syn::Type::Array(TypeArray { elem, len, .. }) => { - if let Some(elem) = GpuScalar::from_syn(&elem) { - if let Some(len) = expr_int_lit(len) { - // maybe sanity-check length here - Ok(GpuType::Vector(elem, len)) - } else { - Err("can't deal with variable length scalar arrays".into()) - } - } else { - Err("can't deal with non-scalar arrays".into()) - } - } - _ => Err("unknown type".into()), - } - } -} - -impl GpuTypeDef { - fn from_syn(item: &syn::Item) -> Result { - match item { - syn::Item::Struct(ItemStruct { - ident, - fields: Fields::Named(FieldsNamed { named, .. }), - .. - }) => { - let mut fields = Vec::new(); - for field in named { - let field_ty = GpuType::from_syn(&field.ty)?; - let field_name = field.ident.as_ref().ok_or("need name".to_string())?; - fields.push((field_name.to_string(), field_ty)); - } - Ok(GpuTypeDef::Struct(ident.to_string(), fields)) - } - syn::Item::Enum(ItemEnum { - ident, variants, .. - }) => { - let mut v = Vec::new(); - for variant in variants { - let vname = variant.ident.to_string(); - let mut fields = Vec::new(); - if let Fields::Unnamed(FieldsUnnamed { unnamed, .. 
}) = &variant.fields { - for field in unnamed { - fields.push(GpuType::from_syn(&field.ty)?); - } - } - v.push((vname, fields)); - } - let en = GpuEnum { - name: ident.to_string(), - variants: v, - }; - Ok(GpuTypeDef::Enum(en)) - } - _ => { - eprintln!("{:#?}", item); - Err("unknown item".into()) - } - } - } - - pub fn name(&self) -> &str { - match self { - GpuTypeDef::Struct(name, _) => name, - GpuTypeDef::Enum(en) => &en.name, - } - } -} - -impl GpuModule { - pub fn from_syn(module: &syn::ItemMod) -> Result { - let name = module.ident.to_string(); - let mut attrs = HashSet::new(); - for attr in &module.attrs { - if let Some(id) = path_as_single_ident(&attr.path) { - attrs.insert(id.to_owned()); - } - } - let mut defs = Vec::new(); - if let Some((_brace, items)) = &module.content { - for item in items { - let def = GpuTypeDef::from_syn(item)?; - defs.push(def); - } - } - Ok(GpuModule { name, attrs, defs }) - } -} - -fn path_as_single_ident(path: &syn::Path) -> Option { - if path.segments.len() == 1 { - let seg = &path.segments[0]; - if seg.arguments == PathArguments::None { - return Some(seg.ident.to_string()); - } - } - None -} - -fn ty_as_single_ident(ty: &syn::Type) -> Option { - if let syn::Type::Path(TypePath { path, .. }) = ty { - path_as_single_ident(path) - } else { - None - } -} - -fn expr_int_lit(e: &Expr) -> Option { - if let Expr::Lit(ExprLit { - lit: Lit::Int(lit_int), - .. - }) = e - { - lit_int.base10_parse().ok() - } else { - None - } -} diff --git a/piet-gpu-hal/Cargo.toml b/piet-gpu-hal/Cargo.toml deleted file mode 100644 index 110fc7f..0000000 --- a/piet-gpu-hal/Cargo.toml +++ /dev/null @@ -1,33 +0,0 @@ -[package] -name = "piet-gpu-hal" -version = "0.1.0" -authors = ["Raph Levien "] -description = "An abstraction layer for running compute kernels on GPU." 
-license = "MIT/Apache-2.0" -edition = "2018" - -[dependencies] -ash = { version = "0.37", features = ["loaded"] } -ash-window = "0.12" -raw-window-handle = "0.5" -bitflags = "1.3.2" -smallvec = "1.9" -bytemuck = "1.12.1" - -[target.'cfg(target_os="windows")'.dependencies] -winapi = { version = "0.3.9", features = [ - 'd3d12', 'd3d12sdklayers', 'd3dcommon', 'd3dcompiler', 'dxgi', - 'dxgi1_2', 'dxgi1_3', 'dxgi1_4', 'dxgidebug', 'dxgiformat', 'dxgitype', - 'libloaderapi', 'shellscalingapi', 'synchapi', 'winbase', 'windef', - 'winerror', 'winuser' -] } -wio = "0.2.2" - -[target.'cfg(target_os="macos")'.dependencies] -metal = "0.24" -objc = "0.2.7" -block = "0.1.6" -cocoa-foundation = "0.1" -# Note: foreign-types is up to 0.5 but metal hasn't upgraded to it -foreign-types = "0.3.2" -core-graphics-types = "0.1.1" diff --git a/piet-gpu-hal/examples/collatz.rs b/piet-gpu-hal/examples/collatz.rs deleted file mode 100644 index afb3d27..0000000 --- a/piet-gpu-hal/examples/collatz.rs +++ /dev/null @@ -1,39 +0,0 @@ -use piet_gpu_hal::{include_shader, BindType, ComputePassDescriptor}; -use piet_gpu_hal::{BufferUsage, Instance, InstanceFlags, Session}; - -fn main() { - let instance = Instance::new(InstanceFlags::empty()).unwrap(); - unsafe { - let device = instance.device().unwrap(); - let session = Session::new(device); - let usage = BufferUsage::MAP_READ | BufferUsage::STORAGE; - let src = (0..256).map(|x| x + 1).collect::>(); - let buffer = session.create_buffer_init(&src, usage).unwrap(); - let code = include_shader!(&session, "./shader/gen/collatz"); - let pipeline = session - .create_compute_pipeline(code, &[BindType::Buffer]) - .unwrap(); - let descriptor_set = session - .create_simple_descriptor_set(&pipeline, &[&buffer]) - .unwrap(); - let query_pool = session.create_query_pool(2).unwrap(); - let mut cmd_buf = session.cmd_buf().unwrap(); - cmd_buf.begin(); - cmd_buf.reset_query_pool(&query_pool); - let mut pass = cmd_buf.begin_compute_pass(&ComputePassDescriptor::timer(&query_pool, 0, 1)); - pass.dispatch(&pipeline, &descriptor_set, (256, 1, 1), (1, 1, 1)); - pass.end(); - cmd_buf.finish_timestamps(&query_pool); - cmd_buf.host_barrier(); - cmd_buf.finish(); - let submitted = session.run_cmd_buf(cmd_buf, &[], &[]).unwrap(); - submitted.wait().unwrap(); - let timestamps = session.fetch_query_pool(&query_pool); - let mut dst: Vec = Default::default(); - buffer.read(&mut dst).unwrap(); - for (i, val) in dst.iter().enumerate().take(16) { - println!("{}: {}", i, val); - } - println!("{:?}", timestamps); - } -} diff --git a/piet-gpu-hal/examples/shader/build.ninja b/piet-gpu-hal/examples/shader/build.ninja deleted file mode 100644 index 3b9cf3f..0000000 --- a/piet-gpu-hal/examples/shader/build.ninja +++ /dev/null @@ -1,24 +0,0 @@ -# Build file for shaders. - -# You must have Vulkan tools in your path, or patch here. 
- -glslang_validator = glslangValidator -spirv_cross = spirv-cross -dxc = dxc - -rule glsl - command = $glslang_validator -V -o $out $in - -rule hlsl - command = $spirv_cross --hlsl $in --output $out - -rule dxil - command = $dxc -T cs_6_0 $in -Fo $out - -rule msl - command = $spirv_cross --msl $in --output $out - -build gen/collatz.spv: glsl collatz.comp -build gen/collatz.hlsl: hlsl gen/collatz.spv -build gen/collatz.dxil: dxil gen/collatz.hlsl -build gen/collatz.msl: msl gen/collatz.spv diff --git a/piet-gpu-hal/examples/shader/collatz.comp b/piet-gpu-hal/examples/shader/collatz.comp deleted file mode 100644 index 7c0e2ab..0000000 --- a/piet-gpu-hal/examples/shader/collatz.comp +++ /dev/null @@ -1,35 +0,0 @@ -// Copied from wgpu hello-compute example - -// TODO: delete or clean up attribution before releasing - -#version 450 -layout(local_size_x = 1) in; - -layout(set = 0, binding = 0) buffer PrimeIndices { - uint[] indices; -}; // this is used as both input and output for convenience - -// The Collatz Conjecture states that for any integer n: -// If n is even, n = n/2 -// If n is odd, n = 3n+1 -// And repeat this process for each new n, you will always eventually reach 1. -// Though the conjecture has not been proven, no counterexample has ever been found. -// This function returns how many times this recurrence needs to be applied to reach 1. -uint collatz_iterations(uint n) { - uint i = 0; - while(n != 1) { - if (mod(n, 2) == 0) { - n = n / 2; - } - else { - n = (3 * n) + 1; - } - i++; - } - return i; -} - -void main() { - uint index = gl_GlobalInvocationID.x; - indices[index] = collatz_iterations(indices[index]); -} diff --git a/piet-gpu-hal/src/backend.rs b/piet-gpu-hal/src/backend.rs deleted file mode 100644 index b948ebc..0000000 --- a/piet-gpu-hal/src/backend.rs +++ /dev/null @@ -1,309 +0,0 @@ -// Copyright 2021 The piet-gpu authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Also licensed under MIT license, at your choice. - -//! The generic trait for backends to implement. - -use crate::{ - BindType, BufferUsage, ComputePassDescriptor, Error, GpuInfo, ImageFormat, ImageLayout, - MapMode, SamplerParams, -}; - -pub trait Device: Sized { - type Buffer: 'static; - type Image; - type Pipeline; - type DescriptorSet; - type QueryPool; - type CmdBuf: CmdBuf; - type Fence; - type Semaphore; - type DescriptorSetBuilder: DescriptorSetBuilder; - type Sampler; - type ShaderSource: ?Sized; - - /// Query the GPU info. - /// - /// This method may be expensive, so the hub should call it once and retain - /// the info. - fn query_gpu_info(&self) -> GpuInfo; - - fn create_buffer(&self, size: u64, usage: BufferUsage) -> Result; - - /// Destroy a buffer. - /// - /// The same safety requirements hold as in Vulkan: the buffer cannot be used - /// after this call, and all commands referencing this buffer must have completed. - /// - /// Maybe doesn't need result return? 
- unsafe fn destroy_buffer(&self, buffer: &Self::Buffer) -> Result<(), Error>; - - unsafe fn create_image2d( - &self, - width: u32, - height: u32, - format: ImageFormat, - ) -> Result; - - /// Destroy an image. - /// - /// The same safety requirements hold as in Vulkan: the image cannot be used - /// after this call, and all commands referencing this image must have completed. - /// - /// Use this only with images we created, not for swapchain images. - /// - /// Maybe doesn't need result return? - unsafe fn destroy_image(&self, image: &Self::Image) -> Result<(), Error>; - - /// Build a compute pipeline. - /// - /// A pipeline is a bit of shader IR plus a signature for what kinds of resources - /// it expects. - unsafe fn create_compute_pipeline( - &self, - code: &Self::ShaderSource, - bind_types: &[BindType], - ) -> Result; - - /// Start building a descriptor set. - /// - /// A descriptor set is a binding of resources for a given pipeline. - unsafe fn descriptor_set_builder(&self) -> Self::DescriptorSetBuilder; - - /// Create a descriptor set for a given pipeline, binding buffers and images. - /// - /// This is provided as a convenience but will probably go away, as the functionality - /// is subsumed by the builder. - unsafe fn create_descriptor_set( - &self, - pipeline: &Self::Pipeline, - bufs: &[&Self::Buffer], - images: &[&Self::Image], - ) -> Result { - let mut builder = self.descriptor_set_builder(); - builder.add_buffers(bufs); - builder.add_images(images); - builder.build(self, pipeline) - } - - /// Update a descriptor in a descriptor set. - /// - /// The index is the same as the binding number in Vulkan. - /// - /// # Safety - /// - /// The descriptor set must not be used in any in-flight command buffer. The index must be valid. - /// The resource type must match that at descriptor set creation time. - unsafe fn update_buffer_descriptor( - &self, - ds: &mut Self::DescriptorSet, - index: u32, - buf: &Self::Buffer, - ); - - /// Update a descriptor in a descriptor set. - /// - /// The index is the same as the binding number in Vulkan. - /// - /// # Safety - /// - /// The descriptor set must not be used in any in-flight command buffer. The index must be valid. - /// The resource type must match that at descriptor set creation time. - unsafe fn update_image_descriptor( - &self, - ds: &mut Self::DescriptorSet, - index: u32, - image: &Self::Image, - ); - - fn create_cmd_buf(&self) -> Result; - - /// If the command buffer was submitted, it must complete before this is called. - unsafe fn destroy_cmd_buf(&self, cmd_buf: Self::CmdBuf) -> Result<(), Error>; - - fn create_query_pool(&self, n_queries: u32) -> Result; - - /// Get results from query pool, destroying it in the process. - /// - /// The returned vector is one less than the number of queries; the first is used as - /// a baseline. - /// - /// # Safety - /// All submitted commands that refer to this query pool must have completed. - unsafe fn fetch_query_pool(&self, pool: &Self::QueryPool) -> Result, Error>; - - unsafe fn run_cmd_bufs( - &self, - cmd_buf: &[&Self::CmdBuf], - wait_semaphores: &[&Self::Semaphore], - signal_semaphores: &[&Self::Semaphore], - fence: Option<&mut Self::Fence>, - ) -> Result<(), Error>; - - /// Map the buffer into addressable memory. - /// - /// # Safety - /// - /// The buffer must be valid to access. The offset + size much be within the - /// buffer's allocation. The buffer must not already be mapped. Of course, - /// the usual safety rules apply to the returned pointer. 
- unsafe fn map_buffer( - &self, - buffer: &Self::Buffer, - offset: u64, - size: u64, - mode: MapMode, - ) -> Result<*mut u8, Error>; - - /// Map the buffer into addressable memory. - /// - /// # Safety - /// - /// The buffer must be mapped. The parameters must be the same as the map - /// call. - unsafe fn unmap_buffer( - &self, - buffer: &Self::Buffer, - offset: u64, - size: u64, - mode: MapMode, - ) -> Result<(), Error>; - - unsafe fn create_semaphore(&self) -> Result; - unsafe fn create_fence(&self, signaled: bool) -> Result; - unsafe fn destroy_fence(&self, fence: Self::Fence) -> Result<(), Error>; - unsafe fn wait_and_reset(&self, fences: Vec<&mut Self::Fence>) -> Result<(), Error>; - unsafe fn get_fence_status(&self, fence: &mut Self::Fence) -> Result; - - unsafe fn create_sampler(&self, params: SamplerParams) -> Result; -} - -/// The trait implemented by backend command buffer implementations. -/// -/// Valid encoding is represented by a state machine (currently not validated -/// but it is easy to imagine there might be at least debug validation). Most -/// methods are only valid in a particular state, and some move it to another -/// state. -pub trait CmdBuf { - /// Begin encoding. - /// - /// State: init -> ready - unsafe fn begin(&mut self); - - /// State: ready -> finished - unsafe fn finish(&mut self); - - /// Commits any open command encoder. - unsafe fn flush(&mut self); - - /// Return true if the command buffer is suitable for reuse. - unsafe fn reset(&mut self) -> bool; - - /// Begin a compute pass. - /// - /// State: ready -> in_compute_pass - unsafe fn begin_compute_pass(&mut self, desc: &ComputePassDescriptor); - - /// Dispatch - /// - /// State: in_compute_pass - unsafe fn dispatch( - &mut self, - pipeline: &D::Pipeline, - descriptor_set: &D::DescriptorSet, - workgroup_count: (u32, u32, u32), - workgroup_size: (u32, u32, u32), - ); - - /// State: in_compute_pass -> ready - unsafe fn end_compute_pass(&mut self); - - /// Insert an execution and memory barrier. - /// - /// Compute kernels (and other actions) after this barrier may read from buffers - /// that were written before this barrier. - unsafe fn memory_barrier(&mut self); - - /// Insert a barrier for host access to buffers. - /// - /// The host may read buffers written before this barrier, after the fence for - /// the command buffer is signaled. - /// - /// See http://themaister.net/blog/2019/08/14/yet-another-blog-explaining-vulkan-synchronization/ - /// ("Host memory reads") for an explanation of this barrier. - unsafe fn host_barrier(&mut self); - - unsafe fn image_barrier( - &mut self, - image: &D::Image, - src_layout: ImageLayout, - dst_layout: ImageLayout, - ); - - /// Clear the buffer. - /// - /// This is readily supported in Vulkan, but for portability it is remarkably - /// tricky (unimplemented in gfx-hal right now). Possibly best to write a compute - /// kernel, or organize the code not to need it. - unsafe fn clear_buffer(&mut self, buffer: &D::Buffer, size: Option); - - unsafe fn copy_buffer(&mut self, src: &D::Buffer, dst: &D::Buffer); - - unsafe fn copy_image_to_buffer(&mut self, src: &D::Image, dst: &D::Buffer); - - unsafe fn copy_buffer_to_image(&mut self, src: &D::Buffer, dst: &D::Image); - - // low portability, dx12 doesn't support it natively - unsafe fn blit_image(&mut self, src: &D::Image, dst: &D::Image); - - /// Reset the query pool. - /// - /// The query pool must be reset before each use, to avoid validation errors. 
- /// This is annoying, and we could tweak the API to make it implicit, doing - /// the reset before the first timestamp write. - unsafe fn reset_query_pool(&mut self, pool: &D::QueryPool); - - unsafe fn write_timestamp(&mut self, pool: &D::QueryPool, query: u32); - - /// Prepare the timestamps for reading. This isn't required on Vulkan but - /// is required on (at least) DX12. - unsafe fn finish_timestamps(&mut self, _pool: &D::QueryPool) {} - - /// Begin a labeled section for debugging and profiling purposes. - unsafe fn begin_debug_label(&mut self, _label: &str) {} - - /// End a section opened by `begin_debug_label`. - unsafe fn end_debug_label(&mut self) {} -} - -/// A builder for descriptor sets with more complex layouts. -/// -/// Note: the order needs to match the pipeline building, and it also needs to -/// be buffers, then images, then textures. -pub trait DescriptorSetBuilder { - fn add_buffers(&mut self, buffers: &[&D::Buffer]); - /// Add an array of storage images. - /// - /// The images need to be in `ImageLayout::General` layout. - fn add_images(&mut self, images: &[&D::Image]); - /// Add an array of textures. - /// - /// The images need to be in `ImageLayout::ShaderRead` layout. - /// - /// The same sampler is used for all textures, which is not very sophisticated; - /// we should have a way to vary the sampler. - fn add_textures(&mut self, images: &[&D::Image]); - unsafe fn build(self, device: &D, pipeline: &D::Pipeline) -> Result; -} diff --git a/piet-gpu-hal/src/bestfit.rs b/piet-gpu-hal/src/bestfit.rs deleted file mode 100644 index 12bb041..0000000 --- a/piet-gpu-hal/src/bestfit.rs +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright © 2021 piet-gpu developers. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those - -//! A simple best-fit allocator. - -use std::collections::{BTreeMap, BTreeSet}; - -/// An allocator that tracks free ranges and returns best fit. 
-pub struct BestFit { - // map offset to size of free block - free_by_ix: BTreeMap, - // size and offset - free_by_size: BTreeSet<(u32, u32)>, -} - -impl BestFit { - pub fn new(size: u32) -> BestFit { - let mut free_by_ix = BTreeMap::new(); - free_by_ix.insert(0, size); - let mut free_by_size = BTreeSet::new(); - free_by_size.insert((size, 0)); - BestFit { - free_by_ix, - free_by_size, - } - } - - pub fn alloc(&mut self, size: u32) -> Option { - let block = *self.free_by_size.range((size, 0)..).next()?; - let ix = block.1; - self.free_by_ix.remove(&ix); - self.free_by_size.remove(&block); - let fragment_size = block.0 - size; - if fragment_size > 0 { - let fragment_ix = ix + size; - self.free_by_ix.insert(fragment_ix, fragment_size); - self.free_by_size.insert((fragment_size, fragment_ix)); - } - Some(ix) - } - - pub fn free(&mut self, ix: u32, size: u32) { - let next_ix = size + ix; - if let Some((&prev_ix, &prev_size)) = self.free_by_ix.range(..ix).rev().next() { - if prev_ix + prev_size == ix { - self.free_by_size.remove(&(prev_size, prev_ix)); - if let Some(&next_size) = self.free_by_ix.get(&next_ix) { - // consolidate with prev and next - let new_size = prev_size + size + next_size; - *self.free_by_ix.get_mut(&prev_ix).unwrap() = new_size; - self.free_by_ix.remove(&next_ix); - self.free_by_size.remove(&(next_size, next_ix)); - self.free_by_size.insert((new_size, prev_ix)); - } else { - // consolidate with prev - let new_size = prev_size + size; - *self.free_by_ix.get_mut(&prev_ix).unwrap() = new_size; - self.free_by_size.insert((new_size, prev_ix)); - } - return; - } - } - if let Some(&next_size) = self.free_by_ix.get(&next_ix) { - // consolidate with next - let new_size = size + next_size; - self.free_by_ix.remove(&next_ix); - self.free_by_ix.insert(ix, new_size); - self.free_by_size.remove(&(next_size, next_ix)); - self.free_by_size.insert((new_size, ix)); - } else { - // new isolated free block - self.free_by_ix.insert(ix, size); - self.free_by_size.insert((size, ix)); - } - } -} diff --git a/piet-gpu-hal/src/bufwrite.rs b/piet-gpu-hal/src/bufwrite.rs deleted file mode 100644 index 37f0292..0000000 --- a/piet-gpu-hal/src/bufwrite.rs +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright © 2021 piet-gpu developers. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those - -//! An abstraction for writing to GPU buffers. - -use bytemuck::Pod; - -/// A GPU buffer to be filled. -pub struct BufWrite { - ptr: *mut u8, - len: usize, - capacity: usize, -} - -impl BufWrite { - pub(crate) fn new(ptr: *mut u8, len: usize, capacity: usize) -> BufWrite { - BufWrite { ptr, len, capacity } - } - - /// Append a plain data object to the buffer. - /// - /// Panics if capacity is inadequate. - #[inline] - pub fn push(&mut self, item: impl Pod) { - self.push_bytes(bytemuck::bytes_of(&item)); - } - - /// Extend with a slice of plain data objects. - /// - /// Panics if capacity is inadequate. - #[inline] - pub fn extend_slice(&mut self, slice: &[impl Pod]) { - self.push_bytes(bytemuck::cast_slice(slice)); - } - - /// Extend with a byte slice. - /// - /// Panics if capacity is inadequate. - #[inline] - pub fn push_bytes(&mut self, bytes: &[u8]) { - let len = bytes.len(); - assert!(self.capacity - self.len >= len); - unsafe { - std::ptr::copy_nonoverlapping(bytes.as_ptr(), self.ptr.add(self.len), len); - } - self.len += len; - } - - /// Extend with zeros. 
- /// - /// Panics if capacity is inadequate. - #[inline] - pub fn fill_zero(&mut self, len: usize) { - assert!(self.capacity - self.len >= len); - unsafe { - let slice = std::slice::from_raw_parts_mut(self.ptr.add(self.len), len); - slice.fill(0); - } - self.len += len; - } - - /// The total capacity of the buffer, in bytes. - #[inline] - pub fn capacity(&self) -> usize { - self.capacity - } - - /// Extend with an iterator over plain data objects. - /// - /// Currently, this doesn't panic, just truncates. That may change. - // Note: when specialization lands, this can be another impl of - // `Extend`. - pub fn extend_ref_iter<'a, I, T: Pod + 'a>(&mut self, iter: I) - where - I: IntoIterator, - { - let item_size = std::mem::size_of::(); - if item_size == 0 { - return; - } - let mut iter = iter.into_iter(); - let n_remaining = (self.capacity - self.len) / item_size; - unsafe { - let mut dst = self.ptr.add(self.len); - for _ in 0..n_remaining { - if let Some(item) = iter.next() { - std::ptr::copy_nonoverlapping( - bytemuck::bytes_of(item).as_ptr(), - dst, - item_size, - ); - self.len += item_size; - dst = dst.add(item_size); - } else { - break; - } - } - } - // TODO: should we test the iter and panic on overflow? - } -} - -impl std::ops::Deref for BufWrite { - type Target = [u8]; - fn deref(&self) -> &[u8] { - unsafe { std::slice::from_raw_parts(self.ptr, self.len) } - } -} - -impl std::ops::DerefMut for BufWrite { - fn deref_mut(&mut self) -> &mut [u8] { - unsafe { std::slice::from_raw_parts_mut(self.ptr, self.len) } - } -} - -impl std::iter::Extend for BufWrite { - fn extend(&mut self, iter: I) - where - I: IntoIterator, - { - let item_size = std::mem::size_of::(); - if item_size == 0 { - return; - } - let mut iter = iter.into_iter(); - let n_remaining = (self.capacity - self.len) / item_size; - unsafe { - let mut dst = self.ptr.add(self.len); - for _ in 0..n_remaining { - if let Some(item) = iter.next() { - std::ptr::copy_nonoverlapping( - bytemuck::bytes_of(&item).as_ptr(), - dst, - item_size, - ); - self.len += item_size; - dst = dst.add(item_size); - } else { - break; - } - } - } - // TODO: should we test the iter and panic on overflow? - } -} diff --git a/piet-gpu-hal/src/dx12.rs b/piet-gpu-hal/src/dx12.rs deleted file mode 100644 index f7edb24..0000000 --- a/piet-gpu-hal/src/dx12.rs +++ /dev/null @@ -1,858 +0,0 @@ -//! DX12 implemenation of HAL trait. 
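The `BufWrite` type listed above is, at its core, a bounds-checked write cursor over a mapped GPU allocation: append plain-old-data bytes, panic if capacity runs out. A minimal safe sketch of the same pattern over an ordinary byte slice (the `SliceWriter` name and helpers here are hypothetical, not part of the removed API):

/// Hypothetical, simplified analogue of the `BufWrite` pattern: a cursor that
/// appends bytes into a fixed-capacity slice and panics on overflow.
struct SliceWriter<'a> {
    buf: &'a mut [u8],
    len: usize,
}

impl<'a> SliceWriter<'a> {
    fn new(buf: &'a mut [u8]) -> SliceWriter<'a> {
        SliceWriter { buf, len: 0 }
    }

    fn push_bytes(&mut self, bytes: &[u8]) {
        let end = self.len + bytes.len();
        assert!(end <= self.buf.len(), "capacity exceeded");
        self.buf[self.len..end].copy_from_slice(bytes);
        self.len = end;
    }

    fn push_u32(&mut self, value: u32) {
        self.push_bytes(&value.to_le_bytes());
    }

    fn fill_zero(&mut self, n: usize) {
        let end = self.len + n;
        assert!(end <= self.buf.len(), "capacity exceeded");
        self.buf[self.len..end].fill(0);
        self.len = end;
    }
}

fn main() {
    let mut storage = [0u8; 16]; // stand-in for a mapped GPU buffer
    let mut w = SliceWriter::new(&mut storage);
    w.push_u32(0xdead_beef);
    w.fill_zero(4);
    assert_eq!(w.len, 8);
}

The real type adds `Pod`-based pushes and iterator extension on top of this, but the capacity accounting is the same.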
- -mod descriptor; -mod error; -mod wrappers; - -use std::{ - cell::Cell, - convert::{TryFrom, TryInto}, - mem, ptr, - sync::{Arc, Mutex}, -}; - -#[allow(unused)] -use winapi::shared::dxgi1_3; // for error reporting in debug mode -use winapi::shared::minwindef::TRUE; -use winapi::shared::{dxgi, dxgi1_2, dxgitype}; -use winapi::um::d3d12; - -use raw_window_handle::{RawDisplayHandle, RawWindowHandle}; - -use smallvec::SmallVec; - -use crate::{ - BindType, BufferUsage, ComputePassDescriptor, Error, GpuInfo, ImageFormat, ImageLayout, - MapMode, WorkgroupLimits, -}; - -use self::{ - descriptor::{CpuHeapRefOwned, DescriptorPool, GpuHeapRefOwned}, - wrappers::{ - CommandAllocator, CommandQueue, DescriptorHeap, Device, Factory4, Resource, ShaderByteCode, - }, -}; - -pub struct Dx12Instance { - factory: Factory4, -} - -pub struct Dx12Surface { - hwnd: winapi::shared::windef::HWND, -} - -pub struct Dx12Swapchain { - swapchain: wrappers::SwapChain3, - size: (u32, u32), -} - -pub struct Dx12Device { - device: Device, - command_queue: CommandQueue, - ts_freq: u64, - gpu_info: GpuInfo, - memory_arch: MemoryArchitecture, - descriptor_pool: Mutex, -} - -#[derive(Clone)] -pub struct Buffer { - resource: Resource, - pub size: u64, - // Always present except for query readback buffer. - cpu_ref: Option>, - // Present when created with CLEAR usage. Heap is here for - // the same reason it's in DescriptorSet, and might be removed - // when CmdBuf has access to the descriptor pool. - gpu_ref: Option<(Arc, DescriptorHeap)>, -} - -#[derive(Clone)] -pub struct Image { - resource: Resource, - // Present except for swapchain images. - cpu_ref: Option>, - size: (u32, u32), -} - -pub struct CmdBuf { - c: wrappers::GraphicsCommandList, - allocator: CommandAllocator, - needs_reset: bool, - end_query: Option<(wrappers::QueryHeap, u32)>, -} - -pub struct Pipeline { - pipeline_state: wrappers::PipelineState, - root_signature: wrappers::RootSignature, -} - -pub struct DescriptorSet { - gpu_ref: GpuHeapRefOwned, - // Note: the heap is only needed here so CmdBuf::dispatch can get - // use it easily. If CmdBuf had a reference to the Device (or just - // the descriptor pool), we could get rid of this. - heap: DescriptorHeap, -} - -pub struct QueryPool { - heap: wrappers::QueryHeap, - // Maybe this should just be a Resource, not a full Buffer. - buf: Buffer, - n_queries: u32, -} - -pub struct Fence { - fence: wrappers::Fence, - event: wrappers::Event, - // This could as well be an atomic, if we needed to cross threads. - val: Cell, -} - -/// This will probably be renamed "PresentSem" or similar. I believe no -/// semaphore is needed for presentation on DX12. -pub struct Semaphore; - -#[derive(Default)] -pub struct DescriptorSetBuilder { - handles: SmallVec<[d3d12::D3D12_CPU_DESCRIPTOR_HANDLE; 16]>, -} - -#[derive(PartialEq, Eq)] -enum MemoryArchitecture { - /// Integrated graphics - CacheCoherentUMA, - /// Unified memory with no cache coherence (does this happen?) - UMA, - /// Discrete graphics - NUMA, -} - -impl Dx12Instance { - /// Create a new instance. - pub fn new() -> Result { - unsafe { - #[cfg(debug_assertions)] - if let Err(e) = wrappers::enable_debug_layer() { - // Maybe a better logging solution? - println!("{}", e); - } - - #[cfg(debug_assertions)] - let factory_flags = dxgi1_3::DXGI_CREATE_FACTORY_DEBUG; - - #[cfg(not(debug_assertions))] - let factory_flags: u32 = 0; - - let factory = Factory4::create(factory_flags)?; - - Ok(Dx12Instance { factory }) - } - } - - /// Create a surface for the specified window handle. 
- pub fn surface( - &self, - _display_handle: RawDisplayHandle, - window_handle: RawWindowHandle, - ) -> Result { - if let RawWindowHandle::Win32(w) = window_handle { - let hwnd = w.hwnd as *mut _; - Ok(Dx12Surface { hwnd }) - } else { - Err("can't create surface for window handle".into()) - } - } - - /// Get a device suitable for compute workloads. - pub fn device(&self) -> Result { - unsafe { - let device = Device::create_device(&self.factory)?; - let list_type = d3d12::D3D12_COMMAND_LIST_TYPE_DIRECT; - let command_queue = device.create_command_queue( - list_type, - 0, - d3d12::D3D12_COMMAND_QUEUE_FLAG_NONE, - 0, - )?; - - let ts_freq = command_queue.get_timestamp_frequency()?; - let features_architecture = device.get_features_architecture()?; - let uma = features_architecture.UMA == TRUE; - let cc_uma = features_architecture.CacheCoherentUMA == TRUE; - let memory_arch = match (uma, cc_uma) { - (true, true) => MemoryArchitecture::CacheCoherentUMA, - (true, false) => MemoryArchitecture::UMA, - _ => MemoryArchitecture::NUMA, - }; - let use_staging_buffers = memory_arch == MemoryArchitecture::NUMA; - // These values are appropriate for Shader Model 5. When we open up - // DXIL, fix this with proper dynamic queries. - let gpu_info = GpuInfo { - has_descriptor_indexing: false, - has_subgroups: false, - subgroup_size: None, - workgroup_limits: WorkgroupLimits { - max_size: [1024, 1024, 64], - max_invocations: 1024, - }, - has_memory_model: false, - use_staging_buffers, - }; - let descriptor_pool = Default::default(); - Ok(Dx12Device { - device, - command_queue, - ts_freq, - memory_arch, - gpu_info, - descriptor_pool, - }) - } - } - - pub unsafe fn swapchain( - &self, - width: usize, - height: usize, - device: &Dx12Device, - surface: &Dx12Surface, - ) -> Result { - const FRAME_COUNT: u32 = 2; - let desc = dxgi1_2::DXGI_SWAP_CHAIN_DESC1 { - Width: width as u32, - Height: height as u32, - AlphaMode: dxgi1_2::DXGI_ALPHA_MODE_IGNORE, - BufferCount: FRAME_COUNT, - Format: winapi::shared::dxgiformat::DXGI_FORMAT_R8G8B8A8_UNORM, - Flags: 0, - BufferUsage: dxgitype::DXGI_USAGE_RENDER_TARGET_OUTPUT, - SampleDesc: dxgitype::DXGI_SAMPLE_DESC { - Count: 1, - Quality: 0, - }, - Scaling: dxgi1_2::DXGI_SCALING_STRETCH, - Stereo: winapi::shared::minwindef::FALSE, - SwapEffect: dxgi::DXGI_SWAP_EFFECT_FLIP_DISCARD, - }; - let swapchain = - self.factory - .create_swapchain_for_hwnd(&device.command_queue, surface.hwnd, desc)?; - let size = (width as u32, height as u32); - Ok(Dx12Swapchain { swapchain, size }) - } -} - -impl crate::backend::Device for Dx12Device { - type Buffer = Buffer; - - type Image = Image; - - type Pipeline = Pipeline; - - type DescriptorSet = DescriptorSet; - - type QueryPool = QueryPool; - - type CmdBuf = CmdBuf; - - type Fence = Fence; - - type Semaphore = Semaphore; - - type DescriptorSetBuilder = DescriptorSetBuilder; - - type Sampler = (); - - // Currently due to type inflexibility this is hardcoded to either HLSL or - // DXIL, but it would be nice to be able to handle both at runtime. - type ShaderSource = [u8]; - - fn create_buffer(&self, size: u64, usage: BufferUsage) -> Result { - // TODO: consider supporting BufferUsage::QUERY_RESOLVE here rather than - // having a separate function. - unsafe { - let page_property = self.memory_arch.page_property(usage); - let memory_pool = self.memory_arch.memory_pool(usage); - //TODO: consider flag D3D12_HEAP_FLAG_ALLOW_SHADER_ATOMICS? 
- let flags = d3d12::D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS; - let resource = self.device.create_buffer( - size, - d3d12::D3D12_HEAP_TYPE_CUSTOM, - page_property, - memory_pool, - d3d12::D3D12_RESOURCE_STATE_COMMON, - flags, - )?; - let mut descriptor_pool = self.descriptor_pool.lock().unwrap(); - let cpu_ref = Arc::new(descriptor_pool.alloc_cpu(&self.device)?); - let cpu_handle = descriptor_pool.cpu_handle(&cpu_ref); - self.device - .create_byte_addressed_buffer_unordered_access_view( - &resource, - cpu_handle, - 0, - (size / 4).try_into()?, - ); - let gpu_ref = if usage.contains(BufferUsage::CLEAR) { - let gpu_ref = Arc::new(descriptor_pool.alloc_gpu(&self.device, 1)?); - let gpu_handle = descriptor_pool.cpu_handle_of_gpu(&gpu_ref, 0); - self.device.copy_descriptors( - &[gpu_handle], - &[1], - &[cpu_handle], - &[1], - d3d12::D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV, - ); - let heap = descriptor_pool.gpu_heap(&gpu_ref).to_owned(); - Some((gpu_ref, heap)) - } else { - None - }; - Ok(Buffer { - resource, - size, - cpu_ref: Some(cpu_ref), - gpu_ref, - }) - } - } - - unsafe fn destroy_buffer(&self, buffer: &Self::Buffer) -> Result<(), Error> { - buffer.resource.destroy(); - Ok(()) - } - - unsafe fn create_image2d( - &self, - width: u32, - height: u32, - format: ImageFormat, - ) -> Result { - let format = match format { - ImageFormat::A8 => winapi::shared::dxgiformat::DXGI_FORMAT_R8_UNORM, - ImageFormat::Rgba8 | ImageFormat::Surface => winapi::shared::dxgiformat::DXGI_FORMAT_R8G8B8A8_UNORM, - }; - let resource = self - .device - .create_texture2d_buffer(width.into(), height, format, true)?; - - let mut descriptor_pool = self.descriptor_pool.lock().unwrap(); - let cpu_ref = Arc::new(descriptor_pool.alloc_cpu(&self.device)?); - let cpu_handle = descriptor_pool.cpu_handle(&cpu_ref); - self.device - .create_unordered_access_view(&resource, cpu_handle); - let size = (width, height); - Ok(Image { - resource, - cpu_ref: Some(cpu_ref), - size, - }) - } - - unsafe fn destroy_image(&self, image: &Self::Image) -> Result<(), Error> { - image.resource.destroy(); - Ok(()) - } - - fn create_cmd_buf(&self) -> Result { - let list_type = d3d12::D3D12_COMMAND_LIST_TYPE_DIRECT; - let allocator = unsafe { self.device.create_command_allocator(list_type)? 
}; - let node_mask = 0; - unsafe { - let c = self - .device - .create_graphics_command_list(list_type, &allocator, None, node_mask)?; - Ok(CmdBuf { - c, - allocator, - needs_reset: false, - end_query: None, - }) - } - } - - unsafe fn destroy_cmd_buf(&self, _cmd_buf: Self::CmdBuf) -> Result<(), Error> { - Ok(()) - } - - fn create_query_pool(&self, n_queries: u32) -> Result { - unsafe { - let heap = self - .device - .create_query_heap(d3d12::D3D12_QUERY_HEAP_TYPE_TIMESTAMP, n_queries)?; - let buf = self.create_readback_buffer((n_queries * 8) as u64)?; - Ok(QueryPool { - heap, - buf, - n_queries, - }) - } - } - - unsafe fn fetch_query_pool(&self, pool: &Self::QueryPool) -> Result, Error> { - let mut buf = vec![0u64; pool.n_queries as usize]; - let size = mem::size_of_val(buf.as_slice()); - let mapped = self.map_buffer(&pool.buf, 0, size as u64, MapMode::Read)?; - std::ptr::copy_nonoverlapping(mapped, buf.as_mut_ptr() as *mut u8, size); - self.unmap_buffer(&pool.buf, 0, size as u64, MapMode::Read)?; - let tsp = (self.ts_freq as f64).recip(); - let result = buf.iter().map(|ts| *ts as f64 * tsp).collect(); - Ok(result) - } - - unsafe fn run_cmd_bufs( - &self, - cmd_bufs: &[&Self::CmdBuf], - _wait_semaphores: &[&Self::Semaphore], - _signal_semaphores: &[&Self::Semaphore], - fence: Option<&mut Self::Fence>, - ) -> Result<(), Error> { - // TODO: handle semaphores - let lists = cmd_bufs - .iter() - .map(|c| c.c.as_raw_command_list()) - .collect::>(); - self.command_queue.execute_command_lists(&lists); - if let Some(fence) = fence { - let val = fence.val.get() + 1; - fence.val.set(val); - self.command_queue.signal(&fence.fence, val)?; - fence.fence.set_event_on_completion(&fence.event, val)?; - } - Ok(()) - } - - unsafe fn map_buffer( - &self, - buffer: &Self::Buffer, - offset: u64, - size: u64, - mode: MapMode, - ) -> Result<*mut u8, Error> { - let mapped = buffer.resource.map_buffer(offset, size, mode)?; - Ok(mapped) - } - - unsafe fn unmap_buffer( - &self, - buffer: &Self::Buffer, - offset: u64, - size: u64, - mode: MapMode, - ) -> Result<(), Error> { - buffer.resource.unmap_buffer(offset, size, mode)?; - Ok(()) - } - - unsafe fn create_semaphore(&self) -> Result { - Ok(Semaphore) - } - - unsafe fn create_fence(&self, signaled: bool) -> Result { - let fence = self.device.create_fence(0)?; - let event = wrappers::Event::create(false, signaled)?; - let val = Cell::new(0); - Ok(Fence { fence, event, val }) - } - - unsafe fn destroy_fence(&self, _fence: Self::Fence) -> Result<(), Error> { - Ok(()) - } - - unsafe fn wait_and_reset(&self, fences: Vec<&mut Self::Fence>) -> Result<(), Error> { - for fence in fences { - // TODO: probably handle errors here. 
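`fetch_query_pool` above turns raw GPU timestamp ticks into seconds by multiplying with the reciprocal of the command queue's timestamp frequency. Restated as a small standalone helper (names hypothetical), with a typical use of taking the delta between two queries bracketing a dispatch:

/// Hypothetical helper mirroring the conversion in `fetch_query_pool`:
/// raw timestamp ticks -> seconds, using the queue's tick frequency.
fn ticks_to_seconds(ticks: &[u64], ts_freq_hz: u64) -> Vec<f64> {
    let period = (ts_freq_hz as f64).recip();
    ticks.iter().map(|&t| t as f64 * period).collect()
}

fn main() {
    // Example: a 10 MHz timestamp clock and two queries around a dispatch.
    let ts = ticks_to_seconds(&[1_000_000, 1_250_000], 10_000_000);
    let elapsed_ms = (ts[1] - ts[0]) * 1e3;
    assert!((elapsed_ms - 25.0).abs() < 1e-9);
}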
- let _status = fence.event.wait(winapi::um::winbase::INFINITE); - } - Ok(()) - } - - unsafe fn get_fence_status(&self, fence: &mut Self::Fence) -> Result { - let fence_val = fence.fence.get_value(); - Ok(fence_val == fence.val.get()) - } - - fn query_gpu_info(&self) -> crate::GpuInfo { - self.gpu_info.clone() - } - - unsafe fn create_compute_pipeline( - &self, - code: &Self::ShaderSource, - bind_types: &[BindType], - ) -> Result { - if u32::try_from(bind_types.len()).is_err() { - panic!("bind type length overflow"); - } - let mut ranges = Vec::new(); - let mut i = 0; - fn map_range_type(bind_type: BindType) -> d3d12::D3D12_DESCRIPTOR_RANGE_TYPE { - match bind_type { - BindType::Buffer | BindType::Image | BindType::ImageRead => { - d3d12::D3D12_DESCRIPTOR_RANGE_TYPE_UAV - } - BindType::BufReadOnly => d3d12::D3D12_DESCRIPTOR_RANGE_TYPE_SRV, - } - } - while i < bind_types.len() { - let range_type = map_range_type(bind_types[i]); - let mut end = i + 1; - while end < bind_types.len() && map_range_type(bind_types[end]) == range_type { - end += 1; - } - let n_descriptors = (end - i) as u32; - ranges.push(d3d12::D3D12_DESCRIPTOR_RANGE { - RangeType: range_type, - NumDescriptors: n_descriptors, - BaseShaderRegister: i as u32, - RegisterSpace: 0, - OffsetInDescriptorsFromTableStart: d3d12::D3D12_DESCRIPTOR_RANGE_OFFSET_APPEND, - }); - i = end; - } - - // We could always have ShaderSource as [u8] even when it's HLSL, and use the - // magic number to distinguish. In any case, for now it's hardcoded as one or - // the other. - /* - // HLSL code path - #[cfg(debug_assertions)] - let flags = winapi::um::d3dcompiler::D3DCOMPILE_DEBUG - | winapi::um::d3dcompiler::D3DCOMPILE_SKIP_OPTIMIZATION; - #[cfg(not(debug_assertions))] - let flags = 0; - let shader_blob = ShaderByteCode::compile(code, "cs_5_1", "main", flags)?; - let shader = ShaderByteCode::from_blob(shader_blob); - */ - - // DXIL code path - let shader = ShaderByteCode::from_slice(code); - - let mut root_parameter = d3d12::D3D12_ROOT_PARAMETER { - ParameterType: d3d12::D3D12_ROOT_PARAMETER_TYPE_DESCRIPTOR_TABLE, - ShaderVisibility: d3d12::D3D12_SHADER_VISIBILITY_ALL, - ..mem::zeroed() - }; - *root_parameter.u.DescriptorTable_mut() = d3d12::D3D12_ROOT_DESCRIPTOR_TABLE { - NumDescriptorRanges: ranges.len() as u32, - pDescriptorRanges: ranges.as_ptr(), - }; - let root_signature_desc = d3d12::D3D12_ROOT_SIGNATURE_DESC { - NumParameters: 1, - pParameters: &root_parameter, - NumStaticSamplers: 0, - pStaticSamplers: ptr::null(), - Flags: d3d12::D3D12_ROOT_SIGNATURE_FLAG_NONE, - }; - let root_signature_blob = wrappers::RootSignature::serialize_description( - &root_signature_desc, - d3d12::D3D_ROOT_SIGNATURE_VERSION_1, - )?; - let root_signature = self.device.create_root_signature(0, root_signature_blob)?; - let desc = d3d12::D3D12_COMPUTE_PIPELINE_STATE_DESC { - pRootSignature: root_signature.0.as_raw(), - CS: shader.bytecode, - NodeMask: 0, - CachedPSO: d3d12::D3D12_CACHED_PIPELINE_STATE { - pCachedBlob: ptr::null(), - CachedBlobSizeInBytes: 0, - }, - Flags: d3d12::D3D12_PIPELINE_STATE_FLAG_NONE, - }; - let pipeline_state = self.device.create_compute_pipeline_state(&desc)?; - - Ok(Pipeline { - pipeline_state, - root_signature, - }) - } - - unsafe fn descriptor_set_builder(&self) -> Self::DescriptorSetBuilder { - DescriptorSetBuilder::default() - } - - unsafe fn update_buffer_descriptor( - &self, - ds: &mut Self::DescriptorSet, - index: u32, - buf: &Self::Buffer, - ) { - let src_cpu_ref = buf.cpu_ref.as_ref().unwrap().handle(); - ds.gpu_ref - 
.copy_one_descriptor(&self.device, src_cpu_ref, index); - } - - unsafe fn update_image_descriptor( - &self, - ds: &mut Self::DescriptorSet, - index: u32, - image: &Self::Image, - ) { - let src_cpu_ref = image.cpu_ref.as_ref().unwrap().handle(); - ds.gpu_ref - .copy_one_descriptor(&self.device, src_cpu_ref, index); - } - - unsafe fn create_sampler(&self, _params: crate::SamplerParams) -> Result { - todo!() - } -} - -impl Dx12Device { - fn create_readback_buffer(&self, size: u64) -> Result { - unsafe { - let resource = self.device.create_buffer( - size, - d3d12::D3D12_HEAP_TYPE_READBACK, - d3d12::D3D12_CPU_PAGE_PROPERTY_UNKNOWN, - d3d12::D3D12_MEMORY_POOL_UNKNOWN, - d3d12::D3D12_RESOURCE_STATE_COPY_DEST, - d3d12::D3D12_RESOURCE_FLAG_NONE, - )?; - let cpu_ref = None; - let gpu_ref = None; - Ok(Buffer { - resource, - size, - cpu_ref, - gpu_ref, - }) - } - } -} - -impl crate::backend::CmdBuf for CmdBuf { - unsafe fn begin(&mut self) { - if self.needs_reset {} - } - - unsafe fn finish(&mut self) { - let _ = self.c.close(); - self.needs_reset = true; - } - - unsafe fn flush(&mut self) {} - - unsafe fn reset(&mut self) -> bool { - self.allocator.reset().is_ok() && self.c.reset(&self.allocator, None).is_ok() - } - - unsafe fn begin_compute_pass(&mut self, desc: &ComputePassDescriptor) { - if let Some((pool, start, end)) = &desc.timer_queries { - #[allow(irrefutable_let_patterns)] - if let crate::hub::QueryPool::Dx12(pool) = pool { - self.write_timestamp(pool, *start); - self.end_query = Some((pool.heap.clone(), *end)); - } - } - } - - unsafe fn dispatch( - &mut self, - pipeline: &Pipeline, - descriptor_set: &DescriptorSet, - workgroup_count: (u32, u32, u32), - _workgroup_size: (u32, u32, u32), - ) { - self.c.set_pipeline_state(&pipeline.pipeline_state); - self.c - .set_compute_pipeline_root_signature(&pipeline.root_signature); - // TODO: persist heap ix and only set if changed. - self.c.set_descriptor_heaps(&[&descriptor_set.heap]); - self.c - .set_compute_root_descriptor_table(0, descriptor_set.gpu_ref.gpu_handle()); - self.c - .dispatch(workgroup_count.0, workgroup_count.1, workgroup_count.2); - } - - unsafe fn end_compute_pass(&mut self) { - if let Some((heap, end)) = self.end_query.take() { - self.c.end_timing_query(&heap, end); - } - } - - unsafe fn memory_barrier(&mut self) { - // See comments in CommandBuffer::pipeline_barrier in gfx-hal dx12 backend. - // The "proper" way to do this would be to name the actual buffers participating - // in the barrier. But it seems like this is a reasonable way to create a - // global barrier. - let bar = wrappers::create_uav_resource_barrier(ptr::null_mut()); - self.c.resource_barrier(&[bar]); - } - - unsafe fn host_barrier(&mut self) { - // My understanding is that a host barrier is not needed, but am still hunting - // down an authoritative source for that. Among other things, the docs for - // Map suggest that it does the needed visibility operation. - // - // https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12resource-map - } - - unsafe fn image_barrier( - &mut self, - image: &Image, - src_layout: crate::ImageLayout, - dst_layout: crate::ImageLayout, - ) { - let src_state = resource_state_for_image_layout(src_layout); - let dst_state = resource_state_for_image_layout(dst_layout); - if src_state != dst_state { - let bar = wrappers::create_transition_resource_barrier( - image.resource.get_mut(), - src_state, - dst_state, - ); - self.c.resource_barrier(&[bar]); - } - // Always do a memory barrier in case of UAV image access. 
We probably - // want to make these barriers more precise. - self.memory_barrier(); - } - - unsafe fn clear_buffer(&mut self, buffer: &Buffer, size: Option) { - let cpu_ref = buffer.cpu_ref.as_ref().unwrap(); - let (gpu_ref, heap) = buffer - .gpu_ref - .as_ref() - .expect("Need to set CLEAR usage on buffer"); - // Same TODO as dispatch: track and only set if changed. - self.c.set_descriptor_heaps(&[heap]); - // Discussion question: would compute shader be faster? Should measure. - self.c.clear_uav( - gpu_ref.gpu_handle(), - cpu_ref.handle(), - &buffer.resource, - 0, - size, - ); - } - - unsafe fn copy_buffer(&mut self, src: &Buffer, dst: &Buffer) { - // TODO: consider using copy_resource here (if sizes match) - let size = src.size.min(dst.size); - self.c.copy_buffer(&dst.resource, 0, &src.resource, 0, size); - } - - unsafe fn copy_image_to_buffer(&mut self, src: &Image, dst: &Buffer) { - self.c - .copy_texture_to_buffer(&src.resource, &dst.resource, src.size.0, src.size.1); - } - - unsafe fn copy_buffer_to_image(&mut self, src: &Buffer, dst: &Image) { - self.c - .copy_buffer_to_texture(&src.resource, &dst.resource, dst.size.0, dst.size.1); - } - - unsafe fn blit_image(&mut self, src: &Image, dst: &Image) { - self.c.copy_resource(&src.resource, &dst.resource); - } - - unsafe fn reset_query_pool(&mut self, _pool: &QueryPool) {} - - unsafe fn write_timestamp(&mut self, pool: &QueryPool, query: u32) { - self.c.end_timing_query(&pool.heap, query); - } - - unsafe fn finish_timestamps(&mut self, pool: &QueryPool) { - self.c - .resolve_timing_query_data(&pool.heap, 0, pool.n_queries, &pool.buf.resource, 0); - } -} - -impl crate::backend::DescriptorSetBuilder for DescriptorSetBuilder { - fn add_buffers(&mut self, buffers: &[&Buffer]) { - for buf in buffers { - self.handles.push(buf.cpu_ref.as_ref().unwrap().handle()); - } - } - - fn add_images(&mut self, images: &[&Image]) { - for img in images { - self.handles.push(img.cpu_ref.as_ref().unwrap().handle()); - } - } - - fn add_textures(&mut self, images: &[&Image]) { - for img in images { - self.handles.push(img.cpu_ref.as_ref().unwrap().handle()); - } - } - - unsafe fn build( - self, - device: &Dx12Device, - _pipeline: &Pipeline, - ) -> Result { - let mut descriptor_pool = device.descriptor_pool.lock().unwrap(); - let n_descriptors = self.handles.len().try_into()?; - let gpu_ref = descriptor_pool.alloc_gpu(&device.device, n_descriptors)?; - gpu_ref.copy_descriptors(&device.device, &self.handles); - let heap = descriptor_pool.gpu_heap(&gpu_ref).to_owned(); - Ok(DescriptorSet { gpu_ref, heap }) - } -} - -impl MemoryArchitecture { - // See https://msdn.microsoft.com/de-de/library/windows/desktop/dn788678(v=vs.85).aspx - - fn page_property(&self, usage: BufferUsage) -> d3d12::D3D12_CPU_PAGE_PROPERTY { - if usage.contains(BufferUsage::MAP_READ) { - d3d12::D3D12_CPU_PAGE_PROPERTY_WRITE_BACK - } else if usage.contains(BufferUsage::MAP_WRITE) { - if *self == MemoryArchitecture::CacheCoherentUMA { - d3d12::D3D12_CPU_PAGE_PROPERTY_WRITE_BACK - } else { - d3d12::D3D12_CPU_PAGE_PROPERTY_WRITE_COMBINE - } - } else { - d3d12::D3D12_CPU_PAGE_PROPERTY_NOT_AVAILABLE - } - } - - fn memory_pool(&self, usage: BufferUsage) -> d3d12::D3D12_MEMORY_POOL { - if *self == MemoryArchitecture::NUMA - && !usage.intersects(BufferUsage::MAP_READ | BufferUsage::MAP_WRITE) - { - d3d12::D3D12_MEMORY_POOL_L1 - } else { - d3d12::D3D12_MEMORY_POOL_L0 - } - } -} - -fn resource_state_for_image_layout(layout: ImageLayout) -> d3d12::D3D12_RESOURCE_STATES { - match layout { - 
ImageLayout::Undefined => d3d12::D3D12_RESOURCE_STATE_COMMON, - ImageLayout::Present => d3d12::D3D12_RESOURCE_STATE_PRESENT, - ImageLayout::BlitSrc => d3d12::D3D12_RESOURCE_STATE_COPY_SOURCE, - ImageLayout::BlitDst => d3d12::D3D12_RESOURCE_STATE_COPY_DEST, - ImageLayout::General => d3d12::D3D12_RESOURCE_STATE_COMMON, - ImageLayout::ShaderRead => d3d12::D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE, - } -} - -impl Dx12Swapchain { - pub unsafe fn next(&mut self) -> Result<(usize, Semaphore), Error> { - let idx = self.swapchain.get_current_back_buffer_index(); - Ok((idx as usize, Semaphore)) - } - - pub unsafe fn image(&self, idx: usize) -> Image { - let buffer = self.swapchain.get_buffer(idx as u32); - Image { - resource: buffer, - cpu_ref: None, - size: self.size, - } - } - - pub unsafe fn present( - &self, - _image_idx: usize, - _semaphores: &[&Semaphore], - ) -> Result { - self.swapchain.present(1, 0)?; - Ok(false) - } -} diff --git a/piet-gpu-hal/src/dx12/descriptor.rs b/piet-gpu-hal/src/dx12/descriptor.rs deleted file mode 100644 index de119e9..0000000 --- a/piet-gpu-hal/src/dx12/descriptor.rs +++ /dev/null @@ -1,309 +0,0 @@ -// Copyright © 2021 piet-gpu developers. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those - -//! Descriptor management. - -use std::{ - convert::TryInto, - ops::Deref, - sync::{Arc, Mutex, Weak}, -}; - -use smallvec::SmallVec; -use winapi::um::d3d12::{ - D3D12_CPU_DESCRIPTOR_HANDLE, D3D12_DESCRIPTOR_HEAP_DESC, D3D12_DESCRIPTOR_HEAP_FLAG_NONE, - D3D12_DESCRIPTOR_HEAP_FLAG_SHADER_VISIBLE, D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV, - D3D12_GPU_DESCRIPTOR_HANDLE, -}; - -use crate::{bestfit::BestFit, Error}; - -use super::wrappers::{DescriptorHeap, Device}; - -const CPU_CHUNK_SIZE: u32 = 256; -const GPU_CHUNK_SIZE: u32 = 4096; - -#[derive(Default)] -pub struct DescriptorPool { - cpu_visible: Vec, - gpu_visible: Vec, - free_list: Arc>, -} - -#[derive(Default)] -pub struct DescriptorFreeList { - cpu_free: Vec>, - gpu_free: Vec, -} - -struct CpuHeap { - // Retained for lifetime reasons. - #[allow(unused)] - dx12_heap: DescriptorHeap, - cpu_handle: D3D12_CPU_DESCRIPTOR_HANDLE, - increment_size: u32, -} - -pub struct CpuHeapRef { - heap_ix: usize, - offset: u32, -} - -/// An owned reference to the CPU heap. -/// -/// When dropped, the corresponding heap range will be freed. -pub struct CpuHeapRefOwned { - heap_ref: CpuHeapRef, - handle: D3D12_CPU_DESCRIPTOR_HANDLE, - free_list: Weak>, -} - -/// A shader-visible descriptor heap. -struct GpuHeap { - dx12_heap: DescriptorHeap, - cpu_handle: D3D12_CPU_DESCRIPTOR_HANDLE, - gpu_handle: D3D12_GPU_DESCRIPTOR_HANDLE, - increment_size: u32, -} - -pub struct GpuHeapRef { - heap_ix: usize, - offset: u32, - n: u32, -} - -/// An owned reference to the GPU heap. -/// -/// When dropped, the corresponding heap range will be freed. 
-pub struct GpuHeapRefOwned { - heap_ref: GpuHeapRef, - cpu_handle: D3D12_CPU_DESCRIPTOR_HANDLE, - gpu_handle: D3D12_GPU_DESCRIPTOR_HANDLE, - increment_size: u32, - free_list: Weak>, -} - -impl DescriptorPool { - pub fn alloc_cpu(&mut self, device: &Device) -> Result { - let free_list = &self.free_list; - let mk_owned = |heap_ref, handle| CpuHeapRefOwned { - heap_ref, - handle, - free_list: Arc::downgrade(free_list), - }; - let mut free_list = free_list.lock().unwrap(); - for (heap_ix, free) in free_list.cpu_free.iter_mut().enumerate() { - if let Some(offset) = free.pop() { - let handle = self.cpu_visible[heap_ix].cpu_handle(offset); - return Ok(mk_owned(CpuHeapRef { heap_ix, offset }, handle)); - } - } - unsafe { - let heap_type = D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV; - let desc = D3D12_DESCRIPTOR_HEAP_DESC { - Type: heap_type, - NumDescriptors: CPU_CHUNK_SIZE, - Flags: D3D12_DESCRIPTOR_HEAP_FLAG_NONE, - NodeMask: 0, - }; - let dx12_heap = device.create_descriptor_heap(&desc)?; - let mut free = (0..CPU_CHUNK_SIZE).rev().collect::>(); - let offset = free.pop().unwrap(); - debug_assert_eq!(offset, 0); - let heap_ref = CpuHeapRef { - heap_ix: self.cpu_visible.len(), - offset, - }; - let cpu_handle = dx12_heap.get_cpu_descriptor_handle_for_heap_start(); - let increment_size = device.get_descriptor_increment_size(heap_type); - let heap = CpuHeap { - dx12_heap, - cpu_handle, - increment_size, - }; - self.cpu_visible.push(heap); - free_list.cpu_free.push(free); - Ok(mk_owned(heap_ref, cpu_handle)) - } - } - - pub fn cpu_handle(&self, cpu_ref: &CpuHeapRef) -> D3D12_CPU_DESCRIPTOR_HANDLE { - self.cpu_visible[cpu_ref.heap_ix].cpu_handle(cpu_ref.offset) - } - - pub fn alloc_gpu(&mut self, device: &Device, n: u32) -> Result { - let free_list = &self.free_list; - let heap_type = D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV; - let increment_size = unsafe { device.get_descriptor_increment_size(heap_type) }; - let mk_owned = |heap_ref, cpu_handle, gpu_handle| GpuHeapRefOwned { - heap_ref, - cpu_handle, - gpu_handle, - increment_size, - free_list: Arc::downgrade(free_list), - }; - let mut free_list = free_list.lock().unwrap(); - for (heap_ix, free) in free_list.gpu_free.iter_mut().enumerate() { - if let Some(offset) = free.alloc(n) { - let heap = &self.gpu_visible[heap_ix]; - let cpu_handle = heap.cpu_handle(offset); - let gpu_handle = heap.gpu_handle(offset); - return Ok(mk_owned( - GpuHeapRef { heap_ix, offset, n }, - cpu_handle, - gpu_handle, - )); - } - } - unsafe { - let size = n.max(GPU_CHUNK_SIZE).next_power_of_two(); - let desc = D3D12_DESCRIPTOR_HEAP_DESC { - Type: heap_type, - NumDescriptors: size, - Flags: D3D12_DESCRIPTOR_HEAP_FLAG_SHADER_VISIBLE, - NodeMask: 0, - }; - let dx12_heap = device.create_descriptor_heap(&desc)?; - let heap_ix = self.gpu_visible.len(); - let mut free = BestFit::new(size); - let offset = free.alloc(n).unwrap(); - // We assume the first allocation is at 0, to avoid recomputing offsets. 
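Ranges in the shader-visible heap are handed out by the `BestFit` allocator from `bestfit.rs` above: allocation picks the smallest free block that still fits, and freeing coalesces with adjacent free neighbours. A toy, self-contained sketch of that strategy over a plain `Vec` of (offset, length) ranges, purely illustrative (the real allocator keeps two ordered indices for faster lookups):

// Illustrative only: best-fit over free (offset, len) ranges sorted by offset.
fn alloc(free: &mut Vec<(u32, u32)>, size: u32) -> Option<u32> {
    let mut best: Option<usize> = None;
    for (i, &(_, len)) in free.iter().enumerate() {
        if len >= size && best.map_or(true, |b| len < free[b].1) {
            best = Some(i);
        }
    }
    let i = best?;
    let (offset, len) = free[i];
    if len == size {
        free.remove(i);
    } else {
        free[i] = (offset + size, len - size); // leave the remainder free
    }
    Some(offset)
}

fn free_range(free: &mut Vec<(u32, u32)>, offset: u32, size: u32) {
    let pos = free.partition_point(|&(o, _)| o < offset);
    free.insert(pos, (offset, size));
    // Coalesce with the following block, then with the preceding one.
    if pos + 1 < free.len() && free[pos].0 + free[pos].1 == free[pos + 1].0 {
        free[pos].1 += free[pos + 1].1;
        free.remove(pos + 1);
    }
    if pos > 0 && free[pos - 1].0 + free[pos - 1].1 == free[pos].0 {
        free[pos - 1].1 += free[pos].1;
        free.remove(pos);
    }
}

fn main() {
    let mut free = vec![(0u32, 4096u32)];
    let a = alloc(&mut free, 16).unwrap(); // 0, matching the assertion above
    let b = alloc(&mut free, 8).unwrap();  // 16
    free_range(&mut free, a, 16);
    free_range(&mut free, b, 8);           // merges everything back together
    assert_eq!(free, vec![(0, 4096)]);
    assert_eq!((a, b), (0, 16));
}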
- debug_assert_eq!(offset, 0); - let cpu_handle = dx12_heap.get_cpu_descriptor_handle_for_heap_start(); - let gpu_handle = dx12_heap.get_gpu_descriptor_handle_for_heap_start(); - let increment_size = device.get_descriptor_increment_size(heap_type); - let heap = GpuHeap { - dx12_heap, - cpu_handle, - gpu_handle, - increment_size, - }; - self.gpu_visible.push(heap); - free_list.gpu_free.push(free); - Ok(mk_owned( - GpuHeapRef { heap_ix, offset, n }, - cpu_handle, - gpu_handle, - )) - } - } - - pub fn cpu_handle_of_gpu( - &self, - gpu_ref: &GpuHeapRef, - offset: u32, - ) -> D3D12_CPU_DESCRIPTOR_HANDLE { - debug_assert!(offset < gpu_ref.n); - let dx12_heap = &self.gpu_visible[gpu_ref.heap_ix]; - dx12_heap.cpu_handle(gpu_ref.offset + offset) - } - - pub fn gpu_heap(&self, gpu_ref: &GpuHeapRef) -> &DescriptorHeap { - &self.gpu_visible[gpu_ref.heap_ix].dx12_heap - } -} - -impl DescriptorFreeList { - fn free_cpu(&mut self, cpu_ref: &CpuHeapRef) { - self.cpu_free[cpu_ref.heap_ix].push(cpu_ref.offset); - } - - fn free_gpu(&mut self, gpu_ref: &GpuHeapRef) { - self.gpu_free[gpu_ref.heap_ix].free(gpu_ref.offset, gpu_ref.n); - } -} - -impl Drop for CpuHeapRefOwned { - fn drop(&mut self) { - if let Some(a) = self.free_list.upgrade() { - a.lock().unwrap().free_cpu(&self.heap_ref) - } - } -} - -impl CpuHeapRefOwned { - pub fn handle(&self) -> D3D12_CPU_DESCRIPTOR_HANDLE { - self.handle - } -} - -impl GpuHeapRefOwned { - pub fn gpu_handle(&self) -> D3D12_GPU_DESCRIPTOR_HANDLE { - self.gpu_handle - } - - pub unsafe fn copy_descriptors(&self, device: &Device, src: &[D3D12_CPU_DESCRIPTOR_HANDLE]) { - // TODO: optimize a bit (use simple variant where appropriate) - let n = src.len().try_into().unwrap(); - let sizes = (0..n).map(|_| 1).collect::>(); - device.copy_descriptors( - &[self.cpu_handle], - &[n], - src, - &sizes, - D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV, - ); - } - - pub unsafe fn copy_one_descriptor( - &self, - device: &Device, - src: D3D12_CPU_DESCRIPTOR_HANDLE, - index: u32, - ) { - let mut dst = self.cpu_handle; - dst.ptr += (index * self.increment_size) as usize; - device.copy_one_descriptor(dst, src, D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV); - } -} - -impl Deref for CpuHeapRefOwned { - type Target = CpuHeapRef; - - fn deref(&self) -> &Self::Target { - &self.heap_ref - } -} - -impl Drop for GpuHeapRefOwned { - fn drop(&mut self) { - if let Some(a) = self.free_list.upgrade() { - a.lock().unwrap().free_gpu(&self.heap_ref) - } - } -} - -impl Deref for GpuHeapRefOwned { - type Target = GpuHeapRef; - - fn deref(&self) -> &Self::Target { - &self.heap_ref - } -} - -impl CpuHeap { - fn cpu_handle(&self, offset: u32) -> D3D12_CPU_DESCRIPTOR_HANDLE { - let mut handle = self.cpu_handle; - handle.ptr += (offset as usize) * (self.increment_size as usize); - handle - } -} - -impl GpuHeap { - fn cpu_handle(&self, offset: u32) -> D3D12_CPU_DESCRIPTOR_HANDLE { - let mut handle = self.cpu_handle; - handle.ptr += (offset as usize) * (self.increment_size as usize); - handle - } - - fn gpu_handle(&self, offset: u32) -> D3D12_GPU_DESCRIPTOR_HANDLE { - let mut handle = self.gpu_handle; - handle.ptr += (offset as u64) * (self.increment_size as u64); - handle - } -} diff --git a/piet-gpu-hal/src/dx12/error.rs b/piet-gpu-hal/src/dx12/error.rs deleted file mode 100644 index 6208fde..0000000 --- a/piet-gpu-hal/src/dx12/error.rs +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright © 2019 piet-gpu developers. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. - -//! This is a Windows-specific error mechanism (adapted from piet-dx12), -//! but we should adapt it to be more general. - -use winapi::shared::winerror; - -pub enum Error { - Hresult(winerror::HRESULT), - ExplainedHr(&'static str, winerror::HRESULT), -} - -impl std::fmt::Debug for Error { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - match self { - Error::Hresult(hr) => write!(f, "hresult {:x}", hr), - Error::ExplainedHr(exp, hr) => { - write!(f, "{}: ", exp)?; - write_hr(f, *hr) - } - } - } -} - -impl std::fmt::Display for Error { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - std::fmt::Debug::fmt(self, f) - } -} - -impl std::error::Error for Error {} - -/// Strings for errors we're likely to see. -/// -/// See https://docs.microsoft.com/en-us/windows/win32/direct3ddxgi/dxgi-error -fn err_str_for_hr(hr: winerror::HRESULT) -> Option<&'static str> { - Some(match hr as u32 { - 0x80004005 => "E_FAIL", - 0x80070057 => "E_INVALIDARG", - 0x887a0001 => "DXGI_ERROR_INVALID_CALL", - 0x887a0002 => "DXGI_ERROR_NOT_FOUND", - 0x887a0004 => "DXGI_ERROR_UNSUPPORTED", - 0x887a0005 => "DXGI_ERROR_DEVICE_REMOVED", - 0x887a0006 => "DXGI_ERROR_DEVICE_HUNG", - _ => return None, - }) -} - -fn write_hr(f: &mut std::fmt::Formatter, hr: winerror::HRESULT) -> std::fmt::Result { - if let Some(err_str) = err_str_for_hr(hr) { - write!(f, "{:x} ({})", hr, err_str) - } else { - write!(f, "{:x}", hr) - } -} - -pub type D3DResult = (T, winerror::HRESULT); - -pub fn error_if_failed_else_value(result: D3DResult) -> Result { - let (result_value, hresult) = result; - - if winerror::SUCCEEDED(hresult) { - Ok(result_value) - } else { - Err(Error::Hresult(hresult)) - } -} - -pub fn error_if_failed_else_unit(hresult: winerror::HRESULT) -> Result<(), Error> { - error_if_failed_else_value(((), hresult)) -} - -pub fn explain_error(hresult: winerror::HRESULT, explanation: &'static str) -> Result<(), Error> { - if winerror::SUCCEEDED(hresult) { - Ok(()) - } else { - Err(Error::ExplainedHr(explanation, hresult)) - } -} diff --git a/piet-gpu-hal/src/dx12/wrappers.rs b/piet-gpu-hal/src/dx12/wrappers.rs deleted file mode 100644 index a1005e7..0000000 --- a/piet-gpu-hal/src/dx12/wrappers.rs +++ /dev/null @@ -1,1158 +0,0 @@ -// Copyright © 2019 piet-gpu developers. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use crate::dx12::error::{self, error_if_failed_else_unit, explain_error, Error}; -use crate::MapMode; -use smallvec::SmallVec; -use std::convert::{TryFrom, TryInto}; -use std::sync::atomic::{AtomicPtr, Ordering}; -use std::{ffi, mem, ptr}; -use winapi::shared::{dxgi, dxgi1_2, dxgi1_3, dxgi1_4, dxgiformat, dxgitype, minwindef, windef}; -use winapi::um::d3dcommon::ID3DBlob; -use winapi::um::{ - d3d12, d3d12sdklayers, d3dcommon, d3dcompiler, dxgidebug, handleapi, synchapi, winnt, -}; -use winapi::Interface; -use wio::com::ComPtr; - -// everything is ripped from d3d12-rs, but wio::com::ComPtr, and winapi are used more directly - -#[derive(Clone)] -pub struct Heap(pub ComPtr); - -pub struct Resource { - // Note: the use of AtomicPtr is to support explicit destruction, - // similar to Vulkan. 
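The note above is the key idea behind `Resource`: the raw COM pointer lives in an `AtomicPtr` so the wrapper can be destroyed explicitly, Vulkan-style, while `Drop` still cleans up anything not yet released. A self-contained sketch of that pattern with a plain heap allocation standing in for the COM object (the `Handle` type here is hypothetical):

use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering};

/// Hypothetical stand-in illustrating explicit destruction plus Drop cleanup.
/// (The real `Resource::destroy` is `unsafe` because it must not race with
/// other users of the pointer.)
struct Handle {
    ptr: AtomicPtr<String>,
}

impl Handle {
    fn new(value: String) -> Handle {
        Handle {
            ptr: AtomicPtr::new(Box::into_raw(Box::new(value))),
        }
    }

    fn destroy(&self) {
        let p = self.ptr.swap(ptr::null_mut(), Ordering::Relaxed);
        if !p.is_null() {
            // Safety: swapping in null gave us sole ownership of `p`.
            unsafe { drop(Box::from_raw(p)) };
        }
    }
}

impl Drop for Handle {
    fn drop(&mut self) {
        self.destroy();
    }
}

fn main() {
    let h = Handle::new("gpu resource".to_string());
    h.destroy(); // explicit, Vulkan-style destruction
    // Dropping `h` afterwards is a no-op: the pointer is already null.
}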
- ptr: AtomicPtr, -} - -#[derive(Clone)] -pub struct Adapter1(pub ComPtr); -#[derive(Clone)] -pub struct Factory2(pub ComPtr); -#[derive(Clone)] -pub struct Factory4(pub ComPtr); -#[derive(Clone)] -pub struct SwapChain3(pub ComPtr); - -#[derive(Clone)] -pub struct Device(pub ComPtr); - -#[derive(Clone)] -pub struct CommandQueue(pub ComPtr); - -#[derive(Clone)] -pub struct CommandAllocator(pub ComPtr); - -pub type CpuDescriptor = d3d12::D3D12_CPU_DESCRIPTOR_HANDLE; -pub type GpuDescriptor = d3d12::D3D12_GPU_DESCRIPTOR_HANDLE; - -#[derive(Clone)] -pub struct DescriptorHeap(ComPtr); - -#[derive(Clone)] -pub struct RootSignature(pub ComPtr); - -#[derive(Clone)] -pub struct CommandSignature(pub ComPtr); -#[derive(Clone)] -pub struct GraphicsCommandList(pub ComPtr); - -pub struct Event(pub winnt::HANDLE); -#[derive(Clone)] -pub struct Fence(pub ComPtr); - -#[derive(Clone)] -pub struct PipelineState(pub ComPtr); - -#[derive(Clone)] -pub struct CachedPSO(d3d12::D3D12_CACHED_PIPELINE_STATE); - -#[derive(Clone)] -pub struct Blob(pub ComPtr); - -#[derive(Clone)] -pub struct ShaderByteCode { - pub bytecode: d3d12::D3D12_SHADER_BYTECODE, -} - -#[derive(Clone)] -pub struct QueryHeap(pub ComPtr); - -impl Resource { - pub unsafe fn new(ptr: *mut d3d12::ID3D12Resource) -> Resource { - Resource { - ptr: AtomicPtr::new(ptr), - } - } - - pub fn get(&self) -> *const d3d12::ID3D12Resource { - self.get_mut() - } - - pub fn get_mut(&self) -> *mut d3d12::ID3D12Resource { - self.ptr.load(Ordering::Relaxed) - } - - // Safety: call only single-threaded. - pub unsafe fn destroy(&self) { - (*self.get()).Release(); - self.ptr.store(ptr::null_mut(), Ordering::Relaxed); - } - - pub unsafe fn map_buffer( - &self, - offset: u64, - size: u64, - mode: MapMode, - ) -> Result<*mut u8, Error> { - let mut mapped_memory: *mut u8 = ptr::null_mut(); - let (begin, end) = match mode { - MapMode::Read => (offset as usize, (offset + size) as usize), - MapMode::Write => (0, 0), - }; - let range = d3d12::D3D12_RANGE { - Begin: begin, - End: end, - }; - explain_error( - (*self.get()).Map(0, &range, &mut mapped_memory as *mut _ as *mut _), - "could not map GPU mem to CPU mem", - )?; - Ok(mapped_memory.add(offset as usize)) - } - - pub unsafe fn unmap_buffer(&self, offset: u64, size: u64, mode: MapMode) -> Result<(), Error> { - let (begin, end) = match mode { - MapMode::Read => (0, 0), - MapMode::Write => (offset as usize, (offset + size) as usize), - }; - let range = d3d12::D3D12_RANGE { - Begin: begin, - End: end, - }; - (*self.get()).Unmap(0, &range); - Ok(()) - } -} - -impl Drop for Resource { - fn drop(&mut self) { - unsafe { - let ptr = self.get(); - if !ptr.is_null() { - (*ptr).Release(); - } - } - } -} - -impl Clone for Resource { - fn clone(&self) -> Self { - unsafe { - let ptr = self.get_mut(); - (*ptr).AddRef(); - Resource { - ptr: AtomicPtr::new(ptr), - } - } - } -} - -impl Factory4 { - pub unsafe fn create(flags: minwindef::UINT) -> Result { - let mut factory = ptr::null_mut(); - - explain_error( - dxgi1_3::CreateDXGIFactory2( - flags, - &dxgi1_4::IDXGIFactory4::uuidof(), - &mut factory as *mut _ as *mut _, - ), - "error creating DXGI factory", - )?; - - Ok(Factory4(ComPtr::from_raw(factory))) - } - - pub unsafe fn enumerate_adapters(&self, id: u32) -> Result { - let mut adapter = ptr::null_mut(); - error_if_failed_else_unit(self.0.EnumAdapters1(id, &mut adapter))?; - let mut desc = mem::zeroed(); - (*adapter).GetDesc(&mut desc); - //println!("desc: {:?}", desc.Description); - Ok(Adapter1(ComPtr::from_raw(adapter))) - } - - pub 
unsafe fn create_swapchain_for_hwnd( - &self, - command_queue: &CommandQueue, - hwnd: windef::HWND, - desc: dxgi1_2::DXGI_SWAP_CHAIN_DESC1, - ) -> Result { - let mut swap_chain = ptr::null_mut(); - explain_error( - self.0.CreateSwapChainForHwnd( - command_queue.0.as_raw() as *mut _, - hwnd, - &desc, - ptr::null(), - ptr::null_mut(), - &mut swap_chain as *mut _ as *mut _, - ), - "could not creation swapchain for hwnd", - )?; - - Ok(SwapChain3(ComPtr::from_raw(swap_chain))) - } -} - -impl CommandQueue { - pub unsafe fn signal(&self, fence: &Fence, value: u64) -> Result<(), Error> { - explain_error( - self.0.Signal(fence.0.as_raw(), value), - "error setting signal", - ) - } - - pub unsafe fn execute_command_lists(&self, command_lists: &[*mut d3d12::ID3D12CommandList]) { - let num_command_lists = command_lists.len().try_into().unwrap(); - self.0 - .ExecuteCommandLists(num_command_lists, command_lists.as_ptr()); - } - - pub unsafe fn get_timestamp_frequency(&self) -> Result { - let mut result: u64 = 0; - - explain_error( - self.0.GetTimestampFrequency(&mut result), - "could not get timestamp frequency", - )?; - - Ok(result) - } -} - -impl SwapChain3 { - pub unsafe fn get_buffer(&self, id: u32) -> Resource { - let mut resource = ptr::null_mut(); - error::error_if_failed_else_unit(self.0.GetBuffer( - id, - &d3d12::ID3D12Resource::uuidof(), - &mut resource as *mut _ as *mut _, - )) - .expect("SwapChain3 could not get buffer"); - - Resource::new(resource) - } - - pub unsafe fn get_current_back_buffer_index(&self) -> u32 { - self.0.GetCurrentBackBufferIndex() - } - - pub unsafe fn present(&self, interval: u32, flags: u32) -> Result<(), Error> { - error::error_if_failed_else_unit(self.0.Present1( - interval, - flags, - &dxgi1_2::DXGI_PRESENT_PARAMETERS { ..mem::zeroed() } as *const _, - )) - } -} - -impl Blob { - #[allow(unused)] - pub unsafe fn print_to_console(blob: &Blob) { - println!("==SHADER COMPILE MESSAGES=="); - let message = { - let pointer = blob.0.GetBufferPointer(); - let size = blob.0.GetBufferSize(); - let slice = std::slice::from_raw_parts(pointer as *const u8, size as usize); - String::from_utf8_lossy(slice).into_owned() - }; - println!("{}", message); - println!("==========================="); - } -} - -impl Device { - pub unsafe fn create_device(factory4: &Factory4) -> Result { - let mut id = 0; - - loop { - // This always returns DXGI_ERROR_NOT_FOUND if no suitable adapter is found. - // Might be slightly more useful to retain the error from the attempt to create. 
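The comment above suggests retaining the device-creation error instead of surfacing the bare "not found" from adapter enumeration. A hypothetical, backend-agnostic sketch of that improvement (names and toy closures are illustrative, not part of the removed API):

/// Try adapters in order; if none works, prefer the last creation error over
/// the enumeration error, since it is usually more informative.
fn first_device<A, D, E>(
    mut enumerate: impl FnMut(u32) -> Result<A, E>,
    mut create: impl FnMut(&A) -> Result<D, E>,
) -> Result<D, E> {
    let mut last_create_err = None;
    let mut id = 0;
    loop {
        match enumerate(id) {
            Ok(adapter) => match create(&adapter) {
                Ok(device) => return Ok(device),
                Err(e) => last_create_err = Some(e),
            },
            // Enumeration ran out of adapters.
            Err(enum_err) => return Err(last_create_err.unwrap_or(enum_err)),
        }
        id += 1;
    }
}

fn main() {
    // Toy stand-ins: two "adapters" exist, neither can create a "device".
    let result: Result<String, &str> = first_device(
        |id| if id < 2 { Ok(id) } else { Err("DXGI_ERROR_NOT_FOUND") },
        |_adapter: &u32| Err("feature level 12_0 unsupported"),
    );
    assert_eq!(result.unwrap_err(), "feature level 12_0 unsupported");
}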
- let adapter = factory4.enumerate_adapters(id)?; - - if let Ok(device) = - Self::create_using_adapter(&adapter, d3dcommon::D3D_FEATURE_LEVEL_12_0) - { - return Ok(device); - } - id += 1; - } - } - - pub unsafe fn create_using_adapter( - adapter: &Adapter1, - feature_level: d3dcommon::D3D_FEATURE_LEVEL, - ) -> Result { - let mut device = ptr::null_mut(); - error_if_failed_else_unit(d3d12::D3D12CreateDevice( - adapter.0.as_raw() as *mut _, - feature_level, - &d3d12::ID3D12Device::uuidof(), - &mut device as *mut _ as *mut _, - ))?; - - Ok(Device(ComPtr::from_raw(device))) - } - - pub unsafe fn create_command_allocator( - &self, - list_type: d3d12::D3D12_COMMAND_LIST_TYPE, - ) -> Result { - let mut allocator = ptr::null_mut(); - explain_error( - self.0.CreateCommandAllocator( - list_type, - &d3d12::ID3D12CommandAllocator::uuidof(), - &mut allocator as *mut _ as *mut _, - ), - "device could not create command allocator", - )?; - - Ok(CommandAllocator(ComPtr::from_raw(allocator))) - } - - pub unsafe fn create_command_queue( - &self, - list_type: d3d12::D3D12_COMMAND_LIST_TYPE, - priority: minwindef::INT, - flags: d3d12::D3D12_COMMAND_QUEUE_FLAGS, - node_mask: minwindef::UINT, - ) -> Result { - let desc = d3d12::D3D12_COMMAND_QUEUE_DESC { - Type: list_type, - Priority: priority, - Flags: flags, - NodeMask: node_mask, - }; - - let mut cmd_q = ptr::null_mut(); - explain_error( - self.0.CreateCommandQueue( - &desc, - &d3d12::ID3D12CommandQueue::uuidof(), - &mut cmd_q as *mut _ as *mut _, - ), - "device could not create command queue", - )?; - - Ok(CommandQueue(ComPtr::from_raw(cmd_q))) - } - - pub unsafe fn create_descriptor_heap( - &self, - heap_description: &d3d12::D3D12_DESCRIPTOR_HEAP_DESC, - ) -> Result { - let mut heap = ptr::null_mut(); - explain_error( - self.0.CreateDescriptorHeap( - heap_description, - &d3d12::ID3D12DescriptorHeap::uuidof(), - &mut heap as *mut _ as *mut _, - ), - "device could not create descriptor heap", - )?; - - Ok(DescriptorHeap(ComPtr::from_raw(heap))) - } - - pub unsafe fn get_descriptor_increment_size( - &self, - heap_type: d3d12::D3D12_DESCRIPTOR_HEAP_TYPE, - ) -> u32 { - self.0.GetDescriptorHandleIncrementSize(heap_type) - } - - pub unsafe fn copy_descriptors( - &self, - dst_starts: &[d3d12::D3D12_CPU_DESCRIPTOR_HANDLE], - dst_sizes: &[u32], - src_starts: &[d3d12::D3D12_CPU_DESCRIPTOR_HANDLE], - src_sizes: &[u32], - descriptor_heap_type: d3d12::D3D12_DESCRIPTOR_HEAP_TYPE, - ) { - debug_assert_eq!(dst_starts.len(), dst_sizes.len()); - debug_assert_eq!(src_starts.len(), src_sizes.len()); - debug_assert_eq!( - src_sizes.iter().copied().sum::(), - dst_sizes.iter().copied().sum() - ); - self.0.CopyDescriptors( - dst_starts.len().try_into().unwrap(), - dst_starts.as_ptr(), - dst_sizes.as_ptr(), - src_starts.len().try_into().unwrap(), - src_starts.as_ptr(), - src_sizes.as_ptr(), - descriptor_heap_type, - ); - } - - pub unsafe fn copy_one_descriptor( - &self, - dst: d3d12::D3D12_CPU_DESCRIPTOR_HANDLE, - src: d3d12::D3D12_CPU_DESCRIPTOR_HANDLE, - descriptor_heap_type: d3d12::D3D12_DESCRIPTOR_HEAP_TYPE, - ) { - self.0 - .CopyDescriptorsSimple(1, dst, src, descriptor_heap_type); - } - - pub unsafe fn create_compute_pipeline_state( - &self, - compute_pipeline_desc: &d3d12::D3D12_COMPUTE_PIPELINE_STATE_DESC, - ) -> Result { - let mut pipeline_state = ptr::null_mut(); - - explain_error( - self.0.CreateComputePipelineState( - compute_pipeline_desc as *const _, - &d3d12::ID3D12PipelineState::uuidof(), - &mut pipeline_state as *mut _ as *mut _, - ), - "device could not create 
compute pipeline state", - )?; - - Ok(PipelineState(ComPtr::from_raw(pipeline_state))) - } - - pub unsafe fn create_root_signature( - &self, - node_mask: minwindef::UINT, - blob: Blob, - ) -> Result { - let mut signature = ptr::null_mut(); - explain_error( - self.0.CreateRootSignature( - node_mask, - blob.0.GetBufferPointer(), - blob.0.GetBufferSize(), - &d3d12::ID3D12RootSignature::uuidof(), - &mut signature as *mut _ as *mut _, - ), - "device could not create root signature", - )?; - - Ok(RootSignature(ComPtr::from_raw(signature))) - } - - pub unsafe fn create_graphics_command_list( - &self, - list_type: d3d12::D3D12_COMMAND_LIST_TYPE, - allocator: &CommandAllocator, - initial_ps: Option<&PipelineState>, - node_mask: minwindef::UINT, - ) -> Result { - let mut command_list = ptr::null_mut(); - let p_initial_state = initial_ps.map(|p| p.0.as_raw()).unwrap_or(ptr::null_mut()); - explain_error( - self.0.CreateCommandList( - node_mask, - list_type, - allocator.0.as_raw(), - p_initial_state, - &d3d12::ID3D12GraphicsCommandList::uuidof(), - &mut command_list as *mut _ as *mut _, - ), - "device could not create graphics command list", - )?; - - Ok(GraphicsCommandList(ComPtr::from_raw(command_list))) - } - - pub unsafe fn create_byte_addressed_buffer_unordered_access_view( - &self, - resource: &Resource, - descriptor: CpuDescriptor, - first_element: u64, - num_elements: u32, - ) { - // shouldn't flags be dxgiformat::DXGI_FORMAT_R32_TYPELESS? - let mut uav_desc = d3d12::D3D12_UNORDERED_ACCESS_VIEW_DESC { - Format: dxgiformat::DXGI_FORMAT_R32_TYPELESS, - ViewDimension: d3d12::D3D12_UAV_DIMENSION_BUFFER, - ..mem::zeroed() - }; - *uav_desc.u.Buffer_mut() = d3d12::D3D12_BUFFER_UAV { - FirstElement: first_element, - NumElements: num_elements, - // shouldn't StructureByteStride be 0? - StructureByteStride: 0, - CounterOffsetInBytes: 0, - // shouldn't flags be d3d12::D3D12_BUFFER_UAV_FLAG_RAW? 
- Flags: d3d12::D3D12_BUFFER_UAV_FLAG_RAW, - }; - self.0 - .CreateUnorderedAccessView(resource.get_mut(), ptr::null_mut(), &uav_desc, descriptor) - } - - pub unsafe fn create_unordered_access_view( - &self, - resource: &Resource, - descriptor: CpuDescriptor, - ) { - self.0.CreateUnorderedAccessView( - resource.get_mut(), - ptr::null_mut(), - ptr::null(), - descriptor, - ) - } - - pub unsafe fn create_fence(&self, initial: u64) -> Result { - let mut fence = ptr::null_mut(); - explain_error( - self.0.CreateFence( - initial, - d3d12::D3D12_FENCE_FLAG_NONE, - &d3d12::ID3D12Fence::uuidof(), - &mut fence as *mut _ as *mut _, - ), - "device could not create fence", - )?; - - Ok(Fence(ComPtr::from_raw(fence))) - } - - pub unsafe fn create_committed_resource( - &self, - heap_properties: &d3d12::D3D12_HEAP_PROPERTIES, - flags: d3d12::D3D12_HEAP_FLAGS, - resource_description: &d3d12::D3D12_RESOURCE_DESC, - initial_resource_state: d3d12::D3D12_RESOURCE_STATES, - optimized_clear_value: *const d3d12::D3D12_CLEAR_VALUE, - ) -> Result { - let mut resource = ptr::null_mut(); - - explain_error( - self.0.CreateCommittedResource( - heap_properties as *const _, - flags, - resource_description as *const _, - initial_resource_state, - optimized_clear_value, - &d3d12::ID3D12Resource::uuidof(), - &mut resource as *mut _ as *mut _, - ), - "device could not create committed resource", - )?; - - Ok(Resource::new(resource)) - } - - pub unsafe fn create_query_heap( - &self, - heap_type: d3d12::D3D12_QUERY_HEAP_TYPE, - num_expected_queries: u32, - ) -> Result { - let query_heap_desc = d3d12::D3D12_QUERY_HEAP_DESC { - Type: heap_type, - Count: num_expected_queries, - NodeMask: 0, - }; - - let mut query_heap = ptr::null_mut(); - - explain_error( - self.0.CreateQueryHeap( - &query_heap_desc as *const _, - &d3d12::ID3D12QueryHeap::uuidof(), - &mut query_heap as *mut _ as *mut _, - ), - "could not create query heap", - )?; - - Ok(QueryHeap(ComPtr::from_raw(query_heap))) - } - - pub unsafe fn create_buffer( - &self, - buffer_size_in_bytes: u64, - heap_type: d3d12::D3D12_HEAP_TYPE, - cpu_page: d3d12::D3D12_CPU_PAGE_PROPERTY, - memory_pool_preference: d3d12::D3D12_MEMORY_POOL, - init_resource_state: d3d12::D3D12_RESOURCE_STATES, - resource_flags: d3d12::D3D12_RESOURCE_FLAGS, - ) -> Result { - let heap_properties = d3d12::D3D12_HEAP_PROPERTIES { - Type: heap_type, - CPUPageProperty: cpu_page, - MemoryPoolPreference: memory_pool_preference, - //we don't care about multi-adapter operation, so these next two will be zero - CreationNodeMask: 0, - VisibleNodeMask: 0, - }; - let resource_description = d3d12::D3D12_RESOURCE_DESC { - Dimension: d3d12::D3D12_RESOURCE_DIMENSION_BUFFER, - Width: buffer_size_in_bytes, - Height: 1, - DepthOrArraySize: 1, - MipLevels: 1, - SampleDesc: dxgitype::DXGI_SAMPLE_DESC { - Count: 1, - Quality: 0, - }, - Layout: d3d12::D3D12_TEXTURE_LAYOUT_ROW_MAJOR, - Flags: resource_flags, - ..mem::zeroed() - }; - - let buffer = self.create_committed_resource( - &heap_properties, - d3d12::D3D12_HEAP_FLAG_NONE, - &resource_description, - init_resource_state, - ptr::null(), - )?; - - Ok(buffer) - } - - pub unsafe fn create_texture2d_buffer( - &self, - width: u64, - height: u32, - format: dxgiformat::DXGI_FORMAT, - allow_unordered_access: bool, - ) -> Result { - // Images are always created device-local. 
- let heap_properties = d3d12::D3D12_HEAP_PROPERTIES { - Type: d3d12::D3D12_HEAP_TYPE_DEFAULT, - CPUPageProperty: d3d12::D3D12_CPU_PAGE_PROPERTY_UNKNOWN, - MemoryPoolPreference: d3d12::D3D12_MEMORY_POOL_UNKNOWN, - //we don't care about multi-adapter operation, so these next two will be zero - CreationNodeMask: 0, - VisibleNodeMask: 0, - }; - - let (flags, initial_resource_state) = { - if allow_unordered_access { - ( - d3d12::D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS, - d3d12::D3D12_RESOURCE_STATE_UNORDERED_ACCESS, - ) - } else { - ( - d3d12::D3D12_RESOURCE_FLAG_NONE, - d3d12::D3D12_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE, - ) - } - }; - - let resource_description = d3d12::D3D12_RESOURCE_DESC { - Dimension: d3d12::D3D12_RESOURCE_DIMENSION_TEXTURE2D, - Width: width, - Height: height, - DepthOrArraySize: 1, - MipLevels: 1, - SampleDesc: dxgitype::DXGI_SAMPLE_DESC { - Count: 1, - Quality: 0, - }, - Layout: d3d12::D3D12_TEXTURE_LAYOUT_UNKNOWN, - Flags: flags, - Format: format, - ..mem::zeroed() - }; - - let buffer = self.create_committed_resource( - &heap_properties, - //TODO: is this heap flag ok? - d3d12::D3D12_HEAP_FLAG_NONE, - &resource_description, - initial_resource_state, - ptr::null(), - )?; - - Ok(buffer) - } - - pub unsafe fn get_features_architecture( - &self, - ) -> Result { - let mut features_architecture = mem::zeroed(); - explain_error( - self.0.CheckFeatureSupport( - d3d12::D3D12_FEATURE_ARCHITECTURE, - &mut features_architecture as *mut _ as *mut _, - mem::size_of::() as u32, - ), - "error querying feature architecture", - )?; - Ok(features_architecture) - } -} - -impl DescriptorHeap { - pub unsafe fn get_cpu_descriptor_handle_for_heap_start(&self) -> CpuDescriptor { - self.0.GetCPUDescriptorHandleForHeapStart() - } - - pub unsafe fn get_gpu_descriptor_handle_for_heap_start(&self) -> GpuDescriptor { - self.0.GetGPUDescriptorHandleForHeapStart() - } -} - -impl RootSignature { - pub unsafe fn serialize_description( - desc: &d3d12::D3D12_ROOT_SIGNATURE_DESC, - version: d3d12::D3D_ROOT_SIGNATURE_VERSION, - ) -> Result { - let mut blob = ptr::null_mut(); - let mut error_blob_ptr = ptr::null_mut(); - - let hresult = - d3d12::D3D12SerializeRootSignature(desc, version, &mut blob, &mut error_blob_ptr); - - #[cfg(debug_assertions)] - { - let error_blob = if error_blob_ptr.is_null() { - None - } else { - Some(Blob(ComPtr::from_raw(error_blob_ptr))) - }; - if let Some(error_blob) = &error_blob { - Blob::print_to_console(error_blob); - } - } - - explain_error(hresult, "could not serialize root signature description")?; - - Ok(Blob(ComPtr::from_raw(blob))) - } -} - -impl ShaderByteCode { - // `blob` may not be null. - // TODO: this is not super elegant, maybe want to move the get - // operations closer to where they're used. - #[allow(unused)] - pub unsafe fn from_blob(blob: Blob) -> ShaderByteCode { - ShaderByteCode { - bytecode: d3d12::D3D12_SHADER_BYTECODE { - BytecodeLength: blob.0.GetBufferSize(), - pShaderBytecode: blob.0.GetBufferPointer(), - }, - } - } - - /// Compile a shader from raw HLSL. - /// - /// * `target`: example format: `ps_5_1`. 
- #[allow(unused)] - pub unsafe fn compile( - source: &str, - target: &str, - entry: &str, - flags: minwindef::DWORD, - ) -> Result { - let mut shader_blob_ptr: *mut ID3DBlob = ptr::null_mut(); - //TODO: use error blob properly - let mut error_blob_ptr: *mut ID3DBlob = ptr::null_mut(); - - let target = ffi::CString::new(target) - .expect("could not convert target format string into ffi::CString"); - let entry = ffi::CString::new(entry) - .expect("could not convert entry name String into ffi::CString"); - - let hresult = d3dcompiler::D3DCompile( - source.as_ptr() as *const _, - source.len(), - ptr::null(), - ptr::null(), - d3dcompiler::D3D_COMPILE_STANDARD_FILE_INCLUDE, - entry.as_ptr(), - target.as_ptr(), - flags, - 0, - &mut shader_blob_ptr, - &mut error_blob_ptr, - ); - - let error_blob = if error_blob_ptr.is_null() { - None - } else { - Some(Blob(ComPtr::from_raw(error_blob_ptr))) - }; - #[cfg(debug_assertions)] - { - if let Some(error_blob) = &error_blob { - Blob::print_to_console(error_blob); - } - } - - // TODO: we can put the shader compilation error into the returned error. - explain_error(hresult, "shader compilation failed")?; - - Ok(Blob(ComPtr::from_raw(shader_blob_ptr))) - } - - /// Create bytecode from a slice. - /// - /// # Safety - /// - /// This call elides the lifetime from the slice. The caller is responsible - /// for making sure the reference remains valid for the lifetime of this - /// object. - #[allow(unused)] - pub unsafe fn from_slice(bytecode: &[u8]) -> ShaderByteCode { - ShaderByteCode { - bytecode: d3d12::D3D12_SHADER_BYTECODE { - BytecodeLength: bytecode.len(), - pShaderBytecode: bytecode.as_ptr() as *const _, - }, - } - } -} - -impl Fence { - pub unsafe fn set_event_on_completion(&self, event: &Event, value: u64) -> Result<(), Error> { - explain_error( - self.0.SetEventOnCompletion(value, event.0), - "error setting event completion", - ) - } - - pub unsafe fn get_value(&self) -> u64 { - self.0.GetCompletedValue() - } -} - -impl Event { - pub unsafe fn create(manual_reset: bool, initial_state: bool) -> Result { - let handle = synchapi::CreateEventA( - ptr::null_mut(), - manual_reset as _, - initial_state as _, - ptr::null(), - ); - if handle.is_null() { - // TODO: should probably call GetLastError here - Err(Error::Hresult(-1)) - } else { - Ok(Event(handle)) - } - } - - /// Wait for the event, or a timeout. - /// - /// If the timeout is `winapi::um::winbase::INFINITE`, it will wait until the - /// event is signaled. 
- /// - /// The return value is defined here: - /// https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-waitforsingleobject - pub unsafe fn wait(&self, timeout_ms: u32) -> u32 { - synchapi::WaitForSingleObject(self.0, timeout_ms) - } -} - -impl Drop for Event { - fn drop(&mut self) { - unsafe { - handleapi::CloseHandle(self.0); - } - } -} - -impl CommandAllocator { - pub unsafe fn reset(&self) -> Result<(), Error> { - error::error_if_failed_else_unit(self.0.Reset()) - } -} - -impl GraphicsCommandList { - pub unsafe fn as_raw_command_list(&self) -> *mut d3d12::ID3D12CommandList { - self.0.as_raw() as *mut d3d12::ID3D12CommandList - } - - pub unsafe fn close(&self) -> Result<(), Error> { - explain_error(self.0.Close(), "error closing command list") - } - - pub unsafe fn reset( - &self, - allocator: &CommandAllocator, - initial_pso: Option<&PipelineState>, - ) -> Result<(), Error> { - let p_initial_state = initial_pso.map(|p| p.0.as_raw()).unwrap_or(ptr::null_mut()); - error::error_if_failed_else_unit(self.0.Reset(allocator.0.as_raw(), p_initial_state)) - } - - pub unsafe fn set_compute_pipeline_root_signature(&self, signature: &RootSignature) { - self.0.SetComputeRootSignature(signature.0.as_raw()); - } - - pub unsafe fn resource_barrier(&self, resource_barriers: &[d3d12::D3D12_RESOURCE_BARRIER]) { - self.0.ResourceBarrier( - resource_barriers - .len() - .try_into() - .expect("Waaaaaay too many barriers"), - resource_barriers.as_ptr(), - ); - } - - pub unsafe fn dispatch(&self, count_x: u32, count_y: u32, count_z: u32) { - self.0.Dispatch(count_x, count_y, count_z); - } - - pub unsafe fn set_pipeline_state(&self, pipeline_state: &PipelineState) { - self.0.SetPipelineState(pipeline_state.0.as_raw()); - } - - pub unsafe fn set_compute_root_descriptor_table( - &self, - root_parameter_index: u32, - base_descriptor: d3d12::D3D12_GPU_DESCRIPTOR_HANDLE, - ) { - self.0 - .SetComputeRootDescriptorTable(root_parameter_index, base_descriptor); - } - - pub unsafe fn set_descriptor_heaps(&self, descriptor_heaps: &[&DescriptorHeap]) { - let mut descriptor_heap_pointers: SmallVec<[_; 4]> = - descriptor_heaps.iter().map(|dh| dh.0.as_raw()).collect(); - self.0.SetDescriptorHeaps( - u32::try_from(descriptor_heap_pointers.len()) - .expect("could not safely convert descriptor_heap_pointers.len() into u32"), - descriptor_heap_pointers.as_mut_ptr(), - ); - } - - pub unsafe fn end_timing_query(&self, query_heap: &QueryHeap, index: u32) { - self.0.EndQuery( - query_heap.0.as_raw(), - d3d12::D3D12_QUERY_TYPE_TIMESTAMP, - index, - ); - } - - pub unsafe fn resolve_timing_query_data( - &self, - query_heap: &QueryHeap, - start_index: u32, - num_queries: u32, - destination_buffer: &Resource, - aligned_destination_buffer_offset: u64, - ) { - self.0.ResolveQueryData( - query_heap.0.as_raw() as *mut _, - d3d12::D3D12_QUERY_TYPE_TIMESTAMP, - start_index, - num_queries, - destination_buffer.get_mut(), - aligned_destination_buffer_offset, - ); - } - - pub unsafe fn clear_uav( - &self, - gpu_handle: d3d12::D3D12_GPU_DESCRIPTOR_HANDLE, - cpu_handle: d3d12::D3D12_CPU_DESCRIPTOR_HANDLE, - resource: &Resource, - value: u32, - size: Option, - ) { - // In testing, only the first value seems to be used, but just in case... 
- let values = [value, value, value, value]; - let mut rect = d3d12::D3D12_RECT { - left: 0, - right: 0, - top: 0, - bottom: 1, - }; - let (num_rects, p_rects) = if let Some(size) = size { - rect.right = (size / 4).try_into().unwrap(); - (1, &rect as *const _) - } else { - (0, std::ptr::null()) - }; - self.0.ClearUnorderedAccessViewUint( - gpu_handle, - cpu_handle, - resource.get_mut(), - &values, - num_rects, - p_rects, - ); - } - - /// Copy an entire resource (buffer or image) - pub unsafe fn copy_resource(&self, src: &Resource, dst: &Resource) { - self.0.CopyResource(dst.get_mut(), src.get_mut()); - } - - pub unsafe fn copy_buffer( - &self, - dst_buf: &Resource, - dst_offset: u64, - src_buf: &Resource, - src_offset: u64, - size: u64, - ) { - self.0.CopyBufferRegion( - dst_buf.get_mut(), - dst_offset, - src_buf.get_mut(), - src_offset, - size, - ); - } - - pub unsafe fn copy_buffer_to_texture( - &self, - buffer: &Resource, - texture: &Resource, - width: u32, - height: u32, - ) { - let mut src = d3d12::D3D12_TEXTURE_COPY_LOCATION { - pResource: buffer.get_mut(), - Type: d3d12::D3D12_TEXTURE_COPY_TYPE_PLACED_FOOTPRINT, - ..mem::zeroed() - }; - let row_pitch = width * 4; - assert!( - row_pitch % d3d12::D3D12_TEXTURE_DATA_PITCH_ALIGNMENT == 0, - "TODO: handle unaligned row pitch" - ); - let footprint = d3d12::D3D12_PLACED_SUBRESOURCE_FOOTPRINT { - Offset: 0, - Footprint: d3d12::D3D12_SUBRESOURCE_FOOTPRINT { - Format: dxgiformat::DXGI_FORMAT_R8G8B8A8_UNORM, - Width: width, - Height: height, - Depth: 1, - RowPitch: row_pitch, - }, - }; - *src.u.PlacedFootprint_mut() = footprint; - - let mut dst = d3d12::D3D12_TEXTURE_COPY_LOCATION { - pResource: texture.get_mut(), - Type: d3d12::D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX, - ..mem::zeroed() - }; - *dst.u.SubresourceIndex_mut() = 0; - - self.0.CopyTextureRegion(&dst, 0, 0, 0, &src, ptr::null()); - } - - pub unsafe fn copy_texture_to_buffer( - &self, - texture: &Resource, - buffer: &Resource, - width: u32, - height: u32, - ) { - let mut src = d3d12::D3D12_TEXTURE_COPY_LOCATION { - pResource: texture.get_mut(), - Type: d3d12::D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX, - ..mem::zeroed() - }; - *src.u.SubresourceIndex_mut() = 0; - - let mut dst = d3d12::D3D12_TEXTURE_COPY_LOCATION { - pResource: buffer.get_mut(), - Type: d3d12::D3D12_TEXTURE_COPY_TYPE_PLACED_FOOTPRINT, - ..mem::zeroed() - }; - let row_pitch = width * 4; - assert!( - row_pitch % d3d12::D3D12_TEXTURE_DATA_PITCH_ALIGNMENT == 0, - "TODO: handle unaligned row pitch" - ); - let footprint = d3d12::D3D12_PLACED_SUBRESOURCE_FOOTPRINT { - Offset: 0, - Footprint: d3d12::D3D12_SUBRESOURCE_FOOTPRINT { - Format: dxgiformat::DXGI_FORMAT_R8G8B8A8_UNORM, - Width: width, - Height: height, - Depth: 1, - RowPitch: row_pitch, - }, - }; - *dst.u.PlacedFootprint_mut() = footprint; - - self.0.CopyTextureRegion(&dst, 0, 0, 0, &src, ptr::null()); - } -} - -pub unsafe fn create_uav_resource_barrier( - resource: *mut d3d12::ID3D12Resource, -) -> d3d12::D3D12_RESOURCE_BARRIER { - let uav = d3d12::D3D12_RESOURCE_UAV_BARRIER { - pResource: resource, - }; - - let mut resource_barrier: d3d12::D3D12_RESOURCE_BARRIER = mem::zeroed(); - resource_barrier.Type = d3d12::D3D12_RESOURCE_BARRIER_TYPE_UAV; - resource_barrier.Flags = d3d12::D3D12_RESOURCE_BARRIER_FLAG_NONE; - *resource_barrier.u.UAV_mut() = uav; - - resource_barrier -} - -pub unsafe fn create_transition_resource_barrier( - resource: *mut d3d12::ID3D12Resource, - state_before: d3d12::D3D12_RESOURCE_STATES, - state_after: d3d12::D3D12_RESOURCE_STATES, -) -> 
d3d12::D3D12_RESOURCE_BARRIER { - let transition = d3d12::D3D12_RESOURCE_TRANSITION_BARRIER { - pResource: resource, - Subresource: d3d12::D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES, - StateBefore: state_before, - StateAfter: state_after, - }; - - let mut resource_barrier: d3d12::D3D12_RESOURCE_BARRIER = mem::zeroed(); - resource_barrier.Type = d3d12::D3D12_RESOURCE_BARRIER_TYPE_TRANSITION; - resource_barrier.Flags = d3d12::D3D12_RESOURCE_BARRIER_FLAG_NONE; - *resource_barrier.u.Transition_mut() = transition; - - resource_barrier -} - -#[allow(unused)] -pub unsafe fn enable_debug_layer() -> Result<(), Error> { - let mut debug_controller: *mut d3d12sdklayers::ID3D12Debug1 = ptr::null_mut(); - explain_error( - d3d12::D3D12GetDebugInterface( - &d3d12sdklayers::ID3D12Debug1::uuidof(), - &mut debug_controller as *mut _ as *mut _, - ), - "could not create debug controller", - )?; - - let debug_controller = ComPtr::from_raw(debug_controller); - debug_controller.EnableDebugLayer(); - - let mut queue = ptr::null_mut(); - let hr = dxgi1_3::DXGIGetDebugInterface1( - 0, - &dxgidebug::IDXGIInfoQueue::uuidof(), - &mut queue as *mut _ as *mut _, - ); - - explain_error(hr, "failed to enable debug layer")?; - - debug_controller.SetEnableGPUBasedValidation(minwindef::TRUE); - Ok(()) -} diff --git a/piet-gpu-hal/src/hub.rs b/piet-gpu-hal/src/hub.rs deleted file mode 100644 index 1d51459..0000000 --- a/piet-gpu-hal/src/hub.rs +++ /dev/null @@ -1,1109 +0,0 @@ -//! A somewhat higher level GPU abstraction. -//! -//! This layer is on top of the lower-level layer that multiplexes different -//! back-ends. It handles details such as managing staging buffers for creating -//! buffers with initial content, deferring dropping of resources until command -//! submission is complete, and a bit more. These conveniences might expand -//! even more in time. - -use std::convert::TryInto; -use std::ops::{Bound, RangeBounds}; -use std::sync::{Arc, Mutex, Weak}; - -use bytemuck::Pod; -use smallvec::SmallVec; - -use crate::{mux, BackendType, BufWrite, ComputePassDescriptor, ImageFormat, MapMode}; - -use crate::{BindType, BufferUsage, Error, GpuInfo, ImageLayout, SamplerParams}; - -pub use crate::mux::{DescriptorSet, Fence, Pipeline, QueryPool, Sampler, Semaphore, ShaderCode}; - -/// A session of GPU operations. -/// -/// This abstraction is generally called a "device" in other APIs, but that -/// term is very overloaded. It is the point to access resource creation, -/// work submission, and related concerns. -/// -/// Most of the methods are `&self`, indicating that they can be called from -/// multiple threads. -#[derive(Clone)] -pub struct Session(Arc); - -struct SessionInner { - device: mux::Device, - /// A pool of command buffers that can be reused. - /// - /// Currently this is not used, as it only works well on Vulkan. At some - /// point, we will want to efficiently reuse command buffers rather than - /// allocating them each time, but that is a TODO. - cmd_buf_pool: Mutex>, - /// Command buffers that are still pending (so resources can't be freed yet). - pending: Mutex>, - /// A command buffer that is used for copying from staging buffers. - staging_cmd_buf: Mutex>, - gpu_info: GpuInfo, -} - -/// A command buffer. -/// -/// Actual work done by the GPU is encoded into a command buffer and then -/// submitted to the session in a batch. -pub struct CmdBuf { - // The invariant is that these options are always populated except - // when the struct is being destroyed. 
It would be possible to get - // rid of them by using this unsafe trick: - // https://phaazon.net/blog/blog/rust-no-drop - cmd_buf: Option, - fence: Option, - resources: Vec, - session: Weak, -} - -/// A command buffer in submitted state. -/// -/// Submission of a command buffer is asynchronous, meaning that the submit -/// method returns immediately. The work done in the command buffer cannot -/// be accessed (for example, readback from buffers written) until the the -/// submission is complete. The main purpose of this structure is to wait on -/// that completion. -pub struct SubmittedCmdBuf(Option, Weak); - -struct SubmittedCmdBufInner { - // It's inconsistent, cmd_buf is unpacked, staging_cmd_buf isn't. Probably - // better to chose one or the other. - cmd_buf: mux::CmdBuf, - fence: Fence, - resources: Vec, - staging_cmd_buf: Option, -} - -/// An image or texture. -/// -/// At the moment, images are limited to 2D. -#[derive(Clone)] -pub struct Image(Arc); - -struct ImageInner { - image: mux::Image, - session: Weak, -} - -/// A buffer. -/// -/// A buffer is a segment of memory that can be accessed by the GPU, and -/// in some cases also by the host (if the appropriate [`BufferUsage`] flags -/// are set). -#[derive(Clone)] -pub struct Buffer(Arc); - -struct BufferInner { - buffer: mux::Buffer, - session: Weak, -} - -/// A builder for creating descriptor sets. -/// -/// Add bindings to the descriptor set before dispatching a shader. -pub struct DescriptorSetBuilder(mux::DescriptorSetBuilder); - -/// A resource to retain during the lifetime of a command submission. -pub enum RetainResource { - Buffer(Buffer), - Image(Image), -} - -/// A buffer mapped for writing. -/// -/// When this structure is dropped, the buffer will be unmapped. -pub struct BufWriteGuard<'a> { - buf_write: BufWrite, - session: Arc, - buffer: &'a mux::Buffer, - offset: u64, - size: u64, -} - -/// A buffer mapped for reading. -/// -/// When this structure is dropped, the buffer will be unmapped. -pub struct BufReadGuard<'a> { - bytes: &'a [u8], - session: Arc, - buffer: &'a mux::Buffer, - offset: u64, - size: u64, -} - -/// A sub-object of a command buffer for a sequence of compute dispatches. -pub struct ComputePass<'a> { - cmd_buf: &'a mut CmdBuf, -} - -impl Session { - /// Create a new session, choosing the best backend. - pub fn new(device: mux::Device) -> Session { - let gpu_info = device.query_gpu_info(); - Session(Arc::new(SessionInner { - device, - gpu_info, - cmd_buf_pool: Default::default(), - pending: Default::default(), - staging_cmd_buf: Default::default(), - })) - } - - /// Create a new command buffer. - /// - /// The caller is responsible for inserting pipeline barriers and other - /// transitions. If one dispatch writes a buffer (or image), and another - /// reads it, a barrier must intervene. No such barrier is needed for - /// uploads by the host before command submission, but a host barrier is - /// needed if the host will do readback of any buffers written by the - /// command list. - pub fn cmd_buf(&self) -> Result { - self.poll_cleanup(); - let (cmd_buf, fence) = if let Some(cf) = self.0.cmd_buf_pool.lock().unwrap().pop() { - cf - } else { - let cmd_buf = self.0.device.create_cmd_buf()?; - let fence = unsafe { self.0.device.create_fence(false)? 
}; - (cmd_buf, fence) - }; - Ok(CmdBuf { - cmd_buf: Some(cmd_buf), - fence: Some(fence), - resources: Vec::new(), - session: Arc::downgrade(&self.0), - }) - } - - fn poll_cleanup(&self) { - let mut pending = self.0.pending.lock().unwrap(); - unsafe { - let mut i = 0; - while i < pending.len() { - if let Ok(true) = self.0.device.get_fence_status(&mut pending[i].fence) { - let mut item = pending.swap_remove(i); - // TODO: wait is superfluous, can just reset - let _ = self.0.device.wait_and_reset(vec![&mut item.fence]); - self.0.cleanup_submitted_cmd_buf(item); - } else { - i += 1; - } - } - } - } - - /// Run a command buffer. - /// - /// The semaphores are for swapchain presentation and can be empty for - /// compute-only work. When provided, work is synchronized to start only - /// when the wait semaphores are signaled, and when work is complete, the - /// signal semaphores are signaled. - pub unsafe fn run_cmd_buf( - &self, - mut cmd_buf: CmdBuf, - wait_semaphores: &[&Semaphore], - signal_semaphores: &[&Semaphore], - ) -> Result { - // Again, SmallVec here? - let mut cmd_bufs = Vec::with_capacity(2); - let mut staging_cmd_buf = self.0.staging_cmd_buf.lock().unwrap().take(); - if let Some(staging) = &mut staging_cmd_buf { - // With finer grained resource tracking, we might be able to avoid this in - // some cases. - staging.memory_barrier(); - staging.finish(); - cmd_bufs.push(staging.cmd_buf.as_ref().unwrap()); - } - cmd_bufs.push(cmd_buf.cmd_buf.as_ref().unwrap()); - self.0.device.run_cmd_bufs( - &cmd_bufs, - wait_semaphores, - signal_semaphores, - Some(cmd_buf.fence.as_mut().unwrap()), - )?; - Ok(SubmittedCmdBuf( - Some(SubmittedCmdBufInner { - cmd_buf: cmd_buf.cmd_buf.take().unwrap(), - fence: cmd_buf.fence.take().unwrap(), - resources: std::mem::take(&mut cmd_buf.resources), - staging_cmd_buf, - }), - std::mem::replace(&mut cmd_buf.session, Weak::new()), - )) - } - - /// Create a buffer. - /// - /// The `usage` flags must be specified to indicate what the buffer will - /// be used for. In general, when no `MAP_` flags are specified, the buffer - /// will be created in device memory, which means they are not host - /// accessible, but GPU access is much higher performance (at least on - /// discrete GPUs). - pub fn create_buffer(&self, size: u64, usage: BufferUsage) -> Result { - let buffer = self.0.device.create_buffer(size, usage)?; - Ok(Buffer(Arc::new(BufferInner { - buffer, - session: Arc::downgrade(&self.0), - }))) - } - - /// Create a buffer with initialized data. - /// - /// This method takes care of creating a staging buffer if needed, so - /// it is not necessary to specify `MAP_WRITE` usage, unless of course - /// the buffer will subsequently be written by the host. - pub fn create_buffer_init( - &self, - contents: &[impl Pod], - usage: BufferUsage, - ) -> Result { - let size = std::mem::size_of_val(contents); - let bytes = bytemuck::cast_slice(contents); - self.create_buffer_with(size as u64, |b| b.push_bytes(bytes), usage) - } - - /// Create a buffer with initialized data. - /// - /// The buffer is filled by the provided function. The same details about - /// staging buffers apply as [`create_buffer_init`]. 
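A minimal sketch of the two creation paths described above, assuming an existing `Session`; the contents, sizes, and usage combinations are illustrative.

    use piet_gpu_hal::{BufferUsage, Error, Session};

    fn make_buffers(session: &Session) -> Result<(), Error> {
        // Device-local storage buffer; no MAP_ flags, so the hub may route the
        // initial contents through a staging buffer behind the scenes.
        let data: Vec<u32> = (0..256).collect();
        let storage = session.create_buffer_init(
            data.as_slice(),
            BufferUsage::STORAGE | BufferUsage::COPY_SRC,
        )?;

        // Host-readable buffer for readback once a host barrier has been
        // recorded and the command buffer's fence has signaled.
        let readback =
            session.create_buffer(1024, BufferUsage::MAP_READ | BufferUsage::COPY_DST)?;
        let _ = (storage, readback);
        Ok(())
    }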
- pub fn create_buffer_with( - &self, - size: u64, - f: impl Fn(&mut BufWrite), - usage: BufferUsage, - ) -> Result { - unsafe { - let use_staging_buffer = !usage - .intersects(BufferUsage::MAP_READ | BufferUsage::MAP_WRITE) - && self.gpu_info().use_staging_buffers; - let create_usage = if use_staging_buffer { - BufferUsage::MAP_WRITE | BufferUsage::COPY_SRC - } else { - usage | BufferUsage::MAP_WRITE - }; - let create_buf = self.create_buffer(size, create_usage)?; - let mapped = - self.0 - .device - .map_buffer(&create_buf.mux_buffer(), 0, size, MapMode::Write)?; - let mut buf_write = BufWrite::new(mapped, 0, size as usize); - f(&mut buf_write); - self.0 - .device - .unmap_buffer(&create_buf.mux_buffer(), 0, size, MapMode::Write)?; - if use_staging_buffer { - let buf = self.create_buffer(size, usage | BufferUsage::COPY_DST)?; - let mut staging_cmd_buf = self.0.staging_cmd_buf.lock().unwrap(); - if staging_cmd_buf.is_none() { - let mut cmd_buf = self.cmd_buf()?; - cmd_buf.begin(); - *staging_cmd_buf = Some(cmd_buf); - } - let staging_cmd_buf = staging_cmd_buf.as_mut().unwrap(); - // This will ensure the staging buffer is deallocated. - staging_cmd_buf.copy_buffer(&create_buf, &buf); - staging_cmd_buf.add_resource(create_buf); - Ok(buf) - } else { - Ok(create_buf) - } - } - } - - /// Create an image of the given size and pixel format. - pub unsafe fn create_image2d( - &self, - width: u32, - height: u32, - format: ImageFormat, - ) -> Result { - let image = self.0.device.create_image2d(width, height, format)?; - Ok(Image(Arc::new(ImageInner { - image, - session: Arc::downgrade(&self.0), - }))) - } - - /// Create a semaphore. - /// - /// These "semaphores" are only for swapchain integration and may be - /// stubs on back-ends that don't require semaphore synchronization. - pub unsafe fn create_semaphore(&self) -> Result { - self.0.device.create_semaphore() - } - - /// Create a compute shader pipeline. - /// - /// A pipeline is essentially a compiled shader, with more specific - /// details about what resources may be bound to it. - pub unsafe fn create_compute_pipeline<'a>( - &self, - code: ShaderCode<'a>, - bind_types: &[BindType], - ) -> Result { - self.0.device.create_compute_pipeline(code, bind_types) - } - - /// Create a descriptor set for a simple pipeline that just references buffers. - pub unsafe fn create_simple_descriptor_set<'a>( - &self, - pipeline: &Pipeline, - buffers: impl IntoRefs<'a, Buffer>, - ) -> Result { - self.descriptor_set_builder() - .add_buffers(buffers) - .build(self, pipeline) - } - - /// Start building a descriptor set. - /// - /// A descriptor set is a binding of actual resources (buffers and - /// images) to slots as specified in the pipeline. - pub unsafe fn descriptor_set_builder(&self) -> DescriptorSetBuilder { - DescriptorSetBuilder(self.0.device.descriptor_set_builder()) - } - - /// Update a buffer in a descriptor set. - pub unsafe fn update_buffer_descriptor( - &self, - ds: &mut DescriptorSet, - index: u32, - buffer: &Buffer, - ) { - self.0 - .device - .update_buffer_descriptor(ds, index, &buffer.0.buffer) - } - - /// Update an image in a descriptor set. - pub unsafe fn update_image_descriptor( - &self, - ds: &mut DescriptorSet, - index: u32, - image: &Image, - ) { - self.0 - .device - .update_image_descriptor(ds, index, &image.0.image) - } - - /// Create a query pool for timestamp queries. - pub fn create_query_pool(&self, n_queries: u32) -> Result { - self.0.device.create_query_pool(n_queries) - } - - /// Fetch the contents of the query pool. 
- /// - /// This should be called after waiting on the command buffer that wrote the - /// timer queries. - /// - /// The returned vector is one shorter than the number of timer queries in the - /// pool; the first value is subtracted off. It would likely be better to return - /// the raw timestamps, but that change should be made consistently. - pub unsafe fn fetch_query_pool(&self, pool: &QueryPool) -> Result, Error> { - let result = self.0.device.fetch_query_pool(pool)?; - // Subtract off first timestamp. - Ok(result[1..] - .iter() - .map(|ts| *ts as f64 - result[0]) - .collect()) - } - - #[doc(hidden)] - /// Create a sampler. - /// - /// Not yet implemented. - pub unsafe fn create_sampler(&self, _params: SamplerParams) -> Result { - todo!() - //self.0.device.create_sampler(params) - } - - /// Query the GPU info. - pub fn gpu_info(&self) -> &GpuInfo { - &self.0.gpu_info - } - - /// Choose shader code from the available choices. - pub fn choose_shader<'a>( - &self, - spv: &'a [u8], - hlsl: &'a str, - dxil: &'a [u8], - msl: &'a str, - ) -> ShaderCode<'a> { - self.0.device.choose_shader(spv, hlsl, dxil, msl) - } - - /// Report the backend type that was chosen. - pub fn backend_type(&self) -> BackendType { - self.0.device.backend_type() - } - - #[cfg(target_os = "macos")] - pub unsafe fn cmd_buf_from_raw_mtl(&self, raw_cmd_buf: &::metal::CommandBufferRef) -> CmdBuf { - let cmd_buf = Some(self.0.device.cmd_buf_from_raw_mtl(raw_cmd_buf)); - let resources = Vec::new(); - // Expect client to do cleanup manually. - let session = Weak::new(); - CmdBuf { - cmd_buf, - fence: None, - resources, - session, - } - } - - #[cfg(target_os = "macos")] - pub unsafe fn image_from_raw_mtl( - &self, - raw_texture: &::metal::TextureRef, - width: u32, - height: u32, - ) -> Image { - let image = self.0.device.image_from_raw_mtl(raw_texture, width, height); - // Expect client to do cleanup manually. - let session = Weak::new(); - Image(Arc::new(ImageInner { image, session })) - } -} - -impl SessionInner { - /// Clean up a submitted command buffer. - /// - /// This drops the resources used by the command buffer and also cleans up the command - /// buffer itself. Currently that means destroying it, but at some point we'll want to - /// be better at reuse. - unsafe fn cleanup_submitted_cmd_buf(&self, item: SubmittedCmdBufInner) { - let _should_handle_err = self.device.destroy_cmd_buf(item.cmd_buf); - let _should_handle_err = self.device.destroy_fence(item.fence); - - std::mem::drop(item.resources); - if let Some(mut staging_cmd_buf) = item.staging_cmd_buf { - staging_cmd_buf.destroy(self); - } - } -} - -impl CmdBuf { - fn cmd_buf(&mut self) -> &mut mux::CmdBuf { - self.cmd_buf.as_mut().unwrap() - } - - /// Begin recording into a command buffer. - /// - /// Always call this before encoding any actual work. - /// - /// Discussion question: can this be subsumed? - pub unsafe fn begin(&mut self) { - self.cmd_buf().begin(); - } - - /// Finish recording into a command buffer. - /// - /// Always call this as the last method before submitting the command - /// buffer. - pub unsafe fn finish(&mut self) { - self.cmd_buf().finish(); - } - - /// Commits any open command encoder. - pub unsafe fn flush(&mut self) { - self.cmd_buf().flush(); - } - - /// Begin a compute pass. - pub unsafe fn begin_compute_pass(&mut self, desc: &ComputePassDescriptor) -> ComputePass { - self.cmd_buf().begin_compute_pass(desc); - ComputePass { cmd_buf: self } - } - - /// Insert an execution and memory barrier. 
- /// - /// Compute kernels (and other actions) after this barrier may read from buffers - /// that were written before this barrier. - pub unsafe fn memory_barrier(&mut self) { - self.cmd_buf().memory_barrier(); - } - - /// Insert a barrier for host access to buffers. - /// - /// The host may read buffers written before this barrier, after the fence for - /// the command buffer is signaled. - /// - /// See http://themaister.net/blog/2019/08/14/yet-another-blog-explaining-vulkan-synchronization/ - /// ("Host memory reads") for an explanation of this barrier. - pub unsafe fn host_barrier(&mut self) { - self.cmd_buf().memory_barrier(); - } - - /// Insert an image barrier, transitioning image layout. - /// - /// When an image is written by one command and then read by another, an image - /// barrier must separate the uses. Also, the image layout must match the use - /// of the image. - /// - /// Additionally, when writing to an image for the first time, it must be - /// transitioned from an unknown layout to specify the layout. - pub unsafe fn image_barrier( - &mut self, - image: &Image, - src_layout: ImageLayout, - dst_layout: ImageLayout, - ) { - self.cmd_buf() - .image_barrier(image.mux_image(), src_layout, dst_layout); - } - - /// Clear the buffer. - /// - /// When the size is not specified, it clears the whole buffer. - pub unsafe fn clear_buffer(&mut self, buffer: &Buffer, size: Option) { - self.cmd_buf().clear_buffer(buffer.mux_buffer(), size); - } - - /// Copy one buffer to another. - /// - /// When the buffers differ in size, the minimum of the sizes is used. - pub unsafe fn copy_buffer(&mut self, src: &Buffer, dst: &Buffer) { - self.cmd_buf() - .copy_buffer(src.mux_buffer(), dst.mux_buffer()); - } - - /// Copy an image to a buffer. - /// - /// The size of the image and buffer must match. - pub unsafe fn copy_image_to_buffer(&mut self, src: &Image, dst: &Buffer) { - self.cmd_buf() - .copy_image_to_buffer(src.mux_image(), dst.mux_buffer()); - // TODO: change the backend signature to allow failure, as in "not - // implemented" or "unaligned", and fall back to compute shader - // submission. - } - - /// Copy a buffer to an image. - /// - /// The size of the image and buffer must match. - pub unsafe fn copy_buffer_to_image(&mut self, src: &Buffer, dst: &Image) { - self.cmd_buf() - .copy_buffer_to_image(src.mux_buffer(), dst.mux_image()); - // See above. - } - - /// Copy an image to another. - /// - /// This is especially useful for writing to the swapchain image, as in - /// general that can't be bound to a compute shader. - /// - /// Discussion question: we might have a specialized version of this - /// function for copying to the swapchain image, and a separate type. - pub unsafe fn blit_image(&mut self, src: &Image, dst: &Image) { - self.cmd_buf().blit_image(src.mux_image(), dst.mux_image()); - } - - /// Reset the query pool. - /// - /// The query pool must be reset before each use, to avoid validation errors. - /// This is annoying, and we could tweak the API to make it implicit, doing - /// the reset before the first timestamp write. - pub unsafe fn reset_query_pool(&mut self, pool: &QueryPool) { - self.cmd_buf().reset_query_pool(pool); - } - - /// Prepare the timestamps for reading. This isn't required on Vulkan but - /// is required on (at least) DX12. - /// - /// It's possible we'll make this go away, by implicitly including it - /// on command buffer submission when a query pool has been written. 
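Taken together, the barrier rules above imply a recording sequence roughly like the following; the pipeline, descriptor set, and workgroup counts are assumed placeholders.

    use piet_gpu_hal::{CmdBuf, ComputePassDescriptor, DescriptorSet, Pipeline};

    unsafe fn record(cmd_buf: &mut CmdBuf, pipeline: &Pipeline, descriptors: &DescriptorSet) {
        cmd_buf.begin();
        let mut pass = cmd_buf.begin_compute_pass(&ComputePassDescriptor::default());
        pass.dispatch(pipeline, descriptors, (64, 1, 1), (256, 1, 1));
        // The second dispatch reads buffers the first one wrote, so a memory
        // barrier must separate them.
        pass.memory_barrier();
        pass.dispatch(pipeline, descriptors, (1, 1, 1), (256, 1, 1));
        pass.end();
        // The host will read results after waiting on the fence, so end with a
        // host barrier before finishing the command buffer.
        cmd_buf.host_barrier();
        cmd_buf.finish();
    }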
- pub unsafe fn finish_timestamps(&mut self, pool: &QueryPool) { - self.cmd_buf().finish_timestamps(pool); - } - - /// Begin a labeled section for debugging and profiling purposes. - pub unsafe fn begin_debug_label(&mut self, label: &str) { - self.cmd_buf().begin_debug_label(label); - } - - /// End a section opened by `begin_debug_label`. - pub unsafe fn end_debug_label(&mut self) { - self.cmd_buf().end_debug_label(); - } - - /// Make sure the resource lives until the command buffer completes. - /// - /// The submitted command buffer will hold this reference until the corresponding - /// fence is signaled. - /// - /// There are two choices for upholding the lifetime invariant: this function, or - /// the caller can manually hold the reference. The latter is appropriate when it's - /// part of retained state. - pub fn add_resource(&mut self, resource: impl Into) { - self.resources.push(resource.into()); - } -} - -impl SubmittedCmdBuf { - /// Wait for the work to complete. - /// - /// After calling this function, buffers written by the command buffer - /// can be read (assuming they were created with `MAP_READ` usage and also - /// that a host barrier was placed in the command list). - /// - /// Further, resources referenced by the command list may be destroyed or - /// reused; it is a safety violation to do so beforehand. - /// - /// Resources for which destruction was deferred through - /// [`add_resource`][`CmdBuf::add_resource`] will actually be dropped here. - /// - /// If the command buffer is still available for reuse, it is returned. - pub fn wait(mut self) -> Result, Error> { - let mut item = self.0.take().unwrap(); - if let Some(session) = Weak::upgrade(&self.1) { - unsafe { - session.device.wait_and_reset(vec![&mut item.fence])?; - if let Some(mut staging_cmd_buf) = item.staging_cmd_buf { - staging_cmd_buf.destroy(&session); - } - if item.cmd_buf.reset() { - return Ok(Some(CmdBuf { - cmd_buf: Some(item.cmd_buf), - fence: Some(item.fence), - resources: Vec::new(), - session: std::mem::take(&mut self.1), - })); - } else { - return Ok(None); - } - } - } - // else session dropped error? - Ok(None) - } -} - -impl Drop for CmdBuf { - fn drop(&mut self) { - if let Some(session) = Weak::upgrade(&self.session) { - unsafe { - self.destroy(&session); - } - } - } -} - -impl CmdBuf { - unsafe fn destroy(&mut self, session: &SessionInner) { - if let Some(cmd_buf) = self.cmd_buf.take() { - let _ = session.device.destroy_cmd_buf(cmd_buf); - } - if let Some(fence) = self.fence.take() { - let _ = session.device.destroy_fence(fence); - } - self.resources.clear(); - } -} - -impl Drop for SubmittedCmdBuf { - fn drop(&mut self) { - if let Some(inner) = self.0.take() { - if let Some(session) = Weak::upgrade(&self.1) { - session.pending.lock().unwrap().push(inner); - } - } - } -} - -impl<'a> ComputePass<'a> { - /// Dispatch a compute shader. - /// - /// Request a compute shader to be run, using the pipeline to specify the - /// code, and the descriptor set to address the resources read and written. - /// - /// Both the workgroup count (number of workgroups) and the workgroup size - /// (number of threads in a workgroup) must be specified here, though not - /// all back-ends require the latter info. - pub unsafe fn dispatch( - &mut self, - pipeline: &Pipeline, - descriptor_set: &DescriptorSet, - workgroup_count: (u32, u32, u32), - workgroup_size: (u32, u32, u32), - ) { - self.cmd_buf - .cmd_buf() - .dispatch(pipeline, descriptor_set, workgroup_count, workgroup_size); - } - - /// Add a memory barrier. 
- /// - /// Inserts a memory barrier in the compute encoder. This is a convenience - /// function for calling the same function on the underlying command buffer, - /// avoiding borrow check issues. - pub unsafe fn memory_barrier(&mut self) { - self.cmd_buf.memory_barrier(); - } - - /// Begin a labeled section for debugging and profiling purposes. - pub unsafe fn begin_debug_label(&mut self, label: &str) { - self.cmd_buf.begin_debug_label(label); - } - - /// End a section opened by `begin_debug_label`. - pub unsafe fn end_debug_label(&mut self) { - self.cmd_buf.end_debug_label(); - } - - pub unsafe fn end(self) { - self.cmd_buf.cmd_buf().end_compute_pass(); - } -} - -impl Drop for BufferInner { - fn drop(&mut self) { - if let Some(session) = Weak::upgrade(&self.session) { - unsafe { - let _ = session.device.destroy_buffer(&self.buffer); - } - } - } -} - -impl Drop for ImageInner { - fn drop(&mut self) { - if let Some(session) = Weak::upgrade(&self.session) { - unsafe { - let _ = session.device.destroy_image(&self.image); - } - } - } -} - -impl Image { - /// Get a lower level image handle. - pub(crate) fn mux_image(&self) -> &mux::Image { - &self.0.image - } - - /// Wrap a swapchain image so it can be exported to the hub level. - /// Swapchain images don't need resource tracking (or at least we - /// don't do it), so no session ref is needed. - pub(crate) fn wrap_swapchain_image(image: mux::Image) -> Image { - Image(Arc::new(ImageInner { - image, - session: Weak::new(), - })) - } -} - -impl Buffer { - /// Get a lower level buffer handle. - pub(crate) fn mux_buffer(&self) -> &mux::Buffer { - &self.0.buffer - } - - /// Write the buffer contents. - /// - /// The buffer must have been created with `MAP_WRITE` usage, and with - /// a size large enough to accommodate the given slice. - pub unsafe fn write(&mut self, contents: &[impl Pod]) -> Result<(), Error> { - let bytes = bytemuck::cast_slice(contents); - if let Some(session) = Weak::upgrade(&self.0.session) { - let size = bytes.len().try_into()?; - let buf_size = self.0.buffer.size(); - if size > buf_size { - return Err(format!( - "Trying to write {} bytes into buffer of size {}", - size, buf_size - ) - .into()); - } - let mapped = session - .device - .map_buffer(&self.0.buffer, 0, size, MapMode::Write)?; - std::ptr::copy_nonoverlapping(bytes.as_ptr(), mapped, bytes.len()); - session - .device - .unmap_buffer(&self.0.buffer, 0, size, MapMode::Write)?; - } - // else session lost error? - Ok(()) - } - - /// Read the buffer contents. - /// - /// The buffer must have been created with `MAP_READ` usage. The caller - /// is also responsible for ensuring that this does not read uninitialized - /// memory. - pub unsafe fn read(&self, result: &mut Vec) -> Result<(), Error> { - let size = self.mux_buffer().size(); - // TODO: can bytemuck grow a method to do this more safely? - // It's similar to pod_collect_to_vec. - let len = size as usize / std::mem::size_of::(); - if len > result.len() { - result.reserve(len - result.len()); - } - if let Some(session) = Weak::upgrade(&self.0.session) { - let mapped = session - .device - .map_buffer(&self.0.buffer, 0, size, MapMode::Read)?; - std::ptr::copy_nonoverlapping(mapped, result.as_mut_ptr() as *mut u8, size as usize); - session - .device - .unmap_buffer(&self.0.buffer, 0, size, MapMode::Read)?; - result.set_len(len); - } - // else session lost error? - Ok(()) - } - - /// Map a buffer for writing. 
- /// - /// The mapped buffer is represented by a "guard" structure, which will unmap - /// the buffer when it's dropped. That also has a number of methods for pushing - /// bytes and [`bytemuck::Pod`] objects. - /// - /// The buffer must have been created with `MAP_WRITE` usage. - pub unsafe fn map_write<'a>( - &'a mut self, - range: impl RangeBounds, - ) -> Result, Error> { - let offset = match range.start_bound() { - Bound::Unbounded => 0, - Bound::Included(&s) => s.try_into()?, - Bound::Excluded(_) => unreachable!(), - }; - let end = match range.end_bound() { - Bound::Unbounded => self.size(), - Bound::Included(&s) => s.try_into()?, - Bound::Excluded(&s) => s.checked_add(1).unwrap().try_into()?, - }; - self.map_write_impl(offset, end - offset) - } - - unsafe fn map_write_impl<'a>( - &'a self, - offset: u64, - size: u64, - ) -> Result, Error> { - if let Some(session) = Weak::upgrade(&self.0.session) { - let ptr = session - .device - .map_buffer(&self.0.buffer, offset, size, MapMode::Write)?; - let buf_write = BufWrite::new(ptr, 0, size as usize); - let guard = BufWriteGuard { - buf_write, - session, - buffer: &self.0.buffer, - offset, - size, - }; - Ok(guard) - } else { - Err("session lost".into()) - } - } - - /// Map a buffer for reading. - /// - /// The mapped buffer is represented by a "guard" structure, which will unmap - /// the buffer when it's dropped, and derefs to a plain byte slice. - /// - /// The buffer must have been created with `MAP_READ` usage. The caller - /// is also responsible for ensuring that this does not read uninitialized - /// memory. - pub unsafe fn map_read<'a>( - // Discussion: should be &mut? Buffer is Clone, but maybe that should change. - &'a self, - range: impl RangeBounds, - ) -> Result, Error> { - let offset = match range.start_bound() { - Bound::Unbounded => 0, - Bound::Excluded(_) => unreachable!(), - Bound::Included(&s) => s.try_into()?, - }; - let end = match range.end_bound() { - Bound::Unbounded => self.size(), - Bound::Excluded(&s) => s.try_into()?, - Bound::Included(&s) => s.checked_add(1).unwrap().try_into()?, - }; - self.map_read_impl(offset, end - offset) - } - - unsafe fn map_read_impl<'a>( - &'a self, - offset: u64, - size: u64, - ) -> Result, Error> { - if let Some(session) = Weak::upgrade(&self.0.session) { - let ptr = session - .device - .map_buffer(&self.0.buffer, offset, size, MapMode::Read)?; - let bytes = std::slice::from_raw_parts(ptr, size as usize); - let guard = BufReadGuard { - bytes, - session, - buffer: &self.0.buffer, - offset, - size, - }; - Ok(guard) - } else { - Err("session lost".into()) - } - } - - /// The size of the buffer. - /// - /// This is at least as large as the value provided on creation. 
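A short sketch of the map guards described above, assuming the buffer was created with the matching `MAP_WRITE`/`MAP_READ` usage; the element type and byte range are illustrative.

    use piet_gpu_hal::{Buffer, Error};

    unsafe fn roundtrip(buffer: &mut Buffer) -> Result<u32, Error> {
        // Write through the BufWrite guard; the buffer is unmapped when the
        // guard is dropped at the end of the block.
        {
            let mut map = buffer.map_write(0..4)?;
            map.push_bytes(bytemuck::bytes_of(&42u32));
        }
        // Read the same range back; the guard derefs to a plain byte slice.
        let read = buffer.map_read(0..4)?;
        Ok(read.cast_slice::<u32>()[0])
    }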
- pub fn size(&self) -> u64 { - self.0.buffer.size() - } -} - -impl DescriptorSetBuilder { - pub fn add_buffers<'a>(mut self, buffers: impl IntoRefs<'a, Buffer>) -> Self { - let mux_buffers = buffers - .into_refs() - .map(|b| b.mux_buffer()) - .collect::>(); - self.0.add_buffers(&mux_buffers); - self - } - - pub fn add_images<'a>(mut self, images: impl IntoRefs<'a, Image>) -> Self { - let mux_images = images - .into_refs() - .map(|i| i.mux_image()) - .collect::>(); - self.0.add_images(&mux_images); - self - } - - pub fn add_textures<'a>(mut self, images: impl IntoRefs<'a, Image>) -> Self { - let mux_images = images - .into_refs() - .map(|i| i.mux_image()) - .collect::>(); - self.0.add_textures(&mux_images); - self - } - - pub unsafe fn build( - self, - session: &Session, - pipeline: &Pipeline, - ) -> Result { - self.0.build(&session.0.device, pipeline) - } -} - -// This lets us use either a slice or a vector. The type is clunky but it -// seems fine enough to use. -pub trait IntoRefs<'a, T: 'a> { - type Iterator: Iterator; - - fn into_refs(self) -> Self::Iterator; -} - -impl<'a, T> IntoRefs<'a, T> for &'a [T] { - type Iterator = std::slice::Iter<'a, T>; - fn into_refs(self) -> Self::Iterator { - self.into_iter() - } -} - -impl<'a, T> IntoRefs<'a, T> for &'a [&'a T] { - type Iterator = std::iter::Copied>; - fn into_refs(self) -> Self::Iterator { - self.into_iter().copied() - } -} - -impl<'a, T, const N: usize> IntoRefs<'a, T> for &'a [&'a T; N] { - type Iterator = std::iter::Copied>; - fn into_refs(self) -> Self::Iterator { - self.into_iter().copied() - } -} - -impl<'a, T> IntoRefs<'a, T> for Vec<&'a T> { - type Iterator = std::vec::IntoIter<&'a T>; - fn into_refs(self) -> Self::Iterator { - self.into_iter() - } -} - -impl From for RetainResource { - fn from(buf: Buffer) -> Self { - RetainResource::Buffer(buf) - } -} - -impl From for RetainResource { - fn from(img: Image) -> Self { - RetainResource::Image(img) - } -} - -impl<'a, T: Clone + Into> From<&'a T> for RetainResource { - fn from(resource: &'a T) -> Self { - resource.clone().into() - } -} - -impl<'a> Drop for BufWriteGuard<'a> { - fn drop(&mut self) { - unsafe { - let _ = self.session.device.unmap_buffer( - self.buffer, - self.offset, - self.size, - MapMode::Write, - ); - } - } -} - -impl<'a> std::ops::Deref for BufWriteGuard<'a> { - type Target = BufWrite; - - fn deref(&self) -> &Self::Target { - &self.buf_write - } -} - -impl<'a> std::ops::DerefMut for BufWriteGuard<'a> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.buf_write - } -} - -impl<'a> Drop for BufReadGuard<'a> { - fn drop(&mut self) { - unsafe { - let _ = self.session.device.unmap_buffer( - self.buffer, - self.offset, - self.size, - MapMode::Read, - ); - } - } -} - -impl<'a> std::ops::Deref for BufReadGuard<'a> { - type Target = [u8]; - - fn deref(&self) -> &Self::Target { - self.bytes - } -} - -impl<'a> BufReadGuard<'a> { - /// Interpret the buffer as a slice of a plain data type. - pub fn cast_slice(&self) -> &[T] { - bytemuck::cast_slice(self.bytes) - } -} diff --git a/piet-gpu-hal/src/lib.rs b/piet-gpu-hal/src/lib.rs deleted file mode 100644 index 0ec95c7..0000000 --- a/piet-gpu-hal/src/lib.rs +++ /dev/null @@ -1,213 +0,0 @@ -//! The cross-platform abstraction for a GPU device. -//! -//! This abstraction is inspired by gfx-hal, but is specialized to the needs of piet-gpu. -//! In time, it may go away and be replaced by either gfx-hal or wgpu. 
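Returning to the hub's `DescriptorSetBuilder` and `IntoRefs` shown above, a minimal binding sketch; which resources belong in which slots is dictated by the pipeline's bind types and is assumed here.

    use piet_gpu_hal::{Buffer, DescriptorSet, Error, Image, Pipeline, Session};

    unsafe fn bind(
        session: &Session,
        pipeline: &Pipeline,
        buffers: &[&Buffer],
        image: &Image,
    ) -> Result<DescriptorSet, Error> {
        // IntoRefs lets a slice of references (or a Vec, or a fixed-size array
        // of references) be passed straight to the builder.
        session
            .descriptor_set_builder()
            .add_buffers(buffers)
            .add_images(&[image])
            .build(session, pipeline)
    }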
- -use bitflags::bitflags; - -mod backend; -mod bestfit; -mod bufwrite; -mod hub; - -#[macro_use] -mod macros; - -mod mux; - -pub use crate::mux::{ - DescriptorSet, Device, Fence, Instance, Pipeline, QueryPool, Sampler, Semaphore, ShaderCode, - Surface, Swapchain, -}; -pub use bufwrite::BufWrite; -pub use hub::{ - BufReadGuard, BufWriteGuard, Buffer, CmdBuf, ComputePass, DescriptorSetBuilder, Image, - RetainResource, Session, SubmittedCmdBuf, -}; - -// TODO: because these are conditionally included, "cargo fmt" does not -// see them. Figure that out, possibly including running rustfmt manually. -mux_cfg! { - #[cfg(vk)] - mod vulkan; -} -mux_cfg! { - #[cfg(dx12)] - mod dx12; -} -#[cfg(target_os = "macos")] -mod metal; - -/// The common error type for the crate. -/// -/// This keeps things simple and can be expanded later. -pub type Error = Box; - -bitflags! { - /// Options when creating an instance. - #[derive(Default)] - pub struct InstanceFlags: u32 { - /// Prefer DX12 over Vulkan. - const DX12 = 0x1; - // TODO: discrete vs integrated selection - } -} - -/// The GPU backend that was selected. -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub enum BackendType { - Vulkan, - Dx12, - Metal, -} - -/// An image layout state. -/// -/// An image must be in a particular layout state to be used for -/// a purpose such as being bound to a shader. -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub enum ImageLayout { - /// The initial state for a newly created image. - Undefined, - /// A swapchain ready to be presented. - Present, - /// The source for a copy operation. - BlitSrc, - /// The destination for a copy operation. - BlitDst, - /// Read/write binding to a shader. - General, - /// Able to be sampled from by shaders. - ShaderRead, -} - -/// The type of sampling for image lookup. -/// -/// This could take a lot more params, such as filtering, repeat, behavior -/// at edges, etc., but for now we'll keep it simple. -#[derive(Copy, Clone, Debug)] -pub enum SamplerParams { - Nearest, - Linear, -} - -/// Image format. -#[derive(Copy, Clone, Debug)] -pub enum ImageFormat { - // 8 bit grayscale / alpha - A8, - // 8 bit per pixel RGBA - Rgba8, - // Format that matches the target surface - Surface, -} - -bitflags! { - /// The intended usage for a buffer, specified on creation. - pub struct BufferUsage: u32 { - /// The buffer can be mapped for reading CPU-side. - const MAP_READ = 0x1; - /// The buffer can be mapped for writing CPU-side. - const MAP_WRITE = 0x2; - /// The buffer can be copied from. - const COPY_SRC = 0x4; - /// The buffer can be copied to. - const COPY_DST = 0x8; - /// The buffer can be bound to a compute shader. - const STORAGE = 0x80; - /// The buffer can be used to store the results of queries. - const QUERY_RESOLVE = 0x200; - /// The buffer may be cleared. - const CLEAR = 0x8000; - // May add other types. - } -} - -/// The type of resource that will be bound to a slot in a shader. -#[derive(Clone, Copy, PartialEq, Eq)] -pub enum BindType { - /// A storage buffer with read/write access. - Buffer, - /// A storage buffer with read only access. - BufReadOnly, - /// A storage image. - Image, - /// A storage image with read only access. - /// - /// A note on this. None of the backends are currently making a - /// distinction between Image and ImageRead as far as bindings go, - /// but the `--hlsl-nonwritable-uav-texture-as-srv` option to - /// spirv-cross (marked as unstable) would do so. 
- ImageRead, - // TODO: Uniform, Sampler, maybe others -} - -/// Whether to map a buffer in read or write mode. -pub enum MapMode { - /// Map for reading. - Read, - /// Map for writing. - Write, -} - -#[derive(Clone, Debug)] -/// Information about the GPU. -pub struct GpuInfo { - /// The GPU supports descriptor indexing. - pub has_descriptor_indexing: bool, - /// The GPU supports subgroups. - /// - /// Right now, this just checks for basic subgroup capability (as - /// required in Vulkan 1.1), and we should have finer grained - /// queries for shuffles, etc. - pub has_subgroups: bool, - /// Limits on workgroup size for compute shaders. - pub workgroup_limits: WorkgroupLimits, - /// Info about subgroup size control, if available. - pub subgroup_size: Option, - /// The GPU supports a real, grown-ass memory model. - pub has_memory_model: bool, - /// Whether staging buffers should be used. - pub use_staging_buffers: bool, -} - -/// The range of subgroup sizes supported by a back-end, when available. -/// -/// The subgroup size is always a power of 2. The ability to specify -/// subgroup size for a compute shader is a newer feature, not always -/// available. -#[derive(Clone, Debug)] -pub struct SubgroupSize { - pub min: u32, - pub max: u32, -} - -/// The range of workgroup sizes supported by a back-end. -#[derive(Clone, Debug)] -pub struct WorkgroupLimits { - /// The maximum size on each workgroup dimension can be. - pub max_size: [u32; 3], - /// The maximum overall invocations a workgroup can have. That is, the product of sizes in each - /// dimension. - pub max_invocations: u32, -} - -/// Options for creating a compute pass. -#[derive(Default)] -pub struct ComputePassDescriptor<'a> { - // Maybe label should go here? It does in wgpu and wgpu_hal. - /// Timer query parameters. - /// - /// To record timer queries for a compute pass, set the query pool, start - /// query index, and end query index here. The indices must be less than - /// the size of the query pool. - timer_queries: Option<(&'a QueryPool, u32, u32)>, -} - -impl<'a> ComputePassDescriptor<'a> { - pub fn timer(pool: &'a QueryPool, start_query: u32, end_query: u32) -> ComputePassDescriptor { - ComputePassDescriptor { - timer_queries: Some((pool, start_query, end_query)), - } - } -} diff --git a/piet-gpu-hal/src/macros.rs b/piet-gpu-hal/src/macros.rs deleted file mode 100644 index a4a441e..0000000 --- a/piet-gpu-hal/src/macros.rs +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright 2021 The piet-gpu authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Also licensed under MIT license, at your choice. - -//! Macros, mostly to automate backend selection tedium. - -#[doc(hidden)] -/// Configure an item to be included only for the given GPU. -#[macro_export] -macro_rules! 
mux_cfg { - ( #[cfg(vk)] $($tokens:tt)* ) => { - #[cfg(not(target_os="macos"))] $( $tokens )* - }; - - ( #[cfg(dx12)] $($tokens:tt)* ) => { - #[cfg(target_os="windows")] $( $tokens )* - }; - - ( #[cfg(mtl)] $($tokens:tt)* ) => { - #[cfg(target_os="macos")] $( $tokens )* - }; -} - -#[doc(hidden)] -/// Define an enum with a variant per GPU. -#[macro_export] -macro_rules! mux_enum { - ( $(#[$outer:meta])* $v:vis enum $name:ident { - Vk($vk:ty), - Dx12($dx12:ty), - Mtl($mtl:ty), - } ) => { - $(#[$outer])* $v enum $name { - #[cfg(not(target_os="macos"))] - Vk($vk), - #[cfg(target_os="windows")] - Dx12($dx12), - #[cfg(target_os="macos")] - Mtl($mtl), - } - - impl $name { - $crate::mux_cfg! { - #[cfg(vk)] - #[allow(unused)] - fn vk(&self) -> &$vk { - match self { - $name::Vk(x) => x, - _ => panic!("downcast error") - } - } - } - $crate::mux_cfg! { - #[cfg(vk)] - #[allow(unused)] - fn vk_mut(&mut self) -> &mut $vk { - match self { - $name::Vk(x) => x, - _ => panic!("downcast error") - } - } - } - $crate::mux_cfg! { - #[cfg(vk)] - #[allow(unused)] - fn vk_owned(self) -> $vk { - match self { - $name::Vk(x) => x, - _ => panic!("downcast error") - } - } - } - - $crate::mux_cfg! { - #[cfg(dx12)] - #[allow(unused)] - fn dx12(&self) -> &$dx12 { - match self { - $name::Dx12(x) => x, - _ => panic!("downcast error") - } - } - } - $crate::mux_cfg! { - #[cfg(dx12)] - #[allow(unused)] - fn dx12_mut(&mut self) -> &mut $dx12 { - match self { - $name::Dx12(x) => x, - _ => panic!("downcast error") - } - } - } - $crate::mux_cfg! { - #[cfg(dx12)] - #[allow(unused)] - fn dx12_owned(self) -> $dx12 { - match self { - $name::Dx12(x) => x, - _ => panic!("downcast error") - } - } - } - - $crate::mux_cfg! { - #[cfg(mtl)] - #[allow(unused)] - fn mtl(&self) -> &$mtl { - match self { - $name::Mtl(x) => x, - } - } - } - $crate::mux_cfg! { - #[cfg(mtl)] - #[allow(unused)] - fn mtl_mut(&mut self) -> &mut $mtl { - match self { - $name::Mtl(x) => x, - } - } - } - $crate::mux_cfg! { - #[cfg(mtl)] - #[allow(unused)] - fn mtl_owned(self) -> $mtl { - match self { - $name::Mtl(x) => x, - } - } - } - } - }; -} - -/// Define an enum with a variant per GPU for a Device associated type. -macro_rules! mux_device_enum { - ( $(#[$outer:meta])* $assoc_type: ident) => { - $crate::mux_enum! { - $(#[$outer])* - pub enum $assoc_type { - Vk(<$crate::vulkan::VkDevice as $crate::backend::Device>::$assoc_type), - Dx12(<$crate::dx12::Dx12Device as $crate::backend::Device>::$assoc_type), - Mtl(<$crate::metal::MtlDevice as $crate::backend::Device>::$assoc_type), - } - } - } -} - -#[doc(hidden)] -/// A match statement where match arms are conditionally configured per GPU. -#[macro_export] -macro_rules! mux_match { - ( $e:expr ; - $vkname:ident::Vk($vkvar:ident) => $vkblock: block - $dx12name:ident::Dx12($dx12var:ident) => $dx12block: block - $mtlname:ident::Mtl($mtlvar:ident) => $mtlblock: block - ) => { - match $e { - #[cfg(not(target_os="macos"))] - $vkname::Vk($vkvar) => $vkblock - #[cfg(target_os="windows")] - $dx12name::Dx12($dx12var) => $dx12block - #[cfg(target_os="macos")] - $mtlname::Mtl($mtlvar) => $mtlblock - } - }; - - ( $e:expr ; - $vkname:ident::Vk($vkvar:ident) => $vkblock: expr, - $dx12name:ident::Dx12($dx12var:ident) => $dx12block: expr, - $mtlname:ident::Mtl($mtlvar:ident) => $mtlblock: expr, - ) => { - $crate::mux_match! { $e; - $vkname::Vk($vkvar) => { $vkblock } - $dx12name::Dx12($dx12var) => { $dx12block } - $mtlname::Mtl($mtlvar) => { $mtlblock } - } - }; -} - -/// A convenience macro for selecting a shader from included files. 
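A usage sketch for the macro defined just below, assuming a hub `Session` and pre-generated shader artifacts; the relative path and binding list are illustrative.

    use piet_gpu_hal::{include_shader, BindType, Error, Pipeline, Session};

    unsafe fn load_backdrop_pipeline(session: &Session) -> Result<Pipeline, Error> {
        // Embeds the .spv/.hlsl/.dxil/.msl artifacts at compile time and picks
        // the one matching the backend chosen at runtime.
        let code = include_shader!(session, "../shader/gen/backdrop");
        session.create_compute_pipeline(code, &[BindType::Buffer, BindType::BufReadOnly])
    }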
-#[macro_export] -macro_rules! include_shader { - ( $device:expr, $path_base:expr) => { - $device.choose_shader( - include_bytes!(concat!($path_base, ".spv")), - include_str!(concat!($path_base, ".hlsl")), - include_bytes!(concat!($path_base, ".dxil")), - include_str!(concat!($path_base, ".msl")), - ) - }; -} diff --git a/piet-gpu-hal/src/metal.rs b/piet-gpu-hal/src/metal.rs deleted file mode 100644 index 754c089..0000000 --- a/piet-gpu-hal/src/metal.rs +++ /dev/null @@ -1,954 +0,0 @@ -// Copyright 2021 The piet-gpu authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Also licensed under MIT license, at your choice. - -mod clear; -mod timer; -mod util; - -use std::mem; -use std::sync::{Arc, Mutex}; - -use block::Block; -use cocoa_foundation::base::id; -use cocoa_foundation::foundation::{NSInteger, NSUInteger}; -use foreign_types::ForeignType; -use objc::rc::autoreleasepool; -use objc::runtime::{Object, BOOL, YES}; -use objc::{class, msg_send, sel, sel_impl}; - -use core_graphics_types::base::CGFloat; -use metal::{CommandBufferRef, MTLFeatureSet}; - -use raw_window_handle::{RawDisplayHandle, RawWindowHandle}; - -use crate::{ - BufferUsage, ComputePassDescriptor, Error, GpuInfo, ImageFormat, MapMode, WorkgroupLimits, -}; - -use util::*; - -use self::timer::{CounterSampleBuffer, CounterSet, TimeCalibration}; - -pub struct MtlInstance; - -pub struct MtlDevice { - device: metal::Device, - cmd_queue: Arc>, - gpu_info: GpuInfo, - helpers: Arc, - timer_set: Option, - counter_style: CounterStyle, -} - -/// Type of counter sampling. -/// -/// See https://developer.apple.com/documentation/metal/counter_sampling/sampling_gpu_data_into_counter_sample_buffers -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -enum CounterStyle { - None, - Stage, - Command, -} - -pub struct MtlSurface { - layer: metal::MetalLayer, -} - -pub struct MtlSwapchain { - layer: metal::MetalLayer, - cmd_queue: Arc>, - drawable: Mutex>, - n_drawables: usize, - drawable_ix: usize, -} - -#[derive(Clone)] -pub struct Buffer { - buffer: metal::Buffer, - pub(crate) size: u64, -} - -#[derive(Clone)] -pub struct Image { - texture: metal::Texture, - width: u32, - height: u32, -} - -// This is the way gfx-hal does it, but a more Vulkan-like strategy would be -// to have a semaphore that gets signaled from the command buffer's completion -// handler. 
-pub enum Fence { - Idle, - CmdBufPending(metal::CommandBuffer), -} - -pub struct Semaphore; - -pub struct CmdBuf { - cmd_buf: metal::CommandBuffer, - helpers: Arc, - cur_encoder: Encoder, - time_calibration: Arc>, - counter_style: CounterStyle, -} - -enum Encoder { - None, - Compute(metal::ComputeCommandEncoder, Option<(id, u32)>), - Blit(metal::BlitCommandEncoder), -} - -#[derive(Default)] -pub struct QueryPool { - counter_sample_buf: Option, - calibration: Arc>>>>, -} - -pub struct Pipeline(metal::ComputePipelineState); - -#[derive(Default)] -pub struct DescriptorSetBuilder(DescriptorSet); - -#[derive(Default)] -pub struct DescriptorSet { - buffers: Vec, - images: Vec, -} - -struct Helpers { - clear_pipeline: metal::ComputePipelineState, -} - -impl MtlInstance { - pub fn new() -> Result { - Ok(MtlInstance) - } - - pub unsafe fn surface( - &self, - _display_handle: RawDisplayHandle, - window_handle: RawWindowHandle, - ) -> Result { - if let RawWindowHandle::AppKit(handle) = window_handle { - Ok(Self::make_surface(handle.ns_view as id, handle.ns_window as id).unwrap()) - } else { - Err("can't create surface for window handle".into()) - } - } - - unsafe fn make_surface(ns_view: id, ns_window: id) -> Option { - let ca_ml_class = class!(CAMetalLayer); - let is_ca_ml: BOOL = msg_send![ns_view, isKindOfClass: ca_ml_class]; - if is_ca_ml == YES { - todo!("create surface from layer") - } - let layer: id = msg_send![ns_view, layer]; - let use_current = !layer.is_null() && { - let result: BOOL = msg_send![layer, isKindOfClass: ca_ml_class]; - result == YES - }; - let metal_layer = if use_current { - mem::transmute::<_, &metal::MetalLayerRef>(layer).to_owned() - } else { - let metal_layer: metal::MetalLayer = msg_send![ca_ml_class, new]; - let () = msg_send![ns_view, setLayer: metal_layer.as_ref()]; - let () = msg_send![ns_view, setWantsLayer: YES]; - let bounds: CGRect = msg_send![ns_view, bounds]; - let () = msg_send![metal_layer, setFrame: bounds]; - - if !ns_window.is_null() { - let scale_factor: CGFloat = msg_send![ns_window, backingScaleFactor]; - let () = msg_send![metal_layer, setContentsScale: scale_factor]; - } - // gfx-hal sets a delegate here - metal_layer - }; - let () = msg_send![metal_layer, setContentsGravity: kCAGravityTopLeft]; - Some(MtlSurface { layer: metal_layer }) - } - - // TODO might do some enumeration of devices - - pub fn device(&self) -> Result { - if let Some(device) = metal::Device::system_default() { - let cmd_queue = device.new_command_queue(); - Ok(MtlDevice::new_from_raw_mtl(device, cmd_queue)) - } else { - Err("can't create system default Metal device".into()) - } - } - - pub unsafe fn swapchain( - &self, - _width: usize, - _height: usize, - device: &MtlDevice, - surface: &MtlSurface, - ) -> Result { - surface.layer.set_device(&device.device); - let n_drawables = surface.layer.maximum_drawable_count() as usize; - Ok(MtlSwapchain { - layer: surface.layer.to_owned(), - cmd_queue: device.cmd_queue.clone(), - drawable: Default::default(), - n_drawables, - drawable_ix: 0, - }) - } -} - -impl MtlDevice { - pub fn new_from_raw_mtl(device: metal::Device, cmd_queue: metal::CommandQueue) -> MtlDevice { - let is_mac = device.supports_feature_set(MTLFeatureSet::macOS_GPUFamily1_v1); - let is_ios = device.supports_feature_set(MTLFeatureSet::iOS_GPUFamily1_v1); - let version = NSOperatingSystemVersion::get(); - - let use_staging_buffers = - if (is_mac && version.at_least(10, 15)) || (is_ios && version.at_least(13, 0)) { - !device.has_unified_memory() - } else { - 
!device.is_low_power() - }; - // TODO: these are conservative; we need to derive these from - // supports_feature_set queries. - let gpu_info = GpuInfo { - has_descriptor_indexing: false, - has_subgroups: false, - subgroup_size: None, - // The workgroup limits are taken from the minimum of a desktop installation; - // we don't support iOS right now, but in case of testing on those devices it might - // need to change these (or just queried properly). - workgroup_limits: WorkgroupLimits { - max_size: [1024, 1024, 64], - max_invocations: 1024, - }, - has_memory_model: false, - use_staging_buffers, - }; - let helpers = Arc::new(Helpers { - clear_pipeline: clear::make_clear_pipeline(&device), - }); - // Timer stuff - let timer_set = CounterSet::get_timer_counter_set(&device); - let counter_style = if timer_set.is_some() { - if device.supports_counter_sampling(metal::MTLCounterSamplingPoint::AtStageBoundary) { - CounterStyle::Stage - } else if device - .supports_counter_sampling(metal::MTLCounterSamplingPoint::AtDispatchBoundary) - { - CounterStyle::Command - } else { - CounterStyle::None - } - } else { - CounterStyle::None - }; - MtlDevice { - device, - cmd_queue: Arc::new(Mutex::new(cmd_queue)), - gpu_info, - helpers, - timer_set, - counter_style, - } - } - - pub fn cmd_buf_from_raw_mtl(&self, raw_cmd_buf: metal::CommandBuffer) -> CmdBuf { - let cmd_buf = raw_cmd_buf; - let helpers = self.helpers.clone(); - let cur_encoder = Encoder::None; - let time_calibration = Default::default(); - CmdBuf { - cmd_buf, - helpers, - cur_encoder, - time_calibration, - counter_style: self.counter_style, - } - } - - pub fn image_from_raw_mtl(&self, texture: metal::Texture, width: u32, height: u32) -> Image { - Image { - texture, - width, - height, - } - } -} - -impl crate::backend::Device for MtlDevice { - type Buffer = Buffer; - - type Image = Image; - - type Pipeline = Pipeline; - - type DescriptorSet = DescriptorSet; - - type QueryPool = QueryPool; - - type CmdBuf = CmdBuf; - - type Fence = Fence; - - type Semaphore = Semaphore; - - type DescriptorSetBuilder = DescriptorSetBuilder; - - type Sampler = (); - - type ShaderSource = str; - - fn query_gpu_info(&self) -> crate::GpuInfo { - self.gpu_info.clone() - } - - fn create_buffer(&self, size: u64, usage: BufferUsage) -> Result { - let options = if usage.contains(BufferUsage::MAP_READ) { - metal::MTLResourceOptions::StorageModeShared - | metal::MTLResourceOptions::CPUCacheModeDefaultCache - } else if usage.contains(BufferUsage::MAP_WRITE) { - metal::MTLResourceOptions::StorageModeShared - | metal::MTLResourceOptions::CPUCacheModeWriteCombined - } else { - metal::MTLResourceOptions::StorageModePrivate - }; - let buffer = self.device.new_buffer(size, options); - Ok(Buffer { buffer, size }) - } - - unsafe fn destroy_buffer(&self, _buffer: &Self::Buffer) -> Result<(), Error> { - // This defers dropping until the buffer object is dropped. We probably need - // to rethink buffer lifetime if descriptor sets can retain references. - Ok(()) - } - - unsafe fn create_image2d( - &self, - width: u32, - height: u32, - format: ImageFormat, - ) -> Result { - let desc = metal::TextureDescriptor::new(); - desc.set_width(width as u64); - desc.set_height(height as u64); - // These are defaults so don't need to be explicitly set. 
- //desc.set_depth(1); - //desc.set_mipmap_level_count(1); - let mtl_format = match format { - ImageFormat::A8 => metal::MTLPixelFormat::R8Unorm, - ImageFormat::Rgba8 => metal::MTLPixelFormat::RGBA8Unorm, - ImageFormat::Surface => metal::MTLPixelFormat::BGRA8Unorm, - }; - desc.set_pixel_format(mtl_format); - desc.set_usage(metal::MTLTextureUsage::ShaderRead | metal::MTLTextureUsage::ShaderWrite); - let texture = self.device.new_texture(&desc); - Ok(Image { - texture, - width, - height, - }) - } - - unsafe fn destroy_image(&self, _image: &Self::Image) -> Result<(), Error> { - // TODO figure out what we want to do here - Ok(()) - } - - unsafe fn create_compute_pipeline( - &self, - code: &Self::ShaderSource, - _bind_types: &[crate::BindType], - ) -> Result { - let options = metal::CompileOptions::new(); - let library = self.device.new_library_with_source(code, &options)?; - let function = library.get_function("main0", None)?; - let pipeline = self - .device - .new_compute_pipeline_state_with_function(&function)?; - Ok(Pipeline(pipeline)) - } - - unsafe fn descriptor_set_builder(&self) -> Self::DescriptorSetBuilder { - DescriptorSetBuilder::default() - } - - unsafe fn update_buffer_descriptor( - &self, - ds: &mut Self::DescriptorSet, - index: u32, - buf: &Self::Buffer, - ) { - ds.buffers[index as usize] = buf.clone(); - } - - unsafe fn update_image_descriptor( - &self, - ds: &mut Self::DescriptorSet, - index: u32, - image: &Self::Image, - ) { - ds.images[index as usize - ds.buffers.len()] = image.clone(); - } - - fn create_cmd_buf(&self) -> Result { - let cmd_queue = self.cmd_queue.lock().unwrap(); - // A discussion about autorelease pools. - // - // Autorelease pools are a sore point in Rust/Objective-C interop. Basically, - // you can have any two of correctness, ergonomics, and performance. Here we've - // chosen the first two, using the pattern of a fine grained autorelease pool - // to give the Obj-C object Rust-like lifetime semantics whenever objects are - // created as autorelease (by convention, this is any object creation with an - // Obj-C method name that doesn't begin with "new" or "alloc"). - // - // To gain back some of the performance, we'd need a way to wrap an autorelease - // pool over a chunk of work - that could be one frame of rendering, but for - // tests that iterate a number of command buffer submissions, it would need to - // be around that. On non-mac platforms, it would be a no-op. - // - // In any case, this way, the caller doesn't need to worry, and the performance - // hit might not be so bad (perhaps we should measure). 
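A short sketch of the coarser-grained pooling floated in the comment above; this is an assumption about how it might look, not code from this patch:

    // One autorelease pool wrapped around a whole frame of work instead of one
    // per Obj-C allocation; on non-Apple platforms this wrapper would be a no-op.
    objc::rc::autoreleasepool(|| {
        // create command buffers, encode compute passes, and commit them here
    });
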
- - // consider new_command_buffer_with_unretained_references for performance - let cmd_buf = autoreleasepool(|| cmd_queue.new_command_buffer().to_owned()); - let helpers = self.helpers.clone(); - let cur_encoder = Encoder::None; - let time_calibration = Default::default(); - Ok(CmdBuf { - cmd_buf, - helpers, - cur_encoder, - time_calibration, - counter_style: self.counter_style, - }) - } - - unsafe fn destroy_cmd_buf(&self, _cmd_buf: Self::CmdBuf) -> Result<(), Error> { - Ok(()) - } - - fn create_query_pool(&self, n_queries: u32) -> Result { - if let Some(timer_set) = &self.timer_set { - let pool = CounterSampleBuffer::new(&self.device, n_queries as u64, timer_set) - .ok_or("error creating timer query pool")?; - return Ok(QueryPool { - counter_sample_buf: Some(pool), - calibration: Default::default(), - }); - } - Ok(QueryPool::default()) - } - - unsafe fn fetch_query_pool(&self, pool: &Self::QueryPool) -> Result, Error> { - if let Some(raw) = &pool.counter_sample_buf { - let resolved = raw.resolve(); - let calibration = pool.calibration.lock().unwrap(); - if let Some(calibration) = &*calibration { - let calibration = calibration.lock().unwrap(); - let result = resolved - .iter() - .map(|time_ns| calibration.correlate(*time_ns)) - .collect(); - return Ok(result); - } - } - // Maybe should return None indicating it wasn't successful? But that might break. - Ok(Vec::new()) - } - - unsafe fn run_cmd_bufs( - &self, - cmd_bufs: &[&Self::CmdBuf], - _wait_semaphores: &[&Self::Semaphore], - _signal_semaphores: &[&Self::Semaphore], - fence: Option<&mut Self::Fence>, - ) -> Result<(), Error> { - unsafe fn add_scheduled_handler( - cmd_buf: &metal::CommandBufferRef, - block: &Block<(&CommandBufferRef,), ()>, - ) { - msg_send![cmd_buf, addScheduledHandler: block] - } - for cmd_buf in cmd_bufs { - let time_calibration = cmd_buf.time_calibration.clone(); - let start_block = block::ConcreteBlock::new(move |buffer: &metal::CommandBufferRef| { - let device: id = msg_send![buffer, device]; - let mut time_calibration = time_calibration.lock().unwrap(); - let cpu_ts_ptr = &mut time_calibration.cpu_start_ts as *mut _; - let gpu_ts_ptr = &mut time_calibration.gpu_start_ts as *mut _; - // TODO: only do this if supported. - let () = msg_send![device, sampleTimestamps: cpu_ts_ptr gpuTimestamp: gpu_ts_ptr]; - }) - .copy(); - add_scheduled_handler(&cmd_buf.cmd_buf, &start_block); - let time_calibration = cmd_buf.time_calibration.clone(); - let completed_block = - block::ConcreteBlock::new(move |buffer: &metal::CommandBufferRef| { - let device: id = msg_send![buffer, device]; - let mut time_calibration = time_calibration.lock().unwrap(); - let cpu_ts_ptr = &mut time_calibration.cpu_end_ts as *mut _; - let gpu_ts_ptr = &mut time_calibration.gpu_end_ts as *mut _; - // TODO: only do this if supported. 
- let () = - msg_send![device, sampleTimestamps: cpu_ts_ptr gpuTimestamp: gpu_ts_ptr]; - }) - .copy(); - cmd_buf.cmd_buf.add_completed_handler(&completed_block); - cmd_buf.cmd_buf.commit(); - } - if let Some(last_cmd_buf) = cmd_bufs.last() { - if let Some(fence) = fence { - *fence = Fence::CmdBufPending(last_cmd_buf.cmd_buf.to_owned()); - } - } - Ok(()) - } - - unsafe fn map_buffer( - &self, - buffer: &Self::Buffer, - offset: u64, - _size: u64, - _mode: MapMode, - ) -> Result<*mut u8, Error> { - let contents_ptr = buffer.buffer.contents(); - if contents_ptr.is_null() { - return Err("probably trying to map private buffer".into()); - } - Ok((contents_ptr as *mut u8).add(offset as usize)) - } - - unsafe fn unmap_buffer( - &self, - _buffer: &Self::Buffer, - _offset: u64, - _size: u64, - _mode: MapMode, - ) -> Result<(), Error> { - Ok(()) - } - - unsafe fn create_semaphore(&self) -> Result { - Ok(Semaphore) - } - - unsafe fn create_fence(&self, _signaled: bool) -> Result { - // Doesn't handle signaled case. Maybe the fences should have more - // limited functionality than, say, Vulkan. - Ok(Fence::Idle) - } - - unsafe fn destroy_fence(&self, _fence: Self::Fence) -> Result<(), Error> { - Ok(()) - } - - unsafe fn wait_and_reset(&self, fences: Vec<&mut Self::Fence>) -> Result<(), Error> { - for fence in fences { - match fence { - Fence::Idle => (), - Fence::CmdBufPending(cmd_buf) => { - cmd_buf.wait_until_completed(); - // TODO: this would be a good place to check errors, currently - // dropped on the floor. - *fence = Fence::Idle; - } - } - } - Ok(()) - } - - unsafe fn get_fence_status(&self, fence: &mut Self::Fence) -> Result { - match fence { - Fence::Idle => Ok(true), - Fence::CmdBufPending(cmd_buf) => { - Ok(cmd_buf.status() == metal::MTLCommandBufferStatus::Completed) - } - } - } - - unsafe fn create_sampler(&self, params: crate::SamplerParams) -> Result { - todo!() - } -} - -impl crate::backend::CmdBuf for CmdBuf { - unsafe fn begin(&mut self) {} - - unsafe fn finish(&mut self) { - self.flush_encoder(); - } - - unsafe fn flush(&mut self) { - self.flush_encoder(); - } - - unsafe fn reset(&mut self) -> bool { - false - } - - unsafe fn begin_compute_pass(&mut self, desc: &ComputePassDescriptor) { - // TODO: we might want to get better about validation but the following - // assert is likely to trigger, and also a case can be made that - // validation should be done at the hub level, for consistency. 
- //debug_assert!(matches!(self.cur_encoder, Encoder::None)); - self.flush_encoder(); - autoreleasepool(|| { - let (encoder, end_query) = match (&desc.timer_queries, self.counter_style) { - (Some(queries), CounterStyle::Stage) => { - let descriptor: id = - msg_send![class!(MTLComputePassDescriptor), computePassDescriptor]; - let attachments: id = msg_send![descriptor, sampleBufferAttachments]; - let index: NSUInteger = 0; - let attachment: id = msg_send![attachments, objectAtIndexedSubscript: index]; - // Here we break the hub/mux separation a bit, for expedience - #[allow(irrefutable_let_patterns)] - if let crate::hub::QueryPool::Mtl(query_pool) = queries.0 { - if let Some(sample_buf) = &query_pool.counter_sample_buf { - let () = msg_send![attachment, setSampleBuffer: sample_buf.id()]; - } - } - let start_index = queries.1 as NSUInteger; - let end_index = queries.2 as NSInteger; - let () = msg_send![attachment, setStartOfEncoderSampleIndex: start_index]; - let () = msg_send![attachment, setEndOfEncoderSampleIndex: end_index]; - ( - msg_send![ - self.cmd_buf, - computeCommandEncoderWithDescriptor: descriptor - ], - None, - ) - } - (Some(queries), CounterStyle::Command) => { - let encoder = self.cmd_buf.new_compute_command_encoder(); - #[allow(irrefutable_let_patterns)] - let end_query = if let crate::hub::QueryPool::Mtl(query_pool) = queries.0 { - if let Some(sample_buf) = &query_pool.counter_sample_buf { - let sample_index = queries.1 as NSUInteger; - let sample_buf = sample_buf.id(); - let () = msg_send![encoder, sampleCountersInBuffer: sample_buf atSampleIndex: sample_index withBarrier: true]; - Some((sample_buf, queries.2)) - } else { - None - } - } else { - None - }; - (encoder, end_query) - } - _ => (self.cmd_buf.new_compute_command_encoder(), None), - }; - self.cur_encoder = Encoder::Compute(encoder.to_owned(), end_query); - }); - } - - unsafe fn dispatch( - &mut self, - pipeline: &Pipeline, - descriptor_set: &DescriptorSet, - workgroup_count: (u32, u32, u32), - workgroup_size: (u32, u32, u32), - ) { - let encoder = self.compute_command_encoder(); - encoder.set_compute_pipeline_state(&pipeline.0); - let mut buf_ix = 0; - for buffer in &descriptor_set.buffers { - encoder.set_buffer(buf_ix, Some(&buffer.buffer), 0); - buf_ix += 1; - } - let mut img_ix = buf_ix; - for image in &descriptor_set.images { - encoder.set_texture(img_ix, Some(&image.texture)); - img_ix += 1; - } - let workgroup_count = metal::MTLSize { - width: workgroup_count.0 as u64, - height: workgroup_count.1 as u64, - depth: workgroup_count.2 as u64, - }; - let workgroup_size = metal::MTLSize { - width: workgroup_size.0 as u64, - height: workgroup_size.1 as u64, - depth: workgroup_size.2 as u64, - }; - encoder.dispatch_thread_groups(workgroup_count, workgroup_size); - } - - unsafe fn end_compute_pass(&mut self) { - // TODO: might validate that we are in a compute encoder state - self.flush_encoder(); - } - - unsafe fn memory_barrier(&mut self) { - // We'll probably move to explicit barriers, but for now rely on - // Metal's own tracking. - } - - unsafe fn host_barrier(&mut self) {} - - unsafe fn image_barrier( - &mut self, - _image: &Image, - _src_layout: crate::ImageLayout, - _dst_layout: crate::ImageLayout, - ) { - // I think these are being tracked. - } - - unsafe fn clear_buffer(&mut self, buffer: &Buffer, size: Option) { - let size = size.unwrap_or(buffer.size); - let _ = self.compute_command_encoder(); - // Getting this directly is a workaround for a borrow checker issue. 
- if let Encoder::Compute(e, _) = &self.cur_encoder { - clear::encode_clear(e, &self.helpers.clear_pipeline, &buffer.buffer, size); - } - } - - unsafe fn copy_buffer(&mut self, src: &Buffer, dst: &Buffer) { - let encoder = self.blit_command_encoder(); - let size = src.size.min(dst.size); - encoder.copy_from_buffer(&src.buffer, 0, &dst.buffer, 0, size); - } - - unsafe fn copy_image_to_buffer(&mut self, src: &Image, dst: &Buffer) { - let encoder = self.blit_command_encoder(); - assert_eq!(dst.size, (src.width as u64) * (src.height as u64) * 4); - let bytes_per_row = (src.width * 4) as NSUInteger; - let src_size = metal::MTLSize { - width: src.width as NSUInteger, - height: src.height as NSUInteger, - depth: 1, - }; - let origin = metal::MTLOrigin { x: 0, y: 0, z: 0 }; - encoder.copy_from_texture_to_buffer( - &src.texture, - 0, - 0, - origin, - src_size, - &dst.buffer, - 0, - bytes_per_row, - bytes_per_row * src.height as NSUInteger, - metal::MTLBlitOption::empty(), - ); - } - - unsafe fn copy_buffer_to_image(&mut self, src: &Buffer, dst: &Image) { - let encoder = self.blit_command_encoder(); - assert_eq!(src.size, (dst.width as u64) * (dst.height as u64) * 4); - let bytes_per_row = (dst.width * 4) as NSUInteger; - let src_size = metal::MTLSize { - width: dst.width as NSUInteger, - height: dst.height as NSUInteger, - depth: 1, - }; - let origin = metal::MTLOrigin { x: 0, y: 0, z: 0 }; - encoder.copy_from_buffer_to_texture( - &src.buffer, - 0, - bytes_per_row, - bytes_per_row * dst.height as NSUInteger, - src_size, - &dst.texture, - 0, - 0, - origin, - metal::MTLBlitOption::empty(), - ); - } - - unsafe fn blit_image(&mut self, src: &Image, dst: &Image) { - let encoder = self.blit_command_encoder(); - let src_size = metal::MTLSize { - width: src.width.min(dst.width) as NSUInteger, - height: src.width.min(dst.height) as NSUInteger, - depth: 1, - }; - let origin = metal::MTLOrigin { x: 0, y: 0, z: 0 }; - encoder.copy_from_texture( - &src.texture, - 0, - 0, - origin, - src_size, - &dst.texture, - 0, - 0, - origin, - ); - } - - unsafe fn reset_query_pool(&mut self, pool: &QueryPool) { - let mut calibration = pool.calibration.lock().unwrap(); - *calibration = Some(self.time_calibration.clone()); - } - - unsafe fn write_timestamp(&mut self, pool: &QueryPool, query: u32) { - if let Some(buf) = &pool.counter_sample_buf { - if matches!(self.cur_encoder, Encoder::None) { - self.cur_encoder = - Encoder::Compute(self.cmd_buf.new_compute_command_encoder().to_owned(), None); - } - let sample_index = query as NSUInteger; - if self.counter_style == CounterStyle::Command { - match &self.cur_encoder { - Encoder::Compute(e, _) => { - let () = msg_send![e.as_ptr(), sampleCountersInBuffer: buf.id() atSampleIndex: sample_index withBarrier: true]; - } - Encoder::None => unreachable!(), - _ => todo!(), - } - } else if self.counter_style == CounterStyle::Stage { - match &self.cur_encoder { - Encoder::Compute(_e, _) => { - println!("write_timestamp is not supported for stage-style encoders"); - } - _ => (), - } - } - } - } -} - -impl CmdBuf { - fn compute_command_encoder(&mut self) -> &metal::ComputeCommandEncoder { - if !matches!(self.cur_encoder, Encoder::Compute(..)) { - self.flush_encoder(); - self.cur_encoder = - Encoder::Compute(self.cmd_buf.new_compute_command_encoder().to_owned(), None); - } - if let Encoder::Compute(e, _) = &self.cur_encoder { - e - } else { - unreachable!() - } - } - - fn blit_command_encoder(&mut self) -> &metal::BlitCommandEncoder { - if !matches!(self.cur_encoder, Encoder::Blit(_)) { - 
self.flush_encoder(); - self.cur_encoder = Encoder::Blit(self.cmd_buf.new_blit_command_encoder().to_owned()); - } - if let Encoder::Blit(e) = &self.cur_encoder { - e - } else { - unreachable!() - } - } - - fn flush_encoder(&mut self) { - match std::mem::replace(&mut self.cur_encoder, Encoder::None) { - Encoder::Compute(e, Some((sample_buf, end_query))) => { - let sample_index = end_query as NSUInteger; - unsafe { - let () = msg_send![e.as_ptr(), sampleCountersInBuffer: sample_buf atSampleIndex: sample_index withBarrier: true]; - } - e.end_encoding(); - } - Encoder::Compute(e, None) => e.end_encoding(), - Encoder::Blit(e) => e.end_encoding(), - Encoder::None => (), - } - } -} - -impl crate::backend::DescriptorSetBuilder for DescriptorSetBuilder { - fn add_buffers(&mut self, buffers: &[&Buffer]) { - self.0.buffers.extend(buffers.iter().copied().cloned()); - } - - fn add_images(&mut self, images: &[&Image]) { - self.0.images.extend(images.iter().copied().cloned()); - } - - fn add_textures(&mut self, images: &[&Image]) { - self.add_images(images); - } - - unsafe fn build( - self, - _device: &MtlDevice, - _pipeline: &Pipeline, - ) -> Result { - Ok(self.0) - } -} - -impl MtlSwapchain { - pub unsafe fn next(&mut self) -> Result<(usize, Semaphore), Error> { - let drawable_ix = self.drawable_ix; - self.drawable_ix = (drawable_ix + 1) % self.n_drawables; - Ok((drawable_ix, Semaphore)) - } - - pub unsafe fn image(&self, _idx: usize) -> Image { - let (drawable, texture) = autoreleasepool(|| { - let drawable = self.layer.next_drawable().unwrap(); - (drawable.to_owned(), drawable.texture().to_owned()) - }); - *self.drawable.lock().unwrap() = Some(drawable); - let size = self.layer.drawable_size(); - Image { - texture, - width: size.width.round() as u32, - height: size.height.round() as u32, - } - } - - pub unsafe fn present( - &self, - _image_idx: usize, - _semaphores: &[&Semaphore], - ) -> Result { - let drawable = self.drawable.lock().unwrap().take(); - if let Some(drawable) = drawable { - autoreleasepool(|| { - let cmd_queue = self.cmd_queue.lock().unwrap(); - let cmd_buf = cmd_queue.new_command_buffer(); - cmd_buf.present_drawable(&drawable); - cmd_buf.commit(); - }); - } else { - println!("no drawable; present called without acquiring image?"); - } - Ok(false) - } -} - -#[repr(C)] -struct NSOperatingSystemVersion { - major: NSInteger, - minor: NSInteger, - patch: NSInteger, -} - -impl NSOperatingSystemVersion { - fn get() -> NSOperatingSystemVersion { - unsafe { - let process_info: *mut Object = msg_send![class!(NSProcessInfo), processInfo]; - msg_send![process_info, operatingSystemVersion] - } - } - - fn at_least(&self, major: u32, minor: u32) -> bool { - let major = major as NSInteger; - let minor = minor as NSInteger; - self.major > major || (self.major == major && self.minor >= minor) - } -} diff --git a/piet-gpu-hal/src/metal/clear.rs b/piet-gpu-hal/src/metal/clear.rs deleted file mode 100644 index ee9e716..0000000 --- a/piet-gpu-hal/src/metal/clear.rs +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2021 The piet-gpu authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// -// Also licensed under MIT license, at your choice. - -//! The compute shader and stage for clearing buffers. - -use metal::{ComputePipelineState, Device}; - -const CLEAR_MSL: &str = r#" -using namespace metal; - -struct ConfigBuf -{ - uint size; - uint value; -}; - -kernel void main0(const device ConfigBuf& config [[buffer(0)]], device uint *data [[buffer(1)]], uint3 gid [[thread_position_in_grid]]) -{ - uint ix = gid.x; - if (ix < config.size) - { - data[ix] = config.value; - } -} -"#; - -pub fn make_clear_pipeline(device: &Device) -> ComputePipelineState { - let options = metal::CompileOptions::new(); - let library = device.new_library_with_source(CLEAR_MSL, &options).unwrap(); - let function = library.get_function("main0", None).unwrap(); - device - .new_compute_pipeline_state_with_function(&function) - .unwrap() -} - -pub fn encode_clear( - encoder: &metal::ComputeCommandEncoderRef, - clear_pipeline: &ComputePipelineState, - buffer: &metal::Buffer, - size: u64, -) { - // TODO: should be more careful with overflow - let size_in_u32s = (size / 4) as u32; - encoder.set_compute_pipeline_state(&clear_pipeline); - let config = [size_in_u32s, 0]; - encoder.set_bytes( - 0, - std::mem::size_of_val(&config) as u64, - config.as_ptr() as *const _, - ); - encoder.set_buffer(1, Some(buffer), 0); - let n_wg = (size_in_u32s + 255) / 256; - let workgroup_count = metal::MTLSize { - width: n_wg as u64, - height: 1, - depth: 1, - }; - let workgroup_size = metal::MTLSize { - width: 256, - height: 1, - depth: 1, - }; - encoder.dispatch_thread_groups(workgroup_count, workgroup_size); -} diff --git a/piet-gpu-hal/src/metal/timer.rs b/piet-gpu-hal/src/metal/timer.rs deleted file mode 100644 index 65c8026..0000000 --- a/piet-gpu-hal/src/metal/timer.rs +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright 2021 The piet-gpu authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Also licensed under MIT license, at your choice. - -//! Support for timer queries. -//! -//! Likely some of this should be upstreamed into metal-rs. 
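A rough sketch of the intended timing flow at the HAL level, tying this module to the create_query_pool / write_timestamp / fetch_query_pool methods of the Metal device above; variable names are illustrative and error handling is elided:

    // Two samples bracket the work being measured; fetch_query_pool returns
    // seconds once TimeCalibration::correlate has mapped GPU ticks to CPU time.
    let pool = device.create_query_pool(2)?;
    let mut cmd_buf = device.create_cmd_buf()?;
    unsafe {
        cmd_buf.reset_query_pool(&pool);   // attaches the calibration state
        cmd_buf.write_timestamp(&pool, 0);
        // ... dispatches being timed ...
        cmd_buf.write_timestamp(&pool, 1);
    }
    // after submission and completion:
    let timestamps = unsafe { device.fetch_query_pool(&pool)? };
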
- -use std::{ffi::CStr, ptr::null_mut}; - -use cocoa_foundation::{ - base::id, - foundation::{NSRange, NSUInteger}, -}; -use metal::{DeviceRef, MTLStorageMode}; -use objc::{class, msg_send, sel, sel_impl}; - -pub struct CounterSampleBuffer { - id: id, - count: u64, -} - -pub struct CounterSet { - id: id, -} - -#[derive(Default)] -pub struct TimeCalibration { - pub cpu_start_ts: u64, - pub gpu_start_ts: u64, - pub cpu_end_ts: u64, - pub gpu_end_ts: u64, -} - -impl Drop for CounterSampleBuffer { - fn drop(&mut self) { - unsafe { msg_send![self.id, release] } - } -} - -impl Clone for CounterSampleBuffer { - fn clone(&self) -> CounterSampleBuffer { - unsafe { - CounterSampleBuffer { - id: msg_send![self.id, retain], - count: self.count, - } - } - } -} - -impl CounterSampleBuffer { - pub fn id(&self) -> id { - self.id - } -} - -impl Drop for CounterSet { - fn drop(&mut self) { - unsafe { msg_send![self.id, release] } - } -} - -impl CounterSet { - pub fn get_timer_counter_set(device: &DeviceRef) -> Option { - unsafe { - // TODO: version check - let sets: id = msg_send!(device, counterSets); - let count: NSUInteger = msg_send![sets, count]; - for i in 0..count { - let set: id = msg_send![sets, objectAtIndex: i]; - let name: id = msg_send![set, name]; - let name_cstr = CStr::from_ptr(msg_send![name, UTF8String]); - if name_cstr.to_bytes() == b"timestamp" { - return Some(CounterSet { id: set }); - } - } - None - } - } -} - -// copied from metal-rs; should be in common utilities maybe? -fn nsstring_as_str(nsstr: &objc::runtime::Object) -> &str { - let bytes = unsafe { - let bytes: *const std::os::raw::c_char = msg_send![nsstr, UTF8String]; - bytes as *const u8 - }; - let len: NSUInteger = unsafe { msg_send![nsstr, length] }; - unsafe { - let bytes = std::slice::from_raw_parts(bytes, len as usize); - std::str::from_utf8(bytes).unwrap() - } -} - -impl CounterSampleBuffer { - pub fn new( - device: &DeviceRef, - count: u64, - counter_set: &CounterSet, - ) -> Option { - unsafe { - let desc_cls = class!(MTLCounterSampleBufferDescriptor); - let descriptor: id = msg_send![desc_cls, alloc]; - let _: id = msg_send![descriptor, init]; - let count = count as NSUInteger; - let () = msg_send![descriptor, setSampleCount: count]; - let () = msg_send![descriptor, setCounterSet: counter_set.id]; - let () = msg_send![ - descriptor, - setStorageMode: MTLStorageMode::Shared as NSUInteger - ]; - let mut error: id = null_mut(); - let buf: id = msg_send![device, newCounterSampleBufferWithDescriptor: descriptor error: &mut error]; - let () = msg_send![descriptor, release]; - if !error.is_null() { - let description = msg_send![error, localizedDescription]; - println!( - "error allocating sample buffer, code = {}", - nsstring_as_str(description) - ); - let () = msg_send![error, release]; - return None; - } - Some(CounterSampleBuffer { id: buf, count }) - } - } - - // Read the timestamps. - // - // Safety: the lifetime of the returned slice is wrong, it's actually autoreleased. - pub unsafe fn resolve(&self) -> &[u64] { - let range = NSRange::new(0, self.count); - let data: id = msg_send![self.id, resolveCounterRange: range]; - if data.is_null() { - &[] - } else { - let bytes: *const u64 = msg_send![data, bytes]; - std::slice::from_raw_parts(bytes, self.count as usize) - } - } -} - -impl TimeCalibration { - /// Convert GPU timestamp into CPU time base. 
- /// - /// See https://developer.apple.com/documentation/metal/performance_tuning/correlating_cpu_and_gpu_timestamps - pub fn correlate(&self, raw_ts: u64) -> f64 { - let delta_cpu = self.cpu_end_ts - self.cpu_start_ts; - let delta_gpu = self.gpu_end_ts - self.gpu_start_ts; - let adj_ts = if delta_gpu > 0 { - let scale = delta_cpu as f64 / delta_gpu as f64; - self.cpu_start_ts as f64 + (raw_ts as f64 - self.gpu_start_ts as f64) * scale - } else { - // Default is ns on Apple Silicon; on other hardware this will be wrong - raw_ts as f64 - }; - adj_ts * 1e-9 - } -} diff --git a/piet-gpu-hal/src/metal/util.rs b/piet-gpu-hal/src/metal/util.rs deleted file mode 100644 index 869f0a8..0000000 --- a/piet-gpu-hal/src/metal/util.rs +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2021 The piet-gpu authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Also licensed under MIT license, at your choice. - -//! Utilities and types for Metal integration - -use core_graphics_types::{base::CGFloat, geometry::CGSize}; - -#[link(name = "QuartzCore", kind = "framework")] -extern "C" { - #[allow(non_upper_case_globals)] - pub static kCAGravityTopLeft: cocoa_foundation::base::id; -} - -#[repr(C)] -#[derive(Clone, Copy, Debug, Default)] -pub struct CGPoint { - pub x: CGFloat, - pub y: CGFloat, -} - -#[repr(C)] -#[derive(Clone, Copy, Debug, Default)] -pub struct CGRect { - pub origin: CGPoint, - pub size: CGSize, -} diff --git a/piet-gpu-hal/src/mux.rs b/piet-gpu-hal/src/mux.rs deleted file mode 100644 index 6f0731c..0000000 --- a/piet-gpu-hal/src/mux.rs +++ /dev/null @@ -1,925 +0,0 @@ -// Copyright 2021 The piet-gpu authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Also licensed under MIT license, at your choice. - -//! A multiplexer module that selects a back-end at runtime. - -use raw_window_handle::RawDisplayHandle; -use raw_window_handle::RawWindowHandle; -use smallvec::SmallVec; - -mux_cfg! { - #[cfg(vk)] - use crate::vulkan; -} -mux_cfg! { - #[cfg(dx12)] - use crate::dx12; -} -mux_cfg! { - #[cfg(mtl)] - use crate::metal; -} -use crate::backend::CmdBuf as CmdBufTrait; -use crate::backend::DescriptorSetBuilder as DescriptorSetBuilderTrait; -use crate::backend::Device as DeviceTrait; -use crate::BackendType; -use crate::BindType; -use crate::ComputePassDescriptor; -use crate::ImageFormat; -use crate::MapMode; -use crate::{BufferUsage, Error, GpuInfo, ImageLayout, InstanceFlags}; - -mux_enum! { - /// An instance, selected from multiple backends. 
- pub enum Instance { - Vk(vulkan::VkInstance), - Dx12(dx12::Dx12Instance), - Mtl(metal::MtlInstance), - } -} - -mux_enum! { - /// A device, selected from multiple backends. - pub enum Device { - Vk(vulkan::VkDevice), - Dx12(dx12::Dx12Device), - Mtl(metal::MtlDevice), - } -} - -mux_enum! { - /// A surface, which can apply to one of multiple backends. - pub enum Surface { - Vk(vulkan::VkSurface), - Dx12(dx12::Dx12Surface), - Mtl(metal::MtlSurface), - } -} - -mux_enum! { - /// A surface, which can apply to one of multiple backends. - pub enum Swapchain { - Vk(vulkan::VkSwapchain), - Dx12(dx12::Dx12Swapchain), - Mtl(metal::MtlSwapchain), - } -} - -mux_device_enum! { Buffer } -mux_device_enum! { Image } -mux_device_enum! { -/// An object for waiting on command buffer completion. -Fence } -mux_device_enum! { -/// A semaphore for swapchain presentation. -/// -/// Depending on what kind of synchronization is needed for swapchain -/// presentation by the back-end, this may or may not be a "real" -/// semaphore. -Semaphore } -mux_device_enum! { -/// A pipeline object; basically a compiled shader. -Pipeline } -mux_device_enum! { DescriptorSetBuilder } -mux_device_enum! { -/// A descriptor set; a binding of resources for access by a shader. -DescriptorSet } -mux_device_enum! { CmdBuf } -mux_device_enum! { -/// An object for recording timer queries. -QueryPool } -mux_device_enum! { Sampler } - -/// The code for a shader, either as source or intermediate representation. -pub enum ShaderCode<'a> { - /// SPIR-V (binary intermediate representation) - Spv(&'a [u8]), - /// HLSL (source) - Hlsl(&'a str), - /// DXIL (DX12 intermediate language) - Dxil(&'a [u8]), - /// Metal Shading Language (source) - Msl(&'a str), -} - -impl Instance { - /// Create a new GPU instance. - /// - /// When multiple back-end GPU APIs are available (for example, Vulkan - /// and DX12), this function selects one at runtime. - /// - /// When no surface is given, the instance is suitable for compute-only - /// work. - pub fn new(flags: InstanceFlags) -> Result { - let mut backends = [BackendType::Vulkan, BackendType::Dx12]; - if flags.contains(InstanceFlags::DX12) { - backends.swap(0, 1); - } - for backend in backends { - if backend == BackendType::Vulkan { - mux_cfg! { - #[cfg(vk)] - { - if let Ok(instance) = vulkan::VkInstance::new() { - return Ok(Instance::Vk(instance)); - } - } - } - } - if backend == BackendType::Dx12 { - mux_cfg! { - #[cfg(dx12)] - { - if let Ok(instance) = dx12::Dx12Instance::new() { - return Ok(Instance::Dx12(instance)) - } - } - } - } - } - mux_cfg! { - #[cfg(mtl)] - { - if let Ok(instance) = metal::MtlInstance::new() { - return Ok(Instance::Mtl(instance)); - } - } - } - // TODO plumb creation errors through. - Err("No suitable instances found".into()) - } - - /// Create a surface from the specified window handle. - pub unsafe fn surface( - &self, - display_handle: RawDisplayHandle, - window_handle: RawWindowHandle, - ) -> Result { - mux_match! { self; - Instance::Vk(i) => i.surface(display_handle, window_handle).map(Surface::Vk), - Instance::Dx12(i) => i.surface(display_handle, window_handle).map(Surface::Dx12), - Instance::Mtl(i) => i.surface(display_handle, window_handle).map(Surface::Mtl), - } - } - - /// Create a device. - /// - /// The "device" is the low-level GPU abstraction for creating resources - /// and submitting work. Most users of this library will want to wrap it in - /// a "session" which is similar but provides many conveniences. - pub unsafe fn device(&self) -> Result { - mux_match! 
{ self; - Instance::Vk(i) => i.device().map(Device::Vk), - Instance::Dx12(i) => i.device().map(Device::Dx12), - Instance::Mtl(i) => i.device().map(Device::Mtl), - } - } - - /// Create a swapchain. - /// - /// A swapchain is a small vector of images shared with the platform's - /// presentation logic. To actually display pixels, the application writes - /// into the swapchain images, then calls the present method to display - /// them. - pub unsafe fn swapchain( - &self, - width: usize, - height: usize, - device: &Device, - surface: &Surface, - ) -> Result { - mux_match! { self; - Instance::Vk(i) => i - .swapchain(width, height, device.vk(), surface.vk()) - .map(Swapchain::Vk), - Instance::Dx12(i) => i - .swapchain(width, height, device.dx12(), surface.dx12()) - .map(Swapchain::Dx12), - Instance::Mtl(i) => i - .swapchain(width, height, device.mtl(), surface.mtl()) - .map(Swapchain::Mtl), - } - } -} - -// This is basically re-exporting the backend device trait, and we could do that, -// but not doing so lets us diverge more easily (at the moment, the divergence is -// missing functionality). -impl Device { - #[cfg(target_os = "macos")] - pub fn new_from_raw_mtl( - device: &::metal::DeviceRef, - queue: &::metal::CommandQueueRef, - ) -> Device { - Device::Mtl(metal::MtlDevice::new_from_raw_mtl( - device.to_owned(), - queue.to_owned(), - )) - } - - #[cfg(target_os = "macos")] - pub fn cmd_buf_from_raw_mtl(&self, raw_cmd_buf: &::metal::CommandBufferRef) -> CmdBuf { - // Note: this will cause problems if we support multiple back-ends on mac. But it will - // be a compile error; - let Device::Mtl(d) = self; - CmdBuf::Mtl(d.cmd_buf_from_raw_mtl(raw_cmd_buf.to_owned())) - } - - #[cfg(target_os = "macos")] - pub fn image_from_raw_mtl( - &self, - raw_texture: &::metal::TextureRef, - width: u32, - height: u32, - ) -> Image { - // Note: this will cause problems if we support multiple back-ends on mac. But it will - // be a compile error; - let Device::Mtl(d) = self; - Image::Mtl(d.image_from_raw_mtl(raw_texture.to_owned(), width, height)) - } - - pub fn query_gpu_info(&self) -> GpuInfo { - mux_match! { self; - Device::Vk(d) => d.query_gpu_info(), - Device::Dx12(d) => d.query_gpu_info(), - Device::Mtl(d) => d.query_gpu_info(), - } - } - - pub fn create_buffer(&self, size: u64, usage: BufferUsage) -> Result { - mux_match! { self; - Device::Vk(d) => d.create_buffer(size, usage).map(Buffer::Vk), - Device::Dx12(d) => d.create_buffer(size, usage).map(Buffer::Dx12), - Device::Mtl(d) => d.create_buffer(size, usage).map(Buffer::Mtl), - } - } - - pub unsafe fn destroy_buffer(&self, buffer: &Buffer) -> Result<(), Error> { - mux_match! { self; - Device::Vk(d) => d.destroy_buffer(buffer.vk()), - Device::Dx12(d) => d.destroy_buffer(buffer.dx12()), - Device::Mtl(d) => d.destroy_buffer(buffer.mtl()), - } - } - - pub unsafe fn create_image2d( - &self, - width: u32, - height: u32, - format: ImageFormat, - ) -> Result { - mux_match! { self; - Device::Vk(d) => d.create_image2d(width, height, format).map(Image::Vk), - Device::Dx12(d) => d.create_image2d(width, height, format).map(Image::Dx12), - Device::Mtl(d) => d.create_image2d(width, height, format).map(Image::Mtl), - } - } - - pub unsafe fn destroy_image(&self, image: &Image) -> Result<(), Error> { - mux_match! { self; - Device::Vk(d) => d.destroy_image(image.vk()), - Device::Dx12(d) => d.destroy_image(image.dx12()), - Device::Mtl(d) => d.destroy_image(image.mtl()), - } - } - - pub unsafe fn create_fence(&self, signaled: bool) -> Result { - mux_match! 
{ self; - Device::Vk(d) => d.create_fence(signaled).map(Fence::Vk), - Device::Dx12(d) => d.create_fence(signaled).map(Fence::Dx12), - Device::Mtl(d) => d.create_fence(signaled).map(Fence::Mtl), - } - } - - pub unsafe fn destroy_fence(&self, fence: Fence) -> Result<(), Error> { - mux_match! { self; - Device::Vk(d) => d.destroy_fence(fence.vk_owned()), - Device::Dx12(d) => d.destroy_fence(fence.dx12_owned()), - Device::Mtl(d) => d.destroy_fence(fence.mtl_owned()), - } - } - - // Consider changing Vec to iterator (as is done in gfx-hal) - pub unsafe fn wait_and_reset(&self, fences: Vec<&mut Fence>) -> Result<(), Error> { - mux_match! { self; - Device::Vk(d) => { - let fences = fences - .into_iter() - .map(|f| f.vk_mut()) - .collect::>(); - d.wait_and_reset(fences) - } - Device::Dx12(d) => { - let fences = fences - .into_iter() - .map(|f| f.dx12_mut()) - .collect::>(); - d.wait_and_reset(fences) - } - Device::Mtl(d) => { - let fences = fences - .into_iter() - .map(|f| f.mtl_mut()) - .collect::>(); - d.wait_and_reset(fences) - } - } - } - - pub unsafe fn get_fence_status(&self, fence: &mut Fence) -> Result { - mux_match! { self; - Device::Vk(d) => d.get_fence_status(fence.vk_mut()), - Device::Dx12(d) => d.get_fence_status(fence.dx12_mut()), - Device::Mtl(d) => d.get_fence_status(fence.mtl_mut()), - } - } - - pub unsafe fn create_semaphore(&self) -> Result { - mux_match! { self; - Device::Vk(d) => d.create_semaphore().map(Semaphore::Vk), - Device::Dx12(d) => d.create_semaphore().map(Semaphore::Dx12), - Device::Mtl(d) => d.create_semaphore().map(Semaphore::Mtl), - } - } - - pub unsafe fn create_compute_pipeline<'a>( - &self, - code: ShaderCode<'a>, - bind_types: &[BindType], - ) -> Result { - mux_match! { self; - Device::Vk(d) => { - let shader_code = match code { - ShaderCode::Spv(spv) => spv, - // Panic or return "incompatible shader" error here? - _ => panic!("Vulkan backend requires shader code in SPIR-V format"), - }; - d.create_compute_pipeline(shader_code, bind_types) - .map(Pipeline::Vk) - } - Device::Dx12(d) => { - let shader_code = match code { - //ShaderCode::Hlsl(hlsl) => hlsl, - ShaderCode::Dxil(dxil) => dxil, - // Panic or return "incompatible shader" error here? - _ => panic!("DX12 backend requires shader code in DXIL format"), - }; - d.create_compute_pipeline(shader_code, bind_types) - .map(Pipeline::Dx12) - } - Device::Mtl(d) => { - let shader_code = match code { - ShaderCode::Msl(msl) => msl, - // Panic or return "incompatible shader" error here? - _ => panic!("Metal backend requires shader code in MSL format"), - }; - d.create_compute_pipeline(shader_code, bind_types) - .map(Pipeline::Mtl) - } - } - } - - pub unsafe fn descriptor_set_builder(&self) -> DescriptorSetBuilder { - mux_match! { self; - Device::Vk(d) => DescriptorSetBuilder::Vk(d.descriptor_set_builder()), - Device::Dx12(d) => DescriptorSetBuilder::Dx12(d.descriptor_set_builder()), - Device::Mtl(d) => DescriptorSetBuilder::Mtl(d.descriptor_set_builder()), - } - } - - pub unsafe fn update_buffer_descriptor( - &self, - ds: &mut DescriptorSet, - index: u32, - buffer: &Buffer, - ) { - mux_match! { self; - Device::Vk(d) => d.update_buffer_descriptor(ds.vk_mut(), index, buffer.vk()), - Device::Dx12(d) => d.update_buffer_descriptor(ds.dx12_mut(), index, buffer.dx12()), - Device::Mtl(d) => d.update_buffer_descriptor(ds.mtl_mut(), index, buffer.mtl()), - } - } - - pub unsafe fn update_image_descriptor( - &self, - ds: &mut DescriptorSet, - index: u32, - image: &Image, - ) { - mux_match! 
{ self; - Device::Vk(d) => d.update_image_descriptor(ds.vk_mut(), index, image.vk()), - Device::Dx12(d) => d.update_image_descriptor(ds.dx12_mut(), index, image.dx12()), - Device::Mtl(d) => d.update_image_descriptor(ds.mtl_mut(), index, image.mtl()), - } - } - - pub fn create_cmd_buf(&self) -> Result { - mux_match! { self; - Device::Vk(d) => d.create_cmd_buf().map(CmdBuf::Vk), - Device::Dx12(d) => d.create_cmd_buf().map(CmdBuf::Dx12), - Device::Mtl(d) => d.create_cmd_buf().map(CmdBuf::Mtl), - } - } - - pub unsafe fn destroy_cmd_buf(&self, cmd_buf: CmdBuf) -> Result<(), Error> { - mux_match! { self; - Device::Vk(d) => d.destroy_cmd_buf(cmd_buf.vk_owned()), - Device::Dx12(d) => d.destroy_cmd_buf(cmd_buf.dx12_owned()), - Device::Mtl(d) => d.destroy_cmd_buf(cmd_buf.mtl_owned()), - } - } - - pub fn create_query_pool(&self, n_queries: u32) -> Result { - mux_match! { self; - Device::Vk(d) => d.create_query_pool(n_queries).map(QueryPool::Vk), - Device::Dx12(d) => d.create_query_pool(n_queries).map(QueryPool::Dx12), - Device::Mtl(d) => d.create_query_pool(n_queries).map(QueryPool::Mtl), - } - } - - pub unsafe fn fetch_query_pool(&self, pool: &QueryPool) -> Result, Error> { - mux_match! { self; - Device::Vk(d) => d.fetch_query_pool(pool.vk()), - Device::Dx12(d) => d.fetch_query_pool(pool.dx12()), - Device::Mtl(d) => d.fetch_query_pool(pool.mtl()), - } - } - - pub unsafe fn run_cmd_bufs( - &self, - cmd_bufs: &[&CmdBuf], - wait_semaphores: &[&Semaphore], - signal_semaphores: &[&Semaphore], - fence: Option<&mut Fence>, - ) -> Result<(), Error> { - mux_match! { self; - Device::Vk(d) => d.run_cmd_bufs( - &cmd_bufs - .iter() - .map(|c| c.vk()) - .collect::>(), - &wait_semaphores - .iter() - .copied() - .map(Semaphore::vk) - .collect::>(), - &signal_semaphores - .iter() - .copied() - .map(Semaphore::vk) - .collect::>(), - fence.map(Fence::vk_mut), - ), - Device::Dx12(d) => d.run_cmd_bufs( - &cmd_bufs - .iter() - .map(|c| c.dx12()) - .collect::>(), - &wait_semaphores - .iter() - .copied() - .map(Semaphore::dx12) - .collect::>(), - &signal_semaphores - .iter() - .copied() - .map(Semaphore::dx12) - .collect::>(), - fence.map(Fence::dx12_mut), - ), - Device::Mtl(d) => d.run_cmd_bufs( - &cmd_bufs - .iter() - .map(|c| c.mtl()) - .collect::>(), - &wait_semaphores - .iter() - .copied() - .map(Semaphore::mtl) - .collect::>(), - &signal_semaphores - .iter() - .copied() - .map(Semaphore::mtl) - .collect::>(), - fence.map(Fence::mtl_mut), - ), - } - } - - pub unsafe fn map_buffer( - &self, - buffer: &Buffer, - offset: u64, - size: u64, - mode: MapMode, - ) -> Result<*mut u8, Error> { - mux_match! { self; - Device::Vk(d) => d.map_buffer(buffer.vk(), offset, size, mode), - Device::Dx12(d) => d.map_buffer(buffer.dx12(), offset, size, mode), - Device::Mtl(d) => d.map_buffer(buffer.mtl(), offset, size, mode), - } - } - - pub unsafe fn unmap_buffer( - &self, - buffer: &Buffer, - offset: u64, - size: u64, - mode: MapMode, - ) -> Result<(), Error> { - mux_match! { self; - Device::Vk(d) => d.unmap_buffer(buffer.vk(), offset, size, mode), - Device::Dx12(d) => d.unmap_buffer(buffer.dx12(), offset, size, mode), - Device::Mtl(d) => d.unmap_buffer(buffer.mtl(), offset, size, mode), - } - } - - /// Choose shader code from the available choices. - pub fn choose_shader<'a>( - &self, - _spv: &'a [u8], - _hlsl: &'a str, - _dxil: &'a [u8], - _msl: &'a str, - ) -> ShaderCode<'a> { - mux_match! 
{ self; - Device::Vk(_d) => ShaderCode::Spv(_spv), - Device::Dx12(_d) => ShaderCode::Dxil(_dxil), - Device::Mtl(_d) => ShaderCode::Msl(_msl), - } - } - - pub fn backend_type(&self) -> BackendType { - mux_match! { self; - Device::Vk(_d) => BackendType::Vulkan, - Device::Dx12(_d) => BackendType::Dx12, - Device::Mtl(_d) => BackendType::Metal, - } - } -} - -impl DescriptorSetBuilder { - pub fn add_buffers(&mut self, buffers: &[&Buffer]) { - mux_match! { self; - DescriptorSetBuilder::Vk(x) => x.add_buffers( - &buffers - .iter() - .copied() - .map(Buffer::vk) - .collect::>(), - ), - DescriptorSetBuilder::Dx12(x) => x.add_buffers( - &buffers - .iter() - .copied() - .map(Buffer::dx12) - .collect::>(), - ), - DescriptorSetBuilder::Mtl(x) => x.add_buffers( - &buffers - .iter() - .copied() - .map(Buffer::mtl) - .collect::>(), - ), - } - } - - pub fn add_images(&mut self, images: &[&Image]) { - mux_match! { self; - DescriptorSetBuilder::Vk(x) => x.add_images( - &images - .iter() - .copied() - .map(Image::vk) - .collect::>(), - ), - DescriptorSetBuilder::Dx12(x) => x.add_images( - &images - .iter() - .copied() - .map(Image::dx12) - .collect::>(), - ), - DescriptorSetBuilder::Mtl(x) => x.add_images( - &images - .iter() - .copied() - .map(Image::mtl) - .collect::>(), - ), - } - } - - pub fn add_textures(&mut self, images: &[&Image]) { - mux_match! { self; - DescriptorSetBuilder::Vk(x) => x.add_textures( - &images - .iter() - .copied() - .map(Image::vk) - .collect::>(), - ), - DescriptorSetBuilder::Dx12(x) => x.add_textures( - &images - .iter() - .copied() - .map(Image::dx12) - .collect::>(), - ), - DescriptorSetBuilder::Mtl(x) => x.add_textures( - &images - .iter() - .copied() - .map(Image::mtl) - .collect::>(), - ), - } - } - - pub unsafe fn build( - self, - device: &Device, - pipeline: &Pipeline, - ) -> Result { - mux_match! { self; - DescriptorSetBuilder::Vk(x) => - x.build(device.vk(), pipeline.vk()).map(DescriptorSet::Vk), - DescriptorSetBuilder::Dx12(x) => x - .build(device.dx12(), pipeline.dx12()) - .map(DescriptorSet::Dx12), - DescriptorSetBuilder::Mtl(x) => x - .build(device.mtl(), pipeline.mtl()) - .map(DescriptorSet::Mtl), - } - } -} - -impl CmdBuf { - pub unsafe fn begin(&mut self) { - mux_match! { self; - CmdBuf::Vk(c) => c.begin(), - CmdBuf::Dx12(c) => c.begin(), - CmdBuf::Mtl(c) => c.begin(), - } - } - - pub unsafe fn flush(&mut self) { - mux_match! { self; - CmdBuf::Vk(c) => c.flush(), - CmdBuf::Dx12(c) => c.flush(), - CmdBuf::Mtl(c) => c.flush(), - } - } - - pub unsafe fn finish(&mut self) { - mux_match! { self; - CmdBuf::Vk(c) => c.finish(), - CmdBuf::Dx12(c) => c.finish(), - CmdBuf::Mtl(c) => c.finish(), - } - } - - pub unsafe fn reset(&mut self) -> bool { - mux_match! { self; - CmdBuf::Vk(c) => c.reset(), - CmdBuf::Dx12(c) => c.reset(), - CmdBuf::Mtl(c) => c.reset(), - } - } - - pub unsafe fn begin_compute_pass(&mut self, desc: &ComputePassDescriptor) { - mux_match! { self; - CmdBuf::Vk(c) => c.begin_compute_pass(desc), - CmdBuf::Dx12(c) => c.begin_compute_pass(desc), - CmdBuf::Mtl(c) => c.begin_compute_pass(desc), - } - } - - /// Dispatch a compute shader. - /// - /// Note that both the number of workgroups (`workgroup_count`) and the number of - /// threads in a workgroup (`workgroup_size`) are given. The latter is needed on - /// Metal, while it's baked into the shader on Vulkan and DX12. - /// - /// Perhaps we'll have a mechanism to plumb the latter value to configure the size - /// of a workgroup using specialization constants in the future. 
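A minimal sketch of the workgroup_count / workgroup_size distinction described above, assuming a kernel compiled for 256-wide workgroups and cmd_buf, pipeline, and descriptor_set already in scope:

    // workgroup_size echoes what the shader was built with (Metal needs it at
    // dispatch time); workgroup_count is how many such groups cover the input.
    let n_elements: u32 = 1 << 20;
    let workgroup_size = (256u32, 1u32, 1u32);
    let workgroup_count = ((n_elements + 255) / 256, 1u32, 1u32);
    unsafe {
        cmd_buf.dispatch(&pipeline, &descriptor_set, workgroup_count, workgroup_size);
    }
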
- pub unsafe fn dispatch( - &mut self, - pipeline: &Pipeline, - descriptor_set: &DescriptorSet, - workgroup_count: (u32, u32, u32), - workgroup_size: (u32, u32, u32), - ) { - mux_match! { self; - CmdBuf::Vk(c) => c.dispatch(pipeline.vk(), descriptor_set.vk(), workgroup_count, workgroup_size), - CmdBuf::Dx12(c) => c.dispatch(pipeline.dx12(), descriptor_set.dx12(), workgroup_count, workgroup_size), - CmdBuf::Mtl(c) => c.dispatch(pipeline.mtl(), descriptor_set.mtl(), workgroup_count, workgroup_size), - } - } - - pub unsafe fn end_compute_pass(&mut self) { - mux_match! { self; - CmdBuf::Vk(c) => c.end_compute_pass(), - CmdBuf::Dx12(c) => c.end_compute_pass(), - CmdBuf::Mtl(c) => c.end_compute_pass(), - } - } - - pub unsafe fn memory_barrier(&mut self) { - mux_match! { self; - CmdBuf::Vk(c) => c.memory_barrier(), - CmdBuf::Dx12(c) => c.memory_barrier(), - CmdBuf::Mtl(c) => c.memory_barrier(), - } - } - - pub unsafe fn host_barrier(&mut self) { - mux_match! { self; - CmdBuf::Vk(c) => c.host_barrier(), - CmdBuf::Dx12(c) => c.host_barrier(), - CmdBuf::Mtl(c) => c.host_barrier(), - } - } - - pub unsafe fn image_barrier( - &mut self, - image: &Image, - src_layout: ImageLayout, - dst_layout: ImageLayout, - ) { - mux_match! { self; - CmdBuf::Vk(c) => c.image_barrier(image.vk(), src_layout, dst_layout), - CmdBuf::Dx12(c) => c.image_barrier(image.dx12(), src_layout, dst_layout), - CmdBuf::Mtl(c) => c.image_barrier(image.mtl(), src_layout, dst_layout), - } - } - - pub unsafe fn clear_buffer(&mut self, buffer: &Buffer, size: Option) { - mux_match! { self; - CmdBuf::Vk(c) => c.clear_buffer(buffer.vk(), size), - CmdBuf::Dx12(c) => c.clear_buffer(buffer.dx12(), size), - CmdBuf::Mtl(c) => c.clear_buffer(buffer.mtl(), size), - } - } - - pub unsafe fn copy_buffer(&mut self, src: &Buffer, dst: &Buffer) { - mux_match! { self; - CmdBuf::Vk(c) => c.copy_buffer(src.vk(), dst.vk()), - CmdBuf::Dx12(c) => c.copy_buffer(src.dx12(), dst.dx12()), - CmdBuf::Mtl(c) => c.copy_buffer(src.mtl(), dst.mtl()), - } - } - - pub unsafe fn copy_image_to_buffer(&mut self, src: &Image, dst: &Buffer) { - mux_match! { self; - CmdBuf::Vk(c) => c.copy_image_to_buffer(src.vk(), dst.vk()), - CmdBuf::Dx12(c) => c.copy_image_to_buffer(src.dx12(), dst.dx12()), - CmdBuf::Mtl(c) => c.copy_image_to_buffer(src.mtl(), dst.mtl()), - } - } - - pub unsafe fn copy_buffer_to_image(&mut self, src: &Buffer, dst: &Image) { - mux_match! { self; - CmdBuf::Vk(c) => c.copy_buffer_to_image(src.vk(), dst.vk()), - CmdBuf::Dx12(c) => c.copy_buffer_to_image(src.dx12(), dst.dx12()), - CmdBuf::Mtl(c) => c.copy_buffer_to_image(src.mtl(), dst.mtl()), - } - } - - pub unsafe fn blit_image(&mut self, src: &Image, dst: &Image) { - mux_match! { self; - CmdBuf::Vk(c) => c.blit_image(src.vk(), dst.vk()), - CmdBuf::Dx12(c) => c.blit_image(src.dx12(), dst.dx12()), - CmdBuf::Mtl(c) => c.blit_image(src.mtl(), dst.mtl()), - } - } - - pub unsafe fn reset_query_pool(&mut self, pool: &QueryPool) { - mux_match! { self; - CmdBuf::Vk(c) => c.reset_query_pool(pool.vk()), - CmdBuf::Dx12(c) => c.reset_query_pool(pool.dx12()), - CmdBuf::Mtl(c) => c.reset_query_pool(pool.mtl()), - } - } - - pub unsafe fn write_timestamp(&mut self, pool: &QueryPool, query: u32) { - mux_match! { self; - CmdBuf::Vk(c) => c.write_timestamp(pool.vk(), query), - CmdBuf::Dx12(c) => c.write_timestamp(pool.dx12(), query), - CmdBuf::Mtl(c) => c.write_timestamp(pool.mtl(), query), - } - } - - pub unsafe fn finish_timestamps(&mut self, pool: &QueryPool) { - mux_match! 
{ self; - CmdBuf::Vk(c) => c.finish_timestamps(pool.vk()), - CmdBuf::Dx12(c) => c.finish_timestamps(pool.dx12()), - CmdBuf::Mtl(c) => c.finish_timestamps(pool.mtl()), - } - } - - pub unsafe fn begin_debug_label(&mut self, label: &str) { - mux_match! { self; - CmdBuf::Vk(c) => c.begin_debug_label(label), - CmdBuf::Dx12(c) => c.begin_debug_label(label), - CmdBuf::Mtl(c) => c.begin_debug_label(label), - } - } - - pub unsafe fn end_debug_label(&mut self) { - mux_match! { self; - CmdBuf::Vk(c) => c.end_debug_label(), - CmdBuf::Dx12(c) => c.end_debug_label(), - CmdBuf::Mtl(c) => c.end_debug_label(), - } - } -} - -impl Buffer { - pub fn size(&self) -> u64 { - mux_match! { self; - Buffer::Vk(b) => b.size, - Buffer::Dx12(b) => b.size, - Buffer::Mtl(b) => b.size, - } - } -} - -impl Swapchain { - pub unsafe fn next(&mut self) -> Result<(usize, Semaphore), Error> { - mux_match! { self; - Swapchain::Vk(s) => { - let (idx, sem) = s.next()?; - Ok((idx, Semaphore::Vk(sem))) - } - Swapchain::Dx12(s) => { - let (idx, sem) = s.next()?; - Ok((idx, Semaphore::Dx12(sem))) - } - Swapchain::Mtl(s) => { - let (idx, sem) = s.next()?; - Ok((idx, Semaphore::Mtl(sem))) - } - } - } - - pub unsafe fn image(&self, idx: usize) -> crate::Image { - crate::Image::wrap_swapchain_image(self.image_raw(idx)) - } - - pub unsafe fn image_raw(&self, idx: usize) -> Image { - mux_match! { self; - Swapchain::Vk(s) => Image::Vk(s.image(idx)), - Swapchain::Dx12(s) => Image::Dx12(s.image(idx)), - Swapchain::Mtl(s) => Image::Mtl(s.image(idx)), - } - } - - pub unsafe fn present( - &self, - image_idx: usize, - semaphores: &[&Semaphore], - ) -> Result { - mux_match! { self; - Swapchain::Vk(s) => s.present( - image_idx, - &semaphores - .iter() - .copied() - .map(Semaphore::vk) - .collect::>(), - ), - Swapchain::Dx12(s) => s.present( - image_idx, - &semaphores - .iter() - .copied() - .map(Semaphore::dx12) - .collect::>(), - ), - Swapchain::Mtl(s) => s.present( - image_idx, - &semaphores - .iter() - .copied() - .map(Semaphore::mtl) - .collect::>(), - ), - } - } -} diff --git a/piet-gpu-hal/src/vulkan.rs b/piet-gpu-hal/src/vulkan.rs deleted file mode 100644 index 7189b8d..0000000 --- a/piet-gpu-hal/src/vulkan.rs +++ /dev/null @@ -1,1526 +0,0 @@ -//! Vulkan implemenation of HAL trait. - -use std::borrow::Cow; -use std::convert::TryInto; -use std::ffi::{CStr, CString}; -use std::os::raw::c_char; -use std::sync::Arc; - -use ash::extensions::{ext::DebugUtils, khr}; -use ash::vk::DebugUtilsLabelEXT; -use ash::{vk, Device, Entry, Instance}; - -use raw_window_handle::{RawDisplayHandle, RawWindowHandle}; -use smallvec::SmallVec; - -use crate::backend::Device as DeviceTrait; -use crate::{ - BindType, BufferUsage, ComputePassDescriptor, Error, GpuInfo, ImageFormat, ImageLayout, - MapMode, SamplerParams, SubgroupSize, WorkgroupLimits, -}; - -pub struct VkInstance { - /// Retain the dynamic lib. 
- #[allow(unused)] - entry: Entry, - instance: Instance, - vk_version: u32, - dbg_loader: Option, - _dbg_callbk: Option, -} - -pub struct VkDevice { - device: Arc, - physical_device: vk::PhysicalDevice, - device_mem_props: vk::PhysicalDeviceMemoryProperties, - queue: vk::Queue, - qfi: u32, - timestamp_period: f32, - gpu_info: GpuInfo, -} - -struct RawDevice { - device: Device, - dbg_loader: Option, -} - -pub struct VkSurface { - surface: vk::SurfaceKHR, - surface_fn: khr::Surface, -} - -pub struct VkSwapchain { - swapchain: vk::SwapchainKHR, - swapchain_fn: khr::Swapchain, - - present_queue: vk::Queue, - - acquisition_idx: usize, - acquisition_semaphores: Vec, // same length as `images` - images: Vec, - extent: vk::Extent2D, -} - -/// A handle to a buffer. -/// -/// There is no lifetime tracking at this level; the caller is responsible -/// for destroying the buffer at the appropriate time. -pub struct Buffer { - buffer: vk::Buffer, - buffer_memory: vk::DeviceMemory, - // TODO: there should probably be a Buffer trait and this should be a method. - pub size: u64, -} - -pub struct Image { - image: vk::Image, - image_memory: vk::DeviceMemory, - image_view: vk::ImageView, - extent: vk::Extent3D, -} - -pub struct Pipeline { - pipeline: vk::Pipeline, - descriptor_set_layout: vk::DescriptorSetLayout, - pipeline_layout: vk::PipelineLayout, -} - -pub struct DescriptorSet { - descriptor_set: vk::DescriptorSet, -} - -pub struct CmdBuf { - cmd_buf: vk::CommandBuffer, - cmd_pool: vk::CommandPool, - device: Arc, - end_query: Option<(vk::QueryPool, u32)>, -} - -pub struct QueryPool { - pool: vk::QueryPool, - n_queries: u32, -} - -#[derive(Clone, Copy)] -pub struct MemFlags(vk::MemoryPropertyFlags); - -pub struct DescriptorSetBuilder { - buffers: Vec, - images: Vec, - textures: Vec, - // TODO: we had a sampler here, might need it again -} - -struct Extensions { - exts: Vec<*const c_char>, - exist_exts: Vec, -} - -struct Layers { - layers: Vec<*const c_char>, - exist_layers: Vec, -} - -unsafe extern "system" fn vulkan_debug_callback( - message_severity: vk::DebugUtilsMessageSeverityFlagsEXT, - message_type: vk::DebugUtilsMessageTypeFlagsEXT, - p_callback_data: *const vk::DebugUtilsMessengerCallbackDataEXT, - _user_data: *mut std::os::raw::c_void, -) -> vk::Bool32 { - let callback_data = &*p_callback_data; - let message_id_number: i32 = callback_data.message_id_number as i32; - - let message_id_name = if callback_data.p_message_id_name.is_null() { - Cow::from("") - } else { - CStr::from_ptr(callback_data.p_message_id_name).to_string_lossy() - }; - - let message = if callback_data.p_message.is_null() { - Cow::from("") - } else { - CStr::from_ptr(callback_data.p_message).to_string_lossy() - }; - - println!( - "{:?}:\n{:?} [{} ({})] : {}\n", - message_severity, message_type, message_id_name, message_id_number, message, - ); - - vk::FALSE -} - -impl VkInstance { - /// Create a new instance. - /// - /// There's more to be done to make this suitable for integration with other - /// systems, but for now the goal is to make things simple. 
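A minimal bootstrap sketch for the constructor documented above; in practice this backend is reached through the mux::Instance wrapper rather than directly, and error handling is elided:

    // Compute-only setup; a windowed path would additionally create a surface
    // via instance.surface(display_handle, window_handle).
    let instance = VkInstance::new()?;
    let device = unsafe { instance.device()? };
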
- pub fn new() -> Result { - unsafe { - let app_name = CString::new("VkToy").unwrap(); - let entry = Entry::load()?; - - let mut layers = Layers::new(entry.enumerate_instance_layer_properties()?); - if cfg!(debug_assertions) { - layers - .try_add(CStr::from_bytes_with_nul(b"VK_LAYER_KHRONOS_validation\0").unwrap()); - } - - let mut exts = Extensions::new(entry.enumerate_instance_extension_properties(None)?); - let mut has_debug_ext = false; - if cfg!(debug_assertions) { - has_debug_ext = exts.try_add(DebugUtils::name()); - } - - // Enable platform specific surface extensions. - exts.try_add(khr::Surface::name()); - - #[cfg(target_os = "windows")] - exts.try_add(khr::Win32Surface::name()); - - #[cfg(any( - target_os = "linux", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "netbsd", - target_os = "openbsd" - ))] - { - exts.try_add(khr::XlibSurface::name()); - exts.try_add(khr::XcbSurface::name()); - exts.try_add(khr::WaylandSurface::name()); - } - - #[cfg(any(target_os = "android"))] - exts.try_add(khr::AndroidSurface::name()); - - #[cfg(any(target_os = "macos", target_os = "ios"))] - exts.try_add(kkr::MetalSurface::name()); - - let supported_version = entry - .try_enumerate_instance_version()? - .unwrap_or(vk::make_api_version(0, 1, 0, 0)); - let vk_version = if supported_version >= vk::make_api_version(0, 1, 1, 0) { - // We need Vulkan 1.1 to do subgroups; most other things can be extensions. - vk::make_api_version(0, 1, 1, 0) - } else { - vk::make_api_version(0, 1, 0, 0) - }; - - let instance = entry.create_instance( - &vk::InstanceCreateInfo::builder() - .application_info( - &vk::ApplicationInfo::builder() - .application_name(&app_name) - .application_version(0) - .engine_name(&app_name) - .api_version(vk_version), - ) - .enabled_layer_names(layers.as_ptrs()) - .enabled_extension_names(exts.as_ptrs()), - None, - )?; - - let (dbg_loader, _dbg_callbk) = if has_debug_ext { - let flags = vk::DebugUtilsMessageTypeFlagsEXT::GENERAL - | vk::DebugUtilsMessageTypeFlagsEXT::PERFORMANCE - | vk::DebugUtilsMessageTypeFlagsEXT::VALIDATION; - let dbg_info = vk::DebugUtilsMessengerCreateInfoEXT::builder() - .message_severity( - vk::DebugUtilsMessageSeverityFlagsEXT::ERROR - | vk::DebugUtilsMessageSeverityFlagsEXT::WARNING, - ) - .message_type(flags) - .pfn_user_callback(Some(vulkan_debug_callback)); - let dbg_loader = DebugUtils::new(&entry, &instance); - let dbg_callbk = dbg_loader - .create_debug_utils_messenger(&dbg_info, None) - .unwrap(); - (Some(dbg_loader), Some(dbg_callbk)) - } else { - (None, None) - }; - - let vk_instance = VkInstance { - entry, - instance, - vk_version, - dbg_loader, - _dbg_callbk, - }; - - Ok(vk_instance) - } - } - - /// Create a surface from the instance for the specified window handle. - /// - /// # Safety - /// - /// The caller is responsible for making sure that the instance outlives the surface. - pub unsafe fn surface( - &self, - display_handle: RawDisplayHandle, - window_handle: RawWindowHandle, - ) -> Result { - Ok(VkSurface { - surface: ash_window::create_surface( - &self.entry, - &self.instance, - display_handle, - window_handle, - None, - )?, - surface_fn: khr::Surface::new(&self.entry, &self.instance), - }) - } - - /// Create a device from the instance, suitable for compute and graphics. - /// - /// # Safety - /// - /// The caller is responsible for making sure that the instance outlives the device. - /// We could enforce that, for example having an `Arc` of the raw instance, - /// but for now keep things simple. 
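Illustrative sketch, not part of the deleted sources: the version negotiation above works because `vk::make_api_version` packs (variant, major, minor, patch) into a single `u32` whose numeric ordering matches semantic ordering, so packed versions can be compared directly. The function name below is mine; the sketch assumes the same `ash` crate the deleted code already uses.

use ash::vk;

fn version_ordering_sketch() {
    let v1_0 = vk::make_api_version(0, 1, 0, 0);
    let v1_1 = vk::make_api_version(0, 1, 1, 0);
    // A packed 1.1.0 compares greater than a packed 1.0.0, which is why the
    // check `supported_version >= vk::make_api_version(0, 1, 1, 0)` is valid.
    assert!(v1_1 > v1_0);
}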
- pub unsafe fn device(&self) -> Result { - let devices = self.instance.enumerate_physical_devices()?; - let (pdevice, qfi) = choose_device(&self.instance, &devices).ok_or("no suitable device")?; - - let mut has_descriptor_indexing = false; - let vk1_1 = self.vk_version >= vk::make_api_version(0, 1, 1, 0); - let mut features2 = vk::PhysicalDeviceFeatures2::builder(); - let mut set_features2 = vk::PhysicalDeviceFeatures2::builder(); - if vk1_1 { - let mut descriptor_indexing_features = - vk::PhysicalDeviceDescriptorIndexingFeatures::builder(); - features2 = features2.push_next(&mut descriptor_indexing_features); - self.instance - .get_physical_device_features2(pdevice, &mut features2); - set_features2 = set_features2.features(features2.features); - has_descriptor_indexing = descriptor_indexing_features - .shader_storage_image_array_non_uniform_indexing - == vk::TRUE - && descriptor_indexing_features.descriptor_binding_variable_descriptor_count - == vk::TRUE - && descriptor_indexing_features.runtime_descriptor_array == vk::TRUE; - } - - let queue_priorities = [1.0]; - let queue_create_infos = [vk::DeviceQueueCreateInfo::builder() - .queue_family_index(qfi) - .queue_priorities(&queue_priorities) - .build()]; - - let mut descriptor_indexing = vk::PhysicalDeviceDescriptorIndexingFeatures::builder() - .shader_storage_image_array_non_uniform_indexing(true) - .descriptor_binding_variable_descriptor_count(true) - .runtime_descriptor_array(true); - - let mut extensions = Extensions::new( - self.instance - .enumerate_device_extension_properties(pdevice)?, - ); - extensions.try_add(khr::Swapchain::name()); - if has_descriptor_indexing { - extensions.try_add(vk::KhrMaintenance3Fn::name()); - extensions.try_add(vk::ExtDescriptorIndexingFn::name()); - } - let has_subgroup_size = vk1_1 && extensions.try_add(vk::ExtSubgroupSizeControlFn::name()); - let has_memory_model = vk1_1 && extensions.try_add(vk::KhrVulkanMemoryModelFn::name()); - let mut create_info = vk::DeviceCreateInfo::builder() - .queue_create_infos(&queue_create_infos) - .enabled_extension_names(extensions.as_ptrs()); - let mut set_memory_model_features = vk::PhysicalDeviceVulkanMemoryModelFeatures::builder(); - if vk1_1 { - create_info = create_info.push_next(&mut set_features2); - if has_memory_model { - set_memory_model_features = set_memory_model_features - .vulkan_memory_model(true) - .vulkan_memory_model_device_scope(true); - create_info = create_info.push_next(&mut set_memory_model_features); - } - } - if has_descriptor_indexing { - create_info = create_info.push_next(&mut descriptor_indexing); - } - let device = self.instance.create_device(pdevice, &create_info, None)?; - - let device_mem_props = self.instance.get_physical_device_memory_properties(pdevice); - - let queue_index = 0; - let queue = device.get_device_queue(qfi, queue_index); - - let device = Arc::new(RawDevice { - device, - dbg_loader: self.dbg_loader.clone(), - }); - - let props = self.instance.get_physical_device_properties(pdevice); - let timestamp_period = props.limits.timestamp_period; - let subgroup_size = if has_subgroup_size { - let mut subgroup_props = vk::PhysicalDeviceSubgroupSizeControlPropertiesEXT::default(); - let mut properties = - vk::PhysicalDeviceProperties2::builder().push_next(&mut subgroup_props); - self.instance - .get_physical_device_properties2(pdevice, &mut properties); - Some(SubgroupSize { - min: subgroup_props.min_subgroup_size, - max: subgroup_props.max_subgroup_size, - }) - } else { - None - }; - - // The question of when and when not to use 
staging buffers is complex, and this - // is only a first approximation. Basically, it *must* be false when buffers can - // be created with a memory type that is not host-visible. That is not guaranteed - // here but is likely to be the case. - // - // I'm still investigating what should be done in systems with Resizable BAR. - let use_staging_buffers = props.device_type != vk::PhysicalDeviceType::INTEGRATED_GPU; - - // TODO: finer grained query of specific subgroup info. - let has_subgroups = vk1_1; - - let workgroup_limits = WorkgroupLimits { - max_invocations: props.limits.max_compute_work_group_invocations, - max_size: props.limits.max_compute_work_group_size, - }; - - let gpu_info = GpuInfo { - has_descriptor_indexing, - has_subgroups, - subgroup_size, - workgroup_limits, - has_memory_model, - use_staging_buffers, - }; - - Ok(VkDevice { - device, - physical_device: pdevice, - device_mem_props, - qfi, - queue, - timestamp_period, - gpu_info, - }) - } - - pub unsafe fn swapchain( - &self, - width: usize, - height: usize, - device: &VkDevice, - surface: &VkSurface, - ) -> Result { - let formats = surface - .surface_fn - .get_physical_device_surface_formats(device.physical_device, surface.surface)?; - let surface_format = formats - .iter() - .map(|surface_fmt| match surface_fmt.format { - vk::Format::UNDEFINED => { - vk::SurfaceFormatKHR { - format: vk::Format::B8G8R8A8_UNORM, // most common format on desktop - color_space: surface_fmt.color_space, - } - } - _ => *surface_fmt, - }) - .next() - .ok_or("no surface format found")?; - - let capabilities = surface - .surface_fn - .get_physical_device_surface_capabilities(device.physical_device, surface.surface)?; - - let present_modes = surface - .surface_fn - .get_physical_device_surface_present_modes(device.physical_device, surface.surface)?; - - // Can change to MAILBOX to force high frame rates. - const PREFERRED_MODE: vk::PresentModeKHR = vk::PresentModeKHR::FIFO; - let present_mode = present_modes - .into_iter() - .find(|mode| *mode == PREFERRED_MODE) - .unwrap_or(vk::PresentModeKHR::FIFO); - - // Note: can be 2 for non-Android to improve latency, but the real answer is to - // implement some kind of frame pacing. - const PREFERRED_IMAGE_COUNT: u32 = 3; - let max_image_count = match capabilities.max_image_count { - 0 => u32::MAX, - x => x, - }; - let image_count = - PREFERRED_IMAGE_COUNT.clamp(capabilities.min_image_count, max_image_count); - let mut extent = capabilities.current_extent; - if extent.width == u32::MAX || extent.height == u32::MAX { - // We're deciding the size. 
- extent.width = width as u32; - extent.height = height as u32; - } - - let create_info = vk::SwapchainCreateInfoKHR::builder() - .surface(surface.surface) - .min_image_count(image_count) - .image_format(surface_format.format) - .image_color_space(surface_format.color_space) - .image_extent(extent) - .image_array_layers(1) - .image_usage(vk::ImageUsageFlags::TRANSFER_DST) - .image_sharing_mode(vk::SharingMode::EXCLUSIVE) - .pre_transform(vk::SurfaceTransformFlagsKHR::IDENTITY) - .composite_alpha(vk::CompositeAlphaFlagsKHR::OPAQUE) - .present_mode(present_mode) - .clipped(true); - - let swapchain_fn = khr::Swapchain::new(&self.instance, &device.device.device); - let swapchain = swapchain_fn.create_swapchain(&create_info, None)?; - - let images = swapchain_fn.get_swapchain_images(swapchain)?; - let acquisition_semaphores = (0..images.len()) - .map(|_| device.create_semaphore()) - .collect::, Error>>()?; - - Ok(VkSwapchain { - swapchain, - swapchain_fn, - - present_queue: device.queue, - - images, - acquisition_semaphores, - acquisition_idx: 0, - extent, - }) - } -} - -impl crate::backend::Device for VkDevice { - type Buffer = Buffer; - type Image = Image; - type CmdBuf = CmdBuf; - type DescriptorSet = DescriptorSet; - type Pipeline = Pipeline; - type QueryPool = QueryPool; - type Fence = vk::Fence; - type Semaphore = vk::Semaphore; - type DescriptorSetBuilder = DescriptorSetBuilder; - type Sampler = vk::Sampler; - type ShaderSource = [u8]; - - fn query_gpu_info(&self) -> GpuInfo { - self.gpu_info.clone() - } - - fn create_buffer(&self, size: u64, usage: BufferUsage) -> Result { - unsafe { - let device = &self.device.device; - let mut vk_usage = vk::BufferUsageFlags::empty(); - if usage.contains(BufferUsage::STORAGE) { - vk_usage |= vk::BufferUsageFlags::STORAGE_BUFFER; - } - if usage.contains(BufferUsage::COPY_SRC) { - vk_usage |= vk::BufferUsageFlags::TRANSFER_SRC; - } - if usage.contains(BufferUsage::COPY_DST) { - vk_usage |= vk::BufferUsageFlags::TRANSFER_DST; - } - let buffer = device.create_buffer( - &vk::BufferCreateInfo::builder() - .size(size) - .usage( - vk::BufferUsageFlags::STORAGE_BUFFER - | vk::BufferUsageFlags::TRANSFER_SRC - | vk::BufferUsageFlags::TRANSFER_DST, - ) - .sharing_mode(vk::SharingMode::EXCLUSIVE), - None, - )?; - let mem_requirements = device.get_buffer_memory_requirements(buffer); - let mem_flags = memory_property_flags_for_usage(usage); - let mem_type = find_memory_type( - mem_requirements.memory_type_bits, - mem_flags, - &self.device_mem_props, - ) - .unwrap(); // TODO: proper error - let buffer_memory = device.allocate_memory( - &vk::MemoryAllocateInfo::builder() - .allocation_size(mem_requirements.size) - .memory_type_index(mem_type), - None, - )?; - device.bind_buffer_memory(buffer, buffer_memory, 0)?; - Ok(Buffer { - buffer, - buffer_memory, - size, - }) - } - } - - unsafe fn destroy_buffer(&self, buffer: &Self::Buffer) -> Result<(), Error> { - let device = &self.device.device; - device.destroy_buffer(buffer.buffer, None); - device.free_memory(buffer.buffer_memory, None); - Ok(()) - } - - unsafe fn create_image2d( - &self, - width: u32, - height: u32, - format: ImageFormat, - ) -> Result { - let device = &self.device.device; - let extent = vk::Extent3D { - width, - height, - depth: 1, - }; - // TODO: maybe want to fine-tune these for different use cases, especially because we'll - // want to add sampling for images and so on. 
- let usage = vk::ImageUsageFlags::STORAGE - | vk::ImageUsageFlags::TRANSFER_SRC - | vk::ImageUsageFlags::TRANSFER_DST; - let vk_format = match format { - ImageFormat::A8 => vk::Format::R8_UNORM, - ImageFormat::Rgba8 | ImageFormat::Surface => vk::Format::R8G8B8A8_UNORM, - }; - let image = device.create_image( - &vk::ImageCreateInfo::builder() - .image_type(vk::ImageType::TYPE_2D) - .format(vk_format) - .extent(extent) - .mip_levels(1) - .array_layers(1) - .samples(vk::SampleCountFlags::TYPE_1) - .tiling(vk::ImageTiling::OPTIMAL) - .initial_layout(vk::ImageLayout::UNDEFINED) - .usage(usage) - .sharing_mode(vk::SharingMode::EXCLUSIVE), - None, - )?; - let mem_requirements = device.get_image_memory_requirements(image); - let mem_flags = vk::MemoryPropertyFlags::DEVICE_LOCAL; - let mem_type = find_memory_type( - mem_requirements.memory_type_bits, - mem_flags, - &self.device_mem_props, - ) - .unwrap(); // TODO: proper error - let image_memory = device.allocate_memory( - &vk::MemoryAllocateInfo::builder() - .allocation_size(mem_requirements.size) - .memory_type_index(mem_type), - None, - )?; - device.bind_image_memory(image, image_memory, 0)?; - let image_view = device.create_image_view( - &vk::ImageViewCreateInfo::builder() - .view_type(vk::ImageViewType::TYPE_2D) - .image(image) - .format(vk::Format::R8G8B8A8_UNORM) - .subresource_range(vk::ImageSubresourceRange { - aspect_mask: vk::ImageAspectFlags::COLOR, - base_mip_level: 0, - level_count: 1, - base_array_layer: 0, - layer_count: 1, - }) - .components(vk::ComponentMapping { - r: vk::ComponentSwizzle::IDENTITY, - g: vk::ComponentSwizzle::IDENTITY, - b: vk::ComponentSwizzle::IDENTITY, - a: vk::ComponentSwizzle::IDENTITY, - }) - .build(), - None, - )?; - Ok(Image { - image, - image_memory, - image_view, - extent, - }) - } - - unsafe fn destroy_image(&self, image: &Self::Image) -> Result<(), Error> { - let device = &self.device.device; - device.destroy_image(image.image, None); - device.destroy_image_view(image.image_view, None); - device.free_memory(image.image_memory, None); - Ok(()) - } - - unsafe fn create_fence(&self, signaled: bool) -> Result { - let device = &self.device.device; - let mut flags = vk::FenceCreateFlags::empty(); - if signaled { - flags |= vk::FenceCreateFlags::SIGNALED; - } - Ok(device.create_fence(&vk::FenceCreateInfo::builder().flags(flags).build(), None)?) - } - - unsafe fn destroy_fence(&self, fence: Self::Fence) -> Result<(), Error> { - let device = &self.device.device; - device.destroy_fence(fence, None); - Ok(()) - } - - unsafe fn create_semaphore(&self) -> Result { - let device = &self.device.device; - Ok(device.create_semaphore(&vk::SemaphoreCreateInfo::default(), None)?) - } - - unsafe fn wait_and_reset(&self, fences: Vec<&mut Self::Fence>) -> Result<(), Error> { - let device = &self.device.device; - let fences = fences.iter().map(|f| **f).collect::>(); - device.wait_for_fences(&fences, true, !0)?; - device.reset_fences(&fences)?; - Ok(()) - } - - unsafe fn get_fence_status(&self, fence: &mut Self::Fence) -> Result { - let device = &self.device.device; - Ok(device.get_fence_status(*fence)?) 
- } - - unsafe fn create_compute_pipeline( - &self, - code: &[u8], - bind_types: &[BindType], - ) -> Result { - let device = &self.device.device; - let bindings = bind_types - .iter() - .enumerate() - .map(|(i, bind_type)| { - let descriptor_type = match bind_type { - BindType::Buffer | BindType::BufReadOnly => vk::DescriptorType::STORAGE_BUFFER, - BindType::Image | BindType::ImageRead => vk::DescriptorType::STORAGE_IMAGE, - }; - vk::DescriptorSetLayoutBinding::builder() - .binding(i.try_into().unwrap()) - .descriptor_type(descriptor_type) - .descriptor_count(1) - .stage_flags(vk::ShaderStageFlags::COMPUTE) - .build() - }) - .collect::>(); - let descriptor_set_layout = device.create_descriptor_set_layout( - &vk::DescriptorSetLayoutCreateInfo::builder().bindings(&bindings), - None, - )?; - let descriptor_set_layouts = [descriptor_set_layout]; - - // Create compute pipeline. - let code_u32 = convert_u32_vec(code); - let compute_shader_module = device - .create_shader_module(&vk::ShaderModuleCreateInfo::builder().code(&code_u32), None)?; - let entry_name = CString::new("main").unwrap(); - let pipeline_layout = device.create_pipeline_layout( - &vk::PipelineLayoutCreateInfo::builder().set_layouts(&descriptor_set_layouts), - None, - )?; - - let pipeline = device - .create_compute_pipelines( - vk::PipelineCache::null(), - &[vk::ComputePipelineCreateInfo::builder() - .stage( - vk::PipelineShaderStageCreateInfo::builder() - .stage(vk::ShaderStageFlags::COMPUTE) - .module(compute_shader_module) - .name(&entry_name) - .build(), - ) - .layout(pipeline_layout) - .build()], - None, - ) - .map_err(|(_pipeline, err)| err)?[0]; - Ok(Pipeline { - pipeline, - pipeline_layout, - descriptor_set_layout, - }) - } - - unsafe fn descriptor_set_builder(&self) -> DescriptorSetBuilder { - DescriptorSetBuilder { - buffers: Vec::new(), - images: Vec::new(), - textures: Vec::new(), - } - } - - unsafe fn update_buffer_descriptor( - &self, - ds: &mut Self::DescriptorSet, - index: u32, - buf: &Self::Buffer, - ) { - let device = &self.device.device; - device.update_descriptor_sets( - &[vk::WriteDescriptorSet::builder() - .dst_set(ds.descriptor_set) - .dst_binding(index) - .descriptor_type(vk::DescriptorType::STORAGE_BUFFER) - .buffer_info(&[vk::DescriptorBufferInfo::builder() - .buffer(buf.buffer) - .offset(0) - .range(vk::WHOLE_SIZE) - .build()]) - .build()], - &[], - ); - } - - unsafe fn update_image_descriptor( - &self, - ds: &mut Self::DescriptorSet, - index: u32, - image: &Self::Image, - ) { - let device = &self.device.device; - device.update_descriptor_sets( - &[vk::WriteDescriptorSet::builder() - .dst_set(ds.descriptor_set) - .dst_binding(index) - .descriptor_type(vk::DescriptorType::STORAGE_IMAGE) - .image_info(&[vk::DescriptorImageInfo::builder() - .image_view(image.image_view) - .image_layout(vk::ImageLayout::GENERAL) - .build()]) - .build()], - &[], - ); - } - - fn create_cmd_buf(&self) -> Result { - unsafe { - let device = &self.device.device; - let cmd_pool = device.create_command_pool( - &vk::CommandPoolCreateInfo::builder() - .flags(vk::CommandPoolCreateFlags::RESET_COMMAND_BUFFER) - .queue_family_index(self.qfi), - None, - )?; - let cmd_buf = device.allocate_command_buffers( - &vk::CommandBufferAllocateInfo::builder() - .command_pool(cmd_pool) - .level(vk::CommandBufferLevel::PRIMARY) - .command_buffer_count(1), - )?[0]; - Ok(CmdBuf { - cmd_buf, - cmd_pool, - device: self.device.clone(), - end_query: None, - }) - } - } - - unsafe fn destroy_cmd_buf(&self, cmd_buf: CmdBuf) -> Result<(), Error> { - let device 
= &self.device.device; - device.destroy_command_pool(cmd_buf.cmd_pool, None); - Ok(()) - } - - /// Create a query pool for timestamp queries. - fn create_query_pool(&self, n_queries: u32) -> Result { - unsafe { - let device = &self.device.device; - let pool = device.create_query_pool( - &vk::QueryPoolCreateInfo::builder() - .query_type(vk::QueryType::TIMESTAMP) - .query_count(n_queries), - None, - )?; - Ok(QueryPool { pool, n_queries }) - } - } - - unsafe fn fetch_query_pool(&self, pool: &Self::QueryPool) -> Result, Error> { - let device = &self.device.device; - let mut buf = vec![0u64; pool.n_queries as usize]; - // It's unclear to me why WAIT is needed here, as the wait on the command buffer's - // fence should make the query available, but otherwise we get sporadic NOT_READY - // results (Windows 10, AMD 5700 XT). - let flags = vk::QueryResultFlags::TYPE_64 | vk::QueryResultFlags::WAIT; - device.get_query_pool_results(pool.pool, 0, pool.n_queries, &mut buf, flags)?; - let tsp = self.timestamp_period as f64 * 1e-9; - let result = buf.iter().map(|ts| *ts as f64 * tsp).collect(); - Ok(result) - } - - /// Run the command buffers. - /// - /// This submits the command buffer for execution. The provided fence - /// is signalled when the execution is complete. - unsafe fn run_cmd_bufs( - &self, - cmd_bufs: &[&CmdBuf], - wait_semaphores: &[&Self::Semaphore], - signal_semaphores: &[&Self::Semaphore], - fence: Option<&mut Self::Fence>, - ) -> Result<(), Error> { - let device = &self.device.device; - - let fence = match fence { - Some(fence) => *fence, - None => vk::Fence::null(), - }; - let wait_stages = wait_semaphores - .iter() - .map(|_| vk::PipelineStageFlags::ALL_COMMANDS) - .collect::>(); - let cmd_bufs = cmd_bufs - .iter() - .map(|c| c.cmd_buf) - .collect::>(); - let wait_semaphores = wait_semaphores - .iter() - .copied() - .copied() - .collect::>(); - let signal_semaphores = signal_semaphores - .iter() - .copied() - .copied() - .collect::>(); - device.queue_submit( - self.queue, - &[vk::SubmitInfo::builder() - .command_buffers(&cmd_bufs) - .wait_semaphores(&wait_semaphores) - .wait_dst_stage_mask(&wait_stages) - .signal_semaphores(&signal_semaphores) - .build()], - fence, - )?; - Ok(()) - } - - unsafe fn map_buffer( - &self, - buffer: &Self::Buffer, - offset: u64, - size: u64, - _mode: MapMode, - ) -> Result<*mut u8, Error> { - let device = &self.device.device; - let buf = device.map_memory( - buffer.buffer_memory, - offset, - size, - vk::MemoryMapFlags::empty(), - )?; - Ok(buf as *mut u8) - } - - unsafe fn unmap_buffer( - &self, - buffer: &Self::Buffer, - _offset: u64, - _size: u64, - _mode: MapMode, - ) -> Result<(), Error> { - self.device.device.unmap_memory(buffer.buffer_memory); - Ok(()) - } - - unsafe fn create_sampler(&self, params: SamplerParams) -> Result { - let device = &self.device.device; - let filter = match params { - SamplerParams::Linear => vk::Filter::LINEAR, - SamplerParams::Nearest => vk::Filter::NEAREST, - }; - let sampler = device.create_sampler( - &vk::SamplerCreateInfo::builder() - .mag_filter(filter) - .min_filter(filter) - .mipmap_mode(vk::SamplerMipmapMode::LINEAR) - .address_mode_u(vk::SamplerAddressMode::CLAMP_TO_BORDER) - .address_mode_v(vk::SamplerAddressMode::CLAMP_TO_BORDER) - .address_mode_w(vk::SamplerAddressMode::CLAMP_TO_BORDER) - .mip_lod_bias(0.0) - .compare_op(vk::CompareOp::NEVER) - .min_lod(0.0) - .max_lod(0.0) - .border_color(vk::BorderColor::FLOAT_TRANSPARENT_BLACK) - .max_anisotropy(1.0) - .anisotropy_enable(false), - None, - )?; - Ok(sampler) - 
} -} - -impl crate::backend::CmdBuf for CmdBuf { - unsafe fn begin(&mut self) { - self.device - .device - .begin_command_buffer( - self.cmd_buf, - &vk::CommandBufferBeginInfo::builder() - .flags(vk::CommandBufferUsageFlags::ONE_TIME_SUBMIT), - ) - .unwrap(); - } - - unsafe fn finish(&mut self) { - self.device.device.end_command_buffer(self.cmd_buf).unwrap(); - } - - unsafe fn flush(&mut self) {} - - unsafe fn reset(&mut self) -> bool { - true - } - - unsafe fn begin_compute_pass(&mut self, desc: &ComputePassDescriptor) { - if let Some((pool, start, end)) = &desc.timer_queries { - #[allow(irrefutable_let_patterns)] - if let crate::hub::QueryPool::Vk(pool) = pool { - self.write_timestamp_raw(pool.pool, *start); - self.end_query = Some((pool.pool, *end)); - } - } - } - - unsafe fn dispatch( - &mut self, - pipeline: &Pipeline, - descriptor_set: &DescriptorSet, - workgroup_count: (u32, u32, u32), - _workgroup_size: (u32, u32, u32), - ) { - let device = &self.device.device; - device.cmd_bind_pipeline( - self.cmd_buf, - vk::PipelineBindPoint::COMPUTE, - pipeline.pipeline, - ); - device.cmd_bind_descriptor_sets( - self.cmd_buf, - vk::PipelineBindPoint::COMPUTE, - pipeline.pipeline_layout, - 0, - &[descriptor_set.descriptor_set], - &[], - ); - device.cmd_dispatch( - self.cmd_buf, - workgroup_count.0, - workgroup_count.1, - workgroup_count.2, - ); - } - - unsafe fn end_compute_pass(&mut self) { - if let Some((pool, end)) = self.end_query.take() { - self.write_timestamp_raw(pool, end); - } - } - - /// Insert a pipeline barrier for all memory accesses. - unsafe fn memory_barrier(&mut self) { - let device = &self.device.device; - device.cmd_pipeline_barrier( - self.cmd_buf, - vk::PipelineStageFlags::ALL_COMMANDS, - vk::PipelineStageFlags::ALL_COMMANDS, - vk::DependencyFlags::empty(), - &[vk::MemoryBarrier::builder() - .src_access_mask(vk::AccessFlags::MEMORY_WRITE) - .dst_access_mask(vk::AccessFlags::MEMORY_READ) - .build()], - &[], - &[], - ); - } - - unsafe fn host_barrier(&mut self) { - let device = &self.device.device; - device.cmd_pipeline_barrier( - self.cmd_buf, - vk::PipelineStageFlags::ALL_COMMANDS, - vk::PipelineStageFlags::HOST, - vk::DependencyFlags::empty(), - &[vk::MemoryBarrier::builder() - .src_access_mask(vk::AccessFlags::MEMORY_WRITE) - .dst_access_mask(vk::AccessFlags::HOST_READ) - .build()], - &[], - &[], - ); - } - - unsafe fn image_barrier( - &mut self, - image: &Image, - src_layout: ImageLayout, - dst_layout: ImageLayout, - ) { - let device = &self.device.device; - device.cmd_pipeline_barrier( - self.cmd_buf, - vk::PipelineStageFlags::ALL_COMMANDS, - vk::PipelineStageFlags::ALL_COMMANDS, - vk::DependencyFlags::empty(), - &[], - &[], - &[vk::ImageMemoryBarrier::builder() - .image(image.image) - .src_access_mask(vk::AccessFlags::MEMORY_WRITE) - .dst_access_mask(vk::AccessFlags::MEMORY_READ) - .old_layout(map_image_layout(src_layout)) - .new_layout(map_image_layout(dst_layout)) - .subresource_range(vk::ImageSubresourceRange { - aspect_mask: vk::ImageAspectFlags::COLOR, - base_mip_level: 0, - level_count: vk::REMAINING_MIP_LEVELS, - base_array_layer: 0, - layer_count: vk::REMAINING_MIP_LEVELS, - }) - .build()], - ); - } - - unsafe fn clear_buffer(&mut self, buffer: &Buffer, size: Option) { - let device = &self.device.device; - let size = size.unwrap_or(vk::WHOLE_SIZE); - device.cmd_fill_buffer(self.cmd_buf, buffer.buffer, 0, size, 0); - } - - unsafe fn copy_buffer(&mut self, src: &Buffer, dst: &Buffer) { - let device = &self.device.device; - let size = src.size.min(dst.size); - 
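Illustrative sketch, not taken from the deleted sources: how a caller might record a compute pass against the `CmdBuf` implementation above. The helper name and the workgroup counts are placeholders, and the pipeline, descriptor set, and pass descriptor are assumed to be created through the surrounding HAL.

use crate::backend::CmdBuf as _;

unsafe fn record_compute_sketch(
    cmd_buf: &mut CmdBuf,
    pipeline: &Pipeline,
    descriptors: &DescriptorSet,
    pass_desc: &ComputePassDescriptor,
) {
    cmd_buf.begin();
    cmd_buf.begin_compute_pass(pass_desc);
    // Placeholder workgroup counts; the real values come from the renderer stages.
    cmd_buf.dispatch(pipeline, descriptors, (256, 1, 1), (128, 1, 1));
    cmd_buf.end_compute_pass();
    // Make the dispatch's writes visible to whatever is recorded next.
    cmd_buf.memory_barrier();
    cmd_buf.finish();
}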
device.cmd_copy_buffer( - self.cmd_buf, - src.buffer, - dst.buffer, - &[vk::BufferCopy::builder().size(size).build()], - ); - } - - unsafe fn copy_image_to_buffer(&mut self, src: &Image, dst: &Buffer) { - let device = &self.device.device; - device.cmd_copy_image_to_buffer( - self.cmd_buf, - src.image, - vk::ImageLayout::TRANSFER_SRC_OPTIMAL, - dst.buffer, - &[vk::BufferImageCopy { - buffer_offset: 0, - buffer_row_length: 0, // tight packing - buffer_image_height: 0, // tight packing - image_subresource: vk::ImageSubresourceLayers { - aspect_mask: vk::ImageAspectFlags::COLOR, - mip_level: 0, - base_array_layer: 0, - layer_count: 1, - }, - image_offset: vk::Offset3D { x: 0, y: 0, z: 0 }, - image_extent: src.extent, - }], - ); - } - - unsafe fn copy_buffer_to_image(&mut self, src: &Buffer, dst: &Image) { - let device = &self.device.device; - device.cmd_copy_buffer_to_image( - self.cmd_buf, - src.buffer, - dst.image, - vk::ImageLayout::TRANSFER_DST_OPTIMAL, - &[vk::BufferImageCopy { - buffer_offset: 0, - buffer_row_length: 0, // tight packing - buffer_image_height: 0, // tight packing - image_subresource: vk::ImageSubresourceLayers { - aspect_mask: vk::ImageAspectFlags::COLOR, - mip_level: 0, - base_array_layer: 0, - layer_count: 1, - }, - image_offset: vk::Offset3D { x: 0, y: 0, z: 0 }, - image_extent: dst.extent, - }], - ); - } - - unsafe fn blit_image(&mut self, src: &Image, dst: &Image) { - let device = &self.device.device; - device.cmd_blit_image( - self.cmd_buf, - src.image, - vk::ImageLayout::TRANSFER_SRC_OPTIMAL, - dst.image, - vk::ImageLayout::TRANSFER_DST_OPTIMAL, - &[vk::ImageBlit { - src_subresource: vk::ImageSubresourceLayers { - aspect_mask: vk::ImageAspectFlags::COLOR, - mip_level: 0, - base_array_layer: 0, - layer_count: 1, - }, - src_offsets: [ - vk::Offset3D { x: 0, y: 0, z: 0 }, - vk::Offset3D { - x: src.extent.width as i32, - y: src.extent.height as i32, - z: src.extent.depth as i32, - }, - ], - dst_subresource: vk::ImageSubresourceLayers { - aspect_mask: vk::ImageAspectFlags::COLOR, - mip_level: 0, - base_array_layer: 0, - layer_count: 1, - }, - dst_offsets: [ - vk::Offset3D { x: 0, y: 0, z: 0 }, - vk::Offset3D { - x: dst.extent.width as i32, - y: dst.extent.height as i32, - z: dst.extent.depth as i32, - }, - ], - }], - vk::Filter::LINEAR, - ); - } - - unsafe fn reset_query_pool(&mut self, pool: &QueryPool) { - let device = &self.device.device; - device.cmd_reset_query_pool(self.cmd_buf, pool.pool, 0, pool.n_queries); - } - - unsafe fn write_timestamp(&mut self, pool: &QueryPool, query: u32) { - self.write_timestamp_raw(pool.pool, query); - } - - unsafe fn begin_debug_label(&mut self, label: &str) { - if let Some(utils) = &self.device.dbg_loader { - let label_cstr = CString::new(label).unwrap(); - let label_ext = DebugUtilsLabelEXT::builder() - .label_name(&label_cstr) - .build(); - utils.cmd_begin_debug_utils_label(self.cmd_buf, &label_ext); - } - } - - unsafe fn end_debug_label(&mut self) { - if let Some(utils) = &self.device.dbg_loader { - utils.cmd_end_debug_utils_label(self.cmd_buf); - } - } -} - -impl CmdBuf { - unsafe fn write_timestamp_raw(&mut self, pool: vk::QueryPool, query: u32) { - let device = &self.device.device; - device.cmd_write_timestamp( - self.cmd_buf, - vk::PipelineStageFlags::COMPUTE_SHADER, - pool, - query, - ); - } -} - -impl crate::backend::DescriptorSetBuilder for DescriptorSetBuilder { - fn add_buffers(&mut self, buffers: &[&Buffer]) { - self.buffers.extend(buffers.iter().map(|b| b.buffer)); - } - - fn add_images(&mut self, images: &[&Image]) { 
- self.images.extend(images.iter().map(|i| i.image_view)); - } - - fn add_textures(&mut self, images: &[&Image]) { - self.textures.extend(images.iter().map(|i| i.image_view)); - } - - unsafe fn build(self, device: &VkDevice, pipeline: &Pipeline) -> Result { - let device = &device.device.device; - let mut descriptor_pool_sizes = Vec::new(); - if !self.buffers.is_empty() { - descriptor_pool_sizes.push( - vk::DescriptorPoolSize::builder() - .ty(vk::DescriptorType::STORAGE_BUFFER) - .descriptor_count(self.buffers.len() as u32) - .build(), - ); - } - if !self.images.is_empty() { - descriptor_pool_sizes.push( - vk::DescriptorPoolSize::builder() - .ty(vk::DescriptorType::STORAGE_IMAGE) - .descriptor_count(self.images.len() as u32) - .build(), - ); - } - if !self.textures.is_empty() { - descriptor_pool_sizes.push( - vk::DescriptorPoolSize::builder() - .ty(vk::DescriptorType::STORAGE_IMAGE) - .descriptor_count(self.textures.len() as u32) - .build(), - ); - } - let descriptor_pool = device.create_descriptor_pool( - &vk::DescriptorPoolCreateInfo::builder() - .pool_sizes(&descriptor_pool_sizes) - .max_sets(1), - None, - )?; - let descriptor_set_layouts = [pipeline.descriptor_set_layout]; - - let descriptor_sets = device - .allocate_descriptor_sets( - &vk::DescriptorSetAllocateInfo::builder() - .descriptor_pool(descriptor_pool) - .set_layouts(&descriptor_set_layouts), - ) - .unwrap(); - let mut binding = 0; - // Maybe one call to update_descriptor_sets with an array of descriptor_writes? - for buf in &self.buffers { - device.update_descriptor_sets( - &[vk::WriteDescriptorSet::builder() - .dst_set(descriptor_sets[0]) - .dst_binding(binding) - .descriptor_type(vk::DescriptorType::STORAGE_BUFFER) - .buffer_info(&[vk::DescriptorBufferInfo::builder() - .buffer(*buf) - .offset(0) - .range(vk::WHOLE_SIZE) - .build()]) - .build()], - &[], - ); - binding += 1; - } - // maybe chain images and textures together; they're basically identical now - for image in &self.images { - device.update_descriptor_sets( - &[vk::WriteDescriptorSet::builder() - .dst_set(descriptor_sets[0]) - .dst_binding(binding) - .descriptor_type(vk::DescriptorType::STORAGE_IMAGE) - .image_info(&[vk::DescriptorImageInfo::builder() - .sampler(vk::Sampler::null()) - .image_view(*image) - .image_layout(vk::ImageLayout::GENERAL) - .build()]) - .build()], - &[], - ); - binding += 1; - } - for image in &self.textures { - device.update_descriptor_sets( - &[vk::WriteDescriptorSet::builder() - .dst_set(descriptor_sets[0]) - .dst_binding(binding) - .descriptor_type(vk::DescriptorType::STORAGE_IMAGE) - .image_info(&[vk::DescriptorImageInfo::builder() - .sampler(vk::Sampler::null()) - .image_view(*image) - .image_layout(vk::ImageLayout::GENERAL) - .build()]) - .build()], - &[], - ); - binding += 1; - } - Ok(DescriptorSet { - descriptor_set: descriptor_sets[0], - }) - } -} - -impl VkSwapchain { - pub unsafe fn next(&mut self) -> Result<(usize, vk::Semaphore), Error> { - let acquisition_semaphore = self.acquisition_semaphores[self.acquisition_idx]; - let (image_idx, _suboptimal) = self.swapchain_fn.acquire_next_image( - self.swapchain, - !0, - acquisition_semaphore, - vk::Fence::null(), - )?; - self.acquisition_idx = (self.acquisition_idx + 1) % self.acquisition_semaphores.len(); - - Ok((image_idx as usize, acquisition_semaphore)) - } - - pub unsafe fn image(&self, idx: usize) -> Image { - Image { - image: self.images[idx], - image_memory: vk::DeviceMemory::null(), - image_view: vk::ImageView::null(), - extent: vk::Extent3D { - width: self.extent.width, - 
height: self.extent.height, - depth: 1, - }, - } - } - - pub unsafe fn present( - &self, - image_idx: usize, - semaphores: &[&vk::Semaphore], - ) -> Result { - let semaphores = semaphores - .iter() - .copied() - .copied() - .collect::>(); - Ok(self.swapchain_fn.queue_present( - self.present_queue, - &vk::PresentInfoKHR::builder() - .swapchains(&[self.swapchain]) - .image_indices(&[image_idx as u32]) - .wait_semaphores(&semaphores) - .build(), - )?) - } -} - -impl Extensions { - fn new(exist_exts: Vec) -> Extensions { - Extensions { - exist_exts, - exts: vec![], - } - } - - fn try_add(&mut self, ext: &'static CStr) -> bool { - unsafe { - if self - .exist_exts - .iter() - .find(|x| CStr::from_ptr(x.extension_name.as_ptr()) == ext) - .is_some() - { - self.exts.push(ext.as_ptr()); - true - } else { - false - } - } - } - - fn as_ptrs(&self) -> &[*const c_char] { - &self.exts - } -} - -impl Layers { - fn new(exist_layers: Vec) -> Layers { - Layers { - exist_layers, - layers: vec![], - } - } - - fn try_add(&mut self, ext: &'static CStr) -> bool { - unsafe { - if self - .exist_layers - .iter() - .find(|x| CStr::from_ptr(x.layer_name.as_ptr()) == ext) - .is_some() - { - self.layers.push(ext.as_ptr()); - true - } else { - false - } - } - } - - fn as_ptrs(&self) -> &[*const c_char] { - &self.layers - } -} - -unsafe fn choose_device( - instance: &Instance, - devices: &[vk::PhysicalDevice], -) -> Option<(vk::PhysicalDevice, u32)> { - for pdevice in devices { - let props = instance.get_physical_device_queue_family_properties(*pdevice); - for (ix, info) in props.iter().enumerate() { - // Select a device that supports both compute and graphics workloads. - // This function used to check for surface compatibility but that was removed - // to allow device creation without an instantiated surface. This follows from - // both Metal and DX12 which do not require such validation. It might be worth - // exposing this to the user in a future device enumeration API, which would - // also allow selection between discrete and integrated devices. - if info - .queue_flags - .contains(vk::QueueFlags::COMPUTE | vk::QueueFlags::GRAPHICS) - { - return Some((*pdevice, ix as u32)); - } - } - } - None -} - -fn memory_property_flags_for_usage(usage: BufferUsage) -> vk::MemoryPropertyFlags { - if usage.intersects(BufferUsage::MAP_READ | BufferUsage::MAP_WRITE) { - vk::MemoryPropertyFlags::HOST_VISIBLE | vk::MemoryPropertyFlags::HOST_COHERENT - } else { - vk::MemoryPropertyFlags::DEVICE_LOCAL - } -} - -// This could get more sophisticated about asking for CACHED when appropriate, but is -// probably going to get replaced by a gpu-alloc solution anyway. 
-fn find_memory_type( - memory_type_bits: u32, - property_flags: vk::MemoryPropertyFlags, - props: &vk::PhysicalDeviceMemoryProperties, -) -> Option { - for i in 0..props.memory_type_count { - if (memory_type_bits & (1 << i)) != 0 - && props.memory_types[i as usize] - .property_flags - .contains(property_flags) - { - return Some(i); - } - } - None -} - -fn convert_u32_vec(src: &[u8]) -> Vec { - src.chunks(4) - .map(|chunk| { - let mut buf = [0; 4]; - buf.copy_from_slice(chunk); - u32::from_le_bytes(buf) - }) - .collect() -} - -fn map_image_layout(layout: ImageLayout) -> vk::ImageLayout { - match layout { - ImageLayout::Undefined => vk::ImageLayout::UNDEFINED, - ImageLayout::Present => vk::ImageLayout::PRESENT_SRC_KHR, - ImageLayout::BlitSrc => vk::ImageLayout::TRANSFER_SRC_OPTIMAL, - ImageLayout::BlitDst => vk::ImageLayout::TRANSFER_DST_OPTIMAL, - ImageLayout::General => vk::ImageLayout::GENERAL, - ImageLayout::ShaderRead => vk::ImageLayout::SHADER_READ_ONLY_OPTIMAL, - } -} diff --git a/piet-gpu-types/Cargo.toml b/piet-gpu-types/Cargo.toml deleted file mode 100644 index 629cd62..0000000 --- a/piet-gpu-types/Cargo.toml +++ /dev/null @@ -1,12 +0,0 @@ -[package] -name = "piet-gpu-types" -version = "0.0.0" -authors = ["Raph Levien "] -description = "The scene graph and internal GPU types for piet-gpu." -license = "MIT/Apache-2.0" -edition = "2018" -keywords = ["graphics", "2d"] - -[dependencies] -piet-gpu-derive = { path = "../piet-gpu-derive" } -half = "1.5.0" diff --git a/piet-gpu-types/src/annotated.rs b/piet-gpu-types/src/annotated.rs deleted file mode 100644 index 90e54bc..0000000 --- a/piet-gpu-types/src/annotated.rs +++ /dev/null @@ -1,45 +0,0 @@ -use piet_gpu_derive::piet_gpu; - -piet_gpu! { - #[gpu_write] - mod annotated { - struct AnnoImage { - bbox: [f32; 4], - linewidth: f32, - index: u32, - offset: [i16; 2], - } - struct AnnoColor { - bbox: [f32; 4], - // For stroked fills. - // For the nonuniform scale case, this needs to be a 2x2 matrix. - // That's expected to be uncommon, so we could special-case it. - linewidth: f32, - rgba_color: u32, - } - struct AnnoLinGradient { - bbox: [f32; 4], - // For stroked fills. - linewidth: f32, - index: u32, - line_x: f32, - line_y: f32, - line_c: f32, - } - struct AnnoBeginClip { - bbox: [f32; 4], - linewidth: f32, - } - struct AnnoEndClip { - bbox: [f32; 4], - } - enum Annotated { - Nop, - Color(TagFlags, AnnoColor), - LinGradient(TagFlags, AnnoLinGradient), - Image(TagFlags, AnnoImage), - BeginClip(TagFlags, AnnoBeginClip), - EndClip(AnnoEndClip), - } - } -} diff --git a/piet-gpu-types/src/bins.rs b/piet-gpu-types/src/bins.rs deleted file mode 100644 index b9945f9..0000000 --- a/piet-gpu-types/src/bins.rs +++ /dev/null @@ -1,12 +0,0 @@ -use piet_gpu_derive::piet_gpu; - -// The output of the binning stage, organized as a linked list of chunks. - -piet_gpu! { - #[gpu_write] - mod bins { - struct BinInstance { - element_ix: u32, - } - } -} diff --git a/piet-gpu-types/src/encoder.rs b/piet-gpu-types/src/encoder.rs deleted file mode 100644 index 7ac8bbb..0000000 --- a/piet-gpu-types/src/encoder.rs +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2020 The xi-editor authors. - -//! 
New-style encoders (supporting proc macros) - -pub struct A; - -/// A reference to an encoded object within a buffer -#[derive(Clone, Copy, Debug)] -pub struct Ref { - offset: u32, - _phantom: std::marker::PhantomData, -} - -pub struct Encoder { - buf: Vec, -} - -// TODO: we probably do want to encode slices, get rid of Sized bound -pub trait Encode: Sized { - /// Size if it's a fixed-size object, otherwise 0. - fn fixed_size() -> usize; - - /// Encoded size, for both fixed and variable sized objects. - fn encoded_size(&self) -> usize { - Self::fixed_size() - } - - /// Encode into a buffer; panics if not appropriately sized. - fn encode_to(&self, buf: &mut [u8]); - - /// Allocate a chunk and encode, returning a reference. - fn encode(&self, encoder: &mut Encoder) -> Ref { - let size = self.encoded_size(); - let (offset, buf) = encoder.alloc_chunk(size as u32); - self.encode_to(buf); - Ref::new(offset) - } -} - -impl Ref { - fn new(offset: u32) -> Ref { - Ref { - offset, - _phantom: Default::default(), - } - } - - pub fn offset(&self) -> u32 { - self.offset - } - - pub fn transmute(&self) -> Ref { - Ref::new(self.offset) - } -} - -impl Encoder { - pub fn new() -> Encoder { - Encoder { buf: Vec::new() } - } - - pub fn alloc_chunk(&mut self, size: u32) -> (u32, &mut [u8]) { - let offset = self.buf.len(); - self.buf.resize(size as usize + offset, 0); - (offset as u32, &mut self.buf[offset..]) - } - - pub fn buf(&self) -> &[u8] { - &self.buf - } - - pub fn buf_mut(&mut self) -> &mut [u8] { - &mut self.buf - } -} - -impl Encode for Ref { - fn fixed_size() -> usize { - 4 - } - - fn encode_to(&self, buf: &mut [u8]) { - buf[0..4].copy_from_slice(&self.offset.to_le_bytes()); - } -} - -// Encode impls for scalar and small vector types are as needed; it's a finite set of -// possibilities, so we could do it all with macros, but by hand is expedient. - -impl Encode for u32 { - fn fixed_size() -> usize { - 4 - } - - fn encode_to(&self, buf: &mut [u8]) { - buf[0..4].copy_from_slice(&self.to_le_bytes()); - } -} - -impl Encode for f32 { - fn fixed_size() -> usize { - 4 - } - - fn encode_to(&self, buf: &mut [u8]) { - buf[0..4].copy_from_slice(&self.to_le_bytes()); - } -} - -impl Encode for [u16; 4] { - fn fixed_size() -> usize { - 8 - } - - fn encode_to(&self, buf: &mut [u8]) { - buf[0..2].copy_from_slice(&self[0].to_le_bytes()); - buf[2..4].copy_from_slice(&self[1].to_le_bytes()); - buf[4..6].copy_from_slice(&self[2].to_le_bytes()); - buf[6..8].copy_from_slice(&self[3].to_le_bytes()); - } -} - -impl Encode for [f32; 2] { - fn fixed_size() -> usize { - 8 - } - - fn encode_to(&self, buf: &mut [u8]) { - buf[0..4].copy_from_slice(&self[0].to_le_bytes()); - buf[4..8].copy_from_slice(&self[1].to_le_bytes()); - } -} - -// TODO: make this work for slices too, but need to deal with Sized bound -// -// Note: only works for vectors of fixed size objects. -impl Encode for Vec { - fn fixed_size() -> usize { - 0 - } - fn encoded_size(&self) -> usize { - self.len() * T::fixed_size() - } - - fn encode_to(&self, buf: &mut [u8]) { - let size = T::fixed_size(); - for (ix, val) in self.iter().enumerate() { - val.encode_to(&mut buf[ix * size..]); - } - } -} diff --git a/piet-gpu-types/src/lib.rs b/piet-gpu-types/src/lib.rs deleted file mode 100644 index 62450d2..0000000 --- a/piet-gpu-types/src/lib.rs +++ /dev/null @@ -1,12 +0,0 @@ -// Structures used only internally probably don't need to be pub. 
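Illustrative usage of the `Encoder`/`Encode` API above (not from the original sources; the function name is mine). Fixed-size values are appended to the byte buffer in order, and each `encode` call returns a typed `Ref` holding the byte offset of the encoded value.

use piet_gpu_types::encoder::{Encode, Encoder};

fn encoder_usage_sketch() {
    let mut encoder = Encoder::new();
    let first = 42u32.encode(&mut encoder); // u32 has fixed size 4
    let second = [1.0f32, 2.0f32].encode(&mut encoder); // [f32; 2] has fixed size 8
    assert_eq!(first.offset(), 0);
    assert_eq!(second.offset(), 4);
    assert_eq!(encoder.buf().len(), 12);
}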
- -pub mod annotated; -pub mod bins; -pub mod encoder; -pub mod pathseg; -pub mod ptcl; -pub mod scene; -pub mod state; -pub mod test; -pub mod tile; -pub mod tilegroup; diff --git a/piet-gpu-types/src/main.rs b/piet-gpu-types/src/main.rs deleted file mode 100644 index 7913c5f..0000000 --- a/piet-gpu-types/src/main.rs +++ /dev/null @@ -1,18 +0,0 @@ -fn main() { - let mod_name = std::env::args() - .skip(1) - .next() - .expect("provide a module name"); - match mod_name.as_str() { - "scene" => print!("{}", piet_gpu_types::scene::gen_gpu_scene()), - "state" => print!("{}", piet_gpu_types::state::gen_gpu_state()), - "annotated" => print!("{}", piet_gpu_types::annotated::gen_gpu_annotated()), - "pathseg" => print!("{}", piet_gpu_types::pathseg::gen_gpu_pathseg()), - "bins" => print!("{}", piet_gpu_types::bins::gen_gpu_bins()), - "tile" => print!("{}", piet_gpu_types::tile::gen_gpu_tile()), - "tilegroup" => print!("{}", piet_gpu_types::tilegroup::gen_gpu_tilegroup()), - "ptcl" => print!("{}", piet_gpu_types::ptcl::gen_gpu_ptcl()), - "test" => print!("{}", piet_gpu_types::test::gen_gpu_test()), - _ => println!("Oops, unknown module name"), - } -} diff --git a/piet-gpu-types/src/pathseg.rs b/piet-gpu-types/src/pathseg.rs deleted file mode 100644 index c6067fa..0000000 --- a/piet-gpu-types/src/pathseg.rs +++ /dev/null @@ -1,22 +0,0 @@ -use piet_gpu_derive::piet_gpu; - -piet_gpu! { - #[gpu_write] - mod pathseg { - struct PathCubic { - p0: [f32; 2], - p1: [f32; 2], - p2: [f32; 2], - p3: [f32; 2], - path_ix: u32, - // trans_ix is the transform index. It is 1-based, 0 means no transformation. - trans_ix: u32, - // Halfwidth in both x and y for binning. For strokes only. - stroke: [f32; 2], - } - enum PathSeg { - Nop, - Cubic(TagFlags, PathCubic), - } - } -} diff --git a/piet-gpu-types/src/ptcl.rs b/piet-gpu-types/src/ptcl.rs deleted file mode 100644 index 14831ca..0000000 --- a/piet-gpu-types/src/ptcl.rs +++ /dev/null @@ -1,63 +0,0 @@ -use piet_gpu_derive::piet_gpu; - -piet_gpu! { - #[gpu_write] - mod ptcl { - struct CmdStroke { - // This is really a Ref, but we don't have cross-module - // references. - tile_ref: u32, - half_width: f32, - } - struct CmdFill { - // As above, really Ref - tile_ref: u32, - backdrop: i32, - } - struct CmdColor { - rgba_color: u32, - } - struct CmdLinGrad { - index: u32, - // line equation for gradient - line_x: f32, - line_y: f32, - line_c: f32, - } - struct CmdRadGrad { - index: u32, - mat: [f32; 4], - xlat: [f32; 2], - c1: [f32; 2], - ra: f32, - roff: f32, - } - struct CmdImage { - index: u32, - offset: [i16; 2], - } - struct CmdAlpha { - alpha: f32, - } - struct CmdEndClip { - blend: u32, - } - struct CmdJump { - new_ref: u32, - } - enum Cmd { - End, - Fill(CmdFill), - Stroke(CmdStroke), - Solid, - Alpha(CmdAlpha), - Color(CmdColor), - LinGrad(CmdLinGrad), - RadGrad(CmdRadGrad), - Image(CmdImage), - BeginClip, - EndClip(CmdEndClip), - Jump(CmdJump), - } - } -} diff --git a/piet-gpu-types/src/scene.rs b/piet-gpu-types/src/scene.rs deleted file mode 100644 index 9591f04..0000000 --- a/piet-gpu-types/src/scene.rs +++ /dev/null @@ -1,69 +0,0 @@ -use piet_gpu_derive::piet_gpu; - -pub use self::scene::{ - Clip, CubicSeg, Element, FillColor, FillLinGradient, LineSeg, QuadSeg, SetFillMode, - SetLineWidth, Transform, -}; - -piet_gpu! 
{ - #[rust_encode] - mod scene { - struct LineSeg { - p0: [f32; 2], - p1: [f32; 2], - } - struct QuadSeg { - p0: [f32; 2], - p1: [f32; 2], - p2: [f32; 2], - } - struct CubicSeg { - p0: [f32; 2], - p1: [f32; 2], - p2: [f32; 2], - p3: [f32; 2], - } - struct FillColor { - rgba_color: u32, - } - struct FillLinGradient { - index: u32, - p0: [f32; 2], - p1: [f32; 2], - } - struct FillImage { - index: u32, - offset: [i16; 2], - } - struct SetLineWidth { - width: f32, - } - struct Transform { - mat: [f32; 4], - translate: [f32; 2], - } - struct Clip { - bbox: [f32; 4], - // TODO: add alpha? - } - struct SetFillMode { - fill_mode: u32, - } - enum Element { - Nop, - - Line(LineSeg), - Quad(QuadSeg), - Cubic(CubicSeg), - - FillColor(FillColor), - FillLinGradient(FillLinGradient), - FillImage(FillImage), - SetLineWidth(SetLineWidth), - Transform(Transform), - BeginClip(Clip), - EndClip(Clip), - SetFillMode(SetFillMode), - } - } -} diff --git a/piet-gpu-types/src/state.rs b/piet-gpu-types/src/state.rs deleted file mode 100644 index 6e2b581..0000000 --- a/piet-gpu-types/src/state.rs +++ /dev/null @@ -1,17 +0,0 @@ -use piet_gpu_derive::piet_gpu; - -piet_gpu! { - #[gpu_write] - mod state { - struct State { - mat: [f32; 4], - translate: [f32; 2], - bbox: [f32; 4], - linewidth: f32, - flags: u32, - path_count: u32, - pathseg_count: u32, - trans_count: u32, - } - } -} diff --git a/piet-gpu-types/src/test.rs b/piet-gpu-types/src/test.rs deleted file mode 100644 index e92aaca..0000000 --- a/piet-gpu-types/src/test.rs +++ /dev/null @@ -1,33 +0,0 @@ -use piet_gpu_derive::piet_gpu; - -piet_gpu! { - #[rust_encode] - #[gpu_write] - mod test { - struct StructA { - a: f16, - b: f16, - } - - struct StructB { - a: f16, - b: u16, - c: f16, - } - - struct StructC { - a: f16, - b: u16, - c: u16, - d: f16, - } - - struct StructD { - a: [f16; 2], - } - - struct StructE { - a: [f16; 3], - } - } -} diff --git a/piet-gpu-types/src/tile.rs b/piet-gpu-types/src/tile.rs deleted file mode 100644 index 27e87f4..0000000 --- a/piet-gpu-types/src/tile.rs +++ /dev/null @@ -1,26 +0,0 @@ -use piet_gpu_derive::piet_gpu; - -piet_gpu! { - #[gpu_write] - mod tile { - struct Path { - bbox: [u16; 4], - tiles: Ref, - } - struct Tile { - tile: Ref, - backdrop: i32, - } - // Segments within a tile are represented as a linked list. - struct TileSeg { - origin: [f32; 2], - vector: [f32; 2], - y_edge: f32, - next: Ref, - } - struct TransformSeg { - mat: [f32; 4], - translate: [f32; 2], - } - } -} diff --git a/piet-gpu-types/src/tilegroup.rs b/piet-gpu-types/src/tilegroup.rs deleted file mode 100644 index ea295d9..0000000 --- a/piet-gpu-types/src/tilegroup.rs +++ /dev/null @@ -1,39 +0,0 @@ -use piet_gpu_derive::piet_gpu; - -// Structures representing tilegroup instances (output of kernel 1). -// There are three outputs: the main instances, the stroke instances, -// and the fill instances. All three are conceptually a list of -// instances, but the encoding is slightly different. The first is -// encoded with Instance, Jump, and End. The other two are encoded -// as a linked list of Chunk. - -// The motivation for the difference is that the first requires fewer -// registers to track state, but the second contains information that -// is useful up front for doing dynamic allocation in kernel 2, as -// well as increasing read parallelism; the "jump" approach really is -// geared to sequential reading. - -piet_gpu! 
{ - #[gpu_write] - mod tilegroup { - struct Instance { - // Note: a better type would be `Ref` but to do that we - // would need cross-module references. Punt for now. - item_ref: u32, - // A better type would be Point. - offset: [f32; 2], - } - struct Jump { - new_ref: Ref, - } - struct Chunk { - chunk_n: u32, - next: Ref, - } - enum TileGroup { - Instance(Instance), - Jump(Jump), - End, - } - } -} diff --git a/piet-gpu/Cargo.toml b/piet-gpu/Cargo.toml deleted file mode 100644 index 8e5b82e..0000000 --- a/piet-gpu/Cargo.toml +++ /dev/null @@ -1,48 +0,0 @@ -[package] -name = "piet-gpu" -version = "0.1.0" -authors = ["Raph Levien "] -description = "A compute-centric GPU 2D renderer." -readme = "README.md" -license = "MIT/Apache-2.0" -edition = "2018" - -[[bin]] -name = "cli" -path = "bin/cli.rs" - -[[bin]] -name = "winit" -path = "bin/winit.rs" - -[[example]] -name = "android" -path = "bin/android.rs" -crate-type = ["cdylib"] - -[dependencies.piet-gpu-hal] -path = "../piet-gpu-hal" - -[dependencies.piet-gpu-types] -path = "../piet-gpu-types" - -[dependencies.piet-scene] -path = "../piet-scene" - -[dependencies] -png = "0.17.6" -rand = "0.8.5" -roxmltree = "0.13" -winit = {version = "0.27.3", default-features = false, features = ["x11", "wayland", "wayland-dlopen"]} -raw-window-handle = "0.5" -clap = "3.2.22" -bytemuck = { version = "1.7.2", features = ["derive"] } - -[target.'cfg(target_os = "android")'.dependencies] -ndk = "0.3" -ndk-sys = "0.2.0" -ndk-glue = "0.3" -raw-window-handle = "0.3" - -[package.metadata.android.application] -debuggable = true diff --git a/piet-gpu/bin/android.rs b/piet-gpu/bin/android.rs deleted file mode 100644 index 0558d02..0000000 --- a/piet-gpu/bin/android.rs +++ /dev/null @@ -1,164 +0,0 @@ -#![cfg(target_os = "android")] -//! Android example -//! -//! Run using `cargo apk run --example android` -//! -//! Requires the [cargo-apk] tool. -//! [cargo-apk]: https://crates.io/crates/cargo-apk - -use raw_window_handle::{ - AndroidDisplayHandle, AndroidNdkWindowHandle, RawDisplayHandle, RawWindowHandle, -}; - -use ndk_glue::Event; - -use piet_gpu_hal::{ - Error, ImageLayout, Instance, InstanceFlags, Semaphore, Session, Surface, Swapchain, -}; - -use piet_gpu::{samples, RenderDriver, Renderer, SimpleText}; -use piet_scene::{Scene, SceneBuilder}; - -#[cfg_attr(target_os = "android", ndk_glue::main(backtrace = "on"))] -fn main() { - my_main().unwrap(); -} - -// State required to render and present the contents -struct GfxState { - session: Session, - render_driver: RenderDriver, - swapchain: Swapchain, - current_frame: usize, - present_semaphores: Vec, -} - -const NUM_FRAMES: usize = 2; - -fn my_main() -> Result<(), Error> { - let mut gfx_state = None; - loop { - for event in ndk_glue::poll_events() { - println!("got event {:?}", event); - match event { - Event::WindowCreated => { - let window = ndk_glue::native_window(); - if let Some(window) = &*window { - let width = window.width() as usize; - let height = window.height() as usize; - let instance = Instance::new(InstanceFlags::default())?; - let mut android_handle = AndroidNdkWindowHandle::empty(); - android_handle.a_native_window = window.ptr().as_ptr() as *mut _; - let window_handle = RawWindowHandle::AndroidNdk(android_handle); - let display_handle = - RawDisplayHandle::Android(AndroidDisplayHandle::empty()); - let surface = unsafe { instance.surface(display_handle, window_handle)? 
}; - gfx_state = Some(GfxState::new(&instance, Some(&surface), width, height)?); - } else { - println!("native window is sadly none"); - } - } - Event::WindowRedrawNeeded => { - if let Some(gfx_state) = gfx_state.as_mut() { - for _ in 0..1000 { - gfx_state.redraw(); - } - } - } - _ => (), - } - } - } -} - -impl GfxState { - fn new( - instance: &Instance, - surface: Option<&Surface>, - width: usize, - height: usize, - ) -> Result { - unsafe { - let device = instance.device()?; - let swapchain = instance.swapchain(width, height, &device, surface.unwrap())?; - let session = Session::new(device); - let current_frame = 0; - let present_semaphores = (0..NUM_FRAMES) - .map(|_| session.create_semaphore()) - .collect::, Error>>()?; - - let renderer = Renderer::new(&session, width, height, NUM_FRAMES)?; - let render_driver = RenderDriver::new(&session, NUM_FRAMES, renderer); - - Ok(GfxState { - session, - render_driver, - swapchain, - current_frame, - present_semaphores, - }) - } - } - - fn redraw(&mut self) { - println!("redraw"); - unsafe { - let frame_idx = self.current_frame % NUM_FRAMES; - let mut info_string = String::new(); - - if self.current_frame >= NUM_FRAMES { - let stats = self - .render_driver - .get_timing_stats(&self.session, frame_idx); - info_string = stats.short_summary(); - println!("{}", info_string); - } - let mut text = SimpleText::new(); - let mut scene = Scene::default(); - let mut builder = SceneBuilder::for_scene(&mut scene); - samples::render_anim_frame(&mut builder, self.current_frame); - //samples::render_tiger(&mut builder, false); - render_info(&mut text, &mut builder, &info_string); - builder.finish(); - if let Err(e) = self.render_driver.upload_scene(&self.session, &scene) { - println!("error in uploading: {}", e); - } - let (image_idx, acquisition_semaphore) = self.swapchain.next().unwrap(); - let swap_image = self.swapchain.image(image_idx); - self.render_driver.run_coarse(&self.session).unwrap(); - let target = self.render_driver.record_fine(&self.session).unwrap(); - let cmd_buf = target.cmd_buf; - - // Image -> Swapchain - cmd_buf.image_barrier(&swap_image, ImageLayout::Undefined, ImageLayout::BlitDst); - cmd_buf.blit_image(target.image, &swap_image); - cmd_buf.image_barrier(&swap_image, ImageLayout::BlitDst, ImageLayout::Present); - - self.render_driver - .submit( - &self.session, - &[&acquisition_semaphore], - &[&self.present_semaphores[frame_idx]], - ) - .unwrap(); - - self.swapchain - .present(image_idx, &[&self.present_semaphores[frame_idx]]) - .unwrap(); - - self.render_driver.next_buffer(); - self.current_frame += 1; - } - } -} - -fn render_info(simple_text: &mut SimpleText, sb: &mut SceneBuilder, info: &str) { - simple_text.add( - sb, - None, - 60.0, - None, - piet_scene::Affine::translate(110.0, 120.0), - info, - ); -} diff --git a/piet-gpu/bin/cli.rs b/piet-gpu/bin/cli.rs deleted file mode 100644 index 457dab5..0000000 --- a/piet-gpu/bin/cli.rs +++ /dev/null @@ -1,291 +0,0 @@ -use std::fs::File; -use std::io::BufWriter; -use std::path::Path; - -use clap::{App, Arg}; - -use piet_gpu_hal::{BufferUsage, Error, Instance, InstanceFlags, Session}; - -use piet_gpu::{samples, PicoSvg, RenderDriver, Renderer}; -use piet_scene::{Scene, SceneBuilder}; - -const WIDTH: usize = 2048; -const HEIGHT: usize = 1536; - -#[allow(unused)] -fn dump_scene(buf: &[u8]) { - for i in 0..(buf.len() / 4) { - let mut buf_u32 = [0u8; 4]; - buf_u32.copy_from_slice(&buf[i * 4..i * 4 + 4]); - println!("{:4x}: {:8x}", i * 4, u32::from_le_bytes(buf_u32)); - } -} - -#[allow(unused)] -fn 
dump_state(buf: &[u8]) { - for i in 0..(buf.len() / 48) { - let j = i * 48; - let floats = (0..11) - .map(|k| { - let mut buf_f32 = [0u8; 4]; - buf_f32.copy_from_slice(&buf[j + k * 4..j + k * 4 + 4]); - f32::from_le_bytes(buf_f32) - }) - .collect::<Vec<_>>(); - println!( - "{}: [{} {} {} {} {} {}] ({}, {})-({} {}) {} {}", - i, - floats[0], - floats[1], - floats[2], - floats[3], - floats[4], - floats[5], - floats[6], - floats[7], - floats[8], - floats[9], - floats[10], - buf[j + 44] - ); - } -} - -/// Interpret the output of the binning stage, for diagnostic purposes. -#[allow(unused)] -fn trace_merge(buf: &[u32]) { - for bin in 0..256 { - println!("bin {}:", bin); - let mut starts = (0..16) - .map(|i| Some((bin * 16 + i) * 64)) - .collect::<Vec<Option<usize>>>(); - loop { - let min_start = starts - .iter() - .map(|st| { - st.map(|st| { - if buf[st / 4] == 0 { - !0 - } else { - buf[st / 4 + 2] - } - }) - .unwrap_or(!0) - }) - .min() - .unwrap(); - if min_start == !0 { - break; - } - let mut selected = !0; - for i in 0..16 { - if let Some(st) = starts[i] { - if buf[st / 4] != 0 && buf[st / 4 + 2] == min_start { - selected = i; - break; - } - } - } - let st = starts[selected].unwrap(); - println!("selected {}, start {:x}", selected, st); - for j in 0..buf[st / 4] { - println!("{:x}", buf[st / 4 + 2 + j as usize]) - } - if buf[st / 4 + 1] == 0 { - starts[selected] = None; - } else { - starts[selected] = Some(buf[st / 4 + 1] as usize); - } - } - } -} - -/// Interpret the output of the coarse raster stage, for diagnostic purposes. -#[allow(unused)] -fn trace_ptcl(buf: &[u32]) { - for y in 0..96 { - for x in 0..128 { - let tile_ix = y * 128 + x; - println!("tile {} @({}, {})", tile_ix, x, y); - let mut tile_offset = tile_ix * 1024; - loop { - let tag = buf[tile_offset / 4]; - match tag { - 0 => break, - 3 => { - let backdrop = buf[tile_offset / 4 + 2]; - let rgba_color = buf[tile_offset / 4 + 3]; - println!(" {:x}: fill {:x} {}", tile_offset, rgba_color, backdrop); - let mut seg_chunk = buf[tile_offset / 4 + 1] as usize; - let n = buf[seg_chunk / 4] as usize; - let segs = buf[seg_chunk / 4 + 2] as usize; - println!(" chunk @{:x}: n={}, segs @{:x}", seg_chunk, n, segs); - for i in 0..n { - let x0 = f32::from_bits(buf[segs / 4 + i * 5]); - let y0 = f32::from_bits(buf[segs / 4 + i * 5 + 1]); - let x1 = f32::from_bits(buf[segs / 4 + i * 5 + 2]); - let y1 = f32::from_bits(buf[segs / 4 + i * 5 + 3]); - let y_edge = f32::from_bits(buf[segs / 4 + i * 5 + 4]); - println!( - " ({:.3}, {:.3}) - ({:.3}, {:.3}) | {:.3}", - x0, y0, x1, y1, y_edge - ); - } - loop { - seg_chunk = buf[seg_chunk / 4 + 1] as usize; - if seg_chunk == 0 { - break; - } - } - } - 4 => { - let line_width = f32::from_bits(buf[tile_offset / 4 + 2]); - let rgba_color = buf[tile_offset / 4 + 3]; - println!( - " {:x}: stroke {:x} {}", - tile_offset, rgba_color, line_width - ); - let mut seg_chunk = buf[tile_offset / 4 + 1] as usize; - let n = buf[seg_chunk / 4] as usize; - let segs = buf[seg_chunk / 4 + 2] as usize; - println!(" chunk @{:x}: n={}, segs @{:x}", seg_chunk, n, segs); - for i in 0..n { - let x0 = f32::from_bits(buf[segs / 4 + i * 5]); - let y0 = f32::from_bits(buf[segs / 4 + i * 5 + 1]); - let x1 = f32::from_bits(buf[segs / 4 + i * 5 + 2]); - let y1 = f32::from_bits(buf[segs / 4 + i * 5 + 3]); - let y_edge = f32::from_bits(buf[segs / 4 + i * 5 + 4]); - println!( - " ({:.3}, {:.3}) - ({:.3}, {:.3}) | {:.3}", - x0, y0, x1, y1, y_edge - ); - } - loop { - seg_chunk = buf[seg_chunk / 4 + 1] as usize; - if seg_chunk == 0 { - break; - } - } - } - 6 => { - let 
backdrop = buf[tile_offset / 4 + 2]; - println!(" {:x}: begin_clip {}", tile_offset, backdrop); - let mut seg_chunk = buf[tile_offset / 4 + 1] as usize; - let n = buf[seg_chunk / 4] as usize; - let segs = buf[seg_chunk / 4 + 2] as usize; - println!(" chunk @{:x}: n={}, segs @{:x}", seg_chunk, n, segs); - for i in 0..n { - let x0 = f32::from_bits(buf[segs / 4 + i * 5]); - let y0 = f32::from_bits(buf[segs / 4 + i * 5 + 1]); - let x1 = f32::from_bits(buf[segs / 4 + i * 5 + 2]); - let y1 = f32::from_bits(buf[segs / 4 + i * 5 + 3]); - let y_edge = f32::from_bits(buf[segs / 4 + i * 5 + 4]); - println!( - " ({:.3}, {:.3}) - ({:.3}, {:.3}) | {:.3}", - x0, y0, x1, y1, y_edge - ); - } - loop { - seg_chunk = buf[seg_chunk / 4 + 1] as usize; - if seg_chunk == 0 { - break; - } - } - } - 7 => { - let backdrop = buf[tile_offset / 4 + 1]; - println!("{:x}: solid_clip {:x}", tile_offset, backdrop); - } - 8 => { - println!("{:x}: end_clip", tile_offset); - } - _ => { - println!("{:x}: {}", tile_offset, tag); - } - } - if tag == 0 { - break; - } - if tag == 8 { - tile_offset = buf[tile_offset / 4 + 1] as usize; - } else { - tile_offset += 20; - } - } - } - } -} - -fn main() -> Result<(), Error> { - let matches = App::new("piet-gpu test") - .arg(Arg::with_name("INPUT").index(1)) - .arg(Arg::with_name("flip").short('f').long("flip")) - .arg( - Arg::with_name("scale") - .short('s') - .long("scale") - .takes_value(true), - ) - .get_matches(); - let instance = Instance::new(InstanceFlags::default())?; - let mut scene = Scene::default(); - unsafe { - let device = instance.device()?; - let session = Session::new(device); - let mut builder = SceneBuilder::for_scene(&mut scene); - if let Some(input) = matches.value_of("INPUT") { - let mut scale = matches - .value_of("scale") - .map(|scale| scale.parse().unwrap()) - .unwrap_or(8.0); - if matches.is_present("flip") { - scale = -scale; - } - let xml_str = std::fs::read_to_string(input).unwrap(); - let start = std::time::Instant::now(); - let svg = PicoSvg::load(&xml_str, scale).unwrap(); - println!("parsing time: {:?}", start.elapsed()); - samples::render_svg(&mut builder, &svg, true); - } else { - //test_scenes::render_scene(&mut ctx); - samples::render_blend_grid(&mut builder); - } - builder.finish(); - - let renderer = Renderer::new(&session, WIDTH, HEIGHT, 1)?; - let mut render_driver = RenderDriver::new(&session, 1, renderer); - let start = std::time::Instant::now(); - render_driver.upload_scene(&session, &scene)?; - let image_usage = BufferUsage::MAP_READ | BufferUsage::COPY_DST; - let image_buf = session.create_buffer((WIDTH * HEIGHT * 4) as u64, image_usage)?; - - render_driver.run_coarse(&session)?; - let target = render_driver.record_fine(&session)?; - target - .cmd_buf - .copy_image_to_buffer(target.image, &image_buf); - render_driver.submit(&session, &[], &[])?; - render_driver.wait(&session); - println!("elapsed = {:?}", start.elapsed()); - render_driver.get_timing_stats(&session, 0).print_summary(); - - let mut img_data: Vec<u8> = Default::default(); - // Note: because png can use a `&[u8]` slice, we could avoid an extra copy - // (probably passing a slice into a closure). But for now: keep it simple. - image_buf.read(&mut img_data).unwrap(); - - // Write image as PNG file. 
- let path = Path::new("image.png"); - let file = File::create(path).unwrap(); - let ref mut w = BufWriter::new(file); - - let mut encoder = png::Encoder::new(w, WIDTH as u32, HEIGHT as u32); - encoder.set_color(png::ColorType::Rgba); - encoder.set_depth(png::BitDepth::Eight); - let mut writer = encoder.write_header().unwrap(); - - writer.write_image_data(&img_data).unwrap(); - } - - Ok(()) -} diff --git a/piet-gpu/bin/winit.rs b/piet-gpu/bin/winit.rs deleted file mode 100644 index 4b3e990..0000000 --- a/piet-gpu/bin/winit.rs +++ /dev/null @@ -1,196 +0,0 @@ -use piet_gpu::{samples, PicoSvg, RenderDriver, Renderer, SimpleText}; -use piet_gpu_hal::{Error, ImageLayout, Instance, InstanceFlags, Session}; -use piet_scene::{Scene, SceneBuilder}; - -use clap::{App, Arg}; - -use raw_window_handle::{HasRawDisplayHandle, HasRawWindowHandle}; - -use winit::{ - event::{Event, WindowEvent}, - event_loop::{ControlFlow, EventLoop}, - window::WindowBuilder, -}; - -const NUM_FRAMES: usize = 2; - -const WIDTH: usize = 2048; -const HEIGHT: usize = 1536; - -fn main() -> Result<(), Error> { - let matches = App::new("piet-gpu test") - .arg(Arg::with_name("INPUT").index(1)) - .arg(Arg::with_name("flip").short('f').long("flip")) - .arg( - Arg::with_name("scale") - .short('s') - .long("scale") - .takes_value(true), - ) - .get_matches(); - - // Collect SVG if input - let svg = match matches.value_of("INPUT") { - Some(file) => { - let mut scale = matches - .value_of("scale") - .map(|scale| scale.parse().unwrap()) - .unwrap_or(8.0); - if matches.is_present("flip") { - scale = -scale; - } - let xml_str = std::fs::read_to_string(file).unwrap(); - let start = std::time::Instant::now(); - let svg = PicoSvg::load(&xml_str, scale).unwrap(); - println!("parsing time: {:?}", start.elapsed()); - Some(svg) - } - None => None, - }; - - let event_loop = EventLoop::new(); - let window = WindowBuilder::new() - .with_inner_size(winit::dpi::LogicalSize { - width: (WIDTH / 2) as f64, - height: (HEIGHT / 2) as f64, - }) - .with_resizable(false) // currently not supported - .build(&event_loop)?; - - let instance = Instance::new(InstanceFlags::default())?; - let mut info_string = "info".to_string(); - let mut scene = Scene::default(); - let mut simple_text = piet_gpu::SimpleText::new(); - unsafe { - let display_handle = window.raw_display_handle(); - let window_handle = window.raw_window_handle(); - let surface = instance.surface(display_handle, window_handle)?; - let device = instance.device()?; - let mut swapchain = instance.swapchain(WIDTH / 2, HEIGHT / 2, &device, &surface)?; - let session = Session::new(device); - - let mut current_frame = 0; - let present_semaphores = (0..NUM_FRAMES) - .map(|_| session.create_semaphore()) - .collect::<Result<Vec<_>, Error>>()?; - - let renderer = Renderer::new(&session, WIDTH, HEIGHT, NUM_FRAMES)?; - let mut render_driver = RenderDriver::new(&session, NUM_FRAMES, renderer); - let mut sample_index = 0usize; - - event_loop.run(move |event, _, control_flow| { - *control_flow = ControlFlow::Poll; // `ControlFlow::Wait` if only re-render on event - - match event { - Event::WindowEvent { event, window_id } if window_id == window.id() => { - use winit::event::{ElementState, VirtualKeyCode}; - match event { - WindowEvent::CloseRequested => { - *control_flow = ControlFlow::Exit; - } - WindowEvent::KeyboardInput { input, .. 
} => { - if input.state == ElementState::Pressed { - match input.virtual_keycode { - Some(VirtualKeyCode::Left) => { - sample_index = sample_index.saturating_sub(1) - } - Some(VirtualKeyCode::Right) => { - sample_index = sample_index.saturating_add(1) - } - _ => {} - } - } - } - _ => (), - } - } - Event::MainEventsCleared => { - window.request_redraw(); - } - Event::RedrawRequested(window_id) if window_id == window.id() => { - let frame_idx = current_frame % NUM_FRAMES; - - if current_frame >= NUM_FRAMES { - let stats = render_driver.get_timing_stats(&session, frame_idx); - info_string = stats.short_summary(); - } - - if let Some(svg) = &svg { - let mut builder = SceneBuilder::for_scene(&mut scene); - samples::render_svg(&mut builder, svg, false); - render_info(&mut simple_text, &mut builder, &info_string); - builder.finish(); - if let Err(e) = render_driver.upload_scene(&session, &scene) { - println!("error in uploading: {}", e); - } - } else { - let mut builder = SceneBuilder::for_scene(&mut scene); - - const N_SAMPLES: usize = 6; - match sample_index % N_SAMPLES { - 0 => samples::render_anim_frame( - &mut builder, - &mut simple_text, - current_frame, - ), - 1 => samples::render_blend_grid(&mut builder), - 2 => samples::render_tiger(&mut builder, false), - 3 => samples::render_brush_transform(&mut builder, current_frame), - 4 => samples::render_funky_paths(&mut builder), - _ => samples::render_scene(&mut builder), - } - render_info(&mut simple_text, &mut builder, &info_string); - builder.finish(); - if let Err(e) = render_driver.upload_scene(&session, &scene) { - println!("error in uploading: {}", e); - } - } - - let (image_idx, acquisition_semaphore) = swapchain.next().unwrap(); - let swap_image = swapchain.image(image_idx); - render_driver.run_coarse(&session).unwrap(); - let target = render_driver.record_fine(&session).unwrap(); - let cmd_buf = target.cmd_buf; - - // Image -> Swapchain - cmd_buf.image_barrier( - &swap_image, - ImageLayout::Undefined, - ImageLayout::BlitDst, - ); - cmd_buf.blit_image(target.image, &swap_image); - cmd_buf.image_barrier(&swap_image, ImageLayout::BlitDst, ImageLayout::Present); - render_driver - .submit( - &session, - &[&acquisition_semaphore], - &[&present_semaphores[frame_idx]], - ) - .unwrap(); - - swapchain - .present(image_idx, &[&present_semaphores[frame_idx]]) - .unwrap(); - - render_driver.next_buffer(); - current_frame += 1; - } - Event::LoopDestroyed => { - render_driver.wait_all(&session); - } - _ => (), - } - }) - } -} - -fn render_info(simple_text: &mut SimpleText, sb: &mut SceneBuilder, info: &str) { - simple_text.add( - sb, - None, - 40.0, - None, - piet_scene::kurbo::Affine::translate((110.0, 50.0)), - info, - ); -} diff --git a/piet-gpu/shader/.clang-format b/piet-gpu/shader/.clang-format deleted file mode 100644 index 9801ccd..0000000 --- a/piet-gpu/shader/.clang-format +++ /dev/null @@ -1,5 +0,0 @@ -BasedOnStyle: LLVM -IndentWidth: 4 -ColumnLimit: 120 -AllowShortFunctionsOnASingleLine: None -SortIncludes: false diff --git a/piet-gpu/shader/annotated.h b/piet-gpu/shader/annotated.h deleted file mode 100644 index 5a35088..0000000 --- a/piet-gpu/shader/annotated.h +++ /dev/null @@ -1,296 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense - -// Code auto-generated by piet-gpu-derive - -struct AnnoImageRef { - uint offset; -}; - -struct AnnoColorRef { - uint offset; -}; - -struct AnnoLinGradientRef { - uint offset; -}; - -struct AnnoBeginClipRef { - uint offset; -}; - -struct AnnoEndClipRef { - uint offset; -}; - -struct 
AnnotatedRef { - uint offset; -}; - -struct AnnoImage { - vec4 bbox; - float linewidth; - uint index; - ivec2 offset; -}; - -#define AnnoImage_size 28 - -AnnoImageRef AnnoImage_index(AnnoImageRef ref, uint index) { - return AnnoImageRef(ref.offset + index * AnnoImage_size); -} - -struct AnnoColor { - vec4 bbox; - float linewidth; - uint rgba_color; -}; - -#define AnnoColor_size 24 - -AnnoColorRef AnnoColor_index(AnnoColorRef ref, uint index) { - return AnnoColorRef(ref.offset + index * AnnoColor_size); -} - -struct AnnoLinGradient { - vec4 bbox; - float linewidth; - uint index; - float line_x; - float line_y; - float line_c; -}; - -#define AnnoLinGradient_size 36 - -AnnoLinGradientRef AnnoLinGradient_index(AnnoLinGradientRef ref, uint index) { - return AnnoLinGradientRef(ref.offset + index * AnnoLinGradient_size); -} - -struct AnnoBeginClip { - vec4 bbox; - float linewidth; - uint blend; -}; - -#define AnnoBeginClip_size 24 - -AnnoBeginClipRef AnnoBeginClip_index(AnnoBeginClipRef ref, uint index) { - return AnnoBeginClipRef(ref.offset + index * AnnoBeginClip_size); -} - -struct AnnoEndClip { - vec4 bbox; - uint blend; -}; - -#define AnnoEndClip_size 20 - -AnnoEndClipRef AnnoEndClip_index(AnnoEndClipRef ref, uint index) { - return AnnoEndClipRef(ref.offset + index * AnnoEndClip_size); -} - -#define Annotated_Nop 0 -#define Annotated_Color 1 -#define Annotated_LinGradient 2 -#define Annotated_Image 3 -#define Annotated_BeginClip 4 -#define Annotated_EndClip 5 -#define Annotated_size 40 - -AnnotatedRef Annotated_index(AnnotatedRef ref, uint index) { - return AnnotatedRef(ref.offset + index * Annotated_size); -} - -struct AnnotatedTag { - uint tag; - uint flags; -}; - -AnnoImage AnnoImage_read(Alloc a, AnnoImageRef ref) { - uint ix = ref.offset >> 2; - uint raw0 = read_mem(a, ix + 0); - uint raw1 = read_mem(a, ix + 1); - uint raw2 = read_mem(a, ix + 2); - uint raw3 = read_mem(a, ix + 3); - uint raw4 = read_mem(a, ix + 4); - uint raw5 = read_mem(a, ix + 5); - uint raw6 = read_mem(a, ix + 6); - AnnoImage s; - s.bbox = vec4(uintBitsToFloat(raw0), uintBitsToFloat(raw1), uintBitsToFloat(raw2), uintBitsToFloat(raw3)); - s.linewidth = uintBitsToFloat(raw4); - s.index = raw5; - s.offset = ivec2(int(raw6 << 16) >> 16, int(raw6) >> 16); - return s; -} - -void AnnoImage_write(Alloc a, AnnoImageRef ref, AnnoImage s) { - uint ix = ref.offset >> 2; - write_mem(a, ix + 0, floatBitsToUint(s.bbox.x)); - write_mem(a, ix + 1, floatBitsToUint(s.bbox.y)); - write_mem(a, ix + 2, floatBitsToUint(s.bbox.z)); - write_mem(a, ix + 3, floatBitsToUint(s.bbox.w)); - write_mem(a, ix + 4, floatBitsToUint(s.linewidth)); - write_mem(a, ix + 5, s.index); - write_mem(a, ix + 6, (uint(s.offset.x) & 0xffff) | (uint(s.offset.y) << 16)); -} - -AnnoColor AnnoColor_read(Alloc a, AnnoColorRef ref) { - uint ix = ref.offset >> 2; - uint raw0 = read_mem(a, ix + 0); - uint raw1 = read_mem(a, ix + 1); - uint raw2 = read_mem(a, ix + 2); - uint raw3 = read_mem(a, ix + 3); - uint raw4 = read_mem(a, ix + 4); - uint raw5 = read_mem(a, ix + 5); - AnnoColor s; - s.bbox = vec4(uintBitsToFloat(raw0), uintBitsToFloat(raw1), uintBitsToFloat(raw2), uintBitsToFloat(raw3)); - s.linewidth = uintBitsToFloat(raw4); - s.rgba_color = raw5; - return s; -} - -void AnnoColor_write(Alloc a, AnnoColorRef ref, AnnoColor s) { - uint ix = ref.offset >> 2; - write_mem(a, ix + 0, floatBitsToUint(s.bbox.x)); - write_mem(a, ix + 1, floatBitsToUint(s.bbox.y)); - write_mem(a, ix + 2, floatBitsToUint(s.bbox.z)); - write_mem(a, ix + 3, floatBitsToUint(s.bbox.w)); - 
write_mem(a, ix + 4, floatBitsToUint(s.linewidth)); - write_mem(a, ix + 5, s.rgba_color); -} - -AnnoLinGradient AnnoLinGradient_read(Alloc a, AnnoLinGradientRef ref) { - uint ix = ref.offset >> 2; - uint raw0 = read_mem(a, ix + 0); - uint raw1 = read_mem(a, ix + 1); - uint raw2 = read_mem(a, ix + 2); - uint raw3 = read_mem(a, ix + 3); - uint raw4 = read_mem(a, ix + 4); - uint raw5 = read_mem(a, ix + 5); - uint raw6 = read_mem(a, ix + 6); - uint raw7 = read_mem(a, ix + 7); - uint raw8 = read_mem(a, ix + 8); - AnnoLinGradient s; - s.bbox = vec4(uintBitsToFloat(raw0), uintBitsToFloat(raw1), uintBitsToFloat(raw2), uintBitsToFloat(raw3)); - s.linewidth = uintBitsToFloat(raw4); - s.index = raw5; - s.line_x = uintBitsToFloat(raw6); - s.line_y = uintBitsToFloat(raw7); - s.line_c = uintBitsToFloat(raw8); - return s; -} - -void AnnoLinGradient_write(Alloc a, AnnoLinGradientRef ref, AnnoLinGradient s) { - uint ix = ref.offset >> 2; - write_mem(a, ix + 0, floatBitsToUint(s.bbox.x)); - write_mem(a, ix + 1, floatBitsToUint(s.bbox.y)); - write_mem(a, ix + 2, floatBitsToUint(s.bbox.z)); - write_mem(a, ix + 3, floatBitsToUint(s.bbox.w)); - write_mem(a, ix + 4, floatBitsToUint(s.linewidth)); - write_mem(a, ix + 5, s.index); - write_mem(a, ix + 6, floatBitsToUint(s.line_x)); - write_mem(a, ix + 7, floatBitsToUint(s.line_y)); - write_mem(a, ix + 8, floatBitsToUint(s.line_c)); -} - -AnnoBeginClip AnnoBeginClip_read(Alloc a, AnnoBeginClipRef ref) { - uint ix = ref.offset >> 2; - uint raw0 = read_mem(a, ix + 0); - uint raw1 = read_mem(a, ix + 1); - uint raw2 = read_mem(a, ix + 2); - uint raw3 = read_mem(a, ix + 3); - uint raw4 = read_mem(a, ix + 4); - uint raw5 = read_mem(a, ix + 5); - AnnoBeginClip s; - s.bbox = vec4(uintBitsToFloat(raw0), uintBitsToFloat(raw1), uintBitsToFloat(raw2), uintBitsToFloat(raw3)); - s.linewidth = uintBitsToFloat(raw4); - s.blend = raw5; - return s; -} - -void AnnoBeginClip_write(Alloc a, AnnoBeginClipRef ref, AnnoBeginClip s) { - uint ix = ref.offset >> 2; - write_mem(a, ix + 0, floatBitsToUint(s.bbox.x)); - write_mem(a, ix + 1, floatBitsToUint(s.bbox.y)); - write_mem(a, ix + 2, floatBitsToUint(s.bbox.z)); - write_mem(a, ix + 3, floatBitsToUint(s.bbox.w)); - write_mem(a, ix + 4, floatBitsToUint(s.linewidth)); - write_mem(a, ix + 5, s.blend); -} - -AnnoEndClip AnnoEndClip_read(Alloc a, AnnoEndClipRef ref) { - uint ix = ref.offset >> 2; - uint raw0 = read_mem(a, ix + 0); - uint raw1 = read_mem(a, ix + 1); - uint raw2 = read_mem(a, ix + 2); - uint raw3 = read_mem(a, ix + 3); - uint raw4 = read_mem(a, ix + 4); - AnnoEndClip s; - s.bbox = vec4(uintBitsToFloat(raw0), uintBitsToFloat(raw1), uintBitsToFloat(raw2), uintBitsToFloat(raw3)); - s.blend = raw4; - return s; -} - -void AnnoEndClip_write(Alloc a, AnnoEndClipRef ref, AnnoEndClip s) { - uint ix = ref.offset >> 2; - write_mem(a, ix + 0, floatBitsToUint(s.bbox.x)); - write_mem(a, ix + 1, floatBitsToUint(s.bbox.y)); - write_mem(a, ix + 2, floatBitsToUint(s.bbox.z)); - write_mem(a, ix + 3, floatBitsToUint(s.bbox.w)); - write_mem(a, ix + 4, s.blend); -} - -AnnotatedTag Annotated_tag(Alloc a, AnnotatedRef ref) { - uint tag_and_flags = read_mem(a, ref.offset >> 2); - return AnnotatedTag(tag_and_flags & 0xffff, tag_and_flags >> 16); -} - -AnnoColor Annotated_Color_read(Alloc a, AnnotatedRef ref) { - return AnnoColor_read(a, AnnoColorRef(ref.offset + 4)); -} - -AnnoLinGradient Annotated_LinGradient_read(Alloc a, AnnotatedRef ref) { - return AnnoLinGradient_read(a, AnnoLinGradientRef(ref.offset + 4)); -} - -AnnoImage Annotated_Image_read(Alloc 
a, AnnotatedRef ref) { - return AnnoImage_read(a, AnnoImageRef(ref.offset + 4)); -} - -AnnoBeginClip Annotated_BeginClip_read(Alloc a, AnnotatedRef ref) { - return AnnoBeginClip_read(a, AnnoBeginClipRef(ref.offset + 4)); -} - -AnnoEndClip Annotated_EndClip_read(Alloc a, AnnotatedRef ref) { - return AnnoEndClip_read(a, AnnoEndClipRef(ref.offset + 4)); -} - -void Annotated_Nop_write(Alloc a, AnnotatedRef ref) { - write_mem(a, ref.offset >> 2, Annotated_Nop); -} - -void Annotated_Color_write(Alloc a, AnnotatedRef ref, uint flags, AnnoColor s) { - write_mem(a, ref.offset >> 2, (flags << 16) | Annotated_Color); - AnnoColor_write(a, AnnoColorRef(ref.offset + 4), s); -} - -void Annotated_LinGradient_write(Alloc a, AnnotatedRef ref, uint flags, AnnoLinGradient s) { - write_mem(a, ref.offset >> 2, (flags << 16) | Annotated_LinGradient); - AnnoLinGradient_write(a, AnnoLinGradientRef(ref.offset + 4), s); -} - -void Annotated_Image_write(Alloc a, AnnotatedRef ref, uint flags, AnnoImage s) { - write_mem(a, ref.offset >> 2, (flags << 16) | Annotated_Image); - AnnoImage_write(a, AnnoImageRef(ref.offset + 4), s); -} - -void Annotated_BeginClip_write(Alloc a, AnnotatedRef ref, uint flags, AnnoBeginClip s) { - write_mem(a, ref.offset >> 2, (flags << 16) | Annotated_BeginClip); - AnnoBeginClip_write(a, AnnoBeginClipRef(ref.offset + 4), s); -} - -void Annotated_EndClip_write(Alloc a, AnnotatedRef ref, uint flags, AnnoEndClip s) { - write_mem(a, ref.offset >> 2, (flags << 16) | Annotated_EndClip); - AnnoEndClip_write(a, AnnoEndClipRef(ref.offset + 4), s); -} - diff --git a/piet-gpu/shader/backdrop.comp b/piet-gpu/shader/backdrop.comp deleted file mode 100644 index 60f3783..0000000 --- a/piet-gpu/shader/backdrop.comp +++ /dev/null @@ -1,118 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense - -// Propagation of tile backdrop for filling. -// -// Each thread reads one path element and calculates the row and column counts of spanned tiles -// based on the bounding box. -// The row count then goes through a prefix sum to redistribute and load-balance the work across the workgroup. -// In the following step, the workgroup loops over the corresponding tile rows per element in parallel. -// For each row the per tile backdrop will be read, as calculated in the previous coarse path segment kernel, -// and propagated from the left to the right (prefix summed). -// -// Output state: -// - Each path element has an array of tiles covering the whole path based on bounding box -// - Each tile per path element contains the 'backdrop' and a list of subdivided path segments - -#version 450 -#extension GL_GOOGLE_include_directive : enable - -#include "mem.h" -#include "setup.h" - -#define LG_BACKDROP_WG (7 + LG_WG_FACTOR) -#define BACKDROP_WG (1 << LG_BACKDROP_WG) -#ifndef BACKDROP_DIST_FACTOR -// Some paths (those covering a large area) can generate a lot of backdrop tiles; BACKDROP_DIST_FACTOR defines how many -// additional threads we should spawn for parallel row processing. The additional threads do not participate in the -// earlier stages (calculating the tile counts) but do work in the final prefix sum stage which has a lot more -// parallelism. - -// This feature is opt-in: one variant is compiled with the following default, while the other variant is compiled with -// a larger BACKDROP_DIST_FACTOR, which is used on GPUs supporting a larger workgroup size to improve performance. 
-#define BACKDROP_DIST_FACTOR 1 -#endif - -layout(local_size_x = BACKDROP_WG, local_size_y = BACKDROP_DIST_FACTOR) in; - -layout(set = 0, binding = 1) readonly buffer ConfigBuf { - Config conf; -}; - -#include "tile.h" - -shared uint sh_row_count[BACKDROP_WG]; -shared Alloc sh_row_alloc[BACKDROP_WG]; -shared uint sh_row_width[BACKDROP_WG]; - -void main() { - if (!check_deps(STAGE_BINNING | STAGE_TILE_ALLOC | STAGE_PATH_COARSE)) { - return; - } - - uint th_ix = gl_LocalInvocationIndex; - uint element_ix = gl_GlobalInvocationID.x; - - // Work assignment: 1 thread : 1 path element - uint row_count = 0; - if (gl_LocalInvocationID.y == 0) { - if (element_ix < conf.n_elements) { - // Possible TODO: it's not necessary to process backdrops of stroked paths. - // We had logic for that but took it out because it used the Annotated struct. - PathRef path_ref = PathRef(conf.tile_alloc.offset + element_ix * Path_size); - Path path = Path_read(conf.tile_alloc, path_ref); - sh_row_width[th_ix] = path.bbox.z - path.bbox.x; - row_count = path.bbox.w - path.bbox.y; - // Paths that don't cross tile top edges don't have backdrops. - // Don't apply the optimization to paths that may cross the y = 0 - // top edge, but clipped to 1 row. - if (row_count == 1 && path.bbox.y > 0) { - // Note: this can probably be expanded to width = 2 as - // long as it doesn't cross the left edge. - row_count = 0; - } - Alloc path_alloc = new_alloc( - path.tiles.offset, (path.bbox.z - path.bbox.x) * (path.bbox.w - path.bbox.y) * Tile_size, true); - sh_row_alloc[th_ix] = path_alloc; - } - sh_row_count[th_ix] = row_count; - } - - // Prefix sum of sh_row_count - for (uint i = 0; i < LG_BACKDROP_WG; i++) { - barrier(); - if (gl_LocalInvocationID.y == 0 && th_ix >= (1u << i)) { - row_count += sh_row_count[th_ix - (1u << i)]; - } - barrier(); - if (gl_LocalInvocationID.y == 0) { - sh_row_count[th_ix] = row_count; - } - } - barrier(); - // Work assignment: 1 thread : 1 path element row - uint total_rows = sh_row_count[BACKDROP_WG - 1]; - for (uint row = th_ix; row < total_rows; row += BACKDROP_WG * BACKDROP_DIST_FACTOR) { - // Binary search to find element - uint el_ix = 0; - for (uint i = 0; i < LG_BACKDROP_WG; i++) { - uint probe = el_ix + (uint(BACKDROP_WG / 2) >> i); - if (row >= sh_row_count[probe - 1]) { - el_ix = probe; - } - } - uint width = sh_row_width[el_ix]; - if (width > 0) { - // Process one row sequentially - // Read backdrop value per tile and prefix sum it - Alloc tiles_alloc = sh_row_alloc[el_ix]; - uint seq_ix = row - (el_ix > 0 ? sh_row_count[el_ix - 1] : 0); - uint tile_el_ix = (tiles_alloc.offset >> 2) + 1 + seq_ix * 2 * width; - uint sum = read_mem(tiles_alloc, tile_el_ix); - for (uint x = 1; x < width; x++) { - tile_el_ix += 2; - sum += read_mem(tiles_alloc, tile_el_ix); - write_mem(tiles_alloc, tile_el_ix, sum); - } - } - } -} diff --git a/piet-gpu/shader/bbox_clear.comp b/piet-gpu/shader/bbox_clear.comp deleted file mode 100644 index 52577f9..0000000 --- a/piet-gpu/shader/bbox_clear.comp +++ /dev/null @@ -1,29 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense - -// Clear path bbox to prepare for atomic min/max. 
- -#version 450 -#extension GL_GOOGLE_include_directive : enable - -#include "mem.h" -#include "setup.h" - -#define LG_WG_SIZE 9 -#define WG_SIZE (1 << LG_WG_SIZE) - -layout(local_size_x = WG_SIZE, local_size_y = 1) in; - -layout(binding = 1) readonly buffer ConfigBuf { - Config conf; -}; - -void main() { - uint ix = gl_GlobalInvocationID.x; - if (ix < conf.n_path) { - uint out_ix = (conf.path_bbox_alloc.offset >> 2) + 6 * ix; - memory[out_ix] = 0xffff; - memory[out_ix + 1] = 0xffff; - memory[out_ix + 2] = 0; - memory[out_ix + 3] = 0; - } -} diff --git a/piet-gpu/shader/binning.comp b/piet-gpu/shader/binning.comp deleted file mode 100644 index 7485eee..0000000 --- a/piet-gpu/shader/binning.comp +++ /dev/null @@ -1,182 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense - -// The binning stage of the pipeline. -// -// Each workgroup processes N_TILE paths. -// Each thread processes one path and calculates a N_TILE_X x N_TILE_Y coverage mask -// based on the path bounding box to bin the paths. - -#version 450 -#extension GL_GOOGLE_include_directive : enable - -#include "mem.h" -#include "setup.h" - -layout(local_size_x = N_TILE, local_size_y = 1) in; - -layout(set = 0, binding = 1) readonly buffer ConfigBuf { - Config conf; -}; - -#include "bins.h" -#include "drawtag.h" - -// scale factors useful for converting coordinates to bins -#define SX (1.0 / float(N_TILE_X * TILE_WIDTH_PX)) -#define SY (1.0 / float(N_TILE_Y * TILE_HEIGHT_PX)) - -// Constant not available in GLSL. Also consider uintBitsToFloat(0x7f800000) -#define INFINITY (1.0 / 0.0) - -// Note: cudaraster has N_TILE + 1 to cut down on bank conflicts. -// Bitmaps are sliced (256bit into 8 (N_SLICE) 32bit submaps) -shared uint bitmaps[N_SLICE][N_TILE]; -shared uint count[N_SLICE][N_TILE]; -shared uint sh_chunk_offset[N_TILE]; - -DrawMonoid load_draw_monoid(uint element_ix) { - uint base = (conf.drawmonoid_alloc.offset >> 2) + 4 * element_ix; - uint path_ix = memory[base]; - uint clip_ix = memory[base + 1]; - uint scene_offset = memory[base + 2]; - uint info_offset = memory[base + 3]; - return DrawMonoid(path_ix, clip_ix, scene_offset, info_offset); -} - -// Load bounding box computed by clip processing -vec4 load_clip_bbox(uint clip_ix) { - uint base = (conf.clip_bbox_alloc.offset >> 2) + 4 * clip_ix; - float x0 = uintBitsToFloat(memory[base]); - float y0 = uintBitsToFloat(memory[base + 1]); - float x1 = uintBitsToFloat(memory[base + 2]); - float y1 = uintBitsToFloat(memory[base + 3]); - vec4 bbox = vec4(x0, y0, x1, y1); - return bbox; -} - -vec4 bbox_intersect(vec4 a, vec4 b) { - return vec4(max(a.xy, b.xy), min(a.zw, b.zw)); -} - -// Load path's bbox from bbox (as written by pathseg). 
-vec4 load_path_bbox(uint path_ix) { - uint base = (conf.path_bbox_alloc.offset >> 2) + 6 * path_ix; - float bbox_l = float(memory[base]) - 32768.0; - float bbox_t = float(memory[base + 1]) - 32768.0; - float bbox_r = float(memory[base + 2]) - 32768.0; - float bbox_b = float(memory[base + 3]) - 32768.0; - vec4 bbox = vec4(bbox_l, bbox_t, bbox_r, bbox_b); - return bbox; -} - -void store_draw_bbox(uint draw_ix, vec4 bbox) { - uint base = (conf.draw_bbox_alloc.offset >> 2) + 4 * draw_ix; - memory[base] = floatBitsToUint(bbox.x); - memory[base + 1] = floatBitsToUint(bbox.y); - memory[base + 2] = floatBitsToUint(bbox.z); - memory[base + 3] = floatBitsToUint(bbox.w); -} - -void main() { - uint my_partition = gl_WorkGroupID.x; - - for (uint i = 0; i < N_SLICE; i++) { - bitmaps[i][gl_LocalInvocationID.x] = 0; - } - - // Read inputs and determine coverage of bins - uint element_ix = my_partition * N_TILE + gl_LocalInvocationID.x; - int x0 = 0, y0 = 0, x1 = 0, y1 = 0; - if (element_ix < conf.n_elements) { - DrawMonoid draw_monoid = load_draw_monoid(element_ix); - uint path_ix = draw_monoid.path_ix; - vec4 clip_bbox = vec4(-1e9, -1e9, 1e9, 1e9); - uint clip_ix = draw_monoid.clip_ix; - if (clip_ix > 0) { - clip_bbox = load_clip_bbox(clip_ix - 1); - } - // For clip elements, clip_bbox is the bbox of the clip path, intersected - // with enclosing clips. - // For other elements, it is the bbox of the enclosing clips. - - vec4 path_bbox = load_path_bbox(path_ix); - vec4 bbox = bbox_intersect(path_bbox, clip_bbox); - // Avoid negative-size bbox (is this necessary)? - bbox.zw = max(bbox.xy, bbox.zw); - // Store clip-intersected bbox for tile_alloc. - store_draw_bbox(element_ix, bbox); - x0 = int(floor(bbox.x * SX)); - y0 = int(floor(bbox.y * SY)); - x1 = int(ceil(bbox.z * SX)); - y1 = int(ceil(bbox.w * SY)); - } - - // At this point, we run an iterator over the coverage area, - // trying to keep divergence low. - // Right now, it's just a bbox, but we'll get finer with - // segments. - uint width_in_bins = (conf.width_in_tiles + N_TILE_X - 1) / N_TILE_X; - uint height_in_bins = (conf.height_in_tiles + N_TILE_Y - 1) / N_TILE_Y; - x0 = clamp(x0, 0, int(width_in_bins)); - x1 = clamp(x1, x0, int(width_in_bins)); - y0 = clamp(y0, 0, int(height_in_bins)); - y1 = clamp(y1, y0, int(height_in_bins)); - if (x0 == x1) - y1 = y0; - int x = x0, y = y0; - uint my_slice = gl_LocalInvocationID.x / 32; - uint my_mask = 1u << (gl_LocalInvocationID.x & 31); - while (y < y1) { - atomicOr(bitmaps[my_slice][y * width_in_bins + x], my_mask); - x++; - if (x == x1) { - x = x0; - y++; - } - } - - barrier(); - // Allocate output segments. - uint element_count = 0; - for (uint i = 0; i < N_SLICE; i++) { - element_count += bitCount(bitmaps[i][gl_LocalInvocationID.x]); - count[i][gl_LocalInvocationID.x] = element_count; - } - // element_count is number of elements covering bin for this invocation. 
- uint chunk_offset = 0; - if (element_count != 0) { - chunk_offset = malloc_stage(element_count * BinInstance_size, conf.mem_size, STAGE_BINNING); - sh_chunk_offset[gl_LocalInvocationID.x] = chunk_offset; - } - // Note: it might be more efficient for reading to do this in the - // other order (each bin is a contiguous sequence of partitions) - uint out_ix = (conf.bin_alloc.offset >> 2) + (my_partition * N_TILE + gl_LocalInvocationID.x) * 2; - write_mem(conf.bin_alloc, out_ix, element_count); - write_mem(conf.bin_alloc, out_ix + 1, chunk_offset); - - barrier(); - - // Use similar strategy as Laine & Karras paper; loop over bbox of bins - // touched by this element - x = x0; - y = y0; - while (y < y1) { - uint bin_ix = y * width_in_bins + x; - uint out_mask = bitmaps[my_slice][bin_ix]; - if ((out_mask & my_mask) != 0) { - uint idx = bitCount(out_mask & (my_mask - 1)); - if (my_slice > 0) { - idx += count[my_slice - 1][bin_ix]; - } - uint chunk_offset = sh_chunk_offset[bin_ix]; - if (chunk_offset != MALLOC_FAILED) { - memory[(chunk_offset >> 2) + idx] = element_ix; - } - } - x++; - if (x == x1) { - x = x0; - y++; - } - } -} diff --git a/piet-gpu/shader/bins.h b/piet-gpu/shader/bins.h deleted file mode 100644 index 853adab..0000000 --- a/piet-gpu/shader/bins.h +++ /dev/null @@ -1,31 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense - -// Code auto-generated by piet-gpu-derive - -struct BinInstanceRef { - uint offset; -}; - -struct BinInstance { - uint element_ix; -}; - -#define BinInstance_size 4 - -BinInstanceRef BinInstance_index(BinInstanceRef ref, uint index) { - return BinInstanceRef(ref.offset + index * BinInstance_size); -} - -BinInstance BinInstance_read(Alloc a, BinInstanceRef ref) { - uint ix = ref.offset >> 2; - uint raw0 = read_mem(a, ix + 0); - BinInstance s; - s.element_ix = raw0; - return s; -} - -void BinInstance_write(Alloc a, BinInstanceRef ref, BinInstance s) { - uint ix = ref.offset >> 2; - write_mem(a, ix + 0, s.element_ix); -} - diff --git a/piet-gpu/shader/blend.h b/piet-gpu/shader/blend.h deleted file mode 100644 index 7366006..0000000 --- a/piet-gpu/shader/blend.h +++ /dev/null @@ -1,291 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense - -// Mode definitions and functions for blending and composition. 
- -#define Blend_Normal 0 -#define Blend_Multiply 1 -#define Blend_Screen 2 -#define Blend_Overlay 3 -#define Blend_Darken 4 -#define Blend_Lighten 5 -#define Blend_ColorDodge 6 -#define Blend_ColorBurn 7 -#define Blend_HardLight 8 -#define Blend_SoftLight 9 -#define Blend_Difference 10 -#define Blend_Exclusion 11 -#define Blend_Hue 12 -#define Blend_Saturation 13 -#define Blend_Color 14 -#define Blend_Luminosity 15 -#define Blend_Clip 128 - -vec3 screen(vec3 cb, vec3 cs) { - return cb + cs - (cb * cs); -} - -float color_dodge(float cb, float cs) { - if (cb == 0.0) - return 0.0; - else if (cs == 1.0) - return 1.0; - else - return min(1.0, cb / (1.0 - cs)); -} - -float color_burn(float cb, float cs) { - if (cb == 1.0) - return 1.0; - else if (cs == 0.0) - return 0.0; - else - return 1.0 - min(1.0, (1.0 - cb) / cs); -} - -vec3 hard_light(vec3 cb, vec3 cs) { - return mix( - screen(cb, 2.0 * cs - 1.0), - cb * 2.0 * cs, - lessThanEqual(cs, vec3(0.5)) - ); -} - -vec3 soft_light(vec3 cb, vec3 cs) { - vec3 d = mix( - sqrt(cb), - ((16.0 * cb - vec3(12.0)) * cb + vec3(4.0)) * cb, - lessThanEqual(cb, vec3(0.25)) - ); - return mix( - cb + (2.0 * cs - vec3(1.0)) * (d - cb), - cb - (vec3(1.0) - 2.0 * cs) * cb * (vec3(1.0) - cb), - lessThanEqual(cs, vec3(0.5)) - ); -} - -float sat(vec3 c) { - return max(c.r, max(c.g, c.b)) - min(c.r, min(c.g, c.b)); -} - -float lum(vec3 c) { - vec3 f = vec3(0.3, 0.59, 0.11); - return dot(c, f); -} - -vec3 clip_color(vec3 c) { - float L = lum(c); - float n = min(c.r, min(c.g, c.b)); - float x = max(c.r, max(c.g, c.b)); - if (n < 0.0) - c = L + (((c - L) * L) / (L - n)); - if (x > 1.0) - c = L + (((c - L) * (1.0 - L)) / (x - L)); - return c; -} - -vec3 set_lum(vec3 c, float l) { - return clip_color(c + (l - lum(c))); -} - -void set_sat_inner(inout float cmin, inout float cmid, inout float cmax, float s) { - if (cmax > cmin) { - cmid = (((cmid - cmin) * s) / (cmax - cmin)); - cmax = s; - } else { - cmid = 0.0; - cmax = 0.0; - } - cmin = 0.0; -} - -vec3 set_sat(vec3 c, float s) { - if (c.r <= c.g) { - if (c.g <= c.b) { - set_sat_inner(c.r, c.g, c.b, s); - } else { - if (c.r <= c.b) { - set_sat_inner(c.r, c.b, c.g, s); - } else { - set_sat_inner(c.b, c.r, c.g, s); - } - } - } else { - if (c.r <= c.b) { - set_sat_inner(c.g, c.r, c.b, s); - } else { - if (c.g <= c.b) { - set_sat_inner(c.g, c.b, c.r, s); - } else { - set_sat_inner(c.b, c.g, c.r, s); - } - } - } - return c; -} - -// Blends two RGB colors together. The colors are assumed to be in sRGB -// color space, and this function does not take alpha into account. 
-vec3 mix_blend(vec3 cb, vec3 cs, uint mode) { - vec3 b = vec3(0.0); - switch (mode) { - case Blend_Multiply: - b = cb * cs; - break; - case Blend_Screen: - b = screen(cb, cs); - break; - case Blend_Overlay: - b = hard_light(cs, cb); - break; - case Blend_Darken: - b = min(cb, cs); - break; - case Blend_Lighten: - b = max(cb, cs); - break; - case Blend_ColorDodge: - b = vec3(color_dodge(cb.x, cs.x), color_dodge(cb.y, cs.y), color_dodge(cb.z, cs.z)); - break; - case Blend_ColorBurn: - b = vec3(color_burn(cb.x, cs.x), color_burn(cb.y, cs.y), color_burn(cb.z, cs.z)); - break; - case Blend_HardLight: - b = hard_light(cb, cs); - break; - case Blend_SoftLight: - b = soft_light(cb, cs); - break; - case Blend_Difference: - b = abs(cb - cs); - break; - case Blend_Exclusion: - b = cb + cs - 2 * cb * cs; - break; - case Blend_Hue: - b = set_lum(set_sat(cs, sat(cb)), lum(cb)); - break; - case Blend_Saturation: - b = set_lum(set_sat(cb, sat(cs)), lum(cb)); - break; - case Blend_Color: - b = set_lum(cs, lum(cb)); - break; - case Blend_Luminosity: - b = set_lum(cb, lum(cs)); - break; - default: - b = cs; - break; - } - return b; -} - -#define Comp_Clear 0 -#define Comp_Copy 1 -#define Comp_Dest 2 -#define Comp_SrcOver 3 -#define Comp_DestOver 4 -#define Comp_SrcIn 5 -#define Comp_DestIn 6 -#define Comp_SrcOut 7 -#define Comp_DestOut 8 -#define Comp_SrcAtop 9 -#define Comp_DestAtop 10 -#define Comp_Xor 11 -#define Comp_Plus 12 -#define Comp_PlusLighter 13 - -// Apply general compositing operation. -// Inputs are separated colors and alpha, output is premultiplied. -vec4 mix_compose(vec3 cb, vec3 cs, float ab, float as, uint mode) { - float fa = 0.0; - float fb = 0.0; - switch (mode) { - case Comp_Copy: - fa = 1.0; - fb = 0.0; - break; - case Comp_Dest: - fa = 0.0; - fb = 1.0; - break; - case Comp_SrcOver: - fa = 1.0; - fb = 1.0 - as; - break; - case Comp_DestOver: - fa = 1.0 - ab; - fb = 1.0; - break; - case Comp_SrcIn: - fa = ab; - fb = 0.0; - break; - case Comp_DestIn: - fa = 0.0; - fb = as; - break; - case Comp_SrcOut: - fa = 1.0 - ab; - fb = 0.0; - break; - case Comp_DestOut: - fa = 0.0; - fb = 1.0 - as; - break; - case Comp_SrcAtop: - fa = ab; - fb = 1.0 - as; - break; - case Comp_DestAtop: - fa = 1.0 - ab; - fb = as; - break; - case Comp_Xor: - fa = 1.0 - ab; - fb = 1.0 - as; - break; - case Comp_Plus: - fa = 1.0; - fb = 1.0; - break; - case Comp_PlusLighter: - return min(vec4(1.0), vec4(as * cs + ab * cb, as + ab)); - default: - break; - } - float as_fa = as * fa; - float ab_fb = ab * fb; - vec3 co = as_fa * cs + ab_fb * cb; - return vec4(co, as_fa + ab_fb); -} - -#define BlendComp_default (Blend_Normal << 8 | Comp_SrcOver) -#define BlendComp_clip (Blend_Clip << 8 | Comp_SrcOver) - -// This is added to alpha to prevent divide-by-zero -#define EPSILON 1e-15 - -// Apply blending and composition. Both input and output colors are -// premultiplied RGB. 
-vec4 mix_blend_compose(vec4 backdrop, vec4 src, uint mode) { - if ((mode & 0x7fff) == BlendComp_default) { - // Both normal+src_over blend and clip case - return backdrop * (1.0 - src.a) + src; - } - // Un-premultiply colors for blending - float inv_src_a = 1.0 / (src.a + EPSILON); - vec3 cs = src.rgb * inv_src_a; - float inv_backdrop_a = 1.0 / (backdrop.a + EPSILON); - vec3 cb = backdrop.rgb * inv_backdrop_a; - uint blend_mode = mode >> 8; - vec3 blended = mix_blend(cb, cs, blend_mode); - cs = mix(cs, blended, backdrop.a); - uint comp_mode = mode & 0xff; - if (comp_mode == Comp_SrcOver) { - vec3 co = mix(backdrop.rgb, cs, src.a); - return vec4(co, src.a + backdrop.a * (1 - src.a)); - } else { - return mix_compose(cb, cs, backdrop.a, src.a, comp_mode); - } -} diff --git a/piet-gpu/shader/build.ninja b/piet-gpu/shader/build.ninja deleted file mode 100644 index 079c0e7..0000000 --- a/piet-gpu/shader/build.ninja +++ /dev/null @@ -1,118 +0,0 @@ -# Build file for shaders. - -# You must have Vulkan tools in your path, or patch here. - -glslang_validator = glslangValidator -spirv_cross = spirv-cross -dxc = dxc - -# See https://github.com/KhronosGroup/SPIRV-Cross/issues/1248 for -# why we set this. -msl_flags = --msl-decoration-binding - -rule glsl - command = $glslang_validator $flags -V -o $out $in - -rule hlsl - command = $spirv_cross --hlsl --shader-model 60 $in --output $out - -rule dxil - command = $dxc -T cs_6_0 $in -Fo $out - -rule msl - command = $spirv_cross --msl $in --output $out $msl_flags - -build gen/binning.spv: glsl binning.comp | bins.h drawtag.h setup.h mem.h -build gen/binning.hlsl: hlsl gen/binning.spv -build gen/binning.dxil: dxil gen/binning.hlsl -build gen/binning.msl: msl gen/binning.spv - -build gen/tile_alloc.spv: glsl tile_alloc.comp | drawtag.h tile.h setup.h mem.h -build gen/tile_alloc.hlsl: hlsl gen/tile_alloc.spv -build gen/tile_alloc.dxil: dxil gen/tile_alloc.hlsl -build gen/tile_alloc.msl: msl gen/tile_alloc.spv - -build gen/path_coarse.spv: glsl path_coarse.comp | pathseg.h tile.h setup.h mem.h -build gen/path_coarse.hlsl: hlsl gen/path_coarse.spv -build gen/path_coarse.dxil: dxil gen/path_coarse.hlsl -build gen/path_coarse.msl: msl gen/path_coarse.spv - -build gen/backdrop.spv: glsl backdrop.comp | tile.h setup.h mem.h -build gen/backdrop.hlsl: hlsl gen/backdrop.spv -build gen/backdrop.dxil: dxil gen/backdrop.hlsl -build gen/backdrop.msl: msl gen/backdrop.spv - -build gen/backdrop_lg.spv: glsl backdrop.comp | tile.h setup.h mem.h - flags = -DBACKDROP_DIST_FACTOR=4 -build gen/backdrop_lg.hlsl: hlsl gen/backdrop_lg.spv -build gen/backdrop_lg.dxil: dxil gen/backdrop_lg.hlsl -build gen/backdrop_lg.msl: msl gen/backdrop_lg.spv - -build gen/coarse.spv: glsl coarse.comp | drawtag.h bins.h ptcl.h blend.h setup.h mem.h -build gen/coarse.hlsl: hlsl gen/coarse.spv -build gen/coarse.dxil: dxil gen/coarse.hlsl -build gen/coarse.msl: msl gen/coarse.spv - -build gen/kernel4.spv: glsl kernel4.comp | blend.h ptcl.h setup.h mem.h -build gen/kernel4.hlsl: hlsl gen/kernel4.spv -build gen/kernel4.dxil: dxil gen/kernel4.hlsl -build gen/kernel4.msl: msl gen/kernel4.spv - -build gen/kernel4_gray.spv: glsl kernel4.comp | blend.h ptcl.h setup.h mem.h - flags = -DGRAY -build gen/kernel4_gray.hlsl: hlsl gen/kernel4_gray.spv -build gen/kernel4_gray.dxil: dxil gen/kernel4_gray.hlsl -build gen/kernel4_gray.msl: msl gen/kernel4_gray.spv - -# New element pipeline follows - -build gen/pathtag_reduce.spv: glsl pathtag_reduce.comp | pathtag.h setup.h mem.h -build gen/pathtag_reduce.hlsl: hlsl 
gen/pathtag_reduce.spv -build gen/pathtag_reduce.dxil: dxil gen/pathtag_reduce.hlsl -build gen/pathtag_reduce.msl: msl gen/pathtag_reduce.spv - -build gen/pathtag_root.spv: glsl pathtag_scan.comp | pathtag.h setup.h - flags = -DROOT -build gen/pathtag_root.hlsl: hlsl gen/pathtag_root.spv -build gen/pathtag_root.dxil: dxil gen/pathtag_root.hlsl -build gen/pathtag_root.msl: msl gen/pathtag_root.spv - -build gen/bbox_clear.spv: glsl bbox_clear.comp | setup.h mem.h -build gen/bbox_clear.hlsl: hlsl gen/bbox_clear.spv -build gen/bbox_clear.dxil: dxil gen/bbox_clear.hlsl -build gen/bbox_clear.msl: msl gen/bbox_clear.spv - -build gen/pathseg.spv: glsl pathseg.comp | scene.h tile.h pathseg.h pathtag.h setup.h mem.h -build gen/pathseg.hlsl: hlsl gen/pathseg.spv -build gen/pathseg.dxil: dxil gen/pathseg.hlsl -build gen/pathseg.msl: msl gen/pathseg.spv - -build gen/draw_reduce.spv: glsl draw_reduce.comp | scene.h drawtag.h setup.h mem.h -build gen/draw_reduce.hlsl: hlsl gen/draw_reduce.spv -build gen/draw_reduce.dxil: dxil gen/draw_reduce.hlsl -build gen/draw_reduce.msl: msl gen/draw_reduce.spv - -build gen/draw_root.spv: glsl draw_scan.comp | drawtag.h setup.h - flags = -DROOT -build gen/draw_root.hlsl: hlsl gen/draw_root.spv -build gen/draw_root.dxil: dxil gen/draw_root.hlsl -build gen/draw_root.msl: msl gen/draw_root.spv - -build gen/draw_leaf.spv: glsl draw_leaf.comp | blend.h scene.h drawtag.h setup.h mem.h -build gen/draw_leaf.hlsl: hlsl gen/draw_leaf.spv -build gen/draw_leaf.dxil: dxil gen/draw_leaf.hlsl -build gen/draw_leaf.msl: msl gen/draw_leaf.spv - -build gen/clip_reduce.spv: glsl clip_reduce.comp | mem.h setup.h -build gen/clip_reduce.hlsl: hlsl gen/clip_reduce.spv -build gen/clip_reduce.dxil: dxil gen/clip_reduce.hlsl -build gen/clip_reduce.msl: msl gen/clip_reduce.spv - -build gen/clip_leaf.spv: glsl clip_leaf.comp | mem.h setup.h -build gen/clip_leaf.hlsl: hlsl gen/clip_leaf.spv -build gen/clip_leaf.dxil: dxil gen/clip_leaf.hlsl -build gen/clip_leaf.msl: msl gen/clip_leaf.spv - -build spv: phony gen/backdrop_lg.spv gen/backdrop.spv gen/bbox_clear.spv gen/binning.spv gen/clip_leaf.spv gen/clip_reduce.spv gen/coarse.spv gen/draw_leaf.spv gen/draw_reduce.spv gen/draw_root.spv gen/kernel4.spv gen/kernel4_gray.spv gen/path_coarse.spv gen/pathseg.spv gen/pathtag_reduce.spv gen/pathtag_root.spv gen/tile_alloc.spv -build dxil: phony gen/backdrop.hlsl gen/backdrop_lg.hlsl gen/bbox_clear.hlsl gen/binning.hlsl gen/clip_leaf.hlsl gen/clip_reduce.hlsl gen/coarse.hlsl gen/draw_leaf.hlsl gen/draw_reduce.hlsl gen/draw_root.hlsl gen/kernel4.hlsl gen/kernel4_gray.hlsl gen/path_coarse.hlsl gen/pathseg.hlsl gen/pathtag_reduce.hlsl gen/pathtag_root.hlsl gen/tile_alloc.hlsl -build msl: phony gen/backdrop_lg.msl gen/backdrop.msl gen/bbox_clear.msl gen/binning.msl gen/clip_leaf.msl gen/clip_reduce.msl gen/coarse.msl gen/draw_leaf.msl gen/draw_reduce.msl gen/draw_root.msl gen/kernel4.msl gen/kernel4_gray.msl gen/path_coarse.msl gen/pathseg.msl gen/pathtag_reduce.msl gen/pathtag_root.msl gen/tile_alloc.msl diff --git a/piet-gpu/shader/clip_leaf.comp b/piet-gpu/shader/clip_leaf.comp deleted file mode 100644 index 5353b0b..0000000 --- a/piet-gpu/shader/clip_leaf.comp +++ /dev/null @@ -1,285 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense - -// The second dispatch of clip stack processing. 
- -#version 450 -#extension GL_GOOGLE_include_directive : enable - -#include "mem.h" -#include "setup.h" - -#define LG_WG_SIZE (7 + LG_WG_FACTOR) -#define WG_SIZE (1 << LG_WG_SIZE) -#define PARTITION_SIZE WG_SIZE - -layout(local_size_x = WG_SIZE) in; - -layout(binding = 1) readonly buffer ConfigBuf { - Config conf; -}; - -// Some of this is cut'n'paste duplication with the reduce pass, and -// arguably should be moved to a common .h file. -// The bicyclic monoid - -struct ClipEl { - // index of parent node - uint parent_ix; - // bounding box - vec4 bbox; -}; - -struct Bic { - uint a; - uint b; -}; - -Bic bic_combine(Bic x, Bic y) { - uint m = min(x.b, y.a); - return Bic(x.a + y.a - m, x.b + y.b - m); -} - -// Load path's bbox from bbox (as written by pathseg). -vec4 load_path_bbox(uint path_ix) { - uint base = (conf.path_bbox_alloc.offset >> 2) + 6 * path_ix; - float bbox_l = float(memory[base]) - 32768.0; - float bbox_t = float(memory[base + 1]) - 32768.0; - float bbox_r = float(memory[base + 2]) - 32768.0; - float bbox_b = float(memory[base + 3]) - 32768.0; - vec4 bbox = vec4(bbox_l, bbox_t, bbox_r, bbox_b); - return bbox; -} - -vec4 bbox_intersect(vec4 a, vec4 b) { - return vec4(max(a.xy, b.xy), min(a.zw, b.zw)); -} - -shared Bic sh_bic[WG_SIZE * 2 - 2]; -shared uint sh_stack[PARTITION_SIZE]; -shared vec4 sh_stack_bbox[PARTITION_SIZE]; -shared uint sh_link[PARTITION_SIZE]; -shared vec4 sh_bbox[PARTITION_SIZE]; - -// This is adapted directly from the stack monoid impl. -// Return value is reference within partition if >= 0, -// otherwise reference to stack. -uint search_link(inout Bic bic) { - uint ix = gl_LocalInvocationID.x; - uint j = 0; - while (j < LG_WG_SIZE) { - uint base = 2 * WG_SIZE - (2u << (LG_WG_SIZE - j)); - if (((ix >> j) & 1) != 0) { - Bic test = bic_combine(sh_bic[base + (ix >> j) - 1], bic); - if (test.b > 0) { - break; - } - bic = test; - ix -= 1u << j; - } - j++; - } - if (ix > 0) { - while (j > 0) { - j--; - uint base = 2 * WG_SIZE - (2u << (LG_WG_SIZE - j)); - Bic test = bic_combine(sh_bic[base + (ix >> j) - 1], bic); - if (test.b == 0) { - bic = test; - ix -= 1u << j; - } - } - } - // ix is the smallest value such that reduce(ix..th).b == 0 - if (ix > 0) { - return ix - 1; - } else { - return ~0u - bic.a; - } -} - -Bic load_bic(uint ix) { - uint base = (conf.clip_bic_alloc.offset >> 2) + 2 * ix; - return Bic(memory[base], memory[base + 1]); -} - -ClipEl load_clip_el(uint ix) { - uint base = (conf.clip_stack_alloc.offset >> 2) + 5 * ix; - uint parent_ix = memory[base]; - float x0 = uintBitsToFloat(memory[base + 1]); - float y0 = uintBitsToFloat(memory[base + 2]); - float x1 = uintBitsToFloat(memory[base + 3]); - float y1 = uintBitsToFloat(memory[base + 4]); - vec4 bbox = vec4(x0, y0, x1, y1); - return ClipEl(parent_ix, bbox); -} - -uint load_path_ix(uint ix) { - // This is one approach to a partial final block. Another would be - // to do a memset to the padding in the command queue. - if (ix < conf.n_clip) { - return memory[(conf.clip_alloc.offset >> 2) + ix]; - } else { - // EndClip tags don't implicate further loads. - return 0x80000000; - } -} - -void store_clip_bbox(uint ix, vec4 bbox) { - uint base = (conf.clip_bbox_alloc.offset >> 2) + 4 * ix; - memory[base] = floatBitsToUint(bbox.x); - memory[base + 1] = floatBitsToUint(bbox.y); - memory[base + 2] = floatBitsToUint(bbox.z); - memory[base + 3] = floatBitsToUint(bbox.w); -} - -void main() { - // materialize stack up to the start of this partition. 
This - // is based on the pure stack monoid, but with two additions. - - // First, (this only matters if the stack goes deeper than the - // partition size, which might be unlikely in practice), the - // topmost stack element from each partition is picked, then an - // exclusive scan of those. Also note that if this is skipped, - // a scan is not needed in the reduce stage. - - // Second, after the stream compaction, do a scan of the retrieved - // bbox values. - uint th = gl_LocalInvocationID.x; - Bic bic = Bic(0, 0); - if (th < gl_WorkGroupID.x) { - bic = load_bic(th); - } - sh_bic[th] = bic; - for (uint i = 0; i < LG_WG_SIZE; i++) { - barrier(); - if (th + (1u << i) < WG_SIZE) { - Bic other = sh_bic[th + (1u << i)]; - bic = bic_combine(bic, other); - } - barrier(); - sh_bic[th] = bic; - } - barrier(); - uint stack_size = sh_bic[0].b; - - // TODO: do bbox scan here (to unlock greater stack depth) - - // binary search in stack - uint sp = PARTITION_SIZE - 1 - th; - uint ix = 0; - for (uint i = 0; i < LG_WG_SIZE; i++) { - uint probe = ix + (uint(PARTITION_SIZE / 2) >> i); - if (sp < sh_bic[probe].b) { - ix = probe; - } - } - // ix is largest value such that sp < sh_bic[ix].b (if any) - uint b = sh_bic[ix].b; - vec4 bbox = vec4(-1e9, -1e9, 1e9, 1e9); - if (sp < b) { - // maybe store the index here for future use? - ClipEl el = load_clip_el(ix * PARTITION_SIZE + b - sp - 1); - sh_stack[th] = el.parent_ix; - bbox = el.bbox; - // other element values here? - } - - // forward scan of bbox values of prefix stack - for (uint i = 0; i < LG_WG_SIZE; i++) { - sh_stack_bbox[th] = bbox; - barrier(); - if (th >= (1u << i)) { - bbox = bbox_intersect(sh_stack_bbox[th - (1u << i)], bbox); - } - barrier(); - } - sh_stack_bbox[th] = bbox; - - // Read input and compute bicyclic semigroup binary tree - uint inp = load_path_ix(gl_GlobalInvocationID.x); - bool is_push = int(inp) >= 0; - bic = Bic(1 - uint(is_push), uint(is_push)); - sh_bic[th] = bic; - if (is_push) { - bbox = load_path_bbox(inp); - } else { - bbox = vec4(-1e9, -1e9, 1e9, 1e9); - } - uint inbase = 0; - for (uint i = 0; i < LG_WG_SIZE - 1; i++) { - uint outbase = 2 * WG_SIZE - (1u << (LG_WG_SIZE - i)); - barrier(); - if (th < (1u << (LG_WG_SIZE - 1 - i))) { - sh_bic[outbase + th] = bic_combine(sh_bic[inbase + th * 2], sh_bic[inbase + th * 2 + 1]); - } - inbase = outbase; - } - barrier(); - // Search for predecessor node - bic = Bic(0, 0); - uint link = search_link(bic); - // we use N_SEQ > 1 convention here: - // link >= 0 is index within partition - // link < 0 is reference to stack - - // We want grandparent bbox for pop nodes, so follow those links. - sh_link[th] = link; - barrier(); - uint grandparent; - if (int(link) >= 0) { - grandparent = sh_link[link]; - } else { - grandparent = link - 1; - } - - // Resolve parent - uint parent; - if (int(link) >= 0) { - parent = gl_WorkGroupID.x * PARTITION_SIZE + link; - } else if (int(link + stack_size) >= 0) { - parent = sh_stack[PARTITION_SIZE + link]; - } else { - parent = ~0u; - } - - // bbox scan along parent links - for (uint i = 0; i < LG_WG_SIZE; i++) { - // sh_link was already stored for first iteration - if (i != 0) { - sh_link[th] = link; - } - sh_bbox[th] = bbox; - barrier(); - if (int(link) >= 0) { - bbox = bbox_intersect(sh_bbox[link], bbox); - link = sh_link[link]; - } - barrier(); - } - if (int(link + stack_size) >= 0) { - bbox = bbox_intersect(sh_stack_bbox[PARTITION_SIZE + link], bbox); - } - // At this point, bbox is the reduction of bounding boxes along the tree. 
- sh_bbox[th] = bbox; - barrier(); - - uint path_ix = inp; - if (!is_push && gl_GlobalInvocationID.x < conf.n_clip) { - // Is this load expensive? If so, it's loaded earlier for in-partition - // and is in the ClipEl for cross-partition. - // If not, can probably get rid of it in the stack intermediate buf. - path_ix = load_path_ix(parent); - uint drawmonoid_out_base = (conf.drawmonoid_alloc.offset >> 2) + 4 * ~inp; - // Fix up drawmonoid so path_ix at EndClip matches BeginClip - memory[drawmonoid_out_base] = path_ix; - - if (int(grandparent) >= 0) { - bbox = sh_bbox[grandparent]; - } else if (int(grandparent + stack_size) >= 0) { - bbox = sh_stack_bbox[PARTITION_SIZE + grandparent]; - } else { - bbox = vec4(-1e9, -1e9, 1e9, 1e9); - } - } - store_clip_bbox(gl_GlobalInvocationID.x, bbox); -} diff --git a/piet-gpu/shader/clip_reduce.comp b/piet-gpu/shader/clip_reduce.comp deleted file mode 100644 index 8b247ab..0000000 --- a/piet-gpu/shader/clip_reduce.comp +++ /dev/null @@ -1,146 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense - -// The reduce pass for clip stack processing. - -// The primary input is a sequence of path ids representing paths to -// push, with a special value of ~0 to represent pop. - -// For each path, the bounding box is found in the anno stream -// (anno_alloc), though this may change. - -// Output is a stack monoid reduction for the partition. The Bic -// is stored in the BicBuf, and the stack slice in StackBuf. - -// Note: for this shader, only pushes are represented in the stack -// monoid reduction output, so we don't have to worry about the -// interpretation of pops. - -#version 450 -#extension GL_GOOGLE_include_directive : enable - -#include "mem.h" -#include "setup.h" - -#define LG_WG_SIZE (7 + LG_WG_FACTOR) -#define WG_SIZE (1 << LG_WG_SIZE) -#define PARTITION_SIZE WG_SIZE - -layout(local_size_x = WG_SIZE) in; - -layout(binding = 1) readonly buffer ConfigBuf { - Config conf; -}; - -// The intermediate state for clip processing. -struct ClipEl { - // index of parent node - uint parent_ix; - // bounding box - vec4 bbox; -}; - -// The bicyclic monoid -struct Bic { - uint a; - uint b; -}; - -Bic bic_combine(Bic x, Bic y) { - uint m = min(x.b, y.a); - return Bic(x.a + y.a - m, x.b + y.b - m); -} - -shared Bic sh_bic[WG_SIZE]; -shared uint sh_parent[WG_SIZE]; -shared uint sh_path_ix[WG_SIZE]; -shared vec4 sh_bbox[WG_SIZE]; - -// Load path's bbox from bbox (as written by pathseg). 
-vec4 load_path_bbox(uint path_ix) { - uint base = (conf.path_bbox_alloc.offset >> 2) + 6 * path_ix; - float bbox_l = float(memory[base]) - 32768.0; - float bbox_t = float(memory[base + 1]) - 32768.0; - float bbox_r = float(memory[base + 2]) - 32768.0; - float bbox_b = float(memory[base + 3]) - 32768.0; - vec4 bbox = vec4(bbox_l, bbox_t, bbox_r, bbox_b); - return bbox; -} - -vec4 bbox_intersect(vec4 a, vec4 b) { - return vec4(max(a.xy, b.xy), min(a.zw, b.zw)); -} - -void store_bic(uint ix, Bic bic) { - uint base = (conf.clip_bic_alloc.offset >> 2) + 2 * ix; - memory[base] = bic.a; - memory[base + 1] = bic.b; -} - -void store_clip_el(uint ix, ClipEl el) { - uint base = (conf.clip_stack_alloc.offset >> 2) + 5 * ix; - memory[base] = el.parent_ix; - memory[base + 1] = floatBitsToUint(el.bbox.x); - memory[base + 2] = floatBitsToUint(el.bbox.y); - memory[base + 3] = floatBitsToUint(el.bbox.z); - memory[base + 4] = floatBitsToUint(el.bbox.w); -} - -void main() { - uint th = gl_LocalInvocationID.x; - uint inp = memory[(conf.clip_alloc.offset >> 2) + gl_GlobalInvocationID.x]; - bool is_push = int(inp) >= 0; - // reverse scan of bicyclic semigroup - Bic bic = Bic(1 - uint(is_push), uint(is_push)); - sh_bic[gl_LocalInvocationID.x] = bic; - for (uint i = 0; i < LG_WG_SIZE; i++) { - barrier(); - if (th + (1u << i) < WG_SIZE) { - Bic other = sh_bic[gl_LocalInvocationID.x + (1u << i)]; - bic = bic_combine(bic, other); - } - barrier(); - sh_bic[th] = bic; - } - if (th == 0) { - store_bic(gl_WorkGroupID.x, bic); - } - barrier(); - uint size = sh_bic[0].b; - bic = Bic(0, 0); - if (th + 1 < WG_SIZE) { - bic = sh_bic[th + 1]; - } - if (is_push && bic.a == 0) { - uint local_ix = size - bic.b - 1; - sh_parent[local_ix] = th; - sh_path_ix[local_ix] = inp; - } - barrier(); - // Do forward scan of bounding box intersection - vec4 bbox; - uint path_ix; - if (th < size) { - path_ix = sh_path_ix[th]; - bbox = load_path_bbox(path_ix); - } - // Not necessary if depth is bounded by wg size -#if 0 - for (uint i = 0; i < LG_WG_SIZE; i++) { - // We gate so we never access uninit data, but it might - // be more efficient to avoid the conditionals. - if (th < size) { - sh_bbox[th] = bbox; - } - barrier(); - if (th < size && th >= (1u << i)) { - bbox = bbox_intersect(sh_bbox[th - (1u << i)], bbox); - } - barrier(); - } -#endif - if (th < size) { - uint parent_ix = sh_parent[th] + gl_WorkGroupID.x * PARTITION_SIZE; - ClipEl el = ClipEl(parent_ix, bbox); - store_clip_el(gl_GlobalInvocationID.x, el); - } -} diff --git a/piet-gpu/shader/coarse.comp b/piet-gpu/shader/coarse.comp deleted file mode 100644 index edc61b2..0000000 --- a/piet-gpu/shader/coarse.comp +++ /dev/null @@ -1,480 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense - -// The coarse rasterizer stage of the pipeline. -// -// As input we have the ordered partitions of paths from the binning phase and -// the annotated tile list of segments and backdrop per path. -// -// Each workgroup operating on one bin by stream compacting -// the elements corresponding to the bin. -// -// As output we have an ordered command stream per tile. Every tile from a path (backdrop + segment list) will be -// encoded. 
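For reference, the bicyclic semigroup ("stack monoid") element that clip_reduce and clip_leaf above scan over, together with the bbox intersection carried along the stack, can be sketched on the CPU as follows. This is an illustrative Rust translation of `Bic`, `bic_combine` and `bbox_intersect`, not part of the patch:

    #[derive(Clone, Copy, Debug, PartialEq)]
    struct Bic {
        a: u32, // unmatched pops in the covered range
        b: u32, // unmatched pushes in the covered range
    }

    fn bic_combine(x: Bic, y: Bic) -> Bic {
        // Pushes left open by the left range are cancelled by pops
        // at the start of the right range.
        let m = x.b.min(y.a);
        Bic { a: x.a + y.a - m, b: x.b + y.b - m }
    }

    // Bounding boxes are (x0, y0, x1, y1); intersection takes the max of the
    // mins and the min of the maxes, as bbox_intersect does in the shader.
    fn bbox_intersect(a: [f32; 4], b: [f32; 4]) -> [f32; 4] {
        [a[0].max(b[0]), a[1].max(b[1]), a[2].min(b[2]), a[3].min(b[3])]
    }

A push maps to Bic { a: 0, b: 1 } and a pop to Bic { a: 1, b: 0 }, so the `b` field of the full reduction is exactly the stack depth a partition leaves open, which is why clip_leaf reads the stack size from sh_bic[0].b.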
- -#version 450 -#extension GL_GOOGLE_include_directive : enable - -#include "mem.h" -#include "setup.h" - -layout(local_size_x = N_TILE, local_size_y = 1) in; - -layout(binding = 1) readonly buffer ConfigBuf { - Config conf; -}; - -layout(binding = 2) readonly buffer SceneBuf { - uint[] scene; -}; - -#include "drawtag.h" -#include "bins.h" -#include "tile.h" -#include "ptcl.h" -#include "blend.h" - -#define LG_N_PART_READ (7 + LG_WG_FACTOR) -#define N_PART_READ (1 << LG_N_PART_READ) - -shared uint sh_elements[N_TILE]; - -// Number of elements in the partition; prefix sum. -shared uint sh_part_count[N_PART_READ]; -shared Alloc sh_part_elements[N_PART_READ]; - -shared uint sh_bitmaps[N_SLICE][N_TILE]; - -shared uint sh_tile_count[N_TILE]; -// The width of the tile rect for the element, intersected with this bin -shared uint sh_tile_width[N_TILE]; -shared uint sh_tile_x0[N_TILE]; -shared uint sh_tile_y0[N_TILE]; - -// These are set up so base + tile_y * stride + tile_x points to a Tile. -shared uint sh_tile_base[N_TILE]; -shared uint sh_tile_stride[N_TILE]; - -#ifdef MEM_DEBUG -// Store allocs only when MEM_DEBUG to save shared memory traffic. -shared Alloc sh_tile_alloc[N_TILE]; - -void write_tile_alloc(uint el_ix, Alloc a) { - sh_tile_alloc[el_ix] = a; -} - -Alloc read_tile_alloc(uint el_ix, bool mem_ok) { - return sh_tile_alloc[el_ix]; -} -#else -void write_tile_alloc(uint el_ix, Alloc a) { - // No-op -} - -Alloc read_tile_alloc(uint el_ix, bool mem_ok) { - // All memory. - return new_alloc(0, conf.mem_size, mem_ok); -} -#endif - -// The maximum number of commands per annotated element. -#define ANNO_COMMANDS 2 - -// All writes to the output must be gated by mem_ok. -bool mem_ok = true; - -// Perhaps cmd allocations should be a global? This is a style question. -void alloc_cmd(inout Alloc cmd_alloc, inout CmdRef cmd_ref, inout uint cmd_limit) { - if (cmd_ref.offset < cmd_limit) { - return; - } - uint new_cmd = malloc_stage(PTCL_INITIAL_ALLOC, conf.mem_size, STAGE_COARSE); - if (new_cmd == MALLOC_FAILED) { - mem_ok = false; - } - if (mem_ok) { - CmdJump jump = CmdJump(new_cmd); - Cmd_Jump_write(cmd_alloc, cmd_ref, jump); - } - cmd_alloc = new_alloc(new_cmd, PTCL_INITIAL_ALLOC, true); - cmd_ref = CmdRef(new_cmd); - // Reserve space for the maximum number of commands and a potential jump. - cmd_limit = new_cmd + PTCL_INITIAL_ALLOC - (ANNO_COMMANDS + 1) * Cmd_size; -} - -void write_fill(Alloc alloc, inout CmdRef cmd_ref, Tile tile, float linewidth) { - if (linewidth < 0.0) { - if (tile.tile.offset != 0) { - CmdFill cmd_fill = CmdFill(tile.tile.offset, tile.backdrop); - if (mem_ok) { - Cmd_Fill_write(alloc, cmd_ref, cmd_fill); - } - cmd_ref.offset += 4 + CmdFill_size; - } else { - if (mem_ok) { - Cmd_Solid_write(alloc, cmd_ref); - } - cmd_ref.offset += 4; - } - } else { - CmdStroke cmd_stroke = CmdStroke(tile.tile.offset, 0.5 * linewidth); - if (mem_ok) { - Cmd_Stroke_write(alloc, cmd_ref, cmd_stroke); - } - cmd_ref.offset += 4 + CmdStroke_size; - } -} - -void main() { - if (!check_deps(STAGE_BINNING | STAGE_TILE_ALLOC | STAGE_PATH_COARSE)) { - return; - } - // Could use either linear or 2d layouts for both dispatch and - // invocations within the workgroup. We'll use variables to abstract. 
- uint width_in_bins = (conf.width_in_tiles + N_TILE_X - 1) / N_TILE_X; - uint bin_ix = width_in_bins * gl_WorkGroupID.y + gl_WorkGroupID.x; - uint partition_ix = 0; - uint n_partitions = (conf.n_elements + N_TILE - 1) / N_TILE; - uint th_ix = gl_LocalInvocationID.x; - - // Coordinates of top left of bin, in tiles. - uint bin_tile_x = N_TILE_X * gl_WorkGroupID.x; - uint bin_tile_y = N_TILE_Y * gl_WorkGroupID.y; - - // Per-tile state - uint tile_x = gl_LocalInvocationID.x % N_TILE_X; - uint tile_y = gl_LocalInvocationID.x / N_TILE_X; - uint this_tile_ix = (bin_tile_y + tile_y) * conf.width_in_tiles + bin_tile_x + tile_x; - Alloc cmd_alloc = slice_mem(conf.ptcl_alloc, this_tile_ix * PTCL_INITIAL_ALLOC, PTCL_INITIAL_ALLOC); - CmdRef cmd_ref = CmdRef(cmd_alloc.offset); - // Reserve space for the maximum number of commands and a potential jump. - uint cmd_limit = cmd_ref.offset + PTCL_INITIAL_ALLOC - (ANNO_COMMANDS + 1) * Cmd_size; - // The nesting depth of the clip stack - uint clip_depth = 0; - // State for the "clip zero" optimization. If it's nonzero, then we are - // currently in a clip for which the entire tile has an alpha of zero, and - // the value is the depth after the "begin clip" of that element. - uint clip_zero_depth = 0; - - // I'm sure we can figure out how to do this with at least one fewer register... - // Items up to rd_ix have been read from sh_elements - uint rd_ix = 0; - // Items up to wr_ix have been written into sh_elements - uint wr_ix = 0; - // Items between part_start_ix and ready_ix are ready to be transferred from sh_part_elements - uint part_start_ix = 0; - uint ready_ix = 0; - - Alloc scratch_alloc = slice_mem(cmd_alloc, 0, Alloc_size); - cmd_ref.offset += 4; - // Accounting for allocation of blend memory - uint render_blend_depth = 0; - uint max_blend_depth = 0; - - uint drawmonoid_start = conf.drawmonoid_alloc.offset >> 2; - uint drawtag_start = conf.drawtag_offset >> 2; - uint drawdata_start = conf.drawdata_offset >> 2; - uint drawinfo_start = conf.drawinfo_alloc.offset >> 2; - while (true) { - for (uint i = 0; i < N_SLICE; i++) { - sh_bitmaps[i][th_ix] = 0; - } - - // parallel read of input partitions - do { - if (ready_ix == wr_ix && partition_ix < n_partitions) { - part_start_ix = ready_ix; - uint count = 0; - if (th_ix < N_PART_READ && partition_ix + th_ix < n_partitions) { - uint in_ix = (conf.bin_alloc.offset >> 2) + ((partition_ix + th_ix) * N_TILE + bin_ix) * 2; - count = read_mem(conf.bin_alloc, in_ix); - uint offset = read_mem(conf.bin_alloc, in_ix + 1); - sh_part_elements[th_ix] = new_alloc(offset, count * BinInstance_size, true); - } - // prefix sum of counts - for (uint i = 0; i < LG_N_PART_READ; i++) { - if (th_ix < N_PART_READ) { - sh_part_count[th_ix] = count; - } - barrier(); - if (th_ix < N_PART_READ) { - if (th_ix >= (1u << i)) { - count += sh_part_count[th_ix - (1u << i)]; - } - } - barrier(); - } - if (th_ix < N_PART_READ) { - sh_part_count[th_ix] = part_start_ix + count; - } - barrier(); - ready_ix = sh_part_count[N_PART_READ - 1]; - partition_ix += N_PART_READ; - } - // use binary search to find element to read - uint ix = rd_ix + th_ix; - if (ix >= wr_ix && ix < ready_ix) { - uint part_ix = 0; - for (uint i = 0; i < LG_N_PART_READ; i++) { - uint probe = part_ix + (uint(N_PART_READ / 2) >> i); - if (ix >= sh_part_count[probe - 1]) { - part_ix = probe; - } - } - ix -= part_ix > 0 ? 
sh_part_count[part_ix - 1] : part_start_ix; - Alloc bin_alloc = sh_part_elements[part_ix]; - BinInstanceRef inst_ref = BinInstanceRef(bin_alloc.offset); - BinInstance inst = BinInstance_read(bin_alloc, BinInstance_index(inst_ref, ix)); - sh_elements[th_ix] = inst.element_ix; - } - barrier(); - - wr_ix = min(rd_ix + N_TILE, ready_ix); - } while (wr_ix - rd_ix < N_TILE && (wr_ix < ready_ix || partition_ix < n_partitions)); - - // We've done the merge and filled the buffer. - - // Read one element, compute coverage. - uint tag = Drawtag_Nop; - uint element_ix; - if (th_ix + rd_ix < wr_ix) { - element_ix = sh_elements[th_ix]; - tag = scene[drawtag_start + element_ix]; - } - - // Bounding box of element in pixel coordinates. - uint tile_count; - switch (tag) { - case Drawtag_FillColor: - case Drawtag_FillImage: - case Drawtag_FillLinGradient: - case Drawtag_FillRadGradient: - case Drawtag_BeginClip: - case Drawtag_EndClip: - uint drawmonoid_base = drawmonoid_start + 4 * element_ix; - uint path_ix = memory[drawmonoid_base]; - Path path = Path_read(conf.tile_alloc, PathRef(conf.tile_alloc.offset + path_ix * Path_size)); - uint stride = path.bbox.z - path.bbox.x; - sh_tile_stride[th_ix] = stride; - int dx = int(path.bbox.x) - int(bin_tile_x); - int dy = int(path.bbox.y) - int(bin_tile_y); - int x0 = clamp(dx, 0, N_TILE_X); - int y0 = clamp(dy, 0, N_TILE_Y); - int x1 = clamp(int(path.bbox.z) - int(bin_tile_x), 0, N_TILE_X); - int y1 = clamp(int(path.bbox.w) - int(bin_tile_y), 0, N_TILE_Y); - sh_tile_width[th_ix] = uint(x1 - x0); - sh_tile_x0[th_ix] = x0; - sh_tile_y0[th_ix] = y0; - tile_count = uint(x1 - x0) * uint(y1 - y0); - // base relative to bin - uint base = path.tiles.offset - uint(dy * stride + dx) * Tile_size; - sh_tile_base[th_ix] = base; - Alloc path_alloc = new_alloc(path.tiles.offset, - (path.bbox.z - path.bbox.x) * (path.bbox.w - path.bbox.y) * Tile_size, true); - write_tile_alloc(th_ix, path_alloc); - break; - default: - tile_count = 0; - break; - } - - // Prefix sum of sh_tile_count - sh_tile_count[th_ix] = tile_count; - for (uint i = 0; i < LG_N_TILE; i++) { - barrier(); - if (th_ix >= (1u << i)) { - tile_count += sh_tile_count[th_ix - (1u << i)]; - } - barrier(); - sh_tile_count[th_ix] = tile_count; - } - barrier(); - uint total_tile_count = sh_tile_count[N_TILE - 1]; - for (uint ix = th_ix; ix < total_tile_count; ix += N_TILE) { - // Binary search to find element - uint el_ix = 0; - for (uint i = 0; i < LG_N_TILE; i++) { - uint probe = el_ix + (uint(N_TILE / 2) >> i); - if (ix >= sh_tile_count[probe - 1]) { - el_ix = probe; - } - } - uint element_ix = sh_elements[el_ix]; - uint tag = scene[drawtag_start + element_ix]; - uint seq_ix = ix - (el_ix > 0 ? sh_tile_count[el_ix - 1] : 0); - uint width = sh_tile_width[el_ix]; - uint x = sh_tile_x0[el_ix] + seq_ix % width; - uint y = sh_tile_y0[el_ix] + seq_ix / width; - bool include_tile = false; - Tile tile = Tile_read(read_tile_alloc(el_ix, true), - TileRef(sh_tile_base[el_ix] + (sh_tile_stride[el_ix] * y + x) * Tile_size)); - bool is_clip = (tag & 1) != 0; - // Always include the tile if it contains a path segment. - // For draws, include the tile if it is solid. - // For clips, include the tile if it is empty - this way, logic - // below will suppress the drawing of inner elements. 
- // For blends, include the tile if - // (blend_mode, composition_mode) != (Normal, SrcOver) - bool is_blend = false; - if (is_clip) { - uint drawmonoid_base = drawmonoid_start + 4 * element_ix; - uint scene_offset = memory[drawmonoid_base + 2]; - uint dd = drawdata_start + (scene_offset >> 2); - uint blend = scene[dd]; - is_blend = (blend != BlendComp_clip); - } - include_tile = tile.tile.offset != 0 || (tile.backdrop == 0) == is_clip - || is_blend; - if (include_tile) { - uint el_slice = el_ix / 32; - uint el_mask = 1u << (el_ix & 31); - atomicOr(sh_bitmaps[el_slice][y * N_TILE_X + x], el_mask); - } - } - - barrier(); - - // Output draw objects for this tile. The thread does a sequential walk - // through the draw objects. - uint slice_ix = 0; - uint bitmap = sh_bitmaps[0][th_ix]; - while (true) { - if (bitmap == 0) { - slice_ix++; - if (slice_ix == N_SLICE) { - break; - } - bitmap = sh_bitmaps[slice_ix][th_ix]; - if (bitmap == 0) { - continue; - } - } - uint element_ref_ix = slice_ix * 32 + findLSB(bitmap); - uint element_ix = sh_elements[element_ref_ix]; - - // Clear LSB - bitmap &= bitmap - 1; - - uint drawtag = scene[drawtag_start + element_ix]; - - if (clip_zero_depth == 0) { - Tile tile = Tile_read(read_tile_alloc(element_ref_ix, true), - TileRef(sh_tile_base[element_ref_ix] + - (sh_tile_stride[element_ref_ix] * tile_y + tile_x) * Tile_size)); - uint drawmonoid_base = drawmonoid_start + 4 * element_ix; - uint scene_offset = memory[drawmonoid_base + 2]; - uint info_offset = memory[drawmonoid_base + 3]; - uint dd = drawdata_start + (scene_offset >> 2); - uint di = drawinfo_start + (info_offset >> 2); - switch (drawtag) { - case Drawtag_FillColor: - float linewidth = uintBitsToFloat(memory[di]); - alloc_cmd(cmd_alloc, cmd_ref, cmd_limit); - write_fill(cmd_alloc, cmd_ref, tile, linewidth); - uint rgba = scene[dd]; - if (mem_ok) { - Cmd_Color_write(cmd_alloc, cmd_ref, CmdColor(rgba)); - } - cmd_ref.offset += 4 + CmdColor_size; - break; - case Drawtag_FillLinGradient: - alloc_cmd(cmd_alloc, cmd_ref, cmd_limit); - linewidth = uintBitsToFloat(memory[di]); - write_fill(cmd_alloc, cmd_ref, tile, linewidth); - CmdLinGrad cmd_lin; - cmd_lin.index = scene[dd]; - cmd_lin.line_x = uintBitsToFloat(memory[di + 1]); - cmd_lin.line_y = uintBitsToFloat(memory[di + 2]); - cmd_lin.line_c = uintBitsToFloat(memory[di + 3]); - if (mem_ok) { - Cmd_LinGrad_write(cmd_alloc, cmd_ref, cmd_lin); - } - cmd_ref.offset += 4 + CmdLinGrad_size; - break; - case Drawtag_FillRadGradient: - alloc_cmd(cmd_alloc, cmd_ref, cmd_limit); - linewidth = uintBitsToFloat(memory[di]); - write_fill(cmd_alloc, cmd_ref, tile, linewidth); - CmdRadGrad cmd_rad; - cmd_rad.index = scene[dd]; - // Given that this is basically a memcpy, we might consider - // letting the fine raster read the info itself. 
- cmd_rad.mat = uintBitsToFloat(uvec4(memory[di + 1], memory[di + 2], - memory[di + 3], memory[di + 4])); - cmd_rad.xlat = uintBitsToFloat(uvec2(memory[di + 5], memory[di + 6])); - cmd_rad.c1 = uintBitsToFloat(uvec2(memory[di + 7], memory[di + 8])); - cmd_rad.ra = uintBitsToFloat(memory[di + 9]); - cmd_rad.roff = uintBitsToFloat(memory[di + 10]); - if (mem_ok) { - Cmd_RadGrad_write(cmd_alloc, cmd_ref, cmd_rad); - } - cmd_ref.offset += 4 + CmdRadGrad_size; - break; - case Drawtag_FillImage: - alloc_cmd(cmd_alloc, cmd_ref, cmd_limit); - linewidth = uintBitsToFloat(memory[di]); - write_fill(cmd_alloc, cmd_ref, tile, linewidth); - uint index = scene[dd]; - uint raw1 = scene[dd + 1]; - ivec2 offset = ivec2(int(raw1 << 16) >> 16, int(raw1) >> 16); - if (mem_ok) { - Cmd_Image_write(cmd_alloc, cmd_ref, CmdImage(index, offset)); - } - cmd_ref.offset += 4 + CmdImage_size; - break; - case Drawtag_BeginClip: - if (tile.tile.offset == 0 && tile.backdrop == 0) { - clip_zero_depth = clip_depth + 1; - } else { - alloc_cmd(cmd_alloc, cmd_ref, cmd_limit); - if (mem_ok) { - Cmd_BeginClip_write(cmd_alloc, cmd_ref); - } - cmd_ref.offset += 4; - render_blend_depth++; - max_blend_depth = max(max_blend_depth, render_blend_depth); - } - clip_depth++; - break; - case Drawtag_EndClip: - clip_depth--; - write_fill(cmd_alloc, cmd_ref, tile, -1.0); - uint blend = scene[dd]; - if (mem_ok) { - Cmd_EndClip_write(cmd_alloc, cmd_ref, CmdEndClip(blend)); - } - cmd_ref.offset += 4 + CmdEndClip_size; - render_blend_depth--; - break; - } - } else { - // In "clip zero" state, suppress all drawing - switch (drawtag) { - case Drawtag_BeginClip: - clip_depth++; - break; - case Drawtag_EndClip: - if (clip_depth == clip_zero_depth) { - clip_zero_depth = 0; - } - clip_depth--; - break; - } - } - } - barrier(); - - rd_ix += N_TILE; - if (rd_ix >= ready_ix && partition_ix >= n_partitions) - break; - } - if (bin_tile_x + tile_x < conf.width_in_tiles && bin_tile_y + tile_y < conf.height_in_tiles) { - if (mem_ok) { - Cmd_End_write(cmd_alloc, cmd_ref); - } - if (max_blend_depth > BLEND_STACK_SPLIT) { - uint scratch_size = max_blend_depth * TILE_WIDTH_PX * TILE_HEIGHT_PX * CLIP_STATE_SIZE * 4; - uint scratch = atomicAdd(blend_offset, scratch_size); - write_mem(scratch_alloc, scratch_alloc.offset >> 2, scratch); - } - } -} diff --git a/piet-gpu/shader/draw_leaf.comp b/piet-gpu/shader/draw_leaf.comp deleted file mode 100644 index 434c7ea..0000000 --- a/piet-gpu/shader/draw_leaf.comp +++ /dev/null @@ -1,181 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense - -// The leaf scan pass for draw tag scan implemented as a tree reduction. -// This stage can be fused with its consumer but is separate now. 
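The coarse stage above repeatedly maps a flat index back to its owning element with a branch-free binary search over an inclusive prefix sum (sh_part_count, sh_tile_count). A CPU sketch of that search, assuming as the shader does that the prefix-sum array has power-of-two length:

    /// Find the element owning flat index `ix`, where `counts[i]` is the
    /// inclusive prefix sum of per-element item counts. Mirrors the probe
    /// loop over sh_tile_count in coarse.comp.
    fn find_owner(counts: &[u32], ix: u32) -> usize {
        let mut el_ix = 0;
        let mut half = counts.len() / 2;
        while half > 0 {
            let probe = el_ix + half;
            // counts[probe - 1] items belong to elements 0..probe.
            if ix >= counts[probe - 1] {
                el_ix = probe;
            }
            half /= 2;
        }
        el_ix
    }

The item's index within that element is then `ix - counts[el_ix - 1]` (or `ix` itself when `el_ix == 0`), matching the seq_ix computation above.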
- -#version 450 -#extension GL_GOOGLE_include_directive : enable - -#include "mem.h" -#include "setup.h" - -#define N_ROWS 8 -#define LG_WG_SIZE (7 + LG_WG_FACTOR) -#define WG_SIZE (1 << LG_WG_SIZE) -#define PARTITION_SIZE (WG_SIZE * N_ROWS) - -layout(local_size_x = WG_SIZE, local_size_y = 1) in; - -layout(binding = 1) readonly buffer ConfigBuf { - Config conf; -}; - -layout(binding = 2) readonly buffer SceneBuf { - uint[] scene; -}; - -#include "scene.h" -#include "tile.h" -#include "drawtag.h" -#include "blend.h" - -#define Monoid DrawMonoid - -layout(set = 0, binding = 3) readonly buffer ParentBuf { - Monoid[] parent; -}; - -shared Monoid sh_scratch[WG_SIZE]; - -void main() { - Monoid local[N_ROWS]; - - uint ix = gl_GlobalInvocationID.x * N_ROWS; - uint drawtag_base = conf.drawtag_offset >> 2; - uint tag_word = scene[drawtag_base + ix]; - - Monoid agg = map_tag(tag_word); - local[0] = agg; - for (uint i = 1; i < N_ROWS; i++) { - tag_word = scene[drawtag_base + ix + i]; - agg = combine_draw_monoid(agg, map_tag(tag_word)); - local[i] = agg; - } - sh_scratch[gl_LocalInvocationID.x] = agg; - for (uint i = 0; i < LG_WG_SIZE; i++) { - barrier(); - if (gl_LocalInvocationID.x >= (1u << i)) { - Monoid other = sh_scratch[gl_LocalInvocationID.x - (1u << i)]; - agg = combine_draw_monoid(other, agg); - } - barrier(); - sh_scratch[gl_LocalInvocationID.x] = agg; - } - - barrier(); - Monoid row = draw_monoid_identity(); - if (gl_WorkGroupID.x > 0) { - row = parent[gl_WorkGroupID.x - 1]; - } - if (gl_LocalInvocationID.x > 0) { - row = combine_draw_monoid(row, sh_scratch[gl_LocalInvocationID.x - 1]); - } - uint drawdata_base = conf.drawdata_offset >> 2; - uint drawinfo_base = conf.drawinfo_alloc.offset >> 2; - uint out_ix = gl_GlobalInvocationID.x * N_ROWS; - uint out_base = (conf.drawmonoid_alloc.offset >> 2) + out_ix * 4; - uint clip_out_base = conf.clip_alloc.offset >> 2; - for (uint i = 0; i < N_ROWS; i++) { - Monoid m = row; - if (i > 0) { - m = combine_draw_monoid(m, local[i - 1]); - } - // m now holds exclusive scan of draw monoid - memory[out_base + i * 4] = m.path_ix; - memory[out_base + i * 4 + 1] = m.clip_ix; - memory[out_base + i * 4 + 2] = m.scene_offset; - memory[out_base + i * 4 + 3] = m.info_offset; - - // u32 offset of drawobj data - uint dd = drawdata_base + (m.scene_offset >> 2); - uint di = drawinfo_base + (m.info_offset >> 2); - - // For compatibility, we'll generate an Annotated object, same as old - // pipeline. However, going forward we'll get rid of that, and have - // later stages read scene + bbox etc. 
- tag_word = scene[drawtag_base + ix + i]; - if (tag_word == Drawtag_FillColor || tag_word == Drawtag_FillLinGradient || tag_word == Drawtag_FillRadGradient || - tag_word == Drawtag_FillImage || tag_word == Drawtag_BeginClip) { - uint bbox_offset = (conf.path_bbox_alloc.offset >> 2) + 6 * m.path_ix; - float bbox_l = float(memory[bbox_offset]) - 32768.0; - float bbox_t = float(memory[bbox_offset + 1]) - 32768.0; - float bbox_r = float(memory[bbox_offset + 2]) - 32768.0; - float bbox_b = float(memory[bbox_offset + 3]) - 32768.0; - vec4 bbox = vec4(bbox_l, bbox_t, bbox_r, bbox_b); - float linewidth = uintBitsToFloat(memory[bbox_offset + 4]); - uint fill_mode = uint(linewidth >= 0.0); - vec4 mat; - vec2 translate; - if (linewidth >= 0.0 || tag_word == Drawtag_FillLinGradient || tag_word == Drawtag_FillRadGradient) { - uint trans_ix = memory[bbox_offset + 5]; - uint t = (conf.trans_offset >> 2) + trans_ix * 6; - mat = uintBitsToFloat(uvec4(scene[t], scene[t + 1], scene[t + 2], scene[t + 3])); - if (tag_word == Drawtag_FillLinGradient || tag_word == Drawtag_FillRadGradient) { - translate = uintBitsToFloat(uvec2(scene[t + 4], scene[t + 5])); - } - } - if (linewidth >= 0.0) { - // TODO: need to deal with anisotropic case - linewidth *= sqrt(abs(mat.x * mat.w - mat.y * mat.z)); - } - switch (tag_word) { - case Drawtag_FillColor: - case Drawtag_FillImage: - memory[di] = floatBitsToUint(linewidth); - break; - case Drawtag_FillLinGradient: - memory[di] = floatBitsToUint(linewidth); - vec2 p0 = uintBitsToFloat(uvec2(scene[dd + 1], scene[dd + 2])); - vec2 p1 = uintBitsToFloat(uvec2(scene[dd + 3], scene[dd + 4])); - p0 = mat.xy * p0.x + mat.zw * p0.y + translate; - p1 = mat.xy * p1.x + mat.zw * p1.y + translate; - vec2 dxy = p1 - p0; - float scale = 1.0 / (dxy.x * dxy.x + dxy.y * dxy.y); - float line_x = dxy.x * scale; - float line_y = dxy.y * scale; - float line_c = -(p0.x * line_x + p0.y * line_y); - memory[di + 1] = floatBitsToUint(line_x); - memory[di + 2] = floatBitsToUint(line_y); - memory[di + 3] = floatBitsToUint(line_c); - break; - case Drawtag_FillRadGradient: - p0 = uintBitsToFloat(uvec2(scene[dd + 1], scene[dd + 2])); - p1 = uintBitsToFloat(uvec2(scene[dd + 3], scene[dd + 4])); - float r0 = uintBitsToFloat(scene[dd + 5]); - float r1 = uintBitsToFloat(scene[dd + 6]); - float inv_det = 1.0 / (mat.x * mat.w - mat.y * mat.z); - vec4 inv_mat = inv_det * vec4(mat.w, -mat.y, -mat.z, mat.x); - vec2 inv_tr = inv_mat.xz * translate.x + inv_mat.yw * translate.y; - inv_tr += p0; - vec2 center1 = p1 - p0; - float rr = r1 / (r1 - r0); - float rainv = rr / (r1 * r1 - dot(center1, center1)); - vec2 c1 = center1 * rainv; - float ra = rr * rainv; - float roff = rr - 1.0; - memory[di] = floatBitsToUint(linewidth); - memory[di + 1] = floatBitsToUint(inv_mat.x); - memory[di + 2] = floatBitsToUint(inv_mat.y); - memory[di + 3] = floatBitsToUint(inv_mat.z); - memory[di + 4] = floatBitsToUint(inv_mat.w); - memory[di + 5] = floatBitsToUint(inv_tr.x); - memory[di + 6] = floatBitsToUint(inv_tr.y); - memory[di + 7] = floatBitsToUint(c1.x); - memory[di + 8] = floatBitsToUint(c1.y); - memory[di + 9] = floatBitsToUint(ra); - memory[di + 10] = floatBitsToUint(roff); - break; - case Drawtag_BeginClip: - break; - } - } - // Generate clip stream. 
- if (tag_word == Drawtag_BeginClip || tag_word == Drawtag_EndClip) { - uint path_ix = ~(out_ix + i); - if (tag_word == Drawtag_BeginClip) { - path_ix = m.path_ix; - } - memory[clip_out_base + m.clip_ix] = path_ix; - } - } -} diff --git a/piet-gpu/shader/draw_reduce.comp b/piet-gpu/shader/draw_reduce.comp deleted file mode 100644 index d125d6e..0000000 --- a/piet-gpu/shader/draw_reduce.comp +++ /dev/null @@ -1,61 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense - -// The reduction phase for draw scan implemented as a tree reduction. - -#version 450 -#extension GL_GOOGLE_include_directive : enable - -#include "mem.h" -#include "setup.h" - -#define N_ROWS 8 -#define LG_WG_SIZE (7 + LG_WG_FACTOR) -#define WG_SIZE (1 << LG_WG_SIZE) -#define PARTITION_SIZE (WG_SIZE * N_ROWS) - -layout(local_size_x = WG_SIZE, local_size_y = 1) in; - -layout(binding = 1) readonly buffer ConfigBuf { - Config conf; -}; - -layout(binding = 2) readonly buffer SceneBuf { - uint[] scene; -}; - -#include "scene.h" -#include "drawtag.h" - -#define Monoid DrawMonoid - -layout(set = 0, binding = 3) buffer OutBuf { - Monoid[] outbuf; -}; - -shared Monoid sh_scratch[WG_SIZE]; - -void main() { - uint ix = gl_GlobalInvocationID.x * N_ROWS; - uint drawtag_base = conf.drawtag_offset >> 2; - uint tag_word = scene[drawtag_base + ix]; - - Monoid agg = map_tag(tag_word); - for (uint i = 1; i < N_ROWS; i++) { - uint tag_word = scene[drawtag_base + ix + i]; - agg = combine_draw_monoid(agg, map_tag(tag_word)); - } - sh_scratch[gl_LocalInvocationID.x] = agg; - for (uint i = 0; i < LG_WG_SIZE; i++) { - barrier(); - // We could make this predicate tighter, but would it help? - if (gl_LocalInvocationID.x + (1u << i) < WG_SIZE) { - Monoid other = sh_scratch[gl_LocalInvocationID.x + (1u << i)]; - agg = combine_draw_monoid(agg, other); - } - barrier(); - sh_scratch[gl_LocalInvocationID.x] = agg; - } - if (gl_LocalInvocationID.x == 0) { - outbuf[gl_WorkGroupID.x] = agg; - } -} diff --git a/piet-gpu/shader/draw_scan.comp b/piet-gpu/shader/draw_scan.comp deleted file mode 100644 index d285020..0000000 --- a/piet-gpu/shader/draw_scan.comp +++ /dev/null @@ -1,75 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense - -// A scan pass for draw tag scan implemented as a tree reduction. 
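draw_leaf.comp above turns the two (already transformed) linear-gradient endpoints into the implicit form consumed by fine rasterization. The same arithmetic on the CPU, as an illustrative Rust sketch:

    /// Derive (line_x, line_y, line_c) from endpoints p0, p1, mirroring the
    /// Drawtag_FillLinGradient branch in draw_leaf.comp.
    fn lin_grad_params(p0: [f32; 2], p1: [f32; 2]) -> (f32, f32, f32) {
        let dx = p1[0] - p0[0];
        let dy = p1[1] - p0[1];
        let scale = 1.0 / (dx * dx + dy * dy);
        let line_x = dx * scale;
        let line_y = dy * scale;
        let line_c = -(p0[0] * line_x + p0[1] * line_y);
        (line_x, line_y, line_c)
    }

With these parameters, t = line_x * x + line_y * y + line_c evaluates to 0 at p0 and 1 at p1, so the fine stage only needs one fused multiply-add per pixel.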
- -#version 450 -#extension GL_GOOGLE_include_directive : enable - -#include "setup.h" -#include "drawtag.h" - -#define N_ROWS 8 -#define LG_WG_SIZE (7 + LG_WG_FACTOR) -#define WG_SIZE (1 << LG_WG_SIZE) -#define PARTITION_SIZE (WG_SIZE * N_ROWS) - -layout(local_size_x = WG_SIZE, local_size_y = 1) in; - -#define Monoid DrawMonoid -#define combine_monoid combine_draw_monoid -#define monoid_identity draw_monoid_identity - -layout(binding = 0) buffer DataBuf { - Monoid[] data; -}; - -#ifndef ROOT -layout(binding = 1) readonly buffer ParentBuf { - Monoid[] parent; -}; -#endif - -shared Monoid sh_scratch[WG_SIZE]; - -void main() { - Monoid local[N_ROWS]; - - uint ix = gl_GlobalInvocationID.x * N_ROWS; - - local[0] = data[ix]; - for (uint i = 1; i < N_ROWS; i++) { - local[i] = combine_monoid(local[i - 1], data[ix + i]); - } - Monoid agg = local[N_ROWS - 1]; - sh_scratch[gl_LocalInvocationID.x] = agg; - for (uint i = 0; i < LG_WG_SIZE; i++) { - barrier(); - if (gl_LocalInvocationID.x >= (1u << i)) { - Monoid other = sh_scratch[gl_LocalInvocationID.x - (1u << i)]; - agg = combine_monoid(other, agg); - } - barrier(); - sh_scratch[gl_LocalInvocationID.x] = agg; - } - - barrier(); - // This could be a semigroup instead of a monoid if we reworked the - // conditional logic, but that might impact performance. - Monoid row = monoid_identity(); -#ifdef ROOT - if (gl_LocalInvocationID.x > 0) { - row = sh_scratch[gl_LocalInvocationID.x - 1]; - } -#else - if (gl_WorkGroupID.x > 0) { - row = parent[gl_WorkGroupID.x - 1]; - } - if (gl_LocalInvocationID.x > 0) { - row = combine_monoid(row, sh_scratch[gl_LocalInvocationID.x - 1]); - } -#endif - for (uint i = 0; i < N_ROWS; i++) { - Monoid m = combine_monoid(row, local[i]); - data[ix + i] = m; - } -} diff --git a/piet-gpu/shader/drawtag.h b/piet-gpu/shader/drawtag.h deleted file mode 100644 index 1e35318..0000000 --- a/piet-gpu/shader/drawtag.h +++ /dev/null @@ -1,41 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense - -// Common data structures and functions for the draw tag stream. 
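Taken together, draw_reduce.comp and draw_scan.comp above implement the usual two-level scan: per-partition aggregates, an exclusive scan of those (the ParentBuf / ROOT variant), then a re-scan inside each partition seeded by its parent prefix. A sequential Rust reference of that structure, for any monoid given as an identity plus an associative combine (illustrative only, not the GPU code):

    fn scan_inclusive<T: Copy, F: Fn(T, T) -> T>(
        identity: T,
        combine: F,
        data: &mut [T],
        partition_size: usize,
    ) {
        // Reduce: one aggregate per partition (draw_reduce).
        let aggregates: Vec<T> = data
            .chunks(partition_size)
            .map(|c| c.iter().copied().fold(identity, &combine))
            .collect();
        // Root scan: exclusive prefix over the aggregates (ParentBuf).
        let mut parent = Vec::with_capacity(aggregates.len());
        let mut acc = identity;
        for a in aggregates {
            parent.push(acc);
            acc = combine(acc, a);
        }
        // Leaf scan: rescan each partition, seeded with its parent prefix.
        for (chunk, &row) in data.chunks_mut(partition_size).zip(parent.iter()) {
            let mut acc = row;
            for v in chunk.iter_mut() {
                acc = combine(acc, *v);
                *v = acc;
            }
        }
    }

The result is the same as one flat inclusive scan of the monoid over the whole stream, which is what the three dispatches compute cooperatively on the GPU.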
- -// Design of draw tag: & 0x1c gives scene size in bytes -// & 1 gives clip -// (tag >> 4) & 0x3c is info size in bytes - -#define Drawtag_Nop 0 -#define Drawtag_FillColor 0x44 -#define Drawtag_FillLinGradient 0x114 -#define Drawtag_FillRadGradient 0x2dc -#define Drawtag_FillImage 0x48 -#define Drawtag_BeginClip 0x05 -#define Drawtag_EndClip 0x25 - -struct DrawMonoid { - uint path_ix; - uint clip_ix; - uint scene_offset; - uint info_offset; -}; - -DrawMonoid draw_monoid_identity() { - return DrawMonoid(0, 0, 0, 0); -} - -DrawMonoid combine_draw_monoid(DrawMonoid a, DrawMonoid b) { - DrawMonoid c; - c.path_ix = a.path_ix + b.path_ix; - c.clip_ix = a.clip_ix + b.clip_ix; - c.scene_offset = a.scene_offset + b.scene_offset; - c.info_offset = a.info_offset + b.info_offset; - return c; -} - -DrawMonoid map_tag(uint tag_word) { - // TODO: at some point, EndClip should not generate a path - uint has_path = uint(tag_word != Drawtag_Nop); - return DrawMonoid(has_path, tag_word & 1, tag_word & 0x1c, (tag_word >> 4) & 0x3c); -} diff --git a/piet-gpu/shader/image.png b/piet-gpu/shader/image.png deleted file mode 100644 index 5cb8adc453dcc89bf3888a68b70b525da88c1b0f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 345084 zcmdSBcU)6>^frj%fJzq?2%#t{h&4bUfdB#u7U~FMp(s&Gs39T=Qgl#?h5(|{gNV|L zj3Px45KI6=g0x7H7Dy;kLhtMa#Bm&dyZgSof9(Ec`CPvD-cz3QoadZNu(6>Y-*(V; z4h{~!GpA4f$-%J|_$wF3RxaR=o~Tr3jz4rN&YaXT#m0Rr*>>lpgIUK)NBRhHgh-6P zbmy+v)vH{hw**^e%%tU>oi;Vzxha3Es5I(iQq&fk9V;eCo5p=Mcy#c)JRxDlWCEI7-jG-qUK&WQk?X=_U>(UJ7 z83Bl`gM)fw?ta>NvCfZ(!xPbg>{hB75)Z#fg@z_5Fk@nQF@fx~Kw)*(M+*{`r?Pj9 z`9V=j^RpUt=uc!ei!4fHZ}S!XwD>Gq{4@oZ_{yMJI}q{(IQ!ymh^I004U~UY^olt1 z9I4i*;Aw|r{##Wug`soC8SaT5?!CA_D*Z{Kk*>lW(rAjS7fH-9E|Rq+rLeBbh^3&+ z28CTJlVLFJ%PAZ0X3A${;hK*0wMfM4#j_8Z^8LAw zB1TG8@eV^znE6}pOy`1Rt29A0vyDkf)LG!42X`=ek&WUSRRweD<;S+0`HRwH2cx9S zpX2Kl6}w|h5TO*EF_4MLoNLmBDxyB+IR~lIbC6c5Pit#~#27~eBPwEfXj@8Z4Gtp(LkfvfNGxeU z=s77`N(sXm3_rHBBP!WZ`Ep=xIk#A7vSaTnELlBtYyI)OmSC1seL9+1&F--y+RF3n z=&Lnk@fB*xL|Q@LUIPurl<7$(YsaSU!y1Zb^&*ZTdWAuc(PTsPwJ2hQpa@V$> zu%YtYH}gL)Fn5pZMmq$ssNU_>=mNd7&EQhhv}bbdz*xy;g5tcYWR>I?glNrR=*54T z%=n`87vtyT8x1rrsgMJ|4^8ERp{SxBXZt}heyQu;Ndpfay)G9eLSkbR@d6=cOq%6W&(9f0=kpqau2H6nl=j4{d547LI=SdlL|yMjfqknO5JFp3r2^OIC^ZQwl08x( z)GdgeNSYND=mBgj*>JO|;rNvI(?3uQ(^FM??;cyEV&K+aMJuuyGAh!({tq_Q`?p%e zE9=wgje7{4RnJ@CVHveC*>$>d4Db7OSS@t+KD&xW2vs5`8A}fG$Hu0jk1~WimFA+< z)HO-hdTsRUZ+@NOGlYFk1?^*b@zA0i)gQAHsZpAr&d~SMLm(Oujs2xhK@fbQRb<|9 zd$*$#zVK=I9$S3=34MOeUbZMXOJFTR8rawHP1T4Gdq{yG1`-j(DmK0dt!f{bgYe6U zApb_WZFn&gwC{iSlJOV62KTkQS9Kh6W3%TSFpW&xu=ZR7b9Efa$tB>CI2CGi=qT9% z5hG5j!iX$gPREgvk`vC@n2A=1iWr7lm2^d#Y_W*dZxsvy=bQ^E^ybSscUxgR5w7wC zH_30(`ov3#6ct(a-{t^~j4;#m;(ZJW|z%L1(AyXWDPER+B6DvR4n^n`? zmDDfYulq8GRkGIrB~oQ{WfCS&rD=@>Vi;f>bvSu;%PZzs7^jgt{#pRqMYmU!b^e4s zi5Qpc*d$%p0zY=$#? 
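The tag encoding defined in drawtag.h above decodes directly into the four running sums of the draw monoid. As a CPU-side Rust sketch of map_tag and combine_draw_monoid:

    const DRAWTAG_NOP: u32 = 0;

    #[derive(Clone, Copy, Default, Debug)]
    struct DrawMonoid {
        path_ix: u32,
        clip_ix: u32,
        scene_offset: u32,
        info_offset: u32,
    }

    fn map_tag(tag_word: u32) -> DrawMonoid {
        // Every non-nop element currently owns a path, including EndClip
        // (see the TODO in the shader).
        let has_path = (tag_word != DRAWTAG_NOP) as u32;
        DrawMonoid {
            path_ix: has_path,
            clip_ix: tag_word & 1,
            scene_offset: tag_word & 0x1c,
            info_offset: (tag_word >> 4) & 0x3c,
        }
    }

    fn combine_draw_monoid(a: DrawMonoid, b: DrawMonoid) -> DrawMonoid {
        DrawMonoid {
            path_ix: a.path_ix + b.path_ix,
            clip_ix: a.clip_ix + b.clip_ix,
            scene_offset: a.scene_offset + b.scene_offset,
            info_offset: a.info_offset + b.info_offset,
        }
    }

For example, Drawtag_FillColor (0x44) contributes 4 bytes of scene data (the rgba word) and 4 bytes of draw info (the linewidth), with the clip bit clear, which is consistent with what draw_leaf.comp writes for that tag.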
zUt9zr~M#jjtIBs_7 zMa=A$8S|w%n`6I!wNwcM>3t3}=I#}0XRn-T2&wD%6Y^s8a)N*z2jv=SL?*~n;D-wd z>br(%NpGCr#GE5298Va?ZdTu7%(853Qoms}z)4XE-oiTjx88-UkzACcFeG|mNZvMJDC{M zS$N@{D$qxhZ=Q>5CQ2*cGo7Us+e}Vwl%Kg9b4evfuyeza!>1t8ipcnP^?8e3oo=wm z!!L|$5qB?F@1w(t3m`!kw2wHJ3ldQu)EoMaW!|WM>`MrtM^-FvtJKD@w54xZosSY+ zzlq6=w*mRaPSPdESq-CANB}?)u2~3{Fp?>Yx)qun7oAQ>OWAx!gK1=+|og zE#{$sz|y%8SW-%#Ktzto2IJ@Snw;$;-umTJT;?+Kjn5Tgr@6db^_g-IghRk*e{;Dt zd0p@7g8R&2J-1h1Z2@UQs;(F(cgGa2vQy%fIflQ@Zz!-X=}SNFxNIcu>`4gBqc;^V zCv{aW?)&*!WEKW`fWLq*wVv|p*0P_#3vDG6+$1BtQV6MY(TiWriNs4%NfS${_=yhK zKh&d>W*I-JK#Sq;3Ba(DQ+1|nuu`zh&JZ));KfgzL&s8oKOPu{8Pi!O?@4k}@KvBc zd*|&!m=F=R5C*QmU9O0pWcLiOsoz32Ub?a3kKQ29oxEpL`GV}!=$T$pA<+D6>Q4FM zgns;8lFwd4i4s@n6l`Mk6WQB}sr~y(n`9;7QBScUJ+?-xc-!zw*GJcaH#gVV1I=Py zc4&mOH}D8(J^g1XeAfsio`~m?RfrD23<;Es`KpdLQV;1X0+4j$WJtjJa^I%EA5TVq z>G=VLC>g*>MUTIM1Y_h~=LuNVor~7NvXbEwndF1)E$-KHNIpA9E*5Z?UIuP<9F}!@ zF;u01KmuRWzp?qhG3Zw3^}`IFJt<#QN15ns=8RUdz85`Ta_kp6L~0N^`<@vjY1L9Q zk{eq8X-fGgJ$!;mCTsx^{=Ra@%st`tqj8PQOWVV0fqQCQgSUAp9IL;X;H-68r|WJK zW*Y5XFRx*gQg2pgsfYB*2!}d`pdyn?HyS5!dCvCgQ0F6X{p--lEfl6HK#44Pf=UT? z2x#*-7N0E;*&;nfz^R!aiP2@@u@CgtdedP;iKUhIM?vbML0LTj`WV9Shd>$dt;vD= z7mVtdS)jYzw>a(&tnvV#3Z$3-;veb3DNSv|^8@a{-{?0X?Vs=01K5$lHBH?xV~{0i zh@b48K81Ewgrn2HQ2)h~gLRgHs95RxGJkJRMy%taRwLG>7X_j}t8n{WN$)R$jt+O9 zSO^rKevbZ}vv)x8vTltQpSB7)p|kl+xtL z+STC$c`fhj_g7-O8nm@OxGY&mtkSu-+7VOP-gu{EEZfa_s&M-kJU3_f#_qGZOfF@% z0-(?s8c;5b^Kx}KZ!g}_dJkMSoaf=_6C5NGD!%ZhzmH>9A$yj^RFJEBBSw8-l2BQ2tm_s0KkZk) zGEIB%H4YH&i)q41DomV&JY84+qpW`*{Emx#5iDCGav}zKc;>OILH6WrV5Db(5PU_M z-JGYa#%J2P+}e^BV!k=bZ*ycgQNuuK=ST1Bw{$_bCO0Y6St0tB`^}vYisnuUaVASN zX|9rcOGotf4$3DVATn~ers?M=yx!#n4E&H}fqJBL3MJut4H+Tvi2j)#eMM@v;*DX$ zfJwznWws%%`B{)ycZV8}F|o~vN2VDss(2S>K@M60?GlBnK3>bB3cg2=N9k23*(FY(8QAEB%rtXYobY_H1dDi`R{LB>>^?t3$jZV;WVcQr?qxms@<3T?XK= zXTqc>*##f&I`OGM-H>b{c>=AKc^m!u5JRB>LtcEQlr&+w__g1#0UkNapkGY7$~Kg^J`Iuz zXqetR%y#|DX_$?5xeJ9AfHcE^RH-_jD!)v^;S!Mmb@c?&FK(#_0C-MekHlIL$(m^D z&aR4dZm>(8&o5uh>jwoo&R1BrlxOg%p04*wr&i0oFO882-2luR@EC7Jd4oDhFO&7D>++@FI#*KD5=<17GUv(|n(a{WMd^(~7{)6jQP~ zFe4?+{jZGC=?gJsu1cKY+?*2Px!YFcrA87v;E|EPl?^0#hCEIkNU1w%{#pLwWxb5% zzSCp?Tzf`&)lI z(U)?iKvY4qRW0G8MtUgV@sV4|QYbes($V6L+C@D!0ByEP9efp3q~qJwP6ei^wqVKP zbqkIN*!@>wpZhlFn+%`DpAH0eeHS~ofdzR$pG*7_Z_If3S?EhM1|2V{_dL1uMac2s z({vIYf2FRVJE0CgUS%OOBymh6#N3vCk{0<8&g-1c&G?xJudUNoOJ~ozr%UJ z#>oOo$zUwic6^{Tvo=2Q zpQw(?EPue2A#hScPzmaC@AroD`;E698!8wVJo|`Z@|n;bcZ_sf zZu)^lkJ-jPtL*7PxSDGUo}G^&-)vn13E=yx5b@Jg%%TrV-|-Bj@WPehW4@H%_YURh zi!bwwcR|Nm1mDXfAZ-E^s4%!`>o=N%K1enTREv|rNarLiG>}H%M#7TIcPacI?-L5v zpdy%RP)siBL4Y2pWXYwHDV0%4Xr~Qhp-cekTSxfMSrZ|D&TwdPy-Lh89z6xS(F-BfcE}NkrrA7*?(w%cyx?62#?5vq$jbG z6h7#)wvCM5n~f!5=YqAt*QU{!X|v-jjYy?T`&KQ*A~2Eg7=nevt63aCw9|o-#wjYy5Tt+Kr@q)>xuHdbWW@k!QySGfkvH?zM*yykynz}o-@Jkq!U ze>9GF^O6HUZbdv^LM=Uq84&7`BO{4wp#uYH-$;P377SQwc4>D#45*pRO5Jpd%dYhu zxbE%!^Y1|w4^1-}5^fiz%kV979mhcWJK)v3b*{SyqbgUU9D0up1*E5Fou{!2^h}uw+YMa$x3FF|bsuvt z(fjDi`9lEem;~SL=qlgjU5sais7d8Tg_)=FSN&JzKUbhffC}_)4SVt3a7K<|Z_#2Q z16$#%5{jPfOG$34tF!(w_I*j5wj{qPDwTp`t2vGrGNA_obL5zxJ34;yB%TS%&AN0Z zJ~Sy|XNk$i%{32n3EMzt+|<Mfn-LpPi490lDDoagi6S9WxKEV>WyP*vn~p>4LCX+3O(55;z(R zueRacl9o;{m0FW(Dq8U3E+dh&+lp~`V2qyev`y{A%!`&q4fAufs;s)FcQ#7n+q@%<3zIs>r1JY?yLoK2e4*w5M z0Z955BW1E9ZuY)o4r1aBeH0S;{@atx$pwG`IGQo6q{IKdu94boS3h1~nY<}nCMizyoKjUjbd!mt_q!8L(dyUV1@~Zf?GCZ1#HvgN)}BktaSFGV zM5RJ!s!j9fivNY+YpbYM!ly@2ku#_J9gx6Kir}@BTfPi&96OY9pb}cn68FrMyhco{ zvb;mTng1;?eVya`1Z2&F-#throY-{`B-saSqoeT4@H5@8l2;B#)RKek0Y(yp6deMN z0$48db_tjtpZV_f0M=oz`yO`Old&sB(+@>j3BJT!uO*cO&UOKMU@7I^GVUT^ZGj`Y z)(aR`HPpY`U!Tc}dl{5uJ#xOkE(DSc*%x<4fnehs?qnAlP5iklUcUxdwA^8WIu_UL 
zZsn70!J}p9^N-;#^Gzw-sMN=8CUrky^e->m^@_1Ht~gYj#SCN%bP9uV7GJp#1Duff zf?>Z&0XCa!q(Tm$9)>(u)r{C&cS`VF`^l@#r+B$8`V-4;T;*L;V)31=^dzTYbK_9o(*R&NYT{0km8!UR zml~1m@EIT>{W$o8{ODOW{!;_D9IWd=-}SGq4{0hP&4BBpimj_koI7S^=MYl`!LEbP z9(Un9f?P;<6E7*-CJ_P@K$lN(hH28zbM|w1U4PqmM*tA%u+S4&_G}w*Cq#GA9ba39 zO}?}BxMZIf-@U<8L1%4z|CEoigQbbi-SJ9HLsapjM_HU*BT1baJ7WGlx@wud+-0n6B@IX=8VtHaX$FH z=l{p;M8m6ME@lnLH=<%ZohJXfR@{=8S`FM>`T)htuW>@;nb505ne(tcW?>wK^pPPd zWPTbc&4o_W1Et3(y*)-$x*ma6%kt49eWfWEDp7!R5#Da<-n4*lmj`Tt&sqYRHb=T* zk)!7dcs`G=W%FZ(1M~r~V>8Q>%l9u}c5CUMu%U+lJapUmTmOg?MLTM)n~d zAh(`My>Ya~&T6Cz@j>=)&`-IQ1zhCj_Bp{7Zha+Vy!6V&6OGMxQzgq_yypOjtd$#u zJj-54Ctrd%V5gL`{2rz*jJtu_v+Mxg^nz`R zMyva8E*N;s^n`gU2}cM&O6piimYrWS$wrmJcO=cOSfrK@0Q7p_p6p2-|9=TOK1=V5 z4gR1ea~F;eFcbwyx(PU{wZyW*SN}HcWCndQc|jLnGHKkuRg{Kmpm=*)>q9nL87l`~ zEMNr@nd9WMTe8oz@USWW64&LQtr-1!?&it1>xad87UH`b43g_Cy-%bhj3i=3a%wP1 zv1vPeFL3JVYcT!-W&qNSbb}dB1$RsXGs|-08I>2c_I_`ee))f)0S%jBrJ{EuFnvg@ zvZk9#ojF4BnT1nFFO%~}45Fc0?)k$5Bz?s-PJ)_rr;pe@OVggl*0cVe{QbeM*Uv{F zsgp^;?bj~?q*K?dy@Q8gsShv~Z1~OYC`Z@ib19d}V@yq-l&@eY5suc4B(v zEJ;vHwuuqfhRzEMs4@Vn{86Z(#Z!??JW}wE*{NC_d2%a>=<4xPSqJETP_393OO^5q zjT-kdW3oQy7l}MJ^ONx_Hsn5c2@DIdx>&1VAoNtO!9?4}kX)jB0noc+@>*n8&*`0$ zZi>EjScls^E`{R~gc8Pyg_Ok2owGX==nQ11FRqk0tVgy|r3{3sgxcYp`3#uPIZU4I zY#R0?eF7M@_>L(6pZkv$Ci}6KBB%g|V$`&i#OuV)(pcBG{EYv8W%dc|w}pH;UuCKj zx%85C*VKJ%JSl9;@3ZZiIf6i<}Xs9ePeS%%8Pvz>v-Vu zp|cOJR6h|up$R`T17bwCiYl4^DT%tgsGU?KWoi#Kn;oETkIc9=wV$*QRla;(JptE$ z+y=+-2_@gL1f4pNHagL9oUP^3m|eQX=Z?B}SQF;pn^Hy)sWh+>unS3?GNpnv<_tZ3p=SZ zTxlSi0*OH(%4n3R||le*X%`VnhIDh)WWgGG2G^-{gH<-kT+qnwl|BTa332|u{T z$~5hs;1~$HZ~!c^vdP+I@7Hd@w}K{F-6sNa0;GTM=h!%Zq|yv-e=X;eb$t18phK7% zV!1Q6DidTSb`;m(Un4}Zp$xd{zvJT!kJB3uoZAr`rdGsoHtG+TbKf!0e69sU!lQ1P zDGgDb+l1hw^L;#Qe(8Y!6bR*RTRP&ye!p+~dvR_X<|bKF!_nTy&q+`)Kpr$DgZK_7=oI@wb+GXq>ljcvui098v%Ue}Fd3 zI@`v~8o6!r2bi|6DP{iYv|KrKyeFT^n4p^ly6P2Zx~#Nuy$!F*e4vvIr0bTsGi1d( z6Y9?dCL4Ieozr*GkAxVwhP)oowp8B*aEx@n`6Gt!n%V-=%PGe}`WkTUf7K1ZILME^ z?D9Ym0C!>%l)5TYOf3G#9DIBDz}W%9??#QyI%5>BF)+AMyPe@pFNb)}SS61RkXH|-9I{%TLt!H1S!XrkOA~FOM)se|MX{|U z4VwXp{`uay*KA|zng>rHb>=I@4K*PNyIq&6`Nk9vcL;o~=zv!`K^@7rw8ljln?^b; zxo|o6NuS#AK3Z0UbMHFj=sevp7ty}LX>wxoS$kR3j!4J_Xa|Oy;%ye_3`}0koq77R z($=;`!2mEftN-J_(5!!f3;zM`@-v6{B*Lv9_Omk8#2xN-F&yA#Sx~Lpg*WpN^;_Q) zAiF+$1W;(W2JXEXk-feg>G2zw+^rBA3kXpLyGD^&%h^N8lQ{RbHSrMhT9 z*#}f}O}hmBF88?iXD{s5)$dV_FdF6OQ_DA0ZsI&x{f`&A-pU4aI=IeU&1}m8vO4IW8e+ z07~BYaz=I6dDn+RP5NAzYK_?+VY593XsmAeLcQE?{oGqxHy^Fuu!7@T?yK~OFJ!ckM?CmrFl&g0wjq*O%v87ZYw%6#|L7L#;~L z<1==%9vVh%lNh_}&3i4{GGnnfy#7~2MR=hFF&clL#sy!*cnMKfxu$fA@|R#4X?WKkq!?r828nB=YpEK&Fqt0~e+Gtl2K5=o2C z>M{mCKR9e%hrPa*4z_u1%!&6|2p#dM^N0w)w@$;_D{uHB*!ObxH_vs7^IRgN2*aDJaD$bEp z3&qxq(f-Of*uHwwJI! 
zBwBAJ#ZM{L=c?meH0jI^Ay7|74nuuvb^11&&ssb;7woLeY zf^C)FUqr~E`$qck%O>(L+Gg)z1<~~wr4PJ6Lk|o@pEjHOrK<4O(bmZGRq1`uC&r+E z-fbsAa&`uXvO;uxR^blN!u$Om8*TBfPue3&som;p_6r$H+zT^+6>=1x!@-}ea?1~B zS5m`#N7o?*)Cgz&vCwfyuvTx}4xai@ku8VvWF~+LS6e$Y}(}U43DOC;( zlsBga?>`a~S|o`|=_e<>3r>diO`O=4aO`;SYVWIiypIjG)zA@;H0`H4?|#SC?q|U* zM-b8P++B6RMTnuwxjF5`1*kp~FVpszIhkkF<{fn=@XH4!+*@;=pmI@lxt=XQk_bVp zp*dIMe5yqtJJc_r5{Y*S2w7kyoq6)6h5*y01LXTSOinvX%1 zLNUmCpZuFZ<}tD;9*8=0Emg4p$mXF{`;IteFR%;qL7^AV4E~#Vj|aGX3@vj$8#1{^e8})aa+cR<;l@XwYSb$mD)yg+_7q<*zJKoh z<>n1{uhkm42H)mcsN1k*O+a4D?1P#tTpo@5kDxQ#v{n5=a9ogc8R9MGL&VyO2MsAo zE8Cxiw#Y9ZNj;y_1&)OD|53OqIkjmkec2XoPO`!IYI!3t?Iz~-CD%?axh5(vjoX=jSsy|1lmX_yVEzccask@ME*vgd5a-7lH{|}MqSe1;hE-L ztglOZx2}s`hBC7V^-E(VRVofo6g_il7KEjt$f|7MILtgw{T4~E{w|NUSnKbqS;Z~* zkW^`x*|b%6-v-^Pk?hG6h@Jot)`g=TQ=*XpvCSPG_1FvF&F{Ei?v!B#yzBXc7k#`o z%+452sf<1xtv~~+v5<5dpBviGAWyMw`%=_V%J6E!iTCvrf>FdeQ?%n!eAq8~43cYw zpQ5vGrDVmSjLbGLbaL;JoWxF0u>(~OTcc&+k5UZ--sdj+Px+`V=uaH6UstI4NaJG}GmWu@0FIhQM`L z+nb*&>kJs|Jn$yUskk2Swh&7i*%^&tgNv&rz)TkUt7M1TZ3E>s`2 zzVIm~K!G#|U0cr3*Xz33yL|NgK-hja%dmPf@E9ngwDJ^d@&OM5bZ=VcAaG7ZVM43t z$eBQb@eKv_r&|ujz)rY$@Zfh3kWgv#Q?RqY03?o48AcidgDc95pMwo?4uXtxf0dmG#+UGU#_C?Xo#BY2A5R*v>U_1k*Y&v_7dxhJ|P`_&tsY&!DY^@vx`ujzJ|5Yp9recxGpFcAr>p;u zz4wl4GW*(w9Sb^2v5pWzP*JIZQ3C{mhy@g+S0R8B5lm4NzD-hvZ9;v zYpnmUTb~b^O8jRpsXS0MxngS1Fy)~i{B|+8*wj9)K;TtWS}QM{eUtF%0eQ(hc^;qa zVH@9e;U}e7q-K&A?U&=L%K z>Oqj6-bRcC(C2R*{t`%hh&5fGUwv5~jr&H|ga1Iy-5Af(3@1ubW-SuPGmQPap)yrLzAA5k91FztsNbUb~ z=l>D0Ou6<0w{m|88VgZdYmbj)nCh?8Gw+A>wL~0MyW5x&q4aT6yf9Mr81n8@?&68i zFVS2b4gC!LA!(eZxn5s-|3m*?9(>reLQ+-J_w?ub#oZVZVfiCs3~25;qY}FE?MJJ$ zKPV@@1{jKn`HS4m_ITT|0$jdxxsF|Zm9S0Tm^T>Q-O83B0yxbh4S$bI$D)c z5YSHN?hs)DHuBe~-jJ)-oC!vvH#&>*YvKX0<*zN5LFIMZT-u+GBGI=N%XOlruO+^a zbd@FOsl%eEi)+vR(`k4s@w)Vy=6$XJu+M*{8h$lLahPajQ=2oCboQw)t5tLS$2)hM zRw>Ufunq?-a;3g>JAa*l@6{p?J%)(gR2f>=qV`08Vt!O_X6Mek$t^D=Ne(b0-AyJ$ zjko5+Vo^^UsP5t5IpsIqRtqjA%OfK#pG42f5feeKCc;}|Z9n4nT?h5?osnuKPZ#|9 zv?kLpjQuis+hs4?$_@9vb(82!AQZ>4bwE{E`zxJ;o&5D;%I0Hs>tqs~*^2LwgyU9k zMp6c*>piPSW%7VtprrZ5-~Xt=nEhc0^Z^vIRSJmFa#G^I|HD7~jK3Umwv^4)wDf)q z>hXFHj<)&|Ys&(B#FHGtQf=1FCShoEzoXByk=yMH<6>_wehbMikI9*bEzZ2vUluhD zo_HEDx9EwOk(n(HBcT=Uk5|kR2df~xs}sP%2t$(HJvwNf1x4td&r_4~trB`S37205 zsmU)M)F>J|o;zK!sr%d4exg(lc0{W6FV{=CQm4WQtfB*lNt0VJ2jjg5`L^-fc4{%x zG1Up;chf7VD|cTY+E16Z*6#s`$KT3@d!aa+Vm}rJyu728nirt%(#8*rwVN4{8G~;- zD7@GL@P*m?XiRRN7)6bIUlyFKj#jMqZB`KJc>Y*DZ3ZS4hoc;ZX*)nDH$e%jQDJC{ z#J}iN7^1v@OGu~T=?n$SQ62xHP>Jv0Ro4Sy+(Bap-vuPA_W|Wz^d(O z_mLKTvU0i%M{oTEx#761N`hYyShQbUYBD9}oD+GEI@zuBRePDNey7LhA#)Aa7*;?nIu$<5@Q1J;7>N!q(adD&@^WO^%qQ7)zRRnFqRdAo)%Dyq@2?NW~I!3F8T6 zEd$sSeexEdEvUt3`797bhoi(>sxcAR|vb^vN##%dBOKv&*HPN*aY=&&d2wOw?70J;7HV6%9~#KK;s*^#{9K zMU{kYVAi`BW86mxhs5w#?QK^JmnXD`!m~#B`ooyy$T!n<=<9*UF8yiQfx?zA0pNO# z)EDk)YV}?{ci1y&rpu;JEjg*lq`G~1x&mry>wvvYI1wthsvX8^whBbGWt~30A4a}) z;lbyfzJ#`!Mg3X*eeyG}<+Ez*KMtlQehZNG->FXGXJ?AZAJ_G*nrsr}`g$0gQ-=5T zVVnEla`stUA=7X!e>sH}AwNoMKQx!VoZvwzaS}xn?1^A9K@zuwW^7LOFk>qY()#Y^ z>ho7-@#NIPr-Pm4|2}I!bZo8U?kK9LXY8sbT^&tDT~My1y|diAD<##azPmkF?2dVJ zrQ0yR8JE)LhCS0IX~Uwpugc`^xRy`2UgUfkI05Pys)i&MA`)IT#}u1x z&R^V(==g{W|7TwPc)#zVl&3rYrswi;m9Sxad5ngcMcblI$9~+WnpUi7-!*8&sSv#u zAwFB0yB;2raV^J0{e@(33Z8pQ8R2+*p7jSX%Z62&@}3N-zATryGHyz;bh$y}4RzQ= zs4`sGrU-_R=!e`f%PAxpZr3%9jr^`;$&Kv{_r-y`UuYv6E>`yiOgg<&nmN3O-TGI{ z$LnWbu9IEDMn14!Z5-KG#BFO*I}tJSd9=qt$>{zteOy6u{G_%MdSl}IV#R9(aGopr7IxQZwhWL`J!a(7 zyfe~Kv0(aFz^?uA+xZ4IkDpN16$p@5@(q~L`(YlM4;tl=BG(z zZ<_z$jvNl@X7dg!8tAKPrv=P%4>6rgEA|flC|k(@OWhPB!Dsu 
z={y+3Zv8VO{mf9q^^AJ;SW|LOzfhQzr8|+&o39e>HKyNUNV$cM?MRk^8Qq+=3E}0S46T&=3j1qabHx>|B`hG^v)|dgHB2N)4q+i=q>9d zf0?iRya|-TK+JlV7WZE}I*lThA1VEEserz#nu#ZC9%J7Iv5ry?>7yVe#-zQ<=;mz@ zW1rk=^IIJ|t`N zb6o)FUEULX<51~Y*zwlyjuNd;kk39QoAXI%ts2>V33kE0FC8Ta-2K5pHes8I|8|W& zdgwrW^?AauoVl#&YoDY)8-t&9OJnq{Om4}Nu0$kp86<>?|hH}|#-I8L_klIGum;WJ8bPNVeh zn+YYjw{Y_-){Paih0F(IGa-yD$gz5G2Y}e^nbI2qf#zwS^V^|J*5HmSeUpD8|I>j0 z3P4O<0&pDM0bL?@;6dppGawlTuyuarl6WAd2tcBNbg{F0$f2KC_UaE0ym+miahk{5 zf6(Gy6>@Q>~R2@=!A7cCg^aa zN9l9popz&Xb)PHRNh$I7%V~v{eh(3!=bq6+N6kaO%rIP+o)Kh9kz+oTKcZCoFI&hT zk2UEN$Wn##%3d+u&%u*ODQxJ<4@;+@=@&Clf+FM()-))s}RSYdLOmz8dJ8Xe<`$9UVLNcf>PRygkrsrUHn`+A5R+(xF`3 zSQo9Bs}=nTTPO!rJb^*kXPqssxx4I|Ec`TNK6Btay`$r`+1KoMp}EB8tIz-6eBln5 z1I+L@844U$jTTykqliCh+*IP}ckSE)6@e6PivKwRt-OEM_-B_qjSXb){~Zx_3i2LZ zRc^B1GFNe@z<;AD?`GYjdKJZnwuB=1p^xCUGFcR6ymvmB%Tk{VK_~AV%P`~>7{#eMBC0FYaBq%4x5#x*TkQ4U zmqG^FL&(abRY@v(rd9&ha`%(~J9liQhRRX&GvoJ3ipv~dwJXbeG>{b0n0_To2SV~UKe6AX@Sb>o7JGEf|ug7Z$K=7Z;g z)O6GC8&;crA=u<_9SAOCQCmKSHPc=!40<25KT)r%2`b7yeLh_CqUFj(;ym=}#6X~^ z@7}!q-_z_Q{~D&!WUWtiAM+fpVAV-6LFYG(4_V>x`SXqiYJli)eXKWLRI#-4>h^6r z%U!brfrL=5n33KYBL$Y;UNedNf+?%Xa+@=$51MgdLq87_XoNJEAO)9_3`rrU(?Q4C zgV}wU{L(|G?g*Fb$_Lr}(a6;-BfN%PT71&-l(|`j~_)$>zqQKiyI*4{VNw*C{C~`BLQQn{@Yv z^dfXMXPPhX=s3&D9+p-LhS+u>FK{b%DAJ#%uWB-n&2qt6)hSgQ)Im;sEo6^Hp3g5w z0ltJG+<+h&uaE77cJT#WwA&s&=#BOSp93M6!=vEz?_DX&q zWhl)WlEbO%Y6>#hpRlqC6k$h}(Y_!EL#y~iYDDO$s(s1Or>W%R5Z{l*?sR-mm4-0* z^rw&cWAsj1?EF2uRk*ooi0KL^$D;a2Vdqzrh`*#6Y|~-2 zM4{GZJSsrlPpHUInuyXf{P^%mHI{If_qox3EHNZmf!e-CjGUy}WJ_(if^W7?;=P&P ze_^`yp2*2PBrU03DWjcH_b}0(-sqA8yTHB$?t{8rL-&HlEQQX`QwsO0j0#IlV{AlR z(ZkzqCPHJuxrZ=w$i@)07np~J(3a5kZ+K%;Nn#~ZLdVg+X+ z#>5PfI#G}E4LX$e!aAY$fUAE}(*3_yNE+yTtecO;zvOV%y!N|>!`Xb#quIA5Mr4~> z$|h^t?uj=Bwrad4lEr9avZegPn8wjv(8HXI@i|dR$N_@wRnz*S5LD+K^rgH&MXyDfgALohUs{ZH9;VYzi+~jM+ z4r3o{vBm2~%vGq9rA--5BJ@NtO>`S=1$mVs_mn9Z{MDWHm_japJ{)tiXp37FiZkeM zgFJs9cMDBCknQGl->ddu>xI#jCWQDoLRtYz5>CCP@@YC$)1eFPayz_PPgEsWHE|X` zWj26R{T@#O`Vv>wbYRhh+sc_7 zBlP|vLP`e9mMC1GWYa1Hn7pvQTm&j;LNh&e^F7s!GRy)LEOVnR!?`ySy;gx<7?-bHesQ9Gn@cY|CG)Uo5a6!jFVHyd%KEK z_k3=x37XMIwX^$Xg47L#p!j@)gphs!cSLMhhuX+bX|$skY~dp1N<4C(P*38-p?!|W zq!n1C_T$ZCw}YrSt8j(Kft?xBPd;c@a8W@0#sFulm`B%fX)*J4zAcb;W0XhiHzdgw zMyK!;CXv8BHb!3WAL>GV-edjpS$*SxSGyAWK-#qmHQsp-JF=$_JYQW-?vE{LJYb-$ zkK6w;!#8mOx?SpwU>#+dcw6>WsalZA!)+l4PE1Oh;Oa&QYgeBf0pqICoc1Q&sdyfO zx^Zz+B`4D3^8P!O&!uRh6ykm{C@J47u~5h05A6hx#T;U71}e z_n$r8oAv5i&vwI%?Elt}_dPq*Rv6XyT%$bfrL46%X}(3K+AqM?g77fL=pf0hGv?ao-KNIk14N5G zYhz7LbynDP#Uw_u8j<2o>~h*gQV{v|?D^k|5y@wj4zUv7jHLV4+W3-mgF;;#Os$z? 
zp?UpOHQ~SrNpq z!*%$mOT~`Jlo09wKZhSH=fR3NaKVhj??@Y~YUBWL?oZ;dD8YP6;l|ny$Pg4Ii7J4`?^v7(4=yQ=IR~*g_Z68P(mgxh#{bT2tx8354wn=06&%`tIHutxHqS#vx>ENL3i75cW}hB&DkX5o3&-A z@MbK4vGS)xuG)bjh7_S7O+I`0%;rg`{mzJDy*d;V%(Ox6HJBM$JLlrGtrl_b1@|oa zA^oN$=4E*g>^%EieJtpJ<|LktdO!KE)9U0)ojjZ?JRp>=NimU2{4LUhC3c68RCgIc zNUc4>l%98BA(Gx0Gh^@cqo_y4okF5mL^v^@dHULLlhkO{W0eTPksJ9nYft}+NARw- z@mi(eZR1~tY`k-CHrqt@fu`~taP{x%o+ylXKq$WZY#m89{%kESU56|S7@ZOt|D#0r zYuE?vv|>GVLK$()h(jOmH1wj}WIhV;(yLz9A+PR9F}mhmH*Q$C$rV)>{ytJCw+fA9 z-DCQH81HP39_(s=jHzD9c>&p>D~0YPL_O+hwCjUDj(K~Zm7J6EWsfY@ zcQxwrsahVkZ0b7U-4;e|Abs1Ssoa!kZ;dE7TltIw^5R zo(1>)nicovZ-xl>t0b?~`_NmJTDvm~8`FKcf+ z@%MSZD{pFEOHaO>zo+?^OIM1*)Wgu1ABau=A)O>9(~SYF)ca}E1u45mEFG0@cnLCe zYXPiv3JZG;sH(mqP`ZIw>#z70WgH9jssq~_Q?6Q}$Yp`X(d-sJQaZvaydwsccrXGW z%~Wv!@v9af-xJ7zUJ`V}Vlt42T;5Ub-+XSL4beV!%;C~Omu=P$5XKv_Z?PFT59u5+ z8IiV`-@l*`qg@6(4|iSqVYW=$dXaJQ3wVf(Lrtf(!JR`ib4}YZ!kNBpKqn;dROz z4o|v|uf0(+fKe{UT!UCu*L#|aVM}UEU8}IcFB%V+jG5(sYXYh#To9Q z^wyWe>@*eOvPFxQbkMRNcuR<+{bLdkP@B$$oz`@#KS>bltdA9SdfH}KvmM3gY1?## zE7x|Ne@q*_d*0T&u=~7w3YNsup1d{oxkxdt;`2ZfnfKoD;$LSXuzNkm4BtOJDM1p^ zR9otxQj^$iCEff2R^I<8;aQ0{QJ_Qib2oxzJJBS7q`ce(*sza%xYV260d4upnbTm= z6vTeIK#v+sbbNmBNHhC*Rmb?$N{uL4*x^ita%Dug%hLK6_S={lC4|)-0F#K^%BFJ+ zAD;0yu!C-(o|6`ucLME~PIe~jB@yuCLVP>KUe9pffXoiTh5Ig;K>XG1b+MqkuWom5 zr5I>jUNcy@p3mVSR$`QXj=&L}fMfW%-}6ua?t8Xdu4j2X2Ms&;E^5)k-uvvt$v$ja zOx8PNNppX#WVxLbI+N4_fTdxa4Jf_4Y%3h9h?I4;n-5`~&8HCpy%aR{<$;_-kF@Z7 zEzV4RFO<@WGNw1{`Et&6P7Om?q*JG*whVN2IM?tm*wmjt z@jCI6s>Ws8*emO87f6Wbfe)&gS1C66^qKL_C~`SYQ}@${B=#$~a7>zq3`FjUqSV8a zF&5}%Fc%q{CFt^`9+2wp3~*6irRf+MckH9IENy#A-J_8+lRko{;XT@Ys2?=>SRMHV zUF^}}&TA(P%LRGoi0$O3yh-O2y0kU&V^4wOQ?kXUyvC>l*n=wPOLr-YKbd)L-|is$ zB53#*we}g9}ucA^K%FtjKG7mzEqp_e%gskBS|D{Swg1q?3PWAw_@E2)9ICo3=q zVe4dA^((W zG!;nP458kq7wW@?TDhVE-lG{j zYBOv11?5$Uo~Nq#UgG*YMJLzS7BgmTVI2`ua)cEo<)N{w^zyB6BVDvj@)#lVP5#d~ zLPkx*=F63>38Mnf?`!YzrqR1j<-eSH5Pb%oAeV-K=u=ESZkUsGe4nAqw3`@;=Z|+< z%oOkyL620}R*A>uc(JX7^qV%c#ZTBkFF!UO($V(R>FRlK zfXlL3CLhQw+P!n8x@eQOriZT+o6|f~G7~=6wsqOM$cqJE8L-{HHlY(f-n_|<6!fc+ z8XKCOnp&+Oo33}bIv+N)s{tyd0p$qMS;KQGSlAicK(9QHqg@2z?6Rg6W+K&hGL&4n zcMKHr`DWqak8k$#$er$kYd}U<J7uW{_;N$!AJnp#Z#gJFt2zVKou3!;_MNeg{@fpEaQ4_zFDgwPO z%kj3IudPODX&udX9KXB_dH^ARb}hhA0}BfLi3O?ry$k})$qVdc*&YuAuwQ>W1hNoM)>Lk zfprB1iMfRQrKi#n7AR)dpiFL!WA|dm#qR4$ z0!L^Z_rc?M#KdQId#X(LGahKg+&eGJeDTS4uXO4AlaEs$ETMG|7|l6Z!JaTXz_koj z1WM`Xxb&nGjP<*tNal%hqag_^*5z3c%#g4Y`Q=`a5gwshIUjgzHL;S9cazNzqaNw~1v@dXcf2+aC6iLS;hCZ}Eh;I7wUD#Y%IX@#Ax9y}4rSQwa1p@{>ft z&zlV3+1gxH)k-W7D19$_$J`9V#awvXy-_aKGA$<;=FEN~&(fiOK#HV+ey-}Y6o<|>3Gh~)ls)c6!J)4o2fY)$l4 zj%?U)R4X5@a5&#>PRz45#DC@K{V&C)#@9v)KOna2y zU?}OPJ*&;4iXerh?_(>I3^@@>r3=2fqu>%gI<$_XtS^*#%g_E&UBK$O=N>+M$J*3> zVVqS~`FY^OjQZL3^*@X&-ym)Q)>5S7ZjTNdrwp4?>{aC^&GKNLp;PhP&FN&QpA-9PeZIx3%3;v$wK17gP3;N zdlHikzwWQuD*L=l9!v>Zkq7(0#2x;Rjid;iq+t+mMF+O!4^o<6e0o$iiPUf#Lk1lT zs>9cht26DnAz9<~}Ezq}79aCFAPUF=9> z;O!~kuvm78xZ;ha%<$SEs$){TJd4zkbEzoNz*EfzXK`=V&T<_?ay5@b%kX-1 zG%btMmL?x-gPMuke%@)c$G(n_=N}Ug`EqSMDE^DMY9N7Axe_Y$P~ZRaqa}mQRX`HC~WN-gnS&iioi1J1>zVK^07P$=223kL&nk248S**ut|Kw(G`FtFN6=n{brg+W^{Qd z^l8{$*D!WFf!o-Gaz)|GapNlL?!F|5^0m$DFYmdUR6I0O`~YzuC7XMCX3M5{g~xl| zfpKvUnX}=|sxW;nDh5m*$e7friN_IR#okWE=^iD)gB@_ua}?eMWh*oXTP2dCwW1HU z@AZPYN6RG(MxE@3FJ>oqUk~_$*#fR2qxU-Dbvz^^1b zU5*`W=gh>kNLqQ5|=r$qu_{X9a1c&DHGI-voub+2`S!PrU%ONgXKRE?bb5 z{SnuKs>fEFbD)}|D13w5P8L|Nh+Y@mg4wq-TL9+i_TT3209F!y{7zH19~CeRj;wpY za_1~o_Ksr2O9DyGFSEzlmWJz$m1YxHJ3fwSdJ;DiMJkrsyBHnSu&MHB2Fh%6M@rZ& zLbf0>`1Ec5CWgpa5${i;S<2U%Oss=n%jyvR zQmQot4Au1qPJ=ygpmc(fG;OnT6QL%eddh0!orIte7ug3QZCy=VGr#OZ1+5Qkz&>Y0 
z!n{aX+`@ku4;Ll{r&)*;aWkDf>9E`r4{w&7h^;82#b`HG$QEZ0d+T*byNtX^c&+g! zHop?ln5zbR)+5TLd%f!=hRSwMsjagovc8z}YKvSKd+edv!k6`584C=x@0W6)7@r zD&jc%+%M+~%mu@=66mto&9mEGZ-TH-I#SDsg&!OnDmMeuKjULuoCdKDA|#lfL5C%y zcr0yR(~zRyr?jA6=;Il$?9)y{EBQLFY=|&a&AhN#?XhY!b2e)C1U{p7wlZ~ za`g02LpsjnjBCMSJe)9&5r5XUtMeuAzF}{1xj>CyP-Qnmtpn8-GSaNZa#&nv$cS|k zkXGoNgL39d7k)*45y%A*ixE!BZ5?#(TBuV#3*OI2ZePkz!NopxQwJc?cO&PN%q5cXPiB%?o{H-mfx)lNQI7dTy>wA5q+5OsAc0*=%U7R1$MF&}+KLuh>5s&b*ER4e+}eL=<;bbLxdpShP`clxn7kiN1RjG3tMaNA@o9 z51)I{S-1WL#DI6^be?TwDyGElG=7&M(9wC;Pjzwjm80diGwq_+n~K9$;lWW)M{Pu$ zVpMbH>e_>Lz+j8$!?Ujm$MhaoHA-}BKy6W77qZ&f1Da_kho)9JLI<1Ex+`-(@P10eW=w|P!iHKu8j1?7( z*B^5X%4PyS{(WX8gB6>;&!`=TtSL-%vW>fr&+BiIX^JVayxPC&SkdpPYwp}A(c~8h z@Hb+b%;9QvDC6io@fWYBDqOHf<)L1F@Qh9L$av&&?-4V3WF^WgvCOi-J~jihYMTrW zs$#XcB+ADN0l1nyw8&7E&@zavj!ZSDGWWL-4n+eNLw-{BCyOyZZWaEgOsdELv%7`< zg~o9krRF}uQD>%MDpMh6qqbR;?I@cnAGL$ARBe!9jpuektc}&~q=P~~K7Y@@{L_4l zQGj+PeDZw7ZQPAQ&O4l~kL{_=L%_we9%`!9hHJv4#Az$hzn%ZB+V0JN3=kmvman+X z@bxVI-pq0+@J*SC1ZZ*wqUBHZYlmN^HWsU#I*Q;<3W@G7d3nz#ukN%v!oJr0f>+?w z4lc34E-FXfz@Jv8o%jD40*in3B4YC=9gywMTR3B!DozwrZi9aqzpJ%%p|{<{OYQSbE7SJ(V-< zltNGo|1|(cHraeDmA`FH!KgWP{=bqewEdUIteEubv7(ctDZKjCMaPdyRs`xq#rZrO z4Q3Z}t`K3LX*07aU+#b(@gVaTuQ6!W1VSU|9wI-W4=QIa;g1-FCi+sb>@!ukQxD$} zBsgO!xW@Q{>h4rbu-5ieS&9hnA+1njc-kePmrts?dy`PNyOT+<4O9$e4>G=P7jlTY z?c<=4Lg0=?3M*Tz4qz3I8j(_9lb821C!Q;TWy(9hQU<%1dI8*GDx&<#_xwbqV6J(@qPV5NJ0K-z=2s7E)^oCb959$UL&ZYqHTkgo;LxO#nQ%#;GG{l0BCUhGP0Yo+ZAyYN>qy z&5AonJ7CvxL|GYE>EMhK*{iltV_#c|APCf&R{F1nLfJz>RXlr)bsXykZD`_ z7bN;NT;R3WrR~y7T|)~rk{|%aO6Khsg2iR0buRSn%|Qj9&POp$sjs2XcXtAvzBq&# zOmJ?L?*&mu5#?V@J7l?+Mm{XKgkC=U z>))p{=DIWb7P9yx7Y_7XrSgwZFR@1&iF7bOeHWioX4g2$WFpDXNw^IwcmnHFtV=5A zG;VpDQK(}Mv=x6{x5;?cIw0lVf&{W^(;z{5_&Eg^R&%A9TpP+*j&JI(Jk|Wwx?5-P zu?5iW#1HDH{jmTo3aSs6N1E&9_c{iFF&T-9 zuXODLC_sbq*M+AsP1=+Ivj3{{Sw5^8zP8qoLAn!^p_9W(&|#(xTMgU+5CeO1by$2G zl)Cg?3~@W0dMTp~*9GX`#J_x(B~UoOgCVV=zlJ5N9u)W!aWO>Q7Dwf0va&cmCPQc; zH%4dC%O|uubX5OTJKYtt9wxCyQjgjcTNb1DSS(^%;IgFxb%id`F<7ZTq zmn{JBzk+vPQrsjKm6qi-r6OT^SZkC(QyO~!qT>57Epn(^ay?PJ>6zwIc+8&2NEb7! zT9omf0W9C?%D5S(A6#93tEClFl1g0w4!w&7| z2eib*7ApeQ_mSNmG|)xGtN?kkxGUhA2%9ciEM88dr3h#7FCg?l&HJ9(t)Aa2L>IkQ zCBDJ7ZSytrZKGA(ghMi_&_v|`AGu?c)w1;VLZ-Cl5Rms^vQlvSvtbcHF7ZpamNT5v za(G#K4Bj4*zKpQnb5&ktj-%>oOte>x1Ryf1xse%&`8QtuUS%EO?h*?XvYzi{ zTa;5=Hg0^;#$S(D@vb74o%)PKvyG209*XaSU3W!MmEZ}-LvrplTX#WizHn3mhoe8W z70#KeGH@#9uUyD4V%srb;ad)+Rs08x`TC)RAFmYs`||*J$FmhbUIqWd zXW@#)HNXGJ-2eAy?X~q9D^~vm1p4-34g5&hFMmI|?(g5)v5xc0%!U>J`j&y8rFioP-|$U`7%+tuH;X4Q_o^*lpT2Pb)?ES$ZC6+JK_!VwI|(aua(KGF3oBx$j^ z7Vjf0fHA>ptW;J>5nUk3>;-^>_N)Fx^9tDW<|E9M4K`D=q&1XnbBZUIz#@GuEF zvHXf7u^ER{MoLl>NYIKSOg2poJz*hGv+o?lj5nre$Y@=8PM@WOoKY_^7L4xX7Noyb zfzL2j4Q0ZZ?~+#?-hpMi&Jqtk_Cv~BBXQY@uEGO>18|iqacf-=uH@GaktD_+l?IO{!(^QTB;ao=6iW+eRGY|77nU8cnlSTp+1@!Loy2@RGt~lAV)GQ zr`wNBG+Hp_WEVF86x-Xe-zCKrC;uVoLrwJ~E$@-#%#`pE2hl=e00Z7}AIDl4Jy$aK zhn7>oeMrLIx6UmN%pHz6y=vwZvGP3AK^Ph6b1Q{!qe+4nie+HVZNyQ=&V<{`@HYx9 z!a2%Q=Q4!S^usw70%O%@9QPu0-b~2u7F)CKWECnqP~sWwxJGM z1vCpTDD$&uqv@PespPlPStnSwvo%qU&?0(zPedN>vYhBO&Bw>C8CV2=iRXRRO(xv) z&o$sf_9F&_c?!j%;nc0wdQykSV{|--60YYNsno>UA-Q?&T_h! zAj$=m4Vy=5sUkxk&ynszrsioA=Pe73wRR4sHp2af8A!chNr|sQw`is-vtklih)0NS;G##{15XFkTuU%crA4eWVK%Z7TSMD z7~?;@?%X>P4KTt*JU}BFJQ7g(D5BrrRQs>0^hI_>+Imx|5^aU{+)ZR(Cm6hk`fk1V z^#dH$gbvvP)E>BO4hcK%nK~q=pg!b%zk^1UU&w6W^#Xe~GO-ZqHWL{XO4~x!vI-wY z`i{taU7UwUiHnu#mz@3kKztH5IdosSn*&Zy1Jgt2!$>S-Q+<>SUtwc>Eg1pfSIk{? 
z7rze+l9NXjnF%(Lq+QX(lIdL(zVSKqE&A9rOj6Yz6}MJiOOQs+H}I!EnIY0UOl0H( zn9I&)Pu1bRu~=4xNp;fK6-G%ef8RR|Vxg$+=j({mo1z*9jUZ%f@6z)FVy&r_Br?=a z@h9y7_?&O!xC2TDxDdKbP`vZ#TmLG=OvJy?PK9bY&tY&}!G+ts<)-#}1g4iyQk*O+ zHFq12e+a^kh0Y*PxSGJeBK4{z%yE=TfhK0+9x8OTfAh;0)awY(5j9Bz2fZJ6OnN#9rr~ME>P`STb*l zQGTG|kCD;RE$qr6#ZW`ao$JE}Q1q2A&Z*d2Hl4RuHCFy@A0k zY7>;}l;Dzznpei#TOIb0pX}@=z?oCXmpYOGR0+$LoBMpJt|%i`b8FtE(BY}XTG^6E zch2A}maZxxTbznde!Q-N(&PCaPmFs~19>{#e_-3WB)z=(s16oLJbk7a84(n(D z?fI29tV~XTX^4v#0rv@{t8;+_8(*j&RE^Yfd8bnXS29_3V~kK7%^ffXsFoZxzPBp? z@q;r!7?K!@z7-4N@1TNpNe5dh%1>OgJ~*++e)H^~#y@x?%(pY>bK|m(#mGaOBEgi!qTOK1 z-Ie*bMM^g2;W(jp#QpZO*{uy1St)&B)Ct*~W;xWbT;(+*y2|ODhN^;PpdU0*Ohvyo z3)vfUXYyI0Bb+(fV~5;5p#(<>aOWf^myZ6S%xSRMX%cFqKScxPK;H#V1h4u1Uw$!m z9D#9`gVn9AYCuo@9)kB1Za**x5HUpw{oMS);kQJg`8AE(8uEvgRxC%IIXpbo?6GHX zYI*C)*mR@JVDx$6nX%Elori(;fRGx@{co6+Z%`lrUwbGp!){+5Qh%%ggncg+$G3YR zwsS)t3M>y7oEicNQwGz9ZTz>c2&+?*_s0v$;{5Po>mL|$vMlsqrJ|yM0PaTl*&3&R z#CqAlefH%vW$&Z{%@Za!hu>D&wk0AyJ!wsMC)TPy>Az&l*5wgK?x^bK=(7l~bbN@( zn?r!qNFjWo)$hdIL3S~Vi)1)LCyj~;e!~|}48=C3)%I)*pt4?d>=t6k7nB2&gMfPh zBFqGR(#p886f8T4NYs72t7%<(>&0@*r>oY+7o5>N-n#U}UX?u0PBx-Kb_TPuAst`= zC=0o5^)u4qQ+~Tl8NpGs12sPCL8~zi9iUe|ANDHhpf9}YZO9bh<-_=mQeg|XGzNbE zx{pv`7dm$UVc6KKa{(!mQ{a<|>kk|a*cv|;x>j{YAqxdA@)|M3flV0a0u za3U5zVihj>&v96uri$dWQ*79!7he6$PvJ)a?nPSGbdL7+>D~y_0_8pG3Pfp|=#jvD z$Yk%Pw0_J}&U){t(f4fxuy1%eN{Jxcr9`=q%_7n`nM*m?k&8P6i;kvcY@4s3gf3bM zrov1b;C8sFO6=u#p7-oXr~>yhs5tOwXS5*(JGcA_?mj8?#!8d5E+R6~!nFJCakwf3 z6TW3+bZ8BM@Dh%q#YM|xYdOWnnjWdnTe6qPcdWKb_M^CF0Z%| zE3TSeaY{0Pc>g&W?>Dz>v32~@xy(`kZjUIsjxHUoxx={Kh&^XPRd>0StaU~r?u;bl z!wOPjCME3+_FA+tb5$)aM>oH@f!n$EVe~LE%2(jtD49#zO zldn8wf6-3?hj#^RhjnjPafV}UNE^ca1E#MrLZ|-8BK6Lo^-@4HxGlSjk!an zb8pq{OQK)A(JJ?hDwP#U&(9%}Csf#W0nIX((pTH<2_*8pnkTBSp70sA6eJvFNy>jF zoPIh1oZD?Y@U{+GCBhil$DiUy*hi&Zp=wa?jY-sTY`zJXm)rOW^^N9T%Ei*d0v}= zqo2t30=fnKB@Rr~8U|{!EE^=u{$iSAQuTRH9`&Iwk7c3+DTYM<1`LIgUMi&rq>Dt#j-FcU6HM-Bf?BNz&*;1YW)+!{ zC980A*>zz~O%WdKjG4>oC^!%cT8JK4!jr9Bb2LDwPSg##y@LUm_Z`c>64Ab^uW@Y8eN#7E0KLo#f(MbfT#k zHrE`U0f@P*!|^QD3sfPHYIQYE2VJi%7C-Ez^tCti z4$mmjSnsb?Hsb6^(oS^fVQ$VvVT5t$r@W63tQ+ZG1)xgZildS?%y0=AEj488$^S*# zdq*|3K2gJ{7vq(t(jgEO6$An4geoGUC{jajf^w89r}QTUU59H%TL3#Jg?kny>cCK!W^oZx3KCdoqL8eHB1h@qtI?~@e8q9+ zJW4;O^f%+sD%ZME0~K*jlI>NYRUjS`5&bbAr{oYPV-#7z&bw4TI8tlEYOoLIaydlQ>Vdy4!V+duS`>C$g$;_ zFmHWKd?Cp2pYQze#F-Z0hd%OS0P53UnwM#qn@5xG5EfJm%z$L@ENoI$eMNCkI11SD zUJs~vbIrt`E4fy83|da+#6I}AhED`G@kP+zKVeU_qgubIHgV-%q+ya{t@9(^K|fO4 z!5%LDT43{;EB;$X9r&YNDq3|;pXV1^U0^x-sS$nKq^Vb-f8}Lq_e9Y1&xq3c-W)EXl=4&n=N0-q(OlYxDY1kLPq2@Lu?e_PER z;ut?VquC?|oU`cg3CNolLV>N+BSrY*!X$|qc1T9kw%(p+F3i>eF|{}W{T3~0|I#8( zL-Rr4I9Ut&v<(%U1IKCUORbvYhZTixBj?}l!+U(62pXj?U5=~d0;bZ6j>^D+ zHFFXt-}FB|@sHibhfVK}B6xAX?Z=8D%|eG)f}Ru*eM!9oI#`{b8T%d|ZLEKvvm)KQ z!-N?#zN5+VB%(3Fs5xvoQfdHFFyfVN-CCz%VpB&&QacS$fIxY#+XQ&%hOhnEQ1BO5yz=y@wvQfUH&2kjCLpQr2A zm7rJt$?@|Fs0CYKCe4h!;3{uDmE^B~{)e2NMFhlY;OA++F25Y|kCiY*by!LR2mehL z_04|Et_|0BrD2jKv0z@+GzIpVy06bk$2cCwCWFDVli!jC+8syD!lP}tsV8uRB=Eo# zUP?C`!a49lOobM2ZLM`Jqh%8>uC^o-XDsY*?e_Zb@VZFO)#vZnReTql80-3C=-=v- z^pTPXShV~lt>49|9nni@`Uv$EH0P0zD-%#3eXp=0gumvxI({{he?)c*?Mf^BcpD~i+4=O< zt4|)_Rnf1@+`eG->f%N5J~F8bFRa1s)FBrST70>2lfE+o+!?Q#V`Jq{wTrJuyt*2Q z*T=Bq8iLrb7*g3=Psc*-Y%FbdzI{awCT=HBI96n)Jbu=-kzg^Hxecb&J6)hJ18La2 zD(|~!E=4Ql>2#mgtUk2x#U7^bIoJFgE|C=bl7@S*$AGwFV(8wy0kJX>28Ri`C{*4= zyV77C1?`z?2 zU+108scKAgt?x>cxTqcQ>w)BhnKb5ga1Iq8toGrpQ)}QHSZi$KYdW2AKgY4cR0UZ}3P$+UL{h45ZC+P$ScHgzx8SkVf0nPx8^@1b3rYp55T zcJ-@nOpwfL`Q6C%7s?9icjF*JvlDC`k9{X3kpWTe)+Gt8@LtG*C`_%350m`S&q+59 zwVne0gp(8+i4>(V6GszhW7SL_ee!v2KiNP}kez0Aj#d3!^4m*ok2;?}YdNPUL_00Y 
z%vxZgcfTp%w1v+EET0B@!8?-3IG5Sf*_{RMxh-HA&v|z%8OG|^wP6@lx3V$=kE^p6 zD`6SH>^HzFAK<3jAI~feRo>H=8fT!{0||k;q|30$X`#X$68%(se#HH$Hh2M)bFZAY zjnklY_?dGzd9M#k-W9SnkR4|ry}Y`?1xh`8Y-Th6y{5|R93DuX?r_}89{XS#(#0#W zbj3MLh+7CbOxfYfVYE+!u5*LJN~zY{RT2KbFEFyHlDE0?#KOmht|qH|*|AEYB~K7a zA%}JE==TIRL6AL#TW*{AX70`6)-l*2vZsBI15CDGud!s$Q9$D|YhhD$*zOCh_H)Qk z`^i>agH!nfb#QzJ<3><}rO{;LyxbKX~F@|^TNJ0aXDHm7b; z<=Pw!^;e7b{ORa)d0Zt%Nr=`xO}Lyurdj~QznmFy>Egp}=Fsn|iy6$SHV(S9NIt+( zFXR&9`aNHcYiPVrY12tpr==-Q%GVi-;AsQF3CO3P(|;B9j)Q@$*}`MPqovi)E)kmb ztL9$(NRmRO?j`cX*l%rhP1fZyZ2;W|)TX_gUuyq2Mw41KKyd{>henDxl$QQsIx@{X zB)|FndXqSIgbLBNuRX5&!=ku$PIKa?#3_O3x@R$7RCVk3KA<|%UmxCgY2Ao;ekq5p zpv<>-X_AT$#07%YH{eRz?)vIG$2EyhDOX3{%BywQ>0t(u{`B}GJSzT0Ag!oki>6R! z(^jAL)k!zK%u_C6V4j^zp^%(g1kcrpi`&cTBU!+NLpR<4fZE8V>UZd$#Iapm=ws@K z?U=9ZB~#oH@j#;Uw&?YQb_NP4#HKbTdX;y)3|9L@C8V@wjpQ9idOiG1AHB)DQGZ&Q z;vw{AFnLlPg7c$nGW`VRv;GA>b)xUy`@IR}$?6W|4)8e?Oao4KUv1=K=SGThe`awR z4OSXMYth&U-{f)lV!fKS7TLS)x|5qWsZUFg{(hT#I#V}3-EEn`m`aEX1YNVh&h|_i zSD3F?m9VMQBA?F1RqkxXS8bQ=&Ci(#{yjfrVC9yfrMP;-TI2Gz|}d?EFe zwW+vHPHptl_p9KvJ!T0HoRf<%(#}~GaW7s35lGelSPG{$Ba*$g7n^4Ma_z5t)V zW3Vg(gZfFeXO=lbF#^Op-F(}lZE+N;jNORq%v6_hQ}C6$o9zP$=Ap4AEMakNcjDA3 z$#ZfBK6Mwf&Dl@l`>)+LG2$WGUL4Jc2o(@XHu2L`m~)mZlyVA`2H>3J8f0FO&0QJr=O zSW%R$47c=-$e8Y2i>Fn}Q3$p-*M4u5g5yVAh#!JG#9(+D#+lRf>C%19tMysvnhgZo z3$326bBfO64n2$9JRij8PA@F8mPo+!z1UBB(%L)~o2%kC7TrA8`Py^z9qR{_vtyq> zRy%{D;3#%uvmKvACi@RHi3hnBjJWrAFo(ru`P=2Ovph8@d690X(LdKl0G@f#(PB)q zI>IW~Pv*49(-pca&D`~5mUbW!(S89j4$bwy~Yv4w;H($T} z5F>3RhHuu<2DH;1Rk?$c=osI*%b1W!ZzLC;K9N|PwVq8^?$*Bor(zP`e=DYesmuN^ z9%DZ%5Z(z^q6HqZ4nh05U%70rn)qGH(JHJpThD|5@7;#@TTk$lM>--cEYQiKnFzsk zFqtEy3H*wElA#6s$~**GSl4L(Z9Ruhh?pDHE{JJs67P43GOk|j!lea(AN@vgfEV)U z;&CRiRHiF2$rj$9jBCUN2x7ju?L6u1Va{r9QMfpu5z@|TQIhR4KW^4YX;ECPiQFN| zXE&<+N)uUUc0TojWhMFLgf?k+?(K`)i|NSN3+mX{+0~aIJD-cS=A;py|eCSqZ9st$n;N(}ng)o0o5x6Oz5zGvxYsYn*K&h}J^H>!leKidb@lKyU7)mx3ebR-s_`n@n-()XpsH90BtWI=5?Vl3Ien5Rd zMKbC?IShV{KZ2i-y}2RrrQGAE+zCb*y+e_-fJd*;p5zttI{)S-aXpI1AhZ778;C#x zDO#qCd(D_|O6($Vv;Ps>iepnHn?j;&^&F?Xm_q0!VFam4w`n~fFz6!x+@O596%IBA z$t+INbnB5JV@o_a zxU7_h#W?jMvm)W;eK#%oy-yY6z2NYu%GY;7tXahFw~cq)E)#NoXr7wR>H>?gdCA(_ zQofxl7uz7NIfTk%(<7637?)<86F{}VJKteCOK`)ozx0*y-ShM*E!kn`*G|J zh|Ku_L*o8H@58%K5AY+b0*>;EkbS8UajdyHZATKp;J4JBxDO~yf!^_*wBr5~;jnf) zVk9m(F~|svd=w+CPGBs79w|{G-o3QjX%`YgqPdi8Q>DfKb+k3c$2*~>g*mvJPY(%C zr5~H^<|9rf#?B^g7OT**`V^jvB;anN$wjWDvk1ehL?7rb<<0vz5gRASVVTFY`g=nj z!XYJ}wfpjIZHwkJj1xHU!?_EDk0lZj++9nOvpbm97Ygcif1@G7N;Q}CDMVXQ*l%uk*G{N~h^Tr~ZYWH; z=?l)iZXJ?!U9zXC64UU4A0)QdoEfy?w)XV{iZzK)%nv#_Ue;8ZQV9eDuJ30eP>zsU zWol6baLhI$E8S+exw4eRK9w-)&n$eOCo7tVSF(qV?4jHDAjJko5}_uR$t(v!CF=5r z2AeO~gi?kEBMi@XNd88ydaI-3m+FpY7&PHift=i-^Nh&HEiv$*!A7!E+hZ z2{6^2+r0@UW*bBmS_YcfK8gh+L(1OSg-osdfVJD!EFa_9A2vz_huMpuT9@j{ebQsN zTtXTar*B9hlS>!dD>Nt!pUIi8d44|cnp2~ghf!1IwWk$(-rzi;e>!MR(*0^}VEAx+}!;kvXfG^RogMQIB+N^QUV zzkEn7>_34h)4nzoP^2ft8WMue(bU0=e!~cuI_cc|h9p>%|eR$S!+`XFTN6Q(`{b*9f4hGj zGeBjK%+$`;f)H{3fPx*8%y{c@hP@w8-3%UmG-;ChvBC?^e@|`tL7v}=X6p&tHwlN{ z7n^=*071V-a)Y3xPW>!XmU>t}`Cz9@aGY>YZUNt)Fg_xa-mx=|8U?`z4h=Vy`8_2& zN|FhO;<;Qr2eV64OBWtOZc^$zyRm?}e%>}e$vab0z7wN@7esaJMo#PTkTzJpJ&bmV$O>)l)V>+ z%&}8x=T#PSO!L7)$$9P#)V<_jDmJpFdnIF9E$mX@@rUxM+AdE zCa#{;=1t-v@SBkVs8E#bKJcZSM_n z4)!vf+~u|tCrY$m>sF~T%1);fp2EXw>|5Mg+4^5~dNrk%(PD*D$AhsK8W`0wFoo1Y z!jZ3Pu`KCqj>nH#R;<3r(Vt=ub&<`t2O(vv&6Ry(m2S`Q=gs1Ldp?^wkSlh*7bVE) z!g?fgHV~wMeF!F?y^2=x5sfC7+ePhflzm7ysGB9YaVd)b=IfLn_Xcd)JEwx?7oOpF z&?)_Uh?F3XkV9!GSr;y}P-bEJD9YNsM!odm88jv(Yn@21)Rx+!s>rJHHaNS%Yozzw zgzRdmTy`eh>Auud)ShkAaR4ZPc+C|Itv7&dr%=`9oIFcC7+mcz#r4etG4k?-fF7E( 
zD*Iwf)MSHrs~HC(%QTCw06ETJDx_a;Z3rp3)yW0Yx^+P+%|=gv@ADSv2h;$dMgj5m z11f3G_j26_YFV&W>anI=lotK9Yq4b3wD$Ced=~i97m_3R_MgL%b5n5;+kJe7J6V( zt3<^fe3I?XQ>n$Ap|hCz`x;HH03bmLPt>fC0aegfR8DQ~%RKAIwLL_nbX70s7PJtq zv6ZbKKsPR#cm)1u$zTTraN25dmL~8Mak?6bmqBw2KeBu)_^D7(vjF?FDnO~oIU`rT zJ9-24$dtGmPEEXDp!KA9-xF`G^f|z2T33y|D(7J5?-4>+MNBa8w5m?Ybvq~r@@;=} z^|QJ457q7przDWQ+9i9StIq<&%E7r=w{E_&PdV~>z%5}rW85lkF)ouiBtZj>B@UK+ zOZ3U!)7j?JI~JY2q6DdSLIvWC_CnaCJzlKd?s%|Z)ip&&*qVk)#*Aof*_HyvMWc^* z=lzpYwn_$@?1vn}1JVU2;16-ziPaffxycLptMhZ}t=f{sbuW2Q3HLc(kIfWRuR&o% z9fQ4m7(8_4cH3;@128>-`hA#5nz~O~3eyL^hTd5T#i-ob{k%2EHd&ME$Wn+)6j!Ys zfVMJCHVfxpCE7?L8|ck69PwKBauFJ;8Zu%}kLs0#V@z7ki05)JSv%Y;tbk6Imn}_1 z*YD_R96e*IA+f*d_^|6?+CPF7j2k4yi}=HE+44`sO=5jACgjZb7F=>!ouB>8I-R+qw5^WV7ML7&xT#;!uPp9Ie1-#bQg6`u2$2dm1x$F5$~dJD@&KXSI3Hd(C-nx~9td!Ht;rF9)4g>#gK-OP|i7 z@~I0hY9?Xj_AJkv)ecR?Z)JAL_;?ch0#aNCtHk6Y_X7jC>NoK!4Z6#plrfAewiyy) zt=E#UGbV`2#g|DB9f~G;1J^}3&16;AZf!GgJnv9Gljln6gV`^V?ewKRSrMeYsq)^~ z#|Z|bq955~vq>By$qk~Nhw^TH1i8%r;;|y4!P$Jp@({_|I z8P5HkN`DVBiZ9o1`aHw>ziRyJ#gp3zGEGQ{<4V?~+7WS-KeDHec!;AVWWPZyNLFX* z;vmzQ@cB#%wN%TQ=q%HbE`lc|6~X8L$*9?$_sc9koRNHJl@BrKtef=Pqg=Uj8aHg$ zha*259Vgg;LpUwZ;y73{-D;yaKRc|HJ_(s_S6>7>k$ysiL`0ukn<78K(@ZMv<=J#od#<5o? zhSf%gNgi1Nm0^qXX(D~8K@j#RdRDwNB<5=Ze=|CwKZgVkzZq=){y$NYE!ji8X(R=f zC;kwYTJxACi0F`s9kLK$6>;Y)tmY;f#I$yMI`KlzIktww2f3;}f&E#`wkk1{)9fJrAi}ZU&KY#VPdojE57W#F7rEYQ75Q;mK?qc&MOOns`B>wulavP z9sSqmrpl&DRhdw1umND{doyvc>lg13*u>EVJT zD(rFOV+*1i^EqaYpoI@M^b2Jax=Xz&`NIt#-MVNrB3<1$! zv3(iq1O$n8LVBtJsIEpDgpJm8_BoSJD|$L^_YT_46l|Npnh$>5`uheis5y^@n}G`y zKOwVT(W@FWTxhn>*9nwrZoTHQCZJlD#>vzZPwx|WRyeb={J3&Y13H?uMJJ5y zFk)!2tN|!{^%G?PiH#c{=B}>{G5j+Q^IZV6J@fW8sT;+jme$s)y=niv5y+JiR=VM~ zKQNXu!#={D=hkc*C-b5!mbLhiSowOgW(wRhO3Pnx%fP_IF@u69E-U#T$dum0H*(D` zo%T8yX2x-I_itV$F4>-A(axtcIlUd&f5#a=Dnb$8_wqzl{I=8WE0|=(1GHPO@8$@=+j2 zuZHy*VV@YGaq|ZvWZcl&=;%J6NCVWBlNgvRcCgymKpVNfQ4=#B>n)-U973tcvsTsp zGR+#!kTJ;j#Mg*0>pP5bn8v0Y3?Z7txhO~HJ3JI?mhq+PjActJiQ&1-oN&;);#KSa zrCEG=;l}6_--VwA)o}_Ec9Mk}w=`$@g+h(bSg&FJ))&@)PM%3~swZ$|Y}5F56q9Q7 zP8f=Jr}22SZTeX)#FeItqO)h4{G_8 zSD}{~;XNM;`?cm{d1*c&V`C#~zsjbWNM}5E(@e#PA<8SB#a*ldS(6x#hh_ByuYa=H zr5-BzUXXt0A6LikUuJSiFP$r*u7(r-6aiu}_t0W<%RCj8D4NRw*V?iqN$Cy7}!z07?+6c8{^ z(*KbWG>TJiqAPY*Awe@1rKLA=Y%Ygy7SyW~WT5wKCZCWnGezt*Y}*J&mXDmpUtvU2jqymm$XoyDA5WBkE*C^1#-1^CA) z#_v}V`yy2EyVZH$spVuwdgy?*XY^j3*h`OWMugS1BdT}kJMmgTh0R0)6J-=MfihR6 zz6(dK^+1dcM?>_HTdC{!jfORTw}^f~ zv-EbM(43LLyXgtGmxj(&4oP`g#ZcUe3oQEeDvz(7_VOH%+ph>IFSud6{K2V_#)B>@ zd*2DBwCTlayaGrGG#4m(D*FQfMc^tYfnW6nyKJpa+;UFql@vCMs`KL1QfYtt^G|tc?T8{5nzpmI|Lh=RtF+Ky84;ibry01q;<8^sjuOz8% z%wTALJR0+?vPvz0&Jpr6zW@T9Um%Jp5LNLW>LB~R7re_>yR-+wL2K9KjH^q z`hR;=OS*|aXJNdL!=2k}dVXfzdzTSOCoJ%Y<8ketFGe`3Y<;S8tN3R172Uagvrxgp z-P2qwS$H^RsFhqDOmRy^@wX}ffQGh+USs;0iq3xGTIkXfDtOu&g?NmN5|lQX^Qt#| zbq!YW;@Vl{%av=!Pw=4_xQsQdEp9_#wP(+vi+s-a%FXj?OI!(Iuz>)~K51)}FF9aF z2;Tcz@G9biCs6QZe}ededFDlyzV&k%jv}piPu%5@s(<{TnWUh9pA4%s48*T>n|z?( z4NPeisUNn@W`KW6Iz-#AaxU}Vxs~QPw{8T7H2zJ-9{0THxF$J{tA|(86(dZH9L3yw z!Ys=ZZWPix={dGl=`_3dDfxtSoE|s*!H-W2nJQ&j62IH})T={%M%N~=6Z_8m3 zGkXS4@dL_*k4c0Jgg^7dSGo9}iJ9&zBVitfD<4l`m!rqgy>(#Q@U`Q_nR>))kDgg@ z0P4UB-8AyG@yr>e0d#{ah(pT(R0{$FXnAubiOcLpqfEh5;^mm^GYd{sap(;Bej2I~ zY7(N?0R6!T-^yg^*7=i(9Cm%LV;*mUCY%%eHooQ|@PGdZ^9BxUr=+x02AAk46^2KP)h1u1iKm5KVWP~RN|fC6 zOWbB-*)d6R?>Fsj5gl3Qy$4D{@_Ll7&Oa;(eo^J!hFgxbwWFQnPp8RBkHqXN(tA_0 z3h!5o;@v_+d&Af!gs1a;Urg0k?z}@)+=_v!ZjELiiYddu8#{Tsz*s~7RMqY7(MQd6 zJ0=a_Z`j7J@0i2p)R^g^0GJlF4)(P!4LwN%KdL>`KL>gA;(*zwY6>m4$c90EIiKLF z(}qXLJ0^O^RfQ0MFg9*%KRuiwZi*VcwIg^;^Z?Y*cpr%>Kj!?M$GZ52&knbAHNg 
z3on!RwB5)RrxYsJ^JSB)9KvZ*c%r4pb*ql-qAF$wEzEnjGyKHhnVSY(RmcAq+xVso z<}Om)jI;U|WUfVoaTttD=Hzv9^vGv@6R+etA94)lUk1z+-D+xVq>WnV^Z;aYvun(G zPDII03cGjBPATaS`NA)A@Fir@Owz>r9q}Fqy}!Pz-69eZ&7e@<;>^cO&CD#0^)q|X zue~FFFhznf2tT9*c%PKUhJ~RHe?jZ1O6Tv{Sk*=TdqvY%Cm1}L%M$%WK$3%?U++yt z0kJqKPOKdQt*5Y?^YNQXJq!4!eogAVPrQ*-FB~WyNdyFNSL`~DtFEoVRPwCZHjgg2X8&@;g#xS`m+5dm$PKfL4L(%uiYnmJUITl$Jbjv!dRkns1+ z3!q7=>&pSW&idMTl5%j)yHuNIEtz(Od#4b;Qu#4E?V=3diC;zr?A)aEnyI}bNy;aW zU(-2#)8LCuT5XnPkD5*>g zq~X+ds{k#xvcs+y5>Ge9X7N4l^%wE1ciGO({9&9o^@THk>R-oRS1R}gs>&qxd&oLc zP^L!@tn1$RpupcA5Nm(+eL{_M{MR!BlEj_y-yB%FK{cs*VYWHM%-$kXU$xdj`=nI+ zQ87EL;gc|eAph!(_6h*HAE4+g#7=I9#>b1eHy$gG$B%gQJWiW`Pa38KX%cy(-4lcY zQ!Qe=FwDAta4xRWZIOU|&8-$G?azrL#qzf#sjM}nUJ-aDM#MX2zs*a|nBfx$Ia?^M zfj=mj?yd|weeZ94CBha=LO>werv^r2*VQve^m)f={Fp!hnJ*M9JpW38?+wZ~<`dns zH>e&yoIvf$4{hRDZ33#2Lju<195lFzkULI_$5phelB_^XP;*!jnX?s$|8r`FX|1*I z#IXwWt1MADCi}l!EKU%`bx{l2vnk{h)&MJjr~l_y{x3F*WQUa%$T_6DH!|xUcgKI3 z5iYW;dYwKXTKWsR7avx9UW@tM~zt?2tj zv4Eg>lB5o?7~@F?n=UwbW=+nlK#XRG;K^l{#s|&3e>ECMOz`1Y-~OQx0o)tAy*}ac zQFwm8N3dbtWyTxrqh4{tV1(34O&sUdt349R_}*i!CgoD^g1?=HQx-XfvFa?&RPD*M zCu0C=cnDWgV;33m;3JZ-5-sHANMnUd9cf*{Al#hz7D>M zHsJL=z7teE|5{Ecv{1GltobuKlF&WgjpOxL?;E!~%}~CKfsYkyj88lGTMztRdLQzf z^#6(2Tj;U_*PXe10W}}=u^jjtNwg5k3&KGrJ2|=@WSo|4h{<{Igw&3@Ea>n0FpGE} z(cdiogV12N9}X=AjD_zE)O(H~KC3&x)Lhgi(`sPDb_P6p=DBaKB;C44lVwCSTa}w( zOw*TVQgBYgC-F`?Cz2VJ*azl%t9dcbVxqC-)J`k(%t&^h`-&iuFUn*jjUm5h5tdpQ}!=Q_ULb2pkVT>Y4~UEYH1Bwjz)Y~?R{kTc0-g_BQR@7n=(<{#TP_QFR#X8 zY`MK;ytl42;GxGFT|@pUJ6-(+uy{=bvPtlgfHpeGbr9tj@j5hF4BWx)FDId*jSn1;REaIFgLKwjT#v$2@o)XR$ReM} zz+Fly_VMr`y4Q0lK9dIs*DN$kb?d%i2l{=Taj4!i& zKs~2W!2{$V{799qVn|0CUa|k5QsGHMI;v=EeA$83g#y6#Kg2$r2V*FD^#%-MlC;hE zjmH^D=Jcco@Ung7>tgIra<$BB?Kgg>LXhoA#SAvFl~UQ|@ghT%d;7iDsgg>6r}bmoc#S)Jl*9r33!EJ(;){08WiWJmJoL3T@$QsgNq<=@^#Pi%-pcCvYbZm}Skl^+5rC2al zwa&HAScY*#71?Ibs!w-u((9M~1R3z8;?!P)VWMi^5Sm)w?ar`zSDLVsS zUMVKRlnI{X-kM^&X(Pmchx{dw{G_`nS-&)*^<$yzk)KWQ6#`WB`_YT=bh`Tv+Iy6~ zWs}oOOxvfPdo@%7f;{QtabHAth*U3rrksz8CXlB(e3z(Z08{*;IWg}dQKiy8@ow)3cg|YRHSM~I`UKopP8VQZSjpH)>0O=60bkZrl~=%l*87p`yD z$E37{An8Lw7HatH>CI93nf=NdU7|5z00_L~UjKGMwG1 zMtKurrk_=?Tqm(k(tHh~YS)GJ7cjsq*vGG~{@0s~0wJv3?CW~^iNROiygByF4f zPO6p8_2T*b`-ju~C%s$IPOu9Nv=9vzeqOkyY_n*|!~=TL%!*!=w6*=ew>z48E5uSVo!7^2OxSd8kBel0=xTc1Iw_EX7LS zLnlK+SVIy>qWuGIuf3J%s5+k!^`CQUw(YR}4*m)?dc$}Q?;wBz|DrLTkq7S;9t)tQ zuNsBEdBj! 
zOG7$-!( z;vamr+XS%v2FK$oho#&}P;i;ludh^;9piyGxQwxm4txH`bxoCfcgCYns+pL_%R@aN zyw1w%1Gy2E3@v8J{-`G13g`Oq*vJn}8Q%P-Y0Z^@jZd4gGbDpn5{^rB&v*_YeLOAo zqrON@jL>e_3P`;4MJ?fi8qm4!pst@!O{sR9)Q{I(C%4xZ^NBdFfPOGwjB8z0x%S{@ zb)&aL8ig{fJ0%USS(1XM(i7{X?fd91qZ+A7R@5lIs(hLqSc%Y9RWASav}@@s4qx?& z2X-@sULYe-J$kI`pe379OJu)V3@ZrRrZwT z`k2UxO@9#mLz@Ql?X-{{@q}B)67oYpyv_R>si5}9(bqS2^!15C3tYUrJ{RvYvvOmwS0M&XL2d>6SVCA|#9rJa&5Of4GVUe zUs)O*zGUw_ddv!?~0xxWzGH)JVK9zHLOw9u#IV)s+o_M*m~=|K=0q0G}wIbh-j7 zKR}G3W#qZMgMM0T*K`|vUBqA5j+ENkidIX|>*HiRs{ z_4z#^1~W1Rmvk{t6*$BgUdI=?_on~0^FjPE-*v;&)f$g}cOV5wWnTnGAhr&#&Pp27 zYSYD>rAv%Cqm7`0Ziak7k)Pb@t>W6mV#?f^bYS0Nu9zO`-Q}_n`T$xWP#oL4B2@H% zHy(^rr4q|1@&LQYj_XR!aYwfk*VDL<)6``SuD|HEH^=osvM(d7ED!gqXO@7|sslvv zbW215YjbqMS(Y9zONEgh0Film!mxptzcplIw^| z4kKUMlm9I5$|qUe7R+>5>)1F`DJ2_@2kwr@d#jt$4HD<8UA?awj=7~%S}@lFVA2bQ-*;_Q0RmL+2p-hpIS*zW&6 z$m+!uyu3>(j~wlsAD>k?iW$# zxODD@4mBC9=rI}0y~nHl-lN-`s^o6vvj??A)oC9 zo*R{BWoOc;>%!OZor+DY+@te~o*b0aY1^)CDlXxDDsB`%zi0Q-ElB&Ijqi+;J>8ul zLpoMDc=q^klmRh)J@~1X(-2Y&*i5jad)(%fHYF}!bk~&NnC`E1?0Ht^1Kw4C2m0S; z)&DM*_jo!Wi7>M`YKJzpma7y%F6zO-b&t(!{X=UZD`RFV>!$Q~|DO!|jqX`cRQWW^ z`M0?;oHjojeUL6T4e70nkT_4d7o$xI&%uYYo(ioa)79m`Uu=dgi{gTYw)PY(Dk7?^ zzy#61ATY@xy5TeSa%br7z}Jx)hSUZ-y=3WG-qae|UWa~F8Y{ROzp-;T2gZO&?^XQE zE6K)nXQUX(VcuE!?jgw7{coqz1N6^wD02s@xa}SWWpW@Whk;%C8s%*QvsJ%*9M;zK zh=JQzvUXmtGD(6j<1CBV5Lt92V88Ha=QpVJjq)9Vm)VR9sN+pcjQ!47Y(tKl?*Smn zF?=fO8QO|ysyzSwMI0w^)o-&Pp5V0Uzse~)AMD9tmGleb2L>^d+?ZBX-rx}!)BcCa zp@(C_t`-U3YDWs_VjRn7v0b=i3RKLy%UWJ;3TL2&@OIhO*8 z-&1!bSx~is3rTSYu9H0}XjNq|CnEjoOxjFp#%--^>V`%%=8mGy?ju`OTbQF)E&phj zc~$Dc@)3BO9Pzek^gSEVv`GE^n8h-IW?yGB!LH8|FT|5{bT z58eK56w6z2``t6DwoQ-HX+HYYjb(kmTE9|~W4a$OUGNTN^cDrWNU2rr=?~@usKXl$g@w8N6-xKAZM2 z#Mpovr{c=xWxbzGB<+9GHGnOJddH?Nak#Ffj(sbcH}!repZ2SxmOc@o(vTbVaO@4Y z^r>Zw!>l}N)qQpb{igOUBpXoa&mb3uf|i$LtpxmyF5CY4CgkQ4f5^escMic6haH#a za=mfJ4K%I!`q{75^H0}dY4#V9E$BSVmR_@(PPP~^=#=Su70H(<|JI~Da-n)G&2$- z&If{s3pj$T@7HF^c`II5-s)lCQ2Gv?CY;LiRQV3dsgM7*az2H}8?)1OK)9k78sVRAd7M@vPU z$V|TDiox+N2Vi^LrZAa^OLQGXv4K>(Dj7J*>$TkTZx2Np&8Mq}r>PI`nbyz{M6Y}#a0V5gH?pT(+s}0bT@k%^EKqWQ=_YH zh8l8#*^Hebn>ZGrcflKyI`~+h{zkJbJwGQ0asl?Un9o;C{oo*83g3wSt0t*C)KSJcShnuue|GAP9w9l0$XZL1G$n%Mc5rCZvVRh zcLHwvkw}q96vQoGC>;t->1u~=3OWdDifvNulHRAwb0mK9cNu=!MLMgVHRf-2bmyNT z&y<}S@XC3Y7g!8H6}}f2g3J(Jyzkvk$ma8r_|{_z`pe&?V+6L((Zsi!)!)xcWG7lL zbRz72kpL=&fUKGsbb0aS4!ta68f^9T^>XYdb#aZ%`9|89bj(Ul*+D~`-Qna!CrWHkUHgJ z0n8&{?+Rb$0pcEFuahO(HEFAg^kj2SDv510WrO1yBzqG0N}^W9n+i zCdcWG${JevLOjY$OMz!}2RB~i4i)kt0x|vbjq5-53U_RZPDFb>i6FdsDY1l zt+)`+zls$rC7}l`)Rir&E@u{zuv?Ls&!g&qABa-^KXiS0I27vp{zx@R%~UF6nNyv} zo@8f+B!`5uXGOU@Q%?j&;VYzgOq``{(oNbIsLt z`J=AP`@YZfKF|H!_x<7m#_E2r*?gDjfS}VOB*ZnuO;2PWHI2x*D7>SQ0asa%NsUf@ zl^W@M-NBOsbY=9|nUWbwQ;h0TI*2VG;GtP+)jlN=$en!>SqbS0nIoCi{T4tfnW5o0 zeuptnMGc<5%YDz&wDsN<^-8TsnYh40!<}NMsD7JsRLtrhe1(VkeUIrm9g21`$R)6?Y1L1Bz7_RgI){z(jRUqK$4$ZM^cGv>4Dm7Cf4IL`v{Ads^dvTZNuUO`>OYRPk~J*!S;<jK=($yJaOQ@oe@J;UxdS9Na$UUmJ{|7>ZL|6LEIq$=#Bfp)ms&P&A93;?e z30D~$TQ>8O62S=r#S)|jb$8-WB8GH;M+&-pFi?BV1D-s?FUL-AMJ9dUz6VGVdOzc4 ztB!lVDUPxiZ3^?P>3e3@C&MVY6OgmLn=h_{8`=|$pPC0_|LP;Vd>~hpTQdGw!z{cJ zvzq7kZXdH5%(nGFC{@jy{zy88_%9jNUwX#R_4S_`;%PfY3AU3@Ifj}{U%dgXihA!2 z@KHhhu@RPWs35NES1ZGR9DVxUjybx5k8$ul|G4%VoNIp;l`$1$`8<%Vmdo-5^MbEb z6}wHAi(j87;bW@XAfmT$ z?8NJ!B=Ukalk0TM_c`N%7n=t@)kNAwLDfgW$dD^CmqFsw>;#gB{q5dymY3_@n`r*C z&FFB)a7wl#uEAe%Lc8YM$Fzmh1|fd8*WmuEO~xVp;^pou4`pR-`>lTF54+IYZqyw^ zDYQ#8;T8h(HqLtE8?Ncd7u8hw*U~T--Cp|5*}BNR*8E$lla|mU>cNT1>9TnG_$PWJ z@{9jgd~UZY_W0QTmPffEXljoM4A~`RJxUZ~Mp^8QQ(lLiKIUZprX9Jd?}d4qH$Uyl zqd$$F<}PkN2&GO>Tq52|4vq+~gr8l*+$x|>Z+&Y(lOjd9T19st^x#v~0%<0)9i<(- 
zybr28T;sQX7R{yEq<5_ZdCjcf2p@ZIiTSnqOY;6ZmO{1T(HdiGgJB(VmN`xXCQEZD zjt{l=yiaGbOLrDII%Q=79OQjB7M@fbn3C<9bUjUKDCuZSm~PoW1^He68>--l{1=(> zlo+U|-eoRtPPqGYY2rmu9!!T@K5~Xyf=W6nev6;&h4wEvp!F&xI#vX3Ajy`0S$x&` z(fRMqH=(ZVK^p0k$V~g;C|Zm4$4|^(#X3X>x9nIyu}?iMnSV2KV7}_+*W3Z=i>C@; zy7?`7r=b?I!g&i6ku%GRWN4xPRJ zEBRd+Qa{(r_sXvevW0Sx>{7bmA^n_H_pEZ1d~OLPJ(i3tKKea#&-H;7P=v0!N`2Qb zTg)KVPxnrH#py5qee4Co@MNHYI$6)dX5K;Y3d?5w)vflhcv-O&lRESoIk#OE?Yy7E zKP)4?g-8wS)YfU_5#8sVQvTYu1OrX8Av>O^Y^R2oL%T9X&*9IPc?7S|e10>VYVhfY zn#d7fE6fDcFHMvH{jh>u>;)>uxqU|K6LSw&c2Wb}LbYRMK6z8(qryh-$K;Uz(_w1W z8vj+an{lrA>+PFK=Zb}!{A$X^#iO#3436mllTr&8zs1HTqU*-<&5qlO9WGsUA+3@@ z=85!iO`F{axUff>QW5Rj0KF0pMCSit+6KQAgRub?oC`fE$=LzlC|FXu?uQIvV(@J2?>n4--=eS@I`O zaO`F@wbRS4Q?EOiC0s9;w~schb>f-|dx?ikSLAU}2ij?B_j8D{RpH5RV?@q+>-zyYJ@)Fm+4cipnC$PL-p-$WFXY1GpqC4-0$K<3R7%pj0 zFT~GiP$LPIM1m`Qjx>V}?bOS;n*;v+D`H0GMs>%)n@OmT4XmujvCUV1##!E*lhhzp zSdyPQXg~g|^22AV7M*a(Ub6diIIF0jtdq#^cc~P`;OH}=W|M3@S$f=DEWPC@Dr;kl zjjXp^I$Z`g_~k5lS!CDEyZ#F$wF9B}Hq#-7t;NE2Mc4~T>j{O|mWVo)`IgHvB>Yb~ zsxL{r-o-e8MVjX4LonhQl zpgsz<2tfDDYU`WWkXvL6?YkoSU)mjX?(2ovDGfUr+&glx;}#5ensg_`ewzKuy!-Vl zqE)-57d>rwiWm+t_m%hAfX7fysGmm;uAKs@7g{upa#l!U=y#x;6l#Ym z{zs44SNB@erG|>NQbB*L8iZMZlCTThS@h(QHUF!1G#snNx^;GcOY|%SYtw$XX^k>gT9tXl0ZMeNWee zi;Z~Pb7?Dx*6dJiyT6}E1?G_zTQw;*PjIJPd_4Vmm?4a!9qjj#i^;)DT3;m|??z@L z$HysrL^jti#1of!Gez$F8+YPyh-N06vnTI$7!vT`PY)(w&`_sGWWj2Sj~ zsK(yx(jv3|7lJ#f3~>cvw@cXUjj79XoNSPGZA1#J8OEFmRL@2)&RA;U`#;qY{wkDO zm*UzpVc*o-@gS=vSLQOM^<zo*Da?==XJA^i(j2!g&&+pS_U)Fy|`sX zQ*t@6H)GxZ&2=aJ{f%2GD0g@9|I>GU9m8o(Tm16bA@~lw>-=Zi?$P@NuO5rJ)-1g$ zZaNpTwl3n~`VBO>?k!}MAJdq+du3B~-`{s1`xn5m(xi$V#i^Usx`S}=JrWomaf@zM!l(?K!_$N27{tiguE(R;35vpFf!?b@X$S?*pDJM*^0qnE#qsUu%3m zP<6R#xN#;cv6V8M{e>!2-MM*|{V|Cj#cp-bI25IQZ)ji6Q+98R#_g?@c*7s&Ynl9V z;`lG+g^w5Qq(4s$Y1DmWe)Fq@Qk~M4l&<&!dT-)tHEwG%eOSlg31^duTW)>CW99nv z?pB4u7fWmDWgGV2UOgMW9BRB%`1oBqijkS$X2l7Ie79!UO?8#DH+?Tldj zkO3sMUDpHX`d>Z@o-D{cyZIH)CD+THksL!2oKH8cHY>I0`Vh&)UqejayD(k`cim@y z9-psSb*ro`VuX4389$^Muk>&?g$%&wdnmeDaw@c%^o{q2e#5u0eb>}~^*F%9vjVoIA;k|f zfHCSz3F=rsk60ak#b5J)z)OTxBlm72!=K3fTwJRp?%Zk~Zpn}-bswj64VE%DCEttY znU9_g47i>Z(Y&=NFdLH9saQ5)5cqWMjAIQCzZ@}^ovBcvJ39TJ*>vAPNTEV~#3T1< zfQZYi>FMv$)04b^__NcTDilOlOpAH#5i>BG@RH07b8gzAEhey8A8xpL6s|;O-4M%QJSpZ1=5O;z!x6WuVQG4IiR7KjkBD zEX$VWeGCZQkl)`dR0SN8+_}f40b|VZ5z3i_xYNwNms_qmG$lhAZr|ObN*QXr*vKVW>fvQ&L;(IbAYe%5b>+QH5nuFWN*7e$K8 z!XbaCPpQ*Nez4xP?tve=-QavAFY3jlyc*ycJqq;1!yq2 zN+Lh`@V7Dkf#g#eWd%1t_U*W|hUwJz27YffF64Ollw}~*$M{N2}=7 zikfv2mmxJ9H(vI7yvQXV)rkZ^5K})LzBA?MXJ}NSakU-I+o5S*gTUoBFFl?;bb1%I zXYq_|MXaCsvx1xdvSa;!Rx10>g#`iqap6OWAG_Zqj&!}58mkLog?HE!B>o_@`URoz zOCQeccaAY3%tPXh?JZ73!WDrXFp4kt!mYfu6LUFg z%_S_mV;*W37mAF1;>Uxx%F4~ZjfWKO|9$UMRNyMlhi#Ui(r*oy+=-G2 z@l=bpzs~9W$k&S-8niqEi<2?Di96XPr<4}C78~QI47nej*SGv}eelCViILUELpKFU zY6Ls0W_Duz>|^riCZZ#X5UYJ|vrMC+$Z24A>`O`VJ@LjUC{zl{S5%9*7aJaf@k01Z z%X{~DBOi1(Foq_e@*j7QH(`*)|1ey_CYY(S$Vv9v;;s5iDaQGNrOJ#kOg=MNfhjTe)7~3D6N6%n zX6O0yKLOi)LtO{srzxv(Aqx_3Dvf*N7(Y*!w?>XM88-xA)@{$;YX^y!DW-(bK{8+r z!Nng){SqK<9lGS*OO__%r@h@eN(JK+uuR1cDJ1><)si1Oc84WcBMgHiTdN9%O*irm zDr0ZkoWOrNP#R?%6;K&7H50UpcfH&II`)w@uuDA+Y1JQUmW+$C$h~cmn)J7Qc!ybp z)fpqc=BqT1Xko&8v1BoKcUdPLn%T>Qo#!Xbu8k=EEJ~32ydc`L_#?16;);KD@9u44 z{hZf~S%=8dMDVo{gE62LEU&$W%rw`J7B`-0jlleap-jPn+^6L<9_liU^ycYtn7_td zDfvBa{QEP%B$pQp4L|D#&tEJaFuzL*yKXUythkm1B)BAR;(T6+# zBHD)dFQ@Iz`Kan&Lpm1qam8&M&iOIOIX#@=PxRTFL#y+Zvb~PBW)-gA`BT(5`+c5r zR0Va2X~c!O(9QSAU1plzz7WGN~ocXun0gnRcE^*d?KBE8C% zC%#*?cR2iIjgV-Mcp&UxY2Ta6)0%f^_$baE@rk2tbYq`HGKzr6*~xqVv>?Q!dY^1T zBOh!37|~>$#3nS3%8d6sy343+yDA6tKm{)+<1~4&4f9dz3A1HWKf1Bw5^y;~8bni|@xAdO?t;!vpX22hWuN+?p1u^D 
zYkfBX<+QBPld1I`9G|~G)*e92ui#$Xxqt`-l9+s_Eiq!0t1l5f%?yofthKYuCEdta zdpAqH4|b-l-KwgD)_)O+dp{z&Z@-p(wMMuGd3|T;uUc0{Yu?qnqnu9mim>%wuAxPb zib|)i+{VhUW!AE!!mc);h)cX1vnQiKxP0cr9>V_odjPlVu|f;<#=FAIUpRya{nieB z^i?4}Byf!Ks=#KM#IKk zr#*;pLZnPERpxyFWIevd`WG$`Q-MF%De)80W)ue&5nC+Y1*{m3GTp>RMV?zKpTGgTAO6VnVrv z1e|18F;;#9ww+^X6{VVj%qQIY{jQ_mNrYNdeirAd(~5rfOXN^pS=DXtp)_v$v}5aN z{_D0f8DWE7NBG5$`5Q8DGpvyPTmKMl<}7XYJYf>|EPg@4UjjOVKSDY+)zBLZWz~=W z&#AxA^F!Iir!6=U#U|1IZU5M;u^2(&{FV^>k##;LWz-R?V;|vIIQq=mbIHVFG+CW2 z?oD*vloos=i{08GtcdjdIjis8&V@O1x#;j|y|93q@0KyESzMiOOTXRA7>7|&O1mx= zKD1&m4Rv-W$`%T_&^9`Sc0C@Xd@lk_>BhX1S!K!z?U=1{kNCGfqv|WoOy|XBtYtyT zsn3=-&bY&NwHa9@pLLd8LUhRat0qf0r#U}}N=@yXO*4X%_Jz%jKQ6jU9KXpcydxNQ zYvjiWvnbSUJ{=9MB(*y2M)T+gB4`4n1_#%=7*jxEp$0{sbIb9+-T)zsV%=$pb@8L;);j1@} z_c4+L)>(o)6ELzX!t4IuG&(+wG-%QlDkaS7mX2;nZe`c}ydi#2-UI;&(xG^CJAA?~ zvp`HFc=L?qB@6=~Dh09B2tCi&R^S#M@~(kHR)S(KqYa&fnJ#kuIfsoSEKoHxjNx4xRQ>QOa({m?K=?Ao~QbLOGf82wiW45#YN2ZU1wn5+%{ zwI4`@Ns?-{6m(q?RvBpPvq0Zl-#L}lZg919UqGEb71OYj(^oDuq0=mr7XT;+In8Skzr<*ZL#CBMMT7s>V-aLECQ5?%m>Q^^###uU#*O-*K&{bi( z(iLc;n9@1QGnmYWKYn*#KtC;3t#05t=z%kVHjGDtuwhW_Y59mwplLkbEXkI%03pK1 z8OEF%an(k7W^_4PoVk>Ox8i@+F5ms_gWbJ55rv(L=?y1vB`L*v|oi||HU z>y2x(^j2;`>(~QOU-^@knIRYDd6w4RSpoL5L>)350paRvN6%eNk7Za^@TaA1-4UdT zY?SoRHwS8OhCM6QoEqWUNFuHhP;%FoZ#+!OFXb~T6tj)fog0S3p2EiQeuMA)x7=7K zpb}t`E#iC@EIS2hhY?vO3%?a`UGiCdkyr5TFWgkMK{{8jR#??K$u2)2<3jYH5q09`QkK|vl66|~*KVe^H#X&=I~E(l zG*6exDUN0M3Nq9d`fe{C2w1vMJQ0eo$=)tQzorL9W7ZJSq43~kHjd=qB_OK#(-hOU zLV}F0aKg6)zA;|6lo?54+YDodK}?udps7Glyt&W`Z;)u9H(@c%BO{!?owb`?a8zy$ zMHgG$CEh*WcxzMB@Kx@aQA&Y|<#Fy#sVJknuAVqX%rB|J`W7o>83|>sjBm~rQAA@8 zCVziBkua0L*~(dYB%@8@&E{lVg8PvXx096Pf#@CV$dD#e@8_|Q@mO95GDPL_suU~b z4;9lFX4Yml-kw`QHNbLMW& zLxS5T^`S(>FH(hqLG!}ZVQ7d`X2EXhlO*EX>%c1A0QUlgdhi5mpTuPQDH)9NIY>QVpcT2b@4yblZ&Vs&!1rAY?YNcc{@GGm@^g*4O`2 zbFwewd7AdBP%7zh2P=9j*oLZK?vb;@cFP3|gNIS0)1Jql{fJ0=fr63vKyiB=D_e=& zAW9&=))2e`KXJAT)%@Jm&RxHfQPmtxf6f0m3_12AO=9s_BD+7D6TN!huOycr*K%8c@k#x;Ql{x#SW{|{@p=Z;|^{~(dX10nHnP`RtQHNhKavKs196vkhhz> z`1S|zfLc_v(HH7fj1GTpDsbfO%dcn=K+)o$>rf<4F4LtTo?lL_zsD{e#7>1X@28N% zJhpw3`D0<8M^!=lvWtOOW^T+`YYSRxILo(R)(~XiYS4TB^0ltwBl2qwu;K6+gazpA zj=LjjBB|2^Ie{$U_Cs6QcU{bg{$zI7x#^K<)O8O>?r8g@YKKA{X22B>&@YCd&~AH$ zX*B;>_ta%Q;Ekbf&LVz0#4QQdvFMN=pn9^cM3)O^D;na|m?mLBeeR4ZKTbee#8H1t z813qZ+uDst9IH_P#2W-0B6;Je;c=@5KDqxw-w5b0XY-2HjY?ZfQ$`ekRssmnD0~EC{>>2^YuT(WO@_i)ZySOV zk6akNt25E1v!xB|8i4Jv+rbq!Q2r2zf?CF|-Cz+J0s%c6^^c(BVlIh2dBbdGwqBMK zyE%{lk-GP^^BfVkM0Kugp0vJfTR^cKGfcfV)p>dqmbnuHd<&mqfs>0 zNJpJQpR38+F4R6NjBR67h_TvMmw!YJdt4MYR&;`kq&f$x(J(PtSA_qxU*~K}bp`C& zTVF6pGwSv4WBDeXd&OD*MXOAn6Sx?T>@0v2|H@83WW9-zP8u^^i`!v=#8I;q)EujY z7obzX;Vex2L~mAvT>u0EP=0}3e6;hV0l(*uAwrYcl;t}(?l^v+&t;AXg!|srardylP&k>>db4AlGahi?c#(Y*&6ck>s z4BvB*Wv)Eh_f?*nL^z3}`02E{(=yg30C=Ai%abpriT09LCG<-dezZQq9$S~C>pr+J zt*Idi)eXnHFsWt`kT};_A?!J^^d=}o?<4EB4rLj1YxsdgJuWPZisywa9ZG(LUXgR? 
zl0d~XZ(-VXAFq_4r`4dqg;~q;AL+%!zF_fVr$sM%VUMu*k+-`r_7HwU z@sFKSnBJ%;iaiFy12=9cZxL9leq zO82&AWcC1h`TPxFcZXkxLIh->Sa%1Xwu_AC@&}R6v?-m$q&`7@wWH3DVX3Ak1nJAH zkj3F(XH`EtCCK!PY~d85*Y1rSYe;Df1rqeD!r0jo=GEB^E5pe&lr}BQ40hoE!6^QL zI2kHq{&;hD(#9b%(uKd=&BeVB8$98+Jc?*M$+HL$3Q_~R{Lqhke9IoJp6(!BiTnO;4%z;=Xjy2-8L&glBVuJ>YEp) zy#mseW+m!$`q82C8ah><_*1`>z6s;E>^QG|b83|ZvgmSP^f@I7vg-@djl(R4C69yY zJEVygq@-+*fV7zohX4gja=Pn04@!~!HR^p9-hlO0$zz zPwvK;S=o3?7h;ZBgqE@6-h+2dju#zPjz(!|$w03Z+9qx~+QPc_5eIEPD^S8xk zfOu8U)$^oJG01#JWbpOL7qd^vM&4d>KS_hGiW3zTCm9Zl<%sp@w2*{`_U4nayhc+D zcGSIjuU_ekR(J0kI{Z^=@w7#vX=fLBd zGrRRHKr!=Ba03U2EbbrdX3enLKvsy`2rESAx_9Kvdo)hUXqXF(YcHn>Rd=jv%P7oz zgK!gn5_CK7aFDy_>Stt~B~W|O}SD_U4b-zqzYqnB16mYlyCJuOD9O-9PvQF06$ix;szu2K}s2 z0A$)}d6^|B7mRplrt+(?`1i!#V`@qeSFBiea&d}g(B9+OG|g0sNbtUF7>GBatdX)H zE5lPH@6dO=7k>L99Fllx-X11%Ky0@*XTr(Z!C-B!Y$g+cTuFj z1h-RGyav!d|B!0fVs0kEAzfd;Q**#!4`W$kg?t^t9}E)DWY2-K@;1sa4hJ>aDZVMA zI*Hc~GG600LwFy;qeFVbVxv9Z{jS*x5Uz4YKKAhGU$DBN-1}Pu?1F0zynWEiY0%%z3y^|D9ge{g{aVPLvoyukZI)r zMX)<<&cay#mAgK4Q&g|yF3n-vl=xKcr^(0P);1qAm)m=hl}dDYYR`NuyyFSMiTD?o z&k=>LoUAxe2bCcU(768!c0Wq&!24^;FGtv3Il;v^YtcV%X7m(hS96Ry#~hAp!+!)` zX2MV}iG*_HZFIs#Y@i4SMCvqEHo<+u=qPhb`#`-nB%RI!nYD%pviYA-*-?cEUZCv& zcysy%w&e2CewC0yPCy=;F8X6waRbE;c7Zr@awJ@@otn#$1%)$Vby?;;?=K>1Crm8w z1|~H$dZ!#%0hRqOVEc@ElT&~N?FVMu^Dun0Wd2LAdZ z7{C8_UL|UI$3F@9X65iuQ!ZPDYxlcs@`SIe^w?jTPby_9TLLU2mlHhs32ubO&Av^x%%Amj^XUQmRlMptCT$i;j;V;9(KsLL8eU7> zBHpZspiXasbTpnIFNai`Pe9$VBsOunOI#qWTYtKUQ3T4_yEF~@zOP0OIK=OR&f#e!Rmc#O#nG#=Av{!Jqv*OIFQqvz_IQR4 z&6uwbdc^-kJ-CdQ(R|*hWeAgxvFz^YW3xxNYK-Vign^?>DfqT8zjN%Orps3lM%o*! z{`5Vh9XUHlgoDB2c=|p_zmYjV{e)TIShuH>dGgb<;c7~9#ln}>kbWnP@+YJ)gmV+U zubqBphrjw@%J&i6<1`ND$$aOz8(3sn0s3{A-=`&)1H?cjUA$5i+fHiWj?0;4ye+_PT^wG_G-e5M!I*MCd2nuHb&l)gn%gAf$DO z5o#-bHk_Ar5p$vZ9684RX$NQM`!)RJtHR|~kfysDn8X-DKXW53ar)!*dNWp?DW<&% zf}O?z$$>-UeinJnS`|@vAZPCg!j{T83`Y^((gKWZBtPV>MhY3o=&iVN0+xSZ*>u?G z?C!+#JlCKjuCotW@laG}08!VqhkVJXQQ~imEV%M`cx0ey6ccgP4g zWl(MvePKL49euTpnj5>9M=~ju?Yktfn9@9=Vdkh9dCAG}3HwetYct){mltuA$XE#& zrB*W!%h~Ji#RzV9`UP$ncRVDIEMBwzpKDY^A8_b2V%{X3o=GIM6HXE~mLKK01k7Tz z{b^1t<&5TfXiBz|z1 z3TK+zkdx4lQc#3d51w+`Jd+(EiV%P^56CJ5r!Lg!v!Mb)m=0anW>?(h#WLF#tGkQ& zFSoa(^m9%?L|l|UfoJ|0kBQUe-wd4n*#ImNqLxK|2Ub%Jo(60n>MD{Mp9aVL6|S^x zvJ#Gek{vqr@WCZOYkY(2kchJqQZ)T=2u`Nu5?ZkIt_^8j8GEj11RxGGSkI_7H$GK@h$xVQ+Obb;foMzO{6ir-<4JjDP+!0J@SuiX$b&BCO^9? zf{f)NuA5X0C@A#kJB_}eJW-qh)d1r$1l*Bw5$31fTP9*)9-qst|6Pn90mK@V? zK<_%0)~-Ip2K%C!*XV*>2_!L^{+{=G>{ngr@Nc!yySa_ElW#gRBrBzbLXJOm|76%;tN^xypc1g#sPSEFOPk`UiI6xz{W{ z5P-9SYblpq@8ZJ)5s<%|@Q`{L$)7GOFR*oBwoUOr0m7#K2J38%T1u>3P6d0J2NC^G zaA$LDOhC6P)z=00id#_zELxl>&h!*1lE*YK<&uZ1JswIdLFjX7F<*naU=pwV>Ah^!lW;!M4oZ^+~u7y(C(smdDxlL7FB6 zaZnG3WvO?eQ?CU5>Z$!|m!9b?Di-6zM3f;{K<52A;=-5<1Gktt>|NN_2`OTFkejJ< z=&?qMKvx5H+*RHLH}ZJ)q1qHQt?BisS4|k!}P~2=8Jc@pif9$d1MjL30ZSSm2t|u~a?wU*yn}K4T z-@*^N>c(`bz*%m>b6Jae@z^NNH=OYoJKa2I@2ovv`sd1d=U^{YL4L6>y>Ie6WIGf) z(`E#D;8NX@O@SQvHvU^`R+(Wm= zf|O)4_ync*4xE@rV4moxo&sKFISdgvR;D6I1MQk1Bc+QZ%OdxWM9LxnS0E|Ggbi1) zjvkI#Fc7jQV!q3>87{$(I%5>mL=jRv&^pXV4^w`BIY$(3X@8@C(?Gv>L|H3>UOd{`2R0ZZ`BS3Fh1>OE zla!m7qX1uS)pgGgHTg94$`8A@!83uMvuntF_5p`24ZR)4dRwbr8Y0b)s2=LPfqDyKdZ7$sUA^}<=*wht5+ zdgQ_u_D<>A8u))amwYN8sJW4x7DHi5>*oms?3VkpByFW!rR-!q9X=g^6E3m2KAcnV zhCUsRo&BL_+mX;Y(iuWvCAT;0Ge#bEw5KVjJ`fJRXtis)mxs2E<0>u)17%{3XMi#! 
[base85-encoded git binary patch data omitted — binary delta for the deleted piet-gpu/shader/image.png]
zQH%`gbyZ0`U-$hX)kigby1{L$SpYn#W4%cjDn{*wpiXjhJN|n~(7a<%D;e^mdmdxw zCcxeqn{HN5QV4QsStA3n!mgLRqu|dC47R85u2tbHYGfksSTQF_Z_QbV%(b4<^Gaen z;-Wsm#@1sQuFCr%!1DHCdN5EAZBlv7d<>TY1t?o!lo`C<5*ZSo2)Hy%ViQpHRIbZ?@y&X3ZA(m7AAl*PU?{E}_0H|Nbm;SRv>8F##Wc->-rd zH=2xcD>K!v9AlWlM*p+0t$hn7D@6-b2x&$pR)Z!4ci#Qy{YBo+6iE1Ueq#oZty#S@ zHD2x2qa;?E5~0KasaroaQwjs3q*XM~)%HOMk zZS!^Q(shqbYASv66vPo@y*4qY2|GRun?A#2vxK* sDP8ACvnK6%&IXTjF;#Cr(v z>$~k{J4?Ls;WUhtV@DsFU%s(yv&$1nfW+|~3@9cFrSu1Jj`B`?N{7uaCcwPlGH!fg z49_w1v_fI>5Tb~+kU?|1&iin^Rk21YztqR@3Vjd%^Xaunf~N}aEVg1f!QT$niu|R%a29Z$ZeAQfU^Q-dIIlhaNzCv&1tgOq@y-q;s0*U)S z8wbo%EQFs73HWW6$=T^EbM3+-i>XIqGr~ey&>+5d3$Akwu55;Z2&(0o!40ewm+?{A zrIE!|Hzq}tvum1cyZ7UfOc&s-vS3ocZ)bpUgKvyVc$X{GZcHaUzGPK*d>a35%CujQbkD{L6 z&o}t@*XVycaH21J0DK@~*T};;n2-9h-su?Rd<0%rt!W9Q!PLjPs4r#7p@7-keP+uv z-@tR1Pqbfcdy!=?VqRGK2YXl6uQK51fO{1uV`jOm2PEISD@=POb{+M9O;$ckDZ21% zhqr<@*~x#FSFLb(e05MX&`oT(91eppCd|DRBzc*D={R}(s}qbV{M^i!Q_ZVSbvL=| ze2?Jx`TiVYcfiu}l#RbnfT6j}N!z%&3XSBU;3wC1KIN96HzUOx$;ujVC#i5~Ftt^f zye!f)S>IyQ+Prkgx}r@iuE#8r`&i(fN?!8iTIiT7&Dub5CL8X(*43~Dm2%xKdulg} z^k2@(B{6e3@#zzhT&us|E?ed$15KdWfNcc+-w41B4_LK)Rs%4Bt57qcvi}tEDk5|> zVxXs6cV@8HTd}R;hojlH4OXbLloY=8ZA{Ug$$h>9(d;V$3q$Oe>*f38rywR~t(Hej zR*o$uir#|0yF29#g0nM%D zEw}l;#<*5q!s(VpMQ?v#;fqhwe`&Nn_jCB!dlLC&Y!kQuh~(g#xz;HCYP5^^T)*iu zyQ!5$x(jYcBJV1B8<+8vo*+)>B*2e6v%(*3VLpNb7*up6$Y4mKZ}vGuio=|7G9oJM z*CY+La+z#|L>?AznRj9z5+8PHdgr2TgnIJldUf&W{@zxifZB+{lephBNe+o_#ULH} zB#jc+WE-v*ClQn3<3DW7rvd|K$>{nWe|~v{O!{D<+z@J4Jf=)h@?Yf@2F{!pG8#ye zC9QTYi@nPX33gj#o^>%tws$bSpcZHaf97ISIICN@AZ(HuSCqTBbRU3fU&R53Mwczn-7M=KSy&w@h?&}=#-5G7RG&ST zz`pZg4rm7YuVi`-&&L`v-zeLkxyl28y>DBHVF#H3SjP>YvVTUmZ7^XYWnD<(Ch1~U z@y#Wi&0Mvc*h>evfS3?V-_NP#=laC_7@!B4BdP@ymraTG_lnCFoC7)Ofz&}`%fU#^ zd2jpiUB4_sM|<3#$yA%Jyd&A0aey}>$|&d13O5KNsZ?c@mwncfLCJ4QKqPXjsz^Wi zH1^~~<*dvf*7p;XXAS&jv*5WNX3Q}X3*QrwN!eoSsotXdz4!w%8xKp$8P?SAN4zGZ zXkz7CZ1dzIf^H>-$K7O5B*yD2ot-TCn7=0cN`x1r}2@fP#vhc20vnsGgfv;mnS7nORvR?|uL$spj+1#JCT_WNK3yIyw* zkz%&_in;O^gX_>0;Kf{-iN2lAb=K1em)I>!7qT3uWTYCpvEWA==fZMDRmuwVr|266 z`Sz+wK?H0j1s^+d$Pkmr7Wq~%TsTDH>De}eej8sG#9S*v#?{t}kC#`={D#Xt$J5Lb zWKgQ@{;jva+YzV()`0Tcw}2?6uyrt>*iAZZ2%x6#y)Sjp^i>R*pQ~}XybK`I+k)7s zu)&}qziRwA|8IJtD@m}W4ribtE!6r#s`}qgoBr=!F;*W?AvnZl#>y?(eDH5GU}ZCk z;LJ-&UD08r9|1t$(r4jO9BoE=(U>y@-qB|a5h@r+Ms(a@k3dWBz<{{{LG z+x+N4a^`B5<3uI%y0P)Lb~X$UlWag2)d4qRFrzyk@=$gvhMJk3*+stI{Wo7~(Sxn? 
z!XkrOYJD-HG)Hlq%F3x#VHe&A*1ny1XM$L)_Y`q<{I(HXHldyTb9sy7=L6%6h zY61Fy1WchVr92}|^~d%AF8!}Q#9DQ8;`8f?y<~{3T7bj21x3>QJ z^6)^e%w<%1(cbz;1TmgSLQL-`^xnHJ5tyu~Ai2FBNI+(Dm7GpCAa!EfxLM^O0eu7D zp8!@~3py{n`jN$~w(noz#k_3qC|OFP2L}~41%wFB>47cR#G5~>~{&~&rHWl4#J zUyp$B%cYis z>|B!UTtIejzl+DdYRKNIzfEvomNn6d%@B%p%VG7+u!22@m!36^)LBsh81VH8f$jB6 z)1EbBNN0!?;e!~)j;L~euA9o=oIznit3v|kW5!&>FHB8^@L4C-T!@@KyOC3XMv^LN z641>}Q$L+LjO^Xh70tl-y2 zsMgVs|XP?CP7D*|t;W+PTzu(SWMMvs_V568UTh3=G z-!0hR4P$ACZnrcJtibQjZHBXfj-T*p>-zxrFS>duQx)BAZ&bA6ANZhfFV9Ai0UUm9 zpTdq8)DLrC&(z5^jS%p+SfBdBKE|?WNtpN|Gu z+Odw|GaF6{B(w|ZKw~$J8#sgOe=E8WLvOvA#4Va##&u>b%^B%qo@%9J!`s8w7m2*y z`Y$kT9(Gf0?48|*m7Z(vrc`*54|59NRk|$1J}V=iQ=|CL=gt~GJ_FpcDQi&kjq#b^ z&)gK?t2{}>9wp!5q$n=|SnA`94`cCtmiI8JuD<>U&Rl!UfaxiN+(rM76op0XVm;Ba zoK``7Umh~AOsF1+`E;tk1hw)~1}=CG{8!eU zfjPg_x~>-!KK1tRw^?EVl6I$i@w~~AvsYv*}w|}(j071!YSnyMlp#7O*ZvQvsnp!Rk zO&u>%0(ib}tXW$!+`F`7wV!ABw*c4|v*r>&J~=U#&i3np(I}{lb;aWMv!V@DACGio z|5%e2u7&wDSbX1S`IK^z@x!94%yV3#3?bh2T?3eyL$H!dASY#RyT7CoVGl%doIZT2 zhRT;zng#wx?zkqMb%@FAvdN{5|8AEN7PYXRhq=-66_m0usF?Db$i^Zy;32iy}XR3>Gd8+r#ES)*ovR+G!<`<*HN-~>)5G@%Bs5f$!xf8gRHiDP21pJTAPO1 zTETelTUp88$C~G9$Qy7Sz|b-5L-lJ)*VJ_Xgm?uO^4N=#+|ms;6EF}Q0DfxfZ5!c| zXDoSUz4qf}12;u@S9SfWXPlXQM2vq4Aga+&9!oW`d&?5uX5;)HqyJ0m?0xK0VDo5` zdzEvuP2Y=Xet&zVd8{AV%t6Y#Klz-lbZl(Wcba3(OH?mr<*F=tWSF4jdFh37fNubZ zdoH2uGM<;+M*2YE@8(rr3~-9=5&q~=O`&Wc*R{{SoGGR1F}nr~muEzi-Y9@^C8j4{Nq1>~6CqVr5kE>02~XmDOb zZQgY>E72RjBEI*}{4n5saDE#!z?x#a`Ev=k-)*<4pHCG0yjP=C;Y>Lr4k!$HSO7ny z@7IXuUDZ%=T>$}k3I6G<`H+i<+kbZXr=z6`2KrR0UodR%S^Rhfd+ghG@fEcWI<@Uf z8fkn6H5F77Mg>c}TFfnWFnm1-#Ywn@Ki=0y4@Wkhd-E%pZQ10$Apd4nGkqUS=xxA) z6r1eXmy5-|wvv@FUiG@@)((JPsfyXDA$C+z^Vn8?&Q!narD>3mbil9zNwIO&WS@cTbFvvZU1GWmA3$uNN5-lbS<`X2>>MX z8$~)(c~NFudsu1$-1~xC2pH}XP{sw|Mm@;N=ULbie*SL&XzO5l;%<{f;@Ve!_VEAc zGKdENN!p60!87rRalA>%T&;b#7bgpTQ5uPGvim6@N~QLG$j`Md%VsW@!)udR)8>ck z)fUn+in_5Xbo-tNtv*G08O%R0={k5GlJD(QB#e-hqD)rh=+2VujyuKsOPLYJe zim)Pbg3K9_+^s^2goX3Y?_$KPz~s;b6|3zeB~{xurzrN(Gq-ji0A*+0Tv_3GCn#%8 znDrLr*1+eydU(C2p62-8*`}jz}=jSrr9~Pj{*L(oWa68);^!#Rqo6U z0ypR#@C8LCek`xtdW2}WhscD{KiGn`qH8{Az+G1f{Ty9=HlkON@y4Td!MXjz7ZkEY zJu$E@Wp_|gyUB<^hnPObX6@?9*IOuzqq6yOvfeoG2wTvfzloF z(%4-<2A<;U;mjT%5DBYu_}Rh$p31_loVV9r4r}-3UQ>qxmfmjZU`@WvEOTNK7&QiE zxas|lXTZ^GfEox&UzK9(i#98x2Qt{q@TO|_Y})?i%k$E&W?gPg;78S#DG3Ylpf+sOKDe8S2N$mq+>%WUlQu>aim3Epr^DEn6EpuNDKD zZ$(aON{Xvm_Dwm}Z=|D}!_ISdB3^$;!~Z@j&z9HrT!3#I1azEPzkM_U#Ql;p9_DWi z9{>^N2Mk{G!;+_VC+U6=It?lsa0TSE3i>(M0Sob2kb^97Qj2D6B^oQrxMt53QC7;- zr>9PA-}||LWyin2Fazv+J_Y9|xm^s|bVvTCH1vD@996aMy2kW3od5Jm&palRU{jeCocrTg` z^m~wM3~icbUuIu4%Na0(i9Q!kuGilFwn>n#M;MD_s2^?@5TaiPhNpI}zWf81d)H}l z+AS`}rr=bzys4m=^W+3Grr`lO*m`vHV-4q(89*CVn{M0<=(rh>dhpY4KM7kv!e^y= z%u2~h%f87&Tfdn7J_|750AE7xF)9kQuvDQZNNt4B?_l?HAva69S{~vD3R&^`>pPAH zOsQ+;#_3{&i54z40(}QR7N9^)W)B#We7|hlAU?isi|Bj1+5HK2KYtj&%TBtOFmNaj zyR{$oikox=Q>0PtR$D9eXI#Lt04Z(_%uqDg- z7lruRHDJB3Vu82eEhfXJ%aqoDmfO~V-)=Jtre&8T{3o3Avx=A+OU4osbF*eETW+WO zr{@awSNv}NYNd-8aKi75-ufmt0vqo)>)P@)D)Q=l|7Q(W2wK#}Y~?X?$LjAN_ZV_ z;h4hoKDakBYU%jVXK{!eYvV=>Yn4Z04Ea|NqKFEuN_ka=hj|w1Q>QX_NlS%S&@&I@ z2~N=DjJj4_36qW^F&f=uICV2q?7lPk2~hTW zW~3o8_Skk99KZOVWdU6Ny|i+*jX0r`#GRj7e%#K*XEqB5y99zQ{U0ATW6v&3$$FWb zGjZ8=-Qk4I*F7AbjPoNaI?dQM0UPD3nY2>N^Axt zYPTjZb7VEkUW)ZzDq1M&r+OVQTKWg%b82W=P5eG!I)`?jKZ02vTl&T%TeFf{$4vnQ zTi*cSSNkSqvA^U_MNWUnlRvxc(0d~$)m!hRB%z?n5y_*COQB-w&c$nsK6?Q58+i@2 zfj#~0+C>8@@&#&h6<=qaM_3g~kKp*>-BkU4XJ7h^;v6uvqU}Tzz%aJQ=4$-)Ayd|S zb&@ru5io&$Z;eNb=99hciG~N|i|@-`uBO&4_-^POzS(BY)z7k&yn&lsU(1TJG-AVa zUYXybMvwFi`Xpek;Sl*;d*`JjM+Eco|1`KB2$J~2g|q3cgIn!fBEW#+*(;@-2}m1d 
zQ_9hq_GR-4_9>#%#~+3nT>=g2l)n)m?}b0N{}s6xEN5c-w)uTd*Tvi6xI8d9V0O{Z z2u<5&3JLq{aP#%X^M3U)o-uA#+XSq!yKSn0!6}*E*3B0_pepf*^jTIx?|VCPEl6Fk z^qJROH0xuNgPdNP*Q!|Nx_T`S(dYO$$MssR(m+~r7SEu_&11~$!&Fx%Z~sKskVWOk zk#+gDwSgMZbw$CwTj;2rfk|VhIjC!Mgod)wQ4O|{LFwutAT$xLf%<(Z@;%zVvqIhE z?w}(CuN9el_dilc!NMEV_*ZvOS)&I4g8av}iN2JN@3V!^Ch+DPL5h=1I0acwvBc?H z`)5&w+&>yrU(K|B%Ko|Oz^A9KVv+%V!EP3|#+g2Pb$g;z^r0Js8c3zyjd@a+@83Cl zI07aAxacNK>XS=rZ<67^?-$A51--hIIcBHZrWIXw{Z+S%<;@4S)lZl!Su5d;H^~9D zy#>M>Sqg|hYe~6T*NFFhfG7K5_cl^lV5VrtnU!c>-S2VtYubkR5u+u0wEe$42l6%7 ztBP6(DP!J0Nlt~FpX4bZZN2?Ml|l_$NY79x0Zs0^M7D_v1S9KS;xxjd6pdsI#Kg;c zf{QiV1uDE`ou=AJfT$wfne&AC`)yf*rl8%9LuLF~)*zVmL|LwA5_c9XfdAoL`+cms zAJ^~xv1vbrrb{bz8R#BtJTuX=uVLc)ZsSd3*09U}_7@TBEIasedF*vs@24O`4Aj6v zSmL_u9~x5M6smJCI>F@E+hv2E3_R?3&#iqGv#R9Xw@DvEIS;JMhKh06LYv~}Aheb+NWWp!s1h3F>V_jBi2)HFAuopN1`#R$Zl&382CiM|wX~^A#_NOMy#HkLw%f*X*9ZQd+ijd^=VkO%ce-=5P~?B3 zFly46IRc*__pHxYr0K|aJI?r_neAm9{+_WM-sUYYr4N^!_$k$|C9D1t^g9-rSLKaLq#sd zBoNCj#drNiu@iPd+js1-yb0>rC1AR*K1LsN;$p`q#7pjTKSx&t$Oq(9L>8IqDdkA% zfxxju1JLy1_j~*XfXppT^|`-s5b??{yHTmH?Pc_BI{~NBM#{#%Aqd}&>9Gr7U;_e* z#%qb^$fty&e)1Y1>ZpFm_RB0;N;hlC^lMh>-R{JLCvoz=wDhyu85x{esr-m8?^kzF zfcpT&-;Z4NB}iT?FTlH_W%;tN%(SxsOddWj)5Uj*07eK`o!)nCIhY{1wL?9$d}&HD z6e;{LDk4qpDm_X|xub=gugIk#c5O5_N~qQlKS6smxi>5zk1;|zt;INBo^A}hFKEqs z(z)xa;!<(OLQw7u*-ueq!^$|llI`|ajProu34(UB<7wrY0B`yaxBeP5Wxr)B0Ph7n zI?+B@)ZbjXAnHf8G{cD6N_4{un1a6IFz{l&Bp`Huu7I|v=|mL#(#^D5_BbQjULRHM z>4vHE)EPU!J57n3ADCMRNbhrJIU+Y9|6SPYd&!gX`Ta-Jdls~p59i-8Dz|26W9AZQ zLQ~N*zPe04&a6^TFw#2lPIOq^rmwMb12Sn03;?HP{&6xb`aCUq>1nymy~LM3(tFeb zWZX)Q)OLUT+!bLEFT@dPKVo&RGKxsF9}JU*%Om1rFTyBm_sI+2ajv(|UiO`{h5&;w zXh$O(0eY_exva(&*@7KFt@eS{sDW>=K2T*nth?449XbWv^+BhUCs+D3c*Jj5_v@Bs zjjPHEbd>7{Ujv=uZNL?e_okIs09S*7>$$!BkI;bUcZBrkTeWOTK)-R`s=Wdee z%W9jRo1hsz{w03f!aB%B!qwLKk5NCWl&@Ly3)(Bojx)ke)=+z@T`8f~McYa>bjqfZ z^TI?3g0mriCB8#92!KI4q4j!Fy21~!!lsrFt?UjL)e*Bt?jEdDa!P8G703z$_uMX- zl3v$GnqD;HdXZP)jEGlhx1YW4jNZfxXS=oSs})qs*h$J3mpwc_P`>F$1sZAhXwSzI ziNMJ;ZcHDNaK*k7sRueiTptHfD3!PVP5Pi~px4yUg!TtiV6Nq{pJrYhezOx`s=xqK zWt9e85|ceoJ`wxIZ9d#@A(B)RLk^mPRp&) zoR)F2lZ(oO3*H8Q6pD}FnC%^TaCQLenWsZ!_`wbdp_p&P#&=9KJkXwL&sn{bECUm+ z*J1cV_)TOk>w7vJ#>dAEPM^XG<4Pq!SDuO@1aWbtl2C|H*tI-=VQwW+(JXDPg#{m+ zD=-wc*k8ut!iu#)Te9!UV@{X8>GCe%UfV@SmFIS|ACd>EMyaOsF8vdm7e!<8Ue)%#EF4tcKqO8?==zo5tm7u?_hxYpx4tk;58(MZ(XO;s*gcZ` zWL;sFDSNmj2?3f*HSF*`@oWK78xFFJ10Hy;ux2Rf`RuI&&xdNpiguB+TlpY-MDw?F zTCrHWmVsFoRgk)pQ#D}#7{cw`lEYG3d6a1GJqf<-?b=!c&jhfqaI;YgJ23HBsz|oy zhZP?cl0pamxG6{HKSaiwLi|vZdu~E)xTy$8$&G#OS)t9_i>m??Ws~%>3>E4a9s)2> zHvfoDYY1-q1gOK>zsA0M0YAGx6Ba2-a25_~UdH&G`2A?T^LuL9vO$kLXOPpKr?`p# z2>8GD>U478%uVVp$$;jrpY_t2R=iWM<0p3}-nuuJsG50))?W;hXeigF6h1MQnoB`6 zjFqY0$3GPu@l5KvSPdN!m^Hi_ngH;-E4FLbYW7G07eEYkCks4!)!g(kX)$(n@&SRmw?>MmV~R-NA8x{sZT_Zj)w_k9-fGgPmIP3(VKKuzb)*h(%z z`Dw*r@O8*Tg{C7q#JCpwKjBBDtMx(R(s{2tT_*~HM$))-hH+Wo_u`?8lm$iC9sZ|C z&qjP6YltAVvW~32tlQ^x;Xl;GS`V_x`GeME*tLL(5P{rmJ4|j4Hb`QolfQUXsAn6F zRYVPZy%EH|9+4S`IA_vQNtS&FoW9AI(eG2)IG8l0(L#zl*!2wh%x}bWiNI+trD?F$ zmHcsh&5d;gs|{efUKLiB`H(3LXOfCy4BmeSOdJi%tiKjoUmwMT@$Vqw%9yCji!UsT_IY-&i0DDJvh&KO#?GZ z6L8b(Z~M#R>D)R5J;vj7@Vzwp`FRvUSg6)`mrvd-bf!6Ty;gehz0iX{1>e$mPQpVh zNsejYc>%2TpHSIzp?@JA4kIso>H0)0PAm7LeX*?M%Yde4f#Z~GaXosnU!V}uZVS5* zf&G-uE0iaVIrp)sNdI5e50C|bnrl@jOU9&mBVyBcN;_@3HkFM4ZU~c>)TScvoDdru zE5twbW088}^4hRY9PM0|lR^JV(^9YRA1N1My!hbj-kICFse_KyPs-mqzE3WpW=vr@ zLQax9k6==^;Lo)cHmONH{x-K~YVlA79fY7*U;a6nx}obu!Ws*-X@x2MI-ZA}2W4D9c1S^kMf zAJV`hA71$FD7AYeaEj2>Cw&t#RD4S$)3wxm88J-OA$e$l}q^75-uCF(pJ9s}rxKCvhWoN@Bxu*i^q3R~) zoh!aFg!6`GJCkVsjT(i&OqOR_o~ie}>~eV)D>L=3JVdpIT(8JJBDK`ZnA2u;7iOpa 
z97+%!{Vh0JRatkq8U3cO6?}plPaN3t>Fqh9l-{lvexbZ}yY1@s*vt1Wuo)5@nwe9+ z|DZ}!XDD~_ci!czdvkg+lJhwqnBH*6OrIBaSkPm1LBI+FsG!VJC#>k-~z&A8l4^#iRQGG1%VDg(E`)=?uY zZr#m;-(h^6PNhK&-q<}7iGC1IUWiuVTSS>{GLtRf$C`pQ!Zg7id0p3K4=?xL(nDk# zw(V0owNr^WA&nNFrk8U64_R|kkNYb{>rMKW8d>45kHV04f58bWu};2r=r(>jbwJ4zjSf&6O&)ZlPunKCZM`cbqxx82+pf1wT!%=? zC5rG|a_0kt?;JGnG~OVz^MKf_p~KJK)$7~_?F{XHkKLpoWSJ?BdHdIzX|c2(i)Ebvq+ zno8%4F6KtEJMf8!>IM1@$i`(=sUyXl&QwN+cIiEJ@tIh!dW}jof1eyaFhb#g1yvzd zvfPw6`X(-esPIgN=qk78v;;vRnb-*kNFYMv6DiQhc+jFg5B#$69M1LC!pnE2+%aHD2|e$tf6emS!$&KbspIL>9uK1*FeK2 zTj&D7J6rTXQf3K4dO{utmb{&;-*rfP^W1_`y&KJZE z)OMkZi`eu%(BBfDAWCb4(L4P6NgQwH9A{Q7bSJu`;Po1P^o@Y}bHI0w!U({7`^#MO zvv+9J&b{;oEX`eXx;EiTm~x;J1PtvJzxmKxSzG2}@XqkDe&kAbILaj7TbiD}?jG=~ZjQS8;99(iSe?wwsI4Amu=iCDbMNfcP0bw>kqwS5kReIc`R_G72)T zlAJXa)v>VajcWO+Ar;kupqTGdhyb`TG3NG0HJW{%qaRf5{ol`E$95Qt@++vIb7?=`m`;q zX*dn0jc5>0s}0w5A_10=6RbB+RK0Y2R5)e12hR*)JfW7olUK6!Sr5+Ns`Iqz_w*^6 z6xySRA?W2}z8vR1?OSFeV?dkS7YS1cv(@qZ%ko4YoDr&4@K3K=;bYDDv>$xwX?ncm9qn8v`GdBSCHX2IlBWD>%Z9tS88932i-S>J%fUD7%Ji< zUV}gdm`4g7v$V>W>T2qLs;ml1alrW$D+W~5x|&%>$Pb+k?JR~aAE!oyDpF`9pQd#> zLStqdmyWg0CF7wB8R>QUpi_9Gg9~?`efsuT0WAI=`Bi!ufJl6!+g{vEo!$Ck=F*R$ zWHa>MWYZrXhF{}>n!df-IfM|)BI<(@bxZAB$|4cPBqZMFxa)n1+{{x3vem1GB9-3d z7tMpPVajN7nyP#4ua!)R^5;MQ+5O<^4`zs|90ph+K}N^Y+f7b_#rGzvIvWuovk=(J z2U)CRoReaO7mgkI2TV$GGIez>TDB}}*HU4N(ICr@0M6IioCp0TCMo{GAp*}llkj?Z z%ob$PIeYS{ljC9EakPIDt0{mX4|F?L{Z9uO;~j4QPzt2~$?2EHclVo z1C>pW5hsk$?Go$iYTu`T5Kr^|Rqa#CiXG-{gEH`v{B0m!wRCPpsXx^ORS$$OAB>lj zY69<$TqwijU%{9e%L=#xSoDuwu5}V)#@A)NZ&P8{ofBFkwre+d7|JTtYm<(aza29F z^2!*~;eP`?!;md@?pMssEl1x|9gE^0X%%D zYrl!5xA$-zW=fLX2e&Jm=nQ@x)Dwv}pIkig)rP^=m zFXs`655*S50zoagJ(@N!hxFkQ5=0f(h9V)Y`Ddj z+b<0-c5QQQ_$87+|HMvNs~vqpV$|aY_{nUAKr-(uP7IP0>(YWpiL zXjG(2CN_A3=FHK^NGj<*fF%hBU+H*^0N}zws6Ihy|Rb5uB@i1>W2eE|JK6L zq|Ss*R$et(TH`>g$B5xl6i`Mr}ppNjUWoeqT>hCx!q0Mt*j_z71WHhx0zala<-l83@ZX@^icWuZk_EN@-Q%66$ zJI~|EzJ8witFYMG*wJP;R84ZsfROy_)IY^`oRWHgMZ7pY8YNhp>Fo0~_z!W)&gRfo zGmilkpwQ|YaK^6?%wAMfGlydCe@7tjFKDg4Ms`De*~7(;%y-L$cSap)PDM}1Q&Uxw z@e!^v7l-!Z)%rd!q8yeoAwrtEq#A8+%JK&&KFX`Ax<86~G8s#dboHiG`tE{Euu#+V zd_k3~vmw|9ey%r1Jh6r#f0(SdX4}4lx2DELbHU1f2&NJ?Kyc>H;<@8X&`$5jh>hQB z<@XJ3eQ{H8zT@-C`Hfkb$@#B4^+BH=VCuFFlLb<=6s-c^O~gVh58Mq#-Fsu=*S3!y zj4}wF3`6Ze`b>m@hNJoD7rfsrCfhX%FUk$qpv~$@#GMA915r^L)!l6dKw8{*^!)zL zeKD%;DVSGc3PZ$U++a5TUp-1$6aL$jq`dRYLGdXfi1S zJlT!(+o9NcS|@A~)q46=16jnn2q@xQicr>#i!Xl+b7?Ws-mLSFPnIa#&oXhdT~qQ} z@Nn6Bqo@=w&GhpI{bp@9Z}-$IVM--_4AqG1_c`z&2FU_S7p;52ihfRQH#^oJzdhKl zn3UB1*cCFP&;umGe+^3nnwf`I(C-spXdeFYWx}vdq2ySZ>y8jB>9&DS7PmEr1-Fex zVURKwI#|{`AlF2%%1TAWCVlON1l++5*b;HG;hy8yvz(2RNtF}KeiUDV@%erpC@eSP zb7)LbmG_I&)+9AwcUG&gnv?+Mu?93jg?oHH_EJYb+6ySc=f$R^2bMUEHZOC=JSaXV zzNWaBS!IkPQ=-a4IW{-v<{I;B$}NZ&_QIn7`WxEeAw*JqUZ*@_=X_RQ9^tI{hhQJjQ0klprabaOJ-bu+|UM?`&LCX zWcd1a6Xf=io7>ePRreSIDQ{d~+5L4NP1muIB@|4nJP+OJUHb3}=I*Ns>RlbtbwS^H zQ&YjW(dVu{CZVB={ujyOOAO-67R1T9!{YCWXDq`c+Xo|Ay>LhTLLlB6^zLLygE8sc zdN&vkXqFJaUkV~^NrE+Q)Yz^|FBUeZbF0PYQ1|1i8fA%%+Xy#DbS~Rq@6>_~PTT;l z5*jgi@=KpCeEzI9ATE?kI2q^&MjU_iA0YZh2nd^!FCTA7+8^@6yB6fM9U*08? 
zV(aU4XwfW1SXw@!6XAG8{aB5NlIJ<*(_}xtBKCX^IzlG))z={gBp)32Ai2L?OPK|7 zq|f^mOPED^?wipG3@V{@w+UBV8>qVv{g}db1A7 zAlApv6%@@ln5{nK6#K2j20&v%yN;E7B{`bpP3R*GUT5@mr5shUc-@|lw~|m@Z5>IZ z(<6S`#zy_g$XleO5d}%aqhq^Z4GC1`<=Cr)E<;&yk;nnE$b!IG%3I2OTu%%iWn}EcqSs3^xDXn~y#rX>!@$_b>!SR1T;8ocN)ZWM|YbrDs zK)u(VR*unstpN;ku+jtVINCzGA?y9NcK%CjCTf@LQqg~A;9_k%ulTsfWXfD@>sNkd zdo;OIxya^2jJ3v>f^5@6fpI5g2x{1Ts>;x_DN4FO(X_?=tHdHUV`?Qozh0s&)6oQ_ z9fubYs=oqz$SLVyue_hHzT_t=M#w>kX%|ajtxv^0HOh82zIVDoV7zc)vQvCKqLN)u z6&@9ln4SGl=WUZy;e>n-Kc^X(6po(Anmu)^u)RwewB~MF>_`YFcnaUkh^E2rYw8F^ zvEnE8hiiW{g&z)-)|bRgf0G%>XWAKt1n;3r*--RBu&7D8ILBJnmT;w>vD%Bor8Xk^+5q zez7i?c0tpzbF)S%2!5bF(&mWPm{Zf`sIhm+@2_F z)FuAG*zi|-02j@OI^BVO5D!;DRr^-z`QnP%#?B&Z698Jx0+7>?++P-tk zKEUwk%x;3n5TlSM8j@0hk|0mQgG^qhf-LC&G_c;A>6xPyOx3#*X29B+LDzJ@Y@91hVfK|LE055i= zr2T;+`e0(U*;7&qJ_V$4j0c zz4VI*1!y&Ri0t8lzGC?=Ed3wpy*Rpd{S%>r=?wqB# z($s#JI&jtkegWtxN^lPUN{JTChSVRnQPci`#(VzC-7k1CNiQt$2uLlLL)n9X1__LA z9;(j~>p&21g~wVG)p3Oq*uF+5g@sR^_3iYY8b)RRmH3!+rbm8!S7}bX;zC0nB4;6_ z=r!F1M!HWuu#+stV>^?&Q+ir}PmCB|FHCd|*7xLN#wkk#`?=bU&<@_kX*`1l2|`a) zU&uT`v{(*yZMD}55vY=Q(3$jrGo_0ynA$XBC*{95(etpMzQTB4)c;v_WYw5hSaW)8 zIvX)1(xH;giZsIY_%9Hw^NO@h>S?$j>|Jwm{EMh7Ho6{WbtkB?+lISleRqW_-Xqvx z5v5Qi>LUw>xs)j#IAY{(pEP(J)!AqpdcnbURW5KhV&qW7`CKZzB?oe3Nrs;TTr%&t zGOnKMe?IVr5@ki$;wj%fc~ZDBRwP2v2`3_iq5vrBfSGKH4@Ax#jM!x)%sJftV(>Y0 zrP%7t{dG2P9foCljQD_FIq6LV-Ac(tWRmu)E2?_aC#x4aq0wYYSN3D;=}|+BV9*o9 zzAq>}L1gq!^4++Rakm}E;LvcN`U^XgmNxH1?@k3M)c61US*$>&MXXg%i+sB)_pNv!!h=O2#1OxAPUs~+)`8y8cr*Vf5xS`v3+Es;go8Om@Cjtb68jVzOUA5ka36KUMmICCl(l>v;5Aaq}J`xBeGl zxLCM|d-Kci;48yXi3MT|fhpO=fcGjBEN6QO&xX9?nQd|yBgf<^H$Q*YYi%3R(|(|w zC7Z_O)KevzpKYFo+nHK)eo8mClxo?XV|CthMgXcR;}o`#LeGOz5`#ll3LPR2G$}7A zJB-XX7d}A=IMhW5RYD%lg5T)OZ_yZVALTRdmEzI^?IJ4)nl-Y)Xjq$lHJ9mF2m*7| zJsiK2xZ||#RpQ@=T`%kO?nH-r9=CsVd+afi?QDdJ;`jYs`%F0G7|2vKJomuHD>Q(v z4lO=3%LXowl&ClUKNsuRw$FaIu51+4AiTitk#Oz*{^2~kN3tP>=YMsFFOmOz#n$k8 zosE}D2x}UEpUa(dH15`WcKJ_PQn+Tf%M^VFSbm^pS#aASE21|%IUCtEWLpTeI-v|k zj98o)N1c3Gt>M)C0%Sypves=!B<;M|PlKsz9kSwY)z~rPY7$&eq%oguZA2`(OYvHh z1gfD~Qc4!8by!!84hZ1{I6=Jg_nn&%c`qJVQFkwe+(1ypnqKz4fcAKV)K~b`hC7}X zwZBudljpibr4KQ#V)TJR_21NeO#+q|EA#&D`e2<{-wKOF34EHB(`O318SmF$Aul_; zeOFbyk0%;kE-egTzKhsT{9J}PnPwfCxd)gPo~nK$vyhHwoxZ|lX3<Xuk3bk73NUxNTAj(=;o%yKI-sv1|yxKd*N_teUVxR9D<`;-Yk$E2k*cp+KwO zHtNdUyaY&$phD_s(8yHuaXq)*85YvGJTyY6&G)Y|spp7L>zYmBhuX=XW`3J# z=jj5N>DreLCh6QY!it8wijV$m@6!YJ2~WuWGkEf!R&l*46wH!ewlG5Z66C*2zae}6 z_0Ka0u6Ta|DlOhea(u$ENo!Z1){qiaU?D%#!9iR;Ss^xo>@9amAG;u*7)Ld1c5PEu z%EM_>3Wzy~3KFtf(@q#^08Vc>R36ByRi>YNN}Ci8I^CcQP;CxQ0u!iUd8%WS3^ZIm&0d zQ9S;0QN*?b0|=p*z8qLooylNOC+f0#^#WN~nndum@N{_mki0NCU(!@7{Kfun>1UfO zLQ_|ld`99YgaazHKW?r_%&&%}W0L@3*0bl}_i2dbjpu}EgD}ec)>quV$2NRF!N~p(#7Jj$%mb9lT!V*J+f8hSh$oYfp|WGgLT8$414;O@3nO*c z0Cjl(XW{Y)(I%wGjpw6peizD?(47y@mO%^Syw2SI8z@NADaIwmo8YKA@}G}qvL}ks z?K}~M+gATl!q)tO_=_A>ySFffuhm#M%Q`tf^E`4pwEu>h08m)9{arWl*eerx;?6J0 zK($HG>q5oTt{Z-pdj}k*)2iUV?V(j7N?hHcpBAM-ccbjqzyW`#3pa-Mce(;fE(B#n z!3o(SZiQ<>Mk7OCkVF6ReCoOS$2KHtj=%qTe7OpPden33ViB_w$ix`0^FP z`M?#3K7xDy@@%|MRz`?960Ul?Wee*<%u|7nf(aLQfPuW5h~zZa;Kl{R*G@ig$s~8UyB_?GauTS!1Y^CFDcKBbE!Po2 zEUFTyy2xQ7*2KV5R$AfY_Sgv}(pX`gqIx6sszyTjbXT@2MIpa~hwQ!Mb|`c@I~;Ya zC^%F|8csC6FOWB_CMYI2j@Ankyf;$eHC~c;G!S(LXeo+1gXRXwso@aCf-2ur3LUlI z$=5hU>kZc_E6{bMfLlpmXuT4IZpf5QR+tMVDqDDttk4MIIS4gDX$hk1MEycMb>5gF zEcOMNn$&o6ak6tCGVXKL!#L^?SG52cG7&|57h7Ebr6cR~ADCa_rA5JlplwkX9{Db5 zm2_OII?0>zc1>ETNKE? 
z?-|wfw)Kr#L6o8bN(rFSLBY@i0R;u5cPR!?A}xRzA|(_X(nF9YErce$i2~9Ufdmjj zF9M>FkWi(BA|UMhcb}urKKtzVzVEo>ez{}Z4-5!tYtFUS?DIEAS3vn#GGTFa#cc6A zB3R);q$G(}NR zQsj=Yiiz01`}&lw#9AZxM0AIiVT>2!_ml?OoyV6Pdfd_~iGG+jg3cv8@o#GpaL(~Z zA0q``TbRyt!7k}$3`biyRO^7e%l7r<- z@8GV#et{(m|FIW`%2|U9en&64>;E4u-bd3$vAqkgmz!sv0-@da8u9tB+B={vFk(_* zNyH^y=a;kTNKjDnuo;6^x$0gW*x<62dKB!6z+{u5^2FcXCh-{$d^8*)l>j}QMc_}0 zHiOg2N@qi}mU_Elzs9M{IS@Xe@M@mYL@WzoHxmE>Bu#brP74x2o)KU>c`UrI&CgW- zxtf#9IgZyUzTb|uMBDZ>pG!NZ$s7?&%@Fs#=tbytnrljCvaNOMa=sRA!qs`tpXQR0 z@9!>W)FCKmVKPc%k2h!9^5RO=d-4ta&8})4XR&uvhvFxpn zauR{hlB#Y@#kpIU>~F93%|CzCZgWKSX%k;8^!728PMi5BCb z%&KWC3g*t=cPZ=sWgz-_VzFB^%@Fa+{*bM9O*I&DkNoCYr#wk?zj#k3Pm4d*_udvI zoQQ;A-?X6ksds?RG5MaW1xSi3F7$NT*4v6{s0)Rg=5T5giTWwLy3m+M67r}cXDK^S zDb>j{FTwSnh+LCorISA1PDhhxP#kH z*1q~KPD>aM>oJNvZ$L+7)3;4(d4Ajnv7=Zmp;)F61%r);`U(#1H~bhzfr_3{7mIZE zZX1T$Tc=c<(}LY)#;rE`nh>{!L?l}|+T}pcfmL`8L$_v7)JWHU3t{TV(74X|tXu*kD?mS1lJygM`(oIBowekz(v+^z4foFl)@Ln(_W~C` z$X4{U1i)J3sWtOp1h7=FBKwBTo0by|=L}oFKgJBcTw(T~lRRc`!R4$*J!N(6zi|ze zfvx^yAv(_;`zxj=15eBNWFGA#!Uz6iv52KBg~Io?a5SxqvLZyQT|BnhCGQjH{{D3o z(vE9k+xOXc)^Env9d+K~Vr;cU+rG|A2`F(NoDV4PYh&@q>`kE}E{=lA83kx=eQ!@c5R{0(+R3f^UP)66_ ze$_XY2udZ6hJSX7%b3zMdMzLDa^;6g)0LFJ>{;oFk#{@`Dc&jG^6?GivhE`Fb|oi# zJbK8D2i*XUP(A%7;UL~)>?ri!^=JAwg+ivz*zFYYxLx~>g+9*ZQ6oITn-|osERKe6 z$)U=&^y&ei2^VU6v&Z5x*iHDC)6ISBEHKw`Sav~X0U?4W+NQbP3cmkG!#9=st>l%z z@j{RL>!&>1_hhZ>?RPazx$aHnmKRkhJX>x*Y*jP`2hHk)q@L)(M*A7b(&o!}nD6uM z26Wlz{M*%ZBn*cv*hg=4)pgaS>P#X>oQBTb1?=*UhI#pD=wxy$e}r}CfgXjWL;Q*t zG(;rhKHNVc9b^$*bv{s=|Gg=T0Qsts#30Pi$GtTm6=Lc8cgb+)^X+R9O}3u8jwrd` z#El=@=qVk0%-r9K0RiPr-=dehbN51acx~p=$B81O)%~B=BYJ2xh{Yz+sZjR@ z4-)!Bn-V5$!#oli?1Zr*#XyPwhQ63eOf3G37NRSCZ7zKo*>^YHq34ml@E_ATo^}5F zQ9y@fW8Kq|t@fbGQuJL_a%2197~1%9r}@hFSwPLom)`#5(6UkicN}1#n05vATGzu3Hboy zKTxfpA0@rW!Y!NPi`yM1h_R1RCz~_zj*%N;YR5T-Imhj=SJQ!}E$!nIo%{ZO6$d+G z#ggs*F~3+pBavkh+{Do zG_+f9Xr%4(GagwIYmXAkaBo*WD;akA1O6?th$qeYG%s!qLc-q6A!dCCB6(5YK2D*} zdwou;zkTsXh{T-7({I9!CP#$RE}!}4n{vBdA>Lh1XQR*OE_U8ir)`2O&5M~{wt~qz zRsnDD90DCibU9Dti2+C`+;?*d=T{Z3cehw~%aESHa%=9oXH6U>thOqc?!nLvf20o? 
zi#$hci(2Ryp@%s{cwfhAN2hK$a;9 zOimnyPf441oM!y2*U6yXA%Y3nEmB9Fj^Rwz9fi}U`AJfIUz2Z~sP1r25DGV_)5EF%AxFWJ-l zdx-YuFoDyJjgy#2s@QMSAUt@V%F*3hcz%pGN5Oy%QI~4;CB_@pT}uVKcU^Nf1QaX{ zHZ+PFz3PoSG9E%+WCb~gi4s>~4dFp-3Whow4;&}RUK4a}{6|j`*vF61%eO{#S9i#y z*DXUF9(WqYY!xRr986uoFCJ#suQ&+ZC4ECPz6TH|Ym;KWR8Rjn_2whE3XP6|=opJnHwm_Ss?OIqT=$nK;$8TMsiDE|igHVRAc zv#q-V$+sg}zJBuc=}|c_$P+*Y+y6cIDz+E~lm(zG({IaZ_x+0AX@Q`bbH_cbltluh zR1sYh{N(PbcpP-Fc5&3-?)A9h6`H@ANGOjsp21bD0!`GJHkk&AHfu?q+Ne7AAq_r4 z^RtFXp~H@UaCFZJr3BzN=a%D>AF41w7E_sHT+XHq=-l58(87$#!HvL)L_54 zfNs$EzLsPqWFFeMJ$XZ3L>jGLedb+|=0uF*OwOSv~cSli`D?eyK z_A-d0zis+fpt}@K=Wo%;YI1YlN`Y(Uyxx6yvG}kwpPu7Mo)beL4l&1Ijjus>4uxAE zOti+_NiMmsbv61Gn{eSH{lRmqON#6u)tcPBM$OX9UzQI>$x7bEeG;xDKIpr&sD>(W zG57fr6Kx^z;9{b>%SCgPab^O`@h4j$Oy``Anmd6*@4WuNeNg@bKz0*i<HL*ry<>RbRnklK^kA+N_mCaIaMXHS?-C zFUO(f6YN*Jju&(}Ft^+dkv)WX79^$vl-d2~z<_^Q&n*kx1uod^|5>}sz{cpru_{+r zGx-E7Z1hkcakZ5U>*%*nIr=_wPmtQkU$hfl4Tf)i{c~$0~S3p=ed&`7Ewyc zEOEYpJt$Dpms);yz}?n+yEodr<(@HhPn46LZjlly)>4_+*aMS#hf)C^c=oNfi@3BFwRK1u-A&(+~>uz zdbLlxe7eAzq1!1aQ>~Uv5xpRyy?ERk3(?_%?-)`;QkUn!>R9a+l5hIg4hTUj=&K3+ z=Q_@xMSJn*Y&HB|2q5fPZ?b$tQ(r6K7Qo<80@wvO@2GuTS|Da>sH-AMDWMV4c}$%t zL@n;>$1}+9=BE6y6&uY$S5AQ;4weO|E}ait1HLlSgMpSYOkU8ZzD)~9j7VUG`NJ`( zH0*OYYi)ZxovV>n_2T)zcBWm}?jI|VP?foV6*WBCf(cGj%?6gK#a(lF5L{*rH9_0} zi$Wnll!+Blr|X3YIZXtm<^jiScY;43&*I0 zosdPPf$E`zTDvn>o(COwN<@o<{?w~6gNTpT4%byejXR_q~2(*uT`=?aDT7Pu85&l1e5Ybk$)lsb+6zp zll;3YqFVsNomrkjnofeJ?-j{AbHDkqnE=(}ShAb>{LDrXl1D-(7;z&Wk}5vJaSM*C z>^1w{D&M$n`BfX|8K4T;9IB0DP)%s*;th5%ZQi>L<^`lnr#&Bm4g>n0J43)m^0zM> zZTqe)324(|f>XDm8MkOS-p(DS>)_j1>Fb!9G(@*G!6cxNVjBV60kmS) zumQFZ#Xn;fnwo)HT;rnoTmLvx_UJE6nGa6BG)TsIPJTq}UGJ*(w*@D`hR$V#Z7lc~ z@=#JQJ3aj3cmENoHmgK>=*K?p=iSPXKSuqo(HdS~IE-@yRVC?8CUU|35`EbbpDD@8 zG!r$)f^7&F!%O(Lje?s&gJ0aF>r;ZjFzVn zaL8{jWqu5r%loj3E_b3Y^wiL%FN7%4th(+<0Y;Gif;9D%ikgBk>fYALXaym}WG#OS z>lbOpGTk&$eKR3i^~s)@^%3$Uzvh znOLR+^dT{jgvd{%z(tYPqHDkC1477(HHm@# zE~US_Qsk=d!2~RV{ln|#&FQ3Qcvseu&$4IK5^LX|CQF4#oNp%U0V{-%&$283Wuk{Xb zER^gYBiNirlUFKFMezyH#wKvI)6RnO4g{!F0*L(@ANfKzO6OG;&@i*tAfdAMeAu=C zOx&z3SjBDC;NQ#?_+$ARcxrWob;84dnZp&B?}s?Qb6##e9&Y{I`n!v)Ap&a|uJq%1 ziw0CA!Ad4Ys{jyGKnL3y{TPCk->{B9g9f~z%7ZX^!8ymgoJVDZL871LU@o+_N;6v} zRm~x!9Jr!yuHjqAfo$Lm7-lu}-n-bnzNfxkr`{cSuUx_-gs1b5pvvwK0sZvzeO&qc zT<|E6BZr;5&%I&JTZ3_t*VPcmmnUMf02jW!3)sq-EtbD-Wm$laMKA$1N%kjkmMM$& zaQ?4ovGR^jC39Y2y+V0)GKNX`g?<@Npx@Wc)uybf>|YjZ|EPCQ8KT#Z6aed~{a`_( z)Lu|56k=L6ewQly;*@>np?8Nutqy`u$ea);zGsSR&}f|s1D%-$XSK$4{C5oJA`rtV z&oMdbfi3!FaL85nb$olCC&@zkgo4h4V~q@vT`3K~2F;;aNDIlR5Am|Bj%6&}Xeqf7 zRa(|(D(tj-E5(ASdaq}Iq$v}+uWmk8FavOEdH7-f@EMjjaa+R=7$m;So=s{)GeuKpLhJye1 z#8@_^qAUQn#2={J);R>QIJ=1!ZcYCyEgasQ_$S7uRH{|F1Kosi6m-ICr{v8!$cM(- zBUJcO&J9X!-CkT4sf}PI7J6w77Gm6?{nDrXy&8#&@s$B_y^h{6@nq#?9y;Iw6!BaU zxw|2R<)6^{5>?)2nS}porawQ06=RGPw5!YH)s8Z&JlC?*|#Yi zL|lhT(vdX$$kzre1h5gv#?-nzT5T_E3rAFG;bKy7SSup)zoqTLx&V-*;hy5x<1{!c zjYXb1ikv_J^){0B-L*xlN>Zv}kVc_{g%CRv!22peF9(Bt7;;`Y4`PI!9US%l-(u&F z3;$Tg)|3u}tPLu&e^&(ajL+WnRUeT51BQ4Gp}LFM^*(4*(K?9$p{a4V2ew8o(?HC; zt8z}0B6)Cs{Q`J{j?__ntq|8NG^Vd z%>-59nUk@%MIyp2WJvRrOPTz)yZRS(?Sxx><$a*U4|=R`%U+eZL6H*IQF{}=waRO=#ridE;|02_eI*bnY=o()xt&SlgZj5`d}f0^l&iSy_n^;5nB9j z=S;3z3+R)Gi{gw>&PP12r7As_{nE-;VNkz4&LpOw60@yDbsJoWFtsdhoRD#Z5(9QBOhH zb{zjtfvWlig}tMZ&I<=dyvC+AATW0X@zmGwh8PGV&?PKZpIqFNR|F?jWEJw{JD)`X z*#HMR`TH!&u=1PYJ74VjKBj(cy^S%a8d<@YUDro!ktbjUYP7;yd09A;NgY92IS zqIhIe-2o#Q-=KT^%aC6N@kZL)EDCC3lq$wMk(<>)(^8c_HVlv6reB04lVoR<{8N8*bIVTsC(A_mL)oR0|1Pm*aoXp*OSR6F>~8^ z6f^t$1ec)E)949`1^nloNa7<0bwQGQxvqU`1L9TZ;73%$M9bqo_{vYxz+Taa6$jPt zU(f6``32co{*M8%0TiU3jJAkK1!7xp7>gVn@fgI*Uj&3VpwVH@z;Hbp&=1RV%Lu4N 
z?XyZO*e+^TBMe=Y&0}bo0NZ+;ZP<9Nx1Hxm)T|Z3C!_Fx`lfW`)FE5+1P^SsEx0|5i^htBa7d##XRiAS1> z2UVm4#lUvaAcR_Y42~bTu_uMgcpyjL4jD`QCj`-79^GLvwmYr)73pgBGWQXNM!|vzTgbv_rz6jHZP1gQ zhcdwg&_!Y@(FjBCt~djv5=Gfir9>Fv`>|Ha3iX4m`m#SkhM_NcVHF(eJRv+ll=)4* z{?Ki|cH6nm8o25^O_qmzjC_zH25_0d5O~C2jBMgOaTK)&rCQ6oV8DzYE2yjNWIt4i zCT(-IqXo7AP6DeP0z(>VSM+=9@f8?#IjTPpTH^NYSg+{v?S_ehiG}VUGkLCG46(2L zCnLgQfDfJ?k5+gTO%;UOr#-6zdRzMY^r*h)x$W>s6na#V%K6RYwivg=W`Mq<-|E+HlbGRhK#~ z)+pZza`-ux0hco6c=oCE#w1ZCUeR|ig0Y~$yCaXc(nd6Y`7giH zz(6y)>tbptPgmebm-lHDOsoVzjiRu6J5GS<7Qws!w;%s5{2${^M8|<127zdi-y!?o z4e!9*a|Q7&dD~S5xeewpHkqc82bvw9ZZr2JcXt~T0^o_;D8qiW>$_Z+l*_WqbG4Y^ zWwczTxe1_zAO2|qllVYddw(XJ>cfwnWSR4Q!wGE^wQ@_-_W-A_prSs&~s~}=_J#Ps^#wQ+xC}?og zI5o&DZ544i7Biw!wW7f)_7vl)f(hA*p!lPg0{`|P5Yy7$#)TXo`wZRlPR)5nvj@f{ ze6{`JAKJpmUCu)xnO#E&#QV*6kgZ=89DR9#QpikVw4ix4)G3DMj{298y~>)SS@;8i zVZ+CMikjRCx7Pq9l>h=0vw99s26pZoDxVAoX-N_84t<^dDTR3IMSiv(WD&k|u}wpy zH~=h0Uu8AI9g2g#-9Mh+Qr`iKEArYN_ZT{CM;{EuJO4b%wGT_sHb{sexeq2F*kiF? zWlqNuq15peIa)3Jo($DT8IVpy*bgNUk8myQRxSZ%*Fx{dV**`B29ErV*Ajek`J=&9 zC%T-&TDl)^0hMc^Hi8iAJb|X3`{*HA!^i`MM zKsk+cZH$)MT$*VxjEzPBxB7cwk&LgXE6@3WT=3-}T+}HDq4oqK=-};@pG8a2w(%Ch z!7?#3&t4a805?5phLZ=pZP+01gss4*xq#4szs36c>sBFr0hqu88;x533J4-0#TZ#a z?y_!~fE)f+0=dK<6@-EMc8r+*EYwV1-ckHJp|<0(63`b#6D&#r_^(=v-~rhG5YVpqkK{g+p6W81POElOJCPVwNRP_0a<|Z2=}*h_d{(1qZnnibNW~D zE!wP}&^{-pF7-La?EA`|l^2%Si!d{=M}iUw4FE9WB#NSc$-#sxu_X=eZ zEU{gru7JP<*LW=|k)UL@Du(p8$Q`$o`xIa_)-Q^c1!LZ-$w=>V;p;hwStM_RfZqfY zd3+jZE3qxSMi0`7poH8fALbf{uMd5_9ys(h1VFx+ow)=2{CO9sMGO{eNscUK6n>R| z5KNV75H>~OGF2fS%XAh@={BLk^!krhRq=f>$GHTJQ=ruL^gmM8;q$+ z_AIHgd9I&g&cBEa6BCGn54y6>yWNRUfn#cD+{7MqVp&die{wve@tta}a|10|Gy;x; z*BQl`b@ZKY+e)6f#@Z8j^E7LZ(PQ%-G)GZ>GbA7_%SF!0^?}!zTAm`&sy9>|wabMj z`mBgqhKRSiwBqa?iFy53XYvCQqIkjdz2zb;Y{hpENvYGm2{{N*(y^KPI0Fcuai9=S7c)1C?wXtl z+vJ%JwsVlLX;z)6nRZM5b0=0ohWc3eNiTdk;G6N91=~%h)Non^KR>z*#%_V=LP!is ze@o|o`{6;?-A=Y5&AK&NXQs8fe(3!p5SH7}gQ^N~j~7J>n%9CUvyGYqiO{{blQ>6A zi{RU_2NvBbBj~9VN!h$BM0BrGSxngJ0N89o6y*Elea#KQZUbT~tbqr-tR0PhMRV1h zj&4b789(V=M(iCHqPcB3iTjXLq`g4bL{W~Igj&}wC8~-Rf$!FH$7l1im1AP*&CM?z zUa*+R?T;775_hRBg7vq;Cy|9=;ILYPnB~!CBY4xjN8SW?JOSFyRsY>p!74%#`u1!x zE_z7mtU#*DrywGKx@Vvr&J9z_f(3$=~!DcO_IU#lViWAx26D6Onj zLD3^Tb%Div%$=UxZ5ks;tRbqrB~mR@%W^b$v;B7;@Ds<+(YSOcYmY_dThD=mE}{z5 zTtR4?aPHz>C+Aj3nW%tT0;J)Ku@|-t^aEPPw!|{nj?}Nb2-lCh#y39V!eWRQP|jc6 z%evJ`a&Z4`< zR+YRk0S%}Ccf7?@>O|1-Vurm4hYLLq3d3TJ4YX91;uTLrq?kL0+D@#koi0|F@gS#K zifc#j`1XRx{L2&kY0in65Z{asIR)xHGc`cqads?52@B7i?+9aaWL;KFz`?r6IN*dB?a+%I-O^`xAJO?mXx9`9N#@r;mKrP2b4FTrmh^?r8!$Gfb91VDso!l;KJAX&WJl=$j zM2WtRJrV5Xz528vaB=u=!!XCzPED(odde%Ey1VKE8mDP7yXvqK?+0Sojo|$3 zkDTNQ9bl(P_ytBm07PLmh(FlN@4c)-+S~o!tGSZ8i&W;o1Lf&FnS5fkN@kQ?d|n}c zI%+jl4EG3-%wqLwF-ZF!)SQoNby1LEK3V9ML~C66G8cjQHv6ly$In2yzjp9nlg1ngsMOZa@GLdEvYdib3u1NZs(p!2}pi ze9hGJ(o`ShY77XX6zw-1=eHm=q#VW{W|lR!E4pc;s$7Jxuf zNt2T06~cl#GpdUW=o<&367~=|D96s75H#A(N>h@kZMFyw_^Ft3D;9b-E|tEu`r<#E z49Po{16xbZ7tN1>NMPVJs4i%dyh55e6Y|=EK80M$tn~8_4IWNn9*pN zRsI{ifAoi+6fX{Yv{tfd3lsz9hL8%0m}nc`f-G=eoaou(eEFAjE&?g<9+_j^tHxfV znJT8b&zV}WX{~fJWa7qUt*$dFjd#aCN1ScWB+Y2}z2H*Jf4d5KLqrV>CBq%0IweCk z!Lzm9Z76T;jyA?%5IMta3FT6-cI^U?Tnl}-6!Li#N(jW(WHlYg{FyETQe|`gH-SF8 z_!A-6ThiEaDsK6y848*JQkcaioW6Ugf-JK7!;e+*H0IQsHbd!8Qu#c0D?MP3OaKG< zU9cZf(5Z|D1keR^Ve-f79f`X|Jb^Eb<-`BN35(!Yw|pAP|6XvXF?DFv*k(7cxOVSl zpDx^EbMq5U)T1sNhlS#iX-Qb;H)r%7re3=(B{xc=Ds8?;R=0;KOS|BaF~XkY0eS(J z4u-HDM>Wt)9Aa+mElKonLmb>GJARgFc_|ALtE)L>afafw*M*41d+;z&IEIl|<{sB* z-wn3wE)NMx2bb@!6*vsY>eKAr$m=YInYiM1{9&a^G57E}GCfs&3duBdjTkVM@;#Sw zw5K0Z$sQ-@KveKm^!%<1XbHu)`@nd&w;6+cmOh)XqcIV{F+VAAWnM!3GHJuAp!w^D 
zz0PzyFhId83Mv9vsv3bm3W{I8SF#-XDi~R$elM2TI#Hw^9wXqL$iYP``n!wE!5Yev zTLqMn@FwnuTF5ESz5R4i{;jsdj)#IcAKBQeOH+^_*or~%^CnE`_^Vw+^2>I&5f|p} zU?9?!0-mJ2ym^qR+-C9#z8D+^O3YOK!kcA9Vl?StM=`krv&>K zCi%Mrb144NVUbqdREARWr1WMkcd*t&8qW?uQ68^O-!Fg+qE*uB(_(fHvx0>EvNm=O zJ$?{lky6hHFDFKA)3tv43CgX@3ApA>O%V`>T02bJQ=O-l7HFj62 zu!SPj4dimlZ(4c~gCFNMRE@fX3wS8K#uI{8!MqK`=(PO<@i8D^Sn?!bPw=02!qIE+ zE%EDtR?|?Y)To$|ffg>5oy(O;oSUsTtN1rnR2CU1Y4`kmj?(ppXT!@shz9wxTBOd( z(q_TJ@M|F6`oDagn0{blNzIGKu3>Dg>S>^q<1H2`2BBu3BA;>~Kw=gkP@SGIVG=yA z_Pd{|Dh5mo*(7(@4qMQkPfyC8RlX-hSRH23AVG^wr1M97@pQg-Ah%wmUG3j1gkSl* z%BoZ_upA1_tF%VY`_#CQ&K?wwaF(KLbZntKzTH#Xmz2|H zvb!Gy$eKEQZWY;oGtS1cEMVb-x2yXklBP??Zh@helRxRHC@W^L@Ivwj9l@sdU!SaM zV3nV}_B@){*nb$y18l0?vY$W;%B>5{Z^*Z><1m<6QNHYFISslNy^AE^R1!dFR8ZGw z1KA>&4&_>wCal^WNf5%`b!9x}9c}3KQ%VS3cJxj2zgHhlflu)2x{W`mn!EAnjfcRvM*6j=n2K{1 zfAColqv29}3cT6aPXT6p@n8~+H;YP#N!-DG%yaJIUBheJ^1Ypkh(`7hZ#iQ?_)BY9 zct+)KVDJd~l#0_~^ANEh%}Sv2^5JGY&ldC{kv>IcbaejsMq7-D&+OXgZBfgk*FyOy zb%}}Tm@3?9(Wg6|ra_?26bc1`x5PEh2e2(8SD$pOvX>wH z(Osmbg&L}D0rTD(SHJ=ytWf&EpJgvVx67qshSh@YPezCd`u9Js1S}tv?y&g0o$>9g zsB4^DWDd^wOx(%S=bj&VaWwpPT*qx0E_ZnR@PfC^%A*mx;mt9(H=|sOZF&k2rt?R_ znNI_+?G%ij#)uG;_TMd+s_lI%p4*iZ?B;F!26;DczxR+facjtS$lGFP!fa23kU6Ft z)BNBb>x5uPObwfaVbDfmE0=j1gF2^9;`>53)0v+ujLIH&QYwNu~g}P$~-k`c3zsmcW{I{aR{pvH4wlmq^uR2^r5Mb0KMy z5}&0gD0}Aib&s@lGqozo8?`vZ$dtL=-KQJ|K{NT)I;#0SU*DA#jTJ407egyZDNTOE zt||u5n@blXIeU%Vs&shA3X7&9DMLn(CEM1hPS;C_(waj{lp8nXD|FK)ZNc2)6(ULl z?>$Y(S6xs_hzMoN+W zU3rmI#-?Mk@x8p8!-o&R>@mCq&XFKYGeTur!l(q5CynaU;UJv8NMUps?>jAv}>b^&*_wQZF4_ z>ZHpHaeg|R>d;dkDJ#$5Erwv9IS)Ss&~FwH=^F2En32V!y;fK{hOOyjU%<^pIj2_- zc-vRK5xaTyO6icu&_>}C4WaUfovzCl?92__Ox)TyT|~7tiWcw+jr9&PrVh3=FoXBt z+}?wm>u3{aej2~wSsFhT`EBX}^4lHYp?}v#Tg@F9@-vaEq*}0StyS51XWBY{>A`2q z-+cBCLS&xoD9-R2s}Db$IA84|MN^-m43pMi%)yt z_#xij7Tjl@@gTR+-o~INJ9}_^;7ExRqpPJ2xb|lKY5jzvD2Kuycbjq}Qf(6a>)q>Z z1x(OwYyPeejh2P4y(@}V8?(cE#32&B=Hm-+)Web6FlR^j?PS?lkdH}AJTdt}@mUz>kJ+6Gr>|(}P70X`7fSo~mk$_zu!VII$8Se~?Vl~B?FRf7| zI*aX_3=+?x*-DeIc8@>s(@E?cqcqySNm`T^M^1{A%O;pFbQTNHF1p$jDdb+Ck+V)2 zOE4C@F~q<_zQB)2e72wZSlBRHKTe_*ajBdw?p%iKOfHk3SS8(9ZRtkmLlZ-H1p5%d z!~)$G`M`;@bHF|b!YV8wk+G8>pQCU_91kK&UFn1E2c~d`sNH3|a^T#Iax^#`p;DD; zV`;aic@fJZ3;g-J);=Z5w!7Rw_--Z?&TLQ-h!{~=7c~mN zs27f1#Wa78vne)6CL_Octy7qD zC~0#rrbbgl)hz91>*bm7MCX2aS4m58WO2EZiX>;1!ygxQ#b1~`y0BkmrWu(vCI>*m zY2p(_E>;CR!HwOJ$gj$@(o-??FwNQ-dZBL`d!;n>G^cg%Aa*|9{Jly??V(D|O`S9m z?fB|xIi$o8ldgFl=ay?JZg^g($l$tTf<+fod>&VMJO|qWeg9-)Z5TgFRfeAnpxDGj zas{t=GtlT3{HcWe7RTVgR*w2cIPz(_@beM4_9o+kcv6F!fusm z+Ro)ZiTFvuJ*J!Kt<*d3vM3l0&%pq;1Mj(l=Zixtd&`av=@|wMinrCVZhZr{Z_ekd zZGdk~D6QA(b^$`6z|80iqF#4l>D8DazG=s4lp#++c=TQ#@iro9KCbWKr*{owrjNkU z+~(U{!I7G4h=PV;KSTGO2Gi1n(*sQAX_JD4uh^f0Jc~HT!wF}eM-`mEROzO^5oadg zty@vn0<0rn4FgG8=MV5>X85hE|CAH{$MnTE2470vH5~ws{7cG+<#u_o%e9E&hP=-! 
z>%nWOD7{n>n;EnES#-4#WP_NG8xxA`u+-(fdqw{Rwf6eX8@7ZRO+Kwe`ku>pf*G%S zfH->zKiOME?oNSKv0QVJ_mI)xSrWt)W$K+2sV|r9s;*hu&i>pWC^7#a8>RAI;aXj) zjmH|3>CDNhcFDv^z8k8BLB=7AVX*n~Qd8zzkew|4w}pb*W9${J1qB{9+NG7|Xc3z# zQ$zNWI0m-EH@1b(#+R+Jcei8|8_FJUH1SkDaDe+PMmAwVk&tP=wBDzuoA>j(=WLSl zj&fFAFuJO+VS#6WJ|C#d?aT+qk5SlPcAiJXT1qskJom@#WFG&-Ry$~aE0?R1muWqdQ6%Y}ENew%lh)0(UAf5w0sWtjEb$d_2 zwp70%t@$F{nAKp}PIa~REl4G23}zATRt<85!w)N{O?r~y2E4rGo5-)(_G3S$7!5r0 zS&DFF*}AvfLNJQxEx*GK6(c>M!RfQRbF7$Re`7;157U*=^F4xHaBLe zUM{FI;WnoEB?(GW?SlWjaQzQFbl}VqL|TG>Np)q$r~lG2sz*c;{Jr`U?Qim3b{@r7 zx~HhRzB{})s~IK#E{~~t|6zBy;SkeYq=DPg*@fk|_GR@W-##~pbbka-b%6MHi$!i~ zqD5fBk@9^-+=iY>1qN#41W^q$!-^6IvE}i|LVHA#zrMZKdXf=@+hI$m(SSXq!>#}F zl6I6~*l?$-;J(2SDZa%0PBr>&t!mA-(5X>a3l}*?)ZBCSINFmA_+Vz%fq7c_yN02TSv768{)~0_j%$r|y ze9d7nUXU(~R(H0MY*Rif^SXd^0)i%;0)Mr%L?pZLZOeYLP=$Q;z(n4@hS~%C@6qxHT4hkUjX3t;F8brmH={q#7_0DX@ z&4gKBi!ahIK`>Ni@78^IVXs*I`C;uQ|L){x#nIsL5x4CA%15d4(S-+k0$l9MOij{+ za}BbSn^Gy}SFsOMN6f~r7aQ0O5-oV~KEQ~xz87Nl9-_MR>h)rAgHDNDt7SpazPj9b zeM3`mz{^Q@4Vc@k*z*{o=8y6iRNwp5BxX>s(<*WNvv2=u1(V9ctlkn1E6Hr zl2wM8z3~jEd`zuS3ilDlVHDee`+P(|u*gDmaxbo78Y>&uV=>LGeUp5_?&C^3DSxV= z@DDFMW10i*RWiOU)f;)^Q|?4yUCzB%pr9jxc=NJShhxAJe*GY1XU@$cjbM$4Kf8e( z3eC6O<0QN4nZp2r=VCr^1)FL{lqv-=+#taVF*1pWTn>-O8SAkMwfv?`$|nWF*OxzS z*<>PY*^H1$DrD$gKYmM%@O2PLZ7PfSuRHQY9I(XlMQj;gdS02nU+?m1KhL^f?#b}Y z*`}ATW{7PW$Vg7nbz~>16G)v+hUi?YRDNCuomcm%T3HuiY4}mbO^K+{<`RX!*$e$Y%d$ zhBl{3wbA6R4(FTMP6Ip1KDy~S&uGNj;4bn}C^#URki^do0efKAZUDpyJ zDk6~LNAZbUq=!0*V{(0wme0K+EDQxK*yB6(I>6^sMcg^v%8Y`FV3Wi9-kiwJ;&Ldl z#Gn-su)iqb0VpO`&P1J$-&>oi@2y|pBU{D&j8CQkM)Qg|h z|D;=7_9K6PaVmr)2*7^sH%+=b& zz(F=mspj2q@p195XK(qRofEP)wHJ>nsaEH7z2$cCbu{_xIH8Z|`npQmAa3Wbt7ynK zz2l$j`GelGZ1&Ul(-jAWoc!Tsm&MEa*PobTjS7wRB2!lGPP4l>aLyW6eJF0!dQr)8 zqex$meM8NH`GLqvr*KhYWCy!m)yw!BhBWI8*KG8MHr*~Ur5Zf?z}I&OX0`K#+ctNr zSc-0p6SxLb-b*GGY)Xh2T^x!tyZ}k3J=o-@FX)n#{HbcAC*aR3h}ABWd7q1H%}>H# zUCjsX*G7rRwdvOp%BSNF0=k z3kEZ+cDhalLW|%lPua#}cfb6(8Zd*v@t>jrV1)|dc0b44vC1Hf`s1QY>UMh%W}gOj zMs*6$9vG4sK$zh*wWMcS0V$}aQ&CMwfvm)PG7~scmlftMFEei`K zSHlFXje=^nN~-tVqOW^ddEBMmcWvMW?bzp18v|&r-B3s*KB*Q`E`AJgCxpGn9OrNuE0Pt}4LAf2yLO z=fVI}1&IHo(KmR60EZ|(h6;^;6sNo%A=6z_>iNgH5rHa9&O52-=0^lngyQV*<`C|!a9F1Lb?M3yIjYRbdxO-xbvf4}^Nd7iKpKJ= zt(JS!AC0Zh9X6=h%};!1=c$rd9Sv0X1A(4_hn8@!#OTbWTGUU8)@ zd4tB1j*~A^Qr=DwKGFYt5tM`~z6TvF?cc?zQOozn9>J73I~^5!`{bC-5NpieUNWwv^07K%^V z8FyHX6j>MV4dITN9fUdn7ZxfE&8l}IXT0w;@#-_Y4Q19RDG&{S)EH$3UwPr_FSV7D z8~H@rqMNUzL?tV;=o=1yH#%Ma!9Ff5 zk%9mTK99*-@{+M~hrhMcIW*y%ixWkm^+XSO@9jfhC8Si^P{_VxX=1v{VPVxkCofkC zCMbV@j|e2>5v!IBrkp((p)T~C(E$*o|`#$sWV zuZmn)7%x+!kG~Wi`EYdXao^C)lz?!7P4wpa$&QMBJOu)oOINA*TC#I}p~t49?vsk1 zvaya#lb33wiI5ys=G<%(a$(9#=EsqCAN|o#HC?F)t+b}Bz;y)B4Y}f%yg-J1L3IWe6wd;A-nK3AuzrQ#(KJ9@JI9wSJ1cb~-W?Le8&pO) zz3-;&jxK=`463TWoV`(USc1goQZrTYo4}4s)b~=4V-!JdNf&2(y1>irW$Bi!D^wv%?-WR`@ z`zlodkhsosFhqG|HIB&{Q*>!XXKb6~@Uz@->qOtH24oGk(53&)*Z?=&gEQ;GZ@N75 z=;G}pCgi|!oqU%>W?wZtfmc#AZSwOwvJh(UzY6*Em^TNi!Wdz|*ZG+jP`Z-58?tNeDy(B>GGte(gm zipA^koGRMX@Unul#p}YPpPm7{zF%W|3fe!oVS|4?4Qis9=UuQ`xE8RAV6fjF_cvBE zgPu~z?UHW#W=F`&@6oAgpXT(4$F_QGhc_z{^PYvDn0{#+%AK*k?91Jo1fjPvI?z7%~? 
z@9{`^=oW=9qLb4JLxxgFDZBPRBDU4W?AR5*8GSrx3jOq$QN%3HkSjajt%ZCToN z1d?NM)xfjuK1<00ucfdOTuk9(%U5OvDg^s&W(6LqtkoNn?`*;zqWT4!&NHmaT2e5^ zFyH=eX8EZIpk%+!@1{5uxO{S}hSEMF^=RK?GFOI%V=t;o&A+D>R%+5cQKfam<|V|o z-Faz$@77!dK9h5l8OhRaX)hAj!|t4_H=}1+T^x5W+i&`j(p4c?M?+UNC8(Y*kmtd5 zn0_W4wQ8;Ev0BAk+AWi7q=j-CLH5a^ugpGD3XcI;QaN0iLyAF7|I#IGivUV!5I>&s z0ac8=?XQ@zpMuucE}k4MAXlnTq)HS9G3Z>|$&n>xCw-(!)?1M3<;o&d-t6}%u_{Mysne=NZmI{R0^!~-42mM*_9-K`_4laVVVm|i&$kPgt02qUI_Dr zgpAWs{I0SSmHYKG0KZ4c^p~1-VH>t%vZDvSu?V4w)c%(rt?Jw_9eC0YZDbs>Erp1{ z4IDhW?)!-ID^m38QF#|$rOG|kkTG5y)%w9Zic@U7luxiML9D><-=^Kev-J-IwC}uN zR>OXGx>14Zl1e|S#5ntHCjBQv*ZJjzxbs_uqBh*tDU2921)CtnpntO|HJu6^@2csGomdgdd=@AlHZFwvCM6+mrT^2 zW4QqR8K56?;{XK#7T*)u|8v>Xmw8$dRp>f}OIfG3KMUxcJXg%e#L83Pdt$rv((goJ)T-KG{rSb;{Cia{ zYjh^!K@RPjd=s-HgoMUX--~a&pa;mf?;S)))5j|thw}8NL(Bpt&iTVwNZ(F5oia<+ z@;}y>)JEGc4J58F4|1UW%=>@5=KnUHz2V2U&iZU6Gv;5(H&GG*wl>u`x}VU;4=*P4 z4W$`>Z%5$#gqb;I_*a_p@*In!pIZ+=0z!~JA%xuE+7kPJ{pYvhQ=yfMVv)LOUjqYz zzAE%gnnuqalh2>0bGrD7JreacqMx0UhJA{#QDa7C4tpG?qK!e7s**jrfXESdyn|j2UFb&M{u*$CU~k$#Xg%*w15gfc0laTa9n7_?fct5NXPx-E8I@ zjg_yCvM>Q0PeYqsy=7w=+DYBT{P}8fF$!suN?mw&oxyOhb_57R4ZmDER{h_ILHD;Z zRDBAND=;YwdwtiZ*`i&aTuQXKSsdMI-InWMy|)3ss`LZ42xB5@zi! zFDmA6H94SD=MCF?<}XF+gtMpu*4z{ewG>#+T6?@f45J17SRsDHFi6;X1R_TzzJugCDJHemJLl0e1*p1~4 zB7w9xNeDCZ#7{77q1*1`2bXGCYKhj<^+0b1)z#sghJQiwudnjE)u}pTlV~knZ_3KhK_@Cblq^Rh$F$bC4_1W z)bF44JKxvV!`188$wgzSqFTWEd{7JX*bXuAH`U z?8-%-eo{rwaKd9 zU|)rtTn$<$zi~P5w!{oMYjQ*e6qe1b^+Nl{qBO=_q`FJiO;4&&RfVJaZEubSD!3N( zBfN=SD7nM8V?WXSc|w1`3$R~(PJ9 z;adwKiKywLsR>eW)3pJN#s3q;d^$i7Z3Dja$jEcmHqNm-^>-vtUkzHa4$>~uS=Fq zbJcp_-r9Eelc&@bXDT*+LgV2Qh<=S~H4a_KvD-Dbhz0U*Tj9e+&W7`37$^T25HYq* zZXBa^jBsF+gudZ9vGA7XcHBN9Eszbm;J$}_q zkCKL(P5rLG+K~zN$GD)syYh&o3q+jb&d1HNxC|LNW-jQFqTGL@fJ+Bv0*w(7$Ct6U z9d+&+wCPD19pW=@o@8=7IF*OF=2E9IPRH7N>(f`Dwy_-Kr>J6C*WY_Ry{0Ei@4`p! 
z_^!MRGc9(79_D1PViJ7%L)2Xhxq>g`?IkV7PT$Q^LwDwRzpH}7474?r4MZtHx~|c} zZL9-(ba0aYnc8f8qoXcW3M96v24)}|g%_@v&-|rowEd@Rc4ROj?{uWERAi^1J3B%`Spz#$-DR|%7gzT zX+#cBM6^kIB{3;KVzbxpe|`Cku^*bB*R%DLZX9zwTvP`pjb}hNg)p}5 zcuEp6G54>$I?;pgzgoo6eJuk2Cc3xsVf;xsc$pQEv;Psm861m+%r?;H7U-38qVrGZ zirEPrxc0#T1hSkEecNpY94>uFy{Wz-ZJEq0DPmc{+0H%u%JKf~sEXS1NQtG5yrGh-4H-`F~{L38|i~(pi zmFG4WLj;bWfT>$)D1jCa*8ERfp?vw{oeTSjQq6A= zR`ah7Hl|qmmsiC(8h*4I5v2zU>Qq~M4Ur=Ar&HT36!MF@ z$)0^6_ecoOdr$Y&2zxwN%A|Qe7p~{;?D~)c1_llUIDs&JUv>Ze1$lF>d(Vf(YT*9h zqtPDLtiAbRXKAkSo1X;E`o9T|V+dqpJ!a8KsY=n{U5y}TbSHH-s{|~wnWvfUN+0cw z?v`|Z{VZCH=bpyp7E4aER+G$21X-%=b4*XJYo}r^>ys<=Eq-Q8m46ccS&qLKy^V#< z^23NR!J8yOt8wg=W558nIBPK92~3T30N|E8?n{4gRboPsJS=)bZDk^fDkADP5)vaQ z+eRHBntUDn`LVB$aLqgk2=REf=I_sMq#I(Zr~7@z7GzhvJFhaly$N}Ct-^VExZ**- z-8H=?FR|b8{opWm&cvtjY^MA-kt97xB){^_;qNP`ruoUiKRW&^g`TZRa~a$%N#&7# zEO}jzVKhylmCtUDDhd#%e&*-Cy(^nk^L|+Zk22E-X2wDJ zdx#x!Q1z`7f~QY2GxYGRfIwaWtpy$e-<0|b4BG$mdX0-dwS%d~%1)G)+jvGc&%-c7 zH11sW4tWI z=@cKU{M-Gc|Ihp|(oAz%FWOasDiZP^*i3U#TC}vh7+|tBCGL3vF@sx&E%iLn)Uz_rmC<-ViPaO}vuUG9zbdidtjOpZQ}Q`OgCsPWGMx03 z9<6pjPHOR7gTJ>NKSnakNv;=u+3f<}65K4vFXp*o-1GQ;-EJj7VD#yH=+%GFg{f|~ zgFy+_ttI8oqj(|ALiyiL=e$Oj;yp1w@t4Q8ufM1%FDbF92{ygc#Gl}ZJ@fqa(~8%E z&tj|MMGhm=K{5?Qt+=~^b%$sQ=}(pGhVQ2TfOF~2y$ZH8G5XZ^3~ovJ>`_#gtkb{Q z&r;`qz4j;lf$?Q;(4tEe6b+pL!2Odm>r`YF)N2MV=UtW7*iQek%*ZsI2eKMa-d)T7 z75xC)0=dNuo~3=eKx}1}P~O7q?UYJG9__`}P@kTVr;$;06)U|SCuhQ*7O8pDI^iD$ z=!)tX+U@1-N4X|l0`gfFGTcTZI-g9k&m}4F!Ov2H652!?T?lkTDP$Awb`Rn|dGC;b z)|XC^TVe4p9K{GuaeqbzAg?V4OnbfGwec@^wV6Y`5OQ{Ls96jRHeDqFsY~)Yp{vT> z|GD!91q{Ibc|5gdA^mhdBEEFLlY#1m@rI`ajFw)0o1I2YDmwMr_>RtbbffiD?c1XV zLeHLa8c1Z$I+mQUrFNWWR7kZ)j!2ch8smv zf)>Mg{`t8c?y1pj1Aab}9pT_PMo`o&IA3Qe#%4hPhs;|zj26Id16a_GjA^=za-Fs# zdkRRJuMV|-NABSsakEk^ch5!}oRH^*g=dR{o+GrMR8T7F_m}_5MS&*(um=Zt7{-*If+_Yp9sJ>;EoT7n`QHV!{L+J5 zwYNAG>NYHFxq0vDp^896TCIUq9l|uK{eH0KH*N;DXUd!K8=IByB!xyQmn=|yxxM#* zU(QY1_wK{DR0hk4?@ikbY$vMdndw?+r^{$mD+ean@aA?;W(U)x=yE)jd5~mw*ScA* z2jS#ikY)4{dG#@%=O7gen6;3K5G<`8+Pb1<{XSKzTrg0<08kkUO3-Nfse=oH5SiV!vW0ySf`jU{vOIo5XTzHb>^SduxMDtWgJM2VqKhZZnTSM6D+L*|wLz9Rh_+bdyl}c46&YVtV&W z_oF2ycL*>_dZX@&ucYwn-ICSIcXmO~E6`(xs2jc4m&-m^SWqkhjXEp+UX>S>Ue*X9 z1TK`nw_xIyKwf7xKz^0GKn?^<_g{@=`+a7MyHpo_NartaUmQJX5jzk-Bf}+}KAn6j zDcS2gZxWiwsk#l0i^{j=td(il^XLOVy>?9@nrcV;UnvEEY=4dWRG$UX6>Q1^p)aqL zh3E4OEE?duhYm6-!J@vt*^D#B)J>0vCzVOq87PdKtbh&L6SsLmr6VXSB@0OSujLcH zL2Tv+0RwHX(2k6%De6ngR!G#1z868e#n^0_8_KVbnujqP@>BZ1xj{3j>P-@+#Ixf& z$!`~pyY}RJ>g(dFROY}e2hu&mGb*2<;R8F~M~jrLN49Q)}Oi_q#n25#L9CEKCi zT_Xi48!a33lM0?jAqCsb4JIg4{|G~RK%rEX)m(HYgSzeJIOG?ojY?ebsl60yvw)wO zUsKv$^_FH(-k67f>9n=x@HXFhuamVvk4NhNNi_gKce%)O;#Yzq!j`gGYx~$K>@Lj) zOju_nBm9%NHwkD;;&~8y23>6#iJqmSOjp!j__;^>*ajT4zmZAsCBUNN9d|f2j_OxG zk>gLCgT)*m;(_g#6Z(T0%}awBGOnIDzMI(q)mkm@e>!e5s*xS-Ou!8D%;8&_hK*{S zmM|5c$~ww=iWRAnRPVX9IXjFUt_* z3Rd3|bk%%PFf7c`wRVot@8cu@81e!o#59iiCtlG>WxwjXTQwFkU9hp7!oe72EI1+@ zim45&p#{X~gLc2<==OVStfgu&a!zfBT5ZyW~r%T{Lm#&FC`X;?LUSlaH{q33Z zxQ>nWz_82mrcKzNT1)kU2X}d6n}|gOv#3+VoT{;eQ$yEla}&QTKttn-=&smEbq zW(}V>a%Z!dNZ>h|x3pP&-I;o2E2XA7MBmt;?yU(1_t3Yq4tQ&cgE}~ke?VyVY`_;~ zk9(2rxYdDMjqwEMxb+I1=>h);D8{n-PliKu<)om|+jr{%fdcj~ALp<3uniu*dD{RU zbEK9QpUzHs?1v$!rnXjT67mppRf`c+Mp$b8>q!8w(r zEnADPf1fcMC1_KeWUS>VNc11uQM{z}PFkutZ)(6Hl?OJR|G#vd*g7CK=brF%15#ko zJ309;v^T9logYj^x8>gld$+k-@W8oP3uZVp<4fG(Y0O8xch{ccX;MztiZwslk3T<^ zAq3pLbp!M9xDH@%5o1-2Xs@%qI91O1gh*WrSg(s*jO1uZpGBzJH9gMtfp`~J znr}BM2#|r}#

dVO2NgP}stK>^zjD(l;c{;llFBzh22 z%^7VgB z*@5&vX2E8cODhs_3u+p@-lH8ElW~bW-bp9IR5RS^CG$fGei?Q$^EgjS`abspqfFE> zw*EKQ+%ojCq&QSy+1f12RJjq6T?InGR@9vZsu%l^{tXAj-ktA5ZvGD43n{O!I0(?0aW@G(X zn_KgSM$F|YyzO;Lq#V+W$zlljrgWaS0{XVD*^lo71Jl2V;QL@4<$%k~jr`mlhjq7X zgAI?wQSX(Ia~|=!wI%&>8`TzC<{|6SFD;=&}L%M-`v9y3{DI_6Uzc zu<~pT%|y?vY>c4uvk~ljYM@_`0r-rA7&ZZkqvS5TA4%J>k>aOzUi5Nbq*f-g3D&no z?l`nk`zX>fNV7+qDwRgt8udo=s-^lQlSS^1zk3KZ(Ein$g^K(lx15K>n<>oPi!rO2 zy$MLvi?!Kc(DNcU3jOTyJ!lr4>QMR@r5B@OD7L2}qcs`hm<@_SN^gNsgj+4{5QTwL z0Jr20lqgUF-KANE36rmsh2Isg-Q4g#@-afx3)=|A)R2bA^jCF> z{8>r>p7Ym;<9-t(7KX6VD{9ow2V!*d94zkO;2mrM_fGGCJ)yCrsYSJl#rIQ-2M?GD z=NT_4=FGIbo3$UOVX(MAc}_dlH$AtwRD4Qv;+z~+c19noyb#ny=6HGWiROh=4z-sr zv^f+zoRzsHI28Kq8Lk%>izT*PXGhOT#pu7(yoG^Oy`wEdTsMFDo)qM@ri|Ga9(GBd z4+@{qI(-_&?=X{=1ZjZ!Z$b*~hwH6cUgs7K>J5zN+2+ z42(Sk2Hihp)xZVZR|qSyzW6{`?W)VN?6blj%cw09kaPCqxE=>By(i9146uH$Kbe;U z^n2z-pP44QA(?tQKS>>YIUkIEjR`sRs2(}{*RDYmZx za5?}wB8lsw+r|(13Y18Ofh+RDNlCU}fQnmArRbRpGSG~dQ+iF%pWmnx?TeBN)&+_e zzdJbEfuGn4IR8Ox?L&lVg6-p^Q1aeKZ{0x84Yml}yKJ}_vK4e=v@?3xbg%^Td5*KE z8YtWxLcPQPxdDt>k81WjHimnTr7OAj9ah8>sK#q;Dn2eDp=vDwSXrf&@@Y3us7Uz0 z8VyISv-RhiUF?1QM(Bt2SHeu)?7tLz1l+5)eKDU&$H~bq%$%93*CjPuf#;6hRiUtF z&6XsH+txXd!spBSNhV3r#c}_r(!Y4e_f}g@N}(ogqmyGwCwjFoq96Az+)RKunp^0V z5*2WnehvmVk`$g>&bMsAJ$m=7`4n1mDcGc_=%V+mEe z&B9DMoTp=2L8%=m5sLaJJVl~MoBdW_@!n)}Dc{r+jyw$0b)Y7M1-i_|(ZC#q92XDT zWpqACJrM%bCFUJifQy$nL>)5qlKvjh~A0O;Jteg-f5n{{M z?*^`ezI3lH8m~n}<>DQMdubBP9r6xsYd-Vy>fcsB zO=`q>S^1hN|ltT z<$dcX8HMYPygo7qw2&3$;tZTVquPPU})$hDVY?O(_gna_) zN(cUUd}j+7EpF}bgx)9H>dnKmL|ol7{OZxRCkskpi1P#`!0uL!aEV>A9d-hu0{@AG zkT-{vq7u;bpX_6i9Pp-a?~T3nZl~e7HCq*7zbmvH^Fkq4gbH>+ck~iH)@%5sQ9Ico zPrL6q|1Kc`5BRHVZm|{UM`B^t8?A9iipA9KEl|M^Q?TU(mq4%~cw|$3VZnwyR=doF z3GQLdtca0|CA^vM?(fg%_AYA zmU9P|Y2O7j@^Pl>K_C4vuY6C65szdqMyn>57=BWZ9=W$OdCX0Zd;S}TuYA5Hn#`4- zqfYj!h9Uz?bl&Z#_aBZCmiTyk?5>sMBke}9_(1s`pl2^!KX#6I@}=}Uyw_y@-tuG3{+Y9}i} z>3eNwl)C-M^t?4|fpfZtF4D4RLib8M#9+(60jg{a3|}XC*}tqp&^k>=eBx_Vy*$>N zbBx0;{F2Jg@vT@+-f*x%Sj+$602Z_5hv#?J<`&`Yd zdNlu3&=&iBAh~%GAl*@U>R*BIir+@?aERDeowt>{Ia!Zvf-ZC3V>S|eBgTdA4{4kIc6GmQvBJL zJi@@%r80h0G=$xAz&@31zvkS**?_@;JpWJw4;4#Ya1rGB8GKet70 z4I|s!&Z^yB*lm;jJ_-ii8O(JS%AL1Hdo5^oLpzKpjx=zln*`SV4jG>ZZ=SFo5iN?W zty1N>Pcu#21Vx_7yTHv1Sr`;ZkuuDx_p6vdBrkufNf&@? 
zA?j1I(}XZ@iiLRqA1} z2~I%0GgNb)xi^4DfCQ%Ru4L}fMbk^JUX!GsNlCD%S_LJ2cm+|n5e40(ZyZYnrlsN} zB{MVj%z(+R5!Z$1(4_#Vx#(uu?BR|B^_4fi@j$FqG`4+id3N4C_CN>*P=>$Sara2& z6I%oXZK5*T8-Pb>JX^i;LUxYaeCEo_`F29G{v?oRg^W1I%t0Ook%DOtOKU*$$!7i; zPo^hsb9jJ zbT^$Rta0pl4RNaW&RD{3P>KiHEZK2Yqo-D{$3@Mxn_0 zDV1?i@wv{EbNATo)vfbC+`jp|>rIW?hEJbEeWX4rAHL^lf3^6qe3fi^scHnW`I5dY z%h7Ib)t-hvny*(|e>CnqvkHv)OJ-g)yM}yajjqs!duLj#&$7OyN~n6lw>xU;=A!olP5B8g5+pTwLRd)J`+-lyvqSyn0JDC#EL3YYEc-m5a9$ zKi_QEc?cLl)r%5PR}TVAIRsbmwB7Z-)0M0f3HkPN-MX`2gJKhvkUw;G|yjx!b@fsXpQT=9SvBo|+3bZzkvM4c%Cng<_FHL-_ zbT*lNd(9AZ{M5rKHXJM)uR*xux3W!`fZ{^-25u_)PMc7Dp!SsyJao|6WziKy5A27H zxx(@BE|>ayEZpj5@@ASECz%al7P;KYJc{8KocV2{+(@Pj@dvr?NKI|IK{0k)y<)2) zunab4?i9LNf(%=RE=rNa+aI|ye*x||MYRFw;~o|iB~ZD3iqohj++fp1f3`8T9&bJ{ zXaG`Hw>z6HUS#cZ1KF6?&^PpotFWN6xwehu*HLby^L%)~HB`e0V>@f5gTv&dC^3Rn zN#(TCKpLUFWVcZldVa}c=xDY9R_#mPc-D`QcgaDJ;;a57oSDu_XR_hh4 z7>lROGUQ2^`b5uFtkJ*ij5>APmETA?PBu<9Bvzk$qrHHW!cl!gr(Oz zt)uE|WkP%`F?ZtqRBzu>Rfw8jM~b=a*USLPY>Rtw09rcFuwHHUE}UGR_rvNrDSTRr zy@i@`CTjDv6I0i__hc|GjHIP>@72OM=a1U7FElw>^CA->^m8`l@^VwuxfSx6S^FQL z=Y3csSF66%KK(Eh_)>7`12A4ZN4GzNp)bs}GyBPR?h1(e=icA(zP+wS$kW>M4YK%P zrZ`YmGdtZU)i?C*D2ZH7xt$-szzv0BWEYlzs`nv8=NecgU(-lDcMBnWQ1yG`TSi&p zc-PZ#_hAL)NReUXaP=LjU1$Q?4m)l%*Jy8;To4Tg$%fTc#I8AxvBl{a1)=nuKqL08wWiI)-BM{qai{5)1F;2CX^CP5Ao~n!| zKz87sfPxd@0X)F)4luh9Oi?`8e$Zq%erYF!`4S`IHlMoQ58Z(dg7A6`?psIj_Y_aA=^k*UrIQJvfXAX-Dn8+5rpCUWGN_g=AgF(&tkiRU-G;EjSh ze!}1nAJTzXep1wrET2ir!o*(E!7(3F)N{|hik&pkn_9pm>HmnJ2mWNKhF>w*X~FfT z7<O|uwf0EP(OeqOZwH~dtXD>V(_nBNTR>; z@7i+$d6QK)b5Uyy$1+tmtK)KoW1BOfaUn04S=9-cfgyAc%4gZg*gVhD8`W4H5o}O} zJg@nh6~sJcuB2}QzFTBRmF5IOLj52|eLej*s5f1E~r5f|r zu&kAwjm{=c_BWE&)RbbBz`Y+uHr^ihHUzvuci>xB?tqtGgnEo}4U@j%KQVg7_{U?z zyh}c5gh`LOXeu&_&Dy$i22~q!_%pMdQt^n3+hl6mtszM*e%f=s z{;IhgA>AoCeTC+6#rDK8@y^q!w^ZF^P~J(Zs@2DG9pgKwOPL>cr)1;mxC<`kp|yaC z-4S^w11M;=zko1Z(#mAtUQj(Q2XAg3h8B^c0l}D3(%as)t9uyc!^BDr@>7yu{+AUqIW>9>xT{1p1Q^6s46|o7 z$>mUuj;$(`e=R$8uP@@|BzEW8$yIla9Ueb-Kl4=DEUn|XIDKtwD%M01<+>B7R53r9 zaXN?cSb6aOy2$h#?}_%f(v}gIsU6)R;q;0#2T~??=H@avEy@}o47Q_O05~Po>klbq zVX97HZzIDT*xN>vF#ut4>ENdHYTfM*+N;$2qU30x#`w=z-Sf$q)r*(nC7V4)!+Gb1 zNzt1MO{n^IrJ~Y-v$%>El13Cu!tV1B*Tp1)^;T=;9ONz0X}EXm0x+8%Nv=3^l7+9w#trS$} zuz@yNM!RPW){$ySdHb(hz|ZR1`wc#RGq-nLedjfG@BaFo{jc;ostftAZ)d@Q5{L?T zs(%Nd-tCDPUAblh=np5{3v%&{VGV-M63cy-b%=2A*-i}`@dgpUHP2PN_bBP<(%nkM z!WSZKh2poT?|JOJocb>r;oon6(jq&94jz3P1Fjq_5p@o*7B+_k1qBbzN9Oh&iw>5j z+QZyEE*neGUGtn)Tal1DNw!D5qcVO?6Z}|Cd`$(J9Ycn}e)33Fm>MMYn!%STVsw_& z!pbtY@2QSgwKs{6t1r0zJn5%YS2xhLg5*S*@yKb5QMXRZSfq-|oO zu||&#?MhKpNr{Dsjigvw5OB$!{Vt>RzD7>Fu7DG$bZW@0*w$D~ur=VAUQQrZYCN>; z+P>Hlw{eKHQxxt|Ea|xoZ9=bkuiu3Rlju$B=ck8KW+(0T7;JrRF3BWg0bjDkEH6_z zu?oMh%Ajl(5#yR`5`Sg~u3x|qRmcM*TnY5W)9H5571Qs|G4zQi;y%=?NPp0C3`4N# z$tz1?1%Be@Rd15ZG&m{$Er7AMN05THC7x4?Z5d!pzvgund zW`H@1hUD42IcGC6l%Fa`k6UuFY_xlwzdG<0)1_5=?-~2FO^m7OP^!!TT1g3(A5y+y z)@qH@pvtb;-lo-J>yy(;K6Z3c8&d&d=avJjUpRgzi`Hu9Oo;@E>_@pCZCFlHN4#UN ziKT^c>ZLAMnBvg4yUSVb`6Cp4d0T&|C%}9e%e*`41J7m{ZSa8a=y)Qv-do+MgQAbf ztmcY0m%bHReRx6rqqJXjP%Q2CLcvC&xNPvokBtKzG;dB%a${h%@hq+w4Nt4^A!F3f zw_&3^w&_D+mVQZgq*TU{oAXNJm|~*GvQN4hMH_mBFq*p|QGA}#!fkjeugDkuex6tH z{NjSJUW{cmCFPM&$t|0IKL2A;rj$J|JavV{9@g|hA_soZXqaKHBzU=^R%p0xU zG!pS{=(OiO#)YG;EyK?oFEm&L#A()V`@^7`@?r5uAW~57=}Of}mSv!T;?(;?n-Czk zTWLLqeutX_TGXhQ)UJqYG2BHb*DD&WmPejuxNQ>P4J~+?`S$N>CUMB7D<|F&3H?^0 zm)}|vU)p=0QN(D1obSrknE}tOryf6cC@rQ9UQA1;Q*+seoTKz3mNMmHpzLR|&U~i? 
z#d>@#7r-O;;{3^|2UPZ^pp05lZ?ou@g?b3;!jJMh|3RZJq9=_{@E=$f+i-e4M)Shn7-t`p&9o!&-Rn2B0b3n+xclw3wQD z@P(rT*>Q)Yw(ndP?z{=MOV+X?Q9iaMjm9KZ!Msabf08LJ!swOmRD&S*m1vn+s?3tF z9)7xO`!e`p_CdF2+c?Gx5d-czXjIXA6KzBrV^neGV5*38XYu86+~;zKR&tz~KG7}8 zl-kI5u>*t-FjJGqrpG~hhD!(KsqJj&Vq1s{*EFD^=dqy+DMiaqa#p^i^I-r+%HEVw z_Dzr@YCpD>?cC5Ci6h@oqp3*G5HCL^I|9`s@H?+Z-{qcH+cR->qZkGlG9& zWsSy0U3-f{2=J6nzz+rm;bz`0b4626|55Tx8uVi+A;-!{OLOElg~mDnY1CCIce2fn zfUWY?%g@EnjP1#TMjQ~6dAD)0gvASRIYP>hWnmr!x#sb0^$wSLjLVm^Gj1E4daIL6 z0vogCWNFfW7cuVS0kH5$aL(d$^j8rj*lQ(l0Y+&6@xcSyqW7`B?_}lL6BpFxTV|d? z&4%YG?ElEGu&yXlLr&n;-NkLu?D z+7-DzzaI4b7iyJa~zPD20TB!t4kQi4`szJXo_VOLc@b53< zfJ^wI5p;|tdq}e|>{pLE##Zu8)&km%X1BUpMh`GPd2+iNwJ(4!r|cx--i7s}j#_G_ zeiA#r-Br7`JQ5LWGtuH4Ld|mm9p$u9CoO!!bQ}_;zOoy$QA+E*%!F@Y4%nhzS!jTc z&%;4?SR6)NvhHrcfXgGcKp?4|celPN8_hzr@Y`$RF~iW(U}HgJ6SHI}UN5Am^93hh zBjSUrjei&g)v+w$>0)_>4aM=LrKoL8W!d*O!`hv1@oSgj_@TAz2`pJr@kCzJsiM)= z=}yrsq_a}?pg>$@d8Wa09H<+V7oI{tqX#2v2}tjJnt% zQ2i7~ZMG_&Nd{6A)dsYDMLUN(zIQp>33F?UOUpY_?AMharhinlBVlS=ThePzWK1S< z_ywVHyvH;UL-sx%4N~9kxU~2T5XQY*ynZZhd=c~fh8F0L$9-iP%ro$K5eG%j^@*n) zwYt(8mnK{uh@g3Y%uP90$873B+5%aE6<12EJ`GY;-!d~$zqx3Bq9QzQOGc|Y+oH;y zcXp!g1E4Ydye@EdWr@+pd$TVH7aI~Ze{oT1`Fk|uvCzoxpD;PVbn!&jUV&rgaJGd$ zd!HnR>OXGb%I)2LpijTl=XI^Fo_OJssmT-Vzp4L?*ce*j&-#B~EOi4vvnE?fsK9MZU~%r=UJW7rKa}kBfXdA}md**Gts!>o z%!UL-kE~m6>MeJsb_;Ic$S7&4;4>#eTq$-NO1VpBMqLYso*je3#`oSa;S-GUl&_ag zW)!JWRCg?w6Lqx!DnJ%g+E~~v$r=`e`YpIOB}L$B;kc01YwjF7r9g+SpGur%^^?jk zfHPm3=ZT)e30Z)3aN$qL7$LaTzas~+4`zKq%rT{DsD-B)STN;9;`v7^9z46BNV&!;4zjI) ze4+@f>#nIB-o*(mIASzvcU=IjinZgI3&7fWtAW8T!Xku_auv$n_xXMM-&pW_)8F}@ z(qQJ+g+6AVTwCzki>T$;8Y(5<>k{*=w)(y6kSBmhoD$R4JriGJ8=|_tQ44yW?v0q7 z(`m51C73CCETNRPItrfsTAdx2JUP^TUsua@YwD@^c4Dxp;CLZ0{ov|4L<%Nh9OiM2 zZfoMjUI8X5Jcq=-Vy+o@g$+Hcmtiq)3hCYx%9zjnl;bmO8cOIUT|3ry0N#JC2@l1MI7%$apo+S>Dsga?*B< ze2;wE;J=F9cZidwV&ZeC^q1@1>J*;I{*PH!&pri3OINMX2%^4dw^CSqf`4iVzk7d4 zB}A{;)N5o@)T55lz_50E5S;xyp&d>}p+>$~{*EY79Bw^=F*(lOEWSy}tZUT=%%@YU z--M*kck3L+HI!~wa9vfQI2>gbg1#oV<(v8HdwyK^3t!Q@e!)&o_^w)^3 zKmF^H0lCuJPXf;dqx|sz5e0_sE3)_6_Ll1PZ=R@2b^&|(G>*yeI=;Q=Q%Qhqegao_ zyk?o6yUjYFmXJq=btT(GsIA%F0)EM{rmM|c<|EM6(9u~f50(Tj$6dODp8tP*y?G$i zZTmJpB`GahhzRXM<#v~yQAtXctW#M=R1%Wx#+XVdLWPJJLblA1eHoL4D8{}HW8cTv zW`7Eq$di=s|@LbaKIAzKaBc{{LlF*0uiH zoX9|7umQ75-mUdWVe)1$U#1bKEJX{$R&w6M%y;9Oe+4dhM=`3c3PcwMbXjkIgGV3J zl95ACb|9_DC@nI@mArTprwb-M34#p&Ic2?zk4_X6zLkhSW7mKiSG6e&bA%uok^iNc z=Eb2o5ezGGAX=;M4@vk29j8ULk9Th$OBNb*pd*)|&1>?l<0#wj@uR;!2M&)|7r=9( zF4erbP$AkSuz9Uxvden@Hb$-rc673*W$%?28dUc@dUjD0`94~bRJdG9Y|keIT7nPa zA-kM=yGLuCGI4ibC<#xr^{d-P{@~nFTJ0|R@=IOrwyW}?7{$B}IpAmC6TBJ!@o>uJ z=&F-BurR-Z9ky{Y)k_-NL_lVZ5n^!z#6}tf$X1WzF$JRB=8vl!&JR?Kdkms?S8Ud4 zOJG7lmHHOgXgC^GUN+vKEka3s6>ZsC8<|H8W4&VYfT$|WRYRD-*W z&$G@g+@z(s`8-kJ*AJb{9NwK9)m|`Nn5znRn)=E%4_=U+HHL0^{KkiLMd|h)*~!Yk zW2&-@w+|xFM*7kvm1N~YLck^PBk!Wu3_eYk400=+IL!oFTa%5?@DAEi1L^{xFVXg%_SQWUS&`AB z)kEsC1<~=EL)k=5CfQ9O8JY6*oo-arpRPN&J>^?3H`HsZ+Ng0o9pdLtQ z*oB_DX8*q{KKHArxOa!4dbQqjEN)@F34-ejDZGhAdw{8n@ zX_1(mZ&UPbZ9YQNCRCgCSi=*A+3Nx21dAQkpJFepqq7&e__rso)cGm2n|(2^Lu6S4 z_^(BSuId3?lIcBBrL8v9LHgI}mp{Qx;SSO|^0WODRx3OGb418hab??Y3bNli(1$_YcM4s=FY<>oLGbzhhSWLH zzD#ZAqjJ6H6F*LAK~gUGCCa^*=?*Nw>--*wY5g3ndPu2b0ioSgQ;FuTSEFzBv}tR` zeLI+>l+qk2$T_nosEeJ4imm{mUFm8%NwYCTR7k$}jNSI?`2j+yBWFAtclJ+F6n9

OjO~7oo@s zwXCO(IV9QhDr_q|+xOggNq-aM-!A^)zb6d4i_-_Q7=*1xAahco==&l><>ttr#mUbMRJ!t=5M*j6cLq z9q{;QoN>;@(e5W35L4C6ey}?EL<<5m&Vh16Iupwqp|iibeL#>$1Y(9h3cVRZfJgrq zuUNvu1?7?_#<16R-$qnU6BM{NDmG}H<=x~q&4q{3)_;Az)}VX7T5bWR>#tgDr`-pZ zJ@_|3v!+@b54?Ywi-?1ULZd>Ah)_DU^S6X*s}_4$2wIzIGbeV*N)*U<%H|tCFK8 zub<*X7v}U|b4N6f=KJ4ELJmI_>hGSk#92@wU%tgJJYn>{kONW`?YfqeDyW$NjhS^B z+1{1ufJ@=at4bda3>;ySyUn;d3vIQx=#usn)~egUR{2P?^ebZ_7kliUnqvhM&bgM(1S_)$ z*x3vPCBx6hQ<_%X@|B0GA9ugoL{UB>1o`<^(RWBYuOzN|4r+rE-rzT#!6@XGW1}_6 zhU+MsqVLt}Znkp0X7A~TwG@L9n&nlG(dI{|kke6bz_+*p%_^PYGv~wnM zZn31>`5=R)YuF&gwMb)posqSB)nQ4#BbR>vztr={%DC1WfB*glr1qFhlnF_p2o9co zrtzWCb?d}5HoGh2+bip#hElZoGSO=PO7y^Zf2Mu!V`ylI0T@#MUA6d1%zJrSsi*fy zrI2k3bJ!`0!&`e$r&%wAPE}lkMRCLe77S(j22iY{<)JScpxyIjW2TC>vCCJ0mi>_8 zYA8e#7G(l(UERzE$mNEW+SON_}4=h>i+7_X0wSLdxHv2h} zyG}C3V=`W~oqjMbMcze_PIMEs@p(P?kZtAZBFq`duM$1hiL*49`I!HBPc(s7=HVm& z>z)%lP87H$PF({KuyTsYKye8!&Ek@j)Am(vN7;R2#pMs}hK+=g#L0-JwGve+|?RR|z!C z6O7h^cSJ6={=Dt3O$V5ANL%$DrPmGLd8sCOR1=USwAwHe!UR7n{jeG(+AV5N$0YKa zTZcgQX6xU?@Uh@!hoFIZ3)NUJ;U6Sbi;4>!8x?9&Rsz&T7Z02y08nVO43$S6%$);#d9d=%>|=?>`|tmw0Km}J+Vr3QI?o_0slM7 zjn(~PH*+d8px06e+ihh5_CCFwP3Y8HJj@k7Mp|xu@iA#-*2Bfl_uLCTQGfyY{%_G2 zFW}O9Absws%&Gh!&g?-6O@lM!!Yw_l5hZV~5T(ZjzY&_iYtQ=+L02NrxOULF<_&sJ zf-VSr0?$ce!q2o6BSoCtadSof%`x%6ZaA+s(*a8Q7i_JS13(6`P9(YO)mATyfMXpU zO2ZUMYmXVbD33hu`Z4dL&Gy$IPe$j+TSb4|%1HKnchzd^g}%+}n_cRBvkgr9CGgr~ z_=du%o%Dz*d(La+C!qiu?ZmjV9)n7^4gM<@|mO(?QM=*k6K${r4;TIDR#aJX4wBWq%!(*W1OvXpv z&@KA@-a}|8!lpT*0>pc8RdTq0?ku3= zO>m{qjk=tY*|g5``TRTHO=pZu{zA0Y{WlxWgU>U&jVow7sC~=yQ>93FwOet+jSwcL zjDTT8=bjzFK3*RVRHWZKWvlwRwT0Q`=iPy|&2>n>etraPziCzxZ}9%+J!}JDr4yz+ z(9X#XfQi=%omBisT@W~V=B|)t%GeM;YkX9@6cNQrCeB}Q_;5>FW7G29oGTJAa zjxkYsZff}P`1g-+{#gr?p3g1Y623pnWhis(>Sb)Gr=wWF&j8&V#r=98v6TDZ3c4?x z2?{Su&|-n(Ib$ku&A=r2sh0lhVrCC(zWH+}pmc0G;mc#Eo=(lL_YPxy3%@>ddAZi;QwrKm(O>~M^ zEWx9hWFJo%=nFGDZ+qZ%=5-<5L%8YlsSOa+F32&wp#eAjoP6j5v{-3W@(R#@)!=4u zYg3RDi=}FZ<+{2p7gocKOW9FF@S6DxaewTD_0EFAnMsIb$2qvRnTa~V3ipm38KX6e zB0EYj1367k_WZ1K`idvF-3;g^JNEVG2I@7a=`~zA|MaPL@D|U%7v|=S`8Ns9On-ss zHs^df_ZrtO7e}9&OT4U4%#O;P!WmcFfDL2TQp^$yyj=s ztFKS|`K86^y?bpo4TD1D?ueWX@AA57Sn+pwPW{^y1I)*BP1sLwG8KACN9#2vqu*Uw zl$B}&@7-UUnI%yJ62{CIr>al}JqL8pV`_6+qX@B@EVNtzQC0IjxOP*x%f=(s@Pht-yFk0+-K?Z%o7g6I>5I;a^cuGE0`_W*J(RBX9T_#M>EOkHFTg%km6EeG z78*+3C-4Hct340faL#%erzm5|%A*zSlgL)Yf+BM{&Wot=qD*MPdzB?NPT+D^U~s#u zB`p?3(Y~X5n`whEzB0Lb8LGY3(W!saFfBC?nZ67a9*+(ao*xKC0AB}i(-Yv3gCXM| z&8DSrUQ3cRX~#f}zcks=-k>KXjFml%&fO`kt*PoFTE*DtOmp8&F!JpCZ!I}5eaF8y z8f39B$ls!p?`=cpU#omKxYmW|G-0@{)PxyWN+UNtAu4gyr0!~o!j<4CD>vl3$k>hi z!OwGs;Z9OQIq~Nd%_^_epr2~|9*I5vXCwv&>MKC)qZZRkIzveP24~)^wZwETQ8ooq z%ec;^$sDKhzKFeUYC}Wpk_xQI zH%J6E&rmvD+~7azU$+@)I=9>=EhDb=n8U7b$X2bKUP{of>-mU~b^<7Y{Um1HZHbPV zRDrE>847;&fk>u)E?0Sa%t__*6)lkaqt&D=yzW6QvBuMs!kH;85!GA#vc()8(ntO9 z>V3Af@Qw-ZjZCs5KrSM+1Lp3Uz;MCP*+3P0<6$J~I26tm9yOVUYHfmuFoPgjqRYQ9 z4egrH)_}>Egn^`RiVGfAIZWwnSRrn8MB6n$7q0UcO4$6dpONZLpKEPDq(R-A*$<5x z)T@kV_f|isM6UD0OWG8h#qWR67E#;$jhGfjuy+5b<;!;V{3*N+LMF1lRGJov?UTK^ zJv;u~Yn{q#idqUeX4ejeB6(sT(!6?7uw(u;nDitHjT^ zn0RGMkbfd{m-HjOf5pdcGZ8_hz0!Ts1JB;tel?NbSxOI>_PM*lyQQ8{a+A=qdvkki z5{OgkB2)VdE!@#1E&8PUh>|hjBC~ZhPNd5_cj-BpqYZ11%HHlx8DB+*2~VZFs(VcW ziPBy8+EeJLPRhVSke2plZH%JtC7+iyen*06QE=`K38^lA`ZZOGW?}F)ubS*xL&XC? 
[git binary patch data omitted]
diff --git a/piet-gpu/shader/kernel4.comp b/piet-gpu/shader/kernel4.comp deleted file mode 100644 index 09d0448..0000000 --- a/piet-gpu/shader/kernel4.comp +++ /dev/null @@ -1,301 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense - -// This is "kernel 4" in a 4-kernel pipeline. It renders the commands -// in the per-tile command list to an image. - -// Right now, this kernel stores the image in a buffer, but a better -// plan is to use a texture. This is because of limited support. - -#version 450 -#extension GL_GOOGLE_include_directive : enable - -// We can do rendering either in sRGB colorspace (for compatibility) -// or in a linear colorspace, with conversions to sRGB (which will give -// higher quality antialiasing among other things).
-#define DO_SRGB_CONVERSION 0 - -// TODO: the binding of the main buffer can be readonly -#include "mem.h" -#include "setup.h" - -#define CHUNK_X 2 -#define CHUNK_Y 4 -#define CHUNK (CHUNK_X * CHUNK_Y) -#define CHUNK_DX (TILE_WIDTH_PX / CHUNK_X) -#define CHUNK_DY (TILE_HEIGHT_PX / CHUNK_Y) -layout(local_size_x = CHUNK_DX, local_size_y = CHUNK_DY) in; - -layout(binding = 1) restrict readonly buffer ConfigBuf { - Config conf; -}; - -layout(binding = 2) buffer BlendBuf { - uint blend_mem[]; -}; - -#ifdef GRAY -layout(r8, binding = 3) uniform restrict writeonly image2D image; -#else -layout(rgba8, binding = 3) uniform restrict writeonly image2D image; -#endif - -layout(rgba8, binding = 4) uniform restrict readonly image2D image_atlas; - -layout(rgba8, binding = 5) uniform restrict readonly image2D gradients; - -#include "ptcl.h" -#include "tile.h" -#include "blend.h" - -#define MAX_BLEND_STACK 128 -mediump vec3 tosRGB(mediump vec3 rgb) { -#if DO_SRGB_CONVERSION - bvec3 cutoff = greaterThanEqual(rgb, vec3(0.0031308)); - mediump vec3 below = vec3(12.92) * rgb; - mediump vec3 above = vec3(1.055) * pow(rgb, vec3(0.41666)) - vec3(0.055); - return mix(below, above, cutoff); -#else - return rgb; -#endif -} - -mediump vec3 fromsRGB(mediump vec3 srgb) { -#if DO_SRGB_CONVERSION - // Formula from EXT_sRGB. - bvec3 cutoff = greaterThanEqual(srgb, vec3(0.04045)); - mediump vec3 below = srgb / vec3(12.92); - mediump vec3 above = pow((srgb + vec3(0.055)) / vec3(1.055), vec3(2.4)); - return mix(below, above, cutoff); -#else - return srgb; -#endif -} - -// unpacksRGB unpacks a color in the sRGB color space to a vec4 in the linear color -// space. -mediump vec4 unpacksRGB(uint srgba) { - mediump vec4 color = unpackUnorm4x8(srgba).wzyx; - return vec4(fromsRGB(color.rgb), color.a); -} - -// packsRGB packs a color in the linear color space into its 8-bit sRGB equivalent. -uint packsRGB(mediump vec4 rgba) { - rgba = vec4(tosRGB(rgba.rgb), rgba.a); - return packUnorm4x8(rgba.wzyx); -} - -uvec2 chunk_offset(uint i) { - return uvec2(i % CHUNK_X * CHUNK_DX, i / CHUNK_X * CHUNK_DY); -} - -mediump vec4[CHUNK] fillImage(uvec2 xy, CmdImage cmd_img) { - mediump vec4 rgba[CHUNK]; - for (uint i = 0; i < CHUNK; i++) { - ivec2 uv = ivec2(xy + chunk_offset(i)) + cmd_img.offset; - mediump vec4 fg_rgba; - fg_rgba = imageLoad(image_atlas, uv); - fg_rgba.rgb = fromsRGB(fg_rgba.rgb); - rgba[i] = fg_rgba; - } - return rgba; -} - -void main() { - uint tile_ix = gl_WorkGroupID.y * conf.width_in_tiles + gl_WorkGroupID.x; - Alloc cmd_alloc = slice_mem(conf.ptcl_alloc, tile_ix * PTCL_INITIAL_ALLOC, PTCL_INITIAL_ALLOC); - CmdRef cmd_ref = CmdRef(cmd_alloc.offset); - - uint blend_offset = memory[cmd_ref.offset >> 2]; - cmd_ref.offset += 4; - - uvec2 xy_uint = uvec2(gl_LocalInvocationID.x + TILE_WIDTH_PX * gl_WorkGroupID.x, - gl_LocalInvocationID.y + TILE_HEIGHT_PX * gl_WorkGroupID.y); - vec2 xy = vec2(xy_uint); - mediump vec4 rgba[CHUNK]; - uint blend_stack[BLEND_STACK_SPLIT][CHUNK]; - for (uint i = 0; i < CHUNK; i++) { - rgba[i] = vec4(0.0); - } - - mediump float area[CHUNK]; - uint clip_depth = 0; - // Previously we would early-out if there was a memory failure, so we wouldn't try to read corrupt - // tiles. But now we assume this is checked CPU-side before launching fine rasterization. - while (true) { - uint tag = Cmd_tag(cmd_alloc, cmd_ref).tag; - if (tag == Cmd_End) { - break; - } - switch (tag) { - case Cmd_Stroke: - // Calculate distance field from all the line segments in this tile. 
- CmdStroke stroke = Cmd_Stroke_read(cmd_alloc, cmd_ref); - mediump float df[CHUNK]; - for (uint k = 0; k < CHUNK; k++) - df[k] = 1e9; - TileSegRef tile_seg_ref = TileSegRef(stroke.tile_ref); - do { - TileSeg seg = TileSeg_read(new_alloc(tile_seg_ref.offset, TileSeg_size, true), tile_seg_ref); - vec2 line_vec = seg.vector; - for (uint k = 0; k < CHUNK; k++) { - vec2 dpos = xy + vec2(0.5, 0.5) - seg.origin; - dpos += vec2(chunk_offset(k)); - float t = clamp(dot(line_vec, dpos) / dot(line_vec, line_vec), 0.0, 1.0); - df[k] = min(df[k], length(line_vec * t - dpos)); - } - tile_seg_ref = seg.next; - } while (tile_seg_ref.offset != 0); - for (uint k = 0; k < CHUNK; k++) { - area[k] = clamp(stroke.half_width + 0.5 - df[k], 0.0, 1.0); - } - cmd_ref.offset += 4 + CmdStroke_size; - break; - case Cmd_Fill: - CmdFill fill = Cmd_Fill_read(cmd_alloc, cmd_ref); - for (uint k = 0; k < CHUNK; k++) - area[k] = float(fill.backdrop); - tile_seg_ref = TileSegRef(fill.tile_ref); - // Calculate coverage based on backdrop + coverage of each line segment - do { - TileSeg seg = TileSeg_read(new_alloc(tile_seg_ref.offset, TileSeg_size, true), tile_seg_ref); - for (uint k = 0; k < CHUNK; k++) { - vec2 my_xy = xy + vec2(chunk_offset(k)); - vec2 start = seg.origin - my_xy; - vec2 end = start + seg.vector; - vec2 window = clamp(vec2(start.y, end.y), 0.0, 1.0); - if (window.x != window.y) { - vec2 t = (window - start.y) / seg.vector.y; - vec2 xs = vec2(mix(start.x, end.x, t.x), mix(start.x, end.x, t.y)); - float xmin = min(min(xs.x, xs.y), 1.0) - 1e-6; - float xmax = max(xs.x, xs.y); - float b = min(xmax, 1.0); - float c = max(b, 0.0); - float d = max(xmin, 0.0); - float a = (b + 0.5 * (d * d - c * c) - xmin) / (xmax - xmin); - area[k] += a * (window.x - window.y); - } - area[k] += sign(seg.vector.x) * clamp(my_xy.y - seg.y_edge + 1.0, 0.0, 1.0); - } - tile_seg_ref = seg.next; - } while (tile_seg_ref.offset != 0); - for (uint k = 0; k < CHUNK; k++) { - area[k] = min(abs(area[k]), 1.0); - } - cmd_ref.offset += 4 + CmdFill_size; - break; - case Cmd_Solid: - for (uint k = 0; k < CHUNK; k++) { - area[k] = 1.0; - } - cmd_ref.offset += 4; - break; - case Cmd_Alpha: - CmdAlpha alpha = Cmd_Alpha_read(cmd_alloc, cmd_ref); - for (uint k = 0; k < CHUNK; k++) { - area[k] = alpha.alpha; - } - cmd_ref.offset += 4 + CmdAlpha_size; - break; - case Cmd_Color: - CmdColor color = Cmd_Color_read(cmd_alloc, cmd_ref); - mediump vec4 fg = unpacksRGB(color.rgba_color); - for (uint k = 0; k < CHUNK; k++) { - mediump vec4 fg_k = fg * area[k]; - rgba[k] = rgba[k] * (1.0 - fg_k.a) + fg_k; - } - cmd_ref.offset += 4 + CmdColor_size; - break; - case Cmd_LinGrad: - CmdLinGrad lin = Cmd_LinGrad_read(cmd_alloc, cmd_ref); - float d = lin.line_x * float(xy.x) + lin.line_y * float(xy.y) + lin.line_c; - for (uint k = 0; k < CHUNK; k++) { - vec2 chunk_xy = vec2(chunk_offset(k)); - float my_d = d + lin.line_x * chunk_xy.x + lin.line_y * chunk_xy.y; - int x = int(round(clamp(my_d, 0.0, 1.0) * float(GRADIENT_WIDTH - 1))); - mediump vec4 fg_rgba = imageLoad(gradients, ivec2(x, int(lin.index))); - fg_rgba.rgb = fromsRGB(fg_rgba.rgb); - mediump vec4 fg_k = fg_rgba * area[k]; - rgba[k] = rgba[k] * (1.0 - fg_k.a) + fg_k; - } - cmd_ref.offset += 4 + CmdLinGrad_size; - break; - case Cmd_RadGrad: - CmdRadGrad rad = Cmd_RadGrad_read(cmd_alloc, cmd_ref); - for (uint k = 0; k < CHUNK; k++) { - vec2 my_xy = xy + vec2(chunk_offset(k)); - my_xy = rad.mat.xz * my_xy.x + rad.mat.yw * my_xy.y - rad.xlat; - float ba = dot(my_xy, rad.c1); - float ca = rad.ra * dot(my_xy, my_xy); 
- float t = sqrt(ba * ba + ca) - ba - rad.roff; - int x = int(round(clamp(t, 0.0, 1.0) * float(GRADIENT_WIDTH - 1))); - mediump vec4 fg_rgba = imageLoad(gradients, ivec2(x, int(rad.index))); - fg_rgba.rgb = fromsRGB(fg_rgba.rgb); - mediump vec4 fg_k = fg_rgba * area[k]; - rgba[k] = rgba[k] * (1.0 - fg_k.a) + fg_k; - } - cmd_ref.offset += 4 + CmdRadGrad_size; - break; - case Cmd_Image: - CmdImage fill_img = Cmd_Image_read(cmd_alloc, cmd_ref); - mediump vec4 img[CHUNK] = fillImage(xy_uint, fill_img); - for (uint k = 0; k < CHUNK; k++) { - mediump vec4 fg_k = img[k] * area[k]; - rgba[k] = rgba[k] * (1.0 - fg_k.a) + fg_k; - } - cmd_ref.offset += 4 + CmdImage_size; - break; - case Cmd_BeginClip: - if (clip_depth < BLEND_STACK_SPLIT) { - for (uint k = 0; k < CHUNK; k++) { - blend_stack[clip_depth][k] = packsRGB(vec4(rgba[k])); - rgba[k] = vec4(0.0); - } - } else { - uint base_ix = (blend_offset >> 2) + (clip_depth - BLEND_STACK_SPLIT) * TILE_HEIGHT_PX * TILE_WIDTH_PX + - CHUNK * (gl_LocalInvocationID.x + CHUNK_DX * gl_LocalInvocationID.y); - for (uint k = 0; k < CHUNK; k++) { - blend_mem[base_ix + k] = packsRGB(vec4(rgba[k])); - rgba[k] = vec4(0.0); - } - } - clip_depth++; - cmd_ref.offset += 4; - break; - case Cmd_EndClip: - CmdEndClip end_clip = Cmd_EndClip_read(cmd_alloc, cmd_ref); - clip_depth--; - uint base_ix; - if (clip_depth >= BLEND_STACK_SPLIT) { - base_ix = (blend_offset >> 2) + (clip_depth - BLEND_STACK_SPLIT) * TILE_HEIGHT_PX * TILE_WIDTH_PX + - CHUNK * (gl_LocalInvocationID.x + CHUNK_DX * gl_LocalInvocationID.y); - } - for (uint k = 0; k < CHUNK; k++) { - uint bg_rgba; - if (clip_depth < BLEND_STACK_SPLIT) { - bg_rgba = blend_stack[clip_depth][k]; - } else { - bg_rgba = blend_mem[base_ix + k]; - } - mediump vec4 bg = unpacksRGB(bg_rgba); - mediump vec4 fg = rgba[k] * area[k]; - rgba[k] = mix_blend_compose(bg, fg, end_clip.blend); - } - cmd_ref.offset += 4 + CmdEndClip_size; - break; - case Cmd_Jump: - cmd_ref = CmdRef(Cmd_Jump_read(cmd_alloc, cmd_ref).new_ref); - cmd_alloc.offset = cmd_ref.offset; - break; - } - } - - for (uint i = 0; i < CHUNK; i++) { -#ifdef GRAY - // Just store the alpha value; later we can specialize this kernel more to avoid - // computing unneeded RGB colors. - imageStore(image, ivec2(xy_uint + chunk_offset(i)), vec4(rgba[i].a)); -#else - imageStore(image, ivec2(xy_uint + chunk_offset(i)), vec4(tosRGB(rgba[i].rgb), rgba[i].a)); -#endif - } -} diff --git a/piet-gpu/shader/mem.h b/piet-gpu/shader/mem.h deleted file mode 100644 index d79ed16..0000000 --- a/piet-gpu/shader/mem.h +++ /dev/null @@ -1,145 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense - -layout(set = 0, binding = 0) buffer Memory { - // offset into memory of the next allocation, initialized by the user. - uint mem_offset; - // mem_error is a bitmask of stages that have failed allocation. - uint mem_error; - // offset into blend memory of allocations for blend stack. - uint blend_offset; - uint[] memory; -}; - -// Uncomment this line to add the size field to Alloc and enable memory checks. -// Note that the Config struct in setup.h grows size fields as well. - -// This setting is not working and the mechanism will be removed. -//#define MEM_DEBUG - -#ifdef MEM_DEBUG -#define Alloc_size 16 -#else -// TODO: this seems wrong -#define Alloc_size 8 -#endif - -// Alloc represents a memory allocation. -struct Alloc { - // offset in bytes into memory. - uint offset; -#ifdef MEM_DEBUG - // size in bytes of the allocation. 
- uint size; -#endif -}; - -// new_alloc synthesizes an Alloc from an offset and size. -Alloc new_alloc(uint offset, uint size, bool mem_ok) { - Alloc a; - a.offset = offset; -#ifdef MEM_DEBUG - if (mem_ok) { - a.size = size; - } else { - a.size = 0; - } -#endif - return a; -} - -#define STAGE_BINNING (1u << 0) -#define STAGE_TILE_ALLOC (1u << 1) -#define STAGE_PATH_COARSE (1u << 2) -#define STAGE_COARSE (1u << 3) - -// Allocations in main memory will never be 0, and this might be slightly -// faster to test against than some other value. -#define MALLOC_FAILED 0 - -// Check that previous dependent stages have succeeded. -bool check_deps(uint dep_stage) { - // TODO: this should be an atomic relaxed load, but that involves - // bringing in "memory scope semantics" - return (atomicOr(mem_error, 0) & dep_stage) == 0; -} - -// Allocate size bytes of memory, offset in bytes. -// Note: with a bit of rearrangement of header files, we could make the -// mem_size argument go away (it comes from the config binding). -uint malloc_stage(uint size, uint mem_size, uint stage) { - uint offset = atomicAdd(mem_offset, size); - if (offset + size > mem_size) { - atomicOr(mem_error, stage); - offset = MALLOC_FAILED; - } - return offset; -} - -// touch_mem checks whether access to the memory word at offset is valid. -// If MEM_DEBUG is defined, touch_mem returns false if offset is out of bounds. -// Offset is in words. -bool touch_mem(Alloc alloc, uint offset) { -#ifdef MEM_DEBUG - if (offset < alloc.offset/4 || offset >= (alloc.offset + alloc.size)/4) { - atomicMax(mem_error, ERR_OUT_OF_BOUNDS); - return false; - } -#endif - return true; -} - -// write_mem writes val to memory at offset. -// Offset is in words. -void write_mem(Alloc alloc, uint offset, uint val) { - if (!touch_mem(alloc, offset)) { - return; - } - memory[offset] = val; -} - -// read_mem reads the value from memory at offset. -// Offset is in words. -uint read_mem(Alloc alloc, uint offset) { - if (!touch_mem(alloc, offset)) { - return 0; - } - uint v = memory[offset]; - return v; -} - -// slice_mem returns a sub-allocation inside another. Offset and size are in -// bytes, relative to a.offset. -Alloc slice_mem(Alloc a, uint offset, uint size) { -#ifdef MEM_DEBUG - if ((offset & 3) != 0 || (size & 3) != 0) { - atomicMax(mem_error, ERR_UNALIGNED_ACCESS); - return Alloc(0, 0); - } - if (offset + size > a.size) { - // slice_mem is sometimes used for slices outside bounds, - // but never written. - return Alloc(0, 0); - } - return Alloc(a.offset + offset, size); -#else - return Alloc(a.offset + offset); -#endif -} - -// alloc_write writes alloc to memory at offset bytes. -void alloc_write(Alloc a, uint offset, Alloc alloc) { - write_mem(a, offset >> 2, alloc.offset); -#ifdef MEM_DEBUG - write_mem(a, (offset >> 2) + 1, alloc.size); -#endif -} - -// alloc_read reads an Alloc from memory at offset bytes. -Alloc alloc_read(Alloc a, uint offset) { - Alloc alloc; - alloc.offset = read_mem(a, offset >> 2); -#ifdef MEM_DEBUG - alloc.size = read_mem(a, (offset >> 2) + 1); -#endif - return alloc; -} diff --git a/piet-gpu/shader/path_coarse.comp b/piet-gpu/shader/path_coarse.comp deleted file mode 100644 index 39b26b2..0000000 --- a/piet-gpu/shader/path_coarse.comp +++ /dev/null @@ -1,289 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense - -// Coarse rasterization of path segments. - -// Allocation and initialization of tiles for paths. 
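Note, before the coarse path rasterizer begins: malloc_stage in mem.h above is a bump allocator over the single Memory buffer. The running offset is advanced with an atomic add, and an allocation that would overrun mem_size sets the failing stage's bit in mem_error, which downstream stages test via check_deps. A rough CPU-side sketch of the same scheme in Rust (names mirror the shader, but this is illustrative only):

    use std::sync::atomic::{AtomicU32, Ordering};

    const MALLOC_FAILED: u32 = 0;
    const STAGE_PATH_COARSE: u32 = 1 << 2;

    // Bump-allocate `size` bytes; on overflow, record the failing stage in `mem_error`.
    fn malloc_stage(mem_offset: &AtomicU32, mem_error: &AtomicU32,
                    size: u32, mem_size: u32, stage: u32) -> u32 {
        let offset = mem_offset.fetch_add(size, Ordering::Relaxed);
        if offset + size > mem_size {
            mem_error.fetch_or(stage, Ordering::Relaxed);
            MALLOC_FAILED
        } else {
            offset
        }
    }

    fn main() {
        // The offset starts past the header words, so a successful allocation is never 0.
        let mem_offset = AtomicU32::new(16);
        let mem_error = AtomicU32::new(0);
        assert_eq!(malloc_stage(&mem_offset, &mem_error, 64, 128, STAGE_PATH_COARSE), 16);
        assert_eq!(malloc_stage(&mem_offset, &mem_error, 128, 128, STAGE_PATH_COARSE), MALLOC_FAILED);
        assert_eq!(mem_error.load(Ordering::Relaxed), STAGE_PATH_COARSE);
    }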
- -#version 450 -#extension GL_GOOGLE_include_directive : enable - -#include "mem.h" -#include "setup.h" - -#define LG_COARSE_WG 5 -#define COARSE_WG (1 << LG_COARSE_WG) - -layout(local_size_x = COARSE_WG, local_size_y = 1) in; - -layout(set = 0, binding = 1) readonly buffer ConfigBuf { - Config conf; -}; - -#include "pathseg.h" -#include "tile.h" - -// scale factors useful for converting coordinates to tiles -#define SX (1.0 / float(TILE_WIDTH_PX)) -#define SY (1.0 / float(TILE_HEIGHT_PX)) - -#define ACCURACY 0.25 -#define Q_ACCURACY (ACCURACY * 0.1) -#define REM_ACCURACY (ACCURACY - Q_ACCURACY) -#define MAX_HYPOT2 (432.0 * Q_ACCURACY * Q_ACCURACY) -#define MAX_QUADS 16 - -vec2 eval_quad(vec2 p0, vec2 p1, vec2 p2, float t) { - float mt = 1.0 - t; - return p0 * (mt * mt) + (p1 * (mt * 2.0) + p2 * t) * t; -} - -vec2 eval_cubic(vec2 p0, vec2 p1, vec2 p2, vec2 p3, float t) { - float mt = 1.0 - t; - return p0 * (mt * mt * mt) + (p1 * (mt * mt * 3.0) + (p2 * (mt * 3.0) + p3 * t) * t) * t; -} - -struct SubdivResult { - float val; - float a0; - float a2; -}; - -/// An approximation to $\int (1 + 4x^2) ^ -0.25 dx$ -/// -/// This is used for flattening curves. -#define D 0.67 -float approx_parabola_integral(float x) { - return x * inversesqrt(sqrt(1.0 - D + (D * D * D * D + 0.25 * x * x))); -} - -/// An approximation to the inverse parabola integral. -#define B 0.39 -float approx_parabola_inv_integral(float x) { - return x * sqrt(1.0 - B + (B * B + 0.25 * x * x)); -} - -SubdivResult estimate_subdiv(vec2 p0, vec2 p1, vec2 p2, float sqrt_tol) { - vec2 d01 = p1 - p0; - vec2 d12 = p2 - p1; - vec2 dd = d01 - d12; - float cross = (p2.x - p0.x) * dd.y - (p2.y - p0.y) * dd.x; - float x0 = (d01.x * dd.x + d01.y * dd.y) / cross; - float x2 = (d12.x * dd.x + d12.y * dd.y) / cross; - float scale = abs(cross / (length(dd) * (x2 - x0))); - - float a0 = approx_parabola_integral(x0); - float a2 = approx_parabola_integral(x2); - float val = 0.0; - if (scale < 1e9) { - float da = abs(a2 - a0); - float sqrt_scale = sqrt(scale); - if (sign(x0) == sign(x2)) { - val = da * sqrt_scale; - } else { - float xmin = sqrt_tol / sqrt_scale; - val = sqrt_tol * da / approx_parabola_integral(xmin); - } - } - return SubdivResult(val, a0, a2); -} - -// All writes to the output must be gated by mem_ok. -bool mem_ok = true; - -void main() { - if (!check_deps(STAGE_BINNING | STAGE_TILE_ALLOC | STAGE_PATH_COARSE)) { - return; - } - uint element_ix = gl_GlobalInvocationID.x; - PathSegRef ref = PathSegRef(conf.pathseg_alloc.offset + element_ix * PathSeg_size); - - PathSegTag tag = PathSegTag(PathSeg_Nop, 0); - if (element_ix < conf.n_pathseg) { - tag = PathSeg_tag(conf.pathseg_alloc, ref); - } - switch (tag.tag) { - case PathSeg_Cubic: - PathCubic cubic = PathSeg_Cubic_read(conf.pathseg_alloc, ref); - - vec2 err_v = 3.0 * (cubic.p2 - cubic.p1) + cubic.p0 - cubic.p3; - float err = err_v.x * err_v.x + err_v.y * err_v.y; - // The number of quadratics. - uint n_quads = max(uint(ceil(pow(err * (1.0 / MAX_HYPOT2), 1.0 / 6.0))), 1); - n_quads = min(n_quads, MAX_QUADS); - SubdivResult keep_params[MAX_QUADS]; - // Iterate over quadratics and tote up the estimated number of segments. 
- float val = 0.0; - vec2 qp0 = cubic.p0; - float step = 1.0 / float(n_quads); - for (uint i = 0; i < n_quads; i++) { - float t = float(i + 1) * step; - vec2 qp2 = eval_cubic(cubic.p0, cubic.p1, cubic.p2, cubic.p3, t); - vec2 qp1 = eval_cubic(cubic.p0, cubic.p1, cubic.p2, cubic.p3, t - 0.5 * step); - qp1 = 2.0 * qp1 - 0.5 * (qp0 + qp2); - SubdivResult params = estimate_subdiv(qp0, qp1, qp2, sqrt(REM_ACCURACY)); - keep_params[i] = params; - val += params.val; - - qp0 = qp2; - } - uint n = max(uint(ceil(val * 0.5 / sqrt(REM_ACCURACY))), 1); - - bool is_stroke = fill_mode_from_flags(tag.flags) == MODE_STROKE; - uint path_ix = cubic.path_ix; - Path path = Path_read(conf.tile_alloc, PathRef(conf.tile_alloc.offset + path_ix * Path_size)); - Alloc path_alloc = - new_alloc(path.tiles.offset, (path.bbox.z - path.bbox.x) * (path.bbox.w - path.bbox.y) * Tile_size, true); - ivec4 bbox = ivec4(path.bbox); - vec2 p0 = cubic.p0; - qp0 = cubic.p0; - float v_step = val / float(n); - int n_out = 1; - float val_sum = 0.0; - for (uint i = 0; i < n_quads; i++) { - float t = float(i + 1) * step; - vec2 qp2 = eval_cubic(cubic.p0, cubic.p1, cubic.p2, cubic.p3, t); - vec2 qp1 = eval_cubic(cubic.p0, cubic.p1, cubic.p2, cubic.p3, t - 0.5 * step); - qp1 = 2.0 * qp1 - 0.5 * (qp0 + qp2); - SubdivResult params = keep_params[i]; - float u0 = approx_parabola_inv_integral(params.a0); - float u2 = approx_parabola_inv_integral(params.a2); - float uscale = 1.0 / (u2 - u0); - float target = float(n_out) * v_step; - while (n_out == n || target < val_sum + params.val) { - vec2 p1; - if (n_out == n) { - p1 = cubic.p3; - } else { - float u = (target - val_sum) / params.val; - float a = mix(params.a0, params.a2, u); - float au = approx_parabola_inv_integral(a); - float t = (au - u0) * uscale; - p1 = eval_quad(qp0, qp1, qp2, t); - } - - // Output line segment - - // Bounding box of element in pixel coordinates. - float xmin = min(p0.x, p1.x) - cubic.stroke.x; - float xmax = max(p0.x, p1.x) + cubic.stroke.x; - float ymin = min(p0.y, p1.y) - cubic.stroke.y; - float ymax = max(p0.y, p1.y) + cubic.stroke.y; - float dx = p1.x - p0.x; - float dy = p1.y - p0.y; - // Set up for per-scanline coverage formula, below. - float invslope = abs(dy) < 1e-9 ? 1e9 : dx / dy; - float c = (cubic.stroke.x + abs(invslope) * (0.5 * float(TILE_HEIGHT_PX) + cubic.stroke.y)) * SX; - float b = invslope; // Note: assumes square tiles, otherwise scale. - float a = (p0.x - (p0.y - 0.5 * float(TILE_HEIGHT_PX)) * b) * SX; - - int x0 = int(floor(xmin * SX)); - int x1 = int(floor(xmax * SX) + 1); - int y0 = int(floor(ymin * SY)); - int y1 = int(floor(ymax * SY) + 1); - - x0 = clamp(x0, bbox.x, bbox.z); - y0 = clamp(y0, bbox.y, bbox.w); - x1 = clamp(x1, bbox.x, bbox.z); - y1 = clamp(y1, bbox.y, bbox.w); - float xc = a + b * float(y0); - int stride = bbox.z - bbox.x; - int base = (y0 - bbox.y) * stride - bbox.x; - // TODO: can be tighter, use c to bound width - uint n_tile_alloc = uint((x1 - x0) * (y1 - y0)); - // Consider using subgroups to aggregate atomic add. 
- uint malloc_size = n_tile_alloc * TileSeg_size; - uint tile_offset = malloc_stage(malloc_size, conf.mem_size, STAGE_PATH_COARSE); - if (tile_offset == MALLOC_FAILED) { - mem_ok = false; - } - Alloc tile_alloc = new_alloc(tile_offset, malloc_size, true); - - TileSeg tile_seg; - - int xray = int(floor(p0.x * SX)); - int last_xray = int(floor(p1.x * SX)); - if (p0.y > p1.y) { - int tmp = xray; - xray = last_xray; - last_xray = tmp; - } - for (int y = y0; y < y1; y++) { - float tile_y0 = float(y * TILE_HEIGHT_PX); - int xbackdrop = max(xray + 1, bbox.x); - if (!is_stroke && min(p0.y, p1.y) < tile_y0 && xbackdrop < bbox.z) { - int backdrop = p1.y < p0.y ? 1 : -1; - TileRef tile_ref = Tile_index(path.tiles, uint(base + xbackdrop)); - uint tile_el = tile_ref.offset >> 2; - atomicAdd(memory[tile_el + 1], backdrop); - } - - // next_xray is the xray for the next scanline; the line segment intersects - // all tiles between xray and next_xray. - int next_xray = last_xray; - if (y < y1 - 1) { - float tile_y1 = float((y + 1) * TILE_HEIGHT_PX); - float x_edge = mix(p0.x, p1.x, (tile_y1 - p0.y) / dy); - next_xray = int(floor(x_edge * SX)); - } - - int min_xray = min(xray, next_xray); - int max_xray = max(xray, next_xray); - int xx0 = min(int(floor(xc - c)), min_xray); - int xx1 = max(int(ceil(xc + c)), max_xray + 1); - xx0 = clamp(xx0, x0, x1); - xx1 = clamp(xx1, x0, x1); - - for (int x = xx0; x < xx1; x++) { - float tile_x0 = float(x * TILE_WIDTH_PX); - TileRef tile_ref = Tile_index(TileRef(path.tiles.offset), uint(base + x)); - uint tile_el = tile_ref.offset >> 2; - uint old = 0; - old = atomicExchange(memory[tile_el], tile_offset); - tile_seg.origin = p0; - tile_seg.vector = p1 - p0; - float y_edge = 0.0; - if (!is_stroke) { - y_edge = mix(p0.y, p1.y, (tile_x0 - p0.x) / dx); - if (min(p0.x, p1.x) < tile_x0) { - vec2 p = vec2(tile_x0, y_edge); - if (p0.x > p1.x) { - tile_seg.vector = p - p0; - } else { - tile_seg.origin = p; - tile_seg.vector = p1 - p; - } - // kernel4 uses sign(vector.x) for the sign of the intersection backdrop. - // Nudge zeroes towards the intended sign. - if (tile_seg.vector.x == 0) { - tile_seg.vector.x = sign(p1.x - p0.x) * 1e-9; - } - } - if (x <= min_xray || max_xray < x) { - // Reject inconsistent intersections. - y_edge = 1e9; - } - } - tile_seg.y_edge = y_edge; - tile_seg.next.offset = old; - if (mem_ok) { - TileSeg_write(tile_alloc, TileSegRef(tile_offset), tile_seg); - } - tile_offset += TileSeg_size; - } - xc += b; - base += stride; - xray = next_xray; - } - - n_out += 1; - target += v_step; - p0 = p1; - } - val_sum += params.val; - - qp0 = qp2; - } - - break; - } -} diff --git a/piet-gpu/shader/pathseg.comp b/piet-gpu/shader/pathseg.comp deleted file mode 100644 index 0efa66f..0000000 --- a/piet-gpu/shader/pathseg.comp +++ /dev/null @@ -1,291 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense - -// Processing of the path stream, after the tag scan. 
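For reference, the flattening arithmetic in path_coarse.comp above, transcribed into formulas (this only restates what the shader computes). A cubic with control points p0..p3 is first split into quadratics:

\[
\mathbf{e} = 3(\mathbf{p}_2 - \mathbf{p}_1) + \mathbf{p}_0 - \mathbf{p}_3,
\qquad
n_{\text{quads}} = \min\!\left(\max\!\left(\left\lceil \left(\frac{\lVert\mathbf{e}\rVert^2}{\text{MAX\_HYPOT2}}\right)^{1/6} \right\rceil,\ 1\right),\ \text{MAX\_QUADS}\right).
\]

Each quadratic then gets a subdivision estimate from estimate_subdiv (built on approx_parabola_integral, an approximation of \(\int (1 + 4x^2)^{-1/4}\,dx\)); these estimates are summed into val, and the number of output line segments is

\[
n = \max\!\left(\left\lceil \frac{\text{val}}{2\sqrt{\text{REM\_ACCURACY}}} \right\rceil,\ 1\right),
\]

with the individual split points found by evaluating the inverse integral at evenly spaced values of the accumulated estimate.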
- -#version 450 -#extension GL_GOOGLE_include_directive : enable - -#include "mem.h" -#include "setup.h" -#include "pathtag.h" - -#define N_SEQ 4 -#define LG_WG_SIZE (7 + LG_WG_FACTOR) -#define WG_SIZE (1 << LG_WG_SIZE) -#define PARTITION_SIZE (WG_SIZE * N_SEQ) - -layout(local_size_x = WG_SIZE, local_size_y = 1) in; - -layout(binding = 1) readonly buffer ConfigBuf { - Config conf; -}; - -layout(binding = 2) readonly buffer SceneBuf { - uint[] scene; -}; - -#include "tile.h" -#include "pathseg.h" -#include "scene.h" - -layout(binding = 3) readonly buffer ParentBuf { - TagMonoid[] parent; -}; - -struct Monoid { - vec4 bbox; - uint flags; -}; - -#define FLAG_RESET_BBOX 1 -#define FLAG_SET_BBOX 2 - -Monoid combine_monoid(Monoid a, Monoid b) { - Monoid c; - c.bbox = b.bbox; - // TODO: I think this should be gated on b & SET_BBOX == false also. - if ((a.flags & FLAG_RESET_BBOX) == 0 && b.bbox.z <= b.bbox.x && b.bbox.w <= b.bbox.y) { - c.bbox = a.bbox; - } else if ((a.flags & FLAG_RESET_BBOX) == 0 && (b.flags & FLAG_SET_BBOX) == 0 && - (a.bbox.z > a.bbox.x || a.bbox.w > a.bbox.y)) { - c.bbox.xy = min(a.bbox.xy, c.bbox.xy); - c.bbox.zw = max(a.bbox.zw, c.bbox.zw); - } - c.flags = (a.flags & FLAG_SET_BBOX) | b.flags; - c.flags |= ((a.flags & FLAG_RESET_BBOX) << 1); - return c; -} - -Monoid monoid_identity() { - return Monoid(vec4(0.0, 0.0, 0.0, 0.0), 0); -} - -// These are not both live at the same time. A very smart shader compiler -// would be able to figure that out, but I suspect many won't. -shared TagMonoid sh_tag[WG_SIZE]; -shared Monoid sh_scratch[WG_SIZE]; - -vec2 read_f32_point(uint ix) { - float x = uintBitsToFloat(scene[ix]); - float y = uintBitsToFloat(scene[ix + 1]); - return vec2(x, y); -} - -vec2 read_i16_point(uint ix) { - uint raw = scene[ix]; - float x = float(int(raw << 16) >> 16); - float y = float(int(raw) >> 16); - return vec2(x, y); -} - -// Note: these are 16 bit, which is adequate, but we could use 32 bits. - -// Round down and saturate to minimum integer; add bias -uint round_down(float x) { - return uint(max(0.0, floor(x) + 32768.0)); -} - -// Round up and saturate to maximum integer; add bias -uint round_up(float x) { - return uint(min(65535.0, ceil(x) + 32768.0)); -} - -void main() { - Monoid local[N_SEQ]; - float linewidth[N_SEQ]; - uint save_trans_ix[N_SEQ]; - - uint ix = gl_GlobalInvocationID.x * N_SEQ; - - uint tag_word = scene[(conf.pathtag_offset >> 2) + (ix >> 2)]; - - // Scan the tag monoid - TagMonoid local_tm = reduce_tag(tag_word); - sh_tag[gl_LocalInvocationID.x] = local_tm; - for (uint i = 0; i < LG_WG_SIZE; i++) { - barrier(); - if (gl_LocalInvocationID.x >= (1u << i)) { - TagMonoid other = sh_tag[gl_LocalInvocationID.x - (1u << i)]; - local_tm = combine_tag_monoid(other, local_tm); - } - barrier(); - sh_tag[gl_LocalInvocationID.x] = local_tm; - } - barrier(); - // sh_tag is now the partition-wide inclusive scan of the tag monoid. - TagMonoid tm = tag_monoid_identity(); - if (gl_WorkGroupID.x > 0) { - tm = parent[gl_WorkGroupID.x - 1]; - } - if (gl_LocalInvocationID.x > 0) { - tm = combine_tag_monoid(tm, sh_tag[gl_LocalInvocationID.x - 1]); - } - // tm is now the full exclusive scan of the tag monoid. - - // Indices to scene buffer in u32 units. 
- uint ps_ix = (conf.pathseg_offset >> 2) + tm.pathseg_offset; - uint lw_ix = (conf.linewidth_offset >> 2) + tm.linewidth_ix; - uint save_path_ix = tm.path_ix; - uint trans_ix = tm.trans_ix; - TransformRef trans_ref = TransformRef(conf.trans_offset + trans_ix * Transform_size); - PathSegRef ps_ref = PathSegRef(conf.pathseg_alloc.offset + tm.pathseg_ix * PathSeg_size); - for (uint i = 0; i < N_SEQ; i++) { - linewidth[i] = uintBitsToFloat(scene[lw_ix]); - save_trans_ix[i] = trans_ix; - // if N_SEQ > 4, need to load tag_word from local if N_SEQ % 4 == 0 - uint tag_byte = tag_word >> (i * 8); - uint seg_type = tag_byte & 3; - if (seg_type != 0) { - // 1 = line, 2 = quad, 3 = cubic - // Unpack path segment from input - vec2 p0; - vec2 p1; - vec2 p2; - vec2 p3; - if ((tag_byte & 8) != 0) { - // 32 bit encoding - p0 = read_f32_point(ps_ix); - p1 = read_f32_point(ps_ix + 2); - if (seg_type >= 2) { - p2 = read_f32_point(ps_ix + 4); - if (seg_type == 3) { - p3 = read_f32_point(ps_ix + 6); - } - } - } else { - // 16 bit encoding - p0 = read_i16_point(ps_ix); - p1 = read_i16_point(ps_ix + 1); - if (seg_type >= 2) { - p2 = read_i16_point(ps_ix + 2); - if (seg_type == 3) { - p3 = read_i16_point(ps_ix + 3); - } - } - } - Transform transform = Transform_read(trans_ref); - p0 = transform.mat.xy * p0.x + transform.mat.zw * p0.y + transform.translate; - p1 = transform.mat.xy * p1.x + transform.mat.zw * p1.y + transform.translate; - vec4 bbox = vec4(min(p0, p1), max(p0, p1)); - // Degree-raise and compute bbox - if (seg_type >= 2) { - p2 = transform.mat.xy * p2.x + transform.mat.zw * p2.y + transform.translate; - bbox.xy = min(bbox.xy, p2); - bbox.zw = max(bbox.zw, p2); - if (seg_type == 3) { - p3 = transform.mat.xy * p3.x + transform.mat.zw * p3.y + transform.translate; - bbox.xy = min(bbox.xy, p3); - bbox.zw = max(bbox.zw, p3); - } else { - p3 = p2; - p2 = mix(p1, p2, 1.0 / 3.0); - p1 = mix(p1, p0, 1.0 / 3.0); - } - } else { - p3 = p1; - p2 = mix(p3, p0, 1.0 / 3.0); - p1 = mix(p0, p3, 1.0 / 3.0); - } - vec2 stroke = vec2(0.0, 0.0); - if (linewidth[i] >= 0.0) { - // See https://www.iquilezles.org/www/articles/ellipses/ellipses.htm - stroke = 0.5 * linewidth[i] * vec2(length(transform.mat.xz), length(transform.mat.yw)); - bbox += vec4(-stroke, stroke); - } - local[i].bbox = bbox; - local[i].flags = 0; - - // Write path segment to output - PathCubic cubic; - cubic.p0 = p0; - cubic.p1 = p1; - cubic.p2 = p2; - cubic.p3 = p3; - cubic.path_ix = tm.path_ix; - // Not needed, TODO remove from struct - cubic.trans_ix = gl_GlobalInvocationID.x * 4 + i; - cubic.stroke = stroke; - uint fill_mode = uint(linewidth[i] >= 0.0); - PathSeg_Cubic_write(conf.pathseg_alloc, ps_ref, fill_mode, cubic); - - ps_ref.offset += PathSeg_size; - uint n_points = (tag_byte & 3) + ((tag_byte >> 2) & 1); - uint n_words = n_points + (n_points & (((tag_byte >> 3) & 1) * 15)); - ps_ix += n_words; - } else { - local[i].bbox = vec4(0.0, 0.0, 0.0, 0.0); - // These shifts need to be kept in sync with setup.h - uint is_path = (tag_byte >> 4) & 1; - // Relies on the fact that RESET_BBOX == 1 - local[i].flags = is_path; - tm.path_ix += is_path; - trans_ix += (tag_byte >> 5) & 1; - trans_ref.offset += ((tag_byte >> 5) & 1) * Transform_size; - lw_ix += (tag_byte >> 6) & 1; - } - } - - // Partition-wide monoid scan for bbox monoid - Monoid agg = local[0]; - for (uint i = 1; i < N_SEQ; i++) { - // Note: this could be fused with the map above, but probably - // a thin performance gain not worth the complexity. 
- agg = combine_monoid(agg, local[i]); - local[i] = agg; - } - // local is N_SEQ sub-partition inclusive scan of bbox monoid. - sh_scratch[gl_LocalInvocationID.x] = agg; - for (uint i = 0; i < LG_WG_SIZE; i++) { - barrier(); - if (gl_LocalInvocationID.x >= (1u << i)) { - Monoid other = sh_scratch[gl_LocalInvocationID.x - (1u << i)]; - agg = combine_monoid(other, agg); - } - barrier(); - sh_scratch[gl_LocalInvocationID.x] = agg; - } - // sh_scratch is the partition-wide inclusive scan of the bbox monoid, - // sampled at the end of the N_SEQ sub-partition. - - barrier(); - uint path_ix = save_path_ix; - uint bbox_out_ix = (conf.path_bbox_alloc.offset >> 2) + path_ix * 6; - // Write bboxes to paths; do atomic min/max if partial - Monoid row = monoid_identity(); - if (gl_LocalInvocationID.x > 0) { - row = sh_scratch[gl_LocalInvocationID.x - 1]; - } - for (uint i = 0; i < N_SEQ; i++) { - Monoid m = combine_monoid(row, local[i]); - // m is partition-wide inclusive scan of bbox monoid. - bool do_atomic = false; - if (i == N_SEQ - 1 && gl_LocalInvocationID.x == WG_SIZE - 1) { - // last element - do_atomic = true; - } - if ((m.flags & FLAG_RESET_BBOX) != 0) { - memory[bbox_out_ix + 4] = floatBitsToUint(linewidth[i]); - memory[bbox_out_ix + 5] = save_trans_ix[i]; - if ((m.flags & FLAG_SET_BBOX) == 0) { - do_atomic = true; - } else { - memory[bbox_out_ix] = round_down(m.bbox.x); - memory[bbox_out_ix + 1] = round_down(m.bbox.y); - memory[bbox_out_ix + 2] = round_up(m.bbox.z); - memory[bbox_out_ix + 3] = round_up(m.bbox.w); - bbox_out_ix += 6; - do_atomic = false; - } - } - if (do_atomic) { - if (m.bbox.z > m.bbox.x || m.bbox.w > m.bbox.y) { - // atomic min/max - atomicMin(memory[bbox_out_ix], round_down(m.bbox.x)); - atomicMin(memory[bbox_out_ix + 1], round_down(m.bbox.y)); - atomicMax(memory[bbox_out_ix + 2], round_up(m.bbox.z)); - atomicMax(memory[bbox_out_ix + 3], round_up(m.bbox.w)); - } - bbox_out_ix += 6; - } - } -} diff --git a/piet-gpu/shader/pathseg.h b/piet-gpu/shader/pathseg.h deleted file mode 100644 index 749771e..0000000 --- a/piet-gpu/shader/pathseg.h +++ /dev/null @@ -1,100 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense - -// Code auto-generated by piet-gpu-derive - -struct PathCubicRef { - uint offset; -}; - -struct PathSegRef { - uint offset; -}; - -struct PathCubic { - vec2 p0; - vec2 p1; - vec2 p2; - vec2 p3; - uint path_ix; - uint trans_ix; - vec2 stroke; -}; - -#define PathCubic_size 48 - -PathCubicRef PathCubic_index(PathCubicRef ref, uint index) { - return PathCubicRef(ref.offset + index * PathCubic_size); -} - -#define PathSeg_Nop 0 -#define PathSeg_Cubic 1 -#define PathSeg_size 52 - -PathSegRef PathSeg_index(PathSegRef ref, uint index) { - return PathSegRef(ref.offset + index * PathSeg_size); -} - -struct PathSegTag { - uint tag; - uint flags; -}; - -PathCubic PathCubic_read(Alloc a, PathCubicRef ref) { - uint ix = ref.offset >> 2; - uint raw0 = read_mem(a, ix + 0); - uint raw1 = read_mem(a, ix + 1); - uint raw2 = read_mem(a, ix + 2); - uint raw3 = read_mem(a, ix + 3); - uint raw4 = read_mem(a, ix + 4); - uint raw5 = read_mem(a, ix + 5); - uint raw6 = read_mem(a, ix + 6); - uint raw7 = read_mem(a, ix + 7); - uint raw8 = read_mem(a, ix + 8); - uint raw9 = read_mem(a, ix + 9); - uint raw10 = read_mem(a, ix + 10); - uint raw11 = read_mem(a, ix + 11); - PathCubic s; - s.p0 = vec2(uintBitsToFloat(raw0), uintBitsToFloat(raw1)); - s.p1 = vec2(uintBitsToFloat(raw2), uintBitsToFloat(raw3)); - s.p2 = vec2(uintBitsToFloat(raw4), uintBitsToFloat(raw5)); - s.p3 = 
vec2(uintBitsToFloat(raw6), uintBitsToFloat(raw7)); - s.path_ix = raw8; - s.trans_ix = raw9; - s.stroke = vec2(uintBitsToFloat(raw10), uintBitsToFloat(raw11)); - return s; -} - -void PathCubic_write(Alloc a, PathCubicRef ref, PathCubic s) { - uint ix = ref.offset >> 2; - write_mem(a, ix + 0, floatBitsToUint(s.p0.x)); - write_mem(a, ix + 1, floatBitsToUint(s.p0.y)); - write_mem(a, ix + 2, floatBitsToUint(s.p1.x)); - write_mem(a, ix + 3, floatBitsToUint(s.p1.y)); - write_mem(a, ix + 4, floatBitsToUint(s.p2.x)); - write_mem(a, ix + 5, floatBitsToUint(s.p2.y)); - write_mem(a, ix + 6, floatBitsToUint(s.p3.x)); - write_mem(a, ix + 7, floatBitsToUint(s.p3.y)); - write_mem(a, ix + 8, s.path_ix); - write_mem(a, ix + 9, s.trans_ix); - write_mem(a, ix + 10, floatBitsToUint(s.stroke.x)); - write_mem(a, ix + 11, floatBitsToUint(s.stroke.y)); -} - -PathSegTag PathSeg_tag(Alloc a, PathSegRef ref) { - uint tag_and_flags = read_mem(a, ref.offset >> 2); - return PathSegTag(tag_and_flags & 0xffff, tag_and_flags >> 16); -} - -PathCubic PathSeg_Cubic_read(Alloc a, PathSegRef ref) { - return PathCubic_read(a, PathCubicRef(ref.offset + 4)); -} - -void PathSeg_Nop_write(Alloc a, PathSegRef ref) { - write_mem(a, ref.offset >> 2, PathSeg_Nop); -} - -void PathSeg_Cubic_write(Alloc a, PathSegRef ref, uint flags, PathCubic s) { - write_mem(a, ref.offset >> 2, (flags << 16) | PathSeg_Cubic); - PathCubic_write(a, PathCubicRef(ref.offset + 4), s); -} - diff --git a/piet-gpu/shader/pathtag.h b/piet-gpu/shader/pathtag.h deleted file mode 100644 index c7af0d6..0000000 --- a/piet-gpu/shader/pathtag.h +++ /dev/null @@ -1,49 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense - -// Common data structures and functions for the path tag stream. - -// This is the layout for tag bytes in the path stream. See -// doc/pathseg.md for an explanation. - -#define PATH_TAG_PATHSEG_BITS 0xf -#define PATH_TAG_PATH 0x10 -#define PATH_TAG_TRANSFORM 0x20 -#define PATH_TAG_LINEWIDTH 0x40 - -struct TagMonoid { - uint trans_ix; - uint linewidth_ix; - uint pathseg_ix; - uint path_ix; - uint pathseg_offset; -}; - -TagMonoid tag_monoid_identity() { - return TagMonoid(0, 0, 0, 0, 0); -} - -TagMonoid combine_tag_monoid(TagMonoid a, TagMonoid b) { - TagMonoid c; - c.trans_ix = a.trans_ix + b.trans_ix; - c.linewidth_ix = a.linewidth_ix + b.linewidth_ix; - c.pathseg_ix = a.pathseg_ix + b.pathseg_ix; - c.path_ix = a.path_ix + b.path_ix; - c.pathseg_offset = a.pathseg_offset + b.pathseg_offset; - return c; -} - -TagMonoid reduce_tag(uint tag_word) { - TagMonoid c; - // Some fun bit magic here, see doc/pathseg.md for explanation. - uint point_count = tag_word & 0x3030303; - c.pathseg_ix = bitCount((point_count * 7) & 0x4040404); - c.linewidth_ix = bitCount(tag_word & (PATH_TAG_LINEWIDTH * 0x1010101)); - c.path_ix = bitCount(tag_word & (PATH_TAG_PATH * 0x1010101)); - c.trans_ix = bitCount(tag_word & (PATH_TAG_TRANSFORM * 0x1010101)); - uint n_points = point_count + ((tag_word >> 2) & 0x1010101); - uint a = n_points + (n_points & (((tag_word >> 3) & 0x1010101) * 15)); - a += a >> 8; - a += a >> 16; - c.pathseg_offset = a & 0xff; - return c; -} diff --git a/piet-gpu/shader/pathtag_reduce.comp b/piet-gpu/shader/pathtag_reduce.comp deleted file mode 100644 index 9e84bf8..0000000 --- a/piet-gpu/shader/pathtag_reduce.comp +++ /dev/null @@ -1,61 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense - -// The reduction phase for path tag scan implemented as a tree reduction. 
- -#version 450 -#extension GL_GOOGLE_include_directive : enable - -#include "mem.h" -#include "setup.h" -#include "pathtag.h" - -// Note: the partition size is smaller than pathseg by a factor -// of 4, as there are 4 tag bytes to a tag word. -#define N_ROWS 2 -#define LG_WG_SIZE (6 + LG_WG_FACTOR) -#define WG_SIZE (1 << LG_WG_SIZE) -#define PARTITION_SIZE (WG_SIZE * N_ROWS) - -layout(local_size_x = WG_SIZE, local_size_y = 1) in; - -layout(binding = 1) readonly buffer ConfigBuf { - Config conf; -}; - -layout(binding = 2) readonly buffer SceneBuf { - uint[] scene; -}; - -#define Monoid TagMonoid - -layout(set = 0, binding = 3) buffer OutBuf { - Monoid[] outbuf; -}; - -shared Monoid sh_scratch[WG_SIZE]; - -void main() { - uint ix = gl_GlobalInvocationID.x * N_ROWS; - uint scene_ix = (conf.pathtag_offset >> 2) + ix; - uint tag_word = scene[scene_ix]; - - Monoid agg = reduce_tag(tag_word); - for (uint i = 1; i < N_ROWS; i++) { - tag_word = scene[scene_ix + i]; - agg = combine_tag_monoid(agg, reduce_tag(tag_word)); - } - sh_scratch[gl_LocalInvocationID.x] = agg; - for (uint i = 0; i < LG_WG_SIZE; i++) { - barrier(); - // We could make this predicate tighter, but would it help? - if (gl_LocalInvocationID.x + (1u << i) < WG_SIZE) { - Monoid other = sh_scratch[gl_LocalInvocationID.x + (1u << i)]; - agg = combine_tag_monoid(agg, other); - } - barrier(); - sh_scratch[gl_LocalInvocationID.x] = agg; - } - if (gl_LocalInvocationID.x == 0) { - outbuf[gl_WorkGroupID.x] = agg; - } -} diff --git a/piet-gpu/shader/pathtag_scan.comp b/piet-gpu/shader/pathtag_scan.comp deleted file mode 100644 index 798622e..0000000 --- a/piet-gpu/shader/pathtag_scan.comp +++ /dev/null @@ -1,75 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense - -// A scan for path tag scan implemented as a tree reduction. - -#version 450 -#extension GL_GOOGLE_include_directive : enable - -#include "setup.h" -#include "pathtag.h" - -#define N_ROWS 8 -#define LG_WG_SIZE (7 + LG_WG_FACTOR) -#define WG_SIZE (1 << LG_WG_SIZE) -#define PARTITION_SIZE (WG_SIZE * N_ROWS) - -layout(local_size_x = WG_SIZE, local_size_y = 1) in; - -#define Monoid TagMonoid -#define combine_monoid combine_tag_monoid -#define monoid_identity tag_monoid_identity - -layout(binding = 0) buffer DataBuf { - Monoid[] data; -}; - -#ifndef ROOT -layout(binding = 1) readonly buffer ParentBuf { - Monoid[] parent; -}; -#endif - -shared Monoid sh_scratch[WG_SIZE]; - -void main() { - Monoid local[N_ROWS]; - - uint ix = gl_GlobalInvocationID.x * N_ROWS; - - local[0] = data[ix]; - for (uint i = 1; i < N_ROWS; i++) { - local[i] = combine_monoid(local[i - 1], data[ix + i]); - } - Monoid agg = local[N_ROWS - 1]; - sh_scratch[gl_LocalInvocationID.x] = agg; - for (uint i = 0; i < LG_WG_SIZE; i++) { - barrier(); - if (gl_LocalInvocationID.x >= (1u << i)) { - Monoid other = sh_scratch[gl_LocalInvocationID.x - (1u << i)]; - agg = combine_monoid(other, agg); - } - barrier(); - sh_scratch[gl_LocalInvocationID.x] = agg; - } - - barrier(); - // This could be a semigroup instead of a monoid if we reworked the - // conditional logic, but that might impact performance. 
- Monoid row = monoid_identity(); -#ifdef ROOT - if (gl_LocalInvocationID.x > 0) { - row = sh_scratch[gl_LocalInvocationID.x - 1]; - } -#else - if (gl_WorkGroupID.x > 0) { - row = parent[gl_WorkGroupID.x - 1]; - } - if (gl_LocalInvocationID.x > 0) { - row = combine_monoid(row, sh_scratch[gl_LocalInvocationID.x - 1]); - } -#endif - for (uint i = 0; i < N_ROWS; i++) { - Monoid m = combine_monoid(row, local[i]); - data[ix + i] = m; - } -} diff --git a/piet-gpu/shader/ptcl.h b/piet-gpu/shader/ptcl.h deleted file mode 100644 index 54dcc9e..0000000 --- a/piet-gpu/shader/ptcl.h +++ /dev/null @@ -1,426 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense - -// Code auto-generated by piet-gpu-derive - -struct CmdStrokeRef { - uint offset; -}; - -struct CmdFillRef { - uint offset; -}; - -struct CmdColorRef { - uint offset; -}; - -struct CmdLinGradRef { - uint offset; -}; - -struct CmdRadGradRef { - uint offset; -}; - -struct CmdImageRef { - uint offset; -}; - -struct CmdAlphaRef { - uint offset; -}; - -struct CmdEndClipRef { - uint offset; -}; - -struct CmdJumpRef { - uint offset; -}; - -struct CmdRef { - uint offset; -}; - -struct CmdStroke { - uint tile_ref; - float half_width; -}; - -#define CmdStroke_size 8 - -CmdStrokeRef CmdStroke_index(CmdStrokeRef ref, uint index) { - return CmdStrokeRef(ref.offset + index * CmdStroke_size); -} - -struct CmdFill { - uint tile_ref; - int backdrop; -}; - -#define CmdFill_size 8 - -CmdFillRef CmdFill_index(CmdFillRef ref, uint index) { - return CmdFillRef(ref.offset + index * CmdFill_size); -} - -struct CmdColor { - uint rgba_color; -}; - -#define CmdColor_size 4 - -CmdColorRef CmdColor_index(CmdColorRef ref, uint index) { - return CmdColorRef(ref.offset + index * CmdColor_size); -} - -struct CmdLinGrad { - uint index; - float line_x; - float line_y; - float line_c; -}; - -#define CmdLinGrad_size 16 - -CmdLinGradRef CmdLinGrad_index(CmdLinGradRef ref, uint index) { - return CmdLinGradRef(ref.offset + index * CmdLinGrad_size); -} - -struct CmdRadGrad { - uint index; - vec4 mat; - vec2 xlat; - vec2 c1; - float ra; - float roff; -}; - -#define CmdRadGrad_size 44 - -CmdRadGradRef CmdRadGrad_index(CmdRadGradRef ref, uint index) { - return CmdRadGradRef(ref.offset + index * CmdRadGrad_size); -} - -struct CmdImage { - uint index; - ivec2 offset; -}; - -#define CmdImage_size 8 - -CmdImageRef CmdImage_index(CmdImageRef ref, uint index) { - return CmdImageRef(ref.offset + index * CmdImage_size); -} - -struct CmdAlpha { - float alpha; -}; - -#define CmdAlpha_size 4 - -CmdAlphaRef CmdAlpha_index(CmdAlphaRef ref, uint index) { - return CmdAlphaRef(ref.offset + index * CmdAlpha_size); -} - -struct CmdEndClip { - uint blend; -}; - -#define CmdEndClip_size 4 - -CmdEndClipRef CmdEndClip_index(CmdEndClipRef ref, uint index) { - return CmdEndClipRef(ref.offset + index * CmdEndClip_size); -} - -struct CmdJump { - uint new_ref; -}; - -#define CmdJump_size 4 - -CmdJumpRef CmdJump_index(CmdJumpRef ref, uint index) { - return CmdJumpRef(ref.offset + index * CmdJump_size); -} - -#define Cmd_End 0 -#define Cmd_Fill 1 -#define Cmd_Stroke 2 -#define Cmd_Solid 3 -#define Cmd_Alpha 4 -#define Cmd_Color 5 -#define Cmd_LinGrad 6 -#define Cmd_RadGrad 7 -#define Cmd_Image 8 -#define Cmd_BeginClip 9 -#define Cmd_EndClip 10 -#define Cmd_Jump 11 -#define Cmd_size 48 - -CmdRef Cmd_index(CmdRef ref, uint index) { - return CmdRef(ref.offset + index * Cmd_size); -} - -struct CmdTag { - uint tag; - uint flags; -}; - -CmdStroke CmdStroke_read(Alloc a, CmdStrokeRef ref) { - uint ix = 
ref.offset >> 2; - uint raw0 = read_mem(a, ix + 0); - uint raw1 = read_mem(a, ix + 1); - CmdStroke s; - s.tile_ref = raw0; - s.half_width = uintBitsToFloat(raw1); - return s; -} - -void CmdStroke_write(Alloc a, CmdStrokeRef ref, CmdStroke s) { - uint ix = ref.offset >> 2; - write_mem(a, ix + 0, s.tile_ref); - write_mem(a, ix + 1, floatBitsToUint(s.half_width)); -} - -CmdFill CmdFill_read(Alloc a, CmdFillRef ref) { - uint ix = ref.offset >> 2; - uint raw0 = read_mem(a, ix + 0); - uint raw1 = read_mem(a, ix + 1); - CmdFill s; - s.tile_ref = raw0; - s.backdrop = int(raw1); - return s; -} - -void CmdFill_write(Alloc a, CmdFillRef ref, CmdFill s) { - uint ix = ref.offset >> 2; - write_mem(a, ix + 0, s.tile_ref); - write_mem(a, ix + 1, uint(s.backdrop)); -} - -CmdColor CmdColor_read(Alloc a, CmdColorRef ref) { - uint ix = ref.offset >> 2; - uint raw0 = read_mem(a, ix + 0); - CmdColor s; - s.rgba_color = raw0; - return s; -} - -void CmdColor_write(Alloc a, CmdColorRef ref, CmdColor s) { - uint ix = ref.offset >> 2; - write_mem(a, ix + 0, s.rgba_color); -} - -CmdLinGrad CmdLinGrad_read(Alloc a, CmdLinGradRef ref) { - uint ix = ref.offset >> 2; - uint raw0 = read_mem(a, ix + 0); - uint raw1 = read_mem(a, ix + 1); - uint raw2 = read_mem(a, ix + 2); - uint raw3 = read_mem(a, ix + 3); - CmdLinGrad s; - s.index = raw0; - s.line_x = uintBitsToFloat(raw1); - s.line_y = uintBitsToFloat(raw2); - s.line_c = uintBitsToFloat(raw3); - return s; -} - -void CmdLinGrad_write(Alloc a, CmdLinGradRef ref, CmdLinGrad s) { - uint ix = ref.offset >> 2; - write_mem(a, ix + 0, s.index); - write_mem(a, ix + 1, floatBitsToUint(s.line_x)); - write_mem(a, ix + 2, floatBitsToUint(s.line_y)); - write_mem(a, ix + 3, floatBitsToUint(s.line_c)); -} - -CmdRadGrad CmdRadGrad_read(Alloc a, CmdRadGradRef ref) { - uint ix = ref.offset >> 2; - uint raw0 = read_mem(a, ix + 0); - uint raw1 = read_mem(a, ix + 1); - uint raw2 = read_mem(a, ix + 2); - uint raw3 = read_mem(a, ix + 3); - uint raw4 = read_mem(a, ix + 4); - uint raw5 = read_mem(a, ix + 5); - uint raw6 = read_mem(a, ix + 6); - uint raw7 = read_mem(a, ix + 7); - uint raw8 = read_mem(a, ix + 8); - uint raw9 = read_mem(a, ix + 9); - uint raw10 = read_mem(a, ix + 10); - CmdRadGrad s; - s.index = raw0; - s.mat = vec4(uintBitsToFloat(raw1), uintBitsToFloat(raw2), uintBitsToFloat(raw3), uintBitsToFloat(raw4)); - s.xlat = vec2(uintBitsToFloat(raw5), uintBitsToFloat(raw6)); - s.c1 = vec2(uintBitsToFloat(raw7), uintBitsToFloat(raw8)); - s.ra = uintBitsToFloat(raw9); - s.roff = uintBitsToFloat(raw10); - return s; -} - -void CmdRadGrad_write(Alloc a, CmdRadGradRef ref, CmdRadGrad s) { - uint ix = ref.offset >> 2; - write_mem(a, ix + 0, s.index); - write_mem(a, ix + 1, floatBitsToUint(s.mat.x)); - write_mem(a, ix + 2, floatBitsToUint(s.mat.y)); - write_mem(a, ix + 3, floatBitsToUint(s.mat.z)); - write_mem(a, ix + 4, floatBitsToUint(s.mat.w)); - write_mem(a, ix + 5, floatBitsToUint(s.xlat.x)); - write_mem(a, ix + 6, floatBitsToUint(s.xlat.y)); - write_mem(a, ix + 7, floatBitsToUint(s.c1.x)); - write_mem(a, ix + 8, floatBitsToUint(s.c1.y)); - write_mem(a, ix + 9, floatBitsToUint(s.ra)); - write_mem(a, ix + 10, floatBitsToUint(s.roff)); -} - -CmdImage CmdImage_read(Alloc a, CmdImageRef ref) { - uint ix = ref.offset >> 2; - uint raw0 = read_mem(a, ix + 0); - uint raw1 = read_mem(a, ix + 1); - CmdImage s; - s.index = raw0; - s.offset = ivec2(int(raw1 << 16) >> 16, int(raw1) >> 16); - return s; -} - -void CmdImage_write(Alloc a, CmdImageRef ref, CmdImage s) { - uint ix = ref.offset >> 2; - 
write_mem(a, ix + 0, s.index); - write_mem(a, ix + 1, (uint(s.offset.x) & 0xffff) | (uint(s.offset.y) << 16)); -} - -CmdAlpha CmdAlpha_read(Alloc a, CmdAlphaRef ref) { - uint ix = ref.offset >> 2; - uint raw0 = read_mem(a, ix + 0); - CmdAlpha s; - s.alpha = uintBitsToFloat(raw0); - return s; -} - -void CmdAlpha_write(Alloc a, CmdAlphaRef ref, CmdAlpha s) { - uint ix = ref.offset >> 2; - write_mem(a, ix + 0, floatBitsToUint(s.alpha)); -} - -CmdEndClip CmdEndClip_read(Alloc a, CmdEndClipRef ref) { - uint ix = ref.offset >> 2; - uint raw0 = read_mem(a, ix + 0); - CmdEndClip s; - s.blend = raw0; - return s; -} - -void CmdEndClip_write(Alloc a, CmdEndClipRef ref, CmdEndClip s) { - uint ix = ref.offset >> 2; - write_mem(a, ix + 0, s.blend); -} - -CmdJump CmdJump_read(Alloc a, CmdJumpRef ref) { - uint ix = ref.offset >> 2; - uint raw0 = read_mem(a, ix + 0); - CmdJump s; - s.new_ref = raw0; - return s; -} - -void CmdJump_write(Alloc a, CmdJumpRef ref, CmdJump s) { - uint ix = ref.offset >> 2; - write_mem(a, ix + 0, s.new_ref); -} - -CmdTag Cmd_tag(Alloc a, CmdRef ref) { - uint tag_and_flags = read_mem(a, ref.offset >> 2); - return CmdTag(tag_and_flags & 0xffff, tag_and_flags >> 16); -} - -CmdFill Cmd_Fill_read(Alloc a, CmdRef ref) { - return CmdFill_read(a, CmdFillRef(ref.offset + 4)); -} - -CmdStroke Cmd_Stroke_read(Alloc a, CmdRef ref) { - return CmdStroke_read(a, CmdStrokeRef(ref.offset + 4)); -} - -CmdAlpha Cmd_Alpha_read(Alloc a, CmdRef ref) { - return CmdAlpha_read(a, CmdAlphaRef(ref.offset + 4)); -} - -CmdColor Cmd_Color_read(Alloc a, CmdRef ref) { - return CmdColor_read(a, CmdColorRef(ref.offset + 4)); -} - -CmdLinGrad Cmd_LinGrad_read(Alloc a, CmdRef ref) { - return CmdLinGrad_read(a, CmdLinGradRef(ref.offset + 4)); -} - -CmdRadGrad Cmd_RadGrad_read(Alloc a, CmdRef ref) { - return CmdRadGrad_read(a, CmdRadGradRef(ref.offset + 4)); -} - -CmdImage Cmd_Image_read(Alloc a, CmdRef ref) { - return CmdImage_read(a, CmdImageRef(ref.offset + 4)); -} - -CmdEndClip Cmd_EndClip_read(Alloc a, CmdRef ref) { - return CmdEndClip_read(a, CmdEndClipRef(ref.offset + 4)); -} - -CmdJump Cmd_Jump_read(Alloc a, CmdRef ref) { - return CmdJump_read(a, CmdJumpRef(ref.offset + 4)); -} - -void Cmd_End_write(Alloc a, CmdRef ref) { - write_mem(a, ref.offset >> 2, Cmd_End); -} - -void Cmd_Fill_write(Alloc a, CmdRef ref, CmdFill s) { - write_mem(a, ref.offset >> 2, Cmd_Fill); - CmdFill_write(a, CmdFillRef(ref.offset + 4), s); -} - -void Cmd_Stroke_write(Alloc a, CmdRef ref, CmdStroke s) { - write_mem(a, ref.offset >> 2, Cmd_Stroke); - CmdStroke_write(a, CmdStrokeRef(ref.offset + 4), s); -} - -void Cmd_Solid_write(Alloc a, CmdRef ref) { - write_mem(a, ref.offset >> 2, Cmd_Solid); -} - -void Cmd_Alpha_write(Alloc a, CmdRef ref, CmdAlpha s) { - write_mem(a, ref.offset >> 2, Cmd_Alpha); - CmdAlpha_write(a, CmdAlphaRef(ref.offset + 4), s); -} - -void Cmd_Color_write(Alloc a, CmdRef ref, CmdColor s) { - write_mem(a, ref.offset >> 2, Cmd_Color); - CmdColor_write(a, CmdColorRef(ref.offset + 4), s); -} - -void Cmd_LinGrad_write(Alloc a, CmdRef ref, CmdLinGrad s) { - write_mem(a, ref.offset >> 2, Cmd_LinGrad); - CmdLinGrad_write(a, CmdLinGradRef(ref.offset + 4), s); -} - -void Cmd_RadGrad_write(Alloc a, CmdRef ref, CmdRadGrad s) { - write_mem(a, ref.offset >> 2, Cmd_RadGrad); - CmdRadGrad_write(a, CmdRadGradRef(ref.offset + 4), s); -} - -void Cmd_Image_write(Alloc a, CmdRef ref, CmdImage s) { - write_mem(a, ref.offset >> 2, Cmd_Image); - CmdImage_write(a, CmdImageRef(ref.offset + 4), s); -} - -void Cmd_BeginClip_write(Alloc a, 
CmdRef ref) { - write_mem(a, ref.offset >> 2, Cmd_BeginClip); -} - -void Cmd_EndClip_write(Alloc a, CmdRef ref, CmdEndClip s) { - write_mem(a, ref.offset >> 2, Cmd_EndClip); - CmdEndClip_write(a, CmdEndClipRef(ref.offset + 4), s); -} - -void Cmd_Jump_write(Alloc a, CmdRef ref, CmdJump s) { - write_mem(a, ref.offset >> 2, Cmd_Jump); - CmdJump_write(a, CmdJumpRef(ref.offset + 4), s); -} - diff --git a/piet-gpu/shader/scene.h b/piet-gpu/shader/scene.h deleted file mode 100644 index 3e74b69..0000000 --- a/piet-gpu/shader/scene.h +++ /dev/null @@ -1,350 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense - -// Code auto-generated by piet-gpu-derive - -struct LineSegRef { - uint offset; -}; - -struct QuadSegRef { - uint offset; -}; - -struct CubicSegRef { - uint offset; -}; - -struct FillColorRef { - uint offset; -}; - -struct FillLinGradientRef { - uint offset; -}; - -struct FillImageRef { - uint offset; -}; - -struct SetLineWidthRef { - uint offset; -}; - -struct TransformRef { - uint offset; -}; - -struct ClipRef { - uint offset; -}; - -struct SetFillModeRef { - uint offset; -}; - -struct ElementRef { - uint offset; -}; - -struct LineSeg { - vec2 p0; - vec2 p1; -}; - -#define LineSeg_size 16 - -LineSegRef LineSeg_index(LineSegRef ref, uint index) { - return LineSegRef(ref.offset + index * LineSeg_size); -} - -struct QuadSeg { - vec2 p0; - vec2 p1; - vec2 p2; -}; - -#define QuadSeg_size 24 - -QuadSegRef QuadSeg_index(QuadSegRef ref, uint index) { - return QuadSegRef(ref.offset + index * QuadSeg_size); -} - -struct CubicSeg { - vec2 p0; - vec2 p1; - vec2 p2; - vec2 p3; -}; - -#define CubicSeg_size 32 - -CubicSegRef CubicSeg_index(CubicSegRef ref, uint index) { - return CubicSegRef(ref.offset + index * CubicSeg_size); -} - -struct FillColor { - uint rgba_color; -}; - -#define FillColor_size 4 - -FillColorRef FillColor_index(FillColorRef ref, uint index) { - return FillColorRef(ref.offset + index * FillColor_size); -} - -struct FillLinGradient { - uint index; - vec2 p0; - vec2 p1; -}; - -#define FillLinGradient_size 20 - -FillLinGradientRef FillLinGradient_index(FillLinGradientRef ref, uint index) { - return FillLinGradientRef(ref.offset + index * FillLinGradient_size); -} - -struct FillImage { - uint index; - ivec2 offset; -}; - -#define FillImage_size 8 - -FillImageRef FillImage_index(FillImageRef ref, uint index) { - return FillImageRef(ref.offset + index * FillImage_size); -} - -struct SetLineWidth { - float width; -}; - -#define SetLineWidth_size 4 - -SetLineWidthRef SetLineWidth_index(SetLineWidthRef ref, uint index) { - return SetLineWidthRef(ref.offset + index * SetLineWidth_size); -} - -struct Transform { - vec4 mat; - vec2 translate; -}; - -#define Transform_size 24 - -TransformRef Transform_index(TransformRef ref, uint index) { - return TransformRef(ref.offset + index * Transform_size); -} - -struct Clip { - vec4 bbox; - uint blend; -}; - -#define Clip_size 20 - -ClipRef Clip_index(ClipRef ref, uint index) { - return ClipRef(ref.offset + index * Clip_size); -} - -struct SetFillMode { - uint fill_mode; -}; - -#define SetFillMode_size 4 - -SetFillModeRef SetFillMode_index(SetFillModeRef ref, uint index) { - return SetFillModeRef(ref.offset + index * SetFillMode_size); -} - -#define Element_Nop 0 -#define Element_Line 1 -#define Element_Quad 2 -#define Element_Cubic 3 -#define Element_FillColor 4 -#define Element_FillLinGradient 5 -#define Element_FillImage 6 -#define Element_SetLineWidth 7 -#define Element_Transform 8 -#define Element_BeginClip 9 -#define Element_EndClip 
10 -#define Element_SetFillMode 11 -#define Element_size 36 - -ElementRef Element_index(ElementRef ref, uint index) { - return ElementRef(ref.offset + index * Element_size); -} - -struct ElementTag { - uint tag; - uint flags; -}; - -LineSeg LineSeg_read(LineSegRef ref) { - uint ix = ref.offset >> 2; - uint raw0 = scene[ix + 0]; - uint raw1 = scene[ix + 1]; - uint raw2 = scene[ix + 2]; - uint raw3 = scene[ix + 3]; - LineSeg s; - s.p0 = vec2(uintBitsToFloat(raw0), uintBitsToFloat(raw1)); - s.p1 = vec2(uintBitsToFloat(raw2), uintBitsToFloat(raw3)); - return s; -} - -QuadSeg QuadSeg_read(QuadSegRef ref) { - uint ix = ref.offset >> 2; - uint raw0 = scene[ix + 0]; - uint raw1 = scene[ix + 1]; - uint raw2 = scene[ix + 2]; - uint raw3 = scene[ix + 3]; - uint raw4 = scene[ix + 4]; - uint raw5 = scene[ix + 5]; - QuadSeg s; - s.p0 = vec2(uintBitsToFloat(raw0), uintBitsToFloat(raw1)); - s.p1 = vec2(uintBitsToFloat(raw2), uintBitsToFloat(raw3)); - s.p2 = vec2(uintBitsToFloat(raw4), uintBitsToFloat(raw5)); - return s; -} - -CubicSeg CubicSeg_read(CubicSegRef ref) { - uint ix = ref.offset >> 2; - uint raw0 = scene[ix + 0]; - uint raw1 = scene[ix + 1]; - uint raw2 = scene[ix + 2]; - uint raw3 = scene[ix + 3]; - uint raw4 = scene[ix + 4]; - uint raw5 = scene[ix + 5]; - uint raw6 = scene[ix + 6]; - uint raw7 = scene[ix + 7]; - CubicSeg s; - s.p0 = vec2(uintBitsToFloat(raw0), uintBitsToFloat(raw1)); - s.p1 = vec2(uintBitsToFloat(raw2), uintBitsToFloat(raw3)); - s.p2 = vec2(uintBitsToFloat(raw4), uintBitsToFloat(raw5)); - s.p3 = vec2(uintBitsToFloat(raw6), uintBitsToFloat(raw7)); - return s; -} - -FillColor FillColor_read(FillColorRef ref) { - uint ix = ref.offset >> 2; - uint raw0 = scene[ix + 0]; - FillColor s; - s.rgba_color = raw0; - return s; -} - -FillLinGradient FillLinGradient_read(FillLinGradientRef ref) { - uint ix = ref.offset >> 2; - uint raw0 = scene[ix + 0]; - uint raw1 = scene[ix + 1]; - uint raw2 = scene[ix + 2]; - uint raw3 = scene[ix + 3]; - uint raw4 = scene[ix + 4]; - FillLinGradient s; - s.index = raw0; - s.p0 = vec2(uintBitsToFloat(raw1), uintBitsToFloat(raw2)); - s.p1 = vec2(uintBitsToFloat(raw3), uintBitsToFloat(raw4)); - return s; -} - -FillImage FillImage_read(FillImageRef ref) { - uint ix = ref.offset >> 2; - uint raw0 = scene[ix + 0]; - uint raw1 = scene[ix + 1]; - FillImage s; - s.index = raw0; - s.offset = ivec2(int(raw1 << 16) >> 16, int(raw1) >> 16); - return s; -} - -SetLineWidth SetLineWidth_read(SetLineWidthRef ref) { - uint ix = ref.offset >> 2; - uint raw0 = scene[ix + 0]; - SetLineWidth s; - s.width = uintBitsToFloat(raw0); - return s; -} - -Transform Transform_read(TransformRef ref) { - uint ix = ref.offset >> 2; - uint raw0 = scene[ix + 0]; - uint raw1 = scene[ix + 1]; - uint raw2 = scene[ix + 2]; - uint raw3 = scene[ix + 3]; - uint raw4 = scene[ix + 4]; - uint raw5 = scene[ix + 5]; - Transform s; - s.mat = vec4(uintBitsToFloat(raw0), uintBitsToFloat(raw1), uintBitsToFloat(raw2), uintBitsToFloat(raw3)); - s.translate = vec2(uintBitsToFloat(raw4), uintBitsToFloat(raw5)); - return s; -} - -Clip Clip_read(ClipRef ref) { - uint ix = ref.offset >> 2; - uint raw0 = scene[ix + 0]; - uint raw1 = scene[ix + 1]; - uint raw2 = scene[ix + 2]; - uint raw3 = scene[ix + 3]; - Clip s; - s.bbox = vec4(uintBitsToFloat(raw0), uintBitsToFloat(raw1), uintBitsToFloat(raw2), uintBitsToFloat(raw3)); - s.blend = scene[ix + 4]; - return s; -} - -SetFillMode SetFillMode_read(SetFillModeRef ref) { - uint ix = ref.offset >> 2; - uint raw0 = scene[ix + 0]; - SetFillMode s; - s.fill_mode = raw0; - 
return s; -} - -ElementTag Element_tag(ElementRef ref) { - uint tag_and_flags = scene[ref.offset >> 2]; - return ElementTag(tag_and_flags & 0xffff, tag_and_flags >> 16); -} - -LineSeg Element_Line_read(ElementRef ref) { - return LineSeg_read(LineSegRef(ref.offset + 4)); -} - -QuadSeg Element_Quad_read(ElementRef ref) { - return QuadSeg_read(QuadSegRef(ref.offset + 4)); -} - -CubicSeg Element_Cubic_read(ElementRef ref) { - return CubicSeg_read(CubicSegRef(ref.offset + 4)); -} - -FillColor Element_FillColor_read(ElementRef ref) { - return FillColor_read(FillColorRef(ref.offset + 4)); -} - -FillLinGradient Element_FillLinGradient_read(ElementRef ref) { - return FillLinGradient_read(FillLinGradientRef(ref.offset + 4)); -} - -FillImage Element_FillImage_read(ElementRef ref) { - return FillImage_read(FillImageRef(ref.offset + 4)); -} - -SetLineWidth Element_SetLineWidth_read(ElementRef ref) { - return SetLineWidth_read(SetLineWidthRef(ref.offset + 4)); -} - -Transform Element_Transform_read(ElementRef ref) { - return Transform_read(TransformRef(ref.offset + 4)); -} - -Clip Element_BeginClip_read(ElementRef ref) { - return Clip_read(ClipRef(ref.offset + 4)); -} - -Clip Element_EndClip_read(ElementRef ref) { - return Clip_read(ClipRef(ref.offset + 4)); -} - -SetFillMode Element_SetFillMode_read(ElementRef ref) { - return SetFillMode_read(SetFillModeRef(ref.offset + 4)); -} - diff --git a/piet-gpu/shader/setup.h b/piet-gpu/shader/setup.h deleted file mode 100644 index e6b6e3f..0000000 --- a/piet-gpu/shader/setup.h +++ /dev/null @@ -1,103 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense - -// Various constants for the sizes of groups and tiles. - -// Much of this will be made dynamic in various ways, but for now it's easiest -// to hardcode and keep all in one place. - -// A LG_WG_FACTOR of n scales workgroup sizes by 2^n. Use 0 for a -// maximum workgroup size of 128, or 1 for a maximum size of 256. 
-#define LG_WG_FACTOR 1 -#define WG_FACTOR (1<> 2; - uint raw0 = state[ix + 0]; - uint raw1 = state[ix + 1]; - uint raw2 = state[ix + 2]; - uint raw3 = state[ix + 3]; - uint raw4 = state[ix + 4]; - uint raw5 = state[ix + 5]; - uint raw6 = state[ix + 6]; - uint raw7 = state[ix + 7]; - uint raw8 = state[ix + 8]; - uint raw9 = state[ix + 9]; - uint raw10 = state[ix + 10]; - uint raw11 = state[ix + 11]; - uint raw12 = state[ix + 12]; - uint raw13 = state[ix + 13]; - uint raw14 = state[ix + 14]; - State s; - s.mat = vec4(uintBitsToFloat(raw0), uintBitsToFloat(raw1), uintBitsToFloat(raw2), uintBitsToFloat(raw3)); - s.translate = vec2(uintBitsToFloat(raw4), uintBitsToFloat(raw5)); - s.bbox = vec4(uintBitsToFloat(raw6), uintBitsToFloat(raw7), uintBitsToFloat(raw8), uintBitsToFloat(raw9)); - s.linewidth = uintBitsToFloat(raw10); - s.flags = raw11; - s.path_count = raw12; - s.pathseg_count = raw13; - s.trans_count = raw14; - return s; -} - -void State_write(StateRef ref, State s) { - uint ix = ref.offset >> 2; - state[ix + 0] = floatBitsToUint(s.mat.x); - state[ix + 1] = floatBitsToUint(s.mat.y); - state[ix + 2] = floatBitsToUint(s.mat.z); - state[ix + 3] = floatBitsToUint(s.mat.w); - state[ix + 4] = floatBitsToUint(s.translate.x); - state[ix + 5] = floatBitsToUint(s.translate.y); - state[ix + 6] = floatBitsToUint(s.bbox.x); - state[ix + 7] = floatBitsToUint(s.bbox.y); - state[ix + 8] = floatBitsToUint(s.bbox.z); - state[ix + 9] = floatBitsToUint(s.bbox.w); - state[ix + 10] = floatBitsToUint(s.linewidth); - state[ix + 11] = s.flags; - state[ix + 12] = s.path_count; - state[ix + 13] = s.pathseg_count; - state[ix + 14] = s.trans_count; -} - diff --git a/piet-gpu/shader/tile.h b/piet-gpu/shader/tile.h deleted file mode 100644 index e11329c..0000000 --- a/piet-gpu/shader/tile.h +++ /dev/null @@ -1,150 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense - -// Code auto-generated by piet-gpu-derive - -struct PathRef { - uint offset; -}; - -struct TileRef { - uint offset; -}; - -struct TileSegRef { - uint offset; -}; - -struct TransformSegRef { - uint offset; -}; - -struct Path { - uvec4 bbox; - TileRef tiles; -}; - -#define Path_size 12 - -PathRef Path_index(PathRef ref, uint index) { - return PathRef(ref.offset + index * Path_size); -} - -struct Tile { - TileSegRef tile; - int backdrop; -}; - -#define Tile_size 8 - -TileRef Tile_index(TileRef ref, uint index) { - return TileRef(ref.offset + index * Tile_size); -} - -struct TileSeg { - vec2 origin; - vec2 vector; - float y_edge; - TileSegRef next; -}; - -#define TileSeg_size 24 - -TileSegRef TileSeg_index(TileSegRef ref, uint index) { - return TileSegRef(ref.offset + index * TileSeg_size); -} - -struct TransformSeg { - vec4 mat; - vec2 translate; -}; - -#define TransformSeg_size 24 - -TransformSegRef TransformSeg_index(TransformSegRef ref, uint index) { - return TransformSegRef(ref.offset + index * TransformSeg_size); -} - -Path Path_read(Alloc a, PathRef ref) { - uint ix = ref.offset >> 2; - uint raw0 = read_mem(a, ix + 0); - uint raw1 = read_mem(a, ix + 1); - uint raw2 = read_mem(a, ix + 2); - Path s; - s.bbox = uvec4(raw0 & 0xffff, raw0 >> 16, raw1 & 0xffff, raw1 >> 16); - s.tiles = TileRef(raw2); - return s; -} - -void Path_write(Alloc a, PathRef ref, Path s) { - uint ix = ref.offset >> 2; - write_mem(a, ix + 0, s.bbox.x | (s.bbox.y << 16)); - write_mem(a, ix + 1, s.bbox.z | (s.bbox.w << 16)); - write_mem(a, ix + 2, s.tiles.offset); -} - -Tile Tile_read(Alloc a, TileRef ref) { - uint ix = ref.offset >> 2; - uint raw0 = read_mem(a, ix + 
0); - uint raw1 = read_mem(a, ix + 1); - Tile s; - s.tile = TileSegRef(raw0); - s.backdrop = int(raw1); - return s; -} - -void Tile_write(Alloc a, TileRef ref, Tile s) { - uint ix = ref.offset >> 2; - write_mem(a, ix + 0, s.tile.offset); - write_mem(a, ix + 1, uint(s.backdrop)); -} - -TileSeg TileSeg_read(Alloc a, TileSegRef ref) { - uint ix = ref.offset >> 2; - uint raw0 = read_mem(a, ix + 0); - uint raw1 = read_mem(a, ix + 1); - uint raw2 = read_mem(a, ix + 2); - uint raw3 = read_mem(a, ix + 3); - uint raw4 = read_mem(a, ix + 4); - uint raw5 = read_mem(a, ix + 5); - TileSeg s; - s.origin = vec2(uintBitsToFloat(raw0), uintBitsToFloat(raw1)); - s.vector = vec2(uintBitsToFloat(raw2), uintBitsToFloat(raw3)); - s.y_edge = uintBitsToFloat(raw4); - s.next = TileSegRef(raw5); - return s; -} - -void TileSeg_write(Alloc a, TileSegRef ref, TileSeg s) { - uint ix = ref.offset >> 2; - write_mem(a, ix + 0, floatBitsToUint(s.origin.x)); - write_mem(a, ix + 1, floatBitsToUint(s.origin.y)); - write_mem(a, ix + 2, floatBitsToUint(s.vector.x)); - write_mem(a, ix + 3, floatBitsToUint(s.vector.y)); - write_mem(a, ix + 4, floatBitsToUint(s.y_edge)); - write_mem(a, ix + 5, s.next.offset); -} - -TransformSeg TransformSeg_read(Alloc a, TransformSegRef ref) { - uint ix = ref.offset >> 2; - uint raw0 = read_mem(a, ix + 0); - uint raw1 = read_mem(a, ix + 1); - uint raw2 = read_mem(a, ix + 2); - uint raw3 = read_mem(a, ix + 3); - uint raw4 = read_mem(a, ix + 4); - uint raw5 = read_mem(a, ix + 5); - TransformSeg s; - s.mat = vec4(uintBitsToFloat(raw0), uintBitsToFloat(raw1), uintBitsToFloat(raw2), uintBitsToFloat(raw3)); - s.translate = vec2(uintBitsToFloat(raw4), uintBitsToFloat(raw5)); - return s; -} - -void TransformSeg_write(Alloc a, TransformSegRef ref, TransformSeg s) { - uint ix = ref.offset >> 2; - write_mem(a, ix + 0, floatBitsToUint(s.mat.x)); - write_mem(a, ix + 1, floatBitsToUint(s.mat.y)); - write_mem(a, ix + 2, floatBitsToUint(s.mat.z)); - write_mem(a, ix + 3, floatBitsToUint(s.mat.w)); - write_mem(a, ix + 4, floatBitsToUint(s.translate.x)); - write_mem(a, ix + 5, floatBitsToUint(s.translate.y)); -} - diff --git a/piet-gpu/shader/tile_alloc.comp b/piet-gpu/shader/tile_alloc.comp deleted file mode 100644 index 63ced91..0000000 --- a/piet-gpu/shader/tile_alloc.comp +++ /dev/null @@ -1,112 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense - -// Allocation and initialization of tiles for paths. 
- -#version 450 -#extension GL_GOOGLE_include_directive : enable - -#include "mem.h" -#include "setup.h" - -#define LG_TILE_ALLOC_WG (7 + LG_WG_FACTOR) -#define TILE_ALLOC_WG (1 << LG_TILE_ALLOC_WG) - -layout(local_size_x = TILE_ALLOC_WG, local_size_y = 1) in; - -layout(set = 0, binding = 1) readonly buffer ConfigBuf { - Config conf; -}; - -layout(binding = 2) readonly buffer SceneBuf { - uint[] scene; -}; - -#include "drawtag.h" -#include "tile.h" - -// scale factors useful for converting coordinates to tiles -#define SX (1.0 / float(TILE_WIDTH_PX)) -#define SY (1.0 / float(TILE_HEIGHT_PX)) - -shared uint sh_tile_count[TILE_ALLOC_WG]; -shared uint sh_tile_offset; - -vec4 load_draw_bbox(uint draw_ix) { - uint base = (conf.draw_bbox_alloc.offset >> 2) + 4 * draw_ix; - float x0 = uintBitsToFloat(memory[base]); - float y0 = uintBitsToFloat(memory[base + 1]); - float x1 = uintBitsToFloat(memory[base + 2]); - float y1 = uintBitsToFloat(memory[base + 3]); - vec4 bbox = vec4(x0, y0, x1, y1); - return bbox; -} - -void main() { - if (!check_deps(STAGE_BINNING)) { - return; - } - uint th_ix = gl_LocalInvocationID.x; - uint element_ix = gl_GlobalInvocationID.x; - // At the moment, element_ix == path_ix. The clip-intersected bounding boxes - // for elements (draw objects) are computed in the binning stage, but at some - // point we'll probably want to break that correspondence. Tiles should be - // allocated for paths, not draw objs. EndClip doesn't need an allocation. - PathRef path_ref = PathRef(conf.tile_alloc.offset + element_ix * Path_size); - uint drawtag_base = conf.drawtag_offset >> 2; - - uint drawtag = Drawtag_Nop; - if (element_ix < conf.n_elements) { - drawtag = scene[drawtag_base + element_ix]; - } - int x0 = 0, y0 = 0, x1 = 0, y1 = 0; - // Allocate an empty path for EndClip; at some point we'll change - // this to be per path rather than per draw object. - if (drawtag != Drawtag_Nop && drawtag != Drawtag_EndClip) { - vec4 bbox = load_draw_bbox(element_ix); - x0 = int(floor(bbox.x * SX)); - y0 = int(floor(bbox.y * SY)); - x1 = int(ceil(bbox.z * SX)); - y1 = int(ceil(bbox.w * SY)); - } - x0 = clamp(x0, 0, int(conf.width_in_tiles)); - y0 = clamp(y0, 0, int(conf.height_in_tiles)); - x1 = clamp(x1, 0, int(conf.width_in_tiles)); - y1 = clamp(y1, 0, int(conf.height_in_tiles)); - - Path path; - path.bbox = uvec4(x0, y0, x1, y1); - uint tile_count = (x1 - x0) * (y1 - y0); - - sh_tile_count[th_ix] = tile_count; - uint total_tile_count = tile_count; - // Prefix sum of sh_tile_count - for (uint i = 0; i < LG_TILE_ALLOC_WG; i++) { - barrier(); - if (th_ix >= (1u << i)) { - total_tile_count += sh_tile_count[th_ix - (1u << i)]; - } - barrier(); - sh_tile_count[th_ix] = total_tile_count; - } - if (th_ix == TILE_ALLOC_WG - 1) { - sh_tile_offset = malloc_stage(total_tile_count * Tile_size, conf.mem_size, STAGE_TILE_ALLOC); - } - barrier(); - uint offset_start = sh_tile_offset; - if (offset_start == MALLOC_FAILED) { - return; - } - - if (element_ix < conf.n_elements) { - uint tile_subix = th_ix > 0 ? 
sh_tile_count[th_ix - 1] : 0; - path.tiles = TileRef(offset_start + Tile_size * tile_subix); - Path_write(conf.tile_alloc, path_ref, path); - } - - // Zero out allocated tiles efficiently - uint total_count = sh_tile_count[TILE_ALLOC_WG - 1] * (Tile_size / 4); - uint start_ix = offset_start >> 2; - for (uint i = th_ix; i < total_count; i += TILE_ALLOC_WG) { - memory[start_ix + i] = 0; - } -} diff --git a/piet-gpu/src/lib.rs b/piet-gpu/src/lib.rs deleted file mode 100644 index 98347fc..0000000 --- a/piet-gpu/src/lib.rs +++ /dev/null @@ -1,928 +0,0 @@ -mod pico_svg; -mod ramp; -mod render_driver; -pub mod samples; -mod simple_text; -pub mod stages; - -pub use piet_scene as scene; - -use bytemuck::{Pod, Zeroable}; -use scene::ResourcePatch; -use std::convert::TryInto; - -pub use render_driver::RenderDriver; -pub use simple_text::SimpleText; - -use piet_gpu_hal::{ - include_shader, BindType, BufWrite, Buffer, BufferUsage, CmdBuf, ComputePassDescriptor, - DescriptorSet, Error, Image, ImageLayout, Pipeline, QueryPool, Session, -}; - -use piet_scene::Scene; - -pub use pico_svg::PicoSvg; -use stages::{ClipBinding, ElementBinding, ElementCode, DRAW_PART_SIZE, PATHSEG_PART_SIZE}; - -use crate::stages::{ClipCode, Config, ElementStage, CLIP_PART_SIZE}; - -const TILE_W: usize = 16; -const TILE_H: usize = 16; - -const PTCL_INITIAL_ALLOC: usize = 1024; - -const N_GRADIENT_SAMPLES: usize = 512; -// TODO: make this dynamic -const N_GRADIENTS: usize = 256; - -#[allow(unused)] -fn dump_scene(buf: &[u8]) { - for i in 0..(buf.len() / 4) { - let mut buf_u32 = [0u8; 4]; - buf_u32.copy_from_slice(&buf[i * 4..i * 4 + 4]); - println!("{:4x}: {:8x}", i * 4, u32::from_le_bytes(buf_u32)); - } -} - -#[allow(unused)] -pub fn dump_k1_data(k1_buf: &[u32]) { - for i in 0..k1_buf.len() { - if k1_buf[i] != 0 { - println!("{:4x}: {:8x}", i * 4, k1_buf[i]); - } - } -} - -pub struct RenderConfig { - width: usize, - height: usize, - format: PixelFormat, -} - -// Should we just use the enum from piet-gpu-hal? -pub enum PixelFormat { - A8, - Rgba8, -} - -#[repr(C)] -#[derive(Clone, Copy, Debug, Zeroable, Pod)] -pub(crate) struct MemoryHeader { - mem_offset: u32, - mem_error: u32, - blend_offset: u32, -} - -/// The sizes of various objects in the encoded scene, needed for memory layout. -#[derive(Default)] -pub(crate) struct SceneStats { - // Slices of scene encoding, in order - pub n_drawobj: usize, - pub drawdata_len: usize, - pub n_transform: usize, - pub linewidth_len: usize, - pub pathseg_len: usize, - pub n_pathtag: usize, - - // Additional stats needed needed for memory layout & dispatch - pub n_path: u32, - pub n_pathseg: u32, - pub n_clip: u32, -} - -pub struct Renderer { - // These sizes are aligned to tile boundaries, though at some point - // we'll want to have a good strategy for dealing with odd sizes. - width: usize, - height: usize, - - pub image_dev: Image, // resulting image - - // TODO: two changes needed here. First, if we're fencing on the coarse - // pipeline, then we only need one copy (this changes if we also bind the - // scene buffer in fine rasterization, which might be a good idea to reduce - // copying). Second, there should be a staging buffer for discrete cards. 
- scene_bufs: Vec, - - memory_buf_host: Vec, - memory_buf_dev: Buffer, - memory_buf_readback: Buffer, - - // Staging buffers - config_bufs: Vec, - // Device config buf - config_buf: Buffer, - - blend_buf: Buffer, - - // New element pipeline - element_code: ElementCode, - element_stage: ElementStage, - element_bindings: Vec, - - clip_code: ClipCode, - clip_binding: ClipBinding, - - tile_pipeline: Pipeline, - tile_ds: Vec, - - path_pipeline: Pipeline, - path_ds: DescriptorSet, - - backdrop_pipeline: Pipeline, - backdrop_ds: DescriptorSet, - backdrop_y: u32, - - bin_pipeline: Pipeline, - bin_ds: DescriptorSet, - - coarse_pipeline: Pipeline, - coarse_ds: Vec, - - k4_pipeline: Pipeline, - k4_ds: DescriptorSet, - - scene_stats: SceneStats, - // TODO: the following stats are now redundant and can be removed. - n_transform: usize, - n_drawobj: usize, - n_paths: usize, - n_pathseg: usize, - n_pathtag: usize, - n_clip: u32, - - // Keep a reference to the image so that it is not destroyed. - _bg_image: Image, - - gradient_bufs: Vec, - gradients: Image, - - ramps: ramp::RampCache, - drawdata_patches: Vec<(usize, u32)>, -} - -impl RenderConfig { - pub fn new(width: usize, height: usize) -> RenderConfig { - RenderConfig { - width, - height, - format: PixelFormat::Rgba8, - } - } - - pub fn pixel_format(mut self, format: PixelFormat) -> Self { - self.format = format; - self - } -} - -impl Renderer { - /// The number of query pool entries needed to run the renderer. - pub const QUERY_POOL_SIZE: u32 = Self::COARSE_QUERY_POOL_SIZE + Self::FINE_QUERY_POOL_SIZE; - - /// The number of query pool entries needed to run the coarse pipeline. - pub const COARSE_QUERY_POOL_SIZE: u32 = 10; - - /// The number of query pool entries needed to run the fine pipeline. - pub const FINE_QUERY_POOL_SIZE: u32 = 2; - - pub unsafe fn new( - session: &Session, - width: usize, - height: usize, - n_bufs: usize, - ) -> Result { - let config = RenderConfig::new(width, height); - Self::new_from_config(session, config, n_bufs) - } - - /// Create a new renderer. 
- pub unsafe fn new_from_config( - session: &Session, - config: RenderConfig, - n_bufs: usize, - ) -> Result { - // For now, round up to tile alignment - let width = config.width; - let height = config.height; - let width = width + (width.wrapping_neg() & (TILE_W - 1)); - let height = height + (height.wrapping_neg() & (TILE_W - 1)); - let dev = BufferUsage::STORAGE | BufferUsage::COPY_DST; - let usage_mem_dev = BufferUsage::STORAGE | BufferUsage::COPY_DST | BufferUsage::COPY_SRC; - let usage_blend = BufferUsage::STORAGE; - let usage_upload = BufferUsage::MAP_WRITE | BufferUsage::COPY_SRC; - let usage_readback = BufferUsage::MAP_READ | BufferUsage::COPY_DST; - - // TODO: separate staging buffer (if needed) - let scene_bufs = (0..n_bufs) - .map(|_| { - session - .create_buffer(8 * 1024 * 1024, usage_upload) - .unwrap() - }) - .collect::>(); - - let image_format = match config.format { - PixelFormat::A8 => piet_gpu_hal::ImageFormat::A8, - PixelFormat::Rgba8 => piet_gpu_hal::ImageFormat::Surface, - }; - let image_dev = session.create_image2d(width as u32, height as u32, image_format)?; - - const CONFIG_BUFFER_SIZE: u64 = std::mem::size_of::() as u64; - let config_buf = session.create_buffer(CONFIG_BUFFER_SIZE, dev).unwrap(); - let config_bufs = (0..n_bufs) - .map(|_| { - session - .create_buffer(CONFIG_BUFFER_SIZE, usage_upload) - .unwrap() - }) - .collect(); - - let memory_buf_host = (0..n_bufs) - .map(|_| { - session - .create_buffer(std::mem::size_of::() as u64, usage_upload) - .unwrap() - }) - .collect(); - let target_dependent_size = - (width / TILE_W) as u64 * (height / TILE_H) as u64 * PTCL_INITIAL_ALLOC as u64; - let memory_buf_dev = - session.create_buffer(target_dependent_size + 8 * 1024 * 1024, usage_mem_dev)?; - let memory_buf_readback = - session.create_buffer(std::mem::size_of::() as u64, usage_readback)?; - let blend_buf = session.create_buffer(16 * 1024 * 1024, usage_blend)?; - - let element_code = ElementCode::new(session); - let element_stage = ElementStage::new(session, &element_code); - let element_bindings = scene_bufs - .iter() - .map(|scene_buf| { - element_stage.bind( - session, - &element_code, - &config_buf, - scene_buf, - &memory_buf_dev, - ) - }) - .collect(); - - let clip_code = ClipCode::new(session); - let clip_binding = ClipBinding::new(session, &clip_code, &config_buf, &memory_buf_dev); - - let tile_alloc_code = include_shader!(session, "../shader/gen/tile_alloc"); - let tile_pipeline = session.create_compute_pipeline( - tile_alloc_code, - &[ - BindType::Buffer, - BindType::BufReadOnly, - BindType::BufReadOnly, - ], - )?; - let tile_ds = scene_bufs - .iter() - .map(|scene_buf| { - session.create_simple_descriptor_set( - &tile_pipeline, - &[&memory_buf_dev, &config_buf, scene_buf], - ) - }) - .collect::, _>>()?; - - let path_alloc_code = include_shader!(session, "../shader/gen/path_coarse"); - let path_pipeline = session - .create_compute_pipeline(path_alloc_code, &[BindType::Buffer, BindType::BufReadOnly])?; - let path_ds = session - .create_simple_descriptor_set(&path_pipeline, &[&memory_buf_dev, &config_buf])?; - - let (backdrop_code, backdrop_y) = - if session.gpu_info().workgroup_limits.max_invocations >= 1024 { - (include_shader!(session, "../shader/gen/backdrop_lg"), 4) - } else { - println!("using small workgroup backdrop kernel"); - (include_shader!(session, "../shader/gen/backdrop"), 1) - }; - let backdrop_pipeline = session - .create_compute_pipeline(backdrop_code, &[BindType::Buffer, BindType::BufReadOnly])?; - let backdrop_ds = session - 
.create_simple_descriptor_set(&backdrop_pipeline, &[&memory_buf_dev, &config_buf])?; - - // TODO: constants - let bin_code = include_shader!(session, "../shader/gen/binning"); - let bin_pipeline = session - .create_compute_pipeline(bin_code, &[BindType::Buffer, BindType::BufReadOnly])?; - let bin_ds = - session.create_simple_descriptor_set(&bin_pipeline, &[&memory_buf_dev, &config_buf])?; - - let coarse_code = include_shader!(session, "../shader/gen/coarse"); - let coarse_pipeline = session.create_compute_pipeline( - coarse_code, - &[ - BindType::Buffer, - BindType::BufReadOnly, - BindType::BufReadOnly, - ], - )?; - let coarse_ds = scene_bufs - .iter() - .map(|scene_buf| { - session.create_simple_descriptor_set( - &coarse_pipeline, - &[&memory_buf_dev, &config_buf, scene_buf], - ) - }) - .collect::, _>>()?; - let bg_image = Self::make_test_bg_image(&session); - - const GRADIENT_BUF_SIZE: usize = N_GRADIENTS * N_GRADIENT_SAMPLES * 4; - - let gradient_bufs = (0..n_bufs) - .map(|_| { - session - .create_buffer(GRADIENT_BUF_SIZE as u64, usage_upload) - .unwrap() - }) - .collect(); - let gradients = Self::make_gradient_image(&session); - - let k4_code = match config.format { - PixelFormat::A8 => include_shader!(session, "../shader/gen/kernel4_gray"), - PixelFormat::Rgba8 => include_shader!(session, "../shader/gen/kernel4"), - }; - let k4_pipeline = session.create_compute_pipeline( - k4_code, - &[ - BindType::Buffer, - BindType::BufReadOnly, - BindType::Buffer, - BindType::Image, - BindType::ImageRead, - BindType::ImageRead, - ], - )?; - let k4_ds = session - .descriptor_set_builder() - .add_buffers(&[&memory_buf_dev, &config_buf, &blend_buf]) - .add_images(&[&image_dev]) - .add_textures(&[&bg_image, &gradients]) - .build(&session, &k4_pipeline)?; - - let scene_stats = Default::default(); - let ramps = ramp::RampCache::default(); - let drawdata_patches = vec![]; - - Ok(Renderer { - width, - height, - scene_bufs, - memory_buf_host, - memory_buf_dev, - memory_buf_readback, - config_buf, - config_bufs, - blend_buf, - image_dev, - element_code, - element_stage, - element_bindings, - clip_code, - clip_binding, - tile_pipeline, - tile_ds, - path_pipeline, - path_ds, - backdrop_pipeline, - backdrop_ds, - backdrop_y, - bin_pipeline, - bin_ds, - coarse_pipeline, - coarse_ds, - k4_pipeline, - k4_ds, - scene_stats, - n_transform: 0, - n_drawobj: 0, - n_paths: 0, - n_pathseg: 0, - n_pathtag: 0, - n_clip: 0, - _bg_image: bg_image, - gradient_bufs, - gradients, - ramps, - drawdata_patches, - }) - } - - pub fn upload_scene(&mut self, scene: &Scene, buf_ix: usize) -> Result<(), Error> { - self.drawdata_patches.clear(); - self.scene_stats = SceneStats::from_scene(scene); - self.ramps.advance(); - let data = scene.data(); - let stop_data = &data.resources.stops; - for patch in &data.resources.patches { - match patch { - ResourcePatch::Ramp { offset, stops } => { - let ramp_id = self.ramps.add(&stop_data[stops.clone()]); - self.drawdata_patches.push((*offset, ramp_id)); - } - } - } - unsafe { - self.upload_config(buf_ix)?; - { - let mut mapped_scene = self.scene_bufs[buf_ix].map_write(..)?; - write_scene(scene, &self.drawdata_patches, &mut mapped_scene); - } - - // Upload gradient data. 
- let ramp_data = self.ramps.data(); - if !ramp_data.is_empty() { - assert!( - self.gradient_bufs[buf_ix].size() as usize - >= std::mem::size_of_val(&*ramp_data) - ); - self.gradient_bufs[buf_ix].write(ramp_data)?; - } - } - Ok(()) - } - - // Note: configuration has to be re-uploaded when memory buffer is resized - pub(crate) unsafe fn upload_config(&mut self, buf_ix: usize) -> Result<(), Error> { - let stats = &self.scene_stats; - let n_path = stats.n_path as usize; - self.n_paths = n_path; - self.n_transform = stats.n_transform; - self.n_drawobj = stats.n_drawobj; - self.n_pathseg = stats.n_pathseg as usize; - self.n_pathtag = stats.n_pathtag; - self.n_clip = stats.n_clip; - let (mut config, alloc) = stats.config(self.width, self.height); - config.mem_size = self.memory_buf_size() as u32; - self.config_bufs[buf_ix].write(&[config])?; - let mem_header = MemoryHeader { - mem_offset: alloc as u32, - mem_error: 0, - blend_offset: 0, - }; - // Note: we could skip doing this on realloc, but probably not worth the bother - self.memory_buf_host[buf_ix].write(&[mem_header])?; - Ok(()) - } - - /// Get the size of memory for the allocations known in advance. - pub(crate) fn memory_size(&self, stats: &SceneStats) -> usize { - stats.config(self.width, self.height).1 - } - - /// Record the coarse part of a render pipeline. - pub unsafe fn record_coarse( - &self, - cmd_buf: &mut CmdBuf, - query_pool: &QueryPool, - buf_ix: usize, - ) { - cmd_buf.copy_buffer(&self.config_bufs[buf_ix], &self.config_buf); - cmd_buf.copy_buffer(&self.memory_buf_host[buf_ix], &self.memory_buf_dev); - cmd_buf.memory_barrier(); - cmd_buf.image_barrier( - &self.image_dev, - ImageLayout::Undefined, - ImageLayout::General, - ); - // TODO: make gradient upload optional, only if it's changed - cmd_buf.image_barrier( - &self.gradients, - ImageLayout::Undefined, - ImageLayout::BlitDst, - ); - cmd_buf.copy_buffer_to_image(&self.gradient_bufs[buf_ix], &self.gradients); - cmd_buf.image_barrier(&self.gradients, ImageLayout::BlitDst, ImageLayout::General); - cmd_buf.reset_query_pool(&query_pool); - cmd_buf.begin_debug_label("Element bounding box calculation"); - let mut pass = cmd_buf.begin_compute_pass(&ComputePassDescriptor::timer(&query_pool, 0, 1)); - self.element_stage.record( - &mut pass, - &self.element_code, - &self.element_bindings[buf_ix], - self.n_paths as u32, - self.n_pathtag as u32, - self.n_drawobj as u64, - ); - pass.end(); - cmd_buf.end_debug_label(); - cmd_buf.memory_barrier(); - let mut pass = cmd_buf.begin_compute_pass(&ComputePassDescriptor::timer(&query_pool, 2, 3)); - pass.begin_debug_label("Clip bounding box calculation"); - self.clip_binding - .record(&mut pass, &self.clip_code, self.n_clip as u32); - pass.end_debug_label(); - pass.begin_debug_label("Element binning"); - pass.dispatch( - &self.bin_pipeline, - &self.bin_ds, - (((self.n_paths + 255) / 256) as u32, 1, 1), - (256, 1, 1), - ); - pass.end_debug_label(); - pass.memory_barrier(); - pass.begin_debug_label("Tile allocation"); - pass.dispatch( - &self.tile_pipeline, - &self.tile_ds[buf_ix], - (((self.n_paths + 255) / 256) as u32, 1, 1), - (256, 1, 1), - ); - pass.end_debug_label(); - pass.end(); - cmd_buf.begin_debug_label("Path flattening"); - cmd_buf.memory_barrier(); - let mut pass = cmd_buf.begin_compute_pass(&ComputePassDescriptor::timer(&query_pool, 4, 5)); - pass.dispatch( - &self.path_pipeline, - &self.path_ds, - (((self.n_pathseg + 31) / 32) as u32, 1, 1), - (32, 1, 1), - ); - pass.end(); - cmd_buf.end_debug_label(); - cmd_buf.memory_barrier(); - 
cmd_buf.begin_debug_label("Backdrop propagation"); - let mut pass = cmd_buf.begin_compute_pass(&ComputePassDescriptor::timer(&query_pool, 6, 7)); - pass.dispatch( - &self.backdrop_pipeline, - &self.backdrop_ds, - (((self.n_paths + 255) / 256) as u32, 1, 1), - (256, self.backdrop_y, 1), - ); - pass.end(); - cmd_buf.end_debug_label(); - // TODO: redo query accounting - cmd_buf.memory_barrier(); - cmd_buf.begin_debug_label("Coarse raster"); - let mut pass = cmd_buf.begin_compute_pass(&ComputePassDescriptor::timer(&query_pool, 8, 9)); - pass.dispatch( - &self.coarse_pipeline, - &self.coarse_ds[buf_ix], - ( - (self.width as u32 + 255) / 256, - (self.height as u32 + 255) / 256, - 1, - ), - (256, 1, 1), - ); - pass.end(); - cmd_buf.end_debug_label(); - cmd_buf.memory_barrier(); - } - - pub unsafe fn record_fine( - &self, - cmd_buf: &mut CmdBuf, - query_pool: &QueryPool, - query_start: u32, - ) { - if query_start == 0 { - cmd_buf.reset_query_pool(&query_pool); - } - cmd_buf.begin_debug_label("Fine raster"); - let mut pass = cmd_buf.begin_compute_pass(&ComputePassDescriptor::timer( - &query_pool, - query_start, - query_start + 1, - )); - pass.dispatch( - &self.k4_pipeline, - &self.k4_ds, - ( - (self.width / TILE_W) as u32, - (self.height / TILE_H) as u32, - 1, - ), - (8, 4, 1), - ); - pass.end(); - cmd_buf.end_debug_label(); - cmd_buf.memory_barrier(); - cmd_buf.image_barrier(&self.image_dev, ImageLayout::General, ImageLayout::BlitSrc); - } - - pub unsafe fn record_readback(&self, cmd_buf: &mut CmdBuf) { - cmd_buf.copy_buffer(&self.memory_buf_dev, &self.memory_buf_readback); - cmd_buf.memory_barrier(); - } - - /// Record a render pipeline. - /// - /// This *assumes* the buffers are adequately sized. - pub unsafe fn record(&self, cmd_buf: &mut CmdBuf, query_pool: &QueryPool, buf_ix: usize) { - self.record_coarse(cmd_buf, query_pool, buf_ix); - self.record_fine(cmd_buf, query_pool, 10); - } - - pub fn make_image( - session: &Session, - width: usize, - height: usize, - buf: &[u8], - ) -> Result { - unsafe { - let buffer = session.create_buffer_init(&buf, BufferUsage::COPY_SRC)?; - const RGBA: piet_gpu_hal::ImageFormat = piet_gpu_hal::ImageFormat::Rgba8; - let image = session.create_image2d(width.try_into()?, height.try_into()?, RGBA)?; - let mut cmd_buf = session.cmd_buf()?; - cmd_buf.begin(); - cmd_buf.image_barrier(&image, ImageLayout::Undefined, ImageLayout::BlitDst); - cmd_buf.copy_buffer_to_image(&buffer, &image); - cmd_buf.image_barrier(&image, ImageLayout::BlitDst, ImageLayout::General); - cmd_buf.finish(); - // Make sure not to drop the buffer and image until the command buffer completes. - cmd_buf.add_resource(&buffer); - cmd_buf.add_resource(&image); - let _ = session.run_cmd_buf(cmd_buf, &[], &[]); - // We let the session reclaim the fence. - Ok(image) - } - } - - /// Make a test image. 
- fn make_test_bg_image(session: &Session) -> Image { - const WIDTH: usize = 256; - const HEIGHT: usize = 256; - let mut buf = vec![255u8; WIDTH * HEIGHT * 4]; - for y in 0..HEIGHT { - for x in 0..WIDTH { - let r = x as u8; - let g = y as u8; - let b = r ^ g; - buf[(y * WIDTH + x) * 4] = r; - buf[(y * WIDTH + x) * 4 + 1] = g; - buf[(y * WIDTH + x) * 4 + 2] = b; - } - } - Self::make_image(session, WIDTH, HEIGHT, &buf).unwrap() - } - - fn make_gradient_image(session: &Session) -> Image { - unsafe { - const RGBA: piet_gpu_hal::ImageFormat = piet_gpu_hal::ImageFormat::Rgba8; - session - .create_image2d(N_GRADIENT_SAMPLES as u32, N_GRADIENTS as u32, RGBA) - .unwrap() - } - } - - pub(crate) unsafe fn realloc_scene_if_needed( - &mut self, - session: &Session, - new_size: u64, - buf_ix: usize, - ) -> Result<(), Error> { - if new_size <= self.scene_bufs[buf_ix].size() { - return Ok(()); - } - const ALIGN: u64 = 0x10000; - let new_size = (new_size + ALIGN - 1) & ALIGN.wrapping_neg(); - println!( - "reallocating scene buf[{}] {} -> {}", - buf_ix, - self.scene_bufs[buf_ix].size(), - new_size - ); - let usage_upload = BufferUsage::MAP_WRITE | BufferUsage::COPY_SRC; - let scene_buf = session.create_buffer(new_size, usage_upload)?; - self.element_bindings[buf_ix].rebind_scene(session, &scene_buf); - session.update_buffer_descriptor(&mut self.tile_ds[buf_ix], 2, &scene_buf); - session.update_buffer_descriptor(&mut self.coarse_ds[buf_ix], 2, &scene_buf); - self.scene_bufs[buf_ix] = scene_buf; - Ok(()) - } - - /// Get the size of the memory buffer. - /// - /// This is the usable size (not including the header). - pub(crate) fn memory_buf_size(&self) -> u64 { - self.memory_buf_dev.size() - std::mem::size_of::() as u64 - } - - pub(crate) unsafe fn realloc_memory( - &mut self, - session: &Session, - new_size: u64, - ) -> Result<(), Error> { - println!( - "reallocating memory buf {} -> {}", - self.memory_buf_dev.size(), - new_size - ); - let usage_mem_dev = BufferUsage::STORAGE | BufferUsage::COPY_DST | BufferUsage::COPY_SRC; - let memory_buf_dev = session.create_buffer(new_size, usage_mem_dev)?; - for element_binding in &mut self.element_bindings { - element_binding.rebind_memory(session, &memory_buf_dev); - } - self.clip_binding.rebind_memory(session, &memory_buf_dev); - for tile_ds in &mut self.tile_ds { - session.update_buffer_descriptor(tile_ds, 0, &memory_buf_dev); - } - session.update_buffer_descriptor(&mut self.path_ds, 0, &memory_buf_dev); - session.update_buffer_descriptor(&mut self.backdrop_ds, 0, &memory_buf_dev); - session.update_buffer_descriptor(&mut self.bin_ds, 0, &memory_buf_dev); - for coarse_ds in &mut self.coarse_ds { - session.update_buffer_descriptor(coarse_ds, 0, &memory_buf_dev); - } - session.update_buffer_descriptor(&mut self.k4_ds, 0, &memory_buf_dev); - self.memory_buf_dev = memory_buf_dev; - Ok(()) - } - - pub(crate) fn blend_size(&self) -> u64 { - self.blend_buf.size() - } - - pub(crate) unsafe fn realloc_blend( - &mut self, - session: &Session, - new_size: u64, - ) -> Result<(), Error> { - println!( - "reallocating blend buf {} -> {}", - self.blend_size(), - new_size - ); - let usage_blend = BufferUsage::STORAGE; - let blend_buf = session.create_buffer(new_size, usage_blend)?; - session.update_buffer_descriptor(&mut self.k4_ds, 2, &blend_buf); - self.blend_buf = blend_buf; - Ok(()) - } -} - -const TRANSFORM_SIZE: usize = 24; -const PATHSEG_SIZE: usize = 52; -const PATH_BBOX_SIZE: usize = 24; -const DRAWMONOID_SIZE: usize = 16; -const DRAW_BBOX_SIZE: usize = 16; -const 
DRAWTAG_SIZE: usize = 4; -const ANNOTATED_SIZE: usize = 40; - -impl SceneStats { - pub fn from_scene(scene: &piet_scene::Scene) -> Self { - let data = scene.data(); - Self { - n_drawobj: data.drawtag_stream.len(), - drawdata_len: data.drawdata_stream.len(), - n_transform: data.transform_stream.len(), - linewidth_len: std::mem::size_of_val(&*data.linewidth_stream), - pathseg_len: data.pathseg_stream.len(), - n_pathtag: data.tag_stream.len(), - n_path: data.n_path, - n_pathseg: data.n_pathseg, - n_clip: data.n_clip, - } - } - - pub(crate) fn scene_size(&self) -> usize { - align_up(self.n_drawobj, DRAW_PART_SIZE as usize) * DRAWTAG_SIZE - + self.drawdata_len - + self.n_transform * TRANSFORM_SIZE - + self.linewidth_len - + align_up(self.n_pathtag, PATHSEG_PART_SIZE as usize) - + self.pathseg_len - } - - /// Return a config for a scene with these stats. - /// - /// Also returns the beginning of free (dynamic) memory. - fn config(&self, width: usize, height: usize) -> (Config, usize) { - // Layout of scene buffer - let drawtag_offset = 0; - let n_drawobj = self.n_drawobj; - let n_drawobj_padded = align_up(n_drawobj, DRAW_PART_SIZE as usize); - let drawdata_offset = drawtag_offset + n_drawobj_padded * DRAWTAG_SIZE; - let trans_offset = drawdata_offset + self.drawdata_len; - let n_trans = self.n_transform; - let linewidth_offset = trans_offset + n_trans * TRANSFORM_SIZE; - let pathtag_offset = linewidth_offset + self.linewidth_len; - let n_pathtag = self.n_pathtag; - let n_pathtag_padded = align_up(n_pathtag, PATHSEG_PART_SIZE as usize); - let pathseg_offset = pathtag_offset + n_pathtag_padded; - - // Layout of memory - let mut alloc = 0; - let pathseg_alloc = alloc; - alloc += pathseg_alloc + self.n_pathseg as usize * PATHSEG_SIZE; - let path_bbox_alloc = alloc; - let n_path = self.n_path as usize; - alloc += path_bbox_alloc + n_path * PATH_BBOX_SIZE; - let drawmonoid_alloc = alloc; - alloc += n_drawobj_padded * DRAWMONOID_SIZE; - let anno_alloc = alloc; - alloc += n_drawobj * ANNOTATED_SIZE; - let clip_alloc = alloc; - let n_clip = self.n_clip as usize; - const CLIP_SIZE: usize = 4; - alloc += n_clip * CLIP_SIZE; - let clip_bic_alloc = alloc; - const CLIP_BIC_SIZE: usize = 8; - // This can round down, as we only reduce the prefix - alloc += (n_clip / CLIP_PART_SIZE as usize) * CLIP_BIC_SIZE; - let clip_stack_alloc = alloc; - const CLIP_EL_SIZE: usize = 20; - alloc += n_clip * CLIP_EL_SIZE; - let clip_bbox_alloc = alloc; - const CLIP_BBOX_SIZE: usize = 16; - alloc += align_up(n_clip as usize, CLIP_PART_SIZE as usize) * CLIP_BBOX_SIZE; - let draw_bbox_alloc = alloc; - alloc += n_drawobj * DRAW_BBOX_SIZE; - let drawinfo_alloc = alloc; - // TODO: not optimized; it can be accumulated during encoding or summed from drawtags - const MAX_DRAWINFO_SIZE: usize = 44; - alloc += n_drawobj * MAX_DRAWINFO_SIZE; - - // These constants depend on encoding and may need to be updated. 
- const PATH_SIZE: usize = 12; - const BIN_SIZE: usize = 8; - let width_in_tiles = width / TILE_W; - let height_in_tiles = height / TILE_H; - let tile_base = alloc; - alloc += ((n_path + 3) & !3) * PATH_SIZE; - let bin_base = alloc; - alloc += ((n_drawobj + 255) & !255) * BIN_SIZE; - let ptcl_base = alloc; - alloc += width_in_tiles * height_in_tiles * PTCL_INITIAL_ALLOC; - - let config = Config { - mem_size: 0, // to be filled in later - n_elements: n_drawobj as u32, - n_pathseg: self.n_pathseg, - pathseg_alloc: pathseg_alloc as u32, - anno_alloc: anno_alloc as u32, - path_bbox_alloc: path_bbox_alloc as u32, - drawmonoid_alloc: drawmonoid_alloc as u32, - clip_alloc: clip_alloc as u32, - clip_bic_alloc: clip_bic_alloc as u32, - clip_stack_alloc: clip_stack_alloc as u32, - clip_bbox_alloc: clip_bbox_alloc as u32, - draw_bbox_alloc: draw_bbox_alloc as u32, - drawinfo_alloc: drawinfo_alloc as u32, - n_trans: n_trans as u32, - n_path: self.n_path, - n_clip: self.n_clip, - trans_offset: trans_offset as u32, - linewidth_offset: linewidth_offset as u32, - pathtag_offset: pathtag_offset as u32, - pathseg_offset: pathseg_offset as u32, - drawtag_offset: drawtag_offset as u32, - drawdata_offset: drawdata_offset as u32, - width_in_tiles: width_in_tiles as u32, - height_in_tiles: height_in_tiles as u32, - tile_alloc: tile_base as u32, - bin_alloc: bin_base as u32, - ptcl_alloc: ptcl_base as u32, - }; - - (config, alloc) - } -} - -fn write_scene(scene: &Scene, drawdata_patches: &[(usize, u32)], buf: &mut BufWrite) { - let data = scene.data(); - buf.extend_slice(&data.drawtag_stream); - let n_drawobj = data.drawtag_stream.len(); - buf.fill_zero(padding(n_drawobj, DRAW_PART_SIZE as usize) * DRAWTAG_SIZE); - if !drawdata_patches.is_empty() { - let mut pos = 0; - for patch in drawdata_patches { - let offset = patch.0; - let value = patch.1; - if pos < offset { - buf.extend_slice(&data.drawdata_stream[pos..offset]); - } - buf.push(value); - pos = offset + 4; - } - if pos < data.drawdata_stream.len() { - buf.extend_slice(&data.drawdata_stream[pos..]) - } - } else { - buf.extend_slice(&data.drawdata_stream); - } - buf.extend_slice(&data.transform_stream); - buf.extend_slice(&data.linewidth_stream); - buf.extend_slice(&data.tag_stream); - let n_pathtag = data.tag_stream.len(); - buf.fill_zero(padding(n_pathtag, PATHSEG_PART_SIZE as usize)); - buf.extend_slice(&data.pathseg_stream); -} - -fn padding(x: usize, align: usize) -> usize { - x.wrapping_neg() & (align - 1) -} - -fn align_up(x: usize, align: usize) -> usize { - debug_assert!(align.is_power_of_two()); - (x + align - 1) & !(align - 1) -} diff --git a/piet-gpu/src/pico_svg.rs b/piet-gpu/src/pico_svg.rs deleted file mode 100644 index 673b195..0000000 --- a/piet-gpu/src/pico_svg.rs +++ /dev/null @@ -1,139 +0,0 @@ -//! 
A loader for a tiny fragment of SVG - -use std::str::FromStr; - -use roxmltree::{Document, Node}; - -use piet_scene::kurbo::{Affine, BezPath}; -use piet_scene::Color; - -pub struct PicoSvg { - pub items: Vec, -} - -pub enum Item { - Fill(FillItem), - Stroke(StrokeItem), -} - -pub struct StrokeItem { - pub width: f64, - pub color: Color, - pub path: BezPath, -} - -pub struct FillItem { - pub color: Color, - pub path: BezPath, -} - -struct Parser<'a> { - scale: f64, - items: &'a mut Vec, -} - -impl PicoSvg { - pub fn load(xml_string: &str, scale: f64) -> Result> { - let doc = Document::parse(xml_string)?; - let root = doc.root_element(); - let mut items = Vec::new(); - let mut parser = Parser::new(&mut items, scale); - for node in root.children() { - parser.rec_parse(node)?; - } - Ok(PicoSvg { items }) - } -} - -impl<'a> Parser<'a> { - fn new(items: &'a mut Vec, scale: f64) -> Parser<'a> { - Parser { scale, items } - } - - fn rec_parse(&mut self, node: Node) -> Result<(), Box> { - let transform = if self.scale >= 0.0 { - Affine::scale(self.scale) - } else { - Affine::new([-self.scale, 0.0, 0.0, self.scale, 0.0, 1536.0]) - }; - if node.is_element() { - match node.tag_name().name() { - "g" => { - for child in node.children() { - self.rec_parse(child)?; - } - } - "path" => { - let d = node.attribute("d").ok_or("missing 'd' attribute")?; - let bp = BezPath::from_svg(d)?; - let path = transform * bp; - // TODO: default fill color is black, but this is overridden in tiger to this logic. - if let Some(fill_color) = node.attribute("fill") { - if fill_color != "none" { - let color = parse_color(fill_color); - let color = modify_opacity(color, "fill-opacity", node); - self.items.push(Item::Fill(FillItem { - color, - path: path.clone(), - })); - } - } - if let Some(stroke_color) = node.attribute("stroke") { - if stroke_color != "none" { - let width = self.scale.abs() - * f64::from_str( - node.attribute("stroke-width").ok_or("missing width")?, - )?; - let color = parse_color(stroke_color); - let color = modify_opacity(color, "stroke-opacity", node); - self.items - .push(Item::Stroke(StrokeItem { width, color, path })); - } - } - } - _ => (), - } - } - Ok(()) - } -} - -fn parse_color(color: &str) -> Color { - if color.as_bytes()[0] == b'#' { - let mut hex = u32::from_str_radix(&color[1..], 16).unwrap(); - if color.len() == 4 { - hex = (hex >> 8) * 0x110000 + ((hex >> 4) & 0xf) * 0x1100 + (hex & 0xf) * 0x11; - } - let rgba = (hex << 8) + 0xff; - let (r, g, b, a) = ( - (rgba >> 24 & 255) as u8, - ((rgba >> 16) & 255) as u8, - ((rgba >> 8) & 255) as u8, - (rgba & 255) as u8, - ); - Color::rgba8(r, g, b, a) - } else if color.starts_with("rgb(") { - let mut iter = color[4..color.len() - 1].split(','); - let r = u8::from_str(iter.next().unwrap()).unwrap(); - let g = u8::from_str(iter.next().unwrap()).unwrap(); - let b = u8::from_str(iter.next().unwrap()).unwrap(); - Color::rgb8(r, g, b) - } else { - Color::rgba8(255, 0, 255, 0x80) - } -} - -fn modify_opacity(mut color: Color, attr_name: &str, node: Node) -> Color { - if let Some(opacity) = node.attribute(attr_name) { - let alpha = if opacity.ends_with("%") { - let pctg = opacity[..opacity.len() - 1].parse().unwrap_or(100.0); - pctg * 0.01 - } else { - opacity.parse().unwrap_or(1.0) - } as f64; - color.a = (alpha.min(1.0).max(0.0) * 255.0).round() as u8; - color - } else { - color - } -} diff --git a/piet-gpu/src/ramp.rs b/piet-gpu/src/ramp.rs deleted file mode 100644 index 86af504..0000000 --- a/piet-gpu/src/ramp.rs +++ /dev/null @@ -1,129 +0,0 @@ -use 
piet_scene::{Color, ColorStop, ColorStops}; - -use std::collections::HashMap; - -const N_SAMPLES: usize = 512; -const RETAINED_COUNT: usize = 64; - -#[derive(Default)] -pub struct RampCache { - epoch: u64, - map: HashMap, - data: Vec, -} - -impl RampCache { - pub fn advance(&mut self) { - self.epoch += 1; - if self.map.len() > RETAINED_COUNT { - self.map - .retain(|_key, value| value.0 < RETAINED_COUNT as u32); - self.data.truncate(RETAINED_COUNT * N_SAMPLES); - } - } - - pub fn add(&mut self, stops: &[ColorStop]) -> u32 { - if let Some(entry) = self.map.get_mut(stops) { - entry.1 = self.epoch; - entry.0 - } else if self.map.len() < RETAINED_COUNT { - let id = (self.data.len() / N_SAMPLES) as u32; - self.data.extend(make_ramp(stops)); - self.map.insert(stops.into(), (id, self.epoch)); - id - } else { - let mut reuse = None; - for (stops, (id, epoch)) in &self.map { - if *epoch + 2 < self.epoch { - reuse = Some((stops.to_owned(), *id)); - break; - } - } - if let Some((old_stops, id)) = reuse { - self.map.remove(&old_stops); - let start = id as usize * N_SAMPLES; - for (dst, src) in self.data[start..start + N_SAMPLES] - .iter_mut() - .zip(make_ramp(stops)) - { - *dst = src; - } - self.map.insert(stops.into(), (id, self.epoch)); - id - } else { - let id = (self.data.len() / N_SAMPLES) as u32; - self.data.extend(make_ramp(stops)); - self.map.insert(stops.into(), (id, self.epoch)); - id - } - } - } - - pub fn data(&self) -> &[u32] { - &self.data - } -} - -fn make_ramp<'a>(stops: &'a [ColorStop]) -> impl Iterator + 'a { - let mut last_u = 0.0; - let mut last_c = ColorF64::from_color(stops[0].color); - let mut this_u = last_u; - let mut this_c = last_c; - let mut j = 0; - (0..N_SAMPLES).map(move |i| { - let u = (i as f64) / (N_SAMPLES - 1) as f64; - while u > this_u { - last_u = this_u; - last_c = this_c; - if let Some(s) = stops.get(j + 1) { - this_u = s.offset as f64; - this_c = ColorF64::from_color(s.color); - j += 1; - } else { - break; - } - } - let du = this_u - last_u; - let c = if du < 1e-9 { - this_c - } else { - last_c.lerp(&this_c, (u - last_u) / du) - }; - c.to_premul_u32() - }) -} - -#[derive(Copy, Clone, Debug)] -struct ColorF64([f64; 4]); - -impl ColorF64 { - fn from_color(color: Color) -> Self { - Self([ - color.r as f64 / 255.0, - color.g as f64 / 255.0, - color.b as f64 / 255.0, - color.a as f64 / 255.0, - ]) - } - - fn lerp(&self, other: &Self, a: f64) -> Self { - fn l(x: f64, y: f64, a: f64) -> f64 { - x * (1.0 - a) + y * a - } - Self([ - l(self.0[0], other.0[0], a), - l(self.0[1], other.0[1], a), - l(self.0[2], other.0[2], a), - l(self.0[3], other.0[3], a), - ]) - } - - fn to_premul_u32(&self) -> u32 { - let a = self.0[3].min(1.0).max(0.0); - let r = ((self.0[0] * a).min(1.0).max(0.0) * 255.0) as u32; - let g = ((self.0[1] * a).min(1.0).max(0.0) * 255.0) as u32; - let b = ((self.0[2] * a).min(1.0).max(0.0) * 255.0) as u32; - let a = (a * 255.0) as u32; - r | (g << 8) | (b << 16) | (a << 24) - } -} diff --git a/piet-gpu/src/render_driver.rs b/piet-gpu/src/render_driver.rs deleted file mode 100644 index c4d8043..0000000 --- a/piet-gpu/src/render_driver.rs +++ /dev/null @@ -1,318 +0,0 @@ -// Copyright 2022 The piet-gpu authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Also licensed under MIT license, at your choice. - -use piet_gpu_hal::{CmdBuf, Error, Image, QueryPool, Semaphore, Session, SubmittedCmdBuf}; -use piet_scene::Scene; - -use crate::{MemoryHeader, Renderer, SceneStats}; - -/// Additional logic for sequencing rendering operations, specifically -/// for handling failure and reallocation. -/// -/// It may be this shouldn't be a separate object from Renderer. -pub struct RenderDriver { - frames: Vec, - renderer: Renderer, - buf_ix: usize, - /// The index of a pending fine rasterization submission. - pending: Option, -} - -pub struct TargetState<'a> { - pub cmd_buf: &'a mut CmdBuf, - pub image: &'a Image, -} - -#[derive(Default, Debug)] -pub struct TimingStats { - coarse: Vec, - fine: Vec, -} - -struct RenderFrame { - cmd_buf: CmdBufState, - coarse_query_pool: QueryPool, - fine_query_pool: QueryPool, - timing_stats: TimingStats, -} - -enum CmdBufState { - Start, - Submitted(SubmittedCmdBuf), - Ready(CmdBuf), -} - -impl RenderDriver { - /// Create new render driver. - /// - /// Should probably be fallible. - /// - /// We can get n from the renderer as well. - pub fn new(session: &Session, n: usize, renderer: Renderer) -> RenderDriver { - let frames = (0..n) - .map(|_| { - // Maybe should allocate here so it doesn't happen on first frame? - let cmd_buf = CmdBufState::default(); - let coarse_query_pool = - session.create_query_pool(Renderer::COARSE_QUERY_POOL_SIZE)?; - let fine_query_pool = session.create_query_pool(Renderer::FINE_QUERY_POOL_SIZE)?; - Ok(RenderFrame { - cmd_buf, - coarse_query_pool, - fine_query_pool, - timing_stats: TimingStats::default(), - }) - }) - .collect::>() - .unwrap(); - RenderDriver { - frames, - renderer, - buf_ix: 0, - pending: None, - } - } - - pub fn upload_scene(&mut self, session: &Session, scene: &Scene) -> Result<(), Error> { - let stats = SceneStats::from_scene(scene); - self.ensure_scene_buffers(session, &stats)?; - self.renderer.upload_scene(scene, self.buf_ix) - } - - fn ensure_scene_buffers(&mut self, session: &Session, stats: &SceneStats) -> Result<(), Error> { - let scene_size = stats.scene_size(); - unsafe { - self.renderer - .realloc_scene_if_needed(session, scene_size as u64, self.buf_ix)?; - } - let memory_size = self.renderer.memory_size(&stats); - // TODO: better estimate of additional memory needed - // Note: if we were to cover the worst-case binning output, we could make the - // binning stage infallible and cut checking logic. It also may not be a bad - // estimate for the rest. - let estimated_needed = memory_size as u64 + (1 << 20); - if estimated_needed > self.renderer.memory_buf_size() { - if let Some(pending) = self.pending.take() { - // There might be a fine rasterization task that binds the memory buffer - // still in flight. - self.frames[pending].cmd_buf.wait(); - } - unsafe { - self.renderer.realloc_memory(session, estimated_needed)?; - } - } - Ok(()) - } - - /// Run one try of the coarse rendering pipeline. 
- pub(crate) fn try_run_coarse(&mut self, session: &Session) -> Result { - let frame = &mut self.frames[self.buf_ix]; - let cmd_buf = frame.cmd_buf.cmd_buf(session)?; - unsafe { - cmd_buf.begin(); - // TODO: probably want to return query results as well - self.renderer - .record_coarse(cmd_buf, &frame.coarse_query_pool, self.buf_ix); - self.renderer.record_readback(cmd_buf); - let cmd_buf = frame.cmd_buf.cmd_buf(session)?; - cmd_buf.finish_timestamps(&frame.coarse_query_pool); - cmd_buf.host_barrier(); - cmd_buf.finish(); - frame.cmd_buf.submit(session, &[], &[])?; - frame.cmd_buf.wait(); - frame.timing_stats.coarse = session.fetch_query_pool(&frame.coarse_query_pool)?; - let mut result = Vec::new(); - // TODO: consider read method for single POD value - self.renderer.memory_buf_readback.read(&mut result)?; - Ok(result[0]) - } - } - - /// Run the coarse render pipeline, ensuring enough memory for intermediate buffers. - pub fn run_coarse(&mut self, session: &Session) -> Result<(), Error> { - loop { - let mem_header = self.try_run_coarse(session)?; - //println!("{:?}", mem_header); - if mem_header.mem_error == 0 { - let blend_needed = mem_header.blend_offset as u64; - if blend_needed > self.renderer.blend_size() { - unsafe { - self.renderer.realloc_blend(session, blend_needed)?; - } - } - return Ok(()); - } - // Not enough memory, reallocate and retry. - // TODO: be smarter (multiplier for early stages) - let mem_size = mem_header.mem_offset + 4096; - // Safety rationalization: no command buffers containing the buffer are - // in flight. - unsafe { - self.renderer.realloc_memory(session, mem_size.into())?; - self.renderer.upload_config(self.buf_ix)?; - } - } - } - - /// Record the fine rasterizer, leaving the command buffer open. - pub fn record_fine(&mut self, session: &Session) -> Result { - let frame = &mut self.frames[self.buf_ix]; - let cmd_buf = frame.cmd_buf.cmd_buf(session)?; - unsafe { - cmd_buf.begin(); - self.renderer - .record_fine(cmd_buf, &frame.fine_query_pool, 0); - } - let image = &self.renderer.image_dev; - Ok(TargetState { cmd_buf, image }) - } - - /// Submit the current command buffer. - pub fn submit( - &mut self, - session: &Session, - wait_semaphores: &[&Semaphore], - signal_semaphores: &[&Semaphore], - ) -> Result<(), Error> { - let frame = &mut self.frames[self.buf_ix]; - let cmd_buf = frame.cmd_buf.cmd_buf(session)?; - unsafe { - cmd_buf.finish_timestamps(&frame.fine_query_pool); - cmd_buf.host_barrier(); - cmd_buf.finish(); - frame - .cmd_buf - .submit(session, wait_semaphores, signal_semaphores)? - } - self.pending = Some(self.buf_ix); - Ok(()) - } - - unsafe fn wait_frame(&mut self, session: &Session, buf_ix: usize) { - let frame = &mut self.frames[buf_ix]; - frame.cmd_buf.wait(); - if let Ok(stats) = session.fetch_query_pool(&frame.fine_query_pool) { - frame.timing_stats.fine = stats; - } - if self.pending == Some(buf_ix) { - self.pending = None; - } - } - - pub unsafe fn wait(&mut self, session: &Session) { - self.wait_frame(session, self.buf_ix); - } - - /// Move to the next buffer. 
- pub fn next_buffer(&mut self) { - self.buf_ix = (self.buf_ix + 1) % self.frames.len() - } - - pub unsafe fn get_timing_stats(&mut self, session: &Session, buf_ix: usize) -> &TimingStats { - self.wait_frame(session, buf_ix); - &self.frames[buf_ix].timing_stats - } - - pub fn wait_all(&mut self, session: &Session) { - for buf_ix in 0..self.frames.len() { - unsafe { - self.wait_frame(session, buf_ix); - } - } - } -} - -impl Default for CmdBufState { - fn default() -> Self { - CmdBufState::Start - } -} - -impl CmdBufState { - /// Get a command buffer suitable for recording. - /// - /// If the command buffer is submitted, wait. - fn cmd_buf(&mut self, session: &Session) -> Result<&mut CmdBuf, Error> { - if let CmdBufState::Ready(cmd_buf) = self { - return Ok(cmd_buf); - } - if let CmdBufState::Submitted(submitted) = std::mem::take(self) { - if let Ok(Some(cmd_buf)) = submitted.wait() { - *self = CmdBufState::Ready(cmd_buf); - } - } - if matches!(self, CmdBufState::Start) { - *self = CmdBufState::Ready(session.cmd_buf()?); - } - if let CmdBufState::Ready(cmd_buf) = self { - Ok(cmd_buf) - } else { - unreachable!() - } - } - - unsafe fn submit( - &mut self, - session: &Session, - wait_semaphores: &[&Semaphore], - signal_semaphores: &[&Semaphore], - ) -> Result<(), Error> { - if let CmdBufState::Ready(cmd_buf) = std::mem::take(self) { - let submitted = session.run_cmd_buf(cmd_buf, wait_semaphores, signal_semaphores)?; - *self = CmdBufState::Submitted(submitted); - Ok(()) - } else { - Err("Tried to submit CmdBufState not in ready state".into()) - } - } - - fn wait(&mut self) { - if matches!(self, CmdBufState::Submitted(_)) { - if let CmdBufState::Submitted(submitted) = std::mem::take(self) { - if let Ok(Some(cmd_buf)) = submitted.wait() { - *self = CmdBufState::Ready(cmd_buf); - } - } - } - } -} - -impl TimingStats { - pub fn print_summary(&self) { - let ts = &self.coarse; - println!("Element time: {:.3}ms", ts[0] * 1e3); - println!("Clip + bin + tile time: {:.3}ms", (ts[2] - ts[1]) * 1e3); - println!("Coarse path time: {:.3}ms", (ts[4] - ts[2]) * 1e3); - println!("Backdrop time: {:.3}ms", (ts[6] - ts[5]) * 1e3); - println!("Coarse raster kernel time: {:.3}ms", (ts[8] - ts[7]) * 1e3); - println!("Fine kernel time: {:.3}ms", self.fine[0] * 1e3); - } - - pub fn short_summary(&self) -> String { - let ts = &self.coarse; - let el = ts[0] * 1e3; - let cl = (ts[2] - ts[1]) * 1e3; - let cp = (ts[4] - ts[3]) * 1e3; - let bd = (ts[6] - ts[5]) * 1e3; - let cr = (ts[8] - ts[7]) * 1e3; - let fr = self.fine[0] * 1e3; - let total = el + cl + cp + bd + cr + fr; - format!( - "{:.3}ms :: el:{:.3}ms|cl:{:.3}ms|cp:{:.3}ms|bd:{:.3}ms|cr:{:.3}ms|fr:{:.3}ms", - total, el, cl, cp, bd, cr, fr - ) - } -} diff --git a/piet-gpu/src/samples.rs b/piet-gpu/src/samples.rs deleted file mode 100644 index e4f7bc8..0000000 --- a/piet-gpu/src/samples.rs +++ /dev/null @@ -1,355 +0,0 @@ -use crate::PicoSvg; -use piet_scene::kurbo::{Affine, BezPath, Ellipse, PathEl, Point, Rect}; -use piet_scene::*; - -use crate::SimpleText; - -pub fn render_funky_paths(sb: &mut SceneBuilder) { - use PathEl::*; - let missing_movetos = [ - LineTo((100.0, 100.0).into()), - LineTo((100.0, 200.0).into()), - ClosePath, - LineTo((0.0, 400.0).into()), - LineTo((100.0, 400.0).into()), - ]; - let only_movetos = [MoveTo((0.0, 0.0).into()), MoveTo((100.0, 100.0).into())]; - let empty: [PathEl; 0] = []; - sb.fill( - Fill::NonZero, - Affine::translate((100.0, 100.0)), - Color::rgb8(0, 0, 255), - None, - &missing_movetos, - ); - sb.fill( - Fill::NonZero, - 
Affine::IDENTITY, - Color::rgb8(0, 0, 255), - None, - &empty, - ); - sb.fill( - Fill::NonZero, - Affine::IDENTITY, - Color::rgb8(0, 0, 255), - None, - &only_movetos, - ); - sb.stroke( - &Stroke::new(8.0), - Affine::translate((100.0, 100.0)), - Color::rgb8(0, 255, 255), - None, - &missing_movetos, - ); -} - -#[allow(unused)] -const N_CIRCLES: usize = 0; - -#[allow(unused)] -pub fn render_svg(sb: &mut SceneBuilder, svg: &PicoSvg, print_stats: bool) { - use crate::pico_svg::*; - let start = std::time::Instant::now(); - for item in &svg.items { - match item { - Item::Fill(fill) => { - sb.fill( - Fill::NonZero, - Affine::IDENTITY, - fill.color, - None, - &fill.path, - ); - } - Item::Stroke(stroke) => { - sb.stroke( - &Stroke::new(stroke.width as f32), - Affine::IDENTITY, - stroke.color, - None, - &stroke.path, - ); - } - } - } - if print_stats { - println!("flattening and encoding time: {:?}", start.elapsed()); - } -} - -#[allow(unused)] -pub fn render_tiger(sb: &mut SceneBuilder, print_stats: bool) { - use super::pico_svg::*; - let xml_str = std::str::from_utf8(include_bytes!( - "../../piet-wgsl/examples/assets/Ghostscript_Tiger.svg" - )) - .unwrap(); - let start = std::time::Instant::now(); - let svg = PicoSvg::load(xml_str, 8.0).unwrap(); - if print_stats { - println!("parsing time: {:?}", start.elapsed()); - } - render_svg(sb, &svg, print_stats); -} - -pub fn render_scene(sb: &mut SceneBuilder) { - render_cardioid(sb); - render_clip_test(sb); - render_alpha_test(sb); - //render_tiger(sb, false); -} - -#[allow(unused)] -fn render_cardioid(sb: &mut SceneBuilder) { - let n = 601; - let dth = std::f64::consts::PI * 2.0 / (n as f64); - let center = Point::new(1024.0, 768.0); - let r = 750.0; - let mut path = BezPath::new(); - for i in 1..n { - let mut p0 = center; - let a0 = i as f64 * dth; - p0.x += a0.cos() * r; - p0.y += a0.sin() * r; - let mut p1 = center; - let a1 = ((i * 2) % n) as f64 * dth; - p1.x += a1.cos() * r; - p1.y += a1.sin() * r; - path.push(PathEl::MoveTo(p0)); - path.push(PathEl::LineTo(p1)); - } - sb.stroke( - &Stroke::new(2.0), - Affine::IDENTITY, - Color::rgb8(0, 0, 0), - None, - &path, - ); -} - -#[allow(unused)] -fn render_clip_test(sb: &mut SceneBuilder) { - const N: usize = 16; - const X0: f64 = 50.0; - const Y0: f64 = 450.0; - // Note: if it gets much larger, it will exceed the 1MB scratch buffer. - // But this is a pretty demanding test. - const X1: f64 = 550.0; - const Y1: f64 = 950.0; - let step = 1.0 / ((N + 1) as f64); - for i in 0..N { - let t = ((i + 1) as f64) * step; - let path = [ - PathEl::MoveTo((X0, Y0).into()), - PathEl::LineTo((X1, Y0).into()), - PathEl::LineTo((X1, Y0 + t * (Y1 - Y0)).into()), - PathEl::LineTo((X1 + t * (X0 - X1), Y1).into()), - PathEl::LineTo((X0, Y1).into()), - PathEl::ClosePath, - ]; - sb.push_layer(Mix::Clip, Affine::IDENTITY, &path); - } - let rect = Rect::new(X0, Y0, X1, Y1); - sb.fill( - Fill::NonZero, - Affine::IDENTITY, - &Brush::Solid(Color::rgb8(0, 0, 0)), - None, - &rect, - ); - for _ in 0..N { - sb.pop_layer(); - } -} - -#[allow(unused)] -fn render_alpha_test(sb: &mut SceneBuilder) { - // Alpha compositing tests. 
- sb.fill( - Fill::NonZero, - Affine::IDENTITY, - Color::rgb8(255, 0, 0), - None, - &make_diamond(1024.0, 100.0), - ); - sb.fill( - Fill::NonZero, - Affine::IDENTITY, - Color::rgba8(0, 255, 0, 0x80), - None, - &make_diamond(1024.0, 125.0), - ); - sb.push_layer(Mix::Clip, Affine::IDENTITY, &make_diamond(1024.0, 150.0)); - sb.fill( - Fill::NonZero, - Affine::IDENTITY, - Color::rgba8(0, 0, 255, 0x80), - None, - &make_diamond(1024.0, 175.0), - ); - sb.pop_layer(); -} - -#[allow(unused)] -pub fn render_blend_grid(sb: &mut SceneBuilder) { - const BLEND_MODES: &[Mix] = &[ - Mix::Normal, - Mix::Multiply, - Mix::Darken, - Mix::Screen, - Mix::Lighten, - Mix::Overlay, - Mix::ColorDodge, - Mix::ColorBurn, - Mix::HardLight, - Mix::SoftLight, - Mix::Difference, - Mix::Exclusion, - Mix::Hue, - Mix::Saturation, - Mix::Color, - Mix::Luminosity, - ]; - for (ix, &blend) in BLEND_MODES.iter().enumerate() { - let i = ix % 4; - let j = ix / 4; - let transform = Affine::translate((i as f64 * 225., j as f64 * 225.)); - let square = blend_square(blend.into()); - sb.append(&square, Some(transform)); - } -} - -#[allow(unused)] -fn render_blend_square(sb: &mut SceneBuilder, blend: BlendMode, transform: Affine) { - // Inspired by https://developer.mozilla.org/en-US/docs/Web/CSS/mix-blend-mode - let rect = Rect::from_origin_size(Point::new(0., 0.), (200., 200.)); - let linear = LinearGradient::new((0.0, 0.0), (200.0, 0.0)).stops([Color::BLACK, Color::WHITE]); - sb.fill(Fill::NonZero, transform, &linear, None, &rect); - const GRADIENTS: &[(f64, f64, Color)] = &[ - (150., 0., Color::rgb8(255, 240, 64)), - (175., 100., Color::rgb8(255, 96, 240)), - (125., 200., Color::rgb8(64, 192, 255)), - ]; - for (x, y, c) in GRADIENTS { - let mut color2 = c.clone(); - color2.a = 0; - let radial = RadialGradient::new((*x, *y), 100.0).stops([*c, color2]); - sb.fill(Fill::NonZero, transform, &radial, None, &rect); - } - const COLORS: &[Color] = &[ - Color::rgb8(255, 0, 0), - Color::rgb8(0, 255, 0), - Color::rgb8(0, 0, 255), - ]; - sb.push_layer(Mix::Normal, transform, &rect); - for (i, c) in COLORS.iter().enumerate() { - let linear = LinearGradient::new((0.0, 0.0), (0.0, 200.0)).stops([Color::WHITE, *c]); - sb.push_layer(blend, transform, &rect); - // squash the ellipse - let a = transform - * Affine::translate((100., 100.)) - * Affine::rotate(std::f64::consts::FRAC_PI_3 * (i * 2 + 1) as f64) - * Affine::scale_non_uniform(1.0, 0.357) - * Affine::translate((-100., -100.)); - sb.fill( - Fill::NonZero, - a, - &linear, - None, - &Ellipse::new((100., 100.), (90., 90.), 0.), - ); - sb.pop_layer(); - } - sb.pop_layer(); -} - -#[allow(unused)] -fn blend_square(blend: BlendMode) -> SceneFragment { - let mut fragment = SceneFragment::default(); - let mut sb = SceneBuilder::for_fragment(&mut fragment); - render_blend_square(&mut sb, blend, Affine::IDENTITY); - sb.finish(); - fragment -} - -#[allow(unused)] -pub fn render_anim_frame(sb: &mut SceneBuilder, text: &mut SimpleText, i: usize) { - sb.fill( - Fill::NonZero, - Affine::IDENTITY, - &Brush::Solid(Color::rgb8(128, 128, 128)), - None, - &Rect::from_origin_size(Point::new(0.0, 0.0), (1000.0, 1000.0)), - ); - let text_size = 60.0 + 40.0 * (0.01 * i as f32).sin(); - let s = "\u{1f600}hello piet-gpu text!"; - text.add( - sb, - None, - text_size, - None, - Affine::translate((110.0, 600.0)), - s, - ); - text.add( - sb, - None, - text_size, - None, - Affine::translate((110.0, 700.0)), - s, - ); - let th = (std::f64::consts::PI / 180.0) * (i as f64); - let center = Point::new(500.0, 500.0); - let mut p1 
= center; - p1.x += 400.0 * th.cos(); - p1.y += 400.0 * th.sin(); - sb.stroke( - &Stroke::new(5.0), - Affine::IDENTITY, - &Brush::Solid(Color::rgb8(128, 0, 0)), - None, - &&[PathEl::MoveTo(center), PathEl::LineTo(p1)][..], - ); -} - -#[allow(unused)] -pub fn render_brush_transform(sb: &mut SceneBuilder, i: usize) { - let th = (std::f64::consts::PI / 180.0) * (i as f64); - let linear = LinearGradient::new((0.0, 0.0), (0.0, 200.0)).stops([ - Color::RED, - Color::GREEN, - Color::BLUE, - ]); - sb.fill( - Fill::NonZero, - Affine::translate((200.0, 200.0)), - &linear, - Some(around_center(Affine::rotate(th), Point::new(200.0, 100.0))), - &Rect::from_origin_size(Point::default(), (400.0, 200.0)), - ); - sb.stroke( - &Stroke::new(40.0), - Affine::translate((800.0, 200.0)), - &linear, - Some(around_center(Affine::rotate(th), Point::new(200.0, 100.0))), - &Rect::from_origin_size(Point::default(), (400.0, 200.0)), - ); -} - -fn around_center(xform: Affine, center: Point) -> Affine { - Affine::translate(center.to_vec2()) * xform * Affine::translate(-center.to_vec2()) -} - -fn make_diamond(cx: f64, cy: f64) -> [PathEl; 5] { - const SIZE: f64 = 50.0; - [ - PathEl::MoveTo(Point::new(cx, cy - SIZE)), - PathEl::LineTo(Point::new(cx + SIZE, cy)), - PathEl::LineTo(Point::new(cx, cy + SIZE)), - PathEl::LineTo(Point::new(cx - SIZE, cy)), - PathEl::ClosePath, - ] -} diff --git a/piet-gpu/src/simple_text.rs b/piet-gpu/src/simple_text.rs deleted file mode 100644 index 674099e..0000000 --- a/piet-gpu/src/simple_text.rs +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2022 The piet-gpu authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Also licensed under MIT license, at your choice. - -use piet_scene::glyph::{pinot, pinot::TableProvider, GlyphContext}; -use piet_scene::kurbo::Affine; -use piet_scene::{Brush, SceneBuilder}; - -pub use pinot::FontRef; - -// This is very much a hack to get things working. 
-// On Windows, can set this to "c:\\Windows\\Fonts\\seguiemj.ttf" to get color emoji -const FONT_DATA: &[u8] = - include_bytes!("../../piet-wgsl/examples/assets/third-party/Roboto-Regular.ttf"); - -pub struct SimpleText { - gcx: GlyphContext, -} - -impl SimpleText { - pub fn new() -> Self { - Self { - gcx: GlyphContext::new(), - } - } - - pub fn add( - &mut self, - builder: &mut SceneBuilder, - font: Option<&FontRef>, - size: f32, - brush: Option<&Brush>, - transform: Affine, - text: &str, - ) { - let font = font.unwrap_or(&FontRef { - data: FONT_DATA, - offset: 0, - }); - if let Some(cmap) = font.cmap() { - if let Some(hmtx) = font.hmtx() { - let upem = font.head().map(|head| head.units_per_em()).unwrap_or(1000) as f64; - let scale = size as f64 / upem; - let vars: [(pinot::types::Tag, f32); 0] = []; - let mut provider = self.gcx.new_provider(font, None, size, false, vars); - let hmetrics = hmtx.hmetrics(); - let default_advance = hmetrics - .get(hmetrics.len().saturating_sub(1)) - .map(|h| h.advance_width) - .unwrap_or(0); - let mut pen_x = 0f64; - for ch in text.chars() { - let gid = cmap.map(ch as u32).unwrap_or(0); - let advance = hmetrics - .get(gid as usize) - .map(|h| h.advance_width) - .unwrap_or(default_advance) as f64 - * scale; - if let Some(glyph) = provider.get(gid, brush) { - let xform = transform - * Affine::translate((pen_x, 0.0)) - * Affine::scale_non_uniform(1.0, -1.0); - builder.append(&glyph, Some(xform)); - } - pen_x += advance; - } - } - } - } -} diff --git a/piet-gpu/src/stages.rs b/piet-gpu/src/stages.rs deleted file mode 100644 index e786ef5..0000000 --- a/piet-gpu/src/stages.rs +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2021 The piet-gpu authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Also licensed under MIT license, at your choice. - -//! Stages for new element pipeline, exposed for testing. - -mod clip; -mod draw; -mod path; -mod transform; - -use bytemuck::{Pod, Zeroable}; - -pub use clip::{ClipBinding, ClipCode, CLIP_PART_SIZE}; -pub use draw::{DrawBinding, DrawCode, DrawMonoid, DrawStage, DRAW_PART_SIZE}; -pub use path::{PathBinding, PathCode, PathEncoder, PathStage, PATHSEG_PART_SIZE}; -use piet_gpu_hal::{Buffer, ComputePass, Session}; -pub use transform::Transform; - -/// The configuration block passed to piet-gpu shaders. -/// -/// Note: this should be kept in sync with the version in setup.h. 
-#[repr(C)] -#[derive(Clone, Copy, Default, Debug, Zeroable, Pod)] -pub struct Config { - pub mem_size: u32, - pub n_elements: u32, // paths - pub n_pathseg: u32, - pub width_in_tiles: u32, - pub height_in_tiles: u32, - pub tile_alloc: u32, - pub bin_alloc: u32, - pub ptcl_alloc: u32, - pub pathseg_alloc: u32, - pub anno_alloc: u32, - pub path_bbox_alloc: u32, - pub drawmonoid_alloc: u32, - pub clip_alloc: u32, - pub clip_bic_alloc: u32, - pub clip_stack_alloc: u32, - pub clip_bbox_alloc: u32, - pub draw_bbox_alloc: u32, - pub drawinfo_alloc: u32, - pub n_trans: u32, - pub n_path: u32, - pub n_clip: u32, - pub trans_offset: u32, - pub linewidth_offset: u32, - pub pathtag_offset: u32, - pub pathseg_offset: u32, - pub drawtag_offset: u32, - pub drawdata_offset: u32, -} - -// The "element" stage combines a number of stages for parts of the pipeline. - -pub struct ElementCode { - path_code: PathCode, - draw_code: DrawCode, -} - -pub struct ElementStage { - path_stage: PathStage, - draw_stage: DrawStage, -} - -pub struct ElementBinding { - path_binding: PathBinding, - draw_binding: DrawBinding, -} - -impl ElementCode { - pub unsafe fn new(session: &Session) -> ElementCode { - ElementCode { - path_code: PathCode::new(session), - draw_code: DrawCode::new(session), - } - } -} - -impl ElementStage { - pub unsafe fn new(session: &Session, code: &ElementCode) -> ElementStage { - ElementStage { - path_stage: PathStage::new(session, &code.path_code), - draw_stage: DrawStage::new(session, &code.draw_code), - } - } - - pub unsafe fn bind( - &self, - session: &Session, - code: &ElementCode, - config_buf: &Buffer, - scene_buf: &Buffer, - memory_buf: &Buffer, - ) -> ElementBinding { - ElementBinding { - path_binding: self.path_stage.bind( - session, - &code.path_code, - config_buf, - scene_buf, - memory_buf, - ), - draw_binding: self.draw_stage.bind( - session, - &code.draw_code, - config_buf, - scene_buf, - memory_buf, - ), - } - } - - pub unsafe fn record( - &self, - pass: &mut ComputePass, - code: &ElementCode, - binding: &ElementBinding, - n_paths: u32, - n_tags: u32, - n_drawobj: u64, - ) { - // No memory barrier needed here; path has at least one before pathseg - self.path_stage.record( - pass, - &code.path_code, - &binding.path_binding, - n_paths, - n_tags, - ); - // No memory barrier needed here; draw has at least one before draw_leaf - self.draw_stage - .record(pass, &code.draw_code, &binding.draw_binding, n_drawobj); - } -} - -impl ElementBinding { - pub unsafe fn rebind_memory(&mut self, session: &Session, memory: &Buffer) { - self.path_binding.rebind_memory(session, memory); - self.draw_binding.rebind_memory(session, memory); - } - - pub unsafe fn rebind_scene(&mut self, session: &Session, scene: &Buffer) { - self.path_binding.rebind_scene(session, scene); - self.draw_binding.rebind_scene(session, scene); - } -} diff --git a/piet-gpu/src/stages/clip.rs b/piet-gpu/src/stages/clip.rs deleted file mode 100644 index bb9998f..0000000 --- a/piet-gpu/src/stages/clip.rs +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2022 The piet-gpu authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// -// Also licensed under MIT license, at your choice. - -//! The clip processing stage (includes substages). - -use piet_gpu_hal::{ - include_shader, BindType, Buffer, ComputePass, DescriptorSet, Pipeline, Session, -}; - -// Note that this isn't the code/stage/binding pattern of most of the other stages -// in the new element processing pipeline. We want to move those temporary buffers -// into common memory and converge on this pattern. -pub struct ClipCode { - reduce_pipeline: Pipeline, - leaf_pipeline: Pipeline, -} - -pub struct ClipBinding { - reduce_ds: DescriptorSet, - leaf_ds: DescriptorSet, -} - -pub const CLIP_PART_SIZE: u32 = 256; - -impl ClipCode { - pub unsafe fn new(session: &Session) -> ClipCode { - let reduce_code = include_shader!(session, "../../shader/gen/clip_reduce"); - let reduce_pipeline = session - .create_compute_pipeline(reduce_code, &[BindType::Buffer, BindType::BufReadOnly]) - .unwrap(); - let leaf_code = include_shader!(session, "../../shader/gen/clip_leaf"); - let leaf_pipeline = session - .create_compute_pipeline(leaf_code, &[BindType::Buffer, BindType::BufReadOnly]) - .unwrap(); - ClipCode { - reduce_pipeline, - leaf_pipeline, - } - } -} - -impl ClipBinding { - pub unsafe fn new( - session: &Session, - code: &ClipCode, - config: &Buffer, - memory: &Buffer, - ) -> ClipBinding { - let reduce_ds = session - .create_simple_descriptor_set(&code.reduce_pipeline, &[memory, config]) - .unwrap(); - let leaf_ds = session - .create_simple_descriptor_set(&code.leaf_pipeline, &[memory, config]) - .unwrap(); - ClipBinding { reduce_ds, leaf_ds } - } - - /// Record the clip dispatches. - /// - /// Assumes memory barrier on entry. Provides memory barrier on exit. - pub unsafe fn record(&self, pass: &mut ComputePass, code: &ClipCode, n_clip: u32) { - let n_wg_reduce = n_clip.saturating_sub(1) / CLIP_PART_SIZE; - if n_wg_reduce > 0 { - pass.dispatch( - &code.reduce_pipeline, - &self.reduce_ds, - (n_wg_reduce, 1, 1), - (CLIP_PART_SIZE, 1, 1), - ); - pass.memory_barrier(); - } - let n_wg = (n_clip + CLIP_PART_SIZE - 1) / CLIP_PART_SIZE; - if n_wg > 0 { - pass.dispatch( - &code.leaf_pipeline, - &self.leaf_ds, - (n_wg, 1, 1), - (CLIP_PART_SIZE, 1, 1), - ); - pass.memory_barrier(); - } - } - - pub unsafe fn rebind_memory(&mut self, session: &Session, memory: &Buffer) { - session.update_buffer_descriptor(&mut self.reduce_ds, 0, memory); - session.update_buffer_descriptor(&mut self.leaf_ds, 0, memory); - } -} diff --git a/piet-gpu/src/stages/draw.rs b/piet-gpu/src/stages/draw.rs deleted file mode 100644 index 8e55f95..0000000 --- a/piet-gpu/src/stages/draw.rs +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright 2021 The piet-gpu authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Also licensed under MIT license, at your choice. - -//! The draw object stage of the element processing pipeline. 
- -use bytemuck::{Pod, Zeroable}; - -use piet_gpu_hal::{ - include_shader, BindType, Buffer, BufferUsage, ComputePass, DescriptorSet, Pipeline, Session, -}; - -/// The output element of the draw object stage. -#[repr(C)] -#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Zeroable, Pod)] -pub struct DrawMonoid { - pub path_ix: u32, - pub clip_ix: u32, - pub scene_offset: u32, - pub info_offset: u32, -} - -const DRAW_WG: u64 = 256; -const DRAW_N_ROWS: u64 = 8; -pub const DRAW_PART_SIZE: u64 = DRAW_WG * DRAW_N_ROWS; - -pub struct DrawCode { - reduce_pipeline: Pipeline, - root_pipeline: Pipeline, - leaf_pipeline: Pipeline, -} -pub struct DrawStage { - // Right now we're limited to partition^2 (~16M) elements. This can be - // expanded but is tedious. - root_buf: Buffer, - root_ds: DescriptorSet, -} - -pub struct DrawBinding { - reduce_ds: DescriptorSet, - leaf_ds: DescriptorSet, -} - -impl DrawCode { - pub unsafe fn new(session: &Session) -> DrawCode { - let reduce_code = include_shader!(session, "../../shader/gen/draw_reduce"); - let reduce_pipeline = session - .create_compute_pipeline( - reduce_code, - &[ - BindType::Buffer, - BindType::BufReadOnly, - BindType::BufReadOnly, - BindType::Buffer, - ], - ) - .unwrap(); - let root_code = include_shader!(session, "../../shader/gen/draw_root"); - let root_pipeline = session - .create_compute_pipeline(root_code, &[BindType::Buffer]) - .unwrap(); - let leaf_code = include_shader!(session, "../../shader/gen/draw_leaf"); - let leaf_pipeline = session - .create_compute_pipeline( - leaf_code, - &[ - BindType::Buffer, - BindType::BufReadOnly, - BindType::BufReadOnly, - BindType::BufReadOnly, - ], - ) - .unwrap(); - DrawCode { - reduce_pipeline, - root_pipeline, - leaf_pipeline, - } - } -} - -impl DrawStage { - pub unsafe fn new(session: &Session, code: &DrawCode) -> DrawStage { - // We're limited to DRAW_PART_SIZE^2 - // Also note: size here allows padding - let root_buf_size = DRAW_PART_SIZE * 16; - let root_buf = session - .create_buffer(root_buf_size, BufferUsage::STORAGE) - .unwrap(); - let root_ds = session - .create_simple_descriptor_set(&code.root_pipeline, &[&root_buf]) - .unwrap(); - DrawStage { root_buf, root_ds } - } - - pub unsafe fn bind( - &self, - session: &Session, - code: &DrawCode, - config_buf: &Buffer, - scene_buf: &Buffer, - memory_buf: &Buffer, - ) -> DrawBinding { - let reduce_ds = session - .create_simple_descriptor_set( - &code.reduce_pipeline, - &[memory_buf, config_buf, scene_buf, &self.root_buf], - ) - .unwrap(); - let leaf_ds = session - .create_simple_descriptor_set( - &code.leaf_pipeline, - &[memory_buf, config_buf, scene_buf, &self.root_buf], - ) - .unwrap(); - DrawBinding { reduce_ds, leaf_ds } - } - - pub unsafe fn record( - &self, - pass: &mut ComputePass, - code: &DrawCode, - binding: &DrawBinding, - size: u64, - ) { - if size > DRAW_PART_SIZE.pow(2) { - panic!("very large scan not yet implemented"); - } - let n_workgroups = (size + DRAW_PART_SIZE - 1) / DRAW_PART_SIZE; - if n_workgroups > 1 { - pass.dispatch( - &code.reduce_pipeline, - &binding.reduce_ds, - (n_workgroups as u32, 1, 1), - (DRAW_WG as u32, 1, 1), - ); - pass.memory_barrier(); - pass.dispatch( - &code.root_pipeline, - &self.root_ds, - (1, 1, 1), - (DRAW_WG as u32, 1, 1), - ); - } - pass.memory_barrier(); - pass.dispatch( - &code.leaf_pipeline, - &binding.leaf_ds, - (n_workgroups as u32, 1, 1), - (DRAW_WG as u32, 1, 1), - ); - } -} - -impl DrawBinding { - pub unsafe fn rebind_memory(&mut self, session: &Session, memory: &Buffer) { - 
session.update_buffer_descriptor(&mut self.reduce_ds, 0, memory); - session.update_buffer_descriptor(&mut self.leaf_ds, 0, memory); - } - - pub unsafe fn rebind_scene(&mut self, session: &Session, scene: &Buffer) { - session.update_buffer_descriptor(&mut self.reduce_ds, 2, scene); - session.update_buffer_descriptor(&mut self.leaf_ds, 2, scene); - } -} diff --git a/piet-gpu/src/stages/path.rs b/piet-gpu/src/stages/path.rs deleted file mode 100644 index 312358e..0000000 --- a/piet-gpu/src/stages/path.rs +++ /dev/null @@ -1,356 +0,0 @@ -// Copyright 2021 The piet-gpu authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Also licensed under MIT license, at your choice. - -//! The path stage (includes substages). - -use piet_gpu_hal::{ - include_shader, BindType, Buffer, BufferUsage, ComputePass, DescriptorSet, Pipeline, Session, -}; - -pub struct PathCode { - reduce_pipeline: Pipeline, - tag_root_pipeline: Pipeline, - clear_pipeline: Pipeline, - pathseg_pipeline: Pipeline, -} - -pub struct PathStage { - tag_root_buf: Buffer, - tag_root_ds: DescriptorSet, -} - -pub struct PathBinding { - reduce_ds: DescriptorSet, - clear_ds: DescriptorSet, - path_ds: DescriptorSet, -} - -const REDUCE_WG: u32 = 128; -const REDUCE_N_ROWS: u32 = 2; -const REDUCE_PART_SIZE: u32 = REDUCE_WG * REDUCE_N_ROWS; - -const ROOT_WG: u32 = 256; -const ROOT_N_ROWS: u32 = 8; -const ROOT_PART_SIZE: u32 = ROOT_WG * ROOT_N_ROWS; - -const SCAN_WG: u32 = 256; -const SCAN_N_ROWS: u32 = 4; -const SCAN_PART_SIZE: u32 = SCAN_WG * SCAN_N_ROWS; - -pub const PATHSEG_PART_SIZE: u32 = SCAN_PART_SIZE; - -const CLEAR_WG: u32 = 256; - -impl PathCode { - pub unsafe fn new(session: &Session) -> PathCode { - let reduce_code = include_shader!(session, "../../shader/gen/pathtag_reduce"); - let reduce_pipeline = session - .create_compute_pipeline( - reduce_code, - &[ - BindType::Buffer, - BindType::BufReadOnly, - BindType::BufReadOnly, - BindType::Buffer, - ], - ) - .unwrap(); - let tag_root_code = include_shader!(session, "../../shader/gen/pathtag_root"); - let tag_root_pipeline = session - .create_compute_pipeline(tag_root_code, &[BindType::Buffer]) - .unwrap(); - let clear_code = include_shader!(session, "../../shader/gen/bbox_clear"); - let clear_pipeline = session - .create_compute_pipeline(clear_code, &[BindType::Buffer, BindType::BufReadOnly]) - .unwrap(); - let pathseg_code = include_shader!(session, "../../shader/gen/pathseg"); - let pathseg_pipeline = session - .create_compute_pipeline( - pathseg_code, - &[ - BindType::Buffer, - BindType::BufReadOnly, - BindType::BufReadOnly, - BindType::BufReadOnly, - ], - ) - .unwrap(); - PathCode { - reduce_pipeline, - tag_root_pipeline, - clear_pipeline, - pathseg_pipeline, - } - } -} - -impl PathStage { - pub unsafe fn new(session: &Session, code: &PathCode) -> PathStage { - let tag_root_buf_size = (ROOT_PART_SIZE * 20) as u64; - let tag_root_buf = session - .create_buffer(tag_root_buf_size, BufferUsage::STORAGE) - .unwrap(); - let tag_root_ds = session - 
.create_simple_descriptor_set(&code.tag_root_pipeline, &[&tag_root_buf]) - .unwrap(); - PathStage { - tag_root_buf, - tag_root_ds, - } - } - - pub unsafe fn bind( - &self, - session: &Session, - code: &PathCode, - config_buf: &Buffer, - scene_buf: &Buffer, - memory_buf: &Buffer, - ) -> PathBinding { - let reduce_ds = session - .create_simple_descriptor_set( - &code.reduce_pipeline, - &[memory_buf, config_buf, scene_buf, &self.tag_root_buf], - ) - .unwrap(); - let clear_ds = session - .create_simple_descriptor_set(&code.clear_pipeline, &[memory_buf, config_buf]) - .unwrap(); - let path_ds = session - .create_simple_descriptor_set( - &code.pathseg_pipeline, - &[memory_buf, config_buf, scene_buf, &self.tag_root_buf], - ) - .unwrap(); - PathBinding { - reduce_ds, - clear_ds, - path_ds, - } - } - - /// Record the path stage. - /// - /// Note: no barrier is needed for transform output, we have a barrier before - /// those are consumed. Result is written without barrier. - pub unsafe fn record( - &self, - pass: &mut ComputePass, - code: &PathCode, - binding: &PathBinding, - n_paths: u32, - n_tags: u32, - ) { - if n_tags > ROOT_PART_SIZE * SCAN_PART_SIZE { - println!( - "number of pathsegs exceeded {} > {}", - n_tags, - ROOT_PART_SIZE * SCAN_PART_SIZE - ); - } - - // Number of tags consumed in a tag reduce workgroup - let reduce_part_tags = REDUCE_PART_SIZE * 4; - let n_wg_tag_reduce = (n_tags + reduce_part_tags - 1) / reduce_part_tags; - if n_wg_tag_reduce > 1 { - pass.dispatch( - &code.reduce_pipeline, - &binding.reduce_ds, - (n_wg_tag_reduce, 1, 1), - (REDUCE_WG, 1, 1), - ); - // I think we can skip root if n_wg_tag_reduce == 2 - pass.memory_barrier(); - pass.dispatch( - &code.tag_root_pipeline, - &self.tag_root_ds, - (1, 1, 1), - (ROOT_WG, 1, 1), - ); - // No barrier needed here; clear doesn't depend on path tags - } - let n_wg_clear = (n_paths + CLEAR_WG - 1) / CLEAR_WG; - pass.dispatch( - &code.clear_pipeline, - &binding.clear_ds, - (n_wg_clear, 1, 1), - (CLEAR_WG, 1, 1), - ); - pass.memory_barrier(); - let n_wg_pathseg = (n_tags + SCAN_PART_SIZE - 1) / SCAN_PART_SIZE; - pass.dispatch( - &code.pathseg_pipeline, - &binding.path_ds, - (n_wg_pathseg, 1, 1), - (SCAN_WG, 1, 1), - ); - } -} - -impl PathBinding { - pub unsafe fn rebind_memory(&mut self, session: &Session, memory: &Buffer) { - session.update_buffer_descriptor(&mut self.reduce_ds, 0, memory); - session.update_buffer_descriptor(&mut self.clear_ds, 0, memory); - session.update_buffer_descriptor(&mut self.path_ds, 0, memory); - } - - pub unsafe fn rebind_scene(&mut self, session: &Session, scene: &Buffer) { - session.update_buffer_descriptor(&mut self.reduce_ds, 2, scene); - session.update_buffer_descriptor(&mut self.path_ds, 2, scene); - } -} - -pub struct PathEncoder<'a> { - tag_stream: &'a mut Vec, - // If we're never going to use the i16 encoding, it might be - // slightly faster to store this as Vec, we'd get aligned - // stores on ARM etc. 
- pathseg_stream: &'a mut Vec, - first_pt: [f32; 2], - state: State, - n_pathseg: u32, -} - -#[derive(PartialEq)] -enum State { - Start, - MoveTo, - NonemptySubpath, -} - -impl<'a> PathEncoder<'a> { - pub fn new(tags: &'a mut Vec, pathsegs: &'a mut Vec) -> PathEncoder<'a> { - PathEncoder { - tag_stream: tags, - pathseg_stream: pathsegs, - first_pt: [0.0, 0.0], - state: State::Start, - n_pathseg: 0, - } - } - - pub fn move_to(&mut self, x: f32, y: f32) { - let buf = [x, y]; - let bytes = bytemuck::bytes_of(&buf); - self.first_pt = buf; - if self.state == State::MoveTo { - let new_len = self.pathseg_stream.len() - 8; - self.pathseg_stream.truncate(new_len); - } - if self.state == State::NonemptySubpath { - if let Some(tag) = self.tag_stream.last_mut() { - *tag |= 4; - } - } - self.pathseg_stream.extend_from_slice(bytes); - self.state = State::MoveTo; - } - - pub fn line_to(&mut self, x: f32, y: f32) { - if self.state == State::Start { - // should warn or error - return; - } - let buf = [x, y]; - let bytes = bytemuck::bytes_of(&buf); - self.pathseg_stream.extend_from_slice(bytes); - self.tag_stream.push(9); - self.state = State::NonemptySubpath; - self.n_pathseg += 1; - } - - pub fn quad_to(&mut self, x1: f32, y1: f32, x2: f32, y2: f32) { - if self.state == State::Start { - return; - } - let buf = [x1, y1, x2, y2]; - let bytes = bytemuck::bytes_of(&buf); - self.pathseg_stream.extend_from_slice(bytes); - self.tag_stream.push(10); - self.state = State::NonemptySubpath; - self.n_pathseg += 1; - } - - pub fn cubic_to(&mut self, x1: f32, y1: f32, x2: f32, y2: f32, x3: f32, y3: f32) { - if self.state == State::Start { - return; - } - let buf = [x1, y1, x2, y2, x3, y3]; - let bytes = bytemuck::bytes_of(&buf); - self.pathseg_stream.extend_from_slice(bytes); - self.tag_stream.push(11); - self.state = State::NonemptySubpath; - self.n_pathseg += 1; - } - - pub fn close_path(&mut self) { - match self.state { - State::Start => return, - State::MoveTo => { - let new_len = self.pathseg_stream.len() - 8; - self.pathseg_stream.truncate(new_len); - self.state = State::Start; - return; - } - State::NonemptySubpath => (), - } - let len = self.pathseg_stream.len(); - if len < 8 { - // can't happen - return; - } - let first_bytes = bytemuck::bytes_of(&self.first_pt); - if &self.pathseg_stream[len - 8..len] != first_bytes { - self.pathseg_stream.extend_from_slice(first_bytes); - self.tag_stream.push(13); - self.n_pathseg += 1; - } else { - if let Some(tag) = self.tag_stream.last_mut() { - *tag |= 4; - } - } - self.state = State::Start; - } - - fn finish(&mut self) { - if self.state == State::MoveTo { - let new_len = self.pathseg_stream.len() - 8; - self.pathseg_stream.truncate(new_len); - } - if let Some(tag) = self.tag_stream.last_mut() { - *tag |= 4; - } - } - - /// Finish encoding a path. - /// - /// Encode this after encoding path segments. - pub fn path(&mut self) { - self.finish(); - // maybe don't encode if path is empty? might throw off sync though - self.tag_stream.push(0x10); - } - - /// Get the number of path segments. - /// - /// This is the number of path segments that will be written by the - /// path stage; use this for allocating the output buffer. - /// - /// Also note: it takes `self` for lifetime reasons. 
- pub fn n_pathseg(self) -> u32 { - self.n_pathseg - } -} diff --git a/piet-gpu/src/stages/transform.rs b/piet-gpu/src/stages/transform.rs deleted file mode 100644 index 8e237ba..0000000 --- a/piet-gpu/src/stages/transform.rs +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2021 The piet-gpu authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Also licensed under MIT license, at your choice. - -//! The transform stage of the element processing pipeline. - -use bytemuck::{Pod, Zeroable}; - -/// An affine transform. -// This is equivalent to the version in piet-gpu-types, but the bytemuck -// representation will likely be faster. -#[repr(C)] -#[derive(Clone, Copy, Debug, Default, Zeroable, Pod)] -pub struct Transform { - pub mat: [f32; 4], - pub translate: [f32; 2], -} - -impl Transform { - pub const IDENTITY: Transform = Transform { - mat: [1.0, 0.0, 0.0, 1.0], - translate: [0.0, 0.0], - }; -} diff --git a/piet-wgsl/examples/winit/Cargo.toml b/piet-wgsl/examples/winit/Cargo.toml index e112839..0898487 100644 --- a/piet-wgsl/examples/winit/Cargo.toml +++ b/piet-wgsl/examples/winit/Cargo.toml @@ -2,6 +2,7 @@ name = "winit" version = "0.1.0" edition = "2021" +publish = false # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html diff --git a/tests/Cargo.toml b/tests/Cargo.toml deleted file mode 100644 index ea320b9..0000000 --- a/tests/Cargo.toml +++ /dev/null @@ -1,23 +0,0 @@ -[package] -name = "piet-gpu-tests" -version = "0.1.0" -authors = ["Raph Levien "] -description = "Tests for piet-gpu shaders and generic GPU capabilities." -license = "MIT/Apache-2.0" -edition = "2021" - -[features] -default = ["piet-gpu"] - -[dependencies] -clap = "3.2.22" -bytemuck = "1.7.2" -kurbo = "0.7.1" -rand = "0.7.3" - -[dependencies.piet-gpu-hal] -path = "../piet-gpu-hal" - -[dependencies.piet-gpu] -path = "../piet-gpu" -optional = true diff --git a/tests/README.md b/tests/README.md deleted file mode 100644 index 979fd9c..0000000 --- a/tests/README.md +++ /dev/null @@ -1,47 +0,0 @@ -# piet-gpu-tests - -This subdirectory contains a curated set of tests for GPU issues likely to affect piet-gpu compatibility or performance. To run, cd to the tests directory and do `cargo run --release`. There are a number of additional options, including: - -* `--dx12` Prefer DX12 backend on windows. -* `--size {s,m,l}` Size of test to run. -* `--n_iter n` Number of iterations. -* `--verbose` Verbose output. - -As usual, run `cargo run -- -h` for the current list. - -Below is a description of individual tests. - -## clear buffers - -This is as simple as it says, it uses a compute shader to clear buffers. It's run first as a warmup, and is a simple test of raw memory bandwidth (reported as 4 byte elements/s). - -## Prefix sum tests - -There are several variations of the prefix sum test, first the [decoupled look-back] variant, then a more conservative tree reduction version. 
The decoupled look-back implementation exercises advanced atomic features and depends on their correctness, including atomic coherence and correct scope of memory barriers. - -None of the decoupled look-back tests are expected to pass on Metal, as that back-end lacks the appropriate barrier; the spirv-cross translation silently maps the GLSL barrier to a weaker one. All tests are expected to pass on both Vulkan and DX12. - -The compatibility variant does all manipulation of the state buffer using non-atomic operations, with the buffer marked "volatile" and barriers to ensure acquire/release ordering. - -The atomic variant is similar, but uses atomicLoad and atomicStore (from the [memory scope semantics] extension to GLSL). - -Finally, the vkmm (Vulkan memory model) variant uses explicit acquire and release semantics on the atomics instead of barriers, and only runs when the device reports that the memory model extension is available. - -The tree reduction version of this test does not rely on advanced atomics and can be considered a baseline for both correctness and performance. The current implementation lacks configuration settings to handle odd-size buffers. On well-tuned hardware, the decoupled look-back implementation is expected to be 1.5x faster. - -Note that the workgroup sizes and sequential iteration count parameters are hard-coded (and tuned for a desktop card I had handy). A useful future extension of this test suite would be iteration over several combinations of those parameters. (The main reason this is not done yet is that it would put a lot of strain on the shader build pipeline, and at the moment hand-editing the ninja file is adequate.) - -## Atomic tests - -Decoupled look-back relies on the atomic message passing idiom; these tests exercise that in isolation. - -The message passing tests basically do a bunch of basic message passing operations in parallel, and the "special sauce" is that the memory locations for both flags and data are permuted. That seems to do a much better job of finding violations than existing versions of the test. - -The linked list test is mostly a bandwidth test of atomicExchange, and is a simplified version of what the coarse path rasterizer does in piet-gpu to build per-tile lists of path segments. The verification of the resulting lists is also a pretty good test of device-scoped modification order (not that this is likely to fail). - -## More tests - -I'll be adding more tests specific to piet-gpu. I'm also open to tests being added here; feel free to file an issue. - -[decoupled look-back]: https://raphlinus.github.io/gpu/2020/04/30/prefix-sum.html -[memory scope semantics]: https://github.com/KhronosGroup/GLSL/blob/master/extensions/khr/GL_KHR_memory_scope_semantics.txt diff --git a/tests/shader/build.ninja b/tests/shader/build.ninja deleted file mode 100644 index 49e0260..0000000 --- a/tests/shader/build.ninja +++ /dev/null @@ -1,72 +0,0 @@ -# Build file for shaders. - -# You must have Vulkan tools in your path, or patch here. - -glslang_validator = glslangValidator -spirv_cross = spirv-cross -dxc = dxc - -# See https://github.com/KhronosGroup/SPIRV-Cross/issues/1248 for -# why we set this.
-msl_flags = --msl-decoration-binding - -rule glsl - command = $glslang_validator $flags -V -o $out $in - -rule hlsl - command = $spirv_cross --hlsl $in --output $out - -rule dxil - command = $dxc -T cs_6_0 $in -Fo $out - -rule msl - command = $spirv_cross --msl $in --output $out $msl_flags - -build gen/clear.spv: glsl clear.comp -build gen/clear.hlsl: hlsl gen/clear.spv -build gen/clear.dxil: dxil gen/clear.hlsl -build gen/clear.msl: msl gen/clear.spv - -build gen/prefix.spv: glsl prefix.comp -build gen/prefix.hlsl: hlsl gen/prefix.spv -build gen/prefix.dxil: dxil gen/prefix.hlsl -build gen/prefix.msl: msl gen/prefix.spv - -build gen/prefix_atomic.spv: glsl prefix.comp - flags = -DATOMIC -build gen/prefix_atomic.hlsl: hlsl gen/prefix_atomic.spv -build gen/prefix_atomic.dxil: dxil gen/prefix_atomic.hlsl -build gen/prefix_atomic.msl: msl gen/prefix_atomic.spv - -build gen/prefix_vkmm.spv: glsl prefix.comp - flags = -DATOMIC -DVKMM -# Vulkan memory model doesn't translate - -build gen/prefix_reduce.spv: glsl prefix_reduce.comp -build gen/prefix_reduce.hlsl: hlsl gen/prefix_reduce.spv -build gen/prefix_reduce.dxil: dxil gen/prefix_reduce.hlsl -build gen/prefix_reduce.msl: msl gen/prefix_reduce.spv - -build gen/prefix_root.spv: glsl prefix_scan.comp - flags = -DROOT -build gen/prefix_root.hlsl: hlsl gen/prefix_root.spv -build gen/prefix_root.dxil: dxil gen/prefix_root.hlsl -build gen/prefix_root.msl: msl gen/prefix_root.spv - -build gen/prefix_scan.spv: glsl prefix_scan.comp -build gen/prefix_scan.hlsl: hlsl gen/prefix_scan.spv -build gen/prefix_scan.dxil: dxil gen/prefix_scan.hlsl -build gen/prefix_scan.msl: msl gen/prefix_scan.spv - -build gen/message_passing.spv: glsl message_passing.comp -build gen/message_passing.hlsl: hlsl gen/message_passing.spv -build gen/message_passing.dxil: dxil gen/message_passing.hlsl -build gen/message_passing.msl: msl gen/message_passing.spv - -build gen/message_passing_vkmm.spv: glsl message_passing.comp - flags = -DVKMM - -build gen/linkedlist.spv: glsl linkedlist.comp -build gen/linkedlist.hlsl: hlsl gen/linkedlist.spv -build gen/linkedlist.dxil: dxil gen/linkedlist.hlsl -build gen/linkedlist.msl: msl gen/linkedlist.spv diff --git a/tests/shader/clear.comp b/tests/shader/clear.comp deleted file mode 100644 index 62a5fb2..0000000 --- a/tests/shader/clear.comp +++ /dev/null @@ -1,26 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense - -// Clear a buffer. - -#version 450 - -layout(local_size_x = 256) in; - -// This should probably be uniform rather than readonly, -// but we haven't done the binding work yet. -layout(binding = 0) readonly buffer ConfigBuf { - // size is in uint (4 byte) units - uint size; - uint value; -}; - -layout(binding = 1) buffer TargetBuf { - uint[] data; -}; - -void main() { - uint ix = gl_GlobalInvocationID.x; - if (ix < size) { - data[ix] = value; - } -} diff --git a/tests/shader/linkedlist.comp b/tests/shader/linkedlist.comp deleted file mode 100644 index 87e051b..0000000 --- a/tests/shader/linkedlist.comp +++ /dev/null @@ -1,31 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense - -// Linked list building. 
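-// Each invocation walks an xorshift sequence of pseudo-random buckets, pushing a -// two-word node [next pointer, invocation id] onto each chosen bucket's list; nodes -// are allocated with a bump counter at mem[0] and linked via atomicExchange on the head.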
- -#version 450 - -#define N_BUCKETS 65536 -#define N_ITER 100 - -layout(local_size_x = 256, local_size_y = 1) in; - -layout(set = 0, binding = 0) buffer MemBuf { - uint[] mem; -}; - -void main() { - uint rng = gl_GlobalInvocationID.x + 1; - for (uint i = 0; i < N_ITER; i++) { - // xorshift32 - rng ^= rng << 13; - rng ^= rng >> 17; - rng ^= rng << 5; - uint bucket = rng % N_BUCKETS; - if (bucket != 0) { - uint alloc = atomicAdd(mem[0], 2) + N_BUCKETS; - uint old = atomicExchange(mem[bucket], alloc); - mem[alloc] = old; - mem[alloc + 1] = gl_GlobalInvocationID.x; - } - } -} diff --git a/tests/shader/message_passing.comp b/tests/shader/message_passing.comp deleted file mode 100644 index e5e53b6..0000000 --- a/tests/shader/message_passing.comp +++ /dev/null @@ -1,60 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense - -// Our version of the message passing atomic litmus test. - -#version 450 - -#extension GL_KHR_memory_scope_semantics : enable - -#ifdef VKMM -#pragma use_vulkan_memory_model -#define ACQUIRE gl_StorageSemanticsBuffer, gl_SemanticsAcquire -#define RELEASE gl_StorageSemanticsBuffer, gl_SemanticsRelease -#else -#define ACQUIRE 0, 0 -#define RELEASE 0, 0 -#endif - -layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in; - -struct Element -{ - uint data; - uint flag; -}; - -layout(binding = 0) buffer DataBuf -{ - Element data[]; -} data_buf; - -layout(binding = 1) buffer ControlBuf -{ - uint failures; -} control_buf; - -uint permute_flag_ix(uint data_ix) -{ - return (data_ix * 419u) & 65535u; -} - -void main() -{ - atomicStore(data_buf.data[gl_GlobalInvocationID.x].data, 1u, gl_ScopeDevice, 0, 0); -#ifndef VKMM - memoryBarrierBuffer(); -#endif - uint write_flag_ix = permute_flag_ix(gl_GlobalInvocationID.x); - atomicStore(data_buf.data[write_flag_ix].flag, 1u, gl_ScopeDevice, RELEASE); - uint read_ix = (gl_GlobalInvocationID.x * 4099u) & 65535u; - uint read_flag_ix = permute_flag_ix(read_ix); - uint flag = atomicLoad(data_buf.data[read_flag_ix].flag, gl_ScopeDevice, ACQUIRE); -#ifndef VKMM - memoryBarrierBuffer(); -#endif - uint data = atomicLoad(data_buf.data[read_ix].data, gl_ScopeDevice, 0, 0); - if (flag > data) - { - atomicAdd(control_buf.failures, 1u); - } -} diff --git a/tests/shader/prefix.comp b/tests/shader/prefix.comp deleted file mode 100644 index b41d1b1..0000000 --- a/tests/shader/prefix.comp +++ /dev/null @@ -1,226 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense - -// A prefix sum. -// -// This test builds in three configurations. The default is a -// compatibility mode, essentially plain GLSL. With ATOMIC set, the -// flag loads and stores are atomic operations, but uses barriers. -// With both ATOMIC and VKMM set, it uses acquire/release semantics -// instead of barriers. - -#version 450 - -#extension GL_KHR_memory_scope_semantics : enable - -#ifdef VKMM -#pragma use_vulkan_memory_model -#define ACQUIRE gl_StorageSemanticsBuffer, gl_SemanticsAcquire -#define RELEASE gl_StorageSemanticsBuffer, gl_SemanticsRelease -#else -#define ACQUIRE 0, 0 -#define RELEASE 0, 0 -#endif - -#define N_ROWS 16 -#define LG_WG_SIZE 9 -#define WG_SIZE (1 << LG_WG_SIZE) -#define PARTITION_SIZE (WG_SIZE * N_ROWS) - -layout(local_size_x = WG_SIZE, local_size_y = 1) in; - -struct Monoid { - uint element; -}; - -layout(set = 0, binding = 0) readonly buffer InBuf { - Monoid[] inbuf; -}; - -layout(set = 0, binding = 1) buffer OutBuf { - Monoid[] outbuf; -}; - -// These correspond to X, A, P respectively in the prefix sum paper. 
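-// FLAG_NOT_READY: nothing has been published for this partition yet. -// FLAG_AGGREGATE_READY: the partition's local aggregate is available. -// FLAG_PREFIX_READY: the partition's inclusive prefix is available.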
-#define FLAG_NOT_READY 0u -#define FLAG_AGGREGATE_READY 1u -#define FLAG_PREFIX_READY 2u - -struct State { - uint flag; - Monoid aggregate; - Monoid prefix; -}; - -// Perhaps this should be "nonprivate" with VKMM -layout(set = 0, binding = 2) volatile buffer StateBuf { - uint part_counter; - State[] state; -}; - -shared Monoid sh_scratch[WG_SIZE]; - -Monoid combine_monoid(Monoid a, Monoid b) { - return Monoid(a.element + b.element); -} - -shared uint sh_part_ix; -shared Monoid sh_prefix; -shared uint sh_flag; - -void main() { - Monoid local[N_ROWS]; - // Determine partition to process by atomic counter (described in Section - // 4.4 of prefix sum paper). - if (gl_LocalInvocationID.x == 0) { - sh_part_ix = atomicAdd(part_counter, 1); - } - barrier(); - uint part_ix = sh_part_ix; - - uint ix = part_ix * PARTITION_SIZE + gl_LocalInvocationID.x * N_ROWS; - - // TODO: gate buffer read? (evaluate whether shader check or - // CPU-side padding is better) - local[0] = inbuf[ix]; - for (uint i = 1; i < N_ROWS; i++) { - local[i] = combine_monoid(local[i - 1], inbuf[ix + i]); - } - Monoid agg = local[N_ROWS - 1]; - sh_scratch[gl_LocalInvocationID.x] = agg; - for (uint i = 0; i < LG_WG_SIZE; i++) { - barrier(); - if (gl_LocalInvocationID.x >= (1u << i)) { - Monoid other = sh_scratch[gl_LocalInvocationID.x - (1u << i)]; - agg = combine_monoid(other, agg); - } - barrier(); - sh_scratch[gl_LocalInvocationID.x] = agg; - } - - // Publish aggregate for this partition - if (gl_LocalInvocationID.x == WG_SIZE - 1) { - state[part_ix].aggregate = agg; - if (part_ix == 0) { - state[0].prefix = agg; - } - } - // Write flag with release semantics; this is done portably with a barrier. -#ifndef VKMM - memoryBarrierBuffer(); -#endif - if (gl_LocalInvocationID.x == WG_SIZE - 1) { - uint flag = FLAG_AGGREGATE_READY; - if (part_ix == 0) { - flag = FLAG_PREFIX_READY; - } -#ifdef ATOMIC - atomicStore(state[part_ix].flag, flag, gl_ScopeDevice, RELEASE); -#else - state[part_ix].flag = flag; -#endif - } - - Monoid exclusive = Monoid(0); - if (part_ix != 0) { - // step 4 of paper: decoupled lookback - uint look_back_ix = part_ix - 1; - - Monoid their_agg; - uint their_ix = 0; - while (true) { - // Read flag with acquire semantics. - if (gl_LocalInvocationID.x == WG_SIZE - 1) { -#ifdef ATOMIC - sh_flag = atomicLoad(state[look_back_ix].flag, gl_ScopeDevice, ACQUIRE); -#else - sh_flag = state[look_back_ix].flag; -#endif - } - // The flag load is done only in the last thread. However, because the - // translation of memoryBarrierBuffer to Metal requires uniform control - // flow, we broadcast it to all threads. - barrier(); -#ifndef VKMM - memoryBarrierBuffer(); -#endif - uint flag = sh_flag; - barrier(); - - if (flag == FLAG_PREFIX_READY) { - if (gl_LocalInvocationID.x == WG_SIZE - 1) { - Monoid their_prefix = state[look_back_ix].prefix; - exclusive = combine_monoid(their_prefix, exclusive); - } - break; - } else if (flag == FLAG_AGGREGATE_READY) { - if (gl_LocalInvocationID.x == WG_SIZE - 1) { - their_agg = state[look_back_ix].aggregate; - exclusive = combine_monoid(their_agg, exclusive); - } - look_back_ix--; - their_ix = 0; - continue; - } - // else spin - - if (gl_LocalInvocationID.x == WG_SIZE - 1) { - // Unfortunately there's no guarantee of forward progress of other - // workgroups, so compute a bit of the aggregate before trying again. - // In the worst case, spinning stops when the aggregate is complete. 
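- // If this fallback walk reaches partition 0, the exclusive prefix is complete; - // sh_flag is then set to FLAG_PREFIX_READY so the look-back loop exits.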
- Monoid m = inbuf[look_back_ix * PARTITION_SIZE + their_ix]; - if (their_ix == 0) { - their_agg = m; - } else { - their_agg = combine_monoid(their_agg, m); - } - their_ix++; - if (their_ix == PARTITION_SIZE) { - exclusive = combine_monoid(their_agg, exclusive); - if (look_back_ix == 0) { - sh_flag = FLAG_PREFIX_READY; - } else { - look_back_ix--; - their_ix = 0; - } - } - } - barrier(); - flag = sh_flag; - barrier(); - if (flag == FLAG_PREFIX_READY) { - break; - } - } - // step 5 of paper: compute inclusive prefix - if (gl_LocalInvocationID.x == WG_SIZE - 1) { - Monoid inclusive_prefix = combine_monoid(exclusive, agg); - sh_prefix = exclusive; - state[part_ix].prefix = inclusive_prefix; - } -#ifndef VKMM - memoryBarrierBuffer(); -#endif - if (gl_LocalInvocationID.x == WG_SIZE - 1) { -#ifdef ATOMIC - atomicStore(state[part_ix].flag, FLAG_PREFIX_READY, gl_ScopeDevice, RELEASE); -#else - state[part_ix].flag = FLAG_PREFIX_READY; -#endif - } - } - barrier(); - if (part_ix != 0) { - exclusive = sh_prefix; - } - - Monoid row = exclusive; - if (gl_LocalInvocationID.x > 0) { - Monoid other = sh_scratch[gl_LocalInvocationID.x - 1]; - row = combine_monoid(row, other); - } - for (uint i = 0; i < N_ROWS; i++) { - Monoid m = combine_monoid(row, local[i]); - // Make sure buffer allocation is padded appropriately. - outbuf[ix + i] = m; - } -} diff --git a/tests/shader/prefix_reduce.comp b/tests/shader/prefix_reduce.comp deleted file mode 100644 index 36750e9..0000000 --- a/tests/shader/prefix_reduce.comp +++ /dev/null @@ -1,53 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense - -// The reduction phase for prefix sum implemented as a tree reduction. - -#version 450 - -#define N_ROWS 8 -#define LG_WG_SIZE 9 -#define WG_SIZE (1 << LG_WG_SIZE) -#define PARTITION_SIZE (WG_SIZE * N_ROWS) - -layout(local_size_x = WG_SIZE, local_size_y = 1) in; - -struct Monoid { - uint element; -}; - -layout(set = 0, binding = 0) readonly buffer InBuf { - Monoid[] inbuf; -}; - -layout(set = 0, binding = 1) buffer OutBuf { - Monoid[] outbuf; -}; - -shared Monoid sh_scratch[WG_SIZE]; - -Monoid combine_monoid(Monoid a, Monoid b) { - return Monoid(a.element + b.element); -} - -void main() { - uint ix = gl_GlobalInvocationID.x * N_ROWS; - // TODO: gate buffer read - Monoid agg = inbuf[ix]; - for (uint i = 1; i < N_ROWS; i++) { - agg = combine_monoid(agg, inbuf[ix + i]); - } - sh_scratch[gl_LocalInvocationID.x] = agg; - for (uint i = 0; i < LG_WG_SIZE; i++) { - barrier(); - // We could make this predicate tighter, but would it help? - if (gl_LocalInvocationID.x + (1u << i) < WG_SIZE) { - Monoid other = sh_scratch[gl_LocalInvocationID.x + (1u << i)]; - agg = combine_monoid(agg, other); - } - barrier(); - sh_scratch[gl_LocalInvocationID.x] = agg; - } - if (gl_LocalInvocationID.x == 0) { - outbuf[gl_WorkGroupID.x] = agg; - } -} diff --git a/tests/shader/prefix_scan.comp b/tests/shader/prefix_scan.comp deleted file mode 100644 index 82ac847..0000000 --- a/tests/shader/prefix_scan.comp +++ /dev/null @@ -1,77 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 OR MIT OR Unlicense - -// A scan for a tree reduction prefix scan (either root or not, by ifdef). 
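-// -// With ROOT defined, the workgroup scans the root of the reduction tree in place; -// otherwise it also adds the exclusive prefix of its partition, read from the -// parent buffer produced by the reduce stage.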
- -#version 450 - -#define N_ROWS 8 -#define LG_WG_SIZE 9 -#define WG_SIZE (1 << LG_WG_SIZE) -#define PARTITION_SIZE (WG_SIZE * N_ROWS) - -layout(local_size_x = WG_SIZE, local_size_y = 1) in; - -struct Monoid { - uint element; -}; - -layout(set = 0, binding = 0) buffer DataBuf { - Monoid[] data; -}; - -#ifndef ROOT -layout(set = 0, binding = 1) readonly buffer ParentBuf { - Monoid[] parent; -}; -#endif - -shared Monoid sh_scratch[WG_SIZE]; - -Monoid combine_monoid(Monoid a, Monoid b) { - return Monoid(a.element + b.element); -} - -void main() { - Monoid local[N_ROWS]; - - uint ix = gl_GlobalInvocationID.x * N_ROWS; - - // TODO: gate buffer read - local[0] = data[ix]; - for (uint i = 1; i < N_ROWS; i++) { - local[i] = combine_monoid(local[i - 1], data[ix + i]); - } - Monoid agg = local[N_ROWS - 1]; - sh_scratch[gl_LocalInvocationID.x] = agg; - for (uint i = 0; i < LG_WG_SIZE; i++) { - barrier(); - if (gl_LocalInvocationID.x >= (1u << i)) { - Monoid other = sh_scratch[gl_LocalInvocationID.x - (1u << i)]; - agg = combine_monoid(other, agg); - } - barrier(); - sh_scratch[gl_LocalInvocationID.x] = agg; - } - - barrier(); - // This could be a semigroup instead of a monoid if we reworked the - // conditional logic, but that might impact performance. - Monoid row = Monoid(0); -#ifdef ROOT - if (gl_LocalInvocationID.x > 0) { - row = sh_scratch[gl_LocalInvocationID.x - 1]; - } -#else - if (gl_WorkGroupID.x > 0) { - row = parent[gl_WorkGroupID.x - 1]; - } - if (gl_LocalInvocationID.x > 0) { - row = combine_monoid(row, sh_scratch[gl_LocalInvocationID.x - 1]); - } -#endif - for (uint i = 0; i < N_ROWS; i++) { - Monoid m = combine_monoid(row, local[i]); - // TODO: gate buffer write - data[ix + i] = m; - } -} diff --git a/tests/src/clear.rs b/tests/src/clear.rs deleted file mode 100644 index af4b8ea..0000000 --- a/tests/src/clear.rs +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2021 The piet-gpu authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Also licensed under MIT license, at your choice. - -//! Utilities (and a benchmark) for clearing buffers with compute shaders. - -use piet_gpu_hal::{include_shader, BindType, BufferUsage, ComputePass, DescriptorSet}; -use piet_gpu_hal::{Buffer, Pipeline}; - -use crate::config::Config; -use crate::runner::Runner; -use crate::test_result::TestResult; - -const WG_SIZE: u64 = 256; - -/// The shader code for clearing buffers. -pub struct ClearCode { - pipeline: Pipeline, -} - -/// The stage resources for clearing buffers. -pub struct ClearStage { - n_elements: u64, - config_buf: Buffer, -} - -/// The binding for clearing buffers. 
-pub struct ClearBinding { - descriptor_set: DescriptorSet, -} - -pub unsafe fn run_clear_test(runner: &mut Runner, config: &Config) -> TestResult { - let mut result = TestResult::new("clear buffers"); - let n_elements: u64 = config.size.choose(1 << 12, 1 << 20, 1 << 24); - let out_buf = runner.buf_down(n_elements * 4, BufferUsage::empty()); - let code = ClearCode::new(runner); - let stage = ClearStage::new_with_value(runner, n_elements, 0x42); - let binding = stage.bind(runner, &code, &out_buf.dev_buf); - let n_iter = config.n_iter; - let mut total_elapsed = 0.0; - for i in 0..n_iter { - let mut commands = runner.commands(); - let mut pass = commands.compute_pass(0, 1); - stage.record(&mut pass, &code, &binding); - pass.end(); - if i == 0 || config.verify_all { - commands.cmd_buf.memory_barrier(); - commands.download(&out_buf); - } - total_elapsed += runner.submit(commands); - if i == 0 || config.verify_all { - let dst = out_buf.map_read(..); - if let Some(failure) = verify(dst.cast_slice()) { - result.fail(format!("failure at {}", failure)); - } - } - } - result.timing(total_elapsed, n_elements * n_iter); - result -} - -impl ClearCode { - pub unsafe fn new(runner: &mut Runner) -> ClearCode { - let code = include_shader!(&runner.session, "../shader/gen/clear"); - let pipeline = runner - .session - .create_compute_pipeline(code, &[BindType::BufReadOnly, BindType::Buffer]) - .unwrap(); - ClearCode { pipeline } - } -} - -impl ClearStage { - pub unsafe fn new_with_value(runner: &mut Runner, n_elements: u64, value: u32) -> ClearStage { - let config = [n_elements as u32, value]; - let config_buf = runner - .session - .create_buffer_init(&config, BufferUsage::STORAGE) - .unwrap(); - ClearStage { - n_elements, - config_buf, - } - } - - pub unsafe fn bind( - &self, - runner: &mut Runner, - code: &ClearCode, - out_buf: &Buffer, - ) -> ClearBinding { - let descriptor_set = runner - .session - .create_simple_descriptor_set(&code.pipeline, &[&self.config_buf, out_buf]) - .unwrap(); - ClearBinding { descriptor_set } - } - - pub unsafe fn record(&self, pass: &mut ComputePass, code: &ClearCode, bindings: &ClearBinding) { - let n_workgroups = (self.n_elements + WG_SIZE - 1) / WG_SIZE; - // An issue: for clearing large buffers (>16M), we need to check the - // number of workgroups against the (dynamically detected) limit, and - // potentially issue multiple dispatches. - pass.dispatch( - &code.pipeline, - &bindings.descriptor_set, - (n_workgroups as u32, 1, 1), - (WG_SIZE as u32, 1, 1), - ); - // One thing that's missing here is registering the buffers so - // they can be safely dropped by Rust code before the execution - // of the command buffer completes. - } -} - -// Verify that the data is cleared. -fn verify(data: &[u32]) -> Option { - data.iter().position(|val| *val != 0x42) -} diff --git a/tests/src/clip.rs b/tests/src/clip.rs deleted file mode 100644 index b1f8613..0000000 --- a/tests/src/clip.rs +++ /dev/null @@ -1,236 +0,0 @@ -// Copyright 2022 The piet-gpu authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// -// Also licensed under MIT license, at your choice. - -//! Tests for the piet-gpu clip processing stage. - -use bytemuck::{Pod, Zeroable}; -use rand::Rng; - -use piet_gpu::stages::{self, ClipBinding, ClipCode, DrawMonoid}; -use piet_gpu_hal::{BufWrite, BufferUsage}; - -use crate::{Config, Runner, TestResult}; - -struct ClipData { - clip_stream: Vec, - // In the atomic-int friendly encoding - path_bbox_stream: Vec, -} - -#[derive(Copy, Clone, Debug, Pod, Zeroable, Default)] -#[repr(C)] -struct PathBbox { - bbox: [u32; 4], - linewidth: f32, - trans_ix: u32, -} - -pub unsafe fn clip_test(runner: &mut Runner, config: &Config) -> TestResult { - let mut result = TestResult::new("clip"); - let n_clip: u64 = config.size.choose(1 << 8, 1 << 12, 1 << 16); - let data = ClipData::new(n_clip); - let stage_config = data.get_config(); - let config_buf = runner - .session - .create_buffer_init(std::slice::from_ref(&stage_config), BufferUsage::STORAGE) - .unwrap(); - // Need to actually get data uploaded - let mut memory = runner.buf_down(data.memory_size(), BufferUsage::STORAGE); - { - let mut buf_write = memory.map_write(..); - data.fill_memory(&mut buf_write); - } - - let code = ClipCode::new(&runner.session); - let binding = ClipBinding::new(&runner.session, &code, &config_buf, &memory.dev_buf); - - let mut commands = runner.commands(); - commands.upload(&memory); - let mut pass = commands.compute_pass(0, 1); - binding.record(&mut pass, &code, n_clip as u32); - pass.end(); - commands.download(&memory); - runner.submit(commands); - let dst = memory.map_read(..); - if let Some(failure) = data.verify(&dst) { - result.fail(failure); - } - result -} - -fn rand_bbox() -> [u32; 4] { - let mut rng = rand::thread_rng(); - const Y_MIN: u32 = 32768; - const Y_MAX: u32 = Y_MIN + 1000; - let mut x0 = rng.gen_range(Y_MIN, Y_MAX); - let mut y0 = rng.gen_range(Y_MIN, Y_MAX); - let mut x1 = rng.gen_range(Y_MIN, Y_MAX); - let mut y1 = rng.gen_range(Y_MIN, Y_MAX); - if x0 > x1 { - std::mem::swap(&mut x0, &mut x1); - } - if y0 > y1 { - std::mem::swap(&mut y0, &mut y1); - } - [x0, y0, x1, y1] -} - -/// Convert from atomic-friendly to normal float bbox. 
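-/// Raw coordinates carry a +32768 bias (presumably so the shader can update them -/// with unsigned atomic min/max); decoding subtracts that bias.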
-fn decode_bbox(raw: [u32; 4]) -> [f32; 4] { - fn decode(x: u32) -> f32 { - x as f32 - 32768.0 - } - [ - decode(raw[0]), - decode(raw[1]), - decode(raw[2]), - decode(raw[3]), - ] -} - -fn intersect_bbox(b0: [f32; 4], b1: [f32; 4]) -> [f32; 4] { - [ - b0[0].max(b1[0]), - b0[1].max(b1[1]), - b0[2].min(b1[2]), - b0[3].min(b1[3]), - ] -} - -const INFTY_BBOX: [f32; 4] = [-1e9, -1e9, 1e9, 1e9]; - -impl ClipData { - /// Generate a random clip sequence - fn new(n: u64) -> ClipData { - // Simple LCG random generator, for deterministic results - let mut z = 20170705u64; - let mut depth = 0; - let mut path_bbox_stream = Vec::new(); - let clip_stream = (0..n) - .map(|i| { - let is_push = if depth == 0 { - true - } else if depth >= 255 { - false - } else { - z = z.wrapping_mul(742938285) % ((1 << 31) - 1); - (z % 2) != 0 - }; - if is_push { - depth += 1; - let path_ix = path_bbox_stream.len() as u32; - let bbox = rand_bbox(); - let path_bbox = PathBbox { - bbox, - ..Default::default() - }; - path_bbox_stream.push(path_bbox); - path_ix - } else { - depth -= 1; - !(i as u32) - } - }) - .collect(); - ClipData { - clip_stream, - path_bbox_stream, - } - } - - fn get_config(&self) -> stages::Config { - let n_clip = self.clip_stream.len(); - let n_path = self.path_bbox_stream.len(); - let clip_alloc = 0; - let path_bbox_alloc = clip_alloc + 4 * n_clip; - let drawmonoid_alloc = path_bbox_alloc + 24 * n_path; - let clip_bic_alloc = drawmonoid_alloc + 8 * n_clip; - // TODO: this is over-allocated, we only need one bic per wg - let clip_stack_alloc = clip_bic_alloc + 8 * n_clip; - let clip_bbox_alloc = clip_stack_alloc + 20 * n_clip; - stages::Config { - clip_alloc: clip_alloc as u32, - path_bbox_alloc: path_bbox_alloc as u32, - drawmonoid_alloc: drawmonoid_alloc as u32, - clip_bic_alloc: clip_bic_alloc as u32, - clip_stack_alloc: clip_stack_alloc as u32, - clip_bbox_alloc: clip_bbox_alloc as u32, - n_clip: n_clip as u32, - ..Default::default() - } - } - - fn memory_size(&self) -> u64 { - (8 + self.clip_stream.len() * (4 + 8 + 8 + 20 + 16) + self.path_bbox_stream.len() * 24) - as u64 - } - - fn fill_memory(&self, buf: &mut BufWrite) { - // offset / header; no dynamic allocation - buf.fill_zero(8); - buf.extend_slice(&self.clip_stream); - buf.extend_slice(&self.path_bbox_stream); - // drawmonoid is left uninitialized - } - - fn verify(&self, buf: &[u8]) -> Option { - let n_clip = self.clip_stream.len(); - let n_path = self.path_bbox_stream.len(); - let clip_bbox_start = 8 + n_clip * (4 + 8 + 8 + 20) + n_path * 24; - let clip_range = clip_bbox_start..(clip_bbox_start + n_clip * 16); - let clip_result = bytemuck::cast_slice::(&buf[clip_range]); - let draw_start = 8 + n_clip * 4 + n_path * 24; - let draw_range = draw_start..(draw_start + n_clip * 16); - let draw_result = bytemuck::cast_slice::(&buf[draw_range]); - let mut bbox_stack = Vec::new(); - let mut parent_stack = Vec::new(); - for (i, path_ix) in self.clip_stream.iter().enumerate() { - let mut expected_path = None; - if *path_ix >= 0x8000_0000 { - let parent = parent_stack.pop().unwrap(); - expected_path = Some(self.clip_stream[parent as usize]); - bbox_stack.pop().unwrap(); - } else { - parent_stack.push(i); - let path_bbox_stream = self.path_bbox_stream[*path_ix as usize]; - let bbox = decode_bbox(path_bbox_stream.bbox); - let new = match bbox_stack.last() { - None => bbox, - Some(old) => intersect_bbox(*old, bbox), - }; - bbox_stack.push(new); - }; - let expected = bbox_stack.last().copied().unwrap_or(INFTY_BBOX); - let clip_bbox = clip_result[i]; - if 
clip_bbox != expected { - return Some(format!( - "{}: path_ix={}, expected bbox={:?}, clip_bbox={:?}", - i, path_ix, expected, clip_bbox - )); - } - if let Some(expected_path) = expected_path { - let actual_path = draw_result[i].path_ix; - if expected_path != actual_path { - return Some(format!( - "{}: expected path {}, actual {}", - i, expected_path, actual_path - )); - } - } - } - None - } -} diff --git a/tests/src/config.rs b/tests/src/config.rs deleted file mode 100644 index 2593ed9..0000000 --- a/tests/src/config.rs +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2021 The piet-gpu authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Also licensed under MIT license, at your choice. - -//! Test config parameters. - -use clap::ArgMatches; - -pub struct Config { - pub groups: Groups, - pub size: Size, - pub n_iter: u64, - pub verify_all: bool, -} - -pub struct Groups(String); - -pub enum Size { - Small, - Medium, - Large, -} - -impl Config { - pub fn from_matches(matches: &ArgMatches) -> Config { - let groups = Groups::from_str(matches.value_of("groups").unwrap_or("all")); - let size = Size::from_str(matches.value_of("size").unwrap_or("m")); - let n_iter = matches - .value_of("n_iter") - .and_then(|s| s.parse().ok()) - .unwrap_or(1000); - let verify_all = matches.is_present("verify_all"); - Config { - groups, - size, - n_iter, - verify_all, - } - } -} - -impl Groups { - pub fn from_str(s: &str) -> Groups { - Groups(s.to_string()) - } - - pub fn matches(&self, group_name: &str) -> bool { - self.0 == "all" || self.0 == group_name - } -} - -impl Size { - fn from_str(s: &str) -> Size { - if s == "small" || s == "s" { - Size::Small - } else if s == "large" || s == "l" { - Size::Large - } else { - Size::Medium - } - } - - pub fn choose(&self, small: T, medium: T, large: T) -> T { - match self { - Size::Small => small, - Size::Medium => medium, - Size::Large => large, - } - } -} diff --git a/tests/src/draw.rs b/tests/src/draw.rs deleted file mode 100644 index 907fadc..0000000 --- a/tests/src/draw.rs +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2021 The piet-gpu authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Also licensed under MIT license, at your choice. - -//! Tests for the piet-gpu draw object stage. 
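-//! -//! The test generates a random stream of draw tags and checks that the stage -//! produces the expected exclusive prefix sum of DrawMonoid values over that stream.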
- -use piet_gpu_hal::{BufWrite, BufferUsage}; -use rand::seq::SliceRandom; - -use crate::{Config, Runner, TestResult}; - -use piet_gpu::stages::{self, DrawCode, DrawMonoid, DrawStage}; - -const DRAWTAG_SIZE: usize = 4; -const ANNOTATED_SIZE: usize = 40; - -// Tags for draw objects. See shader/drawtag.h for the authoritative source. -const DRAWTAG_FILLCOLOR: u32 = 4; -const DRAWTAG_FILLLINGRADIENT: u32 = 20; -const DRAWTAG_FILLIMAGE: u32 = 8; -const DRAWTAG_BEGINCLIP: u32 = 5; -const DRAWTAG_ENDCLIP: u32 = 37; - -const TAGS: &[u32] = &[ - DRAWTAG_FILLCOLOR, - DRAWTAG_FILLLINGRADIENT, - DRAWTAG_FILLIMAGE, - DRAWTAG_BEGINCLIP, - DRAWTAG_ENDCLIP, -]; - -struct DrawTestData { - tags: Vec, -} - -pub unsafe fn draw_test(runner: &mut Runner, config: &Config) -> TestResult { - let mut result = TestResult::new("draw"); - // TODO: implement large scan and set large to 1 << 24 - let n_tag: u64 = config.size.choose(1 << 12, 1 << 20, 1 << 22); - let data = DrawTestData::new(n_tag); - let stage_config = data.get_config(); - - let config_buf = runner - .session - .create_buffer_init(std::slice::from_ref(&stage_config), BufferUsage::STORAGE) - .unwrap(); - let scene_size = n_tag * DRAWTAG_SIZE as u64; - let scene_buf = runner - .session - .create_buffer_with(scene_size, |b| data.fill_scene(b), BufferUsage::STORAGE) - .unwrap(); - let memory = runner.buf_down(data.memory_size(), BufferUsage::STORAGE); - - let code = DrawCode::new(&runner.session); - let stage = DrawStage::new(&runner.session, &code); - let binding = stage.bind( - &runner.session, - &code, - &config_buf, - &scene_buf, - &memory.dev_buf, - ); - - let mut total_elapsed = 0.0; - let n_iter = config.n_iter; - for i in 0..n_iter { - let mut commands = runner.commands(); - let mut pass = commands.compute_pass(0, 1); - stage.record(&mut pass, &code, &binding, n_tag); - pass.end(); - if i == 0 || config.verify_all { - commands.cmd_buf.memory_barrier(); - commands.download(&memory); - } - total_elapsed += runner.submit(commands); - if i == 0 || config.verify_all { - let dst = memory.map_read(..); - if let Some(failure) = data.verify(&dst) { - result.fail(failure); - } - } - } - let n_elements = n_tag; - result.timing(total_elapsed, n_elements * n_iter); - - result -} - -impl DrawTestData { - fn new(n: u64) -> DrawTestData { - let mut rng = rand::thread_rng(); - let tags = (0..n).map(|_| *TAGS.choose(&mut rng).unwrap()).collect(); - DrawTestData { tags } - } - - fn get_config(&self) -> stages::Config { - let n_tags = self.tags.len(); - - // Layout of memory - let drawmonoid_alloc = 0; - let anno_alloc = drawmonoid_alloc + 16 * n_tags; - let clip_alloc = anno_alloc + ANNOTATED_SIZE * n_tags; - let stage_config = stages::Config { - n_elements: n_tags as u32, - anno_alloc: anno_alloc as u32, - drawmonoid_alloc: drawmonoid_alloc as u32, - clip_alloc: clip_alloc as u32, - drawtag_offset: 0, - ..Default::default() - }; - stage_config - } - - fn memory_size(&self) -> u64 { - // Note: this overallocates the clip buf a bit - only needed for the - // total number of begin_clip and end_clip tags. - (8 + self.tags.len() * (16 + 4 + ANNOTATED_SIZE)) as u64 - } - - fn fill_scene(&self, buf: &mut BufWrite) { - buf.extend_slice(&self.tags); - } - - fn verify(&self, buf: &[u8]) -> Option { - let size = self.tags.len() * 16; - let actual = bytemuck::cast_slice::(&buf[8..8 + size]); - let mut expected = DrawMonoid::default(); - for (i, (tag, actual)) in self.tags.iter().zip(actual).enumerate() { - // Verify exclusive prefix sum. 
- let (path_ix, clip_ix) = Self::reduce_tag(*tag); - if *actual != expected { - println!("{:?} {:?}", actual, expected); - return Some(format!("draw mismatch at {}", i)); - } - expected.path_ix += path_ix; - expected.clip_ix += clip_ix; - expected.scene_offset += tag & 28; - } - None - } - - fn reduce_tag(tag: u32) -> (u32, u32) { - match tag { - DRAWTAG_FILLCOLOR | DRAWTAG_FILLLINGRADIENT | DRAWTAG_FILLIMAGE => (1, 0), - DRAWTAG_BEGINCLIP | DRAWTAG_ENDCLIP => (1, 1), - // TODO: ENDCLIP will become (0, 1) - _ => (0, 0), - } - } -} diff --git a/tests/src/linkedlist.rs b/tests/src/linkedlist.rs deleted file mode 100644 index e24adcb..0000000 --- a/tests/src/linkedlist.rs +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright 2021 The piet-gpu authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Also licensed under MIT license, at your choice. - -use piet_gpu_hal::{include_shader, BindType, BufferUsage, DescriptorSet}; -use piet_gpu_hal::{Buffer, Pipeline}; - -use crate::runner::{Commands, Runner}; -use crate::test_result::TestResult; -use crate::Config; - -const WG_SIZE: u64 = 256; -const N_BUCKETS: u64 = 65536; - -struct LinkedListCode { - pipeline: Pipeline, -} - -struct LinkedListStage; - -struct LinkedListBinding { - descriptor_set: DescriptorSet, -} - -pub unsafe fn run_linkedlist_test(runner: &mut Runner, config: &Config) -> TestResult { - let mut result = TestResult::new("linked list"); - let mem_buf = runner.buf_down(1024 * N_BUCKETS, BufferUsage::CLEAR); - let code = LinkedListCode::new(runner); - let stage = LinkedListStage::new(runner, &code, N_BUCKETS); - let binding = stage.bind(runner, &code, &mem_buf.dev_buf); - let n_iter = config.n_iter; - let mut total_elapsed = 0.0; - for i in 0..n_iter { - let mut commands = runner.commands(); - // Might clear only buckets to save time. 
- stage.record(&mut commands, &code, &binding, &mem_buf.dev_buf); - if i == 0 || config.verify_all { - commands.cmd_buf.memory_barrier(); - commands.download(&mem_buf); - } - total_elapsed += runner.submit(commands); - if i == 0 || config.verify_all { - let dst = mem_buf.map_read(..); - if !verify(dst.cast_slice()) { - result.fail("incorrect data"); - } - } - } - result.timing(total_elapsed, N_BUCKETS * 100 * n_iter); - result -} - -impl LinkedListCode { - unsafe fn new(runner: &mut Runner) -> LinkedListCode { - let code = include_shader!(&runner.session, "../shader/gen/linkedlist"); - let pipeline = runner - .session - .create_compute_pipeline(code, &[BindType::Buffer]) - .unwrap(); - LinkedListCode { pipeline } - } -} - -impl LinkedListStage { - unsafe fn new( - _runner: &mut Runner, - _code: &LinkedListCode, - _n_buckets: u64, - ) -> LinkedListStage { - LinkedListStage - } - - unsafe fn bind( - &self, - runner: &mut Runner, - code: &LinkedListCode, - mem_buf: &Buffer, - ) -> LinkedListBinding { - let descriptor_set = runner - .session - .create_simple_descriptor_set(&code.pipeline, &[mem_buf]) - .unwrap(); - LinkedListBinding { descriptor_set } - } - - unsafe fn record( - &self, - commands: &mut Commands, - code: &LinkedListCode, - bindings: &LinkedListBinding, - out_buf: &Buffer, - ) { - commands.cmd_buf.clear_buffer(out_buf, None); - commands.cmd_buf.memory_barrier(); - let n_workgroups = N_BUCKETS / WG_SIZE; - let mut pass = commands.compute_pass(0, 1); - pass.dispatch( - &code.pipeline, - &bindings.descriptor_set, - (n_workgroups as u32, 1, 1), - (WG_SIZE as u32, 1, 1), - ); - pass.end(); - } -} - -fn verify(data: &[u32]) -> bool { - let mut expected = (0..N_BUCKETS).map(|_| Vec::new()).collect::>(); - for ix in 0..N_BUCKETS { - let mut rng = ix as u32 + 1; - for _ in 0..100 { - // xorshift32 - rng ^= rng.wrapping_shl(13); - rng ^= rng.wrapping_shr(17); - rng ^= rng.wrapping_shl(5); - let bucket = rng % N_BUCKETS as u32; - if bucket != 0 { - expected[bucket as usize].push(ix as u32); - } - } - } - let mut actual = Vec::new(); - for (i, expected) in expected.iter_mut().enumerate().skip(1) { - actual.clear(); - let mut ptr = i; - loop { - let next = data[ptr] as usize; - if next == 0 { - break; - } - let val = data[next + 1]; - actual.push(val); - ptr = next; - } - actual.sort(); - expected.sort(); - if actual != *expected { - return false; - } - } - true -} diff --git a/tests/src/main.rs b/tests/src/main.rs deleted file mode 100644 index 5f72708..0000000 --- a/tests/src/main.rs +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2021 The piet-gpu authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Also licensed under MIT license, at your choice. - -//! Tests for piet-gpu shaders and GPU capabilities. 
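-//! -//! Each test returns a TestResult (pass/fail plus timing); the groups to run, the -//! problem size, and the iteration count are selected by command-line flags (see README.md).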
- -mod clear; -mod clip; -mod config; -mod draw; -mod linkedlist; -mod message_passing; -mod prefix; -mod prefix_tree; -mod runner; -mod test_result; - -#[cfg(feature = "piet-gpu")] -mod path; - -use clap::{App, Arg}; -use piet_gpu_hal::InstanceFlags; - -use crate::config::Config; -pub use crate::runner::Runner; -use crate::test_result::ReportStyle; -pub use crate::test_result::TestResult; - -fn main() { - let matches = App::new("piet-gpu-tests") - .arg( - Arg::with_name("verbose") - .short('v') - .long("verbose") - .help("Verbose reporting of results"), - ) - .arg( - Arg::with_name("groups") - .short('g') - .long("groups") - .help("Groups to run") - .takes_value(true), - ) - .arg( - Arg::with_name("size") - .short('s') - .long("size") - .help("Size of tests") - .takes_value(true), - ) - .arg( - Arg::with_name("n_iter") - .short('n') - .long("n_iter") - .help("Number of iterations") - .takes_value(true), - ) - .arg( - Arg::with_name("verify_all") - .long("verify_all") - .help("Verify all iterations"), - ) - .arg( - Arg::with_name("dx12") - .long("dx12") - .help("Prefer DX12 backend"), - ) - .get_matches(); - let style = if matches.is_present("verbose") { - ReportStyle::Verbose - } else { - ReportStyle::Short - }; - let config = Config::from_matches(&matches); - unsafe { - let report = |test_result: &TestResult| { - test_result.report(style); - }; - let mut flags = InstanceFlags::empty(); - if matches.is_present("dx12") { - flags |= InstanceFlags::DX12; - } - let mut runner = Runner::new(flags); - if style == ReportStyle::Verbose { - // TODO: get adapter name in here too - println!("Backend: {:?}", runner.backend_type()); - } - report(&clear::run_clear_test(&mut runner, &config)); - if config.groups.matches("prefix") { - report(&prefix::run_prefix_test( - &mut runner, - &config, - prefix::Variant::Compatibility, - )); - report(&prefix::run_prefix_test( - &mut runner, - &config, - prefix::Variant::Atomic, - )); - if runner.session.gpu_info().has_memory_model { - report(&prefix::run_prefix_test( - &mut runner, - &config, - prefix::Variant::Vkmm, - )); - } - report(&prefix_tree::run_prefix_test(&mut runner, &config)); - } - if config.groups.matches("atomic") { - report(&message_passing::run_message_passing_test( - &mut runner, - &config, - message_passing::Variant::Atomic, - )); - if runner.session.gpu_info().has_memory_model { - report(&message_passing::run_message_passing_test( - &mut runner, - &config, - message_passing::Variant::Vkmm, - )); - } - report(&linkedlist::run_linkedlist_test(&mut runner, &config)); - } - #[cfg(feature = "piet-gpu")] - if config.groups.matches("piet") { - report(&path::path_test(&mut runner, &config)); - report(&draw::draw_test(&mut runner, &config)); - report(&clip::clip_test(&mut runner, &config)); - } - } -} diff --git a/tests/src/message_passing.rs b/tests/src/message_passing.rs deleted file mode 100644 index 39e71dc..0000000 --- a/tests/src/message_passing.rs +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2021 The piet-gpu authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. -// -// Also licensed under MIT license, at your choice. - -use piet_gpu_hal::{include_shader, BindType, BufferUsage, DescriptorSet, ShaderCode}; -use piet_gpu_hal::{Buffer, Pipeline}; - -use crate::config::Config; -use crate::runner::{Commands, Runner}; -use crate::test_result::TestResult; - -const N_ELEMENTS: u64 = 65536; - -/// The shader code forMessagePassing sum example. -struct MessagePassingCode { - pipeline: Pipeline, -} - -/// The stage resources for the prefix sum example. -struct MessagePassingStage { - data_buf: Buffer, -} - -/// The binding for the prefix sum example. -struct MessagePassingBinding { - descriptor_set: DescriptorSet, -} - -#[derive(Debug)] -pub enum Variant { - Atomic, - Vkmm, -} - -pub unsafe fn run_message_passing_test( - runner: &mut Runner, - config: &Config, - variant: Variant, -) -> TestResult { - let mut result = TestResult::new(format!("message passing litmus, {:?}", variant)); - let out_buf = runner.buf_down(4, BufferUsage::CLEAR); - let code = MessagePassingCode::new(runner, variant); - let stage = MessagePassingStage::new(runner); - let binding = stage.bind(runner, &code, &out_buf.dev_buf); - let n_iter = config.n_iter; - let mut total_elapsed = 0.0; - let mut failures = 0; - for _ in 0..n_iter { - let mut commands = runner.commands(); - stage.record(&mut commands, &code, &binding, &out_buf.dev_buf); - commands.cmd_buf.memory_barrier(); - commands.download(&out_buf); - total_elapsed += runner.submit(commands); - let mut dst: Vec = Default::default(); - out_buf.read(&mut dst); - failures += dst[0]; - } - if failures > 0 { - result.fail(format!("{} failures", failures)); - } - result.timing(total_elapsed, N_ELEMENTS * n_iter); - result -} - -impl MessagePassingCode { - unsafe fn new(runner: &mut Runner, variant: Variant) -> MessagePassingCode { - let code = match variant { - Variant::Atomic => include_shader!(&runner.session, "../shader/gen/message_passing"), - Variant::Vkmm => { - ShaderCode::Spv(include_bytes!("../shader/gen/message_passing_vkmm.spv")) - } - }; - let pipeline = runner - .session - .create_compute_pipeline(code, &[BindType::Buffer, BindType::Buffer]) - .unwrap(); - MessagePassingCode { pipeline } - } -} - -impl MessagePassingStage { - unsafe fn new(runner: &mut Runner) -> MessagePassingStage { - let data_buf_size = 8 * N_ELEMENTS; - let data_buf = runner - .session - .create_buffer( - data_buf_size, - BufferUsage::STORAGE | BufferUsage::COPY_DST | BufferUsage::CLEAR, - ) - .unwrap(); - MessagePassingStage { data_buf } - } - - unsafe fn bind( - &self, - runner: &mut Runner, - code: &MessagePassingCode, - out_buf: &Buffer, - ) -> MessagePassingBinding { - let descriptor_set = runner - .session - .create_simple_descriptor_set(&code.pipeline, &[&self.data_buf, out_buf]) - .unwrap(); - MessagePassingBinding { descriptor_set } - } - - unsafe fn record( - &self, - commands: &mut Commands, - code: &MessagePassingCode, - bindings: &MessagePassingBinding, - out_buf: &Buffer, - ) { - commands.cmd_buf.clear_buffer(&self.data_buf, None); - commands.cmd_buf.clear_buffer(out_buf, None); - commands.cmd_buf.memory_barrier(); - let mut pass = commands.compute_pass(0, 1); - pass.dispatch( - &code.pipeline, - &bindings.descriptor_set, - (256, 1, 1), - (256, 1, 1), - ); - pass.end(); - } -} diff --git a/tests/src/path.rs b/tests/src/path.rs deleted file mode 100644 index 1a933d0..0000000 --- a/tests/src/path.rs +++ /dev/null @@ -1,293 +0,0 @@ -// 
Copyright 2021 The piet-gpu authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Also licensed under MIT license, at your choice. - -//! Tests for the piet-gpu path stage. - -use crate::{Config, Runner, TestResult}; - -use bytemuck::{Pod, Zeroable}; -use piet_gpu::stages::{self, PathCode, PathEncoder, PathStage, Transform}; -use piet_gpu_hal::{BufWrite, BufferUsage}; -use rand::{prelude::ThreadRng, Rng}; - -struct PathData { - n_trans: u32, - n_linewidth: u32, - n_path: u32, - n_pathseg: u32, - tags: Vec, - pathsegs: Vec, - bbox: Vec<(f32, f32, f32, f32)>, - lines: Vec<([f32; 2], [f32; 2])>, -} - -// This is designed to match pathseg.h -#[repr(C)] -#[derive(Clone, Copy, Debug, Default, Zeroable, Pod)] -struct PathSeg { - tag: u32, - p0: [f32; 2], - p1: [f32; 2], - p2: [f32; 2], - p3: [f32; 2], - path_ix: u32, - trans_ix: u32, - stroke: [f32; 2], -} - -#[repr(C)] -#[derive(Clone, Copy, Debug, Default, PartialEq, Zeroable, Pod)] -struct Bbox { - left: u32, - top: u32, - right: u32, - bottom: u32, - linewidth: f32, - trans_ix: u32, -} - -pub unsafe fn path_test(runner: &mut Runner, config: &Config) -> TestResult { - let mut result = TestResult::new("path"); - - // TODO: implement large scans and raise limit - let n_path: u64 = config.size.choose(1 << 12, 1 << 16, 209_000); - let path_data = PathData::new(n_path as u32); - let stage_config = path_data.get_config(); - let config_buf = runner - .session - .create_buffer_init(std::slice::from_ref(&stage_config), BufferUsage::STORAGE) - .unwrap(); - let scene_size = n_path * 256; - let scene_buf = runner - .session - .create_buffer_with( - scene_size, - |b| path_data.fill_scene(b), - BufferUsage::STORAGE, - ) - .unwrap(); - let memory_init = runner - .session - .create_buffer_with( - path_data.memory_init_size(), - |b| path_data.fill_memory(b), - BufferUsage::COPY_SRC, - ) - .unwrap(); - let memory = runner.buf_down(path_data.memory_full_size(), BufferUsage::empty()); - - let code = PathCode::new(&runner.session); - let stage = PathStage::new(&runner.session, &code); - let binding = stage.bind( - &runner.session, - &code, - &config_buf, - &scene_buf, - &memory.dev_buf, - ); - - let mut total_elapsed = 0.0; - let n_iter = config.n_iter; - for i in 0..n_iter { - let mut commands = runner.commands(); - commands.cmd_buf.copy_buffer(&memory_init, &memory.dev_buf); - commands.cmd_buf.memory_barrier(); - let mut pass = commands.compute_pass(0, 1); - stage.record( - &mut pass, - &code, - &binding, - path_data.n_path, - path_data.tags.len() as u32, - ); - pass.end(); - if i == 0 || config.verify_all { - commands.cmd_buf.memory_barrier(); - commands.download(&memory); - } - total_elapsed += runner.submit(commands); - if i == 0 || config.verify_all { - let dst = memory.map_read(..); - if let Some(failure) = path_data.verify(&dst) { - result.fail(failure); - } - } - } - let n_elements = path_data.n_pathseg as u64; - result.timing(total_elapsed, n_elements * n_iter); - - result -} - -fn rand_point(rng: &mut ThreadRng) -> (f32, f32) { - let 
x = rng.gen_range(0.0, 100.0); - let y = rng.gen_range(0.0, 100.0); - (x, y) -} - -// Must match shader/pathseg.h -const PATHSEG_SIZE: u32 = 52; - -impl PathData { - fn new(n_path: u32) -> PathData { - let mut rng = rand::thread_rng(); - let n_trans = 1; - let n_linewidth = 1; - let segments_per_path = 8; - let mut tags = Vec::new(); - let mut pathsegs = Vec::new(); - let mut bbox = Vec::new(); - let mut lines = Vec::new(); - let mut encoder = PathEncoder::new(&mut tags, &mut pathsegs); - for _ in 0..n_path { - let (x, y) = rand_point(&mut rng); - let mut min_x = x; - let mut max_x = x; - let mut min_y = y; - let mut max_y = y; - let first_pt = [x, y]; - let mut last_pt = [x, y]; - encoder.move_to(x, y); - for _ in 0..segments_per_path { - let (x, y) = rand_point(&mut rng); - lines.push((last_pt, [x, y])); - last_pt = [x, y]; - encoder.line_to(x, y); - min_x = min_x.min(x); - max_x = max_x.max(x); - min_y = min_y.min(y); - max_y = max_y.max(y); - } - bbox.push((min_x, min_y, max_x, max_y)); - encoder.close_path(); - // With very low probability last_pt and first_pt might be equal, which - // would cause a test failure - might want to seed RNG. - lines.push((last_pt, first_pt)); - encoder.path(); - } - let n_pathseg = encoder.n_pathseg(); - //println!("tags: {:x?}", &tags[0..8]); - //println!("path: {:?}", bytemuck::cast_slice::(&pathsegs[0..64])); - PathData { - n_trans, - n_linewidth, - n_path, - n_pathseg, - tags, - pathsegs, - bbox, - lines, - } - } - - fn get_config(&self) -> stages::Config { - let n_trans = self.n_trans; - - // Layout of scene buffer - let linewidth_offset = 0; - let pathtag_offset = linewidth_offset + self.n_linewidth * 4; - let n_tagbytes = self.tags.len() as u32; - // Depends on workgroup size, maybe get from stages? - let padded_n_tagbytes = (n_tagbytes + 2047) & !2047; - let pathseg_offset = pathtag_offset + padded_n_tagbytes; - - // Layout of memory - let trans_alloc = 0; - let pathseg_alloc = trans_alloc + n_trans * 24; - let path_bbox_alloc = pathseg_alloc + self.n_pathseg * PATHSEG_SIZE; - let stage_config = stages::Config { - pathseg_alloc, - path_bbox_alloc, - n_trans, - n_path: self.n_path, - pathtag_offset, - linewidth_offset, - pathseg_offset, - ..Default::default() - }; - stage_config - } - - fn fill_scene(&self, buf: &mut BufWrite) { - let linewidth = -1.0f32; - buf.push(linewidth); - buf.extend_slice(&self.tags); - buf.fill_zero(self.tags.len().wrapping_neg() & 2047); - buf.extend_slice(&self.pathsegs); - } - - fn memory_init_size(&self) -> u64 { - let mut size = 8; // offset and error - size += self.n_trans * 24; - size as u64 - } - - fn memory_full_size(&self) -> u64 { - let mut size = self.memory_init_size(); - size += (self.n_pathseg * PATHSEG_SIZE) as u64; - size += (self.n_path * 24) as u64; - size - } - - fn fill_memory(&self, buf: &mut BufWrite) { - // This stage is not dynamically allocating memory - let mem_offset = 0u32; - let mem_error = 0u32; - let mem_init = [mem_offset, mem_error]; - buf.push(mem_init); - let trans = Transform::IDENTITY; - buf.push(trans); - } - - fn verify(&self, memory: &[u8]) -> Option { - fn round_down(x: f32) -> u32 { - (x.floor() + 32768.0) as u32 - } - fn round_up(x: f32) -> u32 { - (x.ceil() + 32768.0) as u32 - } - let begin_pathseg = 32; - for i in 0..self.n_pathseg { - let offset = (begin_pathseg + PATHSEG_SIZE * i) as usize; - let actual = - bytemuck::from_bytes::(&memory[offset..offset + PATHSEG_SIZE as usize]); - let expected = self.lines[i as usize]; - const EPSILON: f32 = 1e-9; - if (expected.0[0] - 
actual.p0[0]).abs() > EPSILON - || (expected.0[1] - actual.p0[1]).abs() > EPSILON - || (expected.1[0] - actual.p3[0]).abs() > EPSILON - || (expected.1[1] - actual.p3[1]).abs() > EPSILON - { - println!("{}: {:.1?} {:.1?}", i, actual, expected); - } - } - let begin_bbox = 32 + PATHSEG_SIZE * self.n_pathseg; - for i in 0..self.n_path { - let offset = (begin_bbox + 24 * i) as usize; - let actual = bytemuck::from_bytes::(&memory[offset..offset + 24]); - let expected_f32 = self.bbox[i as usize]; - if round_down(expected_f32.0) != actual.left - || round_down(expected_f32.1) != actual.top - || round_up(expected_f32.2) != actual.right - || round_up(expected_f32.3) != actual.bottom - { - println!("{}: {:?} {:?}", i, actual, expected_f32); - return Some(format!("bbox mismatch at {}", i)); - } - } - None - } -} diff --git a/tests/src/prefix.rs b/tests/src/prefix.rs deleted file mode 100644 index dbaf256..0000000 --- a/tests/src/prefix.rs +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright 2021 The piet-gpu authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Also licensed under MIT license, at your choice. - -use piet_gpu_hal::{include_shader, BindType, BufferUsage, DescriptorSet, ShaderCode}; -use piet_gpu_hal::{Buffer, Pipeline}; - -use crate::config::Config; -use crate::runner::{Commands, Runner}; -use crate::test_result::TestResult; - -const WG_SIZE: u64 = 512; -const N_ROWS: u64 = 16; -const ELEMENTS_PER_WG: u64 = WG_SIZE * N_ROWS; - -/// The shader code for the prefix sum example. -/// -/// A code struct can be created once and reused any number of times. -struct PrefixCode { - pipeline: Pipeline, -} - -/// The stage resources for the prefix sum example. -/// -/// A stage resources struct is specific to a particular problem size -/// and queue. -struct PrefixStage { - // This is the actual problem size but perhaps it would be better to - // treat it as a capacity. - n_elements: u64, - state_buf: Buffer, -} - -/// The binding for the prefix sum example. -struct PrefixBinding { - descriptor_set: DescriptorSet, -} - -#[derive(Debug)] -pub enum Variant { - Compatibility, - Atomic, - Vkmm, -} - -pub unsafe fn run_prefix_test( - runner: &mut Runner, - config: &Config, - variant: Variant, -) -> TestResult { - let mut result = TestResult::new(format!("prefix sum, decoupled look-back, {:?}", variant)); - /* - // We're good if we're using DXC. 
- if runner.backend_type() == BackendType::Dx12 { - result.skip("Shader won't compile on FXC"); - return result; - } - */ - let n_elements: u64 = config.size.choose(1 << 12, 1 << 24, 1 << 25); - let data_buf = runner - .session - .create_buffer_with( - n_elements * 4, - |b| b.extend(0..n_elements as u32), - BufferUsage::STORAGE, - ) - .unwrap(); - let out_buf = runner.buf_down(data_buf.size(), BufferUsage::empty()); - let code = PrefixCode::new(runner, variant); - let stage = PrefixStage::new(runner, n_elements); - let binding = stage.bind(runner, &code, &data_buf, &out_buf.dev_buf); - let n_iter = config.n_iter; - let mut total_elapsed = 0.0; - for i in 0..n_iter { - let mut commands = runner.commands(); - stage.record(&mut commands, &code, &binding); - if i == 0 || config.verify_all { - commands.cmd_buf.memory_barrier(); - commands.download(&out_buf); - } - total_elapsed += runner.submit(commands); - if i == 0 || config.verify_all { - let dst = out_buf.map_read(..); - if let Some(failure) = verify(dst.cast_slice()) { - result.fail(format!("failure at {}", failure)); - } - } - } - result.timing(total_elapsed, n_elements * n_iter); - result -} - -impl PrefixCode { - unsafe fn new(runner: &mut Runner, variant: Variant) -> PrefixCode { - let code = match variant { - Variant::Compatibility => include_shader!(&runner.session, "../shader/gen/prefix"), - Variant::Atomic => include_shader!(&runner.session, "../shader/gen/prefix_atomic"), - Variant::Vkmm => ShaderCode::Spv(include_bytes!("../shader/gen/prefix_vkmm.spv")), - }; - let pipeline = runner - .session - .create_compute_pipeline( - code, - &[BindType::BufReadOnly, BindType::Buffer, BindType::Buffer], - ) - .unwrap(); - // Currently, DX12 and Metal backends don't support buffer clearing, so use a - // compute shader as a workaround. - PrefixCode { pipeline } - } -} - -impl PrefixStage { - unsafe fn new(runner: &mut Runner, n_elements: u64) -> PrefixStage { - let n_workgroups = (n_elements + ELEMENTS_PER_WG - 1) / ELEMENTS_PER_WG; - let state_buf_size = 4 + 12 * n_workgroups; - let state_buf = runner - .session - .create_buffer( - state_buf_size, - BufferUsage::STORAGE | BufferUsage::COPY_DST | BufferUsage::CLEAR, - ) - .unwrap(); - PrefixStage { - n_elements, - state_buf, - } - } - - unsafe fn bind( - &self, - runner: &mut Runner, - code: &PrefixCode, - in_buf: &Buffer, - out_buf: &Buffer, - ) -> PrefixBinding { - let descriptor_set = runner - .session - .create_simple_descriptor_set(&code.pipeline, &[in_buf, out_buf, &self.state_buf]) - .unwrap(); - PrefixBinding { descriptor_set } - } - - unsafe fn record(&self, commands: &mut Commands, code: &PrefixCode, bindings: &PrefixBinding) { - let n_workgroups = (self.n_elements + ELEMENTS_PER_WG - 1) / ELEMENTS_PER_WG; - commands.cmd_buf.clear_buffer(&self.state_buf, None); - commands.cmd_buf.memory_barrier(); - let mut pass = commands.compute_pass(0, 1); - pass.dispatch( - &code.pipeline, - &bindings.descriptor_set, - (n_workgroups as u32, 1, 1), - (WG_SIZE as u32, 1, 1), - ); - pass.end(); - // One thing that's missing here is registering the buffers so - // they can be safely dropped by Rust code before the execution - // of the command buffer completes. 
- } -} - -// Verify that the data is OEIS A000217 -fn verify(data: &[u32]) -> Option { - data.iter() - .enumerate() - .position(|(i, val)| ((i * (i + 1)) / 2) as u32 != *val) -} diff --git a/tests/src/prefix_tree.rs b/tests/src/prefix_tree.rs deleted file mode 100644 index 3c9c813..0000000 --- a/tests/src/prefix_tree.rs +++ /dev/null @@ -1,213 +0,0 @@ -// Copyright 2021 The piet-gpu authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Also licensed under MIT license, at your choice. - -use piet_gpu_hal::{include_shader, BindType, BufferUsage, DescriptorSet}; -use piet_gpu_hal::{Buffer, Pipeline}; - -use crate::config::Config; -use crate::runner::{Commands, Runner}; -use crate::test_result::TestResult; - -const WG_SIZE: u64 = 512; -const N_ROWS: u64 = 8; -const ELEMENTS_PER_WG: u64 = WG_SIZE * N_ROWS; - -struct PrefixTreeCode { - reduce_pipeline: Pipeline, - scan_pipeline: Pipeline, - root_pipeline: Pipeline, -} - -struct PrefixTreeStage { - sizes: Vec, - tmp_bufs: Vec, -} - -struct PrefixTreeBinding { - // All but the first and last can be moved to stage. - descriptor_sets: Vec, -} - -pub unsafe fn run_prefix_test(runner: &mut Runner, config: &Config) -> TestResult { - let mut result = TestResult::new("prefix sum, tree reduction"); - // This will be configurable. Note though that the current code is - // prone to reading and writing past the end of buffers if this is - // not a power of the number of elements processed in a workgroup. - let n_elements: u64 = config.size.choose(1 << 12, 1 << 24, 1 << 24); - let data_buf = runner - .session - .create_buffer_with( - n_elements * 4, - |b| b.extend(0..n_elements as u32), - BufferUsage::STORAGE, - ) - .unwrap(); - let out_buf = runner.buf_down(data_buf.size(), BufferUsage::empty()); - let code = PrefixTreeCode::new(runner); - let stage = PrefixTreeStage::new(runner, n_elements); - let binding = stage.bind(runner, &code, &out_buf.dev_buf); - // Also will be configurable of course. 
- let n_iter = config.n_iter; - let mut total_elapsed = 0.0; - for i in 0..n_iter { - let mut commands = runner.commands(); - commands.cmd_buf.copy_buffer(&data_buf, &out_buf.dev_buf); - commands.cmd_buf.memory_barrier(); - stage.record(&mut commands, &code, &binding); - if i == 0 || config.verify_all { - commands.cmd_buf.memory_barrier(); - commands.download(&out_buf); - } - total_elapsed += runner.submit(commands); - if i == 0 || config.verify_all { - let dst = out_buf.map_read(..); - if let Some(failure) = verify(dst.cast_slice()) { - result.fail(format!("failure at {}", failure)); - } - } - } - result.timing(total_elapsed, n_elements * n_iter); - result -} - -impl PrefixTreeCode { - unsafe fn new(runner: &mut Runner) -> PrefixTreeCode { - let reduce_code = include_shader!(&runner.session, "../shader/gen/prefix_reduce"); - let reduce_pipeline = runner - .session - .create_compute_pipeline(reduce_code, &[BindType::BufReadOnly, BindType::Buffer]) - .unwrap(); - let scan_code = include_shader!(&runner.session, "../shader/gen/prefix_scan"); - let scan_pipeline = runner - .session - .create_compute_pipeline(scan_code, &[BindType::Buffer, BindType::BufReadOnly]) - .unwrap(); - let root_code = include_shader!(&runner.session, "../shader/gen/prefix_root"); - let root_pipeline = runner - .session - .create_compute_pipeline(root_code, &[BindType::Buffer]) - .unwrap(); - PrefixTreeCode { - reduce_pipeline, - scan_pipeline, - root_pipeline, - } - } -} - -impl PrefixTreeStage { - unsafe fn new(runner: &mut Runner, n_elements: u64) -> PrefixTreeStage { - let mut size = n_elements; - let mut sizes = vec![size]; - let mut tmp_bufs = Vec::new(); - while size > ELEMENTS_PER_WG { - size = (size + ELEMENTS_PER_WG - 1) / ELEMENTS_PER_WG; - sizes.push(size); - let buf = runner - .session - .create_buffer(4 * size, BufferUsage::STORAGE) - .unwrap(); - tmp_bufs.push(buf); - } - PrefixTreeStage { sizes, tmp_bufs } - } - - unsafe fn bind( - &self, - runner: &mut Runner, - code: &PrefixTreeCode, - data_buf: &Buffer, - ) -> PrefixTreeBinding { - let mut descriptor_sets = Vec::with_capacity(2 * self.tmp_bufs.len() + 1); - for i in 0..self.tmp_bufs.len() { - let buf0 = if i == 0 { - data_buf - } else { - &self.tmp_bufs[i - 1] - }; - let buf1 = &self.tmp_bufs[i]; - let descriptor_set = runner - .session - .create_simple_descriptor_set(&code.reduce_pipeline, &[buf0, buf1]) - .unwrap(); - descriptor_sets.push(descriptor_set); - } - let buf0 = self.tmp_bufs.last().unwrap_or(data_buf); - let descriptor_set = runner - .session - .create_simple_descriptor_set(&code.root_pipeline, &[buf0]) - .unwrap(); - descriptor_sets.push(descriptor_set); - for i in (0..self.tmp_bufs.len()).rev() { - let buf0 = if i == 0 { - data_buf - } else { - &self.tmp_bufs[i - 1] - }; - let buf1 = &self.tmp_bufs[i]; - let descriptor_set = runner - .session - .create_simple_descriptor_set(&code.scan_pipeline, &[buf0, buf1]) - .unwrap(); - descriptor_sets.push(descriptor_set); - } - PrefixTreeBinding { descriptor_sets } - } - - unsafe fn record( - &self, - commands: &mut Commands, - code: &PrefixTreeCode, - bindings: &PrefixTreeBinding, - ) { - let mut pass = commands.compute_pass(0, 1); - let n = self.tmp_bufs.len(); - for i in 0..n { - let n_workgroups = self.sizes[i + 1]; - pass.dispatch( - &code.reduce_pipeline, - &bindings.descriptor_sets[i], - (n_workgroups as u32, 1, 1), - (WG_SIZE as u32, 1, 1), - ); - pass.memory_barrier(); - } - pass.dispatch( - &code.root_pipeline, - &bindings.descriptor_sets[n], - (1, 1, 1), - (WG_SIZE as u32, 1, 1), - ); 
- for i in (0..n).rev() { - pass.memory_barrier(); - let n_workgroups = self.sizes[i + 1]; - pass.dispatch( - &code.scan_pipeline, - &bindings.descriptor_sets[2 * n - i], - (n_workgroups as u32, 1, 1), - (WG_SIZE as u32, 1, 1), - ); - } - pass.end(); - } -} - -// Verify that the data is OEIS A000217 -fn verify(data: &[u32]) -> Option { - data.iter() - .enumerate() - .position(|(i, val)| ((i * (i + 1)) / 2) as u32 != *val) -} diff --git a/tests/src/runner.rs b/tests/src/runner.rs deleted file mode 100644 index 0760f59..0000000 --- a/tests/src/runner.rs +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2021 The piet-gpu authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Also licensed under MIT license, at your choice. - -//! Test runner intended to make it easy to write tests. - -use std::ops::RangeBounds; - -use bytemuck::Pod; -use piet_gpu_hal::{ - BackendType, BufReadGuard, BufWriteGuard, Buffer, BufferUsage, CmdBuf, ComputePass, - ComputePassDescriptor, Instance, InstanceFlags, QueryPool, Session, -}; - -pub struct Runner { - #[allow(unused)] - instance: Instance, - pub session: Session, - cmd_buf_pool: Vec, -} - -/// A wrapper around command buffers. -pub struct Commands { - pub cmd_buf: CmdBuf, - query_pool: QueryPool, -} - -/// Buffer for both uploading and downloading -pub struct BufStage { - pub stage_buf: Buffer, - pub dev_buf: Buffer, -} - -impl Runner { - pub unsafe fn new(flags: InstanceFlags) -> Runner { - let instance = Instance::new(flags).unwrap(); - let device = instance.device().unwrap(); - let session = Session::new(device); - let cmd_buf_pool = Vec::new(); - Runner { - instance, - session, - cmd_buf_pool, - } - } - - pub unsafe fn commands(&mut self) -> Commands { - let mut cmd_buf = self - .cmd_buf_pool - .pop() - .unwrap_or_else(|| self.session.cmd_buf().unwrap()); - cmd_buf.begin(); - // TODO: also pool these. But we might sort by size, as - // we might not always be doing two. - let query_pool = self.session.create_query_pool(2).unwrap(); - cmd_buf.reset_query_pool(&query_pool); - Commands { - cmd_buf, - query_pool, - } - } - - pub unsafe fn submit(&mut self, commands: Commands) -> f64 { - let mut cmd_buf = commands.cmd_buf; - let query_pool = commands.query_pool; - cmd_buf.finish_timestamps(&query_pool); - cmd_buf.host_barrier(); - cmd_buf.finish(); - let submitted = self.session.run_cmd_buf(cmd_buf, &[], &[]).unwrap(); - self.cmd_buf_pool.extend(submitted.wait().unwrap()); - let timestamps = self.session.fetch_query_pool(&query_pool).unwrap(); - timestamps.get(0).copied().unwrap_or_default() - } - - #[allow(unused)] - pub fn buf_up(&self, size: u64) -> BufStage { - let stage_buf = self - .session - .create_buffer(size, BufferUsage::MAP_WRITE | BufferUsage::COPY_SRC) - .unwrap(); - let dev_buf = self - .session - .create_buffer(size, BufferUsage::COPY_DST | BufferUsage::STORAGE) - .unwrap(); - BufStage { stage_buf, dev_buf } - } - - /// Create a buffer for download (readback). 
- /// - /// The `usage` parameter need not include COPY_SRC and STORAGE. - pub fn buf_down(&self, size: u64, usage: BufferUsage) -> BufStage { - let stage_buf = self - .session - .create_buffer(size, BufferUsage::MAP_READ | BufferUsage::COPY_DST) - .unwrap(); - let dev_buf = self - .session - .create_buffer(size, usage | BufferUsage::COPY_SRC | BufferUsage::STORAGE) - .unwrap(); - BufStage { stage_buf, dev_buf } - } - - pub fn backend_type(&self) -> BackendType { - self.session.backend_type() - } -} - -impl Commands { - /// Start a compute pass with timer queries. - pub unsafe fn compute_pass(&mut self, start_query: u32, end_query: u32) -> ComputePass { - self.cmd_buf - .begin_compute_pass(&ComputePassDescriptor::timer( - &self.query_pool, - start_query, - end_query, - )) - } - - pub unsafe fn upload(&mut self, buf: &BufStage) { - self.cmd_buf.copy_buffer(&buf.stage_buf, &buf.dev_buf); - } - - pub unsafe fn download(&mut self, buf: &BufStage) { - self.cmd_buf.copy_buffer(&buf.dev_buf, &buf.stage_buf); - } -} - -impl BufStage { - pub unsafe fn read(&self, dst: &mut Vec) { - self.stage_buf.read(dst).unwrap() - } - - pub unsafe fn map_read<'a>(&'a self, range: impl RangeBounds) -> BufReadGuard<'a> { - self.stage_buf.map_read(range).unwrap() - } - - pub unsafe fn map_write<'a>(&'a mut self, range: impl RangeBounds) -> BufWriteGuard { - self.stage_buf.map_write(range).unwrap() - } -} diff --git a/tests/src/test_result.rs b/tests/src/test_result.rs deleted file mode 100644 index 05ad9b3..0000000 --- a/tests/src/test_result.rs +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2021 The piet-gpu authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// Also licensed under MIT license, at your choice. - -//! Recording of results from tests. - -pub struct TestResult { - name: String, - // TODO: statistics. We're lean and mean for now. 
- total_time: f64, - n_elements: u64, - status: Status, -} - -pub enum Status { - Pass, - Fail(String), - #[allow(unused)] - Skipped(String), -} - -#[derive(Clone, Copy, PartialEq, Eq)] -pub enum ReportStyle { - Short, - Verbose, -} - -impl TestResult { - pub fn new(name: impl Into) -> TestResult { - TestResult { - name: name.into(), - total_time: 0.0, - n_elements: 0, - status: Status::Pass, - } - } - - pub fn report(&self, style: ReportStyle) { - let fail_string = match &self.status { - Status::Pass => "pass".into(), - Status::Fail(s) => format!("fail ({})", s), - Status::Skipped(s) => format!("skipped ({})", s), - }; - match style { - ReportStyle::Short => { - let mut timing_string = String::new(); - if self.total_time > 0.0 { - if self.n_elements > 0 { - let throughput = self.n_elements as f64 / self.total_time; - timing_string = format!(" {} elements/s", format_nice(throughput, 1)); - } else { - timing_string = format!(" {}s", format_nice(self.total_time, 1)); - } - } - println!("{}: {}{}", self.name, fail_string, timing_string) - } - ReportStyle::Verbose => { - println!("test {}", self.name); - println!(" {}", fail_string); - if self.total_time > 0.0 { - println!(" {}s total time", format_nice(self.total_time, 1)); - if self.n_elements > 0 { - println!(" {} elements", self.n_elements); - let throughput = self.n_elements as f64 / self.total_time; - println!(" {} elements/s", format_nice(throughput, 1)); - } - } - } - } - } - - pub fn fail(&mut self, explanation: impl Into) { - self.status = Status::Fail(explanation.into()); - } - - #[allow(unused)] - pub fn skip(&mut self, explanation: impl Into) { - self.status = Status::Skipped(explanation.into()); - } - - pub fn timing(&mut self, total_time: f64, n_elements: u64) { - self.total_time = total_time; - self.n_elements = n_elements; - } -} - -fn format_nice(x: f64, precision: usize) -> String { - // Precision should probably scale; later - let (scale, suffix) = if x >= 1e12 && x < 1e15 { - (1e-12, "T") - } else if x >= 1e9 { - (1e-9, "G") - } else if x >= 1e6 { - (1e-6, "M") - } else if x >= 1e3 { - (1e-3, "k") - } else if x >= 1.0 { - (1.0, "") - } else if x >= 1e-3 { - (1e3, "m") - } else if x >= 1e-6 { - (1e6, "\u{00b5}") - } else if x >= 1e-9 { - (1e9, "n") - } else if x >= 1e-12 { - (1e12, "p") - } else { - return format!("{:.*e}", precision, x); - }; - format!("{:.*}{}", precision, scale * x, suffix) -}
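
For reference, both removed prefix-sum tests validate GPU output against OEIS A000217 (the triangular numbers): the input buffer is filled with 0, 1, 2, ..., so after an inclusive scan element i must equal i*(i+1)/2, and `verify` reports the first index where that fails. A minimal CPU-side sketch of the same check, assuming nothing beyond the predicate used by the removed `verify` functions (the helper names below are illustrative and not part of the removed code):

    // Inclusive prefix sum of 0..n on the CPU; by A000217 the value at index i
    // is the triangular number i*(i+1)/2, which is what the removed tests expect
    // from the GPU scan output.
    fn cpu_inclusive_scan(n: u32) -> Vec<u32> {
        let mut acc = 0u32;
        (0..n).map(|x| { acc += x; acc }).collect()
    }

    fn main() {
        let data = cpu_inclusive_scan(1 << 12);
        // Same predicate as the removed `verify` functions: the first mismatching
        // index, if any.
        let failure = data
            .iter()
            .enumerate()
            .position(|(i, val)| ((i * (i + 1)) / 2) as u32 != *val);
        assert!(failure.is_none(), "mismatch at {:?}", failure);
    }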