diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 41713097..51634343 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -15,7 +15,11 @@ jobs: # Enforce lockfile correctness. - run: cargo check --locked # Test all crates, except those that force-target WASM. - - run: cargo test --workspace + # Skip the tests that make proofs, since we'll run those on the release profile + - run: cargo test --workspace -- --skip circuit_test --skip test_runtime + # The tests that do Nightstream proofs are quite slow without --release + - run: cargo test -p starstream-interleaving-proof --release circuit_test + - run: cargo test -p starstream-runtime --release test_runtime # Cosmetic checks. - run: cargo clippy - run: cargo fmt --check diff --git a/Cargo.lock b/Cargo.lock index 7eb0cf8e..29de4b92 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -17,6 +17,18 @@ version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" +[[package]] +name = "ahash" +version = "0.8.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" +dependencies = [ + "cfg-if", + "once_cell", + "version_check", + "zerocopy", +] + [[package]] name = "aho-corasick" version = "1.1.3" @@ -32,6 +44,21 @@ version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + +[[package]] +name = "anes" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" + [[package]] name = "anstream" version = "0.6.21" @@ -94,12 +121,281 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3d036a3c4ab069c7b410a2ce876bd74808d2d0888a82667669f8e783a898bf1" +[[package]] +name = "archery" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70e0a5f99dfebb87bb342d0f53bb92c81842e100bbb915223e38349580e5441d" + +[[package]] +name = "ark-bn254" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d69eab57e8d2663efa5c63135b2af4f396d66424f88954c21104125ab6b3e6bc" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-std", +] + +[[package]] +name = "ark-crypto-primitives" +version = "0.5.0" +source = "git+https://github.com/arkworks-rs/crypto-primitives#39fb17bd6f46366913526c262454b9fce82a4cd7" +dependencies = [ + "ahash", + "ark-crypto-primitives-macros", + "ark-ec", + "ark-ff", + "ark-r1cs-std", + "ark-relations", + "ark-serialize", + "ark-snark", + "ark-std", + "blake2", + "derivative", + "digest", + "fnv", + "hashbrown 0.15.5", + "merlin", + "num-bigint", + "sha2", +] + +[[package]] +name = "ark-crypto-primitives-macros" +version = "0.5.0" +source = "git+https://github.com/arkworks-rs/crypto-primitives#39fb17bd6f46366913526c262454b9fce82a4cd7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "ark-ec" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"43d68f2d516162846c1238e755a7c4d131b892b70cc70c471a8e3ca3ed818fce" +dependencies = [ + "ahash", + "ark-ff", + "ark-poly", + "ark-serialize", + "ark-std", + "educe", + "fnv", + "hashbrown 0.15.5", + "itertools 0.13.0", + "num-bigint", + "num-integer", + "num-traits", + "rayon", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a177aba0ed1e0fbb62aa9f6d0502e9b46dad8c2eab04c14258a1212d2557ea70" +dependencies = [ + "ark-ff-asm", + "ark-ff-macros", + "ark-serialize", + "ark-std", + "arrayvec 0.7.6", + "digest", + "educe", + "itertools 0.13.0", + "num-bigint", + "num-traits", + "paste", + "rayon", + "zeroize", +] + +[[package]] +name = "ark-ff-asm" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62945a2f7e6de02a31fe400aa489f0e0f5b2502e69f95f853adb82a96c7a6b60" +dependencies = [ + "quote", + "syn 2.0.106", +] + +[[package]] +name = "ark-ff-macros" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09be120733ee33f7693ceaa202ca41accd5653b779563608f1234f78ae07c4b3" +dependencies = [ + "num-bigint", + "num-traits", + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "ark-goldilocks" +version = "0.0.0" +dependencies = [ + "ark-ff", +] + +[[package]] +name = "ark-poly" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "579305839da207f02b89cd1679e50e67b4331e2f9294a57693e5051b7703fe27" +dependencies = [ + "ahash", + "ark-ff", + "ark-serialize", + "ark-std", + "educe", + "fnv", + "hashbrown 0.15.5", + "rayon", +] + +[[package]] +name = "ark-poly-commit" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d68a105d915bcde6c0687363591c97e72d2d3758f3532d48fd0bf21a3261ce7" +dependencies = [ + "ahash", + "ark-crypto-primitives", + "ark-ec", + "ark-ff", + "ark-poly", + "ark-relations", + "ark-serialize", + "ark-std", + "blake2", + "derivative", + "digest", + "fnv", + "merlin", + "num-traits", + "rand 0.8.5", + "rayon", +] + +[[package]] +name = "ark-poseidon2" +version = "0.0.0" +dependencies = [ + "ark-bn254", + "ark-ff", + "ark-goldilocks", + "ark-poly", + "ark-poly-commit", + "ark-r1cs-std", + "ark-relations", + "criterion", + "rand 0.8.5", + "rand_chacha 0.3.1", +] + +[[package]] +name = "ark-r1cs-std" +version = "0.5.0" +source = "git+https://github.com/arkworks-rs/r1cs-std#3b12258dcb485f15bc7ad1312db0e64063bef9cc" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-relations", + "ark-std", + "educe", + "itertools 0.14.0", + "num-bigint", + "num-integer", + "num-traits", + "tracing", +] + +[[package]] +name = "ark-relations" +version = "0.5.1" +source = "git+https://github.com/arkworks-rs/snark/#845ce9d50bbe535792f04b44db78b009ee402ed7" +dependencies = [ + "ark-ff", + "ark-poly", + "ark-serialize", + "ark-std", + "foldhash", + "indexmap 2.11.4", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "ark-serialize" +version = "0.5.0" +source = "git+https://github.com/arkworks-rs/algebra#598a5fbabc1903c7bab6668ef8812bfdf2158723" +dependencies = [ + "ark-serialize-derive", + "ark-std", + "digest", + "num-bigint", + "rayon", + "serde_with", +] + +[[package]] +name = "ark-serialize-derive" +version = "0.5.0" +source = "git+https://github.com/arkworks-rs/algebra#598a5fbabc1903c7bab6668ef8812bfdf2158723" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "ark-snark" 
+version = "0.5.1" +source = "git+https://github.com/arkworks-rs/snark/#845ce9d50bbe535792f04b44db78b009ee402ed7" +dependencies = [ + "ark-ff", + "ark-relations", + "ark-serialize", + "ark-std", +] + +[[package]] +name = "ark-std" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "246a225cc6131e9ee4f24619af0f19d67761fff15d7ccc22e42b80846e69449a" +dependencies = [ + "num-traits", + "rand 0.8.5", + "rayon", +] + +[[package]] +name = "arrayref" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" + [[package]] name = "arrayvec" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" +[[package]] +name = "arrayvec" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" + [[package]] name = "async-channel" version = "1.9.0" @@ -237,7 +533,7 @@ checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.106", ] [[package]] @@ -282,6 +578,15 @@ version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" +[[package]] +name = "bincode" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" +dependencies = [ + "serde", +] + [[package]] name = "bitflags" version = "1.3.2" @@ -303,6 +608,35 @@ dependencies = [ "typenum", ] +[[package]] +name = "bitmaps" +version = "3.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d084b0137aaa901caf9f1e8b21daa6aa24d41cd806e111335541eff9683bd6" + +[[package]] +name = "blake2" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" +dependencies = [ + "digest", +] + +[[package]] +name = "blake3" +version = "1.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2468ef7d57b3fb7e16b576e8377cdbde2320c60e1491e961d11da40fc4f02a2d" +dependencies = [ + "arrayref", + "arrayvec 0.7.6", + "cc", + "cfg-if", + "constant_time_eq", + "cpufeatures", +] + [[package]] name = "block-buffer" version = "0.10.4" @@ -344,12 +678,24 @@ dependencies = [ "allocator-api2", ] +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + [[package]] name = "bytes" version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" +[[package]] +name = "cast" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" + [[package]] name = "cc" version = "1.2.41" @@ -368,6 +714,18 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" +[[package]] +name = "chrono" +version = "0.4.43" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "fac4744fb15ae8337dc853fee7fb3f4e48c0fbaa23d0afe49c447b4fab126118" +dependencies = [ + "iana-time-zone", + "num-traits", + "serde", + "windows-link", +] + [[package]] name = "chumsky" version = "0.11.1" @@ -382,6 +740,33 @@ dependencies = [ "unicode-segmentation", ] +[[package]] +name = "ciborium" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" +dependencies = [ + "ciborium-io", + "ciborium-ll", + "serde", +] + +[[package]] +name = "ciborium-io" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" + +[[package]] +name = "ciborium-ll" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" +dependencies = [ + "ciborium-io", + "half 2.7.1", +] + [[package]] name = "clap" version = "4.5.49" @@ -413,7 +798,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn", + "syn 2.0.106", ] [[package]] @@ -471,6 +856,18 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "constant_time_eq" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d52eff69cd5e647efe296129160853a42795992097e8af39800e1060caeea9b" + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + [[package]] name = "cpp_demangle" version = "0.4.5" @@ -636,6 +1033,42 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "criterion" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" +dependencies = [ + "anes", + "cast", + "ciborium", + "clap", + "criterion-plot", + "is-terminal", + "itertools 0.10.5", + "num-traits", + "once_cell", + "oorandom", + "plotters", + "rayon", + "regex", + "serde", + "serde_derive", + "serde_json", + "tinytemplate", + "walkdir", +] + +[[package]] +name = "criterion-plot" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" +dependencies = [ + "cast", + "itertools 0.10.5", +] + [[package]] name = "crossbeam-deque" version = "0.8.6" @@ -661,6 +1094,12 @@ version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" +[[package]] +name = "crunchy" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" + [[package]] name = "crypto-common" version = "0.1.7" @@ -694,6 +1133,27 @@ dependencies = [ "uuid", ] +[[package]] +name = "deranged" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc3dc5ad92c2e2d1c193bbbbdf2ea477cb81331de4f3103f267ca18368b988c4" +dependencies = [ + "powerfmt", + "serde_core", +] + +[[package]] +name = "derivative" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" +dependencies = [ + "proc-macro2", + 
"quote", + "syn 1.0.109", +] + [[package]] name = "digest" version = "0.10.7" @@ -702,6 +1162,7 @@ checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer", "crypto-common", + "subtle", ] [[package]] @@ -725,6 +1186,24 @@ dependencies = [ "winapi", ] +[[package]] +name = "dyn-clone" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" + +[[package]] +name = "educe" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7bc049e1bd8cdeb31b68bbd586a9464ecf9f3944af3958a7a9d0f8b9799417" +dependencies = [ + "enum-ordinalize", + "proc-macro2", + "quote", + "syn 2.0.106", +] + [[package]] name = "either" version = "1.15.0" @@ -759,10 +1238,30 @@ dependencies = [ ] [[package]] -name = "equivalent" -version = "1.0.2" +name = "enum-ordinalize" +version = "4.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" +checksum = "4a1091a7bb1f8f2c4b28f1fe2cef4980ca2d410a3d727d67ecc3178c9b0800f0" +dependencies = [ + "enum-ordinalize-derive", +] + +[[package]] +name = "enum-ordinalize-derive" +version = "4.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ca9601fb2d62598ee17836250842873a413586e5d7ed88b356e38ddbb0ec631" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "errno" @@ -834,6 +1333,12 @@ dependencies = [ "bitflags 1.3.2", ] +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + [[package]] name = "foldhash" version = "0.1.5" @@ -897,7 +1402,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.106", ] [[package]] @@ -972,9 +1477,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" dependencies = [ "cfg-if", + "js-sys", "libc", "r-efi", "wasip2", + "wasm-bindgen", ] [[package]] @@ -984,7 +1491,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e629b9b98ef3dd8afe6ca2bd0f89306cec16d43d907889945bc5d6687f2f13c7" dependencies = [ "fallible-iterator", - "indexmap", + "indexmap 2.11.4", "stable_deref_trait", ] @@ -1019,6 +1526,23 @@ version = "1.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b43ede17f21864e81be2fa654110bf1e793774238d86ef8555c37e6519c0403" +[[package]] +name = "half" +version = "2.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b" +dependencies = [ + "cfg-if", + "crunchy", + "zerocopy", +] + +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + [[package]] name = "hashbrown" version = "0.14.5" @@ -1061,12 +1585,42 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + [[package]] name = "httparse" version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" +[[package]] +name = "iana-time-zone" +version = "0.1.65" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e31bc9ad994ba00e440a8aa5c9ef0ec67d5cb5e5cb0cc7f8b744a35b389cc470" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "log", + "wasm-bindgen", + "windows-core", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + [[package]] name = "id-arena" version = "2.2.1" @@ -1095,14 +1649,48 @@ version = "15.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af1955a75fa080c677d3972822ec4bad316169ab1cfc6c257a942c2265dbe5fe" dependencies = [ - "bitmaps", - "rand_core", - "rand_xoshiro", + "bitmaps 2.1.0", + "rand_core 0.6.4", + "rand_xoshiro 0.6.0", "sized-chunks", "typenum", "version_check", ] +[[package]] +name = "imbl" +version = "6.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fade8ae6828627ad1fa094a891eccfb25150b383047190a3648d66d06186501" +dependencies = [ + "archery", + "bitmaps 3.2.1", + "imbl-sized-chunks", + "rand_core 0.9.5", + "rand_xoshiro 0.7.0", + "version_check", +] + +[[package]] +name = "imbl-sized-chunks" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f4241005618a62f8d57b2febd02510fb96e0137304728543dfc5fd6f052c22d" +dependencies = [ + "bitmaps 3.2.1", +] + +[[package]] +name = "indexmap" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg", + "hashbrown 0.12.3", + "serde", +] + [[package]] name = "indexmap" version = "2.11.4" @@ -1134,6 +1722,17 @@ dependencies = [ "walkdir", ] +[[package]] +name = "is-terminal" +version = "0.4.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" +dependencies = [ + "hermit-abi", + "libc", + "windows-sys 0.61.2", +] + [[package]] name = "is_ci" version = "1.2.0" @@ -1146,6 +1745,24 @@ version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + [[package]] name = "itertools" version = "0.14.0" @@ -1201,6 +1818,15 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "keccak" +version = "0.1.6" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb26cec98cce3a3d96cbb7bced3c4b16e3d13f27ec56dbd62cbc8f39cfb9d653" +dependencies = [ + "cpufeatures", +] + [[package]] name = "kv-log-macro" version = "1.0.7" @@ -1210,6 +1836,12 @@ dependencies = [ "log", ] +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + [[package]] name = "leb128fmt" version = "0.1.0" @@ -1284,6 +1916,15 @@ dependencies = [ "libc", ] +[[package]] +name = "matchers" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9" +dependencies = [ + "regex-automata 0.4.13", +] + [[package]] name = "memchr" version = "2.7.6" @@ -1299,6 +1940,18 @@ dependencies = [ "rustix", ] +[[package]] +name = "merlin" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58c38e2799fc0978b65dfff8023ec7843e2330bb462f19198840b34b6582397d" +dependencies = [ + "byteorder", + "keccak", + "rand_core 0.6.4", + "zeroize", +] + [[package]] name = "miette" version = "7.6.0" @@ -1326,7 +1979,7 @@ checksum = "db5b29714e950dbb20d5e6f74f9dcec4edbcc1067bb7f8ed198c097b8c1a818b" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.106", ] [[package]] @@ -1349,6 +2002,208 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "neo-ajtai" +version = "0.1.0" +source = "git+https://github.com/LFDT-Nightstream/Nightstream.git?rev=cc2e9850c97d17316a72590de86b71b84b7e7313#cc2e9850c97d17316a72590de86b71b84b7e7313" +dependencies = [ + "neo-ccs", + "neo-math", + "neo-params", + "p3-field", + "p3-goldilocks", + "p3-matrix", + "rand 0.9.2", + "rand_chacha 0.9.0", + "rayon", + "serde", + "subtle", + "thiserror 2.0.17", + "tracing", +] + +[[package]] +name = "neo-ccs" +version = "0.1.0" +source = "git+https://github.com/LFDT-Nightstream/Nightstream.git?rev=cc2e9850c97d17316a72590de86b71b84b7e7313#cc2e9850c97d17316a72590de86b71b84b7e7313" +dependencies = [ + "neo-math", + "neo-params", + "once_cell", + "p3-field", + "p3-goldilocks", + "p3-matrix", + "p3-poseidon2", + "p3-symmetric", + "rand 0.9.2", + "rand_chacha 0.9.0", + "rayon", + "serde", + "thiserror 2.0.17", + "tracing", +] + +[[package]] +name = "neo-fold" +version = "0.1.0" +source = "git+https://github.com/LFDT-Nightstream/Nightstream.git?rev=cc2e9850c97d17316a72590de86b71b84b7e7313#cc2e9850c97d17316a72590de86b71b84b7e7313" +dependencies = [ + "getrandom 0.3.4", + "js-sys", + "neo-ajtai", + "neo-ccs", + "neo-math", + "neo-memory", + "neo-params", + "neo-reductions", + "neo-transcript", + "neo-vm-trace", + "p3-field", + "p3-goldilocks", + "p3-matrix", + "rand_chacha 0.9.0", + "rayon", + "serde", + "serde_json", + "thiserror 2.0.17", + "tracing", +] + +[[package]] +name = "neo-math" +version = "0.1.0" +source = "git+https://github.com/LFDT-Nightstream/Nightstream.git?rev=cc2e9850c97d17316a72590de86b71b84b7e7313#cc2e9850c97d17316a72590de86b71b84b7e7313" +dependencies = [ + "p3-field", + "p3-goldilocks", + "p3-matrix", + "rand 0.9.2", + "rand_chacha 0.9.0", + "subtle", + "thiserror 2.0.17", +] + +[[package]] +name = "neo-memory" +version = "0.1.0" +source = "git+https://github.com/LFDT-Nightstream/Nightstream.git?rev=cc2e9850c97d17316a72590de86b71b84b7e7313#cc2e9850c97d17316a72590de86b71b84b7e7313" +dependencies = [ + "neo-ajtai", + "neo-ccs", + "neo-math", + "neo-params", + 
"neo-reductions", + "neo-transcript", + "neo-vm-trace", + "p3-field", + "p3-goldilocks", + "p3-matrix", + "serde", + "thiserror 2.0.17", +] + +[[package]] +name = "neo-params" +version = "0.1.0" +source = "git+https://github.com/LFDT-Nightstream/Nightstream.git?rev=cc2e9850c97d17316a72590de86b71b84b7e7313#cc2e9850c97d17316a72590de86b71b84b7e7313" +dependencies = [ + "serde", + "thiserror 2.0.17", +] + +[[package]] +name = "neo-reductions" +version = "0.1.0" +source = "git+https://github.com/LFDT-Nightstream/Nightstream.git?rev=cc2e9850c97d17316a72590de86b71b84b7e7313#cc2e9850c97d17316a72590de86b71b84b7e7313" +dependencies = [ + "bincode", + "blake3", + "neo-ajtai", + "neo-ccs", + "neo-math", + "neo-params", + "neo-transcript", + "p3-challenger", + "p3-field", + "p3-goldilocks", + "p3-matrix", + "p3-poseidon2", + "p3-symmetric", + "rand 0.9.2", + "rand_chacha 0.9.0", + "rayon", + "thiserror 2.0.17", +] + +[[package]] +name = "neo-transcript" +version = "0.1.0" +source = "git+https://github.com/LFDT-Nightstream/Nightstream.git?rev=cc2e9850c97d17316a72590de86b71b84b7e7313#cc2e9850c97d17316a72590de86b71b84b7e7313" +dependencies = [ + "neo-ccs", + "neo-math", + "once_cell", + "p3-field", + "p3-goldilocks", + "p3-symmetric", + "rand 0.9.2", + "rand_chacha 0.9.0", + "subtle", +] + +[[package]] +name = "neo-vm-trace" +version = "0.1.0" +source = "git+https://github.com/LFDT-Nightstream/Nightstream.git?rev=cc2e9850c97d17316a72590de86b71b84b7e7313#cc2e9850c97d17316a72590de86b71b84b7e7313" +dependencies = [ + "serde", + "thiserror 2.0.17", +] + +[[package]] +name = "nu-ansi-term" +version = "0.50.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "num-bigint" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" +dependencies = [ + "num-integer", + "num-traits", +] + +[[package]] +name = "num-conv" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf97ec579c3c42f953ef76dbf8d55ac91fb219dde70e49aa4a6b7d74e9919050" + +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", + "libm", +] + [[package]] name = "object" version = "0.37.3" @@ -1357,7 +2212,7 @@ checksum = "ff76201f031d8863c38aa7f905eca4f53abbfa15f609db4277d44cd8938f33fe" dependencies = [ "crc32fast", "hashbrown 0.15.5", - "indexmap", + "indexmap 2.11.4", "memchr", ] @@ -1373,12 +2228,174 @@ version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad" +[[package]] +name = "oorandom" +version = "11.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" + [[package]] name = "owo-colors" version = "4.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"9c6901729fa79e91a0913333229e9ca5dc725089d1c363b2f4b4760709dc4a52" +[[package]] +name = "p3-challenger" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20e42ba74a49c08c6e99f74cd9b343bfa31aa5721fea55079b18e3fd65f1dcbc" +dependencies = [ + "p3-field", + "p3-maybe-rayon", + "p3-monty-31", + "p3-symmetric", + "p3-util", + "tracing", +] + +[[package]] +name = "p3-dft" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e63fa5eb1bd12a240089e72ae3fe10350944d9c166d00a3bfd2a1794db65cf5c" +dependencies = [ + "itertools 0.14.0", + "p3-field", + "p3-matrix", + "p3-maybe-rayon", + "p3-util", + "spin 0.10.0", + "tracing", +] + +[[package]] +name = "p3-field" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ebfdb6ef992ae64e9e8f449ac46516ffa584f11afbdf9ee244288c2a633cdf4" +dependencies = [ + "itertools 0.14.0", + "num-bigint", + "p3-maybe-rayon", + "p3-util", + "paste", + "rand 0.9.2", + "serde", + "tracing", +] + +[[package]] +name = "p3-goldilocks" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64716244b5612622d4e78a4f48b74f6d3bb7b4085b7b6b25364b1dfca7198c66" +dependencies = [ + "num-bigint", + "p3-challenger", + "p3-dft", + "p3-field", + "p3-mds", + "p3-poseidon2", + "p3-symmetric", + "p3-util", + "paste", + "rand 0.9.2", + "serde", +] + +[[package]] +name = "p3-matrix" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5542f96504dae8100c91398fb1e3f5ec669eb9c73d9e0b018a93b5fe32bad230" +dependencies = [ + "itertools 0.14.0", + "p3-field", + "p3-maybe-rayon", + "p3-util", + "rand 0.9.2", + "serde", + "tracing", + "transpose", +] + +[[package]] +name = "p3-maybe-rayon" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e5669ca75645f99cd001e9d0289a4eeff2bc2cd9dc3c6c3aaf22643966e83df" + +[[package]] +name = "p3-mds" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "038763af23df9da653065867fd85b38626079031576c86fd537097e5be6a0da0" +dependencies = [ + "p3-dft", + "p3-field", + "p3-symmetric", + "p3-util", + "rand 0.9.2", +] + +[[package]] +name = "p3-monty-31" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57a981d60da3d8cbf8561014e2c186068578405fd69098fa75b43d4afb364a47" +dependencies = [ + "itertools 0.14.0", + "num-bigint", + "p3-dft", + "p3-field", + "p3-matrix", + "p3-maybe-rayon", + "p3-mds", + "p3-poseidon2", + "p3-symmetric", + "p3-util", + "paste", + "rand 0.9.2", + "serde", + "spin 0.10.0", + "tracing", + "transpose", +] + +[[package]] +name = "p3-poseidon2" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "903b73e4f9a7781a18561c74dc169cf03333497b57a8dd02aaeb130c0f386599" +dependencies = [ + "p3-field", + "p3-mds", + "p3-symmetric", + "p3-util", + "rand 0.9.2", +] + +[[package]] +name = "p3-symmetric" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3cd788f04e86dd5c35dd87cad29eefdb6371d2fd5f7664451382eeacae3c3ed0" +dependencies = [ + "itertools 0.14.0", + "p3-field", + "serde", +] + +[[package]] +name = "p3-util" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "663b16021930bc600ecada915c6c3965730a3b9d6a6c23434ccf70bfc29d6881" +dependencies = [ + "serde", +] + [[package]] 
name = "parking" version = "2.2.1" @@ -1408,6 +2425,12 @@ dependencies = [ "windows-link", ] +[[package]] +name = "paste" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" + [[package]] name = "percent-encoding" version = "2.3.2" @@ -1421,7 +2444,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", - "indexmap", + "indexmap 2.11.4", ] [[package]] @@ -1453,6 +2476,34 @@ version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" +[[package]] +name = "plotters" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747" +dependencies = [ + "num-traits", + "plotters-backend", + "plotters-svg", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "plotters-backend" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a" + +[[package]] +name = "plotters-svg" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670" +dependencies = [ + "plotters-backend", +] + [[package]] name = "polling" version = "3.11.0" @@ -1479,13 +2530,28 @@ dependencies = [ "serde", ] +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + +[[package]] +name = "ppv-lite86" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" +dependencies = [ + "zerocopy", +] + [[package]] name = "pretty" version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d22152487193190344590e4f30e219cf3fe140d9e7a3fdb683d82aa2c5f4156" dependencies = [ - "arrayvec", + "arrayvec 0.5.2", "typed-arena", "unicode-width 0.2.2", ] @@ -1528,7 +2594,7 @@ checksum = "752233a382efa1026438aa8409c72489ebaa7ed94148bfabdf5282dc864276ef" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.106", ] [[package]] @@ -1546,11 +2612,64 @@ version = "5.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + +[[package]] +name = "rand" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" +dependencies = [ + "rand_chacha 0.9.0", + "rand_core 0.9.5", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.5", +] + [[package]] name = "rand_core" version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom 0.2.17", +] + +[[package]] +name = "rand_core" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76afc826de14238e6e8c374ddcc1fa19e374fd8dd986b0d2af0d02377261d83c" +dependencies = [ + "getrandom 0.3.4", +] [[package]] name = "rand_xoshiro" @@ -1558,7 +2677,16 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f97cdb2a36ed4183de61b2f824cc45c9f1037f28afe0a322e9fff4c108b5aaa" dependencies = [ - "rand_core", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_xoshiro" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f703f4665700daf5512dcca5f43afa6af89f09db47fb56be587f80636bda2d41" +dependencies = [ + "rand_core 0.9.5", ] [[package]] @@ -1591,14 +2719,34 @@ dependencies = [ ] [[package]] -name = "redox_users" -version = "0.4.6" +name = "redox_users" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" +dependencies = [ + "getrandom 0.2.17", + "libredox", + "thiserror 1.0.69", +] + +[[package]] +name = "ref-cast" +version = "1.0.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f354300ae66f76f1c85c5f84693f0ce81d747e2c3f21a45fef496d89c960bf7d" +dependencies = [ + "ref-cast-impl", +] + +[[package]] +name = "ref-cast-impl" +version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" +checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" dependencies = [ - "getrandom 0.2.17", - "libredox", - "thiserror 1.0.69", + "proc-macro2", + "quote", + "syn 2.0.106", ] [[package]] @@ -1615,6 +2763,18 @@ dependencies = [ "smallvec", ] +[[package]] +name = "regex" +version = "1.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e10754a14b9137dd7b1e3e5b0493cc9171fdd105e0ab477f51b72e7f3ac0e276" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata 0.4.13", + "regex-syntax 0.8.8", +] + [[package]] name = "regex-automata" version = "0.3.9" @@ -1705,6 +2865,30 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "schemars" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd191f9397d57d581cddd31014772520aa448f65ef991055d7f61582c65165f" +dependencies = [ + "dyn-clone", + "ref-cast", + "serde", + "serde_json", +] + +[[package]] +name = "schemars" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2b42f36aa1cd011945615b92222f6bf73c599a102a300334cd7f8dbeec726cc" +dependencies = [ + "dyn-clone", + "ref-cast", + "serde", + "serde_json", +] + [[package]] name = "scopeguard" version = "1.2.0" @@ -1748,7 +2932,7 @@ version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5" dependencies = [ - "half", + "half 1.8.3", "serde", ] @@ -1769,7 +2953,7 @@ 
checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.106", ] [[package]] @@ -1793,7 +2977,7 @@ checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.106", ] [[package]] @@ -1805,13 +2989,31 @@ dependencies = [ "serde_core", ] +[[package]] +name = "serde_with" +version = "3.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fa237f2807440d238e0364a218270b98f767a00d3dada77b1c53ae88940e2e7" +dependencies = [ + "base64", + "chrono", + "hex", + "indexmap 1.9.3", + "indexmap 2.11.4", + "schemars 0.9.0", + "schemars 1.2.1", + "serde_core", + "serde_json", + "time", +] + [[package]] name = "serde_yaml" version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap", + "indexmap 2.11.4", "itoa", "ryu", "serde", @@ -1829,6 +3031,15 @@ dependencies = [ "digest", ] +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + [[package]] name = "shlex" version = "1.3.0" @@ -1856,7 +3067,7 @@ version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16d69225bde7a69b235da73377861095455d298f2b970996eec25ddbb42b3d1e" dependencies = [ - "bitmaps", + "bitmaps 2.1.0", "typenum", ] @@ -1885,6 +3096,21 @@ dependencies = [ "windows-sys 0.60.2", ] +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + +[[package]] +name = "spin" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5fe4ccb98d9c292d56fec89a5e07da7fc4cf0dc11e156b41793132775d3e591" +dependencies = [ + "lock_api", +] + [[package]] name = "stable_deref_trait" version = "1.2.1" @@ -1939,6 +3165,54 @@ dependencies = [ "thiserror 2.0.17", ] +[[package]] +name = "starstream-interleaving-proof" +version = "0.1.0" +dependencies = [ + "ark-bn254", + "ark-ff", + "ark-goldilocks", + "ark-poly", + "ark-poly-commit", + "ark-poseidon2", + "ark-r1cs-std", + "ark-relations", + "neo-ajtai", + "neo-ccs", + "neo-fold", + "neo-math", + "neo-memory", + "neo-params", + "neo-vm-trace", + "p3-field", + "p3-goldilocks", + "p3-poseidon2", + "p3-symmetric", + "rand 0.9.2", + "rand_chacha 0.9.0", + "starstream-interleaving-spec", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "starstream-interleaving-spec" +version = "0.1.0" +dependencies = [ + "ark-ff", + "ark-goldilocks", + "ark-poseidon2", + "hex", + "imbl", + "neo-ajtai", + "neo-ccs", + "neo-fold", + "neo-math", + "neo-memory", + "p3-field", + "thiserror 2.0.17", +] + [[package]] name = "starstream-interpreter" version = "0.0.0" @@ -1974,6 +3248,22 @@ dependencies = [ "web-sys", ] +[[package]] +name = "starstream-runtime" +version = "0.0.0" +dependencies = [ + "ark-ff", + "ark-poseidon2", + "imbl", + "starstream-interleaving-proof", + "starstream-interleaving-spec", + "thiserror 2.0.17", + "wasm-encoder 0.240.0", + "wasmi", + "wasmprinter 0.2.80", + "wat", +] + [[package]] name = "starstream-sandbox-web" version = "0.0.0" @@ -1985,7 +3275,7 @@ dependencies = [ 
"starstream-compiler", "starstream-to-wasm", "termcolor", - "wasmprinter", + "wasmprinter 0.240.0", "wit-component", ] @@ -1999,7 +3289,7 @@ dependencies = [ "starstream-types", "thiserror 2.0.17", "wasm-encoder 0.240.0", - "wasmprinter", + "wasmprinter 0.240.0", "wasmtime", "wit-component", ] @@ -2020,12 +3310,34 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d08889ec5408683408db66ad89e0e1f93dff55c73a4ccc71c427d5b277ee47e6" +[[package]] +name = "strength_reduce" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe895eb47f22e2ddd4dabc02bce419d2e643c8e3b585c78158b349195bc24d82" + +[[package]] +name = "string-interner" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23de088478b31c349c9ba67816fa55d9355232d63c3afea8bf513e31f0f1d2c0" +dependencies = [ + "hashbrown 0.15.5", + "serde", +] + [[package]] name = "strsim" version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + [[package]] name = "supports-color" version = "3.0.2" @@ -2047,6 +3359,17 @@ version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b7401a30af6cb5818bb64852270bb722533397edcfc7344954a38f420819ece2" +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + [[package]] name = "syn" version = "2.0.106" @@ -2138,7 +3461,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.106", ] [[package]] @@ -2149,7 +3472,57 @@ checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.106", +] + +[[package]] +name = "thread_local" +version = "1.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "time" +version = "0.3.47" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "743bd48c283afc0388f9b8827b976905fb217ad9e647fae3a379a9283c4def2c" +dependencies = [ + "deranged", + "itoa", + "num-conv", + "powerfmt", + "serde_core", + "time-core", + "time-macros", +] + +[[package]] +name = "time-core" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7694e1cfe791f8d31026952abf09c69ca6f6fa4e1a1229e18988f06a04a12dca" + +[[package]] +name = "time-macros" +version = "0.2.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e70e4c5a0e0a8a4823ad65dfe1a6930e4f4d756dcd9dd7939022b5e8c501215" +dependencies = [ + "num-conv", + "time-core", +] + +[[package]] +name = "tinytemplate" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" +dependencies = [ + "serde", + "serde_json", ] [[package]] @@ -2177,7 +3550,7 @@ checksum = 
"af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.106", ] [[package]] @@ -2186,7 +3559,7 @@ version = "0.9.11+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3afc9a848309fe1aaffaed6e1546a7a14de1f935dc9d89d32afd9a44bab7c46" dependencies = [ - "indexmap", + "indexmap 2.11.4", "serde_core", "serde_spanned", "toml_datetime", @@ -2284,7 +3657,7 @@ checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.106", ] [[package]] @@ -2294,6 +3667,46 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" dependencies = [ "once_cell", + "valuable", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2054a14f5307d601f88daf0553e1cbf472acc4f2c51afab632431cdcd72124d5" +dependencies = [ + "matchers", + "nu-ansi-term", + "once_cell", + "regex-automata 0.4.13", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", +] + +[[package]] +name = "transpose" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ad61aed86bc3faea4300c7aee358b4c6d0c8d6ccc36524c96e4c92ccf26e77e" +dependencies = [ + "num-integer", + "strength_reduce", ] [[package]] @@ -2366,6 +3779,12 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "valuable" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" + [[package]] name = "value-bag" version = "1.11.1" @@ -2448,7 +3867,7 @@ dependencies = [ "bumpalo", "proc-macro2", "quote", - "syn", + "syn 2.0.106", "wasm-bindgen-shared", ] @@ -2470,7 +3889,7 @@ dependencies = [ "anyhow", "heck 0.4.1", "im-rc", - "indexmap", + "indexmap 2.11.4", "log", "petgraph", "serde", @@ -2509,11 +3928,73 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee093e1e1ccffa005b9b778f7a10ccfd58e25a20eccad294a1a93168d076befb" dependencies = [ "anyhow", - "indexmap", + "indexmap 2.11.4", "wasm-encoder 0.240.0", "wasmparser 0.240.0", ] +[[package]] +name = "wasmi" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22bf475363d09d960b48275c4ea9403051add498a9d80c64dbc91edabab9d1d0" +dependencies = [ + "spin 0.9.8", + "wasmi_collections", + "wasmi_core", + "wasmi_ir", + "wasmparser 0.228.0", + "wat", +] + +[[package]] +name = "wasmi_collections" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85851acbdffd675a9b699b3590406a1d37fc1e1fd073743c7c9cf47c59caacba" +dependencies = [ + "string-interner", +] + +[[package]] +name = "wasmi_core" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef64cf60195d1f937dbaed592a5afce3e6d86868fb8070c5255bc41539d68f9d" +dependencies = [ + "libm", +] + +[[package]] +name = "wasmi_ir" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5dcb572ce4400e06b5475819f3d6b9048513efbca785f0b9ef3a41747f944fd8" +dependencies = [ + "wasmi_core", +] + +[[package]] +name = "wasmparser" +version = "0.121.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dbe55c8f9d0dbd25d9447a5a889ff90c0cc3feaa7395310d3d826b2c703eaab" +dependencies = [ + "bitflags 2.9.4", + "indexmap 2.11.4", + "semver", +] + +[[package]] +name = "wasmparser" +version = "0.228.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4abf1132c1fdf747d56bbc1bb52152400c70f336870f968b85e89ea422198ae3" +dependencies = [ + "bitflags 2.9.4", + "indexmap 2.11.4", +] + [[package]] name = "wasmparser" version = "0.240.0" @@ -2522,7 +4003,7 @@ checksum = "b722dcf61e0ea47440b53ff83ccb5df8efec57a69d150e4f24882e4eba7e24a4" dependencies = [ "bitflags 2.9.4", "hashbrown 0.15.5", - "indexmap", + "indexmap 2.11.4", "semver", "serde", ] @@ -2534,10 +4015,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe" dependencies = [ "bitflags 2.9.4", - "indexmap", + "indexmap 2.11.4", "semver", ] +[[package]] +name = "wasmprinter" +version = "0.2.80" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60e73986a6b7fdfedb7c5bf9e7eb71135486507c8fbc4c0c42cffcb6532988b7" +dependencies = [ + "anyhow", + "wasmparser 0.121.2", +] + [[package]] name = "wasmprinter" version = "0.240.0" @@ -2567,7 +4058,7 @@ dependencies = [ "fxprof-processed-profile", "gimli", "hashbrown 0.15.5", - "indexmap", + "indexmap 2.11.4", "ittapi", "libc", "log", @@ -2617,7 +4108,7 @@ dependencies = [ "cranelift-bitset", "cranelift-entity", "gimli", - "indexmap", + "indexmap 2.11.4", "log", "object", "postcard", @@ -2629,7 +4120,7 @@ dependencies = [ "target-lexicon", "wasm-encoder 0.240.0", "wasmparser 0.240.0", - "wasmprinter", + "wasmprinter 0.240.0", "wasmtime-internal-component-util", ] @@ -2662,7 +4153,7 @@ dependencies = [ "anyhow", "proc-macro2", "quote", - "syn", + "syn 2.0.106", "wasmtime-internal-component-util", "wasmtime-internal-wit-bindgen", "wit-parser", @@ -2688,7 +4179,7 @@ dependencies = [ "cranelift-frontend", "cranelift-native", "gimli", - "itertools", + "itertools 0.14.0", "log", "object", "pulley-interpreter", @@ -2777,7 +4268,7 @@ checksum = "47f6bf5957ba823cb170996073edf4596b26d5f44c53f9e96b586c64fa04f7e9" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.106", ] [[package]] @@ -2807,7 +4298,7 @@ dependencies = [ "anyhow", "bitflags 2.9.4", "heck 0.5.0", - "indexmap", + "indexmap 2.11.4", "wit-parser", ] @@ -2894,12 +4385,65 @@ dependencies = [ "wasmtime-internal-math", ] +[[package]] +name = "windows-core" +version = "0.62.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-link", + "windows-result", + "windows-strings", +] + +[[package]] +name = "windows-implement" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "windows-interface" +version = "0.59.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" +dependencies = [ + "proc-macro2", + "quote", + "syn 
2.0.106", +] + [[package]] name = "windows-link" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" +[[package]] +name = "windows-result" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-strings" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" +dependencies = [ + "windows-link", +] + [[package]] name = "windows-sys" version = "0.59.0" @@ -3076,7 +4620,7 @@ checksum = "7dc5474b078addc5fe8a72736de8da3acfb3ff324c2491133f8b59594afa1a20" dependencies = [ "anyhow", "bitflags 2.9.4", - "indexmap", + "indexmap 2.11.4", "log", "serde", "serde_derive", @@ -3095,7 +4639,7 @@ checksum = "9875ea3fa272f57cc1fc50f225a7b94021a7878c484b33792bccad0d93223439" dependencies = [ "anyhow", "id-arena", - "indexmap", + "indexmap 2.11.4", "log", "semver", "serde", @@ -3105,6 +4649,46 @@ dependencies = [ "wasmparser 0.240.0", ] +[[package]] +name = "zerocopy" +version = "0.8.39" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db6d35d663eadb6c932438e763b262fe1a70987f9ae936e60158176d710cae4a" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.39" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4122cd3169e94605190e77839c9a40d40ed048d305bfdc146e7df40ab0f3e517" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "zeroize" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85a5b4158499876c763cb03bc4e49185d3cccbabb15b33c627f7884f43db852e" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + [[package]] name = "zstd" version = "0.13.3" diff --git a/Cargo.toml b/Cargo.toml index 01b5381c..064c7037 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,6 +9,11 @@ members = [ "starstream-sandbox-web", "starstream-to-wasm", "starstream-types", + "interleaving/starstream-interleaving-proof", + "interleaving/starstream-interleaving-spec", + "interleaving/starstream-runtime", + "ark-poseidon2", + "ark-goldilocks" ] exclude = ["old"] @@ -39,5 +44,20 @@ wasm-encoder = "0.240.0" wasmprinter = "0.240.0" wit-component = "0.240.0" +neo-fold = { git = "https://github.com/LFDT-Nightstream/Nightstream.git", rev = "cc2e9850c97d17316a72590de86b71b84b7e7313" } +neo-math = { git = "https://github.com/LFDT-Nightstream/Nightstream.git", rev = "cc2e9850c97d17316a72590de86b71b84b7e7313" } +neo-ccs = { git = "https://github.com/LFDT-Nightstream/Nightstream.git", rev = "cc2e9850c97d17316a72590de86b71b84b7e7313" } +neo-ajtai = { git = "https://github.com/LFDT-Nightstream/Nightstream.git", rev = "cc2e9850c97d17316a72590de86b71b84b7e7313" } +neo-params = { git = "https://github.com/LFDT-Nightstream/Nightstream.git", rev = "cc2e9850c97d17316a72590de86b71b84b7e7313" } +neo-vm-trace = { git = "https://github.com/LFDT-Nightstream/Nightstream.git", rev = "cc2e9850c97d17316a72590de86b71b84b7e7313" } +neo-memory = { 
git = "https://github.com/LFDT-Nightstream/Nightstream.git", rev = "cc2e9850c97d17316a72590de86b71b84b7e7313" } + [profile.dev.package] insta.opt-level = 3 + +[patch.crates-io] +ark-relations = { git = "https://github.com/arkworks-rs/snark/" } +ark-snark = { git = "https://github.com/arkworks-rs/snark/" } +ark-r1cs-std = { git = "https://github.com/arkworks-rs/r1cs-std" } +ark-serialize = { git = "https://github.com/arkworks-rs/algebra"} +ark-crypto-primitives = { git = "https://github.com/arkworks-rs/crypto-primitives"} diff --git a/ark-goldilocks/Cargo.toml b/ark-goldilocks/Cargo.toml new file mode 100644 index 00000000..04380e36 --- /dev/null +++ b/ark-goldilocks/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "ark-goldilocks" +version.workspace = true +authors.workspace = true +edition.workspace = true +rust-version.workspace = true +repository.workspace = true +license.workspace = true + +[dependencies] +ark-ff = { version = "0.5.0", default-features = false } diff --git a/ark-goldilocks/src/lib.rs b/ark-goldilocks/src/lib.rs new file mode 100644 index 00000000..344af0a1 --- /dev/null +++ b/ark-goldilocks/src/lib.rs @@ -0,0 +1,7 @@ +use ark_ff::{Fp64, MontBackend, MontConfig}; + +#[derive(MontConfig)] +#[modulus = "18446744069414584321"] // 2^64 - 2^32 + 1 +#[generator = "7"] // a small primitive root (7 works here) +pub struct FpGoldilocksConfig; +pub type FpGoldilocks = Fp64>; diff --git a/ark-poseidon2/Cargo.toml b/ark-poseidon2/Cargo.toml new file mode 100644 index 00000000..3569243a --- /dev/null +++ b/ark-poseidon2/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "ark-poseidon2" +version.workspace = true +authors.workspace = true +edition.workspace = true +rust-version.workspace = true +repository.workspace = true +license.workspace = true + +[dependencies] +ark-ff = { version = "0.5.0", default-features = false } +ark-relations = { version = "0.5.0", features = ["std"] } +ark-r1cs-std = { version = "0.5.0", default-features = false } +ark-bn254 = { version = "0.5.0", features = ["scalar_field"] } +ark-poly = "0.5.0" +ark-poly-commit = "0.5.0" +rand = "0.8.5" +rand_chacha = "0.3.1" + +ark-goldilocks = { path = "../ark-goldilocks" } + +[dev-dependencies] +criterion = "0.5" + +[[bench]] +name = "poseidon2_gadget" +harness = false diff --git a/ark-poseidon2/benches/poseidon2_gadget.rs b/ark-poseidon2/benches/poseidon2_gadget.rs new file mode 100644 index 00000000..a47ce884 --- /dev/null +++ b/ark-poseidon2/benches/poseidon2_gadget.rs @@ -0,0 +1,61 @@ +use ark_poseidon2::{ + F, RoundConstants, + constants::GOLDILOCKS_S_BOX_DEGREE, + gadget::Poseidon2Gadget, + linear_layers::{GoldilocksExternalLinearLayer, GoldilocksInternalLinearLayer8}, +}; +use ark_r1cs_std::{GR1CSVar as _, alloc::AllocVar as _, fields::fp::FpVar}; +use ark_relations::gr1cs::ConstraintSystem; +use criterion::{Criterion, black_box, criterion_group, criterion_main}; + +const WIDTH: usize = 8; +const HALF_FULL_ROUNDS: usize = 4; +const PARTIAL_ROUNDS: usize = 22; + +fn bench_poseidon2_gadget_inc(c: &mut Criterion) { + c.bench_function("poseidon2_gadget_inc", |b| { + b.iter(|| { + let cs = ConstraintSystem::::new_ref(); + + let constants = RoundConstants::new_goldilocks_8_constants(); + + let input_values = [ + F::from(1), + F::from(2), + F::from(3), + F::from(4), + F::from(5), + F::from(6), + F::from(7), + F::from(8), + ]; + + let input_vars = input_values + .iter() + .map(|&val| FpVar::new_witness(cs.clone(), || Ok(val))) + .collect::, _>>() + .unwrap(); + let input_array: [FpVar; WIDTH] = 
input_vars.try_into().unwrap(); + + let gadget = Poseidon2Gadget::< + F, + GoldilocksExternalLinearLayer<8>, + GoldilocksInternalLinearLayer8, + WIDTH, + GOLDILOCKS_S_BOX_DEGREE, + HALF_FULL_ROUNDS, + PARTIAL_ROUNDS, + >::new(constants); + let result = gadget.permute(&input_array).unwrap(); + + let output_values: Vec = result + .iter() + .map(|var: &FpVar| var.value().unwrap()) + .collect(); + black_box(output_values); + }); + }); +} + +criterion_group!(benches, bench_poseidon2_gadget_inc); +criterion_main!(benches); diff --git a/ark-poseidon2/src/constants.rs b/ark-poseidon2/src/constants.rs new file mode 100644 index 00000000..2bcdbe6b --- /dev/null +++ b/ark-poseidon2/src/constants.rs @@ -0,0 +1,188 @@ +use crate::F; +use ark_ff::{PrimeField, UniformRand}; +use rand::SeedableRng; +use rand_chacha::ChaCha20Rng; + +/// Degree of the chosen permutation polynomial for Goldilocks, used as the Poseidon2 S-Box. +/// +/// As p - 1 = 2^32 * 3 * 5 * 17 * ... the smallest choice for a degree D satisfying gcd(p - 1, D) = 1 is 7. +pub const GOLDILOCKS_S_BOX_DEGREE: u64 = 7; +pub const HALF_FULL_ROUNDS: usize = 4; +pub const PARTIAL_ROUNDS: usize = 22; + +pub const HL_GOLDILOCKS_8_EXTERNAL_ROUND_CONSTANTS: [[[u64; 8]; 4]; 2] = [ + [ + [ + 0xdd5743e7f2a5a5d9, + 0xcb3a864e58ada44b, + 0xffa2449ed32f8cdc, + 0x42025f65d6bd13ee, + 0x7889175e25506323, + 0x34b98bb03d24b737, + 0xbdcc535ecc4faa2a, + 0x5b20ad869fc0d033, + ], + [ + 0xf1dda5b9259dfcb4, + 0x27515210be112d59, + 0x4227d1718c766c3f, + 0x26d333161a5bd794, + 0x49b938957bf4b026, + 0x4a56b5938b213669, + 0x1120426b48c8353d, + 0x6b323c3f10a56cad, + ], + [ + 0xce57d6245ddca6b2, + 0xb1fc8d402bba1eb1, + 0xb5c5096ca959bd04, + 0x6db55cd306d31f7f, + 0xc49d293a81cb9641, + 0x1ce55a4fe979719f, + 0xa92e60a9d178a4d1, + 0x002cc64973bcfd8c, + ], + [ + 0xcea721cce82fb11b, + 0xe5b55eb8098ece81, + 0x4e30525c6f1ddd66, + 0x43c6702827070987, + 0xaca68430a7b5762a, + 0x3674238634df9c93, + 0x88cee1c825e33433, + 0xde99ae8d74b57176, + ], + ], + [ + [ + 0x014ef1197d341346, + 0x9725e20825d07394, + 0xfdb25aef2c5bae3b, + 0xbe5402dc598c971e, + 0x93a5711f04cdca3d, + 0xc45a9a5b2f8fb97b, + 0xfe8946a924933545, + 0x2af997a27369091c, + ], + [ + 0xaa62c88e0b294011, + 0x058eb9d810ce9f74, + 0xb3cb23eced349ae4, + 0xa3648177a77b4a84, + 0x43153d905992d95d, + 0xf4e2a97cda44aa4b, + 0x5baa2702b908682f, + 0x082923bdf4f750d1, + ], + [ + 0x98ae09a325893803, + 0xf8a6475077968838, + 0xceb0735bf00b2c5f, + 0x0a1a5d953888e072, + 0x2fcb190489f94475, + 0xb5be06270dec69fc, + 0x739cb934b09acf8b, + 0x537750b75ec7f25b, + ], + [ + 0xe9dd318bae1f3961, + 0xf7462137299efe1a, + 0xb1f6b8eee9adb940, + 0xbdebcc8a809dfe6b, + 0x40fc1f791b178113, + 0x3ac1c3362d014864, + 0x9a016184bdb8aeba, + 0x95f2394459fbc25e, + ], + ], +]; + +pub const HL_GOLDILOCKS_8_INTERNAL_ROUND_CONSTANTS: [u64; 22] = [ + 0x488897d85ff51f56, + 0x1140737ccb162218, + 0xa7eeb9215866ed35, + 0x9bd2976fee49fcc9, + 0xc0c8f0de580a3fcc, + 0x4fb2dae6ee8fc793, + 0x343a89f35f37395b, + 0x223b525a77ca72c8, + 0x56ccb62574aaa918, + 0xc4d507d8027af9ed, + 0xa080673cf0b7e95c, + 0xf0184884eb70dcf8, + 0x044f10b0cb3d5c69, + 0xe9e3f7993938f186, + 0x1b761c80e772f459, + 0x606cec607a1b5fac, + 0x14a0c2e1d45f03cd, + 0x4eace8855398574f, + 0xf905ca7103eff3e6, + 0xf8c8f8d20862c059, + 0xb524fe8bdd678e5a, + 0xfbb7865901a1ec41, +]; + +/// Round constants for Poseidon2, in a format that's convenient for R1CS. 
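The width-8 tables above are the fixed Poseidon2 round constants for Goldilocks; the tests further down compare the permutation output against values taken from the plonky3 implementation. A minimal module-level sanity sketch (editorial, not part of this patch), assuming only the constants and round-count definitions from this file, that the table shapes agree with the declared round counts:

const _: () = {
    // 2 blocks of HALF_FULL_ROUNDS external rounds, each with 8 constants,
    // plus PARTIAL_ROUNDS internal round constants.
    assert!(HL_GOLDILOCKS_8_EXTERNAL_ROUND_CONSTANTS.len() == 2);
    assert!(HL_GOLDILOCKS_8_EXTERNAL_ROUND_CONSTANTS[0].len() == HALF_FULL_ROUNDS);
    assert!(HL_GOLDILOCKS_8_INTERNAL_ROUND_CONSTANTS.len() == PARTIAL_ROUNDS);
};

The RoundConstants struct that follows carries the same data after conversion to field elements.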
+#[derive(Debug, Clone)] +pub struct RoundConstants< + F: PrimeField, + const WIDTH: usize, + const HALF_FULL_ROUNDS: usize, + const PARTIAL_ROUNDS: usize, +> { + pub beginning_full_round_constants: [[F; WIDTH]; HALF_FULL_ROUNDS], + pub partial_round_constants: [F; PARTIAL_ROUNDS], + pub ending_full_round_constants: [[F; WIDTH]; HALF_FULL_ROUNDS], +} + +impl RoundConstants { + // TODO: cache/lazyfy this + pub fn new_goldilocks_8_constants() -> Self { + let [beginning_full_round_constants, ending_full_round_constants] = + HL_GOLDILOCKS_8_EXTERNAL_ROUND_CONSTANTS; + + Self { + beginning_full_round_constants: constants_to_ark_arrays(beginning_full_round_constants), + partial_round_constants: HL_GOLDILOCKS_8_INTERNAL_ROUND_CONSTANTS + .into_iter() + .map(F::from) + .collect::>() + .try_into() + .unwrap(), + ending_full_round_constants: constants_to_ark_arrays(ending_full_round_constants), + } + } +} + +impl RoundConstants { + pub fn new_goldilocks_12_constants() -> Self { + // TODO: hardcoded seed + let mut rng = ChaCha20Rng::seed_from_u64(77); + + Self { + beginning_full_round_constants: std::array::from_fn(|_| { + std::array::from_fn(|_| F::rand(&mut rng)) + }), + partial_round_constants: std::array::from_fn(|_| F::rand(&mut rng)), + ending_full_round_constants: std::array::from_fn(|_| { + std::array::from_fn(|_| F::rand(&mut rng)) + }), + } + } +} + +fn constants_to_ark_arrays(beginning_full_round_constants: [[u64; 8]; 4]) -> [[F; 8]; 4] { + beginning_full_round_constants + .into_iter() + .map(|inner| { + inner + .into_iter() + .map(F::from) + .collect::>() + .try_into() + .unwrap() + }) + .collect::>() + .try_into() + .unwrap() +} diff --git a/ark-poseidon2/src/gadget.rs b/ark-poseidon2/src/gadget.rs new file mode 100644 index 00000000..1da1044a --- /dev/null +++ b/ark-poseidon2/src/gadget.rs @@ -0,0 +1,221 @@ +use super::constants::RoundConstants; +use super::linear_layers::{ExternalLinearLayer, InternalLinearLayer}; +use crate::constants::{GOLDILOCKS_S_BOX_DEGREE, HALF_FULL_ROUNDS, PARTIAL_ROUNDS}; +use ark_ff::PrimeField; +use ark_r1cs_std::fields::fp::FpVar; +use ark_r1cs_std::prelude::*; +use ark_relations::gr1cs::SynthesisError; + +/// R1CS gadget for Poseidon2 hash function +pub struct Poseidon2Gadget< + F: PrimeField, + ExtLinear: ExternalLinearLayer, + IntLinear: InternalLinearLayer, + const WIDTH: usize, + const SBOX_DEGREE: u64, + const HALF_FULL_ROUNDS: usize, + const PARTIAL_ROUNDS: usize, +> { + constants: RoundConstants, + _phantom: core::marker::PhantomData<(ExtLinear, IntLinear)>, +} + +impl< + F: PrimeField, + ExtLinear: ExternalLinearLayer, + IntLinear: InternalLinearLayer, + const WIDTH: usize, + const SBOX_DEGREE: u64, + const HALF_FULL_ROUNDS: usize, + const PARTIAL_ROUNDS: usize, +> Poseidon2Gadget +{ + pub fn new(constants: RoundConstants) -> Self { + Self { + // constants: constants.allocate(cs.clone())?, + constants, + _phantom: core::marker::PhantomData, + } + } + + /// Compute Poseidon2 permutation in R1CS + pub fn permute(&self, inputs: &[FpVar; WIDTH]) -> Result<[FpVar; WIDTH], SynthesisError> { + let mut state = inputs.clone(); + + ExtLinear::apply(&mut state)?; + + // Beginning full rounds + for round in 0..HALF_FULL_ROUNDS { + self.eval_full_round( + &mut state, + &self.constants.beginning_full_round_constants[round], + )?; + } + + for round in 0..PARTIAL_ROUNDS { + self.eval_partial_round(&mut state, &self.constants.partial_round_constants[round])?; + } + + for round in 0..HALF_FULL_ROUNDS { + self.eval_full_round( + &mut state, + 
&self.constants.ending_full_round_constants[round], + )?; + } + + Ok(state) + } + + fn eval_full_round( + &self, + state: &mut [FpVar; WIDTH], + round_constants: &[F; WIDTH], + ) -> Result<(), SynthesisError> { + // Add round constants and apply S-box to each element + for (s, r) in state.iter_mut().zip(round_constants.iter()) { + *s = s.clone() + FpVar::constant(*r); + *s = self.eval_sbox(s.clone())?; + } + + // Apply external linear layer + ExtLinear::apply(state)?; + + Ok(()) + } + + fn eval_partial_round( + &self, + state: &mut [FpVar; WIDTH], + round_constant: &F, + ) -> Result<(), SynthesisError> { + // Add round constant and apply S-box to first element only + state[0] = state[0].clone() + FpVar::constant(*round_constant); + state[0] = self.eval_sbox(state[0].clone())?; + + // Apply internal linear layer + IntLinear::apply(state)?; + + Ok(()) + } + + /// Evaluates the S-box over a field variable + fn eval_sbox(&self, x: FpVar) -> Result, SynthesisError> { + match SBOX_DEGREE { + 3 => { + // x^3 + let x2 = x.square()?; + Ok(x2 * &x) + } + 5 => { + // x^5 + let x2 = x.square()?; + let x4 = x2.square()?; + Ok(x4 * &x) + } + 7 => { + // x^7 + let x2 = x.square()?; + let x3 = &x2 * &x; + let x6 = x3.square()?; + Ok(x6 * &x) + } + _ => Err(SynthesisError::Unsatisfiable), + } + } +} + +#[cfg(test)] +pub fn poseidon2_hash< + F: PrimeField, + ExtLinear: ExternalLinearLayer, + IntLinear: InternalLinearLayer, + const WIDTH: usize, + const SBOX_DEGREE: u64, + const HALF_FULL_ROUNDS: usize, + const PARTIAL_ROUNDS: usize, +>( + inputs: &[FpVar; WIDTH], + constants: &RoundConstants, +) -> Result<[FpVar; WIDTH], SynthesisError> { + let gadget = Poseidon2Gadget::< + F, + ExtLinear, + IntLinear, + WIDTH, + SBOX_DEGREE, + HALF_FULL_ROUNDS, + PARTIAL_ROUNDS, + >::new(constants.clone()); + gadget.permute(inputs) +} + +pub fn poseidon2_compress< + const WIDTH: usize, + const TARGET: usize, + F: PrimeField, + ExtLinear: ExternalLinearLayer, + IntLinear: InternalLinearLayer, +>( + inputs: &[FpVar; WIDTH], + constants: &RoundConstants, +) -> Result<[FpVar; TARGET], SynthesisError> { + let gadget = Poseidon2Gadget::< + F, + ExtLinear, + IntLinear, + WIDTH, + GOLDILOCKS_S_BOX_DEGREE, + HALF_FULL_ROUNDS, + PARTIAL_ROUNDS, + >::new(constants.clone()); + let p_x = gadget.permute(inputs)?; + + // truncation + let mut p_x: [FpVar; TARGET] = std::array::from_fn(|i| p_x[i].clone()); + + for (p_x, x) in p_x.iter_mut().zip(inputs) { + // feed-forward operation + *p_x += x; + } + + Ok(p_x) +} + +pub fn poseidon2_sponge_absorb< + F: PrimeField, + ExtLinear: ExternalLinearLayer, + IntLinear: InternalLinearLayer, + const WIDTH: usize, + const RATE: usize, + const SBOX_DEGREE: u64, + const HALF_FULL_ROUNDS: usize, + const PARTIAL_ROUNDS: usize, +>( + inputs: &[FpVar], + constants: &RoundConstants, +) -> Result<[FpVar; WIDTH], SynthesisError> { + if RATE >= WIDTH { + return Err(SynthesisError::Unsatisfiable); + } + + let gadget = Poseidon2Gadget::< + F, + ExtLinear, + IntLinear, + WIDTH, + SBOX_DEGREE, + HALF_FULL_ROUNDS, + PARTIAL_ROUNDS, + >::new(constants.clone()); + + let mut state = std::array::from_fn(|_| FpVar::zero()); + + for chunk in inputs.chunks(RATE) { + for (i, value) in chunk.iter().enumerate() { + state[i] += value.clone(); + } + state = gadget.permute(&state)?; + } + + Ok(state) +} diff --git a/ark-poseidon2/src/goldilocks.rs b/ark-poseidon2/src/goldilocks.rs new file mode 100644 index 00000000..c41b5c20 --- /dev/null +++ b/ark-poseidon2/src/goldilocks.rs @@ -0,0 +1,100 @@ +use ark_goldilocks::FpGoldilocks; 
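The S-box evaluation in eval_sbox above builds x^7 from two squarings and two multiplications (x^2, x^3 = x^2 * x, x^6 = (x^3)^2, x^7 = x^6 * x), so each S-box costs four nonlinear operations in the constraint system. A minimal out-of-circuit sketch (editorial, not part of this patch), assuming only ark_ff and the ark-goldilocks crate added here, that the chain matches a plain exponentiation:

use ark_ff::Field;
use ark_goldilocks::FpGoldilocks;

fn sbox7_chain(x: FpGoldilocks) -> FpGoldilocks {
    let x2 = x * x;   // x^2 (first squaring)
    let x3 = x2 * x;  // x^3
    let x6 = x3 * x3; // x^6 (second squaring)
    x6 * x            // x^7
}

fn main() {
    let x = FpGoldilocks::from(1234567u64);
    assert_eq!(sbox7_chain(x), x.pow([7u64]));
}

The diagonal matrices that follow are the per-width internal-layer constants consumed by matmul_internal in linear_layers.rs.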
+use std::sync::OnceLock; + +pub static MATRIX_DIAG_8_GOLDILOCKS: OnceLock<[FpGoldilocks; 8]> = OnceLock::new(); + +pub(crate) fn matrix_diag_8_goldilocks() -> &'static [FpGoldilocks; 8] { + MATRIX_DIAG_8_GOLDILOCKS.get_or_init(|| { + [ + FpGoldilocks::from(0xa98811a1fed4e3a5_u64), + FpGoldilocks::from(0x1cc48b54f377e2a0_u64), + FpGoldilocks::from(0xe40cd4f6c5609a26_u64), + FpGoldilocks::from(0x11de79ebca97a4a3_u64), + FpGoldilocks::from(0x9177c73d8b7e929c_u64), + FpGoldilocks::from(0x2a6fe8085797e791_u64), + FpGoldilocks::from(0x3de6e93329f8d5ad_u64), + FpGoldilocks::from(0x3f7af9125da962fe_u64), + ] + }) +} + +#[allow(unused)] +pub static MATRIX_DIAG_12_GOLDILOCKS: OnceLock<[FpGoldilocks; 12]> = OnceLock::new(); + +#[allow(unused)] +pub(crate) fn matrix_diag_12_goldilocks() -> &'static [FpGoldilocks; 12] { + MATRIX_DIAG_12_GOLDILOCKS.get_or_init(|| { + [ + FpGoldilocks::from(0xc3b6c08e23ba9300_u64), + FpGoldilocks::from(0xd84b5de94a324fb6_u64), + FpGoldilocks::from(0x0d0c371c5b35b84f_u64), + FpGoldilocks::from(0x7964f570e7188037_u64), + FpGoldilocks::from(0x5daf18bbd996604b_u64), + FpGoldilocks::from(0x6743bc47b9595257_u64), + FpGoldilocks::from(0x5528b9362c59bb70_u64), + FpGoldilocks::from(0xac45e25b7127b68b_u64), + FpGoldilocks::from(0xa2077d7dfbb606b5_u64), + FpGoldilocks::from(0xf3faac6faee378ae_u64), + FpGoldilocks::from(0x0c6388b51545e883_u64), + FpGoldilocks::from(0xd27dbb6944917b60_u64), + ] + }) +} + +#[allow(unused)] +pub static MATRIX_DIAG_16_GOLDILOCKS: OnceLock<[FpGoldilocks; 16]> = OnceLock::new(); + +#[allow(unused)] +pub(crate) fn matrix_diag_16_goldilocks() -> &'static [FpGoldilocks; 16] { + MATRIX_DIAG_16_GOLDILOCKS.get_or_init(|| { + [ + FpGoldilocks::from(0xde9b91a467d6afc0_u64), + FpGoldilocks::from(0xc5f16b9c76a9be17_u64), + FpGoldilocks::from(0x0ab0fef2d540ac55_u64), + FpGoldilocks::from(0x3001d27009d05773_u64), + FpGoldilocks::from(0xed23b1f906d3d9eb_u64), + FpGoldilocks::from(0x5ce73743cba97054_u64), + FpGoldilocks::from(0x1c3bab944af4ba24_u64), + FpGoldilocks::from(0x2faa105854dbafae_u64), + FpGoldilocks::from(0x53ffb3ae6d421a10_u64), + FpGoldilocks::from(0xbcda9df8884ba396_u64), + FpGoldilocks::from(0xfc1273e4a31807bb_u64), + FpGoldilocks::from(0xc77952573d5142c0_u64), + FpGoldilocks::from(0x56683339a819b85e_u64), + FpGoldilocks::from(0x328fcbd8f0ddc8eb_u64), + FpGoldilocks::from(0xb5101e303fce9cb7_u64), + FpGoldilocks::from(0x774487b8c40089bb_u64), + ] + }) +} + +#[allow(unused)] +pub static MATRIX_DIAG_20_GOLDILOCKS: OnceLock<[FpGoldilocks; 20]> = OnceLock::new(); + +#[allow(unused)] +pub(crate) fn matrix_diag_20_goldilocks() -> &'static [FpGoldilocks; 20] { + MATRIX_DIAG_20_GOLDILOCKS.get_or_init(|| { + [ + FpGoldilocks::from(0x95c381fda3b1fa57_u64), + FpGoldilocks::from(0xf36fe9eb1288f42c_u64), + FpGoldilocks::from(0x89f5dcdfef277944_u64), + FpGoldilocks::from(0x106f22eadeb3e2d2_u64), + FpGoldilocks::from(0x684e31a2530e5111_u64), + FpGoldilocks::from(0x27435c5d89fd148e_u64), + FpGoldilocks::from(0x3ebed31c414dbf17_u64), + FpGoldilocks::from(0xfd45b0b2d294e3cc_u64), + FpGoldilocks::from(0x48c904473a7f6dbf_u64), + FpGoldilocks::from(0xe0d1b67809295b4d_u64), + FpGoldilocks::from(0xddd1941e9d199dcb_u64), + FpGoldilocks::from(0x8cfe534eeb742219_u64), + FpGoldilocks::from(0xa6e5261d9e3b8524_u64), + FpGoldilocks::from(0x6897ee5ed0f82c1b_u64), + FpGoldilocks::from(0x0e7dcd0739ee5f78_u64), + FpGoldilocks::from(0x493253f3d0d32363_u64), + FpGoldilocks::from(0xbb2737f5845f05c0_u64), + FpGoldilocks::from(0xa187e810b06ad903_u64), + 
FpGoldilocks::from(0xb635b995936c4918_u64), + FpGoldilocks::from(0x0b3694a940bd2394_u64), + ] + }) +} diff --git a/ark-poseidon2/src/lib.rs b/ark-poseidon2/src/lib.rs new file mode 100644 index 00000000..d76ff443 --- /dev/null +++ b/ark-poseidon2/src/lib.rs @@ -0,0 +1,310 @@ +//! Poseidon2 hash function implementation for R1CS (SNARK) systems using Arkworks. + +pub mod constants; +pub mod gadget; +pub mod goldilocks; +pub mod linear_layers; +pub mod math; + +pub type F = ark_goldilocks::FpGoldilocks; + +use crate::{ + gadget::{poseidon2_compress, poseidon2_sponge_absorb}, + linear_layers::{ + GoldilocksExternalLinearLayer, GoldilocksInternalLinearLayer8, + GoldilocksInternalLinearLayer12, + }, +}; +use ark_r1cs_std::{GR1CSVar as _, alloc::AllocVar as _, fields::fp::FpVar}; +use ark_relations::gr1cs::{ConstraintSystem, SynthesisError}; +pub use constants::RoundConstants; +use constants::{GOLDILOCKS_S_BOX_DEGREE, HALF_FULL_ROUNDS, PARTIAL_ROUNDS}; + +pub fn compress_8(inputs: &[FpVar; 8]) -> Result<[FpVar; 4], SynthesisError> { + let constants = RoundConstants::new_goldilocks_8_constants(); + + poseidon2_compress::<8, 4, F, GoldilocksExternalLinearLayer<8>, GoldilocksInternalLinearLayer8>( + inputs, &constants, + ) +} + +pub fn compress_12(inputs: &[FpVar; 12]) -> Result<[FpVar; 4], SynthesisError> { + let constants = RoundConstants::new_goldilocks_12_constants(); + + poseidon2_compress::<12, 4, F, GoldilocksExternalLinearLayer<12>, GoldilocksInternalLinearLayer12>( + inputs, &constants, + ) +} + +pub fn sponge_8(inputs: &[FpVar]) -> Result<[FpVar; 4], SynthesisError> { + let constants = RoundConstants::new_goldilocks_8_constants(); + + let state = poseidon2_sponge_absorb::< + F, + GoldilocksExternalLinearLayer<8>, + GoldilocksInternalLinearLayer8, + 8, + 4, + GOLDILOCKS_S_BOX_DEGREE, + HALF_FULL_ROUNDS, + PARTIAL_ROUNDS, + >(inputs, &constants)?; + + Ok(std::array::from_fn(|i| state[i].clone())) +} + +pub fn sponge_12(inputs: &[FpVar]) -> Result<[FpVar; 4], SynthesisError> { + let constants = RoundConstants::new_goldilocks_12_constants(); + + let state = poseidon2_sponge_absorb::< + F, + GoldilocksExternalLinearLayer<12>, + GoldilocksInternalLinearLayer12, + 12, + 8, + GOLDILOCKS_S_BOX_DEGREE, + HALF_FULL_ROUNDS, + PARTIAL_ROUNDS, + >(inputs, &constants)?; + + Ok(std::array::from_fn(|i| state[i].clone())) +} + +fn compress_trace_generic< + const WIDTH: usize, + ExtLinear: crate::linear_layers::ExternalLinearLayer, + IntLinear: crate::linear_layers::InternalLinearLayer, +>( + inputs: &[F; WIDTH], + constants: &RoundConstants, +) -> Result<[F; 4], SynthesisError> { + // TODO: obviously this is not a good way of implementing this, but the + // implementation is currently not general enough to be used over both FpVar and + // just plain field elements + // + // for now, we just create a throw-away constraint system and get the values + // from that computation + let cs = ConstraintSystem::::new_ref(); + + let inputs = inputs + .iter() + .map(|input| FpVar::new_witness(cs.clone(), || Ok(input))) + .collect::, _>>()?; + + let compressed = poseidon2_compress::( + inputs[..].try_into().unwrap(), + constants, + )?; + + Ok(std::array::from_fn(|i| compressed[i].value().unwrap())) +} + +pub fn compress_8_trace(inputs: &[F; 8]) -> Result<[F; 4], SynthesisError> { + let constants = RoundConstants::new_goldilocks_8_constants(); + compress_trace_generic::<8, GoldilocksExternalLinearLayer<8>, GoldilocksInternalLinearLayer8>( + inputs, &constants, + ) +} + +pub fn compress_12_trace(inputs: &[F; 12]) -> 
Result<[F; 4], SynthesisError> { + let constants = RoundConstants::new_goldilocks_12_constants(); + compress_trace_generic::<12, GoldilocksExternalLinearLayer<12>, GoldilocksInternalLinearLayer12>( + inputs, &constants, + ) +} + +pub fn sponge_8_trace(inputs: &[F; 8]) -> Result<[F; 4], SynthesisError> { + let constants = RoundConstants::new_goldilocks_8_constants(); + sponge_trace_generic::<8, 4, GoldilocksExternalLinearLayer<8>, GoldilocksInternalLinearLayer8>( + inputs, &constants, + ) +} + +pub fn sponge_12_trace(inputs: &[F]) -> Result<[F; 4], SynthesisError> { + let constants = RoundConstants::new_goldilocks_12_constants(); + sponge_trace_generic::<12, 8, GoldilocksExternalLinearLayer<12>, GoldilocksInternalLinearLayer12>( + inputs, &constants, + ) +} + +fn sponge_trace_generic< + const WIDTH: usize, + const RATE: usize, + ExtLinear: crate::linear_layers::ExternalLinearLayer, + IntLinear: crate::linear_layers::InternalLinearLayer, +>( + inputs: &[F], + constants: &RoundConstants, +) -> Result<[F; 4], SynthesisError> { + // TODO: obviously this is not a good way of implementing this, but the + // implementation is currently not general enough to be used over both FpVar and + // just plain field elements + // + // for now, we just create a throw-away constraint system and get the values + // from that computation + let cs = ConstraintSystem::::new_ref(); + + let inputs = inputs + .iter() + .map(|input| FpVar::new_witness(cs.clone(), || Ok(input))) + .collect::, _>>()?; + + let state = poseidon2_sponge_absorb::< + F, + ExtLinear, + IntLinear, + WIDTH, + RATE, + GOLDILOCKS_S_BOX_DEGREE, + HALF_FULL_ROUNDS, + PARTIAL_ROUNDS, + >(&inputs, constants)?; + + Ok(std::array::from_fn(|i| state[i].value().unwrap())) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + F, + constants::GOLDILOCKS_S_BOX_DEGREE, + gadget::poseidon2_hash, + linear_layers::{GoldilocksExternalLinearLayer, GoldilocksInternalLinearLayer8}, + }; + use ark_r1cs_std::{GR1CSVar, alloc::AllocVar, fields::fp::FpVar}; + use ark_relations::gr1cs::{ConstraintSystem, SynthesisError}; + + const WIDTH: usize = 8; + const HALF_FULL_ROUNDS: usize = 4; + const PARTIAL_ROUNDS: usize = 22; + + #[test] + fn test_poseidon2_gadget_basic() -> Result<(), SynthesisError> { + let cs = ConstraintSystem::::new_ref(); + + let constants = RoundConstants::new_goldilocks_8_constants(); + + let input_values = [ + F::from(0), + F::from(0), + F::from(0), + F::from(0), + F::from(0), + F::from(0), + F::from(0), + F::from(0), + ]; + + let input_vars = input_values + .iter() + .map(|&val| FpVar::new_witness(cs.clone(), || Ok(val))) + .collect::, _>>()?; + let input_array: [FpVar; WIDTH] = input_vars.try_into().unwrap(); + + let result = poseidon2_hash::< + F, + GoldilocksExternalLinearLayer<8>, + GoldilocksInternalLinearLayer8, + WIDTH, + GOLDILOCKS_S_BOX_DEGREE, + HALF_FULL_ROUNDS, + PARTIAL_ROUNDS, + >(&input_array, &constants)?; + + assert!(cs.is_satisfied()?); + + let output_values: Vec = result + .iter() + .map(|var: &FpVar| var.value().unwrap()) + .collect(); + + // output taken from the plonky3 implementation + let expected: [F; 8] = [ + F::from(12033154258266855215_u64), + F::from(10280848056061907209_u64), + F::from(2185915012395546036_u64), + F::from(14655708400709920811_u64), + F::from(8156942431357196992_u64), + F::from(4422236401544933648_u64), + F::from(12369536641900949_u64), + F::from(7054567940610806767_u64), + ]; + + // At least one output should be non-zero (very likely with our placeholder linear layers) + 
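// Editorial note (hedged): the "placeholder linear layers" remark above appears to
// predate the concrete Goldilocks layers wired in from linear_layers.rs; the
// any-non-zero assertion is only a cheap sanity guard, and the decisive check is the
// exact comparison against the plonky3 reference values asserted further below.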
assert!(output_values.iter().any(|&val| val != F::from(0u64))); + + println!("Input: {:?}", input_values); + println!("Output: {:?}", output_values); + println!("Constraint system satisfied: {}", cs.is_satisfied()?); + println!("Number of constraints: {}", cs.num_constraints()); + + assert_eq!(output_values, expected); + + Ok(()) + } + + #[test] + fn test_poseidon2_gadget_inc() -> Result<(), SynthesisError> { + let cs = ConstraintSystem::::new_ref(); + + let constants = RoundConstants::new_goldilocks_8_constants(); + + // Create test inputs + let input_values = [ + F::from(1), + F::from(2), + F::from(3), + F::from(4), + F::from(5), + F::from(6), + F::from(7), + F::from(8), + ]; + + let input_vars = input_values + .iter() + .map(|&val| FpVar::new_witness(cs.clone(), || Ok(val))) + .collect::, _>>()?; + let input_array: [FpVar; WIDTH] = input_vars.try_into().unwrap(); + + let result = poseidon2_hash::< + F, + GoldilocksExternalLinearLayer<8>, + GoldilocksInternalLinearLayer8, + WIDTH, + GOLDILOCKS_S_BOX_DEGREE, + HALF_FULL_ROUNDS, + PARTIAL_ROUNDS, + >(&input_array, &constants)?; + + // Check that the constraint system is satisfied + assert!(cs.is_satisfied()?); + + let output_values: Vec = result + .iter() + .map(|var: &FpVar| var.value().unwrap()) + .collect(); + + // output taken from the plonky3 implementation + let expected: [F; 8] = [ + F::from(18388235340048743902_u64), + F::from(11155847389840004280_u64), + F::from(8258921485236881363_u64), + F::from(13238911595928314283_u64), + F::from(1414783942044928333_u64), + F::from(14855162370750728991_u64), + F::from(872655314674193689_u64), + F::from(10410794385812429044_u64), + ]; + + println!("Input: {:?}", input_values); + println!("Output: {:?}", output_values); + println!("Constraint system satisfied: {}", cs.is_satisfied()?); + println!("Number of constraints: {}", cs.num_constraints()); + + assert_eq!(output_values, expected); + + Ok(()) + } +} diff --git a/ark-poseidon2/src/linear_layers.rs b/ark-poseidon2/src/linear_layers.rs new file mode 100644 index 00000000..f9ed9fe5 --- /dev/null +++ b/ark-poseidon2/src/linear_layers.rs @@ -0,0 +1,58 @@ +//! 
Linear layer implementations for Poseidon2 R1CS gadget + +use crate::{ + F, + goldilocks::{matrix_diag_8_goldilocks, matrix_diag_12_goldilocks}, + math::mds_light_permutation, +}; +use ark_ff::PrimeField; +use ark_r1cs_std::fields::fp::FpVar; +use ark_relations::gr1cs::SynthesisError; + +pub trait ExternalLinearLayer { + fn apply(state: &mut [FpVar; WIDTH]) -> Result<(), SynthesisError>; +} + +pub trait InternalLinearLayer { + fn apply(state: &mut [FpVar; WIDTH]) -> Result<(), SynthesisError>; +} + +pub enum GoldilocksExternalLinearLayer {} + +impl ExternalLinearLayer for GoldilocksExternalLinearLayer { + fn apply(state: &mut [FpVar; WIDTH]) -> Result<(), SynthesisError> { + mds_light_permutation(state)?; + + Ok(()) + } +} + +pub enum GoldilocksInternalLinearLayer8 {} +pub enum GoldilocksInternalLinearLayer12 {} + +pub fn matmul_internal( + state: &mut [FpVar; WIDTH], + mat_internal_diag_m_1: &'static [F; WIDTH], +) { + let sum: FpVar = state.iter().sum(); + for i in 0..WIDTH { + state[i] *= FpVar::Constant(mat_internal_diag_m_1[i]); + state[i] += sum.clone(); + } +} + +impl InternalLinearLayer for GoldilocksInternalLinearLayer8 { + fn apply(state: &mut [FpVar; 8]) -> Result<(), SynthesisError> { + matmul_internal(state, matrix_diag_8_goldilocks()); + + Ok(()) + } +} + +impl InternalLinearLayer for GoldilocksInternalLinearLayer12 { + fn apply(state: &mut [FpVar; 12]) -> Result<(), SynthesisError> { + matmul_internal(state, matrix_diag_12_goldilocks()); + + Ok(()) + } +} diff --git a/ark-poseidon2/src/math.rs b/ark-poseidon2/src/math.rs new file mode 100644 index 00000000..9ea234aa --- /dev/null +++ b/ark-poseidon2/src/math.rs @@ -0,0 +1,135 @@ +use ark_ff::PrimeField; +use ark_r1cs_std::fields::{ + FieldVar as _, + fp::{AllocatedFp, FpVar}, +}; +use ark_relations::gr1cs::{ConstraintSystemRef, LinearCombination, SynthesisError}; + +#[inline(always)] +fn linear_combination_4( + coeffs: [F; 4], + vars: [&FpVar; 4], +) -> Result, SynthesisError> { + let mut sum_constants = F::zero(); + let mut has_value = true; + let mut value = F::zero(); + let mut cs = ConstraintSystemRef::None; + let mut lc_terms = Vec::with_capacity(4); + + for (coeff, var) in coeffs.iter().zip(vars.iter()) { + match var { + FpVar::Constant(c) => { + sum_constants += *coeff * *c; + } + FpVar::Var(v) => { + cs = cs.or(v.cs.clone()); + lc_terms.push((*coeff, v.variable)); + if let Ok(v) = v.value() { + value += *coeff * v; + } else { + has_value = false; + } + } + } + } + + if lc_terms.is_empty() { + return Ok(FpVar::Constant(sum_constants)); + } + + let variable = cs + .new_lc(|| { + let mut lc = LinearCombination(lc_terms); + lc.compactify(); + lc + }) + .unwrap(); + let value = if has_value { Some(value) } else { None }; + + Ok(FpVar::Var(AllocatedFp::new(value, variable, cs)) + FpVar::Constant(sum_constants)) +} + +/// Multiply a 4-element vector x by: +/// [ 2 3 1 1 ] +/// [ 1 2 3 1 ] +/// [ 1 1 2 3 ] +/// [ 3 1 1 2 ]. 
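As a quick plain-integer illustration of that 4x4 matrix (an editorial sketch, not part of the patch):

fn apply_mat4_plain(x: [u64; 4]) -> [u64; 4] {
    [
        2 * x[0] + 3 * x[1] + x[2] + x[3],
        x[0] + 2 * x[1] + 3 * x[2] + x[3],
        x[0] + x[1] + 2 * x[2] + 3 * x[3],
        3 * x[0] + x[1] + x[2] + 2 * x[3],
    ]
}
// e.g. apply_mat4_plain([1, 2, 3, 4]) == [15, 18, 21, 16]

The gadget below computes exactly these rows, but over FpVars: each row is built as a single new_lc linear combination via linear_combination_4, so it costs one linear combination rather than a chain of additions.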
+#[inline(always)] +fn apply_mat4(x: &mut [FpVar]) -> Result<(), SynthesisError> { + let vars = [&x[0], &x[1], &x[2], &x[3]]; + + let y0 = linear_combination_4([F::from(2u64), F::from(3u64), F::ONE, F::ONE], vars)?; + let y1 = linear_combination_4([F::ONE, F::from(2u64), F::from(3u64), F::ONE], vars)?; + let y2 = linear_combination_4([F::ONE, F::ONE, F::from(2u64), F::from(3u64)], vars)?; + let y3 = linear_combination_4([F::from(3u64), F::ONE, F::ONE, F::from(2u64)], vars)?; + + x[0] = y0; + x[1] = y1; + x[2] = y2; + x[3] = y3; + + Ok(()) +} + +/// Implement the matrix multiplication used by the external layer. +/// +/// Given a 4x4 MDS matrix M, we multiply by the `4N x 4N` matrix +/// `[[2M M ... M], [M 2M ... M], ..., [M M ... 2M]]`. +/// +/// # Panics +/// This will panic if `WIDTH` is not supported. Currently, the +/// supported `WIDTH` values are 2, 3, 4, 8, 12, 16, 20, 24.` +#[inline(always)] +pub fn mds_light_permutation( + state: &mut [FpVar; WIDTH], +) -> Result<(), SynthesisError> { + match WIDTH { + 2 => { + let mut sum = state[0].clone(); + sum += &state[1]; + state[0] += ∑ + state[1] += sum; + } + + 3 => { + let mut sum = state[0].clone(); + sum += &state[1]; + sum += &state[2]; + state[0] += ∑ + state[1] += ∑ + state[2] += sum; + } + + 4 | 8 | 12 | 16 | 20 | 24 => { + // First, we apply M_4 to each consecutive four elements of the state. + // In Appendix B's terminology, this replaces each x_i with x_i'. + for chunk in state.chunks_exact_mut(4) { + // mdsmat.permute_mut(chunk.try_into().unwrap()); + apply_mat4(chunk)?; + } + // Now, we apply the outer circulant matrix (to compute the y_i values). + + // We first precompute the four sums of every four elements. + let mut sums: [FpVar; 4] = core::array::from_fn(|_| FpVar::zero()); + for j in (0..WIDTH).step_by(4) { + sums[0] += &state[j]; + sums[1] += &state[j + 1]; + sums[2] += &state[j + 2]; + sums[3] += &state[j + 3]; + } + + // The formula for each y_i involves 2x_i' term and x_j' terms for each j that equals i mod 4. 
+ // In other words, we can add a single copy of x_i' to the appropriate one of our precomputed sums + state + .iter_mut() + .enumerate() + .for_each(|(i, elem)| *elem += &sums[i % 4]); + } + + _ => { + panic!("Unsupported width"); + } + } + + Ok(()) +} diff --git a/interleaving/starstream-interleaving-proof/Cargo.toml b/interleaving/starstream-interleaving-proof/Cargo.toml new file mode 100644 index 00000000..1cd2c21f --- /dev/null +++ b/interleaving/starstream-interleaving-proof/Cargo.toml @@ -0,0 +1,34 @@ +[package] +name = "starstream-interleaving-proof" +version = "0.1.0" +edition = "2024" + +[dependencies] +ark-ff = { version = "0.5.0", default-features = false } +ark-relations = { version = "0.5.0", features = ["std"] } +ark-r1cs-std = { version = "0.5.0", default-features = false } +ark-bn254 = { version = "0.5.0", features = ["scalar_field"] } +ark-poly = "0.5.0" +ark-poly-commit = "0.5.0" +tracing = { version = "0.1", default-features = false, features = [ "attributes" ] } +tracing-subscriber = { version = "0.3", features = ["env-filter"] } + +neo-fold = { workspace = true } +neo-math = { workspace = true } +neo-ccs = { workspace = true } +neo-ajtai = { workspace = true } +neo-params = { workspace = true } +neo-vm-trace = { workspace = true } +neo-memory = { workspace = true } + +starstream-interleaving-spec = { path = "../starstream-interleaving-spec" } + +p3-goldilocks = { version = "0.4.1", default-features = false } +p3-field = "0.4.1" +p3-symmetric = "0.4.1" +p3-poseidon2 = "0.4.1" +rand_chacha = "0.9.0" +rand = "0.9" + +ark-goldilocks = { path = "../../ark-goldilocks" } +ark-poseidon2 = { path = "../../ark-poseidon2" } diff --git a/interleaving/starstream-interleaving-proof/src/abi.rs b/interleaving/starstream-interleaving-proof/src/abi.rs new file mode 100644 index 00000000..309e3d79 --- /dev/null +++ b/interleaving/starstream-interleaving-proof/src/abi.rs @@ -0,0 +1,343 @@ +use crate::{F, OptionalF, ledger_operation::LedgerOperation}; +use ark_ff::Zero; +use starstream_interleaving_spec::{EffectDiscriminant, LedgerEffectsCommitment, WitLedgerEffect}; + +pub const OPCODE_ARG_COUNT: usize = 7; + +pub fn commit(prev: LedgerEffectsCommitment, op: WitLedgerEffect) -> LedgerEffectsCommitment { + let ledger_op = ledger_operation_from_wit(&op); + let opcode_discriminant = opcode_discriminant(&ledger_op); + let opcode_args = opcode_args(&ledger_op); + + let mut concat = [F::zero(); 12]; + concat[..4].copy_from_slice(&prev.0); + concat[4] = opcode_discriminant; + concat[5..].copy_from_slice(&opcode_args); + + let compressed = + ark_poseidon2::compress_12_trace(&concat).expect("poseidon2 compress_12_trace"); + LedgerEffectsCommitment(compressed) +} + +#[derive(Copy, Clone, Debug)] +pub enum ArgName { + Target, + Val, + Ret, + Caller, + Offset, + Size, + ProgramHash0, + ProgramHash1, + ProgramHash2, + ProgramHash3, + ActivationCaller, + OwnerId, + TokenId, + InterfaceId0, + InterfaceId1, + InterfaceId2, + InterfaceId3, + + PackedRef0, + PackedRef1, + PackedRef2, + PackedRef3, + PackedRef4, + PackedRef5, +} + +impl ArgName { + // maps argument names to positional indices + // + // these need to match the order in the ABI used by the wasm/program vm. 
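Stepping back to the commit function at the top of this file: it hashes the 4-element previous commitment, the opcode discriminant, and the 7 opcode args (4 + 1 + 7 = 12 field elements) into a new 4-element commitment via the width-12 Poseidon2 compression, so an ordered effect log folds into a single running commitment. A hedged sketch of that folding (hypothetical helper, not in the patch, assumed to live next to commit in this module):

use starstream_interleaving_spec::{LedgerEffectsCommitment, WitLedgerEffect};

// Hypothetical helper: fold an ordered effect log into one commitment.
fn commit_all(
    init: LedgerEffectsCommitment,
    ops: Vec<WitLedgerEffect>,
) -> LedgerEffectsCommitment {
    ops.into_iter().fold(init, commit)
}

The slot mapping defined just below is what opcode_args relies on; argument kinds may share a positional index as long as no single opcode uses both.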
+ pub const fn idx(self) -> usize { + match self { + ArgName::Target | ArgName::OwnerId | ArgName::TokenId => 0, + ArgName::Val => 1, + ArgName::Ret => 2, + ArgName::Caller | ArgName::Offset | ArgName::Size | ArgName::ActivationCaller => 3, + ArgName::InterfaceId0 => 3, + ArgName::InterfaceId1 => 4, + ArgName::InterfaceId2 => 5, + ArgName::InterfaceId3 => 6, + ArgName::ProgramHash0 => 3, + ArgName::ProgramHash1 => 4, + ArgName::ProgramHash2 => 5, + ArgName::ProgramHash3 => 6, + + // Packed ref args for RefPush/RefGet/RefWrite. + ArgName::PackedRef0 => 0, + ArgName::PackedRef1 => 1, + ArgName::PackedRef2 => 2, + ArgName::PackedRef3 => 3, + ArgName::PackedRef4 => 4, + ArgName::PackedRef5 => 5, + } + } +} + +pub(crate) fn ledger_operation_from_wit(op: &WitLedgerEffect) -> LedgerOperation { + match op { + WitLedgerEffect::Resume { + target, + val, + ret, + caller, + } => LedgerOperation::Resume { + target: F::from(target.0 as u64), + val: F::from(val.0), + ret: ret.to_option().map(|r| F::from(r.0)).unwrap_or_default(), + caller: OptionalF::from_option( + caller.to_option().flatten().map(|p| F::from(p.0 as u64)), + ), + }, + WitLedgerEffect::Yield { val } => LedgerOperation::Yield { + val: F::from(val.0), + }, + WitLedgerEffect::Return {} => LedgerOperation::Return {}, + WitLedgerEffect::Burn { ret } => LedgerOperation::Burn { + ret: F::from(ret.0), + }, + WitLedgerEffect::ProgramHash { + target, + program_hash, + } => LedgerOperation::ProgramHash { + target: F::from(target.0 as u64), + program_hash: program_hash.unwrap().0.map(F::from), + }, + WitLedgerEffect::NewUtxo { + program_hash, + val, + id, + } => LedgerOperation::NewUtxo { + program_hash: program_hash.0.map(F::from), + val: F::from(val.0), + target: F::from(id.unwrap().0 as u64), + }, + WitLedgerEffect::NewCoord { + program_hash, + val, + id, + } => LedgerOperation::NewCoord { + program_hash: program_hash.0.map(F::from), + val: F::from(val.0), + target: F::from(id.unwrap().0 as u64), + }, + WitLedgerEffect::Activation { val, caller } => LedgerOperation::Activation { + val: F::from(val.unwrap().0), + caller: F::from(caller.unwrap().0 as u64), + }, + WitLedgerEffect::Init { val, caller } => LedgerOperation::Init { + val: F::from(val.unwrap().0), + caller: F::from(caller.unwrap().0 as u64), + }, + WitLedgerEffect::Bind { owner_id } => LedgerOperation::Bind { + owner_id: F::from(owner_id.0 as u64), + }, + WitLedgerEffect::Unbind { token_id } => LedgerOperation::Unbind { + token_id: F::from(token_id.0 as u64), + }, + WitLedgerEffect::NewRef { size, ret } => LedgerOperation::NewRef { + size: F::from(*size as u64), + ret: F::from(ret.unwrap().0), + }, + WitLedgerEffect::RefPush { vals } => LedgerOperation::RefPush { + vals: vals.map(value_to_field), + }, + WitLedgerEffect::RefGet { reff, offset, ret } => LedgerOperation::RefGet { + reff: F::from(reff.0), + offset: F::from(*offset as u64), + ret: ret.unwrap().map(value_to_field), + }, + WitLedgerEffect::RefWrite { reff, offset, vals } => LedgerOperation::RefWrite { + reff: F::from(reff.0), + offset: F::from(*offset as u64), + vals: vals.map(value_to_field), + }, + WitLedgerEffect::InstallHandler { interface_id } => LedgerOperation::InstallHandler { + interface_id: interface_id.0.map(F::from), + }, + WitLedgerEffect::UninstallHandler { interface_id } => LedgerOperation::UninstallHandler { + interface_id: interface_id.0.map(F::from), + }, + WitLedgerEffect::GetHandlerFor { + interface_id, + handler_id, + } => LedgerOperation::GetHandlerFor { + interface_id: interface_id.0.map(F::from), + 
handler_id: F::from(handler_id.unwrap().0 as u64), + }, + WitLedgerEffect::CallEffectHandler { + interface_id, + val, + ret, + .. + } => LedgerOperation::CallEffectHandler { + interface_id: interface_id.0.map(F::from), + val: F::from(val.0), + ret: ret.to_option().map(|r| F::from(r.0)).unwrap_or_default(), + }, + } +} + +pub(crate) fn opcode_discriminant(op: &LedgerOperation) -> F { + match op { + LedgerOperation::Nop {} => F::zero(), + LedgerOperation::Resume { .. } => F::from(EffectDiscriminant::Resume as u64), + LedgerOperation::CallEffectHandler { .. } => { + F::from(EffectDiscriminant::CallEffectHandler as u64) + } + LedgerOperation::Yield { .. } => F::from(EffectDiscriminant::Yield as u64), + LedgerOperation::Return { .. } => F::from(EffectDiscriminant::Return as u64), + LedgerOperation::Burn { .. } => F::from(EffectDiscriminant::Burn as u64), + LedgerOperation::ProgramHash { .. } => F::from(EffectDiscriminant::ProgramHash as u64), + LedgerOperation::NewUtxo { .. } => F::from(EffectDiscriminant::NewUtxo as u64), + LedgerOperation::NewCoord { .. } => F::from(EffectDiscriminant::NewCoord as u64), + LedgerOperation::Activation { .. } => F::from(EffectDiscriminant::Activation as u64), + LedgerOperation::Init { .. } => F::from(EffectDiscriminant::Init as u64), + LedgerOperation::Bind { .. } => F::from(EffectDiscriminant::Bind as u64), + LedgerOperation::Unbind { .. } => F::from(EffectDiscriminant::Unbind as u64), + LedgerOperation::NewRef { .. } => F::from(EffectDiscriminant::NewRef as u64), + LedgerOperation::RefPush { .. } => F::from(EffectDiscriminant::RefPush as u64), + LedgerOperation::RefGet { .. } => F::from(EffectDiscriminant::RefGet as u64), + LedgerOperation::RefWrite { .. } => F::from(EffectDiscriminant::RefWrite as u64), + LedgerOperation::InstallHandler { .. } => { + F::from(EffectDiscriminant::InstallHandler as u64) + } + LedgerOperation::UninstallHandler { .. } => { + F::from(EffectDiscriminant::UninstallHandler as u64) + } + LedgerOperation::GetHandlerFor { .. 
} => F::from(EffectDiscriminant::GetHandlerFor as u64), + } +} + +pub(crate) fn opcode_args(op: &LedgerOperation) -> [F; OPCODE_ARG_COUNT] { + let mut args = [F::zero(); OPCODE_ARG_COUNT]; + match op { + LedgerOperation::Nop {} => {} + LedgerOperation::Resume { + target, + val, + ret, + caller, + } => { + args[ArgName::Target.idx()] = *target; + args[ArgName::Val.idx()] = *val; + args[ArgName::Ret.idx()] = *ret; + args[ArgName::Caller.idx()] = caller.encoded(); + } + LedgerOperation::CallEffectHandler { + interface_id, + val, + ret, + } => { + args[ArgName::Val.idx()] = *val; + args[ArgName::Ret.idx()] = *ret; + args[ArgName::InterfaceId0.idx()] = interface_id[0]; + args[ArgName::InterfaceId1.idx()] = interface_id[1]; + args[ArgName::InterfaceId2.idx()] = interface_id[2]; + args[ArgName::InterfaceId3.idx()] = interface_id[3]; + } + LedgerOperation::Yield { val } => { + args[ArgName::Val.idx()] = *val; + } + LedgerOperation::Return {} => {} + LedgerOperation::Burn { ret } => { + args[ArgName::Target.idx()] = F::zero(); + args[ArgName::Ret.idx()] = *ret; + } + LedgerOperation::ProgramHash { + target, + program_hash, + } => { + args[ArgName::Target.idx()] = *target; + args[ArgName::ProgramHash0.idx()] = program_hash[0]; + args[ArgName::ProgramHash1.idx()] = program_hash[1]; + args[ArgName::ProgramHash2.idx()] = program_hash[2]; + args[ArgName::ProgramHash3.idx()] = program_hash[3]; + } + LedgerOperation::NewUtxo { + program_hash, + val, + target, + } + | LedgerOperation::NewCoord { + program_hash, + val, + target, + } => { + args[ArgName::Target.idx()] = *target; + args[ArgName::Val.idx()] = *val; + args[ArgName::ProgramHash0.idx()] = program_hash[0]; + args[ArgName::ProgramHash1.idx()] = program_hash[1]; + args[ArgName::ProgramHash2.idx()] = program_hash[2]; + args[ArgName::ProgramHash3.idx()] = program_hash[3]; + } + LedgerOperation::Activation { val, caller } => { + args[ArgName::Val.idx()] = *val; + args[ArgName::ActivationCaller.idx()] = *caller; + } + LedgerOperation::Init { val, caller } => { + args[ArgName::Val.idx()] = *val; + args[ArgName::ActivationCaller.idx()] = *caller; + } + LedgerOperation::Bind { owner_id } => { + args[ArgName::OwnerId.idx()] = *owner_id; + } + LedgerOperation::Unbind { token_id } => { + args[ArgName::TokenId.idx()] = *token_id; + } + LedgerOperation::NewRef { size, ret } => { + args[ArgName::Size.idx()] = *size; + args[ArgName::Ret.idx()] = *ret; + } + LedgerOperation::RefPush { vals } => { + args[ArgName::PackedRef0.idx()] = vals[0]; + args[ArgName::PackedRef1.idx()] = vals[1]; + args[ArgName::PackedRef2.idx()] = vals[2]; + args[ArgName::PackedRef3.idx()] = vals[3]; + } + LedgerOperation::RefGet { reff, offset, ret } => { + args[ArgName::Val.idx()] = *reff; + args[ArgName::Offset.idx()] = *offset; + + // Pack 4 return values, leaving slots 1 and 3 for reff/offset. + args[ArgName::PackedRef0.idx()] = ret[0]; + args[ArgName::PackedRef2.idx()] = ret[1]; + args[ArgName::PackedRef4.idx()] = ret[2]; + args[ArgName::PackedRef5.idx()] = ret[3]; + } + LedgerOperation::RefWrite { reff, offset, vals } => { + args[ArgName::Val.idx()] = *reff; + args[ArgName::Offset.idx()] = *offset; + // Avoid collisions with Val(idx=1) and Offset(idx=3). 
+ args[ArgName::PackedRef0.idx()] = vals[0]; + args[ArgName::PackedRef2.idx()] = vals[1]; + args[ArgName::PackedRef4.idx()] = vals[2]; + args[ArgName::PackedRef5.idx()] = vals[3]; + } + LedgerOperation::InstallHandler { interface_id } + | LedgerOperation::UninstallHandler { interface_id } => { + args[ArgName::InterfaceId0.idx()] = interface_id[0]; + args[ArgName::InterfaceId1.idx()] = interface_id[1]; + args[ArgName::InterfaceId2.idx()] = interface_id[2]; + args[ArgName::InterfaceId3.idx()] = interface_id[3]; + } + LedgerOperation::GetHandlerFor { + interface_id, + handler_id, + } => { + args[ArgName::InterfaceId0.idx()] = interface_id[0]; + args[ArgName::InterfaceId1.idx()] = interface_id[1]; + args[ArgName::InterfaceId2.idx()] = interface_id[2]; + args[ArgName::InterfaceId3.idx()] = interface_id[3]; + args[ArgName::Ret.idx()] = *handler_id; + } + } + args +} + +pub(crate) fn value_to_field(val: starstream_interleaving_spec::Value) -> F { + F::from(val.0) +} diff --git a/interleaving/starstream-interleaving-proof/src/circuit.rs b/interleaving/starstream-interleaving-proof/src/circuit.rs new file mode 100644 index 00000000..6f622074 --- /dev/null +++ b/interleaving/starstream-interleaving-proof/src/circuit.rs @@ -0,0 +1,2309 @@ +use crate::abi::{self, ArgName, OPCODE_ARG_COUNT}; +use crate::coroutine_args_gadget::{check_activation, check_init}; +use crate::execution_switches::ExecutionSwitches; +use crate::handler_stack_gadget::{ + HandlerState, InterfaceResolver, handler_stack_access_wires, trace_handler_stack_ops, +}; +use crate::ledger_operation::{REF_GET_BATCH_SIZE, REF_PUSH_BATCH_SIZE}; +use crate::memory::{self, Address, IVCMemory, MemType}; +pub use crate::memory_tags::MemoryTag; +use crate::program_hash_gadget::{program_hash_access_wires, trace_program_hash_ops}; +use crate::program_state::{ + ProgramState, ProgramStateWires, program_state_read_wires, program_state_write_wires, + trace_program_state_reads, trace_program_state_writes, +}; +use crate::ref_arena_gadget::{ref_arena_access_wires, ref_arena_read_size, trace_ref_arena_ops}; +use crate::switchboard::{ + HandlerSwitchboard, HandlerSwitchboardWires, MemSwitchboardBool, MemSwitchboardWires, + RefArenaSwitchboard, RefArenaSwitchboardWires, RomSwitchboard, RomSwitchboardWires, +}; +use crate::{ + F, OptionalF, OptionalFpVar, ledger_operation::LedgerOperation, memory::IVCMemoryAllocated, +}; +use ark_ff::{AdditiveGroup, Field as _, PrimeField}; +use ark_r1cs_std::fields::FieldVar; +use ark_r1cs_std::{ + GR1CSVar as _, alloc::AllocVar as _, eq::EqGadget, fields::fp::FpVar, prelude::Boolean, +}; +use ark_relations::{ + gr1cs::{ConstraintSystemRef, SynthesisError}, + ns, +}; +use starstream_interleaving_spec::{InterleavingInstance, LedgerEffectsCommitment}; +use std::marker::PhantomData; +use std::ops::Not; +use tracing::debug_span; + +struct OpcodeConfig { + mem_switches_curr: MemSwitchboardBool, + mem_switches_target: MemSwitchboardBool, + rom_switches: RomSwitchboard, + handler_switches: HandlerSwitchboard, + ref_arena_switches: RefArenaSwitchboard, + execution_switches: ExecutionSwitches, + opcode_args: [F; OPCODE_ARG_COUNT], + opcode_discriminant: F, +} + +pub struct StepCircuitBuilder { + pub instance: InterleavingInstance, + pub ops: Vec>, + write_ops: Vec<(ProgramState, ProgramState)>, + mem_switches: Vec<(MemSwitchboardBool, MemSwitchboardBool)>, + rom_switches: Vec, + handler_switches: Vec, + ref_arena_switches: Vec, + interface_resolver: InterfaceResolver, + + mem: PhantomData, +} + +type StepCircuitResult = Result< + ( 
+ InterRoundWires, + <>::Allocator as IVCMemoryAllocated>::FinishStepPayload, + Option, + ), + SynthesisError, +>; + +// OptionalF/OptionalFpVar live in optional.rs + +/// common circuit variables to all the opcodes +#[derive(Clone)] +pub struct Wires { + // irw + id_curr: FpVar, + id_prev: OptionalFpVar, + ref_arena_stack_ptr: FpVar, + handler_stack_ptr: FpVar, + + ref_building_remaining: FpVar, + ref_building_ptr: FpVar, + + switches: ExecutionSwitches>, + + opcode_args: [FpVar; OPCODE_ARG_COUNT], + + curr_read_wires: ProgramStateWires, + curr_write_wires: ProgramStateWires, + + target_read_wires: ProgramStateWires, + target_write_wires: ProgramStateWires, + + ref_arena_read: [FpVar; REF_GET_BATCH_SIZE], + handler_state: HandlerState, + + // ROM lookup results + is_utxo_curr: FpVar, + is_utxo_target: FpVar, + must_burn_curr: FpVar, + rom_program_hash: [FpVar; 4], + + constant_false: Boolean, + constant_true: Boolean, +} + +// helper so that we always allocate witnesses in the same order +pub struct PreWires { + interface_index: F, + + switches: ExecutionSwitches, + + opcode_args: [F; OPCODE_ARG_COUNT], + opcode_discriminant: F, + + curr_mem_switches: MemSwitchboardBool, + target_mem_switches: MemSwitchboardBool, + rom_switches: RomSwitchboard, + handler_switches: HandlerSwitchboard, + ref_arena_switches: RefArenaSwitchboard, + + irw: InterRoundWires, +} + +/// IVC wires (state between steps) +/// +/// these get input and output variables +#[derive(Clone)] +pub struct InterRoundWires { + id_curr: F, + id_prev: OptionalF, + ref_arena_counter: F, + handler_stack_counter: F, + + ref_building_remaining: F, + ref_building_ptr: F, +} + +#[derive(Clone, Copy, Debug)] +pub struct IvcWireIndices { + pub id_curr: usize, + pub id_prev: usize, + pub ref_arena_stack_ptr: usize, + pub handler_stack_ptr: usize, + pub ref_building_remaining: usize, + pub ref_building_ptr: usize, +} + +#[derive(Clone, Copy, Debug)] +pub struct IvcWireLayout { + pub input: IvcWireIndices, + pub output: IvcWireIndices, +} + +impl IvcWireLayout { + pub const FIELD_COUNT: usize = 6; + + pub fn input_indices(&self) -> [usize; Self::FIELD_COUNT] { + [ + self.input.id_curr, + self.input.id_prev, + self.input.ref_arena_stack_ptr, + self.input.handler_stack_ptr, + self.input.ref_building_remaining, + self.input.ref_building_ptr, + ] + } + + pub fn output_indices(&self) -> [usize; Self::FIELD_COUNT] { + [ + self.output.id_curr, + self.output.id_prev, + self.output.ref_arena_stack_ptr, + self.output.handler_stack_ptr, + self.output.ref_building_remaining, + self.output.ref_building_ptr, + ] + } +} + +impl Wires { + fn arg(&self, kind: ArgName) -> FpVar { + self.opcode_args[kind.idx()].clone() + } + + fn id_prev_is_some(&self) -> Result, SynthesisError> { + self.id_prev.is_some() + } + + fn id_prev_value(&self) -> Result, SynthesisError> { + self.id_prev.decode_or_zero() + } + + // IMPORTANT: no rust branches in this function, since the purpose of this + // is to get the exact same layout for all the opcodes + #[tracing::instrument(target = "gr1cs", skip_all)] + pub fn from_irw>( + vals: &PreWires, + rm: &mut M, + current_write_values: &ProgramState, + target_write_values: &ProgramState, + ) -> Result { + vals.debug_print(); + + let cs = rm.get_cs(); + + // io vars + let id_curr = FpVar::::new_witness(cs.clone(), || Ok(vals.irw.id_curr))?; + let id_prev = OptionalFpVar::new(FpVar::new_witness(cs.clone(), || { + Ok(vals.irw.id_prev.encoded()) + })?); + let ref_arena_stack_ptr = + FpVar::new_witness(cs.clone(), || 
Ok(vals.irw.ref_arena_counter))?; + let handler_stack_counter = + FpVar::new_witness(cs.clone(), || Ok(vals.irw.handler_stack_counter))?; + + let ref_building_remaining = + FpVar::new_witness(cs.clone(), || Ok(vals.irw.ref_building_remaining))?; + let ref_building_ptr = FpVar::new_witness(cs.clone(), || Ok(vals.irw.ref_building_ptr))?; + + let opcode_args_cs = ns!(cs.clone(), "opcode_args"); + let opcode_args_vec = (0..OPCODE_ARG_COUNT) + .map(|i| FpVar::::new_witness(opcode_args_cs.clone(), || Ok(vals.opcode_args[i]))) + .collect::, _>>()?; + let opcode_args: [FpVar; OPCODE_ARG_COUNT] = + opcode_args_vec.try_into().expect("opcode args length"); + let opcode_discriminant = + FpVar::::new_witness(cs.clone(), || Ok(vals.opcode_discriminant))?; + + // Allocate switches and enforce exactly one is true + let switches = vals + .switches + .allocate_and_constrain(cs.clone(), &opcode_discriminant)?; + + let constant_false = Boolean::new_constant(cs.clone(), false)?; + + let target = opcode_args[ArgName::Target.idx()].clone(); + let val = opcode_args[ArgName::Val.idx()].clone(); + let offset = opcode_args[ArgName::Offset.idx()].clone(); + + let curr_mem_switches = MemSwitchboardWires::allocate(cs.clone(), &vals.curr_mem_switches)?; + let target_mem_switches = + MemSwitchboardWires::allocate(cs.clone(), &vals.target_mem_switches)?; + + let curr_address = FpVar::::new_witness(cs.clone(), || Ok(vals.irw.id_curr))?; + let curr_read_wires = + program_state_read_wires(rm, &cs, curr_address.clone(), &curr_mem_switches)?; + + let handler_switches = + HandlerSwitchboardWires::allocate(cs.clone(), &vals.handler_switches)?; + let ref_arena_switches = + RefArenaSwitchboardWires::allocate(cs.clone(), &vals.ref_arena_switches)?; + let interface_index_var = FpVar::new_witness(cs.clone(), || Ok(vals.interface_index))?; + let handler_reads = handler_stack_access_wires( + cs.clone(), + rm, + &handler_switches, + &interface_index_var, + &handler_stack_counter, + &id_curr, + )?; + + let yield_to_value = curr_read_wires.yield_to.decode_or_zero()?; + let return_like = &switches.yield_op | &switches.return_op; + let default_target_address = return_like.select(&yield_to_value, &target)?; + let target_address = switches.call_effect_handler.select( + &handler_reads.handler_stack_node_process, + &default_target_address, + )?; + let target_read_wires = + program_state_read_wires(rm, &cs, target_address.clone(), &target_mem_switches)?; + + let curr_write_wires = + ProgramStateWires::from_write_values(cs.clone(), current_write_values)?; + + let target_write_wires = + ProgramStateWires::from_write_values(cs.clone(), target_write_values)?; + + program_state_write_wires( + rm, + &cs, + curr_address.clone(), + &curr_write_wires, + &curr_mem_switches, + )?; + + program_state_write_wires( + rm, + &cs, + target_address.clone(), + &target_write_wires, + &target_mem_switches, + )?; + + let rom_switches = RomSwitchboardWires::allocate(cs.clone(), &vals.rom_switches)?; + + let is_utxo_curr = rm.conditional_read( + &rom_switches.read_is_utxo_curr, + &Address { + addr: id_curr.clone(), + tag: MemoryTag::IsUtxo.allocate(cs.clone())?, + }, + )?[0] + .clone(); + + let is_utxo_target = rm.conditional_read( + &rom_switches.read_is_utxo_target, + &Address { + addr: target_address.clone(), + tag: MemoryTag::IsUtxo.allocate(cs.clone())?, + }, + )?[0] + .clone(); + + let must_burn_curr = rm.conditional_read( + &rom_switches.read_must_burn_curr, + &Address { + addr: id_curr.clone(), + tag: MemoryTag::MustBurn.allocate(cs.clone())?, + }, + )?[0] + 
.clone(); + + let rom_program_hash = program_hash_access_wires( + cs.clone(), + rm, + &rom_switches.read_program_hash_target, + &target_address, + )?; + + let handler_state = HandlerState { + handler_stack_node_process: handler_reads.handler_stack_node_process, + interface_rom_read: handler_reads.interface_rom_read, + }; + + // ref arena wires + let ref_arena_read = { + let ref_size_read = + ref_arena_read_size(cs.clone(), rm, &ref_arena_switches, &opcode_args, &val)?; + + ref_arena_access_wires( + cs.clone(), + rm, + &ref_arena_switches, + &opcode_args, + &ref_building_ptr, + &ref_building_remaining, + &val, + &offset, + &ref_size_read, + )? + }; + + let should_trace = switches.nop.clone().not(); + trace_ic_wires( + id_curr.clone(), + rm, + &cs, + &should_trace, + &opcode_discriminant, + &opcode_args, + )?; + + Ok(Wires { + id_curr, + id_prev, + ref_arena_stack_ptr, + handler_stack_ptr: handler_stack_counter, + + ref_building_remaining, + ref_building_ptr, + switches, + + constant_false, + constant_true: Boolean::new_constant(cs.clone(), true)?, + + // wit_wires + opcode_args, + + curr_read_wires, + curr_write_wires, + + target_read_wires, + target_write_wires, + + is_utxo_curr, + is_utxo_target, + must_burn_curr, + rom_program_hash, + ref_arena_read, + handler_state, + }) + } +} + +impl InterRoundWires { + pub fn new(entrypoint: u64) -> Self { + InterRoundWires { + id_curr: F::from(entrypoint), + id_prev: OptionalF::none(), + ref_arena_counter: F::ZERO, + handler_stack_counter: F::ZERO, + ref_building_remaining: F::ZERO, + ref_building_ptr: F::ZERO, + } + } + + pub fn update(&mut self, res: Wires) { + let _guard = debug_span!("update ivc state").entered(); + + tracing::debug!( + "current_program from {} to {}", + self.id_curr, + res.id_curr.value().unwrap() + ); + + self.id_curr = res.id_curr.value().unwrap(); + + let res_id_prev = res.id_prev.value().unwrap(); + tracing::debug!( + "prev_program from {} to {}", + self.id_prev.encoded(), + res_id_prev + ); + + self.id_prev = OptionalF::from_encoded(res_id_prev); + + tracing::debug!( + "ref_arena_counter from {} to {}", + self.ref_arena_counter, + res.ref_arena_stack_ptr.value().unwrap() + ); + + self.ref_arena_counter = res.ref_arena_stack_ptr.value().unwrap(); + + tracing::debug!( + "handler_stack_counter from {} to {}", + self.handler_stack_counter, + res.handler_stack_ptr.value().unwrap() + ); + + self.handler_stack_counter = res.handler_stack_ptr.value().unwrap(); + + self.ref_building_remaining = res.ref_building_remaining.value().unwrap(); + self.ref_building_ptr = res.ref_building_ptr.value().unwrap(); + } +} + +impl LedgerOperation { + fn get_config(&self) -> OpcodeConfig { + let mut config = OpcodeConfig { + mem_switches_curr: MemSwitchboardBool::default(), + mem_switches_target: MemSwitchboardBool::default(), + rom_switches: RomSwitchboard::default(), + handler_switches: HandlerSwitchboard::default(), + ref_arena_switches: RefArenaSwitchboard::default(), + execution_switches: ExecutionSwitches::default(), + opcode_args: [F::ZERO; OPCODE_ARG_COUNT], + opcode_discriminant: F::ZERO, + }; + + config.opcode_discriminant = abi::opcode_discriminant(self); + + match self { + LedgerOperation::Nop {} => { + config.execution_switches.nop = true; + } + LedgerOperation::Resume { .. 
} => { + config.execution_switches.resume = true; + + config.mem_switches_curr.activation = true; + config.mem_switches_curr.expected_input = true; + config.mem_switches_curr.expected_resumer = true; + + config.mem_switches_target.activation = true; + config.mem_switches_target.expected_input = true; + config.mem_switches_target.expected_resumer = true; + config.mem_switches_target.on_yield = true; + config.mem_switches_target.yield_to = true; + config.mem_switches_target.finalized = true; + config.mem_switches_target.initialized = true; + + config.rom_switches.read_is_utxo_curr = true; + config.rom_switches.read_is_utxo_target = true; + } + LedgerOperation::CallEffectHandler { .. } => { + config.execution_switches.call_effect_handler = true; + + config.mem_switches_curr.activation = true; + config.mem_switches_curr.expected_input = true; + config.mem_switches_curr.expected_resumer = true; + + config.mem_switches_target.activation = true; + config.mem_switches_target.expected_input = true; + config.mem_switches_target.expected_resumer = true; + config.mem_switches_target.finalized = true; + + config.handler_switches.read_interface = true; + config.handler_switches.read_head = true; + config.handler_switches.read_node = true; + } + LedgerOperation::Yield { .. } => { + config.execution_switches.yield_op = true; + + config.mem_switches_curr.activation = true; + config.mem_switches_curr.on_yield = true; + config.mem_switches_curr.yield_to = true; + config.mem_switches_curr.finalized = true; + + config.mem_switches_target.expected_input = true; + config.mem_switches_target.expected_resumer = true; + + config.rom_switches.read_is_utxo_curr = true; + } + LedgerOperation::Return { .. } => { + config.execution_switches.return_op = true; + + config.mem_switches_curr.activation = true; + config.mem_switches_curr.on_yield = true; + config.mem_switches_curr.yield_to = true; + config.mem_switches_curr.finalized = true; + + config.rom_switches.read_is_utxo_curr = true; + } + LedgerOperation::Burn { .. } => { + config.execution_switches.burn = true; + + config.mem_switches_curr.activation = true; + config.mem_switches_curr.finalized = true; + config.mem_switches_curr.did_burn = true; + config.mem_switches_curr.expected_input = true; + config.mem_switches_curr.initialized = true; + + config.rom_switches.read_is_utxo_curr = true; + config.rom_switches.read_must_burn_curr = true; + } + LedgerOperation::ProgramHash { .. } => { + config.execution_switches.program_hash = true; + + config.rom_switches.read_program_hash_target = true; + } + LedgerOperation::NewUtxo { .. } => { + config.execution_switches.new_utxo = true; + + config.mem_switches_target.initialized = true; + config.mem_switches_target.init = true; + config.mem_switches_target.init_caller = true; + config.mem_switches_target.expected_input = true; + config.mem_switches_target.expected_resumer = true; + config.mem_switches_target.on_yield = true; + config.mem_switches_target.yield_to = true; + + config.rom_switches.read_is_utxo_curr = true; + config.rom_switches.read_is_utxo_target = true; + config.rom_switches.read_program_hash_target = true; + } + LedgerOperation::NewCoord { .. 
} => { + config.execution_switches.new_coord = true; + + config.mem_switches_target.initialized = true; + config.mem_switches_target.init = true; + config.mem_switches_target.init_caller = true; + config.mem_switches_target.expected_input = true; + config.mem_switches_target.expected_resumer = true; + config.mem_switches_target.on_yield = true; + config.mem_switches_target.yield_to = true; + + config.rom_switches.read_is_utxo_curr = true; + config.rom_switches.read_is_utxo_target = true; + config.rom_switches.read_program_hash_target = true; + } + LedgerOperation::Activation { .. } => { + config.execution_switches.activation = true; + + config.mem_switches_curr.activation = true; + } + LedgerOperation::Init { .. } => { + config.execution_switches.init = true; + + config.mem_switches_curr.init = true; + config.mem_switches_curr.init_caller = true; + } + LedgerOperation::Bind { .. } => { + config.execution_switches.bind = true; + + config.mem_switches_target.initialized = true; + config.mem_switches_curr.ownership = true; + + config.rom_switches.read_is_utxo_curr = true; + config.rom_switches.read_is_utxo_target = true; + } + LedgerOperation::Unbind { .. } => { + config.execution_switches.unbind = true; + + config.mem_switches_target.ownership = true; + + config.rom_switches.read_is_utxo_curr = true; + config.rom_switches.read_is_utxo_target = true; + } + LedgerOperation::NewRef { .. } => { + config.execution_switches.new_ref = true; + config.ref_arena_switches.ref_sizes_write = true; + } + LedgerOperation::RefPush { .. } => { + config.execution_switches.ref_push = true; + config.ref_arena_switches.ref_arena_write = true; + config.ref_arena_switches.ref_arena_write_is_push = true; + } + LedgerOperation::RefGet { .. } => { + config.execution_switches.get = true; + config.ref_arena_switches.ref_sizes_read = true; + config.ref_arena_switches.ref_arena_read = true; + } + LedgerOperation::RefWrite { .. } => { + config.execution_switches.ref_write = true; + config.ref_arena_switches.ref_sizes_read = true; + config.ref_arena_switches.ref_arena_write = true; + } + LedgerOperation::InstallHandler { .. } => { + config.execution_switches.install_handler = true; + config.rom_switches.read_is_utxo_curr = true; + + config.handler_switches.read_interface = true; + config.handler_switches.read_head = true; + config.handler_switches.write_node = true; + config.handler_switches.write_head = true; + } + LedgerOperation::UninstallHandler { .. } => { + config.execution_switches.uninstall_handler = true; + config.rom_switches.read_is_utxo_curr = true; + + config.handler_switches.read_interface = true; + config.handler_switches.read_head = true; + config.handler_switches.read_node = true; + config.handler_switches.write_head = true; + } + LedgerOperation::GetHandlerFor { .. } => { + config.execution_switches.get_handler_for = true; + + config.handler_switches.read_interface = true; + config.handler_switches.read_head = true; + config.handler_switches.read_node = true; + } + } + + config.opcode_args = abi::opcode_args(self); + + config + } + + // State transitions for the current and target (next) programs. + // In general, a single step changes the state of at most two processes. + // + // This takes the current state for both of those processes and returns the + // new state for each.
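+ //
+ // The returned pair is `(curr_write, target_write)`; which of those fields
+ // actually get persisted is decided by the per-opcode memory switchboards.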
+ pub fn program_state_transitions( + &self, + curr_id: F, + curr_read: ProgramState, + target_read: ProgramState, + curr_is_utxo: bool, + target_id: Option, + ) -> (ProgramState, ProgramState) { + let mut curr_write = curr_read.clone(); + let mut target_write = target_read.clone(); + + match self { + LedgerOperation::Nop {} => { + // Nop does nothing to the state + } + LedgerOperation::Resume { + val, ret, caller, .. + } => { + // Current process gives control to target. + // Its `arg` is cleared, and its `expected_input` is set to the return value `ret`. + curr_write.activation = F::ZERO; // Represents None + curr_write.expected_input = OptionalF::new(*ret); + curr_write.expected_resumer = *caller; + + // Target process receives control. + // Its `arg` is set to `val`, and it is no longer in a `finalized` state. + target_write.expected_input = OptionalF::none(); + target_write.expected_resumer = OptionalF::none(); + target_write.activation = *val; + target_write.finalized = false; + + // If target was in a yield state, record who resumed it and clear the flag. + if target_read.on_yield && !curr_is_utxo { + target_write.yield_to = OptionalF::new(curr_id); + } + target_write.on_yield = false; + } + LedgerOperation::CallEffectHandler { val, ret, .. } => { + let target = target_id.expect("CallEffectHandler requires resolved handler target"); + curr_write.activation = F::ZERO; + curr_write.expected_input = OptionalF::new(*ret); + curr_write.expected_resumer = OptionalF::new(target); + + target_write.expected_input = OptionalF::none(); + target_write.expected_resumer = OptionalF::none(); + target_write.activation = *val; + target_write.finalized = false; + } + LedgerOperation::Yield { val: _, .. } => { + // Current process yields control back to its parent (the target of this operation). + // Its `arg` is cleared. + curr_write.activation = F::ZERO; // Represents None + curr_write.finalized = true; + curr_write.on_yield = true; + } + LedgerOperation::Return {} => { + // Coordination script return is terminal for this transaction. + curr_write.activation = F::ZERO; + curr_write.finalized = true; + } + LedgerOperation::Burn { ret } => { + // The current UTXO is burned. + curr_write.activation = F::ZERO; // Represents None + curr_write.finalized = true; + curr_write.did_burn = true; + curr_write.expected_input = OptionalF::new(*ret); // Sets its final return value. + } + LedgerOperation::NewUtxo { val, target: _, .. } + | LedgerOperation::NewCoord { val, target: _, .. } => { + // The current process is a coordinator creating a new process. + // The new process (target) is initialized. + target_write.initialized = true; + target_write.init = *val; + target_write.init_caller = curr_id; + target_write.expected_input = OptionalF::none(); + target_write.expected_resumer = OptionalF::none(); + target_write.on_yield = true; + target_write.yield_to = OptionalF::none(); + } + LedgerOperation::Bind { owner_id } => { + curr_write.ownership = OptionalF::new(*owner_id); + } + LedgerOperation::Unbind { ..
} => { + target_write.ownership = OptionalF::none(); + } + _ => {} + } + (curr_write, target_write) + } +} + +impl> StepCircuitBuilder { + pub fn new(instance: InterleavingInstance, ops: Vec>) -> Self { + let interface_resolver = InterfaceResolver::new(&ops); + + Self { + ops, + write_ops: vec![], + mem_switches: vec![], + rom_switches: vec![], + handler_switches: vec![], + ref_arena_switches: vec![], + interface_resolver, + mem: PhantomData, + instance, + } + } + + pub fn make_step_circuit( + &self, + i: usize, + rm: &mut M::Allocator, + cs: ConstraintSystemRef, + mut irw: InterRoundWires, + compute_ivc_layout: bool, + ) -> StepCircuitResult { + rm.start_step(cs.clone()).unwrap(); + + let _guard = + tracing::info_span!("make_step_circuit", i = i, pid = ?irw.id_curr, op = ?self.ops[i]) + .entered(); + + if !matches!(&self.ops[i], &LedgerOperation::Nop {}) { + tracing::info!("synthesizing step"); + } + + let wires_in = self.allocate_vars(i, rm, &irw)?; + let next_wires = wires_in.clone(); + + // per opcode constraints + let next_wires = self.visit_yield(next_wires)?; + let next_wires = self.visit_return(next_wires)?; + let next_wires = self.visit_call_effect_handler(next_wires)?; + let next_wires = self.visit_resume(next_wires)?; + let next_wires = self.visit_burn(next_wires)?; + let next_wires = self.visit_program_hash(next_wires)?; + let next_wires = self.visit_new_process(next_wires)?; + let next_wires = self.visit_activation(next_wires)?; + let next_wires = self.visit_init(next_wires)?; + let next_wires = self.visit_bind(next_wires)?; + let next_wires = self.visit_unbind(next_wires)?; + let next_wires = self.visit_new_ref(next_wires)?; + let next_wires = self.visit_ref_push(next_wires)?; + let next_wires = self.visit_ref_get(next_wires)?; + let next_wires = self.visit_ref_write(next_wires)?; + let next_wires = self.visit_install_handler(next_wires)?; + let next_wires = self.visit_uninstall_handler(next_wires)?; + let next_wires = self.visit_get_handler_for(next_wires)?; + + let mem_step_data = rm.finish_step(i == self.ops.len() - 1)?; + + // input <-> output mappings are done by modifying next_wires + let ivc_layout = if compute_ivc_layout { + Some(ivc_wires(&cs, &wires_in, &next_wires)?) 
+ } else { + None + }; + + { + let _guard = debug_span!(target: "gr1cs", "ref_building_mode").entered(); + // Enforce global invariant: If building ref, must be RefPush + let is_building = wires_in.ref_building_remaining.is_zero()?.not(); + is_building.enforce_equal(&wires_in.switches.ref_push)?; + } + + irw.update(next_wires); + + Ok((irw, mem_step_data, ivc_layout)) + } + + pub fn trace_memory_ops(&mut self, params: >::Params) -> M { + // initialize all the maps + let mut mb = { + let mut mb = M::new(params); + + register_memory_segments(&mut mb); + + for (pid, mod_hash) in self.instance.process_table.iter().enumerate() { + let hash_fields = mod_hash.0.map(F::from); + for (lane, field) in hash_fields.iter().enumerate() { + let addr = (pid * 4) + lane; + mb.init( + Address { + addr: addr as u64, + tag: MemoryTag::ProcessTable.into(), + }, + vec![*field], + ); + } + + mb.init( + Address { + addr: pid as u64, + tag: MemoryTag::Initialized.into(), + }, + vec![F::from( + if pid < self.instance.n_inputs || pid == self.instance.entrypoint.0 { + 1 + } else { + 0 + }, + )], + ); + + mb.init( + Address { + addr: pid as u64, + tag: MemoryTag::Finalized.into(), + }, + vec![F::from(0u64)], // false + ); + + mb.init( + Address { + addr: pid as u64, + tag: MemoryTag::DidBurn.into(), + }, + vec![F::from(0u64)], // false + ); + + mb.init( + Address { + addr: pid as u64, + tag: MemoryTag::ExpectedInput.into(), + }, + vec![OptionalF::none().encoded()], + ); + + mb.init( + Address { + addr: pid as u64, + tag: MemoryTag::ExpectedResumer.into(), + }, + vec![OptionalF::none().encoded()], + ); + + mb.init( + Address { + addr: pid as u64, + tag: MemoryTag::OnYield.into(), + }, + vec![F::ONE], // true + ); + + mb.init( + Address { + addr: pid as u64, + tag: MemoryTag::YieldTo.into(), + }, + vec![OptionalF::none().encoded()], + ); + + mb.init( + Address { + addr: pid as u64, + tag: MemoryTag::Activation.into(), + }, + vec![F::from(0u64)], // None + ); + + mb.init( + Address { + addr: pid as u64, + tag: MemoryTag::Init.into(), + }, + vec![F::from(0u64)], // None + ); + mb.init( + Address { + addr: pid as u64, + tag: MemoryTag::InitCaller.into(), + }, + vec![F::from(0u64)], + ); + + let trace_iv = LedgerEffectsCommitment::iv().0; + for offset in 0..4 { + let addr = (pid * 4) + offset; + mb.init( + Address { + addr: addr as u64, + tag: MemoryTag::TraceCommitments.into(), + }, + vec![trace_iv[offset]], + ); + } + } + + for (pid, must_burn) in self.instance.must_burn.iter().enumerate() { + mb.init( + Address { + addr: pid as u64, + tag: MemoryTag::MustBurn.into(), + }, + vec![F::from(if *must_burn { 1u64 } else { 0 })], + ); + } + + for (pid, is_utxo) in self.instance.is_utxo.iter().enumerate() { + mb.init( + Address { + addr: pid as u64, + tag: MemoryTag::IsUtxo.into(), + }, + vec![F::from(if *is_utxo { 1u64 } else { 0 })], + ); + } + + for (pid, owner) in self.instance.ownership_in.iter().enumerate() { + let encoded_owner = owner + .map(|p| OptionalF::new(F::from(p.0 as u64)).encoded()) + .unwrap_or_else(|| OptionalF::none().encoded()); + mb.init( + Address { + addr: pid as u64, + tag: MemoryTag::Ownership.into(), + }, + vec![encoded_owner], + ); + } + + mb + }; + + // Initialize handler memory using simplified approach + self.init_handler_memory(&mut mb); + + // out of circuit memory operations. + // this is needed to commit to the memory operations before-hand. 
+ // + // and here we compute the actual write values (for memory operations) + // + // note however that we don't enforce/check anything, that's done in the + // circuit constraints + + // Initialize IRW for the trace phase and update it as we process each operation + let mut irw = InterRoundWires::new(self.instance.entrypoint.0 as u64); + + let mut ref_building_id = F::ZERO; + let mut ref_building_offset = F::ZERO; + let mut ref_building_remaining = F::ZERO; + + for instr in &self.ops { + if !matches!(instr, LedgerOperation::Nop {}) { + tracing::info!("mem tracing instr {:?}", &instr); + } + + let config = instr.get_config(); + + trace_ic(irw.id_curr.into_bigint().0[0] as usize, &mut mb, &config); + + let curr_switches = config.mem_switches_curr; + let target_switches = config.mem_switches_target; + let rom_switches = config.rom_switches; + let handler_switches = config.handler_switches; + let ref_arena_switches = config.ref_arena_switches; + + self.mem_switches + .push((curr_switches.clone(), target_switches.clone())); + + self.rom_switches.push(rom_switches.clone()); + self.handler_switches.push(handler_switches.clone()); + self.ref_arena_switches.push(ref_arena_switches.clone()); + + // Get interface index for handler operations + let interface_index = match instr { + LedgerOperation::InstallHandler { interface_id } => self + .interface_resolver + .get_interface_index_field(*interface_id), + LedgerOperation::UninstallHandler { interface_id } => self + .interface_resolver + .get_interface_index_field(*interface_id), + LedgerOperation::GetHandlerFor { interface_id, .. } => self + .interface_resolver + .get_interface_index_field(*interface_id), + LedgerOperation::CallEffectHandler { interface_id, .. } => self + .interface_resolver + .get_interface_index_field(*interface_id), + _ => F::ZERO, + }; + + let handler_reads = trace_handler_stack_ops( + &mut mb, + &handler_switches, + &interface_index, + &irw.handler_stack_counter, + &irw.id_curr, + ); + + if config.execution_switches.install_handler { + irw.handler_stack_counter += F::ONE; + } + + trace_ref_arena_ops( + &mut mb, + &mut ref_building_id, + &mut ref_building_offset, + &mut ref_building_remaining, + &ref_arena_switches, + instr, + ); + + let curr_read = + trace_program_state_reads(&mut mb, irw.id_curr.into_bigint().0[0], &curr_switches); + let curr_yield_to = curr_read.yield_to; + + let target_addr = match instr { + LedgerOperation::Resume { target, .. } => Some(*target), + LedgerOperation::CallEffectHandler { .. } => { + Some(handler_reads.handler_stack_node_process) + } + LedgerOperation::Yield { .. } => curr_read.yield_to.to_option(), + LedgerOperation::Return { .. } => curr_read.yield_to.to_option(), + LedgerOperation::Burn { .. } => irw.id_prev.to_option(), + LedgerOperation::NewUtxo { target: id, .. } => Some(*id), + LedgerOperation::NewCoord { target: id, .. } => Some(*id), + LedgerOperation::ProgramHash { target, .. 
} => Some(*target), + LedgerOperation::Unbind { token_id } => Some(*token_id), + _ => None, + }; + + let target_pid = target_addr.map(|t| t.into_bigint().0[0]); + let target_read = + trace_program_state_reads(&mut mb, target_pid.unwrap_or(0), &target_switches); + + mb.conditional_read( + rom_switches.read_is_utxo_curr, + Address { + addr: irw.id_curr.into_bigint().0[0], + tag: MemoryTag::IsUtxo.into(), + }, + ); + mb.conditional_read( + rom_switches.read_is_utxo_target, + Address { + addr: target_pid.unwrap_or(0), + tag: MemoryTag::IsUtxo.into(), + }, + ); + mb.conditional_read( + rom_switches.read_must_burn_curr, + Address { + addr: irw.id_curr.into_bigint().0[0], + tag: MemoryTag::MustBurn.into(), + }, + ); + let target_pid_value = target_pid.unwrap_or(0); + let _ = trace_program_hash_ops( + &mut mb, + rom_switches.read_program_hash_target, + &F::from(target_pid_value), + ); + + let curr_is_utxo = self.instance.is_utxo[irw.id_curr.into_bigint().0[0] as usize]; + let target_id = match instr { + LedgerOperation::CallEffectHandler { .. } => { + Some(handler_reads.handler_stack_node_process) + } + _ => None, + }; + let (curr_write, target_write) = instr.program_state_transitions( + irw.id_curr, + curr_read, + target_read, + curr_is_utxo, + target_id, + ); + + self.write_ops + .push((curr_write.clone(), target_write.clone())); + + trace_program_state_writes( + &mut mb, + irw.id_curr.into_bigint().0[0], + &curr_write, + &curr_switches, + ); + + trace_program_state_writes(&mut mb, target_pid_value, &target_write, &target_switches); + + // update pids for next iteration + match instr { + LedgerOperation::Resume { target, .. } => { + irw.id_prev = OptionalF::new(irw.id_curr); + irw.id_curr = *target; + } + LedgerOperation::CallEffectHandler { .. } => { + irw.id_prev = OptionalF::new(irw.id_curr); + irw.id_curr = handler_reads.handler_stack_node_process; + } + LedgerOperation::Yield { .. } => { + irw.id_prev = OptionalF::new(irw.id_curr); + irw.id_curr = curr_yield_to.decode_or_zero(); + } + LedgerOperation::Return { .. } => { + if let Some(parent) = curr_yield_to.to_option() { + irw.id_prev = OptionalF::new(irw.id_curr); + irw.id_curr = parent; + } + } + LedgerOperation::Burn { .. } => { + let old_curr = irw.id_curr; + irw.id_curr = irw.id_prev.decode_or_zero(); + irw.id_prev = OptionalF::new(old_curr); + } + _ => {} + } + + mb.finish_step(); + } + + let current_steps = self.ops.len(); + if let Some(missing) = mb.required_steps().checked_sub(current_steps) { + tracing::debug!("padding with {missing} Nop operations for scan"); + self.ops + .extend(std::iter::repeat_n(LedgerOperation::Nop {}, missing)); + + // TODO: we probably want to do this before the main loop + for _ in 0..missing { + mb.finish_step(); + } + } + + mb + } + + fn init_handler_memory(&self, mem: &mut M) { + let interfaces = self.interface_resolver.interfaces(); + + // Initialize Interfaces ROM and HandlerStackHeads + for (index, interface_id) in interfaces.iter().enumerate() { + for (limb, field) in interface_id.iter().enumerate() { + mem.init( + Address { + addr: (index * 4 + limb) as u64, + tag: MemoryTag::Interfaces.into(), + }, + vec![*field], + ); + } + + mem.init( + Address { + addr: index as u64, + tag: MemoryTag::HandlerStackHeads.into(), + }, + vec![F::ZERO], // null pointer (empty stack) + ); + } + + // Pre-allocate arena nodes for all InstallHandler operations + let install_count = self + .ops + .iter() + .filter(|op| matches!(op, LedgerOperation::InstallHandler { .. 
})) + .count(); + + for i in 0..install_count { + mem.init( + Address { + addr: i as u64, + tag: MemoryTag::HandlerStackArenaProcess.into(), + }, + vec![F::ZERO], // process_id + ); + mem.init( + Address { + addr: i as u64, + tag: MemoryTag::HandlerStackArenaNextPtr.into(), + }, + vec![F::ZERO], // next_ptr + ); + } + } + + #[tracing::instrument(target = "gr1cs", skip_all)] + fn allocate_vars( + &self, + i: usize, + rm: &mut M::Allocator, + irw: &InterRoundWires, + ) -> Result { + let instruction = &self.ops[i]; + let (curr_write, target_write) = &self.write_ops[i]; + let (curr_mem_switches, target_mem_switches) = &self.mem_switches[i]; + let rom_switches = &self.rom_switches[i]; + let handler_switches = &self.handler_switches[i]; + let ref_arena_switches = &self.ref_arena_switches[i]; + + // Compute interface index for handler operations + let interface_index = match instruction { + LedgerOperation::InstallHandler { interface_id } => self + .interface_resolver + .get_interface_index_field(*interface_id), + LedgerOperation::UninstallHandler { interface_id } => self + .interface_resolver + .get_interface_index_field(*interface_id), + LedgerOperation::GetHandlerFor { interface_id, .. } => self + .interface_resolver + .get_interface_index_field(*interface_id), + LedgerOperation::CallEffectHandler { interface_id, .. } => self + .interface_resolver + .get_interface_index_field(*interface_id), + _ => F::ZERO, + }; + + let mut default = PreWires::new( + irw.clone(), + curr_mem_switches.clone(), + target_mem_switches.clone(), + rom_switches.clone(), + handler_switches.clone(), + ref_arena_switches.clone(), + interface_index, + abi::opcode_discriminant(instruction), + ); + + default.opcode_args = abi::opcode_args(instruction); + + let prewires = match instruction { + LedgerOperation::Nop {} => PreWires { + switches: ExecutionSwitches::nop(), + ..default + }, + LedgerOperation::Resume { .. } => PreWires { + switches: ExecutionSwitches::resume(), + ..default + }, + LedgerOperation::CallEffectHandler { .. } => PreWires { + switches: ExecutionSwitches::call_effect_handler(), + ..default + }, + LedgerOperation::Yield { .. } => PreWires { + switches: ExecutionSwitches::yield_op(), + ..default + }, + LedgerOperation::Return { .. } => PreWires { + switches: ExecutionSwitches::return_op(), + ..default + }, + LedgerOperation::Burn { .. } => PreWires { + switches: ExecutionSwitches::burn(), + ..default + }, + LedgerOperation::ProgramHash { .. } => PreWires { + switches: ExecutionSwitches::program_hash(), + ..default + }, + LedgerOperation::NewUtxo { .. } => PreWires { + switches: ExecutionSwitches::new_utxo(), + ..default + }, + LedgerOperation::NewCoord { .. } => PreWires { + switches: ExecutionSwitches::new_coord(), + ..default + }, + LedgerOperation::Activation { .. } => PreWires { + switches: ExecutionSwitches::activation(), + ..default + }, + LedgerOperation::Init { .. } => PreWires { + switches: ExecutionSwitches::init(), + ..default + }, + LedgerOperation::Bind { .. } => PreWires { + switches: ExecutionSwitches::bind(), + ..default + }, + LedgerOperation::Unbind { .. } => PreWires { + switches: ExecutionSwitches::unbind(), + ..default + }, + LedgerOperation::NewRef { .. } => PreWires { + switches: ExecutionSwitches::new_ref(), + ..default + }, + LedgerOperation::RefPush { .. } => PreWires { + switches: ExecutionSwitches::ref_push(), + ..default + }, + LedgerOperation::RefGet { .. } => PreWires { + switches: ExecutionSwitches::get(), + ..default + }, + LedgerOperation::RefWrite { .. 
} => PreWires { + switches: ExecutionSwitches::ref_write(), + ..default + }, + LedgerOperation::InstallHandler { .. } => PreWires { + switches: ExecutionSwitches::install_handler(), + ..default + }, + LedgerOperation::UninstallHandler { .. } => PreWires { + switches: ExecutionSwitches::uninstall_handler(), + ..default + }, + LedgerOperation::GetHandlerFor { .. } => PreWires { + switches: ExecutionSwitches::get_handler_for(), + ..default + }, + }; + + Wires::from_irw(&prewires, rm, curr_write, target_write) + } + #[tracing::instrument(target = "gr1cs", skip(self, wires))] + fn visit_resume(&self, mut wires: Wires) -> Result { + let switch = &wires.switches.resume; + + // 1. self-resume check + wires + .id_curr + .conditional_enforce_not_equal(&wires.arg(ArgName::Target), switch)?; + + // 2. Direct Resume is coordination-script only. + wires + .is_utxo_curr + .is_one()? + .conditional_enforce_equal(&Boolean::FALSE, switch)?; + + let is_utxo_curr = wires.is_utxo_curr.is_one()?; + + // 3. Target must be initialized + wires + .target_read_wires + .initialized + .conditional_enforce_equal(&wires.constant_true, switch)?; + + // 4. Re-entrancy check (target's arg must be None/0) + wires + .target_read_wires + .activation + .conditional_enforce_equal(&FpVar::zero(), switch)?; + + // 5. Claim check: val passed in must match target's expected_input (if set). + wires + .target_read_wires + .expected_input + .conditional_enforce_eq_if_some(switch, &wires.arg(ArgName::Val))?; + + // 6. Resumer check: current process must match target's expected_resumer (if set). + wires + .target_read_wires + .expected_resumer + .conditional_enforce_eq_if_some(switch, &wires.id_curr)?; + + // 7. If target was in yield state, record yield_to; otherwise keep it unchanged. + let target_on_yield = wires.target_read_wires.on_yield.clone(); + let new_yield_to = OptionalFpVar::select_encoded( + &(switch & target_on_yield & is_utxo_curr.not()), + &OptionalFpVar::from_pid(&wires.id_curr), + &wires.target_read_wires.yield_to, + )?; + + wires + .target_write_wires + .yield_to + .encoded() + .conditional_enforce_equal(&new_yield_to.encoded(), switch)?; + + // After a resume, the target is no longer in yield state. + wires + .target_write_wires + .on_yield + .conditional_enforce_equal(&Boolean::FALSE, switch)?; + + // Expectations are consumed by resume. + wires + .target_write_wires + .expected_input + .encoded() + .conditional_enforce_equal(&FpVar::zero(), switch)?; + wires + .target_write_wires + .expected_resumer + .encoded() + .conditional_enforce_equal(&FpVar::zero(), switch)?; + + // 8. Store expected resumer for the current process. + wires + .curr_write_wires + .expected_resumer + .encoded() + .conditional_enforce_equal(&wires.arg(ArgName::Caller), switch)?; + + // --- + // IVC state updates + // --- + // On resume, current program becomes the target, and the old current program + // becomes the new previous program. + let next_id_curr = switch.select(&wires.arg(ArgName::Target), &wires.id_curr)?; + let next_id_prev = OptionalFpVar::select_encoded( + switch, + &OptionalFpVar::from_pid(&wires.id_curr), + &wires.id_prev, + )?; + + wires.id_curr = next_id_curr; + wires.id_prev = next_id_prev; + + Ok(wires) + } + + #[tracing::instrument(target = "gr1cs", skip(self, wires))] + fn visit_call_effect_handler(&self, mut wires: Wires) -> Result { + let switch = &wires.switches.call_effect_handler; + + // The interface witness must match interface ROM. 
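+ // The id is witnessed as four limbs (InterfaceId0..=InterfaceId3); each limb
+ // is compared against the ROM read under this opcode's switch.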
+ let interface_args = [ + ArgName::InterfaceId0, + ArgName::InterfaceId1, + ArgName::InterfaceId2, + ArgName::InterfaceId3, + ]; + for (i, arg) in interface_args.iter().enumerate() { + wires.handler_state.interface_rom_read[i] + .conditional_enforce_equal(&wires.arg(*arg), switch)?; + } + + // No self-call. + wires.id_curr.conditional_enforce_not_equal( + &wires.handler_state.handler_stack_node_process, + switch, + )?; + + // Re-entrancy check (target activation must be None). + wires + .target_read_wires + .activation + .conditional_enforce_equal(&FpVar::zero(), switch)?; + + // Claim check: val ref must match target expected_input (if set). + wires + .target_read_wires + .expected_input + .conditional_enforce_eq_if_some(switch, &wires.arg(ArgName::Val))?; + + // Resumer check: current process must match target expected_resumer (if set). + wires + .target_read_wires + .expected_resumer + .conditional_enforce_eq_if_some(switch, &wires.id_curr)?; + + // Target expectations are consumed by the call. + wires + .target_write_wires + .expected_input + .encoded() + .conditional_enforce_equal(&FpVar::zero(), switch)?; + wires + .target_write_wires + .expected_resumer + .encoded() + .conditional_enforce_equal(&FpVar::zero(), switch)?; + + // Caller expected_resumer is fixed to the resolved target. + wires + .curr_write_wires + .expected_resumer + .encoded() + .conditional_enforce_equal( + &OptionalFpVar::from_pid(&wires.handler_state.handler_stack_node_process).encoded(), + switch, + )?; + + // IVC state updates mirror resume-to-target. + let next_id_curr = switch.select( + &wires.handler_state.handler_stack_node_process, + &wires.id_curr, + )?; + let next_id_prev = OptionalFpVar::select_encoded( + switch, + &OptionalFpVar::from_pid(&wires.id_curr), + &wires.id_prev, + )?; + + wires.id_curr = next_id_curr; + wires.id_prev = next_id_prev; + + Ok(wires) + } + + #[tracing::instrument(target = "gr1cs", skip(self, wires))] + fn visit_burn(&self, mut wires: Wires) -> Result { + let switch = &wires.switches.burn; + + // --- + // Checks from the mocked verifier + // --- + + // 1. Current process must be a UTXO. + wires + .is_utxo_curr + .is_one()? + .conditional_enforce_equal(&Boolean::TRUE, switch)?; + + // 2. Must be initialized. + wires + .curr_read_wires + .initialized + .conditional_enforce_equal(&Boolean::TRUE, switch)?; + + // 3. This UTXO must be marked for burning. + wires + .must_burn_curr + .is_one()? + .conditional_enforce_equal(&Boolean::TRUE, switch)?; + + // 4. Parent must exist. + wires + .id_prev_is_some()? + .conditional_enforce_equal(&Boolean::TRUE, switch)?; + + // 5. Claim check: burned value `ret` must match parent's `expected_input` (if set). + // Parent's state is in `target_read_wires`. + wires + .target_read_wires + .expected_input + .conditional_enforce_eq_if_some(switch, &wires.arg(ArgName::Ret))?; + + // --- + // IVC state updates + // --- + // Like yield, current program becomes the parent, and new prev is the one that burned.
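+ // Unlike Yield, the parent here is `id_prev` (the previous program) rather
+ // than `yield_to`.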
+ let prev_value = wires.id_prev_value()?; + let next_id_curr = switch.select(&prev_value, &wires.id_curr)?; + let next_id_prev = OptionalFpVar::select_encoded( + switch, + &OptionalFpVar::from_pid(&wires.id_curr), + &wires.id_prev, + )?; + wires.id_curr = next_id_curr; + wires.id_prev = next_id_prev; + + Ok(wires) + } + + #[tracing::instrument(target = "gr1cs", skip(self, wires))] + fn visit_yield(&self, mut wires: Wires) -> Result { + let switch = &wires.switches.yield_op; + let yield_to = wires.curr_read_wires.yield_to.clone(); + let yield_to_is_some = yield_to.is_some()?; + + // 1. Yield is only valid for UTXOs. + wires + .is_utxo_curr + .is_one()? + .conditional_enforce_equal(&Boolean::TRUE, switch)?; + + // 2. Must have a target to yield to. + yield_to_is_some.conditional_enforce_equal(&Boolean::TRUE, switch)?; + + // 3. Claim check: yielded value `val` must match parent's `expected_input`. + // The parent's state is in `target_read_wires` because we set `target = yield_to`. + wires + .target_read_wires + .expected_input + .conditional_enforce_eq_if_some( + &(switch & &yield_to_is_some), + &wires.arg(ArgName::Val), + )?; + + // 4. Resumer check: parent must expect the current process (if set). + wires + .target_read_wires + .expected_resumer + .conditional_enforce_eq_if_some(&(switch & &yield_to_is_some), &wires.id_curr)?; + + // Target state should be preserved on yield. + // TODO: make the switches more narrow in scope (only read, only write, + // or read write) + wires + .target_write_wires + .expected_input + .encoded() + .conditional_enforce_equal(&wires.target_read_wires.expected_input.encoded(), switch)?; + + wires + .target_write_wires + .expected_resumer + .encoded() + .conditional_enforce_equal( + &wires.target_read_wires.expected_resumer.encoded(), + switch, + )?; + + // --- + // State update enforcement + // --- + wires + .curr_write_wires + .finalized + .conditional_enforce_equal(&Boolean::TRUE, switch)?; + + // Mark the current process as in-yield and preserve yield_to. + // + // The next 2 checks form a pair. + wires + .curr_write_wires + .on_yield + .conditional_enforce_equal(&Boolean::TRUE, switch)?; + + wires + .curr_write_wires + .yield_to + .encoded() + .conditional_enforce_equal(&wires.curr_read_wires.yield_to.encoded(), switch)?; + + // --- + // IVC state updates + // --- + // On yield, the current program becomes the parent (yield_to), + // and the new prev program is the one that just yielded. + let yield_to_value = yield_to.decode_or_zero()?; + let next_id_curr = (switch & &yield_to_is_some).select(&yield_to_value, &wires.id_curr)?; + let next_id_prev = OptionalFpVar::select_encoded( + &(switch & yield_to_is_some), + &OptionalFpVar::from_pid(&wires.id_curr), + &wires.id_prev, + )?; + wires.id_curr = next_id_curr; + wires.id_prev = next_id_prev; + + Ok(wires) + } + + #[tracing::instrument(target = "gr1cs", skip(self, wires))] + fn visit_return(&self, mut wires: Wires) -> Result { + let switch = &wires.switches.return_op; + let yield_to = wires.curr_read_wires.yield_to.clone(); + let has_parent = yield_to.is_some()?; + + // Coordination scripts only. + wires + .is_utxo_curr + .is_one()? + .conditional_enforce_equal(&Boolean::FALSE, switch)?; + + wires + .curr_write_wires + .finalized + .conditional_enforce_equal(&Boolean::TRUE, switch)?; + wires + .curr_write_wires + .activation + .conditional_enforce_equal(&FpVar::zero(), switch)?; + + // If we have a parent, transfer control back like Yield. 
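+ // With no parent (an entrypoint returning), the selects below fall through
+ // and the IVC ids stay unchanged.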
+ let has_parent_and_switch = switch & &has_parent; + let parent = yield_to.decode_or_zero()?; + let next_id_curr = has_parent_and_switch.select(&parent, &wires.id_curr)?; + let next_id_prev = OptionalFpVar::select_encoded( + &has_parent_and_switch, + &OptionalFpVar::from_pid(&wires.id_curr), + &wires.id_prev, + )?; + wires.id_curr = next_id_curr; + wires.id_prev = next_id_prev; + + Ok(wires) + } + + #[tracing::instrument(target = "gr1cs", skip(self, wires))] + fn visit_new_process(&self, mut wires: Wires) -> Result { + let switch = &wires.switches.new_utxo | &wires.switches.new_coord; + + // The target is the new process being created. + // The current process is the coordination script doing the creation. + // + // 1. Coordinator check: current process must NOT be a UTXO. + wires + .is_utxo_curr + .is_one()? + .conditional_enforce_equal(&Boolean::FALSE, &switch)?; + + // 2. Target type check + let target_is_utxo = wires.is_utxo_target.is_one()?; + // if new_utxo_switch is true, target_is_utxo must be true. + // if new_utxo_switch is false (i.e. new_coord_switch is true), target_is_utxo must be false. + target_is_utxo.conditional_enforce_equal(&wires.switches.new_utxo, &switch)?; + + // 3. Program hash check + let program_hash_args = [ + ArgName::ProgramHash0, + ArgName::ProgramHash1, + ArgName::ProgramHash2, + ArgName::ProgramHash3, + ]; + for (i, arg) in program_hash_args.iter().enumerate() { + wires.rom_program_hash[i].conditional_enforce_equal(&wires.arg(*arg), &switch)?; + } + + // 4. Target must not be initialized. + wires + .target_read_wires + .initialized + .conditional_enforce_equal(&wires.constant_false, &switch)?; + + wires + .target_write_wires + .initialized + .conditional_enforce_equal(&wires.constant_true, &switch)?; + + wires.target_write_wires.init = + switch.select(&wires.arg(ArgName::Val), &wires.target_read_wires.init)?; + + Ok(wires) + } + + #[tracing::instrument(target = "gr1cs", skip_all)] + fn visit_activation(&self, wires: Wires) -> Result { + let switch = &wires.switches.activation; + + check_activation( + switch, + &wires.curr_read_wires.activation, + &wires.arg(ArgName::Val), + &wires.id_prev_value()?, + &wires.arg(ArgName::Caller), + )?; + + Ok(wires) + } + + #[tracing::instrument(target = "gr1cs", skip_all)] + fn visit_init(&self, wires: Wires) -> Result { + let switch = &wires.switches.init; + + check_init( + switch, + &wires.curr_read_wires.init, + &wires.arg(ArgName::Val), + &wires.curr_read_wires.init_caller, + &wires.arg(ArgName::Caller), + )?; + + Ok(wires) + } + + #[tracing::instrument(target = "gr1cs", skip_all)] + fn visit_bind(&self, wires: Wires) -> Result { + let switch = &wires.switches.bind; + + // curr is the token (or the utxo bound to the target) + let is_utxo_curr = wires.is_utxo_curr.is_one()?; + let is_utxo_target = wires.is_utxo_target.is_one()?; + + // we don't need to check if the token is initialized because + // we can't resume an uninitialized token anyway + let is_initialized_target = &wires.target_read_wires.initialized; + + (is_utxo_curr & is_utxo_target & is_initialized_target) + .conditional_enforce_equal(&wires.constant_true, switch)?; + + wires + .curr_read_wires + .ownership + .is_some()?
+ .conditional_enforce_equal(&wires.constant_false, switch)?; + + let owner_id_encoded = &wires.arg(ArgName::OwnerId) + FpVar::one(); + + wires + .curr_write_wires + .ownership + .encoded() + .conditional_enforce_equal(&owner_id_encoded, switch)?; + + Ok(wires) + } + + #[tracing::instrument(target = "gr1cs", skip_all)] + fn visit_unbind(&self, wires: Wires) -> Result { + let switch = &wires.switches.unbind; + + let is_utxo_curr = wires.is_utxo_curr.is_one()?; + let is_utxo_target = wires.is_utxo_target.is_one()?; + + (is_utxo_curr & is_utxo_target).conditional_enforce_equal(&wires.constant_true, switch)?; + + // only the owner can unbind + let id_curr_encoded = &wires.id_curr + FpVar::one(); + wires + .target_read_wires + .ownership + .encoded() + .conditional_enforce_equal(&id_curr_encoded, switch)?; + + wires + .target_write_wires + .ownership + .encoded() + .conditional_enforce_equal(&FpVar::zero(), switch)?; + + Ok(wires) + } + + #[tracing::instrument(target = "gr1cs", skip_all)] + fn visit_new_ref(&self, mut wires: Wires) -> Result { + let switch = &wires.switches.new_ref; + + // 1. Must not be building + wires + .ref_building_remaining + .is_zero()? + .conditional_enforce_equal(&Boolean::TRUE, switch)?; + + // 2. Ret must be fresh ID + wires + .arg(ArgName::Ret) + .conditional_enforce_equal(&wires.ref_arena_stack_ptr, switch)?; + + // 3. Init building state + // remaining = size + wires.ref_building_remaining = + switch.select(&wires.arg(ArgName::Size), &wires.ref_building_remaining)?; + // ptr = ret + wires.ref_building_ptr = + switch.select(&wires.arg(ArgName::Ret), &wires.ref_building_ptr)?; + + // 4. Increment stack ptr by size + let size = wires.arg(ArgName::Size); + wires.ref_arena_stack_ptr = switch.select( + &(&wires.ref_arena_stack_ptr + + size * FpVar::Constant(F::from(REF_PUSH_BATCH_SIZE as u64))), + &wires.ref_arena_stack_ptr, + )?; + + Ok(wires) + } + + #[tracing::instrument(target = "gr1cs", skip_all)] + fn visit_ref_push(&self, mut wires: Wires) -> Result { + let switch = &wires.switches.ref_push; + + let is_building = wires.ref_building_remaining.is_zero()?.not(); + is_building.conditional_enforce_equal(&Boolean::TRUE, switch)?; + + // Update state + // remaining -= 1 word + let next_remaining = &wires.ref_building_remaining - FpVar::one(); + wires.ref_building_remaining = + switch.select(&next_remaining, &wires.ref_building_remaining)?; + + // ptr += 4 elems + let inc = FpVar::one() + FpVar::one() + FpVar::one() + FpVar::one(); + let next_ptr = &wires.ref_building_ptr + inc; + wires.ref_building_ptr = switch.select(&next_ptr, &wires.ref_building_ptr)?; + + Ok(wires) + } + + #[tracing::instrument(target = "gr1cs", skip_all)] + fn visit_ref_get(&self, wires: Wires) -> Result { + let switch = &wires.switches.get; + + let expected = [ + wires.opcode_args[ArgName::PackedRef0.idx()].clone(), + wires.opcode_args[ArgName::PackedRef2.idx()].clone(), + wires.opcode_args[ArgName::PackedRef4.idx()].clone(), + wires.opcode_args[ArgName::PackedRef5.idx()].clone(), + ]; + + for (expected_val, read_val) in expected.iter().zip(wires.ref_arena_read.iter()) { + expected_val.conditional_enforce_equal(read_val, switch)?; + } + + Ok(wires) + } + + #[tracing::instrument(target = "gr1cs", skip_all)] + fn visit_ref_write(&self, wires: Wires) -> Result { + // Plumbing only: circuit semantics for in-place ref writes are not enforced yet. 
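+ // The write is still traced out of circuit via the ref-arena switchboard
+ // (`ref_sizes_read` + `ref_arena_write`); only the in-circuit checks on the
+ // written values are missing here.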
+ let _switch = &wires.switches.ref_write; + Ok(wires) + } + + #[tracing::instrument(target = "gr1cs", skip_all)] + fn visit_program_hash(&self, wires: Wires) -> Result { + let switch = &wires.switches.program_hash; + + let program_hash_args = [ + ArgName::ProgramHash0, + ArgName::ProgramHash1, + ArgName::ProgramHash2, + ArgName::ProgramHash3, + ]; + for (i, arg) in program_hash_args.iter().enumerate() { + wires + .arg(*arg) + .conditional_enforce_equal(&wires.rom_program_hash[i], switch)?; + } + + Ok(wires) + } + + #[tracing::instrument(target = "gr1cs", skip_all)] + fn visit_install_handler(&self, mut wires: Wires) -> Result { + let switch = &wires.switches.install_handler; + + // Only coordination scripts can install handlers + wires + .is_utxo_curr + .is_one()? + .conditional_enforce_equal(&Boolean::FALSE, switch)?; + + // Verify that Interfaces[interface_index] == interface_id + // This ensures the interface index witness is correct + let interface_args = [ + ArgName::InterfaceId0, + ArgName::InterfaceId1, + ArgName::InterfaceId2, + ArgName::InterfaceId3, + ]; + for (i, arg) in interface_args.iter().enumerate() { + wires.handler_state.interface_rom_read[i] + .conditional_enforce_equal(&wires.arg(*arg), switch)?; + } + + // Update handler stack counter (allocate new node) + wires.handler_stack_ptr = switch.select( + &(&wires.handler_stack_ptr + FpVar::one()), + &wires.handler_stack_ptr, + )?; + + Ok(wires) + } + + #[tracing::instrument(target = "gr1cs", skip_all)] + fn visit_uninstall_handler(&self, wires: Wires) -> Result { + let switch = &wires.switches.uninstall_handler; + + // Only coordination scripts can uninstall handlers + wires + .is_utxo_curr + .is_one()? + .conditional_enforce_equal(&Boolean::FALSE, switch)?; + + // Verify that Interfaces[interface_index] == interface_id + // This ensures the interface index witness is correct + let interface_args = [ + ArgName::InterfaceId0, + ArgName::InterfaceId1, + ArgName::InterfaceId2, + ArgName::InterfaceId3, + ]; + for (i, arg) in interface_args.iter().enumerate() { + wires.handler_state.interface_rom_read[i] + .conditional_enforce_equal(&wires.arg(*arg), switch)?; + } + + // Read the node at current head: should contain (process_id, next_ptr) + let node_process = &wires.handler_state.handler_stack_node_process; + + // Verify the process_id in the node matches the current process (only installer can uninstall) + wires + .id_curr + .conditional_enforce_equal(node_process, switch)?; + + Ok(wires) + } + + #[tracing::instrument(target = "gr1cs", skip_all)] + fn visit_get_handler_for(&self, wires: Wires) -> Result { + let switch = &wires.switches.get_handler_for; + + // Verify that Interfaces[interface_index] == interface_id + // This ensures the interface index witness is correct + let interface_args = [ + ArgName::InterfaceId0, + ArgName::InterfaceId1, + ArgName::InterfaceId2, + ArgName::InterfaceId3, + ]; + for (i, arg) in interface_args.iter().enumerate() { + wires.handler_state.interface_rom_read[i] + .conditional_enforce_equal(&wires.arg(*arg), switch)?; + } + + // Read the node at current head: should contain (process_id, next_ptr) + let node_process = &wires.handler_state.handler_stack_node_process; + + // The process_id in the node IS the handler_id we want to return + wires + .arg(ArgName::Ret) + .conditional_enforce_equal(node_process, switch)?; + + Ok(wires) + } +} + +fn register_memory_segments>(mb: &mut M) { + mb.register_mem( + MemoryTag::ProcessTable.into(), + 1, + MemType::Rom, + "ROM_PROCESS_TABLE", + ); + 
mb.register_mem(MemoryTag::MustBurn.into(), 1, MemType::Rom, "ROM_MUST_BURN"); + mb.register_mem(MemoryTag::IsUtxo.into(), 1, MemType::Rom, "ROM_IS_UTXO"); + mb.register_mem( + MemoryTag::Interfaces.into(), + 1, + MemType::Rom, + "ROM_INTERFACES", + ); + mb.register_mem(MemoryTag::RefArena.into(), 1, MemType::Ram, "RAM_REF_ARENA"); + mb.register_mem(MemoryTag::RefSizes.into(), 1, MemType::Ram, "RAM_REF_SIZES"); + mb.register_mem( + MemoryTag::ExpectedInput.into(), + 1, + MemType::Ram, + "RAM_EXPECTED_INPUT", + ); + mb.register_mem( + MemoryTag::ExpectedResumer.into(), + 1, + MemType::Ram, + "RAM_EXPECTED_RESUMER", + ); + mb.register_mem(MemoryTag::OnYield.into(), 1, MemType::Ram, "RAM_ON_YIELD"); + mb.register_mem(MemoryTag::YieldTo.into(), 1, MemType::Ram, "RAM_YIELD_TO"); + mb.register_mem( + MemoryTag::Activation.into(), + 1, + MemType::Ram, + "RAM_ACTIVATION", + ); + mb.register_mem(MemoryTag::Init.into(), 1, MemType::Ram, "RAM_INIT"); + mb.register_mem( + MemoryTag::InitCaller.into(), + 1, + MemType::Ram, + "RAM_INIT_CALLER", + ); + mb.register_mem( + MemoryTag::Initialized.into(), + 1, + MemType::Ram, + "RAM_INITIALIZED", + ); + mb.register_mem( + MemoryTag::Finalized.into(), + 1, + MemType::Ram, + "RAM_FINALIZED", + ); + mb.register_mem(MemoryTag::DidBurn.into(), 1, MemType::Ram, "RAM_DID_BURN"); + mb.register_mem( + MemoryTag::Ownership.into(), + 1, + MemType::Ram, + "RAM_OWNERSHIP", + ); + mb.register_mem( + MemoryTag::HandlerStackArenaProcess.into(), + 1, + MemType::Ram, + "RAM_HANDLER_STACK_ARENA_PROCESS", + ); + mb.register_mem( + MemoryTag::HandlerStackArenaNextPtr.into(), + 1, + MemType::Ram, + "RAM_HANDLER_STACK_ARENA_NEXT_PTR", + ); + mb.register_mem( + MemoryTag::HandlerStackHeads.into(), + 1, + MemType::Ram, + "RAM_HANDLER_STACK_HEADS", + ); + mb.register_mem( + MemoryTag::TraceCommitments.into(), + 1, + MemType::Ram, + "RAM_TRACE_COMMITMENTS", + ); +} + +#[tracing::instrument(target = "gr1cs", skip_all)] +fn ivc_wires( + cs: &ConstraintSystemRef, + wires_in: &Wires, + wires_out: &Wires, +) -> Result { + let input = IvcWireIndices { + id_curr: fpvar_witness_index(cs, &wires_in.id_curr)?, + id_prev: fpvar_witness_index(cs, &wires_in.id_prev.encoded())?, + ref_arena_stack_ptr: fpvar_witness_index(cs, &wires_in.ref_arena_stack_ptr)?, + handler_stack_ptr: fpvar_witness_index(cs, &wires_in.handler_stack_ptr)?, + ref_building_remaining: fpvar_witness_index(cs, &wires_in.ref_building_remaining)?, + ref_building_ptr: fpvar_witness_index(cs, &wires_in.ref_building_ptr)?, + }; + + let output = IvcWireIndices { + id_curr: fpvar_witness_index(cs, &wires_out.id_curr)?, + id_prev: fpvar_witness_index(cs, &wires_out.id_prev.encoded())?, + ref_arena_stack_ptr: fpvar_witness_index(cs, &wires_out.ref_arena_stack_ptr)?, + handler_stack_ptr: fpvar_witness_index(cs, &wires_out.handler_stack_ptr)?, + ref_building_remaining: fpvar_witness_index(cs, &wires_out.ref_building_remaining)?, + ref_building_ptr: fpvar_witness_index(cs, &wires_out.ref_building_ptr)?, + }; + + Ok(IvcWireLayout { input, output }) +} + +fn fpvar_witness_index( + cs: &ConstraintSystemRef, + var: &FpVar, +) -> Result { + let witness_offset = cs.num_instance_variables(); + match var { + FpVar::Var(alloc) => { + let full_index = alloc + .variable + .get_variable_index(witness_offset) + .ok_or(SynthesisError::AssignmentMissing)?; + if alloc.variable.is_instance() { + return Err(SynthesisError::AssignmentMissing); + } + Ok(full_index - witness_offset) + } + FpVar::Constant(_) => Err(SynthesisError::AssignmentMissing), + } +} + 
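+// Per-step witness values gathered before circuit allocation; `allocate_vars`
+// turns a `PreWires` into allocated `Wires` via `Wires::from_irw`.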
+impl PreWires { + #[allow(clippy::too_many_arguments)] + pub fn new( + irw: InterRoundWires, + curr_mem_switches: MemSwitchboardBool, + target_mem_switches: MemSwitchboardBool, + rom_switches: RomSwitchboard, + handler_switches: HandlerSwitchboard, + ref_arena_switches: RefArenaSwitchboard, + interface_index: F, + opcode_discriminant: F, + ) -> Self { + Self { + switches: ExecutionSwitches::default(), + irw, + interface_index, + opcode_args: [F::ZERO; OPCODE_ARG_COUNT], + opcode_discriminant, + curr_mem_switches, + target_mem_switches, + rom_switches, + handler_switches, + ref_arena_switches, + } + } + + pub fn arg(&self, kind: ArgName) -> F { + self.opcode_args[kind.idx()] + } + + pub fn debug_print(&self) { + let _guard = debug_span!("witness assignments").entered(); + + tracing::debug!("target={}", self.arg(ArgName::Target)); + tracing::debug!("val={}", self.arg(ArgName::Val)); + tracing::debug!("ret={}", self.arg(ArgName::Ret)); + tracing::debug!("id_prev={}", self.irw.id_prev.encoded()); + } +} + +fn trace_ic>(curr_pid: usize, mb: &mut M, config: &OpcodeConfig) { + if config.execution_switches.nop { + return; + } + + let mut concat_data = [F::ZERO; 12]; + + for (i, slot) in concat_data.iter_mut().take(4).enumerate() { + let addr = (curr_pid * 4) + i; + + *slot = mb.conditional_read( + true, + Address { + tag: MemoryTag::TraceCommitments.into(), + addr: addr as u64, + }, + )[0]; + } + + concat_data[4] = config.opcode_discriminant; + concat_data[5..(OPCODE_ARG_COUNT + 5)].copy_from_slice(&config.opcode_args[..]); + + let new_commitment = ark_poseidon2::compress_12_trace(&concat_data).unwrap(); + + for (i, elem) in new_commitment.iter().enumerate() { + let addr = (curr_pid * 4) + i; + + mb.conditional_write( + true, + Address { + addr: addr as u64, + tag: MemoryTag::TraceCommitments.into(), + }, + vec![*elem], + ); + } +} + +fn trace_ic_wires>( + id_curr: FpVar, + rm: &mut M, + cs: &ConstraintSystemRef, + should_trace: &Boolean, + opcode_discriminant: &FpVar, + opcode_args: &[FpVar; OPCODE_ARG_COUNT], +) -> Result<(), SynthesisError> { + let mut current_commitment = vec![]; + + let mut addresses = vec![]; + + for i in 0..4 { + let offset = FpVar::new_constant(cs.clone(), F::from(i as u64))?; + let addr = &(id_curr.clone() * FpVar::new_constant(cs.clone(), F::from(4))?) 
+ &offset; + let address = Address { + tag: MemoryTag::TraceCommitments.allocate(cs.clone())?, + addr, + }; + + addresses.push(address.clone()); + + let rv = rm.conditional_read(should_trace, &address)?[0].clone(); + current_commitment.push(rv); + } + + let compress_input = [ + current_commitment[0].clone(), + current_commitment[1].clone(), + current_commitment[2].clone(), + current_commitment[3].clone(), + opcode_discriminant.clone(), + opcode_args[0].clone(), + opcode_args[1].clone(), + opcode_args[2].clone(), + opcode_args[3].clone(), + opcode_args[4].clone(), + opcode_args[5].clone(), + opcode_args[6].clone(), + ]; + + let new_commitment = ark_poseidon2::compress_12(&compress_input)?; + + for i in 0..4 { + rm.conditional_write(should_trace, &addresses[i], &[new_commitment[i].clone()])?; + } + + Ok(()) +} diff --git a/interleaving/starstream-interleaving-proof/src/circuit_test.rs b/interleaving/starstream-interleaving-proof/src/circuit_test.rs new file mode 100644 index 00000000..445ef8a2 --- /dev/null +++ b/interleaving/starstream-interleaving-proof/src/circuit_test.rs @@ -0,0 +1,648 @@ +use crate::{logging::setup_logger, prove}; +use starstream_interleaving_spec::{ + Hash, InterleavingInstance, InterleavingWitness, LedgerEffectsCommitment, ProcessId, Ref, + Value, WitEffectOutput, WitLedgerEffect, +}; + +pub fn h(n: u8) -> Hash { + Hash([n as u64, n as u64, 0, 0], std::marker::PhantomData) +} + +pub fn v(data: &[u8]) -> Value { + let mut bytes = [0u8; 8]; + let len = data.len().min(8); + bytes[..len].copy_from_slice(&data[..len]); + Value(u64::from_le_bytes(bytes)) +} + +fn v4_from_value(val: Value) -> [Value; 4] { + let mut out = [Value::nil(); 4]; + out[0] = val; + out +} + +fn ref_push1(val: Value) -> WitLedgerEffect { + WitLedgerEffect::RefPush { + vals: [val, Value::nil(), Value::nil(), Value::nil()], + } +} + +fn host_calls_roots(traces: &[Vec]) -> Vec { + traces + .iter() + .map(|trace| { + trace + .iter() + .cloned() + .fold(LedgerEffectsCommitment::iv(), |acc, op| { + crate::commit(acc, op) + }) + }) + .collect() +} + +#[test] +fn test_circuit_many_steps() { + setup_logger(); + + let utxo_id = 0; + let token_id = 1; + let coord_id = 2; + + let p0 = ProcessId(utxo_id); + let p1 = ProcessId(token_id); + let p2 = ProcessId(coord_id); + + let val_0 = v(&[0]); + let val_1 = v(&[1]); + let val_4 = v(&[4]); + + let ref_0 = Ref(0); + let ref_1 = Ref(4); + let ref_4 = Ref(8); + + let utxo_trace = vec![ + WitLedgerEffect::Init { + val: ref_4.into(), + caller: p2.into(), + }, + WitLedgerEffect::RefGet { + reff: ref_4, + offset: 0, + ret: v4_from_value(val_4).into(), + }, + WitLedgerEffect::Activation { + val: ref_0.into(), + caller: p2.into(), + }, + WitLedgerEffect::GetHandlerFor { + interface_id: h(100), + handler_id: p2.into(), + }, + WitLedgerEffect::Yield { + val: ref_1, // Yielding nothing + }, + ]; + + let token_trace = vec![ + WitLedgerEffect::Init { + val: ref_1.into(), + caller: p2.into(), + }, + WitLedgerEffect::RefGet { + reff: ref_1, + offset: 0, + ret: v4_from_value(val_1).into(), + }, + WitLedgerEffect::Activation { + val: ref_0.into(), + caller: p2.into(), + }, + WitLedgerEffect::Bind { owner_id: p0 }, + WitLedgerEffect::Yield { + val: ref_1, // Yielding nothing + }, + ]; + + let coord_trace = vec![ + WitLedgerEffect::NewRef { + size: 1, + ret: ref_0.into(), + }, + ref_push1(val_0), + WitLedgerEffect::NewRef { + size: 1, + ret: ref_1.into(), + }, + ref_push1(val_1), + WitLedgerEffect::NewRef { + size: 1, + ret: ref_4.into(), + }, + ref_push1(val_4), + 
WitLedgerEffect::NewUtxo { + program_hash: h(0), + val: ref_4, + id: p0.into(), + }, + WitLedgerEffect::NewUtxo { + program_hash: h(1), + val: ref_1, + id: p1.into(), + }, + WitLedgerEffect::Resume { + target: p1, + val: ref_0, + ret: ref_1.into(), + caller: WitEffectOutput::Resolved(None), + }, + WitLedgerEffect::InstallHandler { + interface_id: h(100), + }, + WitLedgerEffect::Resume { + target: p0, + val: ref_0, + ret: ref_1.into(), + caller: Some(p0).into(), + }, + WitLedgerEffect::UninstallHandler { + interface_id: h(100), + }, + ]; + + let traces = vec![utxo_trace, token_trace, coord_trace]; + + let host_calls_roots = host_calls_roots(&traces); + + let instance = InterleavingInstance { + n_inputs: 0, + n_new: 2, + n_coords: 8, + entrypoint: p2, + process_table: vec![h(0), h(1), h(2)], + is_utxo: vec![true, true, false], + must_burn: vec![false, false, false], + ownership_in: vec![None, None, None], + ownership_out: vec![None, Some(ProcessId(0)), None], + host_calls_roots, + input_states: vec![], + }; + + let wit = InterleavingWitness { traces }; + + let result = prove(instance, wit); + assert!(result.is_ok()); +} + +#[test] +fn test_circuit_small() { + setup_logger(); + + let utxo_id = 0; + let coord_id = 1; + + let p0 = ProcessId(utxo_id); + let p1 = ProcessId(coord_id); + + let val_0 = v(&[0]); + + let ref_0 = Ref(0); + + let utxo_trace = vec![WitLedgerEffect::Yield { + val: ref_0, // Yielding nothing + }]; + + let coord_trace = vec![ + WitLedgerEffect::NewRef { + size: 1, + ret: ref_0.into(), + }, + ref_push1(val_0), + WitLedgerEffect::NewUtxo { + program_hash: h(0), + val: ref_0, + id: p0.into(), + }, + WitLedgerEffect::Resume { + target: p0, + val: ref_0, + ret: ref_0.into(), + caller: WitEffectOutput::Resolved(None), + }, + ]; + + let traces = vec![utxo_trace, coord_trace]; + + let host_calls_roots = host_calls_roots(&traces); + + let instance = InterleavingInstance { + n_inputs: 0, + n_new: 1, + n_coords: 1, + entrypoint: p1, + process_table: vec![h(0), h(1)], + is_utxo: vec![true, false], + must_burn: vec![false, false], + ownership_in: vec![None, None], + ownership_out: vec![None, None], + host_calls_roots, + input_states: vec![], + }; + + let wit = InterleavingWitness { traces }; + + let result = prove(instance, wit); + assert!(result.is_ok()); +} + +#[test] +#[should_panic] +fn test_circuit_resumer_mismatch() { + setup_logger(); + + let utxo_id = 0; + let coord_a_id = 1; + let coord_b_id = 2; + + let p0 = ProcessId(utxo_id); + let p1 = ProcessId(coord_a_id); + let p2 = ProcessId(coord_b_id); + + let val_0 = v(&[0]); + + let ref_0 = Ref(0); + + let utxo_trace = vec![WitLedgerEffect::Yield { val: ref_0 }]; + + let coord_a_trace = vec![ + WitLedgerEffect::NewRef { + size: 1, + ret: ref_0.into(), + }, + ref_push1(val_0), + WitLedgerEffect::NewUtxo { + program_hash: h(0), + val: ref_0, + id: p0.into(), + }, + WitLedgerEffect::NewCoord { + program_hash: h(2), + val: ref_0, + id: p2.into(), + }, + WitLedgerEffect::Resume { + target: p0, + val: ref_0, + ret: ref_0.into(), + caller: WitEffectOutput::Resolved(None), + }, + WitLedgerEffect::Resume { + target: p2, + val: ref_0, + ret: ref_0.into(), + caller: WitEffectOutput::Resolved(None), + }, + ]; + + let coord_b_trace = vec![WitLedgerEffect::Resume { + target: p0, + val: ref_0, + ret: ref_0.into(), + caller: WitEffectOutput::Resolved(None), + }]; + + let traces = vec![utxo_trace, coord_a_trace, coord_b_trace]; + + let host_calls_roots = host_calls_roots(&traces); + + let instance = InterleavingInstance { + n_inputs: 0, + n_new: 1, 
+ n_coords: 2, + entrypoint: p1, + process_table: vec![h(0), h(1), h(2)], + is_utxo: vec![true, false, false], + must_burn: vec![false, false, false], + ownership_in: vec![None, None, None], + ownership_out: vec![None, None, None], + host_calls_roots, + input_states: vec![], + }; + + let wit = InterleavingWitness { traces }; + + let result = prove(instance, wit); + assert!(result.is_err()); +} + +#[test] +fn test_ref_write_basic_sat() { + setup_logger(); + + let coord_id = 0; + let p0 = ProcessId(coord_id); + let ref_0 = Ref(0); + + let initial = Value(41); + let updated = Value(99); + + let initial_get = [initial, Value::nil(), Value::nil(), Value::nil()]; + let updated_get = [updated, Value::nil(), Value::nil(), Value::nil()]; + + let coord_trace = vec![ + WitLedgerEffect::NewRef { + size: 1, + ret: ref_0.into(), + }, + WitLedgerEffect::RefPush { + vals: [initial, Value::nil(), Value::nil(), Value::nil()], + }, + WitLedgerEffect::RefGet { + ret: initial_get.into(), + reff: ref_0, + offset: 0, + }, + WitLedgerEffect::RefWrite { + reff: ref_0, + offset: 0, + vals: [updated, Value::nil(), Value::nil(), Value::nil()], + }, + WitLedgerEffect::RefGet { + ret: updated_get.into(), + reff: ref_0, + offset: 0, + }, + ]; + + let traces = vec![coord_trace]; + let host_calls_roots = host_calls_roots(&traces); + + let instance = InterleavingInstance { + n_inputs: 0, + n_new: 0, + n_coords: 1, + entrypoint: p0, + process_table: vec![h(0)], + is_utxo: vec![false], + must_burn: vec![false], + ownership_in: vec![None], + ownership_out: vec![None], + host_calls_roots, + input_states: vec![], + }; + + let wit = InterleavingWitness { traces }; + let result = prove(instance, wit); + assert!(result.is_ok()); +} + +#[test] +fn test_install_handler_get_sat() { + test_install_handler_get(ProcessId(0)); +} + +#[test] +#[should_panic] +fn test_install_handler_get_unsat() { + test_install_handler_get(ProcessId(1)); +} + +fn test_install_handler_get(exp: ProcessId) { + setup_logger(); + + let coord_id = 0; + let p0 = ProcessId(coord_id); + + let coord_trace = vec![ + WitLedgerEffect::InstallHandler { + interface_id: h(100), + }, + WitLedgerEffect::InstallHandler { + interface_id: h(105), + }, + WitLedgerEffect::GetHandlerFor { + interface_id: h(100), + handler_id: exp.into(), + }, + ]; + + let traces = vec![coord_trace]; + let host_calls_roots = host_calls_roots(&traces); + + let instance = InterleavingInstance { + n_inputs: 0, + n_new: 0, + n_coords: 1, + entrypoint: p0, + process_table: vec![h(0)], + is_utxo: vec![false], + must_burn: vec![false], + ownership_in: vec![None], + ownership_out: vec![None], + host_calls_roots, + input_states: vec![], + }; + + let wit = InterleavingWitness { traces }; + let result = prove(instance, wit); + assert!(result.is_ok()); +} + +#[test] +#[should_panic] +fn test_yield_parent_resumer_mismatch_trace() { + setup_logger(); + + let utxo_id = 0; + let coord_a_id = 1; + let coord_b_id = 2; + + let p0 = ProcessId(utxo_id); + let p1 = ProcessId(coord_a_id); + let p2 = ProcessId(coord_b_id); + + let ref_0 = Ref(0); + let ref_1 = Ref(4); + + // Coord A resumes UTXO but sets its own expected_resumer to Coord B. + // Then UTXO yields back to Coord A. Spec says this should fail. 
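+    // (The yield resumer check compares the yielder against the parent's
+    // expected_resumer, which here is p2, so the constraints are unsatisfiable.)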
+ let utxo_trace = vec![WitLedgerEffect::Yield { val: ref_1 }]; + + let coord_a_trace = vec![WitLedgerEffect::Resume { + target: p0, + val: ref_0, + ret: ref_1.into(), + caller: Some(p2).into(), + }]; + + let coord_b_trace = vec![]; + + let traces = vec![utxo_trace, coord_a_trace, coord_b_trace]; + let host_calls_roots = host_calls_roots(&traces); + + let instance = InterleavingInstance { + n_inputs: 1, + n_new: 0, + n_coords: 2, + entrypoint: p1, + process_table: vec![h(0), h(1), h(2)], + is_utxo: vec![true, false, false], + must_burn: vec![false, false, false], + ownership_in: vec![None, None, None], + ownership_out: vec![None, None, None], + host_calls_roots, + input_states: vec![], + }; + + let wit = InterleavingWitness { traces }; + + let _result = prove(instance, wit); +} + +#[test] +#[should_panic] +fn test_call_effect_handler_resumer_mismatch_trace() { + setup_logger(); + + let utxo_id = 0; + let coord_top_id = 1; + let coord_handler_id = 2; + + let p0 = ProcessId(utxo_id); + let p1 = ProcessId(coord_top_id); + let p2 = ProcessId(coord_handler_id); + + let ref_0 = Ref(0); + let val_0 = v(&[7]); + let iface = h(100); + + let utxo_trace = vec![ + WitLedgerEffect::Init { + val: ref_0.into(), + caller: p1.into(), + }, + WitLedgerEffect::CallEffectHandler { + interface_id: iface, + val: ref_0, + ret: ref_0.into(), + }, + WitLedgerEffect::Yield { val: ref_0 }, + ]; + + let coord_top_trace = vec![ + WitLedgerEffect::NewRef { + size: 1, + ret: ref_0.into(), + }, + ref_push1(val_0), + WitLedgerEffect::NewUtxo { + program_hash: h(0), + val: ref_0, + id: p0.into(), + }, + WitLedgerEffect::NewCoord { + program_hash: h(2), + val: ref_0, + id: p2.into(), + }, + WitLedgerEffect::Resume { + target: p2, + val: ref_0, + ret: ref_0.into(), + caller: WitEffectOutput::Resolved(None), + }, + // Invalid on purpose: after p0 CallEffectHandler, only p2 should resume p0. 
+ WitLedgerEffect::Resume { + target: p0, + val: ref_0, + ret: ref_0.into(), + caller: WitEffectOutput::Resolved(None), + }, + ]; + + let coord_handler_trace = vec![ + WitLedgerEffect::Init { + val: ref_0.into(), + caller: p1.into(), + }, + WitLedgerEffect::InstallHandler { + interface_id: iface, + }, + WitLedgerEffect::Resume { + target: p0, + val: ref_0, + ret: ref_0.into(), + caller: WitEffectOutput::Resolved(None), + }, + WitLedgerEffect::Return {}, + ]; + + let traces = vec![utxo_trace, coord_top_trace, coord_handler_trace]; + let host_calls_roots = host_calls_roots(&traces); + + let instance = InterleavingInstance { + n_inputs: 0, + n_new: 1, + n_coords: 2, + entrypoint: p1, + process_table: vec![h(0), h(1), h(2)], + is_utxo: vec![true, false, false], + must_burn: vec![false, false, false], + ownership_in: vec![None, None, None], + ownership_out: vec![None, None, None], + host_calls_roots, + input_states: vec![], + }; + + let wit = InterleavingWitness { traces }; + let _result = prove(instance, wit); +} + +#[test] +fn test_entrypoint_return_sat() { + setup_logger(); + + let p0 = ProcessId(0); + let ref_0 = Ref(0); + let val_0 = v(&[7]); + + let entry_trace = vec![ + WitLedgerEffect::NewRef { + size: 1, + ret: ref_0.into(), + }, + ref_push1(val_0), + WitLedgerEffect::Return {}, + ]; + + let traces = vec![entry_trace]; + let host_calls_roots = host_calls_roots(&traces); + + let instance = InterleavingInstance { + n_inputs: 0, + n_new: 0, + n_coords: 1, + entrypoint: p0, + process_table: vec![h(0)], + is_utxo: vec![false], + must_burn: vec![], + ownership_in: vec![], + ownership_out: vec![], + host_calls_roots, + input_states: vec![], + }; + + let wit = InterleavingWitness { traces }; + let result = prove(instance, wit); + assert!(result.is_ok()); +} + +#[test] +#[should_panic] +fn test_non_entrypoint_return_without_parent_panics() { + setup_logger(); + + let p0 = ProcessId(0); + + // p1 has a Return but is never reached from entrypoint p0, so this is an + // invalid interleaving shape and should fail. 
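Stated as a standalone check (a hypothetical helper, not code from this crate), the shape rule behind this test is simply that a process may only Return if it either has a recorded parent to return to or is the entrypoint itself:

fn return_is_valid(proc_id: usize, entrypoint: usize, parent: Option<usize>) -> bool {
    parent.is_some() || proc_id == entrypoint
}

fn main() {
    // p1 was never resumed (no parent) and is not the entrypoint, so its Return
    // makes the interleaving invalid; the entrypoint's own Return is fine.
    assert!(!return_is_valid(1, 0, None));
    assert!(return_is_valid(0, 0, None));
}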
+ let traces = vec![vec![], vec![WitLedgerEffect::Return {}]]; + let host_calls_roots = host_calls_roots(&traces); + + let instance = InterleavingInstance { + n_inputs: 0, + n_new: 0, + n_coords: 2, + entrypoint: p0, + process_table: vec![h(0), h(1)], + is_utxo: vec![false, false], + must_burn: vec![], + ownership_in: vec![], + ownership_out: vec![], + host_calls_roots, + input_states: vec![], + }; + + let wit = InterleavingWitness { traces }; + let _ = prove(instance, wit); +} diff --git a/interleaving/starstream-interleaving-proof/src/coroutine_args_gadget.rs b/interleaving/starstream-interleaving-proof/src/coroutine_args_gadget.rs new file mode 100644 index 00000000..63e11a9f --- /dev/null +++ b/interleaving/starstream-interleaving-proof/src/coroutine_args_gadget.rs @@ -0,0 +1,40 @@ +use crate::F; +use crate::circuit::MemoryTag; +use crate::opcode_dsl::OpcodeDsl; +use ark_r1cs_std::{eq::EqGadget as _, fields::fp::FpVar, prelude::Boolean}; +use ark_relations::gr1cs::SynthesisError; + +pub fn coroutine_args_ops( + dsl: &mut D, + activation_cond: &D::Bool, + init_cond: &D::Bool, + addr: &D::Val, +) -> Result<(D::Val, D::Val), D::Error> { + let activation = dsl.read(activation_cond, MemoryTag::Activation, addr)?; + let init = dsl.read(init_cond, MemoryTag::Init, addr)?; + Ok((activation, init)) +} + +pub fn check_activation( + switch: &Boolean, + activation: &FpVar, + arg_val: &FpVar, + expected_caller: &FpVar, + arg_caller: &FpVar, +) -> Result<(), SynthesisError> { + activation.conditional_enforce_equal(arg_val, switch)?; + expected_caller.conditional_enforce_equal(arg_caller, switch)?; + Ok(()) +} + +pub fn check_init( + switch: &Boolean, + init: &FpVar, + arg_val: &FpVar, + init_caller: &FpVar, + arg_caller: &FpVar, +) -> Result<(), SynthesisError> { + init.conditional_enforce_equal(arg_val, switch)?; + init_caller.conditional_enforce_equal(arg_caller, switch)?; + Ok(()) +} diff --git a/interleaving/starstream-interleaving-proof/src/execution_switches.rs b/interleaving/starstream-interleaving-proof/src/execution_switches.rs new file mode 100644 index 00000000..b83221df --- /dev/null +++ b/interleaving/starstream-interleaving-proof/src/execution_switches.rs @@ -0,0 +1,334 @@ +use ark_r1cs_std::{ + alloc::AllocVar as _, + eq::EqGadget as _, + fields::{FieldVar as _, fp::FpVar}, + prelude::Boolean, +}; +use ark_relations::gr1cs::{ConstraintSystemRef, LinearCombination, SynthesisError, Variable}; +use starstream_interleaving_spec::EffectDiscriminant; + +use crate::F; + +#[derive(Clone)] +pub(crate) struct ExecutionSwitches { + pub(crate) resume: T, + pub(crate) call_effect_handler: T, + pub(crate) yield_op: T, + pub(crate) return_op: T, + pub(crate) burn: T, + pub(crate) program_hash: T, + pub(crate) new_utxo: T, + pub(crate) new_coord: T, + pub(crate) activation: T, + pub(crate) init: T, + pub(crate) bind: T, + pub(crate) unbind: T, + pub(crate) new_ref: T, + pub(crate) ref_push: T, + pub(crate) get: T, + pub(crate) ref_write: T, + pub(crate) install_handler: T, + pub(crate) uninstall_handler: T, + pub(crate) get_handler_for: T, + pub(crate) nop: T, +} + +impl ExecutionSwitches { + /// Allocates circuit variables for the switches and enforces exactly one is true + pub(crate) fn allocate_and_constrain( + &self, + cs: ConstraintSystemRef, + opcode_discriminant: &FpVar, + ) -> Result>, SynthesisError> { + let switches = [ + self.resume, + self.call_effect_handler, + self.yield_op, + self.return_op, + self.nop, + self.burn, + self.program_hash, + self.new_utxo, + self.new_coord, + 
self.activation, + self.init, + self.bind, + self.unbind, + self.new_ref, + self.ref_push, + self.get, + self.ref_write, + self.install_handler, + self.uninstall_handler, + self.get_handler_for, + ]; + + let allocated_switches: Vec<_> = switches + .iter() + .map(|val| Boolean::new_witness(cs.clone(), || Ok(*val)).unwrap()) + .collect(); + + // Enforce exactly one switch is true + cs.enforce_r1cs_constraint( + || { + allocated_switches + .iter() + .fold(LinearCombination::new(), |acc, switch| acc + switch.lc()) + .clone() + }, + || LinearCombination::new() + Variable::one(), + || LinearCombination::new() + Variable::one(), + ) + .unwrap(); + + let [ + resume, + call_effect_handler, + yield_op, + return_op, + nop, + burn, + program_hash, + new_utxo, + new_coord, + activation, + init, + bind, + unbind, + new_ref, + ref_push, + get, + ref_write, + install_handler, + uninstall_handler, + get_handler_for, + ] = allocated_switches.as_slice() + else { + unreachable!() + }; + + let terms = [ + (resume, EffectDiscriminant::Resume as u64), + ( + call_effect_handler, + EffectDiscriminant::CallEffectHandler as u64, + ), + (yield_op, EffectDiscriminant::Yield as u64), + (return_op, EffectDiscriminant::Return as u64), + (burn, EffectDiscriminant::Burn as u64), + (program_hash, EffectDiscriminant::ProgramHash as u64), + (new_utxo, EffectDiscriminant::NewUtxo as u64), + (new_coord, EffectDiscriminant::NewCoord as u64), + (activation, EffectDiscriminant::Activation as u64), + (init, EffectDiscriminant::Init as u64), + (bind, EffectDiscriminant::Bind as u64), + (unbind, EffectDiscriminant::Unbind as u64), + (new_ref, EffectDiscriminant::NewRef as u64), + (ref_push, EffectDiscriminant::RefPush as u64), + (get, EffectDiscriminant::RefGet as u64), + (ref_write, EffectDiscriminant::RefWrite as u64), + (install_handler, EffectDiscriminant::InstallHandler as u64), + ( + uninstall_handler, + EffectDiscriminant::UninstallHandler as u64, + ), + (get_handler_for, EffectDiscriminant::GetHandlerFor as u64), + ]; + + let expected_opcode = terms.iter().fold(FpVar::zero(), |acc, (switch, disc)| { + acc + FpVar::from((*switch).clone()) * F::from(*disc) + }); + + expected_opcode.enforce_equal(opcode_discriminant)?; + + Ok(ExecutionSwitches { + resume: resume.clone(), + call_effect_handler: call_effect_handler.clone(), + yield_op: yield_op.clone(), + return_op: return_op.clone(), + nop: nop.clone(), + burn: burn.clone(), + program_hash: program_hash.clone(), + new_utxo: new_utxo.clone(), + new_coord: new_coord.clone(), + activation: activation.clone(), + init: init.clone(), + bind: bind.clone(), + unbind: unbind.clone(), + new_ref: new_ref.clone(), + ref_push: ref_push.clone(), + get: get.clone(), + ref_write: ref_write.clone(), + install_handler: install_handler.clone(), + uninstall_handler: uninstall_handler.clone(), + get_handler_for: get_handler_for.clone(), + }) + } + + pub(crate) fn nop() -> Self { + Self { + nop: true, + ..Self::default() + } + } + + pub(crate) fn resume() -> Self { + Self { + resume: true, + ..Self::default() + } + } + + pub(crate) fn call_effect_handler() -> Self { + Self { + call_effect_handler: true, + ..Self::default() + } + } + + pub(crate) fn yield_op() -> Self { + Self { + yield_op: true, + ..Self::default() + } + } + + pub(crate) fn return_op() -> Self { + Self { + return_op: true, + ..Self::default() + } + } + + pub(crate) fn burn() -> Self { + Self { + burn: true, + ..Self::default() + } + } + + pub(crate) fn program_hash() -> Self { + Self { + program_hash: true, + ..Self::default() + } + } 
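The two constraints enforced by allocate_and_constrain above can be restated over plain integers (toy discriminant values, not the real EffectDiscriminant ones): the switch vector must be one-hot, and its weighted sum must reproduce the claimed opcode discriminant.

fn check_switch_encoding(switches: &[bool], discriminants: &[u64], opcode: u64) -> bool {
    // Exactly one switch may be set: the sum of the booleans equals 1.
    let one_hot = switches.iter().filter(|b| **b).count() == 1;
    // The selected discriminant: sum of switch_i * disc_i over all switches.
    let encoded: u64 = switches
        .iter()
        .zip(discriminants)
        .map(|(s, d)| if *s { *d } else { 0 })
        .sum();
    one_hot && encoded == opcode
}

fn main() {
    let discs = [3, 7, 11];
    assert!(check_switch_encoding(&[false, true, false], &discs, 7));
    // Two switches set at once violates the one-hot constraint.
    assert!(!check_switch_encoding(&[true, true, false], &discs, 10));
}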
+ + pub(crate) fn new_utxo() -> Self { + Self { + new_utxo: true, + ..Self::default() + } + } + + pub(crate) fn new_coord() -> Self { + Self { + new_coord: true, + ..Self::default() + } + } + + pub(crate) fn activation() -> Self { + Self { + activation: true, + ..Self::default() + } + } + + pub(crate) fn init() -> Self { + Self { + init: true, + ..Self::default() + } + } + + pub(crate) fn bind() -> Self { + Self { + bind: true, + ..Self::default() + } + } + + pub(crate) fn unbind() -> Self { + Self { + unbind: true, + ..Self::default() + } + } + + pub(crate) fn new_ref() -> Self { + Self { + new_ref: true, + ..Self::default() + } + } + + pub(crate) fn ref_push() -> Self { + Self { + ref_push: true, + ..Self::default() + } + } + + pub(crate) fn get() -> Self { + Self { + get: true, + ..Self::default() + } + } + + pub(crate) fn ref_write() -> Self { + Self { + ref_write: true, + ..Self::default() + } + } + + pub(crate) fn install_handler() -> Self { + Self { + install_handler: true, + ..Self::default() + } + } + + pub(crate) fn uninstall_handler() -> Self { + Self { + uninstall_handler: true, + ..Self::default() + } + } + + pub(crate) fn get_handler_for() -> Self { + Self { + get_handler_for: true, + ..Self::default() + } + } +} + +impl Default for ExecutionSwitches { + fn default() -> Self { + Self { + resume: false, + call_effect_handler: false, + yield_op: false, + return_op: false, + burn: false, + program_hash: false, + new_utxo: false, + new_coord: false, + activation: false, + init: false, + bind: false, + unbind: false, + new_ref: false, + ref_push: false, + get: false, + ref_write: false, + install_handler: false, + uninstall_handler: false, + get_handler_for: false, + nop: false, + } + } +} diff --git a/interleaving/starstream-interleaving-proof/src/handler_stack_gadget.rs b/interleaving/starstream-interleaving-proof/src/handler_stack_gadget.rs new file mode 100644 index 00000000..b0d006da --- /dev/null +++ b/interleaving/starstream-interleaving-proof/src/handler_stack_gadget.rs @@ -0,0 +1,215 @@ +use std::collections::{BTreeMap, BTreeSet}; + +use crate::opcode_dsl::{OpcodeDsl, OpcodeSynthDsl, OpcodeTraceDsl}; +use crate::switchboard::{HandlerSwitchboard, HandlerSwitchboardWires}; +use crate::{F, LedgerOperation}; +use crate::{circuit::MemoryTag, memory::IVCMemory, memory::IVCMemoryAllocated}; +use ark_ff::AdditiveGroup as _; +use ark_r1cs_std::fields::fp::FpVar; +use ark_r1cs_std::prelude::Boolean; +use ark_relations::gr1cs::{ConstraintSystemRef, SynthesisError}; + +#[derive(Clone)] +pub struct HandlerSwitches { + pub read_interface: B, + pub read_head: B, + pub read_node: B, + pub write_node: B, + pub write_head: B, +} + +impl From<&HandlerSwitchboard> for HandlerSwitches { + fn from(s: &HandlerSwitchboard) -> Self { + Self { + read_interface: s.read_interface, + read_head: s.read_head, + read_node: s.read_node, + write_node: s.write_node, + write_head: s.write_head, + } + } +} + +impl From<&HandlerSwitchboardWires> for HandlerSwitches> { + fn from(s: &HandlerSwitchboardWires) -> Self { + Self { + read_interface: s.read_interface.clone(), + read_head: s.read_head.clone(), + read_node: s.read_node.clone(), + write_node: s.write_node.clone(), + write_head: s.write_head.clone(), + } + } +} + +pub struct HandlerStackReads { + pub interface_rom_read: [V; 4], + pub handler_stack_node_process: V, +} + +fn handler_stack_ops( + dsl: &mut D, + switches: &HandlerSwitches, + interface_index: &D::Val, + handler_stack_counter: &D::Val, + id_curr: &D::Val, +) -> Result, D::Error> { + let four = 
dsl.const_u64(4)?; + let interface_base_addr = dsl.mul(interface_index, &four)?; + let read_interface_limb = |dsl: &mut D, limb: u64| -> Result { + let offset = dsl.const_u64(limb)?; + let addr = dsl.add(&interface_base_addr, &offset)?; + dsl.read(&switches.read_interface, MemoryTag::Interfaces, &addr) + }; + let interface_rom_read = [ + read_interface_limb(dsl, 0)?, + read_interface_limb(dsl, 1)?, + read_interface_limb(dsl, 2)?, + read_interface_limb(dsl, 3)?, + ]; + let handler_stack_head_read = dsl.read( + &switches.read_head, + MemoryTag::HandlerStackHeads, + interface_index, + )?; + let handler_stack_node_process = dsl.read( + &switches.read_node, + MemoryTag::HandlerStackArenaProcess, + &handler_stack_head_read, + )?; + let handler_stack_node_next = dsl.read( + &switches.read_node, + MemoryTag::HandlerStackArenaNextPtr, + &handler_stack_head_read, + )?; + + let zero = dsl.zero(); + let node_process = dsl.select(&switches.write_node, id_curr, &zero)?; + dsl.write( + &switches.write_node, + MemoryTag::HandlerStackArenaProcess, + handler_stack_counter, + &node_process, + )?; + + let node_next = dsl.select(&switches.write_node, &handler_stack_head_read, &zero)?; + dsl.write( + &switches.write_node, + MemoryTag::HandlerStackArenaNextPtr, + handler_stack_counter, + &node_next, + )?; + + let head_val = dsl.select( + &switches.write_node, + handler_stack_counter, + &handler_stack_node_next, + )?; + dsl.write( + &switches.write_head, + MemoryTag::HandlerStackHeads, + interface_index, + &head_val, + )?; + + Ok(HandlerStackReads { + interface_rom_read, + handler_stack_node_process, + }) +} + +pub fn trace_handler_stack_ops>( + mb: &mut M, + switches: &HandlerSwitchboard, + interface_index: &F, + handler_stack_counter: &F, + id_curr: &F, +) -> HandlerStackReads { + let mut dsl = OpcodeTraceDsl { mb }; + let switches = HandlerSwitches::from(switches); + handler_stack_ops( + &mut dsl, + &switches, + interface_index, + handler_stack_counter, + id_curr, + ) + .expect("trace handler stack ops") +} + +pub fn handler_stack_access_wires>( + cs: ConstraintSystemRef, + rm: &mut M, + switches: &HandlerSwitchboardWires, + interface_index: &ark_r1cs_std::fields::fp::FpVar, + handler_stack_counter: &ark_r1cs_std::fields::fp::FpVar, + id_curr: &ark_r1cs_std::fields::fp::FpVar, +) -> Result>, SynthesisError> { + let mut dsl = OpcodeSynthDsl { cs, rm }; + let switches = HandlerSwitches::from(switches); + handler_stack_ops( + &mut dsl, + &switches, + interface_index, + handler_stack_counter, + id_curr, + ) +} + +#[derive(Debug, Clone)] +pub(crate) struct InterfaceResolver { + mapping: BTreeMap<[F; 4], usize>, +} + +impl InterfaceResolver { + pub(crate) fn new(ops: &[LedgerOperation]) -> Self { + let mut unique_interfaces = BTreeSet::new(); + for op in ops.iter() { + match op { + LedgerOperation::InstallHandler { interface_id } => { + unique_interfaces.insert(*interface_id); + } + LedgerOperation::UninstallHandler { interface_id } => { + unique_interfaces.insert(*interface_id); + } + LedgerOperation::GetHandlerFor { interface_id, .. } => { + unique_interfaces.insert(*interface_id); + } + LedgerOperation::CallEffectHandler { interface_id, .. 
} => { + unique_interfaces.insert(*interface_id); + } + _ => (), + } + } + + let mapping = unique_interfaces + .iter() + .enumerate() + .map(|(index, interface_id)| (*interface_id, index)) + .collect(); + + Self { mapping } + } + + pub(crate) fn get_index(&self, interface_id: [F; 4]) -> usize { + *self.mapping.get(&interface_id).unwrap_or(&0) + } + + pub(crate) fn get_interface_index_field(&self, interface_id: [F; 4]) -> F { + F::from(self.get_index(interface_id) as u64) + } + + pub(crate) fn interfaces(&self) -> Vec<[F; 4]> { + let mut interfaces = vec![[F::ZERO; 4]; self.mapping.len()]; + for (interface_id, index) in &self.mapping { + interfaces[*index] = *interface_id; + } + interfaces + } +} + +#[derive(Clone)] +pub(crate) struct HandlerState { + pub(crate) handler_stack_node_process: FpVar, + pub(crate) interface_rom_read: [FpVar; 4], +} diff --git a/interleaving/starstream-interleaving-proof/src/ledger_operation.rs b/interleaving/starstream-interleaving-proof/src/ledger_operation.rs new file mode 100644 index 00000000..74183c32 --- /dev/null +++ b/interleaving/starstream-interleaving-proof/src/ledger_operation.rs @@ -0,0 +1,98 @@ +use crate::optional::OptionalF; +use ark_ff::PrimeField; + +pub const REF_PUSH_BATCH_SIZE: usize = 4; +pub const REF_GET_BATCH_SIZE: usize = 4; +pub const REF_WRITE_BATCH_SIZE: usize = 4; + +#[derive(Debug, Clone)] +pub enum LedgerOperation { + /// A call to starstream_resume. + /// + /// This stores the input and outputs in memory, and sets the + /// current_program for the next iteration to `utxo_id`. + /// + /// Then, when evaluating Yield and YieldResume, we match the input/output + /// with the corresponding value. + Resume { + target: F, + val: F, + ret: F, + caller: OptionalF, + }, + CallEffectHandler { + interface_id: [F; 4], + val: F, + ret: F, + }, + /// Called by utxo to yield. + /// + Yield { + val: F, + }, + Return {}, + ProgramHash { + target: F, + program_hash: [F; 4], + }, + NewUtxo { + program_hash: [F; 4], + val: F, + target: F, + }, + NewCoord { + program_hash: [F; 4], + val: F, + target: F, + }, + Burn { + ret: F, + }, + Activation { + val: F, + caller: F, + }, + Init { + val: F, + caller: F, + }, + Bind { + owner_id: F, + }, + Unbind { + token_id: F, + }, + + NewRef { + size: F, + ret: F, + }, + RefPush { + vals: [F; REF_PUSH_BATCH_SIZE], + }, + RefGet { + reff: F, + offset: F, + ret: [F; REF_GET_BATCH_SIZE], + }, + RefWrite { + reff: F, + offset: F, + vals: [F; REF_WRITE_BATCH_SIZE], + }, + InstallHandler { + interface_id: [F; 4], + }, + UninstallHandler { + interface_id: [F; 4], + }, + GetHandlerFor { + interface_id: [F; 4], + handler_id: F, + }, + /// Auxiliary instructions. + /// + /// Nop is used as a dummy instruction to build the circuit layout on the + /// verifier side. 
+ Nop {}, +} diff --git a/interleaving/starstream-interleaving-proof/src/lib.rs b/interleaving/starstream-interleaving-proof/src/lib.rs new file mode 100644 index 00000000..70744130 --- /dev/null +++ b/interleaving/starstream-interleaving-proof/src/lib.rs @@ -0,0 +1,306 @@ +mod abi; +mod circuit; +#[cfg(test)] +mod circuit_test; +mod coroutine_args_gadget; +mod execution_switches; +mod handler_stack_gadget; +mod ledger_operation; +mod logging; +mod memory; +mod memory_tags; +mod neo; +mod opcode_dsl; +mod optional; +mod program_hash_gadget; +mod program_state; +mod ref_arena_gadget; +mod switchboard; + +use crate::circuit::{InterRoundWires, IvcWireLayout}; +use crate::memory::IVCMemory; +use crate::memory::twist_and_shout::{TSMemLayouts, TSMemory}; +use crate::neo::{CHUNK_SIZE, StarstreamVm, StepCircuitNeo}; +use abi::ledger_operation_from_wit; +use ark_relations::gr1cs::{ConstraintSystem, ConstraintSystemRef, SynthesisError}; +use circuit::StepCircuitBuilder; +pub use memory::nebula; +use neo_ajtai::AjtaiSModule; +use neo_fold::pi_ccs::FoldingMode; +use neo_fold::session::{FoldingSession, preprocess_shared_bus_r1cs}; +use neo_fold::shard::StepLinkingConfig; +use neo_params::NeoParams; +pub use optional::{OptionalF, OptionalFpVar}; +use rand::SeedableRng as _; +use starstream_interleaving_spec::{ + InterleavingInstance, InterleavingWitness, ProcessId, ZkTransactionProof, +}; +use std::collections::BTreeMap; +use std::sync::Arc; +use std::time::Instant; + +pub type F = ark_goldilocks::FpGoldilocks; + +pub type ProgramId = F; + +pub use abi::commit; +pub use ledger_operation::LedgerOperation; + +pub fn prove( + inst: InterleavingInstance, + wit: InterleavingWitness, +) -> Result { + logging::setup_logger(); + + let output_binding_config = inst.output_binding_config(); + + // Map all the disjoint per-process trace vectors into a single interleaved + // list, which is simpler to reason about for IVC.
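As a miniature, standalone illustration of that flattening (a hypothetical Step enum rather than the real WitLedgerEffect, and without the bookkeeping that the real make_interleaved_trace below does for burns, yield targets, and length checks): the scheduler consumes the trace of whichever process currently has control and switches traces on every control transfer.

use std::collections::BTreeMap;

#[derive(Clone, Copy)]
enum Step {
    Resume { target: usize },
    InstallHandler { iface: u64 },
    CallEffectHandler { iface: u64 },
    Yield,
    Return,
}

fn interleave(traces: &[Vec<Step>], entrypoint: usize) -> Vec<(usize, Step)> {
    let mut ops = Vec::new();
    let mut cursor = vec![0usize; traces.len()];
    let mut parent: Vec<Option<usize>> = vec![None; traces.len()];
    let mut handlers: BTreeMap<u64, Vec<usize>> = BTreeMap::new();
    let mut current = entrypoint;
    loop {
        let Some(step) = traces[current].get(cursor[current]).copied() else { break };
        cursor[current] += 1;
        ops.push((current, step));
        match step {
            Step::Resume { target } => {
                parent[target] = Some(current);
                current = target;
            }
            Step::InstallHandler { iface } => handlers.entry(iface).or_default().push(current),
            Step::CallEffectHandler { iface } => {
                // The effect is routed to the most recently installed handler.
                current = *handlers.get(&iface).and_then(|s| s.last()).expect("no handler");
            }
            Step::Yield | Step::Return => match parent[current] {
                Some(p) => current = p,
                None => break,
            },
        }
    }
    ops
}

fn main() {
    let utxo = vec![Step::CallEffectHandler { iface: 100 }, Step::Yield];
    let coord = vec![
        Step::InstallHandler { iface: 100 },
        Step::Resume { target: 0 },
        Step::Resume { target: 0 },
        Step::Return,
    ];
    // Every step of both traces ends up exactly once in the flat, interleaved list.
    assert_eq!(interleave(&[utxo, coord], 1).len(), 6);
}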
+ let mut ops = make_interleaved_trace(&inst, &wit); + + ops.resize( + ops.len().next_multiple_of(CHUNK_SIZE), + crate::LedgerOperation::Nop {}, + ); + + let max_steps = ops.len(); + + tracing::info!("making proof, steps {}", ops.len()); + + let mut circuit_builder = StepCircuitBuilder::>::new(inst, ops); + + let mb = circuit_builder.trace_memory_ops(()); + + let circuit = Arc::new(StepCircuitNeo::new(mb.init_tables())); + + let pre = { + let now = std::time::Instant::now(); + let pre = + preprocess_shared_bus_r1cs(Arc::clone(&circuit)).expect("preprocess_shared_bus_r1cs"); + tracing::info!( + "preprocess_shared_bus_r1cs took {}ms", + now.elapsed().as_millis() + ); + + pre + }; + + let m = pre.m(); + + // params copy-pasted from nightstream tests, this needs review + let base_params = NeoParams::goldilocks_auto_r1cs_ccs(m).expect("params"); + let params = NeoParams::new( + base_params.q, + base_params.eta, + base_params.d, + base_params.kappa, + base_params.m, + 4, // b + 16, // k_rho + base_params.T, + base_params.s, + base_params.lambda, + ) + .expect("params"); + + let committer = setup_ajtai_committer(m, params.kappa as usize); + let prover = pre + .into_prover(params, committer.clone()) + .expect("into_prover (R1csCpu shared-bus config)"); + + let mut session = FoldingSession::new(FoldingMode::Optimized, params, committer); + + session.set_step_linking(StepLinkingConfig::new(neo::ivc_step_linking_pairs())); + + let (constraints, shout, twist) = mb.split(); + + prover + .execute_into_session( + &mut session, + StarstreamVm::new(circuit_builder, constraints), + twist, + shout, + max_steps, + ) + .expect("execute_into_session should succeed"); + + let t_prove = Instant::now(); + let run = session + .fold_and_prove_with_output_binding_auto_simple(prover.ccs(), &output_binding_config) + .unwrap(); + tracing::info!("proof generated in {} ms", t_prove.elapsed().as_millis()); + + let status = session + .verify_with_output_binding_collected_simple(prover.ccs(), &run, &output_binding_config) + .unwrap(); + + assert!(status, "optimized verification should pass"); + + let mcss_public = session.mcss_public(); + let steps_public = session.steps_public(); + + let prover_output = ZkTransactionProof::NeoProof { + proof: run, + session, + ccs: prover.ccs().clone(), + mcss_public, + steps_public, + }; + + Ok(prover_output) +} + +fn setup_ajtai_committer(m: usize, kappa: usize) -> AjtaiSModule { + let mut rng = rand_chacha::ChaCha8Rng::seed_from_u64(42); + let pp = neo_ajtai::setup(&mut rng, neo_math::D, kappa, m).expect("Ajtai setup"); + AjtaiSModule::new(Arc::new(pp)) +} + +fn make_interleaved_trace( + inst: &InterleavingInstance, + wit: &InterleavingWitness, +) -> Vec> { + let mut ops = vec![]; + let mut id_curr = inst.entrypoint.0; + let mut id_prev: Option = None; + let mut next_op_idx = vec![0usize; inst.process_table.len()]; + let mut on_yield = vec![true; inst.process_table.len()]; + let mut yield_to: Vec> = vec![None; inst.process_table.len()]; + let mut handler_stack: BTreeMap<[u64; 4], Vec> = BTreeMap::new(); + + let expected_len: usize = wit.traces.iter().map(|t| t.len()).sum(); + + loop { + let c = next_op_idx[id_curr]; + + let Some(trace) = wit.traces.get(id_curr) else { + // No trace for this process, this indicates the end of the transaction + // as the entrypoint script has finished and not jumped to another process. + break; + }; + + if c >= trace.len() { + // We've reached the end of the current trace. This is the end. 
+ break; + } + + let instr = trace[c].clone(); + next_op_idx[id_curr] += 1; + + match instr { + starstream_interleaving_spec::WitLedgerEffect::Resume { target, .. } => { + if on_yield[target.0] && !inst.is_utxo[id_curr] { + yield_to[target.0] = Some(id_curr); + on_yield[target.0] = false; + } + id_prev = Some(id_curr); + id_curr = target.0; + } + starstream_interleaving_spec::WitLedgerEffect::InstallHandler { interface_id } => { + handler_stack + .entry(interface_id.0) + .or_default() + .push(id_curr); + } + starstream_interleaving_spec::WitLedgerEffect::UninstallHandler { interface_id } => { + let stack = handler_stack.entry(interface_id.0).or_default(); + + stack + .pop() + .expect("UninstallHandler with empty stack in interleaving trace"); + } + starstream_interleaving_spec::WitLedgerEffect::CallEffectHandler { + interface_id, + .. + } => { + let target = *handler_stack + .get(&interface_id.0) + .and_then(|stack| stack.last()) + .expect("CallEffectHandler with empty stack in interleaving trace"); + id_prev = Some(id_curr); + id_curr = target; + } + starstream_interleaving_spec::WitLedgerEffect::Yield { .. } => { + on_yield[id_curr] = true; + let Some(parent) = yield_to[id_curr] else { + break; + }; + let old_id_curr = id_curr; + id_curr = parent; + id_prev = Some(old_id_curr); + } + starstream_interleaving_spec::WitLedgerEffect::Return {} => { + if let Some(parent) = yield_to[id_curr] { + let old_id_curr = id_curr; + id_curr = parent; + id_prev = Some(old_id_curr); + } else if id_curr != inst.entrypoint.0 { + break; + } + } + starstream_interleaving_spec::WitLedgerEffect::Burn { .. } => { + let parent = id_prev.expect("Burn called without a parent process"); + let old_id_curr = id_curr; + id_curr = parent; + id_prev = Some(old_id_curr); + } + _ => {} + } + + let op = ledger_operation_from_wit(&instr); + + ops.push(op); + } + + assert_eq!( + ops.len(), + expected_len, + "interleaved trace doesn't match original length" + ); + + ops +} + +fn ccs_step_shape() -> Result<(ConstraintSystemRef, TSMemLayouts, IvcWireLayout), SynthesisError> +{ + let _span = tracing::info_span!("dummy circuit").entered(); + + tracing::debug!("constructing nop circuit to get initial (stable) ccs shape"); + + let cs = ConstraintSystem::new_ref(); + cs.set_optimization_goal(ark_relations::gr1cs::OptimizationGoal::Constraints); + + let hash = starstream_interleaving_spec::Hash([0u64; 4], std::marker::PhantomData); + + let inst = InterleavingInstance { + host_calls_roots: vec![], + process_table: vec![hash], + is_utxo: vec![false], + must_burn: vec![false], + n_inputs: 0, + n_new: 0, + n_coords: 1, + ownership_in: vec![None], + ownership_out: vec![None], + entrypoint: ProcessId(0), + input_states: vec![], + }; + + let mut dummy_tx = StepCircuitBuilder::>::new(inst, vec![LedgerOperation::Nop {}]); + + let mb = dummy_tx.trace_memory_ops(()); + + let irw = InterRoundWires::new(dummy_tx.instance.entrypoint.0 as u64); + + let mut running_mem = mb.constraints(); + + let (_irw, _mem, captured_layout) = + dummy_tx.make_step_circuit(0, &mut running_mem, cs.clone(), irw, true)?; + let ivc_layout = captured_layout.expect("ivc layout requested"); + + cs.finalize(); + + let mem_spec = running_mem.ts_mem_layouts(); + + Ok((cs, mem_spec, ivc_layout)) +} diff --git a/interleaving/starstream-interleaving-proof/src/logging.rs b/interleaving/starstream-interleaving-proof/src/logging.rs new file mode 100644 index 00000000..befbb805 --- /dev/null +++ b/interleaving/starstream-interleaving-proof/src/logging.rs @@ -0,0 +1,25 @@ +use 
ark_relations::gr1cs::{ConstraintLayer, TracingMode}; +use tracing_subscriber::Layer; +use tracing_subscriber::{EnvFilter, Registry, fmt, layer::SubscriberExt as _}; + +pub(crate) fn setup_logger() { + static INIT: std::sync::Once = std::sync::Once::new(); + + INIT.call_once(|| { + let constraint_layer = ConstraintLayer::new(TracingMode::All); + let env_filter = EnvFilter::try_from_default_env() + .unwrap_or_else(|_| EnvFilter::new("starstream_interleaving_proof=info,g1rcs=off")); + + let fmt_layer = if cfg!(test) { + fmt::layer().with_test_writer().boxed() + } else { + fmt::layer().boxed() + } + .with_filter(env_filter); + + let subscriber = Registry::default().with(fmt_layer).with(constraint_layer); + + tracing::subscriber::set_global_default(subscriber) + .expect("Failed to set global default subscriber"); + }); +} diff --git a/interleaving/starstream-interleaving-proof/src/memory/dummy.rs b/interleaving/starstream-interleaving-proof/src/memory/dummy.rs new file mode 100644 index 00000000..96bc3ba7 --- /dev/null +++ b/interleaving/starstream-interleaving-proof/src/memory/dummy.rs @@ -0,0 +1,217 @@ +use super::Address; +use super::IVCMemory; +use super::IVCMemoryAllocated; +use crate::memory::AllocatedAddress; +use crate::memory::MemType; +use crate::memory::twist_and_shout::Lanes; +use ark_ff::PrimeField; +use ark_r1cs_std::GR1CSVar as _; +use ark_r1cs_std::alloc::AllocVar as _; +use ark_r1cs_std::fields::fp::FpVar; +use ark_r1cs_std::prelude::Boolean; +use ark_relations::gr1cs::ConstraintSystemRef; +use ark_relations::gr1cs::SynthesisError; +use std::collections::BTreeMap; +use std::collections::VecDeque; +use std::marker::PhantomData; + +#[allow(dead_code)] +pub struct DummyMemory { + pub(crate) phantom: PhantomData, + pub(crate) reads: BTreeMap, VecDeque>>, + pub(crate) writes: BTreeMap, VecDeque>>, + pub(crate) init: BTreeMap, Vec>, + + pub(crate) mems: BTreeMap, +} + +impl IVCMemory for DummyMemory { + type Allocator = DummyMemoryConstraints; + type Params = (); + + fn new(_params: Self::Params) -> Self { + DummyMemory { + phantom: PhantomData, + reads: BTreeMap::default(), + writes: BTreeMap::default(), + init: BTreeMap::default(), + mems: BTreeMap::default(), + } + } + + fn register_mem_with_lanes( + &mut self, + tag: u64, + size: u64, + _mem_type: MemType, + _extra_info: Lanes, + debug_name: &'static str, + ) { + self.mems.insert(tag, (size, debug_name)); + } + + fn init(&mut self, address: Address, values: Vec) { + self.init.insert(address, values.clone()); + } + + fn conditional_read(&mut self, cond: bool, address: Address) -> Vec { + let reads = self.reads.entry(address.clone()).or_default(); + + if cond { + let last = self + .writes + .get(&address) + .and_then(|writes| writes.back().cloned()) + .unwrap_or_else(|| self.init.get(&address).unwrap().clone()); + + reads.push_back(last.clone()); + + last + } else { + let mem_value_size = self.mems.get(&address.tag).unwrap().0; + std::iter::repeat_n(F::from(0), mem_value_size as usize).collect() + } + } + + fn conditional_write(&mut self, cond: bool, address: Address, values: Vec) { + assert_eq!( + self.mems.get(&address.tag).unwrap().0 as usize, + values.len(), + "write doesn't match mem value size" + ); + + if cond { + self.writes.entry(address).or_default().push_back(values); + } + } + + fn finish_step(&mut self) {} + + fn required_steps(&self) -> usize { + 0 + } + + fn constraints(self) -> Self::Allocator { + DummyMemoryConstraints { + cs: None, + reads: self.reads, + writes: self.writes, + mems: self.mems, + } + } +} + 
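The behaviour DummyMemory traces above can be summarised by a small self-contained model (plain u64 values instead of field elements, hypothetical names): an enabled read observes the most recent write to that address, falling back to the initial value, while a disabled access leaves the trace untouched but still yields a zero-padded value of the segment's size, so the circuit shape stays the same on both branches.

use std::collections::BTreeMap;

struct TraceMem {
    size: usize,
    init: BTreeMap<u64, Vec<u64>>,
    writes: BTreeMap<u64, Vec<Vec<u64>>>,
}

impl TraceMem {
    fn read(&self, cond: bool, addr: u64) -> Vec<u64> {
        if !cond {
            // Gated off: no trace entry, but a value of the right width.
            return vec![0; self.size];
        }
        self.writes
            .get(&addr)
            .and_then(|w| w.last().cloned())
            .or_else(|| self.init.get(&addr).cloned())
            .expect("read of uninitialised address")
    }

    fn write(&mut self, cond: bool, addr: u64, vals: Vec<u64>) {
        assert_eq!(vals.len(), self.size, "write doesn't match mem value size");
        if cond {
            self.writes.entry(addr).or_default().push(vals);
        }
    }
}

fn main() {
    let mut m = TraceMem { size: 2, init: BTreeMap::new(), writes: BTreeMap::new() };
    m.init.insert(10, vec![42, 100]);
    assert_eq!(m.read(true, 10), vec![42, 100]);
    m.write(true, 10, vec![123, 456]);
    m.write(false, 10, vec![0, 0]); // gated off: leaves the trace untouched
    assert_eq!(m.read(false, 10), vec![0, 0]);
    assert_eq!(m.read(true, 10), vec![123, 456]);
}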
+#[allow(dead_code)] +pub struct DummyMemoryConstraints { + pub(crate) cs: Option>, + pub(crate) reads: BTreeMap, VecDeque>>, + pub(crate) writes: BTreeMap, VecDeque>>, + + pub(crate) mems: BTreeMap, +} + +impl IVCMemoryAllocated for DummyMemoryConstraints { + type FinishStepPayload = (); + + fn start_step(&mut self, cs: ConstraintSystemRef) -> Result<(), SynthesisError> { + self.cs.replace(cs); + + Ok(()) + } + + fn finish_step(&mut self, _is_last_step: bool) -> Result<(), SynthesisError> { + self.cs = None; + Ok(()) + } + + fn get_cs(&self) -> ConstraintSystemRef { + self.cs.as_ref().unwrap().clone() + } + + fn conditional_read( + &mut self, + cond: &Boolean, + address: &AllocatedAddress, + ) -> Result>, SynthesisError> { + let _guard = tracing::debug_span!("conditional_read").entered(); + + let mem = self.mems.get(&address.tag_value()).copied().unwrap(); + + if cond.value().unwrap() { + let address = Address { + addr: address.address_value(), + tag: address.tag_value(), + }; + + let vals = self.reads.get_mut(&address).unwrap(); + + let v = vals.pop_front().unwrap().clone(); + + let vals = v + .into_iter() + .map(|v| FpVar::new_witness(self.get_cs(), || Ok(v)).unwrap()) + .collect::>(); + + tracing::debug!( + "read {:?} at address {} in segment {}", + vals.iter() + .map(|v| v.value().unwrap().into_bigint()) + .collect::>(), + address.addr, + mem.1, + ); + + Ok(vals) + } else { + let vals = std::iter::repeat_with(|| { + FpVar::new_witness(self.get_cs(), || Ok(F::from(0))).unwrap() + }) + .take(mem.0 as usize); + + Ok(vals.collect()) + } + } + + fn conditional_write( + &mut self, + cond: &Boolean, + address: &AllocatedAddress, + vals: &[FpVar], + ) -> Result<(), SynthesisError> { + let _guard = tracing::debug_span!("conditional_write").entered(); + + if cond.value().unwrap() { + let address = Address { + addr: address.address_value(), + tag: address.tag_value(), + }; + + let writes = self.writes.get_mut(&address).unwrap(); + + let expected_vals = writes.pop_front().unwrap().clone(); + + for ((_, val), expected) in vals.iter().enumerate().zip(expected_vals.iter()) { + assert_eq!(val.value().unwrap(), *expected); + } + + let mem = self.mems.get(&address.tag).copied().unwrap(); + + assert_eq!( + mem.0 as usize, + vals.len(), + "write doesn't match mem value size" + ); + + tracing::debug!( + "write values {:?} at address {} in segment {}", + vals.iter() + .map(|v| v.value().unwrap().into_bigint()) + .collect::>(), + address.addr, + mem.1, + ); + } + + Ok(()) + } +} diff --git a/interleaving/starstream-interleaving-proof/src/memory/mod.rs b/interleaving/starstream-interleaving-proof/src/memory/mod.rs new file mode 100644 index 00000000..083121a8 --- /dev/null +++ b/interleaving/starstream-interleaving-proof/src/memory/mod.rs @@ -0,0 +1,117 @@ +use crate::F; +use ark_ff::PrimeField; +use ark_r1cs_std::{GR1CSVar as _, alloc::AllocVar, fields::fp::FpVar, prelude::Boolean}; +use ark_relations::gr1cs::{ConstraintSystemRef, SynthesisError}; +use std::fmt; + +pub mod dummy; +pub mod nebula; +pub mod twist_and_shout; + +#[derive(PartialOrd, Ord, PartialEq, Eq, Debug, Clone)] +pub struct Address { + pub tag: T, + pub addr: A, +} + +pub type AllocatedAddress = Address, FpVar>; + +impl Address { + pub(crate) fn allocate( + &self, + cs: ConstraintSystemRef, + ) -> Result { + Ok(Address { + addr: FpVar::new_witness(cs.clone(), || Ok(F::from(self.addr)))?, + tag: FpVar::new_witness(cs, || Ok(F::from(self.tag)))?, + }) + } +} + +impl AllocatedAddress { + pub fn address_value(&self) -> u64 { + 
self.addr.value().unwrap().into_bigint().as_ref()[0] + } + + pub fn tag_value(&self) -> u64 { + self.tag.value().unwrap().into_bigint().as_ref()[0] + } + + pub fn values(&self) -> Address { + Address { + tag: self.tag_value(), + addr: self.address_value(), + } + } +} + +#[derive(Clone, Copy, PartialEq, PartialOrd, Debug)] +pub enum MemType { + Rom, + Ram, +} + +impl fmt::Display for MemType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + MemType::Rom => write!(f, "ROM"), + MemType::Ram => write!(f, "RAM"), + } + } +} + +pub trait IVCMemory { + type Allocator: IVCMemoryAllocated; + type Params; + + fn new(info: Self::Params) -> Self; + + fn register_mem(&mut self, tag: u64, size: u64, mem_type: MemType, debug_name: &'static str) { + self.register_mem_with_lanes(tag, size, mem_type, Default::default(), debug_name); + } + + fn register_mem_with_lanes( + &mut self, + tag: u64, + size: u64, + mem_type: MemType, + extra_info: twist_and_shout::Lanes, + debug_name: &'static str, + ); + + fn init(&mut self, address: Address, values: Vec); + + fn conditional_read(&mut self, cond: bool, address: Address) -> Vec; + + fn conditional_write(&mut self, cond: bool, address: Address, value: Vec); + + fn finish_step(&mut self); + + fn required_steps(&self) -> usize; + + fn constraints(self) -> Self::Allocator; +} + +pub trait IVCMemoryAllocated { + type FinishStepPayload; + + fn get_cs(&self) -> ConstraintSystemRef; + fn start_step(&mut self, cs: ConstraintSystemRef) -> Result<(), SynthesisError>; + fn finish_step( + &mut self, + is_last_step: bool, + ) -> Result; + + fn conditional_read( + &mut self, + cond: &Boolean, + address: &AllocatedAddress, + ) -> Result>, SynthesisError>; + + fn conditional_write( + &mut self, + cond: &Boolean, + address: &AllocatedAddress, + vals: &[FpVar], + ) -> Result<(), SynthesisError>; +} diff --git a/interleaving/starstream-interleaving-proof/src/memory/nebula/gadget.rs b/interleaving/starstream-interleaving-proof/src/memory/nebula/gadget.rs new file mode 100644 index 00000000..42ff7e36 --- /dev/null +++ b/interleaving/starstream-interleaving-proof/src/memory/nebula/gadget.rs @@ -0,0 +1,583 @@ +use super::Address; +use super::ic::{IC, ICPlain}; +use super::{MemOp, MemOpAllocated}; +use crate::F; +use crate::memory::nebula::tracer::NebulaMemoryParams; +use crate::memory::{AllocatedAddress, IVCMemoryAllocated}; +use ark_ff::Field; +use ark_ff::PrimeField; +use ark_r1cs_std::GR1CSVar as _; +use ark_r1cs_std::alloc::AllocVar as _; +use ark_r1cs_std::eq::EqGadget; +use ark_r1cs_std::fields::fp::FpVar; +use ark_r1cs_std::prelude::Boolean; +use ark_relations::gr1cs::ConstraintSystemRef; +use ark_relations::gr1cs::SynthesisError; +use std::collections::BTreeMap; +use std::collections::VecDeque; +use std::iter::repeat_with; + +pub struct NebulaMemoryConstraints { + pub(crate) cs: Option>, + pub(crate) reads: BTreeMap, VecDeque>>, + pub(crate) writes: BTreeMap, VecDeque>>, + + pub(crate) fs: BTreeMap, MemOp>, + pub(crate) is: BTreeMap, MemOp>, + + pub(crate) mems: BTreeMap, + + pub(crate) ic_rs_ws: ICPlain, + pub(crate) ic_is_fs: ICPlain, + + pub(crate) step_ic_rs_ws: Option, + pub(crate) step_ic_is_fs: Option, + + pub(crate) expected_rw_ws: ICPlain, + pub(crate) expected_is_fs: ICPlain, + + pub(crate) ts: F, + pub(crate) step_ts: Option>, + + pub(crate) current_step: usize, + pub(crate) params: NebulaMemoryParams, + pub(crate) scan_batch_size: usize, + + pub(crate) c0: F, + pub(crate) c0_wire: Option>, + pub(crate) c1: F, + pub(crate) c1_wire: Option>, 
+ pub(crate) c1_powers_cache: Option>>, + + pub(crate) multiset_fingerprints: FingerPrintPreWires, + pub(crate) fingerprint_wires: Option, + + pub(crate) scan_monotonic_last_addr: Option>, + pub(crate) scan_monotonic_last_addr_wires: Option, + + pub(crate) debug_sets: Multisets, +} + +#[derive(Default)] +pub struct Multisets { + is: BTreeMap, MemOp>, + fs: BTreeMap, MemOp>, + rs: BTreeMap, MemOp>, + ws: BTreeMap, MemOp>, +} + +impl std::fmt::Debug for Multisets { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let format_set = |set: &BTreeMap, MemOp>| { + let entries: Vec = set + .iter() + .map(|(addr, op)| format!("({}, {:?}, {})", addr.addr, op.values, op.timestamp)) + .collect(); + format!("[{}]", entries.join(", ")) + }; + + writeln!(f, "\n")?; + + writeln!(f, "is: {}", format_set(&self.is))?; + writeln!(f, "fs: {}", format_set(&self.fs))?; + writeln!(f, "rs: {}", format_set(&self.rs))?; + write!(f, "ws: {}", format_set(&self.ws)) + } +} + +pub struct FingerPrintPreWires { + pub is: F, + pub fs: F, + pub rs: F, + pub ws: F, +} + +impl FingerPrintPreWires { + fn allocate(&self, cs: ConstraintSystemRef) -> Result { + Ok(FingerPrintWires { + is: FpVar::new_witness(cs.clone(), || Ok(self.is))?, + fs: FpVar::new_witness(cs.clone(), || Ok(self.fs))?, + rs: FpVar::new_witness(cs.clone(), || Ok(self.rs))?, + ws: FpVar::new_witness(cs.clone(), || Ok(self.ws))?, + }) + } + + fn check(&self) -> bool { + let result = self.is * self.ws == self.fs * self.rs; + + if !result { + tracing::error!( + "multiset safety check failed: is={:?}, ws={:?}, fs={:?}, rs={:?}", + self.is, + self.ws, + self.fs, + self.rs + ); + } + + result + } +} + +pub struct FingerPrintWires { + pub is: FpVar, + pub fs: FpVar, + pub rs: FpVar, + pub ws: FpVar, +} + +impl FingerPrintWires { + fn values(&self) -> Result { + Ok(FingerPrintPreWires { + is: self.is.value()?, + fs: self.fs.value()?, + rs: self.rs.value()?, + ws: self.ws.value()?, + }) + } +} + +impl IVCMemoryAllocated for NebulaMemoryConstraints { + type FinishStepPayload = (); + + #[tracing::instrument(target = "gr1cs", skip(self, cs))] + fn start_step(&mut self, cs: ConstraintSystemRef) -> Result<(), SynthesisError> { + self.cs.replace(cs.clone()); + + self.step_ic_rs_ws + .replace(self.ic_rs_ws.allocate(cs.clone())?); + + self.step_ts + .replace(FpVar::new_witness(cs.clone(), || Ok(self.ts))?); + + self.step_ic_is_fs + .replace(self.ic_is_fs.allocate(cs.clone())?); + + self.fingerprint_wires + .replace(self.multiset_fingerprints.allocate(cs.clone())?); + + self.c0_wire + .replace(FpVar::new_witness(cs.clone(), || Ok(self.c0))?); + + self.c1_wire + .replace(FpVar::new_witness(cs.clone(), || Ok(self.c1))?); + + // Precompute and cache c1 powers + let max_segment_size = self.max_segment_size() as usize; + let c1_wire = self.c1_wire.as_ref().unwrap(); + let mut c1_powers = Vec::with_capacity(max_segment_size); + let mut c1_p = c1_wire.clone(); + for _ in 0..max_segment_size { + c1_p *= c1_wire; + c1_powers.push(c1_p.clone()); + } + self.c1_powers_cache = Some(c1_powers); + + self.scan_monotonic_last_addr_wires.replace( + self.scan_monotonic_last_addr + .clone() + .unwrap_or(Address { addr: 0, tag: 0 }) + .allocate(cs.clone())?, + ); + + self.scan(cs)?; + + Ok(()) + } + + #[tracing::instrument(target = "gr1cs", skip(self))] + fn finish_step(&mut self, is_last_step: bool) -> Result<(), SynthesisError> { + self.cs = None; + self.c1_powers_cache = None; + + self.current_step += 1; + + self.ic_rs_ws = self.step_ic_rs_ws.take().unwrap().values(); + 
self.ic_is_fs = self.step_ic_is_fs.take().unwrap().values(); + + self.multiset_fingerprints = self.fingerprint_wires.take().unwrap().values()?; + + self.scan_monotonic_last_addr + .replace(self.scan_monotonic_last_addr_wires.take().unwrap().values()); + + if is_last_step { + assert!( + self.ic_rs_ws + .comm + .iter() + .zip(self.expected_rw_ws.comm.iter()) + .all(|(x, y)| x == y) + ); + + assert!( + self.ic_is_fs + .comm + .iter() + .zip(self.expected_is_fs.comm.iter()) + .all(|(x, y)| x == y) + ); + + for ops in self.reads.values() { + assert!(ops.is_empty()); + } + + for ops in self.writes.values() { + assert!(ops.is_empty()); + } + + if !self.multiset_fingerprints.check() { + dbg!(&self.debug_sets); + tracing::debug!(sets=?self.debug_sets); + panic!("sanity check of multisets failed"); + } + } + + Ok(()) + } + + fn get_cs(&self) -> ConstraintSystemRef { + self.cs.as_ref().unwrap().clone() + } + + #[tracing::instrument(target = "gr1cs", skip(self, cond, address))] + fn conditional_read( + &mut self, + cond: &Boolean, + address: &AllocatedAddress, + ) -> Result>, SynthesisError> { + let _guard = tracing::debug_span!("nebula_conditional_read").entered(); + + // ts <- ts + 1 + self.inc_ts(cond)?; + + let mem = self.get_mem_info(address); + let address_val = self.get_address_val(address); + + let rv = self.get_read_op(cond, &address_val, mem.0)?; + let wv = self.get_write_op(cond, &address_val, mem.0)?; + + self.update_ic_with_ops(cond, address, &rv, &wv)?; + + tracing::debug!( + "nebula {} read {:?} at address {} in segment {}", + cond.value()?, + rv.values + .iter() + .map(|v| v.value().unwrap().into_bigint()) + .collect::>(), + address_val.addr, + mem.1, + ); + + Ok(rv.values) + } + + #[tracing::instrument(target = "gr1cs", skip(self, cond, address, vals))] + fn conditional_write( + &mut self, + cond: &Boolean, + address: &AllocatedAddress, + vals: &[FpVar], + ) -> Result<(), SynthesisError> { + let _guard = tracing::debug_span!("nebula_conditional_write").entered(); + + let mem = self.get_mem_info(address); + + assert_eq!( + mem.0 as usize, + vals.len(), + "write doesn't match mem value size" + ); + + // ts <- ts + 1 + self.inc_ts(cond)?; + + let address_val = self.get_address_val(address); + + let rv = self.get_read_op(cond, &address_val, mem.0)?; + let wv = self.get_write_op(cond, &address_val, mem.0)?; + + self.update_ic_with_ops(cond, address, &rv, &wv)?; + + tracing::debug!( + "nebula ({}) write values {:?} at address {} in segment {}", + cond.value()?, + vals.iter() + .map(|v| v.value().unwrap().into_bigint()) + .collect::>(), + address_val.addr, + mem.1, + ); + + for ((index, val), expected) in vals.iter().enumerate().zip(wv.values.iter()) { + assert_eq!( + val.value().unwrap(), + expected.value().unwrap(), + "write doesn't match expectation at index {index}." 
+ ); + } + + Ok(()) + } +} + +impl NebulaMemoryConstraints { + fn inc_ts(&mut self, cond: &Boolean) -> Result<(), SynthesisError> { + let ts = self.step_ts.as_mut().unwrap(); + let ts_plus_one = &*ts + FpVar::Constant(F::from(1)); + *ts = cond.select(&ts_plus_one, ts)?; + Ok(()) + } + + fn get_address_val(&self, address: &AllocatedAddress) -> Address { + Address { + addr: address.address_value(), + tag: address.tag_value(), + } + } + + fn get_mem_info(&self, address: &AllocatedAddress) -> (u64, &'static str) { + self.mems.get(&address.tag_value()).copied().unwrap() + } + + fn get_read_op( + &mut self, + cond: &Boolean, + address_val: &Address, + mem_size: u64, + ) -> Result, SynthesisError> { + let cs = self.get_cs(); + + if cond.value()? { + let a_reads = self.reads.get_mut(address_val).unwrap(); + a_reads + .pop_front() + .expect("no entry in read set") + .allocate(cs, mem_size as usize) + } else { + MemOp::padding().allocate(cs, mem_size as usize) + } + } + + fn get_write_op( + &mut self, + cond: &Boolean, + address_val: &Address, + mem_size: u64, + ) -> Result, SynthesisError> { + let cs = self.get_cs(); + + if cond.value()? { + let a_writes = self.writes.get_mut(address_val).unwrap(); + a_writes + .pop_front() + .expect("no entry in write set") + .allocate(cs, mem_size as usize) + } else { + MemOp::padding().allocate(cs, mem_size as usize) + } + } + + #[tracing::instrument(target = "gr1cs", skip_all)] + fn update_ic_with_ops( + &mut self, + cond: &Boolean, + address: &AllocatedAddress, + rv: &MemOpAllocated, + wv: &MemOpAllocated, + ) -> Result<(), SynthesisError> { + Self::hash_avt( + cond, + &mut self.fingerprint_wires.as_mut().unwrap().rs, + self.c0_wire.as_ref().unwrap(), + self.c1_powers_cache.as_ref().unwrap(), + address, + rv, + &mut self.debug_sets.rs, + )?; + + self.step_ic_rs_ws.as_mut().unwrap().increment( + address, + rv, + self.params.unsound_disable_poseidon_commitment, + )?; + + Self::hash_avt( + cond, + &mut self.fingerprint_wires.as_mut().unwrap().ws, + self.c0_wire.as_ref().unwrap(), + self.c1_powers_cache.as_ref().unwrap(), + address, + wv, + &mut self.debug_sets.ws, + )?; + + self.step_ic_rs_ws.as_mut().unwrap().increment( + address, + wv, + self.params.unsound_disable_poseidon_commitment, + )?; + + Ok(()) + } + + fn max_segment_size(&mut self) -> u64 { + let max_segment_size = self.mems.values().map(|(sz, _)| sz).max().unwrap(); + *max_segment_size + } + + fn scan(&mut self, cs: ConstraintSystemRef) -> Result<(), SynthesisError> { + let address_padding = Address { addr: 0, tag: 0 }; + let mem_padding = MemOp::padding(); + let max_segment_size = self.max_segment_size() as usize; + + let _: () = for (addr, is_v) in self + .is + .iter() + .skip(self.scan_batch_size * self.current_step) + .chain(std::iter::repeat((&address_padding, &mem_padding))) + // TODO: padding + .take(self.scan_batch_size) + { + let fs_v = self.fs.get(addr).unwrap_or(&mem_padding); + + let address = addr.allocate(cs.clone())?; + + // ensure commitment is monotonic + // so that it's not possible to insert an address twice + // + // we get disjoint ranges anyway because of the segments so we + // can have different memories with different sizes, but each + // segment is contiguous + let last_addr = self.scan_monotonic_last_addr_wires.as_mut().unwrap(); + + enforce_monotonic_commitment(&cs, &address, last_addr)?; + + *last_addr = address.clone(); + + let is_entry = is_v.allocate(cs.clone(), max_segment_size)?; + + self.step_ic_is_fs.as_mut().unwrap().increment( + &address, + &is_entry, + 
self.params.unsound_disable_poseidon_commitment, + )?; + + let fs_entry = fs_v.allocate(cs.clone(), max_segment_size)?; + + self.step_ic_is_fs.as_mut().unwrap().increment( + &address, + &fs_entry, + self.params.unsound_disable_poseidon_commitment, + )?; + + Self::hash_avt( + &Boolean::constant(true), + &mut self.fingerprint_wires.as_mut().unwrap().is, + self.c0_wire.as_ref().unwrap(), + self.c1_powers_cache.as_ref().unwrap(), + &address, + &is_entry, + &mut self.debug_sets.is, + )?; + + Self::hash_avt( + &Boolean::constant(true), + &mut self.fingerprint_wires.as_mut().unwrap().fs, + self.c0_wire.as_ref().unwrap(), + self.c1_powers_cache.as_ref().unwrap(), + &address, + &fs_entry, + &mut self.debug_sets.fs, + )?; + }; + Ok(()) + } + + fn hash_avt( + cond: &Boolean, + wire: &mut FpVar, + c0: &FpVar, + c1_powers: &[FpVar], + address: &AllocatedAddress, + vt: &MemOpAllocated, + debug_set: &mut BTreeMap, MemOp>, + ) -> Result<(), SynthesisError> { + let fingerprint = fingerprint_with_cached_powers( + c0, + c1_powers, + &vt.timestamp, + &address.addr, + vt.values.as_ref(), + )?; + + if cond.value()? { + debug_set.insert( + Address { + addr: address.addr.value()?, + tag: address.tag_value(), + }, + MemOp { + values: vt.debug_values(), + timestamp: vt.timestamp.value()?.into_bigint().as_ref()[0], + }, + ); + } + + *wire *= cond.select(&fingerprint, &FpVar::Constant(F::ONE))?; + + Ok(()) + } +} + +fn enforce_monotonic_commitment( + cs: &ConstraintSystemRef, + address: &Address, FpVar>, + last_addr: &mut Address, FpVar>, +) -> Result<(), SynthesisError> { + let same_segment = &address.tag.is_eq(&last_addr.tag)?; + + let next_segment = address + .tag + .is_eq(&(&last_addr.tag + FpVar::new_constant(cs.clone(), F::from(1))?))?; + + let is_padding = address + .tag + .is_eq(&FpVar::new_constant(cs.clone(), F::from(0))?)?; + + let segment_monotonic_constraint = same_segment | &next_segment | &is_padding; + + address.addr.conditional_enforce_equal( + &(&last_addr.addr + FpVar::new_constant(cs.clone(), F::from(1))?), + &(same_segment & !is_padding), + )?; + + segment_monotonic_constraint.enforce_equal(&Boolean::TRUE)?; + + Ok(()) +} + +fn fingerprint_with_cached_powers( + c0: &FpVar, + c1_powers: &[FpVar], + timestamp: &FpVar, + addr: &FpVar, + values: &[FpVar], +) -> Result, SynthesisError> { + let cs = c0.cs(); + + let mut x = timestamp + &c1_powers[0] * addr; + + for (v, c1_p) in values + .iter() + .cloned() + .chain(repeat_with(|| { + FpVar::new_witness(cs.clone(), || Ok(F::from(0))).unwrap() + })) + .zip(c1_powers.iter()) + { + x += v * c1_p; + } + + Ok(c0 - x) +} diff --git a/interleaving/starstream-interleaving-proof/src/memory/nebula/ic.rs b/interleaving/starstream-interleaving-proof/src/memory/nebula/ic.rs new file mode 100644 index 00000000..a7539457 --- /dev/null +++ b/interleaving/starstream-interleaving-proof/src/memory/nebula/ic.rs @@ -0,0 +1,134 @@ +use super::Address; +use super::MemOp; +use crate::F; +use crate::memory::AllocatedAddress; +use crate::nebula::MemOpAllocated; +use ark_ff::AdditiveGroup as _; +use ark_r1cs_std::GR1CSVar as _; +use ark_r1cs_std::alloc::AllocVar as _; +use ark_r1cs_std::fields::fp::FpVar; +use ark_relations::gr1cs::ConstraintSystemRef; +use ark_relations::gr1cs::SynthesisError; +use std::array; + +pub struct ICPlain { + pub comm: [F; 4], +} + +impl ICPlain { + pub fn zero() -> Self { + Self { comm: [F::ZERO; 4] } + } + + pub fn increment( + &mut self, + a: &Address, + vt: &MemOp, + unsound_make_nop: bool, + ) -> Result<(), SynthesisError> { + if !unsound_make_nop 
{ + let hash_input = array::from_fn(|i| { + if i == 0 { + F::from(a.addr) + } else if i == 1 { + F::from(a.tag) + } else if i == 2 { + F::from(vt.timestamp) + } else { + vt.values.get(i - 3).copied().unwrap_or(F::ZERO) + } + }); + + let hash_to_field = ark_poseidon2::compress_8_trace(&hash_input)?; + + let concat = array::from_fn(|i| { + if i < 4 { + hash_to_field[i] + } else { + self.comm[i - 4] + } + }); + + self.comm = ark_poseidon2::compress_8_trace(&concat)?; + } + + Ok(()) + } + + pub fn allocate(&self, cs: ConstraintSystemRef) -> Result { + Ok(IC { + comm: [ + FpVar::new_witness(cs.clone(), || Ok(self.comm[0]))?, + FpVar::new_witness(cs.clone(), || Ok(self.comm[1]))?, + FpVar::new_witness(cs.clone(), || Ok(self.comm[2]))?, + FpVar::new_witness(cs.clone(), || Ok(self.comm[3]))?, + ], + }) + } +} + +pub struct IC { + pub comm: [FpVar; 4], +} + +impl IC { + pub fn zero(cs: ConstraintSystemRef) -> Result { + Ok(IC { + comm: [ + FpVar::new_witness(cs.clone(), || Ok(F::from(0)))?, + FpVar::new_witness(cs.clone(), || Ok(F::from(0)))?, + FpVar::new_witness(cs.clone(), || Ok(F::from(0)))?, + FpVar::new_witness(cs.clone(), || Ok(F::from(0)))?, + ], + }) + } + + pub fn increment( + &mut self, + a: &AllocatedAddress, + vt: &MemOpAllocated, + unsound_make_nop: bool, + ) -> Result<(), SynthesisError> { + if !unsound_make_nop { + let cs = self.comm.cs(); + + let hash_to_field = ark_poseidon2::compress_8(&array::from_fn(|i| { + if i == 0 { + a.addr.clone() + } else if i == 1 { + a.tag.clone() + } else if i == 2 { + vt.timestamp.clone() + } else { + vt.values + .get(i - 3) + .cloned() + .unwrap_or_else(|| FpVar::new_witness(cs.clone(), || Ok(F::ZERO)).unwrap()) + } + }))?; + + let concat = array::from_fn(|i| { + if i < 4 { + hash_to_field[i].clone() + } else { + self.comm[i - 4].clone() + } + }); + + self.comm = ark_poseidon2::compress_8(&concat)?; + } + + Ok(()) + } + + pub fn values(&self) -> ICPlain { + ICPlain { + comm: [ + self.comm[0].value().unwrap(), + self.comm[1].value().unwrap(), + self.comm[2].value().unwrap(), + self.comm[3].value().unwrap(), + ], + } + } +} diff --git a/interleaving/starstream-interleaving-proof/src/memory/nebula/mod.rs b/interleaving/starstream-interleaving-proof/src/memory/nebula/mod.rs new file mode 100644 index 00000000..e036ad43 --- /dev/null +++ b/interleaving/starstream-interleaving-proof/src/memory/nebula/mod.rs @@ -0,0 +1,356 @@ +pub mod gadget; +pub mod ic; +pub mod tracer; + +use super::Address; +use crate::F; +use ark_ff::PrimeField; +use ark_r1cs_std::GR1CSVar as _; +use ark_r1cs_std::alloc::AllocVar; +use ark_r1cs_std::fields::fp::FpVar; +use ark_relations::gr1cs::{ConstraintSystemRef, SynthesisError}; +pub use gadget::NebulaMemoryConstraints; +use std::iter::repeat; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct MemOp { + pub values: Vec, + pub timestamp: u64, +} + +pub struct MemOpAllocated { + pub values: Vec>, + pub timestamp: FpVar, +} + +impl MemOp { + pub fn padding() -> MemOp { + MemOp { + values: vec![], + timestamp: 0, + } + } + + pub fn allocate( + &self, + cs: ConstraintSystemRef, + pad_to: usize, + ) -> Result, SynthesisError> + where + F: PrimeField, + { + let fp = F::from(0); + Ok(MemOpAllocated { + values: self + .values + .iter() + .chain(repeat(&fp)) + .take(pad_to) + .map(|v| FpVar::new_witness(cs.clone(), || Ok(*v))) + .collect::, _>>()?, + timestamp: FpVar::new_witness(cs.clone(), || Ok(F::from(self.timestamp)))?, + }) + } +} + +impl MemOpAllocated +where + F: PrimeField, +{ + // pub fn padding( + // cs: ConstraintSystemRef, + 
// segment_size: u64, + // ) -> Result, SynthesisError> { + // Ok(MemOpAllocated { + // values: std::iter::repeat_with(|| FpVar::new_witness(cs.clone(), || Ok(F::ZERO))) + // .take(segment_size as usize) + // .collect::, _>>()?, + // timestamp: FpVar::new_witness(cs, || Ok(F::from(0)))?, + // }) + // } + + pub fn debug_values(&self) -> Vec { + self.values.iter().map(|v| v.value().unwrap()).collect() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::logging::setup_logger; + use crate::memory::IVCMemory; + use crate::memory::IVCMemoryAllocated; + use crate::memory::MemType; + use crate::memory::nebula::tracer::NebulaMemory; + use crate::memory::nebula::tracer::NebulaMemoryParams; + use ark_r1cs_std::alloc::AllocVar; + use ark_r1cs_std::fields::fp::FpVar; + use ark_r1cs_std::prelude::Boolean; + use ark_relations::gr1cs::ConstraintSystem; + + #[test] + fn test_nebula_memory_constraints_satisfiability() { + setup_logger(); + + let mut memory = NebulaMemory::<1>::new(NebulaMemoryParams { + unsound_disable_poseidon_commitment: false, + }); + + memory.register_mem(1, 2, MemType::Ram, "test_segment"); + + let address = Address { addr: 10, tag: 1 }; + let initial_values = vec![F::from(42), F::from(100)]; + memory.init(address.clone(), initial_values.clone()); + memory.conditional_read(true, address.clone()); + + memory.conditional_write(true, address.clone(), vec![F::from(123), F::from(456)]); + memory.conditional_write(false, address.clone(), vec![F::from(0), F::from(0)]); + + assert_eq!( + memory.conditional_read(false, address.clone()), + vec![F::from(0), F::from(0)] + ); + + assert_eq!( + memory.conditional_read(true, address.clone()), + vec![F::from(123), F::from(456)] + ); + + let mut constraints = memory.constraints(); + + let cs = ConstraintSystem::::new_ref(); + + constraints.start_step(cs.clone()).unwrap(); + + let address_var = Address { addr: 10, tag: 1 }.allocate(cs.clone()).unwrap(); + + let true_cond = Boolean::new_witness(cs.clone(), || Ok(true)).unwrap(); + let false_cond = Boolean::new_witness(cs.clone(), || Ok(false)).unwrap(); + + let _read_result = constraints + .conditional_read(&true_cond, &address_var) + .unwrap(); + + let write_vals = vec![ + FpVar::new_witness(cs.clone(), || Ok(F::from(123))).unwrap(), + FpVar::new_witness(cs.clone(), || Ok(F::from(456))).unwrap(), + ]; + + let false_write_vals = vec![ + FpVar::new_witness(cs.clone(), || Ok(F::from(0))).unwrap(), + FpVar::new_witness(cs.clone(), || Ok(F::from(0))).unwrap(), + ]; + + constraints + .conditional_write(&true_cond, &address_var, &write_vals) + .unwrap(); + + constraints + .conditional_write(&false_cond, &address_var, &false_write_vals) + .unwrap(); + + constraints + .conditional_read(&false_cond, &address_var) + .unwrap(); + + let _read_result2 = constraints + .conditional_read(&true_cond, &address_var) + .unwrap(); + + constraints.finish_step(true).unwrap(); + + assert!( + cs.is_satisfied().unwrap(), + "Constraint system should be satisfiable" + ); + } + + // NOTE: for folding, we need conditional reads and conditional writes to be + // gated, but we still want to keep the same shape across steps + #[test] + fn test_circuit_shape_consistency_across_conditions() { + setup_logger(); + fn create_constraint_system_with_conditions( + read_cond: bool, + write_cond: bool, + ) -> ark_relations::gr1cs::ConstraintSystemRef { + let mut memory = NebulaMemory::<1>::new(NebulaMemoryParams { + unsound_disable_poseidon_commitment: false, + }); + memory.register_mem(1, 2, MemType::Ram, "test_segment"); + + let 
address = Address { addr: 10, tag: 1 }; + let initial_values = vec![F::from(42), F::from(100)]; + memory.init(address.clone(), initial_values); + + memory.conditional_read(read_cond, address.clone()); + memory.conditional_write( + write_cond, + address.clone(), + vec![F::from(123), F::from(456)], + ); + + let mut constraints = memory.constraints(); + let cs = ConstraintSystem::::new_ref(); + + constraints.start_step(cs.clone()).unwrap(); + + let address_var = Address { addr: 10, tag: 1 }.allocate(cs.clone()).unwrap(); + + let cond_read = Boolean::new_witness(cs.clone(), || Ok(read_cond)).unwrap(); + let cond_write = Boolean::new_witness(cs.clone(), || Ok(write_cond)).unwrap(); + + let _read_result = constraints + .conditional_read(&cond_read, &address_var) + .unwrap(); + + let write_vals = vec![ + FpVar::new_witness(cs.clone(), || Ok(F::from(if write_cond { 123 } else { 0 }))) + .unwrap(), + FpVar::new_witness(cs.clone(), || Ok(F::from(if write_cond { 456 } else { 0 }))) + .unwrap(), + ]; + + constraints + .conditional_write(&cond_write, &address_var, &write_vals) + .unwrap(); + + constraints.finish_step(true).unwrap(); + + std::mem::drop(constraints); + + cs + } + + let condition_combinations = [(true, true), (true, false), (false, true), (false, false)]; + let constraint_systems: Vec<_> = condition_combinations + .iter() + .map(|&(read_cond, write_cond)| { + create_constraint_system_with_conditions(read_cond, write_cond) + }) + .collect(); + + let reference_cs = &constraint_systems[0]; + let expected_constraints = reference_cs.num_constraints(); + let expected_instance_vars = reference_cs.num_instance_variables(); + let expected_witness_vars = reference_cs.num_witness_variables(); + + for (i, cs) in constraint_systems.iter().enumerate() { + let (read_cond, write_cond) = condition_combinations[i]; + + assert_eq!( + cs.num_constraints(), + expected_constraints, + "Number of constraints should be the same for ({},{})", + read_cond, + write_cond + ); + + assert_eq!( + cs.num_instance_variables(), + expected_instance_vars, + "Number of instance variables should be the same for ({},{})", + read_cond, + write_cond + ); + + assert_eq!( + cs.num_witness_variables(), + expected_witness_vars, + "Number of witness variables should be the same for ({},{})", + read_cond, + write_cond + ); + + assert!( + cs.is_satisfied().unwrap(), + "Constraint system ({},{}) should be satisfiable", + read_cond, + write_cond + ); + } + + println!( + "Circuit shape consistency verified: {} constraints, {} instance vars, {} witness vars", + expected_constraints, expected_instance_vars, expected_witness_vars + ); + } + + #[test] + fn test_scan_batch_size_multi_step() { + setup_logger(); + + const SCAN_BATCH_SIZE: usize = 2; + let num_steps = 3; + let total_addresses = SCAN_BATCH_SIZE * num_steps; // 6 addresses + + let mut memory = NebulaMemory::::new(NebulaMemoryParams::default()); + memory.register_mem(1, 2, MemType::Ram, "test_segment"); + + let addresses: Vec> = (0..total_addresses) + .map(|i| Address { + addr: i as u64, + tag: 1, + }) + .collect(); + + for (i, addr) in addresses.iter().enumerate() { + memory.init( + addr.clone(), + vec![F::from(i as u64 * 10), F::from(i as u64 * 10 + 1)], + ); + } + + for (step, addr) in addresses.iter().enumerate().take(num_steps) { + memory.conditional_read(true, addr.clone()); + memory.conditional_write( + true, + addr.clone(), + vec![F::from(100 + step as u64), F::from(200 + step as u64)], + ); + } + + let mut constraints = memory.constraints(); + + for step in 0..num_steps { + 
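+            // A fresh constraint system is created for every step; the allocator
+            // itself is reused across the loop, so its running state (timestamps
+            // and incremental commitments) is threaded through start_step/finish_step.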
let cs = ConstraintSystem::::new_ref(); + constraints.start_step(cs.clone()).unwrap(); + + let address_var = Address { + addr: step as u64, + tag: 1, + } + .allocate(cs.clone()) + .unwrap(); + + let true_cond = Boolean::new_witness(cs.clone(), || Ok(true)).unwrap(); + + let _read_result = constraints + .conditional_read(&true_cond, &address_var) + .unwrap(); + + let write_vals = vec![ + FpVar::new_witness(cs.clone(), || Ok(F::from(100 + step as u64))).unwrap(), + FpVar::new_witness(cs.clone(), || Ok(F::from(200 + step as u64))).unwrap(), + ]; + + constraints + .conditional_write(&true_cond, &address_var, &write_vals) + .unwrap(); + + let is_last_step = step == num_steps - 1; + constraints.finish_step(is_last_step).unwrap(); + + assert!( + cs.is_satisfied().unwrap(), + "Constraint system should be satisfiable for step {}", + step + ); + } + + println!( + "Multi-step scan batch test completed: {} addresses, {} steps, batch size {}", + total_addresses, num_steps, SCAN_BATCH_SIZE + ); + } +} diff --git a/interleaving/starstream-interleaving-proof/src/memory/nebula/tracer.rs b/interleaving/starstream-interleaving-proof/src/memory/nebula/tracer.rs new file mode 100644 index 00000000..91fcfcdc --- /dev/null +++ b/interleaving/starstream-interleaving-proof/src/memory/nebula/tracer.rs @@ -0,0 +1,263 @@ +use ark_ff::AdditiveGroup; +use ark_ff::Field as _; + +use super::Address; +use super::MemOp; +use super::NebulaMemoryConstraints; +use super::ic::ICPlain; +use crate::F; +use crate::memory::IVCMemory; +use crate::memory::MemType; +use crate::memory::nebula::gadget::FingerPrintPreWires; +use crate::memory::twist_and_shout::Lanes; +use std::collections::BTreeMap; +use std::collections::VecDeque; + +pub struct NebulaMemory { + pub(crate) rs: BTreeMap, VecDeque>>, + pub(crate) ws: BTreeMap, VecDeque>>, + pub(crate) is: BTreeMap, MemOp>, + + pub(crate) mems: BTreeMap, + + ic_rs_ws: ICPlain, + + ts: u64, + + params: NebulaMemoryParams, +} + +impl NebulaMemory { + fn perform_memory_operation( + &mut self, + cond: bool, + address: &Address, + new_values: Option>, + ) -> Vec { + if cond { + self.ts += 1; + } + + let rv = if cond { + self.ws + .get(address) + .and_then(|writes| writes.back().cloned()) + .unwrap_or_else(|| { + self.is + .get(address) + .unwrap_or_else(|| panic!("read uninitialized address: {address:?}")) + .clone() + }) + } else { + MemOp::padding() + }; + + assert!(!cond || rv.timestamp < self.ts); + + let wv = if cond { + MemOp { + values: new_values.unwrap_or_else(|| rv.values.clone()), + timestamp: self.ts, + } + } else { + MemOp::padding() + }; + + // println!( + // "Tracing: incrementing ic_rs_ws with rv: {:?}, wv: {:?}", + // rv, wv + // ); + self.ic_rs_ws + .increment( + address, + &rv, + self.params.unsound_disable_poseidon_commitment, + ) + .unwrap(); + self.ic_rs_ws + .increment( + address, + &wv, + self.params.unsound_disable_poseidon_commitment, + ) + .unwrap(); + // println!( + // "Tracing: ic_rs_ws after increment: {:?}", + // self.ic_rs_ws.comm + // ); + + if !cond { + let mem_value_size = self.mems.get(&address.tag).unwrap().0; + return std::iter::repeat_n(F::from(0), mem_value_size as usize).collect(); + } + + let reads = self.rs.entry(address.clone()).or_default(); + reads.push_back(rv.clone()); + + self.ws + .entry(address.clone()) + .or_default() + .push_back(wv.clone()); + + rv.values + } + + pub fn get_ic_rs_ws(&self) -> [F; 4] { + self.ic_rs_ws.comm + } +} + +#[derive(Default)] +pub struct NebulaMemoryParams { + pub unsound_disable_poseidon_commitment: bool, +} + +impl 
IVCMemory for NebulaMemory { + type Allocator = NebulaMemoryConstraints; + + type Params = NebulaMemoryParams; + + fn new(params: Self::Params) -> Self { + NebulaMemory { + rs: BTreeMap::default(), + ws: BTreeMap::default(), + is: BTreeMap::default(), + mems: BTreeMap::default(), + ts: 0, + ic_rs_ws: ICPlain::zero(), + params, + } + } + + fn register_mem_with_lanes( + &mut self, + tag: u64, + size: u64, + _mem_type: MemType, + // TODO: this is not generic + _extra_info: Lanes, + debug_name: &'static str, + ) { + self.mems.insert(tag, (size, debug_name)); + } + + fn init(&mut self, address: Address, values: Vec) { + self.is.insert( + address, + MemOp { + values: values.clone(), + timestamp: 0, + }, + ); + } + + fn conditional_read(&mut self, cond: bool, address: Address) -> Vec { + self.perform_memory_operation(cond, &address, None) + } + + fn conditional_write(&mut self, cond: bool, address: Address, values: Vec) { + assert_eq!( + self.mems.get(&address.tag).unwrap().0 as usize, + values.len(), + "write doesn't match mem value size" + ); + + self.perform_memory_operation(cond, &address, Some(values)); + } + + fn finish_step(&mut self) {} + + fn required_steps(&self) -> usize { + self.is.len() / SCAN_BATCH_SIZE + } + + fn constraints(mut self) -> Self::Allocator { + let mut ic_is_fs = ICPlain::zero(); + + // Only pad when there is a remainder; avoid adding a full extra batch. + let rem = self.is.len() % SCAN_BATCH_SIZE; + let padding_required = (SCAN_BATCH_SIZE - rem) % SCAN_BATCH_SIZE; + + let mut max_address = self.is.keys().next_back().unwrap().clone(); + + max_address.tag += 1; + + self.register_mem(max_address.tag, 1, MemType::Ram, "PADDING_SEGMENT"); + + for _ in 0..padding_required { + self.is.insert( + max_address.clone(), + MemOp { + values: vec![F::ZERO], + timestamp: 0, + }, + ); + max_address.addr += 1; + } + + // compute FS such that: + // + // IS U WS = RS U FS + // + // (Lemma 2 in the Nebula paper) + let mut fs = BTreeMap::default(); + + for (addr, vt) in self.is.iter().chain( + self.ws + .iter() + .flat_map(|(addr, queue)| queue.iter().map(move |vt| (addr, vt))), + ) { + if !self.rs.get(addr).is_some_and(|vals| vals.contains(vt)) { + fs.insert(addr.clone(), vt.clone()); + } + } + + for (addr, is_v) in self.is.iter() { + let fs_v = fs.get(addr).unwrap_or(is_v); + + ic_is_fs + .increment(addr, is_v, self.params.unsound_disable_poseidon_commitment) + .unwrap(); + ic_is_fs + .increment(addr, fs_v, self.params.unsound_disable_poseidon_commitment) + .unwrap(); + } + + NebulaMemoryConstraints { + cs: None, + reads: self.rs, + writes: self.ws, + mems: self.mems, + ic_rs_ws: ICPlain::zero(), + ic_is_fs: ICPlain::zero(), + ts: F::ZERO, + step_ts: None, + expected_rw_ws: self.ic_rs_ws, + expected_is_fs: ic_is_fs, + fs, + is: self.is, + current_step: 0, + params: self.params, + scan_batch_size: SCAN_BATCH_SIZE, + step_ic_rs_ws: None, + step_ic_is_fs: None, + // TODO: fiat-shamir, these are derived by hashing the multisets + c0: F::from(1), + c1: F::from(2), + c0_wire: None, + c1_wire: None, + multiset_fingerprints: FingerPrintPreWires { + is: F::ONE, + fs: F::ONE, + rs: F::ONE, + ws: F::ONE, + }, + fingerprint_wires: None, + + debug_sets: Default::default(), + c1_powers_cache: None, + scan_monotonic_last_addr: None, + scan_monotonic_last_addr_wires: None, + } + } +} diff --git a/interleaving/starstream-interleaving-proof/src/memory/twist_and_shout/mod.rs b/interleaving/starstream-interleaving-proof/src/memory/twist_and_shout/mod.rs new file mode 100644 index 00000000..cb10aa05 --- 
/dev/null +++ b/interleaving/starstream-interleaving-proof/src/memory/twist_and_shout/mod.rs @@ -0,0 +1,766 @@ +use super::Address; +use super::IVCMemory; +use super::IVCMemoryAllocated; +use crate::memory::AllocatedAddress; +use crate::memory::MemType; +use ark_ff::PrimeField; +use ark_r1cs_std::GR1CSVar as _; +use ark_r1cs_std::alloc::AllocVar as _; +use ark_r1cs_std::eq::EqGadget as _; +use ark_r1cs_std::fields::FieldVar as _; +use ark_r1cs_std::fields::fp::FpVar; +use ark_r1cs_std::prelude::Boolean; +use ark_relations::gr1cs::ConstraintSystemRef; +use ark_relations::gr1cs::SynthesisError; +use neo_vm_trace::{Shout, Twist, TwistId, TwistOpKind}; +use std::collections::BTreeMap; +use std::collections::VecDeque; + +#[derive(Debug, Clone)] +pub struct ShoutCpuBinding { + pub has_lookup: usize, + pub addr: usize, + pub val: usize, +} + +#[derive(Debug, Clone)] +pub struct TwistCpuBinding { + pub ra: usize, + pub has_read: usize, + pub rv: usize, + pub wa: usize, + pub has_write: usize, + pub wv: usize, +} + +#[derive(Debug, Clone, Default)] +pub struct PartialTwistCpuBinding { + pub ra: Option, + pub has_read: Option, + pub rv: Option, + pub wa: Option, + pub has_write: Option, + pub wv: Option, +} + +impl PartialTwistCpuBinding { + pub fn to_complete(&self) -> TwistCpuBinding { + TwistCpuBinding { + ra: self.ra.unwrap(), + has_read: self.has_read.unwrap(), + rv: self.rv.unwrap(), + wa: self.wa.unwrap(), + has_write: self.has_write.unwrap(), + wv: self.wv.unwrap(), + } + } +} + +/// Event representing a shout lookup operation +#[derive(Debug, Clone)] +pub struct ShoutEvent { + pub shout_id: u32, + pub key: u64, + pub value: u64, +} + +/// Event representing a twist memory operation +#[derive(Debug, Clone)] +pub struct TwistEvent { + pub twist_id: u32, + pub addr: u64, + pub val: u64, + pub op: TwistOpKind, + pub cond: bool, + pub lane: Option, +} + +#[derive(Clone)] +pub struct TSMemory { + pub(crate) reads: BTreeMap, VecDeque>>, + pub(crate) writes: BTreeMap, VecDeque>>, + pub(crate) init: BTreeMap, Vec>, + + pub(crate) mems: BTreeMap, + + /// Captured shout events for witness generation, organized by address + pub(crate) shout_events: BTreeMap, VecDeque>, + /// Captured twist events for witness generation, organized by address + pub(crate) twist_events: BTreeMap, VecDeque>, + + pub(crate) current_step_read_lanes: BTreeMap, + pub(crate) current_step_write_lanes: BTreeMap, +} + +/// Initial ROM tables computed by TSMemory +pub struct TSMemInitTables { + pub mems: BTreeMap, + pub rom_sizes: BTreeMap, + pub init: BTreeMap>, +} + +/// Layout/bindings computed by TSMemoryConstraints +pub struct TSMemLayouts { + pub shout_bindings: BTreeMap>, + pub twist_bindings: BTreeMap>, +} + +#[derive(Debug, Clone, Copy)] +pub struct Lanes(pub usize); + +impl Default for Lanes { + fn default() -> Self { + Self(1) + } +} + +impl IVCMemory for TSMemory { + type Allocator = TSMemoryConstraints; + type Params = (); + + fn new(_params: Self::Params) -> Self { + TSMemory { + reads: BTreeMap::default(), + writes: BTreeMap::default(), + init: BTreeMap::default(), + mems: BTreeMap::default(), + shout_events: BTreeMap::default(), + twist_events: BTreeMap::default(), + current_step_read_lanes: BTreeMap::default(), + current_step_write_lanes: BTreeMap::default(), + } + } + + // TODO: remove it like for shout? 
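+    // Registers a memory segment under `tag`: `size` is the number of field
+    // elements stored per address, `lanes` how many accesses per step the
+    // segment is wired for (refined later by `finish_step`), and `mem_type`
+    // selects ROM (served via Shout lookups) or RAM (served via Twist).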
+ fn register_mem_with_lanes( + &mut self, + tag: u64, + size: u64, + mem_type: MemType, + lanes: Lanes, + debug_name: &'static str, + ) { + self.mems.insert(tag, (size, lanes, mem_type, debug_name)); + } + + fn init(&mut self, address: Address, values: Vec) { + self.init.insert(address, values.clone()); + } + + fn conditional_read(&mut self, cond: bool, address: Address) -> Vec { + *self.current_step_read_lanes.entry(address.tag).or_default() += 1; + + if let Some(&(_, _, MemType::Rom, debug_name)) = self.mems.get(&address.tag) { + if cond { + let value = self.init.get(&address).unwrap().clone(); + let shout_event = ShoutEvent { + shout_id: address.tag as u32, + key: address.addr, + value: value[0].into_bigint().as_ref()[0], + }; + + if address.tag == 20 { + tracing::info!( + "traced rom read from address {} in table {} with value {:?}", + address.addr, + debug_name, + value + ); + } + + let shout_events = self.shout_events.entry(address.clone()).or_default(); + shout_events.push_back(shout_event); + value + } else { + let mem_value_size = self.mems.get(&address.tag).unwrap().0; + std::iter::repeat_n(F::from(0), mem_value_size as usize).collect() + } + } else { + let reads = self.reads.entry(address.clone()).or_default(); + if cond { + let mem_value_size = self.mems.get(&address.tag).unwrap().0 as usize; + let last = self + .writes + .get(&address) + .and_then(|writes| writes.back().cloned()) + .or_else(|| self.init.get(&address).cloned()) + .unwrap_or_else(|| vec![F::ZERO; mem_value_size]); + + reads.push_back(last.clone()); + + let twist_event = TwistEvent { + twist_id: address.tag as u32, + addr: address.addr, + val: last[0].into_bigint().as_ref()[0], + op: TwistOpKind::Read, + cond, + lane: None, + }; + + self.twist_events + .entry(address.clone()) + .or_default() + .push_back(twist_event); + + last + } else { + let mem_value_size = self.mems.get(&address.tag).unwrap().0; + std::iter::repeat_n(F::from(0), mem_value_size as usize).collect() + } + } + } + + fn conditional_write(&mut self, cond: bool, address: Address, values: Vec) { + *self + .current_step_write_lanes + .entry(address.tag) + .or_default() += 1; + + assert_eq!( + self.mems.get(&address.tag).unwrap().0 as usize, + values.len(), + "write doesn't match mem value size" + ); + if cond { + self.writes + .entry(address.clone()) + .or_default() + .push_back(values.clone()); + + let twist_event = TwistEvent { + twist_id: address.tag as u32, + addr: address.addr, + val: values[0].into_bigint().as_ref()[0], + op: TwistOpKind::Write, + cond, + lane: None, + }; + + self.twist_events + .entry(address) + .or_default() + .push_back(twist_event); + } + } + + fn finish_step(&mut self) { + let mut current_step_read_lanes = BTreeMap::new(); + let mut current_step_write_lanes = BTreeMap::new(); + + std::mem::swap( + &mut current_step_read_lanes, + &mut self.current_step_read_lanes, + ); + + std::mem::swap( + &mut current_step_write_lanes, + &mut self.current_step_write_lanes, + ); + + for (tag, reads) in current_step_read_lanes { + if let Some(writes) = current_step_write_lanes.get(&tag) { + assert_eq!( + reads, *writes, + "each step must have the same number of (conditional) reads and writes per memory" + ); + } + + if let Some(entry) = self.mems.get_mut(&tag) { + entry.1 = Lanes(reads); + } + } + + self.current_step_read_lanes.clear(); + self.current_step_write_lanes.clear(); + } + + fn required_steps(&self) -> usize { + 0 + } + + fn constraints(self) -> Self::Allocator { + TSMemoryConstraints { + cs: None, + mems: self.mems, + 
shout_events: self.shout_events, + twist_events: self.twist_events, + shout_bindings: BTreeMap::new(), + partial_twist_bindings: BTreeMap::new(), + step_events_shout: vec![], + step_events_twist: vec![], + is_first_step: true, + write_lanes: BTreeMap::new(), + read_lanes: BTreeMap::new(), + } + } +} + +impl TSMemory { + pub fn init_tables(&self) -> TSMemInitTables { + let mut rom_sizes = BTreeMap::new(); + let mut init = BTreeMap::>::new(); + + for (address, val) in &self.init { + if let Some((_, _, MemType::Rom, _)) = self.mems.get(&address.tag) { + *rom_sizes.entry(address.tag).or_insert(0) += 1; + } + + init.entry(address.tag) + .or_default() + .insert(address.addr, val[0]); + } + + TSMemInitTables { + mems: self.mems.clone(), + rom_sizes, + init, + } + } + + pub fn split(self) -> (TSMemoryConstraints, TracedShout, TracedTwist) { + let mems = self.mems.clone(); + + let (init, twist_events, _mems, shout_events) = + (self.init, self.twist_events, self.mems, self.shout_events); + + let traced_shout = TracedShout { init }; + let traced_twist = TracedTwist { + twist_events: twist_events.clone(), + }; + + let constraints = TSMemoryConstraints { + cs: None, + mems, + shout_events, + twist_events, + shout_bindings: BTreeMap::new(), + partial_twist_bindings: BTreeMap::new(), + step_events_shout: vec![], + step_events_twist: vec![], + is_first_step: true, + write_lanes: BTreeMap::new(), + read_lanes: BTreeMap::new(), + }; + + (constraints, traced_shout, traced_twist) + } +} + +pub struct TracedShout { + pub init: BTreeMap, Vec>, +} + +impl Shout for TracedShout { + fn lookup(&mut self, shout_id: neo_vm_trace::ShoutId, key: u64) -> u64 { + let value = self + .init + .get(&Address { + tag: shout_id.0 as u64, + addr: key, + }) + .unwrap() + .clone(); + + value[0].into_bigint().as_ref()[0] + } +} + +pub struct TracedTwist { + pub twist_events: BTreeMap, VecDeque>, +} + +impl Twist for TracedTwist { + fn load(&mut self, id: TwistId, addr: u64) -> u64 { + let address = Address { + tag: id.0 as u64, + addr, + }; + + let event = self + .twist_events + .get_mut(&address) + .unwrap() + .pop_front() + .unwrap(); + + assert_eq!(event.op, TwistOpKind::Read); + event.val + } + + fn store(&mut self, id: TwistId, addr: u64, val: u64) { + let address = Address { + tag: id.0 as u64, + addr, + }; + let event = self + .twist_events + .get_mut(&address) + .unwrap() + .pop_front() + .unwrap(); + + assert_eq!(event.op, TwistOpKind::Write); + assert_eq!(event.val, val); + } +} + +pub struct TSMemoryConstraints { + pub(crate) cs: Option>, + + pub(crate) mems: BTreeMap, + + pub(crate) shout_events: BTreeMap, VecDeque>, + pub(crate) twist_events: BTreeMap, VecDeque>, + + pub(crate) shout_bindings: BTreeMap>, + pub(crate) partial_twist_bindings: BTreeMap>, + + step_events_shout: Vec, + step_events_twist: Vec, + + is_first_step: bool, + + write_lanes: BTreeMap, + read_lanes: BTreeMap, +} + +impl TSMemoryConstraints { + pub fn ts_mem_layouts(&self) -> TSMemLayouts { + let mut twist_bindings = BTreeMap::new(); + + for (tag, partials) in &self.partial_twist_bindings { + let mut complete = Vec::new(); + for p in partials { + complete.push(p.to_complete()); + } + + twist_bindings.insert(*tag, complete); + } + + TSMemLayouts { + shout_bindings: self.shout_bindings.clone(), + twist_bindings, + } + } + + pub fn get_shout_traced_values( + &mut self, + address: &Address, + ) -> Result<(F, F, F), SynthesisError> { + let (has_lookup_val, addr_val, val_val) = { + let event = self + .shout_events + .get_mut(address) + .unwrap() + 
.pop_front() + .unwrap(); + + self.step_events_shout.push(event.clone()); + + (F::from(1), F::from(event.key), F::from(event.value)) + }; + + Ok((has_lookup_val, addr_val, val_val)) + } + + pub fn get_twist_traced_values( + &mut self, + address: &Address, + lane: u32, + kind: TwistOpKind, + ) -> Result<(F, F, F), SynthesisError> { + let (ra, rv) = { + let queue = self.twist_events.get_mut(address).unwrap_or_else(|| { + panic!( + "missing twist events for address {:?} kind {:?} lane {}", + address, kind, lane + ) + }); + let mut event = queue.pop_front().unwrap_or_else(|| { + panic!( + "empty twist event queue for address {:?} kind {:?} lane {}", + address, kind, lane + ) + }); + + event.lane.replace(lane); + + assert_eq!(event.op, kind); + + self.step_events_twist.push(event.clone()); + + (F::from(event.addr), F::from(event.val)) + }; + + Ok((F::one(), ra, rv)) + } +} + +impl TSMemoryConstraints { + fn get_next_read_lane(&mut self, twist_id: u64) -> u32 { + *self + .read_lanes + .entry(twist_id.try_into().unwrap()) + .and_modify(|l| *l += 1) + .or_insert(0) + } + + fn get_next_write_lane(&mut self, twist_id: u64) -> u32 { + *self + .write_lanes + .entry(twist_id.try_into().unwrap()) + .and_modify(|l| *l += 1) + .or_insert(0) + } + + fn update_partial_twist_bindings_read(&mut self, tag: u64, base_index: usize, lane: usize) { + let bindings = self.partial_twist_bindings.entry(tag).or_default(); + + while bindings.len() <= lane { + bindings.push(PartialTwistCpuBinding::default()); + } + + let b = &mut bindings[lane]; + b.has_read = Some(base_index); + b.ra = Some(base_index + 1); + b.rv = Some(base_index + 2); + } + + fn update_partial_twist_bindings_write(&mut self, tag: u64, base_index: usize, lane: usize) { + let bindings = self.partial_twist_bindings.entry(tag).or_default(); + + while bindings.len() <= lane { + bindings.push(PartialTwistCpuBinding::default()); + } + + let b = &mut bindings[lane]; + b.has_write = Some(base_index); + b.wa = Some(base_index + 1); + b.wv = Some(base_index + 2); + } + + fn conditional_read_rom( + &mut self, + cond: &Boolean, + address: &AllocatedAddress, + ) -> Result>, SynthesisError> { + let address_val = Address { + addr: address.address_value(), + tag: address.tag_value(), + }; + + let cs = self.get_cs(); + + let base_index = cs.num_witness_variables(); + + let (has_lookup_val, addr_witness_val, val_witness_val) = if cond.value()? { + self.get_shout_traced_values(&address_val)? 
+ } else { + (F::ZERO, F::from(address.address_value()), F::ZERO) + }; + + let has_lookup = FpVar::new_witness(cs.clone(), || Ok(has_lookup_val))?; + let addr_witness = FpVar::new_witness(cs.clone(), || Ok(addr_witness_val))?; + let val_witness = FpVar::new_witness(cs.clone(), || Ok(val_witness_val))?; + + let tag = address.tag_value(); + + if let Some(&(_, _lanes, MemType::Rom, _)) = self.mems.get(&tag) + && self.is_first_step + { + let binding = ShoutCpuBinding { + has_lookup: base_index, + addr: base_index + 1, + val: base_index + 2, + }; + self.shout_bindings.entry(tag).or_default().push(binding); + } + + FpVar::from(cond.clone()).enforce_equal(&has_lookup)?; + + let addr_fp = FpVar::new_witness(self.get_cs(), || Ok(F::from(address.address_value())))?; + addr_witness.enforce_equal(&addr_fp)?; + + Ok(vec![val_witness]) + } + + fn conditional_read_ram( + &mut self, + cond: &Boolean, + address: &AllocatedAddress, + ) -> Result>, SynthesisError> { + let twist_id = address.tag_value(); + let address_val = Address { + addr: address.address_value(), + tag: twist_id, + }; + + let cs = self.get_cs(); + let base_index = cs.num_witness_variables(); + + let cond_val = cond.value()?; + + let lane = self.get_next_read_lane(twist_id); + + let (has_read_val, ra_val, rv_val) = if cond_val { + self.get_twist_traced_values(&address_val, lane, TwistOpKind::Read)? + } else { + self.step_events_twist.push(TwistEvent { + twist_id: twist_id as u32, + addr: 0, + val: 0, + op: TwistOpKind::Write, + cond: cond_val, + lane: Some(lane), + }); + + (F::ZERO, F::ZERO, F::ZERO) + }; + + let has_read = FpVar::new_witness(cs.clone(), || Ok(has_read_val))?; + let ra = FpVar::new_witness(cs.clone(), || Ok(ra_val))?; + let rv = FpVar::new_witness(cs.clone(), || Ok(rv_val))?; + + assert_eq!(cs.num_witness_variables(), base_index + 3); + + let tag = address.tag_value(); + + if let Some(&(_, _lanes, MemType::Ram, _)) = self.mems.get(&tag) + && self.is_first_step + { + self.update_partial_twist_bindings_read(tag, base_index, lane as usize); + } + + FpVar::from(cond.clone()).enforce_equal(&has_read)?; + + let addr_fp = FpVar::new_witness(self.get_cs(), || Ok(F::from(address.address_value())))?; + let addr_constraint = cond.select(&addr_fp, &FpVar::zero())?; + ra.enforce_equal(&addr_constraint)?; + + Ok(vec![rv]) + } +} + +impl IVCMemoryAllocated for TSMemoryConstraints { + type FinishStepPayload = (Vec, Vec); + + fn start_step(&mut self, cs: ConstraintSystemRef) -> Result<(), SynthesisError> { + self.cs.replace(cs.clone()); + + Ok(()) + } + + fn finish_step( + &mut self, + _is_last_step: bool, + ) -> Result { + self.cs = None; + + let mut step_events_shout = vec![]; + std::mem::swap(&mut step_events_shout, &mut self.step_events_shout); + + let mut step_events_twist = vec![]; + std::mem::swap(&mut step_events_twist, &mut self.step_events_twist); + + self.is_first_step = false; + + self.read_lanes.clear(); + self.write_lanes.clear(); + + Ok((step_events_shout, step_events_twist)) + } + + fn get_cs(&self) -> ConstraintSystemRef { + self.cs.as_ref().unwrap().clone() + } + + #[tracing::instrument(target = "gr1cs", skip_all)] + fn conditional_read( + &mut self, + cond: &Boolean, + address: &AllocatedAddress, + ) -> Result>, SynthesisError> { + let _guard = tracing::debug_span!("conditional_read").entered(); + + let mem = self.mems.get(&address.tag_value()).copied().unwrap(); + + if mem.2 == MemType::Rom { + self.conditional_read_rom(cond, address) + } else { + self.conditional_read_ram(cond, address) + } + } + + 
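+    // Writes are only valid for RAM (Twist) segments; writing to a ROM segment
+    // is treated as unreachable. When `cond` is false the same witnesses
+    // (has_write, wa, wv) are still allocated, so the circuit shape does not
+    // depend on the condition.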
#[tracing::instrument(target = "gr1cs", skip_all)] + fn conditional_write( + &mut self, + cond: &Boolean, + address: &AllocatedAddress, + vals: &[FpVar], + ) -> Result<(), SynthesisError> { + let _guard = tracing::debug_span!("conditional_write").entered(); + + let mem_tag = address.tag_value(); + let mem = self.mems.get(&mem_tag).copied().unwrap(); + + if mem.2 != MemType::Ram { + unreachable!("can't write to Rom memory"); + } + + if cond.value().unwrap() { + assert_eq!( + mem.0 as usize, + vals.len(), + "write doesn't match mem value size" + ); + } + + let twist_id = address.tag_value(); + let address_cpu = Address { + addr: address.address_value(), + tag: twist_id, + }; + + let cs = self.get_cs(); + let base_index = cs.num_witness_variables(); + + let cond_val = cond.value()?; + + let lane = self.get_next_write_lane(twist_id); + + let (has_write_val, wa_val, wv_val) = if cond_val { + self.get_twist_traced_values(&address_cpu, lane, TwistOpKind::Write)? + } else { + self.step_events_twist.push(TwistEvent { + twist_id: twist_id as u32, + addr: 0, + val: 0, + op: TwistOpKind::Write, + cond: cond_val, + lane: Some(lane), + }); + + ( + F::ZERO, + F::from(address.address_value()), + vals[0].value().unwrap_or(F::ZERO), + ) + }; + + let has_write = FpVar::new_witness(cs.clone(), || Ok(has_write_val))?; + let wa = FpVar::new_witness(cs.clone(), || Ok(wa_val))?; + let wv = FpVar::new_witness(cs.clone(), || Ok(wv_val))?; + + if self.is_first_step { + self.update_partial_twist_bindings_write(mem_tag, base_index, lane as usize); + } + + FpVar::from(cond.clone()).enforce_equal(&has_write)?; + + let addr_fp = FpVar::new_witness(self.get_cs(), || Ok(F::from(address.address_value())))?; + wa.enforce_equal(&addr_fp)?; + wv.enforce_equal(&vals[0])?; + + Ok(()) + } +} diff --git a/interleaving/starstream-interleaving-proof/src/memory_tags.rs b/interleaving/starstream-interleaving-proof/src/memory_tags.rs new file mode 100644 index 00000000..cb21400b --- /dev/null +++ b/interleaving/starstream-interleaving-proof/src/memory_tags.rs @@ -0,0 +1,90 @@ +use crate::F; +use ark_r1cs_std::alloc::AllocVar; +use ark_r1cs_std::fields::fp::FpVar; +use ark_relations::gr1cs::{ConstraintSystemRef, SynthesisError}; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum MemoryTag { + // ROM tags + ProcessTable = 1, + MustBurn = 2, + IsUtxo = 3, + Interfaces = 4, + + // RAM tags + ExpectedInput = 5, + Activation = 6, + Initialized = 8, + Finalized = 9, + DidBurn = 10, + Ownership = 11, + Init = 12, + RefArena = 13, + RefSizes = 14, + HandlerStackArenaProcess = 15, + HandlerStackArenaNextPtr = 16, + HandlerStackHeads = 17, + TraceCommitments = 18, + ExpectedResumer = 19, + OnYield = 20, + YieldTo = 21, + InitCaller = 22, +} + +impl From for u64 { + fn from(tag: MemoryTag) -> u64 { + tag as u64 + } +} + +impl From for F { + fn from(tag: MemoryTag) -> F { + F::from(tag as u64) + } +} + +impl MemoryTag { + pub fn allocate(&self, cs: ConstraintSystemRef) -> Result, SynthesisError> { + FpVar::new_constant(cs, F::from(*self)) + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum ProgramStateTag { + ExpectedInput, + ExpectedResumer, + OnYield, + YieldTo, + Activation, + Init, + InitCaller, + Initialized, + Finalized, + DidBurn, + Ownership, +} + +impl From for MemoryTag { + fn from(tag: ProgramStateTag) -> MemoryTag { + match tag { + ProgramStateTag::ExpectedInput => MemoryTag::ExpectedInput, + ProgramStateTag::ExpectedResumer => MemoryTag::ExpectedResumer, + ProgramStateTag::OnYield => MemoryTag::OnYield, + 
ProgramStateTag::YieldTo => MemoryTag::YieldTo, + ProgramStateTag::Activation => MemoryTag::Activation, + ProgramStateTag::Init => MemoryTag::Init, + ProgramStateTag::InitCaller => MemoryTag::InitCaller, + ProgramStateTag::Initialized => MemoryTag::Initialized, + ProgramStateTag::Finalized => MemoryTag::Finalized, + ProgramStateTag::DidBurn => MemoryTag::DidBurn, + ProgramStateTag::Ownership => MemoryTag::Ownership, + } + } +} + +impl From for u64 { + fn from(tag: ProgramStateTag) -> u64 { + let memory_tag: MemoryTag = tag.into(); + memory_tag.into() + } +} diff --git a/interleaving/starstream-interleaving-proof/src/neo.rs b/interleaving/starstream-interleaving-proof/src/neo.rs new file mode 100644 index 00000000..69b070f4 --- /dev/null +++ b/interleaving/starstream-interleaving-proof/src/neo.rs @@ -0,0 +1,498 @@ +use crate::{ + ccs_step_shape, + circuit::{InterRoundWires, IvcWireLayout, StepCircuitBuilder}, + memory::twist_and_shout::{TSMemInitTables, TSMemLayouts, TSMemory, TSMemoryConstraints}, +}; +use ark_ff::PrimeField; +use ark_goldilocks::FpGoldilocks; +use ark_relations::gr1cs::{ConstraintSystem, OptimizationGoal, SynthesisError}; +use neo_fold::session::{NeoCircuit, WitnessLayout}; +use neo_memory::{ShoutCpuBinding, TwistCpuBinding}; +use neo_vm_trace::{Shout, StepTrace, Twist, VmCpu}; +use p3_field::PrimeCharacteristicRing; +use std::collections::HashMap; + +// TODO: benchmark properly +pub(crate) const CHUNK_SIZE: usize = 40; +const PER_STEP_COLS: usize = 1181; +const BASE_INSTANCE_COLS: usize = 1; +const EXTRA_INSTANCE_COLS: usize = IvcWireLayout::FIELD_COUNT * 2; +const M_IN: usize = BASE_INSTANCE_COLS + EXTRA_INSTANCE_COLS; +const USED_COLS: usize = M_IN + (PER_STEP_COLS - BASE_INSTANCE_COLS) * CHUNK_SIZE; + +pub(crate) struct StepCircuitNeo { + pub(crate) matrices: Vec>>, + pub(crate) num_constraints: usize, + pub(crate) ts_mem_spec: TSMemLayouts, + pub(crate) ts_mem_init: TSMemInitTables, + pub(crate) ivc_layout: crate::circuit::IvcWireLayout, +} + +impl StepCircuitNeo { + pub fn new(ts_mem_init: TSMemInitTables) -> Self { + let (ark_cs, ts_mem_spec, ivc_layout) = ccs_step_shape().unwrap(); + + let num_constraints = ark_cs.num_constraints(); + let num_instance_variables = ark_cs.num_instance_variables(); + let num_variables = ark_cs.num_variables(); + + tracing::info!("num constraints {}", num_constraints); + tracing::info!("num instance variables {}", num_instance_variables); + tracing::info!("num variables {}", num_variables); + + assert_eq!(num_variables, PER_STEP_COLS); + + let matrices = ark_cs + .into_inner() + .unwrap() + .to_matrices() + .unwrap() + .remove("R1CS") + .unwrap(); + + Self { + matrices, + num_constraints, + ts_mem_spec, + ts_mem_init, + ivc_layout, + } + } + + fn get_mem_content_iter<'a>( + &'a self, + tag: &'a u64, + ) -> impl Iterator + 'a { + self.ts_mem_init + .init + .get(tag) + .into_iter() + .flat_map(|content| content.iter()) + .map(|(addr, val)| (*addr, ark_field_to_p3_goldilocks(val))) + } +} + +pub(crate) fn ivc_step_linking_pairs() -> Vec<(usize, usize)> { + // Per-step instance vector is [1, inputs..., outputs...]. + // Enforce prev outputs == next inputs. 
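+    // For illustration only, assuming FIELD_COUNT = 3: the instance is
+    // [1, in0, in1, in2, out0, out1, out2], so the pairs returned here are
+    // (4, 1), (5, 2), (6, 3), pinning one folding step's outputs to the next
+    // step's inputs.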
+ let input_base = BASE_INSTANCE_COLS; + let output_base = BASE_INSTANCE_COLS + IvcWireLayout::FIELD_COUNT; + (0..IvcWireLayout::FIELD_COUNT) + .map(|i| (output_base + i, input_base + i)) + .collect() +} + +#[derive(Clone)] +pub struct CircuitLayout {} + +impl WitnessLayout for CircuitLayout { + // instance.len() + const M_IN: usize = M_IN; + + // instance.len()+witness.len() + const USED_COLS: usize = USED_COLS; + + fn new_layout() -> Self { + CircuitLayout {} + } +} + +impl NeoCircuit for StepCircuitNeo { + type Layout = CircuitLayout; + + fn chunk_size(&self) -> usize { + CHUNK_SIZE + } + + fn const_one_col(&self, _layout: &Self::Layout) -> usize { + 0 + } + + fn resources(&self, resources: &mut neo_fold::session::SharedBusResources) { + for (tag, (_dims, lanes, ty, _)) in &self.ts_mem_init.mems { + match ty { + crate::memory::MemType::Rom => { + let size = self + .ts_mem_init + .rom_sizes + .get(tag) + .copied() + // it can't be empty + .unwrap_or(1usize); + + let mut dense_content = vec![neo_math::F::ZERO; size]; + + for (addr, val) in self.get_mem_content_iter(tag) { + dense_content[addr as usize] = val; + } + + resources + .shout(*tag as u32) + .lanes(lanes.0) + .padded_binary_table(dense_content); + } + crate::memory::MemType::Ram => { + let twist_id = *tag as u32; + let k = 256usize; // TODO: hardcoded number + assert!(k > 0, "set_binary_mem_layout: k must be > 0"); + assert!( + k.is_power_of_two(), + "set_binary_mem_layout: k must be a power of two" + ); + resources + .twist(twist_id) + .layout(neo_memory::PlainMemLayout { + k, + d: k.trailing_zeros() as usize, + n_side: 2, + lanes: lanes.0, + }) + .init(self.get_mem_content_iter(tag)); + } + } + } + } + + fn define_cpu_constraints( + &self, + cs: &mut neo_fold::session::CcsBuilder, + _layout: &Self::Layout, + ) -> Result<(), String> { + let matrices = &self.matrices; + let m_in = ::M_IN; + let base_m_in = BASE_INSTANCE_COLS; + + for ((matrix_a, matrix_b), matrix_c) in matrices[0] + .iter() + .zip(&matrices[1]) + .zip(&matrices[2]) + .take(self.num_constraints) + { + let a_row = ark_matrix_to_neo(matrix_a); + let b_row = ark_matrix_to_neo(matrix_b); + let c_row = ark_matrix_to_neo(matrix_c); + + for j in 0..CHUNK_SIZE { + let map_idx = |idx: usize| { + if idx < base_m_in { + idx + } else { + // NOTE: m_in includes the per folding step IVC + // variables (inputs and outputs) + m_in + (idx - base_m_in) * CHUNK_SIZE + j + } + }; + + let a_row: Vec<_> = a_row.iter().map(|(i, v)| (map_idx(*i), *v)).collect(); + let b_row: Vec<_> = b_row.iter().map(|(i, v)| (map_idx(*i), *v)).collect(); + let c_row: Vec<_> = c_row.iter().map(|(i, v)| (map_idx(*i), *v)).collect(); + + cs.r1cs_terms(a_row, b_row, c_row); + } + } + + let one = neo_math::F::ONE; + let minus_one = -neo_math::F::ONE; + let input_base = BASE_INSTANCE_COLS; + let output_base = BASE_INSTANCE_COLS + IvcWireLayout::FIELD_COUNT; + + for (field_offset, (&input_idx, &output_idx)) in self + .ivc_layout + .input_indices() + .iter() + .zip(self.ivc_layout.output_indices().iter()) + .enumerate() + { + for j in 0..(CHUNK_SIZE - 1) { + // The chunked matrix is interleaved column-major: + // for variable x, step 0 is at `m_in + x * CHUNK_SIZE`, step 1 is at + // `m_in + x * CHUNK_SIZE + 1`, etc. So `x * CHUNK_SIZE` picks the + // contiguous block for that variable, and `j` selects the step. 
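+                // As a concrete example (FIELD_COUNT = 3 is an illustrative
+                // value, giving m_in = 7; CHUNK_SIZE = 40 as defined above):
+                // variable x = 2 occupies columns 87..=126, and its value at
+                // step j = 5 sits in column 7 + 2 * 40 + 5 = 92.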
+ // + // We enforce continuity inside a chunk: + // wire[in][s+1] == wire[out][s] + let out_col = m_in + output_idx * CHUNK_SIZE + j; + let in_col = m_in + input_idx * CHUNK_SIZE + (j + 1); + cs.r1cs_terms( + vec![(out_col, one), (in_col, minus_one)], + vec![(0, one)], + vec![], + ); + } + + let first_chunk_in_col = m_in + input_idx * CHUNK_SIZE; + let last_chunk_out_col = m_in + output_idx * CHUNK_SIZE + (CHUNK_SIZE - 1); + + // The per-folding-step public instance is: + // [1, inputs, outputs] + // We wire the first chunk input to the instance inputs... + let in_instance_col = input_base + field_offset; + // ...and the last chunk output to the instance outputs. + let out_instance_col = output_base + field_offset; + + // This means step_linking in the IVC setup should link pairs: + // (output_base + i, input_base + i) + + cs.r1cs_terms( + vec![(in_instance_col, one), (first_chunk_in_col, minus_one)], + vec![(0, one)], + vec![], + ); + cs.r1cs_terms( + vec![(out_instance_col, one), (last_chunk_out_col, minus_one)], + vec![(0, one)], + vec![], + ); + } + + Ok(()) + } + + fn build_witness_prefix( + &self, + _layout: &Self::Layout, + chunk: &[StepTrace], + ) -> Result, String> { + if chunk.len() != CHUNK_SIZE { + return Err(format!( + "chunk len {} != CHUNK_SIZE {}", + chunk.len(), + CHUNK_SIZE + )); + } + + let m_in = ::M_IN; + let base_m_in = BASE_INSTANCE_COLS; + let per_step_cols = chunk[0].regs_after.len(); + if per_step_cols != PER_STEP_COLS { + return Err(format!( + "per-step witness len {} != PER_STEP_COLS {}", + per_step_cols, PER_STEP_COLS + )); + } + + let mut witness = vec![neo_math::F::ZERO; USED_COLS]; + + witness[0] = neo_math::F::from_u64(chunk[0].regs_after[0]); + + let input_base = BASE_INSTANCE_COLS; + let output_base = BASE_INSTANCE_COLS + IvcWireLayout::FIELD_COUNT; + let last_step = chunk.len() - 1; + + for (field_idx, (&input_idx, &output_idx)) in self + .ivc_layout + .input_indices() + .iter() + .zip(self.ivc_layout.output_indices().iter()) + .enumerate() + { + let input_full_idx = base_m_in + input_idx; + let output_full_idx = base_m_in + output_idx; + witness[input_base + field_idx] = + neo_math::F::from_u64(chunk[0].regs_after[input_full_idx]); + witness[output_base + field_idx] = + neo_math::F::from_u64(chunk[last_step].regs_after[output_full_idx]); + } + + for (j, step) in chunk.iter().enumerate() { + for i in base_m_in..per_step_cols { + let idx = m_in + (i - base_m_in) * CHUNK_SIZE + j; + witness[idx] = neo_math::F::from_u64(step.regs_after[i]); + } + } + + Ok(witness) + } + + fn cpu_bindings( + &self, + _layout: &Self::Layout, + ) -> Result< + ( + HashMap>, + HashMap>, + ), + String, + > { + let m_in = ::M_IN; + let map_idx = |witness_idx: usize| m_in + witness_idx * CHUNK_SIZE; + + let mut shout_map: HashMap> = HashMap::new(); + + for (tag, layouts) in &self.ts_mem_spec.shout_bindings { + for layout in layouts { + let entry = shout_map.entry(*tag as u32).or_default(); + entry.push(ShoutCpuBinding { + has_lookup: map_idx(layout.has_lookup), + addr: Some(map_idx(layout.addr)), + val: map_idx(layout.val), + }); + } + } + + let mut twist_map: HashMap> = HashMap::new(); + + for (tag, layouts) in &self.ts_mem_spec.twist_bindings { + for layout in layouts { + let entry = twist_map.entry(*tag as u32).or_default(); + entry.push(TwistCpuBinding { + read_addr: map_idx(layout.ra), + has_read: map_idx(layout.has_read), + rv: map_idx(layout.rv), + write_addr: map_idx(layout.wa), + has_write: map_idx(layout.has_write), + wv: map_idx(layout.wv), + inc: None, + }); + } + } + 
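+        // Both maps are keyed by the memory tag and hold one binding per lane,
+        // with every position rebased by `map_idx` to the column holding that
+        // variable's step-0 value in the chunked CPU witness.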
+ Ok((shout_map, twist_map)) + } +} + +fn ark_matrix_to_neo(sparse_row: &[(FpGoldilocks, usize)]) -> Vec<(usize, neo_math::F)> { + let mut row = vec![]; + + for (col_v, col_i) in sparse_row.iter() { + row.push((*col_i, ark_field_to_p3_goldilocks(col_v))); + } + + row +} + +pub struct StarstreamVm { + step_circuit_builder: StepCircuitBuilder>, + step_i: usize, + mem: TSMemoryConstraints, + irw: InterRoundWires, + regs: Vec, +} + +impl StarstreamVm { + pub fn new( + step_circuit_builder: StepCircuitBuilder>, + mem: TSMemoryConstraints, + ) -> Self { + let irw = InterRoundWires::new(step_circuit_builder.instance.entrypoint.0 as u64); + + Self { + step_circuit_builder, + step_i: 0, + mem, + irw, + regs: vec![0; PER_STEP_COLS], + } + } +} + +impl VmCpu for StarstreamVm { + type Error = SynthesisError; + + fn snapshot_regs(&self) -> Vec { + self.regs.clone() + } + + fn pc(&self) -> u64 { + self.step_i as u64 + } + + fn halted(&self) -> bool { + self.step_i == self.step_circuit_builder.ops.len() + } + + fn step( + &mut self, + twist: &mut T, + shout: &mut S, + ) -> Result, Self::Error> + where + T: Twist, + S: Shout, + { + let cs = ConstraintSystem::::new_ref(); + cs.set_optimization_goal(OptimizationGoal::Constraints); + + let (irw, (shout_events, twist_events), _ivc_layout) = + self.step_circuit_builder.make_step_circuit( + self.step_i, + &mut self.mem, + cs.clone(), + self.irw.clone(), + false, + )?; + + if let Some(unsat) = cs.which_is_unsatisfied().unwrap() { + tracing::error!(location = unsat, "step CCS is unsat"); + } + + assert!(cs.is_satisfied().unwrap()); + + self.irw = irw; + + self.step_i += 1; + + self.regs = cs + .instance_assignment()? + .into_iter() + .map(|input| input.into_bigint().0[0]) + .chain( + cs.witness_assignment()? + .into_iter() + .map(|wit| wit.into_bigint().0[0]), + ) + .collect(); + + for event in shout_events { + assert_eq!( + shout.lookup(neo_vm_trace::ShoutId(event.shout_id), event.key), + event.value + ); + } + + for event in twist_events { + match event.op { + neo_vm_trace::TwistOpKind::Read => { + assert_eq!( + twist.load_if_lane( + event.cond, + neo_vm_trace::TwistId(event.twist_id), + event.addr, + event.val, + event.lane.unwrap(), + ), + event.val, + ); + } + neo_vm_trace::TwistOpKind::Write => { + twist.store_if_lane( + event.cond, + neo_vm_trace::TwistId(event.twist_id), + event.addr, + event.val, + event.lane.unwrap(), + ); + } + } + } + + Ok(neo_vm_trace::StepMeta { + pc_after: self.step_i as u64, + opcode: 0, + }) + } +} + +pub fn ark_field_to_p3_goldilocks(v: &FpGoldilocks) -> p3_goldilocks::Goldilocks { + let original_u64 = v.into_bigint().0[0]; + let result = neo_math::F::from_u64(original_u64); + + // Assert that we can convert back and get the same element + let converted_back = FpGoldilocks::from(original_u64); + assert_eq!( + *v, converted_back, + "Field element conversion is not reversible" + ); + + result +} diff --git a/interleaving/starstream-interleaving-proof/src/opcode_dsl.rs b/interleaving/starstream-interleaving-proof/src/opcode_dsl.rs new file mode 100644 index 00000000..dbb2370e --- /dev/null +++ b/interleaving/starstream-interleaving-proof/src/opcode_dsl.rs @@ -0,0 +1,196 @@ +use crate::F; +use crate::circuit::MemoryTag; +use crate::memory::{Address, IVCMemory, IVCMemoryAllocated}; +use ark_ff::{AdditiveGroup as _, PrimeField as _}; +use ark_r1cs_std::{ + alloc::AllocVar as _, + fields::{FieldVar as _, fp::FpVar}, + prelude::Boolean, +}; +use ark_relations::gr1cs::{ConstraintSystemRef, SynthesisError}; +use std::convert::Infallible; 
+ +// a higher level DSL on top of arkworks and the MCC api +// +// in most cases the memory tracing logic mirrors the circuit synthesis logic. +// +// the idea of this trait is that we can then provide two backends +// +// one that works out-of-circuit, whose only purpose is to feed the memory +// tracer with the right values, and another backend that just re-uses those +// values for witness assignment, and emits the constraints for r1cs shape +// construction. +// +// NOTE: not all the circuit is currently using this yet +// NOTE: after we get all the opcodes to use this abstraction (which may require +// more changes), we may also replace the arkworks DSL entirely. +pub trait OpcodeDsl { + type Bool; + type Val: std::fmt::Debug; + type Error; + + fn zero(&self) -> Self::Val; + fn const_u64(&self, val: u64) -> Result; + fn add(&self, lhs: &Self::Val, rhs: &Self::Val) -> Result; + fn mul(&self, lhs: &Self::Val, rhs: &Self::Val) -> Result; + fn select( + &self, + cond: &Self::Bool, + t: &Self::Val, + f: &Self::Val, + ) -> Result; + fn read( + &mut self, + cond: &Self::Bool, + tag: MemoryTag, + addr: &Self::Val, + ) -> Result; + fn write( + &mut self, + cond: &Self::Bool, + tag: MemoryTag, + addr: &Self::Val, + val: &Self::Val, + ) -> Result<(), Self::Error>; +} + +pub struct OpcodeTraceDsl<'a, M> { + pub mb: &'a mut M, +} + +impl<'a, M: IVCMemory> OpcodeDsl for OpcodeTraceDsl<'a, M> { + type Bool = bool; + type Val = F; + type Error = Infallible; + + fn zero(&self) -> Self::Val { + F::ZERO + } + + fn const_u64(&self, val: u64) -> Result { + Ok(F::from(val)) + } + + fn add(&self, lhs: &Self::Val, rhs: &Self::Val) -> Result { + Ok(*lhs + *rhs) + } + + fn mul(&self, lhs: &Self::Val, rhs: &Self::Val) -> Result { + Ok(*lhs * *rhs) + } + + fn select( + &self, + cond: &Self::Bool, + t: &Self::Val, + f: &Self::Val, + ) -> Result { + Ok(if *cond { *t } else { *f }) + } + + fn read( + &mut self, + cond: &Self::Bool, + tag: MemoryTag, + addr: &Self::Val, + ) -> Result { + let addr_u64 = addr.into_bigint().0[0]; + let read = self.mb.conditional_read( + *cond, + Address { + tag: tag.into(), + addr: addr_u64, + }, + ); + Ok(read[0]) + } + + fn write( + &mut self, + cond: &Self::Bool, + tag: MemoryTag, + addr: &Self::Val, + val: &Self::Val, + ) -> Result<(), Self::Error> { + let addr_u64 = addr.into_bigint().0[0]; + self.mb.conditional_write( + *cond, + Address { + tag: tag.into(), + addr: addr_u64, + }, + vec![*val], + ); + Ok(()) + } +} + +pub struct OpcodeSynthDsl<'a, M> { + pub cs: ConstraintSystemRef, + pub rm: &'a mut M, +} + +impl<'a, M: IVCMemoryAllocated> OpcodeDsl for OpcodeSynthDsl<'a, M> { + type Bool = Boolean; + type Val = FpVar; + type Error = SynthesisError; + + fn zero(&self) -> Self::Val { + FpVar::zero() + } + + fn const_u64(&self, val: u64) -> Result { + FpVar::new_constant(self.cs.clone(), F::from(val)) + } + + fn add(&self, lhs: &Self::Val, rhs: &Self::Val) -> Result { + Ok(lhs + rhs) + } + + fn mul(&self, lhs: &Self::Val, rhs: &Self::Val) -> Result { + Ok(lhs * rhs) + } + + fn select( + &self, + cond: &Self::Bool, + t: &Self::Val, + f: &Self::Val, + ) -> Result { + cond.select(t, f) + } + + fn read( + &mut self, + cond: &Self::Bool, + tag: MemoryTag, + addr: &Self::Val, + ) -> Result { + let read = self.rm.conditional_read( + cond, + &Address { + tag: tag.allocate(self.cs.clone())?, + addr: addr.clone(), + }, + )?[0] + .clone(); + Ok(read) + } + + fn write( + &mut self, + cond: &Self::Bool, + tag: MemoryTag, + addr: &Self::Val, + val: &Self::Val, + ) -> Result<(), Self::Error> { + 
self.rm.conditional_write( + cond, + &Address { + tag: tag.allocate(self.cs.clone())?, + addr: addr.clone(), + }, + std::slice::from_ref(val), + ) + } +} diff --git a/interleaving/starstream-interleaving-proof/src/optional.rs b/interleaving/starstream-interleaving-proof/src/optional.rs new file mode 100644 index 00000000..ec992717 --- /dev/null +++ b/interleaving/starstream-interleaving-proof/src/optional.rs @@ -0,0 +1,96 @@ +use ark_ff::PrimeField; +use ark_r1cs_std::{ + GR1CSVar, + boolean::Boolean, + eq::EqGadget as _, + fields::{FieldVar, fp::FpVar}, +}; +use ark_relations::gr1cs::SynthesisError; + +#[derive(Copy, Clone, Debug, Default)] +pub struct OptionalF(F); + +impl OptionalF { + pub fn none() -> Self { + Self(F::ZERO) + } + + pub fn new(value: F) -> Self { + Self(value + F::ONE) + } + + pub fn from_option(value: Option) -> Self { + value.map(Self::new).unwrap_or_else(Self::none) + } + + pub fn encoded(self) -> F { + self.0 + } + + pub fn from_encoded(value: F) -> Self { + Self(value) + } + + pub fn to_option(self) -> Option { + if self.0 == F::ZERO { + None + } else { + Some(self.0 - F::ONE) + } + } + + pub fn decode_or_zero(self) -> F { + self.to_option().unwrap_or(F::ZERO) + } +} + +#[derive(Clone)] +pub struct OptionalFpVar(FpVar); + +impl OptionalFpVar { + pub fn new(value: FpVar) -> Self { + Self(value) + } + + pub fn from_pid(value: &FpVar) -> Self { + Self(value + FpVar::one()) + } + + pub fn encoded(&self) -> FpVar { + self.0.clone() + } + + pub fn is_some(&self) -> Result, SynthesisError> { + Ok(!self.0.is_zero()?) + } + + pub fn decode_or_zero(&self) -> Result, SynthesisError> { + let is_zero = self.0.is_zero()?; + let value = &self.0 - FpVar::one(); + is_zero.select(&FpVar::zero(), &value) + } + + pub fn conditional_enforce_eq_if_some( + &self, + switch: &Boolean, + value: &FpVar, + ) -> Result<(), SynthesisError> { + let is_some = self.is_some()?; + let decoded = self.decode_or_zero()?; + decoded.conditional_enforce_equal(value, &(switch & is_some))?; + Ok(()) + } + + pub fn select_encoded( + switch: &Boolean, + when_true: &OptionalFpVar, + when_false: &OptionalFpVar, + ) -> Result, SynthesisError> { + let selected = switch.select(&when_true.encoded(), &when_false.encoded())?; + Ok(OptionalFpVar::new(selected)) + } + + pub fn value(&self) -> Result { + self.0.value() + } +} diff --git a/interleaving/starstream-interleaving-proof/src/program_hash_gadget.rs b/interleaving/starstream-interleaving-proof/src/program_hash_gadget.rs new file mode 100644 index 00000000..075d80c3 --- /dev/null +++ b/interleaving/starstream-interleaving-proof/src/program_hash_gadget.rs @@ -0,0 +1,36 @@ +use crate::F; +use crate::opcode_dsl::{OpcodeDsl, OpcodeSynthDsl, OpcodeTraceDsl}; +use crate::{circuit::MemoryTag, memory::IVCMemory, memory::IVCMemoryAllocated}; +use ark_r1cs_std::fields::fp::FpVar; +use ark_relations::gr1cs::{ConstraintSystemRef, SynthesisError}; + +fn program_hash_ops( + dsl: &mut D, + read_cond: &D::Bool, + target: &D::Val, +) -> Result<[D::Val; 4], D::Error> { + let mut out = Vec::with_capacity(4); + let stride = dsl.const_u64(4)?; + for i in 0..4 { + let offset = dsl.const_u64(i as u64)?; + let addr = dsl.add(&dsl.mul(target, &stride)?, &offset)?; + let value = dsl.read(read_cond, MemoryTag::ProcessTable, &addr)?; + out.push(value); + } + Ok(out.try_into().expect("program hash length")) +} + +pub fn trace_program_hash_ops>(mb: &mut M, read_cond: bool, target: &F) -> [F; 4] { + let mut dsl = OpcodeTraceDsl { mb }; + program_hash_ops(&mut dsl, &read_cond, 
target).expect("trace program hash") +} + +pub fn program_hash_access_wires>( + cs: ConstraintSystemRef, + rm: &mut M, + read_cond: &ark_r1cs_std::prelude::Boolean, + target: &FpVar, +) -> Result<[FpVar; 4], SynthesisError> { + let mut dsl = OpcodeSynthDsl { cs, rm }; + program_hash_ops(&mut dsl, read_cond, target) +} diff --git a/interleaving/starstream-interleaving-proof/src/program_state.rs b/interleaving/starstream-interleaving-proof/src/program_state.rs new file mode 100644 index 00000000..372d399d --- /dev/null +++ b/interleaving/starstream-interleaving-proof/src/program_state.rs @@ -0,0 +1,326 @@ +use crate::F; +use crate::coroutine_args_gadget::coroutine_args_ops; +use crate::memory::{IVCMemory, IVCMemoryAllocated}; +use crate::memory_tags::MemoryTag; +use crate::opcode_dsl::{OpcodeSynthDsl, OpcodeTraceDsl}; +use crate::optional::{OptionalF, OptionalFpVar}; +use crate::switchboard::{MemSwitchboardBool, MemSwitchboardWires}; +use ark_ff::{AdditiveGroup, Field}; +use ark_r1cs_std::alloc::AllocVar; +use ark_r1cs_std::fields::{FieldVar, fp::FpVar}; +use ark_r1cs_std::prelude::Boolean; +use ark_relations::gr1cs::{ConstraintSystemRef, SynthesisError}; + +#[derive(Clone, Debug)] +pub struct ProgramState { + pub expected_input: OptionalF, + pub expected_resumer: OptionalF, + pub on_yield: bool, + pub yield_to: OptionalF, + pub activation: F, + pub init: F, + pub init_caller: F, + pub initialized: bool, + pub finalized: bool, + pub did_burn: bool, + pub ownership: OptionalF, // encoded optional process id +} + +/// IVC wires (state between steps) +#[derive(Clone)] +pub struct ProgramStateWires { + pub expected_input: OptionalFpVar, + pub expected_resumer: OptionalFpVar, + pub on_yield: Boolean, + pub yield_to: OptionalFpVar, + pub activation: FpVar, + pub init: FpVar, + pub init_caller: FpVar, + pub initialized: Boolean, + pub finalized: Boolean, + pub did_burn: Boolean, + pub ownership: OptionalFpVar, // encoded optional process id +} + +struct RawProgramState { + expected_input: V, + expected_resumer: V, + on_yield: V, + yield_to: V, + activation: V, + init: V, + init_caller: V, + initialized: V, + finalized: V, + did_burn: V, + ownership: V, +} + +fn program_state_read_ops( + dsl: &mut D, + switches: &crate::switchboard::MemSwitchboard, + addr: &D::Val, +) -> Result, D::Error> { + let expected_input = dsl.read(&switches.expected_input, MemoryTag::ExpectedInput, addr)?; + let expected_resumer = + dsl.read(&switches.expected_resumer, MemoryTag::ExpectedResumer, addr)?; + let on_yield = dsl.read(&switches.on_yield, MemoryTag::OnYield, addr)?; + let yield_to = dsl.read(&switches.yield_to, MemoryTag::YieldTo, addr)?; + let (activation, init) = coroutine_args_ops(dsl, &switches.activation, &switches.init, addr)?; + let init_caller = dsl.read(&switches.init_caller, MemoryTag::InitCaller, addr)?; + let initialized = dsl.read(&switches.initialized, MemoryTag::Initialized, addr)?; + let finalized = dsl.read(&switches.finalized, MemoryTag::Finalized, addr)?; + let did_burn = dsl.read(&switches.did_burn, MemoryTag::DidBurn, addr)?; + let ownership = dsl.read(&switches.ownership, MemoryTag::Ownership, addr)?; + + Ok(RawProgramState { + expected_input, + expected_resumer, + on_yield, + yield_to, + activation, + init, + init_caller, + initialized, + finalized, + did_burn, + ownership, + }) +} + +fn program_state_write_ops( + dsl: &mut D, + switches: &crate::switchboard::MemSwitchboard, + addr: &D::Val, + state: &RawProgramState, +) -> Result<(), D::Error> { + dsl.write( + &switches.expected_input, + 
MemoryTag::ExpectedInput, + addr, + &state.expected_input, + )?; + dsl.write( + &switches.expected_resumer, + MemoryTag::ExpectedResumer, + addr, + &state.expected_resumer, + )?; + dsl.write( + &switches.on_yield, + MemoryTag::OnYield, + addr, + &state.on_yield, + )?; + dsl.write( + &switches.yield_to, + MemoryTag::YieldTo, + addr, + &state.yield_to, + )?; + dsl.write( + &switches.activation, + MemoryTag::Activation, + addr, + &state.activation, + )?; + dsl.write(&switches.init, MemoryTag::Init, addr, &state.init)?; + dsl.write( + &switches.init_caller, + MemoryTag::InitCaller, + addr, + &state.init_caller, + )?; + dsl.write( + &switches.initialized, + MemoryTag::Initialized, + addr, + &state.initialized, + )?; + dsl.write( + &switches.finalized, + MemoryTag::Finalized, + addr, + &state.finalized, + )?; + dsl.write( + &switches.did_burn, + MemoryTag::DidBurn, + addr, + &state.did_burn, + )?; + dsl.write( + &switches.ownership, + MemoryTag::Ownership, + addr, + &state.ownership, + )?; + Ok(()) +} + +fn raw_from_state(state: &ProgramState) -> RawProgramState { + RawProgramState { + expected_input: state.expected_input.encoded(), + expected_resumer: state.expected_resumer.encoded(), + on_yield: F::from(state.on_yield), + yield_to: state.yield_to.encoded(), + activation: state.activation, + init: state.init, + init_caller: state.init_caller, + initialized: F::from(state.initialized), + finalized: F::from(state.finalized), + did_burn: F::from(state.did_burn), + ownership: state.ownership.encoded(), + } +} + +fn raw_from_wires(state: &ProgramStateWires) -> RawProgramState> { + RawProgramState { + expected_input: state.expected_input.encoded(), + expected_resumer: state.expected_resumer.encoded(), + on_yield: state.on_yield.clone().into(), + yield_to: state.yield_to.encoded(), + activation: state.activation.clone(), + init: state.init.clone(), + init_caller: state.init_caller.clone(), + initialized: state.initialized.clone().into(), + finalized: state.finalized.clone().into(), + did_burn: state.did_burn.clone().into(), + ownership: state.ownership.encoded(), + } +} + +impl ProgramStateWires { + pub fn from_write_values( + cs: ConstraintSystemRef, + write_values: &ProgramState, + ) -> Result { + Ok(ProgramStateWires { + expected_input: OptionalFpVar::new(FpVar::new_witness(cs.clone(), || { + Ok(write_values.expected_input.encoded()) + })?), + expected_resumer: OptionalFpVar::new(FpVar::new_witness(cs.clone(), || { + Ok(write_values.expected_resumer.encoded()) + })?), + on_yield: Boolean::new_witness(cs.clone(), || Ok(write_values.on_yield))?, + yield_to: OptionalFpVar::new(FpVar::new_witness(cs.clone(), || { + Ok(write_values.yield_to.encoded()) + })?), + activation: FpVar::new_witness(cs.clone(), || Ok(write_values.activation))?, + init: FpVar::new_witness(cs.clone(), || Ok(write_values.init))?, + init_caller: FpVar::new_witness(cs.clone(), || Ok(write_values.init_caller))?, + initialized: Boolean::new_witness(cs.clone(), || Ok(write_values.initialized))?, + finalized: Boolean::new_witness(cs.clone(), || Ok(write_values.finalized))?, + did_burn: Boolean::new_witness(cs.clone(), || Ok(write_values.did_burn))?, + ownership: OptionalFpVar::new(FpVar::new_witness(cs.clone(), || { + Ok(write_values.ownership.encoded()) + })?), + }) + } +} + +// Out-of-circuit write version. 
+pub fn trace_program_state_writes>( + mem: &mut M, + pid: u64, + state: &ProgramState, + switches: &MemSwitchboardBool, +) { + let raw = raw_from_state(state); + let mut dsl = OpcodeTraceDsl { mb: mem }; + let addr = F::from(pid); + program_state_write_ops(&mut dsl, switches, &addr, &raw).expect("trace program state writes"); +} + +// In-circuit write version. +pub fn program_state_write_wires>( + rm: &mut M, + cs: &ConstraintSystemRef, + address: FpVar, + state: &ProgramStateWires, + switches: &MemSwitchboardWires, +) -> Result<(), SynthesisError> { + let raw = raw_from_wires(state); + let mut dsl = OpcodeSynthDsl { cs: cs.clone(), rm }; + program_state_write_ops(&mut dsl, switches, &address, &raw)?; + Ok(()) +} + +// Out-of-circuit read version. +pub fn trace_program_state_reads>( + mem: &mut M, + pid: u64, + switches: &MemSwitchboardBool, +) -> ProgramState { + let mut dsl = OpcodeTraceDsl { mb: mem }; + let addr = F::from(pid); + let raw = program_state_read_ops(&mut dsl, switches, &addr).expect("trace program state"); + + ProgramState { + expected_input: OptionalF::from_encoded(raw.expected_input), + expected_resumer: OptionalF::from_encoded(raw.expected_resumer), + on_yield: raw.on_yield == F::ONE, + yield_to: OptionalF::from_encoded(raw.yield_to), + activation: raw.activation, + init: raw.init, + init_caller: raw.init_caller, + initialized: raw.initialized == F::ONE, + finalized: raw.finalized == F::ONE, + did_burn: raw.did_burn == F::ONE, + ownership: OptionalF::from_encoded(raw.ownership), + } +} + +pub fn program_state_read_wires>( + rm: &mut M, + cs: &ConstraintSystemRef, + address: FpVar, + switches: &MemSwitchboardWires, +) -> Result { + let mut dsl = OpcodeSynthDsl { cs: cs.clone(), rm }; + let raw = program_state_read_ops(&mut dsl, switches, &address)?; + + Ok(ProgramStateWires { + expected_input: OptionalFpVar::new(raw.expected_input), + expected_resumer: OptionalFpVar::new(raw.expected_resumer), + on_yield: raw.on_yield.is_one()?, + yield_to: OptionalFpVar::new(raw.yield_to), + activation: raw.activation, + init: raw.init, + init_caller: raw.init_caller, + initialized: raw.initialized.is_one()?, + finalized: raw.finalized.is_one()?, + did_burn: raw.did_burn.is_one()?, + ownership: OptionalFpVar::new(raw.ownership), + }) +} + +impl ProgramState { + pub fn dummy() -> Self { + Self { + finalized: false, + expected_input: OptionalF::none(), + expected_resumer: OptionalF::none(), + on_yield: false, + yield_to: OptionalF::none(), + activation: F::ZERO, + init: F::ZERO, + init_caller: F::ZERO, + initialized: false, + did_burn: false, + ownership: OptionalF::none(), + } + } + + pub fn debug_print(&self) { + tracing::debug!("expected_input={}", self.expected_input.encoded()); + tracing::debug!("expected_resumer={}", self.expected_resumer.encoded()); + tracing::debug!("on_yield={}", self.on_yield); + tracing::debug!("yield_to={}", self.yield_to.encoded()); + tracing::debug!("activation={}", self.activation); + tracing::debug!("init={}", self.init); + tracing::debug!("finalized={}", self.finalized); + tracing::debug!("did_burn={}", self.did_burn); + tracing::debug!("ownership={}", self.ownership.encoded()); + } +} diff --git a/interleaving/starstream-interleaving-proof/src/ref_arena_gadget.rs b/interleaving/starstream-interleaving-proof/src/ref_arena_gadget.rs new file mode 100644 index 00000000..0db682ee --- /dev/null +++ b/interleaving/starstream-interleaving-proof/src/ref_arena_gadget.rs @@ -0,0 +1,296 @@ +use crate::circuit::MemoryTag; +use crate::opcode_dsl::{OpcodeDsl, 
OpcodeSynthDsl, OpcodeTraceDsl}; +use crate::switchboard::{RefArenaSwitchboard, RefArenaSwitchboardWires}; +use crate::{ + F, LedgerOperation, + abi::{ArgName, OPCODE_ARG_COUNT}, + ledger_operation::{REF_GET_BATCH_SIZE, REF_PUSH_BATCH_SIZE, REF_WRITE_BATCH_SIZE}, + memory::{IVCMemory, IVCMemoryAllocated}, +}; +use ark_ff::{AdditiveGroup as _, PrimeField as _}; +use ark_r1cs_std::alloc::AllocVar as _; +use ark_r1cs_std::{ + GR1CSVar as _, + eq::EqGadget, + fields::{FieldVar as _, fp::FpVar}, + prelude::Boolean, +}; +use ark_relations::gr1cs::{ConstraintSystemRef, SynthesisError}; +use std::ops::Not; + +fn ref_sizes_access_ops( + dsl: &mut D, + write_cond: &D::Bool, + write_addr: &D::Val, + write_val: &D::Val, + read_cond: &D::Bool, + read_addr: &D::Val, +) -> Result { + dsl.write(write_cond, MemoryTag::RefSizes, write_addr, write_val)?; + dsl.read(read_cond, MemoryTag::RefSizes, read_addr) +} + +#[allow(clippy::too_many_arguments)] +fn ref_arena_access_ops( + dsl: &mut D, + read_cond: &D::Bool, + write_cond: &D::Bool, + write_is_push: &D::Bool, + push_vals: &[D::Val; REF_PUSH_BATCH_SIZE], + write_vals: &[D::Val; REF_WRITE_BATCH_SIZE], + ref_building_ptr: &D::Val, + val: &D::Val, + offset: &D::Val, +) -> Result<[D::Val; REF_GET_BATCH_SIZE], D::Error> { + let scale_get = dsl.const_u64(REF_GET_BATCH_SIZE as u64)?; + let offset_scaled_get = dsl.mul(offset, &scale_get)?; + let get_base_addr = dsl.add(val, &offset_scaled_get)?; + + let mut ref_arena_read_vec = Vec::with_capacity(REF_GET_BATCH_SIZE); + for i in 0..REF_GET_BATCH_SIZE { + let off = dsl.const_u64(i as u64)?; + let addr = dsl.add(&get_base_addr, &off)?; + let read = dsl.read(read_cond, MemoryTag::RefArena, &addr)?; + ref_arena_read_vec.push(read); + } + + let scale_write = dsl.const_u64(REF_WRITE_BATCH_SIZE as u64)?; + let offset_scaled_write = dsl.mul(offset, &scale_write)?; + let write_base_write = dsl.add(val, &offset_scaled_write)?; + let write_base_sel = dsl.select(write_is_push, ref_building_ptr, &write_base_write)?; + let zero = dsl.zero(); + let write_base = dsl.select(write_cond, &write_base_sel, &zero)?; + + for i in 0..REF_WRITE_BATCH_SIZE { + let off = dsl.const_u64(i as u64)?; + let addr = dsl.add(&write_base, &off)?; + let val_sel = dsl.select(write_is_push, &push_vals[i], &write_vals[i])?; + let val = dsl.select(write_cond, &val_sel, &zero)?; + dsl.write(write_cond, MemoryTag::RefArena, &addr, &val)?; + } + + let ref_arena_read: [D::Val; REF_GET_BATCH_SIZE] = ref_arena_read_vec + .try_into() + .expect("ref arena read batch length"); + + Ok(ref_arena_read) +} + +pub(crate) fn trace_ref_arena_ops>( + mb: &mut M, + ref_building_id: &mut F, + ref_building_offset: &mut F, + ref_building_remaining: &mut F, + switches: &RefArenaSwitchboard, + instr: &LedgerOperation, +) { + let mut ref_push_vals = std::array::from_fn(|_| F::ZERO); + let mut ref_write_vals = std::array::from_fn(|_| F::ZERO); + let ref_sizes_write = switches.ref_sizes_write; + let ref_sizes_read = switches.ref_sizes_read; + let ref_arena_read = switches.ref_arena_read; + let ref_arena_write = switches.ref_arena_write; + let write_is_push = switches.ref_arena_write_is_push; + + let mut ref_get_ref = F::ZERO; + let mut ref_get_offset = F::ZERO; + let mut ref_write_ref = F::ZERO; + let mut ref_write_offset = F::ZERO; + match instr { + LedgerOperation::NewRef { size, ret } => { + *ref_building_id = *ret; + *ref_building_offset = F::ZERO; + *ref_building_remaining = *size; + } + LedgerOperation::RefPush { vals } => { + ref_push_vals = *vals; + } + 
LedgerOperation::RefGet { + reff, + offset, + ret: _, + } => { + ref_get_ref = *reff; + ref_get_offset = *offset; + } + LedgerOperation::RefWrite { reff, offset, vals } => { + ref_write_ref = *reff; + ref_write_offset = *offset; + ref_write_vals = *vals; + } + _ => {} + }; + + let ref_sizes_ref_id = if ref_arena_read { + ref_get_ref + } else if ref_arena_write && !write_is_push { + ref_write_ref + } else { + F::ZERO + }; + + let mut dsl = OpcodeTraceDsl { mb }; + let _ = ref_sizes_access_ops( + &mut dsl, + &ref_sizes_write, + ref_building_id, + ref_building_remaining, + &ref_sizes_read, + &ref_sizes_ref_id, + ) + .expect("trace ref sizes access"); + + let op_val = if ref_arena_read { + ref_get_ref + } else if ref_arena_write && !write_is_push { + ref_write_ref + } else { + F::ZERO + }; + let op_offset = if ref_arena_read { + ref_get_offset + } else if ref_arena_write && !write_is_push { + ref_write_offset + } else { + F::ZERO + }; + + let push_ptr = *ref_building_id + *ref_building_offset; + let _ = ref_arena_access_ops( + &mut dsl, + &ref_arena_read, + &ref_arena_write, + &write_is_push, + &ref_push_vals, + &ref_write_vals, + &push_ptr, + &op_val, + &op_offset, + ) + .expect("trace ref arena access"); + + let remaining = ref_building_remaining.into_bigint().0[0] as usize; + + if ref_arena_write && write_is_push { + *ref_building_offset += F::from(REF_PUSH_BATCH_SIZE as u64); + *ref_building_remaining = F::from(remaining.saturating_sub(1) as u64); + } +} + +pub(crate) fn ref_arena_read_size>( + cs: ConstraintSystemRef, + rm: &mut M, + switches: &RefArenaSwitchboardWires, + opcode_args: &[FpVar; OPCODE_ARG_COUNT], + addr: &FpVar, +) -> Result, SynthesisError> { + let write_cond = switches.ref_sizes_write.clone(); + let write_addr = opcode_args[ArgName::Ret.idx()].clone(); + let write_val = opcode_args[ArgName::Size.idx()].clone(); + + let read_cond = switches.ref_sizes_read.clone(); + let read_addr = read_cond.select(addr, &FpVar::zero())?; + + let mut dsl = OpcodeSynthDsl { cs, rm }; + ref_sizes_access_ops( + &mut dsl, + &write_cond, + &write_addr, + &write_val, + &read_cond, + &read_addr, + ) +} + +#[allow(clippy::too_many_arguments)] +pub(crate) fn ref_arena_access_wires>( + cs: ConstraintSystemRef, + rm: &mut M, + switches: &RefArenaSwitchboardWires, + opcode_args: &[FpVar; OPCODE_ARG_COUNT], + ref_building_ptr: &FpVar, + ref_building_remaining: &FpVar, + val: &FpVar, + offset: &FpVar, + ref_size_read: &FpVar, +) -> Result<[FpVar; REF_GET_BATCH_SIZE], SynthesisError> { + let _ = ref_building_remaining; + + let ref_push_vals = [ + opcode_args[ArgName::PackedRef0.idx()].clone(), + opcode_args[ArgName::PackedRef1.idx()].clone(), + opcode_args[ArgName::PackedRef2.idx()].clone(), + opcode_args[ArgName::PackedRef3.idx()].clone(), + ]; + let ref_write_vals = [ + opcode_args[ArgName::PackedRef0.idx()].clone(), + opcode_args[ArgName::PackedRef2.idx()].clone(), + opcode_args[ArgName::PackedRef4.idx()].clone(), + opcode_args[ArgName::PackedRef5.idx()].clone(), + ]; + + let size_sel_get = switches + .ref_arena_read + .select(ref_size_read, &FpVar::zero())?; + let offset_sel_get = switches.ref_arena_read.select(offset, &FpVar::zero())?; + let one_if_on_get = switches + .ref_arena_read + .select(&FpVar::one(), &FpVar::zero())?; + let offset_plus_one_get = &offset_sel_get + one_if_on_get; + let diff_get = &size_sel_get - &offset_plus_one_get; + + range_check_u16(cs.clone(), &switches.ref_arena_read, &size_sel_get)?; + range_check_u16(cs.clone(), &switches.ref_arena_read, &offset_sel_get)?; + 
range_check_u16(cs.clone(), &switches.ref_arena_read, &diff_get)?; + + let write_check_cond = + &switches.ref_arena_write & switches.ref_arena_write_is_push.clone().not(); + + let size_sel_write = write_check_cond.select(ref_size_read, &FpVar::zero())?; + let offset_sel_write = write_check_cond.select(offset, &FpVar::zero())?; + let one_if_on_write = write_check_cond.select(&FpVar::one(), &FpVar::zero())?; + let offset_plus_one_write = &offset_sel_write + one_if_on_write; + let diff_write = &size_sel_write - &offset_plus_one_write; + + range_check_u16(cs.clone(), &write_check_cond, &size_sel_write)?; + range_check_u16(cs.clone(), &write_check_cond, &offset_sel_write)?; + range_check_u16(cs.clone(), &write_check_cond, &diff_write)?; + + let mut dsl = OpcodeSynthDsl { cs: cs.clone(), rm }; + + ref_arena_access_ops( + &mut dsl, + &switches.ref_arena_read, + &switches.ref_arena_write, + &switches.ref_arena_write_is_push, + &ref_push_vals, + &ref_write_vals, + ref_building_ptr, + val, + offset, + ) +} + +fn range_check_u16( + cs: ConstraintSystemRef, + switch: &Boolean, + value: &FpVar, +) -> Result<(), SynthesisError> { + let value_u64 = value.value().unwrap().into_bigint().as_ref()[0] & 0xFFFF; + let mut bits = Vec::with_capacity(16); + for i in 0..16 { + let bit = Boolean::new_witness(cs.clone(), || Ok(((value_u64 >> i) & 1) == 1))?; + bits.push(bit); + } + + let mut recomposed = FpVar::zero(); + for (i, bit) in bits.iter().enumerate() { + let coeff = FpVar::new_constant(cs.clone(), F::from(1u64 << i))?; + let term = bit.select(&coeff, &FpVar::zero())?; + recomposed += term; + } + + recomposed.conditional_enforce_equal(value, switch)?; + + Ok(()) +} diff --git a/interleaving/starstream-interleaving-proof/src/switchboard.rs b/interleaving/starstream-interleaving-proof/src/switchboard.rs new file mode 100644 index 00000000..1a98e5b4 --- /dev/null +++ b/interleaving/starstream-interleaving-proof/src/switchboard.rs @@ -0,0 +1,147 @@ +use crate::F; +use ark_r1cs_std::alloc::AllocVar; +use ark_r1cs_std::prelude::Boolean; +use ark_relations::gr1cs::{ConstraintSystemRef, SynthesisError}; + +#[derive(Clone, Debug, Default)] +pub struct RomSwitchboard { + pub read_is_utxo_curr: bool, + pub read_is_utxo_target: bool, + pub read_must_burn_curr: bool, + pub read_program_hash_target: bool, +} + +#[derive(Clone)] +pub struct RomSwitchboardWires { + pub read_is_utxo_curr: Boolean, + pub read_is_utxo_target: Boolean, + pub read_must_burn_curr: Boolean, + pub read_program_hash_target: Boolean, +} + +#[derive(Clone, Debug, Default)] +pub struct MemSwitchboard { + pub expected_input: B, + pub expected_resumer: B, + pub on_yield: B, + pub yield_to: B, + pub activation: B, + pub init: B, + pub init_caller: B, + pub initialized: B, + pub finalized: B, + pub did_burn: B, + pub ownership: B, +} + +pub type MemSwitchboardBool = MemSwitchboard; +pub type MemSwitchboardWires = MemSwitchboard>; + +#[derive(Clone, Debug, Default)] +pub struct HandlerSwitchboard { + pub read_interface: bool, + pub read_head: bool, + pub read_node: bool, + pub write_node: bool, + pub write_head: bool, +} + +#[derive(Clone, Debug, Default)] +pub struct RefArenaSwitchboard { + pub ref_sizes_write: bool, + pub ref_sizes_read: bool, + pub ref_arena_read: bool, + pub ref_arena_write: bool, + pub ref_arena_write_is_push: bool, +} + +#[derive(Clone)] +pub struct HandlerSwitchboardWires { + pub read_interface: Boolean, + pub read_head: Boolean, + pub read_node: Boolean, + pub write_node: Boolean, + pub write_head: Boolean, +} + +#[derive(Clone)] 
+pub struct RefArenaSwitchboardWires { + pub ref_sizes_write: Boolean, + pub ref_sizes_read: Boolean, + pub ref_arena_read: Boolean, + pub ref_arena_write: Boolean, + pub ref_arena_write_is_push: Boolean, +} + +impl MemSwitchboardWires { + pub fn allocate( + cs: ConstraintSystemRef, + switches: &MemSwitchboardBool, + ) -> Result { + Ok(Self { + expected_input: Boolean::new_witness(cs.clone(), || Ok(switches.expected_input))?, + expected_resumer: Boolean::new_witness(cs.clone(), || Ok(switches.expected_resumer))?, + on_yield: Boolean::new_witness(cs.clone(), || Ok(switches.on_yield))?, + yield_to: Boolean::new_witness(cs.clone(), || Ok(switches.yield_to))?, + activation: Boolean::new_witness(cs.clone(), || Ok(switches.activation))?, + init: Boolean::new_witness(cs.clone(), || Ok(switches.init))?, + init_caller: Boolean::new_witness(cs.clone(), || Ok(switches.init_caller))?, + initialized: Boolean::new_witness(cs.clone(), || Ok(switches.initialized))?, + finalized: Boolean::new_witness(cs.clone(), || Ok(switches.finalized))?, + did_burn: Boolean::new_witness(cs.clone(), || Ok(switches.did_burn))?, + ownership: Boolean::new_witness(cs.clone(), || Ok(switches.ownership))?, + }) + } +} + +impl RomSwitchboardWires { + pub fn allocate( + cs: ConstraintSystemRef, + switches: &RomSwitchboard, + ) -> Result { + Ok(Self { + read_is_utxo_curr: Boolean::new_witness(cs.clone(), || Ok(switches.read_is_utxo_curr))?, + read_is_utxo_target: Boolean::new_witness(cs.clone(), || { + Ok(switches.read_is_utxo_target) + })?, + read_must_burn_curr: Boolean::new_witness(cs.clone(), || { + Ok(switches.read_must_burn_curr) + })?, + read_program_hash_target: Boolean::new_witness(cs.clone(), || { + Ok(switches.read_program_hash_target) + })?, + }) + } +} + +impl HandlerSwitchboardWires { + pub fn allocate( + cs: ConstraintSystemRef, + switches: &HandlerSwitchboard, + ) -> Result { + Ok(Self { + read_interface: Boolean::new_witness(cs.clone(), || Ok(switches.read_interface))?, + read_head: Boolean::new_witness(cs.clone(), || Ok(switches.read_head))?, + read_node: Boolean::new_witness(cs.clone(), || Ok(switches.read_node))?, + write_node: Boolean::new_witness(cs.clone(), || Ok(switches.write_node))?, + write_head: Boolean::new_witness(cs.clone(), || Ok(switches.write_head))?, + }) + } +} + +impl RefArenaSwitchboardWires { + pub fn allocate( + cs: ConstraintSystemRef, + switches: &RefArenaSwitchboard, + ) -> Result { + Ok(Self { + ref_sizes_write: Boolean::new_witness(cs.clone(), || Ok(switches.ref_sizes_write))?, + ref_sizes_read: Boolean::new_witness(cs.clone(), || Ok(switches.ref_sizes_read))?, + ref_arena_read: Boolean::new_witness(cs.clone(), || Ok(switches.ref_arena_read))?, + ref_arena_write: Boolean::new_witness(cs.clone(), || Ok(switches.ref_arena_write))?, + ref_arena_write_is_push: Boolean::new_witness(cs, || { + Ok(switches.ref_arena_write_is_push) + })?, + }) + } +} diff --git a/interleaving/starstream-interleaving-spec/ARCHITECTURE.md b/interleaving/starstream-interleaving-spec/ARCHITECTURE.md new file mode 100644 index 00000000..3cc4e52c --- /dev/null +++ b/interleaving/starstream-interleaving-spec/ARCHITECTURE.md @@ -0,0 +1,1169 @@ +## About + +This package implements a standalone circuit for the Starstream interleaving proof. + +### Architecture + +Let's start by defining some context. A Starstream transaction describes +coordinated state transitions between multiple concurrent on-chain programs, +where each transition is analogous to consuming a utxo (a utxo is a one-shot +continuation). 
Each program is modelled as a coroutine, which is a resumable +function. Resuming a utxo is equal to spending it. Yielding from the utxo is +equivalent to creating a new utxo. + +The ledger state for a program (utxo) is described by: + +1. A coroutine state. Which includes a program counter (to know from where to +resume the execution), and the values of variables in the stack. +2. The value of the last yield. + +The programs are represented as WASM modules, although the actual ISA does +not matter here. A program can either do native WASM operations, which for the +purposes of the interleaving verification is just a blackbox, or it can interact +with other programs. + +A program is either: + +1. A coordination script. Which has no state persisted in the ledger. Since it's +stateless, the proof needs to also ensure that it runs fully. They can suspend +for calling into effect handlers, but this is just internal to the transaction. +2. A utxo, which has persistent state. + +Coordination scripts can call into utxos with their address, or other +coordination scripts (these get transient ids that are local to the +transaction). + +Coordination scripts calling into each other is equivalent to plain coroutine +calls. + +Yielding doesn't necessarily change control flow to the coordination script +that called resume, because the transaction may end before that, and the next +coordination script could be a different one. Also because we have algebraic +effect handlers, control flow may go to a coordination script that was deeper in +the call stack. + +To model this, each process keeps a `yield_to` pointer and an `on_yield` flag. +When a process yields, it sets `on_yield = true`. The next resumer records +`yield_to[process] = resumer` and clears `on_yield`. A yield then returns control +to `yield_to[process]`, not necessarily to the most recent resumer in the trace. + +As mentioned before, programs are modelled as WASM programs, both in the case +of coordination scripts and in the case of utxos. Inter-program communication is +expressed as WASM host (imported) function calls. To verify execution, we use a +*WASM zkVM*. When proving, the only thing we assume about host calls is that the +only memory modified by it is that expressed by the function signature. + +This means we can think of a program trace as a list of native operations with +interspersed black-box operations (which is effectively a lookup argument of +host call execution). + +From the program trace, we can use the zkVM to make a zero knowledge that claims +that. + +1. The ISA instructions were executed in accordance with the WASM ISA rules. +2. Host calls interact with the stack according to function types. + +A single proof does not claim that the values returned by host calls were +correct. + +In the case of lookup arguments for the optimizations (e.g. witnesses for +division, sorting), this can be extended by adding some verification of the +returned values. This spec doesn't focus on this specific case. + +The case that matters for this spec is the case where the host call is supposed +to trigger a change in ledger state and get some value from another program. + +### Transaction + +A transaction is made of: + +1. A list of inputs. An input is a "pointer" to an entry in the ledger state (a +utxo). The entry in the ledger state has the serialized coroutine state and a +verification key for a zk proof that gatekeeps spending. +2. A list of outputs (these are the new entries to the ledger). 
These also need +to have a reference to input they are spending, if any. +3. Proofs: + +- One proof per input. +- One proof per coordination script (and there is at least one). +- The verification keys (which include a hash of the wasm module) for all +coordination scripts. +- The entrypoint (an index into the coordination scripts verification keys). +- The transaction or interleaving proof. + +Verification involves: + +1. For each input (spent utxo), take the proof (in the tx) and the verification +key (in the ledger). +2. Find if there is an output referencing it (if not, just use null). +3. Run `proof.verify(input, verification_key, output)`. +4. If succesful, remove that input from the ledger. And insert the new output. +Remove the output from the list. +5. Outputs without an input can just be added to the ledger. +6. Verify each coordination script proof by itself. Potentially in parallel. +7. Verify the interleaving/transaction proof. Verification for this uses a +commitment to all the lookup tables used in the other proofs. + +**Note** that the above could be compressed into a single proof too by encoding +the above as a circuit (with PCD for the wasm proofs, probably?), this is the +unaggregated scheme. + +What follows is a high level description of the operations a program can do in a +transaction, that involve interacting with the environment. + +For a more specific breakdown of the semantics there is a description +[here](SEMANTICS.md). + +### Shared + +#### resuming + +- **resume(f: Continuation, value: Value) -> (Value, Continuation)** + +Changes control flow from the current program to the utxo. The `value` argument +needs to match the return value of the last `yield` in that program. + +It's important that you get a handle too when resumed, since that allows +basically RPC, by allowing the utxo or coordination script to answer back with +an answer for a request. This means an effect handlers involves two resumes in a +row in a caller -> handler -> caller flow. + +When performed from a utxo, these don't create a utxo entry. It's expected that +effect handlers are atomic in the transaction. + +The circuit for **resume** needs to take care of: + +1. The Continuation returned matches the previous program (the previous opcode +in the witness was a resume from that program). +2. The next opcode in the witness trace has a vm id matching `f`. +3. The next opcode for the resumed utxo (which could be yield or resume) has a +return value that matches the input. +4. Utxo's can't resume utxos. +5. Maybe check that continuations are linear/one-shot (by enforcing a nonce on +the continuation). Not sure if this is needed or utxos can just take care of it. + +#### program hash (coordination script attestation) + +- **program_hash(f: Continuation) -> ProgramId** + +Get the hash of the program with the given id. This can be used to attest the +caller after resumption. For example, a method that only wants to be called by a +particular coordination script. + +** NOTE: ** that using this for attestation is fundamentally the same as lookup +argument. That is, instead of asking the program id and asserting a fixed one, +you could just ask for a proof and verify it with the program's key. + +The difference is that this allows "batching". It also has the flexibility of +allowing indirection to have a list of allowed callers, for example. 
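+
+As a hypothetical shape for this after lowering, a utxo method that only wants
+to be driven by known coordination scripts could look roughly like the sketch
+below. The names and signatures (`Continuation`, `ProgramId`, the
+`program_hash` host call injected as a closure, `ALLOWED_CALLERS`) are
+illustrative stand-ins, not the real ABI; the point is only that attestation
+reduces to comparing `program_hash(caller)` against one or more expected
+program ids.
+
+```rust
+// Sketch only; every name here is a placeholder for the host calls described above.
+type Continuation = u64;
+type ProgramId = [u8; 32];
+
+// e.g. two known coordination scripts (illustrative constants)
+const ALLOWED_CALLERS: [ProgramId; 2] = [[1u8; 32], [2u8; 32]];
+
+fn handle_next_request(
+    yielded: (u64, Continuation),                      // (value, resumer) returned by yield/resume
+    program_hash: impl Fn(Continuation) -> ProgramId,  // the host call, injected for the sketch
+) -> Result<u64, &'static str> {
+    let (request, caller) = yielded;
+    // Attest the resumer before acting on the request.
+    let caller_id = program_hash(caller);
+    if !ALLOWED_CALLERS.contains(&caller_id) {
+        return Err("caller is not an allowed coordination script");
+    }
+    Ok(request)
+}
+```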
+ +In the proof context, this is essentially a lookup into a "process table", that +has one entry per wasm program in the transaction, and it stores a hash of the +wasm module. To verify the transaction this table has to be used as the public +instance for the respective wasm proof. + +#### Yield + +- **yield() -> (Value, Continuation)** + +Pause execution of the current program and move control flow to the previous +coordination script if any. If not, when called from a utxo, this may create a +new transaction output, with the PC = current pc and the coroutine state with +the current variables. + +The difference with `resume` is that it doesn't take a continuation as a target. + +Because of this, only `yield` can be used as the last operation for a utxo in a +transaction. + +#### Effect handlers + +- **get_handler(interface_id) -> Continuation** + +Gets the last handler installed for a specific interface (effect) in the current "call stack". + +This can be used in combination with `resume` to perform effects. + +#### Coordination script + +##### new utxo + +- **new(utxo: UtxoProgramHash, ..args) -> Continuation** + +Creates a new entry in the utxo set in the ledger state. It registers the +`ProgramId`, which acts as a verification key, and sets the initial coroutine +state. `Continuation` is basically the address. + +##### new coordination script (spawn) + +- **new_coord(coord: CoordScriptHash, ..args) -> Continuation** + +Register a new coordination script in this transaction, in a cold state (don't +start running it). It can then be called with `resume`. The `args` are the input +to the coordination script. + +When doing the non-zk mode run, this doesn't need to do anything except allocate +a new id for this vm and recording the inputs. But it also would be possible to +allocate it and write the inputs to the initial stack. + +In the interleaving circuit this is similar. This generates a new transient id +for this wasm vm, which is used as the continuation id. Then it stores the input +in memory. When checking the aggregated proofs, the verifier needs to use the +same inputs to verify the coordination script wasm proof. + +##### effect handlers + +- **install_handler(interface: InterfaceId)** + +Registers the current coroutine as the handler for a specific interface. `get_handler` then will return the id of the current coroutine. Installing implies pushing to a stack. If another coroutine registers for the same interface, then it takes priority for `get_handler`. + +- **uninstall_handler(interface: InterfaceId)** + +De-register the current coroutine as the handler for a specific interface. + +#### Utxo + +##### Burn + +- **burn()** + +Explicit drop in the utxo's program. This removes the program (and the coroutine +state) from the ledger state. It's equivalent to a coroutine return. + +##### Tokens + +- **bind(owner: Continuation)** +- **unbind(token: Continuation)** +- **is_owned_by(token: Continuation, utxo: Continuation) -> bool** +- **tokens(utxo: Continuation) -> Stream** + +Relational arguments. The ledger state has to keep relations of inclusion, where +tokens can be included in utxos. + +This of these as the ledger having an sql table of utxo relations (pairs). + +**bind** requires the token to be unbound, and in that case, it binds a +continuation to its caller (which has to be a utxo). + +**unbind** this has to be called from the owner's context. The token has to be +owned by this utxo. + +**is_owned_by** lookup check into the ownership table. + +**tokens** the other lookup. 
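+
+As a rough out-of-circuit model of this relation (names like `OwnershipTable`
+and `Pid` are illustrative, and in the actual proof these maps are realized as
+memory/lookup arguments rather than hash maps):
+
+```rust
+use std::collections::HashMap;
+
+type Pid = u64; // transaction-local process id (utxo or token)
+
+#[derive(Default)]
+struct OwnershipTable {
+    // token -> current owner, mirroring the ownership map in the spec
+    owner: HashMap<Pid, Pid>,
+}
+
+impl OwnershipTable {
+    /// `bind(owner)`, called from the token's context: the token must be unbound.
+    fn bind(&mut self, token: Pid, caller_utxo: Pid) -> Result<(), &'static str> {
+        if self.owner.contains_key(&token) {
+            return Err("token is already bound");
+        }
+        self.owner.insert(token, caller_utxo);
+        Ok(())
+    }
+
+    /// `unbind(token)`, which must be called from the owning utxo's context.
+    fn unbind(&mut self, token: Pid, caller_utxo: Pid) -> Result<(), &'static str> {
+        match self.owner.get(&token) {
+            Some(owner) if *owner == caller_utxo => {
+                self.owner.remove(&token);
+                Ok(())
+            }
+            _ => Err("caller does not own this token"),
+        }
+    }
+
+    /// `is_owned_by(token, utxo)`: a plain lookup into the relation.
+    fn is_owned_by(&self, token: Pid, utxo: Pid) -> bool {
+        self.owner.get(&token) == Some(&utxo)
+    }
+
+    /// `tokens(utxo)`: the reverse lookup over the same pairs.
+    fn tokens(&self, utxo: Pid) -> impl Iterator<Item = Pid> + '_ {
+        self.owner
+            .iter()
+            .filter(move |(_, o)| **o == utxo)
+            .map(|(t, _)| *t)
+    }
+}
+```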
+ +Note that `Continuation` implies that the tokens are included as inputs to the +transaction. They are not blockchain id's. + +You can think of tokens as being part of the utxo storage, but this simplifies +the management of ids. + +### Proving the interleaving + +All the operations that use the ledger state are communication operations. And +can be thought of as memory operations. Taking inspiration from offline memory +checking (and Nebula), we can bind each proof to an incremental commitment to +all the ledger operations in the current transaction. + +Let's say we have a coordination script, and after execution we get a trace like: + +``` +wasm_op_x +r1 <- call resume utxo_id v1 +wasm_op_y +r2 <- call resume utxo_id v2 +wasm_op_z +``` + +We use the zkVM and we get a proof of that execution. And also the proof is +bound to an incremental commitment `C_coord_comm := Commit(resume, v2, r2) + +Commit(resume, v1, r1)`. + +The straightforward construction for this is by using iterative hashing, for +example with Poseidon. For example: `H(, H(, +)`. + +To reduce the amount of hashing required, a more efficient construction would be +to combine a hash function with a vector commitment scheme, reducing the hashing +needed to one per operation. + +Note that we also need to include a tag for the operation in the commitment, +since that fixes the order of operations. + +Now let's say we have the trace of the utxo that gets resumed. It forms a +dual or complement with the coordination script trace. Where a yield is dual +to resume: + +``` +wasm_op_a +z1 <- call yield y1 +wasm_op_b +z2 <- call yield y2 +wasm_op_c +``` + +Then we bind the zkVM wasm proof to a different commitment: + +`C_utxo_comm := Commit(yield, y2, z2) + Commit(resume, y1, y1)` + +Then we can generate a new trace with the _actual_ interleaving defined by +the functional dependency, by combining both executions into the right order +of interleaving: + +``` +resume utxo:utxo_id in:v1 out:r1 +yield utxo:utxo_id in:y1 out:z1 +resume utxo:utxo_id in:v2 out:r2 +yield utxo:utxo_id in:y2 out:z2 +``` + +Then we need to prove the following things: + +1. The exchange of messages match. + +In this case this means that all of these hold: + + - `v1 == z1` + - `r1 == y1` + - `v2 == z2` + - `r2 == y2` + +2. The exchanged values match the ones in the individual original traces. + +For this we get a single proof, and two commitments: + +- `C_coord_coom'` +- `C_utxo_coom'` + +Then the verifier can check that the commitments match the ones from the +original proofs. This also enforces the same order of operations (and values) in +both (or all n in general) proofs. + +3. The order matches the ids. That is, a resume to a certain utxo_id must match +the id of the utxo that yields in the next step. + +*Note*: The interleaving proof doesn't need to know about the coroutine states, +just about the values of yield and resume. + +### Example + +As a more general example, let's say we have two coordination scripts and two utxos. 
+ +``` +fn coord1(input: Data, u1: Utxo1, u2: Utxo2) { + let v_01 = f(input); + let v_02 = resume u1 v_01; + + let v_03 = g(v_03); + + let v_04 = coord2(v_03); + + let v_05 = h(v_04); + + let v_06 = resume u_2 v_05; + resume u2 nil; + + return; +} + +fn coord2(u1: Utxo1, v_10: u32) { + let v_11 = f(v_10); + let v_12 = resume u1; + return v_12; +} + +utxo Utxo1 { + main { + // current state + yield; + + yield v_20; + yield v_21; + } +} + +utxo Utxo2 { + main { + // current state + yield; + + let v_31 = yield v_30; + return; + } +} +``` + +The flow of execution to proving then looks something like this: + +Note: WASM is an arbitrary trace of wasm opcodes from the corresponding program. + +![img](graph.png) + +## Proving algebraic effects + +### Background + +Algebraic effect handlers involve two things: + +**The ability to declare which effects can be performed by a function.** + +For example: + +```rust +fn pure(x: u32) : u32 / {} = { + x + 1 +} +``` + +```rust +fn effectful(x: u32) : u32 / { IO } = { + do print("x: " ++ x) + + pure(x) +} +``` + +In their simpler form, the effects of a function are the union of all the +interfaces used in its body. In this case we can imagine that IO is an interface +that allows printing: + +```ts +interface IO { + print(x: String): () +} +``` + +**The ability to define handlers for effects.** + +```rust +fn pure2(x: u32) : (u32, String) / {} { + let mut logs = ""; + + try { + effectful(x) + } + with IO { + def print(s) = { + logs += s; + logs += "/n"; + + resume(()); + } + } +} +``` + +In a way, interfaces define a DSL of allowed operations, and handlers give +meaning to those operations by running them. + +Depending on the implementation, we can distinguish between a few types of +handlers: + +1. Single tail-resumption. + +```python +with Interface { + def op(x: u32): u32 { + let y = x+1 + resume(y) + + # nothing happens here + } +} +``` + +There are basically equivalent to function calls. In an environment with shared +memory, these can be implemented as closures. + +In our case, since the continuation could (will?) be running in a different VM +instance, instead we need to think of this more like a channel, where we use a +host call to pass the data from one VM to the other. + +The important thing is that control flow doesn't need to go back to the handler. + + +2. Single non-tail-resumption. + +```python +with Interface { + def op(x: u32): u32 { + let now = Time.now(); + + let y = x+1 + resume(y) + + print(now.elapsed()); # we eventually come back here after the continuation finishes. + } +} +``` + +The tricky part about these is that the handler may be invoked again before +reaching the "exit code". + +There are at least two ways of handling that. + +One is to put the code after resume in a stack, potentially as a closure. Then +before resuming, the continuation code gets pushed into a stack (in the function +stack). + +The other way is to make the handler run in its own VM, and just spawn a new one +(with its own proof) per call to the handler. + +3. Storing the continuation + +```python +let queue = emptyQueue(); +try { + f() + + while let Some(k) = queue.pop() { + k() + } +} +with Interface { + def op(x: u32): u32 { + queue.push(lambda: resume(x + 1)) + } +} +``` + +This can be compiled into a local closure (can only be called from the same +program), so it's not different from just executing the resumption inline. 
+ +The issues with this are more about: + +- linearity: probably want to resume every program at least once (so the queue +has to be linear), and not allow resuming twice (since probably we don't want +this feature) +- static checking of captures maybe, since the call to `k` in the `try` may also +perform effects. It also shouldn't be possible to return the boxed closure. + +4. Non resumptive + +```python +with Interface { + def op(x: u32): never { + print(x + 1) + } +} +``` + +This is not necessarily difficult to implement, and it needs to happen at the +last yield of a utxo in the transaction anyway, since the resume will happen in +the next transaction. + +We may not want to allow defining these though, and enshrine `yield` as a +special effect. + +5. Multi-shot + +It's still undecided whether we want to have this feature (and what would be the semantics + +### Proving + +The cases outlined above can be proved with the architecture outlined in the +previous section without too many changes. The main constraint that we have +is that we can't share memory trivially, which also means we can't just send +closures back and forth. What we can pass back and forth however is the id +of a VM instance (a coroutine). Note that these can be just transaction-local +identifiers, like an index into a contiguous memory/table. Program identifier +hashes can be stored in this table and verified when necessary, but the main +thing we care about here is about interleaving consistency. + +The general outline follows. + +To handle effects, we introduce a scoped handler installation mechanism. +A coordination script can install a handler for a specific interface, identified +by its hash (`interface_id`). This installation is scoped to the execution of the +coordination script. + +The primary operations are: +- `install_handler(interface_id)`: Installs the current program as the handler +for the given interface. This is only callable from a coordination script, since +utxos can't call other utxos, they can't really wrap things. And utxo effects +can only be invoked as named handlers. +- `uninstall_handler(interface_id)`: Uninstalls the handler for the given interface. +- `get_handler_for(interface_id)`: Retrieves the currently installed handler for the given interface. This is a shared operation, since coordination scripts can perform effects. + +For example, a coordination script would do: + +```rust +fn coord(k: Continuation) { + starstream::install_handler(MyInterface::hash); + + starstream::resume(k); + + starstream::uninstall_handler(MyInterface::hash); +} +``` + +And a UTXO would use it like this: + +```rust +fn utxo() { + let k = starstream::get_handler_for(MyInterface::hash); // fails if not installed + + starstream::resume(k, MyInterface::foo); +} +``` + +This design is arguably simpler for codegen and for dynamic imports, as it +provides a clearer ABI than implicit capability passing. + +Invoking a handler is _resuming_ a specific coroutine (retrieved via `get_handler_for`). +The operation to perform can be passed as an argument encoded as a tagged union. +The tag doesn't have to be a small integer like it's usually done with enums, it +can be for example a hash that identifies the operation. + +Installing a handler is conceptually: + +1. Calling an operation to register a handler for a certain interface (the whole interface). +2. Setup a trampoline-like loop to drive the handlers. Note that if the +operation is not supported we can just flag an error here that aborts the +transaction. 
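+
+A minimal sketch of these two steps follows. This is not the real ABI: `Host`
+stands in for the starstream host calls, `Request` for the tagged union used to
+encode handler operations (its tag could just as well be an operation hash),
+and decoding a request out of the raw resumed value/ref is elided.
+
+```rust
+type Pid = u64;
+type Value = u64;
+
+/// Tagged request sent to the handler by the wrapped program.
+enum Request {
+    Op(Value),
+    Done(Value),
+    Unknown,
+}
+
+/// Stand-in for the host calls available to a coordination script.
+trait Host {
+    fn install_handler(&mut self, interface: u64);
+    fn uninstall_handler(&mut self, interface: u64);
+    /// resume(target, value) -> (request, resumer), mirroring the shared resume opcode.
+    fn resume(&mut self, target: Pid, value: Value) -> (Request, Pid);
+}
+
+fn with_handler<H: Host>(host: &mut H, interface: u64, wrapped: Pid) -> Value {
+    // 1. Register this coroutine as the handler for `interface`.
+    host.install_handler(interface);
+
+    // Drive the wrapped program; it reaches us via get_handler_for + resume.
+    let (mut request, mut requester) = host.resume(wrapped, 0);
+
+    // 2. Trampoline loop: keep serving requests until the wrapped program finishes.
+    let result = loop {
+        match request {
+            Request::Op(x) => {
+                // Tail-resumptive handler body: compute an answer and hand control back.
+                let answer = x + 1;
+                let (next, from) = host.resume(requester, answer);
+                request = next;
+                requester = from;
+            }
+            Request::Done(v) => break v,
+            // Unsupported operation: flag an error that aborts the transaction.
+            Request::Unknown => panic!("unsupported operation for this interface"),
+        }
+    };
+
+    host.uninstall_handler(interface);
+    result
+}
+```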
+ +### Tail-resumptive basic code lowering example: + +![img](effect-handlers-codegen-simple.png) + +*Note:* matching colors are for the equality constraints that need to be +enforced by the proof, or to identify the coroutine id. + +### Non tail-resumptive example: + +For non-tail resumptive effects, the main difference is that a stack (heap +memory) is needed to keep track of the "exit calls" to run after resumptions. + +For example, the script could look like this: + +**Note** that this also technically stores a continuation. + +![img](effect-handlers-codegen-script-non-tail.png) + +### Alternative Handler Implementation: Implicit Capability Passing + +An alternative design is to use implicit capability passing. In this model, each +effectful function receives a dictionary mapping effects to coroutines. +This can be just encoded through parameter passing. So a function with type: +`() -> () / { singleton_effect }` is compiled into a function of (wasm) type +`(k: Continuation) -> ()`. + +This is most likely simpler to prove, but harder to do codegen for. The tradeoff is +whether we want to spend more time on codegen, or make the interleaving proof +more complex by keeping track of this dictionary. + +The decision here comes more to ABI, codegen complexity, whether we need dynamic +loading of scripts, and how much more complex is the circuit. + +## Concurrency + channels + +First thing to note here is that effect handlers are well studied for +implementing user-defined concurrency primitives. + +Some references (for something like green threads): + +https://kcsrk.info/ocaml/multicore/2015/05/20/effects-multicore/ + +Note that a fiber is not really that much different from a sandboxed wasm +function, except maybe for the overhead. Capturing a fiber by a pointer is not +any different than just having an id for each vm and storing that. + +There is also effekt: + +https://effekt-lang.org/docs/casestudies/scheduler + +Implementing channels on top of that is not necessarily that far-fetched. + +See for example: + +https://tinyurl.com/4rsf6738 + +But a more sophisticated system can also be implemented by also implementing +things like signals. + +The argument here is that algebraic effect handlers can be used to implement a +user-defined scheduler for multi-threading (and there are libraries that do this +in ocaml, scala, effekt, and probably others), so why hardcode one into the +ledger instead of just providing the primitives for that (one-shot +continuations). + +Especially considering that with zk we can just hide it from the ledger just by +encoding the control flow and communication rules into the interleaving circuit +(which the ledger knows about, but it's an opaque verification key required to +accept a transaction). That also makes it easier to port Starstream to different +ledgers. + +The only constraint is that effect handlers only give you cooperative +multithreading, since preemption requires actual hardware interrupts. But this +probably doesn't matter for our use-case, since we actually want utxos to run +until their predefined points, instead of arbitrarily getting stuck in +inconvenient states. + +An example (treat as pseudocode) of how this would work in our case: + +** DISCLAIMER: ** I'm going to use a mix of low level and high level syntax to +try to make things less magic. I'm going to use the high-level syntax for effect +handler definitions (see previous section for the lowering to the ISA). But a +lower-level syntax for coordination script handling, wherever needed. 
+ +```typescript +type EventId = u64; +type ChannelId = EventId; + +// a green threads scheduler with conditions/signals +interface Scheduler { + fn suspend(); + fn wait_on(EventId); + fn signal(EventId); + fn schedule(Continuation); +} + +interface Channel { + fn new_chan(): ChannelId; + fn send(ChannelId, Any); + fn recv(ChannelId): Any; +} +``` + +```rust +mod concurrent { + script { + // actual high-level definition + fn with_threads(f: () => () / { Scheduler }); + + // lower level definition + fn with_threads(f: Continuation) { + let queue = empty_queue(); + let conditions = empty_map(); + + queue.insert(|| f.resume()); + + // which lowers to: + // queue.insert(|| starstream::resume(f)); + + while let Some(next) = queue.pop() { + try { + next(); + } + with Scheduler { + fn suspend() { + // reminder, this is a closure that just captures a "pointer" to a + // wasm sandbox (continuation or fiber if you want), because of + // this, it can be implemented as a normal closure, since it + // basically just captures an int. + + // this is a local capture, it can't be sent to other vm (since that + // requires shared memory). + // + // in practice we may also not allow returning even in the same + // module (since it shouldn't be resumed without installing the + // handler). + // + // check the previous section for the low level representation. + + // NOTE that it's not **possible** to make a transaction without + // resuming each task that calls `suspend` as long as this scheduler + // is used. + + // That's because the coordination script doesn't end in that case, + // so it's not possible to make a proof. + queue.push(|| resume()); + } + fn wait_on(event) { + conditions.insert(event, || resume()); + } + fn signal(event) { + if let Some(k) = conditions.remove(event) { + queue.insert(k); + } + + resume(); + } + fn schedule(k) { + queue.insert(k); + + resume(); + } + } + } + } + + // actual high-level definition + // + // read this as: + // + // 1. This function receives a coroutine that requires a channel handler, + // and provides one (since otherwise it would be part of the effects of this + // function). + // + // 2. This function requires the Scheduler capability. + fn with_channels(f: () => () / { Channel }) / { Scheduler }; + + // low-level representation + fn with_channels(f: Continuation, threads_handler: Continuation) { + let channels: Map> = empty_map(); + let channel_id = 0; + + try { + f.resume(); + + // which compiles to: + + // starstream::install_handler(Scheduler::hash) + // starstream::resume(f) + // ... + // handler code + // ... + // starstream::uninstall_handler(Scheduler::hash) + } + with Channel { + fn new_chan(): ChannelId { + channels.insert(channel_id, empty_queue()); + + channel_id += 1; + } + fn send(channel_id, msg) { + channels[channel_id].push(msg); + do signal(channel_id); + // low level: + // resume(threads_handler, Scheduler::Signal(channel_id)) + + do suspend(); + + // low level: + // resume(threads_handler, Scheduler::Suspend) + } + fn recv(channel_id): Any { + if let Some(msg) = channels[channel_id].pop() { + resume(msg); + } + else { + do wait_on(channel_id); + + let msg = channels[channel_id].pop().unwrap() + + resume(msg); + } + } + } + } + } +} +``` + +### Usage + +```rust +data Action = Add Int | End + +// a utxo that can be created by users under some condition (checked at insert), +// and that allows a collector (probably under some other condition) to iterate +// on these actions and fold them into an accumulator. 
+// +// in this example, actions are just numbers, and the accumulator just adds them +// together +// +// while this models something like a stream, it's implemented on top of less +// restrictive channels instead. +// +// the channel is used as an mpsc, there can be multiple instances of this +// contract writing to the same channel in this transaction, and there is only +// one reader (the accumulator). +utxo Streamable { + storage { + // SIDE NOTE: this doesn't need to be an actual list + // + // it could be a vector commitment and we could inject proofs of containment + // maybe + // + // but for now just assume it's an actual list in memory + actions: List + } + + main { + yield with Stream { + fn process(count: u32, channel_id: ChannelId) / { Channel } { + // assume that take also removes from the list + for action in storage.actions.take(count) { + do send(channel_id, storage.action); + } + + if storage.actions.is_empty() { + starstream::burn(); + } + } + + fn insert(action: Action) { + // check some permission probably + self.actions.push(action); + } + } + } +} +``` + +```rust +utxo Accumulator { + storage { + accum: Int + } + + main { + yield with Consumer { + fn process(channel_id: ChannelId) / { Channel } { + loop { + let action = do recv(channel_id); + + match action { + Add(i) => { + storage.accum += i + }, + End => { + break; + } + } + } + } + } + } +} +``` + +```rust +mod consumer { + script { + fn consumer_script(utxo: Consumer, chan: ChannelId) / { Channel } { + let (utxo, chan) = input; + utxo.process(chan); + } + } +} +``` + +```rust +mod streamable { + import consumer; + + script { + fn push_action(utxo: Streamable, action: Action) { + utxo.insert(action); + } + + fn process_10(utxo: Streamable, chan: ChannelId) { + utxo.process(10, chan); + } + + fn aggregate_on(consumer: Consumer) / { Channel, Scheduler } { + let chan = do new_chan(); + + let consumer_coord = starstream::new_coord(consumer_script, consumer, chan); + + do schedule(consumer_coord); + // or just take a list as a parameter + for utxo in Streamable { + let process_10_coord = starstream::new_coord(process_10, utxo, chan); + + do schedule(process_10_coord); + } + + // stop the consumer + do send(chan, Action::End); + } + } +} +``` + + +Putting everything together: + +```rust +script { + import concurrent; + import streamable; + + fn main(consumer: Consumer) { + with_threads { // here the required effect set is empty, so this can be run + with_channels { // with_channel requires the Scheduler capability + aggregate_on(consumer); // aggregate_on requires the Channel and Scheduler capabilities + } + } + } +} +``` +But what does the above compile to? + +With builtin scoped handlers, it can be compiled into just + +```rust +let aggregation_script = starstream::new_coord(aggregate_on, consumer); +let channel_handler = starstream::new_coord(concurrent::with_channels, aggregation_script); +let thread_handler = starstream::new_coord(concurrent::with_threads, channel_handler); + +resume(thread_handler, ()); +``` + +Each script will wrap things with a handler before resuming the received +continuation, so by the time an effect is needed then there would be one in +scope. 
+ +In the implicit capability passing style, we would need to instead have a shim +to properly inject the outer handler to the wrapped coroutine: + +```rust +script { + import concurrent; + import streamable; + + // ths compiler should generate this shim from the structure above (which may + // be complex, not sure) + fn aggregate_on_shim(consumer: Consumer) { + // suspends and wait for `resume`. + // this effectively simulates currying or partial application. + + // the order here matters + let (scheduler_handler, _) = starstream::yield(); + let (channel_handler, _) = starstream::yield(); + + // remember, we need to pass handlers as implicit capabilities + // but the actual function signature just takes a single argument + streamable::aggregate_on(consumer, channel_handler, scheduler_handler); + } + + // or main + fn main(consumer: Consumer) { + // we partially apply consumer here + let aggregation_script = starstream::new_coord(aggregate_on_shim, consumer); + let channel_handler = starstream::new_coord(concurrent::with_channels, aggregation_script); + let thread_handler = starstream::new_coord(concurrent::with_threads, channel_handler); + + // with_channels takes a continuation that *only* requires the channel + // capability, so we need to bind this here. + // + // this is similar to effekt, although it may be implemented differently + // + // we don't need to pass `channel_handler` because channel_handler does + // that. + aggregation_script.resume(thread_handler); + + // remember that new_coord doesn't run anything, it just gives an id to a + // new instance, and binds (curry) the arguments. + + // you can also think that every coordination script just has a yield as the + // first instruction. + resume(thread_handler, ()); + + // actual syntax: + // thread_handler.resume(()); + } +} +``` + +## Attestation + +Interfaces are generally not enough to ensure safety as long as two or more +independent processes are involved. Even if the interface is implemented +"correctly", there are details that are just not possible to express without +having some way of doing attestation (or maybe contracts?). Of course in some case it may be enough to just check a signature, or have an embedded proof of some sort, but if anyone can interact with a contract there needs to be at least some way of asserting that they run the proper protocol. + +This is not unlike running a distributed system on a distributed scheduler, but +trying to ensure some properties about it (like fairness). A checked +coordination script is like running a kernel in a TEE (although it behaves more +like a lookup argument since it's based on the proof of the coordination script +wasm). + +Now lets say the previous script actually generates a **single** wasm module. +Basically, `import` behaves like it does in languages where everything gets +statically linked (like rust). + +Since the module has a unique hash, and all its functions can be trusted, then +both utxos can just check for that. And there is no need of doing it explicitly +in the syntax since the entire script can be trusted. + +Instead of hardcoding the script hash, a modifiable list can be kept, where only +the owner (whatever that means) of each utxo can add or remove to it. + +Or indirection could be used to keep a single utxo with a list. + +It may also be possible to only check this condition once per utxo if we don't +allow more than one wasm module for the coordination script. 
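+
+A small sketch of that "check once per utxo" idea, under the assumption that
+the whole wrapper is a single wasm module with one known hash. `Continuation`,
+`ProgramHash`, `TRUSTED_SCRIPT_HASH`, and the `program_hash` binding below are
+placeholders for the real host interface:
+
+```rust
+// Illustrative types; in practice these are transaction-local ids and module hashes.
+type Continuation = u64;
+type ProgramHash = [u8; 32];
+
+// Hypothetical binding for the program_hash opcode described earlier.
+fn program_hash(_k: Continuation) -> ProgramHash {
+    unimplemented!("host call")
+}
+
+const TRUSTED_SCRIPT_HASH: ProgramHash = [0u8; 32]; // placeholder value
+
+struct UtxoState {
+    caller_attested: bool,
+    // ... rest of the coroutine state lives here
+}
+
+fn ensure_trusted_caller(
+    state: &mut UtxoState,
+    caller: Continuation,
+) -> Result<(), &'static str> {
+    if state.caller_attested {
+        // Already attested once in this transaction: skip further host calls.
+        return Ok(());
+    }
+    if program_hash(caller) != TRUSTED_SCRIPT_HASH {
+        return Err("untrusted coordination script");
+    }
+    state.caller_attested = true;
+    Ok(())
+}
+```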
+ +### Without wrapper script + +If for some reason we don't want to have the monolithic wrapper script, and we +want to just spawn and compose arbitrary ones. + +Here the tricky part is that the utxo potentially would only see the immediate +caller, and in this case the runtime would be the root. But + +1. Coordination scripts could also be able to check for their callers too in the +same way, making transitivity possible. So the coordination script enforces a +runtime. + +```rust +fn aggregate_on(consumer: Consumer) / { Channel, Scheduler } { + assert(context.caller == concurrent::hash); + + let chan = do new_chan(); + + // ... rest +} +``` + +There is no need for the utxo to examine the call-stack. + +Of course the issue here is that there may be something else in the middle, but +it's unclear whether that's a limitation in practice. + +2. Another way is to just check for the handler in every method call. + +It's possible to ask for `program_hash(channel_handler) == +concurrent::hash` before invoking a handler. + +What's the proper syntax for this is uncertain. It could be something like: + +```rust +fn process(channel_id: ChannelId) / { Channel[concurrent::hash] }; +``` + +Or some other annotation that allows using dynamic lists easily. + +But the lowering doesn't seem particularly different. + +And the same thing applies when receiving a request, since it's: + +```rust +let (r, k) = yield(); + +match { + Stream::process => { + assert(program_hash(k) == concurrent::hash) + } +} +``` + +It's not necessarily more expensive since the hash could be cached in the utxo's +memory (to reduce the amount of host calls), but it makes things more complex. + +3. Another option is to use handshakes. + +This still requires a wrapper script, but allows dynamic loading. + +So we have a wrapper script: + +```rust +script { + fn trusted_script(input: Utxo, known_coords: Utxo, dynamic_script: Coord) { + // this script, which is known and fixed on the utxo + + // without this handshake, the utxo will refuse everything. + input.accept_script(); + + // use indirection to check + // let's say only an admin can modify known_coords + assert(known_coords.contains(dynamic_script.hash())) + assert(known_coords.authenticate()); + + resume(starstream::new_choord(dynamic_script)); + } +} +``` diff --git a/interleaving/starstream-interleaving-spec/Cargo.toml b/interleaving/starstream-interleaving-spec/Cargo.toml new file mode 100644 index 00000000..3344556d --- /dev/null +++ b/interleaving/starstream-interleaving-spec/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "starstream-interleaving-spec" +version = "0.1.0" +edition = "2024" + +[dependencies] +hex = "0.4.3" +imbl = "6.1.0" +thiserror = "2.0.17" +ark-ff = { version = "0.5.0", default-features = false } +ark-goldilocks = { path = "../../ark-goldilocks" } + +# TODO: move to workspace deps +neo-fold = { workspace = true } +neo-math = { workspace = true } +neo-ccs = { workspace = true } +neo-ajtai = { workspace = true } +neo-memory = { workspace = true } + +p3-field = "0.4.1" +ark-poseidon2 = { path = "../../ark-poseidon2" } diff --git a/interleaving/starstream-interleaving-spec/EFFECTS_REFERENCE.md b/interleaving/starstream-interleaving-spec/EFFECTS_REFERENCE.md new file mode 100644 index 00000000..cf700572 --- /dev/null +++ b/interleaving/starstream-interleaving-spec/EFFECTS_REFERENCE.md @@ -0,0 +1,561 @@ +# Verification + +This document describes operational semantics for the +interleaving/transaction/communication circuit in a somewhat formal way (but +abstract). 
+
+Each opcode corresponds to an operation that a wasm program can do in a
+transaction and involves communication with another program (utxo -> coord,
+coord -> utxo, coord -> coord).
+
+The rules are intended to be fairly low-level, and provide the contract with:
+
+1. Attestation. Being able to enforce a caller coordination script, which can
+also be used to attest the other utxos (the coordination script can also check
+if a utxo is an instance of a contract).
+2. Control flow. Resuming a process (coroutine) needs to enforce that only that
+program can do an operation next. Yield is a resume with an implicit target (the
+caller, or previous program).
+3. Sent data matches received data, and vice versa. This is enforced through the
+memory argument. Note that for this machine data can just be treated as opaque
+blobs (Value).
+
+# 1. State Configuration
+
+The global state of the interleaving machine σ is defined as:
+
+```text
+Configuration (σ)
+=================
+σ = (id_curr, id_prev, expected_input, expected_resumer, activation, init, ref_store, process_table, host_calls, must_burn, on_yield, yield_to, finalized, is_utxo, initialized, handler_stack, ownership, did_burn)
+
+Where:
+  id_curr          : ID of the currently executing VM. In the range [0..#coord + #utxo]
+  id_prev          : ID of the VM that executed immediately before the current one (trace-local).
+  expected_input   : A map {ProcessID -> Ref}
+  expected_resumer : A map {ProcessID -> ProcessID}
+  on_yield         : A map {ProcessID -> Bool} (true if the process most recently yielded)
+  yield_to         : A map {ProcessID -> Option<ProcessID>} (who to return to on yield)
+  activation       : A map {ProcessID -> Option<(Ref, ProcessID)>}
+  init             : A map {ProcessID -> Option<(Value, ProcessID)>}
+  ref_store        : A map {Ref -> [Value]}
+  process_table    : Read-only map {ID -> ProgramHash} for attestation.
+  host_calls       : A map {ProcessID -> Host-calls lookup table}
+  must_burn        : Read-only map {ProcessID -> Bool} (inputs without continuation must burn)
+  finalized        : A map {ProcessID -> Bool} (true if the process ended with a terminal op, e.g. yield/burn)
+  is_utxo          : Read-only map {ProcessID -> Bool}
+  initialized      : A map {ProcessID -> Bool}
+  handler_stack    : A map {InterfaceID -> Stack<ProcessID>}
+  ownership        : A map {ProcessID -> Option<ProcessID>} (token -> owner)
+  did_burn         : A map {ProcessID -> Bool}
+```
+
+Note that the maps are used here for convenience of notation. In practice they
+are memory arguments enforced through an auxiliary protocol, like Twist And
+Shout or Nebula. I'm also going to notate memory reads as preconditions, even if
+in practice it's closer to emitting a constraint. But using a memory is easier
+to reason about.
+
+Depending on the memory implementation it may be simpler to just flatten all the
+maps into a multi-valued memory, or it may be better to have a flat memory and
+use offsets (easy since all are fixed length).
+
+The rules are expressed in a notation inspired by denotational semantics, but
+it's not functional. Only the fields that change are specified in the
+post-condition; the rest can be assumed to remain equal.
+A tick (') is used to indicate the _next state_.
+
+The notation should be understood as
+
+```text
+requirements (pattern match + conditions)
+------------------------------------------
+new_state (assignments)
+```
+
+---
+
+# 2. Shared Operations
+
+## Resume (Call)
+
+The primary control flow operation. Transfers control to `target`. It records
+claims for the current process and consumes the target's pending claims after
+validation.
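+
+For intuition, here is a minimal Rust sketch of the claim bookkeeping this
+operation performs. It mirrors the flat `Vec<Option<_>>` representation used by
+the mocked verifier in this crate; the `Pid`/`RefId`/`Claims` names are
+illustrative, not part of the spec. The paragraphs and rule below make the
+conditions precise.
+
+```rust
+/// Illustrative only: the claim checks and updates performed by a Resume step.
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+struct Pid(usize);
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+struct RefId(u64);
+
+struct Claims {
+    /// What the next resumer of each process must pass as input.
+    expected_input: Vec<Option<RefId>>,
+    /// Who the next resumer of each process must be.
+    expected_resumer: Vec<Option<Pid>>,
+}
+
+impl Claims {
+    /// Validate and consume the target's pending claims, then record ours.
+    fn resume(
+        &mut self,
+        curr: Pid,
+        target: Pid,
+        val: RefId,
+        ret: Option<RefId>,
+        caller: Option<Pid>,
+    ) -> Result<(), &'static str> {
+        if let Some(expected) = self.expected_input[target.0] {
+            if expected != val {
+                return Err("resume claim mismatch");
+            }
+        }
+        if let Some(expected) = self.expected_resumer[target.0] {
+            if expected != curr {
+                return Err("resumer mismatch");
+            }
+        }
+        // Target's claims are consumed by this resume.
+        self.expected_input[target.0] = None;
+        self.expected_resumer[target.0] = None;
+        // Our claims are checked by whichever process resumes us next.
+        self.expected_input[curr.0] = ret;
+        self.expected_resumer[curr.0] = caller;
+        Ok(())
+    }
+}
+```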
+ +Since we are also resuming a currently suspended process, we can only do it if +our value matches its claim. + +When a process yields, it sets `on_yield = true`. The next resumer records +`yield_to[target] = id_curr` and clears `on_yield`. Subsequent resumes do not +change `yield_to` unless the process yields again. + +```text +Rule: Resume +============ + op = Resume(target, val_ref) -> (ret_ref, caller) + + 1. id_curr ≠ target + + (No self resume) + + 2. if expected_input[target] is set, it must equal val_ref + + (Check ref claim matches target's previous claim) + + 3. if expected_resumer[target] is set, it must equal id_curr + + (Check that the current process matches the expected resumer for the target) + + 4. is_utxo[id_curr] == False + + (Direct Resume is coordination-script only) + + 5. initialized[target] + + (Can't jump to an unitialized process) +-------------------------------------------------------------------------------------------- + 1. expected_input[id_curr] <- ret_ref (Claim, needs to be checked later by future resumer) + 2. expected_resumer[id_curr] <- caller (Claim, needs to be checked later by future resumer) + 3. expected_input[target] <- None (Target claim consumed by this resume) + 4. expected_resumer[target] <- None (Target claim consumed by this resume) + 5. id_prev' <- id_curr (Trace-local previous id) + 6. id_curr' <- target (Switch) + 7. if on_yield[target] then + yield_to'[target] <- Some(id_curr) + on_yield'[target] <- False + 8. activation'[target] <- Some(val_ref, id_curr) +``` + +## Activation + +Rule: Activation +=========== + op = Activation() -> (val, caller) + + 1. activation[id_curr] == Some(val, caller) + +----------------------------------------------------------------------- + 1. (No state changes) + +## Init + +TODO: init should probably only be callable in the tx that creates the utxo +(otherwise we'd need to explicitly store the input in the ledger, even if it's +never used again). + +Rule: Init +=========== + op = Init() -> (val, caller) + + 1. init[id_curr] == Some(val, caller) + 2. caller is the creator (the process that executed NewUtxo/NewCoord for this id) + +----------------------------------------------------------------------- + 1. (No state changes) + +## Yield + +Suspend the current continuation and transfer control to the saved parent +(`yield_to[id_curr]`). + +After a resume, programs must call `Activation()` to read the `(val, caller)` +tuple. + +```text +Rule: Yield +=========== + op = Yield(val_ref) + + 1. is_utxo[id_curr] == True + + (Only UTXOs may yield; coordination scripts must use Return) + + 2. yield_to[id_curr] is set + + 3. let val = ref_store[val_ref] in + if expected_input[yield_to[id_curr]] is set, it must equal val + + (Check val matches target's previous claim) + + 4. if expected_resumer[yield_to[id_curr]] is set, it must equal id_curr + + (Check that the current process matches the expected resumer for the parent) + +-------------------------------------------------------------------------------------------- + 1. on_yield'[id_curr] <- True + 2. id_curr' <- yield_to[id_curr] + 3. id_prev' <- id_curr + 4. finalized'[id_curr] <- True + 5. activation'[id_curr] <- None +``` + +## Program Hash + +Allows introspection of a Continuation's code identity without changing control flow. + +```text +Rule: Program Hash +================== + (Lookup the static hash of a program ID. State remains unchanged.) 
+ + op = ProgramHash(target_id) + hash = process_table[target_id] + +----------------------------------------------------------------------- + 1. (No state changes) +``` + +--- + +# 3. Coordination Script Operations + +## New UTXO + +```text +Rule: New UTXO +============== +Assigns a new (transaction-local) ID for a UTXO program. + + op = NewUtxo(program_hash, val) -> id + + 1. process_table[id] == program_hash + + (The hash matches the one in the process table) + (Remember that this is just verifying, so we already have the full table) + + (The trace for this utxo starts fresh) + + 3. is_utxo[id] + + (We check that it is indeed a utxo) + + 4. is_utxo[id_curr] == False + + (A utxo can't crate utxos) + + (Host call lookup condition) + +----------------------------------------------------------------------- + 1. initialized[id] <- True + 2. init'[id] <- Some(val, id_curr) +``` + +## New Coordination Script (Spawn) + +Allocates a new transient VM ID. The start is "Cold" (it does not execute immediately). + +```text +Rule: New coord (Spawn) +======================= +Assigns a new (transaction-local) ID for a coordination script (an effect +handler) instance. + + op = NewCoord(program_hash, val) -> id + + 1. process_table[id] == program_hash + + (The hash matches the one in the process table) + (Remember that this is just verifying, so we already have the full table) + + (The trace for this handler starts fresh) + + 3. is_utxo[id] == False + + (We check that it is a coordination script) + + 4. is_utxo[id_curr] == False + + (A utxo can't spawn coordination scripts) + + (Host call lookup condition) + +----------------------------------------------------------------------- + 1. initialized[id] <- True + 2. init'[id] <- Some(val, id_curr) +``` + +--- + +# 4. Effect Handler Operations + +## Install Handler + +Pushes the current program ID onto the handler stack for a given interface. This operation is restricted to coordination scripts. + +```text +Rule: Install Handler +===================== + op = InstallHandler(interface_id) + + 1. is_utxo[id_curr] == False + + (Only coordination scripts can install handlers) + + (Host call lookup condition) +----------------------------------------------------------------------- + 1. handler_stack'[interface_id].push(id_curr) +``` + +## Uninstall Handler + +Pops a program ID from the handler stack for a given interface. This operation is restricted to coordination scripts. + +```text +Rule: Uninstall Handler +======================= + op = UninstallHandler(interface_id) + + 1. is_utxo[id_curr] == False + + (Only coordination scripts can uninstall handlers) + + 2. handler_stack[interface_id].top() == id_curr + + (Only the installer can uninstall) + + (Host call lookup condition) +----------------------------------------------------------------------- + 1. handler_stack'[interface_id].pop() +``` + +## Get Handler For + +Retrieves the handler for a given interface without altering the handler stack. + +```text +Rule: Get Handler For +===================== + op = GetHandlerFor(interface_id) -> handler_id + + 1. handler_id == handler_stack[interface_id].top() + + (Host call lookup condition) +----------------------------------------------------------------------- +``` + +## Call Effect Handler + +Calls the currently installed handler for an interface, without allowing an +arbitrary target choice. + +`interface_id` here is the full `InterfaceId` key (4-limb encoding in witness +space), and resolves to `handler_stack[interface_id].top()`. 
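+
+As a rough sketch of the handler-stack discipline shared by the four operations
+above (mirroring the `HashMap<InterfaceId, Vec<ProcessId>>` used by the mocked
+verifier, with `InterfaceId` simplified to a plain integer; the names here are
+illustrative), resolution is a peek at the top of the per-interface stack. The
+formal rule follows.
+
+```rust
+use std::collections::HashMap;
+
+// Illustrative only: per-interface handler stacks.
+type InterfaceId = u64;
+type ProcessId = usize;
+
+#[derive(Default)]
+struct HandlerStacks(HashMap<InterfaceId, Vec<ProcessId>>);
+
+impl HandlerStacks {
+    fn install(&mut self, iface: InterfaceId, pid: ProcessId) {
+        self.0.entry(iface).or_default().push(pid);
+    }
+
+    /// Only the most recent installer may uninstall.
+    fn uninstall(&mut self, iface: InterfaceId, pid: ProcessId) -> Result<(), &'static str> {
+        let stack = self.0.entry(iface).or_default();
+        match stack.last().copied() {
+            Some(top) if top == pid => {
+                let _ = stack.pop();
+                Ok(())
+            }
+            Some(_) => Err("only the installing process can uninstall"),
+            None => Err("no handler installed for this interface"),
+        }
+    }
+
+    /// Resolution used by GetHandlerFor and CallEffectHandler.
+    fn resolve(&self, iface: InterfaceId) -> Option<ProcessId> {
+        self.0.get(&iface).and_then(|stack| stack.last().copied())
+    }
+}
+```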
+ +```text +Rule: Call Effect Handler +========================= + op = CallEffectHandler(interface_id, val_ref) -> ret_ref + + 1. target = handler_stack[interface_id].top() + + (There must be an installed handler) + + 2. id_curr ≠ target + + (No self call) + + 3. if expected_input[target] is set, it must equal val_ref + + (Check ref claim matches target's previous claim) + + 4. if expected_resumer[target] is set, it must equal id_curr + + (Check that current process matches expected resumer for target) + +-------------------------------------------------------------------------------------------- + 1. expected_input[id_curr] <- ret_ref + 2. expected_resumer[id_curr] <- Some(target) + 3. expected_input[target] <- None + 4. expected_resumer[target] <- None + 5. id_prev' <- id_curr + 6. id_curr' <- target + 7. activation'[target] <- Some(val_ref, id_curr) +``` + +--- + +# 5. UTXO Operations + +## Burn + +Terminates the UTXO. No matching output is created in the ledger. + +```text +Rule: Burn +========== +Destroys the UTXO state. + + op = Burn(ret) + + 1. is_utxo[id_curr] + 2. initialized[id_curr] + 3. must_burn[id_curr] == True + + 4. if expected_input[id_prev] is set, it must equal ret + + (Resume receives ret) + + (Host call lookup condition) +----------------------------------------------------------------------- + 1. finalized'[id_curr] <- True + 2. id_curr' <- id_prev + + (Control flow goes to caller) + + 3. initialized'[id_curr] <- False + + (It's not possible to return to this, maybe it should be a different flag though) + + 5. activation'[id_curr] <- None + 6. did_burn'[id_curr] <- True +``` + +# 6. Tokens + +## Bind + +```text +Rule: Bind (token calls) +======================== + op = Bind(owner_id) + + token_id = id_curr + + 1. is_utxo[token_id] == True + (Only UTXOs can be tokens) + + 2. is_utxo[owner_id] == True + (Owners are UTXOs) + + 3. initialized[token_id] == True + initialized[owner_id] == True + (Both exist in this transaction's process set) + + 4. ownership[token_id] == ⊥ + (Token must currently be unbound) + + (Host call lookup condition) +----------------------------------------------------------------------- + 1. ownership'[token_id] <- owner_id +``` + +## Unbind + +```text +Rule: Unbind (owner calls) +========================== + op = Unbind(token_id) + + owner_id = id_curr + + 1. is_utxo[owner_id] == True + + 2. is_utxo[token_id] == True + initialized[token_id] == True + (Token exists in this transaction's process set) + + 3. ownership[token_id] == owner_id + (Authorization: only current owner may unbind) + +----------------------------------------------------------------------- + 1. ownership'[token_id] <- ⊥ +``` + +# 7. Data Operations + +## NewRef + +Allocates a new reference with a specific size (in 4-value words). + +```text +Rule: NewRef +============== + op = NewRef(size_words) -> ref + + (Host call lookup condition) +----------------------------------------------------------------------- + 1. size fits in 16 bits + 2. ref_store'[ref] <- [uninitialized; size_words * 4] (conceptually) + 3. ref_sizes'[ref] <- size_words + 5. ref_state[id_curr] <- (ref, 0, size_words) // storing the ref being built, current word offset, and total size +``` + +## RefPush + +Appends data to the currently building reference. + +```text +Rule: RefPush +============== + op = RefPush(vals[4]) + + 1. let (ref, offset_words, size_words) = ref_state[id_curr] + 2. 
offset_words < size_words + + (Host call lookup condition) +----------------------------------------------------------------------- + 1. for i in 0..3: + ref_store'[ref][(offset_words * 4) + i] <- vals[i] + 2. ref_state[id_curr] <- (ref, offset_words + 1, size_words) +``` + +## RefGet + +```text +Rule: RefGet +============== + op = RefGet(ref, offset_words) -> vals[4] + + 1. let size_words = ref_sizes[ref] + 2. offset_words < size_words + 3. for i in 0..3: + vals[i] == ref_store[ref][(offset_words * 4) + i] + + (Host call lookup condition) +----------------------------------------------------------------------- +``` + +## RefWrite + +```text +Rule: RefWrite +============== + op = RefWrite(ref, offset_words, vals[4]) + + 1. let size_words = ref_sizes[ref] + 2. offset_words < size_words + + (Host call lookup condition) +----------------------------------------------------------------------- + 1. for i in 0..3: + ref_store'[ref][(offset_words * 4) + i] <- vals[i] +``` + +# Verification + +To verify the transaction, the following additional conditions need to be met: + +```text +for (process, proof, host_calls) in transaction.proofs: + + // we verified all the host calls for each process + + // every object had a constructor of some sort + assert(initialized[process]) + + // all utxos in the transaction ended in a terminal state + if is_utxo[process] { + assert(finalized[process]) + } + + // burned inputs (no continuation) must have executed Burn + if must_burn[process] { + assert(did_burn[process]) + } + +assert_not(is_utxo[id_curr]) + +// we finish in a coordination script +``` diff --git a/interleaving/starstream-interleaving-spec/effect-handlers-codegen-script-non-tail.png b/interleaving/starstream-interleaving-spec/effect-handlers-codegen-script-non-tail.png new file mode 100644 index 00000000..edc35836 Binary files /dev/null and b/interleaving/starstream-interleaving-spec/effect-handlers-codegen-script-non-tail.png differ diff --git a/interleaving/starstream-interleaving-spec/effect-handlers-codegen-simple.png b/interleaving/starstream-interleaving-spec/effect-handlers-codegen-simple.png new file mode 100644 index 00000000..05732298 Binary files /dev/null and b/interleaving/starstream-interleaving-spec/effect-handlers-codegen-simple.png differ diff --git a/interleaving/starstream-interleaving-spec/graph.png b/interleaving/starstream-interleaving-spec/graph.png new file mode 100644 index 00000000..24d9b972 Binary files /dev/null and b/interleaving/starstream-interleaving-spec/graph.png differ diff --git a/interleaving/starstream-interleaving-spec/src/builder.rs b/interleaving/starstream-interleaving-spec/src/builder.rs new file mode 100644 index 00000000..644086e7 --- /dev/null +++ b/interleaving/starstream-interleaving-spec/src/builder.rs @@ -0,0 +1,163 @@ +use super::*; +use crate::transaction_effects::witness::WitLedgerEffect; + +pub struct RefGenerator { + counter: u64, + map: HashMap<&'static str, Ref>, +} + +impl RefGenerator { + pub fn new() -> Self { + Self { + counter: 0, + map: HashMap::new(), + } + } + + pub fn get(&mut self, name: &'static str) -> Ref { + let entry = self.map.entry(name).or_insert_with(|| { + let r = Ref(self.counter); + self.counter += REF_PUSH_WIDTH as u64; + r + }); + *entry + } +} + +impl Default for RefGenerator { + fn default() -> Self { + Self::new() + } +} + +pub fn h(n: u8) -> Hash { + Hash([n as u64, 0, 0, 0], std::marker::PhantomData) +} + +pub fn v(data: &[u8]) -> Value { + let mut bytes = [0u8; 8]; + let len = data.len().min(8); + 
bytes[..len].copy_from_slice(&data[..len]); + Value(u64::from_le_bytes(bytes)) +} + +pub struct TransactionBuilder { + body: TransactionBody, + spending_proofs: Vec, + new_output_proofs: Vec, + coordination_scripts: Vec, +} + +impl TransactionBuilder { + pub fn new() -> Self { + Self { + body: TransactionBody { + inputs: vec![], + continuations: vec![], + new_outputs: vec![], + ownership_out: HashMap::new(), + coordination_scripts_keys: vec![], + entrypoint: 0, + }, + spending_proofs: vec![], + new_output_proofs: vec![], + coordination_scripts: vec![], + } + } + + pub fn with_input( + self, + utxo: UtxoId, + continuation: Option, + trace: Vec, + ) -> Self { + self.with_input_and_trace_commitment( + utxo, + continuation, + trace, + LedgerEffectsCommitment::iv(), + ) + } + + pub fn with_input_and_trace_commitment( + mut self, + utxo: UtxoId, + continuation: Option, + trace: Vec, + host_calls_root: LedgerEffectsCommitment, + ) -> Self { + self.body.inputs.push(utxo); + self.body.continuations.push(continuation); + self.spending_proofs.push(ZkWasmProof { + host_calls_root, + trace, + }); + self + } + + pub fn with_fresh_output(self, output: NewOutput, trace: Vec) -> Self { + self.with_fresh_output_and_trace_commitment(output, trace, LedgerEffectsCommitment::iv()) + } + + pub fn with_fresh_output_and_trace_commitment( + mut self, + output: NewOutput, + trace: Vec, + host_calls_root: LedgerEffectsCommitment, + ) -> Self { + self.body.new_outputs.push(output); + self.new_output_proofs.push(ZkWasmProof { + host_calls_root, + trace, + }); + self + } + + pub fn with_coord_script(self, key: Hash, trace: Vec) -> Self { + self.with_coord_script_and_trace_commitment(key, trace, LedgerEffectsCommitment::iv()) + } + + pub fn with_coord_script_and_trace_commitment( + mut self, + key: Hash, + trace: Vec, + host_calls_root: LedgerEffectsCommitment, + ) -> Self { + self.body.coordination_scripts_keys.push(key); + self.coordination_scripts.push(ZkWasmProof { + host_calls_root, + trace, + }); + self + } + + pub fn with_ownership(mut self, token: OutputRef, owner: OutputRef) -> Self { + self.body.ownership_out.insert(token, owner); + self + } + + pub fn with_entrypoint(mut self, entrypoint: usize) -> Self { + self.body.entrypoint = entrypoint; + self + } + + pub fn build(self, interleaving_proof: ZkTransactionProof) -> ProvenTransaction { + let witness = TransactionWitness { + spending_proofs: self.spending_proofs, + new_output_proofs: self.new_output_proofs, + interleaving_proof, + coordination_scripts: self.coordination_scripts, + }; + + ProvenTransaction { + body: self.body, + witness, + } + } +} + +impl Default for TransactionBuilder { + fn default() -> Self { + Self::new() + } +} diff --git a/interleaving/starstream-interleaving-spec/src/lib.rs b/interleaving/starstream-interleaving-spec/src/lib.rs new file mode 100644 index 00000000..d765ac1b --- /dev/null +++ b/interleaving/starstream-interleaving-spec/src/lib.rs @@ -0,0 +1,756 @@ +pub mod builder; +mod mocked_verifier; +mod transaction_effects; + +#[cfg(test)] +mod tests; + +pub use crate::{ + mocked_verifier::InterleavingWitness, mocked_verifier::LedgerEffectsCommitment, + transaction_effects::ProcessId, +}; +use imbl::{HashMap, HashSet}; +use neo_ajtai::Commitment; +use p3_field::PrimeCharacteristicRing; +use std::{hash::Hasher, marker::PhantomData}; +pub use transaction_effects::{ + InterfaceId, + instance::InterleavingInstance, + witness::{ + EffectDiscriminant, REF_GET_WIDTH, REF_PUSH_WIDTH, REF_WRITE_WIDTH, WitEffectOutput, + WitLedgerEffect, + }, 
+}; + +#[derive(PartialEq, Eq)] +pub struct Hash(pub [u64; 4], pub PhantomData); + +impl Copy for Hash {} + +impl Clone for Hash { + fn clone(&self) -> Self { + *self + } +} + +#[derive(PartialEq, Eq, Hash, Clone, Debug)] +pub struct WasmModule(Vec); + +/// Opaque user data. +#[derive(Clone, Copy, PartialEq, Eq, Hash)] +pub struct Value(pub u64); + +impl std::fmt::Debug for Value { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Value({})", self.0) + } +} + +impl Value { + pub fn nil() -> Self { + Value(0) + } +} + +fn encode_hash_to_fields(hash: Hash) -> [neo_math::F; 4] { + hash.0.map(neo_math::F::from_u64) +} + +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +pub struct Ref(pub u64); + +#[derive(Clone, PartialEq, Eq, Hash)] +pub struct CoroutineState { + // For the purpose of this model, we only care *if* the state changed, + // not what it is. A simple program counter is sufficient to check if a + // coroutine continued execution in the tests. + // + pub pc: u64, + pub globals: Vec, +} + +// pub struct ZkTransactionProof {} + +#[allow(clippy::large_enum_variant)] +pub enum ZkTransactionProof { + NeoProof { + // does the verifier need this? + session: neo_fold::session::FoldingSession, + proof: neo_fold::shard::ShardProof, + mcss_public: Vec>, + steps_public: Vec>, + // TODO: this shouldn't be here I think, the ccs should be known somehow by + // the verifier + ccs: neo_ccs::CcsStructure, + }, + Dummy, +} + +impl ZkTransactionProof { + #[allow(clippy::result_large_err)] + pub fn verify( + &self, + inst: &InterleavingInstance, + wit: &InterleavingWitness, + ) -> Result<(), VerificationError> { + match self { + ZkTransactionProof::NeoProof { + session, + proof, + mcss_public, + steps_public, + ccs, + } => { + let output_binding_config = inst.output_binding_config(); + + let ok = session + .verify_with_output_binding_simple( + ccs, + mcss_public, + proof, + &output_binding_config, + ) + .expect("verify should run"); + + assert!(ok, "optimized verification should pass"); + + // dbg!(&self.steps_public[0].lut_insts[0].table); + + // NOTE: the indices in steps_public match the memory initializations + // ordered by MemoryTag in the circuit + let mut expected_fields = Vec::with_capacity(inst.process_table.len() * 4); + for hash in &inst.process_table { + let hash_fields = encode_hash_to_fields(*hash); + expected_fields.extend(hash_fields.iter().copied()); + } + // TODO: review if this is correct, I think all ROM's need to be + // of the same size, so we have some extra padding. + // + // we may need to check the length or something as a new check, + // or maybe try to just use a sparse definition? + let process_table = &steps_public[0].lut_insts[0].table[0..expected_fields.len()]; + assert!( + expected_fields + .iter() + .zip(process_table.iter()) + .all(|(expected, found)| *expected == *found), + "program hash table mismatch" + ); + + assert!( + inst.must_burn + .iter() + .zip(steps_public[0].lut_insts[1].table.iter()) + .all(|(expected, found)| { + neo_math::F::from_u64(if *expected { 1 } else { 0 }) == *found + }), + "must burn table mismatch" + ); + + assert!( + inst.is_utxo + .iter() + .zip(steps_public[0].lut_insts[2].table.iter()) + .all(|(expected, found)| { + neo_math::F::from_u64(if *expected { 1 } else { 0 }) == *found + }), + "must burn table mismatch" + ); + + // TODO: check interfaces? 
but I think this can be private + // dbg!(&self.steps_public[0].lut_insts[3].table); + + // dbg!(&steps_public[0].mcs_inst.x); + } + ZkTransactionProof::Dummy => {} + } + + Ok(mocked_verifier::verify_interleaving_semantics(inst, wit)?) + } +} + +pub struct ZkWasmProof { + pub host_calls_root: LedgerEffectsCommitment, + pub trace: Vec, +} + +impl ZkWasmProof { + pub fn public_instance(&self) -> WasmInstance { + WasmInstance { + host_calls_root: self.host_calls_root.clone(), + host_calls_len: self.trace.len() as u32, + } + } + + #[allow(clippy::result_large_err)] + pub fn verify( + &self, + _input: Option, + _key: &Hash, + _output: Option, + ) -> Result<(), VerificationError> { + Ok(()) + } +} + +#[derive(thiserror::Error, Debug)] +pub enum VerificationError { + #[error("Input continuation size mismatch")] + InputContinuationSizeMismatch, + #[error("Ownership size mismatch")] + OwnershipSizeMismatch, + #[error("Owner has no stable identity")] + OwnerHasNoStableIdentity, + #[error("Interleaving proof error: {0}")] + InterleavingProofError(#[from] mocked_verifier::InterleavingError), + #[error("Transaction input not found")] + InputNotFound, +} + +/// The actual utxo identity. +#[derive(PartialEq, Eq, Hash, Clone, Debug)] +pub struct UtxoId { + pub contract_hash: Hash, + /// A Global Sequence Number for this specific contract code. + /// - Assigned by Ledger at creation by keeping track of the utxos with the same wasm. + /// - Utxo's can't know about it, otherwise it would lead to contention. + pub nonce: u64, +} + +/// Uniquely identifies a "process" or "chain" of states. +/// Defined by the transaction that spawned it (Genesis). +/// +/// This is an internal id, transactions don't know/care about this. +/// +/// The ledger uses stable identities internally to keep track of ownership +/// without having to rewrite all the tuples in the relation each time a utxo +/// with tokens gets resumed. +/// +/// When resuming a utxo, the utxo_to_coroutine mapping gets updated. +/// +/// But utxos just refer to each other through relative indexing in the +/// transaction input/outputs. +#[derive(Clone, PartialEq, Eq, Hash, Debug)] +pub struct CoroutineId { + pub creation_tx_hash: Hash, + pub creation_output_index: u64, +} + +#[derive(Clone, PartialEq, Eq)] +pub struct TransactionBody { + pub inputs: Vec, + + /// Continuation outputs aligned with inputs. + /// + /// Must have length == inputs.len(). + /// - continuations[i] = Some(out): input[i] continues with out.state + /// - continuations[i] = None: input[i] is burned (no continuation) + pub continuations: Vec>, + + /// New spawns created by coordination scripts (no parent input). + /// Basically the condition for this is that a utxo called `new`, this is + /// also then used to verify the interleaving proof. + pub new_outputs: Vec, + + /// Final ownership snapshot for utxos IN THE TRANSACTION. + /// + /// ownership_out[p] == Some(q) means process p (token) is owned by process q at the end. + /// + /// Note that absence here means the token shouldn't have an owner. + /// + /// So this is a delta, where None means "remove the owner". 
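+    ///
+    /// Example: an entry `OutputRef(2) -> OutputRef(0)` means the token process at
+    /// index 2 ends the transaction owned by the process at index 0, while a token
+    /// process with no entry ends up unowned.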
+ pub ownership_out: HashMap, + + pub coordination_scripts_keys: Vec>, + pub entrypoint: usize, +} + +// and OutputRef is an index into the output segment of the transaction +#[derive(Clone, Copy, PartialEq, Eq, Hash)] +pub struct OutputRef(usize); + +impl From for OutputRef { + fn from(v: usize) -> Self { + OutputRef(v) + } +} + +#[derive(Clone, PartialEq, Eq, Hash)] +pub struct NewOutput { + pub state: CoroutineState, + pub contract_hash: Hash, +} + +/// Public instance extracted from a zkWasm proof (per process). +pub struct WasmInstance { + /// Commitment to the ordered list of host calls performed by this vm. Each + /// entry encodes opcode + args + return. + pub host_calls_root: LedgerEffectsCommitment, + + /// Number of host calls (length of the list committed by host_calls_root). + pub host_calls_len: u32, +} + +/// In practice this is going to be aggregated into a single proof (or maybe +/// two). +pub struct TransactionWitness { + /// ZK Proofs corresponding to inputs (spending). + /// + /// Note that to verify these, the matching output state has to be provided + /// too. + pub spending_proofs: Vec, + + /// ZK Proofs corresponding to new coroutines. + pub new_output_proofs: Vec, + + /// The global transaction proof. + /// + /// This has access to all the operations peformed by each spending_proof + // that require either talking to another coroutine, or making changes/reads + // in the ledger (like token ownership). + // + /// Plus the attestation capability (getting the hash of one of the + /// coroutines). + /// + /// Note that the circuit for this is fixed in the ledger (just like the + /// zkwasm one), so in practice this encodes the transaction rules. + /// + // NOTE: this is optional for now just for testing purposes + pub interleaving_proof: ZkTransactionProof, + + /// Coordination script proofs. + pub coordination_scripts: Vec, +} + +/// A transaction that can be applied to the ledger +pub struct ProvenTransaction { + pub body: TransactionBody, + pub witness: TransactionWitness, +} + +#[derive(Clone)] +#[must_use] +pub struct Ledger { + pub utxos: HashMap, + + // ContractHash -> NextAvailableNonce + pub contract_counters: HashMap, u64>, + + pub utxo_to_coroutine: HashMap, + + // Ownership registry. + // + // many to one mapping of inclusion: token -> utxo. + pub ownership_registry: HashMap, +} + +#[derive(PartialEq, Eq, Hash, Clone)] +pub struct UtxoEntry { + pub state: CoroutineState, + pub contract_hash: Hash, +} + +impl Ledger { + pub fn new() -> Self { + Ledger { + utxos: HashMap::new(), + contract_counters: HashMap::new(), + utxo_to_coroutine: HashMap::new(), + ownership_registry: HashMap::new(), + } + } + + /// Returns transaction-local input ownership for the given input order. + /// + /// For each input at process id `pid`, the value is: + /// - `Some(owner_pid)` if the input's stable coroutine is currently owned by + /// another input coroutine in this same list. + /// - `None` otherwise. 
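+    ///
+    /// For example, if the coroutine behind `inputs[1]` is registered in
+    /// `ownership_registry` as owned by the coroutine behind `inputs[0]`, the
+    /// result has `Some(ProcessId(0))` at position 1.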
+ pub fn input_ownership_for_inputs(&self, inputs: &[UtxoId]) -> Vec> { + let mut ownership = vec![None; inputs.len()]; + let mut coroutine_to_pid: HashMap = HashMap::new(); + + for (pid, utxo_id) in inputs.iter().enumerate() { + let Some(cid) = self.utxo_to_coroutine.get(utxo_id) else { + continue; + }; + coroutine_to_pid.insert(cid.clone(), ProcessId(pid)); + } + + for (pid, utxo_id) in inputs.iter().enumerate() { + let Some(token_cid) = self.utxo_to_coroutine.get(utxo_id) else { + continue; + }; + let Some(owner_cid) = self.ownership_registry.get(token_cid) else { + continue; + }; + ownership[pid] = coroutine_to_pid.get(owner_cid).copied(); + } + + ownership + } + + #[allow(clippy::result_large_err)] + pub fn apply_transaction(&self, tx: &ProvenTransaction) -> Result { + let mut new_ledger = self.clone(); + + self.verify_witness(&tx.body, &tx.witness)?; + + let tx_hash = tx.body.hash(); + + // Canonical process order used by the interleaving public instance: + // processes = inputs ++ new_outputs ++ coordination_scripts_keys + let n_inputs = tx.body.inputs.len(); + let n_new = tx.body.new_outputs.len(); + + // For translating ProcessId -> stable CoroutineId when applying ownership changes. + // Coord scripts have no stable identity, so we store None for those slots. + // + // reminder: + // + // a ProcessId is the offset of that program in the transaction + // a CoroutineId is the genesis of a chain of utxo states (like a branch name). + // + // Coordination scripts are processes (or threads or fibers) from the + // point of view of the interleaving machine, but don't have an identity + // in the ledger. The proof just cares about the order, and that's used + // for addressing. + // + // CoroutineIds are mostly kept around to keep relations simpler. If a + // utxo gets resumed, all the tokens that point to it (are owned by) are + // technically owned by the new utxo. + // + // But there is no reason to go and change all the links instead of just + // changing a pointer. + let mut process_to_coroutine: Vec> = vec![None; n_inputs + n_new]; + + // Pre-state stable ids for inputs (aligned with inputs/process ids 0..n_inputs-1). + for (i, utxo_id) in tx.body.inputs.iter().enumerate() { + let cid = self.utxo_to_coroutine[utxo_id].clone(); + process_to_coroutine[i] = Some(cid); + } + + // Track which input indices are *not* removed from the ledger because + // the continuation reuses the same UtxoId. 
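+        // (These are reference-style inputs: the continuation state is identical to
+        // the input state, so the same UtxoId is kept instead of allocating a new
+        // nonce, and the entry is not deleted in the removal pass below.)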
+ let mut is_reference_input: HashSet = HashSet::new(); + + // inputs and continuations have to be aligned, so we just zip over them + for (i, cont_opt) in tx.body.continuations.iter().enumerate() { + let Some(cont) = cont_opt else { continue }; + + // parent meaning previous state (each continuation is a chain of + // utxos) + let parent_utxo_id = &tx.body.inputs[i]; + + // A continuation has the same contract hash as the input it resumes + let contract_hash = self.utxos[parent_utxo_id].contract_hash; + + let parent_state = self.utxos[parent_utxo_id].state.clone(); + + // same state we don't change the nonce + // + let utxo_id = if cont.pc == parent_state.pc && cont.globals == parent_state.globals { + is_reference_input.insert(i); + parent_utxo_id.clone() + } else { + // Allocate new UtxoId for the continued output + let counter = new_ledger + .contract_counters + .entry(contract_hash) + .or_insert(0); + + let utxo_id = UtxoId { + contract_hash, + nonce: *counter, + }; + *counter += 1; + + utxo_id + }; + + // Same stable CoroutineId as the input + let coroutine_id = process_to_coroutine[i] + .clone() + .expect("input must have coroutine id"); + + // update index + new_ledger + .utxo_to_coroutine + .insert(utxo_id.clone(), coroutine_id); + + // actual utxo entry + new_ledger.utxos.insert( + utxo_id, + UtxoEntry { + state: cont.clone(), + contract_hash, + }, + ); + } + + // new utxos that don't resume anything + for (j, out) in tx.body.new_outputs.iter().enumerate() { + // note that the nonce is not 0, this counts instances of the same + // code, not just resumptions of the same coroutine + let counter = new_ledger + .contract_counters + .entry(out.contract_hash) + .or_insert(0); + let utxo_id = UtxoId { + contract_hash: out.contract_hash, + nonce: *counter, + }; + *counter += 1; + + let coroutine_id = CoroutineId { + creation_tx_hash: tx_hash, + // creation_output_index is relative to new_outputs (as before) + creation_output_index: j as u64, + }; + + // Fill stable ids for processes in the "new_outputs" segment: + // ProcessId(inputs.len() + j) + process_to_coroutine[n_inputs + j] = Some(coroutine_id.clone()); + + new_ledger + .utxo_to_coroutine + .insert(utxo_id.clone(), coroutine_id); + new_ledger.utxos.insert( + utxo_id, + UtxoEntry { + state: out.state.clone(), + contract_hash: out.contract_hash, + }, + ); + } + + // Apply ownership updates for the processes that exist in this transaction's process set. + // + // NOTE: we don't check things here, that's part of the + // interleaving/transaction proof, which we already verified + // + // We only translate ProcessId -> stable CoroutineId for utxo processes + // (inputs and new_outputs). Coord scripts have None and thus can't appear in + // the on-ledger ownership_registry. 
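+        // Each token process listed in `ownership_out` gets its registry entry
+        // (re)written to point at the owner's stable id; token processes absent
+        // from the map have their entry removed (left unowned).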
+ // assert_eq!(tx.body.ownership_out.len(), n_processes); + + for token_pid in 0..n_inputs + n_new { + let token_cid = process_to_coroutine[token_pid].clone().unwrap(); + + if let Some(owner_pid) = tx.body.ownership_out.get(&OutputRef(token_pid)) { + let owner_cid = process_to_coroutine[owner_pid.0].clone().unwrap(); + + new_ledger.ownership_registry.insert(token_cid, owner_cid); + } else { + new_ledger.ownership_registry.remove(&token_cid); + } + } + + // 4) Remove spent inputs + for (i, input_id) in tx.body.inputs.iter().enumerate() { + if is_reference_input.contains(&i) { + continue; + } + + if new_ledger.utxos.remove(input_id).is_none() { + return Err(VerificationError::InputNotFound); + } + + new_ledger.utxo_to_coroutine.remove(input_id); + } + + Ok(new_ledger) + } + + #[allow(clippy::result_large_err)] + pub fn verify_witness( + &self, + body: &TransactionBody, + witness: &TransactionWitness, + ) -> Result<(), VerificationError> { + assert_eq!(witness.spending_proofs.len(), body.inputs.len()); + + if body.continuations.len() != body.inputs.len() { + return Err(VerificationError::InputContinuationSizeMismatch); + } + + let n_inputs = body.inputs.len(); + let n_new = body.new_outputs.len(); + let n_coords = body.coordination_scripts_keys.len(); + let n_processes = n_inputs + n_new + n_coords; + + // if a utxo doesn't have a continuation state, we explicitly need to + // check that it has a call to "burn". + let mut burned = Vec::with_capacity(n_inputs); + + // verify continuation wasm proofs + for (i, (utxo_id, proof)) in body.inputs.iter().zip(&witness.spending_proofs).enumerate() { + let cont = &body.continuations[i]; + + burned.push(cont.is_none()); + + let Some(utxo_entry) = self.utxos.get(utxo_id) else { + return Err(VerificationError::InputNotFound); + }; + + proof.verify( + Some(utxo_entry.state.clone()), + &utxo_entry.contract_hash, + cont.clone(), + )?; + } + + for (proof, entry) in witness + .new_output_proofs + .iter() + .zip(body.new_outputs.iter()) + { + proof.verify(None, &entry.contract_hash, Some(entry.state.clone()))?; + } + + // verify all the coordination script proofs + for (proof, key) in witness + .coordination_scripts + .iter() + .zip(body.coordination_scripts_keys.iter()) + { + proof.verify(None, key, None)?; + } + + // Canonical process kind flags (used by the interleaving public instance). + let is_utxo = (0..n_processes) + .map(|pid| pid < (n_inputs + n_new)) + .collect::>(); + + // 1. for each input, the verification key (wasm module hash) stored in the ledger. + // 2. for each new output, the verification key (wasm module hash) included in it. + // 3. for each coordination script, the verification key (wasm module hash) included in it. + // + // note that the order is bound too + let process_table = body + .inputs + .iter() + .map(|input| self.utxos[input].contract_hash) + .chain(body.new_outputs.iter().map(|o| o.contract_hash)) + .chain(body.coordination_scripts_keys.iter().cloned()) + .collect::>(); + + // Initial ownership snapshot for utxos IN THE TRANSACTION. + // This has len == process_table.len(). Coord scripts are None. + // + // token -> owner (both stable ids), projected into ProcessId space by matching + // the transaction-local processes that correspond to stable ids. + // + // (The circuit enforces that ownership_out is derived legally from this.) 
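+        // One slot per utxo process in canonical order: inputs first (owners looked
+        // up from the current ledger state), then new outputs (which always start
+        // the transaction unowned).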
+ let mut ownership_in = self.input_ownership_for_inputs(&body.inputs); + ownership_in.extend(std::iter::repeat(None).take(n_new)); + + // Build wasm instances in the same canonical order as process_table: + // inputs ++ new_outputs ++ coord scripts + let wasm_instances = build_wasm_instances_in_canonical_order( + &witness.spending_proofs, + &witness.new_output_proofs, + &witness.coordination_scripts, + )?; + + let ownership_out = (0..witness.spending_proofs.len() + witness.new_output_proofs.len()) + .map(|i| { + body.ownership_out + .get(&OutputRef(i)) + .copied() + .map(ProcessId::from) + }) + .collect::>(); + + let input_states: Vec = body + .inputs + .iter() + .map(|utxo_id| self.utxos[utxo_id].state.clone()) + .collect(); + + let inst = InterleavingInstance { + host_calls_roots: wasm_instances + .iter() + .map(|w| w.host_calls_root.clone()) + .collect(), + process_table: process_table.to_vec(), + + is_utxo: is_utxo.to_vec(), + must_burn: burned.to_vec(), + + n_inputs, + n_new, + n_coords, + + ownership_in: ownership_in.to_vec(), + ownership_out: ownership_out.to_vec(), + + entrypoint: ProcessId(body.entrypoint), + input_states, + }; + + let interleaving_proof: &ZkTransactionProof = &witness.interleaving_proof; + + let wit = InterleavingWitness { + traces: witness + .spending_proofs + .iter() + .map(|p| p.trace.clone()) + .chain(witness.new_output_proofs.iter().map(|p| p.trace.clone())) + .chain(witness.coordination_scripts.iter().map(|p| p.trace.clone())) + .collect(), + }; + + // note however that this is mocked right now, and it's using a non-zk + // verifier. + // + // but the circuit in theory in theory encode the same machine + interleaving_proof.verify(&inst, &wit)?; + + Ok(()) + } +} + +impl Default for Ledger { + fn default() -> Self { + Self::new() + } +} + +#[allow(clippy::result_large_err)] +pub fn build_wasm_instances_in_canonical_order( + spending: &[ZkWasmProof], + new_outputs: &[ZkWasmProof], + coords: &[ZkWasmProof], +) -> Result, VerificationError> { + Ok(spending + .iter() + .map(|p| p.public_instance()) + .chain(new_outputs.iter().map(|p| p.public_instance())) + .chain(coords.iter().map(|p| p.public_instance())) + .collect()) +} + +impl TransactionBody { + pub fn hash(&self) -> Hash { + Hash([0u64; 4], PhantomData) + } +} + +impl From for ProcessId { + fn from(val: OutputRef) -> Self { + ProcessId(val.0) + } +} + +impl std::hash::Hash for Hash { + fn hash(&self, state: &mut H) { + self.0.hash(state); + } +} + +impl std::fmt::Debug for Hash { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Hash({:016x?})", self.0) + } +} diff --git a/interleaving/starstream-interleaving-spec/src/mocked_verifier.rs b/interleaving/starstream-interleaving-spec/src/mocked_verifier.rs new file mode 100644 index 00000000..d834e6d8 --- /dev/null +++ b/interleaving/starstream-interleaving-spec/src/mocked_verifier.rs @@ -0,0 +1,959 @@ +//! A *mock* interpreter for the interleaving / transaction semantics. +//! +//! This is meant for tests/examples: you can “mock” traces as Vec per process. +//! +//! It also doesn't use commitments to the tables, it just has access to the +//! actual traces (where actual the circuit would, but as witnesses, it won't +//! happen *in* the ledger). +//! +//! 
It's mainly a direct translation of the algorithm in the README + +use crate::{ + Hash, InterleavingInstance, REF_GET_WIDTH, REF_PUSH_WIDTH, Ref, Value, WasmModule, + transaction_effects::{ + InterfaceId, ProcessId, + witness::{REF_WRITE_WIDTH, WitLedgerEffect}, + }, +}; +use ark_ff::Zero; +use ark_goldilocks::FpGoldilocks; +use std::collections::HashMap; +use std::sync::OnceLock; + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct LedgerEffectsCommitment(pub [FpGoldilocks; 4]); + +impl Default for LedgerEffectsCommitment { + fn default() -> Self { + Self::iv() + } +} + +impl LedgerEffectsCommitment { + pub fn iv() -> Self { + static TRACE_IV: OnceLock<[FpGoldilocks; 4]> = OnceLock::new(); + let iv = TRACE_IV.get_or_init(|| { + let domain = encode_domain_rate8("starstream/trace_ic/v1/poseidon2"); + ark_poseidon2::sponge_8_trace(&domain).expect("trace iv sponge should succeed") + }); + Self(*iv) + } +} + +fn encode_domain_rate8(domain: &str) -> [FpGoldilocks; 8] { + let bytes = domain.as_bytes(); + assert!( + bytes.len() <= 49, + "domain tag too long for safe 7-byte/limb encoding: {} bytes", + bytes.len() + ); + + let mut out = [FpGoldilocks::zero(); 8]; + // Goldilocks field elements cannot safely encode arbitrary 8-byte u64 values + // without modular wraparound. We pack 7 bytes per limb and store the string + // length in limb 0 to avoid ambiguity from trailing zero padding. + out[0] = FpGoldilocks::from(bytes.len() as u64); + for (i, chunk) in bytes.chunks(7).enumerate() { + let mut limb = [0u8; 8]; + limb[..chunk.len()].copy_from_slice(chunk); + out[i + 1] = FpGoldilocks::from(u64::from_le_bytes(limb)); + } + out +} + +/// A “proof input” for tests: provide per-process traces directly. +#[derive(Clone, Debug)] +pub struct InterleavingWitness { + /// One trace per process in canonical order: inputs ++ new_outputs ++ coord scripts + pub traces: Vec>, +} + +#[derive(thiserror::Error, Debug)] +pub enum InterleavingError { + #[error("instance shape mismatch: {0}")] + Shape(&'static str), + + #[error("unknown process id {0}")] + BadPid(ProcessId), + + #[error("resume self-resume forbidden (pid={0})")] + SelfResume(ProcessId), + + #[error("utxo cannot resume utxo: caller={caller} target={target}")] + UtxoResumesUtxo { + caller: ProcessId, + target: ProcessId, + }, + + #[error("resume target not initialized: target={0}")] + TargetNotInitialized(ProcessId), + + #[error("resume claim mismatch: target={target} expected={expected:?} got={got:?}")] + ResumeClaimMismatch { + target: ProcessId, + expected: Ref, + got: Ref, + }, + + #[error("yield claim mismatch: id_prev={id_prev:?} expected={expected:?} got={got:?}")] + YieldClaimMismatch { + id_prev: Option, + expected: Vec, + got: Vec, + }, + + #[error("resumer mismatch: target={target} expected={expected} got={got}")] + ResumerMismatch { + target: ProcessId, + expected: ProcessId, + got: ProcessId, + }, + + #[error("program hash mismatch: target={target} expected={expected:?} got={got:?}")] + ProgramHashMismatch { + target: ProcessId, + expected: Hash, + got: Hash, + }, + + #[error("coord-only op used by utxo (pid={0})")] + CoordOnly(ProcessId), + + #[error("utxo-only op used by coord (pid={0})")] + UtxoOnly(ProcessId), + + #[error( + "uninstall handler: only the installing process can uninstall itself (top != current): interface={interface_id:?} pid={pid}" + )] + InstalledHandlerIsNotCurrent { + interface_id: InterfaceId, + pid: ProcessId, + }, + + #[error("uninstall handler not found: interface={interface_id:?} pid={pid}")] + HandlerNotFound { 
+ interface_id: InterfaceId, + pid: ProcessId, + }, + + #[error( + "get_handler_for returned wrong handler: interface={interface_id:?} expected={expected:?} got={got}" + )] + HandlerGetMismatch { + interface_id: InterfaceId, + expected: Option, + got: ProcessId, + }, + + #[error("bind: token already owned (token={token} owner={owner:?})")] + TokenAlreadyOwned { + token: ProcessId, + owner: Option, + }, + + #[error("bind: owner is not utxo (owner={0})")] + OwnerNotUtxo(ProcessId), + + #[error("unbind: not current owner (token={token} owner={owner:?} caller={caller})")] + UnbindNotOwner { + token: ProcessId, + owner: Option, + caller: ProcessId, + }, + + #[error("yield called without a parent process (pid={pid})")] + YieldWithNoParent { pid: ProcessId }, + + #[error("burn called without a parent process (pid={pid})")] + BurnWithNoParent { pid: ProcessId }, + + #[error("verification: utxo not finalized (finalized=false) pid={0}")] + UtxoNotFinalized(ProcessId), + + #[error("verification: utxo does not have a continuation but it did not call Burn pid={0}")] + UtxoShouldBurn(ProcessId), + + #[error("verification: utxo does have a continuation but it did call Burn pid={0}")] + UtxoShouldNotBurn(ProcessId), + + #[error("verification: finished in utxo (id_curr={0})")] + FinishedInUtxo(ProcessId), + + #[error("verification: burned input did not Burn (pid={pid})")] + BurnedInputNoBurn { pid: ProcessId }, + + #[error( + "ownership_out does not match computed end state (pid={pid} expected={expected:?} got={got:?})" + )] + OwnershipOutMismatch { + pid: ProcessId, + expected: Option, + got: Option, + }, + #[error("a process was not initialized {pid}")] + ProcessNotInitialized { pid: ProcessId }, + + #[error("resume target already has arg (re-entrancy): target={0}")] + ReentrantResume(ProcessId), + + #[error("ref not found: {0:?}")] + RefNotFound(Ref), + + #[error("RefPush called but not building ref (pid={0})")] + RefPushNotBuilding(ProcessId), + + #[error("building ref but called other op (pid={0})")] + BuildingRefButCalledOther(ProcessId), + + #[error("RefPush called but full (pid={pid} size={size})")] + RefPushOutOfBounds { pid: ProcessId, size: usize }, + + #[error("RefGet offset out of bounds: ref={0:?} offset={1} len={2}")] + RefGetOutOfBounds(Ref, usize, usize), + + #[error("NewRef result mismatch. Got: {0:?}. Expected: {0:?}")] + RefInitializationMismatch(Ref, Ref), +} + +// ---------------------------- verifier ---------------------------- + +pub struct Rom { + process_table: Vec>, + must_burn: Vec, + is_utxo: Vec, + + // mocked, this should be only a commitment + traces: Vec>, +} + +#[derive(Clone, Debug)] +pub struct InterleavingState { + id_curr: ProcessId, + id_prev: Option, + entrypoint: ProcessId, + + /// Claims memory: M[pid] = expected argument to next Resume into pid. + expected_input: Vec>, + expected_resumer: Vec>, + on_yield: Vec, + yield_to: Vec>, + + activation: Vec>, + init: Vec>, + + ref_counter: u64, + ref_store: HashMap>, + ref_sizes: HashMap, + ref_building: HashMap, + + /// If a new output or coordination script is created, it must be through a + /// spawn from a coordinator script + initialized: Vec, + /// Whether the last instruction in that utxo was Yield or Burn + finalized: Vec, + /// Keep track of whether Burn is called + did_burn: Vec, + + /// token -> owner (both ProcessId). None => unowned. 
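+    /// Indexed by token ProcessId; the end state is compared against the
+    /// instance's `ownership_out` in the final verification conditions.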
+ ownership: Vec>, + + /// A stack per possible interface + handler_stack: HashMap>, +} + +#[allow(clippy::result_large_err)] +pub fn verify_interleaving_semantics( + inst: &InterleavingInstance, + wit: &InterleavingWitness, +) -> Result<(), InterleavingError> { + inst.check_shape()?; + + let n = inst.n_inputs + inst.n_new + inst.n_coords; + + if wit.traces.len() != n { + return Err(InterleavingError::Shape( + "witness traces len != process_table len", + )); + } + + // Inputs do not start with an expected resume argument unless we track it + // explicitly in the instance. + let claims_memory = vec![None; n]; + + let rom = Rom { + process_table: inst.process_table.clone(), + must_burn: inst.must_burn.clone(), + is_utxo: inst.is_utxo.clone(), + traces: wit.traces.clone(), + }; + + let mut state = InterleavingState { + id_curr: ProcessId(inst.entrypoint.into()), + id_prev: None, + entrypoint: inst.entrypoint, + expected_input: claims_memory, + expected_resumer: vec![None; n], + on_yield: vec![true; n], + yield_to: vec![None; n], + activation: vec![None; n], + init: vec![None; n], + ref_counter: 0, + ref_store: HashMap::new(), + ref_sizes: HashMap::new(), + ref_building: HashMap::new(), + handler_stack: HashMap::new(), + ownership: inst.ownership_in.clone(), + initialized: vec![false; n], + finalized: vec![false; n], + did_burn: vec![false; n], + }; + + // Inputs exist already (on-ledger) so they start initialized. + // The entrypoint coordination script is the currently executing VM, so it + // starts initialized. + // Everything else must be explicitly constructed/spawned via NewUtxo/NewCoord. + for pid in 0..n { + state.initialized[pid] = pid < inst.n_inputs; + } + + state.initialized[inst.entrypoint.0] = true; + + // ---------- run until current trace ends ---------- + let mut current_state = state; + let mut next_op_idx = vec![0usize; n]; + loop { + let pid = current_state.id_curr; + let c = next_op_idx[pid.0]; + + let trace = &rom.traces[pid.0]; + if c >= trace.len() { + break; + } + + let op = trace[c].clone(); + current_state = state_transition(current_state, &rom, &mut next_op_idx, op)?; + } + let state = current_state; + + // ---------- final verification conditions ---------- + // 1) process called burn but it has a continuation output in the tx + for i in 0..inst.n_inputs { + if rom.must_burn[i] { + let has_burn = rom.traces[i] + .iter() + .any(|hc| matches!(hc, WitLedgerEffect::Burn { ret: _ })); + if !has_burn { + return Err(InterleavingError::BurnedInputNoBurn { pid: ProcessId(i) }); + } + } + } + + // 2) all utxos finalize + for pid in 0..(inst.n_inputs + inst.n_new) { + if !state.finalized[pid] { + return Err(InterleavingError::UtxoNotFinalized(ProcessId(pid))); + } + } + + // 3) all utxos without continuation did call Burn + for pid in 0..inst.n_inputs { + if rom.must_burn[pid] && !state.did_burn[pid] { + return Err(InterleavingError::UtxoShouldBurn(ProcessId(pid))); + } + } + + // 4) finish in a coordination script + if rom.is_utxo[state.id_curr.0] { + return Err(InterleavingError::FinishedInUtxo(state.id_curr)); + } + + // 5) ownership_out matches computed end state + for pid in 0..(inst.n_inputs + inst.n_new) { + let expected = inst.ownership_out[pid]; + let got = state.ownership[pid]; + if expected != got { + return Err(InterleavingError::OwnershipOutMismatch { + pid: ProcessId(pid), + expected, + got, + }); + } + } + + // every object had a constructor called by a coordination script. 
+ // + // TODO: this may be redundant, since resume should not work on unitialized + // programs, and without resuming then the trace counter will catch this + for pid in 0..n { + if !state.initialized[pid] { + return Err(InterleavingError::ProcessNotInitialized { + pid: ProcessId(pid), + }); + } + } + + Ok(()) +} + +#[allow(clippy::result_large_err)] +pub fn state_transition( + mut state: InterleavingState, + rom: &Rom, + next_op_idx: &mut [usize], + op: WitLedgerEffect, +) -> Result { + let id_curr = state.id_curr; + let c = next_op_idx[id_curr.0]; + let trace = &rom.traces[id_curr.0]; + + // For every rule, enforce "host call lookup condition" by checking op == + // t[c]. + // + // Here op is always t[c] anyway because there is no commitment, since this + // doesn't do any zk, it's just trace, but in the circuit this would be a + // lookup constraint into the right table. + if c >= trace.len() { + return Err(InterleavingError::Shape("host call index out of bounds")); + } + let got = trace[c].clone(); + if got != op { + return Err(InterleavingError::Shape("host call mismatch")); + } + + if state.ref_building.contains_key(&id_curr) && !matches!(op, WitLedgerEffect::RefPush { .. }) { + return Err(InterleavingError::BuildingRefButCalledOther(id_curr)); + } + + next_op_idx[id_curr.0] += 1; + + match op { + WitLedgerEffect::Resume { + target, + val, + ret, + caller, + } => { + if id_curr == target { + return Err(InterleavingError::SelfResume(id_curr)); + } + + if rom.is_utxo[id_curr.0] && rom.is_utxo[target.0] { + return Err(InterleavingError::UtxoResumesUtxo { + caller: id_curr, + target, + }); + } + + if !state.initialized[target.0] { + return Err(InterleavingError::TargetNotInitialized(target)); + } + + if state.activation[target.0].is_some() { + return Err(InterleavingError::ReentrantResume(target)); + } + + state.activation[id_curr.0] = None; + + if let Some(expected) = state.expected_input[target.0] + && expected != val + { + return Err(InterleavingError::ResumeClaimMismatch { + target, + expected, + got: val, + }); + } + + if let Some(expected) = state.expected_resumer[target.0] + && expected != id_curr + { + return Err(InterleavingError::ResumerMismatch { + target, + expected, + got: id_curr, + }); + } + + // Expectations are consumed by the resume. 
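+            // The fresh claims recorded just below for the current process will be
+            // checked by whichever process resumes it next.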
+ state.expected_input[target.0] = None; + state.expected_resumer[target.0] = None; + + state.activation[target.0] = Some((val, id_curr)); + + state.expected_input[id_curr.0] = ret.to_option(); + state.expected_resumer[id_curr.0] = caller.to_option().flatten(); + + if state.on_yield[target.0] && !rom.is_utxo[id_curr.0] { + state.yield_to[target.0] = Some(id_curr); + state.on_yield[target.0] = false; + } + + state.id_prev = Some(id_curr); + + state.id_curr = target; + + state.finalized[target.0] = false; + } + + WitLedgerEffect::Yield { val } => { + if !rom.is_utxo[id_curr.0] { + return Err(InterleavingError::UtxoOnly(id_curr)); + } + + let parent = state + .yield_to + .get(id_curr.0) + .and_then(|p| *p) + .ok_or(InterleavingError::YieldWithNoParent { pid: id_curr })?; + + let val = state + .ref_store + .get(&val) + .ok_or(InterleavingError::RefNotFound(val))?; + + if let Some(expected) = state.expected_resumer[parent.0] + && expected != id_curr + { + return Err(InterleavingError::ResumerMismatch { + target: parent, + expected, + got: id_curr, + }); + } + + state.finalized[id_curr.0] = true; + + if let Some(expected_ref) = state.expected_input[parent.0] { + let expected = state + .ref_store + .get(&expected_ref) + .ok_or(InterleavingError::RefNotFound(expected_ref))?; + if expected != val { + return Err(InterleavingError::YieldClaimMismatch { + id_prev: state.id_prev, + expected: expected.clone(), + got: val.clone(), + }); + } + } + + state.on_yield[id_curr.0] = true; + state.id_prev = Some(id_curr); + state.activation[id_curr.0] = None; + state.id_curr = parent; + } + WitLedgerEffect::Return {} => { + if rom.is_utxo[id_curr.0] { + return Err(InterleavingError::CoordOnly(id_curr)); + } + + state.finalized[id_curr.0] = true; + state.activation[id_curr.0] = None; + + if let Some(parent) = state.yield_to[id_curr.0] { + state.id_prev = Some(id_curr); + state.id_curr = parent; + } else if id_curr != state.entrypoint { + return Err(InterleavingError::Shape( + "Return requires parent unless current process is entrypoint", + )); + } + } + + WitLedgerEffect::ProgramHash { + target, + program_hash, + } => { + // check lookup against process_table + let expected = rom.process_table[target.0]; + if expected != program_hash.unwrap() { + return Err(InterleavingError::ProgramHashMismatch { + target, + expected, + got: program_hash.unwrap(), + }); + } + } + + WitLedgerEffect::NewUtxo { + program_hash, + val, + id, + } => { + let id = id.unwrap(); + if rom.is_utxo[id_curr.0] { + return Err(InterleavingError::CoordOnly(id_curr)); + } + if !rom.is_utxo[id.0] { + return Err(InterleavingError::Shape("NewUtxo id must be utxo")); + } + if rom.process_table[id.0] != program_hash { + return Err(InterleavingError::ProgramHashMismatch { + target: id, + expected: rom.process_table[id.0], + got: program_hash, + }); + } + if state.initialized[id.0] { + return Err(InterleavingError::Shape( + "NewUtxo requires initialized[id]==false", + )); + } + state.initialized[id.0] = true; + state.init[id.0] = Some((val, id_curr)); + state.expected_input[id.0] = None; + state.expected_resumer[id.0] = None; + state.on_yield[id.0] = true; + state.yield_to[id.0] = None; + } + + WitLedgerEffect::NewCoord { + program_hash, + val, + id, + } => { + let id = id.unwrap(); + if rom.is_utxo[id_curr.0] { + return Err(InterleavingError::CoordOnly(id_curr)); + } + if rom.is_utxo[id.0] { + return Err(InterleavingError::Shape("NewCoord id must be coord")); + } + if rom.process_table[id.0] != program_hash { + return 
Err(InterleavingError::ProgramHashMismatch { + target: id, + expected: rom.process_table[id.0], + got: program_hash, + }); + } + if state.initialized[id.0] { + return Err(InterleavingError::Shape( + "NewCoord requires initialized[id]==false", + )); + } + + state.initialized[id.0] = true; + state.init[id.0] = Some((val, id_curr)); + state.expected_input[id.0] = None; + state.expected_resumer[id.0] = None; + state.on_yield[id.0] = true; + state.yield_to[id.0] = None; + } + + WitLedgerEffect::InstallHandler { interface_id } => { + if rom.is_utxo[id_curr.0] { + return Err(InterleavingError::CoordOnly(id_curr)); + } + state + .handler_stack + .entry(interface_id) + .or_default() + .push(id_curr); + } + + WitLedgerEffect::UninstallHandler { interface_id } => { + if rom.is_utxo[id_curr.0] { + return Err(InterleavingError::CoordOnly(id_curr)); + } + let stack = state.handler_stack.entry(interface_id).or_default(); + let Some(top) = stack.last().copied() else { + return Err(InterleavingError::HandlerNotFound { + interface_id, + pid: id_curr, + }); + }; + if top != id_curr { + return Err(InterleavingError::InstalledHandlerIsNotCurrent { + interface_id, + pid: id_curr, + }); + } + let Some(_) = stack.pop() else { + return Err(InterleavingError::HandlerNotFound { + interface_id, + pid: id_curr, + }); + }; + } + + WitLedgerEffect::GetHandlerFor { + interface_id, + handler_id, + } => { + let stack = state.handler_stack.entry(interface_id).or_default(); + let expected = stack.last().copied(); + if expected != Some(handler_id.unwrap()) { + return Err(InterleavingError::HandlerGetMismatch { + interface_id, + expected, + got: handler_id.unwrap(), + }); + } + } + WitLedgerEffect::CallEffectHandler { + interface_id, + val, + ret, + } => { + let stack = state.handler_stack.entry(interface_id).or_default(); + let target = stack + .last() + .copied() + .ok_or(InterleavingError::HandlerNotFound { + interface_id, + pid: id_curr, + })?; + + if state.activation[target.0].is_some() { + return Err(InterleavingError::ReentrantResume(target)); + } + + state.activation[id_curr.0] = None; + + if let Some(expected) = state.expected_input[target.0] + && expected != val + { + return Err(InterleavingError::ResumeClaimMismatch { + target, + expected, + got: val, + }); + } + + if let Some(expected) = state.expected_resumer[target.0] + && expected != id_curr + { + return Err(InterleavingError::ResumerMismatch { + target, + expected, + got: id_curr, + }); + } + + state.expected_input[target.0] = None; + state.expected_resumer[target.0] = None; + + state.activation[target.0] = Some((val, id_curr)); + state.expected_input[id_curr.0] = ret.to_option(); + state.expected_resumer[id_curr.0] = Some(target); + + state.id_prev = Some(id_curr); + state.id_curr = target; + state.finalized[target.0] = false; + } + + WitLedgerEffect::Activation { val, caller } => { + let curr = state.id_curr; + + let Some((v, c)) = &state.activation[curr.0] else { + return Err(InterleavingError::Shape( + "Activation called with no arg set", + )); + }; + + if v != &val.unwrap() || c != &caller.unwrap() { + return Err(InterleavingError::Shape("Activation result mismatch")); + } + } + + WitLedgerEffect::Init { val, caller } => { + let curr = state.id_curr; + + let Some((v, c)) = state.init[curr.0].take() else { + return Err(InterleavingError::Shape("Init called with no arg set")); + }; + + if v != val.unwrap() || c != caller.unwrap() { + return Err(InterleavingError::Shape("Init result mismatch")); + } + } + + WitLedgerEffect::NewRef { size, ret } => { + if 
state.ref_building.contains_key(&id_curr) { + return Err(InterleavingError::BuildingRefButCalledOther(id_curr)); + } + let new_ref = Ref(state.ref_counter); + let size_words = size; + let size_elems = size_words * REF_PUSH_WIDTH; + state.ref_counter += size_elems as u64; + if new_ref != ret.unwrap() { + return Err(InterleavingError::RefInitializationMismatch( + ret.unwrap(), + new_ref, + )); + } + state.ref_store.insert(new_ref, vec![Value(0); size_elems]); + state.ref_sizes.insert(new_ref, size_words); + state.ref_building.insert(id_curr, (new_ref, 0, size_words)); + } + + WitLedgerEffect::RefPush { vals } => { + let (reff, offset_words, size_words) = state + .ref_building + .remove(&id_curr) + .ok_or(InterleavingError::RefPushNotBuilding(id_curr))?; + + let new_offset = offset_words + 1; + + let vec = state + .ref_store + .get_mut(&reff) + .ok_or(InterleavingError::RefNotFound(reff))?; + + for (i, val) in vals.iter().enumerate() { + let pos = (offset_words * REF_PUSH_WIDTH) + i; + if pos >= size_words * REF_PUSH_WIDTH && *val != Value::nil() { + return Err(InterleavingError::RefPushOutOfBounds { + pid: id_curr, + size: size_words, + }); + } + + if let Some(pos) = vec.get_mut(pos) { + *pos = *val; + } + } + + if new_offset < size_words { + state + .ref_building + .insert(id_curr, (reff, new_offset, size_words)); + } + } + + WitLedgerEffect::RefGet { reff, offset, ret } => { + let vec = state + .ref_store + .get(&reff) + .ok_or(InterleavingError::RefNotFound(reff))?; + let size_words = state + .ref_sizes + .get(&reff) + .copied() + .ok_or(InterleavingError::RefNotFound(reff))?; + if offset >= size_words { + return Err(InterleavingError::Shape("RefGet out of bounds")); + } + let mut val = [Value::nil(); REF_GET_WIDTH]; + for (i, slot) in val.iter_mut().enumerate() { + let idx = (offset * REF_GET_WIDTH) + i; + if idx < size_words * REF_GET_WIDTH { + *slot = vec[idx]; + } + } + if val != ret.unwrap() { + return Err(InterleavingError::Shape("RefGet result mismatch")); + } + } + + WitLedgerEffect::RefWrite { reff, offset, vals } => { + let vec = state + .ref_store + .get_mut(&reff) + .ok_or(InterleavingError::RefNotFound(reff))?; + let size_words = state + .ref_sizes + .get(&reff) + .copied() + .ok_or(InterleavingError::RefNotFound(reff))?; + + if offset >= size_words { + return Err(InterleavingError::Shape("RefWrite out of bounds")); + } + + for (i, val) in vals.iter().enumerate() { + let idx = (offset * REF_WRITE_WIDTH) + i; + vec[idx] = *val; + } + } + + WitLedgerEffect::Burn { ret } => { + if !rom.is_utxo[id_curr.0] { + return Err(InterleavingError::UtxoOnly(id_curr)); + } + + if !rom.must_burn[id_curr.0] { + return Err(InterleavingError::UtxoShouldNotBurn(id_curr)); + } + + let parent = state + .id_prev + .ok_or(InterleavingError::BurnWithNoParent { pid: id_curr })?; + + let ret_val = state + .ref_store + .get(&ret) + .ok_or(InterleavingError::RefNotFound(ret))?; + + if let Some(expected_ref) = state.expected_input[parent.0] { + let expected_val = state + .ref_store + .get(&expected_ref) + .ok_or(InterleavingError::RefNotFound(expected_ref))?; + + if expected_val != ret_val { + // Burn is the final return of the coroutine + return Err(InterleavingError::YieldClaimMismatch { + id_prev: state.id_prev, + expected: expected_val.clone(), + got: ret_val.clone(), + }); + } + } + + state.activation[id_curr.0] = None; + state.finalized[id_curr.0] = true; + state.did_burn[id_curr.0] = true; + state.expected_input[id_curr.0] = Some(ret); + state.id_prev = Some(id_curr); + state.id_curr = parent; + } 
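+
+        // Token ownership: `Bind` is issued by the token process to attach itself
+        // to `owner_id`; `Unbind` is issued by the current owner to release
+        // `token_id`. Both sides must be initialized UTXO processes.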
+ + WitLedgerEffect::Bind { owner_id } => { + let token_id = id_curr; + + if !rom.is_utxo[token_id.0] { + return Err(InterleavingError::Shape("Bind: token_id must be utxo")); + } + if !rom.is_utxo[owner_id.0] { + return Err(InterleavingError::OwnerNotUtxo(owner_id)); + } + if !state.initialized[token_id.0] || !state.initialized[owner_id.0] { + return Err(InterleavingError::Shape("Bind: both must be initialized")); + } + if state.ownership[token_id.0].is_some() { + return Err(InterleavingError::TokenAlreadyOwned { + token: token_id, + owner: state.ownership[token_id.0], + }); + } + + state.ownership[token_id.0] = Some(owner_id); + } + + WitLedgerEffect::Unbind { token_id } => { + let owner_id = id_curr; + + if !rom.is_utxo[owner_id.0] { + return Err(InterleavingError::Shape("Unbind: caller must be utxo")); + } + if !rom.is_utxo[token_id.0] || !state.initialized[token_id.0] { + return Err(InterleavingError::Shape( + "Unbind: token must exist and be utxo", + )); + } + let cur_owner = state.ownership[token_id.0]; + if cur_owner != Some(owner_id) { + return Err(InterleavingError::UnbindNotOwner { + token: token_id, + owner: cur_owner, + caller: owner_id, + }); + } + + state.ownership[token_id.0] = None; + } + } + + Ok(state) +} diff --git a/interleaving/starstream-interleaving-spec/src/tests.rs b/interleaving/starstream-interleaving-spec/src/tests.rs new file mode 100644 index 00000000..57c3404c --- /dev/null +++ b/interleaving/starstream-interleaving-spec/src/tests.rs @@ -0,0 +1,619 @@ +use super::*; +use crate::builder::{RefGenerator, TransactionBuilder, h, v}; +use crate::{mocked_verifier::InterleavingError, transaction_effects::witness::WitLedgerEffect}; + +fn v7(data: &[u8]) -> [Value; REF_PUSH_WIDTH] { + let mut out = [Value::nil(); REF_PUSH_WIDTH]; + out[0] = v(data); + out +} + +fn mock_genesis() -> (Ledger, UtxoId, UtxoId, CoroutineId, CoroutineId) { + let input_hash_1 = h(10); + let input_hash_2 = h(11); + + // Create input UTXO IDs + let input_utxo_1 = UtxoId { + contract_hash: input_hash_1, + nonce: 0, + }; + let input_utxo_2 = UtxoId { + contract_hash: input_hash_2, + nonce: 0, + }; + + let mut ledger = Ledger { + utxos: HashMap::new(), + contract_counters: HashMap::new(), + utxo_to_coroutine: HashMap::new(), + ownership_registry: HashMap::new(), + }; + + let input_1_coroutine = CoroutineId { + creation_tx_hash: Hash([1u64, 0, 0, 0], PhantomData), + creation_output_index: 0, + }; + + let input_2_coroutine = CoroutineId { + creation_tx_hash: Hash([1u64, 0, 0, 0], PhantomData), + creation_output_index: 1, + }; + + ledger.utxos.insert( + input_utxo_1.clone(), + UtxoEntry { + state: CoroutineState { + pc: 0, + globals: vec![], + }, + contract_hash: input_hash_1, + }, + ); + ledger.utxos.insert( + input_utxo_2.clone(), + UtxoEntry { + state: CoroutineState { + pc: 0, + globals: vec![], + }, + contract_hash: input_hash_2, + }, + ); + + ledger + .utxo_to_coroutine + .insert(input_utxo_1.clone(), input_1_coroutine.clone()); + ledger + .utxo_to_coroutine + .insert(input_utxo_2.clone(), input_2_coroutine.clone()); + + // Set up contract counters + ledger.contract_counters.insert(input_hash_1, 1); + ledger.contract_counters.insert(input_hash_2, 1); + ledger.contract_counters.insert(h(1), 0); // coord_hash + ledger.contract_counters.insert(h(2), 0); // utxo_hash_a + ledger.contract_counters.insert(h(3), 0); // utxo_hash_b + + ( + ledger, + input_utxo_1, + input_utxo_2, + input_1_coroutine, + input_2_coroutine, + ) +} + +#[allow(clippy::result_large_err)] +fn 
mock_genesis_and_apply_tx(proven_tx: ProvenTransaction) -> Result { + let (ledger, _, _, _, _) = mock_genesis(); + ledger.apply_transaction(&proven_tx) +} + +#[test] +fn test_transaction_with_coord_and_utxos() { + // This test simulates a "complex" transaction involving 2 input UTXOs, 2 new UTXOs, + // and 1 coordination script that orchestrates the control flow. + // The diagram below shows the lifecycle of each process: `(input)` marks a UTXO + // that is consumed by the transaction, and `(output)` marks one that is + // created by the transaction. P1 is burned, so it is an input but not an output. + // + // P4 (Coord) P0 (Input 1) P1 (Input 2) P2 (New UTXO A) P3 (New UTXO B) + // | | | | | + // NewRef("init_a") | | | | + // NewUtxo(A) ------------> Init | | | + // NewRef("init_b") | | | | + // NewUtxo(B) ----------------------------------> Init | | + // NewRef("spend_input_1") | | | | + // NewRef("continued_1") | | | | + // Resume ----------------> Activation | | | + // |<--- -------------- Yield | | | + // NewRef("spend_input_2") | | | | + // NewRef("burned_2") | | | | + // Resume ------------------------------------> Activation | | + // |<--------------------------------------- Burn | | + // NewRef("done_a") | | | | + // Resume -----------------------------------------------------------> Activation | + // |<------------------------------------------------------------- Bind | + // |<------------------------------------------------------------- Yield | + // NewRef("done_b") | | | | + // Resume ---------------------------------------------------------------------------------> Activation + // |<----------------------------------------------------------------------------------- Yield + // | | | | | + // (end) (continued) (burned) (new_output) (new_output) + + let (ledger, input_utxo_1, input_utxo_2, _, _) = mock_genesis(); + + let coord_hash = h(1); + let utxo_hash_a = h(2); + let utxo_hash_b = h(3); + + let mut refs = RefGenerator::new(); + + // Pre-allocate all refs in the order they appear in coord_trace to ensure consistent numbering + let init_a_ref = refs.get("init_a"); + let init_b_ref = refs.get("init_b"); + let spend_input_1_ref = refs.get("spend_input_1"); + let continued_1_ref = refs.get("continued_1"); + let spend_input_2_ref = refs.get("spend_input_2"); + let burned_2_ref = refs.get("burned_2"); + let done_a_ref = refs.get("done_a"); + let done_b_ref = refs.get("done_b"); + + // Host refs each process in canonical order: inputs ++ new_outputs ++ coord_scripts + // Process 0: Input 1, Process 1: Input 2, Process 2: UTXO A (spawn), Process 3: UTXO B (spawn), Process 4: Coordination script + let input_1_trace = vec![ + WitLedgerEffect::Activation { + val: spend_input_1_ref.into(), + caller: ProcessId(4).into(), + }, + WitLedgerEffect::Yield { + val: continued_1_ref, + }, + ]; + + let input_2_trace = vec![ + WitLedgerEffect::Activation { + val: spend_input_2_ref.into(), + caller: ProcessId(4).into(), + }, + WitLedgerEffect::Burn { ret: burned_2_ref }, + ]; + + let utxo_a_trace = vec![ + WitLedgerEffect::Init { + val: init_a_ref.into(), + caller: ProcessId(4).into(), + }, + WitLedgerEffect::Activation { + val: init_a_ref.into(), + caller: ProcessId(4).into(), + }, + WitLedgerEffect::Bind { + owner_id: ProcessId(3), + }, + WitLedgerEffect::Yield { val: done_a_ref }, + ]; + + let utxo_b_trace = vec![ + WitLedgerEffect::Init { + val: init_b_ref.into(), + caller: ProcessId(4).into(), + }, + WitLedgerEffect::Activation { + val: init_b_ref.into(), + caller: ProcessId(4).into(), + }, + 
WitLedgerEffect::Yield { val: done_b_ref }, + ]; + + let coord_trace = vec![ + WitLedgerEffect::NewRef { + size: 1, + ret: init_a_ref.into(), + }, + WitLedgerEffect::RefPush { + vals: v7(b"init_a"), + }, + WitLedgerEffect::NewUtxo { + program_hash: utxo_hash_a, + val: init_a_ref, + id: ProcessId(2).into(), + }, + WitLedgerEffect::NewRef { + size: 1, + ret: init_b_ref.into(), + }, + WitLedgerEffect::RefPush { + vals: v7(b"init_b"), + }, + WitLedgerEffect::NewUtxo { + program_hash: utxo_hash_b, + val: init_b_ref, + id: ProcessId(3).into(), + }, + WitLedgerEffect::NewRef { + size: 1, + ret: spend_input_1_ref.into(), + }, + WitLedgerEffect::RefPush { + vals: v7(b"spend_input_1"), + }, + WitLedgerEffect::NewRef { + size: 1, + ret: continued_1_ref.into(), + }, + WitLedgerEffect::RefPush { + vals: v7(b"continued_1"), + }, + WitLedgerEffect::Resume { + target: ProcessId(0), + val: spend_input_1_ref, + ret: continued_1_ref.into(), + caller: Some(ProcessId(0)).into(), + }, + WitLedgerEffect::NewRef { + size: 1, + ret: spend_input_2_ref.into(), + }, + WitLedgerEffect::RefPush { + vals: v7(b"spend_input_2"), + }, + WitLedgerEffect::NewRef { + size: 1, + ret: burned_2_ref.into(), + }, + WitLedgerEffect::RefPush { + vals: v7(b"burned_2"), + }, + WitLedgerEffect::Resume { + target: ProcessId(1), + val: spend_input_2_ref, + ret: burned_2_ref.into(), + caller: Some(ProcessId(1)).into(), + }, + WitLedgerEffect::NewRef { + size: 1, + ret: done_a_ref.into(), + }, + WitLedgerEffect::RefPush { + vals: v7(b"done_a"), + }, + WitLedgerEffect::Resume { + target: ProcessId(2), + val: init_a_ref, + ret: done_a_ref.into(), + caller: Some(ProcessId(2)).into(), + }, + WitLedgerEffect::NewRef { + size: 1, + ret: done_b_ref.into(), + }, + WitLedgerEffect::RefPush { + vals: v7(b"done_b"), + }, + WitLedgerEffect::Resume { + target: ProcessId(3), + val: init_b_ref, + ret: done_b_ref.into(), + caller: Some(ProcessId(3)).into(), + }, + ]; + + let proven_tx = TransactionBuilder::new() + .with_input( + input_utxo_1, + Some(CoroutineState { + pc: 1, + globals: vec![], + }), + input_1_trace, + ) + .with_input(input_utxo_2, None, input_2_trace) + .with_fresh_output( + NewOutput { + state: CoroutineState { + pc: 0, + globals: vec![], + }, + contract_hash: utxo_hash_a, + }, + utxo_a_trace, + ) + .with_fresh_output( + NewOutput { + state: CoroutineState { + pc: 0, + globals: vec![], + }, + contract_hash: utxo_hash_b, + }, + utxo_b_trace, + ) + .with_coord_script(coord_hash, coord_trace) + .with_ownership(OutputRef(2), OutputRef(3)) + .with_entrypoint(4) + .build(ZkTransactionProof::Dummy); + + let ledger = ledger.apply_transaction(&proven_tx).unwrap(); + + assert_eq!(ledger.utxos.len(), 3); + assert_eq!(ledger.ownership_registry.len(), 1); +} + +#[test] +fn test_effect_handlers() { + let coord_hash = h(1); + let utxo_hash = h(2); + let interface_id = h(42); + + let mut ref_gen = RefGenerator::new(); + + let utxo_trace = vec![ + WitLedgerEffect::Activation { + val: ref_gen.get("init_utxo").into(), + caller: ProcessId(1).into(), + }, + WitLedgerEffect::ProgramHash { + target: ProcessId(1), + program_hash: coord_hash.into(), + }, + WitLedgerEffect::GetHandlerFor { + interface_id, + handler_id: ProcessId(1).into(), + }, + WitLedgerEffect::NewRef { + size: 1, + ret: ref_gen.get("effect_request").into(), + }, + WitLedgerEffect::RefPush { + vals: v7(b"Interface::Effect(42)"), + }, + WitLedgerEffect::Resume { + target: ProcessId(1), + val: ref_gen.get("effect_request"), + ret: ref_gen.get("effect_request_response").into(), + caller: 
Some(ProcessId(1)).into(), + }, + WitLedgerEffect::Yield { + val: ref_gen.get("utxo_final"), + }, + ]; + + let coord_trace = vec![ + WitLedgerEffect::InstallHandler { interface_id }, + WitLedgerEffect::NewRef { + size: 1, + ret: ref_gen.get("init_utxo").into(), + }, + WitLedgerEffect::RefPush { + vals: v7(b"init_utxo"), + }, + WitLedgerEffect::NewUtxo { + program_hash: h(2), + val: ref_gen.get("init_utxo"), + id: ProcessId(0).into(), + }, + WitLedgerEffect::Resume { + target: ProcessId(0), + val: ref_gen.get("init_utxo"), + ret: ref_gen.get("effect_request").into(), + caller: WitEffectOutput::Resolved(None), + }, + WitLedgerEffect::NewRef { + size: 1, + ret: ref_gen.get("effect_request_response").into(), + }, + WitLedgerEffect::RefPush { + vals: v7(b"Interface::EffectResponse(43)"), + }, + WitLedgerEffect::NewRef { + size: 1, + ret: ref_gen.get("utxo_final").into(), + }, + WitLedgerEffect::RefPush { + vals: v7(b"utxo_final"), + }, + WitLedgerEffect::Resume { + target: ProcessId(0), + val: ref_gen.get("effect_request_response"), + ret: ref_gen.get("utxo_final").into(), + caller: Some(ProcessId(0)).into(), + }, + WitLedgerEffect::UninstallHandler { interface_id }, + ]; + + let proven_tx = TransactionBuilder::new() + .with_fresh_output( + NewOutput { + state: CoroutineState { + pc: 0, + globals: vec![], + }, + contract_hash: utxo_hash, + }, + utxo_trace, + ) + .with_coord_script(coord_hash, coord_trace) + .with_entrypoint(1) + .build(ZkTransactionProof::Dummy); + + let ledger = Ledger { + utxos: HashMap::new(), + contract_counters: HashMap::new(), + utxo_to_coroutine: HashMap::new(), + ownership_registry: HashMap::new(), + }; + + let ledger = ledger.apply_transaction(&proven_tx).unwrap(); + + assert_eq!(ledger.utxos.len(), 1); + assert_eq!(ledger.ownership_registry.len(), 0); +} + +#[test] +fn test_burn_with_continuation_fails() { + let (_, input_utxo_1, _, _, _) = mock_genesis(); + let tx = TransactionBuilder::new() + .with_input( + input_utxo_1, + Some(CoroutineState { + pc: 1, + globals: vec![], + }), + vec![ + WitLedgerEffect::NewRef { + size: 1, + ret: Ref(0).into(), + }, + WitLedgerEffect::RefPush { + vals: v7(b"burned"), + }, + WitLedgerEffect::Burn { ret: Ref(0) }, + ], + ) + .with_entrypoint(0) + .build(ZkTransactionProof::Dummy); + let result = mock_genesis_and_apply_tx(tx); + assert!(matches!( + result, + Err(VerificationError::InterleavingProofError( + InterleavingError::UtxoShouldNotBurn(_) + )) + )); +} + +#[test] +fn test_utxo_resumes_utxo_fails() { + let (_, input_utxo_1, input_utxo_2, _, _) = mock_genesis(); + let tx = TransactionBuilder::new() + .with_input( + input_utxo_1, + None, + vec![ + WitLedgerEffect::NewRef { + size: 1, + ret: Ref(0).into(), + }, + WitLedgerEffect::RefPush { vals: v7(b"") }, + WitLedgerEffect::Resume { + target: ProcessId(1), + val: Ref(0), + ret: Ref(0).into(), + caller: WitEffectOutput::Resolved(None), + }, + ], + ) + .with_input(input_utxo_2, None, vec![]) + .with_entrypoint(0) + .build(ZkTransactionProof::Dummy); + let result = mock_genesis_and_apply_tx(tx); + assert!(matches!( + result, + Err(VerificationError::InterleavingProofError( + InterleavingError::UtxoResumesUtxo { .. 
} + )) + )); +} + +#[test] +fn test_continuation_without_yield_fails() { + let (_, input_utxo_1, _, _, _) = mock_genesis(); + let tx = TransactionBuilder::new() + .with_input( + input_utxo_1, + Some(CoroutineState { + pc: 1, + globals: vec![], + }), + vec![], + ) + .with_entrypoint(0) + .build(ZkTransactionProof::Dummy); + let result = mock_genesis_and_apply_tx(tx); + assert!(matches!( + result, + Err(VerificationError::InterleavingProofError( + InterleavingError::UtxoNotFinalized(_) + )) + )); +} + +#[test] +fn test_unbind_not_owner_fails() { + let (_, input_utxo_1, input_utxo_2, _, _) = mock_genesis(); + let tx = TransactionBuilder::new() + .with_input(input_utxo_1, None, vec![]) + .with_input( + input_utxo_2, + None, + vec![WitLedgerEffect::Unbind { + token_id: ProcessId(0), + }], + ) + .with_entrypoint(1) + .build(ZkTransactionProof::Dummy); + let result = mock_genesis_and_apply_tx(tx); + assert!(matches!( + result, + Err(VerificationError::InterleavingProofError( + InterleavingError::UnbindNotOwner { .. } + )) + )); +} + +#[test] +fn test_duplicate_input_utxo_fails() { + let input_id = UtxoId { + contract_hash: h(1), + nonce: 0, + }; + + let mut utxos = HashMap::new(); + + utxos.insert( + input_id.clone(), + UtxoEntry { + state: CoroutineState { + pc: 0, + globals: vec![], + }, + contract_hash: h(1), + }, + ); + + let mut contract_counters = HashMap::new(); + + contract_counters.insert(h(1), 0); + + let mut utxo_to_coroutine = HashMap::new(); + + utxo_to_coroutine.insert( + input_id.clone(), + CoroutineId { + creation_tx_hash: Hash([1u64, 0, 0, 0], PhantomData), + creation_output_index: 0, + }, + ); + + let ledger = Ledger { + utxos, + contract_counters, + utxo_to_coroutine, + ownership_registry: HashMap::new(), + }; + + let coord_hash = h(42); + let tx = TransactionBuilder::new() + .with_input( + input_id.clone(), + None, + vec![ + WitLedgerEffect::NewRef { + size: 1, + ret: Ref(4).into(), + }, + WitLedgerEffect::RefPush { vals: v7(b"") }, + WitLedgerEffect::Burn { ret: Ref(0) }, + ], + ) + .with_coord_script( + coord_hash, + vec![ + WitLedgerEffect::NewRef { + size: 1, + ret: Ref(0).into(), + }, + WitLedgerEffect::RefPush { vals: v7(b"") }, + WitLedgerEffect::Resume { + target: 0.into(), + val: Ref(0), + ret: Ref(0).into(), + caller: WitEffectOutput::Resolved(None), + }, + ], + ) + .with_entrypoint(1) + .build(ZkTransactionProof::Dummy); + + let _ledger = ledger.apply_transaction(&tx).unwrap(); +} diff --git a/interleaving/starstream-interleaving-spec/src/transaction_effects/instance.rs b/interleaving/starstream-interleaving-spec/src/transaction_effects/instance.rs new file mode 100644 index 00000000..5a2aadde --- /dev/null +++ b/interleaving/starstream-interleaving-spec/src/transaction_effects/instance.rs @@ -0,0 +1,108 @@ +use ark_ff::PrimeField as _; +use neo_fold::output_binding::OutputBindingConfig; +use neo_memory::ProgramIO; +use p3_field::PrimeCharacteristicRing; + +use crate::{ + CoroutineState, Hash, WasmModule, mocked_verifier::InterleavingError, + mocked_verifier::LedgerEffectsCommitment, transaction_effects::ProcessId, +}; + +// this mirrors the configuration described in SEMANTICS.md +#[derive(Clone)] +pub struct InterleavingInstance { + /// Digest of all per-process host call tables the circuit is wired to. + /// One per wasm proof. + pub host_calls_roots: Vec, + + /// Process table in canonical order: inputs, new_outputs, coord scripts. + pub process_table: Vec>, + pub is_utxo: Vec, + + /// Burned/continuation mask for inputs (length = #inputs). 
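+    /// `must_burn[i] == true` means input `i` is consumed (burned) and produces
+    /// no continuation output.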
+    pub must_burn: Vec<bool>,
+
+    /// Segment sizes of the process table (inputs, new outputs, coordination scripts).
+    pub n_inputs: usize,
+    pub n_new: usize,
+    pub n_coords: usize,
+
+    /// Initial ownership snapshot for inputs IN THE TRANSACTION.
+    ///
+    /// This has len == n_inputs + n_new (one entry per UTXO process).
+    ///
+    /// `ownership_in[i] == Some(j)` means that utxo `i` is owned by process `j`
+    /// at the beginning of the transaction.
+    ///
+    /// None means not owned.
+    pub ownership_in: Vec<Option<ProcessId>>,
+
+    /// Final ownership snapshot for utxos IN THE TRANSACTION.
+    ///
+    /// This has len == n_inputs + n_new (one entry per UTXO process).
+    ///
+    /// Final state of the ownership graph (the new ledger state).
+    pub ownership_out: Vec<Option<ProcessId>>,
+
+    /// First coordination script; execution starts here.
+    pub entrypoint: ProcessId,
+
+    pub input_states: Vec<CoroutineState>,
+}
+
+impl InterleavingInstance {
+    #[allow(clippy::result_large_err)]
+    pub fn check_shape(&self) -> Result<(), InterleavingError> {
+        // ---------- shape checks ----------
+        // TODO: a few of these may be redundant
+        //
+        // the data layout is still a bit weird
+        let n = self.process_table.len();
+
+        if self.is_utxo.len() != n {
+            return Err(InterleavingError::Shape("is_utxo len != process_table len"));
+        }
+
+        if self.ownership_in.len() != self.n_inputs + self.n_new
+            || self.ownership_out.len() != self.n_inputs + self.n_new
+        {
+            return Err(InterleavingError::Shape(
+                "ownership_in/out len != n_inputs + n_new",
+            ));
+        }
+
+        if self.entrypoint.0 >= n {
+            return Err(InterleavingError::BadPid(self.entrypoint));
+        }
+
+        if self.must_burn.len() != self.n_inputs {
+            return Err(InterleavingError::Shape("must_burn len != n_inputs"));
+        }
+
+        Ok(())
+    }
+
+    pub fn output_binding_config(&self) -> OutputBindingConfig {
+        let mut program_io = ProgramIO::new();
+
+        let mut addr = 0;
+        for comm in self.host_calls_roots.iter() {
+            for v in comm.0 {
+                program_io =
+                    program_io.with_output(addr, neo_math::F::from_u64(v.into_bigint().0[0]));
+                addr += 1;
+            }
+        }
+
+        let num_bits = 8;
+        // currently the twist tables have a size of 256, so 2**8 == 256
+        //
+        // need to figure out if that can be generalized, or if we need a bound or not
+
+        // TraceCommitments RAM index in the sorted twist_id list (see proof MemoryTag ordering).
+ // + // TODO: de-harcode the 12 + // it's supposed to be the twist index of the TraceCommitments memory + OutputBindingConfig::new(num_bits, program_io).with_mem_idx(12) + } +} diff --git a/interleaving/starstream-interleaving-spec/src/transaction_effects/mod.rs b/interleaving/starstream-interleaving-spec/src/transaction_effects/mod.rs new file mode 100644 index 00000000..da61c2cf --- /dev/null +++ b/interleaving/starstream-interleaving-spec/src/transaction_effects/mod.rs @@ -0,0 +1,30 @@ +use crate::Hash; + +pub mod instance; +pub mod witness; + +#[derive(PartialEq, Eq, Hash, Clone, Debug)] +pub struct Blob(Vec); + +pub type InterfaceId = Hash; + +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] +pub struct ProcessId(pub usize); + +impl std::fmt::Display for ProcessId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + +impl From for ProcessId { + fn from(value: usize) -> Self { + ProcessId(value) + } +} + +impl From for usize { + fn from(value: ProcessId) -> Self { + value.0 + } +} diff --git a/interleaving/starstream-interleaving-spec/src/transaction_effects/witness.rs b/interleaving/starstream-interleaving-spec/src/transaction_effects/witness.rs new file mode 100644 index 00000000..3cb4c867 --- /dev/null +++ b/interleaving/starstream-interleaving-spec/src/transaction_effects/witness.rs @@ -0,0 +1,231 @@ +use crate::{ + Hash, Ref, Value, WasmModule, + transaction_effects::{InterfaceId, ProcessId}, +}; + +// Discriminants for host calls +#[derive(Debug)] +pub enum EffectDiscriminant { + Resume = 0, + Yield = 1, + NewUtxo = 2, + NewCoord = 3, + InstallHandler = 4, + UninstallHandler = 5, + GetHandlerFor = 6, + Burn = 7, + Activation = 8, + Init = 9, + NewRef = 10, + RefPush = 11, + RefGet = 12, + Bind = 13, + Unbind = 14, + ProgramHash = 15, + RefWrite = 16, + Return = 17, + CallEffectHandler = 18, +} + +pub const REF_PUSH_WIDTH: usize = 4; +pub const REF_GET_WIDTH: usize = 4; +pub const REF_WRITE_WIDTH: usize = 4; + +// Both used to indicate which fields are outputs, and to have a placeholder +// value for the runtime executor (trace generator) +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] +pub enum WitEffectOutput { + Resolved(T), + Thunk, +} + +/// One entry in the per-process trace. +// +// Note that since these are witnesses, they include the inputs and the outputs +// for each operation. 
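+//
+// Outputs are wrapped in `WitEffectOutput`: `Resolved` once the value is known,
+// `Thunk` while the trace generator has not produced it yet.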
+#[derive(Clone, Debug, PartialEq, Eq)] +pub enum WitLedgerEffect { + Resume { + // in + target: ProcessId, + val: Ref, + // out + ret: WitEffectOutput, + caller: WitEffectOutput>, + }, + Yield { + // in + val: Ref, + }, + Return {}, + ProgramHash { + // in + target: ProcessId, + // out + program_hash: WitEffectOutput>, + }, + NewUtxo { + // in + program_hash: Hash, + val: Ref, + // out + id: WitEffectOutput, + }, + NewCoord { + // int + program_hash: Hash, + val: Ref, + + // out + id: WitEffectOutput, + }, + // Scoped handlers for custom effects + // + // coord only (mainly because utxos can't resume utxos anyway) + InstallHandler { + // in + interface_id: InterfaceId, + }, + UninstallHandler { + // in + interface_id: InterfaceId, + // out + // + // does not return anything + }, + GetHandlerFor { + // in + interface_id: InterfaceId, + // out + handler_id: WitEffectOutput, + }, + CallEffectHandler { + // in + interface_id: InterfaceId, + val: Ref, + // out + ret: WitEffectOutput, + }, + + // UTXO-only + Burn { + // in + ret: Ref, + }, + + Activation { + // out + val: WitEffectOutput, + caller: WitEffectOutput, + }, + + Init { + // out + val: WitEffectOutput, + caller: WitEffectOutput, + }, + + NewRef { + // in + // Size is in 4-value words. + size: usize, + // out + ret: WitEffectOutput, + }, + RefPush { + // in + vals: [Value; REF_PUSH_WIDTH], + // out + // does not return anything + }, + RefGet { + // in + reff: Ref, + // Offset is in 4-value words. + offset: usize, + + // out + ret: WitEffectOutput<[Value; REF_GET_WIDTH]>, + }, + RefWrite { + // in + reff: Ref, + // Offset is in 4-value words. + offset: usize, + vals: [Value; REF_WRITE_WIDTH], + // out + // does not return anything + }, + + // Tokens + Bind { + owner_id: ProcessId, + // does not return anything + }, + Unbind { + token_id: ProcessId, + // does not return anything + }, +} + +impl WitEffectOutput { + pub fn unwrap(self) -> T { + match self { + WitEffectOutput::Resolved(v) => v, + WitEffectOutput::Thunk => panic!("Called unwrap on a Thunk"), + } + } + + pub fn is_resolved(&self) -> bool { + matches!(self, WitEffectOutput::Resolved(_)) + } + + pub fn to_option(self) -> Option { + match self { + WitEffectOutput::Resolved(t) => Some(t), + WitEffectOutput::Thunk => None, + } + } +} + +impl From for WitEffectOutput { + fn from(value: T) -> WitEffectOutput { + WitEffectOutput::Resolved(value) + } +} + +impl From> for WitEffectOutput { + fn from(value: Option) -> WitEffectOutput { + match value { + Some(t) => WitEffectOutput::Resolved(t), + None => WitEffectOutput::Thunk, + } + } +} + +impl From for EffectDiscriminant { + fn from(value: u64) -> Self { + match value { + 0 => EffectDiscriminant::Resume, + 1 => EffectDiscriminant::Yield, + 2 => EffectDiscriminant::NewUtxo, + 3 => EffectDiscriminant::NewCoord, + 4 => EffectDiscriminant::InstallHandler, + 5 => EffectDiscriminant::UninstallHandler, + 6 => EffectDiscriminant::GetHandlerFor, + 7 => EffectDiscriminant::Burn, + 8 => EffectDiscriminant::Activation, + 9 => EffectDiscriminant::Init, + 10 => EffectDiscriminant::NewRef, + 11 => EffectDiscriminant::RefPush, + 12 => EffectDiscriminant::RefGet, + 13 => EffectDiscriminant::Bind, + 14 => EffectDiscriminant::Unbind, + 15 => EffectDiscriminant::ProgramHash, + 16 => EffectDiscriminant::RefWrite, + 17 => EffectDiscriminant::Return, + 18 => EffectDiscriminant::CallEffectHandler, + _ => todo!(), + } + } +} diff --git a/interleaving/starstream-runtime/Cargo.toml b/interleaving/starstream-runtime/Cargo.toml new file mode 100644 index 
00000000..422eba1e --- /dev/null +++ b/interleaving/starstream-runtime/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "starstream-runtime" +version.workspace = true +authors.workspace = true +edition.workspace = true +rust-version.workspace = true +repository.workspace = true +license.workspace = true + +[dependencies] +starstream-interleaving-proof = { path = "../starstream-interleaving-proof" } +starstream-interleaving-spec = { path = "../starstream-interleaving-spec" } +thiserror = "2.0.17" +wasmi = "1.0.7" +ark-ff = { version = "0.5.0", default-features = false } +ark-poseidon2 = { path = "../../ark-poseidon2" } +wasm-encoder = { workspace = true } + +[dev-dependencies] +imbl = "6.1.0" +wat = "1.0" +wasmprinter = "0.2" diff --git a/interleaving/starstream-runtime/src/lib.rs b/interleaving/starstream-runtime/src/lib.rs new file mode 100644 index 00000000..4f0449c5 --- /dev/null +++ b/interleaving/starstream-runtime/src/lib.rs @@ -0,0 +1,1281 @@ +use ark_ff::PrimeField; +use starstream_interleaving_proof::commit; +use starstream_interleaving_spec::{ + CoroutineState, Hash, InterfaceId, InterleavingInstance, InterleavingWitness, + LedgerEffectsCommitment, NewOutput, OutputRef, ProcessId, ProvenTransaction, Ref, UtxoId, + Value, WasmModule, WitEffectOutput, WitLedgerEffect, builder::TransactionBuilder, +}; +use std::collections::{HashMap, HashSet}; +use wasmi::{ + Caller, Config, Engine, Linker, Memory, Store, TypedResumableCall, TypedResumableCallHostTrap, + Val, errors::HostError, +}; + +mod trace_mermaid; +pub use trace_mermaid::{ + register_mermaid_decoder, register_mermaid_default_decoder, register_mermaid_process_labels, +}; + +#[doc(hidden)] +pub mod test_support; + +#[derive(thiserror::Error, Debug)] +pub enum Error { + #[error("invalid proof: {0}")] + InvalidProof(String), + #[error("runtime error: {0}")] + RuntimeError(String), + #[error("wasmi error: {0}")] + Wasmi(#[from] wasmi::Error), +} + +pub type WasmProgram = Vec; + +#[derive(Debug)] +struct Interrupt {} + +impl std::fmt::Display for Interrupt { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self) + } +} + +impl HostError for Interrupt {} + +fn pack_bytes_to_safe_limbs(bytes: &[u8]) -> Vec { + let mut out = Vec::with_capacity(bytes.len().div_ceil(7)); + + for chunk in bytes.chunks(7) { + let mut limb = [0u8; 8]; + limb[..chunk.len()].copy_from_slice(chunk); + out.push(ark_poseidon2::F::from(u64::from_le_bytes(limb))); + } + + out +} + +pub fn poseidon_program_hash(program_bytes: &[u8]) -> [u64; 4] { + let mut msg = pack_bytes_to_safe_limbs("starstream/program_hash/v1/poseidon2".as_bytes()); + msg.push(ark_poseidon2::F::from(program_bytes.len() as u64)); + msg.extend(pack_bytes_to_safe_limbs(program_bytes)); + + let hash = ark_poseidon2::sponge_12_trace(&msg).unwrap(); + + let mut out = [0; 4]; + + out[0] = hash[0].into_bigint().0[0]; + out[1] = hash[0].into_bigint().0[0]; + out[2] = hash[0].into_bigint().0[0]; + out[3] = hash[0].into_bigint().0[0]; + + out +} + +fn snapshot_globals( + store: &Store, + globals: &[wasmi::Global], +) -> Result, Error> { + let mut values = Vec::with_capacity(globals.len()); + for global in globals { + let val = global.get(store); + let value = match val { + Val::I64(v) => Value(v as u64), + Val::I32(v) => Value(v as u32 as u64), + _ => { + return Err(Error::RuntimeError( + "unsupported global type (only i32/i64 supported)".into(), + )); + } + }; + values.push(value); + } + Ok(values) +} + +fn restore_globals( + store: &mut Store, + globals: 
&[wasmi::Global], + values: &[Value], +) -> Result<(), Error> { + if globals.len() != values.len() { + return Err(Error::RuntimeError(format!( + "global count mismatch: expected {}, got {}", + globals.len(), + values.len() + ))); + } + + for (global, value) in globals.iter().zip(values.iter()) { + if global.ty(&mut *store).mutability().is_const() { + continue; + } + let val = match global.ty(&mut *store).content() { + wasmi::ValType::I64 => Val::I64(value.0 as i64), + wasmi::ValType::I32 => Val::I32(value.0 as i32), + _ => { + return Err(Error::RuntimeError( + "unsupported global type (only i32/i64 supported)".into(), + )); + } + }; + global + .set(&mut *store, val) + .map_err(|e| Error::RuntimeError(e.to_string()))?; + } + + Ok(()) +} + +fn suspend_with_effect( + caller: &mut Caller<'_, RuntimeState>, + effect: WitLedgerEffect, +) -> Result { + let current_pid = caller.data().current_process; + caller + .data_mut() + .traces + .entry(current_pid) + .or_default() + .push(effect); + Err(wasmi::Error::host(Interrupt {})) +} + +fn effect_result_arity(effect: &WitLedgerEffect) -> usize { + match effect { + WitLedgerEffect::Resume { .. } + | WitLedgerEffect::Activation { .. } + | WitLedgerEffect::Init { .. } => 2, + WitLedgerEffect::ProgramHash { .. } => 4, + WitLedgerEffect::NewUtxo { .. } + | WitLedgerEffect::NewCoord { .. } + | WitLedgerEffect::GetHandlerFor { .. } + | WitLedgerEffect::CallEffectHandler { .. } + | WitLedgerEffect::NewRef { .. } => 1, + WitLedgerEffect::RefGet { .. } => 4, + WitLedgerEffect::InstallHandler { .. } + | WitLedgerEffect::UninstallHandler { .. } + | WitLedgerEffect::Burn { .. } + | WitLedgerEffect::Yield { .. } + | WitLedgerEffect::Return { .. } + | WitLedgerEffect::Bind { .. } + | WitLedgerEffect::Unbind { .. } + | WitLedgerEffect::RefPush { .. } + | WitLedgerEffect::RefWrite { .. 
} => 0, + } +} + +pub struct UnprovenTransaction { + pub inputs: Vec, + pub input_states: Vec, + pub input_ownership: Vec>, + pub programs: Vec, + pub is_utxo: Vec, + pub entrypoint: usize, +} + +pub struct RuntimeState { + pub traces: HashMap>, + pub interleaving: Vec<(ProcessId, WitLedgerEffect)>, + pub current_process: ProcessId, + pub memories: HashMap, + + pub handler_stack: HashMap>, + pub ref_store: HashMap>, + pub ref_sizes: HashMap, + pub ref_state: HashMap, // (ref, elem_offset, size_words) + pub next_ref: u64, + + pub pending_activation: HashMap, + pub pending_init: HashMap, + pub globals: HashMap>, + + pub ownership: HashMap>, + pub process_hashes: HashMap>, + pub is_utxo: HashMap, + pub allocated_processes: HashSet, + pub yield_to: HashMap, + pub on_yield: HashMap, + + pub must_burn: HashSet, + pub n_new: usize, + pub n_coord: usize, +} + +pub struct Runtime { + pub engine: Engine, + pub linker: Linker, + pub store: Store, +} + +impl Default for Runtime { + fn default() -> Self { + Self::new() + } +} + +impl Runtime { + pub fn new() -> Self { + let config = Config::default(); + let engine = Engine::new(&config); + let mut linker = Linker::new(&engine); + + let state = RuntimeState { + traces: HashMap::new(), + interleaving: Vec::new(), + current_process: ProcessId(0), + memories: HashMap::new(), + handler_stack: HashMap::new(), + ref_store: HashMap::new(), + ref_sizes: HashMap::new(), + ref_state: HashMap::new(), + next_ref: 0, + pending_activation: HashMap::new(), + pending_init: HashMap::new(), + globals: HashMap::new(), + ownership: HashMap::new(), + process_hashes: HashMap::new(), + is_utxo: HashMap::new(), + allocated_processes: HashSet::new(), + yield_to: HashMap::new(), + on_yield: HashMap::new(), + must_burn: HashSet::new(), + n_new: 0, + n_coord: 1, + }; + + let store = Store::new(&engine, state); + + linker + .func_wrap( + "env", + "starstream_resume", + |mut caller: Caller<'_, RuntimeState>, + target: u64, + val: u64| + -> Result<(u64, u64), wasmi::Error> { + let current_pid = caller.data().current_process; + let target = ProcessId(target as usize); + let val = Ref(val); + let ret = WitEffectOutput::Thunk; + + caller + .data_mut() + .pending_activation + .insert(target, (val, current_pid)); + + let curr_is_utxo = caller + .data() + .is_utxo + .get(¤t_pid) + .copied() + .unwrap_or(false); + let was_on_yield = caller.data().on_yield.get(&target).copied().unwrap_or(true); + if !curr_is_utxo && was_on_yield { + caller.data_mut().yield_to.insert(target, current_pid); + caller.data_mut().on_yield.insert(target, false); + } + + suspend_with_effect( + &mut caller, + WitLedgerEffect::Resume { + target, + val, + ret, + caller: WitEffectOutput::Thunk, + }, + ) + }, + ) + .unwrap(); + + linker + .func_wrap( + "env", + "starstream_yield", + |mut caller: Caller<'_, RuntimeState>, val: u64| -> Result<(), wasmi::Error> { + let current_pid = caller.data().current_process; + caller.data_mut().on_yield.insert(current_pid, true); + suspend_with_effect(&mut caller, WitLedgerEffect::Yield { val: Ref(val) }) + }, + ) + .unwrap(); + + linker + .func_wrap( + "env", + "starstream_return", + |mut caller: Caller<'_, RuntimeState>| -> Result<(), wasmi::Error> { + suspend_with_effect(&mut caller, WitLedgerEffect::Return {}) + }, + ) + .unwrap(); + + linker + .func_wrap( + "env", + "starstream_new_utxo", + |mut caller: Caller<'_, RuntimeState>, + h0: u64, + h1: u64, + h2: u64, + h3: u64, + val: u64| + -> Result { + let current_pid = caller.data().current_process; + let h = Hash([h0, h1, h2, h3], 
std::marker::PhantomData); + let val = Ref(val); + + let mut found_id = None; + let limit = caller.data().process_hashes.len(); + for i in 0..limit { + let pid = ProcessId(i); + if !caller.data().allocated_processes.contains(&pid) + && let Some(ph) = caller.data().process_hashes.get(&pid) + && *ph == h + && let Some(&is_u) = caller.data().is_utxo.get(&pid) + && is_u + { + found_id = Some(pid); + break; + } + } + let id = found_id.ok_or(wasmi::Error::new("no matching utxo process found"))?; + caller.data_mut().allocated_processes.insert(id); + + caller + .data_mut() + .pending_init + .insert(id, (val, current_pid)); + caller.data_mut().n_new += 1; + + suspend_with_effect( + &mut caller, + WitLedgerEffect::NewUtxo { + program_hash: h, + val, + id: WitEffectOutput::Resolved(id), + }, + ) + }, + ) + .unwrap(); + + linker + .func_wrap( + "env", + "starstream_new_coord", + |mut caller: Caller<'_, RuntimeState>, + h0: u64, + h1: u64, + h2: u64, + h3: u64, + val: u64| + -> Result { + let current_pid = caller.data().current_process; + let h = Hash([h0, h1, h2, h3], std::marker::PhantomData); + let val = Ref(val); + + let mut found_id = None; + let limit = caller.data().process_hashes.len(); + for i in 0..limit { + let pid = ProcessId(i); + if !caller.data().allocated_processes.contains(&pid) + && let Some(ph) = caller.data().process_hashes.get(&pid) + && *ph == h + && let Some(&is_u) = caller.data().is_utxo.get(&pid) + && !is_u + { + found_id = Some(pid); + break; + } + } + let id = + found_id.ok_or(wasmi::Error::new("no matching coord process found"))?; + caller.data_mut().allocated_processes.insert(id); + + caller + .data_mut() + .pending_init + .insert(id, (val, current_pid)); + caller.data_mut().n_coord += 1; + + suspend_with_effect( + &mut caller, + WitLedgerEffect::NewCoord { + program_hash: h, + val, + id: WitEffectOutput::Resolved(id), + }, + ) + }, + ) + .unwrap(); + + linker + .func_wrap( + "env", + "starstream_install_handler", + |mut caller: Caller<'_, RuntimeState>, + h0: u64, + h1: u64, + h2: u64, + h3: u64| + -> Result<(), wasmi::Error> { + let current_pid = caller.data().current_process; + let interface_id = Hash([h0, h1, h2, h3], std::marker::PhantomData); + caller + .data_mut() + .handler_stack + .entry(interface_id) + .or_default() + .push(current_pid); + suspend_with_effect( + &mut caller, + WitLedgerEffect::InstallHandler { interface_id }, + ) + }, + ) + .unwrap(); + + linker + .func_wrap( + "env", + "starstream_uninstall_handler", + |mut caller: Caller<'_, RuntimeState>, + h0: u64, + h1: u64, + h2: u64, + h3: u64| + -> Result<(), wasmi::Error> { + let current_pid = caller.data().current_process; + let interface_id = Hash([h0, h1, h2, h3], std::marker::PhantomData); + let stack = caller + .data_mut() + .handler_stack + .get_mut(&interface_id) + .ok_or(wasmi::Error::new("handler stack not found"))?; + if stack.pop() != Some(current_pid) { + return Err(wasmi::Error::new("uninstall handler mismatch")); + } + suspend_with_effect( + &mut caller, + WitLedgerEffect::UninstallHandler { interface_id }, + ) + }, + ) + .unwrap(); + + linker + .func_wrap( + "env", + "starstream_get_handler_for", + |mut caller: Caller<'_, RuntimeState>, + h0: u64, + h1: u64, + h2: u64, + h3: u64| + -> Result { + let interface_id = Hash([h0, h1, h2, h3], std::marker::PhantomData); + let handler_id = { + let stack = caller + .data() + .handler_stack + .get(&interface_id) + .ok_or(wasmi::Error::new("handler stack not found"))?; + *stack + .last() + .ok_or(wasmi::Error::new("handler stack empty"))? 
+ }; + suspend_with_effect( + &mut caller, + WitLedgerEffect::GetHandlerFor { + interface_id, + handler_id: WitEffectOutput::Resolved(handler_id), + }, + ) + }, + ) + .unwrap(); + + linker + .func_wrap( + "env", + "starstream_call_effect_handler", + |mut caller: Caller<'_, RuntimeState>, + h0: u64, + h1: u64, + h2: u64, + h3: u64, + val: u64| + -> Result { + let interface_id = Hash([h0, h1, h2, h3], std::marker::PhantomData); + suspend_with_effect( + &mut caller, + WitLedgerEffect::CallEffectHandler { + interface_id, + val: Ref(val), + ret: WitEffectOutput::Thunk, + }, + ) + }, + ) + .unwrap(); + + linker + .func_wrap( + "env", + "starstream_activation", + |mut caller: Caller<'_, RuntimeState>| -> Result<(u64, u64), wasmi::Error> { + let current_pid = caller.data().current_process; + let (val, caller_id) = { + let (val, caller_id) = caller + .data() + .pending_activation + .get(¤t_pid) + .ok_or(wasmi::Error::new("no pending activation"))?; + (*val, *caller_id) + }; + suspend_with_effect( + &mut caller, + WitLedgerEffect::Activation { + val: WitEffectOutput::Resolved(val), + caller: WitEffectOutput::Resolved(caller_id), + }, + ) + }, + ) + .unwrap(); + + linker + .func_wrap( + "env", + "starstream_init", + |mut caller: Caller<'_, RuntimeState>| -> Result<(u64, u64), wasmi::Error> { + let current_pid = caller.data().current_process; + let (val, caller_id) = { + let (val, caller_id) = caller + .data() + .pending_init + .get(¤t_pid) + .ok_or(wasmi::Error::new("no pending init"))?; + (*val, *caller_id) + }; + suspend_with_effect( + &mut caller, + WitLedgerEffect::Init { + val: WitEffectOutput::Resolved(val), + caller: WitEffectOutput::Resolved(caller_id), + }, + ) + }, + ) + .unwrap(); + + linker + .func_wrap( + "env", + "starstream_new_ref", + |mut caller: Caller<'_, RuntimeState>, size: u64| -> Result { + let current_pid = caller.data().current_process; + let size_words = size as usize; + let size_elems = size_words + .checked_mul(starstream_interleaving_spec::REF_PUSH_WIDTH) + .ok_or(wasmi::Error::new("ref size overflow"))?; + let ref_id = Ref(caller.data().next_ref); + caller.data_mut().next_ref += size_elems as u64; + + caller + .data_mut() + .ref_store + .insert(ref_id, vec![Value(0); size_elems]); + caller.data_mut().ref_sizes.insert(ref_id, size_words); + caller + .data_mut() + .ref_state + .insert(current_pid, (ref_id, 0, size_words)); + + suspend_with_effect( + &mut caller, + WitLedgerEffect::NewRef { + size: size_words, + ret: WitEffectOutput::Resolved(ref_id), + }, + ) + }, + ) + .unwrap(); + + linker + .func_wrap( + "env", + "starstream_ref_push", + |mut caller: Caller<'_, RuntimeState>, + val_0: u64, + val_1: u64, + val_2: u64, + val_3: u64| + -> Result<(), wasmi::Error> { + let current_pid = caller.data().current_process; + let vals = [Value(val_0), Value(val_1), Value(val_2), Value(val_3)]; + let (ref_id, offset, size_words) = *caller + .data() + .ref_state + .get(¤t_pid) + .ok_or(wasmi::Error::new("no ref state"))?; + + let store = caller + .data_mut() + .ref_store + .get_mut(&ref_id) + .ok_or(wasmi::Error::new("ref not found"))?; + + let elem_offset = offset; + for (i, val) in vals.iter().enumerate() { + if let Some(pos) = store.get_mut(elem_offset + i) { + *pos = *val; + } + } + + caller.data_mut().ref_state.insert( + current_pid, + ( + ref_id, + elem_offset + starstream_interleaving_spec::REF_PUSH_WIDTH, + size_words, + ), + ); + + suspend_with_effect(&mut caller, WitLedgerEffect::RefPush { vals }) + }, + ) + .unwrap(); + + linker + .func_wrap( + "env", + 
"starstream_ref_write", + |mut caller: Caller<'_, RuntimeState>, + reff: u64, + offset: u64, + val_0: u64, + val_1: u64, + val_2: u64, + val_3: u64| + -> Result<(), wasmi::Error> { + let ref_id = Ref(reff); + let offset_words = offset as usize; + + let size = *caller + .data() + .ref_sizes + .get(&ref_id) + .ok_or(wasmi::Error::new("ref size not found"))?; + + if offset_words >= size { + return Err(wasmi::Error::new("ref write overflow")); + } + + let vals = [Value(val_0), Value(val_1), Value(val_2), Value(val_3)]; + let store = caller + .data_mut() + .ref_store + .get_mut(&ref_id) + .ok_or(wasmi::Error::new("ref not found"))?; + + let elem_offset = offset_words * starstream_interleaving_spec::REF_WRITE_WIDTH; + for (i, val) in vals.iter().enumerate() { + store[elem_offset + i] = *val; + } + + suspend_with_effect( + &mut caller, + WitLedgerEffect::RefWrite { + reff: ref_id, + offset: offset_words, + vals, + }, + ) + }, + ) + .unwrap(); + + linker + .func_wrap( + "env", + "starstream_ref_get", + |mut caller: Caller<'_, RuntimeState>, + reff: u64, + offset: u64| + -> Result<(i64, i64, i64, i64), wasmi::Error> { + let ref_id = Ref(reff); + let offset_words = offset as usize; + let store = caller + .data() + .ref_store + .get(&ref_id) + .ok_or(wasmi::Error::new("ref not found"))?; + let size = *caller + .data() + .ref_sizes + .get(&ref_id) + .ok_or(wasmi::Error::new("ref size not found"))?; + if offset_words >= size { + return Err(wasmi::Error::new("ref get overflow")); + } + let mut ret = [Value::nil(); starstream_interleaving_spec::REF_GET_WIDTH]; + for (i, slot) in ret.iter_mut().enumerate() { + let idx = (offset_words * starstream_interleaving_spec::REF_GET_WIDTH) + i; + if idx < size * starstream_interleaving_spec::REF_GET_WIDTH { + *slot = store[idx]; + } + } + suspend_with_effect( + &mut caller, + WitLedgerEffect::RefGet { + reff: ref_id, + offset: offset_words, + ret: WitEffectOutput::Resolved(ret), + }, + )?; + Ok(( + ret[0].0 as i64, + ret[1].0 as i64, + ret[2].0 as i64, + ret[3].0 as i64, + )) + }, + ) + .unwrap(); + + linker + .func_wrap( + "env", + "starstream_bind", + |mut caller: Caller<'_, RuntimeState>, owner_id: u64| -> Result<(), wasmi::Error> { + let current_pid = caller.data().current_process; + let owner_id = ProcessId(owner_id as usize); + caller + .data_mut() + .ownership + .insert(current_pid, Some(owner_id)); + suspend_with_effect(&mut caller, WitLedgerEffect::Bind { owner_id }) + }, + ) + .unwrap(); + + linker + .func_wrap( + "env", + "starstream_unbind", + |mut caller: Caller<'_, RuntimeState>, token_id: u64| -> Result<(), wasmi::Error> { + let current_pid = caller.data().current_process; + let token_id = ProcessId(token_id as usize); + if caller.data().ownership.get(&token_id) != Some(&Some(current_pid)) { + eprintln!( + "unbind called by non-owner: token_id={}, current_pid={}", + token_id.0, current_pid.0 + ); + } + caller.data_mut().ownership.insert(token_id, None); + suspend_with_effect(&mut caller, WitLedgerEffect::Unbind { token_id }) + }, + ) + .unwrap(); + + linker + .func_wrap( + "env", + "starstream_burn", + |mut caller: Caller<'_, RuntimeState>, ret: u64| -> Result<(), wasmi::Error> { + let current_pid = caller.data().current_process; + caller.data_mut().must_burn.insert(current_pid); + suspend_with_effect(&mut caller, WitLedgerEffect::Burn { ret: Ref(ret) }) + }, + ) + .unwrap(); + + linker + .func_wrap( + "env", + "starstream_get_program_hash", + |mut caller: Caller<'_, RuntimeState>, + target_pid: u64| + -> Result<(u64, u64, u64, u64), wasmi::Error> { 
+ let target = ProcessId(target_pid as usize); + let program_hash = *caller + .data() + .process_hashes + .get(&target) + .ok_or(wasmi::Error::new("process hash not found"))?; + + suspend_with_effect( + &mut caller, + WitLedgerEffect::ProgramHash { + target, + program_hash: WitEffectOutput::Resolved(program_hash), + }, + ) + }, + ) + .unwrap(); + + Self { + engine, + linker, + store, + } + } +} + +impl UnprovenTransaction { + fn get_globals(&self, pid: usize, state: &RuntimeState) -> Result, Error> { + state + .globals + .get(&ProcessId(pid)) + .cloned() + .ok_or_else(|| Error::RuntimeError(format!("No globals for pid {}", pid))) + } + + pub fn prove(self) -> Result { + let (instance, state, witness) = self.execute()?; + + let proof = starstream_interleaving_proof::prove(instance.clone(), witness.clone()) + .map_err(|e| Error::RuntimeError(e.to_string()))?; + + trace_mermaid::emit_trace_mermaid(&instance, &state); + + let mut builder = TransactionBuilder::new(); + builder = builder.with_entrypoint(self.entrypoint); + + let n_inputs = instance.n_inputs; + if self.inputs.len() != n_inputs { + return Err(Error::RuntimeError(format!( + "Input count mismatch: expected {}, got {}", + n_inputs, + self.inputs.len() + ))); + } + + let traces = &witness.traces; + + // Inputs + for (i, trace) in traces.iter().enumerate().take(n_inputs) { + let trace = trace.clone(); + let utxo_id = self.inputs[i].clone(); + let host_calls_root = instance.host_calls_roots[i].clone(); + + let continuation = if instance.must_burn[i] { + None + } else { + let globals = self.get_globals(i, &state)?; + Some(CoroutineState { pc: 0, globals }) + }; + + builder = builder.with_input_and_trace_commitment( + utxo_id, + continuation, + trace, + host_calls_root, + ); + } + + // New Outputs + for (i, trace) in traces + .iter() + .enumerate() + .skip(n_inputs) + .take(instance.n_new) + { + let trace = trace.clone(); + let globals = self.get_globals(i, &state)?; + let contract_hash = state.process_hashes[&ProcessId(i)]; + let host_calls_root = instance.host_calls_roots[i].clone(); + + builder = builder.with_fresh_output_and_trace_commitment( + NewOutput { + state: CoroutineState { pc: 0, globals }, + contract_hash, + }, + trace, + host_calls_root, + ); + } + + // Coords + for (i, trace) in traces + .iter() + .enumerate() + .skip(n_inputs + instance.n_new) + .take(instance.n_coords) + { + let trace = trace.clone(); + let contract_hash = state.process_hashes[&ProcessId(i)]; + let host_calls_root = instance.host_calls_roots[i].clone(); + builder = builder.with_coord_script_and_trace_commitment( + contract_hash, + trace, + host_calls_root, + ); + } + + // Ownership + for (token, owner_opt) in state.ownership { + if let Some(owner) = owner_opt { + builder = + builder.with_ownership(OutputRef::from(token.0), OutputRef::from(owner.0)); + } + } + + Ok(builder.build(proof)) + } + + pub fn to_instance(&self) -> InterleavingInstance { + self.execute().unwrap().0 + } + + pub fn execute( + &self, + ) -> Result<(InterleavingInstance, RuntimeState, InterleavingWitness), Error> { + let mut runtime = Runtime::new(); + + let n_inputs = self.inputs.len(); + if !self.input_states.is_empty() && self.input_states.len() != n_inputs { + return Err(Error::RuntimeError(format!( + "Input state count mismatch: expected {}, got {}", + n_inputs, + self.input_states.len() + ))); + } + if !self.input_ownership.is_empty() && self.input_ownership.len() != n_inputs { + return Err(Error::RuntimeError(format!( + "Input ownership count mismatch: expected {}, got {}", + 
n_inputs, + self.input_ownership.len() + ))); + } + + let mut instances = Vec::new(); + let mut process_table = Vec::new(); + let mut globals_by_pid: Vec> = Vec::new(); + + for (pid, program_bytes) in self.programs.iter().enumerate() { + let hash = Hash( + poseidon_program_hash(program_bytes), + std::marker::PhantomData, + ); + + runtime + .store + .data_mut() + .process_hashes + .insert(ProcessId(pid), hash); + + // Populate is_utxo map + runtime + .store + .data_mut() + .is_utxo + .insert(ProcessId(pid), self.is_utxo[pid]); + + process_table.push(hash); + + let module = wasmi::Module::new(&runtime.engine, program_bytes)?; + let instance = runtime + .linker + .instantiate_and_start(&mut runtime.store, &module)?; + + // Store memory in RuntimeState for hash reading + if let Some(extern_) = instance.get_export(&runtime.store, "memory") + && let Some(memory) = extern_.into_memory() + { + runtime + .store + .data_mut() + .memories + .insert(ProcessId(pid), memory); + } + + let mut globals = Vec::new(); + for export in instance.exports(&runtime.store) { + let name = export.name().to_string(); + if let Some(global) = export.into_global() { + globals.push((name, global)); + } + } + globals.sort_by(|a, b| a.0.cmp(&b.0)); + let globals: Vec = globals.into_iter().map(|(_, g)| g).collect(); + + if pid < n_inputs && !self.input_states.is_empty() { + let values = &self.input_states[pid].globals; + restore_globals(&mut runtime.store, &globals, values)?; + } + + globals_by_pid.push(globals); + instances.push(instance); + } + + if !self.input_ownership.is_empty() { + for (pid, owner_opt) in self.input_ownership.iter().enumerate().take(n_inputs) { + runtime + .store + .data_mut() + .ownership + .insert(ProcessId(pid), *owner_opt); + } + } + + // Map of suspended processes + let mut resumables: HashMap> = HashMap::new(); + + // Start entrypoint + let mut current_pid = ProcessId(self.entrypoint); + runtime + .store + .data_mut() + .allocated_processes + .insert(current_pid); + + runtime.store.data_mut().current_process = current_pid; + + // Initial argument? 0? + let mut next_args = [0u64; 5]; + + loop { + runtime.store.data_mut().current_process = current_pid; + + let result = if let Some(continuation) = resumables.remove(¤t_pid) { + let n_results = { + let traces = &runtime.store.data().traces; + let trace = traces.get(¤t_pid).expect("trace exists"); + let last = trace.last().expect("trace not empty"); + effect_result_arity(last) + }; + + // Update previous effect with return value + let traces = &mut runtime.store.data_mut().traces; + if let Some(trace) = traces.get_mut(¤t_pid) + && let Some(last) = trace.last_mut() + { + match last { + WitLedgerEffect::Resume { ret, caller, .. } => { + *ret = WitEffectOutput::Resolved(Ref(next_args[0])); + *caller = + WitEffectOutput::Resolved(Some(ProcessId(next_args[1] as usize))); + } + WitLedgerEffect::CallEffectHandler { ret, .. } => { + *ret = WitEffectOutput::Resolved(Ref(next_args[0])); + } + _ => {} + } + } + + let vals = [ + Val::I64(next_args[0] as i64), + Val::I64(next_args[1] as i64), + Val::I64(next_args[2] as i64), + Val::I64(next_args[3] as i64), + Val::I64(next_args[4] as i64), + ]; + + continuation.resume(&mut runtime.store, &vals[..n_results])? 
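+                // The resumed call runs until the guest either finishes or traps on
+                // its next host call.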
+            } else {
+                let instance = instances[current_pid.0];
+                // Start with _start, 0 args, 0 results
+                let func = instance.get_typed_func::<(), ()>(&runtime.store, "_start")?;
+                func.call_resumable(&mut runtime.store, ()).unwrap()
+            };
+
+            match result {
+                TypedResumableCall::Finished(_) => {
+                    // Process finished naturally.
+                    break;
+                }
+                TypedResumableCall::HostTrap(invocation) => {
+                    // It suspended.
+
+                    // Inspect the last effect
+                    let last_effect = {
+                        let traces = &runtime.store.data().traces;
+                        let trace = traces
+                            .get(&current_pid)
+                            .expect("trace exists after suspend");
+                        trace.last().expect("trace not empty after suspend").clone()
+                    };
+
+                    runtime
+                        .store
+                        .data_mut()
+                        .interleaving
+                        .push((current_pid, last_effect.clone()));
+
+                    resumables.insert(current_pid, invocation);
+
+                    match last_effect {
+                        WitLedgerEffect::Resume { target, val, .. } => {
+                            next_args = [val.0, current_pid.0 as u64, 0, 0, 0];
+                            current_pid = target;
+                        }
+                        WitLedgerEffect::CallEffectHandler {
+                            interface_id, val, ..
+                        } => {
+                            let target = {
+                                let stack = runtime
+                                    .store
+                                    .data()
+                                    .handler_stack
+                                    .get(&interface_id)
+                                    .expect("handler stack not found while dispatching call_effect_handler");
+                                *stack.last().expect(
+                                    "handler stack empty while dispatching call_effect_handler",
+                                )
+                            };
+                            next_args = [val.0, current_pid.0 as u64, 0, 0, 0];
+                            current_pid = target;
+                        }
+                        WitLedgerEffect::Yield { val, .. } => {
+                            let caller = *runtime
+                                .store
+                                .data()
+                                .yield_to
+                                .get(&current_pid)
+                                .expect("yield on missing yield_to");
+                            next_args = [val.0, current_pid.0 as u64, 0, 0, 0];
+                            current_pid = caller;
+                        }
+                        WitLedgerEffect::Return { .. } => {
+                            if let Some(caller) = runtime.store.data().yield_to.get(&current_pid) {
+                                next_args = [0; 5];
+                                current_pid = *caller;
+                            } else if current_pid == ProcessId(self.entrypoint) {
+                                break;
+                            } else {
+                                return Err(Error::RuntimeError(
+                                    "return on missing yield_to for non-entrypoint".into(),
+                                ));
+                            }
+                        }
+                        WitLedgerEffect::Burn { .. } => {
+                            let caller = *runtime
+                                .store
+                                .data()
+                                .yield_to
+                                .get(&current_pid)
+                                .expect("burn on missing yield_to");
+                            next_args = [0; 5];
+                            current_pid = caller;
+                        }
+                        WitLedgerEffect::NewUtxo { id, .. } => {
+                            next_args = [id.unwrap().0 as u64, 0, 0, 0, 0];
+                        }
+                        WitLedgerEffect::NewCoord { id, .. } => {
+                            next_args = [id.unwrap().0 as u64, 0, 0, 0, 0];
+                        }
+                        WitLedgerEffect::GetHandlerFor { handler_id, .. } => {
+                            next_args = [handler_id.unwrap().0 as u64, 0, 0, 0, 0];
+                        }
+                        WitLedgerEffect::Activation { val, caller } => {
+                            next_args = [val.unwrap().0, caller.unwrap().0 as u64, 0, 0, 0];
+                        }
+                        WitLedgerEffect::Init { val, caller } => {
+                            next_args = [val.unwrap().0, caller.unwrap().0 as u64, 0, 0, 0];
+                        }
+                        WitLedgerEffect::NewRef { ret, .. } => {
+                            next_args = [ret.unwrap().0, 0, 0, 0, 0];
+                        }
+                        WitLedgerEffect::RefGet { ret, .. } => {
+                            let ret = ret.unwrap();
+                            next_args = [ret[0].0, ret[1].0, ret[2].0, ret[3].0, 0];
+                        }
+                        WitLedgerEffect::RefWrite { .. } => {
+                            next_args = [0; 5];
+                        }
+                        WitLedgerEffect::ProgramHash { program_hash, ..
} => { + let limbs = program_hash.unwrap().0; + next_args = [limbs[0], limbs[1], limbs[2], limbs[3], 0]; + } + _ => { + next_args = [0; 5]; + } + } + } + TypedResumableCall::OutOfFuel(_) => { + todo!(); + } + } + } + + let mut host_calls_roots = Vec::new(); + let mut must_burn = Vec::new(); + let mut ownership_in = Vec::new(); + let mut ownership_out = Vec::new(); + let mut traces = Vec::new(); + + let is_utxo = self.is_utxo.clone(); + let n_coords = runtime.store.data().n_coord; + let n_new = runtime.store.data().n_new; + let n_inputs = process_table.len() - n_new - n_coords; + + for pid in 0..self.programs.len() { + let data = runtime.store.data(); + let trace = data + .traces + .get(&ProcessId(pid)) + .cloned() + .unwrap_or_default(); + let mut commitment = LedgerEffectsCommitment::iv(); + for op in &trace { + commitment = commit(commitment, op.clone()); + } + host_calls_roots.push(commitment); + traces.push(trace); + + if pid < n_inputs { + must_burn.push(data.must_burn.contains(&ProcessId(pid))); + } + } + + let utxo_count = n_inputs + n_new; + ownership_in.resize(utxo_count, None); + ownership_out.resize(utxo_count, None); + for pid in 0..utxo_count { + let proc_id = ProcessId(pid); + if pid < n_inputs && !self.input_ownership.is_empty() { + ownership_in[pid] = self.input_ownership[pid]; + } + ownership_out[pid] = runtime + .store + .data() + .ownership + .get(&proc_id) + .copied() + .flatten(); + } + + for (pid, globals) in globals_by_pid.iter().enumerate() { + let globals = snapshot_globals(&runtime.store, globals)?; + runtime + .store + .data_mut() + .globals + .insert(ProcessId(pid), globals); + } + + let instance = InterleavingInstance { + host_calls_roots, + process_table, + is_utxo, + must_burn, + n_inputs, + n_new, + n_coords, + ownership_in, + ownership_out, + entrypoint: ProcessId(self.entrypoint), + input_states: self.input_states.clone(), + }; + + let witness = starstream_interleaving_spec::InterleavingWitness { traces }; + + Ok((instance, runtime.store.into_data(), witness)) + } +} diff --git a/interleaving/starstream-runtime/src/test_support/mod.rs b/interleaving/starstream-runtime/src/test_support/mod.rs new file mode 100644 index 00000000..f91d90f5 --- /dev/null +++ b/interleaving/starstream-runtime/src/test_support/mod.rs @@ -0,0 +1 @@ +pub mod wasm_dsl; diff --git a/interleaving/starstream-runtime/src/test_support/wasm_dsl.rs b/interleaving/starstream-runtime/src/test_support/wasm_dsl.rs new file mode 100644 index 00000000..72390d5b --- /dev/null +++ b/interleaving/starstream-runtime/src/test_support/wasm_dsl.rs @@ -0,0 +1,722 @@ +#![allow(dead_code)] + +use wasm_encoder::{ + BlockType, CodeSection, ConstExpr, EntityType, ExportKind, ExportSection, Function, + FunctionSection, GlobalSection, GlobalType, ImportSection, Instruction, Module, TypeSection, + ValType, +}; + +#[derive(Clone, Copy, Debug)] +pub struct Local(u32); + +#[derive(Clone, Copy, Debug)] +pub struct FuncRef { + pub idx: u32, + pub results: usize, +} + +#[derive(Clone, Copy, Debug)] +pub enum Value { + Local(Local), + Const(i64), +} + +impl Value { + fn emit(self, instrs: &mut Vec>) { + match self { + Value::Local(Local(idx)) => instrs.push(Instruction::LocalGet(idx)), + Value::Const(value) => instrs.push(Instruction::I64Const(value)), + } + } +} + +pub struct FuncBuilder { + locals: Vec, + instrs: Vec>, +} + +impl FuncBuilder { + pub fn new() -> Self { + Self { + locals: Vec::new(), + instrs: Vec::new(), + } + } + + pub fn local_i64(&mut self) -> Local { + let idx = self.locals.len() as u32; + 
self.locals.push(ValType::I64); + Local(idx) + } + + pub fn set_const(&mut self, dst: Local, value: i64) { + self.instrs.push(Instruction::I64Const(value)); + self.instrs.push(Instruction::LocalSet(dst.0)); + } + + pub fn set_local(&mut self, dst: Local, src: Local) { + self.instrs.push(Instruction::LocalGet(src.0)); + self.instrs.push(Instruction::LocalSet(dst.0)); + } + + pub fn call(&mut self, func: FuncRef, args: Vec, results: &[Local]) { + for arg in args { + arg.emit(&mut self.instrs); + } + self.instrs.push(Instruction::Call(func.idx)); + for local in results.iter().rev() { + self.instrs.push(Instruction::LocalSet(local.0)); + } + } + + pub fn assert_eq(&mut self, lhs: Value, rhs: Value) { + lhs.emit(&mut self.instrs); + rhs.emit(&mut self.instrs); + self.instrs.push(Instruction::I64Ne); + self.instrs.push(Instruction::If(BlockType::Empty)); + self.instrs.push(Instruction::Unreachable); + self.instrs.push(Instruction::End); + } + + pub fn add_i64(&mut self, a: Value, b: Value, dst: Local) { + a.emit(&mut self.instrs); + b.emit(&mut self.instrs); + self.instrs.push(Instruction::I64Add); + self.instrs.push(Instruction::LocalSet(dst.0)); + } + + pub fn sub_i64(&mut self, a: Value, b: Value, dst: Local) { + a.emit(&mut self.instrs); + b.emit(&mut self.instrs); + self.instrs.push(Instruction::I64Sub); + self.instrs.push(Instruction::LocalSet(dst.0)); + } + + pub fn mul_i64(&mut self, a: Value, b: Value, dst: Local) { + a.emit(&mut self.instrs); + b.emit(&mut self.instrs); + self.instrs.push(Instruction::I64Mul); + self.instrs.push(Instruction::LocalSet(dst.0)); + } + + pub fn div_i64(&mut self, a: Value, b: Value, dst: Local) { + a.emit(&mut self.instrs); + b.emit(&mut self.instrs); + self.instrs.push(Instruction::I64DivS); + self.instrs.push(Instruction::LocalSet(dst.0)); + } + + pub fn global_get(&mut self, global: u32, dst: Local) { + self.instrs.push(Instruction::GlobalGet(global)); + self.instrs.push(Instruction::LocalSet(dst.0)); + } + + pub fn global_set(&mut self, global: u32, val: Value) { + val.emit(&mut self.instrs); + self.instrs.push(Instruction::GlobalSet(global)); + } + + pub fn emit_eq_i64(&mut self, a: Value, b: Value) { + a.emit(&mut self.instrs); + b.emit(&mut self.instrs); + self.instrs.push(Instruction::I64Eq); + } + + pub fn emit_lt_i64(&mut self, a: Value, b: Value) { + a.emit(&mut self.instrs); + b.emit(&mut self.instrs); + self.instrs.push(Instruction::I64LtS); + } + + pub fn loop_begin(&mut self) { + // block { loop { ... } } so depth 1 is break and depth 0 is continue. 
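+        // The emitted shape is:
+        //   block            ;; br/br_if depth 1 exits the loop ("break")
+        //     loop           ;; br/br_if depth 0 restarts the body ("continue")
+        //       <body>
+        //     end
+        //   end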
+ self.instrs.push(Instruction::Block(BlockType::Empty)); + self.instrs.push(Instruction::Loop(BlockType::Empty)); + } + + pub fn loop_end(&mut self) { + self.instrs.push(Instruction::End); + self.instrs.push(Instruction::End); + } + + pub fn br(&mut self, depth: u32) { + self.instrs.push(Instruction::Br(depth)); + } + + pub fn br_if(&mut self, depth: u32) { + self.instrs.push(Instruction::BrIf(depth)); + } + + pub fn if_eq(&mut self, lhs: Value, rhs: Value, f: F) + where + F: FnOnce(&mut FuncBuilder), + { + lhs.emit(&mut self.instrs); + rhs.emit(&mut self.instrs); + self.instrs.push(Instruction::I64Eq); + self.instrs.push(Instruction::If(BlockType::Empty)); + f(self); + self.instrs.push(Instruction::End); + } + + fn finish(self) -> Function { + let mut groups: Vec<(u32, ValType)> = Vec::new(); + for ty in self.locals { + if let Some((count, last_ty)) = groups.last_mut() + && *last_ty == ty + { + *count += 1; + continue; + } + groups.push((1, ty)); + } + let mut func = Function::new(groups); + for instr in self.instrs { + func.instruction(&instr); + } + func.instruction(&Instruction::End); + func + } +} + +impl Default for FuncBuilder { + fn default() -> Self { + Self::new() + } +} + +pub struct ModuleBuilder { + types: TypeSection, + imports: ImportSection, + functions: FunctionSection, + codes: CodeSection, + exports: ExportSection, + globals: GlobalSection, + type_count: u32, + import_count: u32, + starstream: Option, +} + +#[derive(Clone, Copy, Debug)] +pub struct Imports { + pub activation: FuncRef, + pub get_program_hash: FuncRef, + pub get_handler_for: FuncRef, + pub call_effect_handler: FuncRef, + pub install_handler: FuncRef, + pub uninstall_handler: FuncRef, + pub new_ref: FuncRef, + pub ref_push: FuncRef, + pub ref_get: FuncRef, + pub ref_write: FuncRef, + pub resume: FuncRef, + pub yield_: FuncRef, + pub return_: FuncRef, + pub new_utxo: FuncRef, + pub new_coord: FuncRef, + pub burn: FuncRef, + pub bind: FuncRef, + pub unbind: FuncRef, + pub init: FuncRef, +} + +impl ModuleBuilder { + pub fn new() -> Self { + let mut builder = Self { + types: TypeSection::new(), + imports: ImportSection::new(), + functions: FunctionSection::new(), + codes: CodeSection::new(), + exports: ExportSection::new(), + globals: GlobalSection::new(), + type_count: 0, + import_count: 0, + starstream: None, + }; + let imports = builder.import_starstream(); + builder.starstream = Some(imports); + builder + } + + pub fn starstream(&self) -> Imports { + self.starstream.expect("starstream imports available") + } + + pub fn import_starstream(&mut self) -> Imports { + let activation = self.import_func( + "env", + "starstream_activation", + &[], + &[ValType::I64, ValType::I64], + ); + let get_program_hash = self.import_func( + "env", + "starstream_get_program_hash", + &[ValType::I64], + &[ValType::I64, ValType::I64, ValType::I64, ValType::I64], + ); + let get_handler_for = self.import_func( + "env", + "starstream_get_handler_for", + &[ValType::I64, ValType::I64, ValType::I64, ValType::I64], + &[ValType::I64], + ); + let call_effect_handler = self.import_func( + "env", + "starstream_call_effect_handler", + &[ + ValType::I64, + ValType::I64, + ValType::I64, + ValType::I64, + ValType::I64, + ], + &[ValType::I64], + ); + let install_handler = self.import_func( + "env", + "starstream_install_handler", + &[ValType::I64, ValType::I64, ValType::I64, ValType::I64], + &[], + ); + let uninstall_handler = self.import_func( + "env", + "starstream_uninstall_handler", + &[ValType::I64, ValType::I64, ValType::I64, ValType::I64], 
+ &[], + ); + let new_ref = self.import_func( + "env", + "starstream_new_ref", + &[ValType::I64], + &[ValType::I64], + ); + let ref_push = self.import_func( + "env", + "starstream_ref_push", + &[ValType::I64, ValType::I64, ValType::I64, ValType::I64], + &[], + ); + let ref_get = self.import_func( + "env", + "starstream_ref_get", + &[ValType::I64, ValType::I64], + &[ValType::I64, ValType::I64, ValType::I64, ValType::I64], + ); + let ref_write = self.import_func( + "env", + "starstream_ref_write", + &[ + ValType::I64, + ValType::I64, + ValType::I64, + ValType::I64, + ValType::I64, + ValType::I64, + ], + &[], + ); + let resume = self.import_func( + "env", + "starstream_resume", + &[ValType::I64, ValType::I64], + &[ValType::I64, ValType::I64], + ); + let yield_ = self.import_func("env", "starstream_yield", &[ValType::I64], &[]); + let return_ = self.import_func("env", "starstream_return", &[], &[]); + let new_utxo = self.import_func( + "env", + "starstream_new_utxo", + &[ + ValType::I64, + ValType::I64, + ValType::I64, + ValType::I64, + ValType::I64, + ], + &[ValType::I64], + ); + let new_coord = self.import_func( + "env", + "starstream_new_coord", + &[ + ValType::I64, + ValType::I64, + ValType::I64, + ValType::I64, + ValType::I64, + ], + &[ValType::I64], + ); + let burn = self.import_func("env", "starstream_burn", &[ValType::I64], &[]); + let bind = self.import_func("env", "starstream_bind", &[ValType::I64], &[]); + let unbind = self.import_func("env", "starstream_unbind", &[ValType::I64], &[]); + let init = self.import_func("env", "starstream_init", &[], &[ValType::I64, ValType::I64]); + + Imports { + activation, + get_program_hash, + get_handler_for, + call_effect_handler, + install_handler, + uninstall_handler, + new_ref, + ref_push, + ref_get, + ref_write, + resume, + yield_, + return_, + new_utxo, + new_coord, + burn, + bind, + unbind, + init, + } + } + + pub fn import_func( + &mut self, + module: &str, + name: &str, + params: &[ValType], + results: &[ValType], + ) -> FuncRef { + let type_idx = self.type_count; + self.type_count += 1; + self.types + .ty() + .function(params.iter().copied(), results.iter().copied()); + self.imports + .import(module, name, EntityType::Function(type_idx)); + let idx = self.import_count; + self.import_count += 1; + FuncRef { + idx, + results: results.len(), + } + } + + pub fn func(&self) -> FuncBuilder { + FuncBuilder::new() + } + + pub fn add_global_i64(&mut self, initial: i64, mutable: bool) -> u32 { + let global_type = GlobalType { + val_type: ValType::I64, + mutable, + shared: false, + }; + self.globals + .global(global_type, &ConstExpr::i64_const(initial)); + let idx = self.globals.len() - 1; + let name = format!("__global_{}", idx); + self.exports.export(&name, ExportKind::Global, idx); + idx + } + + pub fn finish(mut self, func: FuncBuilder) -> Vec { + let type_idx = self.type_count; + self.type_count += 1; + self.types.ty().function([], []); + self.functions.function(type_idx); + self.codes.function(&func.finish()); + let start_idx = self.import_count; + self.exports.export("_start", ExportKind::Func, start_idx); + + let mut module = Module::new(); + module.section(&self.types); + module.section(&self.imports); + module.section(&self.functions); + if !self.globals.is_empty() { + module.section(&self.globals); + } + module.section(&self.exports); + module.section(&self.codes); + module.finish() + } +} + +impl Default for ModuleBuilder { + fn default() -> Self { + Self::new() + } +} + +#[macro_export] +macro_rules! 
wasm_module { + ({ $($body:tt)* }) => {{ + let mut __builder = $crate::test_support::wasm_dsl::ModuleBuilder::new(); + let __imports = __builder.starstream(); + let mut __func = __builder.func(); + $crate::wasm!(__func, __imports, { $($body)* }); + __builder.finish(__func) + }}; + ($builder:expr, { $($body:tt)* }) => {{ + let __imports = $builder.starstream(); + let mut __func = $builder.func(); + $crate::wasm!(__func, __imports, { $($body)* }); + $builder.finish(__func) + }}; +} + +#[macro_export] +macro_rules! wasm_value { + (const($expr:expr)) => { + $crate::test_support::wasm_dsl::Value::Const($expr as i64) + }; + ($lit:literal) => { + $crate::test_support::wasm_dsl::Value::Const($lit) + }; + ($var:ident) => { + $crate::test_support::wasm_dsl::Value::Local($var) + }; +} + +#[macro_export] +macro_rules! wasm_args { + () => { + Vec::<$crate::test_support::wasm_dsl::Value>::new() + }; + ($($arg:tt)+) => {{ + let mut args = Vec::<$crate::test_support::wasm_dsl::Value>::new(); + $crate::wasm_args_push!(args, $($arg)+); + args + }}; +} + +#[macro_export] +macro_rules! wasm_args_push { + ($args:ident,) => {}; + ($args:ident, const($expr:expr) $(, $($rest:tt)*)?) => {{ + $args.push($crate::test_support::wasm_dsl::Value::Const($expr as i64)); + $( $crate::wasm_args_push!($args, $($rest)*); )? + }}; + ($args:ident, $lit:literal $(, $($rest:tt)*)?) => {{ + $args.push($crate::test_support::wasm_dsl::Value::Const($lit)); + $( $crate::wasm_args_push!($args, $($rest)*); )? + }}; + ($args:ident, $var:ident $(, $($rest:tt)*)?) => {{ + $args.push($crate::test_support::wasm_dsl::Value::Local($var)); + $( $crate::wasm_args_push!($args, $($rest)*); )? + }}; +} + +#[macro_export] +macro_rules! wasm { + ($f:ident, $imports:ident, { $($body:tt)* }) => { + $crate::wasm_stmt!($f, $imports, $($body)*); + }; +} + +#[macro_export] +macro_rules! wasm_repeat { + ($f:ident, $imports:ident, 0, { $($body:tt)* }) => {}; + ($f:ident, $imports:ident, 1, { $($body:tt)* }) => { + $crate::wasm_stmt!($f, $imports, $($body)*); + }; + ($f:ident, $imports:ident, 2, { $($body:tt)* }) => { + $crate::wasm_stmt!($f, $imports, $($body)*); + $crate::wasm_stmt!($f, $imports, $($body)*); + }; + ($f:ident, $imports:ident, 3, { $($body:tt)* }) => { + $crate::wasm_stmt!($f, $imports, $($body)*); + $crate::wasm_stmt!($f, $imports, $($body)*); + $crate::wasm_stmt!($f, $imports, $($body)*); + }; + ($f:ident, $imports:ident, 4, { $($body:tt)* }) => { + $crate::wasm_stmt!($f, $imports, $($body)*); + $crate::wasm_stmt!($f, $imports, $($body)*); + $crate::wasm_stmt!($f, $imports, $($body)*); + $crate::wasm_stmt!($f, $imports, $($body)*); + }; + ($f:ident, $imports:ident, 5, { $($body:tt)* }) => { + $crate::wasm_stmt!($f, $imports, $($body)*); + $crate::wasm_stmt!($f, $imports, $($body)*); + $crate::wasm_stmt!($f, $imports, $($body)*); + $crate::wasm_stmt!($f, $imports, $($body)*); + $crate::wasm_stmt!($f, $imports, $($body)*); + }; +} + +#[macro_export] +macro_rules! wasm_stmt { + ($f:ident, $imports:ident,) => {}; + + ($f:ident, $imports:ident, repeat $n:literal { $($body:tt)* } $($rest:tt)*) => { + $crate::wasm_repeat!($f, $imports, $n, { $($body)* }); + $crate::wasm_stmt!($f, $imports, $($rest)*); + }; + + // Structured loop: `block { loop { ... } }` so depth 1 is "break" and depth 0 is "continue". 
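+    // DSL usage (as in the integration tests):
+    //   loop {
+    //       break_if i == n;
+    //       ...
+    //       set i = add i, 1;
+    //       continue;
+    //   }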
+ ($f:ident, $imports:ident, loop { $($body:tt)* } $($rest:tt)*) => { + $f.loop_begin(); + $crate::wasm_stmt!($f, $imports, $($body)*); + $f.loop_end(); + $crate::wasm_stmt!($f, $imports, $($rest)*); + }; + + // Loop control (valid inside the `loop { ... }` form above). + ($f:ident, $imports:ident, break; $($rest:tt)*) => { + $f.br(1); + $crate::wasm_stmt!($f, $imports, $($rest)*); + }; + + ($f:ident, $imports:ident, continue; $($rest:tt)*) => { + $f.br(0); + $crate::wasm_stmt!($f, $imports, $($rest)*); + }; + + ($f:ident, $imports:ident, break_if $a:tt == $b:tt; $($rest:tt)*) => { + $f.emit_eq_i64($crate::wasm_value!($a), $crate::wasm_value!($b)); + $f.br_if(1); + $crate::wasm_stmt!($f, $imports, $($rest)*); + }; + + ($f:ident, $imports:ident, continue_if $a:tt == $b:tt; $($rest:tt)*) => { + $f.emit_eq_i64($crate::wasm_value!($a), $crate::wasm_value!($b)); + $f.br_if(0); + $crate::wasm_stmt!($f, $imports, $($rest)*); + }; + + ($f:ident, $imports:ident, break_if $a:tt < $b:tt; $($rest:tt)*) => { + $f.emit_lt_i64($crate::wasm_value!($a), $crate::wasm_value!($b)); + $f.br_if(1); + $crate::wasm_stmt!($f, $imports, $($rest)*); + }; + + ($f:ident, $imports:ident, continue_if $a:tt < $b:tt; $($rest:tt)*) => { + $f.emit_lt_i64($crate::wasm_value!($a), $crate::wasm_value!($b)); + $f.br_if(0); + $crate::wasm_stmt!($f, $imports, $($rest)*); + }; + + // Assignment (must target an existing local). + ($f:ident, $imports:ident, set $var:ident = const($expr:expr); $($rest:tt)*) => { + $f.set_const($var, $expr as i64); + $crate::wasm_stmt!($f, $imports, $($rest)*); + }; + + ($f:ident, $imports:ident, set $var:ident = $lit:literal; $($rest:tt)*) => { + $f.set_const($var, $lit); + $crate::wasm_stmt!($f, $imports, $($rest)*); + }; + + ($f:ident, $imports:ident, set $var:ident = $src:ident; $($rest:tt)*) => { + $f.set_local($var, $src); + $crate::wasm_stmt!($f, $imports, $($rest)*); + }; + + ($f:ident, $imports:ident, set $var:ident = add $a:tt, $b:tt; $($rest:tt)*) => { + $f.add_i64($crate::wasm_value!($a), $crate::wasm_value!($b), $var); + $crate::wasm_stmt!($f, $imports, $($rest)*); + }; + + ($f:ident, $imports:ident, set $var:ident = sub $a:tt, $b:tt; $($rest:tt)*) => { + $f.sub_i64($crate::wasm_value!($a), $crate::wasm_value!($b), $var); + $crate::wasm_stmt!($f, $imports, $($rest)*); + }; + + ($f:ident, $imports:ident, set $var:ident = mul $a:tt, $b:tt; $($rest:tt)*) => { + $f.mul_i64($crate::wasm_value!($a), $crate::wasm_value!($b), $var); + $crate::wasm_stmt!($f, $imports, $($rest)*); + }; + + ($f:ident, $imports:ident, set $var:ident = div $a:tt, $b:tt; $($rest:tt)*) => { + $f.div_i64($crate::wasm_value!($a), $crate::wasm_value!($b), $var); + $crate::wasm_stmt!($f, $imports, $($rest)*); + }; + + ($f:ident, $imports:ident, set ($($var:ident),+ $(,)?) 
= call $func:ident ( $($arg:tt)* ); $($rest:tt)*) => { + $f.call($imports.$func, $crate::wasm_args!($($arg)*), &[$($var),+]); + $crate::wasm_stmt!($f, $imports, $($rest)*); + }; + + ($f:ident, $imports:ident, set $var:ident = call $func:ident ( $($arg:tt)* ); $($rest:tt)*) => { + $f.call($imports.$func, $crate::wasm_args!($($arg)*), &[$var]); + $crate::wasm_stmt!($f, $imports, $($rest)*); + }; + + ($f:ident, $imports:ident, let $var:ident = const($expr:expr); $($rest:tt)*) => { + let $var = $f.local_i64(); + $f.set_const($var, $expr as i64); + $crate::wasm_stmt!($f, $imports, $($rest)*); + }; + + ($f:ident, $imports:ident, let $var:ident = $lit:literal; $($rest:tt)*) => { + let $var = $f.local_i64(); + $f.set_const($var, $lit); + $crate::wasm_stmt!($f, $imports, $($rest)*); + }; + + ($f:ident, $imports:ident, let $var:ident = $src:ident; $($rest:tt)*) => { + let $var = $f.local_i64(); + $f.set_local($var, $src); + $crate::wasm_stmt!($f, $imports, $($rest)*); + }; + + ($f:ident, $imports:ident, let $var:ident = add $a:tt, $b:tt; $($rest:tt)*) => { + let $var = $f.local_i64(); + $f.add_i64($crate::wasm_value!($a), $crate::wasm_value!($b), $var); + $crate::wasm_stmt!($f, $imports, $($rest)*); + }; + + ($f:ident, $imports:ident, let $var:ident = sub $a:tt, $b:tt; $($rest:tt)*) => { + let $var = $f.local_i64(); + $f.sub_i64($crate::wasm_value!($a), $crate::wasm_value!($b), $var); + $crate::wasm_stmt!($f, $imports, $($rest)*); + }; + + ($f:ident, $imports:ident, let $var:ident = mul $a:tt, $b:tt; $($rest:tt)*) => { + let $var = $f.local_i64(); + $f.mul_i64($crate::wasm_value!($a), $crate::wasm_value!($b), $var); + $crate::wasm_stmt!($f, $imports, $($rest)*); + }; + + ($f:ident, $imports:ident, let $var:ident = div $a:tt, $b:tt; $($rest:tt)*) => { + let $var = $f.local_i64(); + $f.div_i64($crate::wasm_value!($a), $crate::wasm_value!($b), $var); + $crate::wasm_stmt!($f, $imports, $($rest)*); + }; + + ($f:ident, $imports:ident, let $var:ident = global_get $idx:literal; $($rest:tt)*) => { + let $var = $f.local_i64(); + $f.global_get($idx as u32, $var); + $crate::wasm_stmt!($f, $imports, $($rest)*); + }; + + ($f:ident, $imports:ident, set_global $idx:literal = $val:tt; $($rest:tt)*) => { + $f.global_set($idx as u32, $crate::wasm_value!($val)); + $crate::wasm_stmt!($f, $imports, $($rest)*); + }; + + ($f:ident, $imports:ident, let ($($var:ident),+ $(,)?) 
= call $func:ident ( $($arg:tt)* ); $($rest:tt)*) => { + $(let $var = $f.local_i64();)+ + $f.call($imports.$func, $crate::wasm_args!($($arg)*), &[$($var),+]); + $crate::wasm_stmt!($f, $imports, $($rest)*); + }; + + ($f:ident, $imports:ident, let $var:ident = call $func:ident ( $($arg:tt)* ); $($rest:tt)*) => { + let $var = $f.local_i64(); + $f.call($imports.$func, $crate::wasm_args!($($arg)*), &[$var]); + $crate::wasm_stmt!($f, $imports, $($rest)*); + }; + + ($f:ident, $imports:ident, call $func:ident ( $($arg:tt)* ); $($rest:tt)*) => { + $f.call($imports.$func, $crate::wasm_args!($($arg)*), &[]); + $crate::wasm_stmt!($f, $imports, $($rest)*); + }; + + ($f:ident, $imports:ident, assert_eq $lhs:ident, $rhs:tt; $($rest:tt)*) => { + $f.assert_eq($crate::wasm_value!($lhs), $crate::wasm_value!($rhs)); + $crate::wasm_stmt!($f, $imports, $($rest)*); + }; + + ($f:ident, $imports:ident, if $lhs:ident == $rhs:tt { $($body:tt)* } $($rest:tt)*) => { + $f.if_eq($crate::wasm_value!($lhs), $crate::wasm_value!($rhs), |$f| { + $crate::wasm_stmt!($f, $imports, $($body)*); + }); + $crate::wasm_stmt!($f, $imports, $($rest)*); + }; +} diff --git a/interleaving/starstream-runtime/src/trace_mermaid.rs b/interleaving/starstream-runtime/src/trace_mermaid.rs new file mode 100644 index 00000000..1fad04a9 --- /dev/null +++ b/interleaving/starstream-runtime/src/trace_mermaid.rs @@ -0,0 +1,456 @@ +use crate::RuntimeState; +use starstream_interleaving_spec::{ + InterfaceId, InterleavingInstance, ProcessId, REF_PUSH_WIDTH, REF_WRITE_WIDTH, Ref, Value, + WitEffectOutput, WitLedgerEffect, +}; +use std::collections::HashMap; +use std::process::{Command, Stdio}; +use std::sync::{Arc, Mutex, OnceLock}; +use std::{env, fs, time}; + +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +enum DecodeMode { + None, + ResponseOnly, + RequestAndResponse, +} + +type MermaidDecoder = Arc Option + Send + Sync + 'static>; + +static MERMAID_DECODERS: OnceLock>> = OnceLock::new(); +static MERMAID_DEFAULT_DECODER: OnceLock>> = OnceLock::new(); + +static MERMAID_LABELS_OVERRIDES: OnceLock> = OnceLock::new(); +static MERMAID_COMBINED: OnceLock>> = OnceLock::new(); + +pub fn register_mermaid_decoder( + interface_id: InterfaceId, + decoder: impl Fn(&[Value]) -> Option + Send + Sync + 'static, +) { + let map = MERMAID_DECODERS.get_or_init(|| Mutex::new(HashMap::new())); + let mut map = map.lock().expect("mermaid decoder lock poisoned"); + map.insert(interface_id, Arc::new(decoder)); +} + +pub fn register_mermaid_default_decoder( + decoder: impl Fn(&[Value]) -> Option + Send + Sync + 'static, +) { + let slot = MERMAID_DEFAULT_DECODER.get_or_init(|| Mutex::new(None)); + let mut slot = slot.lock().expect("mermaid default decoder lock poisoned"); + *slot = Some(Arc::new(decoder)); +} + +pub fn register_mermaid_process_labels(labels: Vec) { + MERMAID_LABELS_OVERRIDES.get_or_init(|| labels); +} + +pub fn emit_trace_mermaid(instance: &InterleavingInstance, state: &RuntimeState) { + if env::var("STARSTREAM_RUNTIME_TRACE_MERMAID") + .ok() + .filter(|v| v != "0") + .is_none() + { + return; + } + + if state.interleaving.is_empty() { + return; + } + + let labels = MERMAID_LABELS_OVERRIDES + .get() + .cloned() + .unwrap_or_else(|| build_process_labels(&instance.is_utxo)); + let mut out = String::new(); + out.push_str("sequenceDiagram\n"); + for label in &labels { + out.push_str(&format!(" participant {label}\n")); + } + + let mut ref_store: HashMap> = HashMap::new(); + let mut ref_state: HashMap = HashMap::new(); + let mut handler_targets: HashMap = HashMap::new(); 
+ let mut handler_interfaces: HashMap = HashMap::new(); + + for (idx, (pid, effect)) in state.interleaving.iter().enumerate() { + let ctx = EdgeContext { + labels: &labels, + is_utxo: &instance.is_utxo, + ref_store: &ref_store, + handler_targets: &handler_targets, + handler_interfaces: &handler_interfaces, + interleaving: &state.interleaving, + }; + + if let Some(line) = format_edge_line(idx, &ctx, *pid, effect) { + out.push_str(" "); + out.push_str(&line); + out.push('\n'); + } + + apply_ref_mutations(*pid, effect, &mut ref_store, &mut ref_state); + update_handler_targets(*pid, effect, &mut handler_targets, &mut handler_interfaces); + } + + emit_trace_mermaid_combined(labels, out); +} + +#[derive(Clone)] +struct CombinedTrace { + labels: Vec, + next_tx: usize, + edges: String, + mmd_path: std::path::PathBuf, +} + +fn emit_trace_mermaid_combined(labels: Vec, per_tx_diagram: String) { + let slot = MERMAID_COMBINED.get_or_init(|| Mutex::new(None)); + let mut guard = slot.lock().expect("mermaid combined trace lock poisoned"); + let needs_reset = guard.as_ref().map(|s| s.labels != labels).unwrap_or(true); + if needs_reset { + *guard = Some(CombinedTrace { + labels: labels.clone(), + next_tx: 1, + edges: String::new(), + mmd_path: env::temp_dir().join(format!( + "starstream_trace_combined_{}.mmd", + std::process::id() + )), + }); + } + let state = guard.as_mut().expect("combined state must exist"); + + if !state.edges.is_empty() { + state.edges.push('\n'); + } + let first = state + .labels + .first() + .cloned() + .unwrap_or_else(|| "p0".to_string()); + let last = state + .labels + .last() + .cloned() + .unwrap_or_else(|| first.clone()); + state.edges.push_str(&format!( + " Note over {first},{last}: tx {}\n", + state.next_tx + )); + for line in per_tx_diagram + .lines() + .skip_while(|line| *line != "sequenceDiagram") + .skip(1) + { + if line.trim_start().starts_with("participant ") { + continue; + } + state.edges.push_str(line); + state.edges.push('\n'); + } + state.next_tx += 1; + + let mut merged = String::new(); + merged.push_str("sequenceDiagram\n"); + for label in &state.labels { + merged.push_str(&format!(" participant {label}\n")); + } + merged.push_str(&state.edges); + + write_mermaid_artifacts(&state.mmd_path, &merged); +} + +fn write_mermaid_artifacts(mmd_path: &std::path::Path, mmd: &str) { + if let Err(err) = fs::write(mmd_path, mmd) { + eprintln!("mermaid: failed to write {}: {err}", mmd_path.display()); + return; + } + + let ts = time::SystemTime::now() + .duration_since(time::UNIX_EPOCH) + .unwrap_or_default() + .as_millis(); + let svg_path = mmd_path.with_extension("svg"); + if mmdc_available() { + let puppeteer_config_path = env::temp_dir().join(format!("starstream_mmdc_{ts}.json")); + let puppeteer_config = r#"{"args":["--no-sandbox","--disable-setuid-sandbox"]}"#; + let _ = fs::write(&puppeteer_config_path, puppeteer_config); + + match Command::new("mmdc") + .arg("-p") + .arg(&puppeteer_config_path) + .arg("-i") + .arg(mmd_path) + .arg("-o") + .arg(&svg_path) + .stdout(Stdio::null()) + .stderr(Stdio::null()) + .status() + { + Ok(status) if status.success() => { + println!("mermaid svg: {}", svg_path.display()); + return; + } + Ok(status) => { + eprintln!( + "mermaid: mmdc failed (exit={})", + status.code().unwrap_or(-1) + ); + } + Err(err) => { + eprintln!("mermaid: failed to run mmdc: {err}"); + } + } + } + + println!("mermaid mmd: {}", mmd_path.display()); +} + +fn mmdc_available() -> bool { + Command::new("mmdc") + .arg("--version") + .output() + .map(|o| 
o.status.success()) + .unwrap_or(false) +} + +fn build_process_labels(is_utxo: &[bool]) -> Vec { + let mut labels = Vec::with_capacity(is_utxo.len()); + let mut utxo_idx = 0usize; + let mut coord_idx = 0usize; + for is_u in is_utxo.iter() { + if *is_u { + labels.push(format!("utxo{utxo_idx}")); + utxo_idx += 1; + } else { + labels.push(format!("coord{coord_idx}")); + coord_idx += 1; + } + } + labels +} + +struct EdgeContext<'a> { + labels: &'a [String], + is_utxo: &'a [bool], + ref_store: &'a HashMap>, + handler_targets: &'a HashMap, + handler_interfaces: &'a HashMap, + interleaving: &'a [(ProcessId, WitLedgerEffect)], +} + +fn format_edge_line( + idx: usize, + ctx: &EdgeContext<'_>, + pid: ProcessId, + effect: &WitLedgerEffect, +) -> Option { + let from = ctx.labels.get(pid.0)?; + match effect { + WitLedgerEffect::Resume { target, val, .. } => { + let interface_id = ctx + .handler_interfaces + .get(target) + .or_else(|| ctx.handler_interfaces.get(&pid)); + let decode_mode = if ctx.handler_targets.get(&pid) == Some(target) { + DecodeMode::RequestAndResponse + } else if ctx.handler_targets.get(target) == Some(&pid) + || ctx.is_utxo.get(target.0).copied().unwrap_or(false) + { + DecodeMode::ResponseOnly + } else { + DecodeMode::None + }; + let label = format!( + "resume
<br/>{}",
+                format_ref_with_value(ctx.ref_store, *val, interface_id, decode_mode)
+            );
+            let to = ctx.labels.get(target.0)?;
+            Some(format!("{from} ->> {to}: {label}"))
+        }
+        WitLedgerEffect::CallEffectHandler { val, .. } => {
+            let target = ctx.interleaving.get(idx + 1).map(|(p, _)| *p)?;
+            let interface_id = ctx.handler_interfaces.get(&target);
+            let label = format!(
+                "call_effect_handler
<br/>{}",
+                format_ref_with_value(
+                    ctx.ref_store,
+                    *val,
+                    interface_id,
+                    DecodeMode::RequestAndResponse
+                )
+            );
+            let to = ctx.labels.get(target.0)?;
+            Some(format!("{from} ->> {to}: {label}"))
+        }
+        WitLedgerEffect::Yield { val, .. } => {
+            let next_pid = ctx.interleaving.get(idx + 1).map(|(p, _)| *p)?;
+            let label = format!(
+                "yield
<br/>{}",
+                format_ref_with_value(ctx.ref_store, *val, None, DecodeMode::None)
+            );
+            let to = ctx.labels.get(next_pid.0)?;
+            Some(format!("{from} -->> {to}: {label}"))
+        }
+        WitLedgerEffect::Return {} => {
+            let next_pid = ctx.interleaving.get(idx + 1).map(|(p, _)| *p)?;
+            let to = ctx.labels.get(next_pid.0)?;
+            Some(format!("{from} -->> {to}: return"))
+        }
+        WitLedgerEffect::NewUtxo { val, id, .. } => {
+            let WitEffectOutput::Resolved(pid) = id else {
+                return None;
+            };
+            let created = ctx.labels.get(pid.0)?;
+            let label = format!(
+                "new_utxo
<br/>{}",
+                format_ref_with_values(ctx.ref_store, *val)
+            );
+            Some(format!("{from} ->> {created}: {label}"))
+        }
+        WitLedgerEffect::NewCoord { val, id, .. } => {
+            let WitEffectOutput::Resolved(pid) = id else {
+                return None;
+            };
+            let created = ctx.labels.get(pid.0)?;
+            let label = format!(
+                "new_coord
{}", + format_ref_with_values(ctx.ref_store, *val) + ); + Some(format!("{from} ->> {created}: {label}")) + } + WitLedgerEffect::Bind { owner_id } => { + let to = ctx.labels.get(owner_id.0)?; + Some(format!("{from} ->> {to}: bind")) + } + WitLedgerEffect::Unbind { token_id } => { + let to = ctx.labels.get(token_id.0)?; + Some(format!("{from} ->> {to}: unbind")) + } + _ => None, + } +} + +fn format_ref_with_value( + ref_store: &HashMap>, + reff: Ref, + interface_id: Option<&InterfaceId>, + decode_mode: DecodeMode, +) -> String { + let mut out = format!("ref={}", reff.0); + if let Some(values) = ref_store.get(&reff) { + let extra = match interface_id { + Some(id) => decode_with_registry(id, values, decode_mode), + None => decode_with_default(values, decode_mode), + } + .unwrap_or_else(|| format_raw_values(values)); + out.push(' '); + out.push('['); + out.push_str(&extra); + out.push(']'); + } + out +} + +fn format_ref_with_values(ref_store: &HashMap>, reff: Ref) -> String { + let mut out = format!("ref={}", reff.0); + if let Some(values) = ref_store.get(&reff) { + out.push(' '); + out.push('['); + out.push_str(&format_raw_values(values)); + out.push(']'); + } + out +} + +fn format_raw_values(values: &[Value]) -> String { + let v0 = values.first().map(|v| v.0).unwrap_or(0); + let v1 = values.get(1).map(|v| v.0).unwrap_or(0); + let v2 = values.get(2).map(|v| v.0).unwrap_or(0); + let v3 = values.get(3).map(|v| v.0).unwrap_or(0); + format!("vals={v0},{v1},{v2},{v3}") +} + +fn decode_with_registry( + interface_id: &InterfaceId, + values: &[Value], + decode_mode: DecodeMode, +) -> Option { + if decode_mode == DecodeMode::None { + return None; + } + let map = MERMAID_DECODERS.get_or_init(|| Mutex::new(HashMap::new())); + let map = map.lock().ok()?; + let decoder = map.get(interface_id)?.clone(); + decoder(values) +} + +fn decode_with_default(values: &[Value], decode_mode: DecodeMode) -> Option { + if decode_mode == DecodeMode::None { + return None; + } + let slot = MERMAID_DEFAULT_DECODER.get_or_init(|| Mutex::new(None)); + let slot = slot.lock().ok()?; + let decoder = slot.as_ref()?.clone(); + decoder(values) +} + +fn apply_ref_mutations( + pid: ProcessId, + effect: &WitLedgerEffect, + ref_store: &mut HashMap>, + ref_state: &mut HashMap, +) { + match effect { + WitLedgerEffect::NewRef { + size, + ret: WitEffectOutput::Resolved(ref_id), + } => { + let size_words = *size; + let size_elems = size_words * REF_PUSH_WIDTH; + ref_store.insert(*ref_id, vec![Value(0); size_elems]); + ref_state.insert(pid, (*ref_id, 0, size_words)); + } + WitLedgerEffect::RefPush { vals } => { + if let Some((ref_id, offset, _size_words)) = ref_state.get_mut(&pid) + && let Some(store) = ref_store.get_mut(ref_id) + { + let elem_offset = *offset; + for (i, val) in vals.iter().enumerate() { + if let Some(pos) = store.get_mut(elem_offset + i) { + *pos = *val; + } + } + *offset = elem_offset + REF_PUSH_WIDTH; + } + } + WitLedgerEffect::RefWrite { reff, offset, vals } => { + if let Some(store) = ref_store.get_mut(reff) { + let elem_offset = offset * REF_WRITE_WIDTH; + for (i, val) in vals.iter().enumerate() { + if let Some(slot) = store.get_mut(elem_offset + i) { + *slot = *val; + } + } + } + } + _ => {} + } +} + +fn update_handler_targets( + pid: ProcessId, + effect: &WitLedgerEffect, + handler_targets: &mut HashMap, + handler_interfaces: &mut HashMap, +) { + if let WitLedgerEffect::GetHandlerFor { + handler_id, + interface_id, + } = effect + && let WitEffectOutput::Resolved(handler_id) = handler_id + { + 
handler_targets.insert(pid, *handler_id); + handler_interfaces.insert(*handler_id, *interface_id); + } +} diff --git a/interleaving/starstream-runtime/tests/integration.rs b/interleaving/starstream-runtime/tests/integration.rs new file mode 100644 index 00000000..7b2f22ba --- /dev/null +++ b/interleaving/starstream-runtime/tests/integration.rs @@ -0,0 +1,277 @@ +use starstream_interleaving_spec::{Hash, InterfaceId, Ledger}; +use starstream_runtime::{ + UnprovenTransaction, poseidon_program_hash, register_mermaid_decoder, wasm_module, +}; +use std::marker::PhantomData; + +fn interface_id(a: u64, b: u64, c: u64, d: u64) -> InterfaceId { + Hash([a, b, c, d], PhantomData) +} + +#[test] +fn test_runtime_simple_effect_handlers() { + register_mermaid_decoder(interface_id(1, 0, 0, 0), |values| { + let v0 = values.first()?.0; + Some(format!("val={v0}")) + }); + let utxo_bin = wasm_module!({ + let (_init_ref, caller) = call activation(); + + let (caller_hash_a, caller_hash_b, caller_hash_c, caller_hash_d) = call get_program_hash(caller); + let (script_hash_a, script_hash_b, script_hash_c, script_hash_d) = call get_program_hash(1); + + assert_eq caller_hash_a, script_hash_a; + assert_eq caller_hash_b, script_hash_b; + assert_eq caller_hash_c, script_hash_c; + assert_eq caller_hash_d, script_hash_d; + + let req = call new_ref(1); + call ref_push(42, 0, 0, 0); + + let resp = call call_effect_handler(1, 0, 0, 0, req); + let (resp_val, _b, _c, _d) = call ref_get(resp, 0); + + assert_eq resp_val, 1; + call yield_(resp); + }); + + let (utxo_hash_limb_a, utxo_hash_limb_b, utxo_hash_limb_c, utxo_hash_limb_d) = + hash_program(&utxo_bin); + + let coord_bin = wasm_module!({ + call install_handler(1, 0, 0, 0); + + let init_val = call new_ref(1); + + call ref_push(0, 0, 0, 0); + + let _utxo_id = call new_utxo( + const(utxo_hash_limb_a), + const(utxo_hash_limb_b), + const(utxo_hash_limb_c), + const(utxo_hash_limb_d), + init_val + ); + + let (req, _caller) = call resume(0, init_val); + let (req_val, _b, _c, _d) = call ref_get(req, 0); + + assert_eq req_val, 42; + + let resp = call new_ref(1); + call ref_push(1, 0, 0, 0); + + let (_ret, _caller2) = call resume(0, resp); + + call uninstall_handler(1, 0, 0, 0); + call return_(); + }); + + print_wat("simple/utxo", &utxo_bin); + print_wat("simple/coord", &coord_bin); + + let programs = vec![utxo_bin, coord_bin.clone()]; + + let tx = UnprovenTransaction { + inputs: vec![], + input_states: vec![], + input_ownership: vec![], + programs, + is_utxo: vec![true, false], + entrypoint: 1, + }; + + let proven_tx = tx.prove().unwrap(); + + let ledger = Ledger::new(); + + let ledger = ledger.apply_transaction(&proven_tx).unwrap(); + + assert_eq!(ledger.utxos.len(), 1); +} + +#[test] +fn test_runtime_effect_handlers_cross_calls() { + register_mermaid_decoder(interface_id(1, 2, 3, 4), |values| { + let disc = values.first()?.0; + let v1 = values.get(1).map(|v| v.0).unwrap_or(0); + let label = match disc { + 1 => format!("disc=forward num_ref={v1}"), + 2 => format!("disc=stop num_ref={v1}"), + _ => return None, + }; + Some(label) + }); + // this test emulates a coordination script acting as a middle-man for a channel-like flow + // + // utxo1 sends numbers, by encoding the request as (1, arg) + // + // the coord script recognizes 1 as a request to forward the message (like + // an enum), and sends the arg utxo2. 
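+    // (each request ref encodes (disc, num_ref): disc=1 means "forward num_ref
+    // to the +1 service", disc=2 means "stop", matching the mermaid decoder above)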
+ // + // utxo2 gets the new message, and answers with x+1 + // + // coord manages the hand-out of that value to utxo1 again + // + // TODO: + // - each coroutine allocates a new ref each time, this is not as efficient + // - coord should check that the answer it receives actually comes from + // the right process (or maybe this should be an optional arg to resume and be enforced by the circuit?) + let utxo1_bin = wasm_module!({ + let (_init_ref, _caller) = call activation(); + + let x0 = 99; + let n = 5; + let i0 = 0; + let x = x0; + let i = i0; + + // Call the +1 service n times, then send disc=2 to break. + loop { + break_if i == n; + + // Allocate a ref for the number, then send a nested ref message (disc, num_ref). + let num_ref = call new_ref(1); + call ref_push(x, 0, 0, 0); + + let req = call new_ref(1); + call ref_push(1, num_ref, 0, 0); + + let resp = call call_effect_handler(1, 2, 3, 4, req); + let (y, _b, _c, _d) = call ref_get(resp, 0); + let expected = add x, 1; + assert_eq y, expected; + + set x = y; + set i = add i, 1; + continue; + } + + let stop_num_ref = call new_ref(1); + call ref_push(x, 0, 0, 0); + let stop = call new_ref(1); + call ref_push(2, stop_num_ref, 0, 0); + let _resp_stop = call call_effect_handler(1, 2, 3, 4, stop); + + call yield_(stop); + }); + + let utxo2_bin = wasm_module!({ + let (init_ref, _caller) = call activation(); + let req = init_ref; + + // Serve x -> x+1 for each incoming request, writing back into the same ref. + loop { + let (x, _b, _c, _d) = call ref_get(req, 0); + let y = add x, 1; + call ref_write(req, 0, y, 0, 0, 0); + call yield_(req); + let (next_req, _caller2) = call activation(); + set req = next_req; + continue; + } + }); + + let (utxo1_hash_limb_a, utxo1_hash_limb_b, utxo1_hash_limb_c, utxo1_hash_limb_d) = + hash_program(&utxo1_bin); + + let (utxo2_hash_limb_a, utxo2_hash_limb_b, utxo2_hash_limb_c, utxo2_hash_limb_d) = + hash_program(&utxo2_bin); + + let coord_bin = wasm_module!({ + call install_handler(1, 2, 3, 4); + + let init_val = call new_ref(1); + call ref_push(0, 0, 0, 0); + + let utxo_id1 = call new_utxo( + const(utxo1_hash_limb_a), + const(utxo1_hash_limb_b), + const(utxo1_hash_limb_c), + const(utxo1_hash_limb_d), + init_val + ); + + let utxo_id2 = call new_utxo( + const(utxo2_hash_limb_a), + const(utxo2_hash_limb_b), + const(utxo2_hash_limb_c), + const(utxo2_hash_limb_d), + init_val + ); + + // Start utxo1 and then route messages until disc=2. 
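+        // resume returns (message_ref, sender_pid); sender_pid is the process that
+        // handed control back (here utxo1), which is replied to via `resume(caller1, ...)`.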
+ let (req0, caller0) = call resume(utxo_id1, init_val); + let req = req0; + let caller1 = caller0; + + loop { + let (disc, num_ref, _c, _d) = call ref_get(req, 0); + if disc == 2 { + let (_ret_stop, _caller_stop) = call resume(caller1, num_ref); + } + break_if disc == 2; + + // coord -> utxo2 (mutates num_ref in place) + let (resp2, _caller2) = call resume(utxo_id2, num_ref); + + // coord -> utxo1, which will resume the handler again + let (req_next, caller_next) = call resume(caller1, resp2); + set req = req_next; + set caller1 = caller_next; + continue; + } + + call uninstall_handler(1, 2, 3, 4); + call return_(); + }); + + print_wat("cross/utxo1", &utxo1_bin); + print_wat("cross/utxo2", &utxo2_bin); + print_wat("cross/coord", &coord_bin); + + let programs = vec![utxo1_bin.clone(), utxo2_bin.clone(), coord_bin.clone()]; + + let tx = UnprovenTransaction { + inputs: vec![], + input_states: vec![], + input_ownership: vec![], + programs, + is_utxo: vec![true, true, false], + entrypoint: 2, + }; + + let proven_tx = tx.prove().unwrap(); + + let ledger = Ledger::new(); + + let ledger = ledger.apply_transaction(&proven_tx).unwrap(); + + assert_eq!(ledger.utxos.len(), 2); +} + +fn hash_program(utxo_bin: &Vec) -> (i64, i64, i64, i64) { + let limbs = poseidon_program_hash(utxo_bin); + let utxo_hash_limb_a = limbs[0] as i64; + let utxo_hash_limb_b = limbs[1] as i64; + let utxo_hash_limb_c = limbs[2] as i64; + let utxo_hash_limb_d = limbs[3] as i64; + + ( + utxo_hash_limb_a, + utxo_hash_limb_b, + utxo_hash_limb_c, + utxo_hash_limb_d, + ) +} + +fn print_wat(name: &str, wasm: &[u8]) { + if std::env::var_os("DEBUG_WAT").is_none() { + return; + } + + match wasmprinter::print_bytes(wasm) { + Ok(wat) => eprintln!("--- WAT: {name} ---\n{wat}"), + Err(err) => eprintln!("--- WAT: {name} (failed: {err}) ---"), + } +} diff --git a/interleaving/starstream-runtime/tests/multi_tx_continuation.rs b/interleaving/starstream-runtime/tests/multi_tx_continuation.rs new file mode 100644 index 00000000..f8944769 --- /dev/null +++ b/interleaving/starstream-runtime/tests/multi_tx_continuation.rs @@ -0,0 +1,169 @@ +use starstream_interleaving_spec::{Ledger, UtxoId, Value}; +use starstream_runtime::{ + UnprovenTransaction, poseidon_program_hash, test_support::wasm_dsl, wasm_module, +}; + +fn hash_program(wasm: &Vec) -> (i64, i64, i64, i64) { + let limbs = poseidon_program_hash(wasm); + let a = limbs[0] as i64; + let b = limbs[1] as i64; + let c = limbs[2] as i64; + let d = limbs[3] as i64; + + (a, b, c, d) +} + +fn print_wat(name: &str, wasm: &[u8]) { + if std::env::var_os("DEBUG_WAT").is_none() { + return; + } + + match wasmprinter::print_bytes(wasm) { + Ok(wat) => eprintln!("--- WAT: {name} ---\n{wat}"), + Err(err) => eprintln!("--- WAT: {name} (failed: {err}) ---"), + } +} + +fn print_ledger(label: &str, ledger: &Ledger) { + eprintln!("--- Ledger: {label} ---"); + eprintln!("utxos: {}", ledger.utxos.len()); + let mut utxos: Vec<_> = ledger.utxos.iter().collect(); + utxos.sort_by_key(|(id, _)| (id.contract_hash.0, id.nonce)); + for (id, entry) in utxos { + eprintln!( + " utxo hash={} nonce={} pc={} globals={:?}", + format!("{:?}", id.contract_hash), + id.nonce, + entry.state.pc, + entry.state.globals + ); + } + eprintln!("ownership: {:?}", ledger.ownership_registry); + eprintln!("--- /Ledger: {label} ---"); +} + +#[test] +fn test_multi_tx_accumulator_global() { + let mut builder = wasm_dsl::ModuleBuilder::new(); + // global 0 = gpc, global 1 = acc + builder.add_global_i64(0, true); + builder.add_global_i64(0, true); + let 
utxo_bin = wasm_module!(builder, { + let (state_ref, _caller) = call activation(); + let (disc, arg, _b, _c) = call ref_get(state_ref, 0); + if disc == 1 { + let curr = global_get 1; + let next = add curr, arg; + set_global 1 = next; + let pc = global_get 0; + let next_pc = add pc, 1; + set_global 0 = next_pc; + call ref_write(state_ref, 0, next, 0, 0, 0); + } + let resp = call new_ref(1); + let acc = global_get 1; + call ref_push(acc, 0, 0, 0); + call yield_(resp); + }); + + let (utxo_hash_a, utxo_hash_b, utxo_hash_c, utxo_hash_d) = hash_program(&utxo_bin); + + let coord_bin = wasm_module!({ + let init_ref = call new_ref(1); + call ref_push(0, 0, 0, 0); + + let utxo_id = call new_utxo( + const(utxo_hash_a), + const(utxo_hash_b), + const(utxo_hash_c), + const(utxo_hash_d), + init_ref + ); + + let req = call new_ref(1); + call ref_push(1, 5, 0, 0); + let (resp, _caller) = call resume(utxo_id, req); + let (val, _b, _c, _d) = call ref_get(resp, 0); + assert_eq val, 5; + call return_(); + }); + + let coord2_bin = wasm_module!({ + let req = call new_ref(1); + call ref_push(1, 7, 0, 0); + let (resp, _caller) = call resume(0, req); + let (val, _b, _c, _d) = call ref_get(resp, 0); + assert_eq val, 12; + call return_(); + }); + + let coord3_bin = wasm_module!({ + let req = call new_ref(1); + call ref_push(2, 0, 0, 0); + let (resp, _caller) = call resume(0, req); + let (val, _b, _c, _d) = call ref_get(resp, 0); + assert_eq val, 12; + call return_(); + }); + + print_wat("globals/utxo", &utxo_bin); + print_wat("globals/coord1", &coord_bin); + print_wat("globals/coord2", &coord2_bin); + + let tx1 = UnprovenTransaction { + inputs: vec![], + input_states: vec![], + input_ownership: vec![], + programs: vec![utxo_bin.clone(), coord_bin.clone()], + is_utxo: vec![true, false], + entrypoint: 1, + }; + + let proven_tx1 = tx1.prove().unwrap(); + let mut ledger = Ledger::new(); + ledger = ledger.apply_transaction(&proven_tx1).unwrap(); + print_ledger("after tx1", &ledger); + + let input_id: UtxoId = ledger.utxos.keys().next().cloned().unwrap(); + assert_eq!(input_id.nonce, 0); + assert_eq!( + ledger.utxos[&input_id].state.globals, + vec![Value(1), Value(5)] + ); + + let tx2 = UnprovenTransaction { + inputs: vec![input_id.clone()], + input_states: vec![ledger.utxos[&input_id].state.clone()], + input_ownership: vec![None], + programs: vec![utxo_bin.clone(), coord2_bin], + is_utxo: vec![true, false], + entrypoint: 1, + }; + + let proven_tx2 = tx2.prove().unwrap(); + ledger = ledger.apply_transaction(&proven_tx2).unwrap(); + print_ledger("after tx2", &ledger); + + let output_id: UtxoId = ledger.utxos.keys().next().cloned().unwrap(); + assert_eq!(output_id.nonce, 1); + let globals = &ledger.utxos[&output_id].state.globals; + assert_eq!(globals, &[Value(2), Value(12)]); + + let tx3 = UnprovenTransaction { + inputs: vec![output_id.clone()], + input_states: vec![ledger.utxos[&output_id].state.clone()], + input_ownership: vec![None], + programs: vec![utxo_bin, coord3_bin], + is_utxo: vec![true, false], + entrypoint: 1, + }; + + let proven_tx3 = tx3.prove().unwrap(); + ledger = ledger.apply_transaction(&proven_tx3).unwrap(); + print_ledger("after tx3", &ledger); + + let output_id3: UtxoId = ledger.utxos.keys().next().cloned().unwrap(); + assert_eq!(output_id3, output_id); + let globals = &ledger.utxos[&output_id3].state.globals; + assert_eq!(globals, &[Value(2), Value(12)]); +} diff --git a/interleaving/starstream-runtime/tests/test_dex_swap_flow.rs b/interleaving/starstream-runtime/tests/test_dex_swap_flow.rs new file 
mode 100644 index 00000000..fdcf7484 --- /dev/null +++ b/interleaving/starstream-runtime/tests/test_dex_swap_flow.rs @@ -0,0 +1,358 @@ +use starstream_interleaving_spec::{Ledger, UtxoId, Value}; +use starstream_runtime::{ + UnprovenTransaction, poseidon_program_hash, register_mermaid_default_decoder, + register_mermaid_process_labels, test_support::wasm_dsl, wasm_module, +}; + +fn hash_program(wasm: &Vec) -> (i64, i64, i64, i64) { + let limbs = poseidon_program_hash(wasm); + ( + limbs[0] as i64, + limbs[1] as i64, + limbs[2] as i64, + limbs[3] as i64, + ) +} + +fn print_wat(name: &str, wasm: &[u8]) { + if std::env::var_os("DEBUG_WAT").is_none() { + return; + } + + match wasmprinter::print_bytes(wasm) { + Ok(wat) => eprintln!("--- WAT: {name} ---\n{wat}"), + Err(err) => eprintln!("--- WAT: {name} (failed: {err}) ---"), + } +} + +#[test] +fn test_dex_swap_flow() { + register_mermaid_default_decoder(|values| { + let disc = values.first()?.0; + let arg1 = values.get(1).map(|v| v.0).unwrap_or(0); + let arg2 = values.get(2).map(|v| v.0).unwrap_or(0); + let label = match disc { + 1 => "start_swap".to_string(), + 2 => format!("add_token token_id={arg1} dy={arg2}"), + 3 => format!("remove_token token_id={arg1}"), + 4 => "end_swap".to_string(), + 101 => "token_get_amount".to_string(), + 102 => format!("token_bind owner={arg1}"), + _ => return None, + }; + Some(label) + }); + + let mut token_builder = wasm_dsl::ModuleBuilder::new(); + token_builder.add_global_i64(-1, true); + let token_bin = wasm_module!(token_builder, { + let uninit = const(-1); + let curr = global_get 0; + if curr == uninit { + let (init_ref, _init_caller) = call init(); + let (amt, _b0, _c0, _d0) = call ref_get(init_ref, 0); + set_global 0 = amt; + } + let (req, _caller_id) = call activation(); + + loop { + let (disc, arg, _b, _c) = call ref_get(req, 0); + + if disc == 102 { + call bind(arg); + } + + let resp = call new_ref(1); + + if disc == 101 { + let amt = global_get 0; + call ref_push(amt, 0, 0, 0); + } + + if disc == 102 { + call ref_push(0, 0, 0, 0); + } + + call yield_(resp); + let (next_req, _caller2) = call activation(); + set req = next_req; + continue; + } + }); + + let (token_hash_a, token_hash_b, token_hash_c, token_hash_d) = hash_program(&token_bin); + + let coord_swap_bin = wasm_module!({ + let utxo_id = const(0); + let token_y_id = const(1); + let token_x_id = const(2); + + // start_swap + let start = call new_ref(1); + call ref_push(1, 0, 0, 0); + let (resp_start, caller) = call resume(utxo_id, start); + let caller_next = caller; + + // read token_y amount + let get_amt = call new_ref(1); + call ref_push(101, 0, 0, 0); + let (resp_amt, _caller_amt) = call resume(token_y_id, get_amt); + let (dy, _b0, _c0, _d0) = call ref_get(resp_amt, 0); + + // add_token(token_y_id, dy) + let add = call new_ref(1); + call ref_push(2, token_y_id, dy, 0); + let (resp_add, caller) = call resume(caller_next, add); + let caller_next = caller; + + // remove_token(token_x_id) -> dx + let remove = call new_ref(1); + call ref_push(3, token_x_id, 0, 0); + let (resp_remove, caller) = call resume(caller_next, remove); + let caller_next = caller; + let (dx, _b1, _c1, _d1) = call ref_get(resp_remove, 0); + + // finalize token_x process in tx_swap without mutating it + let read_x = call new_ref(1); + call ref_push(101, 0, 0, 0); + let (_resp_x, _caller_x) = call resume(token_x_id, read_x); + + // end_swap (k must match) + let end = call new_ref(1); + call ref_push(4, 0, 0, 0); + let (resp_end, _caller_end) = call resume(caller_next, end); + let 
(_k_val, _b2, _c2, _d2) = call ref_get(resp_end, 0); + call return_(); + }); + + let (coord_hash_a, coord_hash_b, coord_hash_c, coord_hash_d) = hash_program(&coord_swap_bin); + + let mut builder = wasm_dsl::ModuleBuilder::new(); + // global 0 = x, global 1 = y, global 2 = k_saved, global 3 = in_swap + // global 4..7 = coord hash limbs + builder.add_global_i64(10, true); + builder.add_global_i64(20, true); + builder.add_global_i64(0, true); + builder.add_global_i64(0, true); + builder.add_global_i64(coord_hash_a, false); + builder.add_global_i64(coord_hash_b, false); + builder.add_global_i64(coord_hash_c, false); + builder.add_global_i64(coord_hash_d, false); + let utxo_bin = wasm_module!(builder, { + let (state_ref, caller_id) = call activation(); + let req = state_ref; + let caller = caller_id; + let caller_auth = caller_id; + + loop { + let (disc, token_id, dy, _c) = call ref_get(req, 0); + + if disc == 1 { + let (_ch_a, _ch_b, _ch_c, _ch_d) = call get_program_hash(caller_auth); + let _exp_a = global_get 4; + let _exp_b = global_get 5; + let _exp_c = global_get 6; + let _exp_d = global_get 7; + + set_global 3 = 1; + + let x = global_get 0; + let y = global_get 1; + let k = mul x, y; + set_global 2 = k; + } + + if disc == 2 { + let y = global_get 1; + let next_y = add y, dy; + set_global 1 = next_y; + } + + if disc == 3 { + call unbind(token_id); + } + + let resp = call new_ref(1); + + if disc == 1 { + call ref_push(0, 0, 0, 0); + } + + if disc == 2 { + call ref_push(0, 0, 0, 0); + } + + if disc == 0 { + call ref_push(0, 0, 0, 0); + } + + if disc == 3 { + let x = global_get 0; + let y = global_get 1; + let k = global_get 2; + let next_x = div k, y; + let dx = sub x, next_x; + set_global 0 = next_x; + call ref_push(dx, 0, 0, 0); + } + + if disc == 4 { + let x = global_get 0; + let y = global_get 1; + let k = global_get 2; + let k_curr = mul x, y; + call ref_push(k_curr, 0, 0, 0); + set_global 3 = 0; + } + + call yield_(resp); + let (next_req, _caller_next) = call activation(); + set req = next_req; + continue; + } + }); + + let (utxo_hash_a, utxo_hash_b, utxo_hash_c, utxo_hash_d) = hash_program(&utxo_bin); + + let coord_create_bin = wasm_module!({ + let init_ref = call new_ref(1); + call ref_push(0, 0, 0, 0); + + let utxo_id = call new_utxo( + const(utxo_hash_a), + const(utxo_hash_b), + const(utxo_hash_c), + const(utxo_hash_d), + init_ref + ); + + // create token_y with amount=5 + let token_init = call new_ref(1); + call ref_push(5, 0, 0, 0); + let token_y_id = call new_utxo( + const(token_hash_a), + const(token_hash_b), + const(token_hash_c), + const(token_hash_d), + token_init + ); + + // create token_x with amount=2 + let token_x_init = call new_ref(1); + call ref_push(2, 0, 0, 0); + let token_x_id = call new_utxo( + const(token_hash_a), + const(token_hash_b), + const(token_hash_c), + const(token_hash_d), + token_x_init + ); + + // pre-bind both tokens to DEX in tx_init_pool + let bind_y = call new_ref(1); + call ref_push(102, utxo_id, 0, 0); + let (_resp_bind_y, _caller_bind_y) = call resume(token_y_id, bind_y); + + let bind_x = call new_ref(1); + call ref_push(102, utxo_id, 0, 0); + let (_resp_bind_x, _caller_bind_x) = call resume(token_x_id, bind_x); + + // finalize token_y once in tx_init_pool without changing state + let read_y = call new_ref(1); + call ref_push(101, 0, 0, 0); + let (_resp_read_y, _caller_read_y) = call resume(token_y_id, read_y); + + // finalize token_x once in tx_init_pool without changing state + let read_x = call new_ref(1); + call ref_push(101, 0, 0, 0); + let 
(_resp_read_x, _caller_read_x) = call resume(token_x_id, read_x); + + // finalize DEX once in tx_init_pool without changing state + let noop = call new_ref(1); + call ref_push(0, 0, 0, 0); + let (_resp_noop, _caller_noop) = call resume(utxo_id, noop); + call return_(); + }); + + print_wat("dex/token", &token_bin); + print_wat("dex/utxo", &utxo_bin); + print_wat("dex/coord_create", &coord_create_bin); + print_wat("dex/coord_swap", &coord_swap_bin); + + register_mermaid_process_labels(vec![ + "DEX".to_string(), + "token_x".to_string(), + "token_y".to_string(), + "coord".to_string(), + ]); + + let token_bin_2 = token_bin.clone(); + let tx_init = UnprovenTransaction { + inputs: vec![], + input_states: vec![], + input_ownership: vec![], + programs: vec![ + utxo_bin.clone(), + token_bin.clone(), + token_bin_2.clone(), + coord_create_bin, + ], + is_utxo: vec![true, true, true, false], + entrypoint: 3, + }; + + let proven_init = tx_init.prove().unwrap(); + let mut ledger = Ledger::new(); + ledger = ledger.apply_transaction(&proven_init).unwrap(); + + assert_eq!(ledger.utxos.len(), 3); + let mut dex_id: Option<_> = None; + let mut token_y_id: Option<_> = None; + let mut token_x_id: Option<_> = None; + for (id, entry) in &ledger.utxos { + if entry.state.globals.len() >= 4 { + dex_id = Some(id.clone()); + } else if entry.state.globals.len() == 1 && entry.state.globals[0] == Value(5) { + token_y_id = Some(id.clone()); + } else if entry.state.globals.len() == 1 && entry.state.globals[0] == Value(2) { + token_x_id = Some(id.clone()); + } + } + let dex_id = dex_id.unwrap(); + let token_y_id = token_y_id.unwrap(); + let token_x_id = token_x_id.unwrap(); + let swap_inputs = vec![dex_id.clone(), token_y_id.clone(), token_x_id.clone()]; + let swap_input_ownership = ledger.input_ownership_for_inputs(&swap_inputs); + + let tx_swap = UnprovenTransaction { + inputs: swap_inputs, + input_states: vec![ + ledger.utxos[&dex_id].state.clone(), + ledger.utxos[&token_y_id].state.clone(), + ledger.utxos[&token_x_id].state.clone(), + ], + input_ownership: swap_input_ownership, + programs: vec![utxo_bin, token_bin, token_bin_2, coord_swap_bin], + is_utxo: vec![true, true, true, false], + entrypoint: 3, + }; + + let proven_swap = tx_swap.prove().unwrap(); + ledger = ledger.apply_transaction(&proven_swap).unwrap(); + + assert_eq!(ledger.utxos.len(), 3); + let utxos: Vec<_> = ledger.utxos.values().collect(); + let utxo = utxos.iter().find(|u| u.state.globals.len() >= 4).unwrap(); + assert_eq!( + &utxo.state.globals[..4], + &[Value(8), Value(25), Value(200), Value(0)] + ); + let tokens: Vec<_> = utxos + .iter() + .filter(|u| u.state.globals.len() == 1) + .collect(); + assert_eq!(tokens.len(), 2); + let mut amounts: Vec<_> = tokens.iter().map(|u| u.state.globals[0]).collect(); + amounts.sort_by_key(|v| v.0); + assert_eq!(amounts, vec![Value(2), Value(5)]); +} diff --git a/interleaving/starstream-runtime/tests/wrapper_coord_test.rs b/interleaving/starstream-runtime/tests/wrapper_coord_test.rs new file mode 100644 index 00000000..82b138c9 --- /dev/null +++ b/interleaving/starstream-runtime/tests/wrapper_coord_test.rs @@ -0,0 +1,324 @@ +use starstream_interleaving_spec::{Hash, InterfaceId, Ledger}; +use starstream_runtime::{ + UnprovenTransaction, poseidon_program_hash, register_mermaid_decoder, wasm_module, +}; +use std::marker::PhantomData; + +// this test tries to encode something like a coordination script that provides a Cell interface +// +// we have the Cell implementation +// +// fn wrapper(inner: Coroutine) { +// let cells = [];
+// +// try { +// resume(inner, ()); +// } +// with Cell { +// fn new(): CellId { +// cells.push(F::ZERO); // note however that the test is simplified to a single cell to avoid needing an array +// resume cells.len(); +// } +// +// fn write(cell: CellId, val: Val) { +// cells[cell] = val; +// +// resume (); +// } +// +// fn read(cell: CellId): Val { +// resume cells[cell]; +// } +// } +// } +// +// fn inner(utxo1: Utxo1, utxo2: Utxo2) { +// let cell = raise Cell::new(); +// +// utxo1.foo(cell); +// utxo2.bar(cell); +// } +// +// utxo Utxo1 { +// fn foo(cell: CellId) / { Cell } { +// raise Cell::write(cell, 42); +// } +// } +// +// utxo Utxo2 { +// fn bar(cell: CellId) / { Cell } { +// let v = raise Cell::read(cell); +// } +// } +// +// fn main() { +// let utxo1 = Utxo1::new(); +// let utxo2 = Utxo2::new(); +// +// wrapper { +// inner(utxo1, utxo2) +// } +// } + +#[test] +fn test_runtime_wrapper_coord_newcoord_handlers() { + register_mermaid_decoder(interface_id(1, 2, 3, 4), |values| { + let disc = values.first()?.0; + let v1 = values.get(1).map(|v| v.0).unwrap_or(0); + let v2 = values.get(2).map(|v| v.0).unwrap_or(0); + let label = match disc { + 1 => "disc=new_cell".to_string(), + 2 => format!("disc=write cell={v1} value={v2}"), + 3 => format!("disc=read cell={v1}"), + 4 => "disc=end".to_string(), + 10 => "disc=ack".to_string(), + 11 => format!("disc=new_cell_resp cell={v1}"), + 12 => format!("disc=read_resp value={v1}"), + 13 => "disc=end_ack".to_string(), + _ => return None, + }; + Some(label) + }); + + let utxo1_bin = wasm_module!({ + let (init_ref, _caller) = call activation(); + let (cell_ref, _b, _c, _d) = call ref_get(init_ref, 0); + + let req = call new_ref(1); + call ref_push(2, cell_ref, 42, 0); + + let _resp = call call_effect_handler(1, 2, 3, 4, req); + + let done = call new_ref(1); + call ref_push(0, 0, 0, 0); + call yield_(done); + }); + + let utxo2_bin = wasm_module!({ + let (init_ref, _caller) = call activation(); + let (cell_ref, _b, _c, _d) = call ref_get(init_ref, 0); + + let req = call new_ref(1); + call ref_push(3, cell_ref, 0, 0); + + let resp = call call_effect_handler(1, 2, 3, 4, req); + let (_disc, val, _c2, _d2) = call ref_get(resp, 0); + assert_eq val, 42; + + let done = call new_ref(1); + call ref_push(0, 0, 0, 0); + call yield_(done); + }); + + let (utxo1_hash_limb_a, utxo1_hash_limb_b, utxo1_hash_limb_c, utxo1_hash_limb_d) = + hash_program(&utxo1_bin); + let (utxo2_hash_limb_a, utxo2_hash_limb_b, utxo2_hash_limb_c, utxo2_hash_limb_d) = + hash_program(&utxo2_bin); + + let inner_coord_bin = wasm_module!({ + let (init_ref, _caller) = call init(); + let (utxo1_id, utxo2_id, _c, _d) = call ref_get(init_ref, 0); + + let handler_id = call get_handler_for(1, 2, 3, 4); + + // new_cell + let req_new = call new_ref(1); + call ref_push(1, 0, 0, 0); + let (resp_new, _caller2) = call resume(handler_id, req_new); + let (_disc, cell_ref, _c2, _d2) = call ref_get(resp_new, 0); + + let cell_init = call new_ref(1); + call ref_push(cell_ref, 0, 0, 0); + + // utxo1 writes 42 + let (_ret1, _caller3) = call resume(utxo1_id, cell_init); + + // utxo2 reads 42 + let (_ret2, _caller4) = call resume(utxo2_id, cell_init); + + // end + let req_end = call new_ref(1); + call ref_push(4, 0, 0, 0); + let (_resp_end, _caller5) = call resume(handler_id, req_end); + + call return_(); + }); + + let (inner_hash_limb_a, inner_hash_limb_b, inner_hash_limb_c, inner_hash_limb_d) = + hash_program(&inner_coord_bin); + + let wrapper_coord_bin = wasm_module!({ + let (init_ref, _caller) = call init(); + 
let (inner_id, inner_init, _c, _d) = call ref_get(init_ref, 0); + + call install_handler(1, 2, 3, 4); + + let (req0, caller0) = call resume(inner_id, 0); + let req = req0; + let caller = caller0; + let handled = const(0); + let cell_val = const(0); + // Single-cell wrapper for this test: ignore cell_ref and return a fixed cell id. + let cell_id = const(1); + + loop { + set handled = const(0); + let (disc, cell_ref, value, _d2) = call ref_get(req, 0); + + if disc == 4 { + let resp = call new_ref(1); + call ref_push(13, 0, 0, 0); + let (_req_next, _caller_next) = call resume(caller, resp); + set handled = const(2); + } + + if disc == 1 { + let resp = call new_ref(1); + call ref_push(11, cell_id, 0, 0); + + let (req_next, caller_next) = call resume(caller, resp); + set req = req_next; + set caller = caller_next; + set handled = const(1); + } + + if disc == 2 { + set cell_val = value; + let resp = call new_ref(1); + call ref_push(10, 0, 0, 0); + let (req_next, caller_next) = call resume(caller, resp); + set req = req_next; + set caller = caller_next; + set handled = const(1); + } + + // disc == 3 (read) + if handled == 0 { + let resp = call new_ref(1); + call ref_push(12, cell_val, 0, 0); + let (req_next, caller_next) = call resume(caller, resp); + set req = req_next; + set caller = caller_next; + set handled = const(1); + } + + break_if handled == 2; + continue_if handled == 1; + } + + call uninstall_handler(1, 2, 3, 4); + call return_(); + }); + + print_wat("wrapper", &wrapper_coord_bin); + + let (wrapper_hash_limb_a, wrapper_hash_limb_b, wrapper_hash_limb_c, wrapper_hash_limb_d) = + hash_program(&wrapper_coord_bin); + + // Bake the wrapper's program hash constants into the driver. + let driver_coord_bin = wasm_module!({ + let init_val = call new_ref(1); + call ref_push(0, 0, 0, 0); + + let utxo1_id = call new_utxo( + const(utxo1_hash_limb_a), + const(utxo1_hash_limb_b), + const(utxo1_hash_limb_c), + const(utxo1_hash_limb_d), + init_val + ); + + let utxo2_id = call new_utxo( + const(utxo2_hash_limb_a), + const(utxo2_hash_limb_b), + const(utxo2_hash_limb_c), + const(utxo2_hash_limb_d), + init_val + ); + + let inner_init = call new_ref(1); + call ref_push(utxo1_id, utxo2_id, 0, 0); + + let inner_id = call new_coord( + const(inner_hash_limb_a), + const(inner_hash_limb_b), + const(inner_hash_limb_c), + const(inner_hash_limb_d), + inner_init + ); + + let wrapper_init = call new_ref(1); + call ref_push(inner_id, inner_init, 0, 0); + + let wrapper_id = call new_coord( + const(wrapper_hash_limb_a), + const(wrapper_hash_limb_b), + const(wrapper_hash_limb_c), + const(wrapper_hash_limb_d), + wrapper_init + ); + + let (_ret, _caller) = call resume(wrapper_id, wrapper_init); + call return_(); + }); + + let programs = vec![ + utxo1_bin.clone(), + utxo2_bin.clone(), + inner_coord_bin.clone(), + wrapper_coord_bin.clone(), + driver_coord_bin.clone(), + ]; + + let tx = UnprovenTransaction { + inputs: vec![], + input_states: vec![], + input_ownership: vec![], + programs, + is_utxo: vec![true, true, false, false, false], + entrypoint: 4, + }; + + let proven_tx = match tx.prove() { + Ok(tx) => tx, + Err(err) => { + if std::env::var_os("DEBUG_TRACE").is_some() { + eprintln!("prove failed: {err:?}"); + } + panic!("{err:?}"); + } + }; + let ledger = Ledger::new(); + let ledger = ledger.apply_transaction(&proven_tx).unwrap(); + assert_eq!(ledger.utxos.len(), 2); +} + +fn hash_program(utxo_bin: &Vec<u8>) -> (i64, i64, i64, i64) { + let limbs = poseidon_program_hash(utxo_bin); + let utxo_hash_limb_a = limbs[0] as i64; + let
utxo_hash_limb_b = limbs[1] as i64; + let utxo_hash_limb_c = limbs[2] as i64; + let utxo_hash_limb_d = limbs[3] as i64; + + ( + utxo_hash_limb_a, + utxo_hash_limb_b, + utxo_hash_limb_c, + utxo_hash_limb_d, + ) +} + +fn print_wat(name: &str, wasm: &[u8]) { + if std::env::var_os("DEBUG_WAT").is_none() { + return; + } + + match wasmprinter::print_bytes(wasm) { + Ok(wat) => eprintln!("--- WAT: {name} ---\n{wat}"), + Err(err) => eprintln!("--- WAT: {name} (failed: {err}) ---"), + } +} + +fn interface_id(a: u64, b: u64, c: u64, d: u64) -> InterfaceId { + Hash([a, b, c, d], PhantomData) +} diff --git a/mock-ledger/old/Cargo.toml b/mock-ledger/old/Cargo.toml index f6660cf2..aaa34283 100644 --- a/mock-ledger/old/Cargo.toml +++ b/mock-ledger/old/Cargo.toml @@ -2,5 +2,3 @@ name = "mock-ledger" version = "0.1.0" edition = "2021" - -[dependencies]
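For reference, the arithmetic the DEX UTXO in the swap test above encodes is the usual constant-product rule: k = x * y is saved when the swap opens (disc 1), the deposit adds dy to y (disc 2), the withdrawal recomputes x = k / y and pays out dx (disc 3), and the closing check recomputes x * y (disc 4). A minimal plain-Rust sketch of that bookkeeping, independent of the wasm DSL and using an invented helper name (`constant_product_swap`), not anything from the runtime:

// Illustrative sketch only: mirrors the DEX UTXO's global updates (x, y, k_saved)
// with plain integers. `constant_product_swap` is a made-up name, not runtime API.
fn constant_product_swap(x: i64, y: i64, dy: i64) -> (i64, i64, i64) {
    let k = x * y;           // disc == 1: save k when the swap opens
    let y_next = y + dy;     // disc == 2: deposit dy of token_y into the pool
    let x_next = k / y_next; // disc == 3: integer division, like the wasm `div`
    let dx = x - x_next;     //            token_x paid out to the swapper
    (x_next, y_next, dx)
}

#[test]
fn constant_product_matches_dex_assertions() {
    // Pool starts at x = 10, y = 20 (k = 200); the swap deposits dy = 5.
    let (x_next, y_next, dx) = constant_product_swap(10, 20, 5);
    // Matches the asserted DEX globals [8, 25, 200, 0] and the dx = 2 token_x payout.
    assert_eq!((x_next, y_next, dx), (8, 25, 2));
}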
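Similarly, stripped of the resume/yield plumbing, the wrapper in wrapper_coord_test.rs is a single-cell store serving new/write/read/end requests from the inner coordination script (request discriminants 1-4, answered with 11/10/12/13). A plain-Rust sketch of that handler; the names `CellHandler` and `SingleCell` are invented here purely for illustration:

// Illustrative sketch of the single-cell wrapper; these names are not part of the runtime.
trait CellHandler {
    fn new_cell(&mut self) -> u64;
    fn write(&mut self, cell: u64, val: i64);
    fn read(&self, cell: u64) -> i64;
}

// The wrapper in the test keeps exactly one cell, so the cell id is fixed to 1
// and the id argument is ignored on write/read.
struct SingleCell {
    val: i64,
}

impl CellHandler for SingleCell {
    fn new_cell(&mut self) -> u64 {
        1
    }
    fn write(&mut self, _cell: u64, val: i64) {
        self.val = val;
    }
    fn read(&self, _cell: u64) -> i64 {
        self.val
    }
}

#[test]
fn single_cell_handler_roundtrip() {
    // utxo1 writes 42, utxo2 reads it back, as in test_runtime_wrapper_coord_newcoord_handlers.
    let mut cells = SingleCell { val: 0 };
    let cell = cells.new_cell();
    cells.write(cell, 42);
    assert_eq!(cells.read(cell), 42);
}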